From d6ee0752539c882a2407998ce5a77127969f0feb Mon Sep 17 00:00:00 2001
From: Junyi Liu
Date: Fri, 17 Oct 2025 14:21:49 -0400
Subject: [PATCH] bump version to 25.3.21

---
httpproxy/cloudsim/pom.xml | 2 +- .../java/oracle/nosql/cloudsim/CloudSim.java | 2 +- httpproxy/httpproxy/pom.xml | 2 +- .../main/java/oracle/nosql/proxy/Config.java | 66 +- .../java/oracle/nosql/proxy/DataService.java | 1217 ++- .../nosql/proxy/DataServiceHandler.java | 103 +- .../nosql/proxy/ExcessiveUsageException.java | 2 +- .../nosql/proxy/JsonCollSerializer.java | 8 +- .../oracle/nosql/proxy/KVHandleStats.java | 2 +- .../nosql/proxy/LimiterManagerStats.java | 2 +- .../java/oracle/nosql/proxy/MonitorStats.java | 21 +- .../main/java/oracle/nosql/proxy/Proxy.java | 42 +- .../java/oracle/nosql/proxy/ProxyLogger.java | 2 +- .../java/oracle/nosql/proxy/ProxyMain.java | 2 +- .../nosql/proxy/ProxySerialization.java | 2 +- .../oracle/nosql/proxy/RequestException.java | 2 +- .../oracle/nosql/proxy/RequestLimits.java | 2 +- .../oracle/nosql/proxy/ValueSerializer.java | 11 +- .../proxy/audit/ProxyAuditContextBuilder.java | 2 +- .../nosql/proxy/audit/ProxyAuditManager.java | 2 +- .../nosql/proxy/cloud/CacheUpdateService.java | 2 +- .../nosql/proxy/cloud/CloudDataService.java | 16 +- .../nosql/proxy/cloud/HealthService.java | 2 +- .../nosql/proxy/cloud/LogControlService.java | 2 +- .../nosql/proxy/cloud/ProxyHealthSource.java | 2 +- .../nosql/proxy/filter/FilterHandler.java | 112 +- .../nosql/proxy/filter/FilterService.java | 150 +- .../oracle/nosql/proxy/kv/KVDataService.java | 5 +- .../nosql/proxy/kv/KVTenantManager.java | 2 +- .../oracle/nosql/proxy/kv/LoginService.java | 2 +- .../oracle/nosql/proxy/kv/LogoutService.java | 2 +- .../nosql/proxy/kv/StoreSecurityService.java | 2 +- .../nosql/proxy/kv/TokenRenewService.java | 2 +- .../nosql/proxy/protocol/BinaryProtocol.java | 2 +- .../nosql/proxy/protocol/ByteInputStream.java | 2 +- .../proxy/protocol/ByteOutputStream.java | 2 +- .../nosql/proxy/protocol/HttpConstants.java | 16 +- .../nosql/proxy/protocol/JsonProtocol.java | 290 +- .../nosql/proxy/protocol/NsonProtocol.java | 68 +- .../nosql/proxy/protocol/PackedInteger.java | 2 +- .../oracle/nosql/proxy/protocol/Protocol.java | 33 +- .../proxy/protocol/SerializationUtil.java | 2 +- .../oracle/nosql/proxy/rest/ErrorCode.java | 6 +- .../nosql/proxy/rest/RequestParams.java | 2 +- .../nosql/proxy/rest/RestDataService.java | 412 +- .../java/oracle/nosql/proxy/rest/UrlInfo.java | 2 +- .../rest/cloud/CloudRestDataService.java | 242 +- .../oracle/nosql/proxy/sc/CommonResponse.java | 2 +- .../proxy/sc/GetDdlWorkRequestResponse.java | 60 + .../nosql/proxy/sc/GetKmsKeyInfoResponse.java | 55 + .../nosql/proxy/sc/GetStoreResponse.java | 2 +- .../nosql/proxy/sc/GetTableResponse.java | 2 +- .../proxy/sc/GetWorkRequestResponse.java | 24 +- .../oracle/nosql/proxy/sc/IndexResponse.java | 2 +- .../nosql/proxy/sc/ListRuleResponse.java | 2 +- .../nosql/proxy/sc/ListTableInfoResponse.java | 2 +- .../nosql/proxy/sc/ListTableResponse.java | 2 +- .../proxy/sc/ListWorkRequestResponse.java | 56 +- .../nosql/proxy/sc/LocalTenantManager.java | 102 +- .../nosql/proxy/sc/ReplicaStatsResponse.java | 2 +- .../nosql/proxy/sc/SCTenantManager.java | 272 +- .../nosql/proxy/sc/TableHistoryResponse.java | 2 +- .../nosql/proxy/sc/TableUsageResponse.java | 2 +- .../oracle/nosql/proxy/sc/TableUtils.java | 113 +- .../oracle/nosql/proxy/sc/TenantManager.java | 81 +- .../proxy/sc/TenantManagerConstants.java | 2 +- .../nosql/proxy/security/AccessChecker.java | 62 +-
.../proxy/security/AccessCheckerFactory.java | 27 +- .../nosql/proxy/security/AccessContext.java | 28 +- .../proxy/util/CloudServiceTableCache.java | 2 +- .../oracle/nosql/proxy/util/ErrorManager.java | 10 +- .../proxy/util/PassThroughTableCache.java | 2 +- .../proxy/util/ProxyThreadPoolExecutor.java | 303 + .../nosql/proxy/util/ShutdownManager.java | 2 +- .../oracle/nosql/proxy/util/TableCache.java | 2 +- .../oracle/nosql/util/HostNameResolver.java | 53 + .../java/oracle/nosql/util/filter/Rule.java | 336 +- .../oracle/nosql/util/http/HttpConstants.java | 3 + .../nosql/util/ph/HealthReportAgent.java | 4 +- .../nosql/util/tmi/DdlHistoryEntry.java | 66 + .../oracle/nosql/util/tmi/KmsKeyInfo.java | 99 + .../util/tmi/ListWorkRequestsResult.java | 32 + .../java/oracle/nosql/util/tmi/TableInfo.java | 11 + .../oracle/nosql/util/tmi/WorkRequest.java | 176 + httpproxy/pom.xml | 2 +- httpproxy/tests/src/assembly/test.xml | 21 + .../oracle/nosql/proxy/AsyncLatencyTest.java | 120 + .../oracle/nosql/proxy/BadProtocolTest.java | 1879 ++++ .../oracle/nosql/proxy/ChildTableTest.java | 1588 ++++ .../oracle/nosql/proxy/ConcurrentDDLTest.java | 1329 +++ .../oracle/nosql/proxy/CreationTimeTest.java | 1520 ++++ .../java/oracle/nosql/proxy/DDosTest.java | 1053 +++ .../proxy/DistributedRateLimitingTest.java | 791 ++ .../oracle/nosql/proxy/ElasticityTest.java | 1403 +++ .../nosql/proxy/JsonCollectionTest.java | 846 ++ .../oracle/nosql/proxy/LatencyTestBase.java | 671 ++ .../java/oracle/nosql/proxy/LimitsTest.java | 1312 +++ .../oracle/nosql/proxy/MonitorStatsTest.java | 590 ++ .../oracle/nosql/proxy/MultiDeleteTest.java | 306 + .../nosql/proxy/MultiRegionTableTest.java | 675 ++ .../java/oracle/nosql/proxy/NumericTest.java | 535 ++ .../oracle/nosql/proxy/ParallelQueryTest.java | 453 + .../oracle/nosql/proxy/ProxyConfigTest.java | 159 + .../nosql/proxy/ProxyHealthSourceTest.java | 131 + .../oracle/nosql/proxy/ProxyI18NTest.java | 436 + .../java/oracle/nosql/proxy/ProxyTest.java | 3228 +++++++ .../oracle/nosql/proxy/ProxyTestBase.java | 2096 +++++ .../oracle/nosql/proxy/QueryResumeTest.java | 266 + .../java/oracle/nosql/proxy/QueryTest.java | 3091 +++++++ .../nosql/proxy/QueryThrottlingTest.java | 152 + .../oracle/nosql/proxy/RowMetadataTest.java | 1621 ++++ .../oracle/nosql/proxy/SerializationTest.java | 2295 +++++ .../nosql/proxy/ServerlessNsonTest.java | 439 + .../oracle/nosql/proxy/ServerlessTest.java | 135 + .../nosql/proxy/ServerlessTestBase.java | 643 ++ .../main/java/oracle/nosql/proxy/SslTest.java | 162 + .../oracle/nosql/proxy/SyncLatencyTest.java | 115 + .../oracle/nosql/proxy/TableCacheTest.java | 350 + .../oracle/nosql/proxy/TenantLogTest.java | 392 + .../java/oracle/nosql/proxy/TimeUtils.java | 120 + .../oracle/nosql/proxy/TimestampTest.java | 215 + .../java/oracle/nosql/proxy/WarmupTest.java | 416 + .../oracle/nosql/proxy/WriteMultipleTest.java | 1007 +++ .../oracle/nosql/proxy/filter/FilterTest.java | 722 ++ .../nosql/proxy/filter/FilterTestBase.java | 227 + .../oracle/nosql/proxy/filter/RuleTest.java | 372 + .../nosql/proxy/kv/KVNonSecureProxyTest.java | 375 + .../oracle/nosql/proxy/kv/KVProxyTest.java | 1959 +++++ .../java/oracle/nosql/proxy/kv/KVSmoke.java | 174 + .../java/oracle/nosql/proxy/kv/MRTest.java | 347 + .../nosql/proxy/rest/ChildTableTest.java | 833 ++ .../nosql/proxy/rest/ConfigurationTest.java | 327 + .../oracle/nosql/proxy/rest/FilterTest.java | 1203 +++ .../nosql/proxy/rest/FreeTableTest.java | 659 ++ .../oracle/nosql/proxy/rest/IndexTest.java | 945 ++ 
.../nosql/proxy/rest/JsonPayloadTest.java | 890 ++ .../oracle/nosql/proxy/rest/QueryTest.java | 1044 +++ .../nosql/proxy/rest/RestAPITestBase.java | 967 ++ .../oracle/nosql/proxy/rest/RestCurlTest.java | 61 + .../java/oracle/nosql/proxy/rest/RowTest.java | 1199 +++ .../oracle/nosql/proxy/rest/TableTest.java | 2502 ++++++ .../nosql/proxy/rest/ThrottleLimitTest.java | 899 ++ .../nosql/proxy/rest/WorkRequestTest.java | 453 + .../nosql/proxy/security/IAMRetryTest.java | 324 + .../nosql/proxy/security/SecureTestUtil.java | 95 + .../proxy/security/TestSignatureProvider.java | 96 + .../oracle/nosql/proxy/util/CreateStore.java | 908 ++ .../nosql/proxy/util/CreateStoreUtils.java | 332 + .../nosql/proxy/util/ElasticityTestSetup.java | 323 + .../nosql/proxy/util/FreePortLocator.java | 178 + .../oracle/nosql/proxy/util/KVLiteBase.java | 125 + .../oracle/nosql/proxy/util/PortFinder.java | 114 + .../oracle/nosql/proxy/util/TestBase.java | 377 + .../oracle/nosql/query/JsonLoaderCloud.java | 206 + .../oracle/nosql/query/PrimIndexSetup.java | 72 + .../oracle/nosql/query/PrimIndexSetup2.java | 86 + .../oracle/nosql/query/PrimIndexSetup3.java | 33 + .../oracle/nosql/query/ProxyOperation.java | 129 + .../java/oracle/nosql/query/QTCaseCloud.java | 780 ++ .../oracle/nosql/query/QTDefaultImpl.java | 317 + .../oracle/nosql/query/QTFactoryCloud.java | 34 + .../java/oracle/nosql/query/QTSuiteCloud.java | 29 + .../main/java/oracle/nosql/query/QTest.java | 187 + .../oracle/nosql/query/RowPropsSetup.java | 35 + .../oracle/nosql/query/RunQueryTests.java | 66 + .../java/oracle/nosql/query/UserTable.java | 75 + .../nosql/proxy/rest/curl_smoke_test.sh | 97 + .../resources/oracle/nosql/proxy/utf16be.json | Bin 0 -> 58 bytes .../oracle/nosql/proxy/utf16bebom.json | Bin 0 -> 60 bytes .../resources/oracle/nosql/proxy/utf16le.json | Bin 0 -> 58 bytes .../oracle/nosql/proxy/utf16lebom.json | Bin 0 -> 60 bytes .../resources/oracle/nosql/proxy/utf8.json | 1 + .../oracle/nosql/proxy/utf8_jsondata.txt | 170 + .../oracle/nosql/proxy/utf8_testdata.txt | 16 + .../resources/oracle/nosql/proxy/utf8bom.json | 1 + kvclient/pom.xml | 2 +- kvmain/pom.xml | 2 +- .../bind/serial/TupleSerialKeyCreator.java | 1 + .../com/sleepycat/collections/DataView.java | 2 +- .../main/java/com/sleepycat/je/Cursor.java | 558 +- .../main/java/com/sleepycat/je/Database.java | 7 +- .../java/com/sleepycat/je/DbInternal.java | 22 +- .../java/com/sleepycat/je/Environment.java | 52 +- .../com/sleepycat/je/EnvironmentConfig.java | 59 + .../com/sleepycat/je/EnvironmentStats.java | 16 +- .../main/java/com/sleepycat/je/JEVersion.java | 2 +- .../com/sleepycat/je/OperationResult.java | 77 +- .../com/sleepycat/je/SecondaryCursor.java | 64 +- .../com/sleepycat/je/SecondaryDatabase.java | 9 + .../com/sleepycat/je/SecondaryKeyCreator.java | 4 + .../je/SecondaryMultiKeyCreator.java | 1 + .../com/sleepycat/je/TransactionConfig.java | 43 + .../java/com/sleepycat/je/WriteOptions.java | 37 +- .../je/beforeimage/BeforeImageContext.java | 20 +- .../je/beforeimage/BeforeImageIndex.java | 100 +- .../je/beforeimage/BeforeImageLN.java | 5 +- .../je/beforeimage/BeforeImageLNLogEntry.java | 51 +- .../BeforeImageOutputWireRecord.java | 52 +- .../com/sleepycat/je/cleaner/Cleaner.java | 1 + .../com/sleepycat/je/cleaner/DataEraser.java | 1 + .../je/cleaner/ExtinctionScanner.java | 11 +- .../sleepycat/je/cleaner/FileProcessor.java | 7 +- .../java/com/sleepycat/je/cleaner/LNInfo.java | 7 + .../je/config/EnvironmentParams.java | 8 + .../je/config/RemovedProperties.java | 2 + 
.../java/com/sleepycat/je/dbi/CursorImpl.java | 403 +- .../java/com/sleepycat/je/dbi/DbTree.java | 21 +- .../com/sleepycat/je/dbi/EnvironmentImpl.java | 7 +- .../com/sleepycat/je/dbi/MemoryBudget.java | 0 .../com/sleepycat/je/dbi/WriteParams.java | 48 +- .../com/sleepycat/je/log/LogEntryType.java | 40 +- .../java/com/sleepycat/je/log/LogItem.java | 10 + .../sleepycat/je/log/entry/LNEntryInfo.java | 9 + .../sleepycat/je/log/entry/LNLogEntry.java | 252 +- .../je/log/entry/NameLNLogEntry.java | 6 +- .../je/recovery/RecoveryManager.java | 23 +- .../je/rep/BinaryProtocolException.java | 31 + .../com/sleepycat/je/rep/NetworkRestore.java | 164 +- .../je/rep/ReplicaConnectRetryException.java | 43 +- .../je/rep/ReplicaRetryException.java | 57 - .../je/rep/ReplicatedEnvironment.java | 2 +- .../sleepycat/je/rep/ReplicationConfig.java | 55 +- .../je/rep/arbiter/impl/ArbiterAcker.java | 58 +- .../je/rep/arbiter/impl/ArbiterImpl.java | 3 +- .../com/sleepycat/je/rep/impl/RepImpl.java | 4 - .../com/sleepycat/je/rep/impl/RepParams.java | 21 + .../je/rep/impl/node/DurabilityQuorum.java | 8 +- .../impl/node/ElectionStatesContinuation.java | 11 + .../sleepycat/je/rep/impl/node/Feeder.java | 49 +- .../je/rep/impl/node/FeederManager.java | 43 +- .../sleepycat/je/rep/impl/node/RepNode.java | 87 +- .../sleepycat/je/rep/impl/node/Replica.java | 11 +- .../com/sleepycat/je/rep/net/DataChannel.java | 30 + .../sleepycat/je/rep/stream/FeederFilter.java | 7 + .../je/rep/stream/FeederReplicaHandshake.java | 8 +- .../je/rep/stream/FeederReplicaSyncup.java | 8 +- .../sleepycat/je/rep/stream/FeederTxns.java | 51 +- .../je/rep/stream/InputWireRecord.java | 4 +- .../rep/stream/MatchpointSearchResults.java | 3 +- .../je/rep/stream/ReplicaFeederHandshake.java | 34 +- .../je/rep/stream/ReplicaFeederSyncup.java | 78 +- .../je/rep/subscription/ServerAuthMethod.java | 30 +- .../rep/subscription/StreamAuthenticator.java | 15 + .../subscription/SubscriptionCallback.java | 6 +- .../rep/subscription/SubscriptionConfig.java | 4 +- .../SubscriptionProcessMessageThread.java | 18 +- .../rep/subscription/SubscriptionThread.java | 26 +- .../com/sleepycat/je/rep/txn/MasterTxn.java | 10 +- .../com/sleepycat/je/rep/txn/NullTxn.java | 15 +- .../je/rep/utilint/BinaryProtocol.java | 12 +- .../je/rep/utilint/ServiceDispatcher.java | 2 +- .../je/rep/utilint/ServiceHandshake.java | 56 +- .../rep/utilint/net/AbstractDataChannel.java | 27 +- .../je/rep/utilint/net/SSLDataChannel.java | 4 + .../je/rep/utilint/net/SimpleDataChannel.java | 15 + .../main/java/com/sleepycat/je/tree/BIN.java | 125 +- .../main/java/com/sleepycat/je/tree/IN.java | 48 +- .../main/java/com/sleepycat/je/tree/LN.java | 34 +- .../java/com/sleepycat/je/tree/NameLN.java | 7 +- .../com/sleepycat/je/txn/BasicLocker.java | 5 + .../java/com/sleepycat/je/txn/LockResult.java | 6 +- .../java/com/sleepycat/je/txn/LockType.java | 30 +- .../java/com/sleepycat/je/txn/Locker.java | 5 + .../com/sleepycat/je/txn/PreparedTxn.java | 58 - .../main/java/com/sleepycat/je/txn/Txn.java | 80 +- .../java/com/sleepycat/je/txn/TxnChain.java | 5 + .../com/sleepycat/je/txn/WriteLockInfo.java | 33 +- .../com/sleepycat/je/util/TimeSupplier.java | 30 + .../je/util/verify/BtreeVerifier.java | 8 +- .../com/sleepycat/je/utilint/TestHook.java | 4 + .../sleepycat/je/utilint/TestHookAdapter.java | 5 + .../sleepycat/je/utilint/TestHookExecute.java | 9 + .../main/java/oracle/kv/KVStoreFactory.java | 2 +- kvmain/src/main/java/oracle/kv/KVVersion.java | 10 +- .../main/java/oracle/kv/KeyValueVersion.java | 26 +- 
.../main/java/oracle/kv/OperationResult.java | 20 + kvmain/src/main/java/oracle/kv/Value.java | 438 +- .../oracle/kv/impl/admin/CommandService.java | 18 + .../kv/impl/admin/CommandServiceAPI.java | 16 + .../kv/impl/admin/CommandServiceImpl.java | 20 + .../oracle/kv/impl/admin/SysTableMonitor.java | 1 + .../kv/impl/admin/TableDdlOperation.java | 4 +- .../kv/impl/admin/client/CommandShell.java | 8 +- .../kv/impl/admin/client/PlanCommand.java | 3 +- .../oracle/kv/impl/admin/plan/Planner.java | 4 + .../impl/admin/plan/TablePlanGenerator.java | 2 + .../kv/impl/admin/plan/task/AddTable.java | 17 + .../kv/impl/admin/plan/task/EvolveTable.java | 8 +- .../admin/plan/task/RepairShardQuorum.java | 12 + .../impl/api/AsyncRequestDispatcherImpl.java | 2 +- .../java/oracle/kv/impl/api/KVStoreImpl.java | 221 +- .../kv/impl/api/KeyValueVersionInternal.java | 3 +- .../kv/impl/api/RequestHandlerImpl.java | 17 +- .../oracle/kv/impl/api/bulk/BulkMultiGet.java | 1 + .../java/oracle/kv/impl/api/bulk/BulkPut.java | 57 +- .../kv/impl/api/ops/BasicDeleteHandler.java | 1 + .../kv/impl/api/ops/BasicPutHandler.java | 38 +- .../java/oracle/kv/impl/api/ops/Delete.java | 60 +- .../oracle/kv/impl/api/ops/DeleteHandler.java | 32 +- .../kv/impl/api/ops/DeleteIfVersion.java | 7 +- .../impl/api/ops/DeleteIfVersionHandler.java | 9 +- .../java/oracle/kv/impl/api/ops/Execute.java | 17 +- .../kv/impl/api/ops/IndexIterateHandler.java | 1 + .../oracle/kv/impl/api/ops/IndexScanner.java | 12 + .../api/ops/InternalOperationHandler.java | 123 +- .../kv/impl/api/ops/MultiDeleteTable.java | 33 +- .../impl/api/ops/MultiDeleteTableHandler.java | 54 +- .../api/ops/MultiKeyOperationHandler.java | 20 +- .../api/ops/MultiTableOperationHandler.java | 22 +- .../kv/impl/api/ops/OperationHandler.java | 47 +- .../kv/impl/api/ops/PutBatchHandler.java | 121 +- .../oracle/kv/impl/api/ops/PutHandler.java | 33 +- .../kv/impl/api/ops/PutIfAbsentHandler.java | 12 +- .../kv/impl/api/ops/PutIfPresentHandler.java | 10 +- .../kv/impl/api/ops/PutIfVersionHandler.java | 10 +- .../oracle/kv/impl/api/ops/PutResolve.java | 49 +- .../kv/impl/api/ops/PutResolveHandler.java | 34 +- .../oracle/kv/impl/api/ops/RequestValue.java | 10 +- .../java/oracle/kv/impl/api/ops/Result.java | 75 +- .../kv/impl/api/ops/ResultIndexKeys.java | 2 +- .../kv/impl/api/ops/ResultIndexRows.java | 3 +- .../impl/api/ops/ResultKeyValueVersion.java | 22 +- .../kv/impl/api/ops/ResultValueVersion.java | 7 + .../api/ops/ReturnResultValueVersion.java | 2 + .../java/oracle/kv/impl/api/ops/Scanner.java | 15 +- .../api/ops/SingleKeyOperationHandler.java | 54 +- .../oracle/kv/impl/api/ops/TableQuery.java | 21 +- .../kv/impl/api/ops/TableQueryHandler.java | 7 +- .../impl/api/parallelscan/ParallelScan.java | 1 + .../oracle/kv/impl/api/table/AddTable.java | 18 +- .../kv/impl/api/table/DDLGenerator.java | 90 +- .../oracle/kv/impl/api/table/EvolveTable.java | 15 +- .../kv/impl/api/table/FieldValueImpl.java | 17 +- .../impl/api/table/FieldValueReaderImpl.java | 7 + .../oracle/kv/impl/api/table/IndexImpl.java | 80 +- .../oracle/kv/impl/api/table/IndexScan.java | 3 +- .../table/JsonCollectionReturnRowImpl.java | 5 +- .../impl/api/table/JsonCollectionRowImpl.java | 9 +- .../oracle/kv/impl/api/table/NsonRow.java | 11 +- .../oracle/kv/impl/api/table/NsonUtil.java | 21 +- .../oracle/kv/impl/api/table/NumberUtils.java | 6 +- .../kv/impl/api/table/ReturnRowImpl.java | 6 +- .../oracle/kv/impl/api/table/RowImpl.java | 39 + .../kv/impl/api/table/RowReaderImpl.java | 10 + .../kv/impl/api/table/TableAPIImpl.java | 116 
+- .../kv/impl/api/table/TableBuilder.java | 2 + .../kv/impl/api/table/TableBuilderBase.java | 9 + .../kv/impl/api/table/TableEventHandler.java | 84 +- .../kv/impl/api/table/TableEvolver.java | 89 +- .../oracle/kv/impl/api/table/TableImpl.java | 308 +- .../kv/impl/api/table/TableJsonUtils.java | 422 +- .../kv/impl/api/table/TableMetadata.java | 25 +- .../kv/impl/api/table/TableMultiGetBatch.java | 1 + .../oracle/kv/impl/api/table/TablePath.java | 27 +- .../oracle/kv/impl/api/table/TableScan.java | 2 + .../kv/impl/api/table/TabularFormatter.java | 20 +- .../oracle/kv/impl/api/table/TupleValue.java | 21 + .../oracle/kv/impl/api/table/ValueReader.java | 14 + .../kv/impl/api/table/ValueSerializer.java | 11 + .../impl/api/table/serialize/AvroEncoder.java | 2 +- .../kv/impl/async/AbstractEndpointGroup.java | 100 +- .../oracle/kv/impl/async/EndpointGroup.java | 12 + .../kv/impl/async/InetNetworkAddress.java | 9 +- .../dialog/nio/NioChannelThreadPool.java | 14 +- .../nio/NioChannelThreadPoolPerfTracker.java | 4 +- .../async/dialog/nio/NioCreatorEndpoint.java | 1 + .../async/dialog/nio/NioEndpointHandler.java | 10 +- .../dialog/nio/NioResponderEndpoint.java | 4 +- .../nio/PreReadWrappedEndpointHandler.java | 6 +- .../nio/PreWriteWrappedEndpointHandler.java | 6 +- .../client/admin/DdlStatementExecutor.java | 40 +- .../oracle/kv/impl/param/LoadParameters.java | 3 +- .../oracle/kv/impl/param/ParameterUtils.java | 1 - .../impl/pubsub/CheckpointTableManager.java | 4 +- .../java/oracle/kv/impl/pubsub/DataEntry.java | 194 +- .../impl/pubsub/NoSQLStreamFeederFilter.java | 89 +- .../kv/impl/pubsub/NoSQLSubscriptionImpl.java | 34 +- .../kv/impl/pubsub/OpenTransactionBuffer.java | 381 +- .../oracle/kv/impl/pubsub/PublishingUnit.java | 217 +- .../kv/impl/pubsub/ReplicationStreamCbk.java | 80 +- .../pubsub/ReplicationStreamConsumer.java | 10 +- .../pubsub/ReplicationStreamConsumerStat.java | 4 +- .../oracle/kv/impl/pubsub/StreamDelEvent.java | 102 +- .../oracle/kv/impl/pubsub/StreamPutEvent.java | 102 +- .../oracle/kv/impl/pubsub/StreamTxnEvent.java | 201 + .../kv/impl/pubsub/SubscriptionStatImpl.java | 2 +- .../security/StreamServerAuthHandler.java | 64 +- .../kv/impl/query/compiler/CodeGenerator.java | 66 +- .../kv/impl/query/compiler/ExprSFW.java | 11 +- .../impl/query/compiler/FuncCreationTime.java | 92 + .../compiler/FuncCreationTimeMillis.java | 87 + .../query/compiler/FuncExpirationTime.java | 2 +- .../compiler/FuncExpirationTimeMillis.java | 2 +- .../impl/query/compiler/FuncRowMetadata.java | 89 + .../kv/impl/query/compiler/FunctionLib.java | 13 +- .../kv/impl/query/compiler/IndexAnalyzer.java | 226 +- .../kv/impl/query/compiler/IndexExpr.java | 39 +- .../kv/impl/query/compiler/Translator.java | 249 +- .../kv/impl/query/compiler/parser/KVQL.g4 | 70 +- .../kv/impl/query/compiler/parser/KVQL.interp | 21 +- .../kv/impl/query/compiler/parser/KVQL.tokens | 476 +- .../compiler/parser/KVQLBaseListener.java | 78 +- .../query/compiler/parser/KVQLLexer.interp | 17 +- .../impl/query/compiler/parser/KVQLLexer.java | 2377 ++--- .../query/compiler/parser/KVQLLexer.tokens | 476 +- .../query/compiler/parser/KVQLListener.java | 82 +- .../query/compiler/parser/KVQLParser.java | 7831 +++++++++-------- .../kv/impl/query/runtime/FuncCountIter.java | 19 - .../query/runtime/FuncCreationTimeIter.java | 153 + .../runtime/FuncCreationTimeMillisIter.java | 144 + .../query/runtime/FuncRowMetadataIter.java | 159 + .../kv/impl/query/runtime/InsertRowIter.java | 4 + .../query/runtime/NestedLoopJoinIter.java | 9 +- 
.../query/runtime/PartitionUnionIter.java | 23 +- .../kv/impl/query/runtime/PlanIter.java | 16 +- .../kv/impl/query/runtime/ReceiveIter.java | 30 +- .../query/runtime/RuntimeControlBlock.java | 6 + .../runtime/server/ServerDeleteRowIter.java | 2 +- .../query/runtime/server/ServerTableIter.java | 6 + .../runtime/server/ServerUpdateRowIter.java | 1 + .../runtime/server/TableScannerFactory.java | 26 + .../main/java/oracle/kv/impl/rep/RepNode.java | 70 +- .../oracle/kv/impl/rep/RepNodeSecurity.java | 16 +- .../kv/impl/rep/admin/RepNodeAdminImpl.java | 10 + .../impl/rep/migration/MigrationManager.java | 49 +- .../impl/rep/migration/MigrationSource.java | 50 +- .../rep/migration/MigrationStreamHandle.java | 17 +- .../impl/rep/migration/MigrationTarget.java | 71 +- .../rep/migration/TargetMonitorExecutor.java | 18 +- .../impl/rep/migration/TransferProtocol.java | 14 +- .../stats/IntermediateTableSizeUpdate.java | 129 +- .../kv/impl/rep/stats/KeyStatsCollector.java | 52 +- .../partreader/PartitionReader.java | 6 + .../kv/impl/rep/table/IndexKeyCreator.java | 4 + .../kv/impl/rep/table/MaintenanceThread.java | 1 + .../kv/impl/rep/table/ResourceCollector.java | 70 +- .../kv/impl/rep/table/TableManager.java | 4 +- .../kv/impl/security/AccessCheckUtils.java | 10 +- .../impl/security/KVStorePrivilegeLabel.java | 10 +- .../kv/impl/security/NamespacePrivilege.java | 2 +- .../kv/impl/security/TablePrivilege.java | 39 +- .../kv/impl/sna/ProcessServiceManager.java | 18 +- .../kv/impl/tif/FeederSubscriptionCbk.java | 14 +- .../oracle/kv/impl/util/SerialVersion.java | 23 +- .../impl/xregion/agent/RegionAgentConfig.java | 9 +- .../impl/xregion/agent/RegionAgentThread.java | 9 + .../impl/xregion/agent/mrt/MRTSubscriber.java | 25 +- .../kv/impl/xregion/service/JsonConfig.java | 28 +- .../impl/xregion/service/MRTableMetrics.java | 27 +- .../impl/xregion/service/ReqRespManager.java | 8 + .../kv/impl/xregion/service/ServiceMDMan.java | 36 +- .../kv/impl/xregion/service/StatsManager.java | 118 +- .../impl/xregion/service/XRegionService.java | 3 +- .../service/XRegionServiceMetrics.java | 29 +- .../java/oracle/kv/pubsub/NoSQLPublisher.java | 9 +- .../oracle/kv/pubsub/NoSQLSubscription.java | 14 + .../kv/pubsub/NoSQLSubscriptionConfig.java | 180 +- .../oracle/kv/pubsub/StreamOperation.java | 183 +- .../java/oracle/kv/pubsub/StreamPosition.java | 2 +- .../java/oracle/kv/query/ExecuteOptions.java | 85 + .../java/oracle/kv/shell/ExecuteCommand.java | 3 +- .../main/java/oracle/kv/shell/GetCommand.java | 4 +- .../oracle/kv/stats/ServiceAgentMetrics.java | 16 + .../oracle/kv/table/FieldValueFactory.java | 2 +- kvmain/src/main/java/oracle/kv/table/Row.java | 64 +- .../main/java/oracle/kv/table/TableAPI.java | 48 +- .../java/oracle/kv/table/WriteOptions.java | 14 +- .../java/oracle/kv/txn/TransactionId.java | 37 + .../java/oracle/kv/txn/TransactionIdImpl.java | 86 + .../main/java/oracle/kv/util/shell/Shell.java | 9 +- kvstore/pom.xml | 2 +- kvtest/kvclient-IT/pom.xml | 2 +- .../src/main/java/oracle/kv/ValueTest.java | 168 +- .../oracle/kv/impl/api/ops/OpsSerialTest.java | 51 +- .../kv/impl/api/ops/ResultSerialTest.java | 68 +- kvtest/kvdatacheck-IT/pom.xml | 2 +- kvtest/kvquery-IT/pom.xml | 2 +- .../main/java/qt/framework/JsonLoaderKV.java | 59 +- .../main/java/qt/framework/QTDefaultImpl.java | 2 +- .../src/main/resources/cases/gb/q/all | 148 +- .../resources/cases/idc_inner_join/after.ddl | 21 + .../cases/idc_inner_join/before.data | 421 + .../resources/cases/idc_inner_join/before.ddl | 81 + 
.../cases/idc_inner_join/explans/err01.r | 1 + .../cases/idc_inner_join/explans/err02.r | 1 + .../cases/idc_inner_join/explans/err03.r | 1 + .../cases/idc_inner_join/explans/err04.r | 1 + .../cases/idc_inner_join/explans/err05.r | 1 + .../cases/idc_inner_join/explans/loj01.r | 133 + .../cases/idc_inner_join/explans/loj02.r | 133 + .../cases/idc_inner_join/explans/loj03.r | 156 + .../cases/idc_inner_join/explans/loj04.r | 138 + .../cases/idc_inner_join/explans/loj05.r | 186 + .../cases/idc_inner_join/explans/loj06.r | 195 + .../cases/idc_inner_join/explans/loj07.r | 206 + .../cases/idc_inner_join/explans/loj08.r | 204 + .../cases/idc_inner_join/explans/loj09.r | 175 + .../cases/idc_inner_join/explans/loj10.r | 175 + .../cases/idc_inner_join/explans/loj11.r | 269 + .../cases/idc_inner_join/explans/nt01.r | 176 + .../cases/idc_inner_join/explans/nt02.r | 211 + .../cases/idc_inner_join/explans/nt03.r | 210 + .../cases/idc_inner_join/explans/nt04.r | 177 + .../cases/idc_inner_join/explans/nt05.r | 256 + .../cases/idc_inner_join/explans/nt06.r | 215 + .../cases/idc_inner_join/explans/q1.r | 130 + .../cases/idc_inner_join/explans/q10.r | 265 + .../cases/idc_inner_join/explans/q11.r | 187 + .../cases/idc_inner_join/explans/q12.r | 273 + .../cases/idc_inner_join/explans/q13.r | 423 + .../cases/idc_inner_join/explans/q14.r | 147 + .../cases/idc_inner_join/explans/q15.r | 134 + .../cases/idc_inner_join/explans/q16.r | 195 + .../cases/idc_inner_join/explans/q17.r | 134 + .../cases/idc_inner_join/explans/q18.r | 107 + .../cases/idc_inner_join/explans/q19.r | 172 + .../cases/idc_inner_join/explans/q2.r | 130 + .../cases/idc_inner_join/explans/q20.r | 203 + .../cases/idc_inner_join/explans/q21.r | 228 + .../cases/idc_inner_join/explans/q22.r | 280 + .../cases/idc_inner_join/explans/q3.r | 136 + .../cases/idc_inner_join/explans/q4.r | 136 + .../cases/idc_inner_join/explans/q5.r | 136 + .../cases/idc_inner_join/explans/q6.r | 136 + .../cases/idc_inner_join/explans/q7.r | 258 + .../cases/idc_inner_join/explans/q8.r | 245 + .../cases/idc_inner_join/explans/q9.r | 281 + .../cases/idc_inner_join/expres/err01.r | 1 + .../cases/idc_inner_join/expres/err02.r | 1 + .../cases/idc_inner_join/expres/err03.r | 1 + .../cases/idc_inner_join/expres/err04.r | 1 + .../cases/idc_inner_join/expres/err05.r | 1 + .../cases/idc_inner_join/expres/loj01.r | 6 + .../cases/idc_inner_join/expres/loj02.r | 6 + .../cases/idc_inner_join/expres/loj03.r | 6 + .../cases/idc_inner_join/expres/loj04.r | 6 + .../cases/idc_inner_join/expres/loj05.r | 6 + .../cases/idc_inner_join/expres/loj06.r | 6 + .../cases/idc_inner_join/expres/loj07.r | 10 + .../cases/idc_inner_join/expres/loj08.r | 5 + .../cases/idc_inner_join/expres/loj09.r | 6 + .../cases/idc_inner_join/expres/loj10.r | 6 + .../cases/idc_inner_join/expres/loj11.r | 6 + .../cases/idc_inner_join/expres/nt01.r | 6 + .../cases/idc_inner_join/expres/nt02.r | 6 + .../cases/idc_inner_join/expres/nt03.r | 1 + .../cases/idc_inner_join/expres/nt04.r | 6 + .../cases/idc_inner_join/expres/nt05.r | 4 + .../cases/idc_inner_join/expres/nt06.r | 6 + .../cases/idc_inner_join/expres/q1.r | 6 + .../cases/idc_inner_join/expres/q10.r | 9 + .../cases/idc_inner_join/expres/q11.r | 12 + .../cases/idc_inner_join/expres/q12.r | 10 + .../cases/idc_inner_join/expres/q13.r | 6 + .../cases/idc_inner_join/expres/q14.r | 4 + .../cases/idc_inner_join/expres/q15.r | 6 + .../cases/idc_inner_join/expres/q16.r | 5 + .../cases/idc_inner_join/expres/q18.r | 1 + .../cases/idc_inner_join/expres/q19.r | 3 + 
.../cases/idc_inner_join/expres/q2.r | 6 + .../cases/idc_inner_join/expres/q20.r | 6 + .../cases/idc_inner_join/expres/q21.r | 9 + .../cases/idc_inner_join/expres/q22.r | 84 + .../cases/idc_inner_join/expres/q3.r | 6 + .../cases/idc_inner_join/expres/q4.r | 6 + .../cases/idc_inner_join/expres/q5.r | 10 + .../cases/idc_inner_join/expres/q6.r | 10 + .../cases/idc_inner_join/expres/q7.r | 6 + .../cases/idc_inner_join/expres/q8.r | 6 + .../cases/idc_inner_join/expres/q9.r | 6 + .../resources/cases/idc_inner_join/q/err01.q | 3 + .../resources/cases/idc_inner_join/q/err02.q | 3 + .../resources/cases/idc_inner_join/q/err03.q | 3 + .../resources/cases/idc_inner_join/q/err04.q | 3 + .../resources/cases/idc_inner_join/q/err05.q | 3 + .../resources/cases/idc_inner_join/q/loj01.q | 4 + .../resources/cases/idc_inner_join/q/loj02.q | 4 + .../resources/cases/idc_inner_join/q/loj03.q | 5 + .../resources/cases/idc_inner_join/q/loj04.q | 4 + .../resources/cases/idc_inner_join/q/loj05.q | 5 + .../resources/cases/idc_inner_join/q/loj06.q | 5 + .../resources/cases/idc_inner_join/q/loj07.q | 5 + .../resources/cases/idc_inner_join/q/loj08.q | 7 + .../resources/cases/idc_inner_join/q/loj09.q | 6 + .../resources/cases/idc_inner_join/q/loj10.q | 5 + .../resources/cases/idc_inner_join/q/loj11.q | 7 + .../resources/cases/idc_inner_join/q/nt01.q | 4 + .../resources/cases/idc_inner_join/q/nt02.q | 5 + .../resources/cases/idc_inner_join/q/nt03.q | 6 + .../resources/cases/idc_inner_join/q/nt04.q | 4 + .../resources/cases/idc_inner_join/q/nt05.q | 5 + .../resources/cases/idc_inner_join/q/nt06.q | 6 + .../resources/cases/idc_inner_join/q/q1.q | 3 + .../resources/cases/idc_inner_join/q/q10.q | 7 + .../resources/cases/idc_inner_join/q/q11.q | 5 + .../resources/cases/idc_inner_join/q/q12.q | 6 + .../resources/cases/idc_inner_join/q/q13.q | 7 + .../resources/cases/idc_inner_join/q/q14.q | 4 + .../resources/cases/idc_inner_join/q/q15.q | 3 + .../resources/cases/idc_inner_join/q/q16.q | 6 + .../resources/cases/idc_inner_join/q/q18.q | 3 + .../resources/cases/idc_inner_join/q/q19.q | 7 + .../resources/cases/idc_inner_join/q/q2.q | 3 + .../resources/cases/idc_inner_join/q/q20.q | 5 + .../resources/cases/idc_inner_join/q/q21.q | 6 + .../resources/cases/idc_inner_join/q/q22.q | 7 + .../resources/cases/idc_inner_join/q/q3.q | 4 + .../resources/cases/idc_inner_join/q/q4.q | 4 + .../resources/cases/idc_inner_join/q/q5.q | 4 + .../resources/cases/idc_inner_join/q/q6.q | 4 + .../resources/cases/idc_inner_join/q/q7.q | 7 + .../resources/cases/idc_inner_join/q/q8.q | 5 + .../resources/cases/idc_inner_join/q/q9.q | 9 + .../cases/idc_inner_join/test.config | 13 + .../cases/idc_maths/explans/idx_atan2_dv.r | 53 +- .../cases/idc_multirow_update/after.ddl | 11 + .../cases/idc_multirow_update/before.data | 1562 ++++ .../cases/idc_multirow_update/before.ddl | 58 + .../idc_multirow_update/explans/fuzz01.r | 167 + .../idc_multirow_update/explans/fuzz02.r | 150 + .../idc_multirow_update/explans/fuzz03.r | 150 + .../cases/idc_multirow_update/explans/q1.r | 1 + .../cases/idc_multirow_update/explans/q10.r | 70 + .../cases/idc_multirow_update/explans/q11.r | 102 + .../cases/idc_multirow_update/explans/q12.r | 92 + .../cases/idc_multirow_update/explans/q13.r | 92 + .../cases/idc_multirow_update/explans/q14.r | 136 + .../cases/idc_multirow_update/explans/q15.r | 220 + .../cases/idc_multirow_update/explans/q17.r | 294 + .../cases/idc_multirow_update/explans/q18.r | 324 + .../cases/idc_multirow_update/explans/q19.r | 1 + 
.../cases/idc_multirow_update/explans/q2.r | 124 + .../cases/idc_multirow_update/explans/q20.r | 1 + .../cases/idc_multirow_update/explans/q3.r | 275 + .../cases/idc_multirow_update/explans/q4.r | 143 + .../cases/idc_multirow_update/explans/q5.r | 1 + .../cases/idc_multirow_update/explans/q6.r | 92 + .../cases/idc_multirow_update/explans/q7.r | 107 + .../cases/idc_multirow_update/explans/q8.r | 55 + .../cases/idc_multirow_update/explans/q9.r | 100 + .../cases/idc_multirow_update/expres/fuzz01.r | 3 + .../cases/idc_multirow_update/expres/fuzz02.r | 3 + .../cases/idc_multirow_update/expres/fuzz03.r | 3 + .../cases/idc_multirow_update/expres/q1.r | 1 + .../cases/idc_multirow_update/expres/q10.r | 4 + .../cases/idc_multirow_update/expres/q11.r | 1 + .../cases/idc_multirow_update/expres/q12.r | 1 + .../cases/idc_multirow_update/expres/q13.r | 2 + .../cases/idc_multirow_update/expres/q14.r | 3 + .../cases/idc_multirow_update/expres/q15.r | 3 + .../cases/idc_multirow_update/expres/q17.r | 3 + .../cases/idc_multirow_update/expres/q18.r | 3 + .../cases/idc_multirow_update/expres/q19.r | 1 + .../cases/idc_multirow_update/expres/q2.r | 3 + .../cases/idc_multirow_update/expres/q20.r | 1 + .../cases/idc_multirow_update/expres/q3.r | 4 + .../cases/idc_multirow_update/expres/q4.r | 3 + .../cases/idc_multirow_update/expres/q5.r | 1 + .../cases/idc_multirow_update/expres/q6.r | 4 + .../cases/idc_multirow_update/expres/q7.r | 2 + .../cases/idc_multirow_update/expres/q8.r | 4 + .../cases/idc_multirow_update/expres/q9.r | 1 + .../cases/idc_multirow_update/q/fuzz01.q | 10 + .../cases/idc_multirow_update/q/fuzz02.q | 10 + .../cases/idc_multirow_update/q/fuzz03.q | 10 + .../cases/idc_multirow_update/q/q1.q | 9 + .../cases/idc_multirow_update/q/q10.q | 7 + .../cases/idc_multirow_update/q/q11.q | 9 + .../cases/idc_multirow_update/q/q12.q | 9 + .../cases/idc_multirow_update/q/q13.q | 3 + .../cases/idc_multirow_update/q/q14.q | 10 + .../cases/idc_multirow_update/q/q15.q | 10 + .../cases/idc_multirow_update/q/q17.q | 12 + .../cases/idc_multirow_update/q/q18.q | 12 + .../cases/idc_multirow_update/q/q19.q | 7 + .../cases/idc_multirow_update/q/q2.q | 8 + .../cases/idc_multirow_update/q/q20.q | 7 + .../cases/idc_multirow_update/q/q3.q | 11 + .../cases/idc_multirow_update/q/q4.q | 9 + .../cases/idc_multirow_update/q/q5.q | 10 + .../cases/idc_multirow_update/q/q6.q | 7 + .../cases/idc_multirow_update/q/q7.q | 4 + .../cases/idc_multirow_update/q/q8.q | 7 + .../cases/idc_multirow_update/q/q9.q | 8 + .../cases/idc_multirow_update/test.config | 22 + .../cases/inner_joins/explans/lq01.r | 16 +- .../cases/inner_joins/explans/lq02.r | 16 +- .../cases/inner_joins/explans/lq03.r | 16 +- .../cases/inner_joins/explans/lq04.r | 16 +- .../cases/inner_joins/explans/lq05.r | 16 +- .../cases/inner_joins/explans/oq01.r | 16 +- .../cases/inner_joins/explans/oq02.r | 16 +- .../cases/inner_joins/explans/oq03.r | 16 +- .../cases/inner_joins/explans/oq04.r | 11 +- .../cases/inner_joins/explans/oq05.r | 24 +- .../cases/inner_joins/explans/oq06.r | 448 +- .../resources/cases/inner_joins/explans/q25.r | 77 +- .../resources/cases/inner_joins/explans/q26.r | 80 +- .../resources/cases/insert/explans/ins03r.r | 4 +- .../main/resources/cases/insert/q/ins03r.q | 2 +- .../src/main/resources/cases/joins/before.ddl | 2 + .../resources/cases/joins/explans/inner01.r | 131 + .../resources/cases/joins/explans/inner02.r | 131 + .../resources/cases/joins/explans/inner03.r | 135 + .../resources/cases/joins/explans/inner04.r | 131 + 
.../resources/cases/joins/explans/lina17.r | 108 + .../resources/cases/joins/explans/lind24.r | 2 +- .../resources/cases/joins/explans/lind25.r | 99 + .../resources/cases/joins/explans/lind26.r | 99 + .../resources/cases/joins/explans/lind27.r | 121 + .../resources/cases/joins/explans/lind28.r | 186 + .../resources/cases/joins/explans/lind29.r | 213 + .../resources/cases/joins/explans/lind30.r | 213 + .../resources/cases/joins/explans/lind31.r | 73 + .../resources/cases/joins/explans/lind32.r | 94 + .../resources/cases/joins/explans/lind33.r | 123 + .../resources/cases/joins/explans/lind34.r | 117 + .../resources/cases/joins/explans/lind35.r | 121 + .../resources/cases/joins/explans/lind36.r | 121 + .../resources/cases/joins/explans/treead01.r | 318 +- .../resources/cases/joins/explans/treed03.r | 2 +- .../resources/cases/joins/explans/treed11.r | 88 + .../resources/cases/joins/explans/treed12.r | 92 + .../resources/cases/joins/explans/treed13.r | 88 + .../resources/cases/joins/expres/inner01.r | 12 + .../resources/cases/joins/expres/inner02.r | 12 + .../resources/cases/joins/expres/inner03.r | 12 + .../resources/cases/joins/expres/inner04.r | 12 + .../resources/cases/joins/expres/lina17.r | 1 + .../resources/cases/joins/expres/lind25.r | 11 + .../resources/cases/joins/expres/lind26.r | 11 + .../resources/cases/joins/expres/lind27.r | 29 + .../resources/cases/joins/expres/lind28.r | 29 + .../resources/cases/joins/expres/lind29.r | 23 + .../resources/cases/joins/expres/lind30.r | 29 + .../resources/cases/joins/expres/lind31.r | 7 + .../resources/cases/joins/expres/lind32.r | 22 + .../resources/cases/joins/expres/lind33.r | 4 + .../resources/cases/joins/expres/lind34.r | 4 + .../resources/cases/joins/expres/lind35.r | 21 + .../resources/cases/joins/expres/lind36.r | 21 + .../resources/cases/joins/expres/treed11.r | 39 + .../resources/cases/joins/expres/treed12.r | 39 + .../resources/cases/joins/expres/treed13.r | 39 + .../src/main/resources/cases/joins/q/all | 312 +- .../main/resources/cases/joins/q/inner01.q | 4 + .../main/resources/cases/joins/q/inner02.q | 4 + .../main/resources/cases/joins/q/inner03.q | 4 + .../main/resources/cases/joins/q/inner04.q | 4 + .../src/main/resources/cases/joins/q/lina17.q | 5 + .../src/main/resources/cases/joins/q/lind25.q | 5 + .../src/main/resources/cases/joins/q/lind26.q | 5 + .../src/main/resources/cases/joins/q/lind27.q | 3 + .../src/main/resources/cases/joins/q/lind28.q | 7 + .../src/main/resources/cases/joins/q/lind29.q | 7 + .../src/main/resources/cases/joins/q/lind30.q | 7 + .../src/main/resources/cases/joins/q/lind31.q | 4 + .../src/main/resources/cases/joins/q/lind32.q | 3 + .../src/main/resources/cases/joins/q/lind33.q | 5 + .../src/main/resources/cases/joins/q/lind34.q | 5 + .../src/main/resources/cases/joins/q/lind35.q | 6 + .../src/main/resources/cases/joins/q/lind36.q | 4 + .../main/resources/cases/joins/q/treed11.q | 4 + .../main/resources/cases/joins/q/treed12.q | 4 + .../main/resources/cases/joins/q/treed13.q | 4 + .../src/main/resources/cases/json_idx/q/all | 10 + .../cases/maths/explans/idx_power01.r | 47 +- .../cases/maths/explans/idx_power02.r | 71 +- .../resources/cases/row_metadata/after.ddl | 3 + .../resources/cases/row_metadata/before.data | 586 ++ .../resources/cases/row_metadata/before.ddl | 99 + .../cases/row_metadata/explans/aq02.r | 45 + .../cases/row_metadata/explans/jc_aq02.r | 46 + .../cases/row_metadata/explans/nex06.r | 45 + .../cases/row_metadata/explans/partial_mq01.r | 83 + .../cases/row_metadata/explans/q01.r | 68 + 
.../cases/row_metadata/explans/sort01.r | 137 + .../cases/row_metadata/explans/sort16.r | 65 + .../cases/row_metadata/explans/unnest01.r | 133 + .../cases/row_metadata/explans/unnest02.r | 92 + .../cases/row_metadata/expres/aq02.r | 3 + .../cases/row_metadata/expres/jc_aq02.r | 3 + .../cases/row_metadata/expres/nex06.r | 2 + .../cases/row_metadata/expres/partial_mq01.r | 1 + .../resources/cases/row_metadata/expres/q01.r | 4 + .../cases/row_metadata/expres/sort01.r | 8 + .../cases/row_metadata/expres/sort16.r | 8 + .../cases/row_metadata/expres/unnest01.r | 25 + .../cases/row_metadata/expres/unnest02.r | 25 + .../resources/cases/row_metadata/q/aq02.q | 4 + .../resources/cases/row_metadata/q/jc_aq02.q | 4 + .../resources/cases/row_metadata/q/nex06.q | 4 + .../cases/row_metadata/q/partial_mq01.q | 3 + .../main/resources/cases/row_metadata/q/q01.q | 8 + .../resources/cases/row_metadata/q/sort01.q | 6 + .../resources/cases/row_metadata/q/sort16.q | 5 + .../resources/cases/row_metadata/q/unnest01.q | 3 + .../resources/cases/row_metadata/q/unnest02.q | 7 + .../resources/cases/row_metadata/test.config | 42 + .../main/resources/cases/rowprops/before.ddl | 4 + .../cases/rowprops/explans/jc_ct01.r | 90 + .../cases/rowprops/explans/jc_xins02.r | 39 +- .../cases/rowprops/explans/jc_xupd01.r | 44 +- .../cases/rowprops/explans/jc_xxdel01.r | 37 +- .../cases/rowprops/explans/jc_xxdel02.r | 59 +- .../resources/cases/rowprops/explans/xins02.r | 86 +- .../resources/cases/rowprops/explans/xupd01.r | 89 +- .../cases/rowprops/explans/xxdel01.r | 120 +- .../cases/rowprops/explans/xxdel02.r | 93 +- .../resources/cases/rowprops/expres/jc_ct01.r | 4 + .../cases/rowprops/expres/jc_mod02.r | 8 +- .../cases/rowprops/expres/jc_xins02.r | 2 +- .../cases/rowprops/expres/jc_xupd01.r | 2 +- .../cases/rowprops/expres/jc_xxdel01.r | 2 +- .../cases/rowprops/expres/jc_xxdel02.r | 4 +- .../resources/cases/rowprops/expres/xins02.r | 2 +- .../resources/cases/rowprops/expres/xupd01.r | 2 +- .../resources/cases/rowprops/expres/xxdel01.r | 2 +- .../resources/cases/rowprops/expres/xxdel02.r | 4 +- .../src/main/resources/cases/rowprops/q/all | 529 +- .../main/resources/cases/rowprops/q/jc_ct01.q | 5 + .../resources/cases/rowprops/q/jc_xins02.q | 2 +- .../resources/cases/rowprops/q/jc_xupd01.q | 2 +- .../resources/cases/rowprops/q/jc_xxdel01.q | 2 +- .../resources/cases/rowprops/q/jc_xxdel02.q | 2 +- .../main/resources/cases/rowprops/q/xins02.q | 6 +- .../main/resources/cases/rowprops/q/xupd01.q | 6 +- .../main/resources/cases/rowprops/q/xxdel01.q | 6 +- .../main/resources/cases/rowprops/q/xxdel02.q | 6 +- kvtest/kvstore-IT/pom.xml | 2 +- .../oracle/kv/impl/admin/DdlSyntaxTest.java | 154 + .../kv/impl/admin/SecuredAdminClientTest.java | 1 + .../impl/admin/client/CommandShellTest.java | 28 + .../kv/impl/api/AsyncRequestHandlerTest.java | 2 +- .../kv/impl/api/RequestHandlerTest.java | 34 +- .../kv/impl/api/ops/ClientTestServices.java | 2 +- .../api/security/OpAccessCheckTestUtils.java | 4 +- .../impl/api/security/SecureTableOpsTest.java | 673 +- .../kv/impl/api/table/CreationTimeTest.java | 1118 +++ .../oracle/kv/impl/api/table/DmlTest.java | 195 +- .../api/table/IndexSerializationTest.java | 4 +- .../kv/impl/api/table/JsonMetadataTest.java | 50 +- .../kv/impl/api/table/MetadataTest.java | 7 +- .../kv/impl/api/table/NumberUtilsTest.java | 91 + .../impl/api/table/TableChangeSerialTest.java | 13 +- .../api/table/TableFieldValuesSerialTest.java | 12 +- .../kv/impl/api/table/TableOpsSerialTest.java | 31 +- 
.../kv/impl/api/table/TableTestBase.java | 2 +- .../kv/impl/api/table/ValueReaderTest.java | 13 + .../dialog/nio/NioEndpointHandlerTest.java | 6 +- .../kv/impl/query/ElasticityDelayTest.java | 4 +- .../kv/impl/query/FailureInjectionTest.java | 1 + .../impl/query/shell/ShowDdlCommandTest.java | 2 +- .../java/oracle/kv/impl/rep/MetadataTest.java | 4 + .../kv/impl/rep/PartitionMigrationTest.java | 659 +- .../impl/rep/PartitionMigrationTestBase.java | 29 +- .../kv/impl/rep/RepNodeServiceTest.java | 5 +- .../java/oracle/kv/impl/rep/TableTest.java | 5 +- .../rep/migration/EodSendFailureTest.java | 309 + .../rep/migration/RowCreationTimeTest.java | 318 + .../PartitionGenerationTestBase.java | 21 +- .../kv/impl/rep/stats/PartitionScanTest.java | 4 +- .../rep/stats/PartitionSizeLimitTest.java | 182 + .../kv/impl/rep/stats/StorageStatTest.java | 8 +- .../kv/impl/rep/stats/TableIndexScanTest.java | 8 +- .../kv/impl/security/PrivilegeTest.java | 27 + .../oracle/kv/impl/util/KVRepTestConfig.java | 12 + .../oracle/kv/impl/util/SerialTestUtils.java | 1 + .../oracle/kv/impl/util/SpeedyTTLTime.java | 75 + .../kv/impl/util/SpeedyTTLTimeTest.java | 77 + .../java/oracle/kv/impl/util/TestUtils.java | 6 +- .../kv/impl/xregion/XRegionTestBase.java | 13 +- .../java/oracle/kv/pubsub/PubSubTestBase.java | 283 +- .../oracle/kv/pubsub/StreamRowMDTestBase.java | 358 + .../java/oracle/kv/table/SecureDDLTest.java | 124 +- .../java/oracle/kv/util/DDLTestUtils.java | 11 + .../java/oracle/kv/util/TableTestUtils.java | 1 + kvtest/kvtif-IT/pom.xml | 2 +- .../oracle/kv/impl/tif/SubscriptionTest.java | 4 +- .../kv/impl/tif/TextIndexFeederTestBase.java | 20 +- kvtest/pom.xml | 2 +- packaging/pom.xml | 2 +- pom.xml | 2 +- recovery/pom.xml | 2 +- sql/pom.xml | 2 +- 885 files changed, 98149 insertions(+), 10123 deletions(-) create mode 100644 httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/GetDdlWorkRequestResponse.java create mode 100644 httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/GetKmsKeyInfoResponse.java create mode 100644 httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/util/ProxyThreadPoolExecutor.java create mode 100644 httpproxy/httpproxy/src/main/java/oracle/nosql/util/HostNameResolver.java create mode 100644 httpproxy/httpproxy/src/main/java/oracle/nosql/util/tmi/KmsKeyInfo.java create mode 100644 httpproxy/httpproxy/src/main/java/oracle/nosql/util/tmi/ListWorkRequestsResult.java create mode 100644 httpproxy/httpproxy/src/main/java/oracle/nosql/util/tmi/WorkRequest.java create mode 100644 httpproxy/tests/src/assembly/test.xml create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/AsyncLatencyTest.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/BadProtocolTest.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/ChildTableTest.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/ConcurrentDDLTest.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/CreationTimeTest.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/DDosTest.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/DistributedRateLimitingTest.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/ElasticityTest.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/JsonCollectionTest.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/LatencyTestBase.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/LimitsTest.java create 
mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/MonitorStatsTest.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/MultiDeleteTest.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/MultiRegionTableTest.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/NumericTest.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/ParallelQueryTest.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/ProxyConfigTest.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/ProxyHealthSourceTest.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/ProxyI18NTest.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/ProxyTest.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/ProxyTestBase.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/QueryResumeTest.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/QueryTest.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/QueryThrottlingTest.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/RowMetadataTest.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/SerializationTest.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/ServerlessNsonTest.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/ServerlessTest.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/ServerlessTestBase.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/SslTest.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/SyncLatencyTest.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/TableCacheTest.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/TenantLogTest.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/TimeUtils.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/TimestampTest.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/WarmupTest.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/WriteMultipleTest.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/filter/FilterTest.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/filter/FilterTestBase.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/filter/RuleTest.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/kv/KVNonSecureProxyTest.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/kv/KVProxyTest.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/kv/KVSmoke.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/kv/MRTest.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/rest/ChildTableTest.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/rest/ConfigurationTest.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/rest/FilterTest.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/rest/FreeTableTest.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/rest/IndexTest.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/rest/JsonPayloadTest.java create mode 100644 
httpproxy/tests/src/main/java/oracle/nosql/proxy/rest/QueryTest.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/rest/RestAPITestBase.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/rest/RestCurlTest.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/rest/RowTest.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/rest/TableTest.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/rest/ThrottleLimitTest.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/rest/WorkRequestTest.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/security/IAMRetryTest.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/security/SecureTestUtil.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/security/TestSignatureProvider.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/util/CreateStore.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/util/CreateStoreUtils.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/util/ElasticityTestSetup.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/util/FreePortLocator.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/util/KVLiteBase.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/util/PortFinder.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/proxy/util/TestBase.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/query/JsonLoaderCloud.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/query/PrimIndexSetup.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/query/PrimIndexSetup2.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/query/PrimIndexSetup3.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/query/ProxyOperation.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/query/QTCaseCloud.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/query/QTDefaultImpl.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/query/QTFactoryCloud.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/query/QTSuiteCloud.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/query/QTest.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/query/RowPropsSetup.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/query/RunQueryTests.java create mode 100644 httpproxy/tests/src/main/java/oracle/nosql/query/UserTable.java create mode 100755 httpproxy/tests/src/main/resources/oracle/nosql/proxy/rest/curl_smoke_test.sh create mode 100644 httpproxy/tests/src/main/resources/oracle/nosql/proxy/utf16be.json create mode 100644 httpproxy/tests/src/main/resources/oracle/nosql/proxy/utf16bebom.json create mode 100644 httpproxy/tests/src/main/resources/oracle/nosql/proxy/utf16le.json create mode 100644 httpproxy/tests/src/main/resources/oracle/nosql/proxy/utf16lebom.json create mode 100644 httpproxy/tests/src/main/resources/oracle/nosql/proxy/utf8.json create mode 100644 httpproxy/tests/src/main/resources/oracle/nosql/proxy/utf8_jsondata.txt create mode 100644 httpproxy/tests/src/main/resources/oracle/nosql/proxy/utf8_testdata.txt create mode 100644 httpproxy/tests/src/main/resources/oracle/nosql/proxy/utf8bom.json mode change 100644 => 100755 
kvmain/src/main/java/com/sleepycat/je/Cursor.java mode change 100644 => 100755 kvmain/src/main/java/com/sleepycat/je/Database.java mode change 100644 => 100755 kvmain/src/main/java/com/sleepycat/je/DbInternal.java mode change 100644 => 100755 kvmain/src/main/java/com/sleepycat/je/EnvironmentConfig.java mode change 100644 => 100755 kvmain/src/main/java/com/sleepycat/je/OperationResult.java mode change 100644 => 100755 kvmain/src/main/java/com/sleepycat/je/SecondaryCursor.java mode change 100644 => 100755 kvmain/src/main/java/com/sleepycat/je/SecondaryDatabase.java mode change 100644 => 100755 kvmain/src/main/java/com/sleepycat/je/WriteOptions.java mode change 100644 => 100755 kvmain/src/main/java/com/sleepycat/je/beforeimage/BeforeImageContext.java mode change 100644 => 100755 kvmain/src/main/java/com/sleepycat/je/beforeimage/BeforeImageIndex.java mode change 100644 => 100755 kvmain/src/main/java/com/sleepycat/je/beforeimage/BeforeImageLN.java mode change 100644 => 100755 kvmain/src/main/java/com/sleepycat/je/beforeimage/BeforeImageLNLogEntry.java mode change 100644 => 100755 kvmain/src/main/java/com/sleepycat/je/beforeimage/BeforeImageOutputWireRecord.java mode change 100644 => 100755 kvmain/src/main/java/com/sleepycat/je/cleaner/Cleaner.java mode change 100644 => 100755 kvmain/src/main/java/com/sleepycat/je/cleaner/DataEraser.java mode change 100644 => 100755 kvmain/src/main/java/com/sleepycat/je/cleaner/ExtinctionScanner.java mode change 100644 => 100755 kvmain/src/main/java/com/sleepycat/je/cleaner/FileProcessor.java mode change 100644 => 100755 kvmain/src/main/java/com/sleepycat/je/cleaner/LNInfo.java mode change 100644 => 100755 kvmain/src/main/java/com/sleepycat/je/dbi/CursorImpl.java mode change 100644 => 100755 kvmain/src/main/java/com/sleepycat/je/dbi/EnvironmentImpl.java mode change 100644 => 100755 kvmain/src/main/java/com/sleepycat/je/dbi/MemoryBudget.java mode change 100644 => 100755 kvmain/src/main/java/com/sleepycat/je/dbi/WriteParams.java mode change 100644 => 100755 kvmain/src/main/java/com/sleepycat/je/log/LogEntryType.java mode change 100644 => 100755 kvmain/src/main/java/com/sleepycat/je/log/LogItem.java mode change 100644 => 100755 kvmain/src/main/java/com/sleepycat/je/log/entry/LNEntryInfo.java mode change 100644 => 100755 kvmain/src/main/java/com/sleepycat/je/log/entry/LNLogEntry.java mode change 100644 => 100755 kvmain/src/main/java/com/sleepycat/je/log/entry/NameLNLogEntry.java mode change 100644 => 100755 kvmain/src/main/java/com/sleepycat/je/recovery/RecoveryManager.java create mode 100644 kvmain/src/main/java/com/sleepycat/je/rep/BinaryProtocolException.java delete mode 100644 kvmain/src/main/java/com/sleepycat/je/rep/ReplicaRetryException.java mode change 100644 => 100755 kvmain/src/main/java/com/sleepycat/je/rep/stream/InputWireRecord.java mode change 100644 => 100755 kvmain/src/main/java/com/sleepycat/je/tree/BIN.java mode change 100644 => 100755 kvmain/src/main/java/com/sleepycat/je/tree/IN.java mode change 100644 => 100755 kvmain/src/main/java/com/sleepycat/je/tree/LN.java mode change 100644 => 100755 kvmain/src/main/java/com/sleepycat/je/tree/NameLN.java delete mode 100644 kvmain/src/main/java/com/sleepycat/je/txn/PreparedTxn.java mode change 100644 => 100755 kvmain/src/main/java/com/sleepycat/je/txn/TxnChain.java mode change 100644 => 100755 kvmain/src/main/java/com/sleepycat/je/util/TimeSupplier.java create mode 100644 kvmain/src/main/java/oracle/kv/impl/pubsub/StreamTxnEvent.java create mode 100644 
kvmain/src/main/java/oracle/kv/impl/query/compiler/FuncCreationTime.java create mode 100644 kvmain/src/main/java/oracle/kv/impl/query/compiler/FuncCreationTimeMillis.java create mode 100644 kvmain/src/main/java/oracle/kv/impl/query/compiler/FuncRowMetadata.java create mode 100644 kvmain/src/main/java/oracle/kv/impl/query/runtime/FuncCreationTimeIter.java create mode 100644 kvmain/src/main/java/oracle/kv/impl/query/runtime/FuncCreationTimeMillisIter.java create mode 100644 kvmain/src/main/java/oracle/kv/impl/query/runtime/FuncRowMetadataIter.java create mode 100644 kvmain/src/main/java/oracle/kv/txn/TransactionId.java create mode 100644 kvmain/src/main/java/oracle/kv/txn/TransactionIdImpl.java create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/after.ddl create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/before.data create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/before.ddl create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/err01.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/err02.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/err03.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/err04.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/err05.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/loj01.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/loj02.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/loj03.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/loj04.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/loj05.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/loj06.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/loj07.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/loj08.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/loj09.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/loj10.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/loj11.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/nt01.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/nt02.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/nt03.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/nt04.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/nt05.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/nt06.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q1.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q10.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q11.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q12.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q13.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q14.r 
create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q15.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q16.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q17.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q18.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q19.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q2.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q20.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q21.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q22.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q3.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q4.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q5.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q6.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q7.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q8.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q9.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/err01.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/err02.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/err03.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/err04.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/err05.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/loj01.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/loj02.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/loj03.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/loj04.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/loj05.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/loj06.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/loj07.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/loj08.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/loj09.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/loj10.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/loj11.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/nt01.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/nt02.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/nt03.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/nt04.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/nt05.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/nt06.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q1.r 
create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q10.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q11.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q12.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q13.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q14.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q15.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q16.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q18.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q19.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q2.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q20.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q21.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q22.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q3.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q4.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q5.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q6.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q7.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q8.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q9.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/err01.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/err02.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/err03.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/err04.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/err05.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/loj01.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/loj02.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/loj03.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/loj04.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/loj05.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/loj06.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/loj07.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/loj08.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/loj09.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/loj10.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/loj11.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/nt01.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/nt02.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/nt03.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/nt04.q create mode 100644 
kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/nt05.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/nt06.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q1.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q10.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q11.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q12.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q13.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q14.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q15.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q16.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q18.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q19.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q2.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q20.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q21.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q22.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q3.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q4.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q5.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q6.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q7.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q8.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q9.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/test.config create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/after.ddl create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/before.data create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/before.ddl create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/fuzz01.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/fuzz02.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/fuzz03.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q1.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q10.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q11.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q12.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q13.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q14.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q15.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q17.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q18.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q19.r 
create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q2.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q20.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q3.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q4.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q5.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q6.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q7.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q8.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q9.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/fuzz01.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/fuzz02.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/fuzz03.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q1.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q10.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q11.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q12.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q13.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q14.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q15.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q17.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q18.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q19.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q2.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q20.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q3.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q4.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q5.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q6.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q7.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q8.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q9.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/fuzz01.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/fuzz02.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/fuzz03.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q1.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q10.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q11.q create mode 100644 
kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q12.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q13.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q14.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q15.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q17.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q18.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q19.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q2.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q20.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q3.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q4.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q5.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q6.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q7.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q8.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q9.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/test.config create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/explans/inner01.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/explans/inner02.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/explans/inner03.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/explans/inner04.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lina17.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lind25.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lind26.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lind27.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lind28.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lind29.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lind30.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lind31.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lind32.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lind33.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lind34.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lind35.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lind36.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/explans/treed11.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/explans/treed12.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/explans/treed13.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/expres/inner01.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/expres/inner02.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/expres/inner03.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/expres/inner04.r 
create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/expres/lina17.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/expres/lind25.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/expres/lind26.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/expres/lind27.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/expres/lind28.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/expres/lind29.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/expres/lind30.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/expres/lind31.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/expres/lind32.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/expres/lind33.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/expres/lind34.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/expres/lind35.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/expres/lind36.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/expres/treed11.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/expres/treed12.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/expres/treed13.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/q/inner01.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/q/inner02.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/q/inner03.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/q/inner04.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/q/lina17.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/q/lind25.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/q/lind26.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/q/lind27.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/q/lind28.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/q/lind29.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/q/lind30.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/q/lind31.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/q/lind32.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/q/lind33.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/q/lind34.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/q/lind35.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/q/lind36.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/q/treed11.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/q/treed12.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/joins/q/treed13.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/row_metadata/after.ddl create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/row_metadata/before.data create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/row_metadata/before.ddl create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/row_metadata/explans/aq02.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/row_metadata/explans/jc_aq02.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/row_metadata/explans/nex06.r create mode 100644 
kvtest/kvquery-IT/src/main/resources/cases/row_metadata/explans/partial_mq01.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/row_metadata/explans/q01.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/row_metadata/explans/sort01.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/row_metadata/explans/sort16.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/row_metadata/explans/unnest01.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/row_metadata/explans/unnest02.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/row_metadata/expres/aq02.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/row_metadata/expres/jc_aq02.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/row_metadata/expres/nex06.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/row_metadata/expres/partial_mq01.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/row_metadata/expres/q01.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/row_metadata/expres/sort01.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/row_metadata/expres/sort16.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/row_metadata/expres/unnest01.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/row_metadata/expres/unnest02.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/row_metadata/q/aq02.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/row_metadata/q/jc_aq02.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/row_metadata/q/nex06.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/row_metadata/q/partial_mq01.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/row_metadata/q/q01.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/row_metadata/q/sort01.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/row_metadata/q/sort16.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/row_metadata/q/unnest01.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/row_metadata/q/unnest02.q create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/row_metadata/test.config create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/rowprops/explans/jc_ct01.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/rowprops/expres/jc_ct01.r create mode 100644 kvtest/kvquery-IT/src/main/resources/cases/rowprops/q/jc_ct01.q create mode 100644 kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/table/CreationTimeTest.java create mode 100644 kvtest/kvstore-IT/src/main/java/oracle/kv/impl/rep/migration/EodSendFailureTest.java create mode 100644 kvtest/kvstore-IT/src/main/java/oracle/kv/impl/rep/migration/RowCreationTimeTest.java create mode 100644 kvtest/kvstore-IT/src/main/java/oracle/kv/impl/util/SpeedyTTLTime.java create mode 100644 kvtest/kvstore-IT/src/main/java/oracle/kv/impl/util/SpeedyTTLTimeTest.java create mode 100644 kvtest/kvstore-IT/src/main/java/oracle/kv/pubsub/StreamRowMDTestBase.java diff --git a/httpproxy/cloudsim/pom.xml b/httpproxy/cloudsim/pom.xml index ce909a87..fa608355 100644 --- a/httpproxy/cloudsim/pom.xml +++ b/httpproxy/cloudsim/pom.xml @@ -7,7 +7,7 @@ com.oracle.nosql proxy - 25.1.13 + 25.3.21 cloudsim diff --git a/httpproxy/cloudsim/src/main/java/oracle/nosql/cloudsim/CloudSim.java b/httpproxy/cloudsim/src/main/java/oracle/nosql/cloudsim/CloudSim.java index 11237310..1eac0db7 100644 --- 
a/httpproxy/cloudsim/src/main/java/oracle/nosql/cloudsim/CloudSim.java +++ b/httpproxy/cloudsim/src/main/java/oracle/nosql/cloudsim/CloudSim.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. * * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: diff --git a/httpproxy/httpproxy/pom.xml b/httpproxy/httpproxy/pom.xml index 3e1f30c7..686c36b0 100644 --- a/httpproxy/httpproxy/pom.xml +++ b/httpproxy/httpproxy/pom.xml @@ -7,7 +7,7 @@ com.oracle.nosql proxy - 25.1.13 + 25.3.21 httpproxy diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/Config.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/Config.java index 79cbb192..0f9eb58d 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/Config.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/Config.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. * * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: @@ -97,6 +97,17 @@ enum ParamContext {CLOUD, ON_PREM, ALL, HIDDEN} new ParamDef("httpPort", "80", ParamType.INT, ParamContext.ALL); public static ParamDef HTTPS_PORT = new ParamDef("httpsPort", "443", ParamType.INT, ParamContext.ALL); + + /* + * This is used to size the number of Netty threads used to handle incoming + * requests from the network. If REQUEST_THREAD_POOL_SIZE is 0 these threads + * will also be used to handle the requests through sending the request + * to KV. In this case the number should be relatively high in order to + * keep up with traffic. If REQUEST_THREAD_POOL_SIZE is non-zero + * requests are handled using a separate pool. If that is the case this + * number can be smaller. If 0 Netty defaults to 2 * nCPUs (maybe 3x, + * depending on netty release) + */ public static ParamDef NUM_REQUEST_THREADS = new ParamDef("numRequestThreads", "32", ParamType.INT, ParamContext.ALL); @@ -107,6 +118,20 @@ enum ParamContext {CLOUD, ON_PREM, ALL, HIDDEN} public static ParamDef MONITOR_STATS_ENABLED = new ParamDef("monitorStatsEnabled", "false", ParamType.BOOL, ParamContext.ALL); + /* + * Set to non-zero to use a thread pool separate from Netty for handling + * requests. The pool is sized based on this parameter + */ + public static ParamDef REQUEST_THREAD_POOL_SIZE = + new ParamDef("requestThreadPoolSize", "0", ParamType.INT, ParamContext.ALL); + /* + * Set to non-zero to use a thread pool to handle async responses from + * KV client calls rather than using the KV threads themselves. This + * parameter is only used if ASYNC is true + */ + public static ParamDef KV_THREAD_POOL_SIZE = + new ParamDef("kvThreadPoolSize", "0", + ParamType.INT, ParamContext.ALL); /** Wallet file that holds the salt value for the query cache */ public static ParamDef SALT_WALLET = @@ -300,6 +325,7 @@ enum ParamContext {CLOUD, ON_PREM, ALL, HIDDEN} * errorCacheSize: Maximum number of unique IP addresses to track * error rates for. 
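A minimal sketch, not part of this patch, of setting the three thread-pool parameters described above together. The setter names (setNumRequestThreads, setRequestThreadPoolSize, setKVThreadPoolSize) are the ones this patch defines on Config; the ThreadPoolSizingSketch wrapper and the concrete values are hypothetical.

import oracle.nosql.proxy.Config;

final class ThreadPoolSizingSketch {
    /*
     * Hypothetical sizing: with a separate request pool and a KV completion
     * pool enabled, the Netty event-loop count can stay small because the
     * event loops only do network I/O.
     */
    static void applySizing(Config config) {
        config.setNumRequestThreads(8);        /* Netty event-loop threads */
        config.setRequestThreadPoolSize(64);   /* 0 disables the separate request pool */
        config.setKVThreadPoolSize(16);        /* 0 disables the KV completion pool (async only) */
    }
}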
*/ + @Deprecated public static ParamDef ERROR_CACHE_SIZE = new ParamDef("errorCacheSize", "10000", ParamType.INT, ParamContext.CLOUD); @@ -407,6 +433,14 @@ enum ParamContext {CLOUD, ON_PREM, ALL, HIDDEN} new ParamDef("childTableEnabled", "true", ParamType.BOOL, ParamContext.CLOUD); + /** + * Cloud only: true if enabled customer managed encryption key(CMEK), it is + * disabled by default. + */ + public static ParamDef CMEK_ENABLED = + new ParamDef("cmekEnabled", "false", + ParamType.BOOL, ParamContext.CLOUD); + /* ------- End params ---------*/ /* @@ -810,7 +844,6 @@ public void setNumRequestThreads(int numRequestThreads) { Integer.toString(numRequestThreads)); } - public int getNumAcceptThreads() { return getInt(NUM_ACCEPT_THREADS); } @@ -820,6 +853,24 @@ public void setNumAcceptThreads(int numThreads) { Integer.toString(numThreads)); } + public int getRequestThreadPoolSize() { + return getInt(REQUEST_THREAD_POOL_SIZE); + } + + public void setRequestThreadPoolSize(int size) { + paramVals.setProperty(REQUEST_THREAD_POOL_SIZE.paramName, + Integer.toString(size)); + } + + public int getKVThreadPoolSize() { + return getInt(KV_THREAD_POOL_SIZE); + } + + public void setKVThreadPoolSize(int size) { + paramVals.setProperty(KV_THREAD_POOL_SIZE.paramName, + Integer.toString(size)); + } + public String getHostname() { return paramVals.getProperty(HOSTNAME.paramName); } @@ -1024,10 +1075,6 @@ public int getErrorCreditMs() { return getInt(ERROR_CREDIT_MS); } - public int getErrorCacheSize() { - return getInt(ERROR_CACHE_SIZE); - } - public int getErrorCacheLifetimeMs() { return getInt(ERROR_CACHE_LIFETIME_MS); } @@ -1080,6 +1127,10 @@ public int getTableCacheCheckIntervalSec() { return getInt(TABLE_CACHE_CHECK_INTERVAL_SEC); } + public boolean isCmekEnabled() { + return getBool(CMEK_ENABLED); + } + /* Helpers to convert a String property value to a type */ private boolean getBool(ParamDef def) { String val = paramVals.getProperty(def.paramName, def.defaultVal); @@ -1168,7 +1219,8 @@ KVStoreConfig makeTemplateKVStoreConfig() { */ int requestLimit = getInt(KV_REQUEST_LIMIT); if (requestLimit < 0 || getAsync() == false) { - requestLimit = getNumRequestThreads(); + requestLimit = Math.max(getNumRequestThreads(), + getRequestThreadPoolSize()); } if (requestLimit > RequestLimitConfig.DEFAULT_MAX_ACTIVE_REQUESTS) { kvConfig.setRequestLimit( diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/DataService.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/DataService.java index 2eaf7c00..bc471c53 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/DataService.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/DataService.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. 
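A worked example of the request-limit fallback changed above in makeTemplateKVStoreConfig(). The Math.max() fallback and the later comparison against RequestLimitConfig.DEFAULT_MAX_ACTIVE_REQUESTS come from the patch; the concrete numbers (numRequestThreads=32, requestThreadPoolSize=128, KV request limit unset, async off) are hypothetical.

int requestLimit = -1;                 /* KV_REQUEST_LIMIT not configured */
boolean async = false;
if (requestLimit < 0 || !async) {
    /* patch change: size by whichever pool actually runs the requests */
    requestLimit = Math.max(32, 128);  /* -> 128; the old code would have used 32 */
}
/* requestLimit is then checked against
   RequestLimitConfig.DEFAULT_MAX_ACTIVE_REQUESTS before being applied */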
* * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: @@ -16,9 +16,13 @@ import static io.netty.handler.codec.http.HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN; import static io.netty.handler.codec.http.HttpResponseStatus.OK; import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1; +import static oracle.nosql.common.http.Constants.CONTENT_LENGTH; +import static oracle.nosql.common.http.Constants.CONTENT_TYPE; +import static oracle.nosql.common.http.Constants.KEEP_ALIVE; +import static oracle.nosql.proxy.JsonCollSerializer.createValueFromNson; import static oracle.nosql.proxy.ProxySerialization.getConsistency; -import static oracle.nosql.proxy.ProxySerialization.getDurability; import static oracle.nosql.proxy.ProxySerialization.getContinuationKey; +import static oracle.nosql.proxy.ProxySerialization.getDurability; import static oracle.nosql.proxy.ProxySerialization.getMultiRowOptions; import static oracle.nosql.proxy.ProxySerialization.getReadKB; import static oracle.nosql.proxy.ProxySerialization.raiseBadProtocolError; @@ -29,16 +33,16 @@ import static oracle.nosql.proxy.ProxySerialization.readMaxReadKB; import static oracle.nosql.proxy.ProxySerialization.readMaxWriteKB; import static oracle.nosql.proxy.ProxySerialization.readNonNullEmptyString; -import static oracle.nosql.proxy.ProxySerialization.readNumberLimit; import static oracle.nosql.proxy.ProxySerialization.readNumVariables; +import static oracle.nosql.proxy.ProxySerialization.readNumberLimit; import static oracle.nosql.proxy.ProxySerialization.readOpCode; import static oracle.nosql.proxy.ProxySerialization.readPrimaryKeySerializer; import static oracle.nosql.proxy.ProxySerialization.readRowSerializer; import static oracle.nosql.proxy.ProxySerialization.readShardId; import static oracle.nosql.proxy.ProxySerialization.readString; +import static oracle.nosql.proxy.ProxySerialization.readTTL; import static oracle.nosql.proxy.ProxySerialization.readTableLimits; import static oracle.nosql.proxy.ProxySerialization.readTopologySeqNum; -import static oracle.nosql.proxy.ProxySerialization.readTTL; import static oracle.nosql.proxy.ProxySerialization.readVersion; import static oracle.nosql.proxy.ProxySerialization.writeByteArray; import static oracle.nosql.proxy.ProxySerialization.writeConsumedCapacity; @@ -54,71 +58,28 @@ import static oracle.nosql.proxy.ProxySerialization.writeSuccess; import static oracle.nosql.proxy.ProxySerialization.writeTableOperationResult; import static oracle.nosql.proxy.ProxySerialization.writeVersion; -import static oracle.nosql.proxy.protocol.BinaryProtocol.mapDDLError; import static oracle.nosql.proxy.protocol.BinaryProtocol.BAD_PROTOCOL_MESSAGE; import static oracle.nosql.proxy.protocol.BinaryProtocol.BATCH_OP_NUMBER_LIMIT_EXCEEDED; +import static oracle.nosql.proxy.protocol.BinaryProtocol.CURRENT_QUERY_VERSION; import static oracle.nosql.proxy.protocol.BinaryProtocol.ILLEGAL_ARGUMENT; import static oracle.nosql.proxy.protocol.BinaryProtocol.ON_DEMAND; import static oracle.nosql.proxy.protocol.BinaryProtocol.PROVISIONED; import static oracle.nosql.proxy.protocol.BinaryProtocol.QUERY_V1; import static oracle.nosql.proxy.protocol.BinaryProtocol.QUERY_V4; import static oracle.nosql.proxy.protocol.BinaryProtocol.QUERY_V5; -import static oracle.nosql.proxy.protocol.BinaryProtocol.CURRENT_QUERY_VERSION; import static oracle.nosql.proxy.protocol.BinaryProtocol.RECOMPILE_QUERY; import static oracle.nosql.proxy.protocol.BinaryProtocol.REPLICA_STATS_LIMIT; import 
static oracle.nosql.proxy.protocol.BinaryProtocol.REQUEST_SIZE_LIMIT_EXCEEDED; +import static oracle.nosql.proxy.protocol.BinaryProtocol.REQUEST_TIMEOUT; import static oracle.nosql.proxy.protocol.BinaryProtocol.SECURITY_INFO_UNAVAILABLE; import static oracle.nosql.proxy.protocol.BinaryProtocol.SERVER_ERROR; import static oracle.nosql.proxy.protocol.BinaryProtocol.TABLE_NOT_FOUND; import static oracle.nosql.proxy.protocol.BinaryProtocol.TABLE_USAGE_NUMBER_LIMIT; -import static oracle.nosql.proxy.protocol.BinaryProtocol.UNSUPPORTED_QUERY_VERSION; - -// same as NSON types -import static oracle.nosql.proxy.protocol.BinaryProtocol.TYPE_BOOLEAN; -import static oracle.nosql.proxy.protocol.BinaryProtocol.TYPE_DOUBLE; -import static oracle.nosql.proxy.protocol.BinaryProtocol.TYPE_INTEGER; -import static oracle.nosql.proxy.protocol.BinaryProtocol.TYPE_LONG; -import static oracle.nosql.proxy.protocol.BinaryProtocol.TYPE_MAP; -import static oracle.nosql.proxy.protocol.BinaryProtocol.TYPE_NUMBER; -import static oracle.nosql.proxy.protocol.BinaryProtocol.TYPE_STRING; -import static oracle.nosql.proxy.protocol.BinaryProtocol.TYPE_TIMESTAMP; - import static oracle.nosql.proxy.protocol.BinaryProtocol.UNKNOWN_ERROR; import static oracle.nosql.proxy.protocol.BinaryProtocol.UNKNOWN_OPERATION; +import static oracle.nosql.proxy.protocol.BinaryProtocol.UNSUPPORTED_QUERY_VERSION; import static oracle.nosql.proxy.protocol.BinaryProtocol.V1; -import static oracle.nosql.proxy.protocol.JsonProtocol.OPC_REQUEST_ID; -import static oracle.nosql.proxy.protocol.Protocol.OPERATION_NOT_SUPPORTED; -import static oracle.nosql.proxy.protocol.Protocol.READ_LIMIT_EXCEEDED; -import static oracle.nosql.proxy.protocol.Protocol.SERIAL_VERSION_STRING; -import static oracle.nosql.proxy.protocol.Protocol.OpCode.ADD_REPLICA; -import static oracle.nosql.proxy.protocol.Protocol.OpCode.DELETE; -import static oracle.nosql.proxy.protocol.Protocol.OpCode.DELETE_IF_VERSION; -import static oracle.nosql.proxy.protocol.Protocol.OpCode.DROP_TABLE; -import static oracle.nosql.proxy.protocol.Protocol.OpCode.DROP_REPLICA; -import static oracle.nosql.proxy.protocol.Protocol.OpCode.GET; -import static oracle.nosql.proxy.protocol.Protocol.OpCode.GET_INDEXES; -import static oracle.nosql.proxy.protocol.Protocol.OpCode.GET_REPLICA_STATS; -import static oracle.nosql.proxy.protocol.Protocol.OpCode.GET_TABLE; -import static oracle.nosql.proxy.protocol.Protocol.OpCode.GET_TABLE_USAGE; -import static oracle.nosql.proxy.protocol.Protocol.OpCode.INTERNAL_DDL; -import static oracle.nosql.proxy.protocol.Protocol.OpCode.INTERNAL_STATUS; -import static oracle.nosql.proxy.protocol.Protocol.OpCode.LIST_TABLES; -import static oracle.nosql.proxy.protocol.Protocol.OpCode.MULTI_DELETE; -import static oracle.nosql.proxy.protocol.Protocol.OpCode.PREPARE; -import static oracle.nosql.proxy.protocol.Protocol.OpCode.PUT; -import static oracle.nosql.proxy.protocol.Protocol.OpCode.PUT_IF_ABSENT; -import static oracle.nosql.proxy.protocol.Protocol.OpCode.PUT_IF_PRESENT; -import static oracle.nosql.proxy.protocol.Protocol.OpCode.PUT_IF_VERSION; -import static oracle.nosql.proxy.protocol.Protocol.OpCode.QUERY; -import static oracle.nosql.proxy.protocol.Protocol.OpCode.SYSTEM_REQUEST; -import static oracle.nosql.proxy.protocol.Protocol.OpCode.SYSTEM_STATUS_REQUEST; -import static oracle.nosql.proxy.protocol.Protocol.OpCode.TABLE_REQUEST; -import static oracle.nosql.proxy.protocol.Protocol.OpCode.WRITE_MULTIPLE; - -import static oracle.nosql.common.http.Constants.KEEP_ALIVE; -import 
static oracle.nosql.common.http.Constants.CONTENT_LENGTH; -import static oracle.nosql.common.http.Constants.CONTENT_TYPE; - +import static oracle.nosql.proxy.protocol.BinaryProtocol.mapDDLError; import static oracle.nosql.proxy.protocol.HttpConstants.NOSQL_DATA_PATH; import static oracle.nosql.proxy.protocol.HttpConstants.PROXY_SERIAL_VERSION_HEADER; import static oracle.nosql.proxy.protocol.HttpConstants.PROXY_VERSION_HEADER; @@ -126,33 +87,46 @@ import static oracle.nosql.proxy.protocol.HttpConstants.X_FORWARDED_FOR_HEADER; import static oracle.nosql.proxy.protocol.HttpConstants.X_REAL_IP_HEADER; import static oracle.nosql.proxy.protocol.HttpConstants.pathInURIAllVersions; +import static oracle.nosql.proxy.protocol.JsonProtocol.OPC_REQUEST_ID; import static oracle.nosql.proxy.protocol.NsonProtocol.*; -import static oracle.nosql.proxy.JsonCollSerializer.createValueFromNson; +import static oracle.nosql.proxy.protocol.Protocol.OPERATION_NOT_SUPPORTED; +import static oracle.nosql.proxy.protocol.Protocol.READ_LIMIT_EXCEEDED; +import static oracle.nosql.proxy.protocol.Protocol.SERIAL_VERSION_STRING; import java.io.IOException; import java.io.UncheckedIOException; import java.math.MathContext; import java.math.RoundingMode; -import java.time.temporal.ChronoUnit; import java.time.Clock; +import java.time.temporal.ChronoUnit; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.TreeMap; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionException; import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicInteger; import java.util.logging.Level; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.channel.ChannelHandlerContext; +import io.netty.handler.codec.http.DefaultFullHttpResponse; +import io.netty.handler.codec.http.FullHttpRequest; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpMethod; import oracle.kv.Consistency; import oracle.kv.Durability; import oracle.kv.FaultException; -import oracle.kv.Key; import oracle.kv.KVSecurityException; +import oracle.kv.Key; import oracle.kv.MetadataNotFoundException; import oracle.kv.PrepareQueryException; import oracle.kv.ResourceLimitException; @@ -162,10 +136,10 @@ import oracle.kv.impl.api.Request; import oracle.kv.impl.api.ops.Result; import oracle.kv.impl.api.query.PreparedStatementImpl; -import oracle.kv.impl.api.table.PrimaryKeyImpl; +import oracle.kv.impl.api.query.QueryPublisher.QuerySubscription; import oracle.kv.impl.api.query.QueryStatementResultImpl; import oracle.kv.impl.api.query.QueryStatementResultImpl.QueryResultIterator; -import oracle.kv.impl.api.query.QueryPublisher.QuerySubscription; +import oracle.kv.impl.api.table.PrimaryKeyImpl; import oracle.kv.impl.api.table.TableAPIImpl; import oracle.kv.impl.api.table.TableAPIImpl.GeneratedValueInfo; import oracle.kv.impl.api.table.TableAPIImpl.OpFactory; @@ -180,8 +154,8 @@ import oracle.kv.query.ExecuteOptions; import oracle.kv.query.PrepareCallback.QueryOperation; import oracle.kv.table.FieldDef; -import oracle.kv.table.FieldValue; import oracle.kv.table.FieldRange; +import oracle.kv.table.FieldValue; import oracle.kv.table.MultiRowOptions; import oracle.kv.table.ReadOptions; import oracle.kv.table.RecordValue; @@ -203,47 
+177,34 @@ import oracle.nosql.nson.util.NettyByteOutputStream; import oracle.nosql.nson.values.MapWalker; import oracle.nosql.nson.values.PathFinder; -import oracle.nosql.nson.values.TimestampValue; import oracle.nosql.proxy.MonitorStats.OperationType; -import oracle.nosql.proxy.ProxySerialization.FieldValueWriterImpl; -import oracle.nosql.proxy.ProxySerialization.RowReaderImpl; import oracle.nosql.proxy.ValueSerializer.RowSerializerImpl; import oracle.nosql.proxy.audit.ProxyAuditManager; import oracle.nosql.proxy.filter.FilterHandler; -import oracle.nosql.proxy.filter.FilterHandler.Action; import oracle.nosql.proxy.filter.FilterHandler.Filter; import oracle.nosql.proxy.protocol.ByteInputStream; import oracle.nosql.proxy.protocol.ByteOutputStream; import oracle.nosql.proxy.protocol.JsonProtocol; +import oracle.nosql.proxy.protocol.NsonProtocol; import oracle.nosql.proxy.protocol.Protocol; import oracle.nosql.proxy.protocol.Protocol.OpCode; import oracle.nosql.proxy.protocol.SerializationUtil; -import oracle.nosql.proxy.protocol.NsonProtocol; import oracle.nosql.proxy.sc.GetTableResponse; import oracle.nosql.proxy.sc.IndexResponse; import oracle.nosql.proxy.sc.ListTableResponse; import oracle.nosql.proxy.sc.TableUsageResponse; import oracle.nosql.proxy.sc.TableUtils; -import oracle.nosql.proxy.sc.TableUtils.PrepareCB; import oracle.nosql.proxy.sc.TableUtils.MapPrepareCB; +import oracle.nosql.proxy.sc.TableUtils.PrepareCB; import oracle.nosql.proxy.sc.TenantManager; import oracle.nosql.proxy.security.AccessContext; import oracle.nosql.proxy.security.AccessContext.Type; import oracle.nosql.proxy.util.ErrorManager; -import oracle.nosql.proxy.util.TableCache.TableEntry; +import oracle.nosql.proxy.util.ProxyThreadPoolExecutor; +import oracle.nosql.util.filter.Rule; import oracle.nosql.util.tmi.IndexInfo; import oracle.nosql.util.tmi.TableInfo; import oracle.nosql.util.tmi.TableLimits; - - -import io.netty.buffer.ByteBuf; -import io.netty.buffer.Unpooled; -import io.netty.channel.ChannelHandlerContext; -import io.netty.handler.codec.http.DefaultFullHttpResponse; -import io.netty.handler.codec.http.FullHttpRequest; -import io.netty.handler.codec.http.FullHttpResponse; -import io.netty.handler.codec.http.HttpMethod; - import org.reactivestreams.Publisher; import org.reactivestreams.Subscriber; import org.reactivestreams.Subscription; @@ -278,6 +239,14 @@ public abstract class DataService extends DataServiceHandler implements Service private final int maxRetriesPerRequest; private final int retryDelayMs; + /* + * If non-null use this executor for async completions for async KV + * requests. This does not apply to all request types, only the simple + * ones -- put, get, delete, multidelete, write multiple. + * It is not used for query or prepare. 
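The dispatch pattern this executor enables appears in handleGet/handlePut later in the patch; here it is condensed into a self-contained sketch. CompletionDispatchSketch, dispatch() and handler are hypothetical stand-ins for the per-operation response methods (handleGetResponse, handlePutResponse).

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executor;
import java.util.function.BiConsumer;

final class CompletionDispatchSketch {
    /*
     * If a proxy-owned executor is configured (kvThreadPoolSize > 0), finish
     * the response on that pool instead of on the KV client's callback thread;
     * otherwise run the handler inline, as the patch does for the simple ops
     * (put, get, delete, multi-delete, write-multiple).
     */
    static <T> void dispatch(CompletableFuture<T> future,
                             Executor executor,            /* null => run inline */
                             BiConsumer<T, Throwable> handler) {
        future.whenComplete((result, e) -> {
            if (executor != null) {
                executor.execute(() -> handler.accept(result, e));
            } else {
                handler.accept(result, e);
            }
        });
    }
}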
+ */ + private final ProxyThreadPoolExecutor executor; + /* create a default RC */ private RequestContextFactory rcFactory = new RequestContextFactory() { @@ -301,30 +270,30 @@ public RequestContext createRequestContext( new HashMap(); static { - opCodeMap.put(DELETE, OperationType.DELETE); - opCodeMap.put(DELETE_IF_VERSION, OperationType.DELETE); - opCodeMap.put(GET, OperationType.GET); - opCodeMap.put(PUT, OperationType.PUT); - opCodeMap.put(PUT_IF_ABSENT, OperationType.PUT); - opCodeMap.put(PUT_IF_PRESENT, OperationType.PUT); - opCodeMap.put(PUT_IF_VERSION, OperationType.PUT); - opCodeMap.put(QUERY, OperationType.QUERY); - opCodeMap.put(PREPARE, OperationType.PREPARE); - opCodeMap.put(WRITE_MULTIPLE, OperationType.WRITE_MULTIPLE); - opCodeMap.put(MULTI_DELETE, OperationType.MULTI_DELETE); - opCodeMap.put(TABLE_REQUEST, OperationType.TABLE_REQUEST); - opCodeMap.put(GET_TABLE, OperationType.GET_TABLE); - opCodeMap.put(GET_TABLE_USAGE, OperationType.GET_TABLE_USAGE); - opCodeMap.put(LIST_TABLES, OperationType.LIST_TABLES); - opCodeMap.put(GET_INDEXES, OperationType.GET_INDEXES); - opCodeMap.put(SYSTEM_REQUEST, OperationType.SYSTEM_REQUEST); - opCodeMap.put(SYSTEM_STATUS_REQUEST, + opCodeMap.put(OpCode.DELETE, OperationType.DELETE); + opCodeMap.put(OpCode.DELETE_IF_VERSION, OperationType.DELETE); + opCodeMap.put(OpCode.GET, OperationType.GET); + opCodeMap.put(OpCode.PUT, OperationType.PUT); + opCodeMap.put(OpCode.PUT_IF_ABSENT, OperationType.PUT); + opCodeMap.put(OpCode.PUT_IF_PRESENT, OperationType.PUT); + opCodeMap.put(OpCode.PUT_IF_VERSION, OperationType.PUT); + opCodeMap.put(OpCode.QUERY, OperationType.QUERY); + opCodeMap.put(OpCode.PREPARE, OperationType.PREPARE); + opCodeMap.put(OpCode.WRITE_MULTIPLE, OperationType.WRITE_MULTIPLE); + opCodeMap.put(OpCode.MULTI_DELETE, OperationType.MULTI_DELETE); + opCodeMap.put(OpCode.TABLE_REQUEST, OperationType.TABLE_REQUEST); + opCodeMap.put(OpCode.GET_TABLE, OperationType.GET_TABLE); + opCodeMap.put(OpCode.GET_TABLE_USAGE, OperationType.GET_TABLE_USAGE); + opCodeMap.put(OpCode.LIST_TABLES, OperationType.LIST_TABLES); + opCodeMap.put(OpCode.GET_INDEXES, OperationType.GET_INDEXES); + opCodeMap.put(OpCode.SYSTEM_REQUEST, OperationType.SYSTEM_REQUEST); + opCodeMap.put(OpCode.SYSTEM_STATUS_REQUEST, OperationType.SYSTEM_STATUS_REQUEST); - opCodeMap.put(ADD_REPLICA, OperationType.TABLE_REQUEST); - opCodeMap.put(DROP_REPLICA, OperationType.TABLE_REQUEST); - opCodeMap.put(INTERNAL_DDL, OperationType.TABLE_REQUEST); - opCodeMap.put(INTERNAL_STATUS, OperationType.TABLE_REQUEST); - opCodeMap.put(GET_REPLICA_STATS, OperationType.GET_TABLE_USAGE); + opCodeMap.put(OpCode.ADD_REPLICA, OperationType.TABLE_REQUEST); + opCodeMap.put(OpCode.DROP_REPLICA, OperationType.TABLE_REQUEST); + opCodeMap.put(OpCode.INTERNAL_DDL, OperationType.TABLE_REQUEST); + opCodeMap.put(OpCode.INTERNAL_STATUS, OperationType.TABLE_REQUEST); + opCodeMap.put(OpCode.GET_REPLICA_STATS, OperationType.GET_TABLE_USAGE); } /* @@ -340,26 +309,26 @@ private void initOperation(OpCode op, ProxyOperation operation) { private void initOperations() { - initOperation(DELETE, this::handleDelete); - initOperation(DELETE_IF_VERSION, this::handleDelete); - initOperation(GET, this::handleGet); - initOperation(PUT, this::handlePut); - initOperation(PUT_IF_ABSENT, this::handlePut); - initOperation(PUT_IF_PRESENT, this::handlePut); - initOperation(PUT_IF_VERSION, this::handlePut); - initOperation(WRITE_MULTIPLE, this::handleWriteMultiple); - initOperation(MULTI_DELETE, this::handleMultiDelete); - 
initOperation(QUERY, this::handleQuery); + initOperation(OpCode.DELETE, this::handleDelete); + initOperation(OpCode.DELETE_IF_VERSION, this::handleDelete); + initOperation(OpCode.GET, this::handleGet); + initOperation(OpCode.PUT, this::handlePut); + initOperation(OpCode.PUT_IF_ABSENT, this::handlePut); + initOperation(OpCode.PUT_IF_PRESENT, this::handlePut); + initOperation(OpCode.PUT_IF_VERSION, this::handlePut); + initOperation(OpCode.WRITE_MULTIPLE, this::handleWriteMultiple); + initOperation(OpCode.MULTI_DELETE, this::handleMultiDelete); + initOperation(OpCode.QUERY, this::handleQuery); /* the following ops are not yet available async */ - initOperation(PREPARE, this::handlePrepare); - initOperation(TABLE_REQUEST, this::handleTableOp); - initOperation(GET_TABLE, this::handleGetTable); - initOperation(GET_TABLE_USAGE, this::handleTableUsage); - initOperation(LIST_TABLES, this::handleListTables); - initOperation(GET_INDEXES, this::handleGetIndexes); - initOperation(SYSTEM_REQUEST, this::handleSystemOp); - initOperation(SYSTEM_STATUS_REQUEST, this::handleSystemStatus); + initOperation(OpCode.PREPARE, this::handlePrepare); + initOperation(OpCode.TABLE_REQUEST, this::handleTableOp); + initOperation(OpCode.GET_TABLE, this::handleGetTable); + initOperation(OpCode.GET_TABLE_USAGE, this::handleTableUsage); + initOperation(OpCode.LIST_TABLES, this::handleListTables); + initOperation(OpCode.GET_INDEXES, this::handleGetIndexes); + initOperation(OpCode.SYSTEM_REQUEST, this::handleSystemOp); + initOperation(OpCode.SYSTEM_STATUS_REQUEST, this::handleSystemStatus); /* * the following ops are for MR table @@ -378,11 +347,11 @@ private void initOperations() { * * - GET_REPLICA_STATS: get replication stats info of MR table. */ - initOperation(ADD_REPLICA, this::handleAddReplica); - initOperation(DROP_REPLICA, this::handleDropReplica); - initOperation(INTERNAL_DDL, this::handleInternalDdl); - initOperation(INTERNAL_STATUS, this::handleInternalStatus); - initOperation(GET_REPLICA_STATS, this::handleGetReplicaStats); + initOperation(OpCode.ADD_REPLICA, this::handleAddReplica); + initOperation(OpCode.DROP_REPLICA, this::handleDropReplica); + initOperation(OpCode.INTERNAL_DDL, this::handleInternalDdl); + initOperation(OpCode.INTERNAL_STATUS, this::handleInternalStatus); + initOperation(OpCode.GET_REPLICA_STATS, this::handleGetReplicaStats); } private void validateConfig(String conf, int val, int min, int max) { @@ -418,6 +387,16 @@ public DataService(SkLogger logger, retryDelayMs, 10, 100); this.activeRetryCount = new AtomicInteger(0); + if (config.getKVThreadPoolSize() > 0) { + /* + * create an Executor to handle async completions from KV + */ + executor = new ProxyThreadPoolExecutor( + config.getKVThreadPoolSize(), "ProxyKVResponse"); + } else { + executor = null; + } + initOperations(); } @@ -450,32 +429,29 @@ protected FullHttpResponse handleRequest(FullHttpRequest request, ChannelHandlerContext ctx, LogContext lc, Object callerContext) { - /* Block all requests if there is "big red button" rule */ - Action action = checkBlockAll(lc); - if (action != null) { - return action.handleRequest(null, null, lc); - } int threads = activeWorkerThreads.incrementAndGet(); if (stats != null) { stats.markOpActiveWorkerThreads(threads); } - /* get readonly/header fields from request, put in rc */ RequestContext rc = rcFactory.createRequestContext(request, ctx, lc, callerContext); - /* this service now always manages reference counting. 
*/ /* since returning from this method will release() the */ /* message, retain() it here */ request.retain(); - try { + /* Block all requests if there is "big red button" rule */ + Rule rule = getBlockAll(lc); + if (rule != null) { + return filter.handleRequest(rc, rule); + } + /* Handle OPTIONS method for pre-flight request. */ if (HttpMethod.OPTIONS.equals(rc.request.method())) { return handleOptions(rc.request, rc.lc); } - /* read binary header. this may throw errors. */ rc.readBinaryHeader(forceV3, forceV4); rc.opType = getOpType(rc.opCode); @@ -486,7 +462,7 @@ protected FullHttpResponse handleRequest(FullHttpRequest request, /* response sending managed by handleRequest() */ return null; } catch (Exception e) { - //e.printStackTrace(); + // e.printStackTrace(); int code = BAD_PROTOCOL_MESSAGE; if (e instanceof RequestException) { code = ((RequestException)e).getErrorCode(); @@ -532,7 +508,6 @@ protected boolean isErrorLimitingResponse(FullHttpResponse resp, /* Note: this method should never ever throw an exception */ private FullHttpResponse handleRequestInternal(RequestContext rc) { - /* Validate the input */ final FullHttpResponse violation = validateHttpRequest(rc); if (violation != null) { @@ -543,6 +518,7 @@ private FullHttpResponse handleRequestInternal(RequestContext rc) { markOpStart(rc); return handleRequestWithContext(rc); } catch (Throwable e) { + // e.printStackTrace(); markOpFailed(rc, 1 /* serverFailure */); final String faultMsg = e.toString(); if (logger.isLoggable(Level.INFO) && @@ -616,6 +592,9 @@ private boolean attemptRetry(RequestContext rc, int delayMs) { } /* + * NOTE: with use of ProxyThreadPoolExecutor for netty and/or KV + * completions it's possible to get some queue size info + * * TODO: get executor queue size. If above a threshold, don't * do internal retry. We currently can't do this, because netty's * NioEventExecutor doesn't expose the underlying queue. @@ -970,7 +949,7 @@ private FullHttpResponse formulateErrorResponse( throw re; } catch (FilterRequestException fre) { /* this will currently always return null. Hmmm... */ - return handleFilterRequest(fre, rc.requestId, rc.lc); + return handleFilterRequest(fre, rc); } catch (Throwable t) { /* * This error may indicate a bug in the proxy. 
Make sure @@ -1063,7 +1042,6 @@ protected void handleKVSecurityException(OpCode op, */ private void finishOp(RequestContext rc, FullHttpResponse response) { - if (response == null || rc == null) { return; } @@ -1148,6 +1126,9 @@ protected String mapTableName(AccessContext actx, OpCode op, String name) { @Override public void shutDown() { tm.shutDown(); + if (executor != null) { + executor.shutdown(true); + } } /** @@ -1172,10 +1153,10 @@ protected String mapErrorMessage(Map tableActxs, /** * Get */ - private boolean handleGet(RequestContext rc) + private boolean handleGet(final RequestContext rc) throws IOException { - GetOpInfo info = new GetOpInfo(); + final GetOpInfo info = new GetOpInfo(); if (rc.isNson()) { getV4GetOpInfo(info, rc); } else { @@ -1207,70 +1188,86 @@ private boolean handleGet(RequestContext rc) ReadOptions options = createReadOptions(info.consistency, rc.timeoutMs, rc.actx.getAuthString(), rc.lc); - CompletableFuture future; if (!useAsync()) { Result res = doGet(tableApi, pkey, options); - future = CompletableFuture.completedFuture(res); + handleGetResponse(res, null, rc, info, pkey); + /* handleGetResponse sends the reply so pretend this is async */ + return true; } else { - future = tableApi.getAsyncInternal(pkey, options); + CompletableFuture future = + future = tableApi.getAsyncInternal(pkey, options); + future.whenComplete((result, e) -> { + if (executor != null) { + executor.execute(()-> { + handleGetResponse(result, e, rc, info, pkey); + }); + } else { + handleGetResponse(result, e, rc, info, pkey); + } + }); } + /* true == async */ + return true; + } - /* Set up an anonymous function to be called when future completes. */ - future.whenComplete((result, e) -> { - FullHttpResponse resp = null; - try { - if (e != null) { - resp = formulateErrorResponse(e, rc); + private void handleGetResponse(final Result result, + final Throwable e, + final RequestContext rc, + final GetOpInfo info, + final RowSerializer pkey) { + + Table table = rc.entry.getTable(); + TableAPIImpl tableApi = rc.entry.getTableAPI(); + FullHttpResponse resp = null; + try { + if (e != null) { + resp = formulateErrorResponse(e, rc); + } else { + if (rc.isNson()) { + NsonProtocol.writeGetResponse(rc, this, result, + info.consistency, + tableApi, + (TableImpl)table, + pkey); } else { - if (rc.isNson()) { - NsonProtocol.writeGetResponse(rc, this, result, - info.consistency, - tableApi, - (TableImpl)table, - pkey); - } else { - writeSuccess(rc.bbos); - writeThroughput(rc.bbos, - result.getReadKB(), - result.getWriteKB(), - isAbsolute(info.consistency)); - if (result.getSuccess()) { - rc.bbos.writeBoolean(true); - /* Read row */ - RowReaderImpl reader = - new RowReaderImpl(rc.bbos, table); - tableApi.createRowFromGetResult( - result, pkey, reader); - reader.done(); - writeExpirationTime(rc.bbos, - reader.getExpirationTime()); - writeVersion(rc.bbos, reader.getVersion()); - if (rc.serialVersion > Protocol.V2) { - writeModificationTime(rc.bbos, + writeSuccess(rc.bbos); + writeThroughput(rc.bbos, + result.getReadKB(), + result.getWriteKB(), + isAbsolute(info.consistency)); + if (result.getSuccess()) { + rc.bbos.writeBoolean(true); + /* Read row */ + ProxySerialization.RowReaderImpl reader = + new ProxySerialization.RowReaderImpl(rc.bbos, table); + tableApi.createRowFromGetResult( + result, pkey, reader); + reader.done(); + writeExpirationTime(rc.bbos, + reader.getExpirationTime()); + writeVersion(rc.bbos, reader.getVersion()); + if (rc.serialVersion > Protocol.V2) { + writeModificationTime(rc.bbos, 
reader.getModificationTime()); - } - } else { - rc.bbos.writeBoolean(false); } + } else { + rc.bbos.writeBoolean(false); } - markDataOpSucceeded(rc, - result.getNumRecords(), - result.getReadKB(), - result.getWriteKB()); - rc.setThroughput(result); - resp = defaultHttpResponse(rc); } - } catch (Exception ue) { - logger.info("Unexpected exception in getAsync response " + - "builder method: " + ue.getMessage(), rc.lc); - resp = formulateErrorResponse(ue, rc); - } finally { - finishOp(rc, resp); + markDataOpSucceeded(rc, + result.getNumRecords(), + result.getReadKB(), + result.getWriteKB()); + rc.setThroughput(result); + resp = defaultHttpResponse(rc); } - }); - - /* true == async */ - return true; + } catch (Exception ue) { + logger.info("Unexpected exception in getAsync response " + + "builder method: " + ue.getMessage(), rc.lc); + resp = formulateErrorResponse(ue, rc); + } finally { + finishOp(rc, resp); + } } /** @@ -1319,6 +1316,9 @@ private boolean handlePut(RequestContext rc) if (info.TTL != null && row != null) { ((RowSerializerImpl)row).setTTL(info.TTL); } + if (info.rowMetadata != null && row != null) { + ((RowSerializerImpl)row).setRowMetadata(info.rowMetadata); + } /* * ReturnRow is set from request. @@ -1331,7 +1331,7 @@ private boolean handlePut(RequestContext rc) } if (!rc.isNson()) { - info.matchVersion = (rc.opCode == PUT_IF_VERSION) ? + info.matchVersion = (rc.opCode == OpCode.PUT_IF_VERSION) ? readVersion(rc.bbis) : null; } @@ -1356,7 +1356,6 @@ private boolean handlePut(RequestContext rc) Key kvKey = null; Value kvValue = null; KVStoreImpl store = tableApi.getStore(); - CompletableFuture future = null; if (table.isJsonCollection()) { @@ -1366,7 +1365,7 @@ private boolean handlePut(RequestContext rc) * 1. ensuring if they are present that they have a path * 2. translating/replacing any value for the counter to the * default (0) - * Also, if the table is MR (without or without counters) the + * Also, if the table is MR (with or without counters) the * appropriate format and region id need to be set in the Value * that is created. 
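The refactored data-path handlers in this hunk (handleGet, handlePut, and the delete and multi-op paths that follow) all use the same completion pattern: when a ProxyThreadPoolExecutor is configured, response building is handed to that pool inside whenComplete(), otherwise it runs inline on the completing netty/KV thread. The following is a minimal JDK-only sketch of that dispatch pattern; buildResponse and the String result type are placeholders for the real handleXxxResponse methods and KV Result, not code from the patch.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

/* Illustrative sketch only, not part of the patch. */
public class CompletionDispatchSketch {

    /* null means "run the completion inline", like the executor field */
    private final ExecutorService executor;

    CompletionDispatchSketch(ExecutorService executor) {
        this.executor = executor;
    }

    CompletableFuture<String> handleAsync(CompletableFuture<String> kvFuture) {
        return kvFuture.whenComplete((result, err) -> {
            if (executor != null) {
                /* hand response building to the proxy pool */
                executor.execute(() -> buildResponse(result, err));
            } else {
                /* no pool configured: build the response on this thread */
                buildResponse(result, err);
            }
        });
    }

    private void buildResponse(String result, Throwable err) {
        System.out.println(err != null ? "failed: " + err
                                       : "succeeded: " + result);
    }

    public static void main(String[] args) {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        CompletionDispatchSketch d = new CompletionDispatchSketch(pool);
        d.handleAsync(CompletableFuture.supplyAsync(() -> "row-42")).join();
        pool.shutdown();
    }
}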
*/ @@ -1374,7 +1373,7 @@ private boolean handlePut(RequestContext rc) /* this is in case returnRow is not null and needs to be set */ row = pkey; /* this method fills in PrimaryKey as well */ - kvValue = createValueFromNson(table, pkey, rc.bbis); + kvValue = createValueFromNson(table, pkey, rc.bbis, info.rowMetadata); kvKey = table.createKeyInternal(pkey, false, store, genInfo); } else { kvKey = table.createKeyInternal(row, false, store, genInfo); @@ -1396,93 +1395,112 @@ private boolean handlePut(RequestContext rc) final RowSerializer rowForReturnRow = row; if (!useAsync()) { Result res = tableApi.getStore().executeRequest(rq); - future = CompletableFuture.completedFuture(res); - if (genInfo != null) { - res.setGeneratedValue(genInfo.getGeneratedValue()); - } + handlePutResponse(res, null, rc, + genInfo, rowForReturnRow, returnRow); + /* handlePutResponse sends the reply so pretend this is async */ + return true; } else { - future = store.executeRequestAsync(rq); - } + CompletableFuture future = store.executeRequestAsync(rq); + future.whenComplete((result, e) -> { + if (executor != null) { + executor.execute(()-> { + handlePutResponse(result, e, rc, + genInfo, rowForReturnRow, returnRow); + }); + } else { + handlePutResponse(result, e, rc, + genInfo, rowForReturnRow, returnRow); - future.whenComplete((result, e) -> { - FullHttpResponse resp = null; - try { - if (e != null) { - resp = formulateErrorResponse(e, rc); - } else { - if (genInfo != null) { - result.setGeneratedValue(genInfo.getGeneratedValue()); } + }); + } - if (rc.isNson()) { - NsonProtocol.writePutResponse(rc, this, result, - tableApi, - rowForReturnRow, - returnRow); - } else { - Version version = result.getNewVersion(); + /* true == async */ + return true; + } - writeSuccess(rc.bbos); - writeThroughput(rc.bbos, - result.getReadKB(), - result.getWriteKB(), - true); // absolute + private void handlePutResponse(Result result, Throwable e, + RequestContext rc, + GeneratedValueInfo genInfo, + RowSerializer rowForReturnRow, + ReturnRow returnRow) { + TableImpl table = (TableImpl) rc.entry.getTable(); + TableAPIImpl tableApi = rc.entry.getTableAPI(); + FullHttpResponse resp = null; + try { + if (e != null) { + resp = formulateErrorResponse(e, rc); + } else { + if (genInfo != null) { + result.setGeneratedValue(genInfo.getGeneratedValue()); + } - /* - * version and previous info are independent. A - * response may have one, the other or both. - */ - if (version != null) { - rc.bbos.writeBoolean(true); - writeVersion(rc.bbos, version); - } else { - rc.bbos.writeBoolean(false); - } + if (rc.isNson()) { + NsonProtocol.writePutResponse(rc, this, result, + tableApi, + rowForReturnRow, + returnRow); + } else { + Version version = result.getNewVersion(); + + writeSuccess(rc.bbos); + writeThroughput(rc.bbos, + result.getReadKB(), + result.getWriteKB(), + true); // absolute + + /* + * version and previous info are independent. A + * response may have one, the other or both. + */ + if (version != null) { + rc.bbos.writeBoolean(true); + writeVersion(rc.bbos, version); + } else { + rc.bbos.writeBoolean(false); + } - /* return row */ - /* Note this is where the bbis/input row */ - /* may be referenced... 
*/ - writeExistingRow(rc.bbos, (version != null), - returnRow, tableApi, - rowForReturnRow, result, - rc.serialVersion); - - /* generated value for identity column or uuid */ - if (rc.serialVersion > V1) { - /* only return it if the operation succeeded */ - if ((table.hasIdentityColumn() || - table.hasUUIDcolumn()) - && version != null) { - FieldValue generated = - result.getGeneratedValue(); - if (generated != null) { - rc.bbos.writeBoolean(true); - writeFieldValue(rc.bbos, generated); - } else { - rc.bbos.writeBoolean(false); - } + /* return row */ + /* Note this is where the bbis/input row */ + /* may be referenced... */ + writeExistingRow(rc.bbos, (version != null), + returnRow, tableApi, + rowForReturnRow, result, + rc.serialVersion); + + /* generated value for identity column or uuid */ + if (rc.serialVersion > V1) { + /* only return it if the operation succeeded */ + if ((table.hasIdentityColumn() || + table.hasUUIDcolumn()) + && version != null) { + FieldValue generated = + result.getGeneratedValue(); + if (generated != null) { + rc.bbos.writeBoolean(true); + writeFieldValue(rc.bbos, generated); } else { rc.bbos.writeBoolean(false); } + } else { + rc.bbos.writeBoolean(false); } } - markDataOpSucceeded(rc, - result.getNumRecords(), - result.getReadKB(), - result.getWriteKB()); - rc.setThroughput(result); - resp = defaultHttpResponse(rc); } - } catch (Exception ue) { - logger.info("Unexpected exception in putAsync response " + - "builder method: " + ue.getMessage(), rc.lc); - resp = formulateErrorResponse(ue, rc); - } finally { - finishOp(rc, resp); + markDataOpSucceeded(rc, + result.getNumRecords(), + result.getReadKB(), + result.getWriteKB()); + rc.setThroughput(result); + resp = defaultHttpResponse(rc); } - }); - /* true == asynchronous */ - return true; + } catch (Exception ue) { + logger.info("Unexpected exception in putAsync response " + + "builder method: " + ue.getMessage(), rc.lc); + resp = formulateErrorResponse(ue, rc); + } finally { + finishOp(rc, resp); + } } /** @@ -1523,6 +1541,9 @@ private boolean handleDelete(RequestContext rc) RowSerializer pkey = getPrimaryKeySerializer(rc.bbis, table, rc.entry.getRequestLimits()); + + pkey.setRowMetadata(info.rowMetadata); + /* * ReturnRow is set from request. 
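As shown just above, the delete path now attaches the request's rowMetadata to the primary-key serializer before issuing the operation, and the put and multi-delete paths apply the same guard. A tiny sketch of that carrier pattern follows; OpInfo and Row are hypothetical stand-ins for DelOpInfo/PutOpInfo and RowSerializerImpl, not the real classes.

/* Illustrative sketch only, not part of the patch. */
public class RowMetadataSketch {

    /* stands in for PutOpInfo / DelOpInfo: metadata parsed from the request */
    static class OpInfo {
        String rowMetadata;
    }

    /* stands in for RowSerializerImpl: the row simply carries the value */
    static class Row {
        private String rowMetadata;
        void setRowMetadata(String rowMetadata) { this.rowMetadata = rowMetadata; }
        String getRowMetadata() { return rowMetadata; }
    }

    /* mirrors the null guard used in the put/delete paths above */
    static void applyRowMetadata(OpInfo info, Row row) {
        if (info.rowMetadata != null && row != null) {
            row.setRowMetadata(info.rowMetadata);
        }
    }

    public static void main(String[] args) {
        OpInfo info = new OpInfo();
        info.rowMetadata = "{\"source\":\"sketch\"}";
        Row row = new Row();
        applyRowMetadata(info, row);
        System.out.println("row metadata: " + row.getRowMetadata());
    }
}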
*/ @@ -1544,66 +1565,86 @@ private boolean handleDelete(RequestContext rc) doTombStone(rc.entry), rc.lc); - CompletableFuture future; if (!useAsync()) { Result res = doDelete(tableApi, pkey, info.matchVersion, returnRow, options); - future = CompletableFuture.completedFuture(res); - } else if (info.matchVersion == null) { - future = tableApi.deleteAsyncInternal(pkey, returnRow, options); + handleDeleteResponse(res, null, rc, pkey, returnRow); + /* handleDeleteResponse sends the reply so pretend this is async */ + return true; } else { - future = tableApi.deleteIfVersionAsyncInternal(pkey, - info.matchVersion, - returnRow, - options); - } - - future.whenComplete((result, e) -> { - FullHttpResponse resp = null; - try { - if (e != null) { - resp = formulateErrorResponse(e, rc); - } else { - if (rc.isNson()) { - NsonProtocol.writeDeleteResponse(rc, this, result, - tableApi, pkey, - returnRow); + CompletableFuture future; + if (info.matchVersion == null) { + future = tableApi.deleteAsyncInternal(pkey, returnRow, options); + } else { + future = tableApi.deleteIfVersionAsyncInternal( + pkey, + info.matchVersion, + returnRow, + options); + } + future.whenComplete((result, e) -> { + if (executor != null) { + executor.execute(()-> { + handleDeleteResponse(result, e, rc, + pkey, returnRow); + }); } else { - writeSuccess(rc.bbos); - writeThroughput(rc.bbos, - result.getReadKB(), - result.getWriteKB(), - true); // absolute - - /* did the delete happen? */ - rc.bbos.writeBoolean(result.getSuccess()); - - /* return row */ - writeExistingRow(rc.bbos, result.getSuccess(), - returnRow, - tableApi, pkey, result, - rc.serialVersion); + handleDeleteResponse(result, e, rc, pkey, returnRow); } - markDataOpSucceeded(rc, - result.getNumRecords(), - result.getReadKB(), - result.getWriteKB()); - rc.setThroughput(result); - resp = defaultHttpResponse(rc); - } - } catch (Exception ue) { - logger.info("Unexpected exception in deleteAsync response " + - "builder method: " + ue.getMessage(), rc.lc); - resp = formulateErrorResponse(ue, rc); - } finally { - finishOp(rc, resp); - } - }); + }); + } /* true == asynchronous */ return true; } + private void handleDeleteResponse(final Result result, + final Throwable e, + final RequestContext rc, + final RowSerializer pkey, + final ReturnRow returnRow) { + FullHttpResponse resp = null; + TableAPIImpl tableApi = rc.entry.getTableAPI(); + try { + if (e != null) { + resp = formulateErrorResponse(e, rc); + } else { + if (rc.isNson()) { + NsonProtocol.writeDeleteResponse(rc, this, result, + tableApi, pkey, + returnRow); + } else { + writeSuccess(rc.bbos); + writeThroughput(rc.bbos, + result.getReadKB(), + result.getWriteKB(), + true); // absolute + + /* did the delete happen? 
*/ + rc.bbos.writeBoolean(result.getSuccess()); + + /* return row */ + writeExistingRow(rc.bbos, result.getSuccess(), + returnRow, + tableApi, pkey, result, + rc.serialVersion); + } + markDataOpSucceeded(rc, + result.getNumRecords(), + result.getReadKB(), + result.getWriteKB()); + rc.setThroughput(result); + resp = defaultHttpResponse(rc); + } + } catch (Exception ue) { + logger.info("Unexpected exception in deleteAsync response " + + "builder method: " + ue.getMessage(), rc.lc); + resp = formulateErrorResponse(ue, rc); + } finally { + finishOp(rc, resp); + } + } + private String chooseNamespace(String actxNamespace, String queryNamespace) { if (isOnPrem()) { /* onprem, the namespace in the query takes priority */ @@ -1619,10 +1660,11 @@ private String chooseNamespace(String actxNamespace, String queryNamespace) { return queryNamespace; } - - /** * Query + * + * Note that the use of a separate Executor and thread pool + * to handle async responses from KV requests does not apply to queries */ private boolean handleQuery(final RequestContext rc) throws IOException { @@ -1668,7 +1710,8 @@ private boolean handleQuery(final RequestContext rc) info.queryVersion, info.maxServerMemory, rc.actx.getAuthString(), - rc.lc); + rc.lc, + info.rowMetadata); /* * If not onprem, durability will be null. See mapDurability() */ @@ -1689,6 +1732,10 @@ private boolean handleQuery(final RequestContext rc) if (info.isPrepared == false) { + if (info.numOperations != 0 || info.operationNumber != 0) { + throw new IllegalArgumentException( + "Parallel queries require a prepared query"); + } /* this method also enforces limit on query string length */ cbInfo = TableUtils.getCallbackInfo(rc.actx, info.statement, tm); cbInfo.checkSupportedDml(); @@ -1830,7 +1877,7 @@ private boolean handleQuery(final RequestContext rc) } /* - * Set ExceuteOptions.updateLimit for update query to limit the max + * Set ExecuteOptions.updateLimit for update query to limit the max * number of records can be updated in a single query: * - In onprem, set it to the limit set by application. * - In cloud, the max number of records that can be updated in a @@ -1844,6 +1891,13 @@ private boolean handleQuery(final RequestContext rc) } } + /* insert/update/upsert not allowed to be parallel */ + if (isUpdateOp && + (info.numOperations != 0 || info.operationNumber != 0)) { + throw new IllegalArgumentException( + "Cannot perform parallel query on inserts or updates"); + } + /* FUTURE: use info.durability */ NsonSerializer ns = null; @@ -1891,6 +1945,48 @@ private boolean handleQuery(final RequestContext rc) rc.bbos.writeInt(0); } + /* + * this method validates the parameters and will throw if invalid. + * It returns the total number of operations. If > 0 this is a parallel + * query + */ + int numberOfOperations = + getParallelQueryOperations(info, prep, store.getTopology()); + + /* + * Compute synchronous query results. 
If this is a parallel query + * the appropriate set of shards or partitions needs to be passed + */ + Set shards = null; + Set partitions = null; + + if (info.shardId > 0) { + /* + * this is where the caller is explicitly handling a shard and + * is never parallel + */ + shards = new HashSet<>(1); + shards.add(new RepGroupId(info.shardId)); + } else if (numberOfOperations > 1) { + execOpts.setIsSimpleQuery(info.isSimpleQuery); + if (prep.getDistributionKind().equals( + PreparedStatementImpl.DistributionKind.ALL_SHARDS)) { + /* used shard-based split, even if all partition query */ + shards = computeParallelShards(info, store, rc); + } else { + /* + * there is no current async kv call to handle a set of + * partitions, so turn off async for this path. + * FUTURE: leave it async if KV supports it. See + * doAsyncQuery() + */ + partitions = + computeParallelPartitions( + info, store.getTopology().getNumPartitions()); + doAsync = false; + } + } + if (doAsync) { if (info.traceLevel >= 5) { @@ -1902,7 +1998,8 @@ private boolean handleQuery(final RequestContext rc) final NsonSerializer nser = ns; Publisher qpub = - doAsyncQuery(store, prep, variables, execOpts, info.shardId, + doAsyncQuery(store, prep, variables, execOpts, + shards, partitions, info.traceLevel, rc.lc); Subscriber qsub = new Subscriber() { @@ -2019,11 +2116,9 @@ public void onComplete() { int numResults = 0; - /* - * Compute synchronous query results - */ QueryStatementResultImpl qres = - doQuery(store, prep, variables, execOpts, info.shardId, + doQuery(store, prep, variables, execOpts, + shards, partitions, info.traceLevel, rc.lc); if (ns == null && info.queryVersion > QUERY_V1) { @@ -2063,6 +2158,146 @@ public void onComplete() { return false; // sync } + /** + * Validate parallel query operation parameters and return total number + * of operations + */ + private int getParallelQueryOperations(QueryOpInfo info, + PreparedStatementImpl prep, + Topology topo) { + if (info.numOperations > 0) { + if (info.operationNumber <= 0 || info.operationNumber > + info.numOperations) { + throw new IllegalArgumentException( + "Invalid parallel query parameters"); + } + /* + * cannot trust prep.isSimpleQuery() on an already-prepared + * statement, use the info passed from the driver + */ + if (!info.isSimpleQuery || prep.getDistributionKind().equals( + PreparedStatementImpl.DistributionKind.SINGLE_PARTITION)) { + /* allow 1 but it's the same as if it were 0, not parallel */ + if (info.numOperations > 1) { + throw new IllegalArgumentException( + "Invalid number of operations for parallel query"); + } + /* a single partition query is not parallel */ + return 0; + } + if (prep.getDistributionKind().equals( + PreparedStatementImpl.DistributionKind.ALL_SHARDS)) { + if (info.numOperations > topo.getNumRepGroups()) { + throw new IllegalArgumentException( + "Invalid number of operations for parallel query, " + + "it must be less than or equal to " + + topo.getNumRepGroups()); + } + } else if (info.numOperations > topo.getNumPartitions()) { + throw new IllegalArgumentException( + "Invalid number of operations for parallel query, " + + "it must be less than or equal to " + + topo.getNumPartitions()); + } + return info.numOperations; + } else if (info.operationNumber != 0) { + throw new IllegalArgumentException( + "Invalid parallel query parameters"); + } + return 0; + } + + /* + * These methods use a combination of the store topology, the total + * number of parallel operations and the operation number to return sets + * of items (shards/partitions) in a 
deterministic manner. The sets must be + * the same/repeatable for any + * combination in order to properly partition the data being + * queried. + * + * It has already been verified that the number of operations is <= + * number of shards or partitions in the topology. The simplest algorithm + * is to walk the items assigning each to an operation number "bucket" + * until all of the items have been assigned. If the items aren't evenly + * divisible by the number of operations some buckets will have additional + * items. + * + * For example if the number of items is 8 and number of operations is 3 + * then bucket 1 gets items 1, 4, 7, bucket 2 gets 2, 5, 8, and + * bucket 3 gets 3, 6. + * + * These assignments are logically static for the duration of a query but + * rather than round-trip them it's simpler to recalculate, which is not + * deemed expensive. + * + * This calculation does the above. Items are 1-based. Starting at 1 and + * going to the last item these items go in the target bucket (B) + * B = bucket number + * I = item number (start at 1) + * N = number of operations + * for (int I = 1; I <= numberOfItems; I++) { + * if ((I - B) % N == 0) { + * add to bucket B + * } + */ + private Set computeParallelShards(QueryOpInfo info, + KVStoreImpl store, + RequestContext rc) { + + /* + * Must use the driver's "base" topology for all queries + */ + int numShards; + try { + numShards = + store.getDispatcher().getTopologyManager().getTopology( + store, rc.driverTopoSeqNum, rc.timeoutMs).getNumRepGroups(); + /* + * if the driver's notion of topology is different from the current + * store topology, it means elasticity is happening and all-shard + * parallel queries are not compatible with elasticity + */ + if (store.getTopology().getNumRepGroups() != numShards) { + /* + * use of RECOMPILE_QUERY is not very specific but can cause the + * caller to "start over" which is the behavior expected, because + * trying again will likely succeed + */ + throw new RequestException( + RECOMPILE_QUERY, "Parallel queries on indexes are not " + + "supported during certain points in an elasticity " + + "operation. Please retry the entire coordinated operation"); + } + } catch (TimeoutException te) { + throw new RequestException( + REQUEST_TIMEOUT, "Failed to get server state required to " + + "execute a query"); + } + + Set shards = new HashSet<>(); + int numOperations = info.numOperations; + int bucket = info.operationNumber; + for (int i = 1; i <= numShards; i++) { + if ((i - bucket) % numOperations == 0) { + shards.add(new RepGroupId(i)); + } + } + return shards; + } + + /* see comment above. operation number is 1-based */ + private Set computeParallelPartitions(QueryOpInfo info, + int numPartitions) { + Set partitions = new HashSet<>(); + int numOperations = info.numOperations; + int bucket = info.operationNumber; + for (int i = 1; i <= numPartitions; i++) { + if ((i - bucket) % numOperations == 0) { + partitions.add(i); + } + } + return partitions; + } private void finishQuery( QueryOpInfo qinfo, @@ -2137,7 +2372,7 @@ private void finishQuery( /* Write the proxy-side query plan. */ serializePreparedQuery(rc.bbos, cbInfo, prep); /* Write the driver-side query plan, if any. */ - FieldValueWriterImpl valWriter = new FieldValueWriterImpl(); + ProxySerialization.FieldValueWriterImpl valWriter = new ProxySerialization.FieldValueWriterImpl(); prep.serializeForDriver(rc.bbos, qinfo.queryVersion, valWriter); } else if (!qinfo.isSimpleQuery) { /* check for null qres is here only to eliminate warning. 
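The computeParallelShards and computeParallelPartitions helpers above split 1-based items across operation buckets with the test (item - bucket) % numOperations == 0, so every caller recomputes exactly the same assignment. The following standalone sketch reproduces the 8-item, 3-operation example from the comment; it is illustrative only and uses no proxy or KV types.

import java.util.ArrayList;
import java.util.List;

/*
 * Illustrative sketch only, not part of the patch. With 8 items and 3
 * operations it prints
 *   bucket 1 -> [1, 4, 7]
 *   bucket 2 -> [2, 5, 8]
 *   bucket 3 -> [3, 6]
 * matching the worked example in the comment above.
 */
public class ParallelBucketSketch {

    static List<Integer> itemsForBucket(int bucket,
                                        int numOperations,
                                        int numItems) {
        List<Integer> items = new ArrayList<>();
        for (int i = 1; i <= numItems; i++) {
            if ((i - bucket) % numOperations == 0) {
                items.add(i);
            }
        }
        return items;
    }

    public static void main(String[] args) {
        int numOperations = 3;
        int numItems = 8;
        for (int bucket = 1; bucket <= numOperations; bucket++) {
            System.out.println("bucket " + bucket + " -> " +
                itemsForBucket(bucket, numOperations, numItems));
        }
    }
}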
*/ @@ -2166,6 +2401,9 @@ private void finishQuery( /** * Prepare + * + * Note that the use of a separate Executor and thread pool + * to handle async responses from KV requests does not apply to queries */ private boolean handlePrepare(RequestContext rc) throws IOException { @@ -2245,7 +2483,7 @@ private boolean handlePrepare(RequestContext rc) writeString(rc.bbos, prep.toString()); } - FieldValueWriterImpl valWriter = new FieldValueWriterImpl(); + ProxySerialization.FieldValueWriterImpl valWriter = new ProxySerialization.FieldValueWriterImpl(); prep.serializeForDriver(rc.bbos, info.queryVersion, valWriter); if (!prep.isSimpleQuery()) { @@ -2439,113 +2677,129 @@ protected boolean handleWriteMultiple(RequestContext rc) future = tableApi.executeAsyncInternal(info.tableOps, options); future.whenComplete((result, e) -> { - FullHttpResponse resp = null; - try { - /* - * If we got a CompletionException, it needs to first be - * unwrapped. Eventually this should never happen, but for now - * we keep this in for safety. - */ - if (e != null && e instanceof CompletionException) { - /* don't unwrap (let fail) if running in tests */ - if (!inTest()) { - e = e.getCause(); - } + if (executor != null) { + executor.execute(()-> { + handleWriteMultipleResponse(result, e, rc, + tableApi, info); + }); + } else { + handleWriteMultipleResponse(result, e, rc, + tableApi, info); } - if (e != null && e instanceof TableOpExecutionException) { - TableOpExecutionException toee = - (TableOpExecutionException)e; - if (rc.isNson()) { - NsonProtocol.writeWriteMultipleResponse( - rc, - this, - null, /* result comes from exception */ - info, - tableApi, - toee); - } else { - int failedOpIdx = toee.getFailedOperationIndex(); - TableOperationResult failedOpResult = - toee.getFailedOperationResult(); + }); - /* serialize failed operation info */ - writeSuccess(rc.bbos); - rc.bbos.writeBoolean(false); - writeThroughput(rc.bbos, - toee.getReadKB(), - toee.getWriteKB(), - true); // absolute - rc.bbos.writeByte(failedOpIdx); + /* true == asynchronous */ + return true; + } + + private void handleWriteMultipleResponse(final Result result, + Throwable e, + final RequestContext rc, + final TableAPIImpl tableApi, + final WriteMultipleOpInfo info ) { + FullHttpResponse resp = null; + try { + /* + * If we got a CompletionException, it needs to first be + * unwrapped. Eventually this should never happen, but for now + * we keep this in for safety. 
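As the comment in handleWriteMultipleResponse notes, failures surfaced through whenComplete() normally arrive wrapped in a CompletionException, so the real cause is only visible after unwrapping getCause(). A small JDK-only sketch of that behavior follows; IllegalStateException stands in for TableOpExecutionException and nothing here comes from the proxy code itself.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;

/* Illustrative sketch only, not part of the patch. */
public class UnwrapCompletionSketch {
    public static void main(String[] args) {
        CompletableFuture<Void> f = CompletableFuture.runAsync(() -> {
            throw new IllegalStateException("operation 2 failed");
        });

        f.whenComplete((ignored, e) -> {
            if (e instanceof CompletionException && e.getCause() != null) {
                e = e.getCause();          /* unwrap to the real failure */
            }
            System.out.println("failure type: "
                + e.getClass().getSimpleName()
                + ", message: " + e.getMessage());
        }).exceptionally(x -> null)        /* keep join() from rethrowing */
         .join();
    }
}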
+ */ + if (e != null && e instanceof CompletionException) { + /* don't unwrap (let fail) if running in tests */ + if (!inTest()) { + e = e.getCause(); + } + } + if (e != null && e instanceof TableOpExecutionException) { + TableOpExecutionException toee = + (TableOpExecutionException)e; + if (rc.isNson()) { + NsonProtocol.writeWriteMultipleResponse( + rc, + this, + null, /* result comes from exception */ + info, + tableApi, + toee); + } else { + int failedOpIdx = toee.getFailedOperationIndex(); + TableOperationResult failedOpResult = + toee.getFailedOperationResult(); + + /* serialize failed operation info */ + writeSuccess(rc.bbos); + rc.bbos.writeBoolean(false); + writeThroughput(rc.bbos, + toee.getReadKB(), + toee.getWriteKB(), + true); // absolute + rc.bbos.writeByte(failedOpIdx); + TableOperationInfo opInfo = + rc.tableOpInfos.get(failedOpIdx); + writeTableOperationResult( + rc.bbos, + rc.serialVersion, + info.tableOps.get(failedOpIdx).getType(), + opInfo.returnInfo, + opInfo.table, + failedOpResult, + null); + } + markOpFailed(rc, 1 /* isServerFailure */); + resp = defaultHttpResponse(rc); + } else if (e != null) { + resp = formulateErrorResponse(e, rc); + } else { + if (rc.isNson()) { + NsonProtocol.writeWriteMultipleResponse( + rc, + this, + result, + info, + tableApi, + null); + } else { + final List results = + tableApi.createResultsFromExecuteResult( + result, info.tableOps); + writeSuccess(rc.bbos); + rc.bbos.writeBoolean(true); + writeThroughput(rc.bbos, + result.getReadKB(), + result.getWriteKB(), + true); // absolute + writeInt(rc.bbos, results.size()); + int idx = 0; + for (TableOperationResult opResult: results) { TableOperationInfo opInfo = - rc.tableOpInfos.get(failedOpIdx); + rc.tableOpInfos.get(idx); + FieldValue generatedValue = + opInfo.genInfo != null ? + opInfo.genInfo.getGeneratedValue() : null; writeTableOperationResult( rc.bbos, rc.serialVersion, - info.tableOps.get(failedOpIdx).getType(), + info.tableOps.get(idx).getType(), opInfo.returnInfo, opInfo.table, - failedOpResult, - null); + opResult, + generatedValue); + idx++; } - markOpFailed(rc, 1 /* isServerFailure */); - resp = defaultHttpResponse(rc); - } else if (e != null) { - resp = formulateErrorResponse(e, rc); - } else { - if (rc.isNson()) { - NsonProtocol.writeWriteMultipleResponse( - rc, - this, - result, - info, - tableApi, - null); - } else { - final List results = - tableApi.createResultsFromExecuteResult( - result, info.tableOps); - writeSuccess(rc.bbos); - rc.bbos.writeBoolean(true); - writeThroughput(rc.bbos, - result.getReadKB(), - result.getWriteKB(), - true); // absolute - writeInt(rc.bbos, results.size()); - int idx = 0; - for (TableOperationResult opResult: results) { - TableOperationInfo opInfo = - rc.tableOpInfos.get(idx); - FieldValue generatedValue = - opInfo.genInfo != null ? 
- opInfo.genInfo.getGeneratedValue() : null; - writeTableOperationResult( - rc.bbos, - rc.serialVersion, - info.tableOps.get(idx).getType(), - opInfo.returnInfo, - opInfo.table, - opResult, - generatedValue); - idx++; - } - } - markDataOpSucceeded(rc, - result.getNumRecords(), result.getReadKB(), - result.getWriteKB()); - rc.setThroughput(result); - resp = defaultHttpResponse(rc); } - } catch (Exception ue) { - logger.info("Unexpected exception in multiOp response " + - "builder method: " + ue.getMessage(), rc.lc); - resp = formulateErrorResponse(ue, rc); - } finally { - finishOp(rc, resp); + markDataOpSucceeded(rc, + result.getNumRecords(), result.getReadKB(), + result.getWriteKB()); + rc.setThroughput(result); + resp = defaultHttpResponse(rc); } - }); - - /* true == asynchronous */ - return true; + } catch (Exception ue) { + logger.info("Unexpected exception in multiOp response " + + "builder method: " + ue.getMessage(), rc.lc); + resp = formulateErrorResponse(ue, rc); + } finally { + finishOp(rc, resp); + } } /** @@ -2585,6 +2839,8 @@ protected boolean handleMultiDelete(RequestContext rc) getPrimaryKeySerializer(rc.bbis, table, rc.entry.getRequestLimits()); + pKey.setRowMetadata(info.rowMetadata); + /* * Read FieldRange and create MultiRowOptions. This requires the * Table to create the FieldRange instance @@ -2618,55 +2874,69 @@ protected boolean handleMultiDelete(RequestContext rc) rc.lc) .setMaxWriteKB(info.maxWriteKB); - CompletableFuture future; if (!useAsync()) { Result res = tableApi.multiDeleteInternal(pKey, info.continuationKey, mro, options); - future = CompletableFuture.completedFuture(res); + handleMultiDeleteResponse(res, null, rc); + /* handleMultiDeleteResponse sends reply, pretend this is async */ + return true; } else { - future = + CompletableFuture future = tableApi.multiDeleteAsyncInternal(pKey, info.continuationKey, mro, options); - } - - future.whenComplete((result, e) -> { - FullHttpResponse resp = null; - try { - if (e != null) { - resp = formulateErrorResponse(e, rc); - } else { - if (rc.isNson()) { - NsonProtocol.writeMultiDeleteResponse(rc, this, result, - tableApi); + future.whenComplete((result, e) -> { + if (executor != null) { + executor.execute(()-> { + handleMultiDeleteResponse(result, e, rc); + }); } else { - writeSuccess(rc.bbos); - writeThroughput(rc.bbos, - result.getReadKB(), - result.getWriteKB(), - true); // absolute - writeInt(rc.bbos, result.getNDeletions()); - writeByteArray(rc.bbos, result.getPrimaryResumeKey()); + handleMultiDeleteResponse(result, e, rc); } + }); + } - markDataOpSucceeded(rc, - result.getNumRecords(), result.getReadKB(), - result.getWriteKB()); - rc.setThroughput(result); + /* true == asynchronous */ + return true; + } - resp = defaultHttpResponse(rc); + private void handleMultiDeleteResponse(final Result result, + final Throwable e, + final RequestContext rc) { + FullHttpResponse resp = null; + TableAPIImpl tableApi = rc.entry.getTableAPI(); + + try { + if (e != null) { + resp = formulateErrorResponse(e, rc); + } else { + if (rc.isNson()) { + NsonProtocol.writeMultiDeleteResponse(rc, this, result, + tableApi); + } else { + writeSuccess(rc.bbos); + writeThroughput(rc.bbos, + result.getReadKB(), + result.getWriteKB(), + true); // absolute + writeInt(rc.bbos, result.getNDeletions()); + writeByteArray(rc.bbos, result.getPrimaryResumeKey()); } - } catch (Exception ue) { - logger.info("Unexpected exception in multiDelete response " + - "builder method: " + ue.getMessage(), rc.lc); - resp = formulateErrorResponse(ue, rc); - } 
finally { - finishOp(rc, resp); - } - }); - /* true == asynchronous */ - return true; + markDataOpSucceeded(rc, + result.getNumRecords(), result.getReadKB(), + result.getWriteKB()); + rc.setThroughput(result); + + resp = defaultHttpResponse(rc); + } + } catch (Exception ue) { + logger.info("Unexpected exception in multiDelete response " + + "builder method: " + ue.getMessage(), rc.lc); + resp = formulateErrorResponse(ue, rc); + } finally { + finishOp(rc, resp); + } } /** @@ -2944,7 +3214,7 @@ private boolean handleTableOp(RequestContext rc) /* for testing SECURITY_INFO_UNAVAILABLE retries */ if (inTest() && rc.numRetries == 0 && - authRetriesEnabled == true && rc.opCode == DROP_TABLE && + authRetriesEnabled == true && rc.opCode == OpCode.DROP_TABLE && Boolean.getBoolean("test.simulateSIU")) { throw new RequestException(SECURITY_INFO_UNAVAILABLE, "simulated submitting auth request to IAM"); @@ -3345,8 +3615,8 @@ protected void checkRequestSizeLimit(int requestSize) { } private static boolean isPutOp(OpCode op) { - return (op == PUT || op == PUT_IF_ABSENT || - op == PUT_IF_PRESENT || op == PUT_IF_VERSION); + return (op == OpCode.PUT || op == OpCode.PUT_IF_ABSENT || + op == OpCode.PUT_IF_PRESENT || op == OpCode.PUT_IF_VERSION); } /* @@ -3426,6 +3696,7 @@ private class DelOpInfo { boolean returnInfo; Version matchVersion; boolean abortIfUnsuccessful; // only used by WriteMultiple + String rowMetadata; } /* multi-delete info */ @@ -3435,6 +3706,7 @@ private class MultiDelOpInfo { int fieldRangeOffset; int maxWriteKB; byte[] continuationKey; + String rowMetadata; } /* put info TODO: can this extend DelOpInfo? */ @@ -3447,6 +3719,7 @@ private class PutOpInfo { TimeToLive TTL; Version matchVersion; boolean abortIfUnsuccessful; // only used by WriteMultiple + String rowMetadata; } /* public for access from NsonProtocol */ @@ -3486,6 +3759,9 @@ private static class QueryOpInfo { Map bindVars; String queryName; String batchName; + int numOperations; + int operationNumber; + String rowMetadata; } /* @@ -3642,6 +3918,8 @@ private int getV4DelOpInfo(DelOpInfo info, } else if (name.equals(ABORT_ON_FAIL)) { /* Write Multiple only */ info.abortIfUnsuccessful = Nson.readNsonBoolean(bis); + } else if (name.equals(ROW_METADATA)) { + info.rowMetadata = Nson.readNsonString(bis); } else { skipUnknownField(walker, name); } @@ -3696,6 +3974,8 @@ private int getV4MultiDelOpInfo(MultiDelOpInfo info, info.continuationKey = Nson.readNsonBinary(bis); } else if (name.equals(MAX_WRITE_KB)) { info.maxWriteKB = Nson.readNsonInt(bis); + } else if (name.equals(ROW_METADATA)) { + info.rowMetadata = Nson.readNsonString(bis); } else { skipUnknownField(walker, name); } @@ -4026,6 +4306,10 @@ private void getV4QueryOpInfo(QueryOpInfo info, RequestContext rc) info.isSimpleQuery = Nson.readNsonBoolean(bis); } else if (name.equals(QUERY_NAME)) { info.queryName = Nson.readNsonString(bis); + } else if (name.equals(NUM_QUERY_OPERATIONS)) { + info.numOperations = Nson.readNsonInt(bis); + } else if (name.equals(QUERY_OPERATION_NUM)) { + info.operationNumber = Nson.readNsonInt(bis); } else if (name.equals(VIRTUAL_SCAN)) { info.virtualScan = readVirtualScan(bis); } else if (name.equals(SERVER_MEMORY_CONSUMPTION)) { @@ -4034,6 +4318,8 @@ private void getV4QueryOpInfo(QueryOpInfo info, RequestContext rc) if (!(tm.isSecureStore() || maxMem < info.maxServerMemory)) { info.maxServerMemory = maxMem; } + } else if (name.equals(ROW_METADATA)) { + info.rowMetadata = Nson.readNsonString(bis); } else { skipUnknownField(walker, name); } @@ -4359,6 
+4645,8 @@ private int getV4PutOpInfo(PutOpInfo info, } else if (name.equals(ABORT_ON_FAIL)) { /* Write Multiple only */ info.abortIfUnsuccessful = Nson.readNsonBoolean(bis); + } else if (name.equals(ROW_METADATA)) { + info.rowMetadata = Nson.readNsonString(bis); } else { skipUnknownField(walker, name); } @@ -4606,7 +4894,7 @@ private void getWriteMultipleOpInfo2(WriteMultipleOpInfo info, /* Read matched version for putIfVersion and deleteIfVersion op */ Version matchVersion = - (op == PUT_IF_VERSION || op == DELETE_IF_VERSION) ? + (op == OpCode.PUT_IF_VERSION || op == OpCode.DELETE_IF_VERSION) ? readVersion(rc.bbis) : null; /* @@ -4934,9 +5222,12 @@ private void createPutTableOp(OpCode opCode, valueSizeLimit, pinfo.exactMatch); - if (pinfo.TTL != null) { + if (pinfo.TTL != null && row != null) { row.setTTL(pinfo.TTL); } + if (row != null) { + row.setRowMetadata(pinfo.rowMetadata); + } /* Check sub request size limit */ checkRequestSizeLimit(bis.getOffset()-startOffset, false, limits); @@ -4951,7 +5242,7 @@ private void createPutTableOp(OpCode opCode, if (table.isJsonCollection()) { pkey = (PrimaryKeyImpl) table.createPrimaryKey(); /* this method fills in PrimaryKey as well */ - kvValue = createValueFromNson(table, pkey, rc.bbis); + kvValue = createValueFromNson(table, pkey, rc.bbis, pinfo.rowMetadata); kvKey = table.createKeyInternal(pkey, false, store, opInfo.genInfo); } else { kvKey = table.createKeyInternal(row, false, store, opInfo.genInfo); diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/DataServiceHandler.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/DataServiceHandler.java index c8d94a66..f7081fd0 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/DataServiceHandler.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/DataServiceHandler.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. * * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: @@ -139,7 +139,6 @@ import oracle.nosql.proxy.audit.ProxyAuditContextBuilder; import oracle.nosql.proxy.audit.ProxyAuditManager; import oracle.nosql.proxy.filter.FilterHandler; -import oracle.nosql.proxy.filter.FilterHandler.Action; import oracle.nosql.proxy.filter.FilterHandler.Filter; import oracle.nosql.proxy.protocol.ByteInputStream; import oracle.nosql.proxy.protocol.ByteOutputStream; @@ -346,15 +345,11 @@ public HealthStatus checkStatus(List errors) { } /* - * Checks if there is a block-all rule and return its action handler if - * exists. + * Returns the block all rule. */ - protected Action checkBlockAll(LogContext lc) { + protected Rule getBlockAll(LogContext lc) { if (filter != null) { - Rule rule = filter.getFilterAllRule(); - if (rule != null) { - return filter.getAction(rule); - } + return filter.getFilterAllRule(); } return null; } @@ -412,13 +407,9 @@ protected Filter getQueryFilter(QueryOperation op) { * in the given FilterRequestException. 
*/ protected FullHttpResponse handleFilterRequest(FilterRequestException fre, - String requestId, - LogContext lc) { + RequestContext rc) { if (filter != null) { - final Rule rule = fre.getRule(); - return filter.getAction(rule).handleRequest(null, - requestId, - lc); + return filter.handleRequest(rc, fre.getRule()); } return null; } @@ -505,7 +496,7 @@ protected QueryStatementResultImpl doQuery(KVStoreImpl store, byte traceLevel, LogContext lc) { - return doQuery(store, pstmt, variables, execOpts, -1 /* shardId */, + return doQuery(store, pstmt, variables, execOpts, null, null, traceLevel, lc); } @@ -529,13 +520,15 @@ private BoundStatement bindVariables(PreparedStatement pstmt, return bs; } - protected QueryStatementResultImpl doQuery(KVStoreImpl store, - PreparedStatement pstmt, - Map variables, - ExecuteOptions execOpts, - int shardId, - byte traceLevel, - LogContext lc) { + protected QueryStatementResultImpl doQuery( + KVStoreImpl store, + PreparedStatement pstmt, + Map variables, + ExecuteOptions execOpts, + Set shards, + Set partitions, + byte traceLevel, + LogContext lc) { QueryStatementResultImpl qres = null; @@ -543,39 +536,40 @@ protected QueryStatementResultImpl doQuery(KVStoreImpl store, pstmt = bindVariables(pstmt, variables, traceLevel, lc); } - if (shardId == -1) { + if (shards == null && partitions == null) { qres = (QueryStatementResultImpl)store.executeSync(pstmt, execOpts); - } else { - /* Execute the query only at the specified shard */ - RepGroupId rgid = new RepGroupId(shardId); - Set shards = new HashSet(1); - shards.add(rgid); + } else if (shards != null) { if (traceLevel >= 2) { - trace("Executing query on shard " + shardId, lc); + trace("Executing query on shards " + shards, lc); } qres = (QueryStatementResultImpl) store.executeSyncShards(pstmt, execOpts, shards); + } else { + if (traceLevel >= 2) { + trace("Executing query on partitions " + partitions, lc); + } + /* there is no KVStoreImpl API for direct execution */ + qres = (QueryStatementResultImpl) ((PreparedStatementImpl) pstmt). + executeSyncPartitions((KVStoreImpl)store, execOpts, partitions); } return qres; } + /* + * Note: the partitions parameter is not used but is in preparation + * for the addition of an async kv call that handles a partition set + */ protected Publisher doAsyncQuery( KVStoreImpl store, PreparedStatement pstmt, Map variables, ExecuteOptions execOpts, - int shardId, + Set shards, + Set partitions, /* FUTURE */ byte traceLevel, LogContext lc) { - /* FUTURE: Set partitions = null; */ - Set shards = null; - if (shardId >= 0) { // TODO: is zero valid? - shards = new HashSet(1); - shards.add(new RepGroupId(shardId)); - } - try { if (variables != null) { BoundStatement bs = bindVariables(pstmt, variables, @@ -806,7 +800,9 @@ protected ExecuteOptions createExecuteOptions(String namespace, short queryVersion, long maxServerMemory, String authString, - LogContext lc) { + LogContext lc, + String rowMetaData + ) { ExecuteOptions opts = createExecuteOptions(namespace, authString, lc); opts.setIsCloudQuery(queryVersion > QUERY_V1 ? 
true : false) .setDriverQueryVersion(queryVersion) @@ -821,7 +817,8 @@ protected ExecuteOptions createExecuteOptions(String namespace, .setTraceLevel(traceLevel) .setDoLogFileTracing(logFileTracing) .setQueryName(queryName) - .setBatchCounter(batchCounter); + .setBatchCounter(batchCounter) + .setRowMetadata(rowMetaData); if (maxServerMemory > 0) { opts.setMaxServerMemoryConsumption(maxServerMemory); @@ -2419,7 +2416,10 @@ public void readREST() { lc.setId(requestId); driverProto = DriverProto.REST; - /* TODO: timeout, buffers, preferThrottling */ + /* REST only uses the output buffer */ + resetOutputBuffer(); + + /* TODO: timeout, preferThrottling */ } /** @@ -2616,18 +2616,27 @@ public void setTimeoutMs(int timeoutMs) { */ public void resetBuffers() { resetInputBuffer(); - if (bbos == null) { - ByteBuf resp = ctx.alloc().directBuffer(RESPONSE_BUFFER_SIZE); - bbos = new ByteOutputStream(resp); - } - bbos.buffer().writerIndex(0); + resetOutputBuffer(); } /** * Reset input buffer. */ public void resetInputBuffer() { - bbis.buffer().readerIndex(inputOffset); + if (bbis != null) { + bbis.buffer().readerIndex(inputOffset); + } + } + + /** + * Reset output buffer. + */ + public void resetOutputBuffer() { + if (bbos == null) { + ByteBuf resp = ctx.alloc().directBuffer(RESPONSE_BUFFER_SIZE); + bbos = new ByteOutputStream(resp); + } + bbos.buffer().writerIndex(0); } /** diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/ExcessiveUsageException.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/ExcessiveUsageException.java index d8562028..93b776e6 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/ExcessiveUsageException.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/ExcessiveUsageException.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. * * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/JsonCollSerializer.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/JsonCollSerializer.java index 36e81aaa..1c5956ff 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/JsonCollSerializer.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/JsonCollSerializer.java @@ -74,7 +74,8 @@ class JsonCollSerializer { */ static Value createValueFromNson(TableImpl table, PrimaryKey pkey, - ByteInputStream bis) + ByteInputStream bis, + String rowMetadata) throws IOException { /* get callbacks for primary key fields */ @@ -155,9 +156,12 @@ static Value createValueFromNson(TableImpl table, format = Value.Format.MULTI_REGION_TABLE; regionId = Region.LOCAL_REGION_ID; } + if (rowMetadata != null) { + format = Value.Format.TABLE_V5; + } byte[] after = reserializer.getBytes(); - return Value.internalCreateValue(after, format, regionId); + return Value.internalCreateValue(after, format, regionId, rowMetadata); } /* diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/KVHandleStats.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/KVHandleStats.java index 907eccf1..9839db56 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/KVHandleStats.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/KVHandleStats.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. * * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/LimiterManagerStats.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/LimiterManagerStats.java index d5d46e38..a786325a 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/LimiterManagerStats.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/LimiterManagerStats.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. * * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/MonitorStats.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/MonitorStats.java index 939dc1be..62f2d423 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/MonitorStats.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/MonitorStats.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. * * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: @@ -23,7 +23,6 @@ import oracle.nosql.common.sklogger.MetricProcessor; import oracle.nosql.common.sklogger.MetricRegistry; import oracle.nosql.common.sklogger.PerfQuantile; -import oracle.nosql.common.sklogger.SizeQuantile; import oracle.nosql.common.sklogger.SkLogger; @@ -82,7 +81,7 @@ public class MonitorStats { * number of currently active worker threads. This is different * from number of active requests when running in async mode. 
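The surrounding MonitorStats changes replace SizeQuantile with a LongGauge for active worker threads and with Counters for the read/write response sizes, i.e. a point-in-time value versus monotonic totals. A JDK-only sketch of those two semantics follows, using AtomicLong and LongAdder rather than the sklogger API; it is an illustration of the semantics, not the sklogger implementation.

import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.LongAdder;

/* Illustrative sketch only, not part of the patch. */
public class MetricSemanticsSketch {
    /* gauge: "how many worker threads are active right now" */
    private final AtomicLong activeWorkerThreads = new AtomicLong();

    /* counters: "total KB read/written since startup" */
    private final LongAdder dataResponseReadSize = new LongAdder();
    private final LongAdder dataResponseWriteSize = new LongAdder();

    void markOpActiveWorkerThreads(int threads) {
        activeWorkerThreads.set(threads);       /* overwrite, not add */
    }

    void markDataOpSucceeded(int readKB, int writeKB) {
        if (readKB > 0) {
            dataResponseReadSize.add(readKB);   /* monotonic accumulation */
        }
        if (writeKB > 0) {
            dataResponseWriteSize.add(writeKB);
        }
    }

    public static void main(String[] args) {
        MetricSemanticsSketch m = new MetricSemanticsSketch();
        m.markOpActiveWorkerThreads(8);
        m.markDataOpSucceeded(4, 2);
        m.markDataOpSucceeded(1, 0);
        System.out.println("active=" + m.activeWorkerThreads.get()
            + " readKB=" + m.dataResponseReadSize.sum()
            + " writeKB=" + m.dataResponseWriteSize.sum());
    }
}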
*/ - private SizeQuantile activeWorkerThreads; + private LongGauge activeWorkerThreads; /* * Proxy request operation metrics @@ -101,8 +100,8 @@ public class MonitorStats { /* * Proxy data operation charged metrics */ - private SizeQuantile dataResponseReadSize; - private SizeQuantile dataResponseWriteSize; + private Counter dataResponseReadSize; + private Counter dataResponseWriteSize; /* * Proxy kvstore clients metrics @@ -183,14 +182,14 @@ public MonitorStats(SkLogger metricLogger) { } activeWorkerThreads = - MetricRegistry.getSizeQuantile(ACTIVE_WORKER_THREADS_NAME); + MetricRegistry.getLongGauge(ACTIVE_WORKER_THREADS_NAME); /* * Proxy data operation charged metrics */ - dataResponseReadSize = MetricRegistry.getSizeQuantile( + dataResponseReadSize = MetricRegistry.getCounter( DATA_RESPONSE_READ_SIZE_NAME); - dataResponseWriteSize = MetricRegistry.getSizeQuantile( + dataResponseWriteSize = MetricRegistry.getCounter( DATA_RESPONSE_WRITE_SIZE_NAME); if (isKVHandleStatsEnabled) { @@ -260,17 +259,17 @@ public void markDataOpSucceeded(long startTime, } incrementDriverRequestTotal(dLang, dProto); if (readKB > 0) { - dataResponseReadSize.observe(readKB); + dataResponseReadSize.incrValue(readKB); } if (writeKB > 0) { - dataResponseWriteSize.observe(writeKB); + dataResponseWriteSize.incrValue(writeKB); } trackOpPerf(startTime, dataOpCount, requestLatency, requestServerFailed, opLabelValues); } public void markOpActiveWorkerThreads(int threads) { - activeWorkerThreads.observe(threads); + activeWorkerThreads.setValue(threads); } /** diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/Proxy.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/Proxy.java index c66222ac..f70404f6 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/Proxy.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/Proxy.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. 
* * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: @@ -58,6 +58,7 @@ import oracle.nosql.proxy.sc.TenantManager; import oracle.nosql.proxy.security.AccessChecker; import oracle.nosql.proxy.util.ErrorManager; +import oracle.nosql.proxy.util.ProxyThreadPoolExecutor; import oracle.nosql.proxy.util.ShutdownManager; import oracle.nosql.util.HttpServerHealth; import oracle.nosql.util.ph.HealthReportAgent; @@ -88,6 +89,7 @@ public final class Proxy { private final Config config; HttpServer server; + ProxyThreadPoolExecutor executor; private final AtomicBoolean shutdown = new AtomicBoolean(false); @@ -276,7 +278,14 @@ public void shutdown(long timeout, limiterManager.shutDown(); } server.shutdown(); + /* + * executor is owned by the proxy, do clean shutdown + */ + if (executor != null) { + executor.shutdown(true); + } MetricRegistry.defaultRegistry.stopProcessors(); + final String subject = "Proxy shut down"; logger.logEvent("Proxy", Level.INFO, subject, null /* message */, null /* throwable */); @@ -439,20 +448,29 @@ private char[] retrieveKeystorePass() { */ public void startServer(SslContext sslCtx, boolean startNetty) throws Exception { + /* + * If internal thread pool is to be used, create it and pass it to + * HttpServer + */ + if (config.getRequestThreadPoolSize() > 0 && startNetty) { + executor = new ProxyThreadPoolExecutor( + config.getRequestThreadPoolSize(), "ProxyRequest"); + } - requestHandler = new ServiceRequestHandler(logControl, logger); - addServices(); - warmupCache(); /* * NOTE: if config.getHostname() is null the server will listen * on all available interfaces. This is the default value. */ + requestHandler = new ServiceRequestHandler(logControl, logger); + addServices(); + warmupCache(); if (startNetty) { server = new HttpServer(config.getHostname(), config.getHttpPort(), config.getHttpsPort(), config.getNumAcceptThreads(), config.getNumRequestThreads(), + executor, MAX_REQUEST_SIZE, MAX_CHUNK_SIZE, config.getIdleReadTimeout(), @@ -509,7 +527,9 @@ private void addServices() { switch (config.getProxyType()) { case KVPROXY: requestHandler.addService("ProxyData", - new KVDataService(logger, tm, stats, audit, + new KVDataService(logger, tm, + stats, + audit, config, logControl)); final KVStoreImpl store = ((KVTenantManager)tm).getStore(); @@ -535,7 +555,8 @@ private void addServices() { logger); final CloudDataService dataService = - new CloudDataService(logger, tm, ac, stats, audit, filter, + new CloudDataService(logger, tm, + ac, stats, audit, filter, errorManager, limiterManager, config, @@ -543,7 +564,8 @@ private void addServices() { requestHandler.addService("ProxyData", dataService); final CloudRestDataService restDataService = - new CloudRestDataService(logger, tm, ac, stats, audit, filter, + new CloudRestDataService(logger, tm, + ac, stats, audit, filter, errorManager, limiterManager, config, @@ -583,13 +605,15 @@ private void addServices() { config.getPullRulesIntervalSec(), logger); requestHandler.addService("ProxyData", - new CloudDataService(logger, tm, ac, stats, audit, filter, + new CloudDataService(logger, tm, + ac, stats, audit, filter, errorManager, limiterManager, config, logControl)); final CloudRestDataService restDataService = - new CloudRestDataService(logger, tm, ac, stats, audit, + new CloudRestDataService(logger, tm, + ac, stats, audit, filter, errorManager, limiterManager, config, logControl); diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/ProxyLogger.java 
b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/ProxyLogger.java index beb01fec..f55916a6 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/ProxyLogger.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/ProxyLogger.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. * * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/ProxyMain.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/ProxyMain.java index 6ed04207..5ce79a14 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/ProxyMain.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/ProxyMain.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. * * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/ProxySerialization.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/ProxySerialization.java index 79cdb36b..910185f8 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/ProxySerialization.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/ProxySerialization.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. * * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/RequestException.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/RequestException.java index 697a2b03..d95623bc 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/RequestException.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/RequestException.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. * * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/RequestLimits.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/RequestLimits.java index 229100a2..8dbd10a6 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/RequestLimits.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/RequestLimits.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. * * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/ValueSerializer.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/ValueSerializer.java index 86444b34..ecabdeca 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/ValueSerializer.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/ValueSerializer.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. * * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: @@ -87,6 +87,7 @@ public static class RowSerializerImpl extends RecordValueSerializerImpl private final int keySizeLimit; private final int valueSizeLimit; private TimeToLive ttl; + private String rowMetadata; public RowSerializerImpl(ByteInputStream in, int driverType, @@ -114,6 +115,14 @@ public TimeToLive getTTL() { return ttl; } + public void setRowMetadata(String rowMetadata) { + this.rowMetadata = rowMetadata; + } + + public String getRowMetadata() { + return rowMetadata; + } + @Override public Table getTable() { return table; diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/audit/ProxyAuditContextBuilder.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/audit/ProxyAuditContextBuilder.java index 3541cda9..2c1a88aa 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/audit/ProxyAuditContextBuilder.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/audit/ProxyAuditContextBuilder.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. * * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/audit/ProxyAuditManager.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/audit/ProxyAuditManager.java index ee2c7b4d..c857b3b1 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/audit/ProxyAuditManager.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/audit/ProxyAuditManager.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. * * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/cloud/CacheUpdateService.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/cloud/CacheUpdateService.java index 04f63312..1191857e 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/cloud/CacheUpdateService.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/cloud/CacheUpdateService.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. */ package oracle.nosql.proxy.cloud; diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/cloud/CloudDataService.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/cloud/CloudDataService.java index befe64bf..256d1164 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/cloud/CloudDataService.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/cloud/CloudDataService.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. 
* * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: @@ -76,7 +76,8 @@ public CloudDataService(SkLogger logger, LimiterManager limiterManager, Config config, LogControl logControl) { - super(logger, tm, stats, audit, filter, errorManager, limiterManager, + super(logger, tm, stats, audit, + filter, errorManager, limiterManager, config, logControl); this.ac = ac; } @@ -502,8 +503,15 @@ protected boolean handleInternalStatus(RequestContext rc) CommonResponse res; switch (info.getResourceType()) { case WORKREQUEST: - res = TableUtils.getWorkRequest(null /* AccessContext */, info.ocid, - tm, true /* internal */, rc.lc); + /* + * By design, CMEK related work requests are only visible through + * the REST API, so the binary API specifically only fetches DDL + * related requests. This is used by the GAT DDL in other region to + * check the DDL request information in current region. + */ + res = TableUtils.getDdlWorkRequest(null /* AccessContext */, + info.ocid, tm, + true /* internal */, rc.lc); break; case TABLE: res = TableUtils.getTable(null /* AccessContext */, info.ocid, diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/cloud/HealthService.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/cloud/HealthService.java index c48236f5..6ac71f7b 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/cloud/HealthService.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/cloud/HealthService.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. * * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/cloud/LogControlService.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/cloud/LogControlService.java index 007d1c82..8b028119 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/cloud/LogControlService.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/cloud/LogControlService.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. * * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/cloud/ProxyHealthSource.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/cloud/ProxyHealthSource.java index 6ab057e8..cd101536 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/cloud/ProxyHealthSource.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/cloud/ProxyHealthSource.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. 
* * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/filter/FilterHandler.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/filter/FilterHandler.java index d4348aa6..72143f97 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/filter/FilterHandler.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/filter/FilterHandler.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. * * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: @@ -26,16 +26,18 @@ import java.util.Set; import java.util.TreeMap; -import io.netty.buffer.ByteBuf; import io.netty.handler.codec.http.FullHttpResponse; import oracle.nosql.common.contextlogger.LogContext; import oracle.nosql.common.sklogger.SkLogger; +import oracle.nosql.proxy.DataServiceHandler.RequestContext; import oracle.nosql.proxy.RequestException; import oracle.nosql.proxy.protocol.Protocol.OpCode; import oracle.nosql.proxy.sc.ListRuleResponse; import oracle.nosql.proxy.sc.TenantManager; import oracle.nosql.util.filter.Rule; +import oracle.nosql.util.filter.Rule.ActionType; import oracle.nosql.util.filter.Rule.OpType; +import oracle.nosql.util.filter.Rule.ReturnErrorAction; /* * The class manages rules in memory that contains: @@ -103,11 +105,11 @@ public enum RuleType { private final TenantManager tm; private final SkLogger logger; - /* The action handler of ActionType.DROP_REQUEST */ - private static final DropRequest dropRequest = new DropRequest(); + /* The action handlers */ + private final Map actions = new HashMap<>(); /* The map of OpCode and OpType */ - private static Map opMap = new HashMap<>(); + private static final Map opMap = new HashMap<>(); static { /* ddl op */ opMap.put(OpCode.TABLE_REQUEST, OpType.DDL); @@ -158,6 +160,13 @@ public enum RuleType { /* table usage */ opMap.put(OpCode.GET_TABLE_USAGE, OpType.READ); opMap.put(OpCode.GET_REPLICA_STATS, OpType.READ); + + /* configuration operations */ + opMap.put(OpCode.GET_CONFIGURATION, OpType.CONFIG_READ); + opMap.put(OpCode.GET_CONFIG_KMS_KEY, OpType.CONFIG_READ); + opMap.put(OpCode.UPDATE_CONFIGURATION, OpType.CONFIG_UPDATE); + opMap.put(OpCode.UPDATE_CONFIG_KMS_KEY, OpType.CONFIG_UPDATE); + opMap.put(OpCode.REMOVE_CONFIG_KMS_KEY, OpType.CONFIG_UPDATE); } public FilterHandler(TenantManager tm, @@ -173,6 +182,8 @@ public int compare(RuleType o1, RuleType o2) { this.tm = tm; this.logger = logger; + initActionHandlers(); + if (pullIntervalSec > 0) { /* * Starts a demon thread that pull persistent rules from SC and load @@ -187,6 +198,12 @@ public int compare(RuleType o1, RuleType o2) { } } + /* Initialize action handlers */ + private void initActionHandlers() { + actions.put(ActionType.DROP_REQUEST, this::handleDropRequest); + actions.put(ActionType.RETURN_ERROR, this::handleReturnError); + } + /* * Add a transient rule. * @@ -557,14 +574,14 @@ static OpType getOpType(OpCode op) { throw new IllegalStateException("Operation type not found: " + op); } - /* Returns the action handler of the given rule */ - public Action getAction(Rule rule) { - switch(rule.getAction()) { - case DROP_REQUEST: - return dropRequest; + /* Handles the request using the action defined by the rule. 
*/ + public FullHttpResponse handleRequest(RequestContext rc, Rule rule) { + ActionHandler handler = actions.get(rule.getActionType()); + if (handler != null) { + return handler.handleRequest(rc, rule); } throw new IllegalStateException("Unsupported action type: " + - rule.getAction()); + rule.getActionType()); } private RuleType getType(Rule rule) { @@ -656,25 +673,28 @@ public void filterRequest(OpCode op, /* * Action handler interface */ - public interface Action { - FullHttpResponse handleRequest(ByteBuf responseBuffer, - String requestId, - LogContext lc); + public interface ActionHandler { + FullHttpResponse handleRequest(RequestContext rc, Rule rule); } /* * The action handler that simply drops request. */ - private static class DropRequest implements Action { - @Override - public FullHttpResponse handleRequest(ByteBuf responseBuffer, - String requestId, - LogContext lc) { - if (responseBuffer != null) { - responseBuffer.release(); - } - return null; + private FullHttpResponse handleDropRequest(RequestContext rc, Rule rule) { + if (rc != null && rc.bbos != null) { + /* release the buffer */ + rc.bbos.getBuffer().release(); } + return null; + } + + /* + * The action handler that throws the specified error + */ + private FullHttpResponse handleReturnError(RequestContext rc, Rule rule) { + ReturnErrorAction action = (ReturnErrorAction)rule.getAction(); + throw new RequestException(action.getErrorCode(), + action.getErrorMessage()); } /* @@ -697,11 +717,21 @@ public void run() { "interval is " + intervalSec + "sec"); while (true) { PullRulesResult ret = execute(null); - if (ret.getNumLoaded() > 0 || ret.getNumDeleted() > 0) { - logger.info("[Filter] Reload persistent rules: " + ret); - } else { - logger.fine("[Filter] Reload persistent rules: " + ret); + boolean rulesChanged = false; + if (ret != null) { + if (ret.getNumLoaded() > 0 || ret.getNumDeleted() > 0) { + logger.info("[Filter] Reload persistent rules: " + ret); + rulesChanged = true; + } else { + logger.fine("[Filter] Reload persistent rules: " + ret); + } } + + if (rulesChanged || + rules.values().stream().anyMatch(sub -> !sub.isEmpty())) { + logger.info("[Filter] Rules: " + rules); + } + try { Thread.sleep(intervalSec * 1000); } catch (InterruptedException e) { @@ -721,29 +751,35 @@ PullRulesResult execute(LogContext lc) { prules = getPersistentRules(null); if (retries > 0) { logger.info( - "[Filter] List persist rules successfully after " + - retries + " retry", lc); + "[Filter] Get persisted rules successfully after " + + retries + " retries", lc); } break; } catch (RuntimeException ex) { - if (retries++ < MAX_RETRIES) { - logger.warning( - "[Filter] Failed to list persist rules, retry=" + - retries + ", error=" + ex.getMessage(), lc); + if (retries < MAX_RETRIES) { + logger.info( + "[Filter] Failed to get persisted rules on retry " + + retries + ": " + ex.getMessage(), lc); try { Thread.sleep(DELAY_MS); } catch (InterruptedException ignored) { /* do nothing */ } + retries++; continue; } - logger.warning("[Filter] Failed to list persist rules " + - "after retry " + retries + " times : " + - ex.getMessage(), lc); - throw ex; + logger.warning("[Filter] Failed to get persisted rules " + + "after " + retries + " retries, will retry in " + + intervalSec + " seconds: " + ex.getMessage(), lc); + break; } } + if (prules == null) { + /* Unable to get persist rules, will retry */ + return null; + } + int numDel = 0; if (prules.length > 0 || !rules.isEmpty()) { /* diff --git 
a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/filter/FilterService.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/filter/FilterService.java index 5e8d9f4a..81c77a82 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/filter/FilterService.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/filter/FilterService.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. * * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: @@ -25,15 +25,12 @@ import static oracle.nosql.proxy.protocol.Protocol.RESOURCE_NOT_FOUND; import static oracle.nosql.proxy.protocol.Protocol.RESOURCE_EXISTS; import static oracle.nosql.proxy.protocol.Protocol.UNKNOWN_OPERATION; -import static oracle.nosql.proxy.protocol.JsonProtocol.checkNotEmpty; -import static oracle.nosql.proxy.protocol.JsonProtocol.checkNotNull; import static oracle.nosql.proxy.protocol.JsonProtocol.checkNotNullEmpty; import java.io.IOException; -import java.util.ArrayList; +import java.io.InputStream; import java.util.HashMap; import java.util.Iterator; -import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.logging.Level; @@ -54,15 +51,23 @@ import oracle.nosql.proxy.filter.FilterHandler.PullRulesResult; import oracle.nosql.proxy.filter.FilterHandler.RuleWrapper; import oracle.nosql.util.filter.Rule; -import oracle.nosql.util.filter.Rule.ActionType; -import oracle.nosql.util.filter.Rule.OpType; -import oracle.nosql.proxy.protocol.JsonProtocol.JsonPayload; import oracle.nosql.proxy.rest.RequestParams; import oracle.nosql.proxy.rest.UrlInfo; -import oracle.nosql.proxy.protocol.JsonProtocol.JsonArray; +import oracle.nosql.proxy.protocol.ByteInputStream; /** - * Filter service provides rest API to manage the rules: + * The Filter Service is a way to manage which requests are accepted by the + * proxy. + * + * Some of the motivations for refusing requests are: + * - Disabling certain types of requests in a denial of service attack + * - Should the region be impaired in some way, this provides a method for + * controlling what requests may be serviced + * - Some request types are not universally enabled, and must be configured + * for this proxy + * + * The following rest APIs are provided to manage the rules used to control + * which requests are accepted by the proxy: * * POST /V0/tools/filters * add or update a rule. @@ -80,12 +85,7 @@ * reload persistent rules to cache */ public class FilterService implements Service { - static final String NAME = "name"; - static final String ACTION = "action"; - static final String OPERATIONS = "operations"; - static final String TENANT = "tenant"; - static final String USER = "user"; - static final String TABLE = "table"; + private static final String NAME = "name"; /* * Response buffer size, minimum. 
Consider adjusting this per-request, @@ -215,85 +215,16 @@ private void handleAddRule(FullHttpResponse response, RuleOp op, LogContext lc) { - JsonPayload pl = null; - try { - pl = request.parsePayload(); - if (pl == null) { - throw new RequestException(ILLEGAL_ARGUMENT, - "The payload of " + op.name() + - " request must not be empty"); - } - - Rule rule = parseRule(pl); + try (InputStream in = new ByteInputStream(request.getPayload())) { + Rule rule = Rule.fromJson(in); RuleWrapper ret = handler.addRule(rule); - - /* build response */ buildRuleResponse(response, ret); } catch (IOException ioe) { throw new RequestException(ILLEGAL_ARGUMENT, "Invalid payload of " + op.name() + ": " + ioe.getMessage()); - } finally { - if (pl != null) { - pl.close(); - } } } - /* Parses a rule object from JSON payload */ - private Rule parseRule(JsonPayload pl) throws IOException { - ActionType action = null; - String name = null; - String tenant = null; - String user = null; - String table = null; - List ops = null; - - while (pl.hasNext()) { - if (pl.isField(NAME)) { - name = pl.readString(); - checkNotEmpty(NAME, name); - } else if (pl.isField(ACTION)) { - String value = pl.readString(); - checkNotEmpty(ACTION, value); - if (value != null) { - action = ActionType.valueOf(value); - } - } else if (pl.isField(TENANT)) { - tenant = pl.readString(); - checkNotEmpty(TENANT, tenant); - } else if (pl.isField(USER)) { - user = pl.readString(); - checkNotEmpty(USER, user); - } else if (pl.isField(TABLE)) { - table = pl.readString(); - checkNotEmpty(TABLE, table); - } else if (pl.isField(OPERATIONS)) { - ops = new ArrayList<>(); - JsonArray ja = pl.readArray(); - while(ja.hasNext()) { - String op = ja.readString(); - checkNotNullEmpty("element of " + OPERATIONS, op); - ops.add(op); - } - } else { - throw new IllegalArgumentException("Invalid field of Rule: " + - pl.getCurrentField()); - } - } - - checkNotNull(NAME, name); - - if (tenant == null && user == null && table == null && ops == null) { - throw new RequestException(ILLEGAL_ARGUMENT, - "One of properties must be specified: " + TENANT + ", " + USER + - ", " + TABLE + ", " + OPERATIONS); - } - - return Rule.createRule(name, action, tenant, user, table, - (ops != null ? ops.toArray(new String[ops.size()]) : null), - System.currentTimeMillis()); - } - /* Gets a rule */ private void handleGetRule(FullHttpResponse response, RequestParams request, @@ -327,9 +258,14 @@ private void handleListRules(FullHttpResponse response, /* Build response */ JsonBuilder jb = JsonBuilder.create(false); while (iter.hasNext()) { - jb.startObject(null); - appendRule(jb, iter.next(), all); - jb.endObject(); + RuleWrapper rw = iter.next(); + String json; + if (all) { + json = Rule.getGson().toJson(rw, RuleWrapper.class); + } else { + json = Rule.getGson().toJson(rw.getRule(), Rule.class); + } + jb.appendJson(null, json); } buildResponse(response, jb.toString()); } @@ -457,39 +393,7 @@ private static void buildResponse(FullHttpResponse resp, String info) { private static void buildRuleResponse(FullHttpResponse response, RuleWrapper rw) { - final JsonBuilder jb = JsonBuilder.create(); - appendRule(jb, rw, false); - buildResponse(response, jb.toString()); - } - - private static void appendRule(JsonBuilder jb, - RuleWrapper rw, - boolean showType) { - Rule r = rw.getRule(); - String name = rw.getRuleName(); - jb.append("name", name); - if (showType) { - jb.append("type", (rw.isTransient() ? 
"TRANSIENT" : "PERSISTENT")); - } - jb.append("action", r.getAction().name()); - if (r.getTenant() != null) { - jb.append("tenant", r.getTenant()); - } - if (r.getUser() != null) { - jb.append("user", r.getUser()); - } - if (r.getTable() != null) { - jb.append("table", r.getTable()); - } - jb.startArray("operations"); - for (OpType e : r.getOpTypes()) { - jb.append(e.name()); - } - jb.endArray(); - - if (r.getCreateTimeMs() > 0) { - jb.append("createTime", r.getCreateTime()); - } + buildResponse(response, rw.getRule().toJson()); } /* diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/kv/KVDataService.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/kv/KVDataService.java index f4d65d3a..bbf92451 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/kv/KVDataService.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/kv/KVDataService.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. * * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: @@ -77,7 +77,8 @@ public KVDataService(SkLogger logger, ProxyAuditManager audit, Config config, LogControl logControl) { - super(logger, tm, stats, audit, null, null, null, config, logControl); + super(logger, tm, stats, audit, + null, null, null, config, logControl); this.isSecure = config.useSSL(); } diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/kv/KVTenantManager.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/kv/KVTenantManager.java index 43f29a6f..b10281de 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/kv/KVTenantManager.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/kv/KVTenantManager.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. * * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/kv/LoginService.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/kv/LoginService.java index bd67abc6..c65e249c 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/kv/LoginService.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/kv/LoginService.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. * * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/kv/LogoutService.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/kv/LogoutService.java index 29f4e542..5c1c11d8 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/kv/LogoutService.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/kv/LogoutService.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. 
* * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/kv/StoreSecurityService.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/kv/StoreSecurityService.java index 5ff68ee9..c32efef7 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/kv/StoreSecurityService.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/kv/StoreSecurityService.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. * * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/kv/TokenRenewService.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/kv/TokenRenewService.java index 392ecb43..43f7276b 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/kv/TokenRenewService.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/kv/TokenRenewService.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. * * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/protocol/BinaryProtocol.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/protocol/BinaryProtocol.java index d14c6961..c7e3c340 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/protocol/BinaryProtocol.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/protocol/BinaryProtocol.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. * * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/protocol/ByteInputStream.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/protocol/ByteInputStream.java index f71c7f51..03e60080 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/protocol/ByteInputStream.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/protocol/ByteInputStream.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. * * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/protocol/ByteOutputStream.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/protocol/ByteOutputStream.java index cc6aa1f0..435e23d8 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/protocol/ByteOutputStream.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/protocol/ByteOutputStream.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. 
* * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/protocol/HttpConstants.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/protocol/HttpConstants.java index 76740c8b..e8158f03 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/protocol/HttpConstants.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/protocol/HttpConstants.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. * * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: @@ -351,6 +351,20 @@ public class HttpConstants { public static final String SORT_BY = "sort_by"; public static final String SORT_ORDER_ASC = "sort_order_asc"; + /** + * Used by list-workrequests request + */ + public static final String TYPES = "types"; + + /** + * Used by configuration requests + */ + public static final String GENERAL = "general"; + public static final String DRY_RUN = "dryrun"; + public static final String IF_MATCH = "ifmatch"; + public static final String KEY_ID = "keyid"; + public static final String VAULT_ID = "vaultid"; + /** * Used by setTableActive to specify the timestamp of the dml operation * which activates the table in IDLE state. diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/protocol/JsonProtocol.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/protocol/JsonProtocol.java index 76137a7c..b8e379a1 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/protocol/JsonProtocol.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/protocol/JsonProtocol.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. 
* * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: @@ -64,16 +64,18 @@ import oracle.nosql.proxy.sc.TableUtils; import oracle.nosql.proxy.sc.TenantManager; import oracle.nosql.proxy.security.AccessContext; -import oracle.nosql.util.tmi.DdlHistoryEntry; +import oracle.nosql.util.fault.ErrorCode; import oracle.nosql.util.tmi.IndexInfo; import oracle.nosql.util.tmi.IndexInfo.IndexField; import oracle.nosql.util.tmi.IndexInfo.IndexState; +import oracle.nosql.util.tmi.KmsKeyInfo; import oracle.nosql.util.tmi.ReplicaInfo; import oracle.nosql.util.tmi.ReplicaInfo.ReplicaState; import oracle.nosql.util.tmi.TableInfo; import oracle.nosql.util.tmi.TableInfo.ActivityPhase; import oracle.nosql.util.tmi.TableLimits; import oracle.nosql.util.tmi.TableUsage; +import oracle.nosql.util.tmi.WorkRequest; /** * JSON protocol related: @@ -213,9 +215,9 @@ public class JsonProtocol extends Protocol { private static final String ITEMS = "items"; /* Table */ - private static final String ID = "id"; - private static final String TIME_CREATED = "timeCreated"; - private static final String TIME_UPDATED = "timeUpdated"; + public static final String ID = "id"; + public static final String TIME_CREATED = "timeCreated"; + public static final String TIME_UPDATED = "timeUpdated"; private static final String LIFE_CYCLE_DETAILS = "lifecycleDetails"; private static final String PRIMARY_KEY = "primaryKey"; private static final String SHARD_KEY = "shardKey"; @@ -282,6 +284,18 @@ public class JsonProtocol extends Protocol { public static final String CREATE_REPLICA_DETAILS = "CreateReplicaDetails"; public static final String REGION = "region"; + /* For configuration APIs */ + public static final String UPDATE_CONFIGURATION_DETAILS = + "UpdateConfigurationDetails"; + public static final String ENVIRONMENT = "environment"; + public static final String HOSTED_ENVIRONMENT = "HOSTED"; + public static final String MULTI_TENANCY_ENVIRONMENT = "MULTI_TENANCY"; + public static final String IS_OPC_DRY_RUN = "is-opc-dry-run"; + + public static final String KMS_KEY = "kmsKey"; + public static final String KMS_VAULT_ID = "kmsVaultId"; + public static final String KMS_KEY_STATE = "kmsKeyState"; + public enum PutOption { IF_ABSENT, IF_PRESENT @@ -576,70 +590,82 @@ private static String formatDateString(Timestamp ts) { /** * Build Get-WorkRequest response */ - public static String buildWorkRequest(DdlHistoryEntry ddlEntry) { + public static String buildWorkRequest(WorkRequest workRequest) { final JsonBuilder jb = JsonBuilder.create(); - buildWorkRequestSummary(jb, ddlEntry); + buildWorkRequestSummary(jb, workRequest); return jb.toString(); } private static void buildWorkRequestSummary(JsonBuilder jb, - DdlHistoryEntry ddlEntry) { - String opType = null; - if (ddlEntry.getOperation() != null) { - opType = getWorkRequestOperation(ddlEntry.getOperationEnum()); - } - DdlHistoryEntry.Status ddlStatus = ddlEntry.getStatusEnum(); - String state = getWorkRequestStatus(ddlStatus); - int percentage = getWorkRequestProgress(ddlStatus); - - jb.append(OPERATION_TYPE, opType) - .append(STATUS, state) - .append(ID, ddlEntry.getWorkRequestId()) - .append(COMPARTMENT_ID, ddlEntry.getCompartmentId()) - .append(PERCENT_COMPLETE, percentage) - .append(TIME_ACCEPTED, formatDateString(ddlEntry.getCreateTime())); - - if (ddlEntry.getStartTime() != null) { - jb.append(TIME_STARTED, - formatDateString(ddlEntry.getStartTime())); - } - if (ddlStatus == DdlHistoryEntry.Status.SUCCEEDED || - ddlStatus == 
DdlHistoryEntry.Status.FAILED) { - if (ddlEntry.getUpdateTime() != null) { - /* SUCCEEDED, FAILED */ - jb.append(TIME_FINISHED, - formatDateString(ddlEntry.getUpdateTime())); + WorkRequest request) { + + final WorkRequest.Status status = request.getStatus(); + jb.append(OPERATION_TYPE, mapOperationType(request.getType())) + .append(STATUS, request.getStatus().name()) + .append(ID, request.getId()) + .append(COMPARTMENT_ID, request.getCompartmentId()) + .append(PERCENT_COMPLETE, getWorkRequestProgress(status)) + .append(TIME_ACCEPTED, formatDateString(request.getTimeAccepted())); + + if (request.getTimeStarted() > 0) { + jb.append(TIME_STARTED, formatDateString(request.getTimeStarted())); + } + + if (request.getTimeFinished() > 0) { + jb.append(TIME_FINISHED, formatDateString(request.getTimeFinished())); + } + + String entityUrl = null; + String entityId = request.getEntityId(); + if (request.getEntityType() == WorkRequest.EntityType.TABLE) { + entityUrl = buildGetTableUrl(REST_CURRENT_VERSION, + request.getEntityName(), + request.getCompartmentId()); + if (entityId == null) { + entityId = NameUtils.makeQualifiedName( + request.getCompartmentId(), + request.getEntityName()); } + } else { + entityUrl = buildGetConfigurationUrl(REST_CURRENT_VERSION, + request.getCompartmentId()); } - String entityUrl = buildGetTableUrl(REST_CURRENT_VERSION, - ddlEntry.getTableName(), - ddlEntry.getCompartmentId()); - String identifier = (ddlEntry.getTableOcid() != null) ? - ddlEntry.getTableOcid() : - NameUtils.makeQualifiedName(ddlEntry.getCompartmentId(), - ddlEntry.getTableName()); - /* resources */ jb.startArray(RESOURCES) .startObject(null) - .append(ENTITY_TYPE, "TABLE") - .append(ACTION_TYPE, getActionType(ddlEntry)) - .append(IDENTIFIER, identifier) + .append(ENTITY_TYPE, request.getEntityType().name()) + .append(ACTION_TYPE, request.getActionType().name()) + .append(IDENTIFIER, request.getEntityId()) .append(ENTITY_URI, entityUrl) .endObject() .endArray(); } + private static String mapOperationType(WorkRequest.OperationType type) { + switch(type) { + case CREATE_TABLE: + case UPDATE_TABLE: + case DELETE_TABLE: + return type.name(); + case UPDATE_KMS_KEY: + case REMOVE_KMS_KEY: + return "UPDATE_CONFIGURATION"; + default: + throw new IllegalArgumentException( + "Unknown WorkRequest OperationType: " + type); + } + } + /** * Build List-WorkRequests response */ - public static String buildWorkRequestCollection(DdlHistoryEntry[] entries) { + public static String buildWorkRequestCollection(WorkRequest[] requests) { JsonBuilder jb = JsonBuilder.create();; jb.startArray(ITEMS); - for (DdlHistoryEntry ddlEntry : entries) { + for (WorkRequest request : requests) { jb.startObject(null); - buildWorkRequestSummary(jb, ddlEntry); + buildWorkRequestSummary(jb, request); jb.endObject(); } jb.endArray(); @@ -649,93 +675,51 @@ public static String buildWorkRequestCollection(DdlHistoryEntry[] entries) { /** * Build List-WorkRequestErrors response */ - public static String buildWorkRequestErrors(DdlHistoryEntry ddlEntry) { + public static String buildWorkRequestErrors(WorkRequest workRequest) { final JsonBuilder jb = JsonBuilder.create(); jb.startArray(ITEMS); - if (ddlEntry.getErrorCode() != null) { - buildWorkRequestError(jb, ddlEntry); + if (workRequest.getErrorCode() != ErrorCode.NO_ERROR) { + buildWorkRequestError(jb, workRequest); } jb.endArray(); return jb.toString(); } private static void buildWorkRequestError(JsonBuilder jb, - DdlHistoryEntry ddlEntry) { - Timestamp ts = (ddlEntry.getUpdateTime() != null) ? 
- ddlEntry.getUpdateTime() : ddlEntry.getCreateTime(); - String msg = ddlEntry.getResultMsg(); - if (msg != null && ddlEntry.getTableOcid() != null) { - msg = msg.replace(ddlEntry.getTableOcid(), ddlEntry.getTableName()); - } + WorkRequest workRequest) { + long time = (workRequest.getTimeFinished() > 0) ? + workRequest.getTimeFinished() : + workRequest.getTimeStarted(); + jb.startObject(null) - .append(CODE, ddlEntry.getErrorCodeEnum().getType()) - .append(MESSAGE, msg) - .append(TIMESTAMP, formatDateString(ts)) + .append(CODE, workRequest.getErrorCode().getType()) + .append(MESSAGE, workRequest.getErrorMessage()) + .append(TIMESTAMP, formatDateString(time)) .endObject(); } /** * Build List-WorkRequestLogs response */ - public static String buildWorkRequestLogs(DdlHistoryEntry ddlEntry) { + public static String buildWorkRequestLogs(WorkRequest workRequest) { final JsonBuilder jb = JsonBuilder.create(); jb.startArray(ITEMS); - buildWorkRequestLog(jb, ddlEntry); + buildWorkRequestLog(jb, workRequest); jb.endArray(); return jb.toString(); } private static void buildWorkRequestLog(JsonBuilder jb, - DdlHistoryEntry ddl) { - Timestamp ts = (ddl.getUpdateTime() != null) ? - ddl.getUpdateTime() : ddl.getCreateTime(); + WorkRequest workRequest) { + long time = (workRequest.getTimeFinished() > 0) ? + workRequest.getTimeFinished() : + workRequest.getTimeAccepted(); jb.startObject(null) - .append(MESSAGE, ddl.getResultMsg()) - .append(TIMESTAMP, formatDateString(ts)) + .append(MESSAGE, workRequest.getErrorMessage()) + .append(TIMESTAMP, formatDateString(time)) .endObject(); } - private static String getWorkRequestOperation(DdlHistoryEntry.DdlOp op) { - switch(op) { - case createTable: - return "CREATE_TABLE"; - case dropTable: - return "DELETE_TABLE"; - case alter: - case update: - case createIndex: - case dropIndex: - case changeCompartment: - case updateTableReplica: - case parentAddReplica: - case parentAddReplicaTable: - case parentDropReplica: - case parentDropReplicaTable: - case parentCreateIndex: - case parentDropIndex: - case parentAlterTable: - case parentUpdateTable: - return "UPDATE_TABLE"; - default: - throw new IllegalStateException("Unknown DdlOp: " + op); - } - } - - private static String getWorkRequestStatus(DdlHistoryEntry.Status state) { - switch (state) { - case ACCEPTED: - return "ACCEPTED"; - case INPROGRESS: - return "IN_PROGRESS"; - case SUCCEEDED: - return "SUCCEEDED"; - case FAILED: - return "FAILED"; - default: - throw new IllegalStateException("Invalid state " + state); - } - } - /** * Since we don't have progress percentage information returned from SC, * just simple return the progress based on the state: @@ -743,49 +727,16 @@ private static String getWorkRequestStatus(DdlHistoryEntry.Status state) { * IN_PROGRESS: 50 * SUCCEEDED/FAILED: 100 */ - private static int getWorkRequestProgress(DdlHistoryEntry.Status state) { - if (state == DdlHistoryEntry.Status.ACCEPTED) { + private static int getWorkRequestProgress(WorkRequest.Status state) { + if (state == WorkRequest.Status.ACCEPTED) { return 0; - } else if (state == DdlHistoryEntry.Status.INPROGRESS) { + } else if (state == WorkRequest.Status.IN_PROGRESS) { return 50; } /* SUCCEEDED, fAILED */ return 100; } - private static String getActionType(DdlHistoryEntry ddlEntry) { - DdlHistoryEntry.DdlOp op = ddlEntry.getOperationEnum(); - DdlHistoryEntry.Status state = ddlEntry.getStatusEnum(); - if (state == DdlHistoryEntry.Status.SUCCEEDED || - state == DdlHistoryEntry.Status.FAILED) { - switch(op) { - case createTable: - return 
"CREATED"; - case dropTable: - return "DELETED"; - case alter: - case update: - case createIndex: - case dropIndex: - case changeCompartment: - case updateTableReplica: - case parentAddReplica: - case parentAddReplicaTable: - case parentDropReplica: - case parentDropReplicaTable: - case parentCreateIndex: - case parentDropIndex: - case parentAlterTable: - case parentUpdateTable: - return "UPDATED"; - default: - throw new IllegalStateException("Unknown DdlOp: " + op); - } - } else { - return "IN_PROGRESS"; - } - } - /** * Builds GET TABLE url: * //tables/?compartmentId= @@ -808,6 +759,20 @@ private static String buildGetTableUrl(String root, return sb.toString(); } + /** + * Builds GET Configuration url: + * //configuration?compartmentId= + */ + private static String buildGetConfigurationUrl(String root, + String compartmentId) { + StringBuilder sb = new StringBuilder(URL_PATH_DELIMITER); + sb.append(root) + .append(URL_PATH_DELIMITER) + .append("configuration?compartmentId=") + .append(compartmentId); + return sb.toString(); + } + /** * Returns the LifecycleState based on the TableInfo.State and * TableInfo.Activity. @@ -1399,6 +1364,43 @@ public static void buildFieldValue(JsonBuilder jb, } } + public static String buildConfiguration(KmsKeyInfo key) { + JsonBuilder jb = JsonBuilder.create(); + jb.append(ENVIRONMENT, (key.isHostedEnv() ? HOSTED_ENVIRONMENT : + MULTI_TENANCY_ENVIRONMENT)); + if (key.getState() != null) { + jb.startObject(KMS_KEY); + jb.append(KMS_KEY_STATE, mapKmsKeyState(key.getState())); + if (key.getKeyId() != null) { + jb.append(ID, key.getKeyId()) + .append(KMS_VAULT_ID, key.getVaultId()) + .append(TIME_CREATED, formatDateString(key.getCreateTime())) + .append(TIME_UPDATED, formatDateString(key.getUpdateTime())); + } + jb.endObject(); + } + return jb.toString(); + } + + private static String mapKmsKeyState(KmsKeyInfo.KeyState state) { + switch (state) { + case UPDATING: + return "UPDATING"; + case ACTIVE: + return "ACTIVE"; + case DELETED: + return "DELETED"; + case FAILED: + return "FAILED"; + case REVERTING: + return "REVERTING"; + case DISABLED: + return "DISABLED"; + default: + throw new IllegalStateException("Unexpected KeyState: " + state); + } + } + /** * Build the JSON string for RequestUsage */ diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/protocol/NsonProtocol.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/protocol/NsonProtocol.java index 1d71ed94..781e070b 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/protocol/NsonProtocol.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/protocol/NsonProtocol.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. 
* * Licensed under the Universal Permissive License v 1.0 as shown at * https://oss.oracle.com/licenses/upl/ @@ -12,26 +12,18 @@ import static oracle.nosql.proxy.ProxySerialization.getTableState; import static oracle.nosql.proxy.protocol.BinaryProtocol.ON_DEMAND; import static oracle.nosql.proxy.protocol.BinaryProtocol.PROVISIONED; -import static oracle.nosql.proxy.protocol.BinaryProtocol.QUERY_V1; import static oracle.nosql.proxy.protocol.BinaryProtocol.QUERY_V4; import static oracle.nosql.proxy.protocol.BinaryProtocol.QUERY_V5; import java.io.IOException; -import java.util.ArrayList; import java.util.Collection; import java.util.List; import java.util.Map; -import java.util.Set; import java.util.TreeMap; import oracle.kv.Consistency; import oracle.kv.StatementResult; import oracle.kv.Version; -import oracle.kv.table.FieldValue; -import oracle.kv.table.ReturnRow; -import oracle.kv.table.TableOpExecutionException; -import oracle.kv.table.TableOperation; -import oracle.kv.table.TableOperationResult; import oracle.kv.impl.api.ops.Result; import oracle.kv.impl.api.query.PreparedStatementImpl; import oracle.kv.impl.api.query.QueryStatementResultImpl; @@ -44,6 +36,11 @@ import oracle.kv.impl.query.runtime.RuntimeControlBlock; import oracle.kv.impl.topo.RepGroupId; import oracle.kv.impl.topo.Topology; +import oracle.kv.table.FieldValue; +import oracle.kv.table.ReturnRow; +import oracle.kv.table.TableOpExecutionException; +import oracle.kv.table.TableOperation; +import oracle.kv.table.TableOperationResult; import oracle.nosql.common.json.JsonUtils; import oracle.nosql.nson.Nson.NsonSerializer; import oracle.nosql.nson.util.NettyByteOutputStream; @@ -161,6 +158,7 @@ public class NsonProtocol { public static String LIST_MAX_TO_READ = "lx"; public static String LIST_START_INDEX = "ls"; public static String MATCH_VERSION = "mv"; + public static String MAX_QUERY_PARALLELISM = "mp"; public static String MAX_READ_KB = "mr"; public static String MAX_SHARD_USAGE_PERCENT = "ms"; public static String MAX_WRITE_KB = "mw"; @@ -168,6 +166,7 @@ public class NsonProtocol { public static String NAMESPACE = "ns"; public static String NUMBER_LIMIT = "nl"; public static String NUM_OPERATIONS = "no"; + public static String NUM_QUERY_OPERATIONS = "nq"; public static String OPERATION = "op"; public static String OPERATIONS = "os"; public static String OPERATION_ID = "od"; @@ -181,6 +180,7 @@ public class NsonProtocol { public static String QUERY = "q"; public static String QUERY_BATCH_TRACES = "qts"; public static String QUERY_NAME = "qn"; + public static String QUERY_OPERATION_NUM = "on"; public static String QUERY_VERSION = "qv"; public static String RANGE = "rg"; public static String RANGE_PATH = "rp"; @@ -190,6 +190,7 @@ public class NsonProtocol { public static String RESOURCE = "ro"; public static String RESOURCE_ID = "rd"; public static String RETURN_ROW = "rr"; + public static String ROW_METADATA = "mt"; public static String SHARD_ID = "si"; public static String SERVER_MEMORY_CONSUMPTION = "sm"; public static String START = "sr"; @@ -266,6 +267,7 @@ public class NsonProtocol { /* row metadata */ public static String EXPIRATION = "xp"; + public static String CREATION_TIME = "ct"; public static String MODIFIED = "md"; public static String ROW = "r"; public static String ROW_VERSION = "rv"; @@ -274,6 +276,7 @@ public class NsonProtocol { public static String EXISTING_MOD_TIME = "em"; public static String EXISTING_VALUE = "el"; public static String EXISTING_VERSION = "ev"; + public static String 
EXISTING_ROW_METADATA = "ed"; public static String GENERATED = "gn"; public static String RETURN_INFO = "ri"; @@ -636,10 +639,13 @@ private static void writeOperationResult(NsonSerializer ns, ns.endMapField(EXISTING_VALUE); Version version = opResult.getPreviousVersion(); + long creationTime = row.getCreationTime(); long modTime = row.getLastModificationTime(); + writeMapField(ns, CREATION_TIME, creationTime); writeMapField(ns, EXISTING_MOD_TIME, modTime); writeMapField(ns, EXISTING_VERSION, version.toByteArray()); + writeMapField(ns, EXISTING_ROW_METADATA, row.getRowMetadata()); endMap(ns, RETURN_INFO); } } @@ -1078,7 +1084,8 @@ static void writeTableAccessInfo(NioByteOutputStream out, private static void writePreparedQuery(NsonSerializer ns, NioByteOutputStream buf, PrepareCB cbInfo, - PreparedStatementImpl prep) + PreparedStatementImpl prep, + Topology topo) throws IOException { if (buf.isDirect()) { @@ -1094,6 +1101,8 @@ private static void writePreparedQuery(NsonSerializer ns, writeMapField(ns, TABLE_NAME, cbInfo.getTableName()); writeMapField(ns, QUERY_OPERATION, (int)cbInfo.getOperation().ordinal()); + int maxParallelism = computeMaxParallelism(prep, topo); + writeMapField(ns, MAX_QUERY_PARALLELISM, maxParallelism); /* * serialize the table access info and prepared query into a @@ -1111,6 +1120,30 @@ private static void writePreparedQuery(NsonSerializer ns, writeMapField(ns, PREPARED_QUERY, buf.array(), 0, buf.getOffset()); } + /* + * Single partition, along with any query that requires sorting or + * aggregation on client: 0 (indicates no parallelism possible) + * All shards: num shards + * All partitions: number of partitions + */ + private static int computeMaxParallelism(PreparedStatementImpl prep, + Topology topo) { + if (prep.getDistributionKind() == null) { + /* this happens for update queries */ + return 0; + } + + if (!prep.isSimpleQuery() || prep.getDistributionKind().equals( + PreparedStatementImpl.DistributionKind.SINGLE_PARTITION)) { + return 0; + } + if (prep.getDistributionKind().equals( + PreparedStatementImpl.DistributionKind.ALL_SHARDS)) { + return topo.getNumRepGroups(); + } + /* else ALL_PARTITIONS */ + return topo.getNumPartitions(); + } public static void writeQueryFinish(NsonSerializer ns, DataServiceHandler handler, @@ -1153,7 +1186,7 @@ public static void writeQueryFinish(NsonSerializer ns, if (isPrepared == false) { /* Write the proxy-side query plan. */ - writePreparedQuery(ns, buf, cbInfo, prep); + writePreparedQuery(ns, buf, cbInfo, prep, topo); /* Write the driver-side query plan. */ FieldValueWriterImpl valWriter = new FieldValueWriterImpl(); buf.setWriteIndex(0); // reset to beginning @@ -1276,10 +1309,13 @@ protected static void writeRow(NsonSerializer ns, startMap(ns, ROW); /* row metadata */ + writeMapField(ns, CREATION_TIME, result.getPreviousCreationTime()); writeMapField(ns, MODIFIED, result.getPreviousModificationTime()); writeMapField(ns, EXPIRATION, result.getPreviousExpirationTime()); writeMapField(ns, ROW_VERSION, result.getPreviousVersion().toByteArray()); + writeMapField(ns, ROW_METADATA, result.getPreviousValue() != null ? + result.getPreviousValue().getRowMetadata() : null); /* row value is last */ /* TODO: when available, direct Avro to NSON? 
*/ ns.startMapField(VALUE); @@ -1317,12 +1353,20 @@ protected static void writeReturnRow(NsonSerializer ns, ns.endMapField(EXISTING_VALUE); Version version = reader.getVersion(); - long modTime = result.getPreviousModificationTime(); + long creationTime = result.getPreviousCreationTime(); + writeMapField(ns, CREATION_TIME, creationTime); + + long modTime = result.getPreviousModificationTime(); writeMapField(ns, EXISTING_MOD_TIME, modTime); + if (version != null) { writeMapField(ns, EXISTING_VERSION, version.toByteArray()); } + String prevRowMetadata = result.getPreviousValue().getRowMetadata(); + if (prevRowMetadata != null) { + writeMapField(ns, EXISTING_ROW_METADATA, prevRowMetadata); + } endMap(ns, RETURN_INFO); } diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/protocol/PackedInteger.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/protocol/PackedInteger.java index 342362f9..e69c8dce 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/protocol/PackedInteger.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/protocol/PackedInteger.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. * * This file was distributed by Oracle as part of a version of Oracle Berkeley * DB Java Edition made available at: diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/protocol/Protocol.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/protocol/Protocol.java index a1bf38a5..6a920299 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/protocol/Protocol.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/protocol/Protocol.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. 
* * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: @@ -110,7 +110,14 @@ public static enum OpCode { DROP_REPLICA(34), GET_REPLICA_STATS(35), INTERNAL_DDL(36), - INTERNAL_STATUS(37); + INTERNAL_STATUS(37), + + /* added for Configuration APIs */ + GET_CONFIGURATION(38), + UPDATE_CONFIGURATION(39), + GET_CONFIG_KMS_KEY(40), + UPDATE_CONFIG_KMS_KEY(41), + REMOVE_CONFIG_KMS_KEY(42); private static final OpCode[] VALUES = values(); OpCode(int code) { @@ -193,6 +200,28 @@ public static boolean isDdlOp(OpCode op) { op == OpCode.CREATE_INDEX || op == OpCode.DROP_INDEX); } + + public static boolean isConfigurationRequestOp(OpCode op) { + return (op.ordinal() >= GET_CONFIGURATION.ordinal() && + op.ordinal() <= REMOVE_CONFIG_KMS_KEY.ordinal()); + } + + public static boolean isUpdateConfigurationRequestOp(OpCode op) { + return (op.ordinal() == UPDATE_CONFIGURATION.ordinal() || + op.ordinal() == REMOVE_CONFIG_KMS_KEY.ordinal()); + } + + public static OpCode[] getGetConfigurationSubOps() { + return new OpCode[] {OpCode.GET_CONFIG_KMS_KEY}; + } + + public static OpCode[] getListWorkRequestSubOps(boolean cmekEnabled) { + if (cmekEnabled) { + return new OpCode[] {OpCode.GET_TABLE, + OpCode.GET_CONFIG_KMS_KEY}; + } + return new OpCode[] {OpCode.GET_TABLE}; + } } /* diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/protocol/SerializationUtil.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/protocol/SerializationUtil.java index 2567386a..48ea4cec 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/protocol/SerializationUtil.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/protocol/SerializationUtil.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. * * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/rest/ErrorCode.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/rest/ErrorCode.java index 6fa7b396..36141b79 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/rest/ErrorCode.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/rest/ErrorCode.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. * * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: @@ -234,12 +234,12 @@ public String getErrorCode() { SECURITY_INFO_UNAVAILABLE { @Override public HttpResponseStatus getHttpStatusCode() { - return HttpResponseStatus.NOT_FOUND; + return HttpResponseStatus.SERVICE_UNAVAILABLE; } @Override public String getErrorCode() { - return TYPE_NOT_AUTHORIZED_OR_NOT_FOUND; + return TYPE_SERVICE_AVAILABLE; } }, CANNOT_CANCEL_WORK_REQUEST { diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/rest/RequestParams.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/rest/RequestParams.java index 32523e31..53323318 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/rest/RequestParams.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/rest/RequestParams.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. 
All rights reserved. * * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/rest/RestDataService.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/rest/RestDataService.java index 43a4cbe8..95f93b35 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/rest/RestDataService.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/rest/RestDataService.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. * * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: @@ -42,6 +42,7 @@ import static oracle.nosql.proxy.protocol.Protocol.OpCode.DROP_REPLICA; import static oracle.nosql.proxy.protocol.Protocol.OpCode.DROP_TABLE; import static oracle.nosql.proxy.protocol.Protocol.OpCode.GET; +import static oracle.nosql.proxy.protocol.Protocol.OpCode.GET_CONFIGURATION; import static oracle.nosql.proxy.protocol.Protocol.OpCode.GET_INDEX; import static oracle.nosql.proxy.protocol.Protocol.OpCode.GET_INDEXES; import static oracle.nosql.proxy.protocol.Protocol.OpCode.GET_TABLE; @@ -54,7 +55,9 @@ import static oracle.nosql.proxy.protocol.Protocol.OpCode.PREPARE; import static oracle.nosql.proxy.protocol.Protocol.OpCode.PUT; import static oracle.nosql.proxy.protocol.Protocol.OpCode.QUERY; +import static oracle.nosql.proxy.protocol.Protocol.OpCode.REMOVE_CONFIG_KMS_KEY; import static oracle.nosql.proxy.protocol.Protocol.OpCode.SUMMARIZE; +import static oracle.nosql.proxy.protocol.Protocol.OpCode.UPDATE_CONFIGURATION; import static oracle.nosql.proxy.security.AccessContext.EXTERNAL_OCID_PREFIX; import java.io.IOException; @@ -74,8 +77,6 @@ import java.util.function.Supplier; import java.util.logging.Level; -import org.checkerframework.checker.nullness.qual.NonNull; - import io.netty.buffer.ByteBuf; import io.netty.buffer.Unpooled; import io.netty.channel.ChannelHandlerContext; @@ -132,7 +133,6 @@ import oracle.nosql.proxy.MonitorStats.OperationType; import oracle.nosql.proxy.audit.ProxyAuditManager; import oracle.nosql.proxy.filter.FilterHandler; -import oracle.nosql.proxy.filter.FilterHandler.Action; import oracle.nosql.proxy.filter.FilterHandler.Filter; import oracle.nosql.proxy.protocol.ByteInputStream; import oracle.nosql.proxy.protocol.ByteOutputStream; @@ -155,13 +155,14 @@ import oracle.nosql.proxy.security.AccessContext.Type; import oracle.nosql.proxy.util.ErrorManager; import oracle.nosql.proxy.util.TableCache.TableEntry; -import oracle.nosql.util.tmi.DdlHistoryEntry; +import oracle.nosql.util.filter.Rule; import oracle.nosql.util.tmi.IndexInfo; import oracle.nosql.util.tmi.TableInfo; import oracle.nosql.util.tmi.TableLimits; +import oracle.nosql.util.tmi.WorkRequest; /** - * Rest service that handles below requests: + * Rest service that handles the following requests: * * GET //tables * POST //tables @@ -193,6 +194,10 @@ * GET //workRequests/{workRequestId}/logs * DELETE //workRequests/{workRequestId} * + * GET //configuration/ + * PUT //configuration/ + * POST //configuration/actions/unassignkmskey + * * OPTIONS //* */ /* @@ -224,12 +229,6 @@ public abstract class RestDataService extends DataServiceHandler private final static int DEFAULT_PAGE_SIZE = 1000; private final static int DEFAULT_TIMEOUT_MS = 5000; - /* - * Response buffer size, minimum. 
Consider adjusting this per-request, - * or other mechanism to reduce the frequency of resizing the buffer. - */ - private static final int RESPONSE_BUFFER_SIZE = 1024; - /* The UTC zone */ private final static ZoneId UTCZone = ZoneId.of(ZoneOffset.UTC.getId()); /* DataTimeFormatter -- use the static ISO_D */ @@ -254,6 +253,7 @@ public abstract class RestDataService extends DataServiceHandler private final Map> methodMap = new HashMap<>(); + private final boolean isCmekEnabled; private void initMethods() { /* Table resource */ @@ -312,6 +312,12 @@ private void initMethods() { GET_WORKREQUEST_LOGS); initMethod(HttpMethod.DELETE, "/workRequests/{workRequestId}", CANCEL_WORKREQUEST); + + /* Configuration */ + initMethod(HttpMethod.GET, "/configuration", GET_CONFIGURATION); + initMethod(HttpMethod.PUT, "/configuration", UPDATE_CONFIGURATION); + initMethod(HttpMethod.POST, "/configuration/actions/unassignkmskey", + REMOVE_CONFIG_KMS_KEY); } private final Map operationMap = @@ -353,6 +359,11 @@ private void initOperations() { this::handleListWorkRequestErrors); initOperation(GET_WORKREQUEST_LOGS, this::handleListWorkRequestLogs); initOperation(CANCEL_WORKREQUEST, this::handleCancelWorkRequest); + + /* Configuration */ + initOperation(GET_CONFIGURATION, this::handleGetConfiguration); + initOperation(UPDATE_CONFIGURATION, this::handleUpdateConfiguration); + initOperation(REMOVE_CONFIG_KMS_KEY, this::handleRemoveKmsKey); } /* @@ -406,10 +417,12 @@ public RestDataService(SkLogger logger, LimiterManager limiterManager, Config config, LogControl logControl) { - super(logger, tm, stats, audit, filter, errorManager, limiterManager, + super(logger, tm, stats, audit, + filter, errorManager, limiterManager, config, logControl); initMethods(); initOperations(); + isCmekEnabled = config.isCmekEnabled(); } private void initOperation(OpCode op, ProxyOperation operation) { @@ -486,11 +499,16 @@ public FullHttpResponse handleRequest(FullHttpRequest request, ChannelHandlerContext ctx, LogContext lc) { final RequestContext rc = new RequestContext(request, ctx, lc, null); + /* set up REST specific information */ rc.readREST(); + /* handle request */ FullHttpResponse resp = handleRequestInternal(rc); + /* Close the output stream, the response may still be sent later */ + rc.releaseBuffers(); + /* check for excessive errors */ if (isErrorLimitingResponse(resp, rc.ctx)) { if (incrementErrorRate(resp, rc)) { @@ -530,25 +548,22 @@ private FullHttpResponse handleRequestInternal(RequestContext rc) { logger.fine("Proxy data service request on channel: " + rc.ctx, rc.lc); - /* Block all requests if there is "big red button" rule */ - Action action = checkBlockAll(rc.lc); - if (action != null) { - return action.handleRequest(null, null, rc.lc); - } - final HttpMethod method = rc.request.method(); try { + /* Block all requests if there is "big red button" rule */ + Rule rule = getBlockAll(rc.lc); + if (rule != null) { + return filter.handleRequest(rc, rule); + } + /* Handle OPTIONS used for pre-flight request. 
*/ if (HttpMethod.OPTIONS.equals(method)) { return handleOptions(rc.request, rc.lc); } /* Validate the input */ - FullHttpResponse violation = validateHttpRequest(rc.request, - rc.requestId, - rc.ctx, - rc.lc); + FullHttpResponse violation = validateHttpRequest(rc); if (violation != null) { return violation; } @@ -574,10 +589,8 @@ private FullHttpResponse handleRequestInternal(RequestContext rc) { "Request exception" /* subject */, faultMsg /* message */, e); } - return createErrorResponse(rc.ctx, - e.getMessage(), - ErrorCode.INTERNAL_SERVER_ERROR, - rc.requestId); + return createErrorResponse(rc, e.getMessage(), + mapExceptionToErrorCode(e)); } } @@ -597,10 +610,7 @@ protected String getAdditionalAllowedHeaders() { /* * Validate the request. */ - protected FullHttpResponse validateHttpRequest(FullHttpRequest request, - String requestId, - ChannelHandlerContext ctx, - LogContext lc) { + protected FullHttpResponse validateHttpRequest(RequestContext rc) { return null; } @@ -616,67 +626,36 @@ private FullHttpResponse handleRequest(RequestContext rc) { boolean doRetry = true; final String serialVersion = rc.restParams.getRoot(); - @NonNull - /* TODO: use buffer methods in RequestContext */ - ByteBuf responseBuffer; - rc.opCode = findOpCode(rc.request.method(), rc.restParams); rc.opType = getOpType(rc.opCode); /* TODO: change this to match attmptRetry() logic in DataService */ while (true) { - /* - * Allocate the output buffer and stream here to simplify reference - * counting and ensure that the stream is closed. - */ - responseBuffer = rc.ctx.alloc().directBuffer(RESPONSE_BUFFER_SIZE); rc.origTableName = null; rc.mappedTableName = null; rc.actx = null; - String compartmentId = null; FullHttpResponse resp = null; try { try { try { + /* Check whether the configuration operation is supported */ + checkConfigurationOperation(rc.opCode); + /* Filters request based on the OpCode */ filterRequest(rc.opCode, rc.lc); - /* - * Get the operation method and call it - */ + /* Get the operation handler */ ProxyOperation operation = operationMap.get(rc.opCode); if (operation == null) { throw new RequestException(UNKNOWN_OPERATION, "Unknown op code: " + rc.opCode); } - if (opHasCompartmentIdInUrl(rc.opCode)) { - compartmentId = readCompartmentId(rc.restParams); - } - if (!OpCode.isWorkRequestOp(rc.opCode)) { - if (opHasTableNameOrIdInUrl(rc.opCode)) { - rc.origTableName = readTableNameOrId(rc.restParams); - } - rc.actx = checkAccess(rc.request, - compartmentId, rc.opCode, - rc.origTableName, - this, rc.lc); - rc.mappedTableName = getMapTableName(rc.actx, - rc.opCode, - rc.origTableName); - } else { - String workRequestId = null; - if (opHasWorkRequestIdInUrl(rc.opCode)) { - workRequestId = readWorkRequestId(rc.restParams); - } - rc.actx = checkWorkRequestAccess(rc.request, - rc.opCode, - compartmentId, - workRequestId, rc.lc); - } + /* Perform access check */ + checkAccess(rc); startAudit(rc, rc.origTableName, rc.actx); @@ -688,7 +667,7 @@ private FullHttpResponse handleRequest(RequestContext rc) { } /* Create response */ - resp = createResponse(responseBuffer, rc.requestId); + resp = createResponse(rc.bbos.getByteBuf(), rc.requestId); /* Execute operation */ operation.handle(resp, rc, rc.actx, serialVersion); @@ -733,8 +712,8 @@ private FullHttpResponse handleRequest(RequestContext rc) { } catch (MetadataNotFoundException mnfe) { if (doRetry) { - /* release/reset request-specific resources */ - responseBuffer.release(); + /* reset response buffer for retry */ + rc.resetOutputBuffer(); if 
(rc.mappedTableName != null) { /* remove the table from the cache */ @@ -795,12 +774,13 @@ private FullHttpResponse handleRequest(RequestContext rc) { if (rc.actx != null) { rc.actx.resetTableNameMapping(); } - /* release/reset request-specific resources */ - responseBuffer.release(); + + /* reset response buffer for retry */ + rc.resetOutputBuffer(); doRetry = false; continue; } catch (FilterRequestException bre) { - return handleFilterRequest(bre, rc.requestId, rc.lc); + return handleFilterRequest(bre, rc); } catch (Throwable e) { throw new RequestException(SERVER_ERROR, "Unknown exception: " + e); @@ -808,17 +788,16 @@ } } catch (RequestException re) { logRequestException(re, rc.lc); - responseBuffer.clear(); /* Mark failure */ markOpFailed(rc, getRequestExceptionFailure(re)); /* Build error response */ - resp = createErrorResponse( - responseBuffer, - mapErrorMessage(rc.actx, rc.origTableName, re.getMessage()), - mapExceptionToErrorCode(re), - rc.requestId); + resp = createErrorResponse(rc, + mapErrorMessage(rc.actx, + rc.origTableName, + re.getMessage()), + mapExceptionToErrorCode(re)); if (logger.isLoggable(Level.FINE, rc.lc)) { logger.fine("handleRequest, failed op=" + opCodeToRestString(rc.opCode) + @@ -829,15 +808,116 @@ } } + private void checkConfigurationOperation(OpCode opCode) { + if (!isCmekEnabled && OpCode.isUpdateConfigurationRequestOp(opCode)) { + throw new RequestException(ILLEGAL_ARGUMENT, + "The configuration operation is not enabled: " + opCode); + } + } + + /* + * Performs access check for 3 types of operations: + * 1. Work request + * 2. Configuration + * 3. DDL and DML + */ + private void checkAccess(RequestContext rc) { + String compartmentId = null; + + if (opHasCompartmentIdInUrl(rc.opCode)) { + compartmentId = readCompartmentId(rc.restParams); + } + + if (OpCode.isWorkRequestOp(rc.opCode)) { + String workRequestId = null; + if (opHasWorkRequestIdInUrl(rc.opCode)) { + workRequestId = readWorkRequestId(rc.restParams); + } + OpCode[] authorizeOps = null; + boolean shouldAuthorizeAllOps = true; + if (rc.opCode == OpCode.LIST_WORKREQUESTS) { + /* + * List-work-requests returns all work requests for + * operations based on the work request mechanism. Currently, + * we have DDL and CMEK operations. + * + * List-work-requests does not require that all sub-operations + * be authorized; it only returns the work requests for + * those operations that are authorized for this request. + * + * e.g. If the user only has permission to read DDL work + * requests, list-work-requests will only return DDL work + * requests. If the user has permission to read work requests + * for both DDL and CMEK operations, work requests for both + * operations will be returned. + * + * The authorizeOps array contains all the operations to be + * authorized. The shouldAuthorizeAllOps flag is set to false, + * indicating that not all operations need to be authorized. + * After the permission check, the authorized operations are + * returned in AccessContext.AuthorizedOps.
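To make the partial-authorization behavior described above concrete: the caller's requested sub-operations are filtered down to the subset it is actually allowed to perform, and only that subset scopes the listing. A minimal, self-contained Java sketch of that idea follows; the SubOp enum and the isAllowed predicate are hypothetical stand-ins, not the proxy's real OpCode or authorization API.

import java.util.EnumSet;
import java.util.Set;
import java.util.function.Predicate;

public class PartialAuthSketch {

    /* Hypothetical stand-ins for the work-request sub-operations. */
    enum SubOp { READ_DDL_WORK_REQUEST, READ_KMS_KEY_WORK_REQUEST }

    /*
     * Keep only the sub-operations the caller is allowed to perform; an
     * empty result means the listing itself should be rejected.
     */
    static Set<SubOp> authorizeAny(Set<SubOp> requested,
                                   Predicate<SubOp> isAllowed) {
        Set<SubOp> granted = EnumSet.noneOf(SubOp.class);
        for (SubOp op : requested) {
            if (isAllowed.test(op)) {
                granted.add(op);
            }
        }
        return granted;
    }

    public static void main(String[] args) {
        /* Caller may read DDL work requests but not CMEK work requests. */
        Set<SubOp> granted =
            authorizeAny(EnumSet.allOf(SubOp.class),
                         op -> op == SubOp.READ_DDL_WORK_REQUEST);
        System.out.println("Authorized sub-operations: " + granted);
    }
}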
+ */ + authorizeOps = OpCode.getListWorkRequestSubOps(isCmekEnabled); + shouldAuthorizeAllOps = false; + } + rc.actx = checkWorkRequestAccess(rc.request, + rc.opCode, + authorizeOps, + shouldAuthorizeAllOps, + compartmentId, + workRequestId, + rc.lc); + } else if (OpCode.isConfigurationRequestOp(rc.opCode)) { + OpCode[] authorizeOpCodes = null; + if (rc.opCode == OpCode.GET_CONFIGURATION) { + /* + * Get-configuration returns the service-level configurations; + * we only have the kms-key configuration so far, and more + * configurations could be added in the future. + * + * Get-configuration requires that the user has permission to + * read all of the sub-configurations. If the user lacks + * permission to read any sub-configuration, the user does not + * have permission to execute get-configuration. + */ + authorizeOpCodes = OpCode.getGetConfigurationSubOps(); + } + rc.actx = checkConfigurationAccess(rc.request, + rc.opCode, + authorizeOpCodes, + compartmentId, + rc.lc); + } else { + if (opHasTableNameOrIdInUrl(rc.opCode)) { + rc.origTableName = readTableNameOrId(rc.restParams); + } + rc.actx = checkAccess(rc.request, compartmentId, rc.opCode, + rc.origTableName, this, rc.lc); + rc.mappedTableName = getMapTableName(rc.actx, rc.opCode, + rc.origTableName); + } + } + /** * Check authorization for WorkRequest operation */ protected abstract AccessContext checkWorkRequestAccess(FullHttpRequest request, OpCode opCode, + OpCode[] authorizeOpCodes, + boolean shouldAuthorizeAllOps, String compartmentId, String workRequestId, LogContext lc); + /** + * Check authorization for Configuration operation + */ + protected abstract AccessContext + checkConfigurationAccess(FullHttpRequest request, + OpCode opCode, + OpCode[] authorizeOps, + String compartmentId, + LogContext lc); /** * Default - nothing to do, return the original error message. @@ -969,6 +1049,9 @@ private static boolean opHasCompartmentIdInUrl(OpCode op) { case PREPARE: case SUMMARIZE: case LIST_WORKREQUESTS: + case GET_CONFIGURATION: + case UPDATE_CONFIGURATION: + case REMOVE_CONFIG_KMS_KEY: return true; default: return false; @@ -2351,7 +2434,8 @@ private QueryStatementResultImpl doQuery(RequestContext rc, queryVersion, 0, /* maxServerMemory*/ actx.getAuthString(), - lc); + lc, + null /* rowMetadata */); if (!isPrepared) { PrepareCB cbInfo = getStatementInfo(actx, statement, true); String ns = (actx.getNamespace() != null ? @@ -2522,13 +2606,13 @@ private void handleGetWorkRequest(FullHttpResponse response, GetWorkRequestResponse res = TableUtils.getWorkRequest(actx, workRequestId, tm, rc.lc); - if (!res.getSuccess()) { + if (!res.getSuccess()) { throw new RequestException(res.getErrorCode(), res.getErrorString()); } /* Build response */ - String workRequest = buildWorkRequest(res.getDdlEntry()); + String workRequest = buildWorkRequest(res.getWorkRequest()); buildResponse(response, workRequest); markTMOpSucceeded(rc, 1 /* TM operation count*/); @@ -2544,11 +2628,10 @@ private void handleListWorkRequests(FullHttpResponse response, /* Query parameter */ int limit = readLimit(rc.restParams); String page = readPage(rc.restParams); - int startIndex = (page == null) ?
0 : parseStartIndexPageToken(page); /* List WorkRequests */ ListWorkRequestResponse resp = TableUtils.listWorkRequests(actx, - startIndex, + page, limit, tm, rc.lc); @@ -2558,13 +2641,10 @@ private void handleListWorkRequests(FullHttpResponse response, } /* build response */ - DdlHistoryEntry[] ddlEntries = resp.getWorkRequestInfos(); - String tc = buildWorkRequestCollection(ddlEntries); - - boolean hasMore = (ddlEntries.length == limit); - String nextPageToken = hasMore ? - generateLastIndexPageToken(resp.getLastIndexReturned()) : null; - buildPaginatedResponse(response, tc, nextPageToken); + WorkRequest[] requests = resp.getWorkRequests(); + buildPaginatedResponse(response, + buildWorkRequestCollection(requests), + resp.getNextPageToken()); markTMOpSucceeded(rc, 1 /* TM operation count*/); } @@ -2572,11 +2652,10 @@ private void handleListWorkRequests(FullHttpResponse response, /** * List work request errors */ - private void handleListWorkRequestErrors( - FullHttpResponse response, - RequestContext rc, - AccessContext actx, - String apiVersion) { + private void handleListWorkRequestErrors(FullHttpResponse response, + RequestContext rc, + AccessContext actx, + String apiVersion) { /* Path parameter */ String workRequestId = readWorkRequestId(rc.restParams); GetWorkRequestResponse res = TableUtils.getWorkRequest(actx, @@ -2588,7 +2667,7 @@ private void handleListWorkRequestErrors( } /* Build response */ - String info = buildWorkRequestErrors(res.getDdlEntry()); + String info = buildWorkRequestErrors(res.getWorkRequest()); buildResponse(response, info); markTMOpSucceeded(rc, 1 /* TM operation count*/); } @@ -2596,11 +2675,10 @@ private void handleListWorkRequestErrors( /** * List work request logs */ - private void handleListWorkRequestLogs( - FullHttpResponse response, - RequestContext rc, - AccessContext actx, - String apiVersion) { + private void handleListWorkRequestLogs(FullHttpResponse response, + RequestContext rc, + AccessContext actx, + String apiVersion) { /* Path parameter */ String workRequestId = readWorkRequestId(rc.restParams); @@ -2613,7 +2691,7 @@ private void handleListWorkRequestLogs( } /* Build response */ - String info = buildWorkRequestLogs(res.getDdlEntry()); + String info = buildWorkRequestLogs(res.getWorkRequest()); buildResponse(response, info); markTMOpSucceeded(rc, 1 /* TM operation count*/); @@ -2662,6 +2740,39 @@ protected void handleDropReplica(FullHttpResponse response, "DropReplica operation is not supported"); } + /** + * Get configuration + */ + protected void handleGetConfiguration(FullHttpResponse response, + RequestContext rc, + AccessContext actx, + String apiVersion) { + throw new RequestException(OPERATION_NOT_SUPPORTED, + "GetConfiguration is not supported"); + } + + /** + * UpdateConfigration + */ + protected void handleUpdateConfiguration(FullHttpResponse response, + RequestContext rc, + AccessContext actx, + String apiVersion) { + throw new RequestException(OPERATION_NOT_SUPPORTED, + "UpdateConfiguration is not supported"); + } + + /** + * RemoveCmek + */ + protected void handleRemoveKmsKey(FullHttpResponse response, + RequestContext rc, + AccessContext actx, + String apiVersion) { + throw new RequestException(OPERATION_NOT_SUPPORTED, + "RemoveKmsKey is not supported"); + } + private static byte[] parseQueryPageToken(String pageToken) { if (pageToken != null) { try { @@ -3025,8 +3136,8 @@ private static FieldValue createKeyFieldValue(String name, } } - private static void buildWorkRequestIdResponse(FullHttpResponse resp, - String workRequestId) { 
+ protected static void buildWorkRequestIdResponse(FullHttpResponse resp, + String workRequestId) { resp.setStatus(HttpResponseStatus.ACCEPTED); resp.headers().set(OPC_WORK_REQUEST_ID, workRequestId); } @@ -3039,9 +3150,9 @@ private static void buildResponse(FullHttpResponse resp, String info) { resp.headers().setInt(CONTENT_LENGTH, payload.readableBytes()); } - private static void buildTaggedResponse(FullHttpResponse resp, - String info, - byte[] etag) { + protected static void buildTaggedResponse(FullHttpResponse resp, + String info, + byte[] etag) { buildResponse(resp, info); if (etag != null) { resp.headers().set(ETAG, encodeBase64(etag)); @@ -3115,7 +3226,7 @@ private static ErrorCode getErrorCode(int errorCode) { if (errorMap.containsKey(errorCode)) { return errorMap.get(errorCode); } - return null; + return ErrorCode.UNKNOWN_ERROR; } /* @@ -3205,63 +3316,54 @@ private static Map createErrorCodeMap() { * TODO: perhaps do a hard-close on the connection in this path or take * other action if an attack is suspected. */ - protected FullHttpResponse invalidRequest(ChannelHandlerContext ctx, - HttpHeaders headers, - String msg, - int errorCode, - CharSequence requestId, - LogContext lc) { + protected FullHttpResponse invalidRequest(RequestContext rc, + String message, + int errorCode) { - final CharSequence realIp = headers.get(X_REAL_IP_HEADER); - final CharSequence forwardedFor = headers.get(X_FORWARDED_FOR_HEADER); - final String remoteAddr = ctx.channel().remoteAddress().toString(); + if (logger.isLoggable(Level.FINE, rc.lc)) { + CharSequence realIp = rc.headers.get(X_REAL_IP_HEADER); + CharSequence forwardedFor = rc.headers.get(X_FORWARDED_FOR_HEADER); + String remoteAddr = rc.ctx.channel().remoteAddress().toString(); + + StringBuilder sb = new StringBuilder(); + sb.append(message); + sb.append(", remote address=").append(remoteAddr); + if (realIp != null) { + sb.append(", ").append(X_REAL_IP_HEADER).append("=").append(realIp); + } + if (forwardedFor != null) { + sb.append(", ").append(X_FORWARDED_FOR_HEADER).append("=") + .append(forwardedFor); + } - StringBuilder sb = new StringBuilder(); - sb.append(msg); - sb.append(", remote address=").append(remoteAddr); - if (realIp != null) { - sb.append(", ").append(X_REAL_IP_HEADER).append("=").append(realIp); + logger.fine(sb.toString(), rc.lc); } - if (forwardedFor != null) { - sb.append(", ").append(X_FORWARDED_FOR_HEADER).append("=") - .append(forwardedFor); - } - - logger.fine(sb.toString(), lc); - return createErrorResponse(ctx, msg, getErrorCode(errorCode), requestId); + return createErrorResponse(rc, message, getErrorCode(errorCode)); } - private static FullHttpResponse createErrorResponse( - ChannelHandlerContext ctx, - String errorMessage, - ErrorCode errorCode, - CharSequence requestId) { - - ByteBuf payload = ctx.alloc().directBuffer(); - return createErrorResponse(payload, errorMessage, errorCode, requestId); - } + private static FullHttpResponse createErrorResponse(RequestContext rc, + String errorMessage, + ErrorCode errorCode) { - private static FullHttpResponse createErrorResponse( - ByteBuf payload, - String errorMessage, - ErrorCode errorCode, - CharSequence requestId) { + rc.resetOutputBuffer(); - FullHttpResponse resp = new DefaultFullHttpResponse(HTTP_1_1, - errorCode.getHttpStatusCode(), payload); + FullHttpResponse resp = + new DefaultFullHttpResponse(HTTP_1_1, + errorCode.getHttpStatusCode(), + rc.bbos.getByteBuf()); String body = JsonBuilder.create() .append("code", errorCode.getErrorCode()) .append("message", 
errorMessage) .toString(); - payload.writeCharSequence(body, UTF_8); + rc.bbos.getByteBuf().writeCharSequence(body, UTF_8); HttpHeaders headers = resp.headers(); headers.set(CONTENT_TYPE, APPLICATION_JSON_NOCHARSET) - .setInt(CONTENT_LENGTH, payload.readableBytes()) - .set(ACCESS_CONTROL_ALLOW_ORIGIN, "*") - .set(OPC_REQUEST_ID, requestId); + .setInt(CONTENT_LENGTH, rc.bbos.getByteBuf().readableBytes()) + .set(ACCESS_CONTROL_ALLOW_ORIGIN, "*") + .set(OPC_REQUEST_ID, rc.requestId); return resp; } @@ -3282,8 +3384,8 @@ protected static String readCompartmentId(RequestParams request) { return readCompartmentId(request, false /* required */); } - private static String readCompartmentId(RequestParams request, - boolean required) { + protected static String readCompartmentId(RequestParams request, + boolean required) { String compartmentId = request.getQueryParamAsString(COMPARTMENT_ID); if (required) { checkNotNullEmpty(COMPARTMENT_ID, compartmentId); @@ -3429,6 +3531,10 @@ private static String readWorkRequestId(RequestParams request) { return workRequestId; } + protected static boolean readDryRun(RequestParams request) { + return Boolean.valueOf(request.getHeaderAsString(IS_OPC_DRY_RUN)); + } + private Consistency getConsistency(RequestParams request) { String consistency = request.getQueryParamAsString(CONSISTENCY); checkNotEmpty(CONSISTENCY, consistency); diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/rest/UrlInfo.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/rest/UrlInfo.java index 4401d552..04fb3153 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/rest/UrlInfo.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/rest/UrlInfo.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. * * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/rest/cloud/CloudRestDataService.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/rest/cloud/CloudRestDataService.java index 5a5694f7..c3e7caab 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/rest/cloud/CloudRestDataService.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/rest/cloud/CloudRestDataService.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. 
* * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: @@ -19,11 +19,21 @@ import static oracle.nosql.proxy.protocol.HttpConstants.AUTHORIZATION; import static oracle.nosql.proxy.protocol.JsonProtocol.COMPARTMENT_ID; import static oracle.nosql.proxy.protocol.JsonProtocol.CREATE_REPLICA_DETAILS; +import static oracle.nosql.proxy.protocol.JsonProtocol.ENVIRONMENT; import static oracle.nosql.proxy.protocol.JsonProtocol.FREE_TIER_SYS_TAGS; +import static oracle.nosql.proxy.protocol.JsonProtocol.HOSTED_ENVIRONMENT; +import static oracle.nosql.proxy.protocol.JsonProtocol.ID; +import static oracle.nosql.proxy.protocol.JsonProtocol.KMS_KEY; +import static oracle.nosql.proxy.protocol.JsonProtocol.KMS_KEY_STATE; +import static oracle.nosql.proxy.protocol.JsonProtocol.KMS_VAULT_ID; import static oracle.nosql.proxy.protocol.JsonProtocol.MAX_READ_UNITS; import static oracle.nosql.proxy.protocol.JsonProtocol.MAX_WRITE_UNITS; import static oracle.nosql.proxy.protocol.JsonProtocol.REGION; import static oracle.nosql.proxy.protocol.JsonProtocol.REST_CURRENT_VERSION; +import static oracle.nosql.proxy.protocol.JsonProtocol.TIME_CREATED; +import static oracle.nosql.proxy.protocol.JsonProtocol.TIME_UPDATED; +import static oracle.nosql.proxy.protocol.JsonProtocol.UPDATE_CONFIGURATION_DETAILS; +import static oracle.nosql.proxy.protocol.JsonProtocol.buildConfiguration; import static oracle.nosql.proxy.protocol.JsonProtocol.checkNonNegativeInt; import static oracle.nosql.proxy.protocol.JsonProtocol.checkNotEmpty; import static oracle.nosql.proxy.protocol.JsonProtocol.checkNotNull; @@ -46,21 +56,26 @@ import oracle.nosql.common.sklogger.SkLogger; import oracle.nosql.proxy.Config; import oracle.nosql.proxy.MonitorStats; +import oracle.nosql.proxy.RequestException; import oracle.nosql.proxy.audit.ProxyAuditManager; import oracle.nosql.proxy.cloud.CloudDataService; import oracle.nosql.proxy.filter.FilterHandler; import oracle.nosql.proxy.filter.FilterHandler.Filter; +import oracle.nosql.proxy.protocol.JsonProtocol.JsonObject; import oracle.nosql.proxy.protocol.JsonProtocol.JsonPayload; import oracle.nosql.proxy.protocol.Protocol.OpCode; import oracle.nosql.proxy.rest.RequestParams; import oracle.nosql.proxy.rest.RestDataService; +import oracle.nosql.proxy.sc.GetKmsKeyInfoResponse; import oracle.nosql.proxy.sc.GetTableResponse; import oracle.nosql.proxy.sc.TableUtils; import oracle.nosql.proxy.sc.TableUtils.PrepareCB; import oracle.nosql.proxy.sc.TenantManager; +import oracle.nosql.proxy.sc.WorkRequestIdResponse; import oracle.nosql.proxy.security.AccessChecker; import oracle.nosql.proxy.security.AccessContext; import oracle.nosql.proxy.util.ErrorManager; +import oracle.nosql.util.tmi.KmsKeyInfo; import oracle.nosql.util.tmi.TableLimits; public class CloudRestDataService extends RestDataService { @@ -96,21 +111,15 @@ public void flushTableCache(String tableName) { * Validate the request. 
*/ @Override - protected FullHttpResponse validateHttpRequest(FullHttpRequest request, - String requestId, - ChannelHandlerContext ctx, - LogContext lc) { - final HttpHeaders headers = request.headers(); + protected FullHttpResponse validateHttpRequest(RequestContext rc) { + final HttpHeaders headers = rc.headers; final CharSequence auth = headers.get(AUTHORIZATION); if (auth == null) { - return invalidRequest(ctx, - headers, + return invalidRequest(rc, "Authorization header is missing", - INVALID_AUTHORIZATION, // error code - requestId, - lc); + INVALID_AUTHORIZATION); } - return super.validateHttpRequest(request, requestId, ctx, lc); + return super.validateHttpRequest(rc); } @Override @@ -180,6 +189,8 @@ protected AccessContext checkAccess(FullHttpRequest request, @Override protected AccessContext checkWorkRequestAccess(FullHttpRequest request, OpCode opCode, + OpCode[] authorizeOps, + boolean shouldAuthorizeAllOps, String compartmentId, String workRequestId, LogContext lc) { @@ -188,6 +199,8 @@ protected AccessContext checkWorkRequestAccess(FullHttpRequest request, request.uri(), request.headers(), opCode, + authorizeOps, + shouldAuthorizeAllOps, compartmentId, workRequestId, TableUtils.getPayload(request), @@ -198,6 +211,27 @@ protected AccessContext checkWorkRequestAccess(FullHttpRequest request, return actx; } + @Override + protected + AccessContext checkConfigurationAccess(FullHttpRequest request, + OpCode opCode, + OpCode[] authorizedOps, + String compartmentId, + LogContext lc) { + AccessContext actx = + ac.checkConfigurationAccess(request.method(), + request.uri(), + request.headers(), + opCode, + authorizedOps, + compartmentId, + TableUtils.getPayload(request), + this, + lc); + updateLogContext(lc, actx, opCode); + return actx; + } + @Override protected String mapTableName(AccessContext actx, OpCode op, String name) { String mapTableName = actx.getMapTableName(name); @@ -466,6 +500,190 @@ private static String readRegion(RequestParams request) { return idxName; } + /** + * Get configuration + * + * GET /configuration?compartmentId= + */ + @Override + protected void handleGetConfiguration(FullHttpResponse response, + RequestContext rc, + AccessContext actx, + String apiVersion) { + + GetKmsKeyInfoResponse resp = TableUtils.getKmsKeyInfo(actx, tm, rc.lc); + if (!resp.getSuccess()) { + throw new RequestException(resp.getErrorCode(), + resp.getErrorString()); + } + + KmsKeyInfo key = resp.getKeyInfo(); + if (logger.isLoggable(Level.FINE, rc.lc)) { + logger.fine("GetConfiguration: tenant=" + actx.getTenantId() + + ", kmsKeyInfo=" + key.toString(), rc.lc); + } + + String keyInfo = buildConfiguration(key); + buildTaggedResponse(response, keyInfo, key.getETag()); + + markTMOpSucceeded(rc, 1 /* TM operation count*/); + } + + /** + * Update Configuration + * + * Currently, only updating kms key is supported. 
+ * + * URL: + * PUT /configuration?compartmentId= + * + * Header parameters: + * - if-match + * - is-opc-dry-run + * + * Payload: + * { + * "environment":, + * "kmsKey": { + * "id": , + * "kmsVaultId": + * } + * } + */ + @Override + protected void handleUpdateConfiguration(FullHttpResponse response, + RequestContext rc, + AccessContext actx, + String apiVersion) { + /* Query parameter */ + String compartmentId = readCompartmentId(rc.restParams, true); + + /* Header parameters */ + byte[] ifMatch = readIfMatch(rc.restParams); + boolean dryRun = readDryRun(rc.restParams); + + checkNotNull(UPDATE_CONFIGURATION_DETAILS, rc.restParams.getPayload()); + + boolean isHostedEnv = false; + String kmsKeyId = null; + String kmsVaultId = null; + + JsonPayload pl = null; + try { + pl = rc.restParams.parsePayload(); + while (pl.hasNext()) { + if (pl.isField(ENVIRONMENT)) { + isHostedEnv = HOSTED_ENVIRONMENT.equals(pl.readString()); + } else if (pl.isField(KMS_KEY)) { + JsonObject jo = pl.readObject(); + if (jo != null) { + while (jo.hasNext()) { + if (jo.isField(ID)) { + kmsKeyId = jo.readString(); + } else if (jo.isField(KMS_VAULT_ID)) { + kmsVaultId = jo.readString(); + } else if (jo.isField(KMS_KEY_STATE) || + jo.isField(TIME_CREATED) || + jo.isField(TIME_UPDATED)) { + jo.readString(); + } else { + throw new IllegalArgumentException( + "Unexpected field of KmsKey: " + + jo.getCurrentField()); + } + } + } + } else { + handleUnknownField(UPDATE_CONFIGURATION_DETAILS, pl); + } + } + } catch (IOException ioe) { + throw new IllegalArgumentException( + "Invalid payload for UpdateConfiguration request: " + + ioe.getMessage()); + } finally { + if (pl != null) { + pl.close(); + } + } + + if (!isHostedEnv) { + throw new IllegalArgumentException( + "Configuration for non Hosted Environment is not supported"); + } + if (kmsKeyId == null || kmsKeyId.isBlank()) { + throw new IllegalArgumentException( + "The kms key Id must not be null or empty"); + } + + /* + * Only one kind of configuration can be updated in a request. This is + * because changing configurations can be handled by different sub + * services, handling a mix of successes and failures adds complexity. + * + * TODO: If multiple configurations are supported in future, check only + * one configuration can be updated at a time. + */ + WorkRequestIdResponse resp = + TableUtils.updateKmsKey(actx, tm, compartmentId, kmsKeyId, + kmsVaultId, ifMatch, dryRun, rc.lc, + rc.request, ac, this, + this::updateLogContext); + if (!resp.getSuccess()) { + throw new RequestException(resp.getErrorCode(), + resp.getErrorString()); + } + + if (logger.isLoggable(Level.FINE, rc.lc)) { + logger.fine("handleUpdateConfiguration: workRequestId: " + + resp.getWorkRequestId(), rc.lc); + } + + /* build response */ + buildWorkRequestIdResponse(response, resp.getWorkRequestId()); + + markTMOpSucceeded(rc, 1 /* TM operation count*/); + } + + /** + * Remove kms key + * + * URL: + * POST /configuration/actions/unassignkmskey? 
+ * compartmentId= + * + * Header parameters: + * - if-match + * - is-opc-dry-run + */ + @Override + protected void handleRemoveKmsKey(FullHttpResponse response, + RequestContext rc, + AccessContext actx, + String apiVersion) { + + /* Header parameters */ + byte[] ifMatch = readIfMatch(rc.restParams); + boolean dryRun = readDryRun(rc.restParams); + + WorkRequestIdResponse resp = TableUtils.removeKmsKey(actx, tm, ifMatch, + dryRun, rc.lc); + if (!resp.getSuccess()) { + throw new RequestException(resp.getErrorCode(), + resp.getErrorString()); + } + + if (logger.isLoggable(Level.FINE, rc.lc)) { + logger.fine("handleRemoveKmsKey: workRequestId " + + resp.getWorkRequestId(), rc.lc); + } + + /* build response */ + buildWorkRequestIdResponse(response, resp.getWorkRequestId()); + + markTMOpSucceeded(rc, 1 /* TM operation count*/); + } + /* * return true if response indicates an error that should * be part of error rate limiting. diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/CommonResponse.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/CommonResponse.java index 74850933..da7ffe8b 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/CommonResponse.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/CommonResponse.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. * * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/GetDdlWorkRequestResponse.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/GetDdlWorkRequestResponse.java new file mode 100644 index 00000000..69d9477c --- /dev/null +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/GetDdlWorkRequestResponse.java @@ -0,0 +1,60 @@ +/*- + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle NoSQL + * Database made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/nosqldb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle NoSQL Database for a copy of the license and + * additional information. + */ + +package oracle.nosql.proxy.sc; + +import oracle.nosql.common.json.JsonUtils; +import oracle.nosql.util.fault.ErrorResponse; +import oracle.nosql.util.tmi.DdlHistoryEntry; + +/** + * Response for a TenantManager getWorkRequest operation, when the work request + * that has been retrieved is for a DDL request + */ +public class GetDdlWorkRequestResponse extends CommonResponse { + private final DdlHistoryEntry workRequestInfo; + + public GetDdlWorkRequestResponse(int httpResponse, + DdlHistoryEntry workRequestInfo) { + super(httpResponse); + this.workRequestInfo = workRequestInfo; + } + + public GetDdlWorkRequestResponse(ErrorResponse err) { + super(err); + workRequestInfo = null; + } + + /** + * Returns a DdlHistoryEntry object describing the table on success, null + * on failure. 
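The handleUpdateConfiguration and handleRemoveKmsKey handlers shown earlier both read an if-match ETag and an is-opc-dry-run flag before delegating to the tenant manager. The sketch below illustrates that optimistic-concurrency idea in isolation; the in-memory state and field names are hypothetical, and in the real service the precondition check happens on the SC side rather than in the proxy.

import java.util.Arrays;

public class EtagUpdateSketch {

    /* Hypothetical in-memory configuration state. */
    private byte[] currentEtag = {1, 2, 3};
    private String kmsKeyId;

    /*
     * Apply the update only if the caller's ETag still matches the current
     * one; with dryRun set, validate the precondition but change nothing.
     */
    boolean updateKmsKey(String newKeyId, byte[] ifMatch, boolean dryRun) {
        if (ifMatch != null && !Arrays.equals(ifMatch, currentEtag)) {
            throw new IllegalStateException(
                "ETag mismatch: configuration was changed concurrently");
        }
        if (dryRun) {
            return false;                      /* nothing applied */
        }
        kmsKeyId = newKeyId;
        currentEtag = new byte[] {4, 5, 6};    /* new configuration version */
        return true;
    }

    public static void main(String[] args) {
        EtagUpdateSketch cfg = new EtagUpdateSketch();
        byte[] etag = {1, 2, 3};
        System.out.println(cfg.updateKmsKey("ocid1.key.fake", etag, true));
        System.out.println(cfg.updateKmsKey("ocid1.key.fake", etag, false));
        System.out.println(cfg.kmsKeyId);
    }
}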
+ */ + public DdlHistoryEntry getDdlEntry() { + return workRequestInfo; + } + + @Override + public String successPayload() { + try { + return JsonUtils.print(workRequestInfo); + } catch (IllegalArgumentException iae) { + return ("Error serializing payload: " + iae.getMessage()); + } + } + + @Override + public String toString() { + return "GetDdlWorkRequestResponse [workRequestInfo=" + workRequestInfo + + ", toString()=" + super.toString() + "]"; + } +} diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/GetKmsKeyInfoResponse.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/GetKmsKeyInfoResponse.java new file mode 100644 index 00000000..6e122783 --- /dev/null +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/GetKmsKeyInfoResponse.java @@ -0,0 +1,55 @@ +/*- + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle NoSQL + * Database made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/nosqldb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle NoSQL Database for a copy of the license and + * additional information. + */ + +package oracle.nosql.proxy.sc; + +import oracle.nosql.common.json.JsonUtils; +import oracle.nosql.util.fault.ErrorResponse; +import oracle.nosql.util.tmi.KmsKeyInfo; + +/** + * Response to a TenantManager getKmsKey operation. + */ +public class GetKmsKeyInfoResponse extends CommonResponse { + + private final KmsKeyInfo keyInfo; + + GetKmsKeyInfoResponse(KmsKeyInfo keyInfo, int httpResponse) { + super(httpResponse); + this.keyInfo = keyInfo; + } + + public GetKmsKeyInfoResponse(ErrorResponse err) { + super(err); + keyInfo = null; + } + + public KmsKeyInfo getKeyInfo() { + return keyInfo; + } + + @Override + public String successPayload() { + try { + return JsonUtils.print(keyInfo); + } catch (IllegalArgumentException iae) { + return ("Error serializing payload: " + iae.getMessage()); + } + } + + @Override + public String toString() { + return "ConfigurationResponse [keyinfo=" + keyInfo + ", toString()=" + + super.toString() + "]"; + } +} diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/GetStoreResponse.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/GetStoreResponse.java index 3db2b02d..08149f56 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/GetStoreResponse.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/GetStoreResponse.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. * * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/GetTableResponse.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/GetTableResponse.java index 9e23b79c..bdc66597 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/GetTableResponse.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/GetTableResponse.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. 
* * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/GetWorkRequestResponse.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/GetWorkRequestResponse.java index fb9ab6d5..e4e5061f 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/GetWorkRequestResponse.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/GetWorkRequestResponse.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. * * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: @@ -15,37 +15,35 @@ import oracle.nosql.common.json.JsonUtils; import oracle.nosql.util.fault.ErrorResponse; -import oracle.nosql.util.tmi.DdlHistoryEntry; +import oracle.nosql.util.tmi.WorkRequest; /** * Response to a TenantManager getWorkRequest operation. */ public class GetWorkRequestResponse extends CommonResponse { - private final DdlHistoryEntry workRequestInfo; + private final WorkRequest workRequest; - public GetWorkRequestResponse(int httpResponse, - DdlHistoryEntry workRequestInfo) { + public GetWorkRequestResponse(int httpResponse, WorkRequest workRequest) { super(httpResponse); - this.workRequestInfo = workRequestInfo; + this.workRequest = workRequest; } public GetWorkRequestResponse(ErrorResponse err) { super(err); - workRequestInfo = null; + workRequest = null; } /** - * Returns a DdlHistoryEntry object describing the table on success, null - * on failure. + * Returns a WorkRequest object */ - public DdlHistoryEntry getDdlEntry() { - return workRequestInfo; + public WorkRequest getWorkRequest() { + return workRequest; } @Override public String successPayload() { try { - return JsonUtils.print(workRequestInfo); + return JsonUtils.print(workRequest); } catch (IllegalArgumentException iae) { return ("Error serializing payload: " + iae.getMessage()); } @@ -53,7 +51,7 @@ public String successPayload() { @Override public String toString() { - return "GetWorkRequestResponse [workRequestInfo=" + workRequestInfo + + return "GetWorkRequestResponse [workRequest=" + workRequest + ", toString()=" + super.toString() + "]"; } } diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/IndexResponse.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/IndexResponse.java index bae2276f..1032f827 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/IndexResponse.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/IndexResponse.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. * * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/ListRuleResponse.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/ListRuleResponse.java index 7ffee67f..f668f216 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/ListRuleResponse.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/ListRuleResponse.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. 
* * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/ListTableInfoResponse.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/ListTableInfoResponse.java index e0efbaaa..380dd01a 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/ListTableInfoResponse.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/ListTableInfoResponse.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. * * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/ListTableResponse.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/ListTableResponse.java index 6c5bd498..7e59ff80 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/ListTableResponse.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/ListTableResponse.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. * * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/ListWorkRequestResponse.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/ListWorkRequestResponse.java index 7e63c7c3..3d1446f9 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/ListWorkRequestResponse.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/ListWorkRequestResponse.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. * * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: @@ -15,38 +15,42 @@ import oracle.nosql.common.json.JsonUtils; import oracle.nosql.util.fault.ErrorResponse; -import oracle.nosql.util.tmi.DdlHistoryEntry; +import oracle.nosql.util.tmi.WorkRequest; /** * Response to a TenantManager listWorkRequests operation. 
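The ListWorkRequestResponse class below replaces the old lastIndexReturned field with an opaque nextPageToken. A minimal client-side paging loop over such a token might look like the following sketch, where the Page record is a hypothetical stand-in for the real response type:

import java.util.ArrayList;
import java.util.List;
import java.util.function.BiFunction;

public class PageTokenSketch {

    /* Hypothetical page of results: the items plus the next-page token. */
    record Page(List<String> items, String nextPageToken) {}

    /* Follow nextPageToken until the service returns null: no more pages. */
    static List<String> listAll(BiFunction<String, Integer, Page> listPage,
                                int limit) {
        List<String> all = new ArrayList<>();
        String token = null;
        do {
            Page page = listPage.apply(token, limit);
            all.addAll(page.items());
            token = page.nextPageToken();
        } while (token != null);
        return all;
    }

    public static void main(String[] args) {
        /* Fake backend that serves two pages of work-request ids. */
        List<String> all = listAll(
            (token, limit) -> token == null
                ? new Page(List.of("wr-1", "wr-2"), "page-2")
                : new Page(List.of("wr-3"), null),
            2);
        System.out.println(all);   /* [wr-1, wr-2, wr-3] */
    }
}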
*/ public class ListWorkRequestResponse extends CommonResponse { - private final DdlHistoryEntry[] workRequestInfos; - private final int lastIndexReturned; + private final WorkRequest[] workRequests; + private final String nextPageToken; public ListWorkRequestResponse(int httpResponse, - DdlHistoryEntry[] workRequestInfos, - int lastIndexReturned) { + WorkRequest[] workRequests, + String nextPageToken) { super(httpResponse); - this.workRequestInfos = workRequestInfos; - this.lastIndexReturned = lastIndexReturned; + this.workRequests = workRequests; + this.nextPageToken = nextPageToken; } public ListWorkRequestResponse(ErrorResponse err) { super(err); - workRequestInfos = null; - lastIndexReturned = 0; + workRequests = null; + nextPageToken = null; } /** - * Returns a list of DdlHistoryEntry, or null on failure + * Returns an array of WorkRequest, or null on failure */ - public DdlHistoryEntry[] getWorkRequestInfos() { - return workRequestInfos; + public WorkRequest[] getWorkRequests() { + return workRequests; } - public int getLastIndexReturned() { - return lastIndexReturned; + /** + * Returns the starting point for retrieving next batch of results, or null + * on failure + */ + public String getNextPageToken() { + return nextPageToken; } /** @@ -60,8 +64,10 @@ public String successPayload() { try { StringBuilder sb = new StringBuilder(); sb.append("{\"workRequests\": "); - sb.append(JsonUtils.prettyPrint(workRequestInfos)).append(","); - sb.append("\"lastIndex\": ").append(lastIndexReturned).append("}"); + sb.append(JsonUtils.prettyPrint(workRequests)).append(","); + sb.append("\"nextPageToken\": ") + .append(nextPageToken) + .append("}"); return sb.toString(); } catch (IllegalArgumentException iae) { return ("Error serializing payload: " + iae.getMessage()); @@ -71,18 +77,18 @@ public String successPayload() { @Override public String toString() { StringBuilder sb = new StringBuilder(); - sb.append("ListWorkRequestResponse [tableInfos=["); - if (workRequestInfos == null) { - sb.append("null"); - } else { - for (int i = 0; i < workRequestInfos.length; i++) { - sb.append(workRequestInfos[i].toString()); - if (i < (workRequestInfos.length - 1)) { + sb.append("ListWorkRequestResponse [workRequests=["); + if (workRequests != null) { + for (int i = 0; i < workRequests.length; i++) { + sb.append(workRequests[i].toString()); + if (i < (workRequests.length - 1)) { sb.append(","); } } } - sb.append("]]"); + sb.append("], nextPageToken=") + .append(nextPageToken) + .append("]"); return sb.toString(); } } diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/LocalTenantManager.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/LocalTenantManager.java index 11eb804b..18423f6d 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/LocalTenantManager.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/LocalTenantManager.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. 
* * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: @@ -90,9 +90,12 @@ import oracle.nosql.util.tmi.TableInfo; import oracle.nosql.util.tmi.TableInfo.ActivityPhase; import oracle.nosql.util.tmi.TableInfo.TableState; +import oracle.nosql.util.tmi.WorkRequest.EntityType; +import oracle.nosql.util.tmi.WorkRequest.OperationType; import oracle.nosql.util.tmi.TableLimits; import oracle.nosql.util.tmi.TableUsage; import oracle.nosql.util.tmi.TenantLimits; +import oracle.nosql.util.tmi.WorkRequest; /* * A TenantManager instance that creates and uses a KVStore instance @@ -1414,10 +1417,10 @@ public String getWorkRequestId(TableInfo ti, OpCode opCode) { } @Override - public GetWorkRequestResponse getWorkRequest(AccessContext actx, - String workRequestId, - boolean internal, - LogContext lc) { + public GetDdlWorkRequestResponse getDdlWorkRequest(AccessContext actx, + String workRequestId, + boolean internal, + LogContext lc) { /* Path parameter */ WorkRequestId workReqId = WorkRequestId.fromString(workRequestId); @@ -1484,7 +1487,7 @@ public GetWorkRequestResponse getWorkRequest(AccessContext actx, false /* autoReclaimable */, null /* limits */, null /* retryToken */); - return new GetWorkRequestResponse(200, ddlEntry); + return new GetDdlWorkRequestResponse(200, ddlEntry); } private DdlHistoryEntry.DdlOp mapDdlOp(OpCode op) { @@ -1505,6 +1508,93 @@ private DdlHistoryEntry.DdlOp mapDdlOp(OpCode op) { } } + @Override + public GetWorkRequestResponse getWorkRequest(AccessContext actx, + String workRequestId, + boolean internal, + LogContext lc) { + + /* Path parameter */ + WorkRequestId workReqId = WorkRequestId.fromString(workRequestId); + + /* + * Check access again since compartmentId and tableName are + * extracted from workRequestId until now. + */ + WorkRequest.ActionType actionType = WorkRequest.ActionType.IN_PROGRESS; + WorkRequest.Status status = WorkRequest.Status.IN_PROGRESS; + Timestamp createTime = new Timestamp(workReqId.getAcceptedTime()); + + ErrorCode errorCode = null; + String errorMsg = null; + if (actx != null) { + actx.setCompartmentId(workReqId.getCompartmentId()); + GetTableResponse res = getTable(actx, + workReqId.getTableName(), + workReqId.getOperationId(), + false, + lc); + if (res.getSuccess()) { + final TableInfo ti = res.getTableInfo(); + status = (ti.getStateEnum() == TableState.ACTIVE || + ti.getStateEnum() == TableState.DROPPED)? 
+ WorkRequest.Status.SUCCEEDED : + WorkRequest.Status.IN_PROGRESS; + if (workReqId.getOpCode() == OpCode.CREATE_TABLE) { + actionType = WorkRequest.ActionType.CREATED; + } else if (workReqId.getOpCode() == OpCode.DROP_TABLE) { + actionType = WorkRequest.ActionType.DELETED; + } + } else { + if (workReqId.getOpCode() == OpCode.DROP_TABLE && + res.getErrorCode() == TABLE_NOT_FOUND) { + status = WorkRequest.Status.SUCCEEDED; + actionType = WorkRequest.ActionType.DELETED; + } else { + status = WorkRequest.Status.FAILED; + errorCode = ErrorCode.values()[res.getErrorCode()]; + errorMsg = res.getErrorString(); + } + } + } + + String identifier = NameUtils.makeQualifiedName( + workReqId.getCompartmentId(), + workReqId.getTableName()); + WorkRequest workRequest = + new WorkRequest(workRequestId, + mapOperationType(workReqId.getOpCode()), + status, + workReqId.getCompartmentId(), + identifier, + workReqId.getTableName(), + EntityType.TABLE, + null /* tags */, + actionType, + createTime.getTime(), + 0 /* timeStarted */, + 0 /* timeUpdated */, + errorCode, + errorMsg); + return new GetWorkRequestResponse(200, workRequest); + } + + private OperationType mapOperationType(OpCode op) { + switch(op) { + case CREATE_TABLE: + return OperationType.CREATE_TABLE; + case DROP_TABLE: + return OperationType.DELETE_TABLE; + case ALTER_TABLE: + case CREATE_INDEX: + case DROP_INDEX: + return OperationType.UPDATE_TABLE; + default: + throw new IllegalArgumentException( + "Unexpected OpCode for mapOperationType: " + op); + } + } + @Override public ReplicaStatsResponse getReplicaStats(AccessContext actx, String tableName, diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/ReplicaStatsResponse.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/ReplicaStatsResponse.java index b673b203..edeee11f 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/ReplicaStatsResponse.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/ReplicaStatsResponse.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. * * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/SCTenantManager.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/SCTenantManager.java index 8abeb668..65a6ebcb 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/SCTenantManager.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/SCTenantManager.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. 
* * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: @@ -17,7 +17,11 @@ import static oracle.nosql.proxy.protocol.HttpConstants.COMPARTMENT_ID; import static oracle.nosql.proxy.protocol.HttpConstants.CROSS_REGION_DDL; import static oracle.nosql.proxy.protocol.HttpConstants.DML_MS; +import static oracle.nosql.proxy.protocol.HttpConstants.DRY_RUN; import static oracle.nosql.proxy.protocol.HttpConstants.END_TIMESTAMP; +import static oracle.nosql.proxy.protocol.HttpConstants.GENERAL; +import static oracle.nosql.proxy.protocol.HttpConstants.IF_MATCH; +import static oracle.nosql.proxy.protocol.HttpConstants.KEY_ID; import static oracle.nosql.proxy.protocol.HttpConstants.LIMIT; import static oracle.nosql.proxy.protocol.HttpConstants.NAME_ONLY; import static oracle.nosql.proxy.protocol.HttpConstants.NAME_PATTERN; @@ -35,9 +39,13 @@ import static oracle.nosql.proxy.protocol.HttpConstants.TABLE_STOREINFO; import static oracle.nosql.proxy.protocol.HttpConstants.TABLE_USAGE; import static oracle.nosql.proxy.protocol.HttpConstants.TENANT_ID; +import static oracle.nosql.proxy.protocol.HttpConstants.VAULT_ID; import static oracle.nosql.proxy.security.AccessContext.INTERNAL_OCID_PREFIX; import static oracle.nosql.util.http.HttpConstants.REQUEST_ORIGIN_HEADER; import static oracle.nosql.util.http.HttpConstants.REQUEST_ORIGIN_PROXY; +import static oracle.nosql.util.http.HttpConstants.WORK_REQUEST_TYPE; +import static oracle.nosql.util.http.HttpConstants.WORK_REQUEST_DDL; +import static oracle.nosql.util.http.HttpConstants.WORK_REQUEST_KMSKEY; import java.lang.reflect.Type; import java.net.HttpURLConnection; @@ -81,6 +89,8 @@ import oracle.nosql.util.tmi.DropInputs; import oracle.nosql.util.tmi.IndexInfo; import oracle.nosql.util.tmi.IndexInfo.IndexState; +import oracle.nosql.util.tmi.KmsKeyInfo; +import oracle.nosql.util.tmi.ListWorkRequestsResult; import oracle.nosql.util.tmi.ReplicaStats; import oracle.nosql.util.tmi.StoreInfo; import oracle.nosql.util.tmi.TableCollection; @@ -90,6 +100,7 @@ import oracle.nosql.util.tmi.TableLimits; import oracle.nosql.util.tmi.TableUsage; import oracle.nosql.util.tmi.TenantLimits; +import oracle.nosql.util.tmi.WorkRequest; /** * Implementation of TenantManager that talks over an HTTP/1 connection to @@ -135,6 +146,7 @@ public class SCTenantManager implements TenantManager { private String tmUrlBase; /* /V0/tm/ */ private String tmWorkRequestsBase; /* /V0/tm/workRequests */ private String filterRequestsBase; /* /V0/filters */ + private String cmekUrlBase; /* /V0/cmek/kmsKey */ private List pingUrls; private final String tmAPIVersion; private final Cache limitsCache; @@ -171,17 +183,17 @@ public SCTenantManager(String tmAPIVersion, /* * cache TenantLimits objects. Configuration calls are - * self-explanatory. The cache has a maxium size and expiration time - * on entries. Don't use a built-in loader. + * self-explanatory. The cache has an expiration time on entries. + * Don't use a built-in loader. * - * Expire after 10 minutes, max of 500 entries (tenants) in the cache. + * Expire after 10 minutes in the cache. * * There are other configuration options for this object, such as stats. * They are not used at this time. */ - limitsCache = - CacheBuilder.build(new CacheConfig().setCapacity(500). 
- setLifetime(10 * 60 * 1000)); + limitsCache = CacheBuilder.build( + new CacheConfig().setLifetime(10 * 60 * 1000) + .setName("TenantLimitsCache")); scRequest = new HttpRequest().disableRetry() .setTimeout(this.connectTimeoutMs, this.readTimeoutMs) @@ -955,10 +967,10 @@ public String getWorkRequestId(TableInfo tableInfo, OpCode opCode) { } @Override - public GetWorkRequestResponse getWorkRequest(AccessContext actx, - String workRequestId, - boolean internal, - LogContext lc) { + public GetDdlWorkRequestResponse getDdlWorkRequest(AccessContext actx, + String workRequestId, + boolean internal, + LogContext lc) { try { final StringBuilder sb = new StringBuilder() .append(getTMWorkRequestsBase()) @@ -973,23 +985,66 @@ public GetWorkRequestResponse getWorkRequest(AccessContext actx, } final String url = sb.toString(); - - logTrace(lc, "GetWorkRequest: " + url); + logTrace(lc, "GetDdlWorkRequest: " + url); final HttpResponse response = doHttpRequest(scRequest, HttpMethod.GET, url, null /* payload */, scSSLHandler, lc); if (response.getStatusCode() != HttpURLConnection.HTTP_OK) { - ErrorResponse er = getErrorResponse(response, "GetWorkRequest"); - logError(response, lc, "GetWorkRequest error: " + er); - return new GetWorkRequestResponse(er); + ErrorResponse er = getErrorResponse(response, + "GetDdlWorkRequest"); + logError(response, lc, "GetDdlWorkRequest error: " + er); + return new GetDdlWorkRequestResponse(er); } DdlHistoryEntry ddlEntry = deserializePojo(response.getOutput(), DdlHistoryEntry.class); - return new GetWorkRequestResponse(response.getStatusCode(), - ddlEntry); + return new GetDdlWorkRequestResponse(response.getStatusCode(), + ddlEntry); + } catch (Exception e) { + logException("GetWorkRequest", e, lc); + return new GetDdlWorkRequestResponse(handleError(e)); + } + } + + @Override + public GetWorkRequestResponse getWorkRequest(AccessContext actx, + String workRequestId, + boolean internal, + LogContext lc) { + + try { + final StringBuilder sb = new StringBuilder() + .append(getTMWorkRequestsBase()) + .append(workRequestId); + addQueryParam(sb, GENERAL, "true", true); + if (actx != null && actx.getTenantId() != null) { + addQueryParam(sb, TENANT_ID, actx.getTenantId(), false); + } + if (internal) { + addInternalQueryParam(sb, false); + } + + final String url = sb.toString(); + + logTrace(lc, "GetWorkRequest: " + url); + final HttpResponse resp = + doHttpRequest(scRequest, HttpMethod.GET, url, + null /* payload */, + scSSLHandler, lc); + + if (resp.getStatusCode() != HttpURLConnection.HTTP_OK) { + ErrorResponse er = + getErrorResponse(resp, "GetWorkRequest"); + logError(resp, lc, "GetWorkRequest error: " + er); + return new GetWorkRequestResponse(er); + } + + WorkRequest workRequest = deserializePojo(resp.getOutput(), + WorkRequest.class); + return new GetWorkRequestResponse(resp.getStatusCode(), + workRequest); } catch (Exception e) { logException("GetWorkRequest", e, lc); return new GetWorkRequestResponse(handleError(e)); @@ -998,20 +1053,27 @@ public GetWorkRequestResponse getWorkRequest(AccessContext actx, @Override public ListWorkRequestResponse listWorkRequests(AccessContext actx, - int startIndex, + String nextPageToken, int limit, LogContext lc) { try { StringBuilder sb = new StringBuilder() .append(getTMWorkRequestsBase()); addQueryParam(sb, actx, true); - if (startIndex >= 0) { - addQueryParam(sb, START_INDEX, - String.valueOf(startIndex), false); + if (nextPageToken != null) { + addQueryParam(sb, START_INDEX, nextPageToken, false); } if (limit > 0) { addQueryParam(sb, 
LIMIT, String.valueOf(limit), false); } + + if (actx.getAuthorizedOps() != null) { + for (OpCode op : actx.getAuthorizedOps()) { + addQueryParam(sb, WORK_REQUEST_TYPE, + mapWorkRequestType(op), false); + } + } + final String url = sb.toString(); logTrace(lc, "ListWorkRequests: " + url); @@ -1027,18 +1089,30 @@ public ListWorkRequestResponse listWorkRequests(AccessContext actx, return new ListWorkRequestResponse(er); } - DdlHistoryEntry[] ddlEntries = deserializePojo(response.getOutput(), - DdlHistoryEntry[].class); - int lastIndex = startIndex + ddlEntries.length; + ListWorkRequestsResult result = + deserializePojo(response.getOutput(), + ListWorkRequestsResult.class); return new ListWorkRequestResponse(response.getStatusCode(), - ddlEntries, - lastIndex); + result.getWorkRequests(), + result.getNextPageToken()); } catch (Exception e) { logException("ListWorkRequests", e, lc); return new ListWorkRequestResponse(handleError(e)); } } + private String mapWorkRequestType(OpCode op) { + switch(op) { + case GET_TABLE: + return WORK_REQUEST_DDL; + case GET_CONFIG_KMS_KEY: + return WORK_REQUEST_KMSKEY; + default: + throw new IllegalArgumentException( + "Invalid sub operation for list-workrequest: " + op); + } + } + @Override public ListRuleResponse listRules(LogContext lc) { @@ -1057,7 +1131,8 @@ public ListRuleResponse listRules(LogContext lc) { return new ListRuleResponse(er); } - Rule[] rules = deserializePojo(response.getOutput(), Rule[].class); + Rule[] rules = Rule.getGson().fromJson(response.getOutput(), + Rule[].class); logTrace(lc, "listRules: " + rules.length + " rules returned"); return new ListRuleResponse(response.getStatusCode(), rules); } catch (Exception e) { @@ -1255,6 +1330,133 @@ public ReplicaStatsResponse getReplicaStats(AccessContext actx, } } + /* + * GET V0/cmek/kmskey + */ + @Override + public GetKmsKeyInfoResponse getKmsKey(AccessContext actx, + boolean internal, + LogContext lc) { + final String op = "getKmsKey"; + try { + String url = getCmekUrlBase(); + if (internal) { + url += "?" 
+ INTERNAL_QUERY; + } + logTrace(lc, op + ": " + url); + + final HttpResponse res = doHttpRequest(scRequest, HttpMethod.GET, + url, null /* payload */, + scSSLHandler, lc); + if (res.getStatusCode() != HttpURLConnection.HTTP_OK) { + ErrorResponse err = getErrorResponse(res, op); + logError(res, lc, op + " error: " + err); + return new GetKmsKeyInfoResponse(err); + } + + KmsKeyInfo keyInfo = deserializePojo(res.getOutput(), + KmsKeyInfo.class); + return new GetKmsKeyInfoResponse(keyInfo, res.getStatusCode()); + } catch (Exception ex) { + logException(op, ex, lc); + return new GetKmsKeyInfoResponse(handleError(ex)); + } + } + + /* + * PUT V0/cmek/kmskey?tenantid=&&keyid= + * [&&vaultid=] + * [&&dryrun=] + * [&&ifmatch=] + */ + @Override + public WorkRequestIdResponse updateKmsKey(AccessContext actx, + String kmsKeyId, + String kmsVaultId, + byte[] matchETag, + boolean dryRun, + LogContext lc) { + final String op = "updateKmsKey"; + try { + StringBuilder sb = new StringBuilder(getCmekUrlBase()); + addQueryParam(sb, TENANT_ID, actx.getTenantId(), true); + addQueryParam(sb, KEY_ID, kmsKeyId, false); + if (kmsVaultId != null) { + addQueryParam(sb, VAULT_ID, kmsVaultId, false); + } + addQueryParam(sb, DRY_RUN, String.valueOf(dryRun), false); + if (matchETag != null) { + addQueryParam(sb, IF_MATCH, JsonUtils.encodeBase64(matchETag), + false); + } + final String url = sb.toString(); + + logTrace(lc, op + ": " + url); + + final HttpResponse res = doHttpRequest(scRequest, HttpMethod.PUT, + url, null /* payload */, + scSSLHandler, lc); + if (res.getStatusCode() != HttpURLConnection.HTTP_OK) { + ErrorResponse er = getErrorResponse(res, op); + logError(res, lc, op + " error: " + er); + return new WorkRequestIdResponse(er); + } + + String workRequestId = res.getOutput(); + logTrace(lc, op + ": response from TM: " + workRequestId); + + return new WorkRequestIdResponse(res.getStatusCode(), workRequestId); + + } catch (Exception ex) { + logException(op, ex, lc); + return new WorkRequestIdResponse(handleError(ex)); + } + } + + /* + * DELETE V0/cmek/kmskey?tenantid= + * [&&dryrun=] + * [&&ifmatch=] + */ + @Override + public WorkRequestIdResponse removeKmsKey(AccessContext actx, + byte[] matchETag, + boolean dryRun, + LogContext lc) { + + final String op = "removeKmsKey"; + try { + StringBuilder sb = new StringBuilder(getCmekUrlBase()); + addQueryParam(sb, TENANT_ID, actx.getTenantId(), true); + addQueryParam(sb, DRY_RUN, String.valueOf(dryRun), false); + if (matchETag != null) { + addQueryParam(sb, IF_MATCH, JsonUtils.encodeBase64(matchETag), + false); + } + final String url = sb.toString(); + + logTrace(lc, op + ": " + url); + + final HttpResponse res = doHttpRequest(scRequest, HttpMethod.DELETE, + url, null /* payload */, + scSSLHandler, lc); + if (res.getStatusCode() != HttpURLConnection.HTTP_OK) { + ErrorResponse er = getErrorResponse(res, op); + logError(res, lc, op + " error: " + er); + return new WorkRequestIdResponse(er); + } + + String workRequestId = res.getOutput(); + logTrace(lc, op + ": response from TM: " + workRequestId); + + return new WorkRequestIdResponse(res.getStatusCode(), workRequestId); + + } catch (Exception ex) { + logException(op, ex, lc); + return new WorkRequestIdResponse(handleError(ex)); + } + } + private GetTableResponse executeTableRequest(HttpMethod method, String url, String payload, @@ -1330,6 +1532,17 @@ synchronized private String getTMUrlBase() { return tmUrlBase; } + + /** + * Access to the base URL must be protected in case the SC is reset. 
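The getCmekUrlBase() accessor that follows uses the same synchronized lazy-initialization pattern as the existing URL-base getters: build the base URL on first use and rebuild it after the SC endpoint is reset. A stripped-down sketch of that pattern, with a hypothetical host name:

public class LazyUrlBaseSketch {

    private String cmekUrlBase;   /* built lazily, cleared on SC reset */

    /* Hypothetical stand-in for SCTenantManager.establishURLBase(). */
    private void establishUrlBase() {
        cmekUrlBase = "https://sc.example.com/V0/cmek/kmskey";
    }

    /* Synchronized so a concurrent reset cannot expose a stale base URL. */
    synchronized String getCmekUrlBase() {
        if (cmekUrlBase == null) {
            establishUrlBase();
        }
        return cmekUrlBase;
    }

    /* Called when the SC endpoint changes; the next call rebuilds the URL. */
    synchronized void reset() {
        cmekUrlBase = null;
    }

    public static void main(String[] args) {
        System.out.println(new LazyUrlBaseSketch().getCmekUrlBase());
    }
}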
+ */ + synchronized private String getCmekUrlBase() { + if (cmekUrlBase == null) { + establishURLBase(); + } + return cmekUrlBase; + } + /** * Access to SC ping URL must be protected in case the SC is reset. */ @@ -1417,6 +1630,9 @@ public void establishURLBase(String scUrl, boolean reset) { /* filters url base */ filterRequestsBase = scAPIBase + "/filters"; } + if (cmekUrlBase == null || reset) { + cmekUrlBase = scAPIBase + "/cmek/kmskey"; + } } /** diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/TableHistoryResponse.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/TableHistoryResponse.java index 849397b2..50ef2162 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/TableHistoryResponse.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/TableHistoryResponse.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. * * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/TableUsageResponse.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/TableUsageResponse.java index 76b43ba4..8303f673 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/TableUsageResponse.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/TableUsageResponse.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. * * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/TableUtils.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/TableUtils.java index 50c1f93a..33b66d93 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/TableUtils.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/TableUtils.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. * * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: @@ -595,28 +595,34 @@ public static PrepareCB getCallbackInfo(AccessContext actx, } /** - * Returns workRequest information + * Gets DDL workRequest information + * + * This is used by cross region internal-status request, for GAT DDL op. 
*/ - public static GetWorkRequestResponse getWorkRequest(AccessContext actx, - String workReqId, - TenantManager tm, - LogContext lc) { - return getWorkRequest(actx, workReqId, tm, false /* internal */, lc); + public static GetDdlWorkRequestResponse getDdlWorkRequest( + AccessContext actx, + String workReqId, + TenantManager tm, + boolean internal, + LogContext lc) { + return tm.getDdlWorkRequest(actx, workReqId, internal, lc); } + /** + * Gets workRequest information + */ public static GetWorkRequestResponse getWorkRequest(AccessContext actx, String workReqId, TenantManager tm, - boolean internal, LogContext lc) { - return tm.getWorkRequest(actx, workReqId, internal, lc); + return tm.getWorkRequest(actx, workReqId, false /* internal */, lc); } /** - * List workRequests + * Lists workRequests */ public static ListWorkRequestResponse listWorkRequests(AccessContext actx, - int startIndex, + String startIndex, int limit, TenantManager tm, LogContext lc) { @@ -926,6 +932,91 @@ public static ReplicaStatsResponse getReplicaStats(AccessContext actx, limit, lc); } + /** + * Get the service level kms key information + * + * @param actx the AccessContext instance + * @param tm an instance of TenantManager to use + * @param lc the LogContext instance + * + * @return an instance of GetKmsKeyInfoResponse that represents the + * service level configuration information + */ + public static GetKmsKeyInfoResponse getKmsKeyInfo(AccessContext actx, + TenantManager tm, + LogContext lc) { + return tm.getKmsKey(actx, false /* internal */, lc); + } + + /** + * Updates the service level kms key + * + * @param actx the AccessContext instance + * @param tm the TenantManager instance + * @param configuration the new configuration + * @param matchETag the index ETag to be matched + * @param dryRun set true if test update configuration without actually + * executing it + * @param lc the LogContext instance + * @param request the http request + * @param ac an instance of AccessChecker to use + * @param filter the handler to filter request + * @param updateLc the handler to update log context + * + * @return an instance of WorkRequestIdResponse + */ + public static WorkRequestIdResponse updateKmsKey(AccessContext actx, + TenantManager tm, + String tenantId, + String kmsKeyId, + String kmsVaultId, + byte[] matchEtag, + boolean dryRun, + LogContext lc, + FullHttpRequest request, + AccessChecker ac, + Filter filter, + UpdateLogContext updateLc){ + + if (ac != null) { + actx = ac.checkConfigurationAccess( + request.method(), + request.uri(), + request.headers(), + OpCode.UPDATE_CONFIG_KMS_KEY, + null /* authorizeOps */, + tenantId, + getPayload(request), + filter, + lc); + if (updateLc != null) { + updateLc.update(lc, actx, OpCode.UPDATE_CONFIGURATION); + } + } + return tm.updateKmsKey(actx, kmsKeyId, kmsVaultId, matchEtag, + dryRun, lc); + } + + /** + * Removes the kms key used by the service + * + * @param actx the AccessContext instance + * @param tm the TenantManager instance + * @param matchETag the index ETag to be matched + * @param dryRun set true if test update configuration without actually + * executing it + * @param lc the LogContext instance + * + * @return an instance of WorkRequestIdResponse + */ + public static WorkRequestIdResponse removeKmsKey(AccessContext actx, + TenantManager tm, + byte[] matchEtag, + boolean dryRun, + LogContext lc) { + return tm.removeKmsKey(actx, matchEtag, dryRun, lc); + } + /* * Map cross region ddl operation to ddl op for permission check */ diff --git 
a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/TenantManager.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/TenantManager.java index 3a1fe51d..70874681 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/TenantManager.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/TenantManager.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. * * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: @@ -311,7 +311,7 @@ default String getWorkRequestId(TableInfo tableInfo, OpCode opCode) { } /** - * Gets the work request information. + * Gets the DDL work request information. * * @param actx access context * @param workRequestId the workRequest id @@ -319,6 +319,22 @@ default String getWorkRequestId(TableInfo tableInfo, OpCode opCode) { * won't be throttled. * @param lc log context * + * @return GetDdlWorkRequestResponse + */ + default GetDdlWorkRequestResponse getDdlWorkRequest(AccessContext actx, + String workRequestId, + boolean internal, + LogContext lc) { + return null; + } + + /** + * Gets the work request information. + * + * @param actx access context + * @param workRequestId the workRequest id + * @param lc log context + * * @return GetWorkRequestResponse */ default GetWorkRequestResponse getWorkRequest(AccessContext actx, @@ -341,7 +357,7 @@ default GetWorkRequestResponse getWorkRequest(AccessContext actx, * @return ListWorkRequestResponse */ default ListWorkRequestResponse listWorkRequests(AccessContext actx, - int startIndex, + String startIndex, int limit, LogContext lc) { return null; @@ -457,6 +473,65 @@ ReplicaStatsResponse getReplicaStats(AccessContext actx, long startTime, int limit, LogContext lc); + /** + * Gets the service level kms key information + * + * @param actx the AccessContext instance + * @param internal whether this is an internal. For Cloud, internal + * won't be throttled. + * @param lc log context + * + * @return a GetKmsKeyInfoResponse representing the kms key information + */ + default GetKmsKeyInfoResponse getKmsKey(AccessContext actx, + boolean internal, + LogContext lc) { + return new GetKmsKeyInfoResponse( + ErrorResponse.build(ErrorCode.UNSUPPORTED_OPERATION, + "getKmsKey is not supported")); + } + + /** + * Updates the service level kms key + * + * @param actx the AccessContext instance + * @param kmsKeyId the kms key Id + * @param kmsVaultId the kms vault Id + * @param matchETag the index ETag to be matched + * @param dryRun true if test this operation without actually executing it + * @param lc the LogContext instance + * + * @return WorkRequestIdResponse representing the work request Id. + */ + default WorkRequestIdResponse updateKmsKey(AccessContext actx, + String kmsKeyId, + String kmsVaultId, + byte[] matchETag, + boolean dryRun, + LogContext lc) { + return new WorkRequestIdResponse( + ErrorResponse.build(ErrorCode.UNSUPPORTED_OPERATION, + "updateKmsKey is not supported")); + } + + /** + * Removes the kms key used by the service + * + * @param actx the AccessContext instance + * @param matchETag the index ETag to be matched + * @param dryRun true if test this operation without actually executing it + * @param lc the LogContext instance + * + * @return WorkRequestIdResponse representing the work request Id. 
+ */ + default WorkRequestIdResponse removeKmsKey(AccessContext actx, + byte[] matchETag, + boolean dryRun, + LogContext lc) { + return new WorkRequestIdResponse( + ErrorResponse.build(ErrorCode.UNSUPPORTED_OPERATION, + "removeKmsKey is not supported")); + } /** * Certain environments require that a newly-created store is diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/TenantManagerConstants.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/TenantManagerConstants.java index a8c4d54b..d26c4d47 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/TenantManagerConstants.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/sc/TenantManagerConstants.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. * * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/security/AccessChecker.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/security/AccessChecker.java index ac14e0e2..a6a38841 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/security/AccessChecker.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/security/AccessChecker.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. * * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: @@ -125,6 +125,19 @@ public AccessContext checkAccess(HttpMethod httpMethod, * @param requestUri request URI * @param httpHeaders HTTP headers * @param opCode a work request operation code + * @param authorizeOps the sub operations to be authorized. If not provided, + * check the authorization of {@code opCode}. The sub operations must be + * valid sub operation of {@code opCode}. + * @param shouldAuthorizeAllOps set to true if requires all the sub + * operations specified in authorizeOps must be authorized, if any of + * sub operations is not authorized, + * {@link BinaryProtocol#INSUFFICIENT_PERMISSION} error will be returned. + * If false, the authorized sub operations will be returned with + * {@link AccessContext#getAuthorizedOps}, if none of sub operations is + * authorized, {@link BinaryProtocol#INSUFFICIENT_PERMISSION} error will + * be returned. + * This flag will be ignored when {@code authorizeOps} is not provided, in + * which case {@code opCode} will always be checked for authorization. * @param compartmentId compartmentId * @param workRequestId work request id * @param payload the request payload @@ -145,6 +158,8 @@ public default AccessContext checkWorkRequestAccess( String uri, HttpHeaders headers, OpCode opCode, + @Nullable OpCode[] authorizeOps, + boolean shouldAuthorizeAllOps, @Nullable String compartmentId, @Nullable String workRequestId, @Nullable byte[] payload, @@ -155,6 +170,51 @@ public default AccessContext checkWorkRequestAccess( return null; } + /** + * Given HTTP method, headers and paylod extracted from a HTTP request, + * checks that the invocation of the configuration operation is valid. + * This method should be called before the actual operation execution. The + * implementation must be re-entrant. This method also must return the check + * results immediately without throwing a retryable exception. 
+ *
+ * @param httpMethod HTTP method
+ * @param requestUri request URI
+ * @param httpHeaders HTTP headers
+ * @param opCode a work request operation code
+ * @param authorizeOps the sub operations to be authorized. If not provided,
+ * check the authorization of {@code opCode}. The sub operations must be
+ * valid sub operations of {@code opCode}. If any of the sub operations is
+ * not authorized, a {@link BinaryProtocol#INSUFFICIENT_PERMISSION} error
+ * will be returned.
+ * @param compartmentId compartmentId
+ * @param payload the request payload
+ * @param filter the filter interface used to block the request if needed
+ * @param lc the log context object
+ * @return AccessContext the object containing the requesting subject
+ * information: the tenant id, principal id and associated permissions.
+ * @throws RequestException the errors that occurred during access checking.
+ * All errors are thrown as this exception type but may carry different
+ * error codes:
+ * o {@link BinaryProtocol#INVALID_AUTHORIZATION} indicates the
+ * authorization header is invalid or cannot be verified.
+ * o {@link BinaryProtocol#INSUFFICIENT_PERMISSION} indicates the
+ * authorization header is valid but the caller subject doesn't have
+ * the permission to perform the operation.
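+ *
+ * For illustration, the invocation added in TableUtils.updateKmsKey in this
+ * patch looks roughly like:
+ *   ac.checkConfigurationAccess(request.method(), request.uri(),
+ *       request.headers(), OpCode.UPDATE_CONFIG_KMS_KEY,
+ *       null, tenantId, getPayload(request), filter, lc);
+ * i.e. authorizeOps may be null, in which case only opCode itself is
+ * checked for authorization.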
  • + */ + public default AccessContext checkConfigurationAccess( + HttpMethod httpMethod, + String uri, + HttpHeaders headers, + OpCode opCode, + @Nullable OpCode[] authorizeOps, + String compartmentId, + @Nullable byte[] payload, + Filter filter, + LogContext lc) { + return null; + } + /** * Close access checker, stop threads and release resources used * by access checker. diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/security/AccessCheckerFactory.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/security/AccessCheckerFactory.java index 946dd22b..fb6da7eb 100755 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/security/AccessCheckerFactory.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/security/AccessCheckerFactory.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. * * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: @@ -270,6 +270,8 @@ public AccessContext checkWorkRequestAccess( String uri, HttpHeaders headers, OpCode op, + @Nullable OpCode[] authorizeOps, + boolean shouldAuthorizeAllOps, @Nullable String compartmentId, @Nullable String workRequestId, @Nullable byte[] payload, @@ -286,6 +288,29 @@ public AccessContext checkWorkRequestAccess( } return actx; } + + @Override + public AccessContext checkConfigurationAccess( + HttpMethod httpMethod, + String uri, + HttpHeaders headers, + OpCode op, + @Nullable OpCode[] authorizeOps, + @Nullable String compartmentId, + @Nullable byte[] payload, + Filter filter, + LogContext lc) + throws RequestException { + + checkAccess(httpMethod, uri, headers, op, compartmentId, + null /* tableName */, null /* actx*/, filter, lc); + + InsecureAccessContext actx = new InsecureAccessContext(); + if (compartmentId != null) { + actx.setCompartmentId(compartmentId); + } + return actx; + } } /** diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/security/AccessContext.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/security/AccessContext.java index ccbab062..dbdfe95a 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/security/AccessContext.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/security/AccessContext.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. * * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: @@ -15,6 +15,8 @@ import java.util.Map; +import oracle.nosql.proxy.protocol.Protocol.OpCode; + /** * The instance has request security and access context. */ @@ -181,20 +183,44 @@ public default String getRequestId() { return null; } + /** + * Return the flag indicates whether the table is active, used for + * auto-reclaimable table. + */ public default boolean isTableInactive() { return false; } + /** + * Reset the table to be active status, used for auto-reclaimable table. + */ public default void resetTableInactive() { } + /** + * Return the OBO token, used for GAT. + */ public default String getOboToken() { return null; } + /** + * Mark the current operation is internal cross-region ddl, used for GAT. + */ public default void setIsInternalDdl(boolean value) { } + /** + * Return the authorized sub operations. 
+ * + * This method is currently only used for authorization check for + * list-work-requests operation, returning the corresponding authorized + * sub operations. For all other operations, it returns {@code null}. + */ + public default OpCode[] getAuthorizedOps() { + return null; + } + public static AccessContext NULL_KV_CTX = new AccessContext() { @Override diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/util/CloudServiceTableCache.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/util/CloudServiceTableCache.java index 2a5bfc65..0ba2932d 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/util/CloudServiceTableCache.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/util/CloudServiceTableCache.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. * * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/util/ErrorManager.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/util/ErrorManager.java index f614d0ad..7ab1dfeb 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/util/ErrorManager.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/util/ErrorManager.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. * * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: @@ -56,7 +56,7 @@ public class ErrorManager { private final SkLogger logger; private final MonitorStats stats; - /* an LRU cache of rate limiters */ + /* A Timeout cache of rate limiters */ private Cache rateLimiters; /* metrics tracking raw number of delays/DNRs */ @@ -169,10 +169,10 @@ public ErrorManager(SkLogger logger, dnrPercentage = (double)config.getErrorDnrThreshold() * 100.0 / (double)delayResponseThreshold; - /* LRU cache */ + /* Timeout cache */ this.rateLimiters = CacheBuilder.build( - new CacheConfig().setCapacity(config.getErrorCacheSize()) - .setLifetime(config.getErrorCacheLifetimeMs())); + new CacheConfig().setLifetime(config.getErrorCacheLifetimeMs()) + .setName("ErrorRateCache")); this.delayPool = new ScheduledThreadPoolExecutor( config.getErrorDelayPoolSize()); diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/util/PassThroughTableCache.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/util/PassThroughTableCache.java index ef3e4c78..5f43e205 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/util/PassThroughTableCache.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/util/PassThroughTableCache.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. 
* * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/util/ProxyThreadPoolExecutor.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/util/ProxyThreadPoolExecutor.java new file mode 100644 index 00000000..349c324c --- /dev/null +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/util/ProxyThreadPoolExecutor.java @@ -0,0 +1,303 @@ +/*- + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle NoSQL + * Database made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/nosqldb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle NoSQL Database for a copy of the license and + * additional information. + */ + +package oracle.nosql.proxy.util; + +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.Executor; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.RejectedExecutionException; +import java.util.concurrent.locks.Condition; +import java.util.concurrent.locks.ReentrantLock; + +/** + * An implmentation of the Java Executor interface that creates and + * manages a fixed-size thread pool in a lightweight manner, with as + * little locking as possible. + *

    + * The implementation uses a non-blocking queue structure, + * ConcurrentLinkedQueue, to hold Runnable tasks executed by threads + * kept in a pool. + *

+ * This makes insertion and removal of tasks very cheap with good
+ * concurrency. If the queue is empty, threads wait on a separate lock and
+ * condition that are only used when the queue size goes to zero and when it
+ * increments from zero. This means that locking/waiting only happens when
+ * the queue and Executor are fairly idle, rather than putting locks in the
+ * path of the Executor when the queue is busy.
+ *
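+ * A minimal usage sketch (the Runnable shown is a made-up placeholder, not
+ * part of this class):
+ *
+ *   ProxyThreadPoolExecutor pool =
+ *       new ProxyThreadPoolExecutor(4, "ExamplePool");
+ *   pool.execute(() -> System.out.println("task ran"));
+ *   pool.shutdown(true);  // graceful: let queued tasks finish first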

    + * Possible enhancements (be careful of performance impact) + * o wait time statistics + * o allowing for a dynamic vs fixed-size thread pool + * o limit queue size. This would likely mean blocking in the execute() path + * to slow down producers or rejecting execution + * + * TODO: consider making this a public, reusable class in this or other + * common repository + */ +public class ProxyThreadPoolExecutor implements Executor { + + /* simple, concurrent FIFO queue for tasks */ + ConcurrentLinkedQueue taskQueue = + new ConcurrentLinkedQueue<>(); + + /* track the size of the queue as well as active threads */ + private final AtomicInteger queueSize = new AtomicInteger(); + private final AtomicInteger activeThreadCount = new AtomicInteger(); + + /* stats for lifetime of queue */ + private final AtomicLong totalTasksProcessed = new AtomicLong(); + private final AtomicLong totalThreadWakeups = new AtomicLong(); + + /* lock and condition used for waiting on the queue */ + private final ReentrantLock waitLock = new ReentrantLock(); + private final Condition available = waitLock.newCondition(); + + /* used to shutdown the Executor */ + volatile private boolean active = true; + /* + * tracks queue size on last execute. This is volatile vs Atomic as it's + * only used for either set or get + */ + volatile private int lastQueueSize; + + /* the pool */ + private final Thread[] threadPool; + private final int numThreads; + + /** + * Create an Executor instance that uses a fixed-size thread pool to + * execute the Runnable tasks. The pool must be shutdown using the + * {@link #shutdown} method to clean up the queue and thread pool. + * + * @param numThreads the number of threads for the pool + * @param namePrefix a string used to name the threads created for the + * pool. The name of a thread is the prefix plus ".N" where N is an + * integer. If this parameter is null a default prefix ("ProxyPool") is + * used + */ + public ProxyThreadPoolExecutor(int numThreads, + String namePrefix) { + this.numThreads = numThreads; + if (namePrefix == null) { + namePrefix = "ProxyPool"; + } + if (numThreads <=0 ) { + throw new IllegalArgumentException( + "Pool size must be greater than zero"); + } + threadPool = new Thread[numThreads]; + for (int i = 0; i < numThreads; i++) { + Thread th = new Thread( + new ExecutorThread(), (namePrefix + "." + i)); + threadPool[i] = th; + th.start(); + } + } + + /** + * Returns the current number of elements in the task queue + * @return the size + */ + public int getQueueSize() { + return queueSize.get(); + } + + /** + * Returns the number of threads actively handling a task + * @return the number + */ + public int getActiveThreadCount() { + return activeThreadCount.get(); + } + + /** + * Returns the number of queue tasks processed for the lifetime of + * the executor + * @return the number + */ + public long getTotalTasksProcessed() { + return totalTasksProcessed.get(); + } + + /** + * Returns the number of thread wakeup calls done for the lifetime of + * the executor + * @return the number + */ + public long getTotalThreadWakeups() { + return totalThreadWakeups.get(); + } + + /** + * Shutdown the Executor, including waiting for the threads in the + * pool, optionally using graceful shutdown to ensure that all current + * tasks in the queue are run. If graceful is false the threads still + * exit but any tasks not yet run are removed from the queue and + * ignored. 
+ * + * @param graceful if true, shut down gracefully + */ + public void shutdown(boolean graceful) { + try { + active = false; + /* + * graceful shutdown allows task queue to be emptied + */ + if (graceful && !taskQueue.isEmpty()) { + while (!taskQueue.isEmpty()) { + wakeupAll(); + } + } + /* + * these are redundant for graceful shutdown but + * do not hurt; efficiency doesn't matter in shutdown + */ + taskQueue.clear(); + wakeupAll(); + + /* wait for threads */ + for (int i = 0; i < numThreads; i++) { + threadPool[i].join(); + } + } catch (InterruptedException e) { + /* ignore */ + } + } + + /* + * A note on synchronization between the task queue and threads that operate + * on it. The goal is to have sufficient threads running to keep up with the + * task queue and also avoid excessive await/signal calls that can be + * concurrency hotspots. + * + * This is done by: + * 1. tracking the "last" queue size each time a task is added + * 2. if, on adding a task, the queue is larger than it was previously + * wakeup a thread, but only if there aren't a number of threads already + * running. That number is currently the size of the queue itself. This + * number is perhaps subject to change. + * + * As the queue grows additional threads are adding to + * hopefully enable the consumer threads to keep up with the producer + * calls. At some point if the queue size grows beyond the thread pool + * size the queue itself will keep growing, essentially without bound. + * This means that the thread pool should be sized with the + * producer/consumer paths in mind. + */ + @Override + public void execute(Runnable r) { + if (!active) { + throw new RejectedExecutionException( + "Executor has been shut down"); + } + taskQueue.add(r); + int size = queueSize.getAndIncrement(); + totalTasksProcessed.getAndIncrement(); + int lqs = lastQueueSize; + lastQueueSize = size; + int activeThreads = activeThreadCount.get(); + + /* + * Wake up if any of these is true + * o there are no active threads + * o the queue is growing (i.e. number of threads isn't keeping up) + * o the number of active threads is <= queue size + * + * If all threads in the pool are active, don't bother with a wakeup + */ + if (((activeThreads == 0 || + (size - lqs) > 0)) && + activeThreads <= size && + activeThreads < numThreads) { + totalThreadWakeups.getAndIncrement(); + signalAvailable(); + } + } + + /* + * The wait-related methods are used when the queue is empty, allowing + * threads to wait for a task. The lock taken in this path should not + * be a concurrency hotspot because it's only used when the queue is + * lightly used, indicating less load. In a busy system the queue will + * have tasks available most of the time. + */ + private void signalAvailable() { + /* + * there should be waiting threads, wake one up + */ + try { + waitLock.lock(); + available.signal(); + } finally { + waitLock.unlock(); + } + } + + private void waitForAvailable() { + try { + waitLock.lock(); + available.await(); + } catch (InterruptedException ie) { + /* ignore */ + } finally { + waitLock.unlock(); + } + } + + /* + * wakeup all waiting threads + */ + private void wakeupAll() { + try { + waitLock.lock(); + available.signalAll(); + } finally { + waitLock.unlock(); + } + } + + /** + * This is the class that is run by the pool threads. It looks for + * Runnable tasks on the queue and runs them. 
If no tasks are available + * it waits on a condition that is signaled when a task is available + */ + private class ExecutorThread implements Runnable { + @Override + public void run() { + /* + * this conditional allows for graceful shutdown, handling + * the queue before exiting. not-graceful shutdown is done + * using the interrupted() status, leaving the queue with + * entries if not empty + */ + while (!Thread.currentThread().isInterrupted() && + (active || !taskQueue.isEmpty())) { + Runnable task = taskQueue.poll(); + if (task != null) { + queueSize.decrementAndGet(); + activeThreadCount.incrementAndGet(); + totalTasksProcessed.getAndIncrement(); + task.run(); + activeThreadCount.decrementAndGet(); + } else { + waitForAvailable(); + } + } + } + } +} diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/util/ShutdownManager.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/util/ShutdownManager.java index 0552ae5f..3dda4ec0 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/util/ShutdownManager.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/util/ShutdownManager.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. * * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/util/TableCache.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/util/TableCache.java index dc57b8ab..4cd93de3 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/util/TableCache.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/util/TableCache.java @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. * * This file was distributed by Oracle as part of a version of Oracle NoSQL * Database made available at: diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/util/HostNameResolver.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/util/HostNameResolver.java new file mode 100644 index 00000000..708b2068 --- /dev/null +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/util/HostNameResolver.java @@ -0,0 +1,53 @@ +package oracle.nosql.util; + +import java.io.BufferedReader; +import java.io.FileReader; +import java.io.IOException; +import java.net.InetAddress; +import java.net.UnknownHostException; + +/** + * A utility for all cloud components to get the hostname for the node that is + * running the Docker container. + * Nodes managed by OKE are given hostnames that are OCIDs. + * After we migrate to OKE, it was hard to understand which instance is + * referred to; the instance name is easier for humans than the ocid. To be + * able to provide the instance name, we save the instance name to the file + * /etc/instance-name, which is mounted to /tmp/etc/instance-name inside the + * container. + * As we want to use instance name as host name if possible, we will resolve + * host name in following order: + * 1. Read from /tmp/etc/instance-name file. + * 2. Read from HOST_NAME system environment. + * 3. Resolve host name from DNS. 
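+ *
+ * For example (hypothetical value): if /tmp/etc/instance-name contains
+ * "oke-node-17", getHostName() returns "oke-node-17" and neither the
+ * HOST_NAME environment variable nor a DNS lookup is consulted.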
+ */ +public class HostNameResolver { + + private static final String INSTANCE_NAME_PATH = "/tmp/etc/instance-name"; + private static String HOST_NAME_ENV = "HOST_NAME"; + + public static String getHostName() { + String hostName = readFileLine(INSTANCE_NAME_PATH); + if (hostName != null && !hostName.isEmpty()) { + return hostName; + } + hostName = System.getenv(HOST_NAME_ENV); + if (hostName != null && !hostName.isEmpty()) { + return hostName; + } + try { + return InetAddress.getLocalHost().getHostName(); + } catch (UnknownHostException e) { + throw new RuntimeException("Cannot resolve local host name: " + e); + } + } + + private static String readFileLine(String filePath) { + try (BufferedReader br = new BufferedReader(new FileReader(filePath))) { + String line = br.readLine(); + return line; + } catch (IOException e) { + return null; + } + } +} diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/util/filter/Rule.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/util/filter/Rule.java index 4e85f82b..c5699ecd 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/util/filter/Rule.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/util/filter/Rule.java @@ -13,28 +13,41 @@ package oracle.nosql.util.filter; -import java.time.Instant; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.lang.reflect.Type; +import java.sql.Timestamp; import java.util.Arrays; import java.util.HashSet; import java.util.Objects; import java.util.Set; -import oracle.nosql.common.JsonBuilder; +import com.google.gson.Gson; +import com.google.gson.GsonBuilder; +import com.google.gson.JsonDeserializationContext; +import com.google.gson.JsonDeserializer; +import com.google.gson.JsonElement; +import com.google.gson.JsonObject; +import com.google.gson.JsonParseException; +import com.google.gson.JsonSerializationContext; +import com.google.gson.JsonSerializer; +import com.google.gson.JsonSyntaxException; /* * Used in definition of the JSON payloads for the REST APIs between the proxy * and SC filters service. * * To serialize a Java object into a Json string: - * Foo foo; - * String jsonPayload = JsonUtils.toJson(foo); + * Rule rule; + * String jsonPayload = rule.toJson(); * * To deserialize a Json string into this object: - * Foo foo = JsonUtils.fromJsont(, Foo.class); + * Rule rule = Rule.fromJson( | ); * * The Rule class represents the filter rule which has below information: * o name, the rule name, required. - * o action, the action type of the rule, default to DROP_REQUEST. + * o action, the action of the rule, default to DROP_REQUEST. * o tenant, the principal tenant ocid. * o user, the principal ocid. * o table, the target table ocid. 
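 *
 * A hypothetical serialized Rule, following the Gson mapping configured in
 * this class (the ocid, error code and timestamp values below are made up
 * for illustration only):
 *   {"name":"block-writes",
 *    "action":{"type":"RETURN_ERROR","errorCode":4,"errorMessage":"blocked"},
 *    "tenant":"ocid1.tenancy.oc1..exampletenant",
 *    "operations":["WRITE"],
 *    "createTime":"2025-01-01T00:00:00.000"}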
@@ -43,21 +56,32 @@ */ public class Rule { - private static final ActionType DEF_ACTION = ActionType.DROP_REQUEST; + private static final Gson gson = new GsonBuilder() + .registerTypeAdapter(Action.class, new ActionSerializer()) + .setDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS") + .create(); + + public static final Action DROP_REQUEST = new DropRequestAction(); + private static final Action DEFAULT_ACTION = DROP_REQUEST; public enum OpType { - ALL, - DDL, - WRITE, - READ + ALL, /* all ops */ + DDL, /* ddl ops */ + WRITE, /* dml write */ + READ, /* dml read, read metadata, work-request, usage */ + CONFIG_READ, /* read configuration */ + CONFIG_UPDATE /* update configuration */ } public enum ActionType { - DROP_REQUEST + DROP_REQUEST, + RETURN_ERROR }; + /* The name of the rule */ private String name; - private ActionType action; + /* The action to take if match the rule */ + private Action action; /* The principal tenant ocid */ private String tenant; @@ -65,30 +89,32 @@ public enum ActionType { private String user; /* The target table ocid */ private String table; + /* The operations */ private String[] operations; - private long createTimeMs; + /* The time stamp the rule was created */ + private Timestamp createTime; private transient Set opTypes; public static Rule createRule(String name, - ActionType action, - String tenantOcid, - String userOcid, + Action action, + String tenantId, + String userId, String tableOcid, String[] operations) { - return createRule(name, action, tenantOcid, userOcid, tableOcid, - operations, 0); + return new Rule(name, action, tenantId, userId, tableOcid, + operations, null /* createTime */); } public static Rule createRule(String name, - ActionType action, - String tenantOcid, - String userOcid, + Action action, + String tenantId, + String userId, String tableOcid, String[] operations, - long createTimeMs) { - return new Rule(name, action, tenantOcid, userOcid, tableOcid, - operations, createTimeMs); + Timestamp createTime) { + return new Rule(name, action, tenantId, userId, tableOcid, + operations, createTime); } /* Needed for serialization */ @@ -96,22 +122,21 @@ public Rule() { } private Rule(String name, - ActionType action, + Action action, String tenantOcid, String userOcid, String tableOcid, String[] ops, - long createTime) { + Timestamp createTime) { this.name = name; this.action = action; tenant = tenantOcid; user = userOcid; table = tableOcid; - createTimeMs = createTime; + this.createTime = createTime; operations = ops; validate(); - setOpTypes(); } public void setName(String name) { @@ -122,8 +147,12 @@ public String getName() { return name; } - public ActionType getAction() { - return (action != null) ? 
action : DEF_ACTION; + public Action getAction() { + return action; + } + + public ActionType getActionType() { + return getAction().getType(); } public String getTenant() { @@ -143,31 +172,17 @@ public String[] getOperations() { } public Set getOpTypes() { - if (opTypes != null) { - return opTypes; - } - setOpTypes(); - return opTypes; - } - - private void setOpTypes() { - opTypes = new HashSet<>(); - if (getOperations() != null) { + if (opTypes == null) { + opTypes = new HashSet<>(); for (String op : getOperations()) { opTypes.add(parseOpType(op)); } } + return opTypes; } - public String getCreateTime() { - if (createTimeMs > 0) { - return Instant.ofEpochMilli(createTimeMs).toString(); - } - return null; - } - - public long getCreateTimeMs() { - return createTimeMs; + public Timestamp getCreateTime() { + return createTime; } /* @@ -178,7 +193,8 @@ public boolean attributesEqual(Rule o) { return stringsEqual(getTenant(), o.getTenant()) && stringsEqual(getUser(), o.getUser()) && stringsEqual(getTable(), o.getTable()) && - operationsEqual(getOpTypes(), o.getOpTypes()); + operationsEqual(getOpTypes(), o.getOpTypes()) && + getAction().equals(o.getAction()); } public boolean operationsEqual(Set ops) { @@ -186,31 +202,48 @@ public boolean operationsEqual(Set ops) { } public String toJson() { - JsonBuilder jb = JsonBuilder.create(); - jb.append("name", getName()); - jb.append("action", getAction().name()); - if (getTenant() != null) { - jb.append("tenant", getTenant()); - } - if (getUser() != null) { - jb.append("user", getUser()); - } - if (getTable() != null) { - jb.append("table", getTable()); - } - if (getOperations() != null) { - jb.startArray("operations"); - for (String op : getOperations()) { - jb.append(op); + return gson.toJson(this); + } + + /* + * Constructs Rule from JSON stream + */ + public static Rule fromJson(InputStream in) { + try (InputStreamReader reader = new InputStreamReader(in)) { + Rule rule = gson.fromJson(reader, Rule.class); + if (rule == null) { + throw new IllegalArgumentException( + "Failed to deserailize JSON to Rule object: JSON is empty"); } - jb.endArray(); + rule.validate(); + return rule; + } catch (JsonSyntaxException | IOException ex) { + throw new IllegalArgumentException( + "Failed to deserailize JSON to Rule object: " + ex.getMessage()); } + } - if (getCreateTimeMs() > 0) { - jb.append("createTimeMs", getCreateTimeMs()); - jb.append("createTime", getCreateTime()); + /* + * Constructs Rule from JSON string + */ + public static Rule fromJson(String json) { + try { + Rule rule = gson.fromJson(json, Rule.class); + if (rule == null) { + throw new IllegalArgumentException( + "Failed to deserailize JSON to Rule object: " + json); + } + rule.validate(); + return rule; + } catch (JsonSyntaxException jse) { + throw new IllegalArgumentException( + "Failed to deserailize JSON to Rule object: " + + jse.getMessage() + ", json=" + json); } - return jb.toString(); + } + + public static Gson getGson() { + return gson; } @Override @@ -218,20 +251,34 @@ public String toString() { return toJson(); } - public void validate() { + private void validate() { if (name == null) { throw new IllegalArgumentException("Rule name should not be null"); } + + if (action != null) { + action.validate(); + } else { + action = DEFAULT_ACTION; + } + if (operations == null || operations.length == 0) { throw new IllegalArgumentException( "Rule operations should not be null or empty"); } + for (String op : operations) { + parseOpType(op); + } + + if (createTime == null) { + createTime = new 
Timestamp(System.currentTimeMillis()); + } } private static OpType parseOpType(String name) { try { return OpType.valueOf(name.toUpperCase()); - } catch(IllegalArgumentException iae) { + } catch (IllegalArgumentException iae) { throw new IllegalArgumentException("Invalid operation type '" + name + "', not one of the values accepted for Enum class: " + Arrays.toString(OpType.values())); @@ -242,7 +289,7 @@ private static OpType parseOpType(String name) { * Checks if the given OpType set represents the all the operation types if * match any of below 2 conditions: * 1. contain OpType.ALL - * 2. contain all the other OpType except OpType.ALL.s + * 2. contain all the other OpType except OpType.ALL */ public static boolean isAllOpType(Set ops) { for (OpType op : OpType.values()) { @@ -283,4 +330,143 @@ public static boolean operationsEqual(Set ops1, } return false; } + + /* + * Action to take when the rule is matched. + */ + public static class Action { + private ActionType type; + + private Action(ActionType type) { + this.type = type; + } + + public ActionType getType() { + return type; + } + + public void validate() { + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (!(obj instanceof Action)) { + return false; + } + return type == ((Action)obj).getType(); + } + } + + /* + * Drops request + */ + public static class DropRequestAction extends Action { + public DropRequestAction() { + super(ActionType.DROP_REQUEST); + } + } + + /* + * Returns the specified error + * - errorCode: the response error code (refer to Response error codes + * in httpproxy oracle.nosql.proxy.protocol.Protocol class) + * - errorMessage: the returned error message. + */ + public static class ReturnErrorAction extends Action { + private int errorCode; + private String errorMessage; + + public ReturnErrorAction(int errorCode, String errorMessage) { + super(ActionType.RETURN_ERROR); + this.errorCode = errorCode; + this.errorMessage = errorMessage; + + validate(); + } + + public int getErrorCode() { + return errorCode; + } + + public String getErrorMessage() { + return errorMessage; + } + + @Override + public void validate() { + if (errorCode <= 0) { + throw new IllegalArgumentException( + "The errorCode must be positive int, see error " + + "codes in oracle.nosql.proxy.protocol class"); + } + + if (errorMessage == null) { + throw new IllegalArgumentException( + "The errorMessage must be not null"); + } + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (!(obj instanceof ReturnErrorAction)) { + return false; + } + + ReturnErrorAction o1 = (ReturnErrorAction)obj; + return super.equals(obj) && + (getErrorCode() == o1.getErrorCode()) && + Objects.equals(getErrorMessage(), o1.getErrorMessage()); + } + } + + /* Customized Json Serializer/Deserialize for Action */ + private static class ActionSerializer + implements JsonSerializer, JsonDeserializer { + + @Override + public JsonElement serialize(Action action, + Type typeOfSrc, + JsonSerializationContext context) { + switch(action.getType()) { + case DROP_REQUEST: + return context.serialize(action, DropRequestAction.class); + case RETURN_ERROR: + return context.serialize(action, ReturnErrorAction.class); + default: + throw new JsonParseException("Unknown action: " + action); + } + } + + @Override + public Action deserialize(JsonElement json, + Type typeOfT, + JsonDeserializationContext context) + throws JsonParseException { + + JsonObject jsonObject = json.getAsJsonObject(); + 
String type = jsonObject.get("type").getAsString(); + ActionType actionType; + + try { + actionType = ActionType.valueOf(type); + } catch (IllegalArgumentException ex) { + throw new JsonParseException("Unknown action type: " + type); + } + + switch(actionType) { + case DROP_REQUEST: + return context.deserialize(json, DropRequestAction.class); + case RETURN_ERROR: + return context.deserialize(json, ReturnErrorAction.class); + default: + throw new JsonParseException("Unknown action type: " + type); + } + } + } } diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/util/http/HttpConstants.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/util/http/HttpConstants.java index 83819f11..269db0a9 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/util/http/HttpConstants.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/util/http/HttpConstants.java @@ -304,6 +304,9 @@ public class HttpConstants { * Used for WorkRequest related APIs */ public static final String WORK_REQUEST_ID = "workRequestId"; + public static final String WORK_REQUEST_TYPE = "type"; + public static final String WORK_REQUEST_DDL = "ddl"; + public static final String WORK_REQUEST_KMSKEY = "kmskey"; /** * Used for Backfill to RQS API diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/util/ph/HealthReportAgent.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/util/ph/HealthReportAgent.java index 33028f44..d05176f5 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/util/ph/HealthReportAgent.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/util/ph/HealthReportAgent.java @@ -23,6 +23,7 @@ import oracle.nosql.common.sklogger.ScheduleStart; import oracle.nosql.common.sklogger.SkLogger; +import oracle.nosql.util.HostNameResolver; import oracle.nosql.util.HttpRequest; import oracle.nosql.util.HttpRequest.ConnectionHandler; import oracle.nosql.util.HttpResponse; @@ -42,7 +43,6 @@ public class HealthReportAgent { private static long INTERVAL = 60_000; - private static String HOST_NAME_ENV = "HOST_NAME"; private static String COMPONENT_NAME_ENV = "COMPONENT_NAME"; private static String COMPONENT_ID_ENV = "COMPONENT_ID"; @@ -74,7 +74,7 @@ public HealthReportAgent(boolean isGlobalComponent, long interval, SkLogger logger, HealthSource source) { - this.hostName = System.getenv(HOST_NAME_ENV); + this.hostName = HostNameResolver.getHostName(); this.componentName = System.getenv(COMPONENT_NAME_ENV); this.componentId = System.getenv(COMPONENT_ID_ENV); final String phUrl = URL.getPhUrl(); diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/util/tmi/DdlHistoryEntry.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/util/tmi/DdlHistoryEntry.java index d5a029ca..4f6dcd4f 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/util/tmi/DdlHistoryEntry.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/util/tmi/DdlHistoryEntry.java @@ -14,6 +14,9 @@ import oracle.nosql.util.fault.ErrorCode; import oracle.nosql.util.fault.RequestFault; import oracle.nosql.util.tmi.ReplicaInfo.ReplicaState; +import oracle.nosql.util.tmi.WorkRequest.ActionType; +import oracle.nosql.util.tmi.WorkRequest.EntityType; +import oracle.nosql.util.tmi.WorkRequest.OperationType; /** * A bean class to record DDL events. @@ -832,6 +835,69 @@ public String toString() { return JsonUtils.toJson(this); } + /* + * Converts to a WorkRequest object, representing general work request + * information. 
+ */ + public WorkRequest toWorkRequest() { + OperationType operationType = null; + WorkRequest.Status workRequestStatus = null; + ActionType actionType = null; + + DdlOp op = getOperationEnum(); + if (op == DdlOp.createTable) { + operationType = OperationType.CREATE_TABLE; + } else if (op == DdlOp.dropTable) { + operationType = OperationType.DELETE_TABLE; + } else { + operationType = OperationType.UPDATE_TABLE; + } + + long timeFinished = 0; + switch (getStatusEnum()) { + case ACCEPTED: + workRequestStatus = WorkRequest.Status.ACCEPTED; + actionType = ActionType.IN_PROGRESS; + break; + case INPROGRESS: + workRequestStatus = WorkRequest.Status.IN_PROGRESS; + actionType = ActionType.IN_PROGRESS; + break; + case SUCCEEDED: + workRequestStatus = WorkRequest.Status.SUCCEEDED; + if (op == DdlOp.createTable) { + actionType = ActionType.CREATED; + } else if (op == DdlOp.dropTable) { + actionType = ActionType.DELETED; + } else { + actionType = ActionType.UPDATED; + } + timeFinished = updateTime.getTime(); + break; + case FAILED: + workRequestStatus = WorkRequest.Status.FAILED; + actionType = ActionType.UPDATED; + timeFinished = updateTime.getTime(); + break; + } + + return new WorkRequest(workRequestId, + operationType, + workRequestStatus, + compartmentId, + tableOcid, + tableName, + EntityType.TABLE, + getTags(), + actionType, + createTime.getTime(), + (startTime != null ? + startTime.getTime() : 0), + timeFinished, + getErrorCodeEnum(), + resultMsg); + } + /* The local sub ddl request information */ public static class SubRequest { private String workRequestId; diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/util/tmi/KmsKeyInfo.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/util/tmi/KmsKeyInfo.java new file mode 100644 index 00000000..ea38b1d3 --- /dev/null +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/util/tmi/KmsKeyInfo.java @@ -0,0 +1,99 @@ +/*- + * Copyright (C) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + */ +package oracle.nosql.util.tmi; + +import java.nio.ByteBuffer; + +import oracle.nosql.common.json.JsonUtils; + +/** + * Used in defining the response payload for the REST API get-kms-key from SC + * to proxy. 
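+ *
+ * A hypothetical payload, assuming the default field-name mapping and using
+ * made-up values:
+ *   {"isHostedEnv":false,"keyId":"ocid1.key.oc1..examplekey",
+ *    "vaultId":"ocid1.vault.oc1..examplevault","state":"ACTIVE",
+ *    "createTime":1730000000000,"updateTime":1730000000000}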
+ */ +public class KmsKeyInfo { + + public enum KeyState{ + UPDATING, + REVERTING, + ACTIVE, + DELETED, + FAILED, + DISABLED + } + + private final Boolean isHostedEnv; + private final String dedicatedTenantId; + private final String keyId; + private final String vaultId; + private final KeyState state; + private final long createTime; + private final long updateTime; + + public KmsKeyInfo(Boolean isHostedEnv, + String dedicatedTenantId, + String keyId, + String vaultId, + KeyState state, + long createTime, + long updateTime) { + this.isHostedEnv = isHostedEnv; + this.dedicatedTenantId = dedicatedTenantId; + this.keyId = keyId; + this.vaultId = vaultId; + this.state = state; + this.createTime = createTime; + this.updateTime = updateTime; + } + + public KmsKeyInfo(Boolean isHostedEnv, + String dedicatedTenantId, + KeyState state) { + this(isHostedEnv, dedicatedTenantId, null /* keyId */, + null /* vaultId */, state, 0 /* createTime */, + 0 /* updateTime */); + } + + public String getDedicatedTenantId() { + return dedicatedTenantId; + } + + public Boolean isHostedEnv() { + return isHostedEnv; + } + + public String getKeyId() { + return keyId; + } + + public String getVaultId() { + return vaultId; + } + + public KeyState getState() { + return state; + } + + public long getCreateTime() { + return createTime; + } + + public long getUpdateTime() { + return updateTime; + } + + public byte[] getETag() { + /* + * The "updateTime" reflects the last change to the KmsKeyInfo, use + * it as ETag of KmsKeyInfo. + */ + final ByteBuffer buffer = ByteBuffer.allocate(8); + buffer.putLong(updateTime > 0 ? updateTime : createTime); + return buffer.array(); + } + + @Override + public String toString() { + return JsonUtils.print(this); + } +} diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/util/tmi/ListWorkRequestsResult.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/util/tmi/ListWorkRequestsResult.java new file mode 100644 index 00000000..e4d35f08 --- /dev/null +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/util/tmi/ListWorkRequestsResult.java @@ -0,0 +1,32 @@ +/*- + * Copyright (C) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + */ +package oracle.nosql.util.tmi; + +/** + * Used in defining the response payload for the REST API list-work-requests + * from SC to proxy. + */ +public class ListWorkRequestsResult { + /* The array of WorkRequests */ + private final WorkRequest[] workRequests; + /* + * The page token represents the starting point for retrieving next batch + * of results. 
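+ * As an illustrative usage note (an assumption, not part of the SC contract
+ * shown here): a non-null token is passed back as the startIndex of the
+ * next listWorkRequests call, and a null token can be treated as meaning
+ * there are no further pages.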
+ */ + private final String nextPageToken; + + public ListWorkRequestsResult(WorkRequest[] requests, + String nextPageToken) { + this.workRequests = requests; + this.nextPageToken = nextPageToken; + } + + public WorkRequest[] getWorkRequests() { + return workRequests; + } + + public String getNextPageToken() { + return nextPageToken; + } +} diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/util/tmi/TableInfo.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/util/tmi/TableInfo.java index 9b80c22f..dffc68d0 100644 --- a/httpproxy/httpproxy/src/main/java/oracle/nosql/util/tmi/TableInfo.java +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/util/tmi/TableInfo.java @@ -147,6 +147,9 @@ public enum SchemaState { /* schema state */ private SchemaState schemaState; + /* The flag indicates if use FORCE in freezing table schema in ddl */ + private boolean freezeForce; + /* * MR table information */ @@ -558,6 +561,14 @@ public SchemaState getSchemaState() { return schemaState; } + public void setFreezeForce(boolean value) { + freezeForce = value; + } + + public boolean getFreezeForce() { + return freezeForce; + } + public boolean isFrozen() { return schemaState == SchemaState.FROZEN; } diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/util/tmi/WorkRequest.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/util/tmi/WorkRequest.java new file mode 100644 index 00000000..1d665f0c --- /dev/null +++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/util/tmi/WorkRequest.java @@ -0,0 +1,176 @@ +/*- + * Copyright (C) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + */ +package oracle.nosql.util.tmi; + +import oracle.nosql.common.json.JsonUtils; +import oracle.nosql.util.fault.ErrorCode; + +/** + * Used in defining the response payload for the REST API get-work-request and + * list-work-requests from SC to proxy. + * + * It represents general work request information for DDL operation and CMEK + * operation. + */ +public class WorkRequest { + + public enum EntityType { + TABLE, + CONFIGURATION + } + + public enum OperationType { + CREATE_TABLE, + UPDATE_TABLE, + DELETE_TABLE, + UPDATE_KMS_KEY, + REMOVE_KMS_KEY + }; + + public enum Status { + ACCEPTED, + IN_PROGRESS, + FAILED, + SUCCEEDED, + CANCELING, + CANCELED + }; + + public enum ActionType { + CREATED, + UPDATED, + DELETED, + IN_PROGRESS + } + + /* The work request Id */ + private final String id; + /* The operation type */ + private final OperationType type; + /* The status of work request */ + private final Status status; + /* The ocid of the compartment that contains the work request*/ + private final String compartmentId; + + /* + * The resource affected by this work request. 
+ */ + + /* The resource identifier */ + private final String entityId; + /* The resource name */ + private final String entityName; + /* The resource type */ + private final EntityType entityType; + /* The action type */ + private final ActionType actionType; + /* The tags of the resource */ + private final byte[] tags; + + /* The time stamp the request was created */ + private final long timeAccepted; + /* The time stamp the request was started */ + private final long timeStarted; + /* The time stamp the request was finished */ + private final long timeFinished; + + /* The error encountered while executing a work request */ + private final ErrorCode errorCode; + /* The description of the issue encountered */ + private final String errorMessage; + + public WorkRequest(String id, + OperationType type, + Status status, + String compartmentId, + String entityId, + String entityName, + EntityType entityType, + byte[] tags, + ActionType actionType, + long timeAccepted, + long timeStarted, + long timeFinished, + ErrorCode errorCode, + String errorMessage) { + this.id = id; + this.type = type; + this.status = status; + this.compartmentId = compartmentId; + + this.entityId = entityId; + this.entityName = entityName; + this.entityType = entityType; + this.actionType = actionType; + this.tags = tags; + + this.timeAccepted = timeAccepted; + this.timeStarted = timeStarted; + this.timeFinished = timeFinished; + + this.errorCode = errorCode; + this.errorMessage = errorMessage; + } + + public String getId() { + return id; + } + + public OperationType getType() { + return type; + } + + public Status getStatus() { + return status; + } + + public String getCompartmentId() { + return compartmentId; + } + + public String getEntityId() { + return entityId; + } + + public String getEntityName() { + return entityName; + } + + public EntityType getEntityType() { + return entityType; + } + + public byte[] getTags() { + return tags; + } + + public ActionType getActionType() { + return actionType; + } + + public long getTimeAccepted() { + return timeAccepted; + } + + public long getTimeStarted() { + return timeStarted; + } + + public long getTimeFinished() { + return timeFinished; + } + + public ErrorCode getErrorCode() { + return errorCode; + } + + public String getErrorMessage() { + return errorMessage; + } + + @Override + public String toString() { + return JsonUtils.print(this); + } + } diff --git a/httpproxy/pom.xml b/httpproxy/pom.xml index 07460140..6fad8d9c 100644 --- a/httpproxy/pom.xml +++ b/httpproxy/pom.xml @@ -7,7 +7,7 @@ com.oracle.nosql kv - 25.1.13 + 25.3.21 proxy diff --git a/httpproxy/tests/src/assembly/test.xml b/httpproxy/tests/src/assembly/test.xml new file mode 100644 index 00000000..2a864b5b --- /dev/null +++ b/httpproxy/tests/src/assembly/test.xml @@ -0,0 +1,21 @@ + +test + + tar.gz + zip + +oracle-nosql-proxy-tests-${project.version} +true + + + /lib + true + true + false + runtime + + + diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/AsyncLatencyTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/AsyncLatencyTest.java new file mode 100644 index 00000000..655c92cf --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/AsyncLatencyTest.java @@ -0,0 +1,120 @@ +/*- + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * See the file LICENSE for redistribution information. 
+ * + */ + +package oracle.nosql.proxy; + +import static org.junit.Assume.assumeTrue; + +import org.junit.Test; +import org.junit.BeforeClass; + + +/** + * Verify that latencies remain stable for asynchronous operations when the + * number of concurrent requests is higher than the number of proxy + * worker threads. + * + * These tests only run against a local server and not minicloud. + * + * The tests use a KVLite that has a test hook that injects long + * latencies into all requests. + */ +public class AsyncLatencyTest extends LatencyTestBase { + + /* + * This test manages its own kvlite/proxy startup to control specific + * setup properties to allow for a test hook that injects latency into + * kvlite, and starts the proxy with only 2 worker threads. + * + * Note that this hides the superclass static method so it won't be called. + */ + @BeforeClass + public static void staticSetUp() + throws Exception { + + // this test doesn't run on minicloud or cloud + assumeTrue(!Boolean.getBoolean(USEMC_PROP) && + !Boolean.getBoolean(USECLOUD_PROP)); + + latencySetUp(true /*useAsync*/, 100 /*delayMs*/); + } + + @Test + public void testAsyncGetPutLatency() throws Exception { + + // skip this test if running on minicloud + assumeTrue(cloudRunning == false); + + // with async, we should be able to keep the same latencies + // even when using more client threads than proxy threads + testLatency("asyncGetPutLatency", + 3 /*readThreads*/, + 3 /*writeThreads*/, + 3 /*rwThreads*/, + 0 /*qThreads*/, + 10 /*runSeconds*/, + 90 /*minReadLatencyMs*/, + 150 /*maxReadLatencyMs*/, + 90 /*minWriteLatencyMs*/, + 150 /*maxWriteLatencyMs*/, + 0 /*minQueryLatencyMs*/, + 0 /*maxQueryLatencyMs*/); + } + + + @Test + public void testAsyncQueryLatency() throws Exception { + + // skip this test if running on minicloud + assumeTrue(cloudRunning == false); + + // This test has too many random failures in jenkins to be + // useful. Most are due to lack of CPU or resources in those + // test environments. So only run this test if verbose is + // enabled, which isn't by default in jenkins. + assumeTrue(verbose); + + // with async, we should be able to keep the same latencies + // even when using more client threads than proxy threads + testLatency("asyncQueryLatency", + 0 /*readThreads*/, + 0 /*writeThreads*/, + 0 /*rwThreads*/, + 8 /*qThreads*/, + 10 /*runSeconds*/, + 0 /*minReadLatencyMs*/, + 0 /*maxReadLatencyMs*/, + 0 /*minWriteLatencyMs*/, + 0 /*maxWriteLatencyMs*/, + 90 /*minQueryLatencyMs*/, + 250 /*maxQueryLatencyMs*/); + } + + + @Test + public void testAsyncGetPutQueryLatency() throws Exception { + + // skip this test if running on minicloud + assumeTrue(cloudRunning == false); + + // with async, we should be able to keep the same latencies + // even when using more client threads than proxy threads + testLatency("asyncGetPutQueryLatency", + 2 /*readThreads*/, + 2 /*writeThreads*/, + 2 /*rwThreads*/, + 4 /*qThreads*/, + 10 /*runSeconds*/, + 90 /*minReadLatencyMs*/, + 170 /*maxReadLatencyMs*/, + 90 /*minWriteLatencyMs*/, + 170 /*maxWriteLatencyMs*/, + 90 /*minQueryLatencyMs*/, + 250 /*maxQueryLatencyMs*/); + } + +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/BadProtocolTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/BadProtocolTest.java new file mode 100644 index 00000000..10ef2637 --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/BadProtocolTest.java @@ -0,0 +1,1879 @@ +/*- + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved.
+ * + * See the file LICENSE for redistribution information. + * + */ + +package oracle.nosql.proxy; + +import static io.netty.handler.codec.http.HttpMethod.POST; +import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1; +import static oracle.nosql.proxy.protocol.BinaryProtocol.BAD_PROTOCOL_MESSAGE; +import static oracle.nosql.proxy.protocol.BinaryProtocol.ILLEGAL_ARGUMENT; +import static oracle.nosql.proxy.protocol.BinaryProtocol.REQUEST_SIZE_LIMIT_EXCEEDED; +import static oracle.nosql.proxy.protocol.BinaryProtocol.TYPE_ARRAY; +import static oracle.nosql.proxy.protocol.BinaryProtocol.TYPE_BINARY; +import static oracle.nosql.proxy.protocol.BinaryProtocol.TYPE_BOOLEAN; +import static oracle.nosql.proxy.protocol.BinaryProtocol.TYPE_INTEGER; +import static oracle.nosql.proxy.protocol.BinaryProtocol.TYPE_MAP; +import static oracle.nosql.proxy.protocol.BinaryProtocol.TYPE_STRING; +import static oracle.nosql.proxy.protocol.BinaryProtocol.UNSUPPORTED_PROTOCOL; +import static oracle.nosql.proxy.protocol.HttpConstants.ACCEPT; +import static oracle.nosql.proxy.protocol.HttpConstants.AUTHORIZATION; +import static oracle.nosql.proxy.protocol.HttpConstants.CONNECTION; +import static oracle.nosql.proxy.protocol.HttpConstants.CONTENT_LENGTH; +import static oracle.nosql.proxy.protocol.HttpConstants.CONTENT_TYPE; +import static oracle.nosql.proxy.protocol.HttpConstants.NOSQL_DATA_PATH; +import static oracle.nosql.proxy.protocol.HttpConstants.NOSQL_VERSION; +import static oracle.nosql.proxy.protocol.HttpConstants.REQUEST_ID_HEADER; +import static oracle.nosql.proxy.protocol.HttpConstants.REQUEST_COMPARTMENT_ID; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeTrue; + +import java.io.IOException; +import java.math.BigDecimal; +import java.net.URL; +import java.util.HashMap; +import java.util.Map; +import java.util.Random; +import java.util.UUID; + +import oracle.kv.impl.topo.RepNodeId; +import oracle.nosql.driver.Consistency; +import oracle.nosql.driver.NoSQLHandleConfig; +import oracle.nosql.driver.TimeToLive; +import oracle.nosql.driver.Version; +import oracle.nosql.driver.http.NoSQLHandleImpl; +import oracle.nosql.driver.httpclient.HttpClient; +import oracle.nosql.driver.httpclient.ResponseHandler; +import oracle.nosql.driver.ops.DeleteRequest; +import oracle.nosql.driver.ops.GetIndexesRequest; +import oracle.nosql.driver.ops.GetRequest; +import oracle.nosql.driver.ops.GetTableRequest; +import oracle.nosql.driver.ops.ListTablesRequest; +import oracle.nosql.driver.ops.MultiDeleteRequest; +import oracle.nosql.driver.ops.PrepareRequest; +import oracle.nosql.driver.ops.PrepareResult; +import oracle.nosql.driver.ops.PutRequest; +import oracle.nosql.driver.ops.PutRequest.Option; +import oracle.nosql.driver.ops.QueryRequest; +import oracle.nosql.driver.ops.Request; +import oracle.nosql.driver.ops.TableLimits; +import oracle.nosql.driver.ops.TableRequest; +import oracle.nosql.driver.ops.TableResult; +import oracle.nosql.driver.ops.TableUsageRequest; +import oracle.nosql.driver.ops.WriteMultipleRequest; +import oracle.nosql.driver.ops.serde.BinarySerializerFactory; +import oracle.nosql.driver.ops.serde.Serializer; +import oracle.nosql.driver.query.QueryDriver; +import oracle.nosql.driver.util.ByteInputStream; +import oracle.nosql.driver.util.ByteOutputStream; +import oracle.nosql.driver.util.NettyByteInputStream; +import 
oracle.nosql.driver.util.NettyByteOutputStream; +import oracle.nosql.driver.util.SerializationUtil; +import oracle.nosql.driver.values.ArrayValue; +import oracle.nosql.driver.values.IntegerValue; +import oracle.nosql.driver.values.MapValue; +import oracle.nosql.proxy.protocol.Protocol.OpCode; +import oracle.nosql.proxy.security.SecureTestUtil; + +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Ignore; +import org.junit.Test; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.channel.Channel; +import io.netty.handler.codec.http.DefaultFullHttpRequest; +import io.netty.handler.codec.http.FullHttpRequest; +import io.netty.handler.codec.http.HttpHeaderNames; +import io.netty.handler.codec.http.HttpHeaders; +import io.netty.handler.codec.http.HttpResponseStatus; + +/** + * Tests on handling bad protocol on proxy side + */ +public class BadProtocolTest extends ProxyTestBase { + + private static final short PROXY_SERIAL_VERSION = + oracle.nosql.proxy.protocol.BinaryProtocol.SERIAL_VERSION; + + private final static String tableName = "users"; + + private final BinarySerializerFactory factory = + new BinarySerializerFactory(); + + private final MapValue key = createTestKey(1); + private final MapValue record = createTestValue(); + + private final GetRequest getRequest = new GetRequest() + .setTableName(tableName) + .setConsistency(Consistency.ABSOLUTE) + .setKey(key); + + private final PutRequest putRequest = new PutRequest() + .setTableName(tableName) + .setValue(record) + .setTTL(TimeToLive.ofDays(1)); + + private final PutRequest putIfVersionRequest = new PutRequest() + .setOption(Option.IfVersion) + .setTableName(tableName) + .setValue(record) + .setMatchVersion(genVersion()); + + private final DeleteRequest deleteRequest = new DeleteRequest() + .setTableName(tableName) + .setKey(key); + + private final DeleteRequest deleteIfVersionRequest = new DeleteRequest() + .setTableName(tableName) + .setMatchVersion(genVersion()) + .setKey(key); + + private final MultiDeleteRequest multiDeleteRequest = + new MultiDeleteRequest() + .setTableName(tableName) + .setKey(key) + .setMaxWriteKB(1024) + .setContinuationKey(genBytes(20, null)); + + private final WriteMultipleRequest writeMultipleRequest = + new WriteMultipleRequest() + .add(putRequest, false); + + private final String statement = "select * from users"; + private final PrepareRequest prepareRequest = new PrepareRequest() + .setStatement(statement); + + private final String boundStatement = "declare $id integer; " + + "select * from users where id = $id"; + private final PrepareRequest prepareBoundStmtRequest = new PrepareRequest() + .setStatement(boundStatement); + + private final TableRequest tableRequest = new TableRequest() + .setStatement(createTableDDL) + .setTableLimits(new TableLimits(50, 50, 50)); + + private final TableRequest tableSetLimitsRequest = new TableRequest() + .setTableName(tableName) + .setTableLimits(new TableLimits(50, 50, 50)); + + private final GetIndexesRequest getIndexesRequest = new GetIndexesRequest() + .setTableName(tableName) + .setIndexName("idx1"); + + private final GetTableRequest getTableRequest = new GetTableRequest() + .setTableName(tableName) + .setOperationId("1"); + + private final ListTablesRequest listTablesRequest = new ListTablesRequest() + .setStartIndex(0) + .setLimit(0); + + private final TableUsageRequest tableUsageRequest = new TableUsageRequest() + .setTableName(tableName) + .setStartTime(System.currentTimeMillis()) + 
.setEndTime(System.currentTimeMillis() + 3600_000) + .setLimit(10); + + /* Create a table */ + private final static String createTableDDL = + "CREATE TABLE IF NOT EXISTS " + tableName + "(" + + "id INTEGER, " + + "name STRING, " + + "count LONG, " + + "avg DOUBLE, " + + "sum NUMBER, " + + "exp BOOLEAN, " + + "key BINARY, " + + "map MAP(INTEGER), " + + "array ARRAY(STRING), " + + "record RECORD(rid INTEGER, rs STRING), " + + "PRIMARY KEY(id))"; + + private final static String createIndexDDL = + "CREATE INDEX IF NOT EXISTS idx1 ON " + tableName + "(name)"; + + private ByteBuf buf; + private HttpClient httpClient; + private NoSQLHandleConfig httpConfig; + private String kvRequestURI; + private int timeoutMs; + private int requestId = 0; + + @BeforeClass + public static void staticSetUp() + throws Exception { + + /* + * This test composes the request and sends it to the proxy but does not + * use the driver, so the request is not signed and therefore cannot be + * run in the cloud test. + */ + assumeTrue("Skip BadProtocolTest in cloud test", + !Boolean.getBoolean(USECLOUD_PROP)); + + ProxyTestBase.staticSetUp(); + } + + @Override + public void setUp() throws Exception { + super.setUp(); + + buf = Unpooled.buffer(); + + URL url = new URL("http", getProxyHost(), getProxyPort(), "/"); + httpConfig = new NoSQLHandleConfig(url); + + kvRequestURI = httpConfig.getServiceURL().toString() + + NOSQL_VERSION + "/" + NOSQL_DATA_PATH; + timeoutMs = httpConfig.getDefaultRequestTimeout(); + + httpClient = createHttpClient(getProxyHost(), + getProxyPort(), + httpConfig.getNumThreads(), + "BadProtocolTest", + null /* Logger */); + assertNotNull(httpClient); + + createTable(); + + if (isSecure()) { + /* warm up security caches */ + handle.put(putRequest); + handle.get(getRequest); + handle.delete(deleteRequest); + handle.getTable(new GetTableRequest().setTableName(tableName)); + handle.getTableUsage(tableUsageRequest); + handle.getIndexes(getIndexesRequest); + handle.query(createQueryWithBoundStmtRequest()); + } + } + + @Override + public void tearDown() throws Exception { + + if (buf != null) { + buf.release(buf.refCnt()); + } + + if (httpClient != null) { + httpClient.shutdown(); + } + super.tearDown(); + } + + @Before + public void setVersion() throws Exception { + /* + * This test suite is completely V2/V3-centric. So + * set the serial version to 3 if higher. + */ + forceV3((NoSQLHandleImpl)handle); + } + + /** + * Test bad protocol data on below values: + * 1. SerialVersion + * 2. OpCode + * 3. RequestTimeout + * 4. TableName + * 5. ReturnRowFlag + * 6. MapValue + * 7. IfUpdateTTL + * 8.
TTLValue + */ + @Test + public void testPutRequest() { + + final int[] lengths = { + 2 /* SerialVersion: short */, + 1 /* OpCode: byte */, + 3 /* RequestTimeout: packed int */, + 6 /* TableName: String */, + 1 /* ReturnRowFlag: boolean */, + 1 /* Durability: one byte */, + 1 /* ExactMatch: boolean */, + 1 /* IdentityCacheSize: packed int */, + 248 /* Record: MapValue */, + 1 /* IfUpdateTTL: boolean */, + 2 /* TTL: value(packed long) + unit(byte)*/ + }; + + final ByteOutputStream out = new NettyByteOutputStream(buf); + final byte[] bufBytes = serializeRequest(out, putRequest); + + try { + String test; + int offset = 0; + int pos = 0; + + test = "OK test"; + executeRequest(test, buf, 0); + + /* + * SerialVersion + */ + + /* SerialVersion: 0 */ + test = "Bad serialVersion: 0"; + buf.setShort(offset, 0); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + /* SerialVersion: PROXY_SERIAL_VERSION + 1 */ + test = "Bad serialVersion: PROXY_SERIAL_VERSION + 1"; + refillBuffer(buf, bufBytes); + buf.setShort(offset, PROXY_SERIAL_VERSION + 1); + executeRequest(test, buf, UNSUPPORTED_PROTOCOL); + + /* + * OpCode + */ + + /* Invalid OpCode */ + offset += lengths[pos++]; + test = "Bad OpCode"; + int invalidOpCode = OpCode.values().length; + refillBuffer(buf, bufBytes); + buf.setByte(offset, invalidOpCode); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + /* + * RequestTimeout + */ + + /* requestTimeout: -5000 */ + test = "Bad requestTimeout: -5000"; + offset += lengths[pos++]; + int invalidTimeout = -5000; + refillBuffer(buf, bufBytes); + setPackedInt(out, offset, invalidTimeout); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + /* + * TableName + */ + + /* Invalid TableName: null or empty string */ + String invalidTableName = null; + test = "TableName: " + invalidTableName; + offset += lengths[pos++]; + refillBuffer(buf, bufBytes); + setString(out, offset, invalidTableName); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + invalidTableName = ""; + test = "TableName: " + invalidTableName; + refillBuffer(buf, bufBytes); + setString(out, offset, invalidTableName); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + /* + * ReturnRowFlag + */ + offset += lengths[pos++]; + + /* + * Durability + * Only in V3 and above + */ + short serialVersion = ((NoSQLHandleImpl)handle).getSerialVersion(); + if (serialVersion > 2) { + offset += lengths[pos++]; + } else { + pos++; + } + + /* + * ExactMatch + */ + offset += lengths[pos++]; + + /* + * IdentityCacheSize + */ + offset += lengths[pos++]; + + /* + * MapValue + */ + offset += lengths[pos++]; + testMapValue(buf, out, bufBytes, offset, lengths[pos]); + + /* + * IfUpdateTTLFlag + */ + offset += lengths[pos++]; + + /* + * TTL + */ + long invalidTTL = -2; + offset += lengths[pos++]; + test = "TTL: " + invalidTTL; + refillBuffer(buf, bufBytes); + setPackedLong(out, offset, invalidTTL); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + test = "TTL: invalid ttl unit"; + refillBuffer(buf, bufBytes); + buf.setByte(offset + 1, -1); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + } catch (IOException ioe) { + fail("Write failed: " + ioe.getMessage()); + } finally { + out.close(); + } + } + + private void testMapValue(ByteBuf buffer, + ByteOutputStream out, + byte[] bufBytes, + int baseOffset, + int length) throws IOException { + final int headerLen = 9; /* 1(type) + 4(length) + 4 (size)/*/ + final String[] fields = new String[] { + "avg", + "array", + "record", + "name", + "count", + "sum", + "id", + "exp", + "map", + "key" + }; + final int[] 
lengths = new int[] { + 13, /* avg: DOUBLE, 4(name) + 1(type) + 8(double) */ + 36, /* array: ARRAY, 6(name) + 1(type) + 29(value) */ + 34, /* record: RECORD, 7(name) + 1(type) + 26(value) */ + 19, /* name: STRING, 5(name) + 1(type) + 13(value) */ + 16, /* count: LONG, 6(name) + 1(type) + 9(value) */ + 44, /* sum: NUMBER, 4(name) + 1(type) + 39(value) */ + 5, /* id: INTEGER, 3(name) + 1(type) + 1(value) */ + 6, /* exp: BOOLEAN, 4(name) + 1(type) + 1(value) */ + 30, /* map: MAP, 4(name) + 1(type) + 25(value) */ + 36 /* key: BINARY, 4(name) + 1(type) + 31(value) */ + }; + + final Map offsets = new HashMap(); + int offset = baseOffset + headerLen; + for (int i = 0; i < fields.length; i++) { + offsets.put(fields[i], offset); + offset += lengths[i]; + } + + offset = baseOffset; + String test; + ByteInputStream in; + int pos = 0; + int value; + String svalue; + + /* Corrupted type of top MapValue */ + value = -1; + test = "MapValue: corrupted type of top MapValue, " + value ; + refillBuffer(buffer, bufBytes); + buffer.setByte(offset, value); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + value = TYPE_INTEGER; + test = "MapValue: corrupted type of top MapValue, " + value ; + refillBuffer(buffer, bufBytes); + buffer.setByte(offset, value); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + /* Wrong length value */ + offset += 1; + refillBuffer(buffer, bufBytes); + in = new NettyByteInputStream(buffer); + value = bufBytes.length + 1; + setInt(out, offset, value); + test = "MapValue: wrong length value, " + value ; + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + refillBuffer(buffer, bufBytes); + value = -1; + setInt(out, offset, value); + test = "MapValue: wrong length value, " + value ; + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + /* Wrong size value */ + offset += 4; + refillBuffer(buffer, bufBytes); + value = -1; + setInt(out, offset, value); + test = "MapValue: wrong size value, " + value ; + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + /* + * Field: avg + */ + String fname = "avg"; + offset = offsets.get(fname); + svalue = null; + refillBuffer(buffer, bufBytes); + setString(out, offset, svalue); + test = "MapValue: field name is null" ; + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + svalue = ""; + refillBuffer(buffer, bufBytes); + setString(out, offset, svalue); + test = "MapValue: field name is empty string" ; + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + /* Corrupted value type */ + value = 100; + offset += fname.length() + 1; + test = "MapValue: corrupted type of field \"avg\", " + value ; + refillBuffer(buffer, bufBytes); + buffer.setByte(offset, value); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + /* Invalid value type for DOUBLE */ + value = TYPE_BOOLEAN; + test = "MapValue: invalid value type for field \"avg\", " + value ; + refillBuffer(buffer, bufBytes); + buffer.setByte(offset, value); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + /* + * Field: array + */ + final int[] arrayElemLens = new int[] { + 4 /* length */, + 4 /* size */, + 1 /* 1st value's type */, + 6 /* 1st value */, + 1 /* 2nd value's type */, + 6 /* 2nd value */, + 1 /* 3rd value's type */, + 6 /* 3rd value */, + }; + + pos = 0; + fname = "array"; + offset = offsets.get(fname); + + /* Invalid value type for array value */ + offset += fname.length() + 1; + value = TYPE_MAP; + test = "MapValue: invalid value type for field \"array\", " + value ; + refillBuffer(buffer, bufBytes); + buffer.setByte(offset, value); + executeRequest(test, buf, 
ILLEGAL_ARGUMENT); + + value = TYPE_INTEGER; + test = "MapValue: invalid value type for field \"array\", " + value ; + refillBuffer(buffer, bufBytes); + buffer.setByte(offset, value); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + /* Invalid length value of array value */ + length = readInt(in, offset); + offset++; + value = -1; + test = "MapValue: invalid length of \"array\", " + value ; + refillBuffer(buffer, bufBytes); + setInt(out, offset, value); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + value = length + 1; + test = "MapValue: invalid length of \"array\", " + value ; + refillBuffer(buffer, bufBytes); + setInt(out, offset, value); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + value = length - 1; + test = "MapValue: invalid length of \"array\", " + value ; + refillBuffer(buffer, bufBytes); + setInt(out, offset, value); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + /* Invalid size value of array value */ + offset += arrayElemLens[pos++]; + int size = readInt(in, offset); + value = -1; + test = "MapValue: invalid size of \"array\", " + value ; + refillBuffer(buffer, bufBytes); + setInt(out, offset, value); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + value = size + 2; + test = "MapValue: invalid size of \"array\", " + value ; + refillBuffer(buffer, bufBytes); + setInt(out, offset, value); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + /* Invalid element type */ + offset += arrayElemLens[pos++]; + value = -1; + test = "MapValue: invalid element type of \"array\", " + value ; + refillBuffer(buffer, bufBytes); + buffer.setByte(offset, value); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + value = TYPE_BINARY; + test = "MapValue: invalid element type of \"array\", " + value ; + refillBuffer(buffer, bufBytes); + buffer.setByte(offset, value); + executeRequest(test, buf, ILLEGAL_ARGUMENT); + + /* Invalid element value */ + offset += arrayElemLens[pos++]; + test = "MapValue: invalid element value of \"array\""; + refillBuffer(buffer, bufBytes); + setPackedInt(out, offset, -1); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + refillBuffer(buffer, bufBytes); + setPackedInt(out, offset, 100); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + final int[] mapElemLens = new int[] { + 4 /* length */, + 4 /* size */, + 3 /* k1 */, + 1 /* type */, + 1 /* k1's value */, + 3 /* k2 */, + 1 /* type */, + 2 /* k2's value */, + 3 /* k3 */, + 1 /* type */, + 2 /* k3's value */, + }; + + pos = 0; + fname = "map"; + offset = offsets.get(fname); + + /* Invalid value type for map value */ + offset += fname.length() + 1; + value = TYPE_ARRAY; + test = "MapValue: invalid value type for field \"map\", " + value ; + refillBuffer(buffer, bufBytes); + buffer.setByte(offset, value); + executeRequest(test, buf, ILLEGAL_ARGUMENT); + + value = TYPE_INTEGER; + test = "MapValue: invalid value type for field \"map\", " + value ; + refillBuffer(buffer, bufBytes); + buffer.setByte(offset, value); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + /* Invalid length value of map value */ + length = readInt(in, offset); + offset++; + value = -1; + test = "MapValue: invalid length of \"map\", " + value ; + refillBuffer(buffer, bufBytes); + setInt(out, offset, value); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + value = length + 1; + test = "MapValue: invalid length of \"map\", " + value ; + refillBuffer(buffer, bufBytes); + setInt(out, offset, value); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + value = length - 1; + test = "MapValue: 
invalid length of \"map\", " + value ; + refillBuffer(buffer, bufBytes); + setInt(out, offset, value); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + /* Invalid size value of map value */ + offset += mapElemLens[pos++]; + size = readInt(in, offset); + value = -1; + test = "MapValue: invalid size of \"map\", " + value ; + refillBuffer(buffer, bufBytes); + setInt(out, offset, value); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + value = size + 2; + test = "MapValue: invalid size of \"map\", " + value ; + refillBuffer(buffer, bufBytes); + setInt(out, offset, value); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + /* Invalid key */ + offset += mapElemLens[pos++]; + svalue = null; + test = "MapValue: invalid key \"map\", " + value ; + refillBuffer(buffer, bufBytes); + setString(out, offset, svalue); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + /* Invalid element type */ + offset += mapElemLens[pos++]; + value = -1; + test = "MapValue: invalid element type of \"map\", " + value ; + refillBuffer(buffer, bufBytes); + buffer.setByte(offset, value); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + value = TYPE_BINARY; + test = "MapValue: invalid element type of \"map\", " + value ; + refillBuffer(buffer, bufBytes); + buffer.setByte(offset, value); + executeRequest(test, buf, ILLEGAL_ARGUMENT); + + /* Invalid element value */ + offset += mapElemLens[pos++]; + test = "MapValue: invalid element value of \"map\""; + refillBuffer(buffer, bufBytes); + buffer.setByte(offset, -1); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + test = "MapValue: invalid element value of \"map\""; + refillBuffer(buffer, bufBytes); + buffer.setByte(offset, 0); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + value = TYPE_STRING; + test = "MapValue: invalid element value of \"map\", " + value ; + refillBuffer(buffer, bufBytes); + buffer.setByte(offset - 1, value); + setString(out, offset, null); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + value = TYPE_STRING; + test = "MapValue: invalid element value of \"map\", " + value ; + refillBuffer(buffer, bufBytes); + buffer.setByte(offset - 1, value); + setString(out, offset, ""); + executeRequest(test, buf, ILLEGAL_ARGUMENT); + + /* Record value */ + final int[] recordElemLens = new int[] { + 4 /*length*/, + 4 /*size*/, + 3 /*record.ri's name*/, + 1 /*record.ri's type*/, + 2 /*record.ri's value*/, + 3 /*record.rs's name*/, + 1 /*record.rs's type*/, + 13 /*record.rs's value*/, + }; + + pos = 0; + fname = "record"; + offset = offsets.get(fname); + + /* Invalid value type for RECORD */ + offset += fname.length() + 1; + value = TYPE_INTEGER; + test = "MapValue: invalid value type for field \"record\", " + value ; + refillBuffer(buffer, bufBytes); + buffer.setByte(offset, value); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + /* Invalid length value of RECORD */ + length = readInt(in, offset); + offset++; + value = -1; + test = "MapValue: invalid length of \"record\", " + value ; + refillBuffer(buffer, bufBytes); + setInt(out, offset, value); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + value = length + 1; + test = "MapValue: invalid length of \"record\", " + value ; + refillBuffer(buffer, bufBytes); + setInt(out, offset, value); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + value = length - 1; + test = "MapValue: invalid length of \"record\", " + value ; + refillBuffer(buffer, bufBytes); + setInt(out, offset, value); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + /* Invalid size value of 
record */ + offset += recordElemLens[pos++]; + size = readInt(in, offset); + value = -1; + test = "MapValue: invalid size of \"record\", " + value ; + refillBuffer(buffer, bufBytes); + setInt(out, offset, value); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + value = size + 2; + test = "MapValue: invalid size of \"record\", " + value ; + refillBuffer(buffer, bufBytes); + setInt(out, offset, value); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + /* Invalid field name */ + offset += recordElemLens[pos++]; + svalue = null; + test = "MapValue: invalid field name of \"record\", " + value ; + refillBuffer(buffer, bufBytes); + setString(out, offset, svalue); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + svalue = ""; + test = "MapValue: invalid field name of \"record\", " + value ; + refillBuffer(buffer, bufBytes); + setString(out, offset, svalue); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + } + + /** + * Test bad protocol data on below values: + * 1. Version + */ + @Test + public void testPutIfVersionRequst() { + + final int[] lengths = { + 2 /* SerialVersion: short */, + 1 /* OpCode: byte */, + 3 /* RequestTimeout: packed int */, + 6 /* TableName: String */, + 1 /* ReturnRowFlag: boolean */, + 1 /* Durability: one byte */, + 1 /* ExactMatch: boolean */, + 1 /* IdentityCacheSize: packed int */, + 248 /* Record: MapValue */, + 1 /* IfUpdateTTL: boolean */, + 1 /* TTL: packed long (-1) */, + 51 /* Version: byte array */ + }; + + final ByteOutputStream out = new NettyByteOutputStream(buf); + final byte[] bufBytes = serializeRequest(out, putIfVersionRequest); + + try { + String test; + int offset = 0; + + test = "OK test"; + executeRequest(test, buf, 0); + + /* + * Version + */ + for (int i = 0; i < lengths.length - 1; i++) { + offset += lengths[i]; + } + byte[] versionBytes = null; + test = "Version: null"; + refillBuffer(buf, bufBytes); + setByteArray(out, offset, versionBytes); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + versionBytes = new byte[0]; + test = "Version: empty byte array"; + refillBuffer(buf, bufBytes); + + setByteArray(out, offset, versionBytes); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + versionBytes = genBytes(10, null); + test = "Version: invalid binary format"; + refillBuffer(buf, bufBytes); + setByteArray(out, offset, versionBytes); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + } catch (IOException ioe) { + fail("Write failed: " + ioe.getMessage()); + } finally { + out.close(); + } + } + + /** + * Test bad protocol data on below values: + * 1. Consistency + * 2. 
PrimaryKey type + */ + @Test + public void testGetRequest() { + + final int[] lengths = { + 2 /* SerialVersion: short*/, + 1 /* OpCode: byte*/, + 3 /* RequestTimeout: packed int */, + 6 /* TableName: string */, + 1 /* Consistency: boolean */, + 14 /* Key: 1(TYPE_MAP) + 4(length) + 4(size) + 3("id") + + 1(TYPE_INT) + 1(1-value) */ + }; + + final ByteOutputStream out = new NettyByteOutputStream(buf); + final byte[] bufBytes = serializeRequest(out, getRequest); + + try { + String test; + int pos; + int offset = 0; + + test = "OK test"; + executeRequest(test, buf, 0); + + /* + * Consistency + */ + + /* Move to offset of consistency */ + for (pos = 0; pos < 4; pos++) { + offset += lengths[pos]; + } + + /* Invalid consistency type */ + int value = -1; + test = "Invalid consistency type: " + value; + refillBuffer(buf, bufBytes); + buf.setByte(offset, value); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + value = 3; + test = "Invalid consistency type: " + value; + refillBuffer(buf, bufBytes); + buf.setByte(offset, value); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + /* + * PrimaryKey + */ + offset += lengths[pos++]; + + value = -1; + test = "Invalid value type of PrimaryKey: " + value; + refillBuffer(buf, bufBytes); + buf.setByte(offset, value); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + value = TYPE_ARRAY; + test = "Invalid value type of PrimaryKey: " + value; + refillBuffer(buf, bufBytes); + buf.setByte(offset, value); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + } finally { + out.close(); + } + } + + /** + * Test bad protocol on below values: + * 1. MaxWriteKB + * 2. ContinuationKey + */ + @Test + public void testMultiDeleteRequest() { + final int[] lengths = { + 2 /* SerialVersion: short */, + 1 /* OpCode: byte */, + 3 /* RequestTimeout: packed int */, + 6 /* TableName: string */, + 1 /* Durability: one byte */, + 14 /* FieldValue: MapValue */, + 1 /* HasFieldRange: boolean */, + 3 /* MaxWriteKB: packed int */, + 21 /* ContinuationKey: byte array */ + }; + + final ByteOutputStream out = new NettyByteOutputStream(buf); + final byte[] bufBytes = serializeRequest(out, multiDeleteRequest); + + try { + String test; + int pos; + int offset = 0; + + test = "OK test"; + executeRequest(test, buf, 0); + + /* Move to offset of MaxWriteKB */ + for (pos = 0; pos < 7; pos++) { + offset += lengths[pos]; + } + + /* + * MaxWriteKB + */ + int value = -1; + test = "Invalid maxWriteKB: " + value; + refillBuffer(buf, bufBytes); + setPackedInt(out, offset, value); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + if (!onprem) { + value = rlimits.getRequestWriteKBLimit() + 1; + test = "Invalid maxWriteKB: " + value; + refillBuffer(buf, bufBytes); + setPackedInt(out, offset, value); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + } + + /* + * Continuation Key + */ + offset += lengths[pos]; + value = -2; + test = "Invalid continuation key: " + value; + refillBuffer(buf, bufBytes); + setPackedInt(out, offset, value); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + value = 100; + test = "Invalid continuation key: " + value; + refillBuffer(buf, bufBytes); + setPackedInt(out, offset, value); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + } catch (IOException ioe) { + fail("Failed to write to buffer: " + ioe); + } finally { + out.close(); + } + } + + /** + * Test bad protocol on below values: + * 1. 
Statement + */ + @Test + public void testPrepareStatement() { + final int[] lengths = new int[] { + 2 /* SerialVersion: short */, + 1 /* OpCode: byte */, + 3 /* RequestTimeout: packed int */, + 20 /* Statement: string */ + }; + + final ByteOutputStream out = new NettyByteOutputStream(buf); + final byte[] bufBytes = serializeRequest(out, prepareRequest); + + try { + String test; + int pos; + int offset = 0; + + test = "OK test"; + executeRequest(test, buf, 0); + + /* + * Statement + */ + for (pos = 0; pos < 3; pos++) { + offset += lengths[pos]; + } + + String svalue = null; + test = "Invalid statement: " + svalue; + refillBuffer(buf, bufBytes); + setString(out, offset, svalue); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + svalue = ""; + test = "Invalid statement: " + svalue; + refillBuffer(buf, bufBytes); + setString(out, offset, svalue); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + int value = statement.length() + 1; + test = "Invalid statement value, its length is " + value; + refillBuffer(buf, bufBytes); + setPackedInt(out, offset, statement.length() + 1); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + value = -2; + test = "Invalid statement value, its length is " + value; + refillBuffer(buf, bufBytes); + setPackedInt(out, offset, value); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + } catch (IOException ioe) { + fail("Failed to write to buffer: " + ioe); + } finally { + out.close(); + } + } + + /** + * Test bad protocol on below values: + * 1. PreparedStatement + * 2. Variables Number + * 3. Variable Name + * 4. Variable Value + */ + @Test + public void testQueryRequest() { + final QueryRequest queryReq = createQueryWithBoundStmtRequest(); + + final int prepStmtLen = + 4 /* int, length of PreparedStatement */+ + queryReq.getPreparedStatement().getStatement().length; + + final int[] lengths = { + 2 /* SerialVersion: short*/, + 1 /* OpCode: byte */, + 3 /* RequestTimeout: packed int */, + 1 /* Consistency: byte */, + 1 /* NumberLimit: packed int */, + 3 /* MaxReadKB: packed int */, + 1 /* ContinuationKey: byte array */, + 1 /* IsPreparedStatement: boolean */, + 2 /* QueryVersion: short */, + 1 /* traceLevel: packed int */, + 1 /* MaxWriteKB: packed int */, + 1 /* MathContext: byte */, + 1 /* ToplogySeqNum: packed int */, + 1 /* ShardId: packed int */, + 1 /* isSimpleQuery: boolean */, + prepStmtLen /* PreparedStatement: byte array */, + 1 /* VariablesNumber: packed int */, + 4 /* VariableName: string */, + 2 /* VariableValue: INT_TYPE + packed int */ + }; + + final ByteOutputStream out = new NettyByteOutputStream(buf); + final byte[] bufBytes = serializeRequest(out, queryReq); + + try { + String test; + int pos; + int offset = 0; + + test = "OK test"; + executeRequest(test, buf, 0); + + /* + * PreparedStatement + */ + for (pos = 0; pos < 15; pos++) { + offset += lengths[pos]; + } + + int value = -1; + test = "Invalid prepared Statement"; + refillBuffer(buf, bufBytes); + setInt(out, offset, value); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + value = 0; + test = "Invalid prepared Statement"; + refillBuffer(buf, bufBytes); + setInt(out, offset, value); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + /* + * Variables number + */ + value = -1; + offset += lengths[pos++]; + test = "Invalid variable number: " + value; + refillBuffer(buf, bufBytes); + setPackedInt(out, offset, value); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + value = 2; + test = "Invalid variable number: " + value; + refillBuffer(buf, bufBytes); + 
setPackedInt(out, offset, value); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + /* + * Variable name + */ + offset += lengths[pos++]; + String svalue = null; + test = "Invalid variable name: " + svalue; + refillBuffer(buf, bufBytes); + setString(out, offset, svalue); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + svalue = ""; + test = "Invalid variable name: " + svalue; + refillBuffer(buf, bufBytes); + setString(out, offset, svalue); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + /* + * Variable value + */ + offset += lengths[pos++]; + value = -1; + test = "Invalid variable value type: " + value; + refillBuffer(buf, bufBytes); + buf.setByte(offset, value); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + value = TYPE_ARRAY; + test = "Invalid variable value type: " + value; + refillBuffer(buf, bufBytes); + buf.setByte(offset, value); + executeRequest(test, buf, ILLEGAL_ARGUMENT); + + } catch (IOException ioe) { + fail("Failed to write to buffer: " + ioe); + } finally { + out.close(); + } + } + + /** + * Test bad protocol on below values: + * 1. Operation number + * 2. OpCode of sub request + */ + @Test + public void testWriteMultipleRequest() { + final int[] lengths = { + 2 /* SerialVersion: short */, + 1 /* OpCode: byte */, + 3 /* RequestTimeout: packed int */, + 6 /* TableName: string */, + 1 /* OperationNum: packed int */, + 1 /* Durability: one byte */, + 1 /* isAbortIfUnsuccessful: boolean */, + 253 /* Request */ + }; + + final WriteMultipleRequest umReq = writeMultipleRequest; + final ByteOutputStream out = new NettyByteOutputStream(buf); + final byte[] bufBytes = serializeRequest(out, umReq); + + try { + String test; + int pos; + int offset = 0; + + test = "OK test"; + executeRequest(test, buf, 0); + + /* + * Operation number + */ + for (pos = 0; pos < 4; pos++) { + offset += lengths[pos]; + } + + int value = -1; + test = "Invalid operation number: " + value; + refillBuffer(buf, bufBytes); + setPackedInt(out, offset, value); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + value = 3; + test = "Invalid operation number: " + value; + refillBuffer(buf, bufBytes); + setPackedInt(out, offset, value); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + /* durability */ + value = 4; /* bad: only one of three values set */ + offset += lengths[pos++]; + test = "Invalid durability: " + value; + refillBuffer(buf, bufBytes); + buf.setByte(offset, value); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + /* + * OpCode of sub request + */ + offset += lengths[pos++]; /* isAbortIfUnsuccessful */ + offset += lengths[pos++]; + + value = -1; + test = "Invalid operation code: " + value; + refillBuffer(buf, bufBytes); + buf.setByte(offset, value); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + value = OpCode.GET.ordinal(); + test = "Invalid operation code: " + value; + refillBuffer(buf, bufBytes); + buf.setByte(offset, value); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + } catch (IOException ioe) { + fail("Failed to write to buffer: " + ioe); + } finally { + out.close(); + } + } + + /** + * Test bad protocol on below values: + * 1. ReadKB + * 2. WriteKB + * 3. 
StorageGB + */ + @Test + public void testTableRequest() { + final int[] lengths = { + 2 /* SerialVersion: short */, + 1 /* OpCode: byte */, + 3 /* RequestTimeout: packed int */, + 215 /* Statement: string */, + 1 /* HasLimit: boolean */, + 4 /* ReadKB: int */, + 4 /* WriteKB: int */, + 4 /* StorageGB: int */, + 1 /* LimitsMode: byte */, + 1 /* HasTableName: boolean */ + }; + + final ByteOutputStream out = new NettyByteOutputStream(buf); + final byte[] bufBytes = serializeRequest(out, tableRequest); + + try { + String test; + int pos; + int offset = 0; + + test = "OK test"; + executeRequest(test, buf, 0); + + /* + * ReadKB + */ + for (pos = 0; pos < 5; pos++) { + offset += lengths[pos]; + } + + int value = 0; + test = "Invalid readKB: " + value; + refillBuffer(buf, bufBytes); + setInt(out, offset, value); + executeRequest(test, buf, ILLEGAL_ARGUMENT); + + /* + * WriteKB + */ + offset += lengths[pos++]; + value = 0; + test = "Invalid writeKB: " + value; + refillBuffer(buf, bufBytes); + setInt(out, offset, value); + executeRequest(test, buf, ILLEGAL_ARGUMENT); + + /* + * StorageMaxGB + */ + offset += lengths[pos++]; + value = 0; + test = "Invalid StorageMaxGB: " + value; + refillBuffer(buf, bufBytes); + setInt(out, offset, value); + executeRequest(test, buf, ILLEGAL_ARGUMENT); + + } catch (Exception ioe) { + fail("Failed to write to buffer: " + ioe); + } finally { + out.close(); + } + } + + @Test + public void testGetIndexesRequest() { + final int[] lengths = { + 2 /* SerialVersion: short */, + 1 /* OpCode: byte */, + 3 /* RequestTimeout: packed int */, + 6 /* TableName: string */, + 1 /* HasIndex: boolean */, + 5 /* IndexName: string */ + }; + + final ByteOutputStream out = new NettyByteOutputStream(buf); + final byte[] bufBytes = serializeRequest(out, getIndexesRequest); + + try { + String test; + int pos; + int offset = 0; + + test = "OK test"; + executeRequest(test, buf, 0); + + /* + * Index name + */ + for (pos = 0; pos < 5; pos++) { + offset += lengths[pos]; + } + + String svalue = null; + test = "Invalid Index name: " + svalue; + refillBuffer(buf, bufBytes); + setString(out, offset, svalue); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + svalue = ""; + test = "Invalid Index name: " + svalue; + refillBuffer(buf, bufBytes); + setString(out, offset, svalue); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + } catch (Exception ioe) { + fail("Failed to write to buffer: " + ioe); + } finally { + out.close(); + } + } + + @Test + public void testListTablesRequest() { + final int[] lengths = { + 2 /* SerialVersion: short */, + 1 /* OpCode: byte */, + 3 /* RequestTimeout: packed int */, + 4 /* StartIndex: int */, + 4 /* Limit: int*/ + }; + + final ByteOutputStream out = new NettyByteOutputStream(buf); + final byte[] bufBytes = serializeRequest(out, listTablesRequest); + + try { + String test; + int pos; + int offset = 0; + + test = "OK test"; + executeRequest(test, buf, 0); + + /* + * Start index + */ + for (pos = 0; pos < 3; pos++) { + offset += lengths[pos]; + } + + int value = -1; + test = "Invalid start index: " + value; + refillBuffer(buf, bufBytes); + setInt(out, offset, value); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + /* + * Limit + */ + offset += lengths[pos++]; + test = "Invalid limit: " + value; + refillBuffer(buf, bufBytes); + setInt(out, offset, value); + executeRequest(test, buf, BAD_PROTOCOL_MESSAGE); + + } catch (Exception ioe) { + fail("Failed to write to buffer: " + ioe); + } finally { + out.close(); + } + } + + @Test + public void testBrokenRequest() 
{ + final Request[] requests = new Request[] { + getRequest, + putRequest, + putIfVersionRequest, + deleteRequest, + deleteIfVersionRequest, + multiDeleteRequest, + writeMultipleRequest, + prepareRequest, + createQueryWithBoundStmtRequest(), + tableRequest, + tableSetLimitsRequest, + getIndexesRequest, + getTableRequest, + listTablesRequest, + tableUsageRequest + }; + + final ByteOutputStream out = new NettyByteOutputStream(buf); + for (Request request : requests) { + buf.clear(); + serializeRequest(out, request); + testBrokenMessage(request.getClass().getName(), buf, 1, + BAD_PROTOCOL_MESSAGE); + } + out.close(); + } + + /* + * TODO: Enable this test after enhancing the validation check, especially + * for serialized PreparedStatement. + */ + @Ignore + public void testRandomCorruptedRequest() { + final Request[] requests = new Request[] { + getRequest, + putRequest, + putIfVersionRequest, + deleteRequest, + deleteIfVersionRequest, + multiDeleteRequest, + writeMultipleRequest, + prepareRequest, + createQueryWithBoundStmtRequest(), + tableRequest, + tableSetLimitsRequest, + getIndexesRequest, + getTableRequest, + listTablesRequest, + tableUsageRequest + }; + + final ByteOutputStream out = new NettyByteOutputStream(buf); + final Random rand = new Random(System.currentTimeMillis()); + final int round = 10; + + for (Request request : requests) { + buf.clear(); + byte[] bufBytes = serializeRequest(out, request); + for (int i = 0; i < round; i++) { + final int offset = rand.nextInt(buf.writerIndex() - 2); + byte[] corruptedBytes = corruptBuffer(buf, rand, offset); + executeRequest(request.getClass().getName(), + buf, + -1/* don't check error code */, + new ExecuteFailHandler() { + @Override + public void fail(String test, Throwable t) { + printBytes(test + " offset=" + offset, + corruptedBytes); + } + } + ); + refillBuffer(buf, bufBytes); + } + } + out.close(); + } + + /* + * Test the check on the request size limit on the proxy.
+ */ + @Test + public void testRequestSizeLimit() { + assumeTrue(onprem == false); + + int limit = rlimits.getRequestSizeLimit(); + final ByteOutputStream out = new NettyByteOutputStream(buf); + + try { + String test = "Put request with size > " + limit; + PutRequest putReq = new PutRequest() + .setTableName(tableName) + .setValue(new MapValue() + .put("id", 0) + .put("key", genBytes(limit, null))); + + serializeRequest(out, putReq); + executeRequest(test, buf, REQUEST_SIZE_LIMIT_EXCEEDED); + } finally { + out.close(); + } + } + + private byte[] corruptBuffer(ByteBuf buffer, Random rand, int offset) { + int len = rand.nextInt(buffer.writerIndex() - offset); + byte[] bytes = genBytes(len, rand); + buffer.setBytes(offset, bytes); + return bytes; + } + + private QueryRequest createQueryWithBoundStmtRequest() { + final PrepareResult prepRet = handle.prepare(prepareBoundStmtRequest); + prepRet.getPreparedStatement() + .setVariable("$id", new IntegerValue(1)); + + final QueryRequest queryReq = new QueryRequest() + .setPreparedStatement(prepRet) + .setMaxReadKB(1024) + .setLimit(100); + return queryReq; + } + + private byte[] serializeRequest(ByteOutputStream out, Request request) { + + request.setDefaults(httpConfig); + + Serializer ser = request.createSerializer(factory); + try { + short serialVersion = ((NoSQLHandleImpl)handle).getSerialVersion(); + out.writeShort(serialVersion); + if (request instanceof QueryRequest || + request instanceof PrepareRequest) { + ser.serialize(request, serialVersion, + QueryDriver.QUERY_V3, out); + } else { + ser.serialize(request, serialVersion, out); + } + } catch (IOException e) { + fail("Failed to serialize put request"); + } + + final byte[] bytes = new byte[buf.writerIndex()]; + System.arraycopy(buf.array(), 0, bytes, 0, bytes.length); + + return bytes; + } + + private void testBrokenMessage(String name, + ByteBuf buffer, + int offset, + int errCode) { + for (int i = 0; i < buffer.writerIndex() - 1; i++) { + buffer.readerIndex(0); + buffer.writerIndex(offset + i); + executeRequest("testBrokenMessage - " + name + ": " + + buffer.writerIndex(), buffer, errCode); + } + } + + private void executeRequest(String test, ByteBuf buffer, int expErrCode) { + executeRequest(test, buffer, expErrCode, null); + } + + private void executeRequest(String test, + ByteBuf buffer, + int expErrCode, + ExecuteFailHandler failHandler) { + + ResponseHandler responseHandler = null; + ByteInputStream bis = null; + + /* Increase reference count of buffer by 1*/ + buffer.retain(); + + try { + Channel channel = httpClient.getChannel(timeoutMs); + responseHandler = new ResponseHandler(httpClient, null, channel); + + final FullHttpRequest request = + new DefaultFullHttpRequest(HTTP_1_1, POST, kvRequestURI, + buffer, + false /* Don't validate hdrs */); + HttpHeaders headers = request.headers(); + headers.add(HttpHeaderNames.HOST, getProxyHost()) + .add(REQUEST_ID_HEADER, nextRequestId()) + .set(CONTENT_TYPE, "application/octet-stream") + .set(CONNECTION, "keep-alive") + .set(ACCEPT, "application/octet-stream") + .setInt(CONTENT_LENGTH, buffer.readableBytes()); + + if (!onprem) { + headers.set(AUTHORIZATION, SecureTestUtil.getAuthHeader( + getTenantId(), isSecure())); + } + if (isSecure()) { + headers.add(REQUEST_COMPARTMENT_ID, getTenantId()); + } + + httpClient.runRequest(request, responseHandler, channel); + + assertFalse("Request timed out after " + timeoutMs + " ms", + responseHandler.await(timeoutMs)); + /* Validates the response from proxy */ + assertEquals(HttpResponseStatus.OK, 
responseHandler.getStatus()); + bis = new NettyByteInputStream(responseHandler.getContent()); + int errCode = bis.readByte(); + if (expErrCode >= 0) { + if (expErrCode == errCode) { + return; + } + /* support V4 server error codes */ + if (errCode == 6) { /* nson MAP */ + errCode = getV4ErrorCode(responseHandler.getContent()); + } + assertEquals(test + " failed", expErrCode, errCode); + } + } catch (Throwable t) { + if (failHandler != null) { + failHandler.fail(test, t); + } + fail(test + " failed: " + t); + } finally { + if (bis != null) { + bis.close(); + } + if (responseHandler != null) { + responseHandler.close(); + } + } + } + + private String nextRequestId() { + return String.valueOf(requestId++); + } + + private void refillBuffer(ByteBuf buffer, byte[] bytes) { + buffer.setBytes(0, bytes); + buffer.readerIndex(0); + buffer.writerIndex(bytes.length); + } + + private void setPackedInt(ByteOutputStream out, int offset, int value) + throws IOException { + + int savedOffset = out.getOffset(); + out.setWriteIndex(offset); + SerializationUtil.writePackedInt(out, value); + out.setWriteIndex(savedOffset); + } + + private void setInt(ByteOutputStream out, int offset, int value) + throws IOException { + + int savedOffset = out.getOffset(); + out.setWriteIndex(offset); + out.writeInt(value); + out.setWriteIndex(savedOffset); + } + + private void setPackedLong(ByteOutputStream out, int offset, long value) + throws IOException { + + int savedOffset = out.getOffset(); + out.setWriteIndex(offset); + SerializationUtil.writePackedLong(out, value); + out.setWriteIndex(savedOffset); + } + + private void setString(ByteOutputStream out, int offset, String value) + throws IOException { + + int savedOffset = out.getOffset(); + out.setWriteIndex(offset); + SerializationUtil.writeString(out, value); + out.setWriteIndex(savedOffset); + } + + private void setByteArray(ByteOutputStream out, int offset, byte[] bytes) + throws IOException { + + int savedOffset = out.getOffset(); + out.setWriteIndex(offset); + SerializationUtil.writeByteArray(out, bytes); + out.setWriteIndex(savedOffset); + } + + private int readInt(ByteInputStream in, int offset) + throws IOException { + + int savedOffset = in.getOffset(); + in.setOffset(offset); + int value = in.readInt(); + in.setOffset(savedOffset); + return value; + } + + private void createTable() { + tableOperation(handle, createTableDDL, + new TableLimits(20000, 20000, 50), + TableResult.State.ACTIVE, 10000); + tableOperation(handle, createIndexDDL, null, + TableResult.State.ACTIVE, 10000); + } + + private Version genVersion() { + final UUID uuid = UUID.randomUUID(); + final long vlsn = 123456789; + final long lsn = 0x1234567812345678L; + final RepNodeId repNodeId = new RepNodeId(1234, 1234); + final oracle.kv.Version kvVersion = + new oracle.kv.Version(uuid, vlsn, repNodeId, lsn); + return Version.createVersion(kvVersion.toByteArray()); + } + + private MapValue createTestValue() { + MapValue row = new MapValue(); + row.put("id", 1); + row.put("name", "string value"); + row.put("count", Long.MAX_VALUE); + row.put("avg", Double.MAX_VALUE); + row.put("sum", new BigDecimal("12345678901234567890123456789012345678")); + row.put("exp", true); + row.put("key", genBytes(30, null)); + + MapValue map = new MapValue(); + map.put("k1", 100); + map.put("k2", 200); + map.put("k3", 300); + row.put("map", map); + + ArrayValue array = new ArrayValue(); + array.add("elem1"); + array.add("elem2"); + array.add("elem3"); + row.put("array", array); + + MapValue rec = new MapValue(); + 
rec.put("rid", 1024); + rec.put("rs", "nosql"); + row.put("record", rec); + + return row; + } + + private MapValue createTestKey(int id) { + return new MapValue().put("id", id); + } + + private byte[] genBytes(int length, Random rand) { + byte[] bytes = new byte[length]; + for (int i = 0; i < bytes.length; i++) { + bytes[i] = (rand == null)? (byte)(i % 256) : + (byte)rand.nextInt(256); + } + return bytes; + } + + private static void printBytes(String title, byte[] bytes) { + final char[] hexArray = "0123456789ABCDEF".toCharArray(); + StringBuilder sb = new StringBuilder(title); + sb.append("["); + sb.append(bytes.length); + sb.append("]"); + for (int j = 0; j < bytes.length; j++ ) { + int v = bytes[j] & 0xFF; + if (j % 5 == 0) { + sb.append("\n\t"); + } + sb.append("(byte)0x"); + sb.append(hexArray[v >>> 4]); + sb.append(hexArray[v & 0x0F]); + sb.append(", "); + } + System.out.println(sb.toString()); + } + + /** + * For debug purpose. + */ + @SuppressWarnings("unused") + private byte[] corruptBuffer(ByteBuf buffer) { + int offset = 18; + byte[] bytes = new byte[] { +(byte)0xA1, (byte)0x76, (byte)0x46, (byte)0x11, (byte)0x0C, +(byte)0xD8, (byte)0x25, (byte)0x66, + }; + buffer.setBytes(offset, bytes); + return bytes; + } + + /* + * Interface invoked by executeRequest() when fails. + */ + @FunctionalInterface + private interface ExecuteFailHandler { + void fail(String test, Throwable ex); + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/ChildTableTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/ChildTableTest.java new file mode 100644 index 00000000..32a02544 --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/ChildTableTest.java @@ -0,0 +1,1588 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2011, 2020 Oracle and/or its affiliates. All rights reserved. 
+ * + */ +package oracle.nosql.proxy; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeTrue; + +import java.util.Arrays; +import java.util.List; + +import org.junit.Test; + +import oracle.nosql.driver.IndexExistsException; +import oracle.nosql.driver.IndexLimitException; +import oracle.nosql.driver.IndexNotFoundException; +import oracle.nosql.driver.KeySizeLimitException; +import oracle.nosql.driver.RowSizeLimitException; +import oracle.nosql.driver.TableExistsException; +import oracle.nosql.driver.TableLimitException; +import oracle.nosql.driver.TableNotFoundException; +import oracle.nosql.driver.TimeToLive; +import oracle.nosql.driver.Version; +import oracle.nosql.driver.ops.DeleteRequest; +import oracle.nosql.driver.ops.DeleteResult; +import oracle.nosql.driver.ops.GetRequest; +import oracle.nosql.driver.ops.GetResult; +import oracle.nosql.driver.ops.ListTablesResult; +import oracle.nosql.driver.ops.PrepareRequest; +import oracle.nosql.driver.ops.PrepareResult; +import oracle.nosql.driver.ops.PutRequest; +import oracle.nosql.driver.ops.PutRequest.Option; +import oracle.nosql.driver.ops.PutResult; +import oracle.nosql.driver.ops.QueryRequest; +import oracle.nosql.driver.ops.QueryResult; +import oracle.nosql.driver.ops.TableLimits; +import oracle.nosql.driver.ops.TableResult; +import oracle.nosql.driver.ops.TableResult.State; +import oracle.nosql.driver.ops.TableUsageRequest; +import oracle.nosql.driver.ops.WriteMultipleRequest; +import oracle.nosql.driver.ops.WriteMultipleRequest.OperationRequest; +import oracle.nosql.driver.ops.WriteMultipleResult; +import oracle.nosql.driver.ops.WriteMultipleResult.OperationResult; +import oracle.nosql.driver.values.MapValue; + +/** + * Test child table operations and data access operations. + */ +public class ChildTableTest extends ProxyTestBase { + private final static TableLimits limits = new TableLimits(500, 500, 1); + private final static int WAIT_MS = 10000; + + private final static String createTDdl = + "create table t(id integer, name string, s string, primary key(id))"; + private final static String createTADdl = + "create table t.a(ida integer, name string, s string, primary key(ida))"; + private final static String createIfNotExistsTADdl = + "create table if not exists t.a(" + + "ida integer, name string, s string, primary key(ida))"; + private final static String createTABDdl = + "create table t.a.b(idb integer, name string, s string, primary key(idb))"; + private final static String createTGDdl = + "create table t.g(idg integer, name string, s string, primary key(idg))"; + private final static String createXDdl = + "create table x(id integer, name string, s string, primary key(id))"; + + /** + * Test child table related table operations: + * 1. create table + * 2. get table + * 3. list tables + * 4. create/drop index + * 5. alter table + * 6. 
drop table + */ + @Test + public void testBasicTableOps() { + + TableResult tr; + + /* + * Create table + */ + tr = tableOperation(handle, createTDdl, limits, WAIT_MS); + if (!onprem) { + assertNotNull(tr.getTableLimits()); + } + checkTableInfo(tr, "t", limits); + + tr = tableOperation(handle, createTADdl, null, WAIT_MS); + checkTableInfo(tr, "t.a", null); + + /* create table t.a again, expect to get TableExistsException */ + tableOperation(handle, createTADdl, + null /* tableLimits */, + null /* tableName */, + TableResult.State.ACTIVE, + TableExistsException.class); + + /* creating table with if not exists should succeed */ + tr = tableOperation(handle, createIfNotExistsTADdl, null, WAIT_MS); + + tr = tableOperation(handle, createTABDdl, null, WAIT_MS); + checkTableInfo(tr, "t.a.b", null); + + tr = tableOperation(handle, createTGDdl, null, WAIT_MS); + checkTableInfo(tr, "t.g", null); + + /* + * Get table + */ + tr = getTable("t.a", handle); + checkTableInfo(tr, "t.a", null); + + tr = getTable("t.a.b", handle); + checkTableInfo(tr, "t.a.b", null); + + /* + * List tables + */ + if (!onprem) { + ListTablesResult lsr = listTables(handle); + assertEquals(4, lsr.getTables().length); + String[] tables = lsr.getTables(); + Arrays.sort(tables); + assertTrue(Arrays.equals(new String[] {"t", "t.a", "t.a.b", "t.g"}, + tables)); + } + + /* + * Create/Drop index + */ + String ddl; + + ddl = "create index idx1 on t.a.b(s)"; + tr = tableOperation(handle, ddl, null, WAIT_MS); + + /* create index again, expect to get IndexExistsException */ + tableOperation(handle, ddl, null, null, TableResult.State.ACTIVE, + IndexExistsException.class); + + ddl = "create index if not exists idx1 on t.a.b(s)"; + tr = tableOperation(handle, ddl, null, WAIT_MS); + + ddl = "drop index idx1 on t.a.b"; + tr = tableOperation(handle, ddl, null, WAIT_MS); + + /* drop index again, expect to get IndexNotFoundException */ + tableOperation(handle, ddl, null, null, TableResult.State.ACTIVE, + IndexNotFoundException.class); + + ddl = "drop index if exists idx1 on t.a.b"; + tr = tableOperation(handle, ddl, null, WAIT_MS); + + /* + * Alter table + */ + ddl = "alter table t.a (add i integer)"; + tr = tableOperation(handle, ddl, null, WAIT_MS); + + ddl = "alter table t.g (drop s)"; + tr = tableOperation(handle, ddl, null, WAIT_MS); + + /* + * Drop table + */ + + ddl = "drop table t.a.b"; + tableOperation(handle, ddl, null, WAIT_MS); + + ddl = "drop table if exists t.a.b"; + tableOperation(handle, ddl, null, WAIT_MS); + } + + /** + * Test that the child table will be counted against the tenancy's total + * number of tables + */ + @Test + public void testLimitTables() { + assumeTrue("Skipping testLimitTables if not minicloud or cloud test " + + "or tenantLimits is not provided", + cloudRunning && tenantLimits != null); + + final int tableLimit = tenantLimits.getNumTables(); + if (tableLimit > NUM_TABLES) { + /* + * To prevent this test from running too long, skip the test if the + * table number limit > ProxyTestBase.NUM_TABLES + */ + return; + } + + String ddl = "create table p(id integer, s string, primary key(id))"; + tableOperation(handle, ddl, new TableLimits(50, 50, 1), WAIT_MS); + + String fmt = "create table %s(%s integer, s string, primary key(%s))"; + for (int i = 0; i < tableLimit; i++) { + String table = "p.c" + i; + ddl = String.format(fmt, table, "ck", "ck"); + try { + tableOperation(handle, ddl, null, WAIT_MS); + if (i == tableLimit - 1) { + fail("create table should have failed, num create table: " + + i + ", limit: " + 
tableLimit); + } + } catch (TableLimitException tle) { + if (i < tableLimit - 1) { + fail("create table should succeed: " + table); + } + } + } + + /* + * List tables + */ + ListTablesResult lsr = listTables(handle); + assertEquals(tableLimit, lsr.getTables().length); + } + + /** + * Test that the number of columns in a child table is subject to + * TableRequestLimits.columnsPerTable. + * + * Because a child table inherits the primary key of its parent, the + * parent table's primary key fields are counted toward the limit. + */ + @Test + public void testLimitColumns() { + assumeTrue("Skipping testLimitColumns if not minicloud or cloud test " + + "or tenantLimits is not provided", + cloudRunning && tenantLimits != null); + + final int columnLimit = tenantLimits.getStandardTableLimits(). + getColumnsPerTable(); + + String ddl = "create table p(" + + " k1 integer, " + + " k2 integer, " + + " k3 integer, " + + " s string, " + + " primary key(k1, k2, k3))"; + tableOperation(handle, ddl, new TableLimits(50, 50, 1), WAIT_MS); + + /* + * Create table p.c with N columns, where N is the column limit per + * table. + */ + StringBuilder sb; + sb = new StringBuilder("create table p.c(c1 integer, primary key(c1)"); + for (int i = 4; i < columnLimit; i++) { + sb.append(", s").append(i).append(" string"); + } + sb.append(")"); + tableOperation(handle, sb.toString(), null, WAIT_MS); + + /* + * Create table p.c.d with N + 1 columns, where N is the column limit + * per table. + */ + sb = new StringBuilder("create table p.c.d(d1 integer, primary key(d1)"); + for (int i = 5; i < columnLimit + 1; i++) { + sb.append(", s").append(i).append(" string"); + } + sb.append(")"); + try { + tableOperation(handle, sb.toString(), null, WAIT_MS); + fail("create table should have failed as its column count " + + "exceeds the maximum number of columns per table: " + columnLimit); + } catch (IllegalArgumentException iae) { + /* expected */ + } + + /* + * Adding another field to p.c should fail as the column count would + * exceed the limit. + */ + ddl = "alter table p.c(add n1 integer)"; + try { + tableOperation(handle, ddl, null, WAIT_MS); + fail("adding column should have failed as its column count " + + "exceeds the maximum number of columns per table: " + columnLimit); + } catch (IllegalArgumentException iae) { + /* expected */ + } + } + + @Test + public void testLimitIndexes() { + assumeTrue("Skipping testLimitIndexes if not minicloud or cloud test " + + "or tenantLimits is not provided", + cloudRunning && tenantLimits != null); + + final int indexLimit = tenantLimits.getStandardTableLimits().
+ getIndexesPerTable(); + + /* Create table t */ + tableOperation(handle, createTDdl, limits, WAIT_MS); + + /* Create table t.c and N indexes (N = indexLimit - 2) */ + StringBuilder sb = new StringBuilder("create table t.c(idc integer, "); + for (int i = 0; i < indexLimit; i++) { + sb.append("c").append(i).append(" integer, "); + } + sb.append("primary key(idc))"); + tableOperation(handle, sb.toString(), null, WAIT_MS); + + for (int i = 0; i < indexLimit; i++) { + sb.setLength(0); + sb.append("create index idx").append(i) + .append(" on t.c(c").append(i).append(")"); + + tableOperation(handle, sb.toString(), null, WAIT_MS); + } + + String ddl = "create index idxC0C1 on t.c(c0, c1)"; + tableOperation(handle, ddl, null, null, TableResult.State.ACTIVE, + IndexLimitException.class); + } + + @Test + public void testLimitKeyValueSize() { + + assumeTrue("Skipping testLimitKeyValueSize if onprem test", + !onprem && tenantLimits != null); + + String ddl; + + ddl = "create table t(k1 string, s string, primary key(k1))"; + tableOperation(handle, ddl, limits, WAIT_MS); + + ddl = "create table t.c(k2 string, s string, primary key(k2))"; + tableOperation(handle, ddl, null, WAIT_MS); + + ddl = "create table t.c.g(k3 string, s string, primary key(k3))"; + tableOperation(handle, ddl, null, WAIT_MS); + + MapValue row = new MapValue(); + String s1 = "a"; + + final int maxPKeySize = tenantLimits.getStandardTableLimits(). + getPrimaryKeySizeLimit(); + final int maxValSize = tenantLimits.getStandardTableLimits(). + getRowSizeLimit(); + final int maxIdxKeySize = + (cloudRunning ? tenantLimits.getStandardTableLimits(). + getIndexKeySizeLimit() : 64); + + PutRequest req; + String sval; + + /* + * Primary key size exceed size limit + */ + + /* Put row to t.c with max key size, should succeed */ + sval = genString(maxPKeySize - 1); + row.put("k1", s1) + .put("k2", sval) + .put("s", s1); + req = new PutRequest().setTableName("t.c").setValue(row); + handle.put(req); + + /* + * Put row with max key size + 1, should have failed with + * KeySizeLimitException + */ + row.put("k1", sval + "a"); + try { + handle.put(req); + fail("Expect to catch KeySizeLimitException but not"); + } catch (KeySizeLimitException ex) { + /* expected */ + } + + /* Put row to t.c.g with max key size, should succeed */ + sval = genString(maxPKeySize - 2); + row.put("k1", s1) + .put("k2", s1) + .put("k3", sval) + .put("s", s1); + req = new PutRequest().setTableName("t.c.g").setValue(row); + handle.put(req); + + /* + * Put row to t.c.g with max key size + 1, should have failed with + * KeySizeLimitException + */ + row.put("k3", sval + "a"); + req = new PutRequest().setTableName("t.c.g").setValue(row); + try { + handle.put(req); + fail("Expect to catch KeySizeLimitException but not"); + } catch (KeySizeLimitException ex) { + /* expected */ + } + + /* + * Row size exceed size limit + */ + + /* + * Put row to t.c with length > max value size, should fail with + * RowSizeLimitException + */ + sval = genString(maxValSize); + row.put("k1", s1) + .put("k2", s1) + .put("s", sval); + req = new PutRequest().setTableName("t.c").setValue(row); + try { + handle.put(req); + fail("Expect to catch RowSizeLimitException but not"); + } catch (RowSizeLimitException ex) { + /* expected */ + } + + /* + * Put row to t.c.g with length > max value size, should fail with + * RowSizeLimitException + */ + row.put("k1", s1) + .put("k2", s1) + .put("k3", s1) + .put("s", sval); + req = new PutRequest().setTableName("t.c.g").setValue(row); + try { + handle.put(req); + 
fail("Expect to catch RowSizeLimitException but not"); + } catch (RowSizeLimitException ex) { + /* expected */ + } + + /* + * Index key size exceed size limit + */ + String[] indexDdls = new String[] { + "create index idxc1 on t.c(s)", + "create index idxg1 on t.c.g(s)", + }; + + for (String idxDdl : indexDdls) { + tableOperation(handle, idxDdl, null, WAIT_MS); + } + + /* Put row to t.c with max index key size, should succeed */ + sval = genString(maxIdxKeySize); + row.put("k1", s1) + .put("k2", s1) + .put("s", sval); + req = new PutRequest().setTableName("t.c").setValue(row); + handle.put(req); + + /* + * Put row to t.c with max index key size + 1, should fail with + * KeySizeLimitException + */ + row.put("s", sval + "a"); + try { + handle.put(req); + fail("Expected to catch KeySizeLimitException"); + } catch (KeySizeLimitException ex) { + /* expected */ + } + + /* Put row to t.c.g with max index key size, should succeed */ + row.put("k1", s1) + .put("k2", s1) + .put("k3", s1) + .put("s", sval); + req = new PutRequest().setTableName("t.c.g").setValue(row); + handle.put(req); + + /* + * Put row to t.c.g with max index key size + 1, should fail with + * KeySizeLimitException + */ + row.put("s", sval + "a"); + try { + handle.put(req); + fail("Expect to catch KeySizeLimitException but not"); + } catch (KeySizeLimitException ex) { + /* expected */ + } + } + + /** + * Test invalid table operations on child table: + * 1. Can't set limits on child table when create table + * 2. Can't create table if its parent doesn't exist + * 3. Don't allow to update limits of child table + * 4. Can't drop the parent table if referenced by any child + * 5. Don't allow to get table usage of child table + */ + @Test + public void testInvalidTableOps() { + /* Cannot set limits on child table */ + tableOperation(handle, createTADdl, limits, null, + TableResult.State.ACTIVE, + (cloudRunning ? IllegalArgumentException.class : + TableNotFoundException.class)); + + /* The parent table of t.a does not exist */ + tableOperation(handle, createTADdl, null, null, + TableResult.State.ACTIVE, + (cloudRunning ? IllegalArgumentException.class : + TableNotFoundException.class)); + + tableOperation(handle, createTDdl, limits, WAIT_MS); + tableOperation(handle, createTADdl, null, WAIT_MS); + + /* Don't allow to update limits of child table */ + if (!onprem) { + tableOperation(handle, null, new TableLimits(600, 400, 1), "t.a", + TableResult.State.ACTIVE, + IllegalArgumentException.class); + } + + /* Cannot drop the parent table still referenced by child table */ + String ddl = "drop table t"; + tableOperation(handle, ddl, null, null, TableResult.State.DROPPED, + IllegalArgumentException.class); + + if (cloudRunning) { + /* Don't allow to get table usage of child table */ + TableUsageRequest tuReq = new TableUsageRequest().setTableName("t.a"); + try { + handle.getTableUsage(tuReq); + fail("GetTableUsage on child table should have failed"); + } catch (IllegalArgumentException iae) { + /* expected */ + } + } + } + + /** + * Test put/get/delete row of child table. 
+ */ + @Test + public void testPutGetDelete() { + int recordKB = 2; + tableOperation(handle, createTDdl, limits, WAIT_MS); + tableOperation(handle, createTADdl, null, WAIT_MS); + tableOperation(handle, createTABDdl, null, WAIT_MS); + + MapValue row; + MapValue key; + + String longStr = genString((recordKB - 1) * 1024); + /* put a row to table t */ + row = makeTRow(1, longStr); + doPutRow("t", row, recordKB); + + /* put a row to table t.a */ + row = makeTARow(1, 2, longStr); + doPutRow("t.a", row, recordKB); + key = new MapValue().put("id", 1).put("ida", 2); + doGetRow("t.a", key, row, recordKB, null); + doDeleteRow("t.a", key, recordKB); + + /* put a row to table t.a.b */ + row = makeTABRow(1, 2, 3, longStr); + doPutRow("t.a.b", row, recordKB); + key = new MapValue().put("id", 1).put("ida", 2).put("idb", 3); + doGetRow("t.a.b", key, row, recordKB, null); + doDeleteRow("t.a.b", key, recordKB); + } + + /** + * Test query against child table + */ + @Test + public void testQuery() { + tableOperation(handle, createTDdl, limits, WAIT_MS); + tableOperation(handle, createTADdl, null, WAIT_MS); + tableOperation(handle, createTABDdl, null, WAIT_MS); + + final int keyCost = getMinRead(); + + final int numT = 30; + final int numAPerT = 2; + final int numBPerA = 1; + + final int numA = numT * numAPerT; + final int numB = numT * numAPerT * numBPerA; + + final int rkbT = 1; + final int rkbA = 2; + final int rkbB = 2; + final int rkbMaxTA = Math.max(rkbT, rkbA); + final int rkbMax = Math.max(Math.max(rkbT, rkbA), rkbB); + + final String s1 = genString(1); + final String s1K = genString(1024); + + for (int i = 0; i < numT; i++) { + doPutRow("t", makeTRow(i, s1), rkbT); + for (int j = 0; j < numAPerT; j++) { + doPutRow("t.a", makeTARow(i, j, s1K), rkbA); + for (int k = 0; k < numBPerA; k++) { + doPutRow("t.a.b", makeTABRow(i, j, k, s1K), rkbB); + } + } + } + + String query; + int count; + int cost; + int limit = 10; + + query = "select id, ida from t.a"; + count = numA; + cost = numA * keyCost; + runQueryWithLimit(query, cost/5, limit, count, cost, keyCost); + + query = "select * from t.a"; + count = numA; + cost = numA * rkbA; + if (!dontDoubleChargeKey()) { + cost += numA * keyCost; + } + runQueryWithLimit(query, cost/5, limit, count, cost, rkbA); + + query = "select id, ida, idb from t.a.b where idb = 100"; + count = 0; + cost = numB * keyCost; + runQueryWithLimit(query, cost/5, limit, count, cost, keyCost); + + query = "select * from t.a.b where s is null"; + count = 0; + cost = numB * rkbB; + if (!dontDoubleChargeKey()) { + cost += numB * keyCost; + } + runQueryWithLimit(query, cost/5, limit, count, cost, rkbB); + + query = "select t.id, a.ida from nested tables(t descendants(t.a a))"; + count = numA; + cost = (numT + numA) * keyCost; + runQueryWithLimit(query, cost/5, limit, count, cost, keyCost); + + query = "select * from nested tables(t descendants(t.a a))"; + count = numA; + cost = numT * rkbT + numA * rkbA; + if (!dontDoubleChargeKey()) { + cost += (numT + numA) * keyCost; + } + runQueryWithLimit(query, cost/5, limit, count, cost, rkbMaxTA); + + query = "select t.id, a.ida, b.idb " + + "from nested tables(t descendants(t.a a, t.a.b b))"; + count = numB; + cost = (numT + numA + numB) * keyCost; + runQueryWithLimit(query, cost/5, limit, count, cost, keyCost); + + query = "select * from nested tables(t descendants(t.a a, t.a.b b))"; + count = numB; + cost = numT * rkbT + numA * rkbA + numB * rkbB; + if (!dontDoubleChargeKey()) { + cost += (numT + numA + numB) * keyCost; + } + 
runQueryWithLimit(query, cost/5, limit, count, cost, rkbMax); + + query = "select a.ida, t.id from nested tables(t.a a ancestors(t))"; + count = numA; + cost = (numA + numT) * keyCost; + runQueryWithLimit(query, cost/5, limit, count, cost, 2 * keyCost); + + query = "select * from nested tables(t.a a ancestors(t))"; + count = numA; + cost = numA * rkbA + numT * rkbT; + if (!dontDoubleChargeKey()) { + cost += (numA + numT) * keyCost; + } + runQueryWithLimit(query, cost/5, limit, count, cost, rkbT + rkbA); + + query = "select b.idb, a.ida, t.id " + + "from nested tables(t.a.b b ancestors(t, t.a a))"; + count = numB; + cost = (numB + numA + numT) * keyCost; + runQueryWithLimit(query, cost/5, limit, count, cost, 3 * keyCost); + + query = "select * from nested tables(t.a.b b ancestors(t, t.a a))"; + count = numB; + cost = numB * rkbB + numA * rkbA + numT * rkbT; + if (!dontDoubleChargeKey()) { + cost += (numB + numA + numT) * keyCost; + } + runQueryWithLimit(query, cost/5, limit, count, cost, rkbT + rkbA + rkbB); + + query = "select a.ida, t.id, b.idb " + + "from nested tables(t.a a ancestors(t) descendants(t.a.b b))"; + count = numA; + cost = (numA + numT + numB) * keyCost; + runQueryWithLimit(query, cost/5, limit, count, cost, 2 * keyCost); + + query = "select * " + + "from nested tables(t.a a ancestors(t) descendants(t.a.b b))"; + count = numB; + cost = numA * rkbA + numT * rkbT + numB * rkbB; + if (!dontDoubleChargeKey()) { + cost += (numA + numT + numB) * keyCost; + } + runQueryWithLimit(query, cost/5, limit, count, cost, rkbT + rkbA); + + String ddl = "create index if not exists idxName on t.a(name)"; + tableOperation(handle, ddl, null, WAIT_MS); + + query = "select a.id, a.ida, a.name, b.idb " + + "from nested tables(t.a a ancestors(t) descendants(t.a.b b)) " + + "where a.name > 'n'"; + count = numB; + /* + * TODO: NOSQL-719 + * Enable the cost check in cloud test after fix it + */ + if (useCloudService) { + cost = 0; + } else { + cost = (numA + numT + numB) * keyCost; + if (!dontDoubleChargeKey()) { + cost += numA * keyCost; + } + } + runQueryWithLimit(query, cost/5, limit, count, cost, 2 * keyCost); + + query = "select * " + + "from nested tables(t.a a ancestors(t) descendants(t.a.b b)) " + + "where a.name > 'n'"; + count = numB; + cost = numA * rkbA + numT * rkbT + numB * rkbB; + if (!dontDoubleChargeKey()) { + cost += (2 * numA + numT + numB) * keyCost; + } + runQueryWithLimit(query, cost/5, limit, count, cost, rkbT + rkbA); + } + + @Test + public void testWriteMultiple() + throws Exception { + + tableOperation(handle, createTDdl, limits, WAIT_MS); + tableOperation(handle, createTADdl, null, WAIT_MS); + tableOperation(handle, createTABDdl, null, WAIT_MS); + tableOperation(handle, createXDdl, limits, WAIT_MS); + + final int rkb = 2; + final String s1K = genString((rkb - 1) * 1024 + 1); + + WriteMultipleRequest req = new WriteMultipleRequest(); + WriteMultipleResult res; + MapValue key; + MapValue row; + PutRequest put; + DeleteRequest del; + + /* Put operations */ + int id = 1; + row = makeTRow(id, s1K); + put = new PutRequest().setTableName("t").setValue(row); + req.add(put, true); + + row = makeTARow(id, 1, s1K); + put = new PutRequest().setTableName("t.a").setValue(row); + req.add(put, true); + + row = makeTABRow(id, 1, 1, s1K); + put = new PutRequest().setTableName("t.a.b").setValue(row); + req.add(put, true); + + row = makeTABRow(id, 1, 2, s1K); + put = new PutRequest().setTableName("t.a.b").setValue(row); + req.add(put, true); + + res = handle.writeMultiple(req); + 
assertTrue(res.getSuccess()); + if (!onprem) { + assertEquals(rkb * req.getNumOperations(), res.getWriteKB()); + } + + Version verT11 = res.getResults().get(0).getVersion(); + Version verTA11 = res.getResults().get(1).getVersion(); + Version verTAB11 = res.getResults().get(2).getVersion(); + + /* + * Test ReturnInfo + */ + req.clear(); + + /* putIfAbsent with existing row */ + row = makeTRow(id, s1K); + put = new PutRequest().setTableName("t") + .setOption(Option.IfAbsent) + .setReturnRow(true) + .setValue(row); + req.add(put, false); + + /* putIfVersion with unmatched version */ + row = makeTARow(id, 1, s1K); + put = new PutRequest() + .setTableName("t.a") + .setMatchVersion(verT11) + .setReturnRow(true) + .setValue(row); + req.add(put, false); + + /* deleteIfVersion with unmatched version*/ + key = makeTABKey(id, 1, 1); + del = new DeleteRequest() + .setTableName("t.a.b") + .setMatchVersion(verTA11) + .setReturnRow(true) + .setKey(key); + req.add(del, false); + + res = handle.writeMultiple(req); + assertTrue(res.getSuccess()); + assertEquals(0, res.getWriteKB()); + + List results = res.getResults(); + OperationResult r = results.get(0); + assertFalse(r.getSuccess()); + assertEquals(makeTRow(id, s1K), r.getExistingValue()); + assertTrue(Arrays.equals(verT11.getBytes(), + r.getExistingVersion().getBytes())); + assertTrue(r.getExistingModificationTime() > 0); + + r = results.get(1); + assertFalse(r.getSuccess()); + assertEquals(makeTARow(id, 1, s1K), r.getExistingValue()); + assertTrue(Arrays.equals(verTA11.getBytes(), + r.getExistingVersion().getBytes())); + assertTrue(r.getExistingModificationTime() > 0); + + r = results.get(2); + assertFalse(r.getSuccess()); + assertEquals(makeTABRow(id, 1, 1, s1K), r.getExistingValue()); + assertTrue(Arrays.equals(verTAB11.getBytes(), + r.getExistingVersion().getBytes())); + assertTrue(r.getExistingModificationTime() > 0); + + /* + * abortIfUnsuccessful = true, check failedOperation only. 
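+ * The unconditional put to t succeeds, but the IfAbsent put to t.a.b targets an existing row; since that operation was added with abortIfUnsuccessful = true, the whole batch fails and only the failed operation's index and result are reported.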
+ */ + req.clear(); + row = makeTRow(id, s1K); + put = new PutRequest().setTableName("t") + .setReturnRow(true) + .setValue(row); + req.add(put, true); + + row = makeTABRow(id, 1, 1, s1K + "_u"); + put = new PutRequest().setTableName("t.a.b") + .setReturnRow(true) + .setOption(Option.IfAbsent) + .setValue(row); + req.add(put, true); + + res = handle.writeMultiple(req); + assertFalse(res.getSuccess()); + assertEquals(1, res.getFailedOperationIndex()); + r = res.getFailedOperationResult(); + assertFalse(r.getSuccess()); + assertEquals(makeTABRow(id, 1, 1, s1K), r.getExistingValue()); + assertTrue(Arrays.equals(verTAB11.getBytes(), + r.getExistingVersion().getBytes())); + assertTrue(r.getExistingModificationTime() > 0); + + /* + * Delete operations + */ + req.clear(); + key = makeTKey(id); + del = new DeleteRequest().setTableName("t").setKey(key); + req.add(del, true); + + key = makeTAKey(id, 1); + del = new DeleteRequest().setTableName("t.a").setKey(key); + req.add(del, true); + + key = makeTABKey(id, 1, 1); + del = new DeleteRequest() + .setTableName("t.a.b") + .setMatchVersion(verTAB11) + .setKey(key); + req.add(del, true); + + key = makeTABKey(id, 1, 2); + del = new DeleteRequest().setTableName("t.a.b").setKey(key); + req.add(del, true); + + res = handle.writeMultiple(req); + assertTrue(res.getSuccess()); + if (!onprem) { + assertEquals(rkb * req.getNumOperations(), res.getWriteKB()); + } + + /* + * Test GeneratedValue for Identity columns + */ + req.clear(); + + String ddl; + ddl = "alter table t(add seq integer generated always as identity)"; + tableOperation(handle, ddl, null, WAIT_MS); + ddl = "alter table t.a.b(add seq long generated always as identity" + + "(start with 100 increment by -2))"; + tableOperation(handle, ddl, null, WAIT_MS); + + id++; + row = makeTRow(id, s1K); + put = new PutRequest().setTableName("T").setValue(row); + req.add(put, true); + + row = makeTARow(id, 1, s1K); + put = new PutRequest().setTableName("T.A").setValue(row); + req.add(put, true); + + int numRows = 3; + for (int i = 0; i < numRows; i++) { + row = makeTABRow(id, 1, i, s1K); + put = new PutRequest().setTableName("T.A.B").setValue(row); + req.add(put, true); + } + + res = handle.writeMultiple(req); + assertTrue(res.getSuccess()); + if (!onprem) { + assertEquals(rkb * req.getNumOperations(), res.getWriteKB()); + } + + List ops = req.getOperations(); + String tname; + int seqT = 1; + int seqTAB = 100; + int seqStep = -2; + for (int i = 0; i < res.getResults().size(); i++) { + r = res.getResults().get(i); + tname = ops.get(i).getRequest().getTableName(); + if (tname.equalsIgnoreCase("t.a")) { + assertNull(r.getGeneratedValue()); + } else { + /* + * TODO: NOSQL-720 + * enable below check in cloud test after fix it + */ + if (!useCloudService) { + assertNotNull(r.getGeneratedValue()); + if (tname.equalsIgnoreCase("t")) { + assertEquals(seqT, r.getGeneratedValue().getInt()); + } else { + /* t.a.b */ + assertEquals(seqTAB, r.getGeneratedValue().getInt()); + seqTAB += seqStep; + } + } + } + } + + /* Test puts to single table */ + req.clear(); + + id++; + row = makeTARow(id, 0, s1K); + put = new PutRequest().setTableName("t.a").setValue(row); + req.add(put, true); + + row = makeTARow(id, 1, s1K); + put = new PutRequest().setTableName("T.a").setValue(row); + req.add(put, true); + + row = makeTARow(id, 2, s1K); + put = new PutRequest() + .setTableName("T.A") + .setOption(Option.IfAbsent) + .setValue(row); + req.add(put, true); + + key = makeTAKey(id, 3); + del = new
DeleteRequest().setTableName("t.A").setKey(key); + req.add(del, false); + + key = makeTAKey(id, 4); + del = new DeleteRequest().setTableName("t.a").setKey(key); + req.add(del, false); + + res = handle.writeMultiple(req); + assertTrue(res.getSuccess()); + int i = 0; + for (OperationResult or : res.getResults()) { + if (i++ < 3) { + assertTrue(or.getSuccess()); + } else { + assertFalse(or.getSuccess()); + } + } + + /* + * Negative cases + */ + + /* + * Table not found: t.unknown. + * + * Sub requests: + * put -> t.unknown + * put -> t + */ + req.clear(); + + row = makeTRow(1, s1K); + put = new PutRequest().setTableName("t.unknown").setValue(row); + req.add(put, true); + + put = new PutRequest().setTableName("t").setValue(row); + req.add(put, true); + try { + handle.writeMultiple(req); + fail("Operation should have failed with TableNotFoundException"); + } catch (TableNotFoundException e) { + /* expected */ + checkErrorMessage(e); + } + + /* + * Table not found: t.unknown. + * + * Sub requests: + * put -> t + * put -> t.unknown + */ + req.clear(); + + row = makeTRow(1, s1K); + put = new PutRequest().setTableName("t").setValue(row); + req.add(put, true); + + put = new PutRequest().setTableName("t.unknown").setValue(row); + req.add(put, true); + + try { + handle.writeMultiple(req); + fail("Operation should have failed with TableNotFoundException"); + } catch (TableNotFoundException e) { + /* expected */ + checkErrorMessage(e); + } + + /* + * IllegalArgumentException: Tables not related: t x + * + * Sub requests: + * put -> t + * put -> x + */ + req.clear(); + + try { + row = makeTRow(1, s1K); + put = new PutRequest().setTableName("t").setValue(row); + req.add(put, true); + + put = new PutRequest().setTableName("x").setValue(row); + req.add(put, true); + handle.writeMultiple(req); + fail("Operation should have failed with IllegalArgumentException"); + } catch (IllegalArgumentException e) { + /* expected */ + checkErrorMessage(e); + } + + + /* + * IllegalArgumentException: Shard key does not match + * + * Sub requests: + * put {id=1,..} -> t.a + * put {id=1,..} -> t.a.b + * put {id=2,..} -> T.A + * put {id=2,..} -> T.A.B + */ + req.clear(); + + row = makeTARow(1, 1, s1K); + put = new PutRequest().setTableName("t.a").setValue(row); + req.add(put, true); + + row = makeTABRow(1, 1, 1, s1K); + put = new PutRequest().setTableName("t.a.b").setValue(row); + req.add(put, true); + + key = makeTAKey(2, 1); + del = new DeleteRequest().setTableName("T.A").setKey(key); + req.add(del, true); + + key = makeTABKey(2, 1, 1); + del = new DeleteRequest().setTableName("T.A.B").setKey(key); + req.add(del, true); + + try { + handle.writeMultiple(req); + fail("Operation should have failed with IllegalArgumentException"); + } catch (IllegalArgumentException e) { + /* expected */ + checkErrorMessage(e); + } + + /* + * IllegalArgumentException: Missing primary key field: idb + * + * Sub requests: + * put {id=1,ida=1} -> t.a + * put {id=1,ida=1} -> t.a.b + */ + req.clear(); + + row = makeTARow(1, 1, s1K); + put = new PutRequest().setTableName("t.a").setValue(row); + req.add(put, true); + + put = new PutRequest().setTableName("t.a.b").setValue(row); + req.add(put, true); + + try { + handle.writeMultiple(req); + fail("Operation should have failed with IllegalArgumentException"); + } catch (IllegalArgumentException e) { + /* expected */ + checkErrorMessage(e); + } + } + + /* Test using table ocid in put/delete operations */ + @Test + public void testWriteMultipleWithOcid() throws Exception { + assumeTrue("Skipping 
testWriteMulitpleWithOcid if not minicloud test", + cloudRunning); + + tableOperation(handle, createTDdl, limits, WAIT_MS); + tableOperation(handle, createTADdl, null, WAIT_MS); + tableOperation(handle, createTABDdl, null, WAIT_MS); + + String ddl = "create table t1(id integer, s string, primary key(id))"; + tableOperation(handle, ddl, limits, WAIT_MS); + + int rkb = 1; + final String s1 = genString(1); + + WriteMultipleRequest req = new WriteMultipleRequest(); + WriteMultipleResult res; + MapValue key; + MapValue row; + PutRequest put; + DeleteRequest del; + + String ocidT = getTable("t", handle).getTableId(); + String ocidTA = getTable("t.a", handle).getTableId(); + String ocidTAB = getTable("t.a.b", handle).getTableId(); + String ocidT1 = getTable("t1", handle).getTableId(); + + /* put ops */ + int id = 1; + row = makeTRow(id, s1); + put = new PutRequest().setTableName(ocidT).setValue(row); + req.add(put, true); + + row = makeTARow(id, 1, s1); + put = new PutRequest().setTableName(ocidTA).setValue(row); + req.add(put, true); + + row = makeTARow(id, 2, s1); + put = new PutRequest().setTableName(ocidTA).setValue(row); + req.add(put, true); + + row = makeTABRow(id, 1, 1, s1); + put = new PutRequest().setTableName(ocidTAB).setValue(row); + req.add(put, true); + + res = handle.writeMultiple(req); + assertTrue(res.getSuccess()); + if (!onprem) { + assertEquals(rkb * req.getNumOperations(), res.getWriteKB()); + } + + /* delete ops */ + req.clear(); + + key = makeTKey(id); + del = new DeleteRequest().setTableName(ocidT).setKey(key); + req.add(del, true); + + key = makeTAKey(id, 1); + del = new DeleteRequest().setTableName(ocidTA).setKey(key); + req.add(del, true); + + key = makeTAKey(id, 2); + del = new DeleteRequest().setTableName(ocidTA).setKey(key); + req.add(del, true); + + key = makeTABKey(id, 1, 1); + del = new DeleteRequest().setTableName(ocidTAB).setKey(key); + req.add(del, true); + + res = handle.writeMultiple(req); + assertTrue(res.getSuccess()); + if (!onprem) { + assertEquals(rkb * req.getNumOperations(), res.getWriteKB()); + } + + /* + * All operations should be on same table or tables belongs to same + * parents + */ + req.clear(); + + row = makeTABRow(id, 1, 1, s1); + put = new PutRequest().setTableName(ocidTAB).setValue(row); + req.add(put, true); + + row = new MapValue().put("id", 1).put("s", s1); + put = new PutRequest().setTableName(ocidT1).setValue(row); + req.add(put, true); + + try { + res = handle.writeMultiple(req); + fail("Operation should have failed with IAE"); + } catch (IllegalArgumentException ex) { + checkErrorMessage(ex); + } + } + + @Test + public void testWriteMultipleTTL() { + + final TimeToLive tTTL = TimeToLive.ofDays(1); + tableOperation(handle, addUsingTTL(createTDdl, tTTL), limits, WAIT_MS); + + final TimeToLive aTTL = TimeToLive.ofDays(3); + tableOperation(handle, addUsingTTL(createTADdl, aTTL), null, WAIT_MS); + + final TimeToLive bTTL = TimeToLive.ofDays(5); + tableOperation(handle, addUsingTTL(createTABDdl, bTTL), null, WAIT_MS); + + final TimeToLive userTTL = TimeToLive.ofDays(10); + + final String s1 = genString(1); + final int rowKB = 1; + WriteMultipleRequest req = new WriteMultipleRequest(); + WriteMultipleResult res; + PutRequest put; + + /* Use table default TTL */ + int id = 1; + MapValue trow = makeTRow(id, s1); + put = new PutRequest() + .setTableName("t") + .setValue(trow); + req.add(put, true); + + MapValue arow = makeTARow(id, 1, s1); + put = new PutRequest() + .setTableName("t.a") + .setValue(arow); + req.add(put, true); + + MapValue 
brow = makeTABRow(id, 1, 1, s1); + put = new PutRequest() + .setTableName("t.a.b") + .setValue(brow); + req.add(put, true); + + res = handle.writeMultiple(req); + assertTrue(res.getSuccess()); + + doGetRow("t", makeTKey(id), trow, rowKB, tTTL); + doGetRow("t.a", makeTAKey(id, 1), arow, rowKB, aTTL); + doGetRow("t.a.b", makeTABKey(id, 1, 1), brow, rowKB, bTTL); + + /* Update to user specified TTL */ + req.clear(); + + put = new PutRequest() + .setTableName("t") + .setValue(trow) + .setTTL(userTTL); + req.add(put, true); + + put = new PutRequest() + .setTableName("t.a") + .setValue(arow) + .setTTL(userTTL); + req.add(put, true); + + put = new PutRequest() + .setTableName("t.a.b") + .setValue(brow) + .setTTL(userTTL); + req.add(put, true); + + res = handle.writeMultiple(req); + assertTrue(res.getSuccess()); + + doGetRow("t", makeTKey(id), trow, rowKB, userTTL); + doGetRow("t.a", makeTAKey(id, 1), arow, rowKB, userTTL); + doGetRow("t.a.b", makeTABKey(id, 1, 1), brow, rowKB, userTTL); + + /* Update back to default TTL */ + req.clear(); + + put = new PutRequest() + .setTableName("t") + .setValue(trow) + .setUseTableDefaultTTL(true); + req.add(put, true); + + put = new PutRequest() + .setTableName("t.a") + .setValue(arow) + .setUseTableDefaultTTL(true); + req.add(put, true); + + put = new PutRequest() + .setTableName("t.a.b") + .setValue(brow) + .setUseTableDefaultTTL(true); + req.add(put, true); + + res = handle.writeMultiple(req); + assertTrue(res.getSuccess()); + + doGetRow("t", makeTKey(id), trow, rowKB, tTTL); + doGetRow("t.a", makeTAKey(id, 1), arow, rowKB, aTTL); + doGetRow("t.a.b", makeTABKey(id, 1, 1), brow, rowKB, bTTL); + } + + private void runQueryWithLimit(String query, int maxReadKB, int limit, + int expCount, int expReadKB, int recordKB) { + + runQuery(query, 0, 0, expCount, expReadKB, recordKB); + + if (maxReadKB > 0) { + runQuery(query, maxReadKB, 0, expCount, expReadKB, recordKB); + } + + if (limit > 0) { + runQuery(query, 0, limit, expCount, expReadKB, recordKB); + } + + if (checkKVVersion(21, 2, 18)) { + /* Query should always make progress with small limit */ + for (int kb = 1; kb <= 5; kb++) { + runQuery(query, kb, 0, expCount, expReadKB, recordKB); + } + } + } + + private void runQuery(String statement, + int maxReadKB, + int limit, + int expCount, + int expCost, + int recordKB) { + + final boolean dispResult = false; + + QueryRequest req = new QueryRequest(); + PrepareRequest prepReq = new PrepareRequest() + .setStatement(statement); + PrepareResult prepRet = handle.prepare(prepReq); + req.setPreparedStatement(prepRet); + + if (maxReadKB > 0) { + req.setMaxReadKB(maxReadKB); + } + if (limit > 0) { + req.setLimit(limit); + } + + QueryResult ret; + int cnt = 0; + int batches = 0; + int cost = 0; + int batchSize = 0; + do { + ret = handle.query(req); + batches++; + batchSize = ret.getResults().size(); + + if (maxReadKB > 0) { + if (checkKVVersion(21, 2, 18)) { + /* + * Query should suspend after read the table row or key + * (for key only query) if current read cost exceeded size + * limit, so at most the readKB over the size limit. 
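+ * For example, with maxReadKB = 5 and recordKB = 2, a single batch may report up to 7 KB read, since the row that crosses the limit is still charged in full.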
+ */ + assertTrue("The read cost should be at most " + recordKB + + " kb beyond the maximum readKB " + maxReadKB + + ", but actual " + ret.getReadKB(), + ret.getReadKB() <= maxReadKB + recordKB); + } else { + assertTrue("The read cost should be at most 1" + + " kb beyond the maximum readKB " + maxReadKB + + ", but actual " + ret.getReadKB(), + ret.getReadKB() <= maxReadKB + 1); + } + } + + if (limit > 0) { + assertTrue("The record count should not exceed the limit of " + + limit + ": " + batchSize, batchSize <= limit); + } + + cost += ret.getReadKB(); + cnt += batchSize; + + for (MapValue mv : ret.getResults()) { + if (dispResult) { + String json = mv.toJson(); + if (json.length() > 50) { + json = json.substring(0, 50) + "..." + + (json.length() - 50) + " bytes ..."; + } + System.out.println(json); + } + } + } while(!req.isDone()); + + if (expCount > 0) { + assertEquals("'" + statement + "'\nshould return " + expCount + + " rows but actual got " + cnt + " rows", + expCount, cnt); + } + + if (maxReadKB == 0 && limit == 0 && expCost < 2 * 1024 * 1024) { + assertEquals("'" + statement + "' + " + + "should be done in single batch but actual " + + batches + " batches", 1, batches); + } + + if (checkKVVersion(22, 1, 1) == false) { + return; + } + + if (!onprem) { + assertTrue(cost > 0); + + if (expCost > 0) { + if (batches == 1) { + assertEquals("'" + statement + "'\nexpect read cost " + + expCost + "kb, but actual " + cost + " kb", + expCost, cost); + } + } + } + } + + private void doPutRow(String tableName, MapValue row, int recordKB) { + PutRequest req = new PutRequest() + .setTableName(tableName) + .setValue(row); + PutResult ret = handle.put(req); + assertNotNull(ret.getVersion()); + assertCost(ret, 0, recordKB); + } + + private void doGetRow(String tableName, + MapValue key, + MapValue expRow, + int recordKB, + TimeToLive ttl) { + GetRequest req = new GetRequest() + .setTableName(tableName) + .setKey(key); + GetResult ret = handle.get(req); + assertEquals(expRow, ret.getValue()); + assertCost(ret, recordKB, 0); + if (ttl != null) { + assertTimeToLive(ttl, ret.getExpirationTime()); + } + } + + private void doDeleteRow(String tableName, MapValue key, int readKB) { + DeleteRequest req = new DeleteRequest() + .setTableName(tableName) + .setKey(key); + DeleteResult ret = handle.delete(req); + assertTrue(ret.getSuccess()); + assertCost(ret, getMinRead() * 2 /* key read in absolute consistency */, + readKB); + } + + @Override + void dropAllTables() { + dropAllTables(handle, true); + } + + private MapValue makeTRow(int id, String longStr) { + MapValue row = makeTKey(id); + row.put("name", "n" + id) + .put("s", longStr); + return row; + } + + private MapValue makeTKey(int id) { + MapValue row = new MapValue(); + row.put("id", id); + return row; + } + + private MapValue makeTARow(int id, int ida, String longStr) { + MapValue row = makeTAKey(id, ida); + row.put("name", "n" + id + "_" + ida) + .put("s", longStr); + return row; + } + + private MapValue makeTAKey(int id, int ida) { + MapValue row = new MapValue(); + row.put("id", id) + .put("ida", ida); + return row; + } + + private MapValue makeTABRow(int id, int ida, int idb, String longStr) { + MapValue row = makeTABKey(id, ida, idb); + row.put("name", "n" + id + "_" + ida + "_" + idb) + .put("s", longStr); + return row; + } + + private MapValue makeTABKey(int id, int ida, int idb) { + MapValue row = new MapValue(); + row.put("id", id) + .put("ida", ida) + .put("idb", idb); + return row; + } + + private void checkTableInfo(TableResult tr, + String 
tableName, + TableLimits limits) { + assertEquals(tableName, tr.getTableName()); + assertEquals(State.ACTIVE, tr.getTableState()); + if (onprem) { + return; + } + if (limits != null) { + TableLimits tl = tr.getTableLimits(); + assertEquals(limits.getReadUnits(), tl.getReadUnits()); + assertEquals(limits.getWriteUnits(), tl.getWriteUnits()); + assertEquals(limits.getStorageGB(), tl.getStorageGB()); + } else { + assertNull(tr.getTableLimits()); + } + } + + private static String genString(int len) { + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < len; i++) { + sb.append('a'); + } + return sb.toString(); + } + + private static String addUsingTTL(final String ddl, TimeToLive ttl) { + String newDdl = ddl; + if (ttl != null) { + newDdl += " using ttl " + ttl.getValue() + " " + ttl.getUnit(); + } + return newDdl; + } + + private void assertTimeToLive(TimeToLive ttl, long actual) { + final long DAY_IN_MILLIS = 24 * 60 * 60 * 1000; + long expected = ttl.toExpirationTime(System.currentTimeMillis()); + assertTrue("Actual TTL duration " + actual + "ms differs by " + + "more than a day from expected duration of " + expected +"ms", + Math.abs(actual - expected) < DAY_IN_MILLIS); + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/ConcurrentDDLTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/ConcurrentDDLTest.java new file mode 100644 index 00000000..78dd736f --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/ConcurrentDDLTest.java @@ -0,0 +1,1329 @@ +/*- + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * See the file LICENSE for redistribution information. + * + */ + +package oracle.nosql.proxy; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeTrue; + +import java.lang.Thread.UncaughtExceptionHandler; +import java.net.URL; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.atomic.AtomicInteger; + +//import oracle.nosql.driver.IndexLimitException; +import oracle.nosql.driver.NoSQLHandle; +import oracle.nosql.driver.NoSQLHandleConfig; +//import oracle.nosql.driver.TableLimitException; +import oracle.nosql.driver.ops.GetIndexesRequest; +import oracle.nosql.driver.ops.GetIndexesResult; +import oracle.nosql.driver.ops.GetRequest; +import oracle.nosql.driver.ops.GetResult; +import oracle.nosql.driver.ops.ListTablesRequest; +import oracle.nosql.driver.ops.ListTablesResult; +import oracle.nosql.driver.ops.PrepareRequest; +import oracle.nosql.driver.ops.PrepareResult; +import oracle.nosql.driver.ops.PutRequest; +import oracle.nosql.driver.ops.PutResult; +import oracle.nosql.driver.ops.QueryRequest; +import oracle.nosql.driver.ops.QueryResult; +import oracle.nosql.driver.ops.TableLimits; +import oracle.nosql.driver.ops.TableRequest; +import oracle.nosql.driver.ops.TableResult; +import oracle.nosql.driver.ops.TableResult.State; +import oracle.nosql.driver.values.MapValue; +import 
oracle.nosql.driver.values.NullValue; +import oracle.nosql.proxy.security.SecureTestUtil; +import oracle.nosql.util.tmi.TableRequestLimits; + +import org.junit.Test; + +/** + * Concurrently DDL test: + * o testSingleTable: + * Execute ddls asynchronously to a single table. + * o testMultipleTables + * Execute ddls on multiple tables. + * o testMultiTenants: + * Execute ddls on tables in multiple tenants. + * o testTableIndexLimits: + * Create tables/indexes to reach the limits of tables/indexes. + */ +public class ConcurrentDDLTest extends ProxyTestBase { + + /* + * The number of threads to run ddl test. + */ + private final static int CONCURRENT_NUM = 3; + private final static int waitMillis = 60_000; + + private final Map handleCache = + new HashMap(); + + private DDLExecutor ddlExecutor; + + @Override + public void setUp() throws Exception{ + super.setUp(); + ddlExecutor = new ClientExecutor(); + } + + @Override + public void tearDown() throws Exception { + clearHandleCache(); + super.tearDown(); + } + + private void clearHandleCache() { + final Iterator> iter = + handleCache.entrySet().iterator(); + + while(iter.hasNext()) { + Entry e = iter.next(); + String tenantId = e.getKey(); + if (tenantId.equals(getTenantId())) { + continue; + } + NoSQLHandle nosqlHandle = e.getValue(); + dropAllTables(nosqlHandle, false); + + if (getSCURL() != null) { + deleteTier(tenantId); + } + nosqlHandle.close(); + iter.remove(); + } + } + + @Test + public void testSingleTable() { + assumeTrue("Skip the test if not minicloud or cloud test or " + + "tenantLimits is not provided", + cloudRunning && tenantLimits != null); + + TableRequestLimits requestLimits = + tenantLimits.getStandardTableLimits(); + final int numFields = Math.min(requestLimits.getColumnsPerTable(), 10); + final int numIndexes = Math.min(requestLimits.getIndexesPerTable(), 3); + final int evolveLimit = requestLimits.getSchemaEvolutions(); + final int numAddField = Math.min((evolveLimit + 1) / 2, 3); + final int numDropField = Math.min(((evolveLimit > numAddField) ? + (evolveLimit - numAddField) : 0), + numAddField); + + final int numRows = 100; + final TableLimits tableLimits = new TableLimits(1000, 500, 10); + final String tenantId = getTenantId(); + final NoSQLHandle nosqlHandle = getTenantHandle(tenantId); + + final DDLGenerator plan = new DDLGenerator(tenantId); + final String tableName = makeTableName(tenantId, 0); + List ddls; + + /* + * Create table + */ + ddls = plan.createTable(numFields, 1, tableLimits).build(true); + execAsyncAndWait(ddls); + + /* Load rows to table */ + loadRows(nosqlHandle, tableName, numFields, numRows); + + /* + * Create indexes, alter table add field .. + */ + ddls = plan.createIndex(tableName, numIndexes) + .addField(tableName, numAddField) + .build(true); + execAsyncAndWait(ddls); + + /* Verify the existence of indexes and do counting by index */ + assertNumIndexes(tenantId, tableName, numIndexes); + for (int idxIndex = 0; idxIndex < numIndexes; idxIndex++) { + assertRowCountByIndex(nosqlHandle, tableName, idxIndex, + numRows); + } + /* Check row value and expiration time */ + putRow(nosqlHandle, tableName, numRows, numFields); + checkRow(nosqlHandle, tableName, numRows, numFields, numAddField); + + /* + * Drop index, alter table drop field .. 
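+ * This reverses the create phase above: afterwards no indexes should remain and rows should contain only the added fields that were not dropped.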
+ */ + ddls = plan.dropIndex(tableName, numIndexes) + .dropField(tableName, numDropField) + .build(true); + execAsyncAndWait(ddls); + + /* + * Verify no index existed + */ + assertNumIndexes(tenantId, tableName, 0); + checkRow(nosqlHandle, tableName, numRows - 1, numFields, + (numAddField - numDropField)); + } + + @Test + public void testMultipleTables() { + assumeTrue("Skip the test if not minicloud or cloud test or " + + "tenantLimits is not provided", + cloudRunning && tenantLimits != null); + + TableRequestLimits requestLimits = + tenantLimits.getStandardTableLimits(); + final int numTables = Math.min(tenantLimits.getNumTables(), 3); + final int numFields = Math.min(requestLimits.getColumnsPerTable(), 5); + final int numIndexes = Math.min(requestLimits.getIndexesPerTable(), 3); + final int evolveLimit = requestLimits.getSchemaEvolutions(); + final int numAddField = Math.min((evolveLimit + 1) / 2, 3); + final int numDropField = Math.min(((evolveLimit > numAddField) ? + (evolveLimit - numAddField) : 0), + numAddField); + + final int numRows = 100; + final TableLimits tableLimits = new TableLimits(1000, 500, 10); + + final NoSQLHandle nosqlHandle = getTenantHandle(getTenantId()); + final String tenantId = getTenantId(); + final String[] tableNames = getTableNames(tenantId, numTables); + + final DDLGenerator plan = new DDLGenerator(tenantId); + List ddls; + + /* + * Create tables + */ + ddls = plan.createTable(numFields, numTables, tableLimits).build(true); + execWithThreads(CONCURRENT_NUM, ddls); + + /* Load rows to tables*/ + loadRowsToTables(nosqlHandle, CONCURRENT_NUM, tableNames, + numFields, numRows); + + /* + * Create index, alter table add field .. + */ + for (String tableName : tableNames) { + plan.createIndex(tableName, numIndexes) + .addField(tableName, numAddField); + } + ddls = plan.build(true); + execWithThreads(CONCURRENT_NUM, ddls); + + /* Check index and row value */ + for (String tableName : tableNames) { + assertNumIndexes(tenantId, tableName, numIndexes); + assertRowCountByIndex(nosqlHandle, tableName, + numIndexes - 1, numRows); + checkRow(nosqlHandle, tableName, numRows - 1, + numFields, numAddField); + } + + /* + * Drop index, drop field + */ + for (String tableName : tableNames) { + plan.dropIndex(tableName, numIndexes) + .dropField(tableName, numDropField); + + } + ddls = plan.build(true); + execWithThreads(CONCURRENT_NUM, ddls); + + /* Check index and row value */ + for (String tableName : tableNames) { + assertNumIndexes(tenantId, tableName, 0); + checkRow(nosqlHandle, tableName, numRows - 1, numFields, + (numAddField - numDropField)); + } + + /* + * Drop tables + */ + ddls = plan.dropTable(numTables).build(true); + execWithThreads(CONCURRENT_NUM, ddls); + assertNumTables(tenantId, 0); + } + + @Test + public void testMultiTenants() { + /* This test needs 3 tenants, it is not applicable in cloud test */ + assumeTrue("Skip this test if not minicloud test", useMiniCloud); + + final int numTenants = 3; + + TableRequestLimits requestLimits = + tenantLimits.getStandardTableLimits(); + final int numTables = Math.min(tenantLimits.getNumTables(), 3); + final int numFields = Math.min(requestLimits.getColumnsPerTable(), 5); + final int numIndexes = Math.min(requestLimits.getIndexesPerTable(), 3); + final int evolveLimit = requestLimits.getSchemaEvolutions(); + final int numAddField = Math.min((evolveLimit + 1) / 2, 3); + final int numDropField = Math.min(((evolveLimit > numAddField) ? 
+ (evolveLimit - numAddField) : 0), + numAddField); + + final int numRows = 100; + final TableLimits tableLimits = new TableLimits(1000, 500, 10); + final String[] tenantIds = new String[numTenants]; + for (int i = 0; i < numTenants; i++) { + String tenantId = makeTenantId(i); + if (getSCURL() != null) { + addTier(tenantId, tenantLimits); + } + tenantIds[i] = tenantId; + } + + final DDLGenerator plan = new DDLGenerator(getTenantId()); + List ddls = null; + + /* + * Create tables + */ + for (String tenantId : tenantIds) { + plan.setTenantId(tenantId) + .createTable(numFields, numTables, tableLimits); + } + ddls = plan.build(true); + execWithThreads(CONCURRENT_NUM, ddls); + + /* Load rows to tables */ + for (String tenantId : tenantIds) { + NoSQLHandle nosqlHandle = getTenantHandle(tenantId); + String[] tableNames = getTableNames(tenantId, numTables); + loadRowsToTables(nosqlHandle, CONCURRENT_NUM, tableNames, + numFields, numRows); + } + + /* + * Create Indexes, alter table add fields + */ + for (int tableIndex = 0; tableIndex < numTables; tableIndex++) { + for (String tenantId : tenantIds) { + String tableName = makeTableName(tenantId, tableIndex); + plan.setTenantId(tenantId) + .createIndex(tableName, numIndexes) + .addField(tableName, numAddField); + } + } + ddls = plan.build(true); + execWithThreads(CONCURRENT_NUM, ddls); + + /* Verify after create index, alter table add field */ + for (String tenantId : tenantIds) { + NoSQLHandle nosqlHandle = getTenantHandle(tenantId); + for (int tableIndex = 0; tableIndex < numTables; tableIndex++) { + String tableName = makeTableName(tenantId, tableIndex); + assertNumIndexes(tenantId, tableName, numIndexes); + assertRowCountByIndex(nosqlHandle, tableName, + numIndexes - 1, numRows); + /* Check row value */ + checkRow(nosqlHandle, tableName, numRows - 1, numFields, + numAddField); + } + } + + /* + * Drop Indexes and drop field + */ + for (int tableIndex = 0; tableIndex < numTables; tableIndex++) { + for (String tenantId : tenantIds) { + String tableName = makeTableName(tenantId, tableIndex); + plan.setTenantId(tenantId) + .dropIndex(tableName, numIndexes) + .dropField(tableName, numDropField); + } + } + ddls = plan.build(true); + execWithThreads(CONCURRENT_NUM, ddls); + + /* Verify after drop index and drop field */ + for (String tenantId : tenantIds) { + NoSQLHandle nosqlHandle = getTenantHandle(tenantId); + for (int tableIndex = 0; tableIndex < numTables; tableIndex++) { + String tableName = makeTableName(tenantId, tableIndex); + assertNumIndexes(tenantId, tableName, 0); + /* Check row value */ + checkRow(nosqlHandle, tableName, numRows - 1, numFields, + (numAddField - numDropField)); + } + } + + /* + * Drop tables + */ + for (String tenantId : tenantIds) { + plan.setTenantId(tenantId).dropTable(numTables); + } + ddls = plan.build(true); + execWithThreads(CONCURRENT_NUM, ddls); + /* No table exists */ + for (String tenantId : tenantIds) { + assertNumTables(tenantId, 0); + } + } + + @Test + public void testTableIndexLimits() { + /* + * This test aim to create the max number of tables in a tenant, it is + * not applicable for cloud testing + */ + assumeTrue("Skip this test if not minicloud test", useMiniCloud); + + TableRequestLimits requestLimits = + tenantLimits.getStandardTableLimits(); + final int numTables = tenantLimits.getNumTables(); + final int numIndexes = requestLimits.getIndexesPerTable(); + final int numFields = numIndexes + 1; + + final TableLimits tableLimits = new TableLimits(1000, 500, 10); + final String tenantId = getTenantId(); + 
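+ /* Each phase below builds a batch of DDL statements, executes them (asynchronously or with a pool of worker threads), and then verifies the resulting table and index counts. */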
+ final DDLGenerator plan = new DDLGenerator(tenantId); + List ddls = null; + + /* Create 2 tables with X column, X is column number limit per table */ + ddls = plan.createTable(numFields, 2, tableLimits).build(true); + execAsyncAndWait(ddls); + assertNumTables(tenantId, 2); + + /* + * Create M indexes on a table, M is the index number limit per table. + */ + final String table0Name = makeTableName(tenantId, 0); + ddls = plan.createIndex(table0Name, numIndexes).build(true); + execAsyncAndWait(ddls); + assertNumIndexes(tenantId, table0Name, numIndexes); + + /* + * Create M + 1 indexes on a table, M is the index number limit per + * table. Creating last index should have failed. + * + * TODO: bug? + */ + /*final String table1Name = makeTableName(tenantId, 1); + ddls = plan.createIndex(table1Name, numIndexes + 1).build(true); + execWithThreads(numThreads, ddls, IndexLimitException.class, 1); + assertNumIndexes(tenantId, table1Name, numIndexes); */ + + /* drop tables */ + ddls = plan.dropTable(2).build(true); + execAsyncAndWait(ddls); + assertNumTables(tenantId, 0); + + /* + * Create N tables, N is the table number limit. + */ + ddls = plan.createTable(numFields, numTables, tableLimits).build(true); + execWithThreads(CONCURRENT_NUM, ddls); + assertNumTables(tenantId, numTables); + + /* + * Drop last i tables, then create i + 1 tables concurrently, the total + * number of tables is N + 1 that exceeded the limit N, so creating + * the last table should have failed. + * + * TODO: bug? + */ + /* + int nd = 2; + String[] tableNames = getTableNames(tenantId, numTables - nd, nd); + ddls = plan.dropTable(tableNames).build(true); + execWithThreads(numThreads, ddls); + assertNumTables(tenantId, numTables - nd); + + tableNames = getTableNames(tenantId, numTables - nd, nd + 1); + ddls = plan.createTable(numFields, tableNames, tableLimits).build(true); + execWithThreads(numThreads, ddls, TableLimitException.class, 1); + assertNumTables(tenantId, numTables); */ + + /* Drop all tables */ + ddls = plan.dropTable(numTables).build(true); + execWithThreads(CONCURRENT_NUM, ddls); + assertNumTables(tenantId, 0); + } + + private static String[] getTableNames(String tenantId, int numTables) { + return getTableNames(tenantId, 0, numTables); + } + + private static String[] getTableNames(String tenantId, int from, int num) { + final String[] tableNames = new String[num]; + for (int i = 0; i < tableNames.length; i++) { + tableNames[i] = makeTableName(tenantId, from + i); + } + return tableNames; + } + + private void loadRows(NoSQLHandle nosqlHandle, + String tableName, + int numFields, + int numRows) { + final PutRequest putReq = new PutRequest().setTableName(tableName); + + PutResult putRet; + for (int i = 0; i < numRows; i++) { + MapValue row = createRow(i, numFields); + putReq.setValue(row); + try { + putRet = nosqlHandle.put(putReq); + assertNotNull(putRet.getVersion()); + } catch (Exception ex) { + fail("Failed to put row to table " + tableName + ": " + + ex.getMessage()); + } + } + } + + private void execAsyncAndWait(List ddls) { + + final Map> results = + new HashMap>(); + + String tenantId; + TableResult tret; + List trets; + for (DDLInfo ddl : ddls) { + tenantId = ddl.getTenantId(); + try { + tret = ddlExecutor.execNoWait(ddl); + } catch (Throwable t) { + fail("Execute " + ddl + " failed: " + t); + return; + } + if (results.containsKey(tenantId)) { + trets = results.get(tenantId); + } else { + trets = new ArrayList(); + results.put(tenantId, trets); + } + trets.add(tret); + } + + /* Wait for completion of ddls' 
executions */ + for (Entry> e : results.entrySet()) { + for (TableResult ret : e.getValue()) { + try { + ddlExecutor.waitForDone(e.getKey(), waitMillis, ret); + } catch (Throwable t) { + fail("WaitForDone failed: " + t); + } + } + } + } + + private void execWithThreads(int numThreads, List ddls) { + execWithThreads(numThreads, ddls, null, 0); + } + + private void execWithThreads(int numThreads, + List ddls, + Class expectedExceptionClass, + int expNumException) { + + final ArrayBlockingQueue ddlQueue = + new ArrayBlockingQueue(ddls.size(), true, ddls); + + /* Start threads */ + final List threads = new ArrayList(numThreads); + final TestExceptionHandler handler = + new TestExceptionHandler(expectedExceptionClass); + + for (int i = 0; i < numThreads; i++) { + Thread thd = new DDLThread(ddlQueue, "ddlThd" + i); + threads.add(thd); + thd.setUncaughtExceptionHandler(handler); + thd.start(); + } + + /* Join all threads */ + for (Thread thread : threads) { + try { + thread.join(); + } catch (Exception ex) { + fail("Wait for thread " + thread + ": " + ex); + } + } + + /* Check if expect to catch exception */ + Map failures = handler.getUnexpectedException(); + if (!failures.isEmpty()) { + for (Entry e : failures.entrySet()) { + System.err.println( + "Unexpected exception caught from " + e.getKey()); + e.getValue().printStackTrace(); + } + fail("DDLThreads execute failed " + failures.keySet()); + } + + if (expectedExceptionClass != null) { + String exCls = expectedExceptionClass.getName(); + int actNumEx = handler.getNumExpectedException(); + if (expNumException > 0) { + assertEquals("Expect to catch " + exCls + " " + + expNumException + " times but caught " + + actNumEx + " times", expNumException, actNumEx); + } else { + assertTrue("Expect to catch " + exCls + " " + expNumException + + " times but not", + actNumEx > 0); + } + } + } + + /** + * Load rows to the specified tables + */ + private void loadRowsToTables(NoSQLHandle nosqlHandle, + int numThreads, + String[] tableNames, + int numFields, + int numRows) { + + final ExecutorService executor = + Executors.newFixedThreadPool(numThreads); + final ArrayList> futures = + new ArrayList>(numThreads); + + for (String tableName : tableNames) { + LoadTask task = new LoadTask(nosqlHandle, tableName, numFields, + numRows); + futures.add(executor.submit(task)); + } + executor.shutdown(); + + for (Future f : futures) { + try { + int count = f.get().intValue(); + assertEquals(numRows, count); + } catch (Exception ex) { + fail("LoadTask failed: " + ex); + } + } + } + + private void putRow(NoSQLHandle nosqlHandle, + String tableName, + int id, + int numFields) { + final MapValue row = createRow(id, numFields, 0); + PutRequest putReq = new PutRequest() + .setTableName(tableName) + .setValue(row); + try { + PutResult putRet = nosqlHandle.put(putReq); + assertNotNull(putRet.getVersion()); + } catch (Exception ex) { + fail("Failed to put row to table " + tableName + ": " + + ex.getMessage()); + + } + } + + private void checkRow(NoSQLHandle nosqlHandle, + String tableName, + int id, + int numFields, + int numNewFields) { + + final MapValue expRow = createRow(id, numFields, numNewFields); + + final MapValue key = createKey(id); + final GetRequest getReq = new GetRequest() + .setTableName(tableName) + .setKey(key); + + try { + GetResult getRet = nosqlHandle.get(getReq); + assertEquals(expRow, getRet.getValue()); + } catch (Exception ex) { + fail("Failed to get row: " + ex.getMessage()); + } + } + + private void assertRowCountByIndex(NoSQLHandle nosqlHandle, + 
String tableName, + int idxIndex, + int expCount) { + final String fieldName = makeFieldName(idxIndex); + final String query = "SELECT count(" + fieldName+ ") FROM " + tableName; + long ret = execCountQuery(nosqlHandle, query); + assertEquals(expCount, ret); + } + + private long countRows(NoSQLHandle nosqlHandle, String tableName) { + final String query = "SELECT count(*) FROM " + tableName; + return execCountQuery(nosqlHandle, query); + } + + private long execCountQuery(NoSQLHandle nosqlHandle, String query) { + + try { + PrepareRequest prepReq = new PrepareRequest() + .setStatement(query); + PrepareResult prepRet = nosqlHandle.prepare(prepReq); + + QueryRequest queryReq = new QueryRequest() + .setPreparedStatement(prepRet); + List results = new ArrayList(); + do { + QueryResult result = nosqlHandle.query(queryReq); + if (!result.getResults().isEmpty()) { + results.addAll(result.getResults()); + } + } while (!queryReq.isDone()); + assertEquals(1, results.size()); + MapValue value = results.get(0); + return value.get("Column_1").asLong().getLong(); + } catch (Exception ex) { + fail("Failed to execute [" + query + "]: " + ex.getMessage()); + } + return -1; + } + + private void assertNumTables(String tenantId, int exp) { + assertEquals(exp, getNumTables(tenantId)); + } + + private int getNumTables(String tenantId) { + final ListTablesRequest ltReq = new ListTablesRequest(); + try { + NoSQLHandle nosqlHandle = getTenantHandle(tenantId); + ListTablesResult ltRet = nosqlHandle.listTables(ltReq); + String[] tables = ltRet.getTables(); + return tables.length; + } catch (Exception ex) { + fail("Failed to get index: " + ex.getMessage()); + } + return -1; + } + + private void assertNumIndexes(String tenantId, String tableName, int exp) { + assertEquals(exp, getNumIndexes(tenantId, tableName)); + } + + private int getNumIndexes(String tenantId, String tableName) { + final NoSQLHandle nosqlHandle = getTenantHandle(tenantId); + final GetIndexesRequest giReq = new GetIndexesRequest() + .setTableName(tableName); + try { + GetIndexesResult giRet = nosqlHandle.getIndexes(giReq); + return giRet.getIndexes().length; + } catch (Exception ex) { + fail("Failed to get index: " + ex.getMessage()); + } + return -1; + } + + private TableResult execTableRequestNoWait(NoSQLHandle nosqlHandle, + String statement, + TableLimits limits) { + TableRequest request = new TableRequest().setStatement(statement). + setTableLimits(limits). 
+ setTimeout(waitMillis); + return nosqlHandle.tableRequest(request); + } + + void waitForDone(NoSQLHandle nosqlHandle, int waitMs, TableResult result) { + if (result.getTableState() == State.ACTIVE || + result.getTableState() == State.DROPPED) { + return; + } + result.waitForCompletion(nosqlHandle, waitMs, 1500); + } + + /* Create Row value */ + private MapValue createRow(int id, int numFields) { + return createRow(id, numFields, 0); + } + + private MapValue createRow(int id, int numFields, int numNewFields) { + MapValue row = createKey(id); + for (int i = 0; i < numFields; i++) { + String value = makeString((id % (63 - numFields)) + i + 1, i); + row.put(makeFieldName(i), value); + } + for (int i = 0; i < numNewFields; i++) { + row.put(makeNewFieldName(i), NullValue.getInstance()); + } + return row; + } + + /* Create primary key value */ + private MapValue createKey(int id) { + return new MapValue().put("id", id); + } + + /* Create tenantId */ + private static String makeTenantId(int index) { + return "CDTTenant" + index; + } + + /* Create table name */ + private static String makeTableName(String tenantId, int index) { + return tenantId + "T" + index; + } + + /* Create index name */ + private static String makeIndexName(int index, int ... fieldIndex) { + final StringBuilder sb = new StringBuilder(); + sb.append("idx"); + sb.append(index); + for (int f : fieldIndex) { + sb.append("_f"); + sb.append(f); + } + return sb.toString(); + } + + private static String makeFieldName(int index) { + return "f" + index; + } + + private static String makeNewFieldName(int index) { + return "nf" + index; + } + + private static String makeString(int length, int from) { + StringBuilder sb = new StringBuilder(); + for (int i = from; i < length + from; i++) { + sb.append((char)('a' + (i % 26))); + } + return sb.toString(); + } + + private NoSQLHandle getTenantHandle(String tenantId) { + NoSQLHandle nosqlHandle = handleCache.get(tenantId); + if (nosqlHandle != null) { + return nosqlHandle; + } + synchronized(handleCache) { + nosqlHandle = handleCache.get(tenantId); + if (nosqlHandle == null) { + if (tenantId.equals(getTenantId())) { + nosqlHandle = handle; + } else { + try { + nosqlHandle = configHandle(getProxyURL(), tenantId); + } catch (Exception ex) { + fail("Failed to get nosql handle for tenant: " + + tenantId); + return null; + } + } + handleCache.put(tenantId, nosqlHandle); + } + return nosqlHandle; + } + } + + private NoSQLHandle configHandle(URL url, String tenantId) { + + NoSQLHandleConfig hconfig = new NoSQLHandleConfig(url); + hconfig.configureDefaultRetryHandler(5, 0); + hconfig.setRequestTimeout(10_000); + SecureTestUtil.setAuthProvider(hconfig, isSecure(), onprem, tenantId); + return getHandle(hconfig); + } + + /** + * A class to generate the sequence of DDL objects. 
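+ * The createXxx/dropXxx/addField/dropField builder methods each append a
+ * batch of DDLInfo to an internal list; build() merges the accumulated
+ * batches into a single execution queue.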
+ */ + private static class DDLGenerator { + private final List ddlsList; + private String tenantId; + + DDLGenerator(String defaultTenantId) { + ddlsList = new ArrayList(); + tenantId = defaultTenantId; + } + + DDLGenerator createTable(int numFields, int num, TableLimits limits) { + if (num > 0) { + String[] tableNames = getTableNames(tenantId, num); + createTable(numFields, tableNames, limits); + } + return this; + } + + DDLGenerator createTable(int numFields, + String[] tableNames, + TableLimits limits) { + if (tableNames != null && tableNames.length > 0) { + addDDLs(DDLType.CREATE_TABLE, tableNames, + makeCreateTableDDLs(tableNames, numFields), + limits); + } + return this; + } + + DDLGenerator createIndex(String tableName, int num) { + if (num > 0) { + addDDLs(DDLType.CREATE_INDEX, tableName, + makeCreateIndexDDLs(tableName, num)); + } + return this; + } + + DDLGenerator addField(String tableName, int num) { + if (num > 0) { + addDDLs(DDLType.ALTER_TABLE, tableName, + makeAddFieldDDLs(tableName, num)); + } + return this; + } + + DDLGenerator dropField(String tableName, int num) { + if (num > 0) { + addDDLs(DDLType.ALTER_TABLE, tableName, + makeDropFieldDDLs(tableName, num)); + } + return this; + } + + DDLGenerator dropIndex(String tableName, int num) { + if (num > 0) { + addDDLs(DDLType.DROP_INDEX, tableName, + makeDropIndexDDLs(tableName, num)); + } + return this; + } + + DDLGenerator dropTable(int num) { + if (num > 0) { + String[] tableNames = getTableNames(tenantId, num); + dropTable(tableNames); + } + return this; + } + + DDLGenerator dropTable(String[] tableNames) { + if (tableNames != null && tableNames.length > 0) { + addDDLs(DDLType.DROP_TABLE, tableNames, + makeDropTableDDLs(tableNames), + null); + } + return this; + } + + private void addDDLs(DDLType type, String tableName, String[] ddls) { + DDLInfo[] ddlInfos = new DDLInfo[ddls.length]; + for (int i = 0; i < ddls.length; i++) { + String ddl = ddls[i]; + String indexName = null; + if (type == DDLType.CREATE_INDEX || + type == DDLType.DROP_INDEX) { + indexName = makeIndexName(i, i); + } + if (tableName == null) { + if (type == DDLType.CREATE_TABLE || + type == DDLType.DROP_TABLE) { + tableName = makeTableName(getTenantId(), i); + } + } + DDLInfo info = new DDLInfo(getTenantId(), tableName, + indexName, type, ddl); + ddlInfos[i] = info; + } + ddlsList.add(ddlInfos); + } + + private void addDDLs(DDLType type, + String[] tableNames, + String[] ddls, + TableLimits limits) { + DDLInfo[] ddlInfos = new DDLInfo[ddls.length]; + for (int i = 0; i < ddls.length; i++) { + String tableName = tableNames[i]; + String ddl = ddls[i]; + DDLInfo info = new DDLInfo(getTenantId(), tableName, type, + ddl, limits); + ddlInfos[i] = info; + } + ddlsList.add(ddlInfos); + } + + DDLGenerator setTenantId(String tenantId) { + this.tenantId = tenantId; + return this; + } + + String getTenantId() { + return tenantId; + } + + /* + * Merges the elements in multiple DDLInfo[] into a queue. 
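+ * Elements are taken from the batches in round-robin order, so DDLs from
+ * different batches are interleaved in the returned queue; when clear is
+ * true the accumulated batches are discarded after the merge.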
+ */ + List build(boolean clear) { + final List queue = new ArrayList(); + @SuppressWarnings("unchecked") + Iterator[] ddlIters = new Iterator[ddlsList.size()]; + List indexes = new ArrayList(ddlIters.length); + for (int i = 0; i < ddlIters.length; i++) { + DDLInfo[] ddls = ddlsList.get(i); + ddlIters[i] = Arrays.asList(ddls).iterator(); + indexes.add(i); + } + + Iterator indIter = indexes.iterator(); + while(indIter.hasNext()) { + int ind = indIter.next(); + Iterator ddlIter = ddlIters[ind]; + + if (ddlIter.hasNext()) { + queue.add(ddlIter.next()); + } else { + indIter.remove(); + } + if (!indIter.hasNext()) { + if (indexes.isEmpty()) { + break; + } + indIter = indexes.iterator(); + } + } + if (clear) { + clear(); + } + return queue; + } + + void clear() { + ddlsList.clear(); + } + + private static String[] makeCreateTableDDLs(String[] tableNames, + int numFields) { + final String[] ddls = new String[tableNames.length]; + int i = 0; + for (String tableName : tableNames) { + ddls[i++] = makeCreateTableDDL(tableName, numFields); + } + return ddls; + } + + private static String makeCreateTableDDL(String tableName, + int numFields) { + final StringBuilder sb = new StringBuilder("CREATE TABLE "); + sb.append(tableName); + sb.append("("); + + sb.append("id INTEGER, "); + for (int i = 0; i < numFields; i++) { + sb.append(makeFieldName(i)); + sb.append(" STRING, "); + } + sb.append("PRIMARY KEY(id))"); + return sb.toString(); + } + + private static String[] makeDropTableDDLs(String[] tableNames) { + final String[] ddls = new String[tableNames.length]; + int i = 0; + for (String table : tableNames) { + ddls[i++] = makeDropTableDDL(table); + } + return ddls; + } + + private static String makeDropTableDDL(String tableName) { + final StringBuilder sb = new StringBuilder("DROP TABLE "); + sb.append(tableName); + return sb.toString(); + } + + private static String[] makeCreateIndexDDLs(String tableName, int num) { + final String[] ddls = new String[num]; + for (int i = 0; i < ddls.length; i++) { + final int fidx = i; + ddls[i] = makeCreateIndexDDL(tableName, i, fidx); + } + return ddls; + } + + private static String makeCreateIndexDDL(String tableName, + int idxIndex, + int... fieldIndex) { + final StringBuilder sb = new StringBuilder("CREATE INDEX "); + sb.append(makeIndexName(idxIndex, fieldIndex)); + sb.append(" on "); + sb.append(tableName); + sb.append("("); + + boolean firstField = true; + for (int fidx : fieldIndex) { + if (firstField) { + firstField = false; + } else { + sb.append(", "); + } + sb.append(makeFieldName(fidx)); + } + sb.append(")"); + return sb.toString(); + } + + private static String[] makeAddFieldDDLs(String tableName, int num) { + final String[] ddls = new String[num]; + for (int i = 0; i < ddls.length; i++) { + ddls[i] = makeAddDropFieldDDL(tableName, true, i); + } + return ddls; + } + + private static String[] makeDropFieldDDLs(String tableName, int num) { + final String[] ddls = new String[num]; + for (int i = 0; i < ddls.length; i++) { + ddls[i] = makeAddDropFieldDDL(tableName, false, i); + } + return ddls; + } + + private static String makeAddDropFieldDDL(String tableName, + boolean addField, + int newFieldIndex) { + final StringBuilder sb = new StringBuilder("ALTER TABLE "); + sb.append(tableName); + sb.append("("); + sb.append(addField ? 
"ADD " : "DROP "); + sb.append(makeNewFieldName(newFieldIndex)); + if (addField) { + sb.append(" STRING"); + } + sb.append(")"); + return sb.toString(); + } + + private static String[] makeDropIndexDDLs(String tableName, int num) { + final String[] ddls = new String[num]; + for (int i = 0; i < ddls.length; i++) { + final int fidx = i; + ddls[i] = makeDropIndexDDL(tableName, i, fidx); + } + return ddls; + } + + private static String makeDropIndexDDL(String tableName, + int idxIndex, + int ... fieldIndex) { + final StringBuilder sb = new StringBuilder("DROP INDEX "); + sb.append(makeIndexName(idxIndex, fieldIndex)); + sb.append(" ON "); + sb.append(tableName); + return sb.toString(); + } + } + + /* DDL types */ + static enum DDLType { + CREATE_TABLE, + CREATE_INDEX, + DROP_TABLE, + DROP_INDEX, + ALTER_TABLE + } + + /** + * A thread to execute DDL statements that read from a queue. + */ + private class DDLThread extends Thread { + private final BlockingQueue queue; + + DDLThread(BlockingQueue queue, + String name) { + this.queue = queue; + setName(name); + } + + @Override + public void run() { + DDLInfo ddlInfo; + while((ddlInfo = queue.poll()) != null) { + TableResult ret = ddlExecutor.execNoWait(ddlInfo); + ddlExecutor.waitForDone(ddlInfo.getTenantId(), waitMillis, ret); + } + } + } + + /** + * A class encapsulates ddl and the target tenantId. + */ + static class DDLInfo { + private final String tenantId; + private final String tableName; + private final String indexName; + private final String ddl; + private final TableLimits limits; + private final DDLType type; + + DDLInfo(String tenantId, + String tableName, + DDLType type, + String ddl) { + this(tenantId, tableName, null, type, ddl); + } + + DDLInfo(String tenantId, + String tableName, + DDLType type, + String ddl, + TableLimits limits) { + this(tenantId, tableName, null, type, ddl, limits); + } + + DDLInfo(String tenantId, + String tableName, + String indexName, + DDLType type, + String ddl) { + this(tenantId, tableName, indexName, type, ddl, null); + } + + DDLInfo(String tenantId, + String tableName, + String indexName, + DDLType type, + String ddl, + TableLimits limits) { + this.tenantId = tenantId; + this.tableName = tableName; + this.indexName = indexName; + this.ddl = ddl; + this.type = type; + this.limits = limits; + } + + String getTenantId() { + return tenantId; + } + + String getTableName() { + return tableName; + } + + String getIndexName() { + return indexName; + } + + String getDDL() { + return ddl; + } + + TableLimits getTableLimits() { + return limits; + } + + DDLType getType() { + return type; + } + + @Override + public String toString() { + return "tenantId=" + tenantId + "; tableName=" + tableName + + "; type=" + type + "; ddl=" + ddl; + } + } + + /** + * Load N rows to the specified table. 
+ */ + private class LoadTask implements Callable { + + private final NoSQLHandle nosqlHandle; + private final String tableName; + private final int nFields; + private final int nRows; + + LoadTask(NoSQLHandle nosqlHandle, + String tableName, + int nFields, + int nRows) { + this.nosqlHandle = nosqlHandle; + this.tableName = tableName; + this.nFields = nFields; + this.nRows = nRows; + } + + @Override + public Integer call() throws Exception { + loadRows(nosqlHandle, tableName, nFields, nRows); + return (int)countRows(nosqlHandle, tableName); + } + } + + interface DDLExecutor { + T execNoWait(DDLInfo ddlInfo); + void waitForDone(String tenantId, int waitMs, T result); + } + + private class ClientExecutor implements DDLExecutor { + @Override + public TableResult execNoWait(DDLInfo ddl) { + NoSQLHandle nosqlHandle = getTenantHandle(ddl.getTenantId()); + return execTableRequestNoWait(nosqlHandle, ddl.getDDL(), + ddl.getTableLimits()); + } + + @Override + public void waitForDone(String tenantId, + int waitMs, + TableResult result) { + NoSQLHandle nosqlHandle = getTenantHandle(tenantId); + ConcurrentDDLTest.this.waitForDone(nosqlHandle, waitMs, result); + } + } + + /* UncaughtExceptionHandler for DDLThead */ + private class TestExceptionHandler implements UncaughtExceptionHandler { + + private Class expectedExceptionCls; + private AtomicInteger numExpectedException; + private Map unexpectedExceptions; + + public TestExceptionHandler(Class expExCls) { + expectedExceptionCls = expExCls; + numExpectedException = new AtomicInteger(); + unexpectedExceptions = + Collections.synchronizedMap(new HashMap()); + } + + @Override + public void uncaughtException(Thread t, Throwable e) { + if (e.getClass() == expectedExceptionCls) { + numExpectedException.incrementAndGet(); + } else { + unexpectedExceptions.put(t.getName(), e); + } + } + + public int getNumExpectedException() { + return numExpectedException.get(); + } + + public Map getUnexpectedException() { + return unexpectedExceptions; + } + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/CreationTimeTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/CreationTimeTest.java new file mode 100644 index 00000000..2f059557 --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/CreationTimeTest.java @@ -0,0 +1,1520 @@ +/*- + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * See the file LICENSE for redistribution information. 
+ * + */ + +package oracle.nosql.proxy; + + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; + +import oracle.nosql.driver.Consistency; +import oracle.nosql.driver.Durability; +import oracle.nosql.driver.Durability.ReplicaAckPolicy; +import oracle.nosql.driver.Durability.SyncPolicy; +import oracle.nosql.driver.SystemException; +import oracle.nosql.driver.TimeToLive; +import oracle.nosql.driver.Version; +import oracle.nosql.driver.ops.DeleteRequest; +import oracle.nosql.driver.ops.DeleteResult; +import oracle.nosql.driver.ops.GetRequest; +import oracle.nosql.driver.ops.GetResult; +import oracle.nosql.driver.ops.PrepareRequest; +import oracle.nosql.driver.ops.PrepareResult; +import oracle.nosql.driver.ops.PreparedStatement; +import oracle.nosql.driver.ops.PutRequest; +import oracle.nosql.driver.ops.PutRequest.Option; +import oracle.nosql.driver.ops.PutResult; +import oracle.nosql.driver.ops.QueryRequest; +import oracle.nosql.driver.ops.QueryResult; +import oracle.nosql.driver.ops.TableLimits; +import oracle.nosql.driver.ops.TableResult; +import oracle.nosql.driver.ops.WriteMultipleRequest; +import oracle.nosql.driver.ops.WriteMultipleResult; +import oracle.nosql.driver.ops.WriteRequest; +import oracle.nosql.driver.ops.WriteResult; +import oracle.nosql.driver.values.IntegerValue; +import oracle.nosql.driver.values.JsonNullValue; +import oracle.nosql.driver.values.MapValue; +import oracle.nosql.driver.values.NullValue; +import org.junit.FixMethodOrder; +import org.junit.Test; +import org.junit.runners.MethodSorters; + +/* + * The tests are ordered so that the zzz* test goes last so it picks up + * DDL history reliably. + */ +@FixMethodOrder(MethodSorters.NAME_ASCENDING) +public class CreationTimeTest extends ProxyTestBase { + + @Test + public void smokeTest() { + + try { + + MapValue key = new MapValue().put("id", 10); + + MapValue value = new MapValue().put("id", 10).put("name", "jane"); + + /* drop a table */ + TableResult tres = tableOperation(handle, + "drop table if exists testusers", + null, TableResult.State.DROPPED, + 20000); + assertNotNull(tres.getTableName()); + assertTrue(tres.getTableState() == TableResult.State.DROPPED); + assertNull(tres.getTableLimits()); + + /* Create a table */ + tres = tableOperation( + handle, + "create table if not exists testusers(id integer, " + + "name string, primary key(id))", + new TableLimits(500, 500, 50), + 20000); + assertEquals(TableResult.State.ACTIVE, tres.getTableState()); + + /* Create an index */ + tres = tableOperation( + handle, + "create index if not exists Name on testusers(name)", + null, + TableResult.State.ACTIVE, + 20000); + assertEquals(TableResult.State.ACTIVE, tres.getTableState()); + + /* PUT */ + PutRequest putRequest = new PutRequest() + .setValue(value) // key is 10 + .setTableName("testusers"); + + long startTime1 = System.currentTimeMillis(); + + PutResult res = handle.put(putRequest); + assertNotNull("Put failed", res.getVersion()); + long interval1 = System.currentTimeMillis() - startTime1; + // no return row so creation time is 0 + checkCreationTime(res.getExistingCreationTime(), 0, 0); + + + long startTime2 = System.currentTimeMillis(); + /* put another one. 
set TTL to test that path */ + putRequest.setTTL(TimeToLive.ofHours(2)); + value.put("id", 20); // key is 20 + handle.put(putRequest); + long interval2 = System.currentTimeMillis() - startTime2; + // no return row so creation time is 0 + checkCreationTime(res.getExistingCreationTime(), 0, 0); + + /* + * Test ReturnRow for simple put of a row that exists. 2 cases: + * 1. unconditional (will return info) + * 2. if absent (will return info) + */ + value.put("id", 20); + // turn on returning row + putRequest.setReturnRow(true); + + PutResult pr = handle.put(putRequest); + + assertNotNull(pr.getVersion()); /* success */ + assertNotNull(pr.getExistingVersion()); + assertNotNull(pr.getExistingValue()); + assertTrue(pr.getExistingCreationTime()!=0); + checkCreationTime(pr.getExistingCreationTime(), startTime2, interval2); + assertTrue(pr.getExistingModificationTime() != 0); + + + putRequest.setOption(Option.IfAbsent); + pr = handle.put(putRequest); + assertNull(pr.getVersion()); /* failure */ + assertNotNull(pr.getExistingVersion()); + assertNotNull(pr.getExistingValue()); + checkCreationTime(pr.getExistingCreationTime(), startTime2, interval2); + assertTrue(pr.getExistingModificationTime() != 0); + + /* clean up */ + putRequest.setReturnRow(false); + putRequest.setOption(null); + + /* GET first row, id: 10 */ + GetRequest getRequest = new GetRequest() + .setKey(key) + .setTableName("testusers"); + + GetResult res1 = handle.get(getRequest); + assertNotNull("Get failed", res1.getJsonValue()); + assertReadKB(res1); + + assertTrue(res1.getCreationTime() > 0); + assertTrue(res1.getCreationTime() - startTime1 <= interval1); + checkCreationTime(res1.getCreationTime(), startTime1, interval1); + + + /* DELETE same key, id: 10 */ + DeleteRequest delRequest = new DeleteRequest() + .setKey(key) + .setTableName("testusers") + .setReturnRow(true); + + DeleteResult del = handle.delete(delRequest); + assertTrue("Delete failed", del.getSuccess()); + checkCreationTime(del.getExistingCreationTime(), startTime1, interval1); + + /* GET -- no row, it was removed above */ + getRequest.setTableName("testusers"); + res1 = handle.get(getRequest); + assertNull(res1.getValue()); + // no row hence creationTime is 0 + assertEquals(0, res1.getCreationTime()); + } catch (Exception e) { + checkErrorMessage(e); + e.printStackTrace(); + fail("Exception in test"); + } + } + + @Test + public void testPutGetDelete() { + + final String tableName = "testusers"; + final int recordKB = 2; + + /* Create a table */ + TableResult tres = tableOperation( + handle, + "create table if not exists testusers(id integer, " + + "name string, primary key(id))", + new TableLimits(500, 500, 50), + 20000); + assertEquals(TableResult.State.ACTIVE, tres.getTableState()); + + final String name = genString((recordKB - 1) * 1024); + MapValue value = new MapValue().put("id", 10).put("name", name); + MapValue newValue = new MapValue().put("id", 11).put("name", name); + MapValue newValue1 = new MapValue().put("id", 12).put("name", name); + MapValue newValue2 = new MapValue().put("id", 13).put("name", name); + + /* Durability will be ignored unless run with -Donprem=true */ + Durability dur = new Durability(SyncPolicy.WRITE_NO_SYNC, + SyncPolicy.NO_SYNC, + ReplicaAckPolicy.NONE); + + + /* Put a row */ + long startTime = System.currentTimeMillis(); + PutRequest putReq = new PutRequest() + .setValue(value) // key is 10 + .setDurability(dur) + .setTableName(tableName) + .setReturnRow(true); + PutResult putRes = handle.put(putReq); + checkPutResult(putReq, putRes, + 
true /* shouldSucceed */, + false /* rowPresent */, + null /* expPrevValue */, + null /* expPrevVersion */, + false, /* modtime should be zero */ + recordKB, + false /* put overWrite */ ); + long interval = System.currentTimeMillis() - startTime; + // no return row hence creationTime is 0 + assertEquals(0, putRes.getExistingCreationTime()); + + + /* Put a row again with SetReturnRow(false). + * expect no row returned + */ + putReq.setReturnRow(false); + putRes = handle.put(putReq); + checkPutResult(putReq, putRes, + true /* shouldSucceed */, + false /* rowPresent */, + null /* expPrevValue */, + null /* expPrevVersion */, + false, /* modtime should be zero */ + recordKB, + true /* put over write */); + Version oldVersion = putRes.getVersion(); + // no return row + assertEquals(0, putRes.getExistingCreationTime()); + + /* + * Put row again with SetReturnRow(true), + * expect existing row returned. + */ + putReq.setReturnRow(true); + putRes = handle.put(putReq); + checkPutResult(putReq, putRes, + true /* shouldSucceed */, + true /* rowPresent */, + value /* expPrevValue */, + oldVersion /* expPrevVersion */, + true, /* modtime should be zero */ + recordKB, + true /* put overWrite */); + oldVersion = putRes.getVersion(); + checkCreationTime(putRes.getExistingCreationTime(), startTime, interval); + + /* + * Put a new row with SetReturnRow(true), + * expect no existing row returned. + */ + putReq = new PutRequest() + .setValue(newValue) + .setDurability(dur) + .setTableName(tableName) + .setReturnRow(true); + putRes = handle.put(putReq); + checkPutResult(putReq, putRes, + true /* shouldSucceed */, + false /* rowPresent */, + null /* expPrevValue */, + null /* expPrevVersion */, + false, /* modtime should be zero */ + recordKB, + false /* put overWrite */); + // no return row + checkCreationTime(putRes.getExistingCreationTime(), 0, 0); + + /* PutIfAbsent an existing row, it should fail */ + putReq = new PutRequest() + .setOption(Option.IfAbsent) + .setValue(value) + .setDurability(dur) + .setTableName(tableName); + putRes = handle.put(putReq); + checkPutResult(putReq, putRes, + false /* shouldSucceed */, + false /* rowPresent */, + null /* expPrevValue */, + null /* expPrevVersion */, + false, /* modtime should be zero */ + recordKB, + false /* put overWrite */); + // no return row + checkCreationTime(putRes.getExistingCreationTime(), 0, 0); + + /* + * PutIfAbsent fails + SetReturnRow(true), + * return existing value and version + */ + putReq.setReturnRow(true); + putRes = handle.put(putReq); + checkPutResult(putReq, putRes, + false /* shouldSucceed */, + true /* rowPresent */, + value /* expPrevValue */, + oldVersion /* expPrevVersion */, + true, /* modtime should be recent */ + recordKB, + false /* put overWrite */); + checkCreationTime(putRes.getExistingCreationTime(), startTime, interval); + + /* PutIfPresent an existing row, it should succeed */ + putReq = new PutRequest() + .setOption(Option.IfPresent) + .setValue(value) + .setDurability(dur) + .setTableName(tableName); + putRes = handle.put(putReq); + checkPutResult(putReq, putRes, + true /* shouldSucceed */, + false /* rowPresent */, + null /* expPrevValue */, + null /* expPrevVersion */, + false, /* modtime should be zero */ + recordKB, + false /* put overWrite */); + // no return row + checkCreationTime(putRes.getExistingCreationTime(), 0, 0); + oldVersion = putRes.getVersion(); + + /* + * PutIfPresent succeed + SetReturnRow(true), + * expect existing row returned. 
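+ * The returned creation time should still fall within the original put's
+ * [startTime, startTime + interval] window.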
+ */ + putReq.setReturnRow(true); + putRes = handle.put(putReq); + checkPutResult(putReq, putRes, + true /* shouldSucceed */, + true /* rowPresent */, + value /* expPrevValue */, + oldVersion /* expPrevVersion */, + true, /* modtime should be zero */ + recordKB, + false /* put overWrite */); + checkCreationTime(putRes.getExistingCreationTime(), startTime, interval); + Version ifVersion = putRes.getVersion(); + + /* PutIfPresent a new row, it should fail */ + putReq = new PutRequest() + .setOption(Option.IfPresent) + .setValue(newValue1) + .setDurability(dur) + .setTableName(tableName); + putRes = handle.put(putReq); + checkPutResult(putReq, putRes, + false /* shouldSucceed */, + false /* rowPresent */, + null /* expPrevValue */, + null /* expPrevVersion */, + false, /* modtime should be zero */ + recordKB, + false /* put overWrite */); + // op didn't succeed + checkCreationTime(putRes.getExistingCreationTime(), 0, 0); + + /* + * PutIfPresent fail + SetReturnRow(true), + * expect no existing row returned. + */ + putReq.setReturnRow(true); + putRes = handle.put(putReq); + checkPutResult(putReq, putRes, + false /* shouldSucceed */, + false /* rowPresent */, + null /* expPrevValue */, + null /* expPrevVersion */, + false, /* modtime should be zero */ + recordKB, + false /* put overWrite */); + // op didn't succeed, no return row + checkCreationTime(putRes.getExistingCreationTime(), 0, 0); + + /* PutIfAbsent a new row, it should succeed */ + putReq = new PutRequest() + .setOption(Option.IfAbsent) + .setValue(newValue1) + .setDurability(dur) + .setTableName(tableName); + putRes = handle.put(putReq); + checkPutResult(putReq, putRes, + true /* shouldSucceed */, + false /* rowPresent */, + null /* expPrevValue */, + null /* expPrevVersion */, + false, /* modtime should be zero */ + recordKB, + false /* put overWrite */); + assertNull(putRes.getExistingRowMetadata()); + // no returnRow + checkCreationTime(putRes.getExistingCreationTime(), 0, 0); + + /* PutIfAbsent success + SetReturnRow(true) */ + putReq.setValue(newValue2).setReturnRow(true); + putRes = handle.put(putReq); + checkPutResult(putReq, putRes, + true /* shouldSucceed */, + false /* rowPresent */, + null /* expPrevValue */, + null /* expPrevVersion */, + false, /* modtime should be zero */ + recordKB, + false /* put overWrite */); + // no return row + checkCreationTime(putRes.getExistingCreationTime(), 0, 0); + + /* + * PutIfVersion an existing row with unmatched version, it should fail. + */ + putReq = new PutRequest() + .setOption(Option.IfVersion) + .setMatchVersion(oldVersion) + .setValue(value) + .setDurability(dur) + .setTableName(tableName); + putRes = handle.put(putReq); + checkPutResult(putReq, putRes, + false /* shouldSucceed */, + false /* rowPresent */, + null /* expPrevValue */, + null /* expPrevVersion */, + false, /* modtime should be zero */ + recordKB, + false /* put overWrite */); + // op didn't succeed + checkCreationTime(putRes.getExistingCreationTime(), 0, 0); + + /* + * PutIfVersion fails + SetReturnRow(true), + * expect existing row returned. + */ + putReq.setReturnRow(true); + putRes = handle.put(putReq); + checkPutResult(putReq, putRes, + false /* shouldSucceed */, + true /* rowPresent */, + value /* expPrevValue */, + ifVersion /* expPrevVersion */, + true, /* modtime should be recent */ + recordKB, + false /* put overWrite */); + checkCreationTime(putRes.getExistingCreationTime(), startTime, interval); + + + /* + * Put an existing row with matching version, it should succeed. 
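+ * (i.e. PutIfVersion using the row's current version)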
+ */ + putReq = new PutRequest() + .setOption(Option.IfVersion) + .setMatchVersion(ifVersion) + .setValue(value) + .setDurability(dur) + .setTableName(tableName); + putRes = handle.put(putReq); + checkPutResult(putReq, putRes, + true /* shouldSucceed */, + false /* rowPresent */, + null /* expPrevValue */, + null /* expPrevVersion */, + false, /* modtime should be zero */ + recordKB, + false /* put overWrite */); + // no return row + checkCreationTime(putRes.getExistingCreationTime(), 0, 0); + ifVersion = putRes.getVersion(); + + /* + * PutIfVersion succeed + SetReturnRow(true), + * expect no existing row returned. + */ + putReq.setMatchVersion(ifVersion).setReturnRow(true); + putRes = handle.put(putReq); + checkPutResult(putReq, putRes, + true /* shouldSucceed */, + false /* rowPresent */, + null /* expPrevValue */, + null /* expPrevVersion */, + false, /* modtime should be zero */ + recordKB, + false /* put overWrite */); + // no return row + checkCreationTime(putRes.getExistingCreationTime(), 0, 0); + Version newVersion = putRes.getVersion(); + + + /* + * Get + */ + MapValue key = new MapValue().put("id", 10); + + /* Get a row */ + GetRequest getReq = new GetRequest() + .setKey(key) + .setTableName(tableName); + GetResult getRes = handle.get(getReq); + checkGetResult(getReq, getRes, + true /* rowPresent*/, + value, + null, /* Don't check version if Consistency.EVENTUAL */ + true, /* modtime should be recent */ + recordKB); + checkCreationTime(getRes.getCreationTime(), startTime, interval); + + /* Get a row with ABSOLUTE consistency */ + getReq.setConsistency(Consistency.ABSOLUTE); + getRes = handle.get(getReq); + checkGetResult(getReq, getRes, + true /* rowPresent*/, + value, + newVersion, + true, /* modtime should be recent */ + recordKB); + checkCreationTime(getRes.getCreationTime(), startTime, interval); + + getReq = new GetRequest() + .setKey(key) + .setTableName(tableName); + getRes = handle.get(getReq); + checkGetResult(getReq, getRes, + true /* rowPresent*/, + value, + null, /* Don't check version if Consistency.EVENTUAL */ + true, /* modtime should be recent */ + recordKB); + checkCreationTime(getRes.getCreationTime(), startTime, interval); + + + /* Get non-existing row */ + key = new MapValue().put("id", 100); + getReq = new GetRequest() + .setKey(key) + .setTableName(tableName); + getRes = handle.get(getReq); + checkGetResult(getReq, getRes, + false /* rowPresent*/, + null /* expValue */, + null /* expVersion */, + false, /* modtime should be zero */ + recordKB); + // no row + assertEquals(0, getRes.getCreationTime()); + + /* Get a row with ABSOLUTE consistency */ + getReq.setConsistency(Consistency.ABSOLUTE); + getRes = handle.get(getReq); + checkGetResult(getReq, getRes, + false /* rowPresent*/, + null /* expValue */, + null /* expVersion */, + false, /* modtime should be zero */ + recordKB); + // no row + assertEquals(0, getRes.getCreationTime()); + + /* Delete a row */ + key = new MapValue().put("id", 10); + DeleteRequest delReq = new DeleteRequest() + .setKey(key) + .setTableName(tableName); + DeleteResult delRes = handle.delete(delReq); + checkDeleteResult(delReq, delRes, + true /* shouldSucceed */, + false /* rowPresent */, + null /* expPrevValue */, + null /* expPrevVersion */, + false, /* modtime should be zero */ + recordKB); + // no return row + checkCreationTime(delRes.getExistingCreationTime(), 0, 0); + + /* Put the row back to store */ + startTime = System.currentTimeMillis(); + putReq = new PutRequest() + .setValue(value) + .setReturnRow(true) + 
.setTableName(tableName); + putRes = handle.put(putReq); + oldVersion = putRes.getVersion(); + assertNotNull(oldVersion); + interval = System.currentTimeMillis() - startTime; + // in NsonProtocol.writeReturnRow():1344 result contains creationTime + // and modificationTime but version is null so they all get skipped. + // no return row + checkCreationTime(putRes.getExistingCreationTime(), 0, 0); + + /* Delete succeed + setReturnRow(true), existing row returned. */ + delReq.setReturnRow(true); + delRes = handle.delete(delReq); + checkDeleteResult(delReq, delRes, + true /* shouldSucceed */, + true /* rowPresent */, + value /* expPrevValue */, + oldVersion /* expPrevVersion */, + true, /* modtime should be zero */ + recordKB); + checkCreationTime(delRes.getExistingCreationTime(), startTime, interval); + + /* Delete fail + setReturnRow(true), no existing row returned. */ + delRes = handle.delete(delReq); + checkDeleteResult(delReq, delRes, + false /* shouldSucceed */, + false /* rowPresent */, + null /* expPrevValue */, + null /* expPrevVersion */, + false, /* modtime should be zero */ + recordKB); + assertEquals(0, delRes.getExistingCreationTime()); + + /* Put the row back to store */ + startTime = System.currentTimeMillis(); + putReq = new PutRequest() + .setValue(value) + .setTableName(tableName); + putRes = handle.put(putReq); + ifVersion = putRes.getVersion(); + interval = System.currentTimeMillis() - startTime; + // no return row + checkCreationTime(putRes.getExistingCreationTime(), 0, 0); + + + /* DeleteIfVersion with unmatched version, it should fail */ + delReq = new DeleteRequest() + .setMatchVersion(oldVersion) + .setKey(key) + .setTableName(tableName); + delRes = handle.delete(delReq); + checkDeleteResult(delReq, delRes, + false /* shouldSucceed */, + false /* rowPresent */, + null /* expPrevValue */, + null /* expPrevVersion */, + false, /* modtime should be zero */ + recordKB); + // no return row + assertEquals(0, delRes.getExistingCreationTime()); + + /* + * DeleteIfVersion with unmatched version + setReturnRow(true), + * the existing row returned. + */ + delReq.setReturnRow(true); + delRes = handle.delete(delReq); + checkDeleteResult(delReq, delRes, + false /* shouldSucceed */, + true /* rowPresent */, + value /* expPrevValue */, + ifVersion /* expPrevVersion */, + true, /* modtime should be recent */ + recordKB); + checkCreationTime(delRes.getExistingCreationTime(), startTime, interval); + + /* DeleteIfVersion with matched version, it should succeed. */ + delReq = new DeleteRequest() + .setMatchVersion(ifVersion) + .setKey(key) + .setTableName(tableName); + delRes = handle.delete(delReq); + checkDeleteResult(delReq, delRes, + true /* shouldSucceed */, + false /* rowPresent */, + null /* expPrevValue */, + null /* expPrevVersion */, + false, /* modtime should be zero */ + recordKB); + // no return row + checkCreationTime(delRes.getExistingCreationTime(), 0, 0); + + /* Put the row back to store */ + startTime = System.currentTimeMillis(); + putReq = new PutRequest() + .setValue(value) + .setTableName(tableName); + putRes = handle.put(putReq); + ifVersion = putRes.getVersion(); + interval = System.currentTimeMillis() - startTime; + // no return row + checkCreationTime(putRes.getExistingCreationTime(), 0, 0); + + /* + * DeleteIfVersion with matched version + setReturnRow(true), + * it should succeed but no existing row returned. 
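+ * getExistingCreationTime() is expected to be 0 in that case.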
+ */ + delReq.setMatchVersion(ifVersion).setReturnRow(true); + delRes = handle.delete(delReq); + checkDeleteResult(delReq, delRes, + true /* shouldSucceed */, + false /* returnRow */, + null /* expPrevValue */, + null /* expPrevVersion */, + false, /* modtime should be zero */ + recordKB); + // no return row + checkCreationTime(delRes.getExistingCreationTime(), 0, 0); + + /* DeleteIfVersion with a key not existing, it should fail. */ + delReq = new DeleteRequest() + .setMatchVersion(ifVersion) + .setKey(key) + .setTableName(tableName); + delRes = handle.delete(delReq); + checkDeleteResult(delReq, delRes, + false /* shouldSucceed */, + false /* returnRow */, + null /* expPrevValue */, + null /* expPrevVersion */, + false, /* modtime should be zero */ + recordKB); + assertEquals(0, delRes.getExistingCreationTime()); + + /* + * DeleteIfVersion with a key not existing + setReturnRow(true), + * it should fail and no existing row returned. + */ + delReq.setReturnRow(true); + delRes = handle.delete(delReq); + checkDeleteResult(delReq, delRes, + false /* shouldSucceed */, + false /* returnRow */, + null /* expPrevValue */, + null /* expPrevVersion */, + false, /* modtime should be zero */ + recordKB); + assertEquals(0, delRes.getExistingCreationTime()); + } + + private void checkCreationTime(long creationTime, long startTime, long interval) { + assertTrue("creationTime should be >= than " + startTime + " " + + (creationTime - startTime), + creationTime >= startTime); + + assertTrue("creationTime not in interval: " + interval, + creationTime - startTime <= interval); + +// if (creationTime >= startTime && +// creationTime - startTime <= interval) { +// System.out.println(" PASSED ct: " + creationTime + " i:" + interval); +// } else { +// System.out.println(" !!! FAILED ct: " + creationTime + " i:" + interval + " !!!"); +// } + } + + private void checkModTime(long modTime, boolean modTimeRecent) { + if (modTimeRecent) { + if (modTime < (System.currentTimeMillis() - 2000)) { + fail("Expected modtime to be recent, got " + modTime); + } + } else { + if (modTime != 0) { + fail("Expected modtime to be zero, got " + modTime); + } + } + } + + private void checkPutResult(PutRequest request, + PutResult result, + boolean shouldSucceed, + boolean rowPresent, + MapValue expPrevValue, + Version expPrevVersion, + boolean modTimeRecent, + int recordKB, + boolean putOverWrite) { + if (shouldSucceed) { + assertNotNull("Put should succeed", result.getVersion()); + } else { + assertNull("Put should fail", result.getVersion()); + } + checkExistingValueVersion(request, result, shouldSucceed, rowPresent, + expPrevValue, expPrevVersion); + + checkModTime(result.getExistingModificationTime(), modTimeRecent); + + int[] expCosts = getPutReadWriteCost(request, + shouldSucceed, + rowPresent, + recordKB, + putOverWrite); + + if (onprem == false) { + assertReadKB(result, expCosts[0], true /* isAbsolute */); + assertWriteKB(result, expCosts[1]); + } + } + + private void checkDeleteResult(DeleteRequest request, + DeleteResult result, + boolean shouldSucceed, + boolean rowPresent, + MapValue expPrevValue, + Version expPrevVersion, + boolean modTimeRecent, + int recordKB) { + + assertEquals("Delete should " + (shouldSucceed ? 
"succeed" : " fail"), + shouldSucceed, result.getSuccess()); + checkExistingValueVersion(request, result, shouldSucceed, rowPresent, + expPrevValue, expPrevVersion); + + checkModTime(result.getExistingModificationTime(), modTimeRecent); + + int[] expCosts = getDeleteReadWriteCost(request, + shouldSucceed, + rowPresent, + recordKB); + + if (onprem == false) { + assertReadKB(result, expCosts[0], true /* isAbsolute */); + assertWriteKB(result, expCosts[1]); + } + } + + private void checkGetResult(GetRequest request, + GetResult result, + boolean rowPresent, + MapValue expValue, + Version expVersion, + boolean modTimeRecent, + int recordKB) { + + + if (rowPresent) { + if (expValue != null) { + assertEquals("Unexpected value", expValue, result.getValue()); + } else { + assertNotNull("Unexpected value", expValue); + } + if (expVersion != null) { + assertArrayEquals("Unexpected version", + expVersion.getBytes(), + result.getVersion().getBytes()); + } else { + assertNotNull("Unexpected version", result.getVersion()); + } + } else { + assertNull("Unexpected value", expValue); + assertNull("Unexpected version", result.getVersion()); + } + + checkModTime(result.getModificationTime(), modTimeRecent); + + final int minRead = getMinRead(); + int expReadKB = rowPresent ? recordKB : minRead; + + if (onprem == false) { + assertReadKB(result, expReadKB, + (request.getConsistencyInternal() == Consistency.ABSOLUTE)); + assertWriteKB(result, 0); + } + } + + private void checkExistingValueVersion(WriteRequest request, + WriteResult result, + boolean shouldSucceed, + boolean rowPresent, + MapValue expPrevValue, + Version expPrevVersion) { + + boolean hasReturnRow = rowPresent; + if (hasReturnRow) { + assertNotNull("PrevValue should be non-null", + result.getExistingValueInternal()); + if (expPrevValue != null) { + assertEquals("Unexpected PrevValue", + expPrevValue, result.getExistingValueInternal()); + } + assertNotNull("PrevVersion should be non-null", + result.getExistingVersionInternal()); + if (expPrevVersion != null) { + assertNotNull(result.getExistingVersionInternal()); + assertArrayEquals("Unexpected PrevVersion", + expPrevVersion.getBytes(), + result.getExistingVersionInternal().getBytes()); + } + } else { + assertNull("PrevValue should be null", + result.getExistingValueInternal()); + assertNull("PrevVersion should be null", + result.getExistingVersionInternal()); + } + } + + @Test + public void testReadQuery() throws InterruptedException { + final String createTable1 = + "create table tjson(id integer, info json, primary key(id))"; + final String createTable2 = + "create table trecord(id integer, " + + "info record(name string, age integer), " + + "primary key(id))"; + + tableOperation(handle, createTable1, new TableLimits(10, 10, 1), + null, TableResult.State.ACTIVE, null); + tableOperation(handle, createTable2, new TableLimits(10, 10, 1), + null, TableResult.State.ACTIVE, null); + + MapValue rowNull = new MapValue() + .put("id", 0) + .put("info", + new MapValue() + .put("name", NullValue.getInstance()) + .put("age", 20)); + MapValue rowJsonNull = new MapValue() + .put("id", 0) + .put("info", + new MapValue() + .put("name", JsonNullValue.getInstance()) + .put("age", 20)); + + MapValue[] rows = new MapValue[] {rowNull, rowJsonNull}; + Map tableExpRows = new HashMap(); + tableExpRows.put("tjson", rowJsonNull); + tableExpRows.put("trecord", rowNull); + + long startTime = System.currentTimeMillis(); + /* + * Put rows with NullValue or JsonNullValue, they should be converted + * to the right value for 
the target type. + */ + for (Map.Entry e : tableExpRows.entrySet()) { + String table = e.getKey(); + MapValue expRow = e.getValue(); + + for (MapValue row : rows) { + PutRequest putReq = new PutRequest() + .setTableName(table) + .setValue(row); + + PutResult putRet = handle.put(putReq); + Version pVersion = putRet.getVersion(); + assertNotNull(pVersion); + long interval = System.currentTimeMillis() - startTime; + + MapValue key = new MapValue().put("id", row.get("id")); + GetRequest getReq = new GetRequest() + .setTableName(table) + .setConsistency(Consistency.ABSOLUTE) + .setKey(key); + GetResult getRet = handle.get(getReq); + assertEquals(expRow, getRet.getValue()); + assertNotNull(getRet.getVersion()); + assertTrue(Arrays.equals(pVersion.getBytes(), + getRet.getVersion().getBytes())); + checkCreationTime(getRet.getCreationTime(), startTime, interval); + assertTrue(getRet.getModificationTime() > 0); + } + } + long interval = System.currentTimeMillis() - startTime; + + /* + * Query with variable for json field and set NullValue or + * JsonNullValue to variable, the NullValue is expected to be converted + * to JsonNullValue. + */ + String query = + "select id, info, creation_time($t) as ct, " + + "creation_time_millis($t) as ctm, " + + "modification_time($t) as mt " + + "from tjson $t "; + PrepareRequest prepReq = new PrepareRequest().setStatement(query); + PrepareResult prepRet = handle.prepare(prepReq); + PreparedStatement prepStmt = prepRet.getPreparedStatement(); + + QueryRequest queryReq = new QueryRequest() + .setPreparedStatement(prepStmt); + + boolean shouldRetry = false; + do { + try { + QueryResult queryRet = handle.query(queryReq); + assertEquals(1, queryRet.getResults().size()); + + for (MapValue v : queryRet.getResults()) { + assertTrue(v.get("ct").isTimestamp()); + checkCreationTime(v.get("ct").asTimestamp().getValue().getTime(), startTime, interval); + assertTrue(v.get("ctm").isLong()); + checkCreationTime(v.get("ctm").asLong().getValue(), startTime, interval); + assertTrue(v.get("mt").isTimestamp()); + assertTrue(v.get("mt").asTimestamp().getValue().getTime() > 0); + } + + } catch (SystemException e) { + shouldRetry = e.okToRetry(); + System.out.println("Caught " + (e.okToRetry() ? 
"retryable" : + "") + " ex: " + e.getMessage()); + System.out.println( + "Retrying query: " + queryReq.getStatement()); + e.printStackTrace(); + Thread.sleep(300); + } + } while (shouldRetry); + } + + @Test + public void testTableMultiWrite() { + final String createTable = + "create table tMW(s integer, id integer, info json, primary key(shard(s), id))"; + + tableOperation(handle, createTable, new TableLimits(10, 10, 1), + null, TableResult.State.ACTIVE, null); + + /* multi write */ + WriteMultipleRequest wmReq = new WriteMultipleRequest(); + String tableName = "tMW"; + + for (int i = 0; i < 10; i++) { + PutRequest pr = new PutRequest() + .setTableName(tableName) + .setRowMetadata("{\"n\":" + i + "}") + .setValue(new MapValue() + .put("s", 1) + .put("id", i) + .put("info", new MapValue().put("name", "John"))); + wmReq.add(pr, true); + } + + long startTime = System.currentTimeMillis(); + WriteMultipleResult wmRes = handle.writeMultiple(wmReq); + assertEquals(10, wmRes.getResults().size()); + long interval = System.currentTimeMillis() - startTime; + + + QueryRequest queryReq = new QueryRequest() + .setStatement("select s, id, $t.info.name as name, " + + "creation_time($t) as ct, creation_time_millis($t) as ctm " + + "from " + tableName + " $t ORDER BY id ASC"); + QueryResult qRes = handle.query(queryReq); + + int i = 0; + for (MapValue v : qRes.getResults()) { + assertEquals(1, v.get("s").asInteger().getInt()); + assertEquals(i, v.get("id").asInteger().getInt()); + assertEquals("John", v.get("name").asString().getString()); + assertTrue(v.get("ct").isTimestamp()); + checkCreationTime(v.get("ct").asTimestamp().getValue().getTime(), startTime, interval); + assertTrue(v.get("ctm").isLong()); + checkCreationTime(v.get("ctm").asLong().getValue(), startTime, interval); + i++; + } + assertEquals(10, qRes.getResults().size()); + assertEquals(10, i); + + wmReq.getOperations().forEach((req) -> { + PutRequest put = (PutRequest)req.getRequest(); + put.setReturnRow(true); + }); + + wmRes = handle.writeMultiple(wmReq); + wmRes.getResults().forEach((res) -> { + checkCreationTime(res.getExistingCreationTime(), startTime, interval); + }); + } + + @Test + public void testNullJsonNull() throws InterruptedException { + final String createTable1 = + "create table tjson(id integer, info json, primary key(id))"; + final String createTable2 = + "create table trecord(id integer, " + + "info record(name string, age integer), " + + "primary key(id))"; + + tableOperation(handle, createTable1, new TableLimits(10, 10, 1), + null, TableResult.State.ACTIVE, null); + tableOperation(handle, createTable2, new TableLimits(10, 10, 1), + null, TableResult.State.ACTIVE, null); + + MapValue rowNull = new MapValue() + .put("id", 0) + .put("info", + new MapValue() + .put("name", NullValue.getInstance()) + .put("age", 20)); + MapValue rowJsonNull = new MapValue() + .put("id", 0) + .put("info", + new MapValue() + .put("name", JsonNullValue.getInstance()) + .put("age", 20)); + + MapValue[] rows = new MapValue[] {rowNull, rowJsonNull}; + Map tableExpRows = new HashMap(); + tableExpRows.put("tjson", rowJsonNull); + tableExpRows.put("trecord", rowNull); + + long startTime = System.currentTimeMillis(); + /* + * Put rows with NullValue or JsonNullValue, they should be converted + * to the right value for the target type. 
+ */ + for (Map.Entry e : tableExpRows.entrySet()) { + String table = e.getKey(); + MapValue expRow = e.getValue(); + + for (MapValue row : rows) { + PutRequest putReq = new PutRequest() + .setTableName(table) + .setValue(row); + + PutResult putRet = handle.put(putReq); + Version pVersion = putRet.getVersion(); + assertNotNull(pVersion); + long interval = System.currentTimeMillis() - startTime; + + MapValue key = new MapValue().put("id", row.get("id")); + GetRequest getReq = new GetRequest() + .setTableName(table) + .setConsistency(Consistency.ABSOLUTE) + .setKey(key); + GetResult getRet = handle.get(getReq); + assertEquals(expRow, getRet.getValue()); + assertNotNull(getRet.getVersion()); + assertTrue(Arrays.equals(pVersion.getBytes(), + getRet.getVersion().getBytes())); + checkCreationTime(getRet.getCreationTime(), startTime, interval); + } + } + long interval = System.currentTimeMillis() - startTime; + + String query = + "select id, info, creation_time($t) as ct, " + + "creation_time_millis($t) as ctm " + + "from tjson $t"; + PrepareRequest prepReq = new PrepareRequest().setStatement(query); + PrepareResult prepRet = handle.prepare(prepReq); + PreparedStatement prepStmt = prepRet.getPreparedStatement(); + + QueryRequest queryReq = new QueryRequest() + .setPreparedStatement(prepStmt); + + boolean shouldRetry = false; + do { + try { + QueryResult queryRet = handle.query(queryReq); + assertEquals(1, queryRet.getResults().size()); + + MapValue v = queryRet.getResults().get(0); + assertTrue(v.get("ct").isTimestamp()); + checkCreationTime(v.get("ct").asTimestamp().getValue().getTime(), startTime, interval); + assertTrue(v.get("ctm").isLong()); + checkCreationTime(v.get("ctm").asLong().getValue(), startTime, interval); + } catch (SystemException e) { + shouldRetry = e.okToRetry(); + System.out.println("Caught " + (e.okToRetry() ? "retryable" : "") + " ex: " + e.getMessage()); + System.out.println("Retrying query: " + queryReq.getStatement()); + e.printStackTrace(); + Thread.sleep(300); + } + } while (shouldRetry); + + + query = + "select id, info, creation_time($t) as ct, " + + "creation_time_millis($t) as ctm " + + "from trecord $t"; + prepReq = new PrepareRequest().setStatement(query); + prepRet = handle.prepare(prepReq); + prepStmt = prepRet.getPreparedStatement(); + + queryReq = new QueryRequest() + .setPreparedStatement(prepStmt); + + shouldRetry = false; + do { + try { + QueryResult queryRet = handle.query(queryReq); + assertEquals(1, queryRet.getResults().size()); + + MapValue v = queryRet.getResults().get(0); + assertTrue(v.get("ct").isTimestamp()); + checkCreationTime(v.get("ct").asTimestamp().getValue().getTime(), startTime, interval); + assertTrue(v.get("ctm").isLong()); + checkCreationTime(v.get("ctm").asLong().getValue(), startTime, interval); + } catch (SystemException e) { + shouldRetry = e.okToRetry(); + System.out.println("Caught " + (e.okToRetry() ? 
"retryable" : "") + " ex: " + e.getMessage()); + System.out.println("Retrying query: " + queryReq.getStatement()); + e.printStackTrace(); + Thread.sleep(300); + } + } while (shouldRetry); + } + + private String genString(int length) { + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < length; i++) { + sb.append((char)('A' + i % 26)); + } + return sb.toString(); + } + + @Test + public void testCollection() { + final String tableName = "testusersColl"; + + /* Create a table */ + TableResult tres = tableOperation( + handle, + "create table if not exists " + tableName + + "(id integer, primary key(id)) as json collection", + new TableLimits(500, 500, 50), + 20000); + assertEquals(TableResult.State.ACTIVE, tres.getTableState()); + + MapValue value = new MapValue().put("id", 10).put("name", "John"); + + /* Put row */ + long startTime = System.currentTimeMillis(); + PutRequest putReq = new PutRequest() + .setValue(value) + .setTableName(tableName); + PutResult putRes = handle.put(putReq); + long interval = System.currentTimeMillis() - startTime; + // no return value + checkCreationTime(putRes.getExistingCreationTime(), 0, 0); + + /* Get the row back */ + GetRequest getReq = new GetRequest() + .setTableName(tableName) + .setKey(value); + GetResult getRet = handle.get(getReq); + checkCreationTime(getRet.getCreationTime(), startTime, interval); + + /* Delete row check prev/existing is still null */ + DeleteRequest delReq = new DeleteRequest() + .setKey(value) + .setTableName(tableName) + .setReturnRow(true); + DeleteResult delRes = handle.delete(delReq); + checkCreationTime(delRes.getExistingCreationTime(), startTime, interval); + + + /* Put again */ + startTime = System.currentTimeMillis(); + putReq = new PutRequest() + .setValue(value) + .setTableName(tableName); + putRes = handle.put(putReq); + interval = System.currentTimeMillis() - startTime; + // no return + checkCreationTime(putRes.getExistingCreationTime(), 0, 0); + + /* Query */ + QueryRequest queryReq = new QueryRequest() + .setStatement("select id, name, creation_time($t) as ct," + + "creation_time_millis($t) as ctm from " + + tableName + " $t"); + QueryResult qRes = handle.query(queryReq); + + assertEquals(1, qRes.getResults().size()); + assertEquals(10, qRes.getResults().get(0).get("id").getInt()); + assertEquals("John", qRes.getResults().get(0).get("name").getString()); + assertTrue(qRes.getResults().get(0).get("ct").isTimestamp()); + checkCreationTime(qRes.getResults().get(0).get("ct").asTimestamp().getValue().getTime(), startTime, interval); + assertTrue(qRes.getResults().get(0).get("ctm").isLong()); + checkCreationTime(qRes.getResults().get(0).get("ctm").asLong().getValue(), startTime, interval); + } + + @Test + public void testCollectionMultiWrite() { + final String tableName = "testusersCollMWrite"; + + /* Create a table */ + TableResult tres = tableOperation( + handle, + "create table if not exists " + tableName + + "(s integer, id integer, primary key(shard(s), id)) as json collection", + new TableLimits(500, 500, 50), + 20000); + assertEquals(TableResult.State.ACTIVE, tres.getTableState()); + + /* multi write */ + WriteMultipleRequest wmReq = new WriteMultipleRequest(); + + for (int i = 0; i < 10; i++) { + PutRequest pr = new PutRequest() + .setTableName(tableName) + .setValue(new MapValue() + .put("s", 1) + .put("id", i) + .put("name", "John")); + wmReq.add(pr, true); + } + long startTime = System.currentTimeMillis(); + WriteMultipleResult wmRes = handle.writeMultiple(wmReq); + assertEquals(10, 
wmRes.getResults().size()); + long interval = System.currentTimeMillis() - startTime; + + /* query read metadata */ + QueryRequest queryReq = new QueryRequest() + .setStatement("select s, id, name, creation_time($t) as ct," + + "creation_time_millis($t) as ctm from " + + tableName + " $t ORDER BY id ASC"); + QueryResult qRes = handle.query(queryReq); + + int i = 0; + for (MapValue v : qRes.getResults()) { + assertEquals(1, v.get("s").asInteger().getInt()); + assertEquals(i, v.get("id").asInteger().getInt()); + assertEquals("John", v.get("name").asString().getString()); + assertTrue(v.get("ct").isTimestamp()); + checkCreationTime(v.get("ct").asTimestamp().getValue().getTime(), startTime, interval); + assertTrue(v.get("ctm").isLong()); + checkCreationTime(v.get("ctm").asLong().getValue(), startTime, interval); + i++; + } + assertEquals(10, qRes.getResults().size()); + assertEquals(10, i); + } + + @Test + public void testWriteQuery() throws InterruptedException { + final String tableName = "t"; + final String createTable1 = + "create table "+ tableName +" (s integer, id integer, info json, primary key(shard(s), id))"; + + tableOperation(handle, createTable1, new TableLimits(10, 10, 1), + null, TableResult.State.ACTIVE, null); + + // do a few inserts + long startTime = System.currentTimeMillis(); + String query = "declare $id integer; insert into " + tableName + " values( 0, $id, {})"; + for (int i = 0; i < 10; i++) { + PrepareRequest prepReq = new PrepareRequest().setStatement(query); + PrepareResult prepRet = handle.prepare(prepReq); + + PreparedStatement prepStmt = prepRet.getPreparedStatement(); + prepStmt.setVariable("$id", new IntegerValue(i)); + + QueryRequest queryReq = new QueryRequest() + .setPreparedStatement(prepStmt); + + QueryResult queryRes = handle.query(queryReq); + assertNotNull(queryRes); + assertEquals(1, queryRes.getResults().get(0).asMap().get("NumRowsInserted").asInteger().getInt()); + } + long interval = System.currentTimeMillis() - startTime; + + // check they have the correct row metadata + query = + "select $t.s, $t.id, $t.info, creation_time($t) as ct, " + + "creation_time_millis($t) as ctm from " + + tableName + " $t order by $t.id"; + PrepareRequest prepReq = new PrepareRequest().setStatement(query); + PrepareResult prepRet = handle.prepare(prepReq); + PreparedStatement prepStmt = prepRet.getPreparedStatement(); + + QueryRequest queryReq = new QueryRequest() + .setPreparedStatement(prepStmt); + + QueryResult qRes = handle.query(queryReq); + int i = 0; + for (MapValue v : qRes.getResults()) { + assertEquals(0, v.get("s").asInteger().getInt()); + assertEquals(i, v.get("id").asInteger().getInt()); + assertTrue(v.get("ct").isTimestamp()); + checkCreationTime(v.get("ct").asTimestamp().getValue().getTime(), startTime, interval); + assertTrue(v.get("ctm").isLong()); + checkCreationTime(v.get("ctm").asLong().getValue(), startTime, interval); + i++; + } + assertEquals(10, i); + + + // update many + query = "update " + tableName + " t SET t.info = t.info where t.s = 0"; + + queryReq = new QueryRequest() + .setStatement(query); + + qRes = handle.query(queryReq); + assertEquals(1, qRes.getResults().size()); + assertEquals(10, qRes.getResults().get(0).asMap().get("NumRowsUpdated").asInteger().getInt()); + + + // check they have the correct row metadata + query = + "select $t.s, $t.id, $t.info, creation_time($t) as ct, " + + "creation_time_millis($t) as ctm from " + + tableName + " $t order by $t.id"; + prepReq = new PrepareRequest().setStatement(query); + prepRet = 
handle.prepare(prepReq); + prepStmt = prepRet.getPreparedStatement(); + + queryReq = new QueryRequest() + .setPreparedStatement(prepStmt); + + qRes = handle.query(queryReq); + i = 0; + for (MapValue v : qRes.getResults()) { + assertEquals(0, v.get("s").asInteger().getInt()); + assertEquals(i, v.get("id").asInteger().getInt()); + assertTrue(v.get("ct").isTimestamp()); + checkCreationTime(v.get("ct").asTimestamp().getValue().getTime(), startTime, interval); + assertTrue(v.get("ctm").isLong()); + checkCreationTime(v.get("ctm").asLong().getValue(), startTime, interval); + i++; + } + assertEquals(10, i); + } + + @Test + public void testWriteQueryCollection() throws InterruptedException { + final String tableName = "t"; + final String createTable1 = + "create table "+ tableName +" (s integer, id integer, primary key(shard(s), id)) as json collection"; + + tableOperation(handle, createTable1, new TableLimits(10, 10, 1), + null, TableResult.State.ACTIVE, null); + + // do a few inserts + long startTime = System.currentTimeMillis(); + String query = "declare $id integer; insert into " + tableName + " values( 0, $id, {\"info\":1})"; + for (int i = 0; i < 10; i++) { + PrepareRequest prepReq = new PrepareRequest().setStatement(query); + PrepareResult prepRet = handle.prepare(prepReq); + + PreparedStatement prepStmt = prepRet.getPreparedStatement(); + prepStmt.setVariable("$id", new IntegerValue(i)); + + QueryRequest queryReq = new QueryRequest() + .setPreparedStatement(prepStmt); + + QueryResult queryRes = handle.query(queryReq); + assertNotNull(queryRes); + assertEquals(1, queryRes.getResults().get(0).asMap().get("NumRowsInserted").asInteger().getInt()); + } + long interval = System.currentTimeMillis() - startTime; + + // check they have the correct row metadata + query = + "select $t.s, $t.id, $t.info, creation_time($t) as ct, " + + "creation_time_millis($t) as ctm," + + "modification_time($t) as mt " + + "from " + tableName + " $t order by $t.id"; + PrepareRequest prepReq = new PrepareRequest().setStatement(query); + PrepareResult prepRet = handle.prepare(prepReq); + PreparedStatement prepStmt = prepRet.getPreparedStatement(); + + QueryRequest queryReq = new QueryRequest() + .setPreparedStatement(prepStmt); + + QueryResult qRes = handle.query(queryReq); + int i = 0; + for (MapValue v : qRes.getResults()) { + assertEquals(0, v.get("s").asInteger().getInt()); + assertEquals(i, v.get("id").asInteger().getInt()); + assertTrue(v.get("ct").isTimestamp()); + checkCreationTime(v.get("ct").asTimestamp().getValue().getTime(), startTime, interval); + assertTrue(v.get("ctm").isLong()); + checkCreationTime(v.get("ctm").asLong().getValue(), startTime, interval); + assertTrue(v.get("mt").isTimestamp()); + assertTrue(v.get("mt").asTimestamp().getValue().getTime() > 0); + i++; + } + assertEquals(10, i); + + + // update many + query = "update " + tableName + " t SET t.info=3 where t.s = 0"; + + queryReq = new QueryRequest() + .setStatement(query); + + qRes = handle.query(queryReq); + assertEquals(1, qRes.getResults().size()); + assertEquals(10, qRes.getResults().get(0).asMap().get("NumRowsUpdated").asInteger().getInt()); + + + // check they have the correct row metadata + query = + "select $t.s, $t.id, $t.info,creation_time($t) as ct, " + + "creation_time_millis($t) as ctm, " + + "modification_time($t) as mt " + + "from " + tableName + " $t order by $t.id"; + prepReq = new PrepareRequest().setStatement(query); + prepRet = handle.prepare(prepReq); + prepStmt = prepRet.getPreparedStatement(); + + queryReq = new 
QueryRequest() + .setPreparedStatement(prepStmt); + + qRes = handle.query(queryReq); + i = 0; + for (MapValue v : qRes.getResults()) { + assertEquals(0, v.get("s").asInteger().getInt()); + assertEquals(i, v.get("id").asInteger().getInt()); + assertTrue(v.get("ct").isTimestamp()); + checkCreationTime(v.get("ct").asTimestamp().getValue().getTime(), startTime, interval); + assertTrue(v.get("ctm").isLong()); + checkCreationTime(v.get("ctm").asLong().getValue(), startTime, interval); + assertTrue(v.get("mt").isTimestamp()); + assertTrue(v.get("mt").asTimestamp().getValue().getTime() > 0); + i++; + } + assertEquals(10, i); + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/DDosTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/DDosTest.java new file mode 100644 index 00000000..c2a35fcb --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/DDosTest.java @@ -0,0 +1,1053 @@ +/*- + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * See the file LICENSE for redistribution information. + * + */ + +package oracle.nosql.proxy; + +import static io.netty.handler.codec.http.HttpMethod.POST; +import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1; +import static oracle.nosql.proxy.protocol.BinaryProtocol.TYPE_ARRAY; +import static oracle.nosql.proxy.protocol.BinaryProtocol.TYPE_BOOLEAN; +import static oracle.nosql.proxy.protocol.BinaryProtocol.TYPE_INTEGER; +import static oracle.nosql.proxy.protocol.BinaryProtocol.TYPE_MAP; +import static oracle.nosql.proxy.protocol.HttpConstants.ACCEPT; +import static oracle.nosql.proxy.protocol.HttpConstants.AUTHORIZATION; +import static oracle.nosql.proxy.protocol.HttpConstants.CONNECTION; +import static oracle.nosql.proxy.protocol.HttpConstants.CONTENT_LENGTH; +import static oracle.nosql.proxy.protocol.HttpConstants.CONTENT_TYPE; +import static oracle.nosql.proxy.protocol.HttpConstants.NOSQL_DATA_PATH; +import static oracle.nosql.proxy.protocol.HttpConstants.NOSQL_VERSION; +import static oracle.nosql.proxy.protocol.HttpConstants.REQUEST_COMPARTMENT_ID; +import static oracle.nosql.proxy.protocol.HttpConstants.REQUEST_ID_HEADER; +import static oracle.nosql.proxy.protocol.Protocol.BAD_PROTOCOL_MESSAGE; +import static oracle.nosql.proxy.protocol.Protocol.ILLEGAL_ARGUMENT; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeTrue; + +import java.io.IOException; +import java.math.BigDecimal; +import java.net.URL; +import java.util.HashMap; +import java.util.Map; +import java.util.Random; +import java.util.concurrent.TimeoutException; + +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.channel.Channel; +import io.netty.handler.codec.http.DefaultFullHttpRequest; +import io.netty.handler.codec.http.FullHttpRequest; +import io.netty.handler.codec.http.HttpHeaderNames; +import io.netty.handler.codec.http.HttpHeaders; +import io.netty.handler.codec.http.HttpResponseStatus; +import oracle.nosql.driver.Consistency; +import oracle.nosql.driver.NoSQLHandleConfig; +import oracle.nosql.driver.TimeToLive; +import oracle.nosql.driver.http.NoSQLHandleImpl; +import oracle.nosql.driver.httpclient.HttpClient; +import oracle.nosql.driver.httpclient.ResponseHandler; +import oracle.nosql.driver.ops.DeleteRequest; +import oracle.nosql.driver.ops.GetIndexesRequest; +import 
oracle.nosql.driver.ops.GetRequest; +import oracle.nosql.driver.ops.GetTableRequest; +import oracle.nosql.driver.ops.PrepareRequest; +import oracle.nosql.driver.ops.PrepareResult; +import oracle.nosql.driver.ops.PutRequest; +import oracle.nosql.driver.ops.QueryRequest; +import oracle.nosql.driver.ops.Request; +import oracle.nosql.driver.ops.TableLimits; +import oracle.nosql.driver.ops.TableResult; +import oracle.nosql.driver.ops.TableUsageRequest; +import oracle.nosql.driver.ops.serde.BinarySerializerFactory; +import oracle.nosql.driver.ops.serde.Serializer; +import oracle.nosql.driver.query.QueryDriver; +import oracle.nosql.driver.util.ByteInputStream; +import oracle.nosql.driver.util.ByteOutputStream; +import oracle.nosql.driver.util.NettyByteInputStream; +import oracle.nosql.driver.util.NettyByteOutputStream; +import oracle.nosql.driver.util.SerializationUtil; +import oracle.nosql.driver.values.ArrayValue; +import oracle.nosql.driver.values.IntegerValue; +import oracle.nosql.driver.values.MapValue; +import oracle.nosql.proxy.protocol.Protocol.OpCode; +import oracle.nosql.proxy.security.SecureTestUtil; + +/** + * Tests on handling bad protocol on proxy side + */ +public class DDosTest extends ProxyTestBase { + + private final static String tableName = "users"; + + private final BinarySerializerFactory factory = + new BinarySerializerFactory(); + + private final MapValue key = createTestKey(1); + private final MapValue record = createTestValue(); + + private final GetRequest getRequest = new GetRequest() + .setTableName(tableName) + .setConsistency(Consistency.ABSOLUTE) + .setKey(key); + + private final PutRequest putRequest = new PutRequest() + .setTableName(tableName) + .setValue(record) + .setTTL(TimeToLive.ofDays(1)); + + private final DeleteRequest deleteRequest = new DeleteRequest() + .setTableName(tableName) + .setKey(key); + + private final String statement = "select * from users"; + private final PrepareRequest prepareRequest = new PrepareRequest() + .setStatement(statement); + + private final String boundStatement = "declare $id integer; " + + "select * from users where id = $id"; + private final PrepareRequest prepareBoundStmtRequest = new PrepareRequest() + .setStatement(boundStatement); + + private final GetIndexesRequest getIndexesRequest = new GetIndexesRequest() + .setTableName(tableName) + .setIndexName("idx1"); + + private final TableUsageRequest tableUsageRequest = new TableUsageRequest() + .setTableName(tableName) + .setStartTime(System.currentTimeMillis()) + .setEndTime(System.currentTimeMillis() + 3600_000) + .setLimit(10); + + /* Create a table */ + private final static String createTableDDL = + "CREATE TABLE IF NOT EXISTS " + tableName + "(" + + "id INTEGER, " + + "name STRING, " + + "count LONG, " + + "avg DOUBLE, " + + "sum NUMBER, " + + "exp BOOLEAN, " + + "key BINARY, " + + "map MAP(INTEGER), " + + "array ARRAY(STRING), " + + "record RECORD(rid INTEGER, rs STRING), " + + "PRIMARY KEY(id))"; + + private final static String createIndexDDL = + "CREATE INDEX IF NOT EXISTS idx1 ON " + tableName + "(name)"; + + private ByteBuf buf; + private HttpClient httpClient; + private NoSQLHandleConfig httpConfig; + private String kvRequestURI; + private int timeoutMs; + private int requestId = 0; + + @BeforeClass + public static void staticSetUp() + throws Exception { + + assumeTrue("Skip DDosTest in onprem or minicloud or cloud test", + !Boolean.getBoolean(ONPREM_PROP) && + !Boolean.getBoolean(USEMC_PROP) && + !Boolean.getBoolean(USECLOUD_PROP)); + + /* this test requires 
error limiting */ + System.setProperty(PROXY_ERROR_LIMITING_PROP, "true"); + + staticSetUp(tenantLimits); + } + + @Override + public void setUp() throws Exception { + if (onprem || cloudRunning) { + return; + } + super.setUp(); + + buf = Unpooled.buffer(); + + URL url = new URL("http", getProxyHost(), getProxyPort(), "/"); + httpConfig = new NoSQLHandleConfig(url); + + httpConfig.configureDefaultRetryHandler(0, 0); + timeoutMs = 1000; + httpConfig.setRequestTimeout(timeoutMs); + + kvRequestURI = httpConfig.getServiceURL().toString() + + NOSQL_VERSION + "/" + NOSQL_DATA_PATH; + + httpClient = createHttpClient(getProxyHost(), + getProxyPort(), + httpConfig.getNumThreads(), + "DDosTest", + null /* Logger */); + assertNotNull(httpClient); + createTable(); + + if (isSecure()) { + /* warm up security caches */ + handle.put(putRequest); + handle.get(getRequest); + handle.delete(deleteRequest); + handle.getTable(new GetTableRequest().setTableName(tableName)); + handle.getTableUsage(tableUsageRequest); + handle.getIndexes(getIndexesRequest); + handle.query(createQueryWithBoundStmtRequest()); + } + } + + @Override + public void tearDown() throws Exception { + if (onprem || cloudRunning) { + return; + } + + if (buf != null) { + buf.release(buf.refCnt()); + } + + if (httpClient != null) { + httpClient.shutdown(); + } + super.tearDown(); + } + + @Before + public void setVersion() throws Exception { + /* + * This test suite is somewhat V2/V3-centric. So + * set the serial version to 3 if higher. + */ + forceV3((NoSQLHandleImpl)handle); + } + + /* + * Test bad protocol data on below values: + * 1. SerialVersion + * 2. OpCode + * 3. RequestTimeout + * 4. TableName + * 5. ReturnRowFlag + * 6. MapValue + * 7. IfUpdateTTL + * 8. TTLValue + */ + @Test + public void testPutDDoS() { + + assumeTrue(onprem == false); + assumeTrue(cloudRunning == false); + + final int[] lengths = { + 2 /* SerialVersion: short */, + 1 /* OpCode: byte */, + 3 /* RequestTimeout: packed int */, + 6 /* TableName: String */, + 1 /* ReturnRowFlag: boolean */, + 1 /* Durability: one byte */, + 1 /* ExactMatch: boolean */, + 1 /* IdentityCacheSize: packed int */, + 248 /* Record: MapValue */, + 1 /* IfUpdateTTL: boolean */, + 2 /* TTL: value(packed long) + unit(byte)*/ + }; + + final ByteOutputStream out = new NettyByteOutputStream(buf); + final byte[] bufBytes = serializeRequest(out, putRequest); + + try { + String test; + int offset = 0; + int pos = 0; + + test = "PUT OK test"; + executeDDoSRequests(test, buf, 0); + + /* + * SerialVersion + */ + + /* SerialVersion: 0 */ + test = "PUT Bad serialVersion: 0"; + buf.setShort(offset, 0); + executeDDoSRequests(test, buf, BAD_PROTOCOL_MESSAGE); + + /* + * OpCode + */ + + /* Invalid OpCode */ + offset += lengths[pos++]; + test = "PUT Bad OpCode"; + int invalidOpCode = OpCode.values().length; + refillBuffer(buf, bufBytes); + buf.setByte(offset, invalidOpCode); + executeDDoSRequests(test, buf, BAD_PROTOCOL_MESSAGE); + + /* + * RequestTimeout + */ + + /* requestTimeout: -5000 */ + test = "PUT Bad requestTimeout: -5000"; + offset += lengths[pos++]; + int invalidTimeout = -5000; + refillBuffer(buf, bufBytes); + setPackedInt(out, offset, invalidTimeout); + executeDDoSRequests(test, buf, BAD_PROTOCOL_MESSAGE); + + /* + * TableName + */ + + /* Invalid TableName: empty string */ + String invalidTableName = ""; + test = "PUT empty TableName"; + offset += lengths[pos++]; + refillBuffer(buf, bufBytes); + setString(out, offset, invalidTableName); + executeDDoSRequests(test, buf, BAD_PROTOCOL_MESSAGE); + + /* 
+ * ReturnRowFlag + */ + offset += lengths[pos++]; + + /* + * Durability + * Only in V3 and above + */ + short serialVersion = ((NoSQLHandleImpl)handle).getSerialVersion(); + if (serialVersion > 2) { + offset += lengths[pos++]; + } else { + pos++; + } + + /* + * ExactMatch + */ + offset += lengths[pos++]; + + /* + * IdentityCacheSize + */ + offset += lengths[pos++]; + + /* + * MapValue + */ + offset += lengths[pos++]; + testMapValue(buf, out, bufBytes, offset, lengths[pos]); + + /* + * IfUpdateTTLFlag + */ + offset += lengths[pos++]; + + /* + * TTL + */ + long invalidTTL = -2; + offset += lengths[pos++]; + test = "PUT TTL: " + invalidTTL; + refillBuffer(buf, bufBytes); + setPackedLong(out, offset, invalidTTL); + executeDDoSRequests(test, buf, BAD_PROTOCOL_MESSAGE); + + test = "PUT TTL: invalid ttl unit"; + refillBuffer(buf, bufBytes); + buf.setByte(offset + 1, -1); + executeDDoSRequests(test, buf, BAD_PROTOCOL_MESSAGE); + + } catch (IOException ioe) { + fail("Write failed: " + ioe.getMessage()); + } finally { + out.close(); + } + } + + private void testMapValue(ByteBuf buffer, + ByteOutputStream out, + byte[] bufBytes, + int baseOffset, + int length) throws IOException { + final int headerLen = 9; /* 1(type) + 4(length) + 4 (size)/*/ + final String[] fields = new String[] { + "avg", + "array", + "record", + "name", + "count", + "sum", + "id", + "exp", + "map", + "key" + }; + final int[] lengths = new int[] { + 13, /* avg: DOUBLE, 4(name) + 1(type) + 8(double) */ + 36, /* array: ARRAY, 6(name) + 1(type) + 29(value) */ + 34, /* record: RECORD, 7(name) + 1(type) + 26(value) */ + 19, /* name: STRING, 5(name) + 1(type) + 13(value) */ + 16, /* count: LONG, 6(name) + 1(type) + 9(value) */ + 44, /* sum: NUMBER, 4(name) + 1(type) + 39(value) */ + 5, /* id: INTEGER, 3(name) + 1(type) + 1(value) */ + 6, /* exp: BOOLEAN, 4(name) + 1(type) + 1(value) */ + 30, /* map: MAP, 4(name) + 1(type) + 25(value) */ + 36 /* key: BINARY, 4(name) + 1(type) + 31(value) */ + }; + + final Map offsets = new HashMap(); + int offset = baseOffset + headerLen; + for (int i = 0; i < fields.length; i++) { + offsets.put(fields[i], offset); + offset += lengths[i]; + } + + offset = baseOffset; + String test; + ByteInputStream in; + int value; + String svalue; + + /* Corrupted type of top MapValue */ + value = -1; + test = "MapValue: corrupted type of top MapValue, " + value ; + refillBuffer(buffer, bufBytes); + buffer.setByte(offset, value); + executeDDoSRequests(test, buf, BAD_PROTOCOL_MESSAGE); + + /* Wrong length value */ + offset += 1; + refillBuffer(buffer, bufBytes); + in = new NettyByteInputStream(buffer); + value = bufBytes.length + 1; + setInt(out, offset, value); + test = "MapValue: wrong length value, " + value ; + executeDDoSRequests(test, buf, BAD_PROTOCOL_MESSAGE); + + /* Wrong size value */ + offset += 4; + refillBuffer(buffer, bufBytes); + value = -1; + setInt(out, offset, value); + test = "MapValue: wrong size value, " + value ; + executeDDoSRequests(test, buf, BAD_PROTOCOL_MESSAGE); + + /* + * Field: avg + */ + String fname = "avg"; + offset = offsets.get(fname); + svalue = null; + refillBuffer(buffer, bufBytes); + setString(out, offset, svalue); + test = "MapValue: field name is null" ; + executeDDoSRequests(test, buf, BAD_PROTOCOL_MESSAGE); + + /* Corrupted value type */ + value = 100; + offset += fname.length() + 1; + test = "MapValue: corrupted type of field \"avg\", " + value ; + refillBuffer(buffer, bufBytes); + buffer.setByte(offset, value); + executeDDoSRequests(test, buf, BAD_PROTOCOL_MESSAGE); + + /* 
Invalid value type for DOUBLE */ + value = TYPE_BOOLEAN; + test = "MapValue: invalid value type for field \"avg\", " + value ; + refillBuffer(buffer, bufBytes); + buffer.setByte(offset, value); + executeDDoSRequests(test, buf, BAD_PROTOCOL_MESSAGE); + + fname = "array"; + offset = offsets.get(fname); + + /* Invalid value type for array value */ + offset += fname.length() + 1; + value = TYPE_MAP; + test = "MapValue: invalid value type for field \"array\", " + value ; + refillBuffer(buffer, bufBytes); + buffer.setByte(offset, value); + executeDDoSRequests(test, buf, ILLEGAL_ARGUMENT); + + value = TYPE_INTEGER; + test = "MapValue: invalid value type for field \"array\", " + value ; + refillBuffer(buffer, bufBytes); + buffer.setByte(offset, value); + executeDDoSRequests(test, buf, BAD_PROTOCOL_MESSAGE); + + /* Invalid length value of array value */ + length = readInt(in, offset); + offset++; + value = -1; + test = "MapValue: invalid length of \"array\", " + value ; + refillBuffer(buffer, bufBytes); + setInt(out, offset, value); + executeDDoSRequests(test, buf, BAD_PROTOCOL_MESSAGE); + } + + + /* + * Test bad protocol data on below values: + * 1. Consistency + * 2. PrimaryKey type + */ + @Test + public void testGetDDoS() { + + assumeTrue(onprem == false); + assumeTrue(cloudRunning == false); + + final int[] lengths = { + 2 /* SerialVersion: short*/, + 1 /* OpCode: byte*/, + 3 /* RequestTimeout: packed int */, + 6 /* TableName: string */, + 1 /* Consistency: boolean */, + 14 /* Key: 1(TYPE_MAP) + 4(length) + 4(size) + 3("id") + + 1(TYPE_INT) + 1(1-value) */ + }; + + final ByteOutputStream out = new NettyByteOutputStream(buf); + final byte[] bufBytes = serializeRequest(out, getRequest); + + try { + String test; + int pos; + int offset = 0; + + test = "GET OK test"; + executeDDoSRequests(test, buf, 0); + + /* + * Consistency + */ + + /* Move to offset of consistency */ + for (pos = 0; pos < 4; pos++) { + offset += lengths[pos]; + } + + /* Invalid consistency type */ + int value = -1; + test = "GET Invalid consistency type: " + value; + refillBuffer(buf, bufBytes); + buf.setByte(offset, value); + executeDDoSRequests(test, buf, BAD_PROTOCOL_MESSAGE); + + value = 3; + test = "GET Invalid consistency type: " + value; + refillBuffer(buf, bufBytes); + buf.setByte(offset, value); + executeDDoSRequests(test, buf, BAD_PROTOCOL_MESSAGE); + + /* + * PrimaryKey + */ + offset += lengths[pos++]; + + value = -1; + test = "GET Invalid value type of PrimaryKey: " + value; + refillBuffer(buf, bufBytes); + buf.setByte(offset, value); + executeDDoSRequests(test, buf, BAD_PROTOCOL_MESSAGE); + + value = TYPE_ARRAY; + test = "GET Invalid value type of PrimaryKey: " + value; + refillBuffer(buf, bufBytes); + buf.setByte(offset, value); + executeDDoSRequests(test, buf, BAD_PROTOCOL_MESSAGE); + + } finally { + out.close(); + } + } + + /* + * Test bad protocol on below values: + * 1. 
Statement + */ + @Test + public void testPrepareDDoS() { + + assumeTrue(onprem == false); + assumeTrue(cloudRunning == false); + + final int[] lengths = new int[] { + 2 /* SerialVersion: short */, + 1 /* OpCode: byte */, + 3 /* RequestTimeout: packed int */, + 20 /* Statement: string */ + }; + + final ByteOutputStream out = new NettyByteOutputStream(buf); + final byte[] bufBytes = serializeRequest(out, prepareRequest); + + try { + String test; + int pos; + int offset = 0; + + test = "PREPARE OK test"; + executeDDoSRequests(test, buf, 0); + + /* + * Statement + */ + for (pos = 0; pos < 3; pos++) { + offset += lengths[pos]; + } + + String svalue = null; + test = "PREPARE Invalid statement: " + svalue; + refillBuffer(buf, bufBytes); + setString(out, offset, svalue); + executeDDoSRequests(test, buf, BAD_PROTOCOL_MESSAGE); + + svalue = ""; + test = "PREPARE Invalid statement: " + svalue; + refillBuffer(buf, bufBytes); + setString(out, offset, svalue); + executeDDoSRequests(test, buf, BAD_PROTOCOL_MESSAGE); + + } catch (IOException ioe) { + fail("Failed to write to buffer: " + ioe); + } finally { + out.close(); + } + } + + /* + * Test bad protocol on below values: + * 1. PreparedStatement + * 2. Variables Number + * 3. Variable Name + * 4. Variable Value + */ + @Test + public void testQueryDDoS() { + + assumeTrue(onprem == false); + assumeTrue(cloudRunning == false); + + final QueryRequest queryReq = createQueryWithBoundStmtRequest(); + + final int prepStmtLen = + 4 /* int, length of PreparedStatement */+ + queryReq.getPreparedStatement().getStatement().length; + + final int[] lengths = { + 2 /* SerialVersion: short*/, + 1 /* OpCode: byte */, + 3 /* RequestTimeout: packed int */, + 1 /* Consistency: byte */, + 1 /* NumberLimit: packed int */, + 3 /* MaxReadKB: packed int */, + 1 /* ContinuationKey: byte array */, + 1 /* IsPreparedStatement: boolean */, + 2 /* QueryVersion: short */, + 1 /* traceLevel: packed int */, + 1 /* MaxWriteKB: packed int */, + 1 /* MathContext: byte */, + 1 /* ToplogySeqNum: packed int */, + 1 /* ShardId: packed int */, + 1 /* isSimpleQuery: boolean */, + prepStmtLen /* PreparedStatement: byte array */, + 1 /* VariablesNumber: packed int */, + 4 /* VariableName: string */, + 2 /* VariableValue: INT_TYPE + packed int */ + }; + + final ByteOutputStream out = new NettyByteOutputStream(buf); + final byte[] bufBytes = serializeRequest(out, queryReq); + + try { + String test; + int pos; + int offset = 0; + + test = "QUERY OK test"; + executeDDoSRequests(test, buf, 0); + + /* + * PreparedStatement + */ + for (pos = 0; pos < 15; pos++) { + offset += lengths[pos]; + } + + int value = -1; + test = "QUERY Invalid prepared Statement"; + refillBuffer(buf, bufBytes); + setInt(out, offset, value); + executeDDoSRequests(test, buf, BAD_PROTOCOL_MESSAGE); + + value = 0; + test = "QUERY Invalid prepared Statement"; + refillBuffer(buf, bufBytes); + setInt(out, offset, value); + executeDDoSRequests(test, buf, BAD_PROTOCOL_MESSAGE); + + /* + * Variables number + */ + value = -1; + offset += lengths[pos++]; + test = "QUERY Invalid variable number: " + value; + refillBuffer(buf, bufBytes); + setPackedInt(out, offset, value); + executeDDoSRequests(test, buf, BAD_PROTOCOL_MESSAGE); + + value = 2; + test = "QUERY Invalid variable number: " + value; + refillBuffer(buf, bufBytes); + setPackedInt(out, offset, value); + executeDDoSRequests(test, buf, BAD_PROTOCOL_MESSAGE); + + /* + * Variable name + */ + offset += lengths[pos++]; + String svalue = null; + test = "QUERY Invalid variable name: " + 
svalue; + refillBuffer(buf, bufBytes); + setString(out, offset, svalue); + executeDDoSRequests(test, buf, BAD_PROTOCOL_MESSAGE); + + svalue = ""; + test = "QUERY Invalid variable name: " + svalue; + refillBuffer(buf, bufBytes); + setString(out, offset, svalue); + executeDDoSRequests(test, buf, BAD_PROTOCOL_MESSAGE); + + /* + * Variable value + */ + offset += lengths[pos++]; + value = -1; + test = "QUERY Invalid variable value type: " + value; + refillBuffer(buf, bufBytes); + buf.setByte(offset, value); + executeDDoSRequests(test, buf, BAD_PROTOCOL_MESSAGE); + + value = TYPE_ARRAY; + test = "QUERY Invalid variable value type: " + value; + refillBuffer(buf, bufBytes); + buf.setByte(offset, value); + executeDDoSRequests(test, buf, ILLEGAL_ARGUMENT); + + } catch (IOException ioe) { + fail("Failed to write to buffer: " + ioe); + } finally { + out.close(); + } + } + + private QueryRequest createQueryWithBoundStmtRequest() { + final PrepareResult prepRet = handle.prepare(prepareBoundStmtRequest); + prepRet.getPreparedStatement() + .setVariable("$id", new IntegerValue(1)); + + final QueryRequest queryReq = new QueryRequest() + .setPreparedStatement(prepRet) + .setMaxReadKB(1024) + .setLimit(100); + return queryReq; + } + + private byte[] serializeRequest(ByteOutputStream out, Request request) { + + request.setDefaults(httpConfig); + + Serializer ser = request.createSerializer(factory); + try { + short serialVersion = ((NoSQLHandleImpl)handle).getSerialVersion(); + out.writeShort(serialVersion); + if (request instanceof QueryRequest || + request instanceof PrepareRequest) { + ser.serialize(request, serialVersion, + QueryDriver.QUERY_V3, out); + } else { + ser.serialize(request, serialVersion, out); + } + } catch (IOException e) { + fail("Failed to serialize put request"); + } + + final byte[] bytes = new byte[buf.writerIndex()]; + System.arraycopy(buf.array(), 0, bytes, 0, bytes.length); + + return bytes; + } + + private void executeRequest(String test, + ByteBuf buffer, + int expErrCode, + int minLatencyMs, + int maxLatencyMs, + int requestNum) { + + ResponseHandler responseHandler = null; + ByteInputStream bis = null; + + try { + Channel channel = httpClient.getChannel(timeoutMs); + responseHandler = new ResponseHandler(httpClient, null, channel); + + final FullHttpRequest request = + new DefaultFullHttpRequest(HTTP_1_1, POST, kvRequestURI, + buffer, + false /* Don't validate hdrs */); + HttpHeaders headers = request.headers(); + headers.add(HttpHeaderNames.HOST, getProxyHost()) + .add(REQUEST_ID_HEADER, nextRequestId()) + .set(CONTENT_TYPE, "application/octet-stream") + .set(CONNECTION, "keep-alive") + .set(ACCEPT, "application/octet-stream") + .setInt(CONTENT_LENGTH, buffer.readableBytes()); + + if (!onprem) { + headers.set(AUTHORIZATION, SecureTestUtil.getAuthHeader( + getTenantId(), isSecure())); + } + if (isSecure()) { + headers.add(REQUEST_COMPARTMENT_ID, getTenantId()); + } + + long startMs = System.currentTimeMillis(); + httpClient.runRequest(request, responseHandler, channel); + + if (responseHandler.await(timeoutMs)) { + throw new TimeoutException(); + } + + long endMs = System.currentTimeMillis(); + int latencyMs = (int)(endMs - startMs); + + if (latencyMs < minLatencyMs || latencyMs > maxLatencyMs) { + fail("Request " + requestNum + " took " + latencyMs + + "ms, expected between " + minLatencyMs + "ms and " + + maxLatencyMs + "ms"); + } + + if (verbose) { + System.out.println("Request " + requestNum + " took " + + latencyMs + "ms"); + } + + /* Validates the response from proxy */ + 
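+ /* The proxy is expected to answer 200 OK even for protocol errors; the + * NoSQL error code is carried in the first byte of the payload, and V4 + * servers wrap it in an NSON map that getV4ErrorCode unpacks below. */ +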
assertEquals(HttpResponseStatus.OK, responseHandler.getStatus()); + bis = new NettyByteInputStream(responseHandler.getContent()); + int errCode = bis.readByte(); + if (expErrCode >= 0) { + if (expErrCode == errCode) { + return; + } + /* support V4 server error codes */ + if (errCode == 6) { /* nson MAP */ + errCode = getV4ErrorCode(responseHandler.getContent()); + } + assertEquals(test + " failed", expErrCode, errCode); + } + } catch (Throwable t) { + if (t instanceof TimeoutException) { + /* did we expect a timeout? */ + /* if timeoutMs is within min/max latency, yes */ + if (maxLatencyMs > timeoutMs ) { + /* all good, expected */ + if (verbose) { + System.out.println("Request " + requestNum + + " timed out (expected)"); + } + } else { + fail(test + " Request " + requestNum + + " timed out after " + timeoutMs + " ms"); + } + } else { + fail(test + " failed: " + t); + } + } finally { + if (bis != null) { + bis.close(); + } + if (responseHandler != null) { + responseHandler.close(); + } + } + } + + private void executeDDoSRequests(String test, + ByteBuf buffer, + int expErrCode) { + if (expErrCode != 0) { + /* sleep to cool down error limiters */ + try { + if (verbose) { + System.out.println(test + " Sleeping for 4 seconds..."); + } + Thread.sleep(4000); + } catch (Exception e) { + fail(e.getMessage()); + } + } + + /* first 5 should return expected error code */ + /* latency should be in single-digit ms, after first */ + for (int x=0; x<5; x++) { + executeRequest(test, buffer.retainedDuplicate(), expErrCode, 0, + (x==0) ? 500 : 100, x); + } + + /* next 5 should be slowed to >200ms latency */ + for (int x=0; x<5; x++) { + executeRequest(test, buffer.retainedDuplicate(), expErrCode, + (expErrCode==0) ? 0 : 200, + (expErrCode==0) ? 100 : 500, x); + } + + if (expErrCode == 0) { + return; + } + + /* at this point we expect requests to mostly timeout */ + + /* fire off parallel threads to effect >10 errs/sec */ + Thread threads[] = new Thread[5]; + for(int x=0; x<5; x++) { + threads[x] = new Thread(() -> + { + for (int y=0; y<3; y++) { + executeRequest(test, buffer.retainedDuplicate(), + expErrCode, 200, timeoutMs + 100, y); + } + }); + threads[x].start(); + } + /* wait for threads to finish */ + for(int x=0; x<5; x++) { + try { + threads[x].join(); + } catch (Exception ignored) {} + } + } + + private String nextRequestId() { + return String.valueOf(requestId++); + } + + private void refillBuffer(ByteBuf buffer, byte[] bytes) { + buffer.setBytes(0, bytes); + buffer.readerIndex(0); + buffer.writerIndex(bytes.length); + } + + private void setPackedInt(ByteOutputStream out, int offset, int value) + throws IOException { + + int savedOffset = out.getOffset(); + out.setWriteIndex(offset); + SerializationUtil.writePackedInt(out, value); + out.setWriteIndex(savedOffset); + } + + private void setInt(ByteOutputStream out, int offset, int value) + throws IOException { + + int savedOffset = out.getOffset(); + out.setWriteIndex(offset); + out.writeInt(value); + out.setWriteIndex(savedOffset); + } + + private void setPackedLong(ByteOutputStream out, int offset, long value) + throws IOException { + + int savedOffset = out.getOffset(); + out.setWriteIndex(offset); + SerializationUtil.writePackedLong(out, value); + out.setWriteIndex(savedOffset); + } + + private void setString(ByteOutputStream out, int offset, String value) + throws IOException { + + int savedOffset = out.getOffset(); + out.setWriteIndex(offset); + SerializationUtil.writeString(out, value); + out.setWriteIndex(savedOffset); + } + + private int 
readInt(ByteInputStream in, int offset) + throws IOException { + + int savedOffset = in.getOffset(); + in.setOffset(offset); + int value = in.readInt(); + in.setOffset(savedOffset); + return value; + } + + private void createTable() { + tableOperation(handle, createTableDDL, + new TableLimits(20000, 20000, 50), + TableResult.State.ACTIVE, 10000); + tableOperation(handle, createIndexDDL, null, + TableResult.State.ACTIVE, 10000); + } + + private MapValue createTestValue() { + MapValue row = new MapValue(); + row.put("id", 1); + row.put("name", "string value"); + row.put("count", Long.MAX_VALUE); + row.put("avg", Double.MAX_VALUE); + row.put("sum", new BigDecimal("12345678901234567890123456789012345678")); + row.put("exp", true); + row.put("key", genBytes(30, null)); + + MapValue map = new MapValue(); + map.put("k1", 100); + map.put("k2", 200); + map.put("k3", 300); + row.put("map", map); + + ArrayValue array = new ArrayValue(); + array.add("elem1"); + array.add("elem2"); + array.add("elem3"); + row.put("array", array); + + MapValue rec = new MapValue(); + rec.put("rid", 1024); + rec.put("rs", "nosql"); + row.put("record", rec); + + return row; + } + + private MapValue createTestKey(int id) { + return new MapValue().put("id", id); + } + + private byte[] genBytes(int length, Random rand) { + byte[] bytes = new byte[length]; + for (int i = 0; i < bytes.length; i++) { + bytes[i] = (rand == null)? (byte)(i % 256) : + (byte)rand.nextInt(256); + } + return bytes; + } + +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/DistributedRateLimitingTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/DistributedRateLimitingTest.java new file mode 100644 index 00000000..0c3e5742 --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/DistributedRateLimitingTest.java @@ -0,0 +1,791 @@ +/*- + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * See the file LICENSE for redistribution information. + * + */ + +package oracle.nosql.proxy; + +import static org.junit.Assume.assumeTrue; +import static org.junit.Assert.fail; + +import java.io.FileWriter; +import java.io.IOException; +import java.io.PrintWriter; +import java.lang.reflect.Method; +import java.util.ArrayList; +import java.util.List; +import java.util.Random; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; + +import oracle.nosql.driver.NoSQLHandle; +import oracle.nosql.driver.NoSQLHandleConfig; +import oracle.nosql.driver.ReadThrottlingException; +import oracle.nosql.driver.WriteThrottlingException; +import oracle.nosql.driver.RequestTimeoutException; +import oracle.nosql.driver.ops.GetRequest; +import oracle.nosql.driver.ops.GetResult; +import oracle.nosql.driver.ops.PutRequest; +import oracle.nosql.driver.ops.PutResult; +import oracle.nosql.driver.ops.QueryRequest; +import oracle.nosql.driver.ops.QueryResult; +import oracle.nosql.driver.ops.Request; +import oracle.nosql.driver.ops.RetryStats; +import oracle.nosql.driver.ops.TableLimits; +import oracle.nosql.driver.ops.TableRequest; +import oracle.nosql.driver.ops.TableResult; +import oracle.nosql.driver.values.MapValue; +import oracle.nosql.proxy.security.SecureTestUtil; + +import org.junit.After; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +/** + * Tests for distributed rate limiting. + * + * These tests use many client threads and multiple + * client handles to simulate many different clients. 
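+ * The query threads, for example, pick one of the handles at random for + * each operation, so a table's load is spread across the configured proxies + * rather than pinned to a single client.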
+ * In all cases, overall read/write units are maintained for + * each table used, and compared periodically and at the + * end of each test to verify they are reasonably close to + * the specified table limits. + * They also verify that there are very few or no throttling + * exceptions. + * + * Note that this test does NOT use driver-side rate limiters. + * TODO: add that as a config, verify use of both works OK + */ +public class DistributedRateLimitingTest extends ProxyTestBase { + + protected static int numTables = Integer.getInteger("test.numtables", 3); + protected static int baseUnits = Integer.getInteger("test.baseunits", 10); + protected static Random rand = new Random(System.currentTimeMillis()); + protected static int maxRowSize = 20000; + protected static int maxRows = 10000; + protected static int readTimeoutMs = 3000; + protected static int writeTimeoutMs = 3000; + + protected static int writerIntervalMs = + Integer.getInteger("test.outintervalms", 1000); + protected static PrintWriter printWriter; + static { + String outFile = System.getProperty("test.outfile"); + if (outFile == null) { + printWriter = null; + } else { + try { + printWriter = new PrintWriter(new FileWriter(outFile)); + } catch (Exception e) { + printWriter = null; + } + } + } + + @BeforeClass + public static void staticSetUp() + throws Exception { + + assumeTrue("Skipping DistributedRateLimitingTest for minicloud or " + + "cloud or onprem runs", + !Boolean.getBoolean(USEMC_PROP) && + !Boolean.getBoolean(USECLOUD_PROP) && + !Boolean.getBoolean(ONPREM_PROP)); + + staticSetUp(tenantLimits); + } + + /* note this overrides base class @Before */ + @Override + @Before + public void setUp() throws Exception { + + /* + * Configure the endpoint + */ + if (handles == null) { + handles = new NoSQLHandle[numProxies]; + for (int x=0; x {doPopulateThread(tNum);}); + threads[x].start(); + } + /* wait for threads to finish */ + verbose("Waiting for population threads to finish..."); + for(int x=0; x allResults = new ArrayList(); +/* TODO: in proxy: check current rate for table. 
if over, reduce maxReadKB */ + int maxKB = getTableUnits(tableNum) / 10; + if (maxKB < 5) maxKB = 5; + qreq = new QueryRequest().setStatement(query) + .setTimeout(10000) + .setMaxReadKB(maxKB); + if (rc.preferThrottling) { + if (setPreferThrottling(qreq) == false) { + return; + } + } + do { + QueryResult qr = handle.query(qreq); + List results = qr.getResults(); + for (MapValue mv : results) { + /* need to walk values, in case iteration triggers */ + /* more requests internally */ + allResults.add(mv); + } + rc.readCollectors[tableNum].collect(qr.getReadUnits()); + rc.writeCollectors[tableNum].collect(qr.getWriteUnits()); + /* this must be called _after_ getResults() */ + RetryStats rs = qr.getRetryStats(); + if (rs != null) { + int ne = rs.getNumExceptions(ReadThrottlingException.class); + if (ne > 0) { + rc.readCollectors[tableNum].addErrors(ne); + } + } + if (System.currentTimeMillis() > endMillis) { + break; + } + } while (!qreq.isDone()); + } catch (RequestTimeoutException rte) { + verbose("query '" + query + "' timed out: " + rte); + } catch (Exception e) { + verbose("query '" + query + "' got error: " + e); + RetryStats rs = qreq.getRetryStats(); + if (rs != null) { + int ne = rs.getNumExceptions(ReadThrottlingException.class); + if (ne > 0) { + rc.readCollectors[tableNum].addErrors(ne); + } + } + } + } + + + private static void runOneQueryClient(RunConfig rc, int tableNum) + throws IOException, InterruptedException { + + verbose("Driver thread " + Thread.currentThread().getId() + + " performing query operations..."); + + long endMillis = System.currentTimeMillis() + (rc.runSeconds * 1000); + + NoSQLHandle handle; + + while (System.currentTimeMillis() < endMillis) { + + /* simple count */ + handle = handles[rand.nextInt(handles.length)]; + runQuery(rc, "select count(*) from RLTable" + tableNum, + tableNum, handle, endMillis); + + /* full scan/dump */ + handle = handles[rand.nextInt(handles.length)]; + runQuery(rc, "select * from RLTable" + tableNum, + tableNum, handle, endMillis); + + /* more complex, with sort */ + handle = handles[rand.nextInt(handles.length)]; + runQuery(rc, "select audience_data from RLTable" + tableNum + + " where cookie_id > 1000 and cookie_id < 10000" + + " order by audience_data", tableNum, handle, endMillis); + } + } + + protected static void runTest( + int readThreads, + int writeThreads, + int qThreads, + int runSeconds) + throws Exception { + + /* skip this test if running on minicloud */ + assumeTrue(cloudRunning == false); + + boolean preferThrottling = Boolean.getBoolean("test.preferthrottling"); + + final int totalThreads = + readThreads + writeThreads + qThreads; + + TPCollector[] readCollectors = new TPCollector[numTables]; + TPCollector[] writeCollectors = new TPCollector[numTables]; + for (int x=0; x {collecterWatcher(rc);}); + threads[numThreads].start(); + numThreads++; + + for(int x=0; x {doWriteThread(rc, tNum);}); + threads[numThreads].start(); + numThreads++; + } + for(int x=0; x {doReadThread(rc, tNum);}); + threads[numThreads].start(); + numThreads++; + } + for(int x=0; x {doQueryThread(rc, tNum);}); + threads[numThreads].start(); + numThreads++; + } + + /* wait for threads to finish */ + for(int x=0; x 0 || qThreads > 0) { + double RUs = readCollectors[x].getOverallRate(); + System.out.println("RUs=" + getTableUnits(x) + " actual=" + RUs); + System.out.println(" read throttling errors = " + + readCollectors[x].getErrors()); + if (RUs > max || RUs < min) { + sb.append("RUs for " + getTableUnits(x) + + "RUs table failed: " + "min=" + min + 
", max=" + + max + ", actual=" + RUs + "\n"); + } + } + if (writeThreads > 0) { + double WUs = writeCollectors[x].getOverallRate(); + System.out.println("WUs=" + getTableUnits(x) + " actual=" + WUs); + System.out.println(" write throttling errors = " + + writeCollectors[x].getErrors()); + if (WUs > max || WUs < min) { + sb.append("WUs for " + getTableUnits(x) + + "WUs table failed: " + "min=" + min + ", max=" + + max + ", actual=" + WUs + "\n"); + } +// TODO: error counts, maybe less than 1/sec? some threshold? +// Only if preferThrottling == false + } + } + if (sb.length() > 0) { + fail(sb.toString()); + } + } + + private static boolean setPreferThrottling(Request req) { + Class requestClass = null; + try { + requestClass = Class.forName("oracle.nosql.driver.ops.Request"); + } catch (Throwable e) { + System.out.println("Could not find Request class:" + e); + return false; + } + Method setThrottleFunction = null; + try { + setThrottleFunction = requestClass.getMethod( + "setPreferThrottlingExceptions", + boolean.class); + } catch (Throwable e) { + verbose("Could not find " + + "Request.setPreferThrottlingExceptions(): " + e); + verbose("Skipping test"); + return false; + } + try { + setThrottleFunction.invoke(req, true); + } catch (Exception e) { + verbose("Could not invoke " + + "Request.setPreferThrottlingExceptions(): " + e); + verbose("Skipping test"); + return false; + } + return true; + } + + @Test + public void basicWriteTest() throws Exception { + runTest(0, 15, 0, 15); + } + + @Test + public void basicReadTest() throws Exception { + runTest(numTables * 5, 0, 0, 15); + } + + @Test + public void basicReadWriteTest() throws Exception { + runTest(numTables * 5, numTables * 5, 0, 15); + } + + @Test + public void basicQueryTest() throws Exception { + runTest(0, 0, numTables * 5, 20); + } + + @Test + public void readWriteQueryTest() throws Exception { + runTest(numTables * 4, numTables * 4, numTables * 4, 30); + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/ElasticityTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/ElasticityTest.java new file mode 100644 index 00000000..a806296e --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/ElasticityTest.java @@ -0,0 +1,1403 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2011, 2023 Oracle and/or its affiliates. All rights reserved. 
+ * + */ + +package oracle.nosql.proxy; + +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeTrue; + +import java.io.File; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Iterator; +import java.util.Random; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Predicate; +import java.util.function.Supplier; +import java.util.logging.Logger; +import java.util.stream.Collectors; + +import oracle.kv.KVStore; +import oracle.kv.KVStoreConfig; +import oracle.kv.KVStoreFactory; +import oracle.kv.StatementResult; +import oracle.kv.impl.admin.CommandServiceAPI; +import oracle.kv.impl.api.KVStoreImpl; +import oracle.kv.impl.api.table.TableKey; +import oracle.kv.impl.sna.StorageNodeAgent; +import oracle.kv.impl.topo.DatacenterId; +import oracle.kv.impl.topo.PartitionId; +import oracle.kv.impl.topo.StorageNodeId; +import oracle.kv.impl.util.CommonLoggerUtils; +import oracle.kv.impl.util.FileUtils; +import oracle.kv.impl.util.FormatUtils; +import oracle.kv.impl.util.PollCondition; +import oracle.kv.table.Index; +import oracle.kv.table.IndexKey; +import oracle.kv.table.PrimaryKey; +import oracle.kv.table.ReadOptions; +import oracle.kv.table.RecordValue; +import oracle.kv.table.Row; +import oracle.kv.table.Table; +import oracle.kv.table.TableAPI; + +import oracle.nosql.driver.NoSQLHandle; +import oracle.nosql.driver.NoSQLHandleConfig; +import oracle.nosql.driver.NoSQLHandleFactory; +import oracle.nosql.driver.RequestTimeoutException; +import oracle.nosql.driver.TableNotFoundException; +import oracle.nosql.driver.ops.GetTableRequest; +import oracle.nosql.driver.ops.PrepareRequest; +import oracle.nosql.driver.ops.PrepareResult; +import oracle.nosql.driver.ops.PreparedStatement; +import oracle.nosql.driver.ops.PutRequest; +import oracle.nosql.driver.ops.PutResult; +import oracle.nosql.driver.ops.QueryRequest; +import oracle.nosql.driver.ops.QueryResult; +import oracle.nosql.driver.ops.TableLimits; +import oracle.nosql.driver.ops.TableResult; +import oracle.nosql.driver.values.MapValue; +import oracle.nosql.driver.values.IntegerValue; +import oracle.nosql.driver.values.StringValue; + +import oracle.nosql.proxy.security.SecureTestUtil; +import oracle.nosql.proxy.util.PortFinder; +import oracle.nosql.proxy.util.CreateStoreUtils; +import oracle.nosql.proxy.util.CreateStore; + +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Ignore; +import org.junit.Test; + +/** + * Tests the correctness of queries under elasticity operations. + * + * Two kinds of elasticity operations are used in these tests. A store expansion + * expands a 3x1 store into a 6x1 store. A store contraction contracts a 6x1 + * store into a 3x1 store. A series of such expansions and contractions can be + * conducted in a test. + * + * The secondary query tests work in the following pattern.
The table rows have + * the following schema: + * userId name count + * where userId is an integer which is the primary key, name is a string which + * is not unique and count is the number of rows that has the same name. When + * we insert the rows, we group rows of the same name together, i.e., into + * consecutive userId blocks. The index is on the name field and the tests + * query the rows with a specified name. The test then start up two threads, an + * elasticity thread and a query thread. The elasticity thread does a series of + * elasticity operations described above while in the mean time the query + * thread does the query and verify the results. We also verify the query + * results before and after the elasticity operations to make sure there is no + * problem with the test insertion or elasticity operation. + * + * [KVSTORE-1518] + */ +public class ElasticityTest extends ProxyTestBase { + + private static boolean trace = false; + + private static final int startPort = 5000; + private static final int haRange = 5; + private static final Random rand = new Random(); + + private static final int POLL_CONDITION_INTERVAL = 1000; + private static final int NUM_ERRORS_TO_DISPLAY = 10; + private static final AtomicInteger topoCandidateSequencer = + new AtomicInteger(0); + + private final Logger logger = Logger.getLogger(getClass().getName()); + private CreateStore createStore = null; + private KVStore kvstore = null; + private StorageNodeAgent[] extraSNAs = new StorageNodeAgent[3]; + + private Proxy proxy; + private NoSQLHandle handle = null; + + private int maxReadKB = 25; + + String usersDDL = + "CREATE TABLE IF NOT EXISTS users ( " + + " uid integer, " + + " name string, " + + " int integer, " + + " count long, " + + " PRIMARY KEY (uid))"; + + String childDDL = + "CREATE TABLE IF NOT EXISTS users.child ( " + + " cid integer, " + + " cname string, " + + " cint integer, " + + " count integer, " + + " PRIMARY KEY (cid))"; + + String idxNameDDL = + "CREATE INDEX IF NOT EXISTS idx_name ON users(name)"; + + String idxIntDDL = + "CREATE INDEX IF NOT EXISTS idx_int ON users(int)"; + + String users2DDL = + "CREATE TABLE IF NOT EXISTS users2 ( " + + " uid1 integer, " + + " uid2 integer, " + + " name string, " + + " int integer, " + + " count long, " + + " PRIMARY KEY(shard(uid1), uid2))"; + + String idx2IntDDL = + "CREATE INDEX IF NOT EXISTS idx_int ON users2(int)"; + + String[] queries = { + // 0 + "declare $name string; " + + "select * from users where name = $name", + // 1 + "declare $low integer; $high integer; " + + "select * from users where $low <= int and int <= $high " + + "order by int", + // 2 + "declare $low integer; $high integer; " + + "select * from users where $low <= int and int <= $high " + + "order by int desc", + // 3 + "declare $low integer; $high integer; " + + "select int, count(*) as count " + + "from users " + + "where $low <= int and int <= $high " + + "group by int", + // 4 + "declare $low integer; $high integer; " + + "select int, count(*) as count " + + "from users " + + "group by int", + // 5 + "declare $low integer; $high integer; " + + "select * from users where $low <= uid and uid <= $high ", + // 6 + "declare $uid1 integer; $low integer; $high integer; " + + "select * from users2 where uid1 = $uid1 and $low <= int and int <= $high", + // 7 + "declare $name string; " + + "select p.uid, p.name, p.count as pcount, c.cid, c.count as ccount " + + "from nested tables(users p descendants(users.child c)) " + + "where p.name = $name", + // 8 + "declare $name string; " 
+ + "select p.uid, p.name, p.count as pcount, c.cid, c.count as ccount " + + "from users p, users.child c " + + "where p.uid = c.uid and p.name = $name", + }; + + int numRows; + int maxNameId1; + int maxNameId2; + int minNumRowsPerName = 1; + + final int maxChildRows = 30; + + // Maps nameId to count + final Map countMap = new HashMap(); + + /** Represents the test state. */ + private class TestState { + + TestState(int numQueryThreads) { + this.numQueryThreads = numQueryThreads; + } + + int numQueryThreads; + + private final AtomicInteger elasticityCount = + new AtomicInteger(0); + private final AtomicBoolean elasticityDone = + new AtomicBoolean(false); + private final AtomicInteger queryThreadDoneCount = + new AtomicInteger(0); + private final ConcurrentLinkedQueue errors = + new ConcurrentLinkedQueue<>(); + + private int getElasticityCount() { + return elasticityCount.get(); + } + + private void incElasticityCount() { + elasticityCount.getAndIncrement(); + } + + private boolean isElasticityDone() { + return elasticityDone.get(); + } + + private void setElasticityDone() { + elasticityDone.set(true); + } + + private boolean areQueriesDone() { + return queryThreadDoneCount.get() >= numQueryThreads; + } + + private void setQueryThreadDone() { + int done = queryThreadDoneCount.getAndIncrement(); + } + + private void reportError(Throwable t) { + errors.add(t); + } + + private Collection getErrors() { + return errors; + } + } + + private class QueryException extends RuntimeException { + + public static final long serialVersionUID = 1L; + + private long timestamp; + private String qname; + + private QueryException(String qname, String message) { + super(message); + this.timestamp = System.currentTimeMillis(); + this.qname = qname; + } + + @Override + public String toString() { + return String.format( + "Error executing query at %s with name=%s : %s", + FormatUtils.formatDateTimeMillis(timestamp), + qname, + getMessage()); + } + } + + private class ElasticityException extends RuntimeException { + + public static final long serialVersionUID = 1L; + + private ElasticityException(Throwable cause) { + super(cause); + } + } + + private static void trace(String msg) { + + if (trace) { + System.out.println(msg); + } + } + + + /* these override the Before/AfterClass methods in ProxyTestBase */ + @BeforeClass + public static void staticSetUp() { + assumeTrue("Skipping ElasticityTest for minicloud or cloud test", + !Boolean.getBoolean(USEMC_PROP) && + !Boolean.getBoolean(USECLOUD_PROP)); + } + + @AfterClass + public static void staticTearDown() {} + + @Override + @Before + public void setUp() throws Exception { + } + + @Override + @After + public void tearDown() throws Exception { + + if (proxy != null) { + proxy.shutdown(3, TimeUnit.SECONDS); + proxy = null; + } + + if (handle != null) { + handle.close(); + handle = null; + } + + if (kvstore != null) { + kvstore.close(); + kvstore = null; + } + + if (createStore != null) { + createStore.shutdown(); + createStore = null; + } + } + + private static void cleanupTestDir(String testDir) { + File testDirFile = new File(testDir); + if (!testDirFile.exists()) { + return; + } + clearDirectory(testDirFile); + } + + private void createStore( + String testSubDir, + int capacity, + int partitions) throws Exception { + + int port = getKVPort(); + String testDir = getTestDir() + "/" + testSubDir; + + cleanupTestDir(testDir); + + createStore = + new CreateStore( + testDir, + getStoreName(), + port, + 3, /* nsns */ + 3, /* rf */ + partitions, + capacity, + 256, /* mb */ 
+ false, /* use threads */ + null); + final File root = new File(testDir); + root.mkdirs(); + createStore.start(); + + kvstore = KVStoreFactory.getStore( + new KVStoreConfig(getStoreName(), + String.format("%s:%s", getHostName(), port))); + + proxy = ProxyTestBase.startProxy(); + + handle = createHandle(); + } + + private NoSQLHandle createHandle() { + + NoSQLHandleConfig hconfig = + new NoSQLHandleConfig(ProxyTestBase.getProxyEndpoint()); + + /* 5 retries, default retry algorithm */ + hconfig.configureDefaultRetryHandler(5, 0); + + hconfig.setRequestTimeout(30000); + //hconfig.setNumThreads(20); + + SecureTestUtil.setAuthProvider(hconfig, + ProxyTestBase.SECURITY_ENABLED, + ProxyTestBase.onprem(), + ProxyTestBase.getTenantId()); + hconfig.setLogger(logger); + + /* Open the handle */ + NoSQLHandle h = NoSQLHandleFactory.createNoSQLHandle(hconfig); + + /* do a simple op to set the protocol version properly */ + try { + GetTableRequest getTable = + new GetTableRequest().setTableName("noop"); + h.getTable(getTable); + } catch (TableNotFoundException e) {} + + return h; + } + + private void expandStore(int capacity) throws Exception { + + final CommandServiceAPI cs = createStore.getAdmin(); + final String hostname = createStore.getHostname(); + final String poolname = CreateStore.STORAGE_NODE_POOL_NAME; + final int portsPerFinder = 20; + + /* deploy 3 more sns */ + for (int i = 0; i < 3; ++i) { + int sid = i + 4; + PortFinder pf = new PortFinder( + startPort + (3 + i) * portsPerFinder, haRange, hostname); + int port = pf.getRegistryPort(); + + extraSNAs[i] = CreateStoreUtils.createUnregisteredSNA( + createStore.getRootDir(), + pf, + capacity, + String.format("config%s.xml", i + 3), + false /* useThreads */, + false /* createAdmin */, + 2 /* mb */, + null /* extra params */); + + CreateStoreUtils.waitForAdmin(hostname, port, 20, logger); + createStore.setExpansionSnas(extraSNAs); + + int planId = cs.createDeploySNPlan( + String.format("deploy sn%s", sid), + new DatacenterId(1), + hostname, + port, + "comment"); + + runPlan(planId); + + StorageNodeId snid = extraSNAs[i].getStorageNodeId(); + cs.addStorageNodeToPool(poolname, snid); + } + + String expandTopoName = + String.format("expand-%s", + topoCandidateSequencer.getAndIncrement()); + cs.copyCurrentTopology(expandTopoName); + cs.redistributeTopology(expandTopoName, poolname); + + int planId = cs.createDeployTopologyPlan( + "deploy expansion", expandTopoName, null); + runPlan(planId); + } + + private void contractStore() throws Exception { + + final CommandServiceAPI cs = createStore.getAdmin(); + final String poolname = CreateStore.STORAGE_NODE_POOL_NAME; + + for (int i = 0; i < 3; ++i) { + cs.removeStorageNodeFromPool(poolname, new StorageNodeId(i + 4)); + } + + String contractTopoName = + String.format("contract-%s", + topoCandidateSequencer.getAndIncrement()); + cs.copyCurrentTopology(contractTopoName); + cs.contractTopology(contractTopoName, poolname); + + verbose("Elasticity: Starting deploy-contraction plan"); + + int planId = cs.createDeployTopologyPlan( + "deploy contraction", contractTopoName, null); + runPlan(planId); + + for (int i = 0; i < 3; ++i) { + + verbose("Elasticity: Starting remove SN plan for SN " + (i+4)); + + planId = cs.createRemoveSNPlan( + String.format("remove sn%s", i + 4), + new StorageNodeId(i + 4)); + runPlan(planId); + + extraSNAs[i].shutdown(true, true, "contration"); + extraSNAs[i] = null; + + Files.deleteIfExists( + Paths.get(createStore.getRootDir(), + String.format("config%s.xml", i + 3))); + 
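/* also remove the removed SN's storage directory, presumably so a later + * expansion in the same test can recreate it from a clean state */ +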
FileUtils.deleteDirectory( + Paths.get(createStore.getRootDir(), + createStore.getStoreName(), + String.format("sn%s", i + 4)) + .toFile()); + } + } + + private void runPlan(int planId) throws Exception { + final CommandServiceAPI cs = createStore.getAdmin(); + cs.approvePlan(planId); + cs.executePlan(planId, false); + cs.awaitPlan(planId, 0, null); + cs.assertSuccess(planId); + } + + private void createTableAndIndex() { + + TableLimits limits = new TableLimits(90000, 15000, 50); + int timeout = 20000; + + ProxyTestBase.tableOperation(handle, usersDDL, limits, + TableResult.State.ACTIVE, timeout); + ProxyTestBase.tableOperation(handle, childDDL, null, + TableResult.State.ACTIVE, timeout); + ProxyTestBase.tableOperation(handle, idxNameDDL, null, + TableResult.State.ACTIVE, timeout); + ProxyTestBase.tableOperation(handle, idxIntDDL, null, + TableResult.State.ACTIVE, timeout); + ProxyTestBase.tableOperation(handle, users2DDL, limits, + TableResult.State.ACTIVE, timeout); + ProxyTestBase.tableOperation(handle, idx2IntDDL, null, + TableResult.State.ACTIVE, timeout); + } + + /** + * Populates {@code numRows} of rows and returns the maximum name ID. + * + * The userID is the natural series. The name is in the form of name.id + * where id is also the natural series but multiple rows can have the same + * id. The number of rows having the same name.id is picked at random + * between 1 and 5% {@code numRows}. The count stores the total number of + * rows that has the same name.id with that row so that by looking at the + * count field, we know how many rows should be returned for a query on + * that name.id. + */ + private void populateRows(String tableName, String testName) { + + TableAPI tableAPI = kvstore.getTableAPI(); + Table table = tableAPI.getTable("in.valid.iac.name.space:" + tableName); + Table childTable = tableAPI.getTable("in.valid.iac.name.space:users.child"); + boolean users2 = tableName.equals("users2"); + boolean innerJoin = testName.contains("InnerJoin"); + + MapValue row = new MapValue(); + int maxRowsPerNameId = Math.max(1, numRows * 5 / 100); + int uid1 = 0; + int uid2 = 0; + int nameId = 0; + int nrows = 0; + + Row kvrow = null; + if (table != null) { + kvrow = table.createRow(); + } + + PutRequest putRequest = new PutRequest() + .setValue(row) + .setTableName(tableName); + + if (users2) { + uid1 = 2; // all rows will go to partition 4 + } + + while (nrows < numRows) { + int maxRowsPerNameId2 = Math.min(numRows - nrows, maxRowsPerNameId); + long rowsPerNameId = ( + maxRowsPerNameId2 <= minNumRowsPerName ? + minNumRowsPerName : + minNumRowsPerName + + rand.nextInt(maxRowsPerNameId2 - minNumRowsPerName)); + + String name = ("name." 
+ nameId); + if (!users2) { + countMap.put(nameId, rowsPerNameId); + } + + for (int i = 0; i < rowsPerNameId; ++i) { + if (users2) { + row.put("uid1", uid1); + row.put("uid2", uid2); + if (kvrow != null) { + kvrow.put("uid1", uid1); + kvrow.put("uid2", uid2); + } + } else { + row.put("uid", uid1); + if (kvrow != null) { + kvrow.put("uid", uid1); + } + } + row.put("name", name); + row.put("int", nameId); + row.put("count", rowsPerNameId); + + PutResult res = handle.put(putRequest); + assertNotNull("Put failed", res.getVersion()); + + if (users2) { + ++uid2; + } else { + int numChildRows = rand.nextInt(maxChildRows); + if (numChildRows == 0 && innerJoin) { + numChildRows = 1; + } + + if (numChildRows > 0) { + + MapValue childRow = new MapValue(); + PutRequest cputRequest = new PutRequest() + .setValue(childRow) + .setTableName("users.child"); + + childRow.put("uid", uid1); + + for (int j = 0; j < numChildRows; ++j) { + childRow.put("cid", j); + childRow.put("cint", rand.nextInt(10)); + childRow.put("count", numChildRows); + + PutResult cres = handle.put(cputRequest); + assertNotNull("Put failed", cres.getVersion()); + } + } + + ++uid1; + } + ++nrows; + + /* + if (!users2) { + PartitionId pid = ((KVStoreImpl)kvstore). + getPartitionId(TableKey.createKey(table, kvrow, false).getKey()); + trace("Inserted row " + row + + " in P-" + pid.getPartitionId()); + } + */ + } + + ++nameId; + } + + if (users2) { + maxNameId2 = nameId; + } else { + maxNameId1 = nameId; + } + } + + private int getUserId(MapValue row) { + return row.get("uid").getInt(); + } + + private int getUserId2(MapValue row) { + return row.get("uid2").getInt(); + } + + private String getName(MapValue row) { + return row.get("name").getString(); + } + + private int getInt(MapValue row) { + return row.get("int").getInt(); + } + + private long getCount(MapValue row) { + return row.get("count").getLong(); + } + + private PartitionId getPartition(Table table, RecordValue rec) { + Row row = table.createRow(rec); + return ((KVStoreImpl) kvstore).getPartitionId( + TableKey.createKey(table, row, false).getKey()); + } + + /** Executes a secondary query and verifies the result. */ + private void queryAndVerify( + TestState testState, + int qid, + int qcount) { + + String qname = ("Q" + qid + "-" + qcount); + + PrepareRequest preq = new PrepareRequest(); + preq.setGetQueryPlan(true); + preq.setStatement(queries[qid]); + PrepareResult pres = handle.prepare(preq); + PreparedStatement prep = pres.getPreparedStatement(); + + //verbose("driver topo seq num = " + prep.topologySeqNum()); + + QueryRequest qreq = new QueryRequest(); + qreq.setPreparedStatement(prep); + qreq.setTimeout(30000); + qreq.setQueryName(qname); + qreq.setMaxReadKB(maxReadKB); + qreq.setTraceLevel(3); + //if (qcount > 4800) { + // qreq.setTraceLevel(0); + //} + + int maxNameId = maxNameId1; + int searchPKey = -1; + if (qid == 5) { + searchPKey = rand.nextInt(numRows); + } else if (qid == 6) { + searchPKey = 2; + maxNameId = maxNameId2; + } + + int searchNameId = rand.nextInt(maxNameId); + + String searchKey = ("name." 
+ searchNameId); + + //verbose("Executing query " + qname + " with search pkey " + + // searchPKey + " and search key " + searchKey); + + if (qid == 0 || qid == 7 || qid == 8) { + prep.setVariable("$name", new StringValue(searchKey)); + } else if (qid == 5) { + int lowKey = searchPKey - 20; + int highKey = searchPKey + 20; + prep.setVariable("$low", new IntegerValue(lowKey)); + prep.setVariable("$high", new IntegerValue(highKey)); + } else if (qid == 6) { + int lowKey = searchNameId - 100; + int highKey = searchNameId + 100; + prep.setVariable("$uid1", new IntegerValue(searchPKey)); + prep.setVariable("$low", new IntegerValue(lowKey)); + prep.setVariable("$high", new IntegerValue(highKey)); + } else { + int lowKey = searchNameId - 2; + int highKey = searchNameId + 2; + prep.setVariable("$low", new IntegerValue(lowKey)); + prep.setVariable("$high", new IntegerValue(highKey)); + } + + List results = new ArrayList<>(); + + try { + do { + QueryResult res = handle.query(qreq); + List list = res.getResults(); + for (MapValue val : list) { + results.add(val); + } + } while (!qreq.isDone()); + + if (results.isEmpty()) { + throw new QueryException(qname, "no records found"); + } + + verifyQueryResults(qid, qname, + maxNameId, searchNameId, searchPKey, + results); + } catch (QueryException e) { + testState.reportError(e); + if (testState.errors.size() == 1) { + trace("Elasticity:-1 query " + qname + " failed"); + trace(CommonLoggerUtils.getStackTrace(e)); + if (trace) { + qreq.printTrace(System.out); + } + } + throw e; + } catch (RequestTimeoutException rte) { + /* Don't fail the test. Due to the high load imposed to the store + * by ElasticityTest, it is possible for queries to timeout or + * get a "failed to read base topology" errors. We don't want + * to consider such erros as failures; only wrong query results + * are considered failures. */ + testState.reportError(rte); + verbose("Elasticity:-2 query " + qname + " failed"); + verbose(CommonLoggerUtils.getStackTrace(rte)); + } catch (TableNotFoundException tnfe) { + /* Don't fail the test. Same reason as above */ + testState.reportError(tnfe); + verbose("Elasticity:-2 query " + qname + " failed"); + verbose(CommonLoggerUtils.getStackTrace(tnfe)); + } catch (Throwable t) { + testState.reportError(t); + if (testState.errors.size() <= 10) { + trace("Elasticity:-2 query " + qname + " failed: " + t); + } + verbose("Elasticity:-2 query " + qname + " failed"); + verbose(CommonLoggerUtils.getStackTrace(t)); + throw new QueryException(qname, t.getMessage()); + } + } + + /** + * Verifies that the query result consists of a block of consecutive rows + * and the count matches the specified value. 
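+     *
+     * Ordered queries (qid 1, 2, 6) are checked for sort order, the group-by
+     * queries (qid 3, 4) are checked against the per-name counts recorded in
+     * countMap, and the join queries (qid 7, 8) are delegated to
+     * verifyQ7Results.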
+ */ + private void verifyQueryResults( + int qid, + String qname, + int maxNameId, + int searchNameId, + int searchPKey, + List results) { + + if (qid == 7 || qid == 8) { + verifyQ7Results(qid, qname, results); + return; + } + + long expectedCount = 0; + long actualCount = 0; + int prevInt = -1; + + /* Queries with range scan: compute expected count + * Make sure results are sorted and */ + if (qid == 1 || qid == 2 || qid == 6) { + + for (MapValue row : results) { + + ++actualCount; + int currInt = getInt(row); + + if (prevInt < 0) { + expectedCount += getCount(row); + prevInt = currInt; + continue; + } + + if (qid == 1 && prevInt > currInt) { + throw new QueryException(qname, "Query results are out of order"); + } + + if (qid == 2 && prevInt < currInt) { + throw new QueryException(qname, "Query results are out of order"); + } + + if (prevInt != currInt) { + expectedCount += getCount(row); + prevInt = currInt; + } + } + } + + /* Group-by queries */ + if (qid == 3 || qid == 4) { + if (qid == 4) { + expectedCount = maxNameId; + } else { + expectedCount = 5; + if (searchNameId == 0 || searchNameId == maxNameId - 1) { + expectedCount = 3; + } else if (searchNameId == 1 || searchNameId == maxNameId - 2) { + expectedCount = 4; + } + } + + for (MapValue row : results) { + + ++actualCount; + int currInt = getInt(row); + long currCount = getCount(row); + + if (countMap.get(currInt) != currCount) { + throw new QueryException(qname, + "Unexpected group count. Expected = " + countMap.get(currInt) + + " actual = " + currCount); + } + + if (prevInt < 0) { + prevInt = currInt; + continue; + } + + if (prevInt > currInt) { + throw new QueryException(qname, "Query results are out of order"); + } + + if (prevInt != currInt) { + prevInt = currInt; + } + } + } + + if (qid == 5) { + expectedCount = 41; + + if (searchPKey - 20 < 0) { + expectedCount -= (20 - searchPKey); + } else if (searchPKey + 20 >= numRows) { + expectedCount -= (searchPKey + 20 - numRows + 1); + } + } + + if (qid < 3 || qid == 5) { + /* Make sure the row Ids are consecutive without missing or duplicate */ + Collections.sort(results, Comparator.comparingInt((r) -> getUserId(r))); + + MapValue firstRow = results.get(0); + if (qid == 0) { + expectedCount = getCount(firstRow); + } + int startId = getUserId(firstRow); + int prevId = startId - 1; + + actualCount = 0; + for (MapValue row : results) { + ++actualCount; + int currId = getUserId(row); + if (prevId < currId - 1) { + notifyIncorrectRows("missing", qid, qname, + currId-1, prevId, currId); + } else if (prevId == currId) { + + notifyIncorrectRows("duplicating", qid, qname, + currId, prevId, currId); + } + prevId = currId; + } + } + + if (qid == 6) { + /* Make sure the row Ids are consecutive without missing or duplicate */ + Collections.sort(results, Comparator.comparingInt((r) -> getUserId2(r))); + + MapValue firstRow = results.get(0); + int startId = getUserId2(firstRow); + int prevId = startId - 1; + + actualCount = 0; + for (MapValue row : results) { + ++actualCount; + int currId = getUserId2(row); + if (prevId < currId - 1) { + notifyIncorrectRows("missing", qid, qname, + currId-1, prevId, currId); + } else if (prevId == currId) { + + notifyIncorrectRows("duplicating", qid, qname, + currId, prevId, currId); + } + prevId = currId; + } + } + + /* Make sure the count is correct. 
*/ + if (actualCount != expectedCount) { + throw new QueryException( + qname, "incorrect count, expected = " + + expectedCount + " actual = " + actualCount); + } + } + + private void verifyQ7Results( + int qid, + String qname, + List results) { + + /* Make sure the row Ids are consecutive without missing or duplicate */ + Collections.sort(results, Comparator.comparingInt((r) -> getUserId(r))); + + MapValue firstRow = results.get(0); + int startUid = getUserId(firstRow); + int prevUid = startUid - 1; + long numExpectedParentRows = firstRow.get("pcount").getLong(); + long numActualParentRows = 0; + int prevCid = -1; + int numExpectedChildRows = -1; + int numActualChildRows = -1; + + for (MapValue row : results) { + + //verbose(row); + + int currUid = getUserId(row); + + if (prevUid < currUid - 1) { + notifyIncorrectRows("missing", qid, qname, + currUid-1, prevUid, currUid); + } + + if (prevUid == currUid) { + + if (prevCid == -1) { + notifyIncorrectRows("duplicating", qid, qname, + currUid, prevUid, currUid); + } + + ++numActualChildRows; + int currCid = row.get("cid").getInt(); + if (prevCid < currCid - 1) { + throw new QueryException( + qname, + "missing row (" + currUid + ", " + (prevCid+1) + ")" + + "prevCid = " + prevCid + " currCid = " + currCid); + } else if (prevCid == currCid) { + throw new QueryException( + qname, + "duplicating row (" + currUid + ", " + currCid + ")"); + } + + prevCid = currCid; + } else { + if (numActualChildRows != numExpectedChildRows) { + throw new QueryException( + qname, + "incorrect number of child rows for parent uid : " + + currUid + " expected = " + numExpectedChildRows + + " actual = " + numActualChildRows); + } + + ++numActualParentRows; + + if (row.get("ccount").isNull()) { + prevCid = -1; + numExpectedChildRows = 0; + numActualChildRows = 0; + } else { + int currCid = row.get("cid").getInt(); + if (currCid != 0) { + throw new QueryException( + qname, + "missing row (" + currUid + ", " + 0 + ")" + + " currCid = " + currCid); + } + prevCid = 0; + numExpectedChildRows = row.get("ccount").getInt(); + numActualChildRows = 1; + } + } + + prevUid = currUid; + } + + if (numActualParentRows != numExpectedParentRows) { + throw new QueryException( + qname, + "incorrect number of parent rows: expected = " + + numExpectedParentRows + " actual = " + + numActualParentRows); + } + } + + private void notifyIncorrectRows(String cause, + int qid, + String qname, + int problemId, + int prevId, + int currId) { + + String tableName; + if (qid == 6) { + tableName = "users2"; + } else { + tableName = "users"; + } + + Table table = kvstore.getTableAPI().getTable("in.valid.iac.name.space:" + + tableName); + final Row row = table.createRow(); + if (qid == 6) { + row.put("uid", problemId); + } else { + row.put("uid", problemId); + } + throw new QueryException( + qname, + String.format( + "%s row with userId %s, " + + "prevId=%s, currId=%s, " + + "partition of the %s row: %s", + cause, problemId, prevId, currId, cause, + getPartition(table, row))); + + } + + /** + * Verifies the test state after elasticity ops and queries are done. 
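+     *
+     * Wrong query results and elasticity failures fail the test; transient
+     * RequestTimeoutException and TableNotFoundException errors recorded
+     * while the store is under elasticity load are tolerated (see
+     * queryAndVerify).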
+ */ + private void verifyTestState(TestState testState) { + + if (testState.isElasticityDone() && + testState.areQueriesDone() && + testState.getErrors().isEmpty()) { + return; + } + + List errorMessages = new ArrayList<>(); + + /* + Predicate unexpectedError = + (t) -> (!(t instanceof ElasticityException)) && + (!(t instanceof QueryException)); + + if (testState.getErrors().stream() + .filter(unexpectedError).count() != 0) { + errorMessages.add( + String.format( + "Unexpected exceptions. %s", + toErrorString(testState, unexpectedError))); + } + */ + + Predicate elasticityError = + (t) -> (t instanceof ElasticityException); + + if (testState.getErrors().stream() + .filter(elasticityError).count() != 0) { + errorMessages.add( + String.format( + "Unexpected elasticity exceptions. " + + "Total elasticity routines done: %s. " + + "%s", + testState.getElasticityCount(), + toErrorString(testState, elasticityError))); + } + + collectQueryErrors(testState, errorMessages); + + assertTrue(errorMessages.stream() + .collect(Collectors.joining("\n")), + errorMessages.isEmpty()); + } + + private String toErrorString(TestState testState, + Predicate filter) { + StringBuilder sb = new StringBuilder(); + long totalCount = testState.getErrors().stream().filter(filter).count(); + sb.append("Total number of errors: ").append(totalCount).append("\n"); + + testState.getErrors().stream().filter(filter) + .limit(NUM_ERRORS_TO_DISPLAY) + .forEach((t) -> { + sb.append("> ").append(CommonLoggerUtils.getStackTrace(t)). + append("\n"); + }); + return sb.toString(); + } + + private void collectQueryErrors(TestState testState, + List errorMessages) { + + List queryErrors = new ArrayList<>(); + for (Throwable t : testState.getErrors()) { + if (!(t instanceof QueryException)) { + continue; + } + QueryException qe = (QueryException) t; + queryErrors.add(qe); + } + + if (queryErrors.isEmpty()) { + return; + } + + StringBuilder sb = new StringBuilder(); + queryErrors.stream() + .limit(NUM_ERRORS_TO_DISPLAY) + .forEach((qe) -> { sb.append("> ").append(qe).append("\n"); }); + errorMessages.add( + String.format("Unexpected query exceptions. " + + "Total number of failures: %s.\n" + "%s", + queryErrors.size(), + sb.toString())); + } + + /* Start a thread to execute queries and verify the results. */ + private void startQueryThread(TestState testState, int qid) { + + Thread th = new Thread(() -> { + try { + int count = 0; + //while (count < 200) { + while (!testState.isElasticityDone()) { + queryAndVerify(testState, qid, count); + count++; + } + testState.setQueryThreadDone(); + } catch (Throwable t) { + /* make sure all query threads exit on failure */ + testState.setQueryThreadDone(); + testState.setElasticityDone(); + fail(t.getMessage()); + } + }); + th.setDaemon(true); + th.start(); + } + + private void startElasticityThread(TestState testState, + List routines) { + Thread th = new Thread(() -> { + try { + for (ElasticityRoutine routine : routines) { + routine.run(); + testState.incElasticityCount(); + } + } catch (Throwable t) { + testState.reportError(new ElasticityException(t)); + } finally { + testState.setElasticityDone(); + } + }); + th.setDaemon(true); + th.start(); + } + + private interface ElasticityRoutine { + void run() throws Exception; + } + + /** + * Tests the basic case that a query is executed under store expansion. + * This test is expected to exercise the most basic interaction between + * query and partition migration. 
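+     *
+     * The store starts with three capacity-1 SNs and 10 partitions;
+     * expandStore deploys three more SNs and redistributes the topology
+     * while a single query thread keeps running query 0 against the users
+     * table until the expansion completes.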
+ */ + @Test + public void testSmallExpansion() throws Exception { + int[] qids = { 0 }; + numRows = 1000; + testExpansion("smallExpansion", 1, 10, qids, "users"); + } + + @Test + public void testSmallExpansionSort() throws Exception { + int[] qids = { 1 }; + numRows = 1000; + testExpansion("smallExpansionSort", 1, 10, qids, "users"); + } + + @Test + public void testSmallExpansionSortDesc() throws Exception { + int[] qids = { 2 }; + numRows = 1000; + testExpansion("smallExpansionSortDesc", 1, 10, qids, "users"); + } + + @Test + public void testSmallExpansionGroup() throws Exception { + int[] qids = { 3 }; + numRows = 1000; + testExpansion("smallExpansionGroup", 1, 10, qids, "users"); + } + + @Test + public void testSmallExpansionGroup2() throws Exception { + int[] qids = { 4 }; + numRows = 1000; + testExpansion("smallExpansionGroup2", 1, 10, qids, "users"); + } + + @Test + public void testSmallExpansionAllPartitions() throws Exception { + int[] qids = { 5 }; + numRows = 1000; + testExpansion("smallExpansionAllParititions", 1, 10, qids, "users"); + } + + @Test + public void testSmallExpansionSinglePartition() throws Exception { + int[] qids = { 6, 6, 6, 6, 6 }; + numRows = 3000; + testExpansion("smallExpansionSinglePartition", 1, 10, qids, "users2"); + } + + @Test + public void testSmallExpansionJoin() throws Exception { + int[] qids = { 7 }; + numRows = 1000; + testExpansion("smallExpansionJoin", 1, 10, qids, "users"); + } + + @Test + public void testSmallExpansionInnerJoin() throws Exception { + int[] qids = { 8 }; + numRows = 1000; + testExpansion("smallExpansionInnerJoin", 1, 10, qids, "users"); + } + + @Test + public void testBigExpansionJoin() throws Exception { + org.junit.Assume.assumeTrue(!isLinux); /* skip if linux */ + int[] qids = { 7 }; + numRows = 10000; + testExpansion("bigExpansionJoin", 3, 20, qids, "users"); + } + + @Test + public void testBigExpansion() throws Exception { + org.junit.Assume.assumeTrue(!isLinux); /* skip if linux */ + int[] qids = { 0, 1, 4 }; + numRows = 10000; + testExpansion("bigExpansion", 3, 20, qids, "users"); + } + + private void testExpansion( + String testSubDir, + int capacity, + int partitions, + int[] qids, + String tableName) + throws Exception { + + createStore(testSubDir, capacity, partitions); + createTableAndIndex(); + populateRows(tableName, testSubDir); + + TestState testState = new TestState(qids.length); + + startElasticityThread(testState, Arrays.asList(() -> expandStore(capacity))); + //testState.setElasticityDone(); + try { + Thread.sleep(100); + } catch (InterruptedException e) { + } + + for (int qid : qids) { + startQueryThread(testState, qid); + } + + final long timeoutMillis = 600 * 1000; + PollCondition.await( + POLL_CONDITION_INTERVAL, timeoutMillis, + () -> + testState.isElasticityDone() && testState.areQueriesDone()); + + verifyTestState(testState); + } + + /** + * Tests the basic case that a query is executed under store contraction. + * This test is expected to exercise the most basic interaction between + * query and partition migration. 
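+     *
+     * The store is first expanded to six SNs and populated; the contraction
+     * then removes the three extra SNs while the query threads are running.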
+ */ + @Test + public void testSmallContraction() throws Exception { + int[] qids = { 0 }; + numRows = 1000; + testContraction("smallContraction", 1, 10, qids); + } + + @Test + public void testSmallContractionSort() throws Exception { + int[] qids = { 1 }; + numRows = 1000; + testContraction("smallContractionSort", 1, 10, qids); + } + + public void testContraction( + String testSubDir, + int capacity, + int partitions, + int[] qids) throws Exception { + + createStore(testSubDir, capacity, partitions); + expandStore(capacity); + createTableAndIndex(); + populateRows("users", testSubDir); + TestState testState = new TestState(qids.length); + + verbose("Elasticity: Starting store contraction"); + + for (int qid : qids) { + startQueryThread(testState, qid); + } + //testState.setQueryAndVerifyDone(); + startElasticityThread(testState, Arrays.asList(() -> contractStore())); + + /* Waits for both to finish. */ + final long timeoutMillis = 60 * 1000; + PollCondition.await( + POLL_CONDITION_INTERVAL, timeoutMillis, + () -> + testState.isElasticityDone() && testState.areQueriesDone()); + + verbose("Elasticity: Store contraction done"); + + /* Verify the results. */ + verifyTestState(testState); + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/JsonCollectionTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/JsonCollectionTest.java new file mode 100644 index 00000000..98777aca --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/JsonCollectionTest.java @@ -0,0 +1,846 @@ +/*- + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * See the file LICENSE for redistribution information. + * + */ + +package oracle.nosql.proxy; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.sql.Timestamp; +import java.util.ArrayList; + + +import oracle.nosql.driver.Version; +import oracle.nosql.driver.ops.DeleteRequest; +import oracle.nosql.driver.ops.DeleteResult; +import oracle.nosql.driver.ops.GetRequest; +import oracle.nosql.driver.ops.GetResult; +import oracle.nosql.driver.ops.PrepareRequest; +import oracle.nosql.driver.ops.PrepareResult; +import oracle.nosql.driver.ops.PutRequest; +import oracle.nosql.driver.ops.PutResult; +import oracle.nosql.driver.ops.QueryRequest; +import oracle.nosql.driver.ops.QueryResult; +import oracle.nosql.driver.ops.TableLimits; +import oracle.nosql.driver.ops.TableResult; +import oracle.nosql.driver.ops.WriteMultipleRequest; +import oracle.nosql.driver.ops.WriteMultipleResult; +import oracle.nosql.driver.values.BinaryValue; +import oracle.nosql.driver.values.FieldValue; +import oracle.nosql.driver.values.JsonNullValue; +import oracle.nosql.driver.values.JsonUtils; +import oracle.nosql.driver.values.MapValue; +import oracle.nosql.driver.values.TimestampValue; + +import org.junit.Ignore; +import org.junit.Test; + + +public class JsonCollectionTest extends ProxyTestBase { + + /* + * Basic data tests for JSON Collection + * put, get, writemultiple, simple query + */ + @Test + public void testJsonCollection() { + TableLimits limits = new TableLimits(10, 10, 1); + String createTable = "create table noschema " + + "(id integer, sid integer, primary key(shard(id), sid)) " + + "as json collection"; + String insertQ = "insert into noschema(id, sid, name) values(5, 6, 'jack') returning *"; + + 
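+        /*
+         * Only the primary key columns are declared in the DDL; a JSON
+         * collection table stores every other field (such as "name" in the
+         * insert above) as schemaless JSON in the row.
+         */
+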
tableOperation(handle, createTable, limits, null, + TableResult.State.ACTIVE, null); + PrepareRequest prepReq = new PrepareRequest() + .setStatement(insertQ); + PrepareResult prepRet = handle.prepare(prepReq); + + + QueryRequest qReq = new QueryRequest().setPreparedStatement(prepRet); + QueryResult qRes = handle.query(qReq); + + /* expected value from above insert query */ + MapValue value = new MapValue() + .put("id", 5) + .put("sid", 6) + .put("name", "jack"); + + for (MapValue res : qRes.getResults()) { + assertEquals(value, res); + } + + qReq = new QueryRequest() + .setStatement("select * from noschema"); + + qRes = handle.query(qReq); + for (MapValue res : qRes.getResults()) { + assertEquals(value, res); + } + + value = new MapValue() + .put("a", "aval") + .put("id", 10) + .put("name", "jane") + .put("sid", 7); + PutRequest putRequest = new PutRequest() + .setValue(value) + .setTableName("noschema"); + + PutResult pres = handle.put(putRequest); + assertNotNull("Put failed", pres.getVersion()); + + qReq = new QueryRequest() + .setStatement("select * from noschema"); + + qRes = handle.query(qReq); + for (MapValue res : qRes.getResults()) { + if (res.get("id").getInt() == 10) { + assertEquals(res, value); + } + } + + /* do a write multiple */ + WriteMultipleRequest wmReq = new WriteMultipleRequest(); + for (int i = 0; i < 10; i++) { + /* need new MapValue -- it is not copied */ + value = new MapValue() + .put("a", "aval") + .put("name", "jane") + .put("id", 10) // needs to be the same, it's shard key + .put("sid", i + 20) + .put("multindex", i); + PutRequest pr = new PutRequest() + .setValue(value) + .setTableName("noschema"); + wmReq.add(pr, false); + } + + WriteMultipleResult wmRes = handle.writeMultiple(wmReq); + assertEquals(10, wmRes.getResults().size()); + qReq = new QueryRequest() + .setStatement("select * from noschema"); + + int count = 0; + do { + qRes = handle.query(qReq); + for (MapValue res : qRes.getResults()) { + ++count; + } + } while (!qReq.isDone()); + assertEquals(count, 12); // 12 rows + + /* do a delete query */ + qReq = new QueryRequest() + .setStatement("delete from noschema where id = 10 and sid = 20"); + qRes = handle.query(qReq); + for (MapValue res : qRes.getResults()) { + assertEquals(1, res.get("numRowsDeleted").getInt()); + } + } + + /* + * Exercise code that allows valid non-information-losing casts for + * primary key fields + */ + @Test + public void testJsonCollectionKeyCast() { + TableLimits limits = new TableLimits(10, 10, 1); + String createTable = "create table noschema " + + "(id integer, id1 long, id2 number, id3 string, id4 double, " + + "primary key(shard(id), id1, id2, id3, id4)) " + + "as json collection"; + tableOperation(handle, createTable, limits, null, + TableResult.State.ACTIVE, null); + + /* normal types */ + MapValue value = new MapValue() + .put("id", 5) + .put("id1", 6L) + .put("id2", 7.6) + .put("id3", "jack") + .put("id4", 5.6); + + PutRequest putRequest = new PutRequest() + .setValue(value) + .setTableName("noschema"); + + PutResult pres = handle.put(putRequest); + assertNotNull("Put failed", pres.getVersion()); + + qry("select * from noschema"); + + /* use some coercion, success means no exception */ + value.put("id", "6") // string to int + .put("id1", "6789") // string to long + .put("id2", 5L) // long to number + .put("id3", "joe") // strings must be strings + .put("id4", 7L); // long to double + pres = handle.put(putRequest); + assertNotNull("Put failed", pres.getVersion()); + qry("select * from noschema"); + + value.put("id", 
5678L) // long to int, no loss + .put("id1", 1.0) // float/double to long + .put("id2", 56.67F) // float to number + .put("id3", "jane") // strings must be strings + .put("id4", "56.0005"); // string to double + pres = handle.put(putRequest); + assertNotNull("Put failed", pres.getVersion()); + qry("select * from noschema"); + + /* invalid coercion */ + value.put("id", 56780000000L) // long to int, data loss + .put("id1", 1.0) // float/double to long + .put("id2", 56.67F) // float to number + .put("id3", "jane") // strings must be strings + .put("id4", true); + try { + pres = handle.put(putRequest); + fail("should have failed"); + } catch (IllegalArgumentException iae) { + } + + /* valid, except for non-string for string */ + value.put("id", 7); + value.put("id3", 8); + value.put("id4", 5.6); + try { + pres = handle.put(putRequest); + fail("should have failed"); + } catch (IllegalArgumentException iae) { + } + } + + private void qry(String query) { + if (!verbose) { + return; + } + QueryRequest qReq = new QueryRequest() + .setStatement(query); + QueryResult qRes = handle.query(qReq); + System.out.println("Results of " + query + ":"); + for (MapValue res : qRes.getResults()) { + System.out.println("\t" + res); + } + } + + @Test + public void testPutIf() { + TableLimits limits = new TableLimits(10, 10, 1); + String createTable = "create table noschema(" + + "majorKey1 STRING, " + + "majorKey2 STRING, " + + "minorKey STRING, " + + "PRIMARY KEY (SHARD(majorKey1, majorKey2), minorKey))" + + "as json collection"; + + tableOperation(handle, createTable, limits, null, + TableResult.State.ACTIVE, null); + + final MapValue mapVal = new MapValue() + .put("majorKey1", "k020f3dd0") + .put("majorKey2", "80") + .put("minorKey", "1e") + .put("firstThread", false) + .put("operation", "POPULATE") + .put("index", 27777); + + final MapValue mapVal1 = new MapValue() + .put("majorKey1", "k020f3dd1") + .put("majorKey2", "81") + .put("minorKey", "1f") + .put("firstThread", false) + .put("operation", "POPULATE") + .put("index", 27777); + + final MapValue mapVal2 = new MapValue() + .put("majorKey1", "k020f3dd2") + .put("majorKey2", "81") + .put("minorKey", "1f") + .put("firstThread", false) + .put("operation", "POPULATE") + .put("index", 27777); + + final MapValue mapVal3 = new MapValue() + .put("majorKey1", "k020f3dd3") + .put("majorKey2", "81") + .put("minorKey", "1f") + .put("firstThread", false) + .put("operation", "POPULATE") + .put("index", 27777); + + /* Put a row */ + PutRequest putReq = new PutRequest() + .setValue(mapVal) + .setTableName("noschema"); + PutResult putRes = handle.put(putReq); + assertNotNull(putRes.getVersion()); + assertNull(putRes.getExistingVersion()); + assertNull(putRes.getExistingValue()); + assertReadKB(0, putRes.getReadKB(), putRes.getReadUnits(), true); + assertWriteKB(1, putRes.getWriteKB(), putRes.getWriteUnits()); + + /* Put a row again with SetReturnRow(false). + * expect no row returned. + */ + putReq.setReturnRow(false); + putRes = handle.put(putReq); + assertNotNull(putRes.getVersion()); + assertNull(putRes.getExistingVersion()); + assertNull(putRes.getExistingValue()); + assertReadKB(0, putRes.getReadKB(), putRes.getReadUnits(), true); + assertWriteKB(2, putRes.getWriteKB(), putRes.getWriteUnits()); + Version oldVersion = putRes.getVersion(); + + /* + * Put row again with SetReturnRow(true), + * expect existing row returned. 
+ */ + putReq.setReturnRow(true); + putRes = handle.put(putReq); + assertNotNull(putRes.getVersion()); + assertEquals(oldVersion, putRes.getExistingVersion()); + assertNotNull(putRes.getExistingValue()); + assertReadKB(1, putRes.getReadKB(), putRes.getReadUnits(), true); + assertWriteKB(2, putRes.getWriteKB(), putRes.getWriteUnits()); + oldVersion = putRes.getVersion(); + + /* + * Put a new row with SetReturnRow(true), + * expect no existing row returned. + */ + putReq = new PutRequest() + .setValue(mapVal1) + .setTableName("noschema") + .setReturnRow(true); + putRes = handle.put(putReq); + assertNotNull(putRes.getVersion()); + assertNull(putRes.getExistingVersion()); + assertNull(putRes.getExistingValue()); + assertReadKB(0, putRes.getReadKB(), putRes.getReadUnits(), true); + assertWriteKB(1, putRes.getWriteKB(), putRes.getWriteUnits()); + + /* PutIfAbsent an existing row, it should fail */ + putReq = new PutRequest() + .setValue(mapVal) + .setTableName("noschema") + .setOption(PutRequest.Option.IfAbsent); + putRes = handle.put(putReq); + assertNull(putRes.getVersion()); + assertNull(putRes.getExistingVersion()); + assertNull(putRes.getExistingValue()); + assertReadKB(1, putRes.getReadKB(), putRes.getReadUnits(), true); + assertWriteKB(0, putRes.getWriteKB(), putRes.getWriteUnits()); + + + /* + * PutIfAbsent fails + SetReturnRow(true), + * return existing value and version + */ + putReq.setReturnRow(true); + putRes = handle.put(putReq); + assertNull(putRes.getVersion()); + assertEquals(mapVal, putRes.getExistingValue()); + assertEquals(oldVersion, putRes.getExistingVersion()); + assertReadKB(1, putRes.getReadKB(), putRes.getReadUnits(), true); + assertWriteKB(0, putRes.getWriteKB(), putRes.getWriteUnits()); + + /* PutIfPresent an existing row, it should succeed */ + putReq = new PutRequest() + .setValue(mapVal) + .setTableName("noschema") + .setOption(PutRequest.Option.IfPresent); + putRes = handle.put(putReq); + assertNotNull(putRes.getVersion()); + assertNull(putRes.getExistingVersion()); + assertNull(putRes.getExistingValue()); + assertReadKB(1, putRes.getReadKB(), putRes.getReadUnits(), true); + assertWriteKB(2, putRes.getWriteKB(), putRes.getWriteUnits()); + oldVersion = putRes.getVersion(); + + /* + * PutIfPresent succeed + SetReturnRow(true), + * expect existing row returned. + */ + putReq.setReturnRow(true); + putRes = handle.put(putReq); + assertNotNull(putRes.getVersion()); + assertEquals(mapVal, putRes.getExistingValue()); + assertEquals(oldVersion, putRes.getExistingVersion()); + assertReadKB(1, putRes.getReadKB(), putRes.getReadUnits(), true); + assertWriteKB(2, putRes.getWriteKB(), putRes.getWriteUnits()); + Version ifVersion = putRes.getVersion(); + + /* PutIfPresent a new row, it should fail */ + putReq = new PutRequest() + .setValue(mapVal2) + .setTableName("noschema") + .setOption(PutRequest.Option.IfPresent); + putRes = handle.put(putReq); + assertNull(putRes.getVersion()); + assertNull(putRes.getExistingVersion()); + assertNull(putRes.getExistingValue()); + assertReadKB(1, putRes.getReadKB(), putRes.getReadUnits(), true); + assertWriteKB(0, putRes.getWriteKB(), putRes.getWriteUnits()); + + /* + * PutIfPresent fail + SetReturnRow(true), + * expect no existing row returned. 
+ */ + putReq.setReturnRow(true); + putRes = handle.put(putReq); + assertNull(putRes.getVersion()); + assertNull(putRes.getExistingVersion()); + assertNull(putRes.getExistingValue()); + assertReadKB(1, putRes.getReadKB(), putRes.getReadUnits(), true); + assertWriteKB(0, putRes.getWriteKB(), putRes.getWriteUnits()); + + /* PutIfAbsent a new row, it should succeed */ + putReq = new PutRequest() + .setOption(PutRequest.Option.IfAbsent) + .setValue(mapVal2) + .setTableName("noschema"); + putRes = handle.put(putReq); + assertNotNull(putRes.getVersion()); + assertNull(putRes.getExistingVersion()); + assertNull(putRes.getExistingValue()); + assertReadKB(1, putRes.getReadKB(), putRes.getReadUnits(), true); + assertWriteKB(1, putRes.getWriteKB(), putRes.getWriteUnits()); + + /* PutIfAbsent success + SetReturnRow(true) */ + putReq = new PutRequest() + .setOption(PutRequest.Option.IfAbsent) + .setValue(mapVal3) + .setTableName("noschema"); + putRes = handle.put(putReq); + assertNotNull(putRes.getVersion()); + assertNull(putRes.getExistingVersion()); + assertNull(putRes.getExistingValue()); + assertReadKB(1, putRes.getReadKB(), putRes.getReadUnits(), true); + assertWriteKB(1, putRes.getWriteKB(), putRes.getWriteUnits()); + + /* + * PutIfVersion an existing row with unmatched version, it should fail. + */ + putReq = new PutRequest() + .setOption(PutRequest.Option.IfVersion) + .setMatchVersion(oldVersion) + .setValue(mapVal) + .setTableName("noschema"); + putRes = handle.put(putReq); + assertNull(putRes.getVersion()); + assertNull(putRes.getExistingVersion()); + assertNull(putRes.getExistingValue()); + assertReadKB(1, putRes.getReadKB(), putRes.getReadUnits(), true); + assertWriteKB(0, putRes.getWriteKB(), putRes.getWriteUnits()); + + /* + * PutIfVersion fails + SetReturnRow(true), + * expect existing row returned. + */ + putReq.setReturnRow(true); + putRes = handle.put(putReq); + assertNull(putRes.getVersion()); + assertEquals(ifVersion, putRes.getExistingVersion()); + assertEquals(mapVal, putRes.getExistingValue()); + assertReadKB(1, putRes.getReadKB(), putRes.getReadUnits(), true); + assertWriteKB(0, putRes.getWriteKB(), putRes.getWriteUnits()); + + /* + * Put an existing row with matching version, it should succeed. + */ + putReq = new PutRequest() + .setOption(PutRequest.Option.IfVersion) + .setMatchVersion(ifVersion) + .setValue(mapVal) + .setTableName("noschema"); + putRes = handle.put(putReq); + assertNotNull(putRes.getVersion()); + assertNull(putRes.getExistingVersion()); + assertNull(putRes.getExistingValue()); + assertReadKB(1, putRes.getReadKB(), putRes.getReadUnits(), true); + assertWriteKB(2, putRes.getWriteKB(), putRes.getWriteUnits()); + ifVersion = putRes.getVersion(); + + /* + * PutIfVersion succeed + SetReturnRow(true), + * expect existing row returned. 
+ */ + putReq.setMatchVersion(ifVersion).setReturnRow(true); + putRes = handle.put(putReq); + assertNotNull(putRes.getVersion()); + assertNull(putRes.getExistingVersion()); + assertNull(putRes.getExistingValue()); + assertReadKB(1, putRes.getReadKB(), putRes.getReadUnits(), true); + assertWriteKB(2, putRes.getWriteKB(), putRes.getWriteUnits()); + } + + @Test + public void testIndexes() { + TableLimits limits = new TableLimits(10, 10, 1); + String createTable = "create table noschema " + + "(iD integer, SiD integer, primary key(shard(id), sid)) " + + "as json collection"; + String createIndex = "create index idx on noschema(name as string)"; + String createIndex1 = "create index idx1 on noschema(age as integer)"; + + tableOperation(handle, createTable, limits, null, + TableResult.State.ACTIVE, null); + tableOperation(handle, createIndex, null, null, + TableResult.State.ACTIVE, null); + tableOperation(handle, createIndex1, null, null, + TableResult.State.ACTIVE, null); + + MapValue value = new MapValue() + .put("a", "aval") + .put("sid", 7); + + PutRequest putRequest = new PutRequest() + .setTableName("noschema"); + for (int i = 0; i < 10; i++) { + value.put("id", i) + .put("nAme", ("jane" + i)) + .put("age", i); + putRequest.setValue(value); + PutResult pres = handle.put(putRequest); + assertNotNull("Put failed", pres.getVersion()); + } + + QueryRequest qReq = + new QueryRequest().setStatement("select * from noschema"); + QueryResult qRes = handle.query(qReq); + assertEquals(10, qRes.getResults().size()); + for (MapValue res : qRes.getResults()) { + /* assert case-preservation */ + assertTrue(res.toString().contains("SiD")); + assertTrue(res.toString().contains("nAme")); + } + + qReq = + new QueryRequest().setStatement( + "select * from noschema where age > 3 order by age"); + + ArrayList results = new ArrayList(); + do { + qRes = handle.query(qReq); + results.addAll(qRes.getResults()); + } while (!qReq.isDone()); + assertEquals(6, results.size()); + for (MapValue res : results) { + /* assert case-preservation */ + assertTrue(res.toString().contains("nAme")); + assertTrue(res.toString().contains("SiD")); + } + } + + @Ignore + public void testGeoIndexes() { + TableLimits limits = new TableLimits(10, 10, 1); + String createTable = "create table geo " + + "(id integer, primary key(id)) as json collection"; + final String pointIndex = + "create index idx_kind_ptn on geo(info.kind as string," + + "info.point as point)"; + final String geoIndex = + "create index idx_geom on geo(info.geom as geometry " + + "{\"max_covering_cells\":400})"; + + final String[] data = new String[] { + "insert into geo values(1, {\"info\": { " + + "\"kind\": \"farm\", \"point\": {\"type\":\"point\", " + + "\"coordinates\": [23.549, 35.2908]}}})", + "insert into geo values(2, {\"info\": { " + + "\"kind\": \"park\", \"point\": {\"type\":\"point\", " + + "\"coordinates\": [24.9, 35.4]}}})" + }; + + tableOperation(handle, createTable, limits, null, + TableResult.State.ACTIVE, null); + tableOperation(handle, pointIndex, null, null, + TableResult.State.ACTIVE, null); + tableOperation(handle, geoIndex, null, null, + TableResult.State.ACTIVE, null); + + for (String q : data) { + QueryRequest qReq = new QueryRequest().setStatement(q); + QueryResult qRes = handle.query(qReq); + System.out.println(qRes); + } + + QueryRequest qReq = + new QueryRequest().setStatement( + "select /* FORCE_PRIMARY_INDEX(geo) */ * from geo g where geo_near(g.info.point, " + + "{\"type\": \"point\", \"coordinates\": [24.0175, 35.5156]}," + + "5000)"); + 
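+        /*
+         * geo_near selects rows whose indexed point lies within the given
+         * distance in meters (5000 here) of the supplied GeoJSON point; the
+         * FORCE_PRIMARY_INDEX hint makes the query scan via the primary
+         * index rather than the geo indexes created above.
+         */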
QueryResult qRes = handle.query(qReq); + for (MapValue val : qRes.getResults()) { + System.out.println(val); + } + } + + /* + * Check edge and invalid situations for JSON Collection. + * Invalid: + * o invalid (not JSON) types + * o attempt to schema evolve + * o bad key + * Edge: + * o identity column as key, evolve sequence + * o TTL, with evolution + */ + @Test + public void testJsonCollectionEdge() { + TableLimits limits = new TableLimits(10, 10, 1); + String createTable = "create table noschema " + + "(id integer, primary key(id)) as json collection"; + tableOperation(handle, createTable, limits, null, + TableResult.State.ACTIVE, null); + + /* + * Bad types + */ + final Timestamp ts = Timestamp.valueOf("2018-05-02 10:23:42.123"); + final FieldValue tsVal = new TimestampValue(ts); + badType("time", tsVal, "noschema"); + + badType("bin", new BinaryValue(new byte[4]), "noschema"); + + /* + * Try to evolve in an illegal manner + */ + final String alter = "alter table noschema(add name string)"; + TableResult tres = tableOperation(handle, alter, null, null, + TableResult.State.ACTIVE, + IllegalArgumentException.class); + + /* + * new table, with identity col, evolve it to change sequence start + */ + createTable = "create table noschema1 " + + "(id integer generated always as identity, " + + "primary key(id)) as json collection"; + tres = tableOperation(handle, createTable, limits, 5000); + + tres = tableOperation(handle, "alter table noschema1 (modify id " + + "generated always as identity(start with 1002))", + null, 5000); + + /* + * Put a row and verify that the generated value is 1002 + */ + MapValue value = new MapValue() + .put("name", "myname") + .put("nullval", JsonNullValue.getInstance()); + PutRequest putRequest = new PutRequest() + .setValue(value) + .setTableName("noschema1"); + PutResult pres = handle.put(putRequest); + + QueryRequest qReq = new QueryRequest() + .setStatement("select * from noschema1"); + QueryResult qRes = handle.query(qReq); + for (MapValue res : qRes.getResults()) { + assertTrue(res.get("id").getInt() == 1002); + } + + /* + * Add a TTL + */ + tres = tableOperation(handle, "alter table noschema1 using TTL 5 days", + null, 5000); + assertTrue(tres.getDdl().toLowerCase().contains("5 days")); + + tres = tableOperation(handle, "alter table noschema1 using TTL 2 hours", + null, 5000); + assertTrue(tres.getDdl().toLowerCase().contains("2 hours")); + } + + @Test + public void testNested() { + TableLimits limits = new TableLimits(10, 10, 1); + final String createTable = "create table noschema(id long, " + + "primary key(id)) as json collection"; + TableResult tres = tableOperation(handle, createTable, limits, 5000); + String json = "{" + + "\"id\":0," + + "\"name\": \"Foo\"," + + "\"tags\": [\"rock\",\"metal\",\"bar\"]" + + "}"; + + String json1 = "{" + + "\"id\":1," + + "\"name\": \"Foo\"," + + "\"obj\": {\"a\":1,\"b\":2,\"c\":3, " + + "\"tags\": [\"rock\",\"metal\",\"bar\"]" + + "}}"; + + String json2 = "{" + + "\"id\":2," + + "\"obj\": {\"a\":1,\"b\":2,\"c\":3, " + + "\"obj1\": {\"d\":1,\"e\":2,\"f\":3} " + + "}}"; + + String[] docs = new String[]{json, json1, json2}; + int i = 0; + for (String doc : docs) { + MapValue val = (MapValue)FieldValue.createFromJson(doc,null); + PutRequest pr = new PutRequest() + .setValue(val) + .setTableName("noschema"); + PutResult pres = handle.put(pr); + assertNotNull("Put failed", pres.getVersion()); + + GetRequest gr = new GetRequest() + .setKey(new MapValue().put("id", i++)) + .setTableName("noschema"); + GetResult gres = 
handle.get(gr); + assertTrue(JsonUtils.jsonEquals(val.toString(), + gres.getValue().toString())); + } + } + + @Test + public void testDelete() { + TableLimits limits = new TableLimits(10, 10, 1); + String createTable = "create table noschema(" + + "majorKey1 STRING, " + + "majorKey2 STRING, " + + "minorKey STRING, " + + "PRIMARY KEY (SHARD(majorKey1, majorKey2), minorKey))" + + "as json collection"; + + tableOperation(handle, createTable, limits, null, + TableResult.State.ACTIVE, null); + + final MapValue key = new MapValue() + .put("majorKey1", "k020f3dd0") + .put("majorKey2", "80") + .put("minorKey", "1e"); + + /* put a row */ + PutRequest putReq = new PutRequest() + .setTableName("noschema") + .setValue(key); + PutResult putRes = handle.put(putReq); + assertNotNull(putRes.getVersion()); + + /* Delete a row */ + DeleteRequest delReq = new DeleteRequest() + .setKey(key) + .setTableName("noschema"); + DeleteResult delRes = handle.delete(delReq); + assertTrue(delRes.getSuccess()); + assertNull(delRes.getExistingVersion()); + assertNull(delRes.getExistingValue()); + assertReadKB(1, delRes.getReadKB(), delRes.getReadUnits(), true); + assertWriteKB(1, delRes.getWriteKB(), delRes.getWriteUnits()); + + /* Put the row back to store */ + putReq = new PutRequest().setValue(key).setTableName("noschema"); + putRes = handle.put(putReq); + Version oldVersion = putRes.getVersion(); + assertNotNull(oldVersion); + + /* Delete succeed + setReturnRow(true), existing row returned. */ + delReq.setReturnRow(true); + delRes = handle.delete(delReq); + assertTrue(delRes.getSuccess()); + assertEquals(oldVersion, delRes.getExistingVersion()); + assertEquals(key, delRes.getExistingValue()); + assertReadKB(1, delRes.getReadKB(), delRes.getReadUnits(), true); + assertWriteKB(1, delRes.getWriteKB(), delRes.getWriteUnits()); + + /* Delete fail + setReturnRow(true), no existing row returned. */ + delRes = handle.delete(delReq); + assertFalse(delRes.getSuccess()); + assertNull(delRes.getExistingVersion()); + assertNull(delRes.getExistingValue()); + assertReadKB(1, delRes.getReadKB(), delRes.getReadUnits(), true); + assertWriteKB(0, delRes.getWriteKB(), delRes.getWriteUnits()); + + /* Put the row back to store */ + putReq = new PutRequest().setValue(key).setTableName("noschema"); + putRes = handle.put(putReq); + Version ifVersion = putRes.getVersion(); + assertNotNull(ifVersion); + + /* DeleteIfVersion with unmatched version, it should fail */ + delReq = new DeleteRequest() + .setMatchVersion(oldVersion) + .setKey(key) + .setTableName("noschema"); + delRes = handle.delete(delReq); + assertFalse(delRes.getSuccess()); + assertNull(delRes.getExistingVersion()); + assertNull(delRes.getExistingValue()); + assertReadKB(1, delRes.getReadKB(), delRes.getReadUnits(), true); + assertWriteKB(0, delRes.getWriteKB(), delRes.getWriteUnits()); + + /* + * DeleteIfVersion with unmatched version + setReturnRow(true), + * the existing row returned. + */ + delReq.setReturnRow(true); + delRes = handle.delete(delReq); + assertFalse(delRes.getSuccess()); + assertEquals(ifVersion, delRes.getExistingVersion()); + assertEquals(key, delRes.getExistingValue()); + assertReadKB(1, delRes.getReadKB(), delRes.getReadUnits(), true); + assertWriteKB(0, delRes.getWriteKB(), delRes.getWriteUnits()); + + /* DeleteIfVersion with matched version, it should succeed. 
*/ + delReq = new DeleteRequest() + .setMatchVersion(ifVersion) + .setKey(key) + .setTableName("noschema"); + delRes = handle.delete(delReq); + assertTrue(delRes.getSuccess()); + assertNull(delRes.getExistingVersion()); + assertNull(delRes.getExistingValue()); + assertReadKB(1, delRes.getReadKB(), delRes.getReadUnits(), true); + assertWriteKB(1, delRes.getWriteKB(), delRes.getWriteUnits()); + + /* Put the row back to store */ + putReq = new PutRequest().setValue(key).setTableName("noschema"); + putRes = handle.put(putReq); + ifVersion = putRes.getVersion(); + assertNotNull(ifVersion); + + /* + * DeleteIfVersion with matched version + setReturnRow(true), + * it should succeed but no existing row returned. + */ + delReq.setMatchVersion(ifVersion).setReturnRow(true); + delRes = handle.delete(delReq); + assertTrue(delRes.getSuccess()); + assertNull(delRes.getExistingVersion()); + assertNull(delRes.getExistingValue()); + assertReadKB(1, delRes.getReadKB(), delRes.getReadUnits(), true); + assertWriteKB(1, delRes.getWriteKB(), delRes.getWriteUnits()); + } + + private void badType(String fieldName, FieldValue val, String tableName) { + MapValue value = new MapValue() + .put("id", 10) + .put(fieldName, val); + PutRequest putRequest = new PutRequest() + .setValue(value) + .setTableName(tableName); + + try { + PutResult pres = handle.put(putRequest); + fail("operation should have thrown IAE"); + } catch (IllegalArgumentException iae) { + assertTrue(iae.getMessage().contains("Invalid JSON type")); + } + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/LatencyTestBase.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/LatencyTestBase.java new file mode 100644 index 00000000..98db1cf3 --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/LatencyTestBase.java @@ -0,0 +1,671 @@ +/*- + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * See the file LICENSE for redistribution information. 
+ * + */ + +package oracle.nosql.proxy; + +import static org.junit.Assume.assumeTrue; +import static org.junit.Assert.fail; + +import java.net.URL; +import java.io.IOException; +import java.util.Arrays; +import java.util.ArrayList; +import java.util.List; +import java.util.Random; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; + +import oracle.kv.impl.api.Request; +import oracle.kv.impl.api.RequestHandlerImpl; +import oracle.kv.impl.rep.RepNodeService; +import oracle.kv.impl.sna.ManagedRepNode; +import oracle.kv.impl.sna.ManagedService; +import oracle.kv.impl.test.TestHook; +import oracle.kv.impl.test.TestStatus; +import oracle.kv.util.kvlite.KVLite; + +import oracle.nosql.driver.NoSQLHandle; +import oracle.nosql.driver.NoSQLHandleConfig; +import oracle.nosql.driver.NoSQLHandleFactory; +import oracle.nosql.driver.RequestTimeoutException; +import oracle.nosql.driver.ops.GetRequest; +import oracle.nosql.driver.ops.PrepareRequest; +import oracle.nosql.driver.ops.PrepareResult; +import oracle.nosql.driver.ops.PutRequest; +import oracle.nosql.driver.ops.PutResult; +import oracle.nosql.driver.ops.QueryRequest; +import oracle.nosql.driver.ops.QueryResult; +import oracle.nosql.driver.ops.TableLimits; +import oracle.nosql.driver.ops.TableRequest; +import oracle.nosql.driver.ops.TableResult; +import oracle.nosql.driver.values.MapValue; +import oracle.nosql.proxy.security.SecureTestUtil; + +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.AfterClass; + + +/** + * A Base to use for running tests with artificially injected latency. + * + * These tests only runs against a local server and not minicloud. + * + * The tests use a KVLite that has a test hook that injects + * latencies (specified in latencySetUp) into all requests + */ +public class LatencyTestBase extends ProxyTestBase implements TestHook { + + private static int requestDelayMs; + private static int previousRequestThreads; + private static int previousPoolThreads; + private static boolean previousUseThreads; + private static boolean previousUseAsync; + + // note this hides the superclass static method so it won't be called + // and we don't do anything, instead provide a method for subclasses to + // call after their BeforeClass method. + @BeforeClass + public static void staticSetUp() + throws Exception { + } + + // This can be called in subclass BeforeClass methods + public static void latencySetUp(boolean useAsync, int delayMs) + throws Exception { + + requestDelayMs = delayMs; + + /* + * Run kvlite in this jvm, so we can set a testHook on each + * request to inject latency. + * Note: it is currently impossible to create a thread-based kvlite + * in anything other than a 1x1 (single node, single shard) + * configuration. But that's OK for the purposes of this test. 
+ */ + previousUseThreads = Boolean.getBoolean(KVLITE_USETHREADS_PROP); + System.setProperty(KVLITE_USETHREADS_PROP, "true"); + + /* + * configure the proxy to use sync or async calls (note this + * overrides any given cmdline parameters, which will get reset after + * this testcase finishes) + */ + previousUseAsync = Boolean.getBoolean(PROXY_ASYNC_PROP); + System.setProperty(PROXY_ASYNC_PROP, Boolean.toString(useAsync)); + + /* + * set the number of request processing threads very low, so + * we can verify no additional latency when RCn > PTn + * (RCn = number of concurrent requests) + * (PTn = number of proxy request threads) + * + * store the old value so we can reset it, if it was set + */ + previousRequestThreads = Integer.getInteger( + PROXY_REQUEST_THREADS_PROP, 0); + System.setProperty(PROXY_REQUEST_THREADS_PROP, "2"); + + previousPoolThreads = Integer.getInteger( + PROXY_REQUEST_POOL_SIZE_PROP, 0); + System.setProperty(PROXY_REQUEST_POOL_SIZE_PROP, "0"); + + /* this will silence stderr from kvlite */ + TestStatus.setActive(true); + + staticSetUp(tenantLimits); + } + + @AfterClass + public static void resetProperties() { + System.setProperty(KVLITE_USETHREADS_PROP, + Boolean.toString(previousUseThreads)); + System.setProperty(PROXY_ASYNC_PROP, + Boolean.toString(previousUseAsync)); + if (previousRequestThreads <= 0) { + System.clearProperty(PROXY_REQUEST_THREADS_PROP); + } else { + System.setProperty(PROXY_REQUEST_THREADS_PROP, + Integer.toString(previousRequestThreads)); + } + System.setProperty(PROXY_REQUEST_POOL_SIZE_PROP, + Integer.toString(previousPoolThreads)); + } + + @Before + public void asyncSetUp() + throws Exception { + + // set a test hook such that every request takes at least Nms + setRequestDelayHook(kvlite); + } + + @Override + public void doHook(Request r) { + // this will be run at the beginning of each request in kvlite + try { + Thread.sleep(requestDelayMs); + } catch (InterruptedException e) { + } + } + + /** + * set a per-request test hook, if defined. + */ + private void setRequestDelayHook(KVLite kvlite) { + if (requestDelayMs <= 0) { + return; + } + + /* + * KVLite runs one RN with useThreads=true. So we can get its + * ManagedRepNode service from the static ManagedService class. 
+ */ + ManagedRepNode mrn = (ManagedRepNode)ManagedService.getMainService(); + if (mrn == null) { + throw new RuntimeException( + "Error: can't set request delay hook: no ManagedRepNode"); + } + + RepNodeService rns = mrn.getRepNodeService(); + if (rns == null) { + throw new RuntimeException( + "Error: can't set request delay hook: no RepNodeService"); + } + + RequestHandlerImpl rhi = rns.getReqHandler(); + if (rhi == null) { + throw new RuntimeException( + "Error: can't set request delay hook: no RequestHandlerImpl"); + } + + rhi.setTestHook(this); + if (verbose) { + System.out.println("Set request delay hook " + + "for " + requestDelayMs + "ms delay on " + + rns.getRepNodeParams().getRepNodeId().getFullName()); + } + } + + + protected static NoSQLHandle createClientHandleAndTestTable( + String tableName, int numThreads) + throws Exception { + + URL serviceURL = + new URL("http", getProxyHost(), getProxyPort(), "/"); + NoSQLHandleConfig config = new NoSQLHandleConfig(serviceURL); + SecureTestUtil.setAuthProvider(config, isSecure(), + onprem, getTenantId()); + config.configureDefaultRetryHandler(0, 0); + NoSQLHandle myhandle = NoSQLHandleFactory.createNoSQLHandle(config); + + /* + * Create a simple table with an integer key and a single + * string field + */ + final String createTableStatement = + "CREATE TABLE IF NOT EXISTS " + tableName + + "(cookie_id LONG, audience_data STRING, PRIMARY KEY(cookie_id))"; + + TableRequest tableRequest = new TableRequest() + .setStatement(createTableStatement) + .setTableLimits(new TableLimits(100000, 100000, 50)); + TableResult tres = myhandle.tableRequest(tableRequest); + if (verbose) { + System.out.println("Creating table " + tableName); + } + /* + * The table request is asynchronous, so wait for the operation + * to complete. + */ + tres.waitForCompletion(myhandle, + 60000, /* wait 60 sec */ + 100); /* delay ms for poll */ + if (verbose) { + System.out.println("Created table " + tableName); + } + /* + * Ideally this would be done earlier but at this time kv + * requires that a table be created before the system table + * is initialized. 
TODO: watch kv for changes in this area + */ + waitForStoreInit(20); // wait 20s for init + return myhandle; + } + + protected static void dropTableAndCloseHandle( + NoSQLHandle myhandle, String tableName) + throws Exception { + + // drop the table + if (verbose) { + System.out.println("Dropping table " + tableName); + } + TableRequest tableRequest = new TableRequest() + .setStatement("DROP TABLE IF EXISTS " + tableName); + myhandle.tableRequest(tableRequest); + + // close handle + if (verbose) { + System.out.println("Closing handle..."); + } + myhandle.close(); + } + + + protected static class LatencyCollector { + String ltype; + long[] latencies; + AtomicInteger sampleNum; + AtomicLong total_us; + + LatencyCollector(String ltype, int numSamples) { + this.ltype = ltype; + this.latencies = new long[numSamples]; + this.sampleNum = new AtomicInteger(0); + this.total_us = new AtomicLong(0); + } + + void collect(long lat_ns) { + if (lat_ns==0) return; + int sample = sampleNum.incrementAndGet(); + latencies[sample % latencies.length] = lat_ns; + total_us.addAndGet(lat_ns / 1000); + } + + long avgLatencyUs() { + return total_us.get() / totalSamples(); + } + + int totalSamples() { + return sampleNum.get(); + } + + long avgLatencyMs() { + return avgLatencyUs() / 1000; + } + + void dumpLatencies() { + int totalSamples = totalSamples(); + if (totalSamples > latencies.length) { + totalSamples = latencies.length; + } + System.out.println("latencies: " + totalSamples + " samples:"); + for (int i=0; i latencies.length) { + totalSamples = latencies.length; + } + Arrays.sort(latencies, 0, totalSamples); + if (pct >= 100) { + return latencies[(totalSamples - 1)] / 1000; + } + return latencies[(totalSamples * pct) / 100] / 1000; + } + + long percentileLatencyMs(int pct) { + return percentileLatencyUs(pct) / 1000; + } + } + + protected static class RunConfig { + NoSQLHandle handle; + String tableName; + int runSeconds; + long maxID; + int maxSize; + int readTimeoutMs; + int writeTimeoutMs; + LatencyCollector readLatencyCollector; + LatencyCollector writeLatencyCollector; + LatencyCollector queryLatencyCollector; + + protected RunConfig( + NoSQLHandle handle, + String tableName, + int runSeconds, + long maxID, + int maxSize, + int readTimeoutMs, + int writeTimeoutMs) { + this.handle = handle; + this.tableName = tableName; + this.runSeconds = runSeconds; + this.maxID = maxID; + this.maxSize = maxSize; + this.readTimeoutMs = readTimeoutMs; + this.writeTimeoutMs = writeTimeoutMs; + readLatencyCollector = new LatencyCollector("get", 100000); + writeLatencyCollector = new LatencyCollector("put", 100000); + queryLatencyCollector = new LatencyCollector("query", 100000); + } + } + + protected static void runClient(RunConfig rc, int get_pct, int put_pct) { + try { + runOneClient(rc, get_pct, put_pct); + } catch (IOException e) { + } + } + + protected static void runOneClient(RunConfig rc, int get_pct, int put_pct) + throws IOException { + + Random rand = new Random(System.currentTimeMillis()); + + // generate random data for puts + final int leftLimit = 32; // space + final int rightLimit = 126; // tilde + String generatedString = rand.ints(leftLimit, rightLimit + 1) + .limit(rc.maxSize) + .collect(StringBuilder::new, + StringBuilder::appendCodePoint, StringBuilder::append) + .toString(); + + MapValue value = new MapValue(); + MapValue key = new MapValue(); + + PutRequest putRequest = new PutRequest() + .setTableName(rc.tableName); + putRequest.setTimeout(rc.writeTimeoutMs); + GetRequest getRequest = new GetRequest() 
+ .setTableName(rc.tableName); + getRequest.setTimeout(rc.readTimeoutMs); + + if (verbose) { + System.out.println("Driver thread " + + Thread.currentThread().getId() + " performing " + + get_pct + "% get, " + put_pct + "% put operations..."); + } + + /* factor out proxy warmup for table and store */ + boolean done = false; + while (!done) { + try { + key.put("cookie_id", 0L); + getRequest.setKey(key); + rc.handle.get(getRequest); + done = true; + } catch (Exception e) { + // ignore + } + } + + long endMillis = System.currentTimeMillis() + (rc.runSeconds * 1000); + + while (System.currentTimeMillis() < endMillis) { + + boolean do_get = (rand.nextInt(100) > (100 - get_pct)); + boolean do_put = (rand.nextInt(100) > (100 - put_pct)); + + // if neither, load next line and continue + if (do_get==false && do_put==false) { + continue; + } + + // set up random data + long id = rand.nextLong() % rc.maxID; + + if (do_put) { + value.put("cookie_id", id); + int begin = rand.nextInt(rc.maxSize / 4); + int end = begin + rand.nextInt((rc.maxSize * 3) / 4); + String sub = generatedString.substring(begin, end); + value.put("audience_data", sub); + + long start = System.nanoTime(); + putRequest.setValue(value); + try { + PutResult putRes = rc.handle.put(putRequest); + if (putRes.getVersion() == null) { + System.err.println("put failed!"); + } + } catch (Exception e) { + System.err.println(System.currentTimeMillis() + " PUT E"); + if (verbose) { + System.err.println(" " + e); + } + } + long elapsed = System.nanoTime() - start; + rc.writeLatencyCollector.collect(elapsed); + } + + if (do_get) { + long start = System.nanoTime(); + key.put("cookie_id", id); + getRequest.setKey(key); + try { + rc.handle.get(getRequest); + } catch (Exception e) { + System.err.println(System.currentTimeMillis() + " GET E"); + if (verbose) { + System.err.println(" " + e); + } + } + long elapsed = System.nanoTime() - start; + rc.readLatencyCollector.collect(elapsed); + } + + } + + } + + protected static void runQueries(RunConfig rc) { + try { + runOneQueryClient(rc); + } catch (IOException e) { + } catch (InterruptedException ie) { + return; + } + } + + private static void runQuery(RunConfig rc, String query) { + try { + List allResults = new ArrayList(); + /* factor out the one-time cost of prepare */ + PrepareRequest preq = new PrepareRequest().setStatement(query); + PrepareResult pres = rc.handle.prepare(preq); + QueryRequest qreq = new QueryRequest(). 
+ setPreparedStatement(pres.getPreparedStatement()); + long start = System.nanoTime(); + do { + QueryResult qr = rc.handle.query(qreq); + List results = qr.getResults(); + for (MapValue mv : results) { + // need to walk values, in case iteration triggers + // more requests internally + allResults.add(mv); + } + } while (!qreq.isDone()); + long elapsed = System.nanoTime() - start; + rc.queryLatencyCollector.collect(elapsed); + //System.err.println("query '" + query + "' ran to completion, " + + //"numResults=" + allResults.size()); + } catch (RequestTimeoutException rte) { + System.err.println("query '" + query + "' timed out: " + rte); + } catch (Exception e) { + System.err.println("query '" + query + "' got error: " + e); + } + } + + private static void runOneQueryClient(RunConfig rc) + throws IOException, InterruptedException { + + if (verbose) { + System.out.println("Driver thread " + + Thread.currentThread().getId() + + " performing query operations..."); + } + + long endMillis = System.currentTimeMillis() + (rc.runSeconds * 1000); + + while (System.currentTimeMillis() < endMillis) { + + // simple count + runQuery(rc, "select count(*) from " + rc.tableName); + + // full scan/dump + runQuery(rc, "select * from " + rc.tableName); + + // more complex, with sort + runQuery(rc, "select audience_data from " + rc.tableName + + " where cookie_id > 1000 and cookie_id < 10000" + + " order by audience_data"); + + TimeUnit.MILLISECONDS.sleep(10); + } + } + + private static void checkOpLatencies( + long minLatencyMs, long maxLatencyMs, + final String opType, LatencyCollector lc) { + + if (minLatencyMs <= 0 || maxLatencyMs <= 0) { + return; + } + + long latencyMs = lc.avgLatencyMs(); + if (latencyMs < minLatencyMs || latencyMs > maxLatencyMs) { + if (verbose) { + lc.dumpLatencies(); + } + long max = lc.percentileLatencyMs(100); + long lat99 = lc.percentileLatencyMs(99); + long lat95 = lc.percentileLatencyMs(95); + fail(opType + " latency of " + latencyMs + + "ms is out of range.\n" + + "Expected average latency is between " + + minLatencyMs + "ms and " + + maxLatencyMs + "ms. 95th=" + lat95 + " 99th=" + lat99 + + " max=" + max + " (samples=" + lc.totalSamples() + ")"); + } + } + + protected static void testLatency( + String tableName, + int readThreads, + int writeThreads, + int rwThreads, + int qThreads, + int runSeconds, + int minReadLatencyMs, + int maxReadLatencyMs, + int minWriteLatencyMs, + int maxWriteLatencyMs, + int minQueryLatencyMs, + int maxQueryLatencyMs) + throws Exception { + + // skip this test if running on minicloud + assumeTrue(cloudRunning == false); + + /* + * create threads, have them all hit the proxy as fast as + * possible with get/put requests for about 10 seconds. + * Verify that the resultant latency average is just over 100ms. 
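+     * With the proxy pinned to 2 request threads in the class setup above
+     * and every kv request delayed by requestDelayMs via the test hook, the
+     * async proxy can overlap the per-request delays, so the observed
+     * average should stay close to a single delay.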
+ * (in the sync case, this will be much higher) + */ + + final int totalThreads = + readThreads + writeThreads + rwThreads + qThreads; + + NoSQLHandle myhandle = + createClientHandleAndTestTable(tableName, totalThreads); + + RunConfig rc = new RunConfig( + myhandle, + tableName, + runSeconds, + 10000 /*maxID*/, + 5000 /*maxSize*/, + 2000 /*readTimeoutMs*/, + 2000 /*writeTimeoutMs*/); + + Thread threads[] = new Thread[totalThreads]; + + if (qThreads == totalThreads) { + // run puts to prepopulate data + for(int x=0; x {runClient(rc, 0, 100);}); + threads[x].start(); + } + for(int x=0; x {runClient(rc, 100, 0);}); + threads[numThreads].start(); + numThreads++; + } + for(int x=0; x {runClient(rc, 0, 100);}); + threads[numThreads].start(); + numThreads++; + } + for(int x=0; x {runClient(rc, 50, 50);}); + threads[numThreads].start(); + numThreads++; + } + for(int x=0; x {runQueries(rc);}); + threads[numThreads].start(); + numThreads++; + } + + // wait for threads to finish + for(int x=0; x 0 || rwThreads > 0) { + System.out.println("average latency for get ops: " + + rc.readLatencyCollector.avgLatencyMs() + "ms"); + System.out.println("99th percentile latency for get ops: " + + rc.readLatencyCollector.percentileLatencyMs(99) + "ms"); + } + if (writeThreads > 0 || rwThreads > 0) { + System.out.println("average latency for put ops: " + + rc.writeLatencyCollector.avgLatencyMs() + "ms"); + System.out.println("99th percentile latency for put ops: " + + rc.writeLatencyCollector.percentileLatencyMs(99) + "ms"); + } + if (qThreads > 0) { + System.out.println("average latency for query ops: " + + rc.queryLatencyCollector.avgLatencyMs() + "ms"); + System.out.println("99th percentile latency for query ops: " + + rc.queryLatencyCollector.percentileLatencyMs(99) + "ms"); + } + + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/LimitsTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/LimitsTest.java new file mode 100644 index 00000000..06b7e02c --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/LimitsTest.java @@ -0,0 +1,1312 @@ +/*- + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * See the file LICENSE for redistribution information. 
+ * + */ + +package oracle.nosql.proxy; + +import static oracle.nosql.driver.ops.TableLimits.CapacityMode.ON_DEMAND; +import static oracle.nosql.driver.ops.TableLimits.CapacityMode.PROVISIONED; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeTrue; + +import java.util.UUID; + +import org.junit.Test; + +import oracle.kv.impl.topo.RepNodeId; +import oracle.nosql.driver.DeploymentException; +import oracle.nosql.driver.EvolutionLimitException; +import oracle.nosql.driver.IndexLimitException; +import oracle.nosql.driver.KeySizeLimitException; +import oracle.nosql.driver.OperationThrottlingException; +import oracle.nosql.driver.RequestSizeLimitException; +import oracle.nosql.driver.RowSizeLimitException; +import oracle.nosql.driver.TableLimitException; +import oracle.nosql.driver.TableNotFoundException; +import oracle.nosql.driver.Version; +import oracle.nosql.driver.ops.GetTableRequest; +import oracle.nosql.driver.ops.PrepareRequest; +import oracle.nosql.driver.ops.PrepareResult; +import oracle.nosql.driver.ops.PreparedStatement; +import oracle.nosql.driver.ops.PutRequest; +import oracle.nosql.driver.ops.PutRequest.Option; +import oracle.nosql.driver.ops.PutResult; +import oracle.nosql.driver.ops.QueryRequest; +import oracle.nosql.driver.ops.QueryResult; +import oracle.nosql.driver.ops.TableLimits; +import oracle.nosql.driver.ops.TableResult; +import oracle.nosql.driver.ops.TableResult.State; +import oracle.nosql.driver.ops.WriteMultipleRequest; +import oracle.nosql.driver.values.MapValue; +import oracle.nosql.driver.values.StringValue; +import oracle.nosql.util.tmi.TableRequestLimits; +import oracle.nosql.util.tmi.TenantLimits; + +/** + * Test various limits: + * DDL limits (minicloud only) + * o num indexes + * o num tables + * o num schema evolutions + * Data limits (minicloud and not) + * o key size + * o index key size + */ +public class LimitsTest extends ProxyTestBase { + + final static int INDEX_KEY_SIZE_LIMIT = 64; + final static int KEY_SIZE_LIMIT = rlimits.getPrimaryKeySizeLimit(); + final static int QUERY_SIZE_LIMIT = rlimits.getQueryStringSizeLimit(); + final static int REQUEST_SIZE_LIMIT = rlimits.getRequestSizeLimit(); + final static int ROW_SIZE_LIMIT = rlimits.getRowSizeLimit(); + final static int BATCH_REQUEST_SIZE_LIMIT = + rlimits.getBatchRequestSizeLimit(); + + final static String tableName = "limitTable"; + + /* Create a table */ + final static String createTableDDL = + "CREATE TABLE IF NOT EXISTS limitTable (" + + "sid INTEGER, " + + "id INTEGER, " + + "name STRING, " + + "json JSON, " + + "PRIMARY KEY(SHARD(sid), id))"; + + /* Create a table used for key limits */ + final static String createKeyTable1DDL = + "CREATE TABLE IF NOT EXISTS keyLimitTable1 (" + + "name STRING, " + + "city STRING, " + + "PRIMARY KEY(name))"; + + /* Create a table used for key limits */ + final static String createKeyTable2DDL = + "CREATE TABLE IF NOT EXISTS keyLimitTable2 (" + + "name STRING, " + + "city STRING, " + + "address STRING, " + + "PRIMARY KEY(shard(name), city))"; + + /* Create an index for key limits */ + final static String createKeyTableIndexDDL = + "CREATE INDEX CityIndex on keyLimitTable1 (city)"; + + /** + * Test limit on number of indexes + */ + @Test + public void testIndexLimit() throws Exception { + assumeTrue("Skip the test if not minicloud or cloud test or " + + "tenantLimits is not provided", + cloudRunning && 
tenantLimits != null); + + TableRequestLimits limits = tenantLimits.getStandardTableLimits(); + int indexLimit = limits.getIndexesPerTable(); + + /* create a table with a bunch of fields */ + StringBuilder sb = new StringBuilder(); + sb.append("create table limitTable(id integer, "); + for (int i = 0; i < indexLimit + 1; i++) { + sb.append("name").append(i).append(" string,"); + } + sb.append("primary key(id))"); + + tableOperation(handle, sb.toString(), + new TableLimits(20000, 20000, 50), + TableResult.State.ACTIVE, 20000); + + for (int i = 0; i < indexLimit + 1; i++) { + sb = new StringBuilder(); + sb.append("create index idx").append(i). + append(" on limitTable(name").append(i).append(")"); + final String statement = sb.toString(); + if (i == indexLimit) { + try { + tableOperation(handle, statement, null, + TableResult.State.ACTIVE, 20000); + fail("Adding index should have failed"); + } catch (IndexLimitException ile) {} + } else { + tableOperation(handle, statement, null, + TableResult.State.ACTIVE, 20000); + } + } + + /* listIndexes is a test-only method right now */ + String[] indexes = listIndexes(handle, tableName); + assertEquals("Unexpected number of indexes", indexLimit, indexes.length); + } + + /** + * Test limit on index size + */ + @Test + public void testIndexSizeLimit() throws Exception { + assumeTrue(onprem == false); /* not for onprem */ + + /* create a table add some long-ish fields */ + final String tableDDL = "create table limitTable(id integer, " + + "data string, primary key(id))"; + + tableOperation(handle, tableDDL, + new TableLimits(20000, 20000, 50), + TableResult.State.ACTIVE, 20000); + + /* add some rows with data that exceeds default 64 bytes */ + String data = makeString(400); + for (int i = 0; i < 20; i++) { + PutRequest prq = new PutRequest().setTableName("limitTable") + .setValue(new MapValue().put("id", i).put("data", data)); + PutResult prs = handle.put(prq); + assertNotNull(prs.getVersion()); + } + + /* + * Create an index that should fail because of key size limit + */ + try { + final String statement = "create index idx on limitTable(data)"; + tableOperation(handle, statement, null, + TableResult.State.ACTIVE, 20000); + } catch (IllegalArgumentException iae) { + assertTrue(iae.getMessage().contains("KeySizeLimitException")); + /* expected */ + } + } + + /** + * Test limit on number of schema evolutions + */ + @Test + public void testEvolutionLimit() throws Exception { + assumeTrue("Skip the test if not minicloud or cloud test or " + + "tenantLimits is not provided", + cloudRunning && tenantLimits != null); + + TableRequestLimits limits = tenantLimits.getStandardTableLimits(); + int evoLimit = limits.getSchemaEvolutions(); + if (evoLimit > NUM_SCHEMA_EVOLUTIONS) { + /* + * To prevent this test from running too long, skip the test if the + * table evolution times limit > ProxyTestBase.NUM_SCHEMA_EVOLUTIONS + */ + return; + } + + createTable(); + + for (int i = 0; i < evoLimit + 1; i++) { + StringBuilder sb = new StringBuilder(); + sb.append("alter table limitTable(add name"). 
+ append(i).append(" string)"); + final String statement = sb.toString(); + if (i == evoLimit) { + try { + tableOperation(handle, statement, null, + TableResult.State.ACTIVE, 20000); + fail("Alter table should have failed, num alter table: " + + i + ", limit: " + evoLimit); + } catch (EvolutionLimitException ele) {} + } else { + tableOperation(handle, statement, null, + TableResult.State.ACTIVE, 20000); + } + } + } + + /** + * Test limit on number of tables + */ + @Test + public void testNumTablesLimit() throws Exception { + assumeTrue("Skip the test if not minicloud or cloud test or " + + "tenantLimits is not provided", + cloudRunning && tenantLimits != null); + + TenantLimits limits = tenantLimits; + int tableLimit = limits.getNumTables(); + if (tableLimit > NUM_TABLES) { + /* + * To prevent this test from running too long, skip the test if the + * table number limit > ProxyTestBase.NUM_TABLES + */ + return; + } + + for (int i = 0; i < tableLimit + 1; i++) { + StringBuilder sb = new StringBuilder(); + sb.append("create table t").append(i).append("(id integer, "). + append("primary key(id))"); + final String statement = sb.toString(); + if (i == tableLimit) { + /* + * Make the limits on these small so they don't trip + * the size/throughput per-tenant limits before hitting the + * number of tables limits + */ + try { + tableOperation(handle, statement, + new TableLimits(500, 500, 10), + TableResult.State.ACTIVE, 20000); + fail("create table should have failed, num create table: " + + i + ", limit: " + tableLimit); + } catch (TableLimitException iae) {} + } else { + tableOperation(handle, statement, + new TableLimits(500, 500, 10), + TableResult.State.ACTIVE, 20000); + } + } + } + + /* + * Not strictly a limit, but test the case where an index is created on + * existing JSON and there is a type mismatch. + */ + @Test + public void testBadIndexType() { + final String jsonRow = + "{\"sid\": 1, \"id\": 1, \"name\": \"joe\", \"json\":" + + "{\"age\":5}"+ + "}"; + final String conflictingIndex = + "create index idx on limitTable(json.age as string)"; + + createTable(); + + /* put the JSON */ + PutRequest prq = new PutRequest().setValueFromJson(jsonRow, null). + setTableName(tableName); + PutResult prs = handle.put(prq); + assertNotNull(prs.getVersion()); + + /* create a conflicting index, string index on integer field */ + try { + tableOperation(handle, conflictingIndex, null, + TableResult.State.ACTIVE, 20000); + fail("Attempt to add a conflicting index should have failed"); + } catch (IllegalArgumentException iae) { + // success + } + } + + /** + * The query size limit is artificially 200. 
See ProxyTestBase + */ + @Test + public void testQuerySizeLimit() { + assumeTrue(onprem == false); + createTable(); + final StringBuilder sb = new StringBuilder(); + sb.append("select aaaaa,bbbbb,ccccccccccc, dddddddd,") + .append("eeeeeee,ffffff ") + .append("from limitTable ") + .append("where xxxxxxxxxxxxxxxx = yyyyyyyyyyyyyyyyyyy"); + while (sb.toString().length() < QUERY_SIZE_LIMIT) { + sb.append(" and xxxxxxxxxxxxxxx = yyyyyyyyyyyyyyyyyyy"); + } + + final String longQuery = sb.toString(); + + createTable(); + + QueryRequest qr = new QueryRequest().setStatement(longQuery); + PrepareRequest pr = new PrepareRequest().setStatement(longQuery); + + try { + handle.query(qr); + fail("Query should have failed"); + } catch (IllegalArgumentException iae) { + // success + } + + try { + handle.prepare(pr); + fail("Prepare should have failed"); + } catch (IllegalArgumentException iae) { + // success + } + } + + /** + * Test key size limits (primary, index) and value size limit + */ + @Test + public void testKeyValueSizeLimit() { + assumeTrue("Skip the test if onprem test or tenantLimits is not provided", + !onprem && tenantLimits != null); + + final oracle.kv.Version kvver = + new oracle.kv.Version(UUID.randomUUID(), 1, new RepNodeId(1, 1), 1); + final Version dummyVersion = Version.createVersion(kvver.toByteArray()); + + createKeyTable(); + + /* + * PrimaryKey size limit + */ + final int keySizeLimit = tenantLimits.getStandardTableLimits() + .getPrimaryKeySizeLimit(); + PutRequest putReq; + WriteMultipleRequest umReq = new WriteMultipleRequest(); + int expFailIndex = keySizeLimit + 1; + for (int i = 32; i <= keySizeLimit + 1; i++) { + String name = makeName(i); + /* Put */ + putReq = new PutRequest() + .setTableName("keyLimitTable1") + .setValue(new MapValue() + .put("name", name) + .put("city", "Omaha")); + try { + handle.put(putReq); + if (i > keySizeLimit) { + fail("Put should have failed"); + } + } catch (KeySizeLimitException ex) { + assertEquals(expFailIndex, i); + } + + /* WriteMultipleRequest(Put) */ + umReq.clear(); + umReq.add(putReq, true); + try { + handle.writeMultiple(umReq); + if (i > keySizeLimit) { + fail("Put should have failed"); + } + } catch (KeySizeLimitException ex) { + assertEquals(expFailIndex, i); + } + + /* PutIfAbsent */ + putReq.setOption(Option.IfAbsent); + try { + handle.put(putReq); + if (i > keySizeLimit) { + fail("Put should have failed"); + } + } catch (KeySizeLimitException ex) { + assertEquals(expFailIndex, i); + } + + /* WriteMultipleRequest(PutIfAbsent) */ + umReq.clear(); + umReq.add(putReq, true); + try { + handle.writeMultiple(umReq); + if (i > keySizeLimit) { + fail("Put should have failed"); + } + } catch (KeySizeLimitException ex) { + assertEquals(expFailIndex, i); + } + + /* PutIfPresent */ + putReq.setOption(Option.IfPresent); + try { + handle.put(putReq); + if (i > keySizeLimit) { + fail("Put should have failed"); + } + } catch (KeySizeLimitException ex) { + assertEquals(expFailIndex, i); + } + + /* WriteMultipleRequest(PutIfPresent) */ + umReq.clear(); + umReq.add(putReq, true); + try { + handle.writeMultiple(umReq); + if (i > keySizeLimit) { + fail("Put should have failed"); + } + } catch (KeySizeLimitException ex) { + assertEquals(expFailIndex, i); + } + + /* PutIfVersion */ + putReq.setOption(Option.IfVersion) + .setMatchVersion(dummyVersion); + try { + handle.put(putReq); + if (i > keySizeLimit) { + fail("Put should have failed"); + } + } catch (KeySizeLimitException ex) { + assertEquals(expFailIndex, i); + } + + /* 
WriteMultipleRequest(PutIfVersion) */ + umReq.clear(); + umReq.add(putReq, true); + try { + handle.writeMultiple(umReq); + if (i > keySizeLimit) { + fail("Put should have failed"); + } + } catch (KeySizeLimitException ex) { + assertEquals(expFailIndex, i); + } + } + + /* Primary key contains 2 fields: 1 shard key + 1 minor key */ + String name = makeName(32); + expFailIndex = keySizeLimit - name.length() + 1; + for (int i = 0; i <= keySizeLimit - name.length() + 1; i++) { + String city = makeName(i); + /* Put */ + putReq = new PutRequest() + .setTableName("keyLimitTable2") + .setValue(new MapValue().put("name", name) + .put("city", city)); + + try { + handle.put(putReq); + if (i + name.length() > keySizeLimit) { + fail("Put should have failed"); + } + } catch (KeySizeLimitException ex) { + assertEquals(expFailIndex, i); + } + + /* PutIfAbsent */ + putReq.setOption(Option.IfAbsent); + try { + handle.put(putReq); + if (i > keySizeLimit) { + fail("Put should have failed"); + } + } catch (KeySizeLimitException ex) { + assertEquals(expFailIndex, i); + } + + /* PutIfPresent */ + putReq.setOption(Option.IfPresent); + try { + handle.put(putReq); + if (i > keySizeLimit) { + fail("Put should have failed"); + } + } catch (KeySizeLimitException ex) { + assertEquals(expFailIndex, i); + } + + /* PutIfVersion */ + putReq.setOption(Option.IfVersion) + .setMatchVersion(dummyVersion); + try { + handle.put(putReq); + if (i > keySizeLimit) { + fail("Put should have failed"); + } + } catch (KeySizeLimitException ex) { + assertEquals(expFailIndex, i); + } + } + + /* + * IndexKey size limit + */ + final int indexKeyLimit = INDEX_KEY_SIZE_LIMIT; + + /* + * Expect index size limit exception + */ + name = makeName(62); + for (int i = 1; i < indexKeyLimit + 1; i++) { + String city = makeName(i); + try { + putReq = new PutRequest() + .setTableName("keyLimitTable1") + .setValue(new MapValue().put("name", name) + .put("city", city)); + handle.put(putReq); + if (i > indexKeyLimit) { + fail("Put should have failed"); + } + } catch (Exception e) { + assertEquals(indexKeyLimit, i); + } + } + + /* + * Value size limit + */ + final int rowSizeLimit = ROW_SIZE_LIMIT; + String address = makeName(rowSizeLimit); + try { + putReq = new PutRequest() + .setTableName("keyLimitTable2") + .setValue(new MapValue().put("name", "aaaa") + .put("city", "Omaha") + .put("address", address)); + handle.put(putReq); + fail("Put should have failed"); + } catch (RowSizeLimitException ex) { + } + + /* + * Value size limit check on WriteMultiple sub request. 
+ */ + address = makeName(ROW_SIZE_LIMIT); + umReq = new WriteMultipleRequest(); + umReq.add(new PutRequest() + .setTableName("keyLimitTable2") + .setValue(new MapValue().put("name", "aaaa") + .put("city", "Omaha") + .put("address", address)), + false); + try { + handle.writeMultiple(umReq); + fail("WriteMultiple should have failed"); + } catch (RowSizeLimitException ex) { + } + } + + @Test + public void testInsertKeyValueSize() { + assumeTrue("Skip testKeyValueSizeInsert() if run against on-prem", + onprem == false); + assumeKVVersion("testInsertKeyValueSize", 21, 3, 5); + + final int maxKeySize = rlimits.getPrimaryKeySizeLimit(); + final int maxRowSize = rlimits.getRowSizeLimit(); + + tableOperation(handle, createKeyTable1DDL, + new TableLimits(1000, 1000, 5), + TableResult.State.ACTIVE, 10000); + + String ddl = "CREATE TABLE IF NOT EXISTS testId(" + + "id INTEGER GENERATED ALWAYS AS IDENTITY, pk STRING, " + + "s STRING, PRIMARY KEY(pk, id))"; + tableOperation(handle, ddl, new TableLimits(1000, 1000, 5), + TableResult.State.ACTIVE, 10000); + + + ddl = "CREATE TABLE IF NOT EXISTS test2pk(" + + "sk STRING, pk STRING, s STRING, PRIMARY KEY(shard(sk), pk))"; + tableOperation(handle, ddl, new TableLimits(1000, 1000, 5), + TableResult.State.ACTIVE, 10000); + + /* Test insert query */ + PrepareRequest preq; + PrepareResult pret; + + PreparedStatement pstmt; + QueryRequest qreq; + QueryResult qret; + String insert; + + String fmt = "insert into keyLimitTable1 values('%s', '%s')"; + String name64 = makeName(maxKeySize); + String city512K = makeName(maxRowSize - 5); /* 5 - overhead */ + + insert = String.format(fmt, name64, city512K); + preq = new PrepareRequest().setStatement(insert); + pret = handle.prepare(preq); + qreq = new QueryRequest().setPreparedStatement(pret); + qret = handle.query(qreq); + assertEquals(1, qret.getResults().size()); + + insert = String.format(fmt, name64 + "a", city512K); + preq = new PrepareRequest().setStatement(insert); + try { + handle.prepare(preq); + fail("Prepare should fail: key size exceeded"); + } catch (KeySizeLimitException ex) { + /* expected */ + checkErrorMessage(ex); + } + + qreq = new QueryRequest().setStatement(insert); + try { + handle.query(qreq); + fail("Query should fail: key size exceeded"); + } catch (KeySizeLimitException ex) { + /* expected */ + checkErrorMessage(ex); + } + + insert = String.format(fmt, name64, city512K + "a"); + preq = new PrepareRequest().setStatement(insert); + try { + handle.prepare(preq); + fail("Prepare should fail: value size exceeded"); + } catch (RowSizeLimitException ex) { + /* expected */ + checkErrorMessage(ex); + } + + qreq = new QueryRequest().setStatement(insert); + try { + handle.query(qreq); + fail("Query should fail: value size exceeded"); + } catch (RowSizeLimitException ex) { + /* expected */ + checkErrorMessage(ex); + } + + insert = "declare $name string; $city string; " + + "insert into keyLimitTable1(name, city) values($name, $city)"; + preq = new PrepareRequest().setStatement(insert); + pret = handle.prepare(preq); + pstmt = pret.getPreparedStatement(); + + pstmt.setVariable("$name", new StringValue(name64)); + pstmt.setVariable("$city", new StringValue(city512K)); + qreq = new QueryRequest().setPreparedStatement(pstmt); + qret = handle.query(qreq); + assertEquals(1, qret.getResults().size()); + + pstmt.clearVariables(); + pstmt.setVariable("$name", new StringValue(name64 + "a")); + pstmt.setVariable("$city", new StringValue(city512K)); + qreq = new QueryRequest().setPreparedStatement(pstmt); + try { + 
qret = handle.query(qreq); + fail("Query should fail: key size exceeded"); + } catch (KeySizeLimitException ex) { + /* expected */ + checkErrorMessage(ex); + } + + pstmt.clearVariables(); + pstmt.setVariable("$name", new StringValue(name64)); + pstmt.setVariable("$city", new StringValue(city512K + "a")); + qreq = new QueryRequest().setPreparedStatement(pstmt); + try { + qret = handle.query(qreq); + fail("Query should fail: value size exceeded"); + } catch (RowSizeLimitException ex) { + /* expected */ + checkErrorMessage(ex); + } + + fmt = "insert into testId(pk, s) values('%s', '%s')"; + insert = String.format(fmt, name64, city512K); + preq = new PrepareRequest().setStatement(insert); + pret = handle.prepare(preq); + + qreq = new QueryRequest().setPreparedStatement(pret); + try { + qret = handle.query(qreq); + fail("Query should fail: key size exceeded"); + } catch (KeySizeLimitException ex) { + /* expected */ + checkErrorMessage(ex); + } + + /* PK has 2 components */ + String s32 = makeName(32); + String s33 = makeName(33); + fmt = "insert into test2pk values('%s', '%s', 'a')"; + insert = String.format(fmt, s32, s32); + preq = new PrepareRequest().setStatement(insert); + pret = handle.prepare(preq); + qreq = new QueryRequest().setPreparedStatement(pret); + qret = handle.query(qreq); + assertEquals(1, qret.getResults().size()); + + /* Key size exceeded, sk: 33, pk: 32 */ + insert = String.format(fmt, s33, s32); + preq = new PrepareRequest().setStatement(insert); + try { + handle.prepare(preq); + fail("Prepare should fail: key size exceeded"); + } catch (KeySizeLimitException ex) { + /* expected */ + checkErrorMessage(ex); + } + + /* Key size exceeded, sk: 32, pk: 33 */ + insert = String.format(fmt, s32, s33); + preq = new PrepareRequest().setStatement(insert); + try { + handle.prepare(preq); + fail("Prepare should fail: key size exceeded"); + } catch (KeySizeLimitException ex) { + /* expected */ + checkErrorMessage(ex); + } + + /* Query with variables */ + insert = "declare $sk string; $pk string; " + + "upsert into test2pk(sk, pk, s) values($sk, $pk, 'a')"; + preq = new PrepareRequest().setStatement(insert); + pret = handle.prepare(preq); + pstmt = pret.getPreparedStatement(); + + pstmt.setVariable("$sk", new StringValue(s32)); + pstmt.setVariable("$pk", new StringValue(s32)); + qreq = new QueryRequest().setPreparedStatement(pstmt); + qret = handle.query(qreq); + assertEquals(1, qret.getResults().size()); + + /* Key size exceeded, sk: 33, pk: 32 */ + pstmt.clearVariables(); + pstmt.setVariable("$sk", new StringValue(s33)); + pstmt.setVariable("$pk", new StringValue(s32)); + qreq = new QueryRequest().setPreparedStatement(pstmt); + try { + qret = handle.query(qreq); + fail("Query should fail: key size exceeded"); + } catch (KeySizeLimitException ex) { + /* expected */ + checkErrorMessage(ex); + } + + /* Key size exceeded, sk: 32, pk: 33 */ + pstmt.clearVariables(); + pstmt.setVariable("$sk", new StringValue(s32)); + pstmt.setVariable("$pk", new StringValue(s33)); + qreq = new QueryRequest().setPreparedStatement(pstmt); + try { + qret = handle.query(qreq); + fail("Query should fail: key size exceeded"); + } catch (KeySizeLimitException ex) { + /* expected */ + checkErrorMessage(ex); + } + } + + /** + * Test index key size by populating a table before creating the + * index. Index creation should fail. 
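+     * The rows are written with a name value longer than the per-table
+     * index key size limit, so when index creation scans the existing rows
+     * it hits that limit and the DDL is rejected (surfacing as an
+     * IllegalArgumentException, as asserted below).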
+ */ + @Test + public void testCreateIndexFail() { + assumeTrue("Skip the test if onprem test or tenantLimits is not provided", + !onprem && tenantLimits != null); + + final int indexKeyLimit = tenantLimits.getStandardTableLimits() + .getIndexKeySizeLimit(); + createTable(); + + /* + * Populate with a bunch of values that will fail an index key size + * limit check. Use the name field. + */ + MapValue value = new MapValue() + .put("id", 1) + .put("name", makeName(indexKeyLimit+1)) + .putFromJson("json", "{\"a\": \"boo\"}", null); + PutRequest putReq = new PutRequest() + .setTableName("limitTable") + .setValue(value); + + for (int i = 0; i < 500; i++) { + value.put("sid", i); + handle.put(putReq); + } + + final String statementSize = "create index name on limitTable(name)"; + final String statementType = + "create index name on limitTable(json.a as integer)"; + try { + tableOperation(handle, statementSize, null, + TableResult.State.ACTIVE, 20000); + fail("Adding index should have failed"); + } catch (IllegalArgumentException iae) { + } + + /* + * Try adding an index on JSON with the wrong type. This will + * also fail. The rows have a string in that field. + */ + try { + tableOperation(handle, statementType, null, + TableResult.State.ACTIVE, 20000); + fail("Adding index should have failed"); + } catch (IllegalArgumentException iae) { + } + } + + @Test + public void testRequestSizeLimit() { + assumeTrue("Skip the test if onprem test or tenantLimits is not provided", + !onprem && tenantLimits != null); + + final TableRequestLimits limits = tenantLimits.getStandardTableLimits(); + final int reqSizeLimit = limits.getRequestSizeLimit(); + final int batchReqSizeLimit = limits.getBatchRequestSizeLimit(); + + createTable(); + + MapValue value = new MapValue() + .put("sid", 0) + .put("id", 1) + .put("name", "jack.smith") + .put("json", makeName(reqSizeLimit)); + PutRequest putReq = new PutRequest() + .setTableName("limitTable") + .setValue(value); + try { + handle.put(putReq); + fail("Put should have failed"); + } catch (RequestSizeLimitException ex) { + } + + /* + * WriteMultipleRequest with max number of operations and data size, + * it is expected to succeed. + */ + final int numOps = 50; + int dataSizePerOp = ROW_SIZE_LIMIT - 1024; + WriteMultipleRequest umReq = new WriteMultipleRequest(); + for (int i = 0; i < numOps; i++) { + value = new MapValue() + .put("sid", 0) + .put("id", i) + .put("name", "jack.smith") + .put("json", makeName(dataSizePerOp)); + umReq.add(new PutRequest() + .setTableName("limitTable") + .setValue(value), + false); + } + try { + handle.writeMultiple(umReq); + } catch (Exception ex) { + fail("WriteMultiple failed: " + ex.getMessage()); + } + + /* + * WriteMultipleRequest's request size exceeded batchReqSizeLimit, + * it is expected to fail. + */ + dataSizePerOp = batchReqSizeLimit/numOps; + umReq = new WriteMultipleRequest(); + for (int i = 0; i < numOps; i++) { + value = new MapValue() + .put("sid", 0) + .put("id", i) + .put("name", "jack.smith") + .put("json", makeName(dataSizePerOp)); + umReq.add(new PutRequest() + .setTableName("limitTable") + .setValue(value), + false); + } + try { + handle.writeMultiple(umReq); + fail("WriteMultiple should have failed"); + } catch (RequestSizeLimitException ex) { + } + + /* + * Each sub request size should not exceed REQUEST_SIZE_LIMIT. 
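+         * Even when the batch as a whole would fit within
+         * BATCH_REQUEST_SIZE_LIMIT, a single sub request whose value alone
+         * is REQUEST_SIZE_LIMIT bytes is expected to be rejected with
+         * RequestSizeLimitException.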
+ */ + dataSizePerOp = REQUEST_SIZE_LIMIT; + umReq = new WriteMultipleRequest(); + value = new MapValue() + .put("sid", 0) + .put("id", 0) + .put("name", "jack.smith") + .put("json", makeName(dataSizePerOp)); + umReq.add(new PutRequest() + .setTableName("limitTable") + .setValue(value), + false); + try { + handle.writeMultiple(umReq); + fail("WriteMultiple should have failed"); + } catch (RequestSizeLimitException ex) { + } + } + + /** + * Test the number of column limit per table. + */ + @Test + public void testColumnLimit() { + assumeTrue("Skip the test if not minicloud or cloud test or " + + "tenantLimits is not provided", + cloudRunning && tenantLimits != null); + + TableRequestLimits requestLimits = + tenantLimits.getStandardTableLimits(); + final int numFields = requestLimits.getColumnsPerTable(); + final TableLimits tableLimits = new TableLimits(1000, 1000, 1); + + String ddl = makeCreateTableDDL("columnLimitOK", numFields); + tableOperation(handle, ddl, tableLimits, State.ACTIVE, 20_000); + + /* + * Create a table with column number exceeded limit, it is expected + * to fail. + */ + ddl = makeCreateTableDDL("bad", numFields + 1); + try { + tableOperation(handle, ddl, tableLimits, State.ACTIVE, 20_000); + fail("Creating table with the number of columns that exceeded " + + "the limit should have fail"); + } catch (IllegalArgumentException iae) { + } + + /* + * Enforcing the # of columns limit via alter table is only + * supported by the "real" cloud -- minicloud or level 3 tests + */ + if (cloudRunning) { + /* + * Add one more field to a table which already has max number of + * column, it is expected to fail. + */ + ddl = "ALTER TABLE columnLimitOK(ADD nc1 INTEGER)"; + try { + tableOperation(handle, ddl, null, State.ACTIVE, 20_000); + fail("Adding an new field to a table with max number of " + + "columns should have failed."); + } catch (IllegalArgumentException iae) { + } + + /* Drop a column, then add an new column. */ + ddl = "ALTER TABLE columnLimitOK(drop c0)"; + tableOperation(handle, ddl, null, State.ACTIVE, 20_000); + ddl = "ALTER TABLE columnLimitOK(ADD nc1 INTEGER)"; + tableOperation(handle, ddl, null, State.ACTIVE, 20_000); + } + } + + /** + * Tests limits on total size and throughput allowed per-table and + * per-tenant. 
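+     * Each per-table cap (read units, write units, size) is first exceeded
+     * by one unit and the create is expected to fail with a
+     * DeploymentException naming the offending dimension; a table at the
+     * per-table maximum is then created and a second table is used to push
+     * the per-tenant aggregates over their limits.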
+ */ + @Test + public void testTableProvisioningLimits() { + /* + * This test aims to create tables exceeds the tenant capacity, it is + * not applicable in cloud test + */ + assumeTrue(useMiniCloud); + + TableRequestLimits requestLimits = + tenantLimits.getStandardTableLimits(); + final int maxRead = requestLimits.getTableReadUnits(); + final int maxWrite = requestLimits.getTableWriteUnits(); + final int maxSize = requestLimits.getTableSize(); + + /* TODO: when per-tenant limits are available get them */ + final int maxTenantRead = tenantLimits.getTenantReadUnits(); + final int maxTenantWrite = tenantLimits.getTenantWriteUnits(); + final int maxTenantSize = tenantLimits.getTenantSize(); + + TableLimits limits = new TableLimits(maxRead + 1, 1, 1); + String ddl = makeCreateTableDDL("limits", 2); + + assertDeploymentException(ddl, limits, null, "read"); + + limits = new TableLimits(1, maxWrite + 1, 1); + assertDeploymentException(ddl, limits, null, "write"); + + limits = new TableLimits(1, 1, maxSize + 1); + assertDeploymentException(ddl, limits, null, "size"); + + /* make a table and try to evolve it past read limit */ + limits = new TableLimits(maxRead, maxWrite, maxSize); + tableOperation(handle, ddl, limits, State.ACTIVE, 20_000); + + limits = new TableLimits(maxRead+1, maxWrite, maxSize); + assertDeploymentException(null, limits, "limits", "read"); + + /* + * Test per-tenant limits by trying to create another table. If it's one + * table this only works if the per-table limit is >= 1/2 of the + * tenant limit. See ProxyTestBase's TenantLimits. + */ + limits = new TableLimits(maxTenantRead - maxRead + 1, 1, 1); + ddl = makeCreateTableDDL("limits1", 2); + assertDeploymentException(ddl, limits, null, + new String[] {"read", "tenant"}); + + limits = new TableLimits(1, maxTenantWrite - maxWrite + 1, 1); + assertDeploymentException(ddl, limits, null, + new String[] {"write", "tenant"}); + + limits = new TableLimits(1, 1, maxTenantSize - maxSize + 1); + assertDeploymentException(ddl, limits, null, + new String[] {"size", "tenant"}); + } + + /** + * Use a special tier and tenant for this test. Otherwise the + * operations that happened in other tests get involved in this + * test case. + */ + @Test + public void testOperationLimits() { + /* + * This test needs adjust the rate of ddl execution and table limits + * reduction, it is not applicable in cloud test + */ + assumeTrue(useMiniCloud); + + /* + * In order to isolate this test from others as well as allowing it + * to run more than once/day, use a timestamp on the test tier to + * make it unique. + */ + String suffix = Long.toString(System.currentTimeMillis()); + final String limitsTenant = "LimitsTenant." 
+ suffix; + final int ddlRate = 4; + final int redRate = 2; + + /* + * Throttling exceptions are retry-able so don't retry to get the + * right exception (vs timeout) + */ + handle = configNoRetryHandle(limitsTenant); + + /* run few operations ahead to warm up security cache */ + if (isSecure()) { + try { + createTable(limitsTenant); + handle.getTable(new GetTableRequest() + .setTableName(tableName)); + } catch (TableNotFoundException e) { + } + } + + int origDDLRate = tenantLimits.getDdlRequestsRate(); + int origReductionRate = tenantLimits.getTableLimitReductionsRate(); + tenantLimits.setDdlRequestsRate(ddlRate); + tenantLimits.setTableLimitReductionsRate(redRate); + addTier(limitsTenant, tenantLimits); + try { + for (int i = 0; i < 10; i++) { + try { + createTable(limitsTenant); + if (i == 5) { + fail("DDL operation should have failed"); + } + } catch (OperationThrottlingException e) { + // success + break; + } + } + /* reset DDL rate to avoid failure for that reason */ + tenantLimits.setDdlRequestsRate(origDDLRate); + addTier(limitsTenant, tenantLimits); + + /* + * 2 reductions are allowed. The 3rd should throw + */ + TableLimits limits = new TableLimits(10000, 20000, 50); + tableOperation(handle, null, limits, limitsTenant, tableName, + null /* matchETag */, State.ACTIVE, 20_000); + limits = new TableLimits(20000, 10000, 50); + tableOperation(handle, null, limits, limitsTenant, tableName, + null /* matchETag */, State.ACTIVE, 20_000); + limits = new TableLimits(19000, 10000, 50); + failLimitsChange(limits, limitsTenant); + + /* read */ + limits = new TableLimits(10000, 10000, 50); + failLimitsChange(limits, limitsTenant); + + /* write */ + limits = new TableLimits(20000, 1000, 50); + failLimitsChange(limits, limitsTenant); + + /* size */ + limits = new TableLimits(20000, 10000, 30); + failLimitsChange(limits, limitsTenant); + } finally { + /* cleanup */ + tenantLimits.setTableLimitReductionsRate(origDDLRate); + tenantLimits.setTableLimitReductionsRate(origReductionRate); + handle = configHandle(getProxyURL()); + deleteTier(limitsTenant); + } + } + + /** + * Test tenant max auto scaling table count and limits mode max change per + * day. + */ + @Test + public void testAutoScalingTableLimits() { + assumeTrue("Skip the test if not minicloud or cloud test or " + + "tenantLimits is not provided", + cloudRunning && tenantLimits != null); + + /* + * Create 3 auto scaling tables. + */ + final String CREATE_TABLEX = "create table if not exists testusersX(" + + "id integer, name string, primary key(id))"; + final String CREATE_TABLE1 = "create table if not exists testusers1(" + + "id integer, name string, primary key(id))"; + final String CREATE_TABLE2 = "create table if not exists testusers2(" + + "id integer, name string, primary key(id))"; + TableResult tres = tableOperation(handle, + CREATE_TABLEX, + new TableLimits(20), + 20000); + verifyAutoScalingResult(tres, 20); + tres = tableOperation(handle, + CREATE_TABLE1, + new TableLimits(30), + 20000); + verifyAutoScalingResult(tres, 30); + tres = tableOperation(handle, + CREATE_TABLE2, + new TableLimits(40), + 20000); + verifyAutoScalingResult(tres, 40); + + /* + * Cannot create more than 3 auto scaling tables. 
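+         * Auto scaling (ON_DEMAND) tables are the ones created above with
+         * the single-argument TableLimits(storageGB) constructor; with the
+         * tenant limited to 3 such tables, this 4th create is expected to
+         * fail with TableLimitException.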
+ */ + final String CREATE_TABLE3 = "create table if not exists testusers3(" + + "id integer, name string, primary key(id))"; + tableOperation(handle, + CREATE_TABLE3, + new TableLimits(50), + null, + TableResult.State.ACTIVE, + TableLimitException.class); + + /* + * Alter the table limits mode from AUTO_SCALING to PROVISIONED + */ + tres = tableOperation(handle, + null, + new TableLimits(30, 40, 50), + "testusersX", + TableResult.State.ACTIVE, + 20000); + assertEquals(TableResult.State.ACTIVE, tres.getTableState()); + assertEquals(30, tres.getTableLimits().getReadUnits()); + assertEquals(40, tres.getTableLimits().getWriteUnits()); + assertEquals(50, tres.getTableLimits().getStorageGB()); + assertEquals(PROVISIONED, tres.getTableLimits().getMode()); + + if (tenantLimits.getBillingModeChangeRate() == 1) { + /* + * Cannot change the limits mode any more after reaching mode max + * allowed changes per day. + */ + tableOperation(handle, + null, + new TableLimits(10), + "testusersX", + TableResult.State.ACTIVE, + OperationThrottlingException.class); + } else { + /* + * Alter the table limits mode from PROVISIONED to AUTO_SCALING + */ + tres = tableOperation(handle, + null, + new TableLimits(10), + "testusersX", + TableResult.State.ACTIVE, + 20000); + verifyAutoScalingResult(tres, 10); + + /* + * Cannot change the limits mode any more after reaching mode max + * allowed changes per day. + */ + tableOperation(handle, + null, + new TableLimits(300, 400, 500), + "testusersX", + TableResult.State.ACTIVE, + OperationThrottlingException.class); + } + } + + private void verifyAutoScalingResult(TableResult tres, int tableSize) { + assertEquals(TableResult.State.ACTIVE, tres.getTableState()); + assertEquals(tenantLimits.getAutoScalingTableReadUnits(), + tres.getTableLimits().getReadUnits()); + assertEquals(tenantLimits.getAutoScalingTableWriteUnits(), + tres.getTableLimits().getWriteUnits()); + assertEquals(tableSize, tres.getTableLimits().getStorageGB()); + assertEquals(ON_DEMAND, tres.getTableLimits().getMode()); + } + + private void failLimitsChange(TableLimits limits, String compartmentId) { + try { + tableOperation(handle, null, limits, compartmentId, tableName, + null /* matchETag */, State.ACTIVE, 20_000); + fail("Attempt at reduction should have failed"); + } catch (IllegalArgumentException | OperationThrottlingException iae) { + // success + } + } + + private void assertDeploymentException(String statement, + TableLimits limits, + String name, + String ... 
contains) { + try { + tableOperation(handle, statement, limits, + name, State.ACTIVE, 20_000); + fail("Operation should have thrown"); + } catch (DeploymentException de) { + for (String s : contains) { + assertTrue(de.getMessage(), de.getMessage().contains(s)); + } + } + } + + private String makeCreateTableDDL(String name, int numFields) { + final StringBuilder sb = new StringBuilder("CREATE TABLE "); + sb.append(name); + sb.append("(id INTEGER, "); + for (int i = 0; i < numFields - 1; i++) { + sb.append("c"); + sb.append(i); + sb.append(" STRING, "); + } + sb.append("PRIMARY KEY(id))"); + return sb.toString(); + } + + private String makeName(int len) { + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < len; i++) { + sb.append("a"); + } + return sb.toString(); + } + + /* shared by test cases */ + private void createTable() { + tableOperation(handle, createTableDDL, + new TableLimits(20000, 20000, 50), + TableResult.State.ACTIVE, 10000); + } + + /* shared by test cases */ + private void createTable(String compartmentId) { + tableOperation(handle, createTableDDL, + new TableLimits(20000, 20000, 50), + compartmentId, null /* tableName */, + null /* matchETag */, TableResult.State.ACTIVE, 10000); + } + + /* shared by test cases */ + private void createKeyTable() { + tableOperation(handle, createKeyTable1DDL, + new TableLimits(20000, 20000, 50), + TableResult.State.ACTIVE, 10000); + tableOperation(handle, createKeyTableIndexDDL, + null, + TableResult.State.ACTIVE, 10000); + tableOperation(handle, createKeyTable2DDL, + new TableLimits(20000, 20000, 50), + TableResult.State.ACTIVE, 10000); + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/MonitorStatsTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/MonitorStatsTest.java new file mode 100644 index 00000000..a2999044 --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/MonitorStatsTest.java @@ -0,0 +1,590 @@ +/*- + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * See the file LICENSE for redistribution information. 
+ * + */ + +package oracle.nosql.proxy; + +import static oracle.nosql.proxy.MonitorStats.ACTIVE_REQUEST_NAME; +import static oracle.nosql.proxy.MonitorStats.DATA_RESPONSE_READ_SIZE_NAME; +import static oracle.nosql.proxy.MonitorStats.DATA_RESPONSE_WRITE_SIZE_NAME; +import static oracle.nosql.proxy.MonitorStats.REQUEST_LABELS; +import static oracle.nosql.proxy.MonitorStats.REQUEST_LATENCY_NAME; +import static oracle.nosql.proxy.MonitorStats.REQUEST_SERVER_FAILED_NAME; +import static oracle.nosql.proxy.MonitorStats.REQUEST_THROTTLING_FAILED_NAME; +import static oracle.nosql.proxy.MonitorStats.REQUEST_TOTAL_NAME; +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.atomic.AtomicLong; + +import oracle.nosql.common.sklogger.Counter; +import oracle.nosql.common.sklogger.LongGauge; +import oracle.nosql.common.sklogger.MetricFamilySamples; +import oracle.nosql.common.sklogger.MetricFamilySamples.Sample; +import oracle.nosql.common.sklogger.MetricRegistry; +import oracle.nosql.common.sklogger.PerfQuantile; +import oracle.nosql.common.sklogger.SizeQuantile; +import oracle.nosql.common.sklogger.StatsData; +import oracle.nosql.driver.TableNotFoundException; +import oracle.nosql.driver.TimeToLive; +import oracle.nosql.driver.ops.DeleteRequest; +import oracle.nosql.driver.ops.DeleteResult; +import oracle.nosql.driver.ops.GetIndexesRequest; +import oracle.nosql.driver.ops.GetIndexesResult; +import oracle.nosql.driver.ops.GetRequest; +import oracle.nosql.driver.ops.GetResult; +import oracle.nosql.driver.ops.GetTableRequest; +import oracle.nosql.driver.ops.ListTablesRequest; +import oracle.nosql.driver.ops.ListTablesResult; +import oracle.nosql.driver.ops.MultiDeleteRequest; +import oracle.nosql.driver.ops.MultiDeleteResult; +import oracle.nosql.driver.ops.PutRequest; +import oracle.nosql.driver.ops.PutRequest.Option; +import oracle.nosql.driver.ops.PutResult; +import oracle.nosql.driver.ops.TableLimits; +import oracle.nosql.driver.ops.TableResult; +import oracle.nosql.driver.values.MapValue; +import oracle.nosql.proxy.MonitorStats.OperationType; + +import org.junit.Test; + +/** + * This is a Proxy concurrent smoke test along with monitor stats checking. + * It can also be used to generate monitor data samples by setting log + * configuration through System property java.util.logging.config.file and + * a longer test time through System property monitorSeconds. + */ +public class MonitorStatsTest extends ProxyTestBase { + + private static final String WATCHER_NAME = "MonitorStatsTest"; + /* + * The number of threads to run smoke test. This number should match the + * connection pool size in the NoSQLHandle config. 
+ */ + private static final int CONCURRENT_NUM = 3; + private static final String TABLE_PREFIX = "userStats"; + private ExecutorService executor = + Executors.newFixedThreadPool(CONCURRENT_NUM); + + /* + * Total requests for each type + */ + private final Map requestTotal; + /* + * Failed data requests for each type + */ + private final Map serverFailed; + private final Map userFailed; + private final Map throttlingFailed; + /* + * Total data operations for each type. Failed data request count 0 ops, + * and a multiple request count N ops. + */ + private final Map operationTotal; + /* + * Data operation charged metrics + */ + private final AtomicLong writeKBCharged = new AtomicLong(0); + private final AtomicLong readKBCharged = new AtomicLong(0); + + public MonitorStatsTest() { + requestTotal = new HashMap(); + serverFailed = new HashMap(); + userFailed = new HashMap(); + throttlingFailed = new HashMap(); + operationTotal = new HashMap(); + reset(); + } + + @Test + public void smokeTest() { + + final long startTime = System.nanoTime(); + /* + * Set monitorSeconds to a longer time if this test is used to generate + * monitor data samples that collected by MetricRegistry at background. + */ + int seconds = Integer.parseInt( + System.getProperty("monitorSeconds", "30")); + long totalTime = seconds * 1_000_000_000L; + + while(true) { + /* + * Submit number of smokeTest tasks. + */ + Collection> tasks = new ArrayList>(); + for (int k = 0; k < CONCURRENT_NUM; k++) { + final String tableName = TABLE_PREFIX + k; + tasks.add(new Callable() { + @Override + public Void call() { + smokeTest(tableName); + return null; + } + }); + } + try { + reset(); + List> futures = executor.invokeAll(tasks); + for(Future f : futures) { + f.get(); + } + } catch (InterruptedException e) { + fail("unexpected interrupt"); + } catch (ExecutionException e) { + fail("unexpected ExecutionException: " + e.getCause()); + } + checkMonitorData(); + if (System.nanoTime() - startTime > totalTime) { + break; + } + } + } + + private void reset() { + for(OperationType type : OperationType.values()) { + requestTotal.put(type, new AtomicLong(0)); + serverFailed.put(type, new AtomicLong(0)); + userFailed.put(type, new AtomicLong(0)); + throttlingFailed.put(type, new AtomicLong(0)); + operationTotal.put(type, new AtomicLong(0)); + } + writeKBCharged.set(0); + readKBCharged.set(0); + + // Reset metrics by using the same watcher name to get metrics. 
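+         // (Presumably the watcher tracks what it has already been handed,
+         // so pulling the current values for WATCHER_NAME here means the
+         // next checkMonitorData() pass only sees activity from the upcoming
+         // smokeTest run.)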
+ MetricRegistry.defaultRegistry.getAllMetricFactory(WATCHER_NAME); + } + + private void smokeTest(String tableName) { + + try { + + MapValue key = new MapValue().put("id", 10); + + MapValue value = new MapValue().put("id", 10).put("name", "jane"); + + TableResult tres; + /* DDL language error */ + try { + tres = tableOperation(handle, + "adrop table if exists " + tableName, + null, 20000); + fail("Expected IAE"); + } catch (IllegalArgumentException iae) { + } + requestTotal.get(OperationType.TABLE_REQUEST).incrementAndGet(); + userFailed.get(OperationType.TABLE_REQUEST).incrementAndGet(); + + /* drop a table */ + tres = tableOperation(handle, + "drop table if exists " + + tableName, + null, TableResult.State.DROPPED, 40000); + requestTotal.get(OperationType.TABLE_REQUEST).incrementAndGet(); + operationTotal.get(OperationType.TABLE_REQUEST).incrementAndGet(); + assertNotNull(tres.getTableName()); + + /* Create a table */ + tres = tableOperation( + handle, + "create table if not exists " + tableName + "(id integer, " + + "name string, primary key(id))", + new TableLimits(1000, 500, 50), + TableResult.State.ACTIVE, + 30000); + requestTotal.get(OperationType.TABLE_REQUEST).incrementAndGet(); + operationTotal.get(OperationType.TABLE_REQUEST).incrementAndGet(); + /* + * TODO + * There is a loop to get and wait table status, so we don't know + * the exact get table request count. + */ + requestTotal.get(OperationType.GET_TABLE).incrementAndGet(); + operationTotal.get(OperationType.GET_TABLE).incrementAndGet(); + assertEquals(TableResult.State.ACTIVE, tres.getTableState()); + + /* Create an index */ + tres = tableOperation( + handle, + "create index if not exists Name on " + tableName + "(name)", + null, + TableResult.State.ACTIVE, + 50000); + requestTotal.get(OperationType.TABLE_REQUEST).incrementAndGet(); + operationTotal.get(OperationType.TABLE_REQUEST).incrementAndGet(); + assertEquals(TableResult.State.ACTIVE, tres.getTableState()); + + /* GetTableRequest for table that doesn't exist */ + try { + GetTableRequest getTable = + new GetTableRequest() + .setTableName("not_a_table"); + tres = handle.getTable(getTable); + fail("Table should not be found"); + } catch (TableNotFoundException tnfe) {} + requestTotal.get(OperationType.GET_TABLE).incrementAndGet(); + userFailed.get(OperationType.GET_TABLE).incrementAndGet(); + + /* list tables */ + ListTablesRequest listTables = new ListTablesRequest(); + + /* ListTablesRequest returns ListTablesResult */ + ListTablesResult lres = handle.listTables(listTables); + assertNotNull(lres.toString()); + requestTotal.get(OperationType.LIST_TABLES).incrementAndGet(); + operationTotal.get(OperationType.LIST_TABLES).incrementAndGet(); + + /* Get indexes */ + GetIndexesRequest getIndexes = new GetIndexesRequest() + .setTableName(tableName); + + /* GetIndexesRquest returns GetIndexesResult */ + GetIndexesResult giRes = handle.getIndexes(getIndexes); + if (testV3) { + /* + * TODO: GetIndexesResult.toString() in v5.4 might need enhance + * to handle null for String[] fieldTypes. Otherwise, when force + * V3 protocol, giRes.toString() causes NPE. 
+ */ + assertNotNull(giRes); + } else { + assertNotNull(giRes.toString()); + } + requestTotal.get(OperationType.GET_INDEXES).incrementAndGet(); + operationTotal.get(OperationType.GET_INDEXES).incrementAndGet(); + + /* PUT */ + PutRequest putRequest = new PutRequest() + .setValue(value) + .setTableName(tableName); + + PutResult res = handle.put(putRequest); + requestTotal.get(OperationType.PUT).incrementAndGet(); + operationTotal.get(OperationType.PUT).incrementAndGet(); + readKBCharged.addAndGet(res.getReadUnitsInternal()); + writeKBCharged.addAndGet(res.getWriteUnitsInternal()); + assertNotNull("Put failed", res.getVersion()); + assertWriteKB(res); + assertNull(res.getExistingValue()); + assertNull(res.getExistingVersion()); + + /* put a few more. set TTL to test that path */ + putRequest.setTTL(TimeToLive.ofHours(2)); + for (int i = 20; i < 30; i++) { + value.put("id", i); + res = handle.put(putRequest); + requestTotal.get(OperationType.PUT).incrementAndGet(); + operationTotal.get(OperationType.PUT).incrementAndGet(); + readKBCharged.addAndGet(res.getReadUnitsInternal()); + writeKBCharged.addAndGet(res.getWriteUnitsInternal()); + } + + /* + * Test ReturnRow for simple put of a row that exists. 2 cases: + * 1. unconditional (will return info) + * 2. if absent (will return info) + */ + value.put("id", 20); + putRequest.setReturnRow(true); + PutResult pr = handle.put(putRequest); + requestTotal.get(OperationType.PUT).incrementAndGet(); + operationTotal.get(OperationType.PUT).incrementAndGet(); + readKBCharged.addAndGet(pr.getReadUnitsInternal()); + writeKBCharged.addAndGet(pr.getWriteUnitsInternal()); + assertNotNull(pr.getVersion()); // success + assertNotNull(pr.getExistingVersion()); + assertNotNull(pr.getExistingValue()); + assertTrue(pr.getExistingModificationTime() != 0); + assertReadKB(pr); + + putRequest.setOption(Option.IfAbsent); + pr = handle.put(putRequest); + requestTotal.get(OperationType.PUT).incrementAndGet(); + operationTotal.get(OperationType.PUT).incrementAndGet(); + readKBCharged.addAndGet(pr.getReadUnitsInternal()); + writeKBCharged.addAndGet(pr.getWriteUnitsInternal()); + assertNull(pr.getVersion()); // failure + assertNotNull(pr.getExistingVersion()); + assertNotNull(pr.getExistingValue()); + assertTrue(pr.getExistingModificationTime() != 0); + assertReadKB(pr); + + /* clean up */ + putRequest.setReturnRow(false); + putRequest.setOption(null); + + /* GET */ + GetRequest getRequest = new GetRequest() + .setKey(key) + .setTableName(tableName); + + GetResult res1 = handle.get(getRequest); + requestTotal.get(OperationType.GET).incrementAndGet(); + operationTotal.get(OperationType.GET).incrementAndGet(); + readKBCharged.addAndGet(res1.getReadUnitsInternal()); + writeKBCharged.addAndGet(res1.getWriteUnitsInternal()); + assertNotNull("Get failed", res1.getJsonValue()); + assertReadKB(res1); + + /* DELETE */ + DeleteRequest delRequest = new DeleteRequest() + .setKey(key) + .setTableName(tableName); + + DeleteResult del = handle.delete(delRequest); + requestTotal.get(OperationType.DELETE).incrementAndGet(); + operationTotal.get(OperationType.DELETE).incrementAndGet(); + readKBCharged.addAndGet(del.getReadUnitsInternal()); + writeKBCharged.addAndGet(del.getWriteUnitsInternal()); + assertTrue("Delete failed", del.getSuccess()); + assertWriteKB(del); + + /* GET -- no row, it was removed above */ + getRequest.setTableName(tableName); + res1 = handle.get(getRequest); + requestTotal.get(OperationType.GET).incrementAndGet(); + operationTotal.get(OperationType.GET).incrementAndGet(); 
+ readKBCharged.addAndGet(res1.getReadUnitsInternal()); + writeKBCharged.addAndGet(res1.getWriteUnitsInternal()); + assertNull(res1.getValue()); + assertReadKB(res1); + + /* MULTIDELETE */ + MultiDeleteRequest multiDelRequest = new MultiDeleteRequest(); + multiDelRequest.setKey(new MapValue().put("id", 21)); + multiDelRequest.setTableName(tableName); + MultiDeleteResult multiRes = handle.multiDelete(multiDelRequest); + requestTotal.get(OperationType.MULTI_DELETE).incrementAndGet(); + operationTotal.get(OperationType.MULTI_DELETE).incrementAndGet(); + readKBCharged.addAndGet(multiRes.getReadUnitsInternal()); + writeKBCharged.addAndGet(multiRes.getWriteUnitsInternal()); + assertWriteKB(multiRes); + + /* MULTIDELETE -- no table */ + multiDelRequest.setKey(new MapValue().put("id", 0)); + multiDelRequest.setTableName("InvalidTable"); + try { + handle.multiDelete(multiDelRequest); + fail("Attempt to access missing table should have thrown"); + } catch (TableNotFoundException nse) { + // success + } + requestTotal.get(OperationType.MULTI_DELETE).incrementAndGet(); + userFailed.get(OperationType.MULTI_DELETE).incrementAndGet(); + + /* GET -- no table */ + try { + getRequest.setTableName("foo"); + res1 = handle.get(getRequest); + fail("Attempt to access missing table should have thrown"); + } catch (TableNotFoundException nse) { + // success + } + requestTotal.get(OperationType.GET).incrementAndGet(); + userFailed.get(OperationType.GET).incrementAndGet(); + + /* PUT -- invalid row -- this will throw */ + try { + value.remove("id"); + value.put("not_a_field", 1); + res = handle.put(putRequest); + fail("Attempt to put invalid row should have thrown"); + } catch (IllegalArgumentException iae) { + // success + } + requestTotal.get(OperationType.PUT).incrementAndGet(); + userFailed.get(OperationType.PUT).incrementAndGet(); + } catch (Exception e) { + e.printStackTrace(); + fail("Exception in test: " + e); + } + } + + /* + * Check collected metrics are expected after executing some SC requests + * and data requests. + */ + @SuppressWarnings("unchecked") + private void checkMonitorData() { + for (MetricFamilySamples metricFamily : + MetricRegistry.defaultRegistry.getAllMetricFactory( + WATCHER_NAME)) { + final String metricName = metricFamily.getName(); + if (metricName.equals(ACTIVE_REQUEST_NAME)) { + assertEquals(StatsData.Type.LONG_GAUGE, metricFamily.getType()); + assertEquals(0, metricFamily.getLabelNames().size()); + for (Sample s : metricFamily.getSamples()) { + Sample sample = + (Sample) s; + assertEquals(0, sample.labelValues.size()); + assertEquals(0, sample.dataValue.getGaugeVal()); + } + } else if (metricName.equals(REQUEST_TOTAL_NAME)) { + assertEquals(StatsData.Type.COUNTER, metricFamily.getType()); + assertArrayEquals(REQUEST_LABELS, + metricFamily.getLabelNames().toArray()); + for (Sample s : metricFamily.getSamples()) { + Sample sample = + (Sample) s; + assertEquals(REQUEST_LABELS.length, + sample.labelValues.size()); + OperationType opType = getOperationType(sample.labelValues); + if (opType.equals(OperationType.GET_TABLE)) { + /* + * TODO + * There is a loop to get and wait table status, so we + * don't know the exact get table request count. 
+                         */
+                        assertTrue(sample.labelValues + " reqTotal error",
+                            sample.dataValue.getCount() >=
+                            operationTotal.get(opType).get());
+                    } else {
+                        assertEquals(sample.labelValues + " reqTotal error",
+                            requestTotal.get(opType).get(),
+                            sample.dataValue.getCount());
+                    }
+                }
+            } else if (metricName.equals(REQUEST_LATENCY_NAME)) {
+                assertEquals(StatsData.Type.PERF_QUANTILE,
+                    metricFamily.getType());
+                assertArrayEquals(REQUEST_LABELS,
+                    metricFamily.getLabelNames().toArray());
+                for (Sample s : metricFamily.getSamples()) {
+                    Sample sample =
+                        (Sample) s;
+                    assertEquals(REQUEST_LABELS.length,
+                        sample.labelValues.size());
+                    assertEquals(0, sample.dataValue.getOverflowCount());
+                    assertTrue("99th can't be negative",
+                        sample.dataValue.get99th() >= 0);
+                    assertTrue("95th can't be negative",
+                        sample.dataValue.get95th() >= 0);
+                    assertTrue("min can't be negative",
+                        sample.dataValue.getMin() >= 0);
+                    assertTrue("max can't be less than min",
+                        sample.dataValue.getMax() >=
+                        sample.dataValue.getMin());
+                    OperationType opType = getOperationType(sample.labelValues);
+                    if (opType.equals(OperationType.GET_TABLE)) {
+                        /*
+                         * TODO
+                         * There is a loop to get and wait for table status,
+                         * so we don't know the exact get table request count.
+                         */
+                        assertTrue(sample.labelValues + " opsTotal error",
+                            sample.dataValue.getOperationCount() >=
+                            operationTotal.get(opType).get());
+                        assertTrue(sample.labelValues + " requestCount error",
+                            sample.dataValue.getRequestCount() >=
+                            requestTotal.get(opType).get() -
+                            serverFailed.get(opType).get() -
+                            throttlingFailed.get(opType).get() -
+                            userFailed.get(opType).get());
+                    } else {
+                        assertEquals(sample.labelValues + " opsTotal error",
+                            operationTotal.get(opType).get(),
+                            sample.dataValue.getOperationCount());
+                        assertEquals(sample.labelValues + " requestCount error",
+                            requestTotal.get(opType).get() -
+                            serverFailed.get(opType).get() -
+                            throttlingFailed.get(opType).get() -
+                            userFailed.get(opType).get(),
+                            sample.dataValue.getRequestCount());
+                    }
+                }
+            } else if (metricName.equals(DATA_RESPONSE_READ_SIZE_NAME)) {
+                assertEquals(StatsData.Type.SIZE_QUANTILE,
+                    metricFamily.getType());
+                assertEquals(0, metricFamily.getLabelNames().size());
+                for (Sample s : metricFamily.getSamples()) {
+                    Sample sample =
+                        (Sample) s;
+                    assertEquals(0, sample.labelValues.size());
+                    assertEquals(readKBCharged.get(),
+                        sample.dataValue.getSum());
+                    for(double perfVal : sample.dataValue.getQuantileValues()) {
+                        assertTrue("perf value can't be negative",
+                            perfVal >= 0);
+                    }
+                }
+            } else if (metricName.equals(DATA_RESPONSE_WRITE_SIZE_NAME)) {
+                assertEquals(StatsData.Type.SIZE_QUANTILE,
+                    metricFamily.getType());
+                assertEquals(0, metricFamily.getLabelNames().size());
+                for (Sample s : metricFamily.getSamples()) {
+                    Sample sample =
+                        (Sample) s;
+                    assertEquals(0, sample.labelValues.size());
+                    assertEquals(writeKBCharged.get(),
+                        sample.dataValue.getSum());
+                    for(double perfVal : sample.dataValue.getQuantileValues()) {
+                        assertTrue("perf value can't be negative",
+                            perfVal >= 0);
+                    }
+                }
+            } else if (metricName.equals(REQUEST_SERVER_FAILED_NAME)) {
+                assertEquals(StatsData.Type.COUNTER, metricFamily.getType());
+                assertArrayEquals(REQUEST_LABELS,
+                    metricFamily.getLabelNames().toArray());
+                for (Sample s : metricFamily.getSamples()) {
+                    Sample sample =
+                        (Sample) s;
+                    assertEquals(REQUEST_LABELS.length,
+                        sample.labelValues.size());
+                    OperationType opType = getOperationType(sample.labelValues);
+                    assertEquals(serverFailed.get(opType).get(),
+                        sample.dataValue.getCount());
+                }
+            } else if
+                (metricName.equals(REQUEST_THROTTLING_FAILED_NAME)) {
+                assertEquals(StatsData.Type.COUNTER, metricFamily.getType());
+                assertArrayEquals(REQUEST_LABELS,
+                    metricFamily.getLabelNames().toArray());
+                for (Sample s : metricFamily.getSamples()) {
+                    Sample sample =
+                        (Sample) s;
+                    assertEquals(REQUEST_LABELS.length,
+                        sample.labelValues.size());
+                    OperationType opType = getOperationType(sample.labelValues);
+                    assertEquals(throttlingFailed.get(opType).get(),
+                        sample.dataValue.getCount());
+                }
+            } else if (metricName.startsWith(KVHandleStats.KV_HANDLE_NAME)) {
+                /* TODO: check kvstore handle metrics? */
+            } else {
+                fail("unknown metric name: " + metricName);
+            }
+        }
+    }
+
+    /*
+     * Check and convert operation label values to an OperationType.
+     */
+    private OperationType getOperationType(List opLabelValues) {
+        assertEquals(1, opLabelValues.size());
+        for(OperationType type : OperationType.values()) {
+            if (type.getValue()[0].equals(opLabelValues.get(0))) {
+                return type;
+            }
+        }
+        fail("Unknown label values: " + opLabelValues.get(0));
+        return null;
+    }
+}
diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/MultiDeleteTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/MultiDeleteTest.java
new file mode 100644
index 00000000..5ae4e73d
--- /dev/null
+++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/MultiDeleteTest.java
@@ -0,0 +1,306 @@
+/*-
+ * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved.
+ *
+ * See the file LICENSE for redistribution information.
+ *
+ */
+
+package oracle.nosql.proxy;
+
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import oracle.nosql.driver.FieldRange;
+import oracle.nosql.driver.TableNotFoundException;
+import oracle.nosql.driver.ops.MultiDeleteRequest;
+import oracle.nosql.driver.ops.MultiDeleteResult;
+import oracle.nosql.driver.ops.PutRequest;
+import oracle.nosql.driver.ops.PutResult;
+import oracle.nosql.driver.ops.TableLimits;
+import oracle.nosql.driver.ops.TableResult;
+import oracle.nosql.driver.values.IntegerValue;
+import oracle.nosql.driver.values.MapValue;
+
+import org.junit.Test;
+
+/**
+ * Tests for the MultiDelete operation.
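+ *
+ * The success cases below exercise size-limited deletions: each call gives
+ * MultiDelete a maxWriteKB budget and resumes with the continuation key
+ * until the targeted shard-key range is fully deleted. An illustrative
+ * sketch of that resume loop (assumed usage, mirroring runMultiDelete
+ * below; not executed by the test):
+ * <pre>
+ *   MultiDeleteRequest req = new MultiDeleteRequest()
+ *       .setTableName(tableName)
+ *       .setKey(new MapValue().put("sid", 0))
+ *       .setMaxWriteKB(10);
+ *   byte[] ck = null;
+ *   do {
+ *       MultiDeleteResult res = handle.multiDelete(req.setContinuationKey(ck));
+ *       ck = res.getContinuationKey();
+ *   } while (ck != null);
+ * </pre>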
+ */
+public class MultiDeleteTest extends ProxyTestBase {
+    final static int WRITE_KB_LIMIT = rlimits.getRequestWriteKBLimit();
+
+    final static String tableName = "multiDeleteTable";
+
+    /* Create a table */
+    final static String createTableDDL =
+        "CREATE TABLE IF NOT EXISTS multiDeleteTable(" +
+        "sid INTEGER, id INTEGER, name STRING, longString STRING, " +
+        "PRIMARY KEY(SHARD(sid), id))";
+
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        tableOperation(handle, createTableDDL,
+            new TableLimits(10000, 10000, 50),
+            TableResult.State.ACTIVE, 10000);
+    }
+
+    @Override
+    public void tearDown() throws Exception {
+        deleteTable(tableName);
+        super.tearDown();
+    }
+
+    /**
+     * Test success cases.
+     */
+    @Test
+    public void testMultiDelete() {
+        final int numMajor = 5;
+        final int numPerMajor = 100;
+        final int recordKB = 2;
+
+        loadRows(numMajor, numPerMajor, recordKB);
+
+        /* Deletes rows with the shard key {"sid":0}, maxWriteKB = 0 */
+        int maxWriteKB = 0;
+        MapValue pKey = new MapValue().put("sid", 0);
+        runMultiDelete(pKey, null, maxWriteKB, numPerMajor, recordKB);
+
+        /* Deletes rows with shard key {"sid":1}, maxWriteKB = 10 */
+        maxWriteKB = 10;
+        pKey.put("sid", 1);
+        runMultiDelete(pKey, null, maxWriteKB, numPerMajor, recordKB);
+
+        /* Deletes rows with shard key {"sid":2}, maxWriteKB = 51 */
+        maxWriteKB = 51;
+        pKey.put("sid", 2);
+        runMultiDelete(pKey, null, maxWriteKB, numPerMajor, recordKB);
+
+        /*
+         * Deletes rows with shard key {"sid":3} and "id" < 10,
+         * maxWriteKB = 8.
+         */
+        FieldRange range;
+        maxWriteKB = 8;
+        range = new FieldRange("id").setEnd(new IntegerValue(10), false);
+        pKey.put("sid", 3);
+        runMultiDelete(pKey, range, maxWriteKB, 10, recordKB);
+
+        /*
+         * Deletes rows with shard key {"sid":3} and 10 <= "id" <= 19,
+         * maxWriteKB = 18
+         */
+        maxWriteKB = 18;
+        range = new FieldRange("id")
+            .setStart(new IntegerValue(10), true)
+            .setEnd(new IntegerValue(19), true);
+        runMultiDelete(pKey, range, maxWriteKB, 10, recordKB);
+
+        /*
+         * Deletes rows with shard key {"sid":3} and 20 <= "id" < 31,
+         * maxWriteKB = 20
+         */
+        maxWriteKB = 20;
+        range = new FieldRange("id")
+            .setStart(new IntegerValue(20), true)
+            .setEnd(new IntegerValue(31), false);
+        runMultiDelete(pKey, range, maxWriteKB, 11, recordKB);
+
+        /*
+         * Deletes rows with shard key {"sid":3} and "id" >= 31,
+         * maxWriteKB = 25
+         */
+        maxWriteKB = 25;
+        range = new FieldRange("id").setStart(new IntegerValue(31), true);
+        runMultiDelete(pKey, range, maxWriteKB, numPerMajor - 31, recordKB);
+        runMultiDelete(pKey, range, maxWriteKB, 0, recordKB);
+
+        /*
+         * Deletes rows with shard key {"sid":4} and 10 <= "id" <= 19,
+         * maxWriteKB = 0
+         */
+        maxWriteKB = 0;
+        pKey.put("sid", 4);
+        range = new FieldRange("id").setStart(new IntegerValue(10), true)
+            .setEnd(new IntegerValue(19), true);
+        runMultiDelete(pKey, range, maxWriteKB, 10, recordKB);
+    }
+
+    /* Test MultiDelete failures due to invalid arguments */
+    @Test
+    public void testInvalidArgument() {
+
+        MultiDeleteRequest req = new MultiDeleteRequest();
+
+        /* Missing tableName */
+        execMultiDeleteExpIAE(req);
+
+        /* Missing a key */
+        req.setTableName(tableName);
+        execMultiDeleteExpIAE(req);
+
+        /* Invalid primary key */
+        req.setKey(new MapValue().put("name", 0));
+        execMultiDeleteExpIAE(req);
+
+        /* Missing shard field from the primary key */
+        req.setKey(new MapValue().put("id", 0));
+        execMultiDeleteExpIAE(req);
+
+        /* Invalid FieldRange */
+        req.setKey(new MapValue().put("sid", 0));
+        FieldRange range = new FieldRange("name") +
.setStart(new IntegerValue(1), false); + req.setRange(range); + execMultiDeleteExpIAE(req); + + /* Invalid FieldRange */ + range = new FieldRange("id") + .setStart(new IntegerValue(1), false) + .setEnd(new IntegerValue(0), true); + req.setRange(range); + execMultiDeleteExpIAE(req); + + /* maxWriteKB should be >= 0 */ + try { + req.setMaxWriteKB(-1); + fail("Expect to catch IAE but not"); + } catch (IllegalArgumentException ignored) { + } + + /* maxWriteKB can not exceed WRITE_KB_LIMIT */ + req.setMaxWriteKB(WRITE_KB_LIMIT + 1); + execMultiDeleteExpIAE(req); + + /* Table not found */ + req.setTableName("InvalidTable"); + try { + execMultiDeleteExpIAE(req); + } catch (TableNotFoundException ignored) { + } + } + + /** + * Runs MultiDelete request and verify its result. + */ + private void runMultiDelete(MapValue pKey, + FieldRange range, + int maxWriteKB, + int expNumDeleted, + int recordKB) { + + int nDeleted = 0; + int totalReadKB = 0; + int totalWriteKB = 0; + int totalReadUnits = 0; + int totalWriteUnits = 0; + + byte[] continuationKey = null; + + final int minRead = getMinRead(); + int expWriteKB = 0; + int expReadKB = 0; + int writeKBLimit = (maxWriteKB != 0) ? maxWriteKB : WRITE_KB_LIMIT; + + while(true) { + MultiDeleteResult ret = execMultiDelete(pKey, continuationKey, + range, maxWriteKB); + nDeleted += ret.getNumDeletions(); + totalReadKB += ret.getReadKB(); + totalWriteKB += ret.getWriteKB(); + totalReadUnits += ret.getReadUnits(); + totalWriteUnits += ret.getWriteUnits(); + + if (!onprem) { + if (ret.getNumDeletions() > 0) { + assertTrue(ret.getWriteKB() > 0 && ret.getReadKB() > 0); + } else { + expReadKB += minRead; + } + } + + if (ret.getContinuationKey() == null) { + break; + } + + if (!onprem) { + assertTrue(ret.getWriteUnits() >= writeKBLimit && + ret.getWriteUnits() < writeKBLimit + recordKB); + } + } + + assertTrue(nDeleted == expNumDeleted); + + if (onprem) { + return; + } + + expWriteKB += nDeleted * recordKB; + expReadKB += nDeleted * minRead; + + assertReadKB(expReadKB, totalReadKB, totalReadUnits, + true /* isAbsolute */); + + assertWriteKB(expWriteKB, totalWriteKB, totalWriteUnits); + } + + /** + * Executes the MultiDelete request. + */ + private MultiDeleteResult execMultiDelete(MapValue key, + byte[] continuationKey, + FieldRange range, + int maxWriteKB) { + + MultiDeleteRequest mdReq = new MultiDeleteRequest() + .setTableName(tableName) + .setKey(key) + .setContinuationKey(continuationKey) + .setRange(range) + .setMaxWriteKB(maxWriteKB); + + return handle.multiDelete(mdReq); + } + + /** + * Executes the MultiDelete request, it is expected to catch IAE. 
+ */ + private void execMultiDeleteExpIAE(MultiDeleteRequest mdReq) { + try { + handle.multiDelete(mdReq); + fail("Expect to catch IAE but not"); + } catch (IllegalArgumentException ignored) { + } + } + + private void loadRows(int numMajor, int numPerMajor, int nKB) { + + MapValue value = new MapValue(); + PutRequest putRequest = new PutRequest() + .setValue(value) + .setTableName(tableName); + + /* Load rows */ + final String longString = genString((nKB - 1) * 1024); + for (int i = 0; i < numMajor; i++) { + value.put("sid", i); + for (int j = 0; j < numPerMajor; j++) { + value.put("id", j); + value.put("name", "name_" + i + "_" + j); + value.put("longString", longString); + PutResult res = handle.put(putRequest); + assertNotNull("Put failed", res.getVersion()); + } + } + } + + private String genString(int len) { + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < len; i++) { + sb.append((char)('A' + i % 26)); + } + return sb.toString(); + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/MultiRegionTableTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/MultiRegionTableTest.java new file mode 100644 index 00000000..4c8533e8 --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/MultiRegionTableTest.java @@ -0,0 +1,675 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2011, 2023 Oracle and/or its affiliates. All rights reserved. + * + */ + +package oracle.nosql.proxy; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assume.assumeTrue; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; + +import org.junit.BeforeClass; +import org.junit.Test; + +import oracle.nosql.driver.ops.DeleteRequest; +import oracle.nosql.driver.ops.DeleteResult; +import oracle.nosql.driver.ops.GetRequest; +import oracle.nosql.driver.ops.GetResult; +import oracle.nosql.driver.ops.MultiDeleteRequest; +import oracle.nosql.driver.ops.MultiDeleteResult; +import oracle.nosql.driver.ops.PrepareRequest; +import oracle.nosql.driver.ops.PrepareResult; +import oracle.nosql.driver.ops.PreparedStatement; +import oracle.nosql.driver.ops.PutRequest; +import oracle.nosql.driver.ops.PutRequest.Option; +import oracle.nosql.driver.ops.PutResult; +import oracle.nosql.driver.ops.QueryIterableResult; +import oracle.nosql.driver.ops.QueryRequest; +import oracle.nosql.driver.ops.TableLimits; +import oracle.nosql.driver.ops.TableResult; +import oracle.nosql.driver.ops.WriteMultipleRequest; +import oracle.nosql.driver.ops.WriteMultipleResult; +import oracle.kv.Consistency; +import oracle.kv.KVStore; +import oracle.kv.KVStoreConfig; +import oracle.kv.KVStoreFactory; +import oracle.kv.Value; +import oracle.kv.ValueVersion; +import oracle.kv.impl.api.table.FieldDefImpl; +import oracle.kv.impl.api.table.FieldValueImpl; +import oracle.kv.impl.api.table.PrimaryKeyImpl; +import oracle.kv.impl.api.table.RowImpl; +import oracle.kv.impl.api.table.TableAPIImpl; +import oracle.kv.impl.api.table.TableImpl; +import oracle.kv.impl.api.table.TablePath; +import oracle.kv.table.PrimaryKey; +import oracle.kv.table.ReadOptions; +import oracle.nosql.driver.Version; +import oracle.nosql.driver.values.FieldValue; +import oracle.nosql.driver.values.IntegerValue; +import oracle.nosql.driver.values.LongValue; +import 
oracle.nosql.driver.values.MapValue; +import oracle.nosql.driver.values.NumberValue; +import oracle.nosql.driver.values.StringValue; + +public class MultiRegionTableTest extends ProxyTestBase { + private static final ReadOptions readOptions = + new ReadOptions(Consistency.ABSOLUTE, 0, null); + + private static KVStore store; + private static TableAPIImpl tableAPI; + + private static final String MR_TABLE = "mrtable"; + + private final TableLimits limits = new TableLimits(100, 100, 1); + private final int WAIT_MS = 10000; + + @BeforeClass + public static void staticSetUp() + throws Exception { + + assumeTrue("Skipping MultiRegionTableTest if run in onprem mode", + !Boolean.getBoolean(ONPREM_PROP)); + + System.setProperty(TEST_MRTABLE_PROP, MR_TABLE); + ProxyTestBase.staticSetUp(); + + if (!cloudRunning) { + store = getStore(false /* excludeTombstone */); + tableAPI = (TableAPIImpl)store.getTableAPI(); + } + } + + @Test + public void testBasicOps() { + /* local proxy test only */ + assumeTrue("Skipping testBasicOps in minicloud test", !cloudRunning); + runBasicOps(MR_TABLE, true); + runBasicOps("singleton", false); + } + + public void runBasicOps(String tableName, boolean isMRTable) { + String tableDdl = "CREATE TABLE IF NOT EXISTS " + tableName + + "(sid INTEGER, " + + " id INTEGER, " + + " s STRING, " + + " j JSON, " + + " PRIMARY KEY(SHARD(sid), id))"; + + TableResult tret = tableOperation(handle, tableDdl, limits, WAIT_MS); + String kvTableName = getKVTableName(tret); + + int sid = 0; + int id = 1; + MapValue row = createRow(sid, id); + Version ver; + + /* put */ + ver = doPut(tableName, row, null /* Option */, null /* matchVersion */); + assertNotNull(ver); + checkRow(row, tableName, kvTableName); + + /* putIfPresent */ + ver = doPut(tableName, row, Option.IfPresent, null /* matchVersion */); + assertNotNull(ver); + checkRow(row, tableName, kvTableName); + + /* putIfVersion */ + ver = doPut(tableName, row, Option.IfVersion, ver); + assertNotNull(ver); + checkRow(row, tableName, kvTableName); + + /* delete */ + boolean deleted = doDelete(tableName, row, null /* matchVersion */); + assertTrue(deleted); + checkRowDeleted(row, isMRTable, tableName, kvTableName); + + /* putIfAbsent */ + ver = doPut(tableName, row, Option.IfAbsent, null /* matchVersion */); + assertNotNull(ver); + checkRow(row, tableName, kvTableName); + + /* deleteIfVersion */ + deleted = doDelete(tableName, row, ver); + assertTrue(deleted); + checkRowDeleted(row, isMRTable, tableName, kvTableName); + + /* writeMultiple */ + sid++; + int numOps = 3; + List rows = new ArrayList<>(); + for (int i = 0; i < numOps; i++) { + rows.add(createRow(sid, i)); + } + doWriteMultiple(tableName, rows, true /* putOp */); + for (MapValue val : rows) { + checkRow(val, tableName, kvTableName); + } + doWriteMultiple(tableName, rows, false /* putOp */); + for (MapValue val : rows) { + checkRowDeleted(val, isMRTable, tableName, kvTableName); + } + + /* multiDelete */ + sid++; + rows.clear(); + for (int i = 0; i < numOps; i++) { + row = createRow(sid, i); + assertNotNull(doPut(tableName, row, null, null)); + rows.add(createRow(sid, i)); + } + + MapValue key = new MapValue().put("sid", sid); + int ndel = doMultiDelete(tableName, key); + assertEquals(numOps, ndel); + for (MapValue val : rows) { + checkRowDeleted(val, isMRTable, tableName, kvTableName); + } + + /* query */ + String query; + List results; + Map values = new HashMap<>(); + + sid++; + row = createRow(sid, 0); + values.put("$sid", row.get("sid")); + values.put("$id", row.get("id")); + 
values.put("$s", row.get("s")); + values.put("$j", row.get("j")); + + query = "DECLARE $sid INTEGER; $id INTEGER; $s STRING; $j JSON; " + + "INSERT INTO " + tableName + + "(sid, id, s, j) VALUES($sid, $id, $s, $j)"; + results = doQuery(query, values); + checkRow(row, tableName, kvTableName); + + FieldValue sval = new StringValue(row.get("s").getString() + "_upd"); + row.put("s", sval); + values.put("$s", sval); + values.remove("$j"); + query = "DECLARE $sid INTEGER; $id INTEGER; $s STRING;" + + "UPDATE " + tableName + + " SET s = $s WHERE sid = $sid and id = $id"; + results = doQuery(query, values); + assertEquals(1, results.size()); + checkRow(row, tableName, kvTableName); + + query = "SELECT * FROM " + tableName; + results = doQuery(query); + assertEquals(1, results.size()); + assertEquals(row, results.get(0)); + + values.remove("$s"); + values.remove("$j"); + query = "DECLARE $sid INTEGER; $id INTEGER; DELETE FROM " + + tableName + " WHERE sid = $sid and id = $id"; + results = doQuery(query, values); + assertEquals(1, results.size()); + checkRowDeleted(row, isMRTable, tableName, kvTableName); + + /* query without being prepared */ + sid++; + id = 1; + row = createRow(sid, id); + assertNotNull(doPut(tableName, row, null, null)); + query = "DELETE FROM " + tableName + " WHERE sid = " + sid + + " and id = " + id; + results = doQuery(query); + assertEquals(1, results.size()); + checkRowDeleted(row, isMRTable, tableName, kvTableName); + + dropTable(handle, tableName); + } + + @Test + public void testCRDT() { + runCRDTTest("testCRDT"); + } + + private void runCRDTTest(String tableName) { + + String tableDdl = "CREATE TABLE IF NOT EXISTS " + tableName + + "(sid INTEGER, " + + " id INTEGER, " + + " s STRING, " + + " ci INTEGER AS MR_COUNTER, " + + " cn NUMBER AS MR_COUNTER, " + + " j JSON(ci AS INTEGER MR_COUNTER, " + + " cl as LONG MR_COUNTER), " + + " PRIMARY KEY(SHARD(sid), id))"; + + TableResult tr = tableOperation(handle, tableDdl, limits, WAIT_MS); + String kvTableName = getKVTableName(tr); + + int sid = 0; + int id = 1; + int step = 2; + + MapValue row = createRow(sid, id); + assertNotNull(doPut(tableName, row, null, null)); + + List results; + Map values = new HashMap<>(); + values.put("$sid", new IntegerValue(sid)); + values.put("$id", new IntegerValue(id)); + + String query = "DECLARE $sid INTEGER; $id INTEGER; " + + "UPDATE " + tableName + + "$t SET ci = ci + " + step + + ", cn = cn + " + step + + ", $t.j.ci = $t.j.ci - " + step + + ", $t.j.cl = $t.j.cl + " + step + + " WHERE sid = $sid and id = $id " + + " RETURNING ci, cn, $t.j.ci as jci, $t.j.cl as jcl"; + results = doQuery(query, values); + assertEquals(1, results.size()); + + FieldValue ciVal = new IntegerValue(2); + FieldValue cnVal = new NumberValue("2"); + FieldValue jciVal = new IntegerValue(-2); + FieldValue jclVal = new LongValue(2); + + MapValue rec = results.get(0); + assertEquals(ciVal, rec.get("ci")); + assertEquals(cnVal, rec.get("cn")); + assertEquals(jciVal, rec.get("jci")); + assertEquals(jclVal, rec.get("jcl")); + + row.put("ci", ciVal); + row.put("cn", cnVal); + row.get("j").asMap().put("ci", jciVal).put("cl", jclVal); + checkRow(row, tableName, kvTableName); + + dropTable(handle, tableName); + } + + @Test + public void testFreezeTable() { + /* + * Skip this test in local proxy test, because freeze/unfreeze schema + * is managed by SC + */ + assumeTrue("Skipping testBasicOps in local test", cloudRunning); + + final String tableName = "testFreezeTable"; + String freezeDdl = "ALTER TABLE " + tableName + " FREEZE 
SCHEMA"; + String unfreezeDdl = "ALTER TABLE " + tableName + " UNFREEZE SCHEMA"; + TableLimits limits = new TableLimits(100, 100, 1); + TableLimits newLimits = new TableLimits(200, 150, 1); + TableResult tr; + String ddl; + + /* + * 0. Create table with schema frozen. + */ + String tableDdl = "CREATE TABLE " + tableName + + "(id INTEGER, s STRING, j JSON, PRIMARY KEY(id)) " + + "WITH SCHEMA FROZEN"; + tableOperation(handle, tableDdl, limits, WAIT_MS); + + /* freeze schema of the table already frozen, do nothing */ + tableOperation(handle, freezeDdl, null, WAIT_MS); + + /* + * 1. Test update table after freeze table: cannot alter table schema + * but able to update ttl or limits + */ + + /* altering table schema should fail */ + ddl = "ALTER TABLE " + tableName + "(ADD i INTEGER)"; + tableOperation(handle, ddl, null /* limits */, null /* tableName */, + TableResult.State.ACTIVE, + IllegalArgumentException.class); + + /* updating ttl or limits should succeed */ + ddl = "ALTER TABLE " + tableName + " USING TTL 1 days"; + tr = tableOperation(handle, ddl, null, WAIT_MS); + assertTrue(tr.getSchema().contains("\"ttl\":\"1 DAYS\"")); + + ddl = "ALTER TABLE " + tableName + " USING TTL 1 days"; + tr = tableOperation(handle, null /* statement */, newLimits, tableName, + TableResult.State.ACTIVE, WAIT_MS); + assertEquals(newLimits.getWriteUnits(), + tr.getTableLimits().getWriteUnits()); + + /* + * 2. Test alter table after unfreeze schema. + */ + + /* unfreeze schema */ + tableOperation(handle, unfreezeDdl, null, WAIT_MS); + /* unfreeze schema again, do nothing */ + tableOperation(handle, unfreezeDdl, null, WAIT_MS); + + /* dropping JSON field should succeed after unfreezed schema */ + ddl = "ALTER TABLE " + tableName + "(DROP j)"; + tableOperation(handle, ddl, null, WAIT_MS); + + /* + * 3. 
Test cannot freeze table without a JSON field + */ + + /* freezing table without a JSON field should fail */ + tableOperation(handle, freezeDdl, null /* limits */, + null /* tableName */, TableResult.State.ACTIVE, + IllegalArgumentException.class); + + /* Add a JSON field, freezing table should succeed */ + ddl = "ALTER TABLE " + tableName + "(ADD j1 JSON)"; + tableOperation(handle, ddl, null, WAIT_MS); + tableOperation(handle, freezeDdl, null, WAIT_MS); + + /* + * Creating table with schema frozen but without a JSON field + * should fail + */ + ddl = "CREATE TABLE tnojson(id INTEGER, s STRING, PRIMARY KEY(id)) " + + "WITH SCHEMA FROZEN"; + tableOperation(handle, ddl, null /* limits */, + null /* tableName */, TableResult.State.ACTIVE, + IllegalArgumentException.class); + + /* + * Test freeze table force + */ + + /* + * Create a table without a JSON field and freeze it using + * "with schema frozen force" + */ + ddl = "CREATE TABLE tnojson(id INTEGER, s STRING, PRIMARY KEY(id)) " + + "WITH SCHEMA FROZEN FORCE"; + tableOperation(handle, ddl, limits, WAIT_MS); + + /* Alter the TTL of the frozen table */ + ddl = "ALTER TABLE tnojson USING TTL 3 days"; + tableOperation(handle, unfreezeDdl, null, WAIT_MS); + + /* Fail: can't alter table schema */ + ddl = "ALTER TABLE tnojson (ADD i INTEGER)"; + tableOperation(handle, ddl, null /* limits */, + null /* tableName */, TableResult.State.ACTIVE, + IllegalArgumentException.class); + + /* unfreeze table */ + ddl = "ALTER TABLE tnojson UNFREEZE SCHEMA"; + tableOperation(handle, ddl, null, WAIT_MS); + /* unfreeze table again, do nothing */ + tableOperation(handle, ddl, null, WAIT_MS); + + /* The table is mutable now, add a new field */ + ddl = "ALTER TABLE tnojson (ADD i INTEGER)"; + tableOperation(handle, ddl, null /* limits */, WAIT_MS); + + /* freeze table using "freeze schema force" */ + ddl = "ALTER TABLE tnojson FREEZE SCHEMA FORCE"; + tableOperation(handle, ddl, null /* limits */, WAIT_MS); + /* freeze table again, do nothing */ + tableOperation(handle, ddl, null /* limits */, WAIT_MS); + + /* Fail: can't alter frozen table's schema */ + ddl = "ALTER TABLE tnojson (DROP i)"; + tableOperation(handle, ddl, null /* limits */, + null /* tableName */, TableResult.State.ACTIVE, + IllegalArgumentException.class); + } + + private void checkRow(MapValue row, + String tableName, + String kvTableName) { + + MapValue retRow = doGet(tableName, row); + assertEquals(row, retRow); + + /* skip checking the raw value if not local test */ + if (store == null) { + return; + } + + final int regionId = getRegionId(); + PrimaryKeyImpl pkey = getKVPrimaryKey(kvTableName, row); + Value value = getKVValue(pkey); + assertEquals(Value.Format.TABLE_V1, value.getFormat()); + assertTrue(value.getValue().length > 0); + + TableImpl table = pkey.getTableImpl(); + if (table.hasSchemaMRCounters()) { + RowImpl kvRow = getKVRow(pkey); + for (int i = 0; i < table.getFields().size(); i++) { + if (table.isPrimKeyAtPos(i)) { + continue; + } + + FieldDefImpl fdef = table.getFieldDef(i); + FieldValueImpl fval; + if (fdef.isMRCounter()) { + fval = kvRow.get(table.getFields().get(i)); + checkMRCounterValue(fval, regionId); + } else if (fdef.hasJsonMRCounter()) { + for (TablePath path : table.getSchemaMRCounterPaths(i)) { + fval = kvRow.evaluateScalarPath(path, 0); + checkMRCounterValue(fval, regionId); + } + } + } + } + } + + private void checkMRCounterValue(FieldValueImpl fval, int regionId) { + if (fval != null && !fval.isNull()) { + assertTrue(fval.isMRCounter()); + 
assertTrue(!fval.getMRCounterMap().isEmpty()); + if (fval.toString().startsWith("-")) { + regionId = -regionId; + } + assertTrue(fval.getMRCounterMap().containsKey(regionId)); + } + } + + private void checkRowDeleted(MapValue key, + boolean isMRTable, + String tableName, + String kvTableName) { + assertNull(doGet(tableName, key)); + + /* skip checking the raw value if not local test */ + if (store == null) { + return; + } + + PrimaryKeyImpl pkey = getKVPrimaryKey(kvTableName, key); + Value value = getKVValue(pkey); + if (isMRTable) { + checkTombStoneNone(value); + } else { + assertNull(value); + } + } + + private void checkTombStoneNone(Value value) { + assertEquals(Value.Format.NONE, value.getFormat()); + assertTrue(value.getValue().length == 0); + } + + private PrimaryKeyImpl getKVPrimaryKey(String kvTableName, MapValue key) { + TableImpl table = getKVTable(kvTableName); + assertNotNull("table not found: " + kvTableName, table); + return table.createPrimaryKeyFromJson(key.toJson(), false); + } + + private Value getKVValue(PrimaryKeyImpl pkey) { + ValueVersion vv = store.get(pkey.getPrimaryKey(false), + readOptions.getConsistency(), + readOptions.getTimeout(), + readOptions.getTimeoutUnit()); + if (vv != null) { + return vv.getValue(); + } + return null; + } + + private RowImpl getKVRow(PrimaryKey pkey) { + return (RowImpl)tableAPI.get(pkey, readOptions); + } + + private TableImpl getKVTable(String kvTableName) { + return (TableImpl)tableAPI.getTable(kvTableName); + } + + private Version doPut(String tableName, + MapValue row, + Option option, + Version matchVersion) { + + PutRequest req = new PutRequest() + .setTableName(tableName) + .setOption(option) + .setValue(row); + if (matchVersion != null) { + req.setMatchVersion(matchVersion); + } + + PutResult ret = handle.put(req); + return ret.getVersion(); + } + + private MapValue doGet(String tableName, MapValue key) { + GetRequest req = new GetRequest() + .setTableName(tableName) + .setKey(key); + GetResult ret = handle.get(req); + assertTrue(ret.getReadKB() > 0); + return ret.getValue(); + } + + private boolean doDelete(String tableName, + MapValue key, + Version matchVersion) { + + DeleteRequest req = new DeleteRequest() + .setTableName(tableName) + .setKey(key) + .setMatchVersion(matchVersion); + DeleteResult ret = handle.delete(req); + assertTrue(ret.getWriteKB() > 0); + return ret.getSuccess(); + } + + private void doWriteMultiple(String tableName, + List rows, + boolean putOp) { + + WriteMultipleRequest req = new WriteMultipleRequest(); + for (MapValue row : rows) { + if (putOp) { + req.add(new PutRequest() + .setTableName(tableName) + .setValue(row), true /* abortIfUnsucessful */); + } else { + req.add(new DeleteRequest() + .setTableName(tableName) + .setKey(row), true /* abortIfUnsucessful */); + } + } + + WriteMultipleResult ret = handle.writeMultiple(req); + assertTrue(ret.getSuccess()); + assertTrue (ret.getWriteKB() > 0); + } + + private int doMultiDelete(String tableName, MapValue key) { + MultiDeleteRequest req = new MultiDeleteRequest() + .setTableName(tableName) + .setKey(key); + MultiDeleteResult ret = handle.multiDelete(req); + return ret.getNumDeletions(); + } + + private List doQuery(String query, + Map values) { + + PrepareRequest prepReq = new PrepareRequest().setStatement(query); + PrepareResult prepRet = handle.prepare(prepReq); + PreparedStatement pstmt = prepRet.getPreparedStatement(); + + if (values != null) { + for (Entry e : values.entrySet()) { + pstmt.setVariable(e.getKey(), e.getValue()); + } + } + + List 
results = new ArrayList<>(); + try (@SuppressWarnings("resource") + QueryRequest req = new QueryRequest().setPreparedStatement(pstmt)) { + try (QueryIterableResult ret = handle.queryIterable(req)) { + for (MapValue row : ret) { + results.add(row); + } + } + } + return results; + } + + private List doQuery(String query) { + List results = new ArrayList<>(); + try (@SuppressWarnings("resource") + QueryRequest req = new QueryRequest().setStatement(query)) { + try (QueryIterableResult ret = handle.queryIterable(req)) { + for (MapValue row : ret) { + results.add(row); + } + } + } + return results; + } + + private MapValue createRow(int sid, int id) { + MapValue row = createPrimaryKey(sid, id); + row.put("s", "s" + sid + id); + + String json = "{\"cl\":0, \"ci\":0}"; + FieldValue jval = MapValue.createFromJson(json, null); + row.put("j", jval); + return row; + } + + private MapValue createPrimaryKey(int sid, int id) { + MapValue row = new MapValue(); + row.put("sid", sid); + row.put("id", id); + return row; + } + + private String getKVTableName(TableResult ret) { + String tid = ret.getTableId(); + return (cloudRunning ? tid.replace(".", "_") : tid); + } + + private static KVStore getStore(boolean excludeTombstone) { + if (kvlite == null) { + KVStoreConfig config = new KVStoreConfig(getStoreName(), + "localhost:5000"); + config.setExcludeTombstones(excludeTombstone); + config.setEnableTableCache(false); + return KVStoreFactory.getStore(config); + } + + String hostPort = getHostName() + ":" + getKVPort(); + KVStoreConfig config = new KVStoreConfig(getStoreName(), hostPort); + config.setExcludeTombstones(excludeTombstone); + config.setEnableTableCache(false); + return KVStoreFactory.getStore(config); + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/NumericTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/NumericTest.java new file mode 100644 index 00000000..600392aa --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/NumericTest.java @@ -0,0 +1,535 @@ +/*- + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * See the file LICENSE for redistribution information. 
+ * + */ + +package oracle.nosql.proxy; + +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.math.BigDecimal; +import java.math.BigInteger; +import java.util.HashMap; +import java.util.Map; +import java.util.Map.Entry; + +import oracle.nosql.driver.ops.DeleteRequest; +import oracle.nosql.driver.ops.DeleteResult; +import oracle.nosql.driver.ops.GetRequest; +import oracle.nosql.driver.ops.GetResult; +import oracle.nosql.driver.ops.PutRequest; +import oracle.nosql.driver.ops.PutResult; +import oracle.nosql.driver.ops.TableLimits; +import oracle.nosql.driver.ops.TableResult; +import oracle.nosql.driver.values.FieldValue; +import oracle.nosql.driver.values.FieldValue.Type; +import oracle.nosql.driver.values.JsonOptions; +import oracle.nosql.driver.values.MapValue; + +import org.junit.Test; + +public class NumericTest extends ProxyTestBase { + + final String tableName = "numericTest"; + final String createTableDdl = + "create table if not exists numericTest (" + + "id integer, " + + "i integer, " + + "l long, " + + "f float," + + "d double, " + + "n number, " + + "primary key(id))"; + + @Override + public void setUp() throws Exception { + super.setUp(); + + tableOperation(handle, createTableDdl, + new TableLimits(20000, 20000, 50), + TableResult.State.ACTIVE, 10000); + } + + @Override + public void tearDown() throws Exception { + deleteTable(tableName); + super.tearDown(); + } + + /** + * Test put numeric values, values are parsed from JSON. + */ + @Test + public void testPutWithJson() { + + final int intVal = 123456; + final long longVal = 987654321012345678L; + final float fltVal = 1.234567f; + final double dblVal = 9.8765432123456d; + + int[] ints = new int[] { + Integer.MIN_VALUE, + Integer.MAX_VALUE, + 0, + intVal, + }; + + long [] longs = new long[] { + Long.MIN_VALUE, + Long.MAX_VALUE, + 0L, + longVal, + }; + + float[] flts = new float[] { + Float.MIN_VALUE, + Float.MAX_VALUE, + 0.0f, + fltVal + }; + + double[] dbls = new double[] { + Double.MIN_VALUE, + Double.MAX_VALUE, + 0.0d, + dblVal + }; + + BigInteger bint = + new BigInteger("98765432109876543210987654321098765432109876543210"); + BigDecimal[] decs = new BigDecimal[] { + new BigDecimal(bint, -1024), + new BigDecimal(bint, 1024), + BigDecimal.ZERO, + BigDecimal.valueOf(longVal) + }; + + for (int i = 0; i < ints.length; i++) { + runPutGetTest(ints[i], longs[i], flts[i], dbls[i], decs[i], false); + runPutGetTest(ints[i], longs[i], flts[i], dbls[i], decs[i], true); + } + } + + private void runPutGetTest(int i, long l, float f, double d, BigDecimal dec, + boolean numericAsNumber) { + + final MapValue key = new MapValue().put("id", 1); + final MapValue row = new MapValue() + .put("id", 1) + .put("i", i) + .put("l", l) + .put("f", f) + .put("d", d) + .put("n", dec); + + final JsonOptions jsonOpts = + new JsonOptions().setNumericAsNumber(numericAsNumber); + String jsonStr = row.toJson(jsonOpts); + + PutRequest putReq = new PutRequest() + .setTableName(tableName) + .setValueFromJson(jsonStr, jsonOpts); + PutResult putRet = handle.put(putReq); + assertTrue(putRet != null && putRet.getVersion() != null); + + GetRequest getReq = new GetRequest() + .setTableName(tableName) + .setKey(key); + GetResult getRet = handle.get(getReq); + assertTrue(getRet != null && getRet.getValue() != null); + + MapValue value = getRet.getValue(); + assertType(value.get("i"), Type.INTEGER); + assertTrue("Wrong value of \"i\"", 
value.getInt("i") == i); + + assertType(value.get("l"), Type.LONG); + assertTrue("Wrong value of \"l\"", value.getLong("l") == l); + + assertType(value.get("f"), Type.DOUBLE); + assertTrue("Wrong value of \"f\"", value.getDouble("f") == f); + + assertType(value.get("d"), Type.DOUBLE); + assertTrue("Wrong value of \"d\"", value.getDouble("d") == d); + + assertType(value.get("n"), Type.NUMBER); + assertTrue("Wrong value of \"n\"", + value.getNumber("n").compareTo(dec) == 0); + } + + /** + * Put numeric values with other compatible type values. + */ + @Test + public void testCompatibleTypes() { + + final MapValue value = new MapValue().put("id", 1); + Map expValues = new HashMap(); + + /* + * Target KV field type: Integer + * Value types: LONG, DOUBLE, NUMBER + */ + String fname = "i"; + + /* Use LONG for Integer type */ + final long longToIntOK = Integer.MAX_VALUE; + final long longToIntFail = (long)Integer.MAX_VALUE + 1; + + value.put(fname, longToIntOK); + expValues.put(fname, (int)longToIntOK); + putAsOtherNuermicTypeTest(value, true, expValues); + + value.put(fname, longToIntFail); + putAsOtherNuermicTypeTest(value, false); + + /* Use DOUBLE for Integer type */ + final double doubleToIntOK = 1.2345678E7d; + final double doubleToIntFail = -1.1d; + + value.put(fname, doubleToIntOK); + expValues.put(fname, (int)doubleToIntOK); + putAsOtherNuermicTypeTest(value, true, expValues); + + value.put(fname, doubleToIntFail); + putAsOtherNuermicTypeTest(value, false); + + /* Use NUMBER for Integer type */ + final BigDecimal decimalToIntOK = BigDecimal.valueOf(Integer.MIN_VALUE); + final BigDecimal decimalToIntFail = BigDecimal.valueOf(Long.MAX_VALUE); + + value.put(fname, decimalToIntOK); + expValues.put(fname, decimalToIntOK.intValue()); + putAsOtherNuermicTypeTest(value, true, expValues); + + value.put(fname, decimalToIntFail); + putAsOtherNuermicTypeTest(value, false); + + /* + * Target KV field type: Long + * Value types: INTEGER, DOUBLE, NUMBER + */ + expValues.clear(); + value.remove(fname); + fname = "l"; + + /* Use INTEGER for Long type */ + final int intToLongOK = Integer.MAX_VALUE; + + value.put(fname, intToLongOK); + expValues.put(fname, (long)intToLongOK); + putAsOtherNuermicTypeTest(value, true, expValues); + + /* Use DOUBLE for Long type */ + final double doubleToLongOK = 1.234567890123E12d; + final double doubleToLongFail = -1.1d; + + value.put(fname, doubleToLongOK); + expValues.put(fname, (long)doubleToLongOK); + putAsOtherNuermicTypeTest(value, true, expValues); + + value.put(fname, doubleToLongFail); + putAsOtherNuermicTypeTest(value, false); + + /* Use NUMBER for Long type */ + final BigDecimal decimalToLongOK = BigDecimal.valueOf(Long.MAX_VALUE); + final BigDecimal decimalToLongFail = new BigDecimal("1234567890.1"); + + value.put(fname, decimalToLongOK); + expValues.put(fname, decimalToLongOK.longValue()); + putAsOtherNuermicTypeTest(value, true, expValues); + + value.put(fname, decimalToLongFail); + putAsOtherNuermicTypeTest(value, false); + + /* + * Target KV field type: Float + * Value types: INTEGER, LONG, DOUBLE, NUMBER + */ + expValues.clear(); + value.remove(fname); + fname = "f"; + + /* Use INTEGER for Float type */ + final int intToFloatOK = 16777216; + final int intToFloatFail = 16777217; + + value.put(fname, intToFloatOK); + expValues.put(fname, (double)intToFloatOK); + putAsOtherNuermicTypeTest(value, true, expValues); + + value.put(fname, intToFloatFail); + putAsOtherNuermicTypeTest(value, false); + + /* Use LONG for Float type */ + final long longToFloatOK = 
Long.MAX_VALUE; + final long longToFloatFail = Long.MAX_VALUE - 1; + + value.put(fname, longToFloatOK); + expValues.put(fname, (double)longToFloatOK); + putAsOtherNuermicTypeTest(value, true, expValues); + + value.put(fname, longToFloatFail); + putAsOtherNuermicTypeTest(value, false); + + /* Use DOUBLE for Float type */ + final double doubleToFloatOK = -Float.MAX_VALUE; + final double doubleToFloatFail = Double.MAX_VALUE; + + value.put(fname, doubleToFloatOK); + expValues.put(fname, doubleToFloatOK); + putAsOtherNuermicTypeTest(value, true, expValues); + + value.put(fname, doubleToFloatFail); + putAsOtherNuermicTypeTest(value, false); + + /* Use NUMBER for Float type */ + float flt = 1.23456E2f; + final BigDecimal decimalToFloatOK = BigDecimal.valueOf(flt); + final BigDecimal decimalToFloatFail = + BigDecimal.valueOf(Double.MAX_VALUE); + + value.put(fname, decimalToFloatOK); + expValues.put(fname, (double)flt); + putAsOtherNuermicTypeTest(value, true, expValues); + + value.put(fname, decimalToFloatFail); + putAsOtherNuermicTypeTest(value, false); + + /* + * Target KV field type: Double + * Value types: INTEGER, LONG, NUMBER + */ + expValues.clear(); + value.remove(fname); + fname = "d"; + + /* Use INTEGER for Double type */ + final int intToDoubleOK = Integer.MAX_VALUE; + + value.put(fname, intToDoubleOK); + expValues.put(fname, (double)intToDoubleOK); + putAsOtherNuermicTypeTest(value, true, expValues); + + /* Use LONG for Double type */ + final long longToDoubleOK = Long.MAX_VALUE; + final long longToDoubleFail = Long.MAX_VALUE - 1; + + value.put(fname, longToDoubleOK); + expValues.put(fname, (double)longToDoubleOK); + putAsOtherNuermicTypeTest(value, true, expValues); + + value.put(fname, longToDoubleFail); + putAsOtherNuermicTypeTest(value, false); + + /* Use NUMBER for Double type */ + double dbl = Double.MAX_VALUE; + final BigDecimal decimalToDoubleOK = BigDecimal.valueOf(dbl); + final BigDecimal decimalToDoubleFail = + BigDecimal.valueOf(Long.MAX_VALUE - 1); + + value.put(fname, decimalToDoubleOK); + expValues.put(fname, dbl); + putAsOtherNuermicTypeTest(value, true, expValues); + + value.put(fname, decimalToDoubleFail); + putAsOtherNuermicTypeTest(value, false); + + /* + * Target KV field type: Number + * Value types: INTEGER, LONG, DOUBLE + */ + expValues.clear(); + value.remove(fname); + fname = "n"; + + /* Use INTEGER for Number type */ + final int intToNumberOK = Integer.MAX_VALUE; + value.put(fname, intToNumberOK); + expValues.put(fname, BigDecimal.valueOf(intToNumberOK)); + putAsOtherNuermicTypeTest(value, true, expValues); + + /* Use LONG for Number type */ + final long longToNumberOK = Long.MAX_VALUE; + value.put(fname, longToNumberOK); + expValues.put(fname, BigDecimal.valueOf(longToNumberOK)); + putAsOtherNuermicTypeTest(value, true, expValues); + + /* Use DOUBLE for Number type */ + final double doubleToNumberOK = Double.MAX_VALUE; + value.put(fname, doubleToNumberOK); + expValues.put(fname, BigDecimal.valueOf(doubleToNumberOK)); + putAsOtherNuermicTypeTest(value, true, expValues); + } + + /* + * Test Get/Delete op with a key parsed from JSON. 
+ */ + @Test + public void testGetDeleteWithKeyFromJson() { + String tabName = "tableWithNumberKey"; + String ddl = "create table if not exists " + tabName + "(" + + "pk number, " + + "str string, " + + "primary key(pk))"; + + tableOperation(handle, ddl, + new TableLimits(1000, 1000, 50), + TableResult.State.ACTIVE, 10000); + + JsonOptions optNumericAsNumber = + new JsonOptions().setNumericAsNumber(true); + + BigDecimal bd = new BigDecimal("123456789012345678901234567890"); + /* this used to fail but with driver 5.4.11 it works */ + runGetDeleteTest(tabName, bd, null, true); + runGetDeleteTest(tabName, bd, optNumericAsNumber, true); + + bd = BigDecimal.valueOf(Integer.MAX_VALUE); + runGetDeleteTest(tabName, bd, null, true); + runGetDeleteTest(tabName, bd, optNumericAsNumber, true); + + bd = BigDecimal.valueOf(Long.MAX_VALUE); + runGetDeleteTest(tabName, bd, null, true); + runGetDeleteTest(tabName, bd, optNumericAsNumber, true); + + bd = BigDecimal.valueOf(Float.MAX_VALUE); + runGetDeleteTest(tabName, bd, null, true); + runGetDeleteTest(tabName, bd, optNumericAsNumber, true); + + bd = BigDecimal.valueOf(Double.MAX_VALUE); + runGetDeleteTest(tabName, bd, null, true); + runGetDeleteTest(tabName, bd, optNumericAsNumber, true); + } + + private void runGetDeleteTest(String tname, + BigDecimal bd, + JsonOptions jsonOpts, + boolean expSucceed) { + + /* Put a row */ + MapValue mapVal = new MapValue() + .put("pk", bd) + .put("str", "strdata"); + PutRequest putReq = new PutRequest() + .setTableName(tname) + .setValue(mapVal); + PutResult putRes = handle.put(putReq); + assertNotNull(putRes.getVersion()); + + mapVal = new MapValue().put("pk", bd); + String pkJson = mapVal.toJson(jsonOpts); + + /* + * Get the row, the key is parsed from JSON string with the + * specified options. + */ + GetRequest getReq = new GetRequest() + .setKeyFromJson(pkJson, jsonOpts) + .setTableName(tname); + GetResult getRes = handle.get(getReq); + if (expSucceed) { + assertNotNull(getRes.getValue()); + } else { + assertNull(getRes.getValue()); + } + + /* + * Delete the row, the key is parsed from JSON string with the + * specified options. 
+ */ + DeleteRequest delReq = new DeleteRequest() + .setKeyFromJson(pkJson, jsonOpts) + .setTableName(tname); + DeleteResult delRes = handle.delete(delReq); + assertTrue(expSucceed == delRes.getSuccess()); + } + + private void putAsOtherNuermicTypeTest(MapValue value, + boolean shouldSucceed) { + + putAsOtherNuermicTypeTest(value, shouldSucceed, null); + } + + private void putAsOtherNuermicTypeTest(MapValue value, + boolean shouldSucceed, + Map expValues) { + + runPutAsOtherNuermicTypeTest(value, false, shouldSucceed, expValues); + runPutAsOtherNuermicTypeTest(value, true, shouldSucceed, expValues); + } + + private void runPutAsOtherNuermicTypeTest(MapValue value, + boolean numericAsNumber, + boolean shouldSucceed, + Map expValues){ + + final JsonOptions jsonOpts = + new JsonOptions().setNumericAsNumber(numericAsNumber); + final String jsonStr = value.toJson(jsonOpts); + PutRequest putReq = new PutRequest() + .setTableName(tableName) + .setValueFromJson(jsonStr, jsonOpts); + try { + PutResult putRet = handle.put(putReq); + if (shouldSucceed) { + assertTrue("Put failed", + putRet != null && putRet.getVersion() != null); + MapValue key = new MapValue().put("id", 1); + GetRequest getReq = new GetRequest() + .setTableName(tableName) + .setKey(key); + GetResult getRet = handle.get(getReq); + assertTrue(getRet != null); + if (expValues != null) { + checkValue(getRet.getValue(), expValues); + } + } else { + fail("Put should have failed"); + } + } catch (Exception ex) { + if (shouldSucceed) { + fail("Put failed: " + ex.getMessage()); + } + //System.out.println(ex.getMessage()); + } + } + + private void checkValue(MapValue value, Map expValues) { + + for (Entry e : expValues.entrySet()) { + String fname = e.getKey(); + Object fval = e.getValue(); + + if (fval instanceof Integer) { + FieldValue fieldValue = value.get(fname); + assertType(fieldValue, Type.INTEGER); + assertTrue(fieldValue.getInt() == (int)fval); + } else if (fval instanceof Long) { + FieldValue fieldValue = value.get(fname); + assertType(fieldValue, Type.LONG); + assertTrue(fieldValue.getLong() == (long)fval); + } else if (fval instanceof Double) { + FieldValue fieldValue = value.get(fname); + assertType(fieldValue, Type.DOUBLE); + assertTrue(fieldValue.getDouble() == (double)fval); + } else if (fval instanceof BigDecimal) { + FieldValue fieldValue = value.get(fname); + assertType(fieldValue, Type.NUMBER); + assertTrue( + fieldValue.getNumber().compareTo((BigDecimal)fval) == 0); + } else { + fail("Unexpected value: " + fval); + } + } + } + + private void assertType(FieldValue value, FieldValue.Type type) { + assertTrue("Wrong type. expect " + type + " actual " + value.getType(), + value.getType() == type); + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/ParallelQueryTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/ParallelQueryTest.java new file mode 100644 index 00000000..e3b81c99 --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/ParallelQueryTest.java @@ -0,0 +1,453 @@ +/*- + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * See the file LICENSE for redistribution information. 
+ * + */ + +package oracle.nosql.proxy; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeTrue; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashSet; +import java.util.List; +import java.util.Properties; +import java.util.Set; +import java.util.concurrent.Callable; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +import oracle.nosql.driver.NoSQLHandle; +import oracle.nosql.driver.NoSQLHandleConfig; +import oracle.nosql.driver.NoSQLHandleFactory; +import oracle.nosql.driver.ops.PrepareRequest; +import oracle.nosql.driver.ops.PrepareResult; +import oracle.nosql.driver.ops.PreparedStatement; +import oracle.nosql.driver.ops.PutRequest; +import oracle.nosql.driver.ops.PutResult; +import oracle.nosql.driver.ops.QueryRequest; +import oracle.nosql.driver.ops.QueryResult; +import oracle.nosql.driver.ops.Result; +import oracle.nosql.driver.ops.TableLimits; +import oracle.nosql.driver.ops.TableResult; +import oracle.nosql.driver.values.MapValue; + +import oracle.nosql.proxy.kv.KVTenantManager; +import oracle.nosql.proxy.sc.TenantManager; +import oracle.nosql.proxy.sc.LocalTenantManager; +import oracle.nosql.proxy.security.AccessCheckerFactory; +import oracle.nosql.proxy.security.SecureTestUtil; + +/** + * Test parallel queries. This is a separate test from QueryTest + * because it requires use of a multi-shard store to test parallel + * indexed queries + */ +public class ParallelQueryTest extends ProxyTestBase { + + protected static TenantManager tm; + protected static String proxyEndpoint; + protected static String parallelStoreName = "ParallelQueryStore"; + + + @BeforeClass + public static void staticSetUp() + throws Exception { + + assumeTrue("Skip ParallelQuery in minicloud or cloud test", + !Boolean.getBoolean("usemc") && + !Boolean.getBoolean("usecloud")); + startKV(); + startLocalProxy(); + } + + @AfterClass + public static void staticTearDown() + throws Exception { + + stopProxy(); + + if (kvlite != null) { + kvlite.stop(false); + } + + cleanupTestDir(); + } + + private static void startKV() { + verbose = Boolean.getBoolean(VERBOSE_PROP); + /* + * use a different store name to avoid conflicts in topology + */ + kvlite = startKVLite(hostName, + parallelStoreName, + false, /* don't useThreads */ + verbose, + true, /* multishard true */ + 0, /* default memory MB */ + false, /* not secure */ + getKVPort(), /* default */ + getPortRange(), /* default */ + getTestDir()); + } + + protected static void startLocalProxy() { + onprem = Boolean.getBoolean(ONPREM_PROP); + + Properties commandLine = new Properties(); + + commandLine.setProperty(Config.STORE_NAME.paramName, + parallelStoreName); + commandLine.setProperty(Config.HELPER_HOSTS.paramName, + (hostName + ":" + getKVPort())); + Config.ProxyType ptype = (onprem ? 
Config.ProxyType.KVPROXY : + Config.ProxyType.CLOUDTEST); + commandLine.setProperty(Config.PROXY_TYPE.paramName, ptype.name()); + commandLine.setProperty(Config.VERBOSE.paramName, + Boolean.toString(verbose)); + /* use a non-privileged port */ + commandLine.setProperty(Config.HTTP_PORT.paramName, "8095"); + + /* allow query tracing */ + commandLine.setProperty(Config.QUERY_TRACING.paramName, "true"); + + /* use defaults for thread pools and sizes */ + + /* simple access checker */ + ac = AccessCheckerFactory.createInsecureAccessChecker(); + Config cfg = new Config(commandLine); + /* create an appropriate TenantManager */ + if (onprem) { + /* note: in KVPROXY mode the proxy *requires* a KVTenantManager */ + tm = KVTenantManager.createTenantManager(cfg); + } else { + tm = LocalTenantManager.createTenantManager(cfg); + } + proxy = Proxy.initialize(cfg, tm, ac, null); + proxyEndpoint = "http://" + hostName + ":" + cfg.getHttpPort(); + } + + protected static void stopProxy() throws Exception { + if (proxy != null) { + proxy.shutdown(3, TimeUnit.SECONDS); + proxy = null; + } + if (tm != null) { + tm.close(); + tm = null; + } + } + + @Before + public void setUp() throws Exception { + /* + * Configure the endpoint + */ + if (handle == null) { + NoSQLHandleConfig config = + new NoSQLHandleConfig(proxyEndpoint); + SecureTestUtil.setAuthProvider(config, false, + onprem, getTenantId()); + handle = getHandle(config); /* see ProxyTestBase */ + } + dropAllTables(handle, true); + } + + @After + public void tearDown() throws Exception { + if (handle != null) { + dropAllTables(handle, true); + handle.close(); + handle = null; + } + } + + @Test + public void testParallelQueryArgs() { + final String tableName = "ParallelQuery"; + final String createTable = "create table " + tableName + + "(id integer, primary key(id)) as json collection"; + final String query = "select * from " + tableName; + tableOperation(handle, createTable, + new TableLimits(4, 1000, 1000), + TableResult.State.ACTIVE, 10000); + PreparedStatement ps = + handle.prepare(new PrepareRequest().setStatement(query)) + .getPreparedStatement(); + + QueryRequest qr = new QueryRequest().setStatement(query). 
+ setNumberOfOperations(1); + failParallelQuery(qr, "not prepared1", IllegalArgumentException.class); + qr.setNumberOfOperations(0).setOperationNumber(1); + failParallelQuery(qr, "not prepared2", IllegalArgumentException.class); + + /* use prepared statement now to check other params */ + qr.setPreparedStatement(ps).setStatement(null); + failParallelQuery(qr, "numops set, opnum not set", + IllegalArgumentException.class); + + qr.setNumberOfOperations(1).setOperationNumber(0); + failParallelQuery(qr, "opnum set, numops not set", + IllegalArgumentException.class); + + qr.setNumberOfOperations(1).setOperationNumber(2); + failParallelQuery(qr, "opnum too large", + IllegalArgumentException.class); + + qr.setNumberOfOperations(ps.getMaximumParallelism() + 1); + failParallelQuery(qr, "numops too large", + IllegalArgumentException.class); + + qr.setNumberOfOperations(-1); + failParallelQuery(qr, "negative numops", + IllegalArgumentException.class); + + qr.setNumberOfOperations(1).setOperationNumber(-1); + failParallelQuery(qr, "negative opnum", + IllegalArgumentException.class); + + String upd = "insert into " + tableName + "(id) values (2000)"; + ps = handle.prepare(new PrepareRequest().setStatement(upd)) + .getPreparedStatement(); + assertEquals(0, ps.getMaximumParallelism()); + /* any non-zero value is illegal for updates */ + qr.setPreparedStatement(ps).setOperationNumber(1). + setNumberOfOperations(1); + failParallelQuery(qr, "cannot insert/update", + IllegalArgumentException.class); + } + + @Test + public void testParallelMisc() { + final int numRows = 1000; + final String tableName = "ParallelQuery"; + final String createTable = "create table " + tableName + + "(id integer, primary key(id)) as json collection"; + String createIndex = "create index idx on " + tableName + + "(name as string)"; + tableOperation(handle, createTable, + new TableLimits(4, 1000, 1000), + TableResult.State.ACTIVE, 10000); + tableOperation(handle, createIndex, null, null, + TableResult.State.ACTIVE, null); + + final String query1 = "select * from " + tableName; /* yes */ + final String query2 = "select * from " + tableName + /* no */ + " order by id"; + final String query3 = "select * from " + tableName + /* yes */ + " where name = 'joe'"; + final String query4 = "select count(*) from " + tableName; /* no */ + + final String[] queries = new String[]{query1, query2, query3, query4}; + /* + * These answers rely on the default configuration of a multishard + * KVLite + */ + final int[] answers = new int[]{multishardPartitions, + 0, multishardShards, 0}; + for (int i = 0; i < queries.length; i++) { + assertEquals(answers[i], maxParallel(queries[i])); + } + } + + /* + * Use query parallelism. + * 1. in a non-threaded fashion to test that the use of subsets of a + * table return complete, non-intersecting results + * 2. 
in a threaded, truly parallel scenario + * + * Start with all partition parallelism and use a JSON collection table + * TODO: + * o parallel indexed queries + * o queries that cannot be parallel (max 1) + */ + @Test + public void testParallelQuery() { + final int numRows = 1000; + final String tableName = "ParallelQuery"; + final String createTable = "create table " + tableName + + "(id integer, primary key(id)) as json collection"; + String createIndex = "create index idx on " + tableName + + "(name as string)"; + final String query = "select * from " + tableName; + /* use an index query that will still return all results */ + final String indexQuery = "select * from " + tableName + + " where name > 'm'"; + tableOperation(handle, createTable, + new TableLimits(10000, 10000, 1000), + TableResult.State.ACTIVE, 10000); + tableOperation(handle, createIndex, null, null, + TableResult.State.ACTIVE, null); + PreparedStatement ps = + handle.prepare(new PrepareRequest().setStatement(query)) + .getPreparedStatement(); + int max = ps.getMaximumParallelism(); + assertEquals(multishardPartitions, max); + + /* load rows sufficient to cover all partitions */ + PutRequest preq = new PutRequest().setTableName(tableName); + putRowsInParallelTable(preq, numRows); + + final AtomicInteger readKB = new AtomicInteger(); + final Set keys = ConcurrentHashMap.newKeySet(); + for (int i = 0; i < max; i++) { + doSubsetQuery(ps, max, i + 1, keys, readKB); + } + /* did all of the results get read and are they unique? */ + assertEquals(numRows, keys.size()); + assertEquals(numRows, readKB.get()); + + /* + * do another "parallel" query but with 3 subsets and + * make sure that all rows are read, with no duplicates + */ + readKB.set(0); + keys.clear(); + for (int i = 0; i < 3; i++) { + doSubsetQuery(ps, 3, i + 1, keys, readKB); + } + assertEquals(numRows, keys.size()); + assertEquals(numRows, readKB.get()); + + /* use indexed, all shard query that returns all results */ + PreparedStatement psIndex = + handle.prepare(new PrepareRequest().setStatement(indexQuery)) + .getPreparedStatement(); + max = psIndex.getMaximumParallelism(); + assertEquals(multishardShards, max); + + readKB.set(0); + keys.clear(); + for (int i = 0; i < max; i++) { + doSubsetQuery(psIndex, max, i + 1, keys, readKB); + } + assertEquals(numRows, keys.size()); + assertEquals(numRows, readKB.get()); + + /* + * this is all shards use max of 2 + */ + readKB.set(0); + keys.clear(); + for (int i = 0; i < 2; i++) { + doSubsetQuery(psIndex, 2, i + 1, keys, readKB); + } + assertEquals(numRows, keys.size()); + assertEquals(numRows, readKB.get()); + + /* + * Rather than create a new test, tables, etc. reuse the existing + * table and data and run these queries in threads vs sequentially + */ + doQueryInThreads(ps, numRows); + doQueryInThreads(psIndex, numRows); + } + + private void doQueryInThreads(final PreparedStatement ps, int numRows) { + /* + * If the max is < 10, use it as num operations. 
If > 10 use + * 10 + */ + final AtomicInteger readKB = new AtomicInteger(); + final Set<Integer> keys = ConcurrentHashMap.newKeySet(); + final int max = Math.min(ps.getMaximumParallelism(), 10); + assertTrue(max >= multishardShards); + + ExecutorService executor = Executors.newFixedThreadPool(max); + /* create a list of callables and start them at the same time */ + Collection<Callable<Void>> tasks = new ArrayList<Callable<Void>>(); + for (int i = 0; i < max; i++) { + final int opNum = i + 1; + tasks.add(new Callable<Void>() { + @Override + public Void call() { + doSubsetQuery(ps, max, opNum, keys, readKB); + return null; + } + }); + } + try { + List<Future<Void>> futures = executor.invokeAll(tasks); + for(Future<Void> f : futures) { + f.get(); + } + } catch (Exception e) { + fail("Exception: " + e); + } + /* did all of the results get read and are they unique? */ + assertEquals(numRows, keys.size()); + assertEquals(numRows, readKB.get()); + } + + /* + * Do a single portion (operation) of a parallel query + */ + private void doSubsetQuery(PreparedStatement ps, + int numOperations, + int operationNumber, + Set<Integer> keys, + final AtomicInteger readKB) { + QueryRequest qr = new QueryRequest().setPreparedStatement(ps); + qr.setNumberOfOperations(numOperations); + qr.setOperationNumber(operationNumber); + QueryResult qres = null; + do { + qres = handle.query(qr); + for (MapValue v : qres.getResults()) { + keys.add(v.get("id").getInt()); + } + } while (!qr.isDone()); + readKB.addAndGet(qres.getReadKB()); + } + + private void putRowsInParallelTable(PutRequest preq, int numRows) { + for (int id = 0; id < numRows; id++) { + MapValue row = new MapValue() + .put("id", id) + .put("name", ("name_" + id)) + .put("age", (id % 25)); + preq.setValue(row); + PutResult pret = handle.put(preq); + assertNotNull(pret.getVersion()); + } + } + + /* + * Return the max amount of parallelism + */ + private int maxParallel(String query) { + return handle.prepare(new PrepareRequest().setStatement(query)) + .getPreparedStatement().getMaximumParallelism(); + } + + private void failParallelQuery(QueryRequest qr, + final String msg, + Class expected) { + try { + handle.query(qr); + fail("Expected exception on parallel query for : " + + msg); + } catch (Exception e) { + if (!expected.equals(e.getClass())) { + fail("Unexpected exception. Expected " + expected + ", got " + + e + " for case: " + msg); + } + } + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/ProxyConfigTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/ProxyConfigTest.java new file mode 100644 index 00000000..9b057a0b --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/ProxyConfigTest.java @@ -0,0 +1,159 @@ +/*- + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * See the file LICENSE for redistribution information.
+ * + */ + +package oracle.nosql.proxy; + +import static org.junit.Assert.assertEquals; + +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.io.PrintStream; +import java.util.Properties; +import java.util.concurrent.TimeUnit; + +import oracle.kv.Consistency; +import oracle.kv.KVStoreConfig; +import oracle.kv.Durability; +import oracle.nosql.proxy.util.TestBase; + +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; + +/** + * Validate the proxy configuration + */ +public class ProxyConfigTest extends TestBase { + private static PrintStream original; + + @BeforeClass + public static void staticSetUp() throws Exception { + /* + * Filter out the stderr output from proxy startup + */ + original = System.out; + System.setErr(new PrintStream(new OutputStream() { + @Override + public void write(int b) throws IOException {} + })); + } + + @AfterClass + public static void staticTearDown() throws Exception { + if (original != null) { + System.setErr(original); + } + } + + @Test + public void testPrecedence() throws Exception { + + Properties fileProps = new Properties(); + fileProps.setProperty(Config.NUM_REQUEST_THREADS.paramName, "123"); + fileProps.setProperty(Config.MONITOR_STATS_ENABLED.paramName, "true"); + fileProps.setProperty(Config.IDLE_READ_TIMEOUT.paramName, "8888"); + String configFileName = createConfigFile(fileProps); + + + Properties commandLine = new Properties(); + commandLine.setProperty(Config.NUM_REQUEST_THREADS.paramName, "456"); + commandLine.setProperty(Config.NUM_ACCEPT_THREADS.paramName, "5"); + commandLine.setProperty(Config.KV_REQUEST_TIMEOUT.paramName, "4000"); + commandLine.setProperty(Config.KV_CONSISTENCY.paramName, "ABSOLUTE"); + commandLine.setProperty(Config.CONFIG_FILE.paramName, configFileName); + + Config config = new Config(commandLine); + KVStoreConfig kvConfig = config.getTemplateKVStoreConfig(); + + /* don't validate all fields, just a few */ + assertEquals(456, config.getNumRequestThreads()); + assertEquals(5, config.getNumAcceptThreads()); + assertEquals(true, config.isMonitorStatsEnabled()); + assertEquals(8888, config.getIdleReadTimeout()); + assertEquals(4000, kvConfig.getRequestTimeout(TimeUnit.MILLISECONDS)); + assertEquals(Consistency.ABSOLUTE, kvConfig.getConsistency()); + } + + @Test + public void testSslProtocols() throws Exception { + Properties commandLine = new Properties(); + commandLine.setProperty(Config.SSL_PROTOCOLS.paramName, + "TLSv1.3,TLSv1.1"); + Config config = new Config(commandLine); + assertEquals(config.getSSLProtocols().length, 2); + + commandLine.setProperty(Config.SSL_PROTOCOLS.paramName, + "TLSv1.4,TLSv1.1"); + try { + new Config(commandLine); + } catch (IllegalArgumentException e) { + } + + commandLine.setProperty(Config.SSL_PROTOCOLS.paramName, + "TLSv1,"); + try { + new Config(commandLine); + } catch (IllegalArgumentException e) { + } + } + + @Test + public void testKVDurability() throws Exception { + String[] s = new String[]{"-" + Config.KV_DURABILITY.paramName, "COMMIT_ALL_SYNC"}; + Config c = new Config(s); + KVStoreConfig kvConfig = c.makeTemplateKVStoreConfig(); + Durability durability = kvConfig.getDurability(); + assertEquals(durability.getMasterSync(), Durability.SyncPolicy.SYNC); + assertEquals(durability.getReplicaSync(), Durability.SyncPolicy.SYNC); + assertEquals(durability.getReplicaAck(), + Durability.ReplicaAckPolicy.SIMPLE_MAJORITY); + + s = new String[]{"-" + Config.KV_DURABILITY.paramName, 
"COMMIT_ALL_WRITE_NO_SYNC"}; + c = new Config(s); + kvConfig = c.makeTemplateKVStoreConfig(); + durability = kvConfig.getDurability(); + assertEquals(durability.getMasterSync(), Durability.SyncPolicy.WRITE_NO_SYNC); + assertEquals(durability.getReplicaSync(), Durability.SyncPolicy.WRITE_NO_SYNC); + assertEquals(durability.getReplicaAck(), + Durability.ReplicaAckPolicy.SIMPLE_MAJORITY); + + } + + @Test + public void testCommandLine() throws Exception { + + String[] s = new String[] {"-foo"}; + try { + new Config(s); + } catch (IllegalArgumentException e) { + } + + s = new String[] {Config.STORE_NAME.paramName, "StagingStore"}; + try { + new Config(s); + } catch (IllegalArgumentException e) { + } + + s = new String[] {"-" + Config.STORE_NAME.paramName, "StagingStore"}; + Config config = new Config(s); + assertEquals("StagingStore", config.getStoreName()); + + } + + + private String createConfigFile(Properties fileContents) + throws Exception { + + File configFile = new File(getTestDir(), "mock.config.props"); + OutputStream output = new FileOutputStream(configFile); + fileContents.store(output, "Mock config file"); + return configFile.getAbsolutePath(); + } + +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/ProxyHealthSourceTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/ProxyHealthSourceTest.java new file mode 100644 index 00000000..3e27a7e9 --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/ProxyHealthSourceTest.java @@ -0,0 +1,131 @@ +/*- + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * See the file LICENSE for redistribution information. + * + */ + +package oracle.nosql.proxy; + +import static org.junit.Assert.assertEquals; + +import java.util.ArrayList; +import java.util.List; +import java.util.Properties; + +import org.junit.Test; + +import oracle.nosql.common.sklogger.SkLogger; +import oracle.nosql.proxy.cloud.ProxyHealthSource; +import oracle.nosql.proxy.sc.SCTenantManager; +import oracle.nosql.proxy.security.AccessChecker; +import oracle.nosql.proxy.security.AccessCheckerFactory; +import oracle.nosql.util.HostPort; +import oracle.nosql.util.ServiceDirectory; +import oracle.nosql.util.ph.HealthStatus; + +public class ProxyHealthSourceTest extends ProxyTestBase { + + private static final SkLogger logger = new SkLogger( + ProxyHealthSourceTest.class.getName(), + "proxy", "proxytest.log"); + + @Test + public void scConnectivityTest() throws Exception { + /* + * 1. Set up a test Proxy with Wrong SC IP address. + */ + SCTenantManager testTM = + new SCTenantManager("V0", + 0, 0, true /* isChildTableEnabled */, 0, + new TestServiceDirectory()); + AccessChecker checker = + AccessCheckerFactory.createInsecureAccessChecker(); + HostPort hp = new HostPort("errorhost", 8888); + testTM.establishURLBase(hp.toUrl(false), true /* reset */); + Properties commandLine = new Properties(); + commandLine.setProperty(Config.PROXY_TYPE.paramName, + Config.ProxyType.CLOUDTEST.name()); + + commandLine.setProperty(Config.HTTP_PORT.paramName, + Integer.toString(9095)); + + commandLine.setProperty(Config.HTTPS_PORT.paramName, + Integer.toString(9096)); + commandLine.setProperty(Config.NUM_REQUEST_THREADS.paramName, + Integer.toString(1)); + /* Disable pulling rules thread in FilterHandler */ + commandLine.setProperty(Config.PULL_RULES_INTERVAL_SEC.paramName, + Integer.toString(0)); + + Proxy testProxy = ProxyMain.startProxy(commandLine, testTM, + checker, audit); + + /* + * 2. 
Check Proxy HealthStatus is YELLOW as SC can't be connected. + */ + ProxyHealthSource healthSource = testProxy.getHealthSource(); + List errors = new ArrayList<>(); + HealthStatus status = healthSource.getStatus("Proxy", + "Proxy0", + "localhost", + logger, + errors); + assertEquals(HealthStatus.YELLOW, status); + assertEquals(1, errors.size()); + /* + * Minicloud test only + */ + if (useMiniCloud) { + /* + * 3. Set TM to the real SC IP address. + */ + hp = new HostPort(scHost, scPort); + testTM.establishURLBase(hp.toUrl(false), true /* reset */); + /* + * Wait more than 1 minute for the last failed SC request to expire. + */ + try { + Thread.sleep(61_000); + } catch (InterruptedException e) { + } + /* + * 4. Check Proxy HealthStatus is GREEN now. + */ + healthSource = testProxy.getHealthSource(); + errors = new ArrayList<>(); + status = healthSource.getStatus("Proxy", "Proxy0", "localhost", + logger, errors); + assertEquals(errors.toString(), HealthStatus.GREEN, status); + assertEquals(errors.toString(), 0, errors.size()); + } + } + + class TestServiceDirectory implements ServiceDirectory { + + /** + * Returns a positive value as the service (region) identifier. + * A positive value is required to indicate that this proxy + * is in a "cloud" environment - whether it's cloudsim or a unittest. + */ + @Override + public int getLocalServiceInteger() { + return 1; + } + + @Override + public String getLocalServiceName() { + return "localPP"; + } + + @Override + public String translateToRegionName(String serviceName) { + return serviceName + "-region"; + } + + @Override + public String validateRemoteReplica(String targetRegionName) { + return targetRegionName + "-servicename"; + } + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/ProxyI18NTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/ProxyI18NTest.java new file mode 100644 index 00000000..b59be760 --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/ProxyI18NTest.java @@ -0,0 +1,436 @@ +/*- + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle NoSQL + * Database made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/nosqldb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle NoSQL Database for a copy of the license and + * additional information.
+ */ + +package oracle.nosql.proxy; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.fail; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.FileOutputStream; +import java.io.FileReader; +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.OutputStreamWriter; +import java.util.UUID; + +import oracle.nosql.driver.NoSQLException; +import oracle.nosql.driver.ops.DeleteRequest; +import oracle.nosql.driver.ops.GetRequest; +import oracle.nosql.driver.ops.GetResult; +import oracle.nosql.driver.ops.PutRequest; +import oracle.nosql.driver.ops.PutResult; +import oracle.nosql.driver.ops.QueryRequest; +import oracle.nosql.driver.ops.QueryResult; +import oracle.nosql.driver.ops.TableLimits; +import oracle.nosql.driver.ops.TableResult; +import oracle.nosql.driver.values.FieldValue; +import oracle.nosql.driver.values.JsonOptions; +import oracle.nosql.driver.values.MapValue; +import oracle.nosql.driver.values.TimestampValue; + +import org.junit.Test; + +/** + * Test I18N in the cloud driver. This test and associated data files were + * supplied by the i18n group. + */ +public class ProxyI18NTest extends ProxyTestBase { + private static final String basePath = + getProxyBase() + "/oracle/nosql/proxy/"; + private static final String[] Jsonfilesname = { + "utf8.json","utf8bom.json", + "utf16le.json", "utf16lebom.json", + "utf16be.json","utf16bebom.json" + }; + + private static final String expfile = "utf8_testdata.txt"; + private static final String outputres = "results_testdata.txt"; + private static final String jsondata = "utf8_jsondata.txt"; + private String expstr = null; + + /* + * Test creation from various json encodings + */ + @Test + public void createJsonTest(){ + try { + for (String element : Jsonfilesname) { + File file = new File(basePath + element); + FileInputStream fis = new FileInputStream(file); + MapValue mv = + FieldValue.createFromJson(fis, new JsonOptions()).asMap(); + expstr = readexpfile("fr",1); + assertEquals(expstr, mv.getString("name")); + fis.close(); + } + } catch (Exception e) { + fail("Exception: " + e); + } + } + + /* + * Test French input + */ + @Test + public void fr_simpleTest() throws Exception { + + try { + /* + * Create a simple table with an integer key and a single + * name field + */ + TableResult tres = tableOperation( + handle, + "create table if not exists users(id integer, " + + "name string, primary key(id))", + new TableLimits(500, 500, 50), + TableResult.State.ACTIVE, + 20000); + assertEquals(TableResult.State.ACTIVE, tres.getTableState()); + + /* PUT a row */ + + /* construct a simple row */ + MapValue value = new MapValue().put("id", 1). 
+ put("name", "\u00E7\u00E9_myname"); + + PutRequest putRequest = new PutRequest() + .setValue(value) + .setTableName("users"); + + PutResult putRes = handle.put(putRequest); + assertNotNull("Put failed", putRes.getVersion()); + assertWriteKB(putRes); + + /* GET the row */ + MapValue key = new MapValue().put("id", 1); + GetRequest getRequest = new GetRequest() + .setKey(key) + .setTableName("users"); + + GetResult getRes = handle.get(getRequest); + + String getstr = getRes.getValue().toString(); + + expstr = readexpfile("fr",2); + + assertEquals(expstr, getstr); + + /* PUT a row using JSON */ + + /* construct a simple row */ + String jsonString = readexpfile("fr",3); + putRequest = new PutRequest() + .setValueFromJson(jsonString, null) // no options + .setTableName("users"); + + putRes = handle.put(putRequest); + + /* GET the new row */ + key = new MapValue().put("id", 2); + getRequest = new GetRequest() + .setKey(key) + .setTableName("users"); + + getRes = handle.get(getRequest); + + expstr = readexpfile("fr",3); + getstr = getRes.getValue().toString(); + + assertEquals(expstr, getstr); + + /* DELETE a row */ + DeleteRequest delRequest = new DeleteRequest() + .setKey(key) + .setTableName("users"); + + handle.delete(delRequest); + } catch (NoSQLException nse) { + System.err.println("Op failed: " + nse.getMessage()); + } catch (Exception e) { + System.err.println("Exception processing msg: " + e); + e.printStackTrace(); + } + } + + @Test + public void jsontableTest(){ + + /* Create a table */ + TableResult tres = tableOperation( + handle, + "create table if not exists restaurants(uid string, " + + "restaurantJSON JSON, primary key(uid))", + new TableLimits(500, 500, 50), + TableResult.State.ACTIVE, + 20000); + + assertEquals(TableResult.State.ACTIVE, tres.getTableState()); + + /* Create an index */ + tres = tableOperation( + handle, + "CREATE INDEX IF NOT EXISTS idx_json_name on restaurants " + + " (restaurantJSON.name as string)", + null, + TableResult.State.ACTIVE, + 20000); + assertEquals(TableResult.State.ACTIVE, tres.getTableState()); + + /* populate jason data to table */ + BufferedReader br = null; + FileReader fr = null; + + try { + String jsonString = ""; + String currLine; + int pCount = 0; + boolean buildObj = false; + boolean beganParsing = false; + String jsonfilePath = basePath + jsondata; + + fr = new FileReader(jsonfilePath); + br = new BufferedReader(fr); + + + /* + * Parse the sample JSON file to find the matching parenthesis to + * construct JSON string + */ + while ((currLine = br.readLine()) != null) { + pCount += countParens(currLine, '{'); + + // Empty line in the data file + if (currLine.length() == 0) { + continue; + } + + // Comments must start at column 0 in the + // data file. + if (currLine.charAt(0) == '#') { + continue; + } + + // If we've found at least one open paren, it's time to + // start collecting data + if (pCount > 0) { + buildObj = true; + beganParsing = true; + } + + if (buildObj) { + jsonString += currLine; + } + + /* + * If our open and closing parens balance (the count + * is zero) then we've collected an entire object + */ + pCount -= countParens(currLine, '}'); + if (pCount < 1) + { + buildObj = false; + /* + * If we started parsing data, but buildObj is false + * then that means we've reached the end of a JSON + * object in the input file. 
So write the object + * to the table, using the PutRequest + */ + } + + if (beganParsing && !buildObj) { + + /* + * Use the putFromJSON to automatically convert JSON string + * into JSON object + */ + MapValue value = new MapValue().put("uid", generateUUID()). + putFromJson("restaurantJSON", jsonString, null); + PutRequest putRequest = new PutRequest().setValue(value). + setTableName("restaurants"); + PutResult putRes = handle.put(putRequest); + assertNotNull(putRes.getVersion()); + jsonString = ""; + } + } + + /* query json table */ + /* index line 4 name in expfile */ + String idxname = readexpfile("fr", 4 ); + String predQuery = "SELECT * FROM restaurants r WHERE " + + " r.restaurantJSON.name = \"" + idxname + "\""; + //String predQuery = "SELECT * FROM restaurants r WHERE " + + //" r.restaurantJSON.name < \"" + idxname + "\""; + + //Create the Query Request + QueryRequest queryRequest = new QueryRequest(). + setStatement(predQuery); + + //Execute the query and get the response + QueryResult queryRes = handle.query(queryRequest); + if (queryRes.getResults().size() >0) { + String name; + String address; + String phonenumber; + String mobile_reserve_url; + + for (MapValue record : queryRes.getResults()) { + MapValue jsonValue = record.get("restaurantJSON").asMap(); + + name = jsonValue.getString("name"); + address = jsonValue.getString("address"); + phonenumber = jsonValue.getString("phone"); + mobile_reserve_url = + jsonValue.getString("mobile_reserve_url"); + + // write the result data to an outputfile + String oputrespath = basePath + outputres; + OutputStreamWriter pw = null; + FileOutputStream fs = + new FileOutputStream(oputrespath,true); + pw = new OutputStreamWriter(fs,"UTF8"); + pw.write(name + "\t"); + pw.write(address + "\t"); + pw.write(phonenumber + "\t"); + pw.write(mobile_reserve_url + "\n"); + pw.close(); + /* delete output file */ + new File(oputrespath).delete(); + } + } + } catch (FileNotFoundException fnfe) { + fail(" File not found: " + fnfe ); + } catch (IOException ioe) { + fail("IOException: " + ioe ); + System.exit(-1); + } catch (NoSQLException nse) { + fail("jsontableTest Op failed: " + nse.getMessage()); + } catch (Exception e) { + fail( "Exception processing msg: " + e ); + } finally { + try { + if (br != null) { + br.close(); + } + if (fr != null) { + fr.close(); + } + } catch (IOException iox) { + // ignore + } + } + } + + @Test + public void timestampTest(){ + String timesp1 = "1970-01-01T00:00:00Z"; + String timesp2 = "1970-01-01T00:00:00+00:00"; + timestamp(timesp1); + timestamp(timesp2); + } + + /* + * Used by populateTable() to know when a JSON object + * begins and ends in the input data file. 
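+ * (In this test it is the JSON parsing loop in jsontableTest() that calls it for each line read.)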
+ */ + private static int countParens(String line, char p) { + int c = 0; + for (int i = 0; i < line.length(); i++) { + if (line.charAt(i) == p) { + c++; + } + } + return c; + } + + private static String generateUUID() { + return UUID.randomUUID().toString().replace("-", ""); + } + + private static String readexpfile(String lang, int line){ + BufferedReader br = null; + InputStreamReader isr = null; + String currLine; + String expstr = null; + String langfg = lang+"{"; + + try { + //Get the expected data file + File file = new File(basePath + expfile); + + //Read file content using utf8 + isr = new InputStreamReader(new FileInputStream(file), "utf8"); + + br = new BufferedReader(isr); + + while ((currLine = br.readLine()) != null) { + + // Empty line in the data file + if (currLine.length() == 0) { + continue; + } + + // Comments must start at column 0 in the data file. + if (currLine.charAt(0) == '#') { + continue; + } + + /* + * If we've found the expected language, it's time to start + * collecting data + */ + if (currLine.equals(langfg)) { + int i = 0; + while (i != line){ + i++ ; + currLine = br.readLine(); + } + String currstr = currLine.substring(currLine.indexOf(":")+1); + expstr = currstr.trim(); + break; + } + } + } catch (Exception e) { + fail("Exception: " + e); + } finally { + try { + if (isr != null) { + isr.close(); + } + if (br != null) { + br.close(); + } + } catch (IOException ioe) { + // ignore + } + } + + return expstr; + } + + /* Used by timestamp test is enabled */ + private static void timestamp(String s) { + try { + @SuppressWarnings("unused") + TimestampValue v = new TimestampValue(s); + // System.out.println(s +" = long(" + v.getLong() + ")"); + } catch (Exception e) { + fail("test timestamp failed: " + e.getMessage()); + } + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/ProxyTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/ProxyTest.java new file mode 100644 index 00000000..c2dfb8a6 --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/ProxyTest.java @@ -0,0 +1,3228 @@ +/*- + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * See the file LICENSE for redistribution information. 
+ * + */ + +package oracle.nosql.proxy; + +import static oracle.nosql.driver.ops.TableLimits.CapacityMode.ON_DEMAND; +import static oracle.nosql.proxy.protocol.Protocol.TABLE_USAGE_NUMBER_LIMIT; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeTrue; + +import java.net.URL; +import java.sql.Timestamp; +import java.time.Instant; +import java.time.LocalDate; +import java.time.ZoneId; +import java.time.ZoneOffset; +import java.time.format.DateTimeFormatter; +import java.time.format.DateTimeParseException; +import java.time.temporal.ChronoField; +import java.time.temporal.TemporalAccessor; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeSet; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; + +import oracle.nosql.driver.Consistency; +import oracle.nosql.driver.DefinedTags; +import oracle.nosql.driver.Durability; +import oracle.nosql.driver.Durability.SyncPolicy; +import oracle.nosql.driver.FreeFormTags; +import oracle.nosql.driver.Durability.ReplicaAckPolicy; +import oracle.nosql.driver.IndexExistsException; +import oracle.nosql.driver.IndexNotFoundException; +import oracle.nosql.driver.KeySizeLimitException; +import oracle.nosql.driver.NoSQLHandle; +import oracle.nosql.driver.NoSQLHandleConfig; +import oracle.nosql.driver.OperationThrottlingException; +import oracle.nosql.driver.ReadThrottlingException; +import oracle.nosql.driver.RowSizeLimitException; +import oracle.nosql.driver.TableExistsException; +import oracle.nosql.driver.TableNotFoundException; +import oracle.nosql.driver.TimeToLive; +import oracle.nosql.driver.Version; +import oracle.nosql.driver.WriteThrottlingException; +import oracle.nosql.driver.ops.DeleteRequest; +import oracle.nosql.driver.ops.DeleteResult; +import oracle.nosql.driver.ops.GetIndexesRequest; +import oracle.nosql.driver.ops.GetIndexesResult; +import oracle.nosql.driver.ops.GetRequest; +import oracle.nosql.driver.ops.GetResult; +import oracle.nosql.driver.ops.GetTableRequest; +import oracle.nosql.driver.ops.ListTablesRequest; +import oracle.nosql.driver.ops.ListTablesResult; +import oracle.nosql.driver.ops.PrepareRequest; +import oracle.nosql.driver.ops.PrepareResult; +import oracle.nosql.driver.ops.PreparedStatement; +import oracle.nosql.driver.ops.PutRequest; +import oracle.nosql.driver.ops.PutRequest.Option; +import oracle.nosql.driver.ops.TableUsageResult.TableUsage; +import oracle.nosql.driver.ops.PutResult; +import oracle.nosql.driver.ops.QueryRequest; +import oracle.nosql.driver.ops.QueryResult; +import oracle.nosql.driver.ops.TableLimits; +import oracle.nosql.driver.ops.TableRequest; +import oracle.nosql.driver.ops.TableResult; +import oracle.nosql.driver.ops.TableUsageRequest; +import oracle.nosql.driver.ops.TableUsageResult; +import oracle.nosql.driver.ops.WriteMultipleRequest; +import oracle.nosql.driver.ops.WriteMultipleResult; +import oracle.nosql.driver.ops.WriteMultipleResult.OperationResult; +import oracle.nosql.driver.ops.WriteRequest; +import oracle.nosql.driver.ops.WriteResult; 
+import oracle.nosql.driver.values.BinaryValue; +import oracle.nosql.driver.values.BooleanValue; +import oracle.nosql.driver.values.FieldValue; +import oracle.nosql.driver.values.IntegerValue; +import oracle.nosql.driver.values.JsonNullValue; +import oracle.nosql.driver.values.MapValue; +import oracle.nosql.driver.values.NullValue; +import oracle.nosql.driver.values.StringValue; +import oracle.nosql.driver.values.TimestampValue; +import oracle.nosql.util.HttpResponse; + +import org.junit.FixMethodOrder; +import org.junit.Test; +import org.junit.runners.MethodSorters; + +/* + * The tests are ordered so that the zzz* test goes last so it picks up + * DDL history reliably. + */ +@FixMethodOrder(MethodSorters.NAME_ASCENDING) +public class ProxyTest extends ProxyTestBase { + /* + * The time stamp string pattern used to parse start/end range parameter + * of table usage. + */ + private final static String TimestampPattern = + "yyyy-MM-dd['T'HH:mm:ss[.SSS]]"; + private final static ZoneId UTCZone = ZoneId.of(ZoneOffset.UTC.getId()); + private final static DateTimeFormatter timestampFormatter = + DateTimeFormatter.ofPattern(TimestampPattern).withZone(UTCZone); + + private final static int USAGE_TIME_SLICE_MS = 60 * 1000; + final static int KEY_SIZE_LIMIT = rlimits.getPrimaryKeySizeLimit(); + final static int ROW_SIZE_LIMIT = rlimits.getRowSizeLimit(); + + @Test + public void smokeTest() { + + try { + + MapValue key = new MapValue().put("id", 10); + + MapValue value = new MapValue().put("id", 10).put("name", "jane"); + + /* drop a table */ + TableResult tres = tableOperation(handle, + "drop table if exists testusers", + null, TableResult.State.DROPPED, + 20000); + assertNotNull(tres.getTableName()); + assertTrue(tres.getTableState() == TableResult.State.DROPPED); + assertNull(tres.getTableLimits()); + + /* Create a table */ + tres = tableOperation( + handle, + "create table if not exists testusers(id integer, " + + "name string, primary key(id))", + new TableLimits(500, 500, 50), + 20000); + assertEquals(TableResult.State.ACTIVE, tres.getTableState()); + + /* Create an index */ + tres = tableOperation( + handle, + "create index if not exists Name on testusers(name)", + null, + TableResult.State.ACTIVE, + 20000); + assertEquals(TableResult.State.ACTIVE, tres.getTableState()); + + /* GetTableRequest for table that doesn't exist */ + try { + GetTableRequest getTable = + new GetTableRequest() + .setTableName("not_a_table"); + tres = handle.getTable(getTable); + fail("Table should not be found"); + } catch (TableNotFoundException tnfe) {} + + /* list tables */ + ListTablesRequest listTables = + new ListTablesRequest(); + ListTablesResult lres = handle.listTables(listTables); + /* + * the test cases don't yet clean up so there may be additional + * tables present, be flexible in this assertion. + */ + assertTrue(lres.getTables().length >= 1); + assertNotNull(lres.toString()); + + /* getTableUsage. It won't return much in test mode */ + if (!onprem) { + TableUsageRequest gtu = new TableUsageRequest() + .setTableName("testusers").setLimit(2) + .setEndTime(System.currentTimeMillis()); + TableUsageResult gtuRes = handle.getTableUsage(gtu); + assertNotNull(gtuRes); + assertNotNull(gtuRes.getUsageRecords()); + } + + /* PUT */ + PutRequest putRequest = new PutRequest() + .setValue(value) + .setTableName("testusers"); + + PutResult res = handle.put(putRequest); + assertNotNull("Put failed", res.getVersion()); + assertWriteKB(res); + /* put a few more. 
set TTL to test that path */ + putRequest.setTTL(TimeToLive.ofHours(2)); + for (int i = 20; i < 30; i++) { + value.put("id", i); + handle.put(putRequest); + } + + /* + * Test ReturnRow for simple put of a row that exists. 2 cases: + * 1. unconditional (will return info) + * 2. if absent (will return info) + */ + value.put("id", 20); + putRequest.setReturnRow(true); + PutResult pr = handle.put(putRequest); + assertNotNull(pr.getVersion()); /* success */ + assertNotNull(pr.getExistingVersion()); + assertNotNull(pr.getExistingValue()); + assertTrue(pr.getExistingModificationTime() != 0); + assertReadKB(pr); + assertWriteKB(pr); + + putRequest.setOption(Option.IfAbsent); + pr = handle.put(putRequest); + assertNull(pr.getVersion()); /* failure */ + assertNotNull(pr.getExistingVersion()); + assertNotNull(pr.getExistingValue()); + assertTrue(pr.getExistingModificationTime() != 0); + assertReadKB(pr); + + /* clean up */ + putRequest.setReturnRow(false); + putRequest.setOption(null); + + /* GET */ + GetRequest getRequest = new GetRequest() + .setKey(key) + .setTableName("testusers"); + + GetResult res1 = handle.get(getRequest); + assertNotNull("Get failed", res1.getJsonValue()); + assertReadKB(res1); + + /* DELETE */ + DeleteRequest delRequest = new DeleteRequest() + .setKey(key) + .setTableName("testusers"); + + DeleteResult del = handle.delete(delRequest); + assertTrue("Delete failed", del.getSuccess()); + assertWriteKB(del); + + /* GET -- no row, it was removed above */ + getRequest.setTableName("testusers"); + res1 = handle.get(getRequest); + assertNull(res1.getValue()); + assertReadKB(res1); + + /* GET -- no table */ + try { + getRequest.setTableName("not_a_table"); + res1 = handle.get(getRequest); + fail("Attempt to access missing table should have thrown"); + } catch (TableNotFoundException nse) { + /* success */ + } + + /* PUT -- invalid row -- this will throw */ + try { + value.remove("id"); + value.put("not_a_field", 1); + res = handle.put(putRequest); + fail("Attempt to put invalid row should have thrown"); + } catch (IllegalArgumentException iae) { + /* success */ + } + } catch (Exception e) { + checkErrorMessage(e); + e.printStackTrace(); + fail("Exception in test"); + } + } + + @Test + public void testCaseSensitivity() + throws Exception { + + assumeKVVersion("testCaseSensitivity", 23, 3, 0); + + String ddl = + "create table foo(id integer, S string, " + + "primary key(Id, s))"; + tableOperation( + handle, + ddl, + new TableLimits(500, 500, 50), + TableResult.State.ACTIVE, + 20000); + + MapValue val = new MapValue().put("id", 1).put("s", "xyz"); + PutRequest putReq = new PutRequest() + .setTableName("foo") + .setValue(val); + handle.put(putReq); + + GetRequest getRequest = new GetRequest() + .setKey(new MapValue().put("id", 1).put("s", "xyz")) + .setTableName("foo"); + GetResult res = handle.get(getRequest); + /* + * "Id" in pkey should have been turned into "id" and "s" to "S" + */ + assertTrue(res.getValue().contains("id")); + assertFalse(res.getValue().contains("Id")); + assertTrue(res.getValue().contains("S")); + assertFalse(res.getValue().contains("s")); + } + + @Test + public void testSimpleThroughput() throws Exception { + + assumeTrue(onprem == false); + + final String create = "create table testusersTp(id integer," + + "name string, primary key(id))"; + + /* Create a table */ + TableResult tres = tableOperation( + handle, + create, + new TableLimits(500, 500, 50), + 20000); + assertEquals(TableResult.State.ACTIVE, tres.getTableState()); + + MapValue value = new 
MapValue().put("id", 10).put("name", "jane"); + + /* + * Handle some Put cases + */ + PutRequest putRequest = new PutRequest() + .setValue(value) + .setTableName("testusersTp"); + + + PutResult res = handle.put(putRequest); + assertNotNull("Put failed", res.getVersion()); + int origRead = res.getReadKB(); + int origWrite = res.getWriteKB(); + assertEquals(1, origWrite); + assertEquals(0, origRead); + + /* + * do a second put. Read should still be 0, write will increase + * because it's an update, which counts the "delete" + */ + res = handle.put(putRequest); + int newRead = res.getReadKB(); + int newWrite = res.getWriteKB(); + assertEquals(2*origWrite, newWrite); + assertEquals(0, newRead); + + /* set return row and expect read unit is 1 */ + putRequest.setReturnRow(true); + res = handle.put(putRequest); + newRead = res.getReadKB(); + newWrite = res.getWriteKB(); + assertEquals(2*origWrite, newWrite); + assertEquals(1, newRead); + + /* make it ifAbsent and verify read and write consumption */ + putRequest.setOption(PutRequest.Option.IfAbsent); + res = handle.put(putRequest); + assertNull("Put should have failed", res.getVersion()); + /* use read units because in a write, readKB != readUnits */ + newRead = res.getReadUnits(); + newWrite = res.getWriteKB(); + /* + * no write, but read is min read + record size, former for the version + * and the latter for the value + */ + assertEquals(0, newWrite); + assertEquals(1 + origWrite, newRead); + } + + /** + * Test bad urls. + */ + @Test + public void testBadURL() throws Exception { + /* bad port */ + tryURL(new URL("http", getProxyHost(), getProxyPort() + 7, "/")); + /* bad host */ + tryURL(new URL("http", "nohost", getProxyPort(), "/")); + } + + private void tryURL(URL url) { + try { + NoSQLHandleConfig config = new NoSQLHandleConfig(url); + setHandleConfig(config); + NoSQLHandle myhandle = getHandle(config); + myhandle.close(); + fail("Connection should have failed"); + } catch (Exception e) { + /* TODO: check for specific exception */ + /* success */ + } + } + + /** + * Test that throttling happens. This requires its own table and + * handle. 
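+ * The handle is created with retries disabled and the table with TableLimits(1, 1, 50), so tight loops of puts, gets, and queries exceed the per-second allowance quickly.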
+ */ + @Test + public void throttleTest() throws Exception { + + assumeTrue(onprem == false); + + /* this test is invalid with proxy-level rate limiting */ + assumeTrue(Boolean.getBoolean(PROXY_DRL_ENABLED_PROP) == false); + + /* + * Create a new handle configured with no retries + */ + NoSQLHandleConfig config = new NoSQLHandleConfig(getProxyEndpoint()); + setHandleConfig(config); + + /* + * no retries + */ + config.configureDefaultRetryHandler(0, 0); + + /* + * Open the handle + */ + NoSQLHandle myhandle = getHandle(config); + + MapValue key = new MapValue().put("id", 10); + MapValue value = new MapValue().put("id", 10).put("name", "jane"); + + /* Create a table with small throughput */ + TableResult tres = tableOperation( + myhandle, + "create table testusersThrottle(id integer, " + + "name string, primary key(id))", + new TableLimits(1, 1, 50), + 20000); + assertEquals(TableResult.State.ACTIVE, tres.getTableState()); + + + int num = 0; + try { + while (true) { + /* PUT */ + PutRequest putRequest = new PutRequest() + .setValue(value) + .setTableName("testusersThrottle"); + myhandle.put(putRequest); + num++; + if (num > 1000) { + fail("Throttling exception should have been thrown"); + } + } + } catch (WriteThrottlingException wte) { + checkErrorMessage(wte); + /* success */ + } + num = 0; + try { + while (true) { + /* GET */ + GetRequest getRequest = new GetRequest() + .setKey(key) + .setTableName("testusersThrottle"); + GetResult gres = myhandle.get(getRequest); + assertNotNull(gres.getValue()); + num++; + if (num > 1000) { + fail("Throttling exception should have been thrown"); + } + } + } catch (ReadThrottlingException wte) { + checkErrorMessage(wte); + /* success */ + } + /* Query based on single partition scanning */ + String query = "select * from testusersThrottle where id = 10"; + PrepareRequest prepReq = new PrepareRequest().setStatement(query); + PrepareResult prepRes = handle.prepare(prepReq); + assertTrue("Prepare statement failed", + prepRes.getPreparedStatement() != null); + + /* Query with size limit */ + num = 0; + try { + while (true) { + QueryRequest queryReq = new QueryRequest() + .setPreparedStatement(prepRes) + .setMaxReadKB(3); + /* Query */ + QueryResult res = myhandle.query(queryReq); + assertTrue(res.getResults().size() == 1); + num++; + if (num > 1000) { + fail("Throttling exception should have been thrown"); + } + } + } catch (ReadThrottlingException rte) { + checkErrorMessage(rte); + /* success */ + } + /* Alter table limit to increase read limit */ + tres = tableOperation( + myhandle, + null, + new TableLimits(10, 200, 50), + "testusersThrottle", + TableResult.State.ACTIVE, + 20000); + assertEquals(TableResult.State.ACTIVE, tres.getTableState()); + + PutRequest putRequest = new PutRequest() + .setValue(value) + .setTableName("testusersThrottle"); + for (int i = 0; i < 200; i++) { + value.put("id", 100 + i); + myhandle.put(putRequest); + } + + /* prepare should get throttled */ + try { + for (int i = 0; i < 1000; i++) { + query = "select * from testusersThrottle where name = \"jane\""; + prepReq = new PrepareRequest().setStatement(query); + prepRes = myhandle.prepare(prepReq); + } + fail("Throttling exception should have been thrown"); + } catch (Exception rte) { + checkErrorMessage(rte); + } + + /* Query based on all partitions scanning */ + /* Use the original handle to get throttling retries */ + query = "select * from testusersThrottle where name = \"jane\""; + prepReq = new PrepareRequest().setStatement(query); + prepRes = handle.prepare(prepReq); + 
assertTrue("Prepare statement failed", + prepRes.getPreparedStatement() != null); + + /* Query with size limit */ + Thread.sleep(2000); /* try to avoid previous throttling */ + num = 0; + try { + QueryRequest queryReq = new QueryRequest() + .setPreparedStatement(prepRes) + .setMaxReadKB(20); + do { + /* Query */ + QueryResult res = myhandle.query(queryReq); + + /* it's possible to get 0 results and continuation key */ + num += res.getResults().size(); + if (num > 1000) { + fail("Throttling exception should have been thrown"); + } + } while (!queryReq.isDone()); + } catch (ReadThrottlingException rte) { + /* success */ + checkErrorMessage(rte); + } + assertTrue(num > 0); + + /* Query without limits */ + Thread.sleep(1000); + num = 0; + try { + while (true) { + QueryRequest queryReq = new QueryRequest() + .setPreparedStatement(prepRes); + /* Query */ + QueryResult res = myhandle.query(queryReq); + assertTrue(res.getResults().size() > 0); + num++; + if (num > 1000) { + fail("Throttling exception should have been thrown"); + } + } + } catch (ReadThrottlingException rte) { + /* success */ + checkErrorMessage(rte); + } + } + + @Test + public void droppedTableTest() throws Exception { + assumeTrue("Skipping droppedTableTest for minicloud test", + !cloudRunning); + + final String CREATE_TABLE = "create table if not exists testDropped(" + + "id integer, name string, primary key(id))"; + + /* create a table */ + TableResult tres; + tres = tableOperation(handle, + CREATE_TABLE, + new TableLimits(500, 500, 5), + 20000); + assertEquals(TableResult.State.ACTIVE, tres.getTableState()); + + /* start a background thread to drop the table after 5 seconds */ + Thread bg = new Thread(()-> { + try { + Thread.sleep(5_000); + } catch (Exception e) {} + tableOperation(handle, "drop table testDropped", + null, 20000); + }); + bg.start(); + + /* + * Run gets/puts for 10 seconds. After about 5 seconds they should + * start failing and consistently fail thereafter. 
+ * Note: this test is mainly designed to exercise the + * MetadataNotFoundException retry logic in the proxy + */ + MapValue key = new MapValue().put("id", 10); + MapValue value = new MapValue().put("id", 10).put("name", "jane"); + long endTimeMs = System.currentTimeMillis() + 10_000; + while (true) { + /* PUT */ + PutRequest putRequest = new PutRequest() + .setValue(value) + .setTimeout(1000) + .setTableName("testDropped"); + + try { + handle.put(putRequest); + } catch (TableNotFoundException tnfe) { + /* expected */ + } + + /* GET */ + GetRequest getRequest = new GetRequest() + .setKey(key) + .setTimeout(2000) + .setTableName("testDropped"); + + try { + handle.get(getRequest); + } catch (TableNotFoundException tnfe) { + /* expected */ + } + + if (System.currentTimeMillis() > endTimeMs) { + break; + } + + try { + Thread.sleep(50); + } catch (Exception e) { + break; + } + } + bg.join(10000); + } + + @Test + public void ddlTest() throws Exception { + final String CREATE_TABLE = "create table if not exists testusersX(" + + "id integer, name string, primary key(id))"; + final String CREATE_TABLE_NO_IFNOTEXISTS = "create table testusersX(" + + "id integer, name string, primary key(id))"; + final String CREATE_TABLE_SCHEMA_DIFF = + "create table if not exists testusersX(" + + "id integer, name string, age integer, primary key(id))"; + + final String BAD_DDL = "create tab x(id integer, " + + "name string, primary key(id))"; + final String ALTER_DDL = "alter table testusersX(add name1 string)"; + final String BAD_ADD_INDEX = + "create index idx on testusers_not_here(name)"; + final String BAD_ADD_TEXT_INDEX = + "create fulltext index idxText on testusersX(name)"; + final String ADD_INDEX = "create index idx on testusersX(name)"; + final String DROP_INDEX = "drop index idx on testusersX"; + final String DROP_INDEX_IFX = "drop index if exists idx on testusersX"; + final String DROP_DDL = "drop table testusersX"; + + TableResult tres; + + /* + * Bad syntax + */ + try { + tres = tableOperation(handle, + BAD_DDL, + null, + 20000); + fail("Expected IAE"); + } catch (IllegalArgumentException iae) { + checkErrorMessage(iae); + } + + /* + * Table doesn't exist + */ + tres = tableOperation(handle, + BAD_ADD_INDEX, + TableResult.State.ACTIVE, + TableNotFoundException.class); + + /* + * create the table to alter it + */ + tres = tableOperation(handle, + CREATE_TABLE, + new TableLimits(5000, 5000, 50), + 20000); + + assertEquals(TableResult.State.ACTIVE, tres.getTableState()); + + /* + * Table already exists. + */ + tres = tableOperation(handle, + CREATE_TABLE_NO_IFNOTEXISTS, + new TableLimits(5000, 5000, 50), + null, TableResult.State.ACTIVE, + TableExistsException.class); + + /* + * "create table if not exists" should not check schema for existing + * table. + */ + tres = tableOperation(handle, + CREATE_TABLE_SCHEMA_DIFF, + new TableLimits(5000, 5000, 50), + 20000); + + assertEquals(TableResult.State.ACTIVE, tres.getTableState()); + + /* + * Add index + */ + tres = tableOperation(handle, + ADD_INDEX, + null, + 20000); + + /* + * Index already exists. + */ + tres = tableOperation(handle, + ADD_INDEX, + TableResult.State.ACTIVE, + IndexExistsException.class); + + /* + * FullText index is not allowed. 
+ */ + tres = tableOperation(handle, + BAD_ADD_TEXT_INDEX, + TableResult.State.ACTIVE, + IllegalArgumentException.class); + + /* + * Drop index + */ + tres = tableOperation(handle, + DROP_INDEX, + null, + 20000); + + /* + * Drop index again, using if exists + */ + tres = tableOperation(handle, + DROP_INDEX_IFX, + null, + 20000); + + /* + * Alter the table + */ + tres = tableOperation(handle, + ALTER_DDL, + null, + 20000); + /* + * Alter the table limits + */ + if (!onprem) { + tres = tableOperation(handle, + null, + new TableLimits(50, 50, 10), + "testusersX", + TableResult.State.ACTIVE, + 20000); + assertEquals(TableResult.State.ACTIVE, tres.getTableState()); + assertEquals(50, tres.getTableLimits().getReadUnits()); + } + + /* + * drop the table + * NOTE: this sequence may, or may not work with the real Tenant + * Manager. The local/test version only updates the state of its + * cached tables on demand. I.e. if a table is dropped and never + * "gotten" again, it could live in the cache in DROPPING state for + * a very long time. TODO: time out cache entries. + */ + tres = tableOperation(handle, + DROP_DDL, + null, + 20000); + + /* + * the table should be gone now + */ + try { + GetTableRequest getTable = + new GetTableRequest().setTableName("testusersX"); + tres = handle.getTable(getTable); + fail("Table should not be found"); + } catch (TableNotFoundException tnfe) { + checkErrorMessage(tnfe); + } + + /* + * Unsupported ddl operations + */ + + /* CRATE USER */ + tres = tableOperation(handle, + "CREATE USER guest IDENTIFIED BY \"welcome\"", + TableResult.State.ACTIVE, + IllegalArgumentException.class); + /* ALTER USER */ + tres = tableOperation(handle, + "ALTER USER guest ACCOUNT LOCK", + TableResult.State.ACTIVE, + IllegalArgumentException.class); + + /* DROP USER */ + tres = tableOperation(handle, + "DROP USER guest", + TableResult.State.ACTIVE, + IllegalArgumentException.class); + + /* CREATE ROLE */ + tres = tableOperation(handle, + "CREATE ROLE employee", + TableResult.State.ACTIVE, + IllegalArgumentException.class); + + /* DROP ROLE */ + tres = tableOperation(handle, + "DROP ROLE employee", + TableResult.State.ACTIVE, + IllegalArgumentException.class); + + /* GRANT */ + tres = tableOperation(handle, + "GRANT readwrite TO USER guest", + TableResult.State.ACTIVE, + IllegalArgumentException.class); + + /* REVOKE */ + tres = tableOperation(handle, + "REVOKE readwrite FROM USER guest", + TableResult.State.ACTIVE, + IllegalArgumentException.class); + + /* SHOW */ + tres = tableOperation(handle, + "SHOW TABLES", + TableResult.State.ACTIVE, + IllegalArgumentException.class); + + /* DESCRIBE */ + tres = tableOperation(handle, + "DESCRIBE TABLE testusersX", + TableResult.State.ACTIVE, + IllegalArgumentException.class); + + /* + * DML operation with TableRequest + */ + tres = tableOperation(handle, + "SELECT * FROM testusersX", + TableResult.State.ACTIVE, + IllegalArgumentException.class); + + tres = tableOperation(handle, + "UPDATE testusersX SET name = \"test\" " + + "where id = 1", + TableResult.State.ACTIVE, + IllegalArgumentException.class); + } + + @Test + public void testGetProvisionedTable() throws Exception { + TableLimits tableLimits = new TableLimits(10, 20, 1); + testGetTable(tableLimits, tableLimits); + } + + @Test + public void testGetAutoScalingTable() throws Exception { + if (cloudRunning && tenantLimits == null) { + /* Skip this test if tenantLimits is not provided */ + return; + } + + TableLimits tableLimits = new TableLimits(1); + TableLimits expectedLimits; + if 
(cloudRunning) { + expectedLimits = new TableLimits( + tenantLimits.getAutoScalingTableReadUnits(), + tenantLimits.getAutoScalingTableWriteUnits(), + tableLimits.getStorageGB(), + ON_DEMAND); + } else { + expectedLimits = new TableLimits( + Integer.MAX_VALUE - 1, + Integer.MAX_VALUE - 1, + tableLimits.getStorageGB(), + ON_DEMAND); + } + testGetTable(tableLimits, expectedLimits); + } + + private void testGetTable(TableLimits tableLimits, + TableLimits expectedLimits) throws Exception { + final String tableName = "getTableTest"; + final String statement = "create table if not exists " + tableName + + "(id integer, name string, primary key(id))"; + + TableRequest tableRequest = new TableRequest() + .setStatement(statement) + .setTableLimits(tableLimits) + .setTimeout(15000); + + TableResult tres = handle.tableRequest(tableRequest); + TableLimits resultLimits = tres.getTableLimits(); + if (resultLimits != null) { + assertEquals(expectedLimits.getStorageGB(), + resultLimits.getStorageGB()); + assertEquals(expectedLimits.getMode(), + resultLimits.getMode()); + } + + /* + * Get table with operation id but an invalid table name, expect the + * request to fail with IllegalArgumentException. + */ + GetTableRequest getReq = new GetTableRequest() + .setTableName("invalid") + .setOperationId(tres.getOperationId()); + try { + tres = handle.getTable(getReq); + fail("Expected IllegalArgumentException but getTable succeeded"); + } catch (IllegalArgumentException ex) { + /* expected */ + } + tres.waitForCompletion(handle, 20000, 1000); + + /* + * Get table, check the schema text contains the table name. + */ + getReq = new GetTableRequest().setTableName(tableName); + tres = handle.getTable(getReq); + assertTableOcid(tres.getTableId()); + assertNotNull(tres.getSchema()); + resultLimits = tres.getTableLimits(); + if (resultLimits != null) { + assertEquals(expectedLimits.getReadUnits(), + resultLimits.getReadUnits()); + assertEquals(expectedLimits.getWriteUnits(), + resultLimits.getWriteUnits()); + assertEquals(expectedLimits.getStorageGB(), + resultLimits.getStorageGB()); + assertEquals(expectedLimits.getMode(), + resultLimits.getMode()); + } + assertTrue(tres.getSchema().contains(tableName)); + } + + @Test + public void testListTables() { + final int numTables = 8; + final String ddlFmt = + "create table %s (id integer, name string, primary key(id))"; + final TableLimits tableLimits = new TableLimits(10, 10, 1); + final String[] namePrefix = new String[] {"USERB", "userA", "userC"}; + + if (onprem) { + handle.doSystemRequest("create namespace NS001", 20000, 1000); + } + + /* + * create tables + */ + TableResult tres; + Set<String> nameSorted = new TreeSet<>(); + for (int i = 0; i < numTables; i++) { + /* if onprem, create a mix of tables, some with namespaces */ + String tableName; + if (onprem && (i % 2) == 1) { + tableName = "NS001:" + namePrefix[i % namePrefix.length] + i; + } else { + tableName = namePrefix[i % namePrefix.length] + i; + } + tres = tableOperation(handle, + String.format(ddlFmt, tableName), + tableLimits, + 20000); + assertEquals(TableResult.State.ACTIVE, tres.getTableState()); + nameSorted.add(tableName); + } + List<String> nameSortedList = new ArrayList<>(nameSorted); + + /* + * List all tables + */ + ListTablesRequest req = new ListTablesRequest(); + ListTablesResult res = handle.listTables(req); + List<String> returnedTableNames = Arrays.asList(res.getTables()); + if (cloudRunning) { + /* verify tables sorted by name */ + assertEquals(nameSortedList, returnedTableNames); + } else { + /* verify all added tables are in list */ + for (String
name : nameSorted) { + assertTrue("Table " + name + " missing from listTables", + returnedTableNames.contains(name)); + } + } + + /* + * List all tables with limit + */ + int[] values = new int[] {0, 6, 2, 1}; + List<String> tables; + for (int limit : values) { + tables = doListTables(limit); + if (cloudRunning) { + /* verify tables sorted by name */ + assertEquals(nameSortedList, tables); + } + } + } + + /* Run list tables with limit specified */ + private List<String> doListTables(int limit) { + List<String> tables = new ArrayList<>(); + + ListTablesRequest req = new ListTablesRequest(); + req.setLimit(limit); + ListTablesResult res; + while(true) { + res = handle.listTables(req); + if (res.getTables().length > 0) { + tables.addAll(Arrays.asList(res.getTables())); + } + + if (limit == 0 || res.getTables().length < limit) { + break; + } + assertEquals(limit, res.getTables().length); + req.setStartIndex(res.getLastReturnedIndex()); + } + return tables; + } + + /** + * Tests serialization of types, including some coercion to schema + * types in the proxy. + */ + @Test + public void typeTest() throws Exception { + + final String TABLE_CREATE = + "create table if not exists Types( " + + "id integer, " + + "primary key(id), " + + "longField long, " + + "doubleField double, " + + "stringField string, " + + "numberField number, " + + "enumField enum(a,b,c)" + + ")"; + + final String jsonString = + "{" + + "\"id\":1, " + + "\"longField\": 123 ," + // int => long + "\"doubleField\":4 ," + // int => double + "\"stringField\":\"abc\" ," + // no coercion + "\"numberField\":4.5 ," + // double => number + "\"enumField\":\"b\"" + // string => enum + "}"; + TableResult tres; + + tres = tableOperation(handle, + TABLE_CREATE, + new TableLimits(50, 50, 50), + TableResult.State.ACTIVE, + 20000); + + assertEquals(TableResult.State.ACTIVE, tres.getTableState()); + + PutRequest pr = new PutRequest().setValueFromJson(jsonString, null). + setTableName("Types"); + PutResult pres = handle.put(pr); + assertNotNull(pres.getVersion()); + } + + @Test + public void recreateTest() throws Exception { + final String CREATE_TABLE = + "create table recreate( " + + "id integer, " + + "primary key(id), " + + "name string)"; + final String DROP_TABLE = "drop table recreate"; + TableResult tres = tableOperation(handle, + CREATE_TABLE, + new TableLimits(50, 50, 50), + 20000); + + assertEquals(TableResult.State.ACTIVE, tres.getTableState()); + + PutRequest pr = new PutRequest() + .setTableName("recreate") + .setValue(new MapValue().put("id", 1).put("name", "joe")); + PutResult pres = handle.put(pr); + assertNotNull(pres.getVersion()); + + tres = tableOperation(handle, + DROP_TABLE, + null, + 20000); + + tres = tableOperation(handle, + CREATE_TABLE, + new TableLimits(50, 50, 50), + 20000); + assertEquals(TableResult.State.ACTIVE, tres.getTableState()); + + pres = handle.put(pr); + assertNotNull(pres.getVersion()); + } + + + /** + * This test does a lot of simple operations in a loop in multiple threads, + * looking for HTTP transport problems. This is probably temporary.
+ */ + @Test + public void httpTest() { + ExecutorService executor = Executors.newFixedThreadPool(3); + Collection<Callable<Void>> tasks = new ArrayList<Callable<Void>>(); + for (int i = 0; i < 6; i++) { + tasks.add(new Callable<Void>() { + @Override + public Void call() { + doHttpTest(); + return null; + } + }); + } + try { + List<Future<Void>> futures = executor.invokeAll(tasks); + for(Future<Void> f : futures) { + f.get(); + } + } catch (Exception e) { + fail("Exception: " + e); + } + } + + private void doHttpTest() { + try { + + MapValue key = new MapValue().put("id", 10); + MapValue value = new MapValue().put("id", 10).put("name", "jane"); + + for (int i = 0; i < 10; i++) { + try { + /* Create a table */ + TableResult tres = tableOperation( + handle, + "create table if not exists testusers(id integer, " + + "name string, primary key(id))", + new TableLimits(500, 500, 50), + 20000); + assertEquals(TableResult.State.ACTIVE, tres.getTableState()); + } catch (Exception e) { + System.out.println("httpTest: exception in Thread " + + Thread.currentThread().getId() + + " on attempt " + i + ": " + + e); + } + } + + for (int i = 0; i < 100; i++) { + /* PUT */ + PutRequest putRequest = new PutRequest() + .setValue(value) + .setTableName("testusers"); + + PutResult res = handle.put(putRequest); + assertNotNull("Put failed", res.getVersion()); + assertWriteKB(res); + + /* GET */ + GetRequest getRequest = new GetRequest() + .setKey(key) + .setTableName("testusers"); + + GetResult res1 = handle.get(getRequest); + assertNotNull("Get failed", res1.getJsonValue()); + assertReadKB(res1); + } + } catch (Exception e) { + fail("Internal Exception: " + e); + } + } + + @Test + public void testPutGetDelete() { + + final String tableName = "testusers"; + final int recordKB = 2; + + /* Create a table */ + TableResult tres = tableOperation( + handle, + "create table if not exists testusers(id integer, " + + "name string, primary key(id))", + new TableLimits(500, 500, 50), + 20000); + assertEquals(TableResult.State.ACTIVE, tres.getTableState()); + + final String name = genString((recordKB - 1) * 1024); + MapValue value = new MapValue().put("id", 10).put("name", name); + MapValue newValue = new MapValue().put("id", 11).put("name", name); + MapValue newValue1 = new MapValue().put("id", 12).put("name", name); + MapValue newValue2 = new MapValue().put("id", 13).put("name", name); + + /* Durability will be ignored unless run with -Donprem=true */ + Durability dur = new Durability(SyncPolicy.WRITE_NO_SYNC, + SyncPolicy.NO_SYNC, + ReplicaAckPolicy.NONE); + + /* Put a row with empty table name: should get illegal argument */ + PutRequest putReq = new PutRequest() + .setValue(value) + .setDurability(dur) + .setTableName(""); + try { + handle.put(putReq); + fail("expected illegal argument exception on empty table name"); + } catch (IllegalArgumentException iae) { + /* success */ + } + + /* Put a row */ + putReq = new PutRequest() + .setValue(value) + .setDurability(dur) + .setTableName(tableName); + PutResult putRes = handle.put(putReq); + checkPutResult(putReq, putRes, + true /* shouldSucceed */, + false /* rowPresent */, + null /* expPrevValue */, + null /* expPrevVersion */, + false, /* modtime should be zero */ + recordKB, + false /* put overWrite */ ); + + /* Put a row again with SetReturnRow(false).
+ * expect no row returned + */ + putReq.setReturnRow(false); + putRes = handle.put(putReq); + checkPutResult(putReq, putRes, + true /* shouldSucceed */, + false /* rowPresent */, + null /* expPrevValue */, + null /* expPrevVersion */, + false, /* modtime should be zero */ + recordKB, + true /* put overWrite */); + Version oldVersion = putRes.getVersion(); + + /* + * Put row again with SetReturnRow(true), + * expect existing row returned. + */ + putReq.setReturnRow(true); + putRes = handle.put(putReq); + checkPutResult(putReq, putRes, + true /* shouldSucceed */, + true /* rowPresent */, + value /* expPrevValue */, + oldVersion /* expPrevVersion */, + true, /* modtime should be recent */ + recordKB, + true /* put overWrite */); + oldVersion = putRes.getVersion(); + + /* + * Put a new row with SetReturnRow(true), + * expect no existing row returned. + */ + putReq = new PutRequest() + .setValue(newValue) + .setDurability(dur) + .setTableName(tableName) + .setReturnRow(true); + putRes = handle.put(putReq); + checkPutResult(putReq, putRes, + true /* shouldSucceed */, + false /* rowPresent */, + null /* expPrevValue */, + null /* expPrevVersion */, + false, /* modtime should be zero */ + recordKB, + false /* put overWrite */); + + /* PutIfAbsent an existing row, it should fail */ + putReq = new PutRequest() + .setOption(Option.IfAbsent) + .setValue(value) + .setDurability(dur) + .setTableName(tableName); + putRes = handle.put(putReq); + checkPutResult(putReq, putRes, + false /* shouldSucceed */, + false /* rowPresent */, + null /* expPrevValue */, + null /* expPrevVersion */, + false, /* modtime should be zero */ + recordKB, + false /* put overWrite */); + /* + * PutIfAbsent fails + SetReturnRow(true), + * return existing value and version + */ + putReq.setReturnRow(true); + putRes = handle.put(putReq); + checkPutResult(putReq, putRes, + false /* shouldSucceed */, + true /* rowPresent */, + value /* expPrevValue */, + oldVersion /* expPrevVersion */, + true, /* modtime should be recent */ + recordKB, + false /* put overWrite */); + + /* PutIfPresent an existing row, it should succeed */ + putReq = new PutRequest() + .setOption(Option.IfPresent) + .setValue(value) + .setDurability(dur) + .setTableName(tableName); + putRes = handle.put(putReq); + checkPutResult(putReq, putRes, + true /* shouldSucceed */, + false /* rowPresent */, + null /* expPrevValue */, + null /* expPrevVersion */, + false, /* modtime should be zero */ + recordKB, + false /* put overWrite */); + oldVersion = putRes.getVersion(); + + /* + * PutIfPresent succeeds + SetReturnRow(true), + * expect existing row returned. + */ + putReq.setReturnRow(true); + putRes = handle.put(putReq); + checkPutResult(putReq, putRes, + true /* shouldSucceed */, + true /* rowPresent */, + value /* expPrevValue */, + oldVersion /* expPrevVersion */, + true, /* modtime should be recent */ + recordKB, + false /* put overWrite */); + Version ifVersion = putRes.getVersion(); + + /* PutIfPresent a new row, it should fail */ + putReq = new PutRequest() + .setOption(Option.IfPresent) + .setValue(newValue1) + .setDurability(dur) + .setTableName(tableName); + putRes = handle.put(putReq); + checkPutResult(putReq, putRes, + false /* shouldSucceed */, + false /* rowPresent */, + null /* expPrevValue */, + null /* expPrevVersion */, + false, /* modtime should be zero */ + recordKB, + false /* put overWrite */); + /* + * PutIfPresent fails + SetReturnRow(true), + * expect no existing row returned.
+ */ + putReq.setReturnRow(true); + putRes = handle.put(putReq); + checkPutResult(putReq, putRes, + false /* shouldSucceed */, + false /* rowPresent */, + null /* expPrevValue */, + null /* expPrevVersion */, + false, /* modtime should be zero */ + recordKB, + false /* put overWrite */); + + /* PutIfAbsent an new row, it should succeed */ + putReq = new PutRequest() + .setOption(Option.IfAbsent) + .setValue(newValue1) + .setDurability(dur) + .setTableName(tableName); + putRes = handle.put(putReq); + checkPutResult(putReq, putRes, + true /* shouldSucceed */, + false /* rowPresent */, + null /* expPrevValue */, + null /* expPrevVersion */, + false, /* modtime should be zero */ + recordKB, + false /* put overWrite */); + + /* PutIfAbsent success + SetReturnRow(true) */ + putReq.setValue(newValue2).setReturnRow(true); + putRes = handle.put(putReq); + checkPutResult(putReq, putRes, + true /* shouldSucceed */, + false /* rowPresent */, + null /* expPrevValue */, + null /* expPrevVersion */, + false, /* modtime should be zero */ + recordKB, + false /* put overWrite */); + + /* + * PutIfVersion an existing row with unmatched version, it should fail. + */ + putReq = new PutRequest() + .setOption(Option.IfVersion) + .setMatchVersion(oldVersion) + .setValue(value) + .setDurability(dur) + .setTableName(tableName); + putRes = handle.put(putReq); + checkPutResult(putReq, putRes, + false /* shouldSucceed */, + false /* rowPresent */, + null /* expPrevValue */, + null /* expPrevVersion */, + false, /* modtime should be zero */ + recordKB, + false /* put overWrite */); + /* + * PutIfVersion fails + SetReturnRow(true), + * expect existing row returned. + */ + putReq.setReturnRow(true); + putRes = handle.put(putReq); + checkPutResult(putReq, putRes, + false /* shouldSucceed */, + true /* rowPresent */, + value /* expPrevValue */, + ifVersion /* expPrevVersion */, + true, /* modtime should be recent */ + recordKB, + false /* put overWrite */); + + /* + * Put an existing row with matching version, it should succeed. + */ + putReq = new PutRequest() + .setOption(Option.IfVersion) + .setMatchVersion(ifVersion) + .setValue(value) + .setDurability(dur) + .setTableName(tableName); + putRes = handle.put(putReq); + checkPutResult(putReq, putRes, + true /* shouldSucceed */, + false /* rowPresent */, + null /* expPrevValue */, + null /* expPrevVersion */, + false, /* modtime should be zero */ + recordKB, + false /* put overWrite */); + ifVersion = putRes.getVersion(); + /* + * PutIfVersion succeed + SetReturnRow(true), + * expect no existing row returned. + */ + putReq.setMatchVersion(ifVersion).setReturnRow(true); + putRes = handle.put(putReq); + checkPutResult(putReq, putRes, + true /* shouldSucceed */, + false /* rowPresent */, + null /* expPrevValue */, + null /* expPrevVersion */, + false, /* modtime should be zero */ + recordKB, + false /* put overWrite */); + Version newVersion = putRes.getVersion(); + + /* + * Put with IfVersion but no matched version is specified, put should + * fail. 
+ */ + putReq = new PutRequest() + .setOption(Option.IfVersion) + .setValue(value) + .setDurability(dur) + .setTableName(tableName); + try { + putRes = handle.put(putReq); + fail("Put with IfVersion should fail"); + } catch (IllegalArgumentException iae) { + checkErrorMessage(iae); + } + + /* + * Get + */ + + /* Get a row with empty table name: should get illegal argument */ + MapValue key = new MapValue().put("id", 10); + GetRequest getReq = new GetRequest() + .setKey(key) + .setTableName(""); + try { + handle.get(getReq); + fail("expected illegal argument exception on empty table name"); + } catch (IllegalArgumentException iae) { + /* success */ + } + + /* Get a row */ + getReq = new GetRequest() + .setKey(key) + .setTableName(tableName); + GetResult getRes = handle.get(getReq); + checkGetResult(getReq, getRes, + true /* rowPresent*/, + value, + null, /* Don't check version if Consistency.EVENTUAL */ + true, /* modtime should be recent */ + recordKB); + + /* Get a row with ABSOLUTE consistency */ + getReq.setConsistency(Consistency.ABSOLUTE); + getRes = handle.get(getReq); + checkGetResult(getReq, getRes, + true /* rowPresent*/, + value, + newVersion, + true, /* modtime should be recent */ + recordKB); + + /* Get non-existing row */ + key = new MapValue().put("id", 100); + getReq = new GetRequest() + .setKey(key) + .setTableName(tableName); + getRes = handle.get(getReq); + checkGetResult(getReq, getRes, + false /* rowPresent*/, + null /* expValue */, + null /* expVersion */, + false, /* modtime should be zero */ + recordKB); + + /* Get a row with ABSOLUTE consistency */ + getReq.setConsistency(Consistency.ABSOLUTE); + getRes = handle.get(getReq); + checkGetResult(getReq, getRes, + false /* rowPresent*/, + null /* expValue */, + null /* expVersion */, + false, /* modtime should be zero */ + recordKB); + + /* Delete a row with empty table name: should get illegal argument */ + key = new MapValue().put("id", 10); + DeleteRequest delReq = new DeleteRequest() + .setKey(key) + .setTableName(""); + try { + handle.delete(delReq); + fail("expected illegal argument exception on empty table name"); + } catch (IllegalArgumentException iae) { + /* success */ + } + + /* Delete a row */ + delReq = new DeleteRequest() + .setKey(key) + .setTableName(tableName); + DeleteResult delRes = handle.delete(delReq); + checkDeleteResult(delReq, delRes, + true /* shouldSucceed */, + false /* rowPresent */, + null /* expPrevValue */, + null /* expPrevVersion */, + false, /* modtime should be zero */ + recordKB); + + /* Put the row back to store */ + putReq = new PutRequest().setValue(value).setTableName(tableName); + putRes = handle.put(putReq); + oldVersion = putRes.getVersion(); + assertNotNull(oldVersion); + + /* Delete succeed + setReturnRow(true), existing row returned. */ + delReq.setReturnRow(true); + delRes = handle.delete(delReq); + checkDeleteResult(delReq, delRes, + true /* shouldSucceed */, + true /* rowPresent */, + value /* expPrevValue */, + oldVersion /* expPrevVersion */, + true, /* modtime should be zero */ + recordKB); + + /* Delete fail + setReturnRow(true), no existing row returned. 
*/ + delRes = handle.delete(delReq); + checkDeleteResult(delReq, delRes, + false /* shouldSucceed */, + false /* rowPresent */, + null /* expPrevValue */, + null /* expPrevVersion */, + false, /* modtime should be zero */ + recordKB); + + /* Put the row back to store */ + putReq = new PutRequest().setValue(value).setTableName(tableName); + putRes = handle.put(putReq); + ifVersion = putRes.getVersion(); + + /* DeleteIfVersion with unmatched version, it should fail */ + delReq = new DeleteRequest() + .setMatchVersion(oldVersion) + .setKey(key) + .setTableName(tableName); + delRes = handle.delete(delReq); + checkDeleteResult(delReq, delRes, + false /* shouldSucceed */, + false /* rowPresent */, + null /* expPrevValue */, + null /* expPrevVersion */, + false, /* modtime should be zero */ + recordKB); + + /* + * DeleteIfVersion with unmatched version + setReturnRow(true), + * the existing row returned. + */ + delReq.setReturnRow(true); + delRes = handle.delete(delReq); + checkDeleteResult(delReq, delRes, + false /* shouldSucceed */, + true /* rowPresent */, + value /* expPrevValue */, + ifVersion /* expPrevVersion */, + true, /* modtime should be recent */ + recordKB); + + /* DeleteIfVersion with matched version, it should succeed. */ + delReq = new DeleteRequest() + .setMatchVersion(ifVersion) + .setKey(key) + .setTableName(tableName); + delRes = handle.delete(delReq); + checkDeleteResult(delReq, delRes, + true /* shouldSucceed */, + false /* rowPresent */, + null /* expPrevValue */, + null /* expPrevVersion */, + false, /* modtime should be zero */ + recordKB); + + /* Put the row back to store */ + putReq = new PutRequest().setValue(value).setTableName(tableName); + putRes = handle.put(putReq); + ifVersion = putRes.getVersion(); + + /* + * DeleteIfVersion with matched version + setReturnRow(true), + * it should succeed but no existing row returned. + */ + delReq.setMatchVersion(ifVersion).setReturnRow(true); + delRes = handle.delete(delReq); + checkDeleteResult(delReq, delRes, + true /* shouldSucceed */, + false /* returnRow */, + null /* expPrevValue */, + null /* expPrevVersion */, + false, /* modtime should be zero */ + recordKB); + + /* DeleteIfVersion with a key not existing, it should fail. */ + delReq = new DeleteRequest() + .setMatchVersion(ifVersion) + .setKey(key) + .setTableName(tableName); + delRes = handle.delete(delReq); + checkDeleteResult(delReq, delRes, + false /* shouldSucceed */, + false /* returnRow */, + null /* expPrevValue */, + null /* expPrevVersion */, + false, /* modtime should be zero */ + recordKB); + /* + * DeleteIfVersion with a key not existing + setReturnRow(true), + * it should fail and no existing row returned. + */ + delReq.setReturnRow(true); + delRes = handle.delete(delReq); + checkDeleteResult(delReq, delRes, + false /* shouldSucceed */, + false /* returnRow */, + null /* expPrevValue */, + null /* expPrevVersion */, + false, /* modtime should be zero */ + recordKB); + } + + /* + * Test GetIndexesRequest. 
+ */ + @Test + public void testGetIndexes() { + + /* Request to get all indexes */ + final GetIndexesRequest getAllIndexes = new GetIndexesRequest() + .setTableName("testusers"); + + /* Request to get index idxName */ + final GetIndexesRequest getIndexName = new GetIndexesRequest() + .setTableName("testusers") + .setIndexName("idxName"); + + GetIndexesResult giRes; + + /* Table does not exist, expects to get TableNotFoundException */ + try { + giRes = handle.getIndexes(getAllIndexes); + fail("Expected to catch TableNotFoundException"); + } catch (TableNotFoundException tnfe) { + /* Succeed */ + checkErrorMessage(tnfe); + } + + /* Create table */ + TableResult tres = tableOperation( + handle, + "create table if not exists testusers(id integer, " + + "name string, age integer, primary key(id))", + new TableLimits(500, 500, 50), + TableResult.State.ACTIVE, + 20000); + assertEquals(TableResult.State.ACTIVE, tres.getTableState()); + + /* Get indexes, 0 index returned */ + giRes = handle.getIndexes(getAllIndexes); + assertTrue(giRes.getIndexes().length == 0); + + /* Get index idxName, expects to get IndexNotFoundException */ + try { + giRes = handle.getIndexes(getIndexName); + fail("Expected to catch IndexNotFoundException"); + } catch (IndexNotFoundException infe) { + /* Succeed */ + checkErrorMessage(infe); + } + + /* Create indexes */ + tres = tableOperation( + handle, + "create index if not exists idxName on testusers(name)", + null, + TableResult.State.ACTIVE, + 20000); + assertEquals(TableResult.State.ACTIVE, tres.getTableState()); + + tres = tableOperation( + handle, + "create index if not exists idxAgeName on testusers(age, name)", + null, + TableResult.State.ACTIVE, + 20000); + assertEquals(TableResult.State.ACTIVE, tres.getTableState()); + + /* Get indexes, 2 indexes returned */ + giRes = handle.getIndexes(getAllIndexes); + assertTrue(giRes.getIndexes().length == 2); + + /* Get idxName, 1 index returned */ + giRes = handle.getIndexes(getIndexName); + assertTrue(giRes.getIndexes().length == 1); + + /* Invalid argument - missing table name */ + try { + GetIndexesRequest badReq = new GetIndexesRequest(); + handle.getIndexes(badReq); + fail("Expected to catch IllegalArgumentException " + + "because of missing table name"); + } catch (IllegalArgumentException iae) { + /* Succeed */ + checkErrorMessage(iae); + } + } + + private void checkModTime(long modTime, boolean modTimeRecent) { + if (modTimeRecent) { + if (modTime < (System.currentTimeMillis() - 2000)) { + fail("Expected modtime to be recent, got " + modTime); + } + } else { + if (modTime != 0) { + fail("Expected modtime to be zero, got " + modTime); + } + } + } + + private void checkPutResult(PutRequest request, + PutResult result, + boolean shouldSucceed, + boolean rowPresent, + MapValue expPrevValue, + Version expPrevVersion, + boolean modTimeRecent, + int recordKB, + boolean putOverWrite) { + if (shouldSucceed) { + assertNotNull("Put should succeed", result.getVersion()); + } else { + assertNull("Put should fail", result.getVersion()); + } + checkExistingValueVersion(request, result, shouldSucceed, rowPresent, + expPrevValue, expPrevVersion); + + checkModTime(result.getExistingModificationTime(), modTimeRecent); + + int[] expCosts = getPutReadWriteCost(request, + shouldSucceed, + rowPresent, + recordKB, + putOverWrite); + + if (onprem == false) { + assertReadKB(result, expCosts[0], true /* isAbsolute */); + assertWriteKB(result, expCosts[1]); + } + } + + private void checkDeleteResult(DeleteRequest request, + DeleteResult
result, + boolean shouldSucceed, + boolean rowPresent, + MapValue expPrevValue, + Version expPrevVersion, + boolean modTimeRecent, + int recordKB) { + + assertEquals("Delete should " + (shouldSucceed ? "succeed" : " fail"), + shouldSucceed, result.getSuccess()); + checkExistingValueVersion(request, result, shouldSucceed, rowPresent, + expPrevValue, expPrevVersion); + + checkModTime(result.getExistingModificationTime(), modTimeRecent); + + int[] expCosts = getDeleteReadWriteCost(request, + shouldSucceed, + rowPresent, + recordKB); + + if (onprem == false) { + assertReadKB(result, expCosts[0], true /* isAbsolute */); + assertWriteKB(result, expCosts[1]); + } + } + + private void checkGetResult(GetRequest request, + GetResult result, + boolean rowPresent, + MapValue expValue, + Version expVersion, + boolean modTimeRecent, + int recordKB) { + + + if (rowPresent) { + if (expValue != null) { + assertEquals("Unexpected value", expValue, result.getValue()); + } else { + assertNotNull("Unexpected value", expValue); + } + if (expVersion != null) { + assertArrayEquals("Unexpected version", + expVersion.getBytes(), + result.getVersion().getBytes()); + } else { + assertNotNull("Unexpected version", result.getVersion()); + } + } else { + assertNull("Unexpected value", expValue); + assertNull("Unexpected version", result.getVersion()); + } + + checkModTime(result.getModificationTime(), modTimeRecent); + + final int minRead = getMinRead(); + int expReadKB = rowPresent ? recordKB : minRead; + + if (onprem == false) { + assertReadKB(result, expReadKB, + (request.getConsistencyInternal() == Consistency.ABSOLUTE)); + assertWriteKB(result, 0); + } + } + + private void checkExistingValueVersion(WriteRequest request, + WriteResult result, + boolean shouldSucceed, + boolean rowPresent, + MapValue expPrevValue, + Version expPrevVersion) { + + boolean hasReturnRow = rowPresent; + if (hasReturnRow) { + assertNotNull("PrevValue should be non-null", + result.getExistingValueInternal()); + if (expPrevValue != null) { + assertEquals("Unexpected PrevValue", + expPrevValue, result.getExistingValueInternal()); + } + assertNotNull("PrevVersion should be non-null", + result.getExistingVersionInternal()); + if (expPrevVersion != null) { + assertNotNull(result.getExistingVersionInternal()); + assertArrayEquals("Unexpected PrevVersion", + expPrevVersion.getBytes(), + result.getExistingVersionInternal().getBytes()); + } + } else { + assertNull("PrevValue should be null", + result.getExistingValueInternal()); + assertNull("PrevVersion should be null", + result.getExistingVersionInternal()); + } + } + + @Test + public void testDataSizeLimit() { + + assumeTrue(onprem == false); + + final String tableName = "dataSizeTest"; + final String createTableDdl = "create table if not exists dataSizeTest" + + "(sk String, data String, pk String, primary key(shard(sk), pk))"; + + /* Create a table */ + TableResult tres = tableOperation( + handle, + createTableDdl, + new TableLimits(500, 500, 50), + TableResult.State.ACTIVE, + 20000); + assertEquals(TableResult.State.ACTIVE, tres.getTableState()); + + /* Key size exceeds the limit */ + MapValue row = new MapValue(); + row.put("sk", genString(KEY_SIZE_LIMIT)) + .put("pk", "pk") + .put("data", ""); + PutRequest putReq = new PutRequest() + .setTableName(tableName) + .setValue(row); + try { + handle.put(putReq); + fail("Key size exceeds the limit, expect to fail"); + } catch (KeySizeLimitException e) { + checkErrorMessage(e); + } + + row = new MapValue(); + row.put("data", "data") + .put("sk", 
"sk") + .put("pk", genString(KEY_SIZE_LIMIT)); + putReq = new PutRequest().setTableName(tableName).setValue(row); + try { + handle.put(putReq); + fail("Key size exceeds the limit, expect to fail"); + } catch (KeySizeLimitException e) { + checkErrorMessage(e); + } + + /* Data size exceeds the limit */ + row = new MapValue(); + row.put("sk", "sk") + .put("pk", "pk") + .put("data", genString(ROW_SIZE_LIMIT)); + putReq = new PutRequest().setTableName(tableName).setValue(row); + try { + handle.put(putReq); + fail("Data size exceeds the limit, expect to fail"); + } catch (RowSizeLimitException e) { + checkErrorMessage(e); + } + } + + /* + * Test on put values(compatible or incompatible) to KV table non-numeric + * primitive data types: + * o BOOLEAN + * o STRING + * o ENUM + * o TIMESTAMP + * o BINARY + * o FIXED_BINARY + * o JSON + */ + @Test + public void testNonNumericDataTypes() { + final String tableName = "DataTypes"; + final String createTableDdl = + "CREATE TABLE IF NOT EXISTS " + tableName + "(" + + "id INTEGER, " + + "bl BOOLEAN, " + + "s STRING, " + + "e ENUM(red, yellow, blue), " + + "ts TIMESTAMP(9), " + + "bi BINARY, " + + "fbi BINARY(10), " + + "json JSON," + + "PRIMARY KEY(id)" + + ")"; + + final FieldValue intVal = new IntegerValue(1); + final FieldValue boolVal = BooleanValue.trueInstance(); + final FieldValue strVal = new StringValue("oracle nosql"); + final FieldValue enumStrVal = new StringValue("red"); + + final Timestamp ts = Timestamp.valueOf("2018-05-02 10:23:42.123"); + final FieldValue tsVal = new TimestampValue(ts); + final FieldValue tsStrVal = new StringValue("2018-05-02T10:23:42.123"); + + byte[] byte10 = genBytes(10); + byte[] byte20 = genBytes(20); + final FieldValue bi10Val = new BinaryValue(byte10); + final FieldValue bi20Val = new BinaryValue(byte20); + final FieldValue strByte10 = + new StringValue(ProxySerialization.encodeBase64(byte10)); + final FieldValue strByte20 = + new StringValue(ProxySerialization.encodeBase64(byte20)); + + /* Create a table */ + TableResult tres = tableOperation( + handle, + createTableDdl, + new TableLimits(500, 500, 50), + TableResult.State.ACTIVE, + 20000); + assertEquals(TableResult.State.ACTIVE, tres.getTableState()); + + FieldValue[] invalidValues; + FieldValue[] validValues; + String targetField; + + /* Boolean type */ + targetField = "bl"; + invalidValues = new FieldValue[] {intVal, tsVal, bi10Val}; + validValues = new FieldValue[] {boolVal, strVal}; + runPut(tableName, targetField, invalidValues, false); + runPut(tableName, targetField, validValues, true); + + /* String type */ + targetField = "s"; + invalidValues = new FieldValue[] {intVal, boolVal, tsVal, bi10Val}; + validValues = new FieldValue[] {strVal}; + runPut(tableName, targetField, invalidValues, false); + runPut(tableName, targetField, validValues, true); + + /* Emum type */ + targetField = "e"; + invalidValues = + new FieldValue[] {intVal, boolVal, strVal, tsVal, bi10Val}; + validValues = new FieldValue[] {enumStrVal}; + runPut(tableName, targetField, invalidValues, false); + runPut(tableName, targetField, validValues, true); + + /* Timestamp type */ + targetField = "ts"; + invalidValues = new FieldValue[] {intVal, boolVal, strVal, bi10Val}; + validValues = new FieldValue[] {tsVal, tsStrVal}; + runPut(tableName, targetField, invalidValues, false); + runPut(tableName, targetField, validValues, true); + + /* Binary type */ + targetField = "bi"; + invalidValues = new FieldValue[] {intVal, boolVal, strVal, tsVal}; + validValues = new FieldValue[] {bi10Val, bi20Val, 
strByte10, strByte20}; + runPut(tableName, targetField, invalidValues, false); + runPut(tableName, targetField, validValues, true); + + /* Fixed binary type */ + targetField = "fbi"; + invalidValues = new FieldValue[] {intVal, boolVal, strVal, tsVal, + bi20Val, strByte20}; + validValues = new FieldValue[] {bi10Val, strByte10}; + runPut(tableName, targetField, invalidValues, false); + runPut(tableName, targetField, validValues, true); + + /* JSON type */ + targetField = "json"; + invalidValues = new FieldValue[] {intVal, boolVal, strVal, tsVal, + bi10Val}; + runPut(tableName, targetField, validValues, true); + } + + /** + * Test case-insensitivity of table names + */ + @Test + public void testCase() { + final String create1 = "create table foo(i integer, primary key(i))"; + final String create2 = "create table Foo(i integer, primary key(i))"; + final String alter1 = "alter table FoO(add name string)"; + final String drop = "drop table fOo"; + + final TableLimits limits = new TableLimits(500, 500, 50); + tableOperation(handle, create1, limits, null, + TableResult.State.ACTIVE, null); + tableOperation(handle, create2, limits, null, + TableResult.State.ACTIVE, TableExistsException.class); + + /* get with different case */ + GetTableRequest getTable = + new GetTableRequest().setTableName("FoO"); + /* this will throw if the table isn't found */ + handle.getTable(getTable); + + /* alter with different case */ + tableOperation(handle, alter1, null, null, + TableResult.State.ACTIVE, null); + + tableOperation(handle, drop, null, null, + TableResult.State.DROPPED, null); + } + + @Test + public void testNullJsonNull() { + final String createTable1 = + "create table tjson(id integer, info json, primary key(id))"; + final String createTable2 = + "create table trecord(id integer, " + + "info record(name string, age integer), " + + "primary key(id))"; + + tableOperation(handle, createTable1, new TableLimits(10, 10, 1), + null, TableResult.State.ACTIVE, null); + tableOperation(handle, createTable2, new TableLimits(10, 10, 1), + null, TableResult.State.ACTIVE, null); + + MapValue rowNull = new MapValue() + .put("id", 0) + .put("info", + new MapValue() + .put("name", NullValue.getInstance()) + .put("age", 20)); + MapValue rowJsonNull = new MapValue() + .put("id", 0) + .put("info", + new MapValue() + .put("name", JsonNullValue.getInstance()) + .put("age", 20)); + + MapValue[] rows = new MapValue[] {rowNull, rowJsonNull}; + Map<String, MapValue> tableExpRows = new HashMap<>(); + tableExpRows.put("tjson", rowJsonNull); + tableExpRows.put("trecord", rowNull); + + /* + * Put rows with NullValue or JsonNullValue, they should be converted + * to the right value for the target type.
+ */ + for (Map.Entry<String, MapValue> e : tableExpRows.entrySet()) { + String table = e.getKey(); + MapValue expRow = e.getValue(); + + for (MapValue row : rows) { + PutRequest putReq = new PutRequest() + .setTableName(table) + .setValue(row); + PutResult putRet = handle.put(putReq); + Version pVersion = putRet.getVersion(); + assertNotNull(pVersion); + + MapValue key = new MapValue().put("id", row.get("id")); + GetRequest getReq = new GetRequest() + .setTableName(table) + .setConsistency(Consistency.ABSOLUTE) + .setKey(key); + GetResult getRet = handle.get(getReq); + assertEquals(expRow, getRet.getValue()); + assertNotNull(getRet.getVersion()); + assertTrue(Arrays.equals(pVersion.getBytes(), + getRet.getVersion().getBytes())); + } + } + + /* + * Query with variable for json field and set NullValue or + * JsonNullValue to variable, the NullValue is expected to be converted + * to JsonNullValue. + */ + String query = "declare $name json;" + + "select * from tjson t where t.info.name = $name"; + PrepareRequest prepReq = new PrepareRequest().setStatement(query); + PrepareResult prepRet = handle.prepare(prepReq); + PreparedStatement prepStmt = prepRet.getPreparedStatement(); + + prepStmt.setVariable("$name", JsonNullValue.getInstance()); + QueryRequest queryReq = new QueryRequest() + .setPreparedStatement(prepStmt); + QueryResult queryRet = handle.query(queryReq); + assertEquals(1, queryRet.getResults().size()); + assertEquals(rowJsonNull, queryRet.getResults().get(0)); + + prepStmt.setVariable("$name", NullValue.getInstance()); + queryRet = handle.query(queryReq); + assertEquals(0, queryRet.getResults().size()); + } + + /** + * Tests that read-only operation rate throttling happens in the SC, in the + * cloud. + * Operations: + * getTable() + * getIndexes() + * getTableUsage() + * listTables() + * These two use direct REST calls to the SC as they are not available in + * the driver API at this time: + * getDdlHistory() + * getPeakUsage() + */ + @Test + public void testOpThrottling() { + /* + * This test needs to adjust the op rate using the SC API, so it is + * for minicloud testing only + */ + assumeTrue("Skip this test if not minicloud test", useMiniCloud); + + final String create = "create table testOpThrottle(id integer," + + "name string, primary key(id))"; + try { + setOpThrottling(getTenantId(), DEFAULT_OP_THROTTLE); + + /* + * create a table to use for further operations + */ + tableOperation(handle, create, + new TableLimits(500, 500, 50), + TableResult.State.ACTIVE, + 20000); + + /* getTable */ + try { + GetTableRequest req = + new GetTableRequest().setTableName("testOpThrottle"); + for (int i = 0; i < 100; i++) { + handle.getTable(req); + } + fail("getTable should have been throttled"); + } catch (OperationThrottlingException e) { + /* success */ + checkErrorMessage(e); + } + + /* getIndexes */ + try { + GetIndexesRequest req = + new GetIndexesRequest().setTableName("testOpThrottle"); + for (int i = 0; i < 100; i++) { + handle.getIndexes(req); + } + fail("getIndexes should have been throttled"); + } catch (OperationThrottlingException e) { + /* success */ + checkErrorMessage(e); + } + + /* getTableUsage */ + try { + TableUsageRequest req = + new TableUsageRequest().setTableName("testOpThrottle"); + for (int i = 0; i < 100; i++) { + handle.getTableUsage(req); + } + fail("getTableUsage should have been throttled"); + } catch (OperationThrottlingException e) { + /* success */ + checkErrorMessage(e); + } + + /* listTables */ + try { + ListTablesRequest req = new ListTablesRequest(); + for (int i = 0; i < 100; i++)
{ + handle.listTables(req); + } + fail("listTables should have been throttled"); + } catch (OperationThrottlingException e) { + /* success */ + checkErrorMessage(e); + } + + /* Peak usage (via direct REST to SC) */ + try { + for (int i = 0; i < 100; i++) { + HttpResponse response = getPeakUsage(getTenantId(), + "testOpThrottle", + 0, 0); + /* method returns error in response, not exception */ + if (response.getStatusCode() != 200) { + assertEquals(429, response.getStatusCode()); + assertTrue(response.getOutput() + .contains("OperationRateLimitExceeded")); + throw new OperationThrottlingException("ignored"); + } + } + fail("getPeakUsage should have been throttled"); + } catch (OperationThrottlingException e) { + /* success */ + checkErrorMessage(e); + } + } finally { + setOpThrottling(getTenantId(), NO_OP_THROTTLE); + } + } + @Test + public void testExactMatch() { + final String tableName = "tMatch"; + final String createTable = + "create table tMatch(id integer, name string, " + + "age integer, primary key(id))"; + + tableOperation(handle, createTable, new TableLimits(10, 10, 1), + null, TableResult.State.ACTIVE, null); + + /* use extra values, not exact match */ + MapValue value = new MapValue() + .put("id", 1) + .put("name", "myname") + .put("age", 5) + .put("extra", "foo"); + + PutRequest putReq = new PutRequest() + .setTableName(tableName) + .setValue(value); + PutResult putRet = handle.put(putReq); + assertNotNull(putRet.getVersion()); + + /* set exact match to true, this shoudl fail */ + putReq.setExactMatch(true); + try { + putRet = handle.put(putReq); + fail("Put should have thrown IAE"); + } catch (Exception e) { + /* success */ + checkErrorMessage(e); + } + + /* test via query insert */ + String insertQ = + "insert into tMatch(id, name, age) values(5, 'fred', 6)"; + QueryRequest qReq = new QueryRequest().setStatement(insertQ); + QueryResult qRes = handle.query(qReq); + for (MapValue res : qRes.getResults()) { + assertEquals(1, res.get("NumRowsInserted").getInt()); + } + + /* try using prepared query */ + insertQ = + "insert into tMatch(id, name, age) values(6, 'jack', 6)"; + PrepareRequest prepReq = new PrepareRequest().setStatement(insertQ); + PrepareResult prepRet = handle.prepare(prepReq); + PreparedStatement prepStmt = prepRet.getPreparedStatement(); + qReq = new QueryRequest() + .setPreparedStatement(prepStmt); + qRes = handle.query(qReq); + for (MapValue res : qRes.getResults()) { + assertEquals(1, res.get("NumRowsInserted").getInt()); + } + } + + @Test + public void testIdentityColumn() { + final String tableName = "tIdentity"; + final String createTable1 = + "create table tIdentity(id integer, id1 long generated always " + + "as identity, name string, primary key(shard(id), id1))"; + + tableOperation(handle, createTable1, new TableLimits(10, 10, 1), + null, TableResult.State.ACTIVE, null); + + MapValue value = new MapValue() + .put("id", 1) + .put("name", "myname"); + + /* test single put */ + PutRequest putReq = new PutRequest() + .setTableName(tableName) + .setValue(value) + .setIdentityCacheSize(5); + PutResult putRet = handle.put(putReq); + assertNotNull(putRet.getVersion()); + assertNotNull(putRet.getGeneratedValue()); + + /* test WriteMultiple */ + WriteMultipleRequest wmReq = new WriteMultipleRequest(); + for (int i = 0; i < 10; i++) { + PutRequest putRequest = new PutRequest() + .setValue(value) + .setIdentityCacheSize(i) + .setTableName(tableName); + /* cause last operation to fail and not return a generated value */ + if (i == 9) { + 
putRequest.setOption(PutRequest.Option.IfPresent); + } + wmReq.add(putRequest, false); + } + + WriteMultipleResult wmRes = handle.writeMultiple(wmReq); + assertEquals(10, wmRes.getResults().size()); + int i = 0; + int lastIdVal = -1; + for (OperationResult result : wmRes.getResults()) { + if (i++ == 9) { + assertNull(result.getGeneratedValue()); + } else { + assertNotNull(result.getGeneratedValue()); + if (lastIdVal < 0) { + lastIdVal = result.getGeneratedValue().getInt(); + } else { + assertTrue(result.getGeneratedValue().getInt() > lastIdVal); + lastIdVal = result.getGeneratedValue().getInt(); + } + } + } + + /* + * Verify that a failed operation (without an exception) will not + * return a generated value. The system may have generated one, but + * it is not relevant in this case. + */ + putReq.setOption(PutRequest.Option.IfPresent); + putRet = handle.put(putReq); + assertNull(putRet.getGeneratedValue()); + + + /* try an invalid case, use value from above, plus the id col */ + putReq.setValue(value.put("id1", 1)); + try { + putRet = handle.put(putReq); + fail("Exception should have been thrown on put"); + } catch (Exception e) { + /* success */ + checkErrorMessage(e); + } + + /* try an insert query */ + String insertQ = "insert into tIdentity(id, name) values(5, 'fred')"; + QueryRequest qReq = new QueryRequest().setStatement(insertQ); + QueryResult qRes = handle.query(qReq); + for (MapValue res : qRes.getResults()) { + assertEquals(1, res.get("NumRowsInserted").getInt()); + } + + insertQ = "insert into tIdentity(id, name) values(5, 'jack')"; + + PrepareRequest prepReq = new PrepareRequest().setStatement(insertQ); + PrepareResult prepRet = handle.prepare(prepReq); + PreparedStatement prepStmt = prepRet.getPreparedStatement(); + qReq = new QueryRequest() + .setPreparedStatement(prepStmt); + qRes = handle.query(qReq); + for (MapValue res : qRes.getResults()) { + assertEquals(1, res.get("NumRowsInserted").getInt()); + } + } + + @Test + public void testNameValidations() { + assumeTrue(cloudRunning); + + String ddl = "create table if not exists " + + "%s(id integer, primary key(id))"; + TableLimits limits = new TableLimits(500, 500, 50); + try { + tableOperation(handle, String.format(ddl, "ocid_nosqltable_1"), + limits, 20000); + fail("expect to fail"); + } catch (IllegalArgumentException e) { + assertTrue(e.getMessage().contains("reserved keyword")); + } + + try { + tableOperation(handle, String.format(ddl, "ocid.nosqltable.1"), + limits, 20000); + fail("expect to fail"); + } catch (IllegalArgumentException e) { + assertTrue(e.getMessage().contains("alphanumeric values")); + } + + try { + tableOperation(handle, String.format(ddl, "oci"), limits, 20000); + fail("expect to fail"); + } catch (IllegalArgumentException e) { + assertTrue(e.getMessage().contains("reserved keyword")); + } + + try { + tableOperation(handle, String.format(ddl, "OCID"), limits, 20000); + fail("expect to fail"); + } catch (IllegalArgumentException e) { + assertTrue(e.getMessage().contains("reserved keyword")); + } + + try { + tableOperation(handle, String.format(ddl, "Foo-ta"), limits, 20000); + fail("expect to fail"); + } catch (IllegalArgumentException e) { + assertTrue(e.getMessage().contains("alphanumeric values")); + } + + try { + tableOperation(handle, String.format(ddl, "7oo"), limits, 20000); + fail("expect to fail"); + } catch (IllegalArgumentException e) { + assertTrue(e.getMessage().contains( + "Identifiers must start with a letter")); + } + + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < 257; 
i++) { + sb.append("o"); + } + String longName = sb.toString(); + try { + tableOperation(handle, String.format(ddl, longName), limits, 20000); + fail("expect to fail"); + } catch (IllegalArgumentException e) { + assertTrue(e.getMessage().contains("256 characters")); + } + + /* Create a table */ + TableResult tres = tableOperation( + handle, + "create table if not exists user(id integer, " + + "name string, primary key(id))", + new TableLimits(500, 500, 50), + 20000); + assertEquals(TableResult.State.ACTIVE, tres.getTableState()); + + ddl = "create index if not exists %s on user(name)"; + try { + tableOperation(handle, String.format(ddl, "ocid_nosqltable_1"), + null, TableResult.State.ACTIVE, 20000); + fail("expect to fail"); + } catch (IllegalArgumentException e) { + assertTrue(e.getMessage().contains("reserved keyword")); + } + try { + tableOperation(handle, String.format(ddl, "ocid.nosqltable.1"), + null, TableResult.State.ACTIVE, 20000); + fail("expect to fail"); + } catch (IllegalArgumentException e) { + assertTrue(e.getMessage().contains("alphanumeric values")); + } + try { + tableOperation(handle, String.format(ddl, "oci"), null, + TableResult.State.ACTIVE, 20000); + fail("expect to fail"); + } catch (IllegalArgumentException e) { + assertTrue(e.getMessage().contains("reserved keyword")); + } + try { + tableOperation(handle, String.format(ddl, "OCID"), null, + TableResult.State.ACTIVE, 20000); + fail("expect to fail"); + } catch (IllegalArgumentException e) { + assertTrue(e.getMessage().contains("reserved keyword")); + } + try { + tableOperation(handle, String.format(ddl, "foo-index"), null, + TableResult.State.ACTIVE, 20000); + fail("expect to fail"); + } catch (IllegalArgumentException e) { + assertTrue(e.getMessage().contains("alphanumeric values")); + } + try { + tableOperation(handle, String.format(ddl, "foo.index"), null, + TableResult.State.ACTIVE, 20000); + fail("expect to fail"); + } catch (IllegalArgumentException e) { + assertTrue(e.getMessage().contains("alphanumeric values")); + } + try { + tableOperation(handle, String.format(ddl, "7oo"), null, + TableResult.State.ACTIVE, 20000); + fail("expect to fail"); + } catch (IllegalArgumentException e) { + assertTrue(e.getMessage().contains( + "Identifiers must start with a letter")); + } + sb = new StringBuilder(); + for (int i = 0; i < 65; i++) { + sb.append("o"); + } + longName = sb.toString(); + try { + tableOperation(handle, String.format(ddl, longName), null, + TableResult.State.ACTIVE, 20000); + fail("expect to fail"); + } catch (IllegalArgumentException e) { + assertTrue(e.getMessage().contains("64 char")); + } + } + + @Test + public void testGetTableUsage() { + assumeTrue(cloudRunning); + + final String tableName = "testGetTableUsage"; + final String createTableDDL = "create table " + tableName + + "(id integer, name string, primary key(id))"; + tableOperation(handle, createTableDDL, + new TableLimits(100, 100, 1), + TableResult.State.ACTIVE, 10000); + + final int numUsagesPerDay = (24 * 60 * 60 * 1000) / USAGE_TIME_SLICE_MS; + final int maxNumUsagesPerRequest = TABLE_USAGE_NUMBER_LIMIT; + + String startDate = "2019-01-16"; + String endDate = "2019-01-16T23:59:00"; + int limit = 0; + TableUsage[] usages; + + /* limit = 0, expect to get all the records of the day of 2019-01-16 */ + usages = runGetTableUsage(tableName, startDate, endDate, limit); + /* the number of records per day = 24 * 60 */ + verifyTableUsages(usages, startDate, null, numUsagesPerDay); + + /* limit = 100, return the first 100 */ + usages = 
runGetTableUsage(tableName, startDate, endDate, 100); + verifyTableUsages(usages, startDate, null, 100); + + /* + * limit = 0, expect to get all records from startDate to + * endDate(inclusively) + */ + endDate = "2019-01-25T23:59:00"; + limit = 0; + usages = runGetTableUsage(tableName, startDate, endDate, limit); + verifyTableUsages(usages, startDate, null, numUsagesPerDay * 10); + + /* + * limit = 5001, expect to get all records from startDate up + * to 5001 + */ + limit = 5001; + usages = runGetTableUsage(tableName, startDate, null, limit); + verifyTableUsages(usages, startDate, null, limit); + + /* + * limit = 0, expect to get all records from startDate + */ + usages = runGetTableUsage(tableName, startDate, null, 0 /* limit */); + verifyTableUsages(usages, startDate, null, maxNumUsagesPerRequest); + + endDate = "2019-01-26"; + limit = 100; + /* + * limit = 100, expect to get all records to 2020-01-26T00:00:00 up + * to 100 + */ + usages = runGetTableUsage(tableName, null, endDate, limit); + verifyTableUsages(usages, null, endDate, limit); + + /* + * limit = 0, expect to get all records to endDate + */ + usages = runGetTableUsage(tableName, null, endDate, 0 /* limit */); + verifyTableUsages(usages, null, endDate, maxNumUsagesPerRequest); + + /* + * startDate == null, endDate == null, limit = 0, expect to return + * single record for the penultimate period + */ + usages = runGetTableUsage(tableName, null /* startDate */, + null /* endDate */, 0 /* limit */); + assertEquals(1, usages.length); + + /* + * Expect to get IAE if the specified limit exceeds the + * TABLE_USAGE_NUMBER_LIMIT. + */ + try { + runGetTableUsage(tableName, startDate, null, + TABLE_USAGE_NUMBER_LIMIT + 1); + fail("Expect to get IAE but not"); + } catch (IllegalArgumentException iae) { + /* succeed */ + } + + /* + * Invalid arguments: startTime, endTime and limit must not be a + * negative value. + */ + TableUsageRequest req = new TableUsageRequest() + .setTableName(tableName); + try { + req.setStartTime(-1); + handle.getTableUsage(req); + } catch (IllegalArgumentException iae) { + /* expected */ + } + req.setStartTime(0); + + try { + req.setEndTime(-1); + handle.getTableUsage(req); + } catch (IllegalArgumentException iae) { + /* expected */ + } + req.setEndTime(0); + + try { + req.setLimit(-1); + handle.getTableUsage(req); + } catch (IllegalArgumentException iae) { + /* expected */ + } + req.setLimit(0); + } + + @Test + public void testLargeRow() { + try { + doLargeRow(handle, false); + } catch (Exception e) { + /* success */ + } + try { + doLargeRow(handle, true); + } catch (Exception e) { + /* success */ + } + } + + /* + * Test case to ensure that a small table with throughput of 1 + * can work. This was prompted by a proxy issue with throughput + * of 1. 
+ */ + @Test + public void testLowThroughput() throws Exception { + final String createTable = + "create table Users(id integer, name string, primary key(id))"; + + /* + * Read throughput 1 + */ + tableOperation(handle, createTable, new TableLimits(1, 1, 1), + null, TableResult.State.ACTIVE, null); + + /* + * Put 3 rows + */ + MapValue value = new MapValue() + .put("id", 1) + .put("name", "name1"); + PutRequest putRequest = + new PutRequest().setValue(value).setTableName("Users"); + PutResult pres = handle.put(putRequest); + assertNotNull(pres.getVersion()); + value.put("id", 2).put("name", "name2"); + pres = handle.put(putRequest); + assertNotNull(pres.getVersion()); + value.put("id", 3).put("name", "name3"); + pres = handle.put(putRequest); + assertNotNull(pres.getVersion()); + + /* + * Loop on queries. Success means completion without exceptions. + */ + try { + QueryRequest qReq = + new QueryRequest().setStatement("select * from Users"); + for (int i = 0; i < 10; i++) { + Thread.sleep(200); + runQuery(qReq); + } + } catch (Exception e) { + fail("test failed with exception " + e); + } + } + + /* + * Tests support for flexible casting of types in the proxy where there is + * no data loss, e.g.: + * String "1" to Integer (or other numeric) + * String "true" or "false" to Boolean + * Valid timestamp mappings + */ + @Test + public void testFlexibleMapping() throws Exception { + final String createTable = + "create table flex(id integer, primary key(id), " + + "str string, " + + "bool boolean, " + + "int integer, " + + "long long, " + + "doub double, " + + "num number, " + + "ts timestamp(3))"; + + /* JSON with various valid mappings */ + + /* string value for numeric fields */ + final String strToNum = "{" + + "\"id\": 1, \"str\": \"str\", \"bool\": true, \"int\": \"5\", " + + "\"long\": \"456\", \"doub\":\"5.6\", \"num\":\"12345678910\", " + + "\"ts\": \"2017-08-21T13:34:35.123\"" + + "}"; + + /* int timestamp */ + final String intToTs = "{" + + "\"id\": 1, \"str\": \"str\", \"bool\": true, \"int\": 5, " + + "\"long\": 456, \"doub\":5.6, \"num\":12345678910, " + + "\"ts\": 12" + + "}"; + + /* long timestamp */ + final String longToTs = "{" + + "\"id\": 1, \"str\": \"str\", \"bool\": true, \"int\": 5, " + + "\"long\": 456, \"doub\":5.6, \"num\":12345678910, " + + "\"ts\": 1234567891011" + + "}"; + + /* string boolean */ + final String strToBool = "{" + + "\"id\": 1, \"str\": \"str\", \"bool\": \"true\", \"int\": 5, " + + "\"long\": 456, \"doub\":5.6, \"num\":12345678910, " + + "\"ts\": 1234567891011" + + "}"; + + final String[] mappings = {strToNum, intToTs, longToTs, strToBool}; + + tableOperation(handle, createTable, new TableLimits(100, 100, 1), + null, TableResult.State.ACTIVE, null); + + for (String s : mappings) { + PutRequest pr = new PutRequest().setValueFromJson(s, null). 
+ setTableName("flex"); + PutResult pres = handle.put(pr); + assertNotNull(pres.getVersion()); + } + } + + @Test + public void testDropTable() { + + String tableName = "testDropTable"; + String createTable = "create table " + tableName + + "(id integer, primary key(id))"; + TableLimits limits = new TableLimits(10, 10, 1); + String dropTable = "drop table " + tableName; + String dropTableIfExists = "drop table if exists " + tableName; + + /* + * drop table that doesn't exist, should get TableNotFoundException + */ + tableOperation(handle, dropTable, TableResult.State.DROPPED, + TableNotFoundException.class); + + /* drop table not existing with "if exists", should succeed */ + tableOperation(handle, dropTableIfExists, TableResult.State.DROPPED, + null); + + /* drop an existing table, should succeed */ + tableOperation(handle, createTable, limits, null, + TableResult.State.ACTIVE, null); + tableOperation(handle, dropTable, TableResult.State.DROPPED, null); + + /* drop an existing table with "if exists", should succeed */ + tableOperation(handle, createTable, limits, null, + TableResult.State.ACTIVE, null); + tableOperation(handle, dropTableIfExists, TableResult.State.DROPPED, + null); + } + + @Test + public void testTableTags() { + assumeTrue(cloudRunning); + + final int waitMs = 20000; + final int delayMs = 300; + + /* + * Create table with definedTags and freeFormTags + */ + String tableName = "testTableTags"; + String ddl = "create table " + tableName + + "(id integer, primary key(id))"; + TableLimits limits = new TableLimits(10, 10, 1); + + DefinedTags dtags = new DefinedTags(); + dtags.addTag(DEFINED_TAG_NAMESPACE, DEFINED_TAG_PROP, "v0"); + + FreeFormTags ftags = new FreeFormTags(); + ftags.addTag("scope", "test"); + ftags.addTag("test", "function"); + + TableRequest req = new TableRequest() + .setStatement(ddl) + .setTableLimits(limits) + .setDefinedTags(dtags) + .setFreeFormTags(ftags); + + TableResult tr = handle.tableRequest(req); + tr.waitForCompletion(handle, waitMs, delayMs); + + tr = getTable(tableName, handle); + assertNotNull(tr); + assertTagsEquals(dtags, tr.getDefinedTags()); + assertTagsEquals(ftags, tr.getFreeFormTags()); + + /* + * Update tags + */ + + dtags.addTag(DEFINED_TAG_NAMESPACE, DEFINED_TAG_PROP, "v1"); + + ftags = new FreeFormTags(); + ftags.addTag("scope", "cloudtest"); + ftags.addTag("test", "stress"); + + req = new TableRequest() + .setTableName(tableName) + .setFreeFormTags(ftags) + .setDefinedTags(dtags); + tr = handle.tableRequest(req); + tr.waitForCompletion(handle, waitMs, delayMs); + + tr = getTable(tableName, handle); + assertNotNull(tr); + assertTagsEquals(dtags, tr.getDefinedTags()); + assertTagsEquals(ftags, tr.getFreeFormTags()); + + /* + * Clear tags + */ + dtags = new DefinedTags(); + ftags = new FreeFormTags(); + req = new TableRequest() + .setTableName(tableName) + .setFreeFormTags(ftags) + .setDefinedTags(dtags); + tr = handle.tableRequest(req); + tr.waitForCompletion(handle, waitMs, delayMs); + + tr = getTable(tableName, handle); + assertNotNull(tr); + assertTagsEquals(dtags, tr.getDefinedTags()); + assertTagsEquals(ftags, tr.getFreeFormTags()); + } + + /* Test ddls ops using matchETag */ + @Test + public void testMatchETag() { + assumeTrue(cloudRunning); + + String comptId = getCompartmentId(); + String tableName = "testMatchETag"; + String ddl = "create table " + tableName + + "(id integer, primary key(id))"; + int waitMs = 20000; + + TableResult ret; + String etag; + String oldETag; + + /* create table */ + ret = tableOperation(handle, 
ddl, new TableLimits(10, 10, 1), + TableResult.State.ACTIVE, waitMs); + etag = ret.getMatchETag(); + + /* alter table using valid ETag */ + ddl = "alter table " + tableName + "(add info json)"; + ret = tableOperation(handle, ddl, null /* limits */, comptId, + null /* tableName */, etag, + TableResult.State.ACTIVE, waitMs); + oldETag = etag; + etag = ret.getMatchETag(); + + /* alter table using invalid ETag */ + try { + ddl = "alter table " + tableName + "(drop info)"; + tableOperation(handle, ddl, null /* limits */, comptId, + null /* tableName */, oldETag, + TableResult.State.ACTIVE, waitMs); + fail("expect to fail"); + } catch (IllegalArgumentException ex) { + } + + /* update table limits using invalid ETag */ + TableLimits newLimits = new TableLimits(20, 20, 2); + try { + tableOperation(handle, null /* ddl */, newLimits, comptId, + tableName, oldETag, TableResult.State.ACTIVE, + waitMs); + fail("expect to fail"); + } catch (IllegalArgumentException ex) { + } + + /* update table limits using valid ETag */ + ret = tableOperation(handle, null /* ddl */, newLimits, comptId, + tableName, etag, TableResult.State.ACTIVE, + waitMs); + assertEquals(newLimits.getReadUnits(), + ret.getTableLimits().getReadUnits()); + assertEquals(newLimits.getWriteUnits(), + ret.getTableLimits().getWriteUnits()); + assertEquals(newLimits.getStorageGB(), + ret.getTableLimits().getStorageGB()); + assertEquals(newLimits.getMode(), + ret.getTableLimits().getMode()); + oldETag = etag; + etag = ret.getMatchETag(); + + /* update tags using invalid ETag */ + FreeFormTags ftags = new FreeFormTags(); + ftags.addTag("scope", "test"); + + TableRequest req = new TableRequest() + .setTableName(tableName) + .setMatchEtag(oldETag) + .setFreeFormTags(ftags); + ret = handle.tableRequest(req); + try { + ret.waitForCompletion(handle, waitMs, 500); + fail("expect to fail"); + } catch (IllegalArgumentException ex) { + } + + /* update tags using valid ETag */ + req.setMatchEtag(etag); + ret = handle.tableRequest(req); + ret.waitForCompletion(handle, waitMs, 500); + ret = getTable(tableName, handle); + assertTagsEquals(ftags, ret.getFreeFormTags()); + oldETag = etag; + etag = ret.getMatchETag(); + + /* drop table using invalid ETag */ + ddl = "drop table " + tableName; + try { + tableOperation(handle, ddl, null /* limits */, comptId, + null /* tableName */, oldETag, + TableResult.State.ACTIVE, waitMs); + fail("Expect to fail"); + } catch (IllegalArgumentException ex) { + } + + /* drop table using valid ETag */ + tableOperation(handle, ddl, null /* limits */, comptId, + null /* tableName */, etag, + TableResult.State.DROPPED, waitMs); + } + + private void runQuery(QueryRequest req) { + do { + QueryResult res = handle.query(req); + res.getResults(); + } while (!req.isDone()); + } + + private TableUsage[] runGetTableUsage(String tableName, + String startTime, + String endTime, + int limit) { + + TableUsageRequest req = new TableUsageRequest() + .setTableName(tableName); + if (startTime != null) { + req.setStartTime(startTime); + } + if (endTime != null) { + req.setEndTime(endTime); + } + if (limit > 0) { + req.setLimit(limit); + } + TableUsageResult res = handle.getTableUsage(req); + return res.getUsageRecords(); + } + + private void verifyTableUsages(TableUsage[] usages, + String startDate, + String endDate, + int expNum) { + + assertEquals(expNum, usages.length); + + final long delta = USAGE_TIME_SLICE_MS;; + long startTime = 0; + if (startDate != null) { + startTime = parseTimestamp(startDate); + } else if (endDate != null) { + startTime 
= parseTimestamp(endDate) - (expNum - 1) * delta; + } + + if (startTime > 0) { + for (TableUsage usage : usages) { + assertEquals(startTime, usage.getStartTime()); + startTime += delta; + } + } + } + + /** + * Parses the timestamp in string format to milliseconds since epoch. + */ + private static long parseTimestamp(String timestampStr) { + TemporalAccessor ta; + try { + ta = timestampFormatter.parse(timestampStr); + } catch (DateTimeParseException dtpe) { + throw new RuntimeException("Fail to parse timestamp string: " + + dtpe.getMessage()); + } + Instant instant; + if (ta.isSupported(ChronoField.HOUR_OF_DAY)) { + instant = Instant.from(ta); + } else { + instant = LocalDate.from(ta).atStartOfDay(UTCZone).toInstant(); + } + return instant.toEpochMilli(); + } + + private void runPut(String tableName, + String targetField, + FieldValue[] values, + boolean expSucceed) { + MapValue row = new MapValue().put("id", 1); + for (FieldValue value : values) { + row.put(targetField, value); + + PutRequest putReq = new PutRequest() + .setTableName(tableName) + .setValue(row); + try { + handle.put(putReq); + if (!expSucceed) { + fail("Expect to fail but succeed"); + } + } catch (Throwable ex) { + if (expSucceed) { + fail("Expect to succeed but fail"); + } + } + } + } + + private String genString(int length) { + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < length; i++) { + sb.append((char)('A' + i % 26)); + } + return sb.toString(); + } + + private byte[] genBytes(int size) { + byte[] bytes = new byte[size]; + for (int i = 0; i < bytes.length; i++) { + bytes[i] = (byte)(i % 256); + } + return bytes; + } + + private void assertTagsEquals(DefinedTags exp, DefinedTags tags) { + MapValue v0 = (MapValue)MapValue.createFromJson(exp.toString(), null); + MapValue v1 = (MapValue)MapValue.createFromJson(tags.toString(), null); + if (useCloudService) { + /* + * Ignore the default defined tag added by cloud service + * automatically + */ + v1.remove(DEFAULT_DEFINED_TAG_NAMESPACE); + } + assertEquals(v0, v1); + } + + private void assertTagsEquals(FreeFormTags exp, FreeFormTags tags) { + assertEquals(MapValue.createFromJson(exp.toString(), null), + MapValue.createFromJson(tags.toString(), null)); + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/ProxyTestBase.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/ProxyTestBase.java new file mode 100644 index 00000000..d2ae418c --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/ProxyTestBase.java @@ -0,0 +1,2096 @@ +/*- + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * See the file LICENSE for redistribution information. 
+ * + */ + +package oracle.nosql.proxy; + +import static oracle.nosql.proxy.protocol.HttpConstants.TENANT_ID; +import static oracle.nosql.proxy.protocol.NsonProtocol.ERROR_CODE; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeTrue; + +import java.io.BufferedInputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.lang.reflect.Method; +import java.net.HttpURLConnection; +import java.net.URL; +import java.security.KeyStore; +import java.security.cert.Certificate; +import java.security.cert.CertificateFactory; +import java.time.Instant; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; +import java.time.format.DateTimeFormatter; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.concurrent.TimeUnit; +import java.util.logging.Logger; + +import io.netty.buffer.ByteBuf; +import io.netty.handler.ssl.SslContext; +import io.netty.handler.ssl.util.SelfSignedCertificate; +import io.netty.util.internal.logging.InternalLoggerFactory; +import io.netty.util.internal.logging.JdkLoggerFactory; +import oracle.kv.KVVersion; +import oracle.kv.impl.api.KVStoreImpl; +import oracle.kv.impl.api.table.TableAPIImpl; +import oracle.kv.impl.as.AggregationService; +import oracle.kv.util.kvlite.KVLite; +import oracle.nosql.common.contextlogger.LogContext; +import oracle.nosql.common.json.JsonUtils; +import oracle.nosql.common.sklogger.SkLogger; +import oracle.nosql.driver.NoSQLHandle; +import oracle.nosql.driver.NoSQLHandleConfig; +import oracle.nosql.driver.NoSQLHandleFactory; +import oracle.nosql.driver.TableNotFoundException; +import oracle.nosql.driver.http.NoSQLHandleImpl; +import oracle.nosql.driver.httpclient.HttpClient; +import oracle.nosql.driver.ops.DeleteRequest; +import oracle.nosql.driver.ops.GetTableRequest; +import oracle.nosql.driver.ops.ListTablesRequest; +import oracle.nosql.driver.ops.ListTablesResult; +import oracle.nosql.driver.ops.PrepareRequest; +import oracle.nosql.driver.ops.PrepareResult; +import oracle.nosql.driver.ops.PutRequest; +import oracle.nosql.driver.ops.PutResult; +import oracle.nosql.driver.ops.QueryRequest; +import oracle.nosql.driver.ops.QueryResult; +import oracle.nosql.driver.ops.Result; +import oracle.nosql.driver.ops.TableLimits; +import oracle.nosql.driver.ops.TableRequest; +import oracle.nosql.driver.ops.TableResult; +import oracle.nosql.driver.ops.TableResult.State; +import oracle.nosql.driver.ops.WriteMultipleRequest; +import oracle.nosql.driver.ops.WriteMultipleResult; +import oracle.nosql.driver.values.ArrayValue; +import oracle.nosql.driver.values.FieldValue; +import oracle.nosql.driver.values.MapValue; +import oracle.nosql.nson.Nson; +import oracle.nosql.nson.values.MapWalker; +import oracle.nosql.proxy.audit.ProxyAuditManager; +import oracle.nosql.proxy.kv.KVTenantManager; +import oracle.nosql.proxy.protocol.HttpConstants; +import oracle.nosql.proxy.sc.LocalTenantManager; +import oracle.nosql.proxy.sc.TenantManager; +import oracle.nosql.proxy.security.AccessChecker; +import oracle.nosql.proxy.security.AccessCheckerFactory; +import oracle.nosql.proxy.security.AccessContext; +import 
oracle.nosql.proxy.security.SecureTestUtil; +import oracle.nosql.proxy.util.KVLiteBase; +import oracle.nosql.proxy.util.PassThroughTableCache; +import oracle.nosql.proxy.util.TableCache.TableEntry; +import oracle.nosql.util.HttpRequest; +import oracle.nosql.util.HttpResponse; +import oracle.nosql.util.tmi.TableInfo; +import oracle.nosql.util.tmi.TableRequestLimits; +import oracle.nosql.util.tmi.TenantLimits; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Rule; +import org.junit.rules.TestRule; +import org.junit.rules.TestWatcher; +import org.junit.runner.Description; + +public class ProxyTestBase extends KVLiteBase { + + /* + * Proxy state + */ + private static int PROXY_PORT = 8095; + protected static int PROXY_HTTPS_PORT = 8096; + private static int PROXY_ACCEPT_THREADS = 2; + private static int PROXY_REQUEST_THREADS = 0; /* use default */ + private static int PROXY_RETRY_DELAY_MS_DEFAULT = 30; + /* + * If these next 2 are non-zero they will use thread pools for requests + * as well as handling async completions from KV. Default off. They can be + * turned on using -Dproxy.request.poolsize= and + * -Dproxy.kv.poolsize= from the command line + */ + private static int PROXY_REQUEST_POOL_SIZE = 0; + private static int PROXY_KV_POOL_SIZE = 0; + private static boolean PROXY_MONITOR_STATS_ENABLED = false; + protected static boolean SECURITY_ENABLED = false; + protected static String TEST_TENANT_ID = "ProxyTestTenant"; + protected static String TEST_COMPARTMENT_ID = null; + protected static String TEST_COMPARTMENT_ID_FOR_UPDATE = null; + + protected static String PROXY_REQUEST_THREADS_PROP = "proxy.request.threads"; + protected static String PROXY_ACCEPT_THREADS_PROP = "proxy.accept.threads"; + protected static String PROXY_REQUEST_POOL_SIZE_PROP = + "proxy.request.poolsize"; + protected static String PROXY_KV_POOL_SIZE_PROP = + "proxy.kv.poolsize"; + protected static String PROXY_ASYNC_PROP = "test.async"; + protected static String PROXY_ERROR_LIMITING_PROP = "test.errorlimiting"; + protected static String PROXY_THROTTLING_RETRIES_PROP = + "test.throttlingretries"; + protected static String PROXY_AUTH_RETRIES_PROP = "test.authretries"; + protected static String PROXY_RETRY_DELAY_MS_PROP = "test.retrydelayms"; + protected static String PROXY_NUM_PROXIES_PROP = "test.numproxies"; + protected static String PROXY_DRL_ENABLED_PROP = "test.drlenabled"; + protected static String PROXY_DRL_USE_DISTRIBUTED_PROP = + "test.drlusedistributed"; + protected static String PROXY_DRL_TABLENAME_PROP = "test.drltablename"; + protected static String PROXY_DRL_RATE_FACTOR_PROP = "test.drlratefactor"; + protected static String KVLITE_USETHREADS_PROP = "test.usethreads"; + protected static String KVLITE_MULTISHARD_PROP = "test.multishard"; + protected static String KVLITE_MEMORYMB_PROP = "test.memorymb"; + protected static String ONPREM_PROP = "onprem"; + protected static String USEMC_PROP = "usemc"; + protected static String USECLOUD_PROP = "usecloud"; + protected static String VERBOSE_PROP = "test.verbose"; + protected static String TENANT_ID_PROP = "tenant.id"; + protected static String TEST_V3_PROP = "test.v3"; + protected static String NO_LIMITS_PROP = "test.nolimits"; + protected static String OS_PROP = "os.name"; + + protected static boolean isLinux = + System.getProperty(OS_PROP).toLowerCase().contains("linux"); + protected static boolean isMac = + System.getProperty(OS_PROP).toLowerCase().contains("mac"); + + protected static 
String TEST_MRTABLE_PROP = "test.mrtable"; + + protected static String COMPARTMENT_ID_PROP = "test.compartment"; + protected static String COMPARTMENT_ID_FOR_UPDATE_PROP = "test.compartment.forupdate"; + protected static String OCI_CONFIG_FILE_PROP = "oci.config.file"; + protected static String OCI_PROFILE_PROP = "oci.profile"; + protected static String TENANT_LIMITS_FILE_PROP = "tenant.limits.file"; + + /* + * Tests don't need or use peak throughput information. Ideally there would + * be a way to disable it. + */ + protected static final int PEAK_THROUGHPUT_COLLECTION_PERIOD_DEFAULT_SEC = + Integer.MAX_VALUE; + protected static final int PEAK_THROUGHPUT_DEFAULT_TTL_DAY = 1; + + /* + * Operation throttling constants. 0 means reset to the default in the SC. + * A small number eliminates throttling by allowing an operation for every + * millisecond in that number (1 = 1 op/ms, 1000 means 1 op/second). + */ + protected static final int NO_OP_THROTTLE = 1; + protected static final int DEFAULT_OP_THROTTLE = 0; + + protected final static String DEFINED_TAG_NAMESPACE = "tagging_test"; + protected final static String DEFINED_TAG_PROP = "tbac_key"; + protected final static String DEFAULT_DEFINED_TAG_NAMESPACE = "Oracle-Tags"; + + private static final int MIN_READ = 1; + + protected static String hostName = getHostName(); + protected static final int startPort = 13240; + protected static KVLite kvlite; + protected static Proxy[] proxies; + protected static Proxy proxy = null; + protected static int numProxies = 1; + protected static TenantManager[] tms; + protected static TenantManager tm = null; + protected static AccessChecker ac = null; + protected static ProxyAuditManager audit = null; + + /* set to true if running against an existing cloud proxy */ + protected static boolean useMiniCloud = false; + + protected static boolean cloudRunning = false; + protected static String tmUrlBase = null; + protected static boolean onprem = false; + protected static boolean verbose = false; + protected static boolean multishard = false; + protected static int memoryMB = 0; + protected static boolean SSLRunning = false; + protected static boolean testV3 = false; + + protected static boolean useCloudService = false; + protected static String OCI_CONFIG_FILE = "~/.oci/config"; + protected static String OCI_PROFILE = null; + + protected static RequestLimits rlimits = RequestLimits.defaultLimits(); + + /* non-static, used by sub-classes */ + AggregationService as; + + /* + * Create http and https handles. In test mode the SSL config is not secure + * but still exercises SSL. + */ + protected static NoSQLHandle handle = null; + protected static NoSQLHandle[] handles = null; + protected static int currentHandleNum = 0; + + protected static NoSQLHandle sslHandle = null; + + /* + * An instance with non-default limits to make tests run reasonably + */ + protected static int NUM_TABLES = 10; + protected static int NUM_SCHEMA_EVOLUTIONS = 6; + + protected static TenantLimits tenantLimits = + TenantLimits.getNewDefault(); + static { + tenantLimits.setNumTables(NUM_TABLES) + /* + * NOTE: the per-table read/write limits need to be >= + * 1/2 of the per-tenant limit in order for + * LimitsTest to work correctly. + * See testTableProvisioningLimits.
+ */ + .setDdlRequestsRate(400) + .setTableLimitReductionsRate(50) + .setNumFreeTables(3) + .setNumAutoScalingTables(3) + .setBillingModeChangeRate(2); + TableRequestLimits tableLimits = tenantLimits.getStandardTableLimits(); + tableLimits.setTableReadUnits(90000) + .setTableWriteUnits(30000) + .setSchemaEvolutions(NUM_SCHEMA_EVOLUTIONS); + } + + protected static boolean USE_SSL_HOOK = false; + protected static boolean TEST_TRACE = Boolean.getBoolean("test.trace"); + + @Rule + public final TestRule watchman = new TestWatcher() { + + @Override + protected void starting(Description description) { + if (TEST_TRACE) { + System.out.println("Starting test: " + + description.getMethodName()); + } + } + }; + + @BeforeClass + public static void staticSetUp() + throws Exception { + staticSetUp(tenantLimits); + } + + public static void staticSetUp(TenantLimits tl) + throws Exception { + + doStaticSetup(); + + cleanupTestDir(); + + proxy = startup(tl, true/*startkvlite*/); + + if (useMiniCloud) { + if (scHost != null && scPort != null) { + tmUrlBase = "http://" + scHost + ":" + scPort + "/V0/tm/"; + } + } + } + + @AfterClass + public static void staticTearDown() + throws Exception { + + if (useMiniCloud) { + if (scUrlBase != null) { + deleteTier(getTenantId()); + } + return; + } + + if (handle != null) { + for (int x=0; x= numProxies) { + currentHandleNum = 0; + } + return handles[currentHandleNum++]; + } + + /* Used by ElasticityTest to start a proxy without also starting KVLite */ + public static Proxy startProxy() + throws Exception { + + return startup(tenantLimits, false); + } + + private static Proxy startup(TenantLimits pTenantLimts, + boolean startKVLite) + throws Exception { + + /* + * Determine if running against an existing cloud proxy such as the + * MiniCloud. If so, don't start KVLite or a proxy or the aggregation + * service. Also check for proxy host and port set in system properties + * to override the defaults. 
+ */ + onprem = Boolean.getBoolean(ONPREM_PROP); + useMiniCloud = Boolean.getBoolean(USEMC_PROP); + useCloudService = Boolean.getBoolean(USECLOUD_PROP); + /* cloudRunning is general flag for both minicloud or cloud test */ + cloudRunning = useMiniCloud || useCloudService; + verbose = Boolean.getBoolean(VERBOSE_PROP); + testV3 = Boolean.getBoolean(TEST_V3_PROP); + + verbose("Starting tests in verbose output mode"); + + String proxyHost = System.getProperty("proxy.host"); + if (proxyHost != null) { + hostName = proxyHost; + } + + PROXY_PORT = Integer.getInteger("proxy.port", PROXY_PORT); + PROXY_MONITOR_STATS_ENABLED = Boolean.getBoolean("monitor"); + SECURITY_ENABLED = Boolean.getBoolean("security"); + + String tenantId = System.getProperty(TENANT_ID_PROP); + if (tenantId != null) { + TEST_TENANT_ID = tenantId; + } + + String compartmentId = System.getProperty(COMPARTMENT_ID_PROP); + if (compartmentId != null) { + TEST_COMPARTMENT_ID = compartmentId; + } + + compartmentId = System.getProperty(COMPARTMENT_ID_FOR_UPDATE_PROP); + if (compartmentId != null) { + TEST_COMPARTMENT_ID_FOR_UPDATE = compartmentId; + } + + numProxies = Integer.getInteger(PROXY_NUM_PROXIES_PROP, 1); + + if (useCloudService) { + + if (proxyHost == null) { + fail("System property \"proxy.host\" must be set if run " + + "against cloud service"); + } else { + /* To prevent the test from running to production env */ + if (!proxyHost.endsWith("oci.oc-test.com")) { + fail("The test can only be run against the service in " + + "pre-production, proxy.host=" + proxyHost); + } + } + + PROXY_HTTPS_PORT = 443; + + if (TEST_COMPARTMENT_ID == null) { + fail("System property \"" + COMPARTMENT_ID_PROP + + "\" must be set if run against cloud service"); + } + + if (TEST_COMPARTMENT_ID_FOR_UPDATE == null) { + fail("System property \"" + COMPARTMENT_ID_FOR_UPDATE_PROP + + "\" must be set if run against cloud service"); + } + + String value = System.getProperty(OCI_PROFILE_PROP); + if (value != null) { + OCI_PROFILE = value; + } + if (OCI_PROFILE == null) { + fail("System property \"" + OCI_PROFILE_PROP + + "\" must be set when test against cloud service"); + } + + value = System.getProperty(OCI_CONFIG_FILE_PROP); + if (value != null) { + OCI_CONFIG_FILE = value; + } + + value = System.getProperty(TENANT_LIMITS_FILE_PROP); + if (value != null) { + try (FileInputStream fis = new FileInputStream(value)) { + tenantLimits = JsonUtils.readValue(fis, TenantLimits.class); + } catch (IOException ex) { + fail("Unable to load tenant limits from file: " + value); + } + } else { + /* + * If tenantLimits is not provided, set the tenantLimits to null. + * Some tests depend on the tenantLimits will be skipped in + * cloud test if the tenantLimits is not provided. + */ + tenantLimits = null; + } + } + + if (cloudRunning) { + /* + * Add a new tier with specified tenantLimits, add the test + * tenantId associated with the tier. 
+ */ + addTier(getTenantId(), pTenantLimts); + return proxy; + } + + boolean noLimits = Boolean.getBoolean(NO_LIMITS_PROP); + + if (startKVLite) { + multishard = Boolean.getBoolean(KVLITE_MULTISHARD_PROP); + memoryMB = Integer.getInteger(KVLITE_MEMORYMB_PROP, 0); + boolean useThreads = Boolean.getBoolean(KVLITE_USETHREADS_PROP); + if (useThreads) { + multishard = false; + verbose("Starting kvlite using threads in this jvm"); + } else { + verbose("Starting kvlite using separate jvm process, " + + "multishard=" + multishard + + ", memoryMB=" + memoryMB); + } + + kvlite = startKVLite(hostName, + null, /* default store name */ + useThreads, + verbose, + multishard, + memoryMB, + false); /* secured */ + } + + /* + * Set Netty to use JDK logger factory. + * + * Since 19.1, KV added slf4j-api.jar on the class path. By default, + * Netty tries to instantiate slf4j logger first then JDK logger, so + * it will use slf4j-api by default because of that KV change. However, + * slf4j needs additional implementation jar to do actual logging, + * otherwise, it will only produce NOP warnings. + */ + InternalLoggerFactory.setDefaultFactory(JdkLoggerFactory.INSTANCE); + SelfSignedCertificate ssc = new SelfSignedCertificate(getHostName()); + prepareTruststore(ssc.certificate()); + /* + * Configure both HTTP and HTTPS + * TODO: think about how to inject connection failures into + * proxy's HTTP handling -- drop connections, etc + */ + Properties commandLine = new Properties(); + + /* + * the no-limits property is not used in general. It's present to + * see the failures that occur when expected limits are not + * enforced (-Dtest.nolimits=true) + */ + if (noLimits) { + commandLine.setProperty(Config.NO_LIMITS.paramName, + Boolean.toString(noLimits)); + } + commandLine.setProperty(Config.STORE_NAME.paramName, + getStoreName()); + + commandLine.setProperty(Config.HELPER_HOSTS.paramName, + (hostName + ":" + getKVPort())); + + + Config.ProxyType ptype = (onprem ? 
Config.ProxyType.KVPROXY : + Config.ProxyType.CLOUDTEST); + + commandLine.setProperty(Config.PROXY_TYPE.paramName, ptype.name()); + + if (USE_SSL_HOOK) { + commandLine.setProperty(Config.SSL_CERTIFICATE.paramName, + ssc.certificate().getAbsolutePath()); + commandLine.setProperty(Config.SSL_PRIVATE_KEY.paramName, + ssc.privateKey().getAbsolutePath()); + + /* netty disable TLSv1.3 by default, enable it for SslTest */ + commandLine.setProperty(Config.SSL_PROTOCOLS.paramName, + "TLSv1.3,TLSv1.2,TLSv1.1"); + SSLRunning = true; + } else { + SSLRunning = false; + } + + + /* + * note the properties need to be checked at start time, to allow + * the BeforeClass method to set them for specific tests + */ + int reqThreads = Integer.getInteger(PROXY_REQUEST_THREADS_PROP, + PROXY_REQUEST_THREADS); + int accThreads = Integer.getInteger(PROXY_ACCEPT_THREADS_PROP, + PROXY_ACCEPT_THREADS); + int requestPoolSize = Integer.getInteger(PROXY_REQUEST_POOL_SIZE_PROP, + PROXY_REQUEST_POOL_SIZE); + int kvPoolSize = Integer.getInteger(PROXY_KV_POOL_SIZE_PROP, + PROXY_KV_POOL_SIZE); + + commandLine.setProperty(Config.NUM_REQUEST_THREADS.paramName, + Integer.toString(reqThreads)); + commandLine.setProperty(Config.NUM_ACCEPT_THREADS.paramName, + Integer.toString(accThreads)); + commandLine.setProperty(Config.REQUEST_THREAD_POOL_SIZE.paramName, + Integer.toString(requestPoolSize)); + commandLine.setProperty(Config.KV_THREAD_POOL_SIZE.paramName, + Integer.toString(kvPoolSize)); + commandLine.setProperty(Config.MONITOR_STATS_ENABLED.paramName, + Boolean.toString(PROXY_MONITOR_STATS_ENABLED)); + + commandLine.setProperty(Config.VERBOSE.paramName, + Boolean.toString(verbose)); + + /* async now defaults to true */ + setDefaultTrue(commandLine, PROXY_ASYNC_PROP, Config.ASYNC.paramName); + + /* default auth retries to true */ + setDefaultTrue(commandLine, PROXY_AUTH_RETRIES_PROP, + Config.AUTH_RETRIES_ENABLED.paramName); + + /* Error limiting configs */ + /* default error limiting to true */ + setDefaultTrue(commandLine, PROXY_ERROR_LIMITING_PROP, + Config.ERROR_LIMITING_ENABLED.paramName); + + /* + * TODO: Possibly configure these? + * ERROR_DELAY_RESPONSE_THRESHOLD 5 + * ERROR_DELAY_RESPONSE_MS 200 + * ERROR_DNR_THRESHOLD 10 + * ERROR_CREDIT_MS 1000 + * ERROR_CACHE_SIZE 10000 + * ERROR_CACHE_LIFETIME_MS 3600000 + * ERROR_DELAY_POOL_SIZE 5 + */ + + /* Rate limiting configs */ + commandLine.setProperty(Config.DRL_ENABLED.paramName, + Boolean.toString( + Boolean.getBoolean(PROXY_DRL_ENABLED_PROP))); + commandLine.setProperty(Config.DRL_USE_DISTRIBUTED.paramName, + Boolean.toString( + Boolean.getBoolean(PROXY_DRL_USE_DISTRIBUTED_PROP))); + String prop = System.getProperty(PROXY_DRL_TABLENAME_PROP); + if (prop != null && prop.compareTo("") != 0) { + commandLine.setProperty(Config.DRL_TABLE_NAME.paramName, prop); + } + prop = System.getProperty(PROXY_DRL_RATE_FACTOR_PROP); + if (prop != null && prop.compareTo("") != 0) { + commandLine.setProperty(Config.DRL_RATE_FACTOR.paramName, prop); + } + + int retryDelayMs = Integer.getInteger(PROXY_RETRY_DELAY_MS_PROP, + PROXY_RETRY_DELAY_MS_DEFAULT); + commandLine.setProperty(Config.RETRY_DELAY_MS.paramName, + Integer.toString(retryDelayMs)); + //This is needed to enable query tracing, in addition to setting + //traceLevel in the driver. + commandLine.setProperty(Config.QUERY_TRACING.paramName, "true"); + + /* + * This is to test MR table locally, set MR table names to property + * "test.mrtable", comma separated. 
+ */ + String tables = System.getProperty("test.mrtable"); + List mrTableNames = null; + if (tables != null) { + mrTableNames = new ArrayList<>(); + for (String tname : tables.split(",")) { + mrTableNames.add(tname.trim().toLowerCase()); + } + } + + /* create a simple access checker */ + ac = AccessCheckerFactory.createInsecureAccessChecker(); + + proxies = new Proxy[numProxies]; + tms = new TenantManager[numProxies]; + for (int x=0; x 0) { + TimeUnit.MILLISECONDS.sleep(300); + } + /* create an appropriate TenantManager */ + if (onprem) { + /* note: in KVPROXY mode the proxy *requires* a KVTenantManager */ + tms[x] = KVTenantManager.createTenantManager(cfg); + } else { + tms[x] = new TestTenantManager(cfg, mrTableNames); + } + proxies[x] = Proxy.initialize(cfg, tms[x], ac, audit); + assert proxies[x] != null; + } + proxy = proxies[0]; + tm = tms[0]; + return proxy; + } + + protected static int getRegionId() { + return tm.getLocalRegionId(); + } + + public static Proxy getProxy() { + return getProxy(0); + } + + public static Proxy getProxy(int proxyNum) { + return proxies != null ? proxies[proxyNum] : null; + } + + protected NoSQLHandle configHandle(String endpoint) { + + NoSQLHandleConfig hconfig = new NoSQLHandleConfig(endpoint); + return setupHandle(hconfig); + } + + protected NoSQLHandle configHandle(URL url) { + + NoSQLHandleConfig hconfig = new NoSQLHandleConfig(url); + return setupHandle(hconfig); + } + + /* Set configuration values for the handle */ + protected NoSQLHandle setupHandle(NoSQLHandleConfig hconfig) { + + /* + * 5 retries, default retry algorithm + */ + hconfig.configureDefaultRetryHandler(5, 0); + + hconfig.setRequestTimeout(30000); + + setHandleConfig(hconfig); + + /* allow test cases to add/modify handle config */ + perTestHandleConfig(hconfig); + return getHandle(hconfig); + } + + protected void setHandleConfig(NoSQLHandleConfig config) { + config.setDefaultCompartment(TEST_COMPARTMENT_ID); + if (useCloudService) { + SecureTestUtil.setAuthProvider(config, OCI_CONFIG_FILE, OCI_PROFILE); + } else { + SecureTestUtil.setAuthProvider(config, + SECURITY_ENABLED, + onprem, + getTenantId()); + } + } + + protected void perTestHandleConfig(NoSQLHandleConfig hconfig) { + /* no-op */ + } + + protected NoSQLHandle configNoRetryHandle(String tenant) { + + NoSQLHandleConfig hconfig = new NoSQLHandleConfig(getProxyEndpoint()); + + /* + * no retry + */ + hconfig.configureDefaultRetryHandler(0, 0); + + SecureTestUtil.setAuthProvider(hconfig, + SECURITY_ENABLED, + onprem, + tenant); + return getHandle(hconfig); + } + + protected void checkErrorMessage(Throwable t) { + if (t == null || t.getMessage() == null) { + return; + } + assertFalse(t.getMessage().contains(AccessContext.INTERNAL_OCID_PREFIX)); + } + + public static String getProxyEndpoint() { + return getProxyEndpoint(0); + } + + public static String getProxyEndpoint(int proxyNum) { + try { + if (useCloudService) { + return getProxyHttpsEndpoint(proxyNum); + } + return "http://" + hostName + ":" + getProxyPort(proxyNum); + } catch (Exception e) { + } + return null; + } + + public static String getProxyHttpsEndpoint() { + return getProxyHttpsEndpoint(0); + } + + public static String getProxyHttpsEndpoint(int proxyNum) { + try { + return "https://" + hostName + ":" + getProxyHttpsPort(proxyNum); + } catch (Exception e) { + } + return null; + } + + public static URL getProxyURL() { + return getProxyURL(0); + } + + public static URL getProxyURL(int proxyNum) { + try { + return new URL("http", hostName, getProxyPort(proxyNum), "/"); + 
} catch (Exception e) { + } + return null; + } + + /** + * Allows classes to create a differently-configured NoSQLHandle. + */ + protected NoSQLHandle getHandle(NoSQLHandleConfig config) { + /* + * Create a Logger. Configuration for the logger is in proxy/build.xml + */ + Logger logger = Logger.getLogger(getClass().getName()); + config.setLogger(logger); + + /* + * Open the handle + */ + NoSQLHandle h = NoSQLHandleFactory.createNoSQLHandle(config); + + /* do a simple op to set the protocol version properly */ + try { + GetTableRequest getTable = + new GetTableRequest().setTableName("noop"); + h.getTable(getTable); + } catch (TableNotFoundException e) {} + + return h; + } + + /* + * Takes a certificate file and puts it in a trust store (KeyStore) + * named "proxycert" and sets the javax.net.ssl.trustStore property to + * that file so the driver SSL configuration finds it. + */ + protected static void prepareTruststore(File certFile) + throws Exception { + + File trustStore = new File(getTestDir(), "proxycert"); + BufferedInputStream bis = null; + try { + KeyStore ks = KeyStore.getInstance("JKS"); + ks.load(null, null); + FileInputStream fis = new FileInputStream(certFile); + bis = new BufferedInputStream(fis); + CertificateFactory factory = CertificateFactory.getInstance("X.509"); + Certificate cert = null; + while (bis.available() > 0) { + cert = factory.generateCertificate(bis); + } + ks.setCertificateEntry("test", cert); + ks.store(new FileOutputStream(trustStore), "123456".toCharArray()); + + } finally { + if (bis != null) { + bis.close(); + } + } + + /* set the trust store property, so driver can use that */ + System.setProperty("javax.net.ssl.trustStore", + trustStore.getAbsolutePath()); + } + + /* + * Utility methods for use by subclasses + */ + + /** + * Starts an instance of AggregationService using the specified + * poll periods. There must only be one instance running in the process + * at any time. There is no (current) way to stop this service, which + * runs in its own thread. 
+ */ + synchronized AggregationService startAggregationService( + int throughputPollPeriodSec, + int sizePollPeriodSec, + boolean verbose) throws Exception { + + /* + * Don't start the AS if running against an existing cloud service + */ + if (cloudRunning || onprem) { + return null; + } + + if (as != null) { + throw new IllegalArgumentException( + "Can't start AggregationService, it's already running"); + } + + as = startAggregationService(throughputPollPeriodSec, + sizePollPeriodSec, + verbose); + return as; + } + + synchronized public static AggregationService startAggregationServiceStatic( + int throughputPollPeriodSec, + int sizePollPeriodSec, + boolean verbose) throws Exception { + + final int maxRetries = 10; + final int delay = 1000; + int numRetries = 0; + + /* + * Don't start the AS if running against an existing cloud service + */ + if (cloudRunning || onprem) { + return null; + } + + AggregationService aggSrv; + + Exception failEx = null; + while (numRetries < maxRetries) { + try { + /* NOTE: verbose isn't yet used */ + aggSrv = AggregationService.createAggregationService( + getStoreName(), + new String[] {(hostName + ":" + getKVPort())}, + 180, /* throughputHistorySecs */ + throughputPollPeriodSec, + sizePollPeriodSec, + PEAK_THROUGHPUT_COLLECTION_PERIOD_DEFAULT_SEC, + PEAK_THROUGHPUT_DEFAULT_TTL_DAY, + 5); /* max threads */ + assert aggSrv != null; + return aggSrv; + } catch (IllegalStateException ise) { + failEx = ise; + try { Thread.sleep(delay); } catch (InterruptedException ie) {} + ++numRetries; + } + } + throw new IllegalArgumentException( + "Unable to start AggregationService, last exception: " + failEx); + } + + synchronized void stopAggregationService() throws Exception { + if (as != null) { + as.stop(); + as = null; + } + } + + /** + * Executes a table (DDL) operation using the supplied statement. + * + * @param handle handle to the proxy + * @param statement a table statement + * @param state if non-null, wait for the table to reach the supplied state + */ + static TableResult tableOperation(NoSQLHandle handle, + String statement, + TableResult.State state, + Class expected) { + return tableOperation(handle, statement, null, null, null, + state, expected); + } + + /** + * Executes a table (DDL) operation using the supplied statement. 
+ * + * @param handle handle to the proxy + * @param statement a table statement + * @param state if non-null, wait for the table to reach the supplied state + * @param tableName if non-null set it; it is used for changing table limits + */ + static TableResult tableOperation(NoSQLHandle handle, + String statement, + TableLimits limits, + String tableName, + TableResult.State state, + Class expected) { + return tableOperation(handle, statement, limits, null, tableName, + state, expected); + } + + static TableResult tableOperation(NoSQLHandle handle, + String statement, + TableLimits limits, + String compartment, + String tableName, + TableResult.State state, + Class expected) { + + TableRequest tableRequest = new TableRequest() + .setStatement(statement) + .setTableName(tableName) + .setTableLimits(limits) + .setCompartment(compartment) + .setTimeout(15000); + + try { + TableResult tres = handle.tableRequest(tableRequest); + assertNotNull(tres); + if (state != null) { + tres.waitForCompletion(handle, 20000, 200); + assertEquals(state, tres.getTableState()); + } + if (expected != null) { + fail("Expect to fail but succeed"); + } + return tres; + } catch (Exception e) { + if (expected == null || + !expected.equals(e.getClass())) { + fail("Unexpected exception. Expected " + expected + ", got " + + e); + } + } + return null; + } + + /** + * Simpler version of tableOperation. This will not support + * a change of limits as it doesn't accept a table name. + */ + protected static TableResult tableOperation(NoSQLHandle handle, + String statement, + TableLimits limits, + int waitMillis) { + assertTrue(waitMillis > 500); + TableRequest tableRequest = new TableRequest() + .setStatement(statement) + .setTableLimits(limits) + .setTimeout(15000); + + return handle.doTableRequest(tableRequest, waitMillis, 200); + } + + /** + * Simpler version of tableOperation. This will not support + * a change of limits as it doesn't accept a table name. + */ + protected static TableResult tableOperation(NoSQLHandle handle, + String compartment, + String statement, + TableLimits limits, + int waitMillis) { + assertTrue(waitMillis > 500); + TableRequest tableRequest = new TableRequest() + .setStatement(statement) + .setTableLimits(limits) + .setCompartment(compartment) + .setTimeout(15000); + + return handle.doTableRequest(tableRequest, waitMillis, 1000); + } + + /** + * Executes a table (DDL) operation using the supplied statement. + * This method should only be called if success is expected. + * + * @param handle handle to the proxy + * @param statement a table statement + * @limits must be non-null if it is a create table statement + * @param state if non-null, wait for the table to reach the supplied state + * @param waitMillis the amount of time to wait for the state to be reached + * if state is non-null; ignored if state is null. + */ + protected static TableResult tableOperation(NoSQLHandle handle, + String statement, + TableLimits limits, + TableResult.State state, + int waitMillis) { + return tableOperation(handle, statement, limits, null, + state, waitMillis); + } + + protected static TableResult tableOperation(NoSQLHandle handle, + String statement, + TableLimits limits, + String tableName, + TableResult.State state, + int waitMillis) { + return tableOperation(handle, statement, limits, getCompartmentId(), + tableName, null /* matchETag */, state, + waitMillis); + } + + /** + * Executes a table (DDL) operation using the supplied statement. + * This method should only be called if success is expected. 
+ * + * @param handle handle to the proxy + * @param statement a table statement + * @param limits must be non-null if it is a create table statement + * @param compartment the compartment id, if available + * @param tableName if non-null, the operation is a change of table limits + * @param matchETag the etag that must be matched for this operation + * @param state if non-null, wait for the table to reach the supplied state + * @param waitMillis the amount of time to wait for the state to be reached + * if state is non-null; ignored if state is null. + */ + static TableResult tableOperation(NoSQLHandle handle, + String statement, + TableLimits limits, + String compartment, + String tableName, + String matchETag, + TableResult.State state, + int waitMillis) { + + assertTrue(waitMillis > 500); + + String startTimeStr = Instant.now().toString(); + + TableResult tres = null; + TableRequest tableRequest = new TableRequest() + .setStatement(statement) + .setTableLimits(limits) + .setTableName(tableName) + .setCompartment(compartment) + .setMatchEtag(matchETag) + .setTimeout(15000); + + tres = handle.tableRequest(tableRequest); + assertNotNull(tres); + + if (tres != null && + tres.getTableName() != null && + state != null) { + tres.waitForCompletion(handle, waitMillis, 200); + String msg = "[" + Instant.now() + "] Table " + tres.getTableName() + + " failed to reach " + state + " within " + waitMillis + + "ms, startTime = " + startTimeStr; + assertEquals(msg, state, tres.getTableState()); + } + return tres; + } + + /* list tables */ + ListTablesResult listTables() { + return listTables(handle); + } + + /* list tables */ + ListTablesResult listTables(NoSQLHandle thandle) { + + ListTablesRequest listTables = + new ListTablesRequest(); + /* ListTablesRequest returns ListTablesResult */ + ListTablesResult lres = thandle.listTables(listTables); + return lres; + } + + /* list tables */ + protected List<String> listNonSYSTables(NoSQLHandle thandle) { + + ListTablesResult ltr = listTables(thandle); + ArrayList<String> tables = new ArrayList<String>(); + for (String tableName: ltr.getTables()) { + if (tableName.startsWith("SYS$")) { + continue; + } + tables.add(tableName); + } + return tables; + } + + /** + * Gets indexes for the named table, returning them as independent + * JSON strings, one for each index. + * + * For now, these are extracted from the GetTableResult. At some point this + * may be added to the public API. + */ + static String[] listIndexes(NoSQLHandle handle, String tableName) { + final String INDEXES = "indexes"; /* this is from TableJsonUtils */ + GetTableRequest getTable = + new GetTableRequest().setTableName(tableName); + TableResult res = handle.getTable(getTable); + /* parse the JSON for navigation */ + + String[] indexes = null; + MapValue map = FieldValue.createFromJson(res.getSchema(), null).asMap(); + if (map.get(INDEXES) != null) { + ArrayValue array = map.get(INDEXES).asArray(); + indexes = new String[array.size()]; + for (int i = 0; i < array.size(); i++) { + indexes[i] = array.get(i).toJson(); + } + } + + /* indexes are in top-level "indexes" field, which is an array */ + + return indexes; + } + + /** + * Delays for the specified number of milliseconds, ignoring exceptions + */ + public static void delay(int delayMS) { + try { + Thread.sleep(delayMS); + } catch (Exception e) { + } + } + + /** + * Deletes all records from the specified table.
+ */ + void deleteTable(String tableName) { + QueryRequest queryRequest = new QueryRequest().setStatement( + ("delete from " + tableName)); + do { + handle.query(queryRequest); + } while (!queryRequest.isDone()); + } + + /* TODO: when we can rely on the sizes, assert specific sizes */ + void assertReadKB(Result res) { + if (onprem) return; + assertTrue(res.getReadKBInternal() > 0); + } + + /* TODO: when we can rely on the sizes, assert specific sizes */ + void assertWriteKB(Result res) { + if (onprem) return; + assertTrue(res.getWriteKBInternal() > 0); + } + + void assertReadKB(Result result, int expReadKB, boolean isAbsolute) { + if (onprem) return; + assertReadKB(expReadKB, + result.getReadKBInternal(), + result.getReadUnitsInternal(), + isAbsolute); + } + + void assertWriteKB(Result result, int expWriteKB) { + if (onprem) return; + assertWriteKB(expWriteKB, + result.getWriteKBInternal(), + result.getWriteUnitsInternal()); + } + + void assertReadKB(int expKB, + int actualKB, + int actualUnits, + boolean isAbsolute) { + if (onprem) return; + assertEquals("Wrong readKB", expKB, actualKB); + assertReadKBUnits(actualKB, actualUnits, isAbsolute); + } + + void assertReadKBUnits(int actualKB, int actualUnits, boolean isAbsolute) { + if (onprem) return; + int exp = isAbsolute ? actualKB * 2 : actualKB; + assertEquals("Wrong readUnits", exp, actualUnits); + } + + void assertWriteKB(int expKB, int actualKB, int actualUnits) { + if (onprem) return; + assertEquals("Wrong writeKB", expKB, actualKB); + assertEquals("Wrong writeUnits", expKB, actualUnits); + } + + void assertCost(Result ret, int readUnits, int writeUnits) { + if (onprem) return; + assertEquals(readUnits, ret.getReadUnitsInternal()); + assertEquals(writeUnits, ret.getWriteUnitsInternal()); + } + + static void dropTable(NoSQLHandle nosqlHandle, String tableName) { + TableResult tres = dropTableWithoutWait(nosqlHandle, tableName); + + if (tres.getTableState().equals(TableResult.State.DROPPED)) { + return; + } + + tres.waitForCompletion(nosqlHandle, 20000, 200); + } + + static void dropTableWithoutWait(String tableName) { + dropTableWithoutWait(handle, tableName); + } + + private static TableResult dropTableWithoutWait(NoSQLHandle nosqlHandle, + String tableName) { + final String dropTableDdl = "drop table if exists " + tableName; + + TableRequest tableRequest = new TableRequest() + .setStatement(dropTableDdl) + .setTimeout(100000); + + TableResult tres = nosqlHandle.tableRequest(tableRequest); + assertNotNull(tres); + return tres; + } + + /** + * Lists all tables and drops them. 
+ */ + void dropAllTables() { + dropAllTables(handle, false); + } + + protected static void dropAllTables(NoSQLHandle nosqlHandle, boolean wait) { + + /* get the names of all tables under this tenant */ + ListTablesRequest listTables = new ListTablesRequest(); + ListTablesResult lres = nosqlHandle.listTables(listTables); + ArrayList<TableResult> droppedTables = new ArrayList<TableResult>(); + + String[] tables = lres.getTables(); + if (tables.length == 0) { + return; + } + + /* + * clean up all the tables in descending order of name, so that child + * tables are dropped before their parents + */ + Arrays.sort(tables, String.CASE_INSENSITIVE_ORDER.reversed()); + for (int i = 0; i < tables.length; i++) { + String tableName = tables[i]; + /* on-prem config may find system tables, which can't be dropped */ + if (tableName.startsWith("SYS$")) { + continue; + } + + /* ignore, but note exceptions */ + try { + if (wait) { + dropTable(nosqlHandle, tableName); + continue; + } + TableResult tres = dropTableWithoutWait(nosqlHandle, tableName); + droppedTables.add(tres); + } catch (Exception e) { + System.err.println("DropAllTables: drop fail, table " + + tableName + ": " + e); + } + } + if (wait) { + return; + } + + /* + * don't wait for ACTIVE state. This may mean occasional + * failures but as long as tests pass that is ok. + */ + + /* wait for all tables dropped */ + for (TableResult tres: droppedTables) { + /* ignore, but note exceptions */ + try { + tres.waitForCompletion(nosqlHandle, 100000, 200); + } catch (Exception e) { + System.err.println("DropAllTables: drop wait fail, table " + + tres + ": " + e); + } + } + } + + static String tenantIdQueryString() { + return "?" + TENANT_ID + "=" + getTenantId(); + } + + /* these may be more flexible in the future */ + + static String getEndpoint() { + return getEndpoint(0); + } + + static String getEndpoint(int proxyNum) { + return getProxyHost() + ":" + getProxyPort(proxyNum); + } + + public static boolean onprem() { + return onprem; + } + + public static int getProxyPort() { + return getProxyPort(0); + } + + public static int getProxyPort(int proxyNum) { + return PROXY_PORT + (proxyNum * 10); + } + + public static String getProxyHost() { + return hostName; + } + + public static int getProxyHttpsPort() { + return getProxyHttpsPort(0); + } + + public static int getProxyHttpsPort(int proxyNum) { + return PROXY_HTTPS_PORT + (proxyNum * 10); + } + + protected static String getCompartmentId() { + if (TEST_COMPARTMENT_ID != null) { + return TEST_COMPARTMENT_ID; + } + return getTenantId(); + } + + public static String getTenantId() { + return TEST_TENANT_ID; + } + + public static boolean isSecure() { + return SECURITY_ENABLED; + } + + static boolean getProxyMonitorStatusEnabled() { + return PROXY_MONITOR_STATS_ENABLED; + } + + protected static String getProxyBase() { + return System.getProperty("proxyroot"); + } + + /* + * TODO: Remove this method once the configured KV has been upgraded to + * include the fix for NOSQL-378. + * + * The fix for NOSQL-378 is included in KV 21.3 but not yet in the 21.2 + * release used by the current proxy. The fix impacts the expected read cost + * in query-related tests including QueryTest, ChildTableTest and KVProxyTest. + * + * To allow the unit tests to run against KV with or without the fix, + * call this method to check whether the current KV has the fix and adjust + * the expected query cost if needed. + */ + protected static boolean dontDoubleChargeKey() { + return checkKVVersion(21, 3, 1); + } + + /* + * Used to skip a test if run against KV prior to the specified version + * ...
+ */ + protected static void assumeKVVersion(String test, + int major, + int minior, + int patch) { + + assumeTrue("Skipping " + test + " if run against KV prior to " + + (major + "." + minior + "." + patch) + ": " + + KVVersion.CURRENT_VERSION.getNumericVersionString(), + checkKVVersion(major, minior, patch)); + } + + /* + * Returns true if the current KV is >= version + */ + public static boolean checkKVVersion(int major, int minior, int patch) { + KVVersion minVersion = new KVVersion(major, minior, patch, null); + return KVVersion.CURRENT_VERSION.compareTo(minVersion) >= 0; + } + + protected static String makeString(int size) { + final String pattern = "abcde"; + StringBuilder sb = new StringBuilder(size); + while (sb.length() < size) { + sb.append(pattern); + } + sb.delete(size, size + pattern.length()); + return sb.toString(); + } + + /** + * Returns the expected read and write KB for the PutRequest, the return + * value is an int array that contains 2 values: ReadKB and WriteKB. + */ + static int[] getPutReadWriteCost(PutRequest request, + boolean shouldSucceed, + boolean rowPresent, + int recordKB, + boolean putOverwrite) { + + final int minRead = getMinRead(); + int readKB = 0; + int writeKB = 0; + + if (request.getOption() != null) { + boolean readReturnRow = rowPresent; + + switch (request.getOption()) { + case IfAbsent: + readKB = readReturnRow ? recordKB : minRead; + writeKB = shouldSucceed ? recordKB : 0; + break; + case IfVersion: + readKB = readReturnRow ? recordKB : minRead; + writeKB = shouldSucceed ? (recordKB /* old record size */ + + recordKB /* new record size */) : 0; + break; + case IfPresent: + /* + * PutIfPresent can return previous row and cost MIN_READ for + * searching existing row + */ + readKB = readReturnRow ? recordKB : minRead; + writeKB = shouldSucceed ? (recordKB /* old record size */ + + recordKB /* new record size */) : 0; + break; + } + } else { + /* Put can return previous row. If putOverwrite is true put + * overwrites existing row i.e. delete + insert and consume 2x + * write units. + */ + readKB = rowPresent ? recordKB : 0; + writeKB = (putOverwrite) ? recordKB + recordKB : recordKB; + } + + return new int[] {readKB, writeKB}; + } + + /** + * Returns the expected read and write KB for the DeleteRequest, the return + * value is an int array that contains 2 values: ReadKB and WriteKB. + */ + static int[] getDeleteReadWriteCost(DeleteRequest request, + boolean shouldSucceed, + boolean rowPresent, + int recordKB) { + + final int minRead = getMinRead(); + int readKB = 0; + int writeKB = 0; + + boolean readReturnRow = rowPresent; + if (request.getMatchVersion() != null) { + /* + * The record is present but the version does not matched, read + * cost is recordKB, otherwise MIN_READ. + */ + readKB = readReturnRow ? recordKB : minRead; + writeKB = shouldSucceed ? recordKB : 0; + } else { + /* Delete can return previous row */ + readKB = readReturnRow ? recordKB : minRead; + writeKB = shouldSucceed ? 
recordKB : 0; + } + + return new int[] {readKB, writeKB}; + } + + protected static int getMinRead() { + return MIN_READ; + } + + protected static String getSCURL() { + if (cloudRunning) { + return scUrlBase; + } + return null; + } + + protected static String setOpThrottling(String tenantId, int value) { + if (!useMiniCloud) { + return null; + } + final String TID = "tenantId"; + final String RATE = "rate"; + + /* a map for results -- makes JSON handling easier */ + Map payload = new HashMap(); + Map oprate = new HashMap(); + payload.put("opRate", oprate); + oprate.put(RATE, value); + if (tenantId != null) { + oprate.put(TID, tenantId); + } + + final String url = tmUrlBase + "config"; + final HttpRequest httpRequest = new HttpRequest().disableRetry(); + + HttpResponse response = + httpRequest.doHttpPost(url, JsonUtils.print(payload)); + return response.getOutput(); + } + + protected static HttpResponse getPeakUsage(String tenantId, + String tableName, + long startTime, + long endTime) { + /* Cloud-only */ + if (tmUrlBase == null) { + return null; + } + + String url = tmUrlBase + "tables/" + tableName + + "/peakusage?tenantid=" + tenantId + "&compartmentid=" + tenantId; + + if (startTime != 0) { + url = url + "&start_timestamp=" + TimeUtils.getTimeStr(startTime); + } + if (endTime != 0) { + url = url + "&end_timestamp=" + TimeUtils.getTimeStr(endTime); + } + + final HttpRequest httpRequest = new HttpRequest().disableRetry(); + + return httpRequest.doHttpGet(url); + } + + protected static List doQuery(NoSQLHandle qHandle, String query) { + List results = new ArrayList(); + QueryRequest queryRequest = new QueryRequest().setStatement(query); + do { + QueryResult qres = qHandle.query(queryRequest); + results.addAll(qres.getResults()); + } while (!queryRequest.isDone()); + return results; + } + + protected static List doPreparedQuery( + NoSQLHandle qHandle, String query) { + + List results = new ArrayList(); + PrepareRequest prepReq = new PrepareRequest() + .setStatement(query); + PrepareResult prepRet = qHandle.prepare(prepReq); + assertNotNull(prepRet.getPreparedStatement()); + + QueryRequest queryRequest = + new QueryRequest().setPreparedStatement(prepRet); + do { + QueryResult qres = qHandle.query(queryRequest); + results.addAll(qres.getResults()); + } while (!queryRequest.isDone()); + return results; + } + + protected static void doTableRequest(NoSQLHandle nosqlHandle, + String statement, + boolean isDrop) { + TableRequest tableRequest = new TableRequest() + .setStatement(statement); + TableResult tres = nosqlHandle.tableRequest(tableRequest); + State waitState = isDrop ? State.DROPPED : State.ACTIVE; + + tres.waitForCompletion(nosqlHandle, 60000, 200); + assertEquals(tres.getTableState(), waitState); + } + + /** + * Simple put utility, assumes success + */ + protected static void doPut(NoSQLHandle nosqlHandle, + String tableName, + String rowAsJson) { + PutRequest preq = new PutRequest().setTableName(tableName) + .setValueFromJson(rowAsJson, null); + PutResult pres = nosqlHandle.put(preq); + assertNotNull(pres.getVersion()); + } + + /** + * Creates a (driver-based) MapValue from a JSON string. + */ + protected static MapValue createMapValueFromJson(String json) { + return oracle.nosql.driver.values.JsonUtils.createValueFromJson( + json, null).asMap(); + } + + /* + * NOTE: this may not work on-prem because the limit is not + * enforced. + */ + public static int getEffectiveMaxReadKB(QueryRequest qr) { + return (qr.getMaxReadKB() == 0 ? 
rlimits.getRequestReadKBLimit() : + qr.getMaxReadKB()); + } + + /** + * This is factored here so that it can be used by both the cloudsim-based + * tests and kv. + */ + protected void doLargeRow(NoSQLHandle thandle, boolean doWriteMultiple) { + final String createTableStatement = + "create table bigtable(" + + "id integer, " + + "large array(string), " + + "primary key(id))"; + TableRequest tableRequest = new TableRequest() + .setStatement(createTableStatement); + TableResult tres = thandle.tableRequest(tableRequest); + tres.waitForCompletion(thandle, 10000, 200); + MapValue value = new MapValue().put("id", 1); + ArrayValue array = createLargeStringArray(3500000); + value.put("large", array); + PutRequest preq = new PutRequest().setTableName("bigtable"). + setValue(value); + PutResult pres = thandle.put(preq); + assertNotNull(pres.getVersion()); + + if (doWriteMultiple) { + /* + * Now with write multiple + */ + WriteMultipleRequest wmReq = new WriteMultipleRequest(); + /* don't reuse the PutRequest above, it has been modified */ + preq = new PutRequest().setTableName("bigtable").setValue(value); + wmReq.add(preq, false); + WriteMultipleResult wmRes = thandle.writeMultiple(wmReq); + assertEquals(1, wmRes.size()); + } + } + + private ArrayValue createLargeStringArray(int size) { + ArrayValue array = new ArrayValue(); + int tsize = 0; + final String s = "abcdefghijklmnop"; + while (tsize < size) { + array.add(s); + tsize += s.length(); + } + return array; + } + + protected static void verbose(String msg) { + if (verbose) { + System.out.println(msg); + } + } + + /** + * Get an HttpClient instance. + * Used by tests that need low-level http clients. + */ + protected static HttpClient createHttpClient(String host, + int port, + int numThreads, + String name, + Logger logger) { + /* + * java SDK changed its internal HttpClient constructors + * as of 5.3.2. So use reflection to figure out which + * methods to call. + */ + try { + Class hcClass = Class.forName( + "oracle.nosql.driver.httpclient.HttpClient"); + + try { + /* new driver method */ + return (HttpClient)hcClass.getMethod( + "createMinimalClient", + String.class, + int.class, + SslContext.class, + int.class, + String.class, + Logger.class) + .invoke(null, + host, + port, + null /* SslContext */, + 0 /* handshakeTimeout */, + name, + logger); + } catch (NoSuchMethodException e) { + /* old driver method */ + return (HttpClient)hcClass.getDeclaredConstructor( + String.class, + int.class, + int.class, + int.class, + int.class, + SslContext.class, + String.class, + Logger.class) + .newInstance( + host, + port, + numThreads, + 0 /* ConnectionPoolSize */, + 0 /* PoolMaxPending */, + null /* SslContext */, + name, + logger); + } + } catch (Exception e) { + System.out.println("Can't create HttpClient: " + e); + return null; + } + } + + protected static int getV4ErrorCode(ByteBuf buf) { + oracle.nosql.proxy.protocol.ByteInputStream b = null; + try { + buf.readerIndex(0); + b = new oracle.nosql.proxy.protocol.ByteInputStream(buf); + MapWalker walker = new MapWalker(b); + while (walker.hasNext()) { + walker.next(); + String name = walker.getCurrentName(); + if (name.equals(ERROR_CODE)) { + return Nson.readNsonInt(b); + } else { + walker.skip(); + } + } + } catch (Exception e) { + return -1; + } finally { + if (b != null) { + b.close(); + } + } + return -1; + } + + protected static void forceV3(NoSQLHandleImpl handle) { + assertNotNull(handle); + short version = handle.getSerialVersion(); + if (version <= 3) { + return; + } + + /* Sigh. 
We can't guarantee that the SDK has this method. */ + Class<?> clientClass = null; + try { + clientClass = Class.forName("oracle.nosql.driver.http.Client"); + } catch (Throwable e) { + System.out.println("Could not find Client class: " + e); + clientClass = null; + } + assertNotNull(clientClass); + Method setVersionFunction = null; + try { + setVersionFunction = clientClass.getMethod("setSerialVersion", + short.class); + } catch (Throwable e) { + verbose("Could not find Client.setSerialVersion(): " + e); + verbose("Skipping test"); + setVersionFunction = null; + } + assumeTrue(setVersionFunction != null); + try { + setVersionFunction.invoke(handle.getClient(), (short)3); + } catch (Exception e) { + verbose("Could not invoke Client.setSerialVersion(): " + e); + verbose("Skipping test"); + assumeTrue(false); + } + verbose("Set serial version to 3"); + } + + + protected static TableResult getTable(String tableName, + NoSQLHandle handle) { + GetTableRequest getTable = + new GetTableRequest().setTableName(tableName); + return handle.getTable(getTable); + } + + /* set the given cmdline parameter to true unless set otherwise in prop */ + protected static void setDefaultTrue(Properties cmdLine, + String prop, String param) { + boolean propVal = true; + if (System.getProperty(prop) != null) { + propVal = Boolean.getBoolean(prop); + } + cmdLine.setProperty(param, Boolean.toString(propVal)); + } + + /* + * Get table ocid using SC rest call + * /V0/tm/tables/<tablename>?tenantid=<tenantid>&&compartmentid=<compartmentid> + */ + protected static String scGetTableOcid(String tenantId, + String compartmentId, + String tableName) + throws Exception { + + if (tmUrlBase == null) { + return null; + } + + HttpRequest httpRequest = new HttpRequest().disableRetry(); + String url = tmUrlBase + "tables/" + tableName + + "?" + HttpConstants.TENANT_ID + "=" + tenantId + + "&&" + HttpConstants.COMPARTMENT_ID + "=" + compartmentId; + + HttpResponse res = httpRequest.doHttpGet(url); + if (res.getStatusCode() != HttpURLConnection.HTTP_OK) { + throw new IllegalStateException( + "Failed to get table " + res.getOutput()); + } + + TableInfo tif = JsonUtils.readValue(res.getOutput(), TableInfo.class); + String ocid = tif.getTableOcid(); + if (ocid != null) { + return ocid.replace("_", "."); + } + return null; + } + + public static String currentTimeString() { + return ZonedDateTime.now(ZoneOffset.UTC). + format(DateTimeFormatter.ISO_INSTANT); + } + + protected static void assertTableOcid(String ocid) { + if (onprem) { + assertNull(ocid); + } else { + assertNotNull(ocid); + if (cloudRunning) { + assertTrue(ocid.contains(AccessContext.EXTERNAL_OCID_PREFIX)); + } + } + } + + public static class Timer { + private long start; + private long end; + + public Timer() { + start = System.currentTimeMillis(); + } + + public Timer start() { + start = System.currentTimeMillis(); + return this; + } + + public Timer stop() { + end = System.currentTimeMillis(); + return this; + } + + public double getTimeSeconds() { + return (end - start)/1000.0; + } + + public long getTimeMillis() { + return end - start; + } + + public long getStartMillis() { + return start; + } + + public long getEndMillis() { + return end; + } + + @Override + public String toString() { + return Double.toString(getTimeSeconds()); + } + } + + /* + * The TenantManager used in the local proxy cloud unit test. + * - Simulates MR tables; mrTableNames is the list of MR table names. + */ +
+ */ + private static class TestTenantManager extends LocalTenantManager { + + private final List mrTableNames; + + private TestTenantManager(Config cfg, List mrTableNames) { + super(connectKVStore(cfg.getTemplateKVStoreConfig() + .setStoreName(cfg.getStoreName()) + .setHelperHosts(cfg.getHelperHosts())), + cfg.getStoreName(), + false, /* noLimits */ + cfg.getHelperHosts()); + this.mrTableNames = mrTableNames; + } + + @Override + public void createTableCache(Config config, + MonitorStats stats, + SkLogger logger) { + + tableCache = new PassThroughTableCache(this, logger) { + @Override + public TableEntry getTable(String namespace, + String tableName, + String nsname, + LogContext lc) { + TableEntry entry = super.getTable(namespace, + tableName, + nsname, + lc); + return convertEntry(entry); + } + }; + } + + private TableEntry convertEntry(TableEntry entry) { + + return new TableEntry(entry.getTable()) { + + @Override + public KVStoreImpl getStore() { + return entry.getStore(); + } + + @Override + public TableAPIImpl getTableAPI() { + return entry.getTableAPI(); + } + + @Override + public String getStoreName() { + return entry.getStoreName(); + } + + @Override + public RequestLimits getRequestLimits() { + return entry.getRequestLimits(); + } + + @Override + public boolean isMultiRegion() { + if (mrTableNames != null) { + return mrTableNames.contains( + getTable().getFullName().toLowerCase()); + } + return false; + } + + @Override + public boolean isInitialized() { + return true; + } + }; + } + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/QueryResumeTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/QueryResumeTest.java new file mode 100644 index 00000000..084753f0 --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/QueryResumeTest.java @@ -0,0 +1,266 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2011, 2023 Oracle and/or its affiliates. All rights reserved. 
+ * + */ + +package oracle.nosql.proxy; + +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.util.concurrent.TimeUnit; +import java.util.logging.Logger; + +import oracle.kv.KVStore; +import oracle.kv.KVStoreConfig; +import oracle.kv.KVStoreFactory; + +import oracle.nosql.driver.NoSQLHandle; +import oracle.nosql.driver.NoSQLHandleConfig; +import oracle.nosql.driver.NoSQLHandleFactory; +import oracle.nosql.driver.TableNotFoundException; +import oracle.nosql.driver.ops.GetTableRequest; +import oracle.nosql.driver.ops.PrepareRequest; +import oracle.nosql.driver.ops.PrepareResult; +import oracle.nosql.driver.ops.PreparedStatement; +import oracle.nosql.driver.ops.PutRequest; +import oracle.nosql.driver.ops.PutResult; +import oracle.nosql.driver.ops.QueryRequest; +import oracle.nosql.driver.ops.QueryResult; +import oracle.nosql.driver.ops.TableLimits; +import oracle.nosql.driver.ops.TableResult; +import oracle.nosql.driver.values.MapValue; + +import oracle.nosql.proxy.security.SecureTestUtil; +import oracle.nosql.proxy.util.CreateStore; + +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Assume; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + + +public class QueryResumeTest extends ProxyTestBase { + + private final Logger logger = Logger.getLogger(getClass().getName()); + + private CreateStore createStore = null; + private KVStore kvstore = null; + private Proxy proxy; + private NoSQLHandle handle = null; + + private int maxReadKB = 19; + + private static final String tableName = "users"; + + private static final String usersDDL = + "CREATE TABLE IF NOT EXISTS users ( " + + " uid integer, " + + " name string, " + + " int integer, " + + " PRIMARY KEY (uid))"; + + private static final String idxIntDDL = + "CREATE INDEX IF NOT EXISTS idx_int ON users(int)"; + + private static final String queryDML = + "select * from users where 1 <= int and int <= 2 order by int desc"; + + private static final String delQueryDML = + "delete from users where uid = 10"; + + /* these override the Before/AfterClass methods in ProxyTestBase */ + @BeforeClass + public static void staticSetUp() { + Assume.assumeTrue("Skipping QueryResumeTest in minicloud or cloud test", + !Boolean.getBoolean(USEMC_PROP) && + !Boolean.getBoolean(USECLOUD_PROP)); + } + + @AfterClass + public static void staticTearDown() {} + + @Override + @Before + public void setUp() throws Exception { + cleanupTestDir(); + } + + @Override + @After + public void tearDown() throws Exception { + + if (proxy != null) { + proxy.shutdown(3, TimeUnit.SECONDS); + proxy = null; + } + + if (handle != null) { + handle.close(); + handle = null; + } + + if (kvstore != null) { + kvstore.close(); + kvstore = null; + } + + if (createStore != null) { + createStore.shutdown(); + createStore = null; + } + } + + private void createStore() throws Exception { + + int port = getKVPort(); + String rootDir = getTestDir(); + createStore = + new CreateStore( + rootDir, + getStoreName(), + port, + 3, /* nsns */ + 3, /* rf */ + 10, /*partitions*/ + 1, /*capacity*/ + 2, /* mb */ + false, /* use threads */ + null); + final File root = new File(rootDir); + root.mkdirs(); + createStore.start(); + + kvstore = KVStoreFactory.getStore( + new KVStoreConfig(getStoreName(), + String.format("%s:%s", getHostName(), port))); + + proxy = ProxyTestBase.startProxy(); + + handle = createHandle(); + } + + private NoSQLHandle createHandle() { + + NoSQLHandleConfig 
hconfig = + new NoSQLHandleConfig(ProxyTestBase.getProxyEndpoint()); + + /* 5 retries, default retry algorithm */ + hconfig.configureDefaultRetryHandler(5, 0); + + hconfig.setRequestTimeout(30000); + //hconfig.setNumThreads(20); + + SecureTestUtil.setAuthProvider(hconfig, + ProxyTestBase.SECURITY_ENABLED, + ProxyTestBase.onprem(), + ProxyTestBase.getTenantId()); + hconfig.setLogger(logger); + + /* Open the handle */ + NoSQLHandle h = NoSQLHandleFactory.createNoSQLHandle(hconfig); + + /* do a simple op to set the protocol version properly */ + try { + GetTableRequest getTable = + new GetTableRequest().setTableName("noop"); + h.getTable(getTable); + } catch (TableNotFoundException e) {} + + return h; + } + + private void createTableAndIndex() { + + TableLimits limits = new TableLimits(90000, 15000, 50); + int timeout = 20000; + + ProxyTestBase.tableOperation(handle, usersDDL, limits, + TableResult.State.ACTIVE, timeout); + ProxyTestBase.tableOperation(handle, idxIntDDL, null, + TableResult.State.ACTIVE, timeout); + } + + private void populateTable() { + + int numRows = 100; + int numRowsPerKey = 10; + MapValue row = new MapValue(); + int uid = 0; + int key = 0; + + PutRequest putRequest = new PutRequest(). + setValue(row). + setTableName(tableName); + + while (uid < numRows) { + + for (int i = 0; i < numRowsPerKey; ++i) { + + row.put("uid", uid); + row.put("int", key); + row.put("name", ("name." + key)); + + PutResult res = handle.put(putRequest); + assertNotNull("Put failed", res.getVersion()); + + ++uid; + } + + ++key; + } + + } + + @Test + public void testQueryResume() throws Exception { + + /* Create a 1x3 store with 10 partitions */ + createStore(); + + /* Create the table and index */ + createTableAndIndex(); + + /* Populate the table */ + populateTable(); + + /* Prepare the query */ + PrepareRequest preq = new PrepareRequest(); + preq.setGetQueryPlan(true); + preq.setStatement(queryDML); + PrepareResult pres = handle.prepare(preq); + PreparedStatement prep = pres.getPreparedStatement(); + + /* Execute the 1st query batch */ + QueryRequest qreq = new QueryRequest(); + qreq.setPreparedStatement(prep); + qreq.setQueryName("testQueryResume"); + qreq.setMaxReadKB(maxReadKB); + qreq.setTraceLevel(3); + + int numResults = 0; + QueryResult res = handle.query(qreq); + numResults += res.getResults().size(); + + /* Delete the row on which the query is supposed to resume from */ + QueryRequest delreq = new QueryRequest(); + delreq.setStatement(delQueryDML); + do { + res = handle.query(delreq); + res.getResults(); + } while (!delreq.isDone()); + + /* Execute the rest of the query */ + do { + res = handle.query(qreq); + numResults += res.getResults().size(); + } while (!qreq.isDone()); + + assertTrue(numResults == 19); + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/QueryTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/QueryTest.java new file mode 100644 index 00000000..d18bf782 --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/QueryTest.java @@ -0,0 +1,3091 @@ +/*- + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * See the file LICENSE for redistribution information. 
+ * + */ + +package oracle.nosql.proxy; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeTrue; + +import java.lang.reflect.Method; +import java.math.BigDecimal; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Random; + +import org.junit.Test; + +import oracle.kv.table.FieldDef.Type; +import oracle.nosql.driver.Consistency; +import oracle.nosql.driver.NoSQLHandle; +import oracle.nosql.driver.NoSQLHandleConfig; +import oracle.nosql.driver.PrepareQueryException; +import oracle.nosql.driver.ReadThrottlingException; +import oracle.nosql.driver.RequestTimeoutException; +import oracle.nosql.driver.TableNotFoundException; +import oracle.nosql.driver.WriteThrottlingException; +import oracle.nosql.driver.ops.GetIndexesRequest; +import oracle.nosql.driver.ops.GetIndexesResult; +import oracle.nosql.driver.ops.GetRequest; +import oracle.nosql.driver.ops.GetResult; +import oracle.nosql.driver.ops.PrepareRequest; +import oracle.nosql.driver.ops.PrepareResult; +import oracle.nosql.driver.ops.PreparedStatement; +import oracle.nosql.driver.ops.PutRequest; +import oracle.nosql.driver.ops.PutResult; +import oracle.nosql.driver.ops.QueryRequest; +import oracle.nosql.driver.ops.QueryResult; +import oracle.nosql.driver.ops.Result; +import oracle.nosql.driver.ops.TableLimits; +import oracle.nosql.driver.ops.TableResult; +import oracle.nosql.driver.values.ArrayValue; +import oracle.nosql.driver.values.DoubleValue; +import oracle.nosql.driver.values.FieldValue; +import oracle.nosql.driver.values.IntegerValue; +import oracle.nosql.driver.values.JsonNullValue; +import oracle.nosql.driver.values.JsonUtils; +import oracle.nosql.driver.values.MapValue; +import oracle.nosql.driver.values.NullValue; +import oracle.nosql.driver.values.StringValue; + +/** + * Test queries + */ +public class QueryTest extends ProxyTestBase { + final static int READ_KB_LIMIT = rlimits.getRequestReadKBLimit(); + + private static byte traceLevel = 0; + private static boolean showResults = false; + + private final static int MIN_QUERY_COST = 2; + + final static String tableName = "scanTable"; + final static String indexName = "idxName"; + final static String jsonTable = "jsonTable"; + /* timeout for all table operations */ + final static int timeout = 20000; + + /* Create a table */ + final static String createTableDDL = + "CREATE TABLE IF NOT EXISTS scanTable (" + + "sid INTEGER, " + + "id INTEGER, " + + "name STRING, " + + "age INTEGER, " + + "state STRING, " + + "salary LONG, " + + "array ARRAY(INTEGER), " + + "longString STRING," + + "PRIMARY KEY(SHARD(sid), id))"; + + /* Create an index on scanTable(name) */ + final String createIdxNameDDL = + "CREATE INDEX IF NOT EXISTS idxName on scanTable(name)"; + + /* Create an index on scanTable(sid, age)*/ + final String createIdxSidAgeDDL = + "CREATE INDEX IF NOT EXISTS idxSidAge ON scanTable(sid, age)"; + + /* Create an index on scanTable(state, age)*/ + final String createIdxStateAgeDDL = + "CREATE INDEX IF NOT EXISTS idxStateAge ON scanTable(state, age)"; + + /* Create an index on scanTable(state, age)*/ + final String createIdxArrayDDL = + "CREATE INDEX IF NOT EXISTS idxArray ON scanTable(array[])"; + + /* Create a table with Json field */ + final static String createJsonTableDDL = + "CREATE TABLE IF NOT EXISTS 
jsonTable (id INTEGER, info JSON, " + + "PRIMARY KEY(id))"; + + /* Create a table with 2 major keys, used in testIllegalQuery() */ + final static String createTestTableDDL = + "CREATE TABLE IF NOT EXISTS test (" + + "sid1 INTEGER, " + + "sid2 INTEGER, " + + "id INTEGER, " + + "name STRING, " + + "PRIMARY KEY(SHARD(sid1, sid2), id))"; + + final static String createIdxSid1NameDDL = + "CREATE INDEX IF NOT EXISTS idxSid1Name ON test(sid1, name)"; + + final static String createIdxNameSid1Sid2DDL = + "CREATE INDEX IF NOT EXISTS idxNameSid1Sid2 ON test(name, sid1, sid2)"; + + @Override + public void setUp() throws Exception { + super.setUp(); + + if (!testName.getMethodName().equals("testTimeout") && + !testName.getMethodName().equals("testShortTimeouts")) { + tableOperation(handle, createTableDDL, + new TableLimits(45000, 15000, 50), + TableResult.State.ACTIVE, timeout); + + tableOperation(handle, createIdxNameDDL, null, + TableResult.State.ACTIVE, timeout); + } + } + + @Test + public void testTimeout() { + /* + * This test requires large table read/write throughput, it is not + * applicable in cloud test. + */ + assumeTrue(!useCloudService); + runTimeoutTest(1, false); + } + + @Test + public void testShortTimeouts() { + /* + * This test requires large table read/write throughput, it is not + * applicable in cloud test. + */ + assumeTrue(!useCloudService); + runTimeoutTest(5, true); + } + + private void runTimeoutTest(int numLoops, boolean shortTimeouts) { + + /* Run this test against kv >= 21.3.1 */ + assumeKVVersion("testTimeout", 21, 3, 1); + + String tableDDL = + "CREATE TABLE Foo( \n" + + " id1 INTEGER, \n" + + " id2 INTEGER, \n" + + " id3 INTEGER, \n" + + " firstName STRING, \n" + + " lastName STRING, \n" + + " age INTEGER, \n" + + " id4 STRING, \n" + + "primary key (shard(id1, id2), id3, id4))"; + + Random rand = new Random(1); + + int num1 = 100; + int num2 = 40; + int num3 = 20; + int rus = (cloudRunning ? + tenantLimits.getStandardTableLimits().getTableReadUnits(): + 100000); + int wus = (cloudRunning ? 
+ tenantLimits.getStandardTableLimits().getTableWriteUnits(): + 100000); + + tableOperation(handle, tableDDL, new TableLimits(rus, wus, 50), + TableResult.State.ACTIVE, timeout); + + MapValue row = new MapValue(); + PutRequest putRequest = new PutRequest() + .setValue(row) + .setTableName("foo"); + + for (int i = 0; i < num1; i++) { + + for (int j = 0; j < num2; ++j) { + + for (int k = 0; k < num3; ++k) { + + row.put("id1", rand.nextInt(num1)); + row.put("id2", rand.nextInt(num2)); + row.put("id3", rand.nextInt(num3)); + row.put("id4", ("id4-" + i)); + row.put("firstName", ("first" + rand.nextInt(10))); + row.put("lastName", ("last" + rand.nextInt(10))); + row.put("age", rand.nextInt(100)); + + PutResult res = getNextHandle().put(putRequest); + assertNotNull("Put failed", res.getVersion()); + } + } + } + + for (int x=0; x= $id"; + final String query6 = + "select * from scanTable where sid = 0 order by sid, id"; + final String query7 = + "select * from scanTable where id = 0"; + final String query8 = + "select sid, id from scanTable where id = 0"; + final String query9 = + "select * from scanTable where name = 'name_0'"; + final String query10 = + "select id, name from scanTable where name = 'name_0'"; + final String query11 = + "select * from scanTable where length(name) = 1"; + + final int numMajor = 10; + final int numPerMajor = 10; + final int numRows = numMajor * numPerMajor; + final int recordKB = 2; + + /* Load rows to table */ + loadRowsToScanTable(numMajor, numPerMajor, recordKB); + + /* + * Perform a simple query + */ + executeQuery(query2, null, 20, 0, false); + + /* + * Perform an update query + */ + QueryRequest queryRequest = new QueryRequest().setStatement(query3); + QueryResult queryRes = getNextHandle().query(queryRequest); + + /* + * Use a simple get query to validate the update + */ + queryRequest = new QueryRequest().setStatement(query4); + queryRes = getNextHandle().query(queryRequest); + assertEquals(1, queryRes.getResults().size()); + assertEquals("joe", + queryRes.getResults().get(0).get("name").getString()); + + /* full scan to count rows */ + executeQuery(query1, null, numRows, 0, false /* usePrepStmt */); + executeQuery(query1, null, numRows, 0, true /* usePrepStmt */); + + /* + * Query with external variables + */ + Map variables = new HashMap(); + variables.put("$sid", new IntegerValue(9)); + variables.put("$id", new StringValue("3")); + executeQuery(query5, variables, 7, 0, true); + + /* Query with sort */ + executeQuery(query6, null, numPerMajor, 0, + false /* usePrepStmt */); + executeQuery(query6, null, numPerMajor, 0, + true /* usePrepStmt */); + + /* + * Query cost test + */ + int cost; + int count; + + /* query1: select * from scanTable */ + cost = getExpReadKB(false /* keyOnly */, recordKB, numRows, numRows); + count = numRows; + executeQuery(query1, false /* keyOnly */, false /* indexScan */, + count, cost, 0, 0, recordKB); + + /* query2: select * from scanTable where sid >= 8 */ + count = (numMajor - 8) * numPerMajor; + cost = getExpReadKB(false /* keyOnly */, recordKB, count, count); + executeQuery(query2, false /* keyOnly */, false /* indexScan */, + count, cost, 0, 0, recordKB); + + /* query4: select name from scanTable where sid = 9 and id = 9 */ + cost = getExpReadKB(false /* keyOnly */, recordKB, 1, 1); + count = 1; + executeQuery(query4, false /* keyOnly */, false /* indexScan */, + count, cost, 0, 0, recordKB); + + /* query7: select * from scanTable where id = 0 */ + cost = getExpReadKB(false /* keyOnly */, recordKB, numMajor, numRows); + count 
= numMajor; + executeQuery(query7, false /* keyOnly */, false /* indexScan */, + count, cost, 0, 0, recordKB); + + /* query8: select sid, id from scanTable where id = 0 */ + cost = getExpReadKB(true /* keyOnly */, recordKB, 0, numRows); + count = numMajor; + executeQuery(query8, true /* keyOnly */, false /* indexScan */, + count, cost, 0, 0, recordKB); + + /* query9: select * from scanTable where name = 'name_0'"*/ + cost = getExpReadKB(false /* keyOnly */, recordKB, numMajor, numMajor); + count = numMajor; + executeQuery(query9, false /* keyOnly */, true /* indexScan */, + count, cost, 0, 0, recordKB); + + /* query10: select id, name from scanTable where name = 'name_0' */ + cost = getExpReadKB(true /* keyOnly */, recordKB, 0, numMajor); + count = numMajor; + executeQuery(query10, true /* keyOnly */, true /* indexScan */, + count, cost, 0, 0, recordKB); + + /* query11: select * from scanTable where length(name) = 1 */ + cost = getExpReadKB(false /* keyOnly */, recordKB, + (dontDoubleChargeKey() ? 0 : numRows), numRows); + count = 0; + executeQuery(query11, false /* keyOnly */, true /* indexScan */, + count, cost, 0, 0, recordKB); + } + + /** + * Test query with numeric-base and size-based limits + */ + @Test + public void testLimits() { + final int numMajor = 10; + final int numPerMajor = 101; + final int numRows = numMajor * numPerMajor; + final int recordKB = 2; + + /* Load rows to table */ + loadRowsToScanTable(numMajor, numPerMajor, recordKB); + + /* + * number-based limit + */ + + /* Read rows from all partitions with number-based limit. */ + String query = "select * from scanTable"; + int expReadKB = getExpReadKB(false /* keyOnly */, recordKB, + numRows /* numReadRows */, + numRows /* numReadKeys */); + int expCnt = numRows; + int[] limits = new int[] {0, 20, 100, expCnt, expCnt + 1}; + for (int limit : limits) { + executeQuery(query, false /* keyOnly */, false/* indexScan */, + expCnt, expReadKB, limit, 0, recordKB); + } + + /* Read rows from single partition with number-based limit. */ + query = "select * from scanTable where sid = 5"; + expReadKB = getExpReadKB(false /* keyOnly */, recordKB, + numPerMajor /* numReadRows */, + numPerMajor /* numReadKeys */); + expCnt = numPerMajor; + limits = new int[] {0, 20, 100, expCnt, expCnt + 1}; + for (int limit : limits) { + executeQuery(query, false /* keyOnly */, false /* indexScan */, + expCnt, expReadKB, limit, 0, recordKB); + } + + /* Read rows from all shards with number-based limit. */ + query = "select * from scanTable where name = 'name_1'"; + expReadKB = getExpReadKB(false /* keyOnly */, recordKB, + numMajor /* numReadRows */, + numMajor /* numReadKeys */); + expCnt = numMajor; + limits = new int[] {0, 5, expCnt, expCnt + 1}; + for (int limit : limits) { + executeQuery(query, false /* keyOnly */, true /* indexScan */, + expCnt, expReadKB, limit, 0 /* maxReadKB */, recordKB); + } + + /* + * Size-based limit + */ + + /* Read rows from all partitions with size limit. */ + query = "select * from scanTable"; + expReadKB = getExpReadKB(false /* keyOnly */, recordKB, + numRows /* numReadRows */, + numRows /* numReadKeys */); + expCnt = numRows; + int[] maxReadKBs = new int[] {0, 500, 1000, 2000}; + for (int maxReadKB : maxReadKBs) { + executeQuery(query, false /* keyOnly */, false/* indexScan */, + expCnt, expReadKB, 0 /* numLimit */, maxReadKB, + recordKB); + } + + /* Read rows from single partition with size limit. 
*/ + query = "select * from scanTable where sid = 5"; + expReadKB = getExpReadKB(false /* keyOnly */, recordKB, + numPerMajor /* numReadRows */, + numPerMajor /* numReadKeys */); + expCnt = numPerMajor; + maxReadKBs = new int[] {0, 50, 100, 250}; + for (int maxReadKB : maxReadKBs) { + executeQuery(query, false /* keyOnly */, false /* indexScan */, + expCnt, expReadKB, 0 /* numLimit */, maxReadKB, + recordKB); + } + + /* Read rows from all shards with size limit. */ + query = "select * from scanTable where name = \"name_1\""; + expReadKB = getExpReadKB(false /* keyOnly */, recordKB, + numMajor /* numReadRows */, + numMajor /* numReadKeys */); + expCnt = numMajor; + maxReadKBs = new int[] {0, 5, 10, 25}; + for (int maxReadKB : maxReadKBs) { + executeQuery(query, false /* keyOnly */, true /* indexScan */, + expCnt, expReadKB, 0 /* numLimit */, maxReadKB, + recordKB); + } + + /* + * Number-based and size-based limit + */ + + /* Read rows from all partitions with number and size limit. */ + query = "select * from scanTable"; + expReadKB = getExpReadKB(false /* keyOnly */, recordKB, + numRows /* numReadRows */, + numRows /* numReadKeys */); + expCnt = numRows; + executeQuery(query, false /* keyOnly */, false/* indexScan */, expCnt, + expReadKB, 50 /* numLimit */, 100 /* sizeLimit */, + recordKB); + + /* Read rows from single partition with number and size limit. */ + query = "select * from scanTable where sid = 5"; + expReadKB = getExpReadKB(false /* keyOnly */, recordKB, + numPerMajor /* numReadRows */, + numPerMajor /* numReadKeys */); + expCnt = numPerMajor; + executeQuery(query, false /* keyOnly */, false/* indexScan */, expCnt, + expReadKB, 10 /* numLimit */, 20 /* sizeLimit */, recordKB); + + /* Read rows from all shards with number and size limit. 
*/ + query = "select * from scanTable where name = \"name_1\""; + expReadKB = getExpReadKB(false /* keyOnly */, recordKB, + numMajor /* numReadRows */, + numMajor /* numReadKeys */); + expCnt = numMajor; + executeQuery(query, false /* keyOnly */, true/* indexScan */, + expCnt, expReadKB, 5 /* numLimit */, 10 /* sizeLimit */, + recordKB); + } + + @Test + public void testDupElim() { + final int numMajor = 10; + final int numPerMajor = 40; + final int recordKB = 2; + + /* Load rows to table */ + loadRowsToScanTable(numMajor, numPerMajor, recordKB); + + tableOperation(handle, createIdxArrayDDL, null, + TableResult.State.ACTIVE, 10000); + + String query = + "select sid, id, t.array[size($)-2:] " + + "from scanTable t " + + "where t.array[] >any 11"; + + /* Prepare first, then execute */ + executeQuery(query, null, 200, 20, true); + } + + @Test + public void testOrderByPartitions() { + final int numMajor = 5; + final int numPerMajor = 10; + final int numRows = numMajor * numPerMajor; + final int recordKB = 2; + + /* Load rows to table */ + loadRowsToScanTable(numMajor, numPerMajor, recordKB); + + tableOperation(handle, createIdxStateAgeDDL, null, + TableResult.State.ACTIVE, 20000); + + String query; + int expReadKB, expCnt; + int[] maxReadKBs; + + //traceLevel = 3; + //showResults = true; + + /* + * Case 1: partial key + */ + query = "select sid, id, name, state " + + "from scanTable " + + "order by sid "; + + expReadKB = getExpReadKB(false /* keyOnly */, recordKB, + numRows /* numReadRows */, + numRows /* numReadKeys */); + expCnt = numRows; + maxReadKBs = new int[] {0, 4, 25, 37, 66}; + for (int maxReadKB : maxReadKBs) { + executeQuery(query, false /* keyOnly */, false/* indexScan */, + expCnt, expReadKB, 0 /* numLimit */, maxReadKB, + recordKB, Consistency.EVENTUAL); + } + + /* + * Case 2: partial key offset limit + */ + query = "select sid, id, name, state " + + "from scanTable " + + "order by sid " + + "limit 10 offset 4"; + + expCnt = 10; + maxReadKBs = new int[] {0, 5, 6, 7, 8, 9, 20, 44, 81}; + for (int maxReadKB : maxReadKBs) { + executeQuery(query, false /* keyOnly */, false/* indexScan */, + expCnt, -1 /*expReadKB*/, 0 /* numLimit */, maxReadKB, + recordKB, Consistency.EVENTUAL); + } + + traceLevel = 0; + showResults = false; + + /* + * Case 3: partial key offset limit + */ + query = "select sid, id, name, state " + + "from scanTable " + + "order by sid " + + "limit 5 offset 44"; + + expCnt = 5; + maxReadKBs = new int[] {0, 5, 14, 51, 88}; + for (int maxReadKB : maxReadKBs) { + executeQuery(query, false /* keyOnly */, false/* indexScan */, + expCnt, -1 /*expReadKB*/, 0 /* numLimit */, maxReadKB, + recordKB, Consistency.EVENTUAL); + } + } + + @Test + public void testGroupByPartitions() { + + final int numMajor = 5; + final int numPerMajor = 10; + final int numRows = numMajor * numPerMajor; + final int recordKB = 2; + + /* Load rows to table */ + loadRowsToScanTable(numMajor, numPerMajor, recordKB); + + tableOperation(handle, createIdxStateAgeDDL, null, + TableResult.State.ACTIVE, 20000); + + String query; + int expReadKB, expCnt; + int[] maxReadKBs; + + //traceLevel = 3; + //showResults = true; + + /* + * Case 1 + */ + query = "select sid, count(*) as cnt, sum(salary) as sum " + + "from scanTable " + + "group by sid"; + + expReadKB = getExpReadKB(false /* keyOnly */, recordKB, + numRows /* numReadRows */, + numRows /* numReadKeys */); + expCnt = 5; + //maxReadKBs = new int[] {0, 4, 25, 37, 66}; + maxReadKBs = new int[] {0}; + for (int maxReadKB : maxReadKBs) { + executeQuery(query, 
false /* keyOnly */, false/* indexScan */, + expCnt, expReadKB, 0 /* numLimit */, maxReadKB, + recordKB, Consistency.EVENTUAL); + } + } + + @Test + public void testOrderByShards() { + + final int numMajor = 10; + final int numPerMajor = 40; + final int recordKB = 2; + final int numRows = numMajor * numPerMajor; + + /* Load rows to table */ + loadRowsToScanTable(numMajor, numPerMajor, recordKB); + + tableOperation(handle, createIdxStateAgeDDL, null, + TableResult.State.ACTIVE, 20000); + + String query; + int expReadKB, expCnt; + int[] maxReadKBs; + + /* + * Case 1: multi-shard, covering index + */ + query = "select sid, id, state " + + "from scanTable " + + "order by state"; + /* + * TODO: NOSQL-719 + * Enable the cost check in cloud test after fix it + * + * The cases after this one may run into the same problem, since + * the case1 is executed multiple times, it is likely the table in + * KV table cache on all the proxies may be refreshed, so just disable + * the cost check in this one. + */ + if (useCloudService) { + expReadKB = -1; + } else { + expReadKB = getExpReadKB(true /* keyOnly */, recordKB, + 0 /* numReadRows */, + numRows /* numReadKeys */); + } + expCnt = numRows; + maxReadKBs = new int[] {0, 5, 7, 11}; + for (int maxReadKB : maxReadKBs) { + executeQuery(query, true /* keyOnly */, true/* indexScan */, + expCnt, expReadKB, 0 /* numLimit */, maxReadKB, + recordKB, Consistency.EVENTUAL); + } + + /* + * Case 2: multi-shard, non-covering index + */ + query = "select sid, id, state, salary " + + "from scanTable " + + "order by state"; + + expReadKB = getExpReadKB(false /* keyOnly */, recordKB, + numRows /* numReadRows */, + numRows /* numReadKeys */); + expCnt = numRows; + maxReadKBs = new int[] {6, 7, 8}; + for (int maxReadKB : maxReadKBs) { + executeQuery(query, false /* keyOnly */, true/* indexScan */, + expCnt, expReadKB, 0 /* numLimit */, maxReadKB, + recordKB, Consistency.EVENTUAL); + } + + /* + * Case 3: single-partition, non-covering index + */ + query = "select sid, id, state, salary " + + "from scanTable " + + "where sid = 3 " + + "order by sid, id " + + "limit 27 offset 5"; + expReadKB = getExpReadKB(false /* keyOnly */, recordKB, + 32 /* numReadRows */, + 32 /* numReadKeys */); + expCnt = 27; + maxReadKBs = new int[] {4, 5, 12}; + for (int maxReadKB : maxReadKBs) { + executeQuery(query, false /* keyOnly */, true/* indexScan */, + expCnt, expReadKB, 0 /* numLimit */, maxReadKB, + recordKB, Consistency.EVENTUAL); + } + } + + @Test + public void testGroupByShards() { + final int numMajor = 10; + final int numPerMajor = 101; + final int recordKB = 2; + + /* Load rows to table */ + loadRowsToScanTable(numMajor, numPerMajor, recordKB); + + String query; + int expReadKB, expCnt; + int[] maxReadKBs; + + tableOperation(handle, createIdxStateAgeDDL, null, + TableResult.State.ACTIVE, 20000); + /* + * Case 1. + */ + query = "select count(*) from scanTable where state = \"CA\""; + + /* + * TODO: NOSQL-719 + * Enable the cost check in cloud test after fix it + * + * The cases after this one may run into the same problem, since + * the case1 is executed multiple times, it is likely the table in + * KV table cache on all the proxies may be refreshed, so just disable + * the cost check in this one. 
+ */ + if (useCloudService) { + expReadKB = -1; + } else { + expReadKB = getExpReadKB(true /* keyOnly */, recordKB, + 0 /* numReadRows */, + 210); + } + expCnt = 1; + /* size-based limit */ + maxReadKBs = new int[] {10, 17, 23, 37, 209, 210, 500}; + for (int maxReadKB : maxReadKBs) { + executeQuery(query, true /* keyOnly */, true/* indexScan */, + expCnt, expReadKB, 0 /* numLimit */, maxReadKB, + recordKB); + } + + /* + * Case 2. + * sum(salary) = 165000 + */ + query = "select count(*), sum(salary) from scanTable " + + "where state = \"VT\""; + expReadKB = getExpReadKB(false /* keyOnly */, recordKB, + 200 /* numReadRows */, + 200 /* numReadKeys */); + expCnt = 1; + /* size-based limit */ + maxReadKBs = new int[] {9, 19, 31, 44, 200, 500}; + for (int maxReadKB : maxReadKBs) { + executeQuery(query, false /* keyOnly */, true/* indexScan */, + expCnt, expReadKB, 0 /* numLimit */, maxReadKB, + recordKB); + } + + /* Prepare first, then execute */ + executeQuery(query, null, 1, 22, true); + + /* + * Case 3. + */ + query = "select state, count(*) from scanTable group by state"; + expReadKB = getExpReadKB(true /* keyOnly */, recordKB, + 0 /* numReadRows */, + 1010); + expCnt = 5; + /* size-based limit */ + maxReadKBs = new int[] {30}; + for (int maxReadKB : maxReadKBs) { + executeQuery(query, true /* keyOnly */, true/* indexScan */, + expCnt, expReadKB, 0 /* numLimit */, maxReadKB, + recordKB); + } + + /* + * Case 4. + */ + query = + "select state, " + + " count(*) as cnt, " + + " sum(salary) as sum, " + + " avg(salary) as avg " + + "from scanTable "+ + "group by state"; + expReadKB = getExpReadKB(false /* keyOnly */, recordKB, + 1010 /* numReadRows */, + 1010); + expCnt = 5; + /* size-based limit */ + maxReadKBs = new int[] {34}; + for (int maxReadKB : maxReadKBs) { + executeQuery(query, false /* keyOnly */, true/* indexScan */, + expCnt, expReadKB, 0 /* numLimit */, maxReadKB, + recordKB); + } + + } + + /** + * Test group-by query with numeric-base limit and/or size-based limits + * + * 1. Single partition scan, key-only + * select count(*) from scanTable where sid = 1 + * + * 2. Single partition scan, key + row + * select min(name), min(age) from scanTable where sid = 1 + * + * 3. All partitions scan, key only + * select count(*) from scanTable group by sid + * + * 4. All partitions scan, key + row + * select min(name) from scanTable group by sid + * + * 5. All shards scan, key only + * select count(*) from scanTable group by sid, name + * + * 6. All shards scan, key + row + * select max(name) from scanTable group by sid, name + * + * 7. All partitions scan, key only, single row returned. + * select count(*) from scanTable + * + * 8. All shards scan, key only, single row returned. 
+ * select min(name) from scanTable + */ + @Test + public void testGroupByWithLimits() { + final int numMajor = 10; + final int numPerMajor = 101; + final int numRows = numMajor * numPerMajor; + final int recordKB = 2; + + /* Load rows to table */ + loadRowsToScanTable(numMajor, numPerMajor, recordKB); + + String query; + int expReadKB, expCnt; + int[] limits, maxReadKBs; + + tableOperation(handle, createIdxSidAgeDDL, null, + TableResult.State.ACTIVE, timeout); + +long startMs = System.currentTimeMillis(); + + /* + * Case: Single partition scan, key only + */ + query = "select count(*) from scanTable where sid = 1"; + expReadKB = getExpReadKB(true /* keyOnly */, recordKB, + 0 /* numReadRows */, + numPerMajor /* numReadKeys */); + expCnt = 1; + /* number-based limit */ + limits = new int[] {0, expCnt, expCnt + 1}; + for (int limit : limits) { + executeQuery(query, true /* keyOnly */, false /* indexScan */, + expCnt, expReadKB, limit, 0 /* maxReadKB */, + recordKB); + } + /* size-based limit */ + maxReadKBs = new int[] {0, 50, 100, 101}; + for (int maxReadKB : maxReadKBs) { + executeQuery(query, false /* keyOnly */, false/* indexScan */, + expCnt, expReadKB, 0 /* numLimit */, maxReadKB, + recordKB); + } + /* number-based and size-based limit */ + executeQuery(query, false /* keyOnly */, false/* indexScan */, + expCnt, expReadKB, 1 /* numLimit */, 50 /* maxReadKB */, + recordKB); + + /* + * Case 2: Single partition scan, key + row + */ + query = "select min(salary), min(age) from scanTable where sid = 1"; + expReadKB = getExpReadKB(false /* keyOnly */, recordKB, + numPerMajor /* numReadRows */, + numPerMajor /* numReadKeys */); + expCnt = 1; + /* number-based limit */ + limits = new int[] {0, expCnt, expCnt + 1}; + for (int limit : limits) { + executeQuery(query, false /* keyOnly */, false /* indexScan */, + expCnt, expReadKB, limit, 0 /* maxReadKB */, + recordKB); + } + /* size-based limit */ + maxReadKBs = new int[] {0, 10, 100, 300, 303}; + for (int maxReadKB : maxReadKBs) { + executeQuery(query, false /* keyOnly */, false/* indexScan */, + expCnt, expReadKB, 0 /* numLimit */, maxReadKB, + recordKB); + } + /* number-based limit + size-based limit */ + executeQuery(query, false /* keyOnly */, false/* indexScan */, + expCnt, expReadKB, 1 /* numLimit */, 200 /* maxReadKB */, + recordKB); + + /* + * Case 3: All partitions scan, key only + */ + query = "select count(*) from scanTable group by sid"; + expReadKB = getExpReadKB(true /* keyOnly */, recordKB, + 0 /* numReadRows */, + numRows /* numReadKeys */); + expCnt = numMajor; + /* number-based limit */ + limits = new int[] {0, 5, expCnt, expCnt + 1}; + for (int limit : limits) { + executeQuery(query, true /* keyOnly */, false /* indexScan */, + expCnt, expReadKB, limit, 0 /* maxReadKB */, + recordKB); + } + /* size-based limit */ + maxReadKBs = new int[] {0, 10, 100, 500, 1000, 1010}; + for (int maxReadKB : maxReadKBs) { + executeQuery(query, true /* keyOnly */, false/* indexScan */, + expCnt, expReadKB, 0 /* numLimit */, maxReadKB, + recordKB); + } + /* number-based limit + size-based limit */ + executeQuery(query, true /* keyOnly */, false/* indexScan */, + expCnt, expReadKB, 1 /* numLimit */, 200 /* maxReadKB */, + recordKB); + executeQuery(query, true /* keyOnly */, false/* indexScan */, + expCnt, expReadKB, 2 /* numLimit */, 200 /* maxReadKB */, + recordKB); + executeQuery(query, true /* keyOnly */, false/* indexScan */, + expCnt, expReadKB, 5 /* numLimit */, 200 /* maxReadKB */, + recordKB); + + /* + * Case 4: All partitions scan, key + 
row + */ + query = "select min(salary) from scanTable group by sid"; + expReadKB = getExpReadKB(false /* keyOnly */, recordKB, + numRows /* numReadRows */, + numRows /* numReadKeys */); + expCnt = numMajor; + /* number-based limit */ + limits = new int[] {0, 5, expCnt, expCnt + 1}; + for (int limit : limits) { + executeQuery(query, false /* keyOnly */, false /* indexScan */, + expCnt, expReadKB, limit, 0 /* maxReadKB */, + recordKB); + } + /* size-based limit */ + maxReadKBs = new int[] {0, 10, 100, 500, 1000, 2047}; + for (int maxReadKB : maxReadKBs) { + executeQuery(query, false /* keyOnly */, false/* indexScan */, + expCnt, expReadKB, 0 /* numLimit */, maxReadKB, + recordKB); + } + /* number-based limit + size-based limit */ + executeQuery(query, false /* keyOnly */, false/* indexScan */, + expCnt, expReadKB, 1 /* numLimit */, 400 /* maxReadKB */, + recordKB); + executeQuery(query, false /* keyOnly */, false/* indexScan */, + expCnt, expReadKB, 3 /* numLimit */, 400 /* maxReadKB */, + recordKB); + + /* + * Case 5: All shards scan, key only + */ + query = "select count(*) from scanTable group by sid, age"; + expReadKB = getExpReadKB(true /* keyOnly */, recordKB, + 0 /* numReadRows */, + numRows /* numReadKeys */); + expCnt = numMajor * 10; + + /* number-based limit */ + limits = new int[] {0, 5, 50, expCnt, expCnt + 1}; + for (int limit : limits) { + executeQuery(query, true /* keyOnly */, true /* indexScan */, + expCnt, expReadKB, limit, 0 /* maxReadKB */, + recordKB); + } + /* size-based limit */ + maxReadKBs = new int[] {0, 10, 100, 500, 1000, 1010}; + for (int maxReadKB : maxReadKBs) { + executeQuery(query, true /* keyOnly */, true/* indexScan */, + expCnt, expReadKB, 0 /* numLimit */, maxReadKB, + recordKB); + } + + /* number-based and size-based limit */ + executeQuery(query, true /* keyOnly */, true/* indexScan */, + expCnt, expReadKB, 10 /* numLimit */, 100, recordKB); + + /* + * Case 6: All shards scan, key + row + */ + query = "select max(salary) from scanTable group by sid, age"; + expReadKB = getExpReadKB(false /* keyOnly */, recordKB, + numRows /* numReadRows */, + numRows /* numReadKeys */); + expCnt = numMajor * 10; + + /* number-based limit */ + limits = new int[] {0, 5, 50, expCnt, expCnt + 1}; + for (int limit : limits) { + executeQuery(query, false /* keyOnly */, true /* indexScan */, + expCnt, expReadKB, limit, 0 /* maxReadKB */, + recordKB); + } + + /* size-based limit */ + maxReadKBs = new int[] {0, 10, 100, 500, 1000, 2047}; + for (int maxReadKB : maxReadKBs) { + executeQuery(query, false /* keyOnly */, true/* indexScan */, + expCnt, expReadKB, 0 /* numLimit */, maxReadKB, + recordKB); + } + + executeQuery(query, false /* keyOnly */, true/* indexScan */, + expCnt, expReadKB, 10 /* numLimit */, 300 /* maxReadKB */, + recordKB); + + /* + * Case 7: All partitions scan, key only. Single row returned. 
+ */ + query = "select count(*) from scanTable"; + expReadKB = getExpReadKB(true /* keyOnly */, recordKB, + 0 /* numReadRows */, + numRows /* numReadKeys */); + expCnt = 1; + /* number-based limits */ + limits = new int[] {0, 1}; + for (int limit : limits) { + executeQuery(query, true /* keyOnly */, false /* indexScan */, + expCnt, expReadKB, limit, 0 /* maxReadKB */, + recordKB); + } + /* size-based limit */ + maxReadKBs = new int[] {0, 10, 100, 500, 1000, 1010 }; + for (int maxReadKB : maxReadKBs) { + executeQuery(query, true /* keyOnly */, false/* indexScan */, + expCnt, expReadKB, 0 /* numLimit */, maxReadKB, + recordKB); + } + /* number-based limit + size-based limit */ + executeQuery(query, true /* keyOnly */, false/* indexScan */, + expCnt, expReadKB, 1 /* numLimit */, 500 /* maxReadKB */, + recordKB); + + /* + * Case 8: All shards scan, key only. Single row returned. + */ + query = "select min(name) from scanTable"; + expReadKB = getExpReadKB(true /* keyOnly */, recordKB, + 0 /* numReadRows */, + numRows /* numReadKeys */); + expCnt = 1; + /* number-based limits */ + limits = new int[] {0, 1}; + for (int limit : limits) { + executeQuery(query, true /* keyOnly */, true /* indexScan */, + expCnt, expReadKB, limit, 0 /* maxReadKB */, + recordKB); + } + /* size-based limit */ + maxReadKBs = new int[] {0, 10, 100, 500, 1000, 1010 }; + for (int maxReadKB : maxReadKBs) { + executeQuery(query, true /* keyOnly */, true/* indexScan */, + expCnt, expReadKB, 0 /* numLimit */, maxReadKB, + recordKB); + } + /* number-based limit + size-based limit */ + executeQuery(query, true /* keyOnly */, true/* indexScan */, + expCnt, expReadKB, 1 /* numLimit */, 500 /* maxReadKB */, + recordKB); +if (verbose) System.out.println("WithLimits took " + (System.currentTimeMillis() - startMs) + "ms"); + } + + @Test + public void testDelete() { + final int numMajor = 5; + final int numPerMajor = 100; + final int recordKB = 4; + + tableOperation(handle, createIdxStateAgeDDL, null, + TableResult.State.ACTIVE, 20000); + + /* Load rows to table */ + loadRowsToScanTable(numMajor, numPerMajor, recordKB); + + int expReadKB, expCnt; + int[] maxReadKBs; + String query; + + /* + * Case 1. ALL_SHARDS delete, without RETURNING, covering index + * 100 rows will be deleted. 200 key-reads will be performed + */ + query = "delete from scanTable where state = \"CA\""; + expReadKB = getExpReadKB(true /* keyOnly */, recordKB, + 0 /* numReadRows */, + 200/*numReadKeys*/); + + expCnt = 1; + maxReadKBs = new int[] {10}; + for (int maxReadKB : maxReadKBs) { + executeQuery(query, true /* keyOnly */, true/* indexScan */, + expCnt, expReadKB, 0 /* numLimit */, maxReadKB, + recordKB, Consistency.ABSOLUTE); + } + + loadRowsToScanTable(numMajor, numPerMajor, recordKB); + + /* + * Case 2. ALL_SHARDS delete, with RETURNING, covering index + * 100 rows will be deleted. 200 key-reads will be performed + */ + query = "delete from scanTable where state = \"CA\" returning id"; + expReadKB = getExpReadKB(true /* keyOnly */, recordKB, + 0 /* numReadRows */, + 200/*numReadKeys*/); + expCnt = 100; + maxReadKBs = new int[] {10}; + for (int maxReadKB : maxReadKBs) { + executeQuery(query, true /* keyOnly */, true/* indexScan */, + expCnt, expReadKB, 0 /* numLimit */, maxReadKB, + recordKB, Consistency.ABSOLUTE); + } + + loadRowsToScanTable(numMajor, numPerMajor, recordKB); + + /* + * Case 3 ALL_SHARDS delete, with RETURNING, non-covering index + * 100 rows will be deleted. 
200 key-reads will be performed + */ + query = "delete from scanTable where state = \"CA\" " + + "returning sid, id, name"; + expReadKB = getExpReadKB(false /* keyOnly */, recordKB, + 100 /* numReadRows */, + 200/*numReadKeys*/); + expCnt = 100; + maxReadKBs = new int[] {10}; + for (int maxReadKB : maxReadKBs) { + executeQuery(query, false /* keyOnly */, true/* indexScan */, + expCnt, expReadKB, 0 /* numLimit */, maxReadKB, + recordKB, Consistency.ABSOLUTE); + } + + loadRowsToScanTable(numMajor, numPerMajor, recordKB); + + /* + * Case 4. ALL_SHARDS delete, without RETURNING, non-covering index + * 100 rows will be deleted. 200 key-reads will be performed + */ + query = "delete from scanTable where state = \"CA\" and name != \"abc\""; + expReadKB = getExpReadKB(false /* keyOnly */, recordKB, + 100 /* numReadRows */, + 200/*numReadKeys*/); + expCnt = 1; + maxReadKBs = new int[] {13}; + for (int maxReadKB : maxReadKBs) { + executeQuery(query, false /* keyOnly */, true/* indexScan */, + expCnt, expReadKB, 0 /* numLimit */, maxReadKB, + recordKB, Consistency.ABSOLUTE); + } + } + + @Test + public void testInsert() { + final int numMajor = 1; + final int numPerMajor = 10; + final int recordKB = 2; + final int prepCost = getMinQueryCost(); // = 2 + + /* Load rows to table */ + loadRowsToScanTable(numMajor, numPerMajor, recordKB); + + QueryRequest req; + QueryResult ret; + + /* Insert a new row */ + int newRecordKB = 8; + String longString = genString(newRecordKB * 1024); + String query = + "insert into scanTable values " + + "(1, 15, \"myname\", 23, \"WI\", 2500, [], \"" + + longString + "\")"; + + req = new QueryRequest().setStatement(query); + ret = getNextHandle().query(req); + + assertQueryReadKB(ret, 1, prepCost, true /* isAbsolute */); + assertWriteKB(ret, newRecordKB + 2); + assertTrue(ret.getResults().size() == 1); + + query = "select sid, id, name from scanTable where id = 15"; + req = new QueryRequest().setStatement(query); + ret = getNextHandle().query(req); + assertTrue(ret.getResults().size() == 1); + MapValue res = ret.getResults().get(0); + FieldValue name = res.get("name"); + assertTrue(name.getString().equals("myname")); + //System.out.println("Result = " + res); + } + + @Test + public void testUpdate() { + final int numMajor = 1; + final int numPerMajor = 10; + final int recordKB = 2; + final int minRead = getMinRead(); + final int prepCost = getMinQueryCost(); + + /* Load rows to table */ + loadRowsToScanTable(numMajor, numPerMajor, recordKB); + + QueryRequest req; + QueryResult ret; + int expReadKB; + + /* Update a row */ + int newRecordKB = 1; + String longString = genString((newRecordKB - 1) * 1024); + String query = "update scanTable set longString = \"" + longString + + "\" where sid = 0 and id = 0"; + req = new QueryRequest().setStatement(query); + ret = getNextHandle().query(req); + expReadKB = dontDoubleChargeKey() ? 
recordKB : (minRead + recordKB); + assertQueryReadKB(ret, expReadKB, prepCost, true /* isAbsolute */); + assertWriteKB(ret, (recordKB + newRecordKB)); + + /* Update non-existing row */ + query = "update scanTable set longString = \"test\" " + + "where sid = 100 and id = 0"; + req = new QueryRequest().setStatement(query); + ret = getNextHandle().query(req); + assertQueryReadKB(ret, minRead, prepCost, true /* isAbsolute */); + assertWriteKB(ret, 0); + + /* Update using preparedStatement */ + query = "declare $sval string; $sid integer; $id integer;" + + "update scanTable set longString = $sval " + + "where sid = $sid and id = $id"; + PrepareRequest prepReq = new PrepareRequest() + .setStatement(query).setGetQuerySchema(true); + PrepareResult prepRet = getNextHandle().prepare(prepReq); + if (!testV3) { + assertNotNull(prepRet.getPreparedStatement().getQuerySchema()); + } + assertNull(prepRet.getPreparedStatement().getQueryPlan()); + assertNotNull(prepRet.getPreparedStatement()); + assertReadKB(prepRet, prepCost, false /* isAbsolute */); + assertWriteKB(prepRet, 0); + + prepRet.getPreparedStatement() + .setVariable("$sval", new StringValue(longString)) + .setVariable("$sid", new IntegerValue(0)) + .setVariable("$id", new IntegerValue(1)); + + req = new QueryRequest().setPreparedStatement(prepRet); + ret = getNextHandle().query(req); + expReadKB = dontDoubleChargeKey() ? recordKB : (minRead + recordKB); + assertReadKB(ret, expReadKB, true /* isAbsolute */); + assertWriteKB(ret, (recordKB + newRecordKB)); + } + + @Test + public void testQueryWithSmallLimit() { + final int numMajor = 1; + final int numPerMajor = 5; + final int recordKB = 2; + final int minRead = getMinRead(); + final int prepCost = getMinQueryCost(); + + /* Load rows to table */ + loadRowsToScanTable(numMajor, numPerMajor, recordKB); + + String query; + QueryRequest req; + QueryResult ret; + int expReadKB; + + /* Update with number-based limit of 1 */ + int newRecordKB = 1; + String longString = genString((newRecordKB - 1) * 1024); + query = "update scanTable set longString = \"" + longString + + "\" where sid = 0 and id = 0"; + req = new QueryRequest().setStatement(query).setLimit(1); + ret = getNextHandle().query(req); + assertNull(ret.getContinuationKey()); + expReadKB = dontDoubleChargeKey() ? recordKB : (minRead + recordKB); + assertQueryReadKB(ret, expReadKB, prepCost, true /* isAbsolute */); + assertWriteKB(ret, (recordKB + newRecordKB)); + + /* Update with maxReadKB of 1, expect an IAE */ + expReadKB = dontDoubleChargeKey() ? 
recordKB : (minRead + recordKB); + query = "update scanTable set longString = \"" + longString + + "\" where sid = 0 and id = 1"; + + if (checkKVVersion(21, 3, 6)) { + /* Query should always make progress with small limit */ + req = new QueryRequest().setStatement(query).setMaxReadKB(1); + ret = getNextHandle().query(req); + assertQueryReadKB(ret, expReadKB, prepCost, true /* isAbsolute */); + assertWriteKB(ret, (recordKB + newRecordKB)); + } else { + for (int kb = 1; kb <= expReadKB; kb++) { + req = new QueryRequest().setStatement(query).setMaxReadKB(kb); + try { + ret = getNextHandle().query(req); + if (kb < expReadKB) { + fail("Expect to catch IAE but not"); + } else { + assertQueryReadKB(ret, expReadKB, prepCost, + true /* isAbsolute */); + assertWriteKB(ret, (recordKB + newRecordKB)); + } + } catch (IllegalArgumentException iae) { + assertTrue("Expect to succeed with maxReadKB of " + kb + + ", but fail: " + iae.getMessage(), + kb < expReadKB); + } + } + } + + /* Update with maxReadKB of 1, 0 row updated */ + query = "update scanTable set longString = \"" + longString + + "\" where sid = 100 and id = 1"; + req = new QueryRequest().setStatement(query).setMaxReadKB(1); + ret = getNextHandle().query(req); + assertNull(ret.getContinuationKey()); + assertQueryReadKB(ret, minRead, prepCost, true /* isAbsolute */); + assertWriteKB(ret, 0); + + /* Query with number limit of 1 */ + query = "select * from scanTable where sid = 0 and id > 1"; + int numRows = numMajor * (numPerMajor - 2); + expReadKB = getExpReadKB(false /* keyOnly */, recordKB, + numRows /* numReadRows */, + numRows /* numReadKeys */); + executeQuery(query, false /* keyOnly */, false/* indexScan */, + numRows, expReadKB, 1 /* limit */, 0 /* maxReadKB */, + recordKB); + + //traceLevel = 3; + //showResults = true; + + /* Query with maxReadKB of 1, expect an IAE */ + NoSQLHandle curHandle = getNextHandle(); + query = "select * from scanTable where sid = 0 and id > 1"; + if (checkKVVersion(21, 3, 6)) { + for (int kb = 1; kb <= 3; kb++) { + executeQuery(query, false, false, numRows, expReadKB, 0, + kb, recordKB); + } + } else { + int numExec = 0; + req = new QueryRequest().setStatement(query).setMaxReadKB(1); + try { + do { + numExec++; + ret = handle.query(req); + } while (!req.isDone()); + fail("Expect to catch IAE but not"); + } catch (IllegalArgumentException iae) { + assertEquals(2, numExec); + } + } + } + + /** + * Returns the estimated readKB. + */ + private int getExpReadKB(boolean keyOnly, + int recordKB, + int numReadRows, + int numReadKeys) { + final int minRead = getMinRead(); + int readKB = numReadKeys * minRead; + if (!keyOnly) { + if (dontDoubleChargeKey()) { + recordKB -= minRead; + } + readKB += numReadRows * recordKB; + } + return readKB == 0 ? 
minRead : readKB; + } + + /* + * Test illegal cases -- both prepared statement and string + */ + @Test + public void testIllegalQuery() { + + PrepareRequest prepReq; + QueryRequest queryReq; + String query; + + final String queryWithVariables = + "declare $sid integer; $id integer;" + + "select name from scanTable where sid = $sid and id >= $id"; + + /* Syntax error */ + prepReq = new PrepareRequest().setStatement("random string"); + try { + getNextHandle().prepare(prepReq); + fail("query should have failed"); + } catch (IllegalArgumentException iae) {} + + queryReq = new QueryRequest().setStatement("random string"); + try { + getNextHandle().query(queryReq); + fail("query should have failed"); + } catch (IllegalArgumentException iae) {} + + /* Try a query that requires external variables that are missing */ + queryReq = new QueryRequest().setStatement(queryWithVariables); + try { + getNextHandle().query(queryReq); + fail("query should have failed"); + } catch (IllegalArgumentException iae) { + } + + prepReq = new PrepareRequest().setStatement(queryWithVariables). + setGetQueryPlan(true); + PrepareResult prepRes = getNextHandle().prepare(prepReq); + queryReq = new QueryRequest().setPreparedStatement(prepRes); + try { + getNextHandle().query(queryReq); + fail("query should have failed"); + } catch (IllegalArgumentException iae) { + } + + /* Wrong name of variables */ + prepReq = new PrepareRequest().setStatement(queryWithVariables); + prepRes = getNextHandle().prepare(prepReq); + PreparedStatement prepStmt = prepRes.getPreparedStatement(); + prepStmt.setVariable("sid", new IntegerValue(9)); + prepStmt.setVariable("id", new IntegerValue(3)); + queryReq = new QueryRequest().setPreparedStatement(prepRes); + try { + getNextHandle().query(queryReq); + fail("query should have failed"); + } catch (IllegalArgumentException ex) { + } + + /* Wrong type for variables */ + prepReq = new PrepareRequest().setStatement(queryWithVariables); + prepRes = getNextHandle().prepare(prepReq); + prepStmt = prepRes.getPreparedStatement(); + prepStmt.setVariable("$sid", new DoubleValue(9.1d)); + prepStmt.setVariable("$id", new IntegerValue(3)); + queryReq = new QueryRequest().setPreparedStatement(prepRes); + try { + getNextHandle().query(queryReq); + fail("query should have failed"); + } catch (IllegalArgumentException iae) { + } + + /* Table not found */ + query = "select * from invalidTable"; + prepReq = new PrepareRequest().setStatement(query); + try { + getNextHandle().prepare(prepReq); + fail("prepare should have failed"); + } catch (TableNotFoundException tnfe) { + } + + queryReq = new QueryRequest().setStatement(query); + try { + getNextHandle().query(queryReq); + fail("query should have failed"); + } catch (TableNotFoundException tnfe) { + } + + /* Invalid column */ + query = "select * from scanTable where invalidColumn = 1"; + prepReq = new PrepareRequest().setStatement(query); + try { + getNextHandle().prepare(prepReq); + fail("prepare should have failed"); + } catch (IllegalArgumentException iae) { + } + + queryReq = new QueryRequest().setStatement(query); + try { + getNextHandle().query(queryReq); + fail("query should have failed"); + } catch (IllegalArgumentException tnfe) { + } + + /* Prepare or execute Ddl statement */ + query = "create table t1(id integer, name string, primary key(id))"; + prepReq = new PrepareRequest().setStatement(query); + try { + getNextHandle().prepare(prepReq); + fail("prepare should have failed"); + } catch (IllegalArgumentException iae) { + } + + queryReq = new 
QueryRequest().setStatement(query); + try { + getNextHandle().query(queryReq); + fail("query should have failed"); + } catch (IllegalArgumentException iae) { + } + + queryReq = new QueryRequest().setStatement(query); + try { + queryReq.setLimit(-1); + getNextHandle().query(queryReq); + fail("QueryRequest.setLimit() should fail with IAE"); + } catch (IllegalArgumentException iae) { + } + queryReq.setLimit(0); + + try { + queryReq.setMaxReadKB(-1); + fail("QueryRequest.setMaxReadKB() should fail with IAE"); + } catch (IllegalArgumentException iae) { + } + + if (!cloudRunning) { // Compartment path is support in cloud + /* + * Namespaces, child tables and identity columns are not + * yet supported + */ + String statement = + "create table ns:foo(id integer, primary key(id))"; + try { + tableOperation(handle, statement, + new TableLimits(10, 10, 10), + TableResult.State.ACTIVE, 10000); + fail("Namespaces not supported in table names"); + } catch (Exception e) { + assertTrue(e.getMessage().toLowerCase().contains("namespace")); + } + + statement = "drop table ns:foo"; + try { + tableOperation(handle, statement, + new TableLimits(10, 10, 10), + TableResult.State.ACTIVE, 10000); + fail("Namespaces not supported in table names"); + } catch (Exception e) { + if (onprem) { + assertTrue(e instanceof TableNotFoundException); + } else { + assertTrue(e.getMessage().toLowerCase() + .contains("namespace")); + } + } + + statement = "select * from ns:foo"; + try { + executeQuery(statement, null, 0, 0, false); + fail("Query with namespaced table not supported"); + } catch (Exception e) { + if (onprem) { + assertTrue(e instanceof TableNotFoundException); + } else { + assertTrue(e.getMessage().toLowerCase() + .contains("namespace")); + } + } + } + + String statement = "create namespace myns"; + try { + tableOperation(handle, statement, + new TableLimits(10, 10, 10), + TableResult.State.ACTIVE, 10000); + if (!onprem) { + fail("Creating namespaces not supported"); + } + } catch (Exception e) { + assertTrue(e.getMessage().toLowerCase().contains("namespace")); + } + + statement = "drop namespace myns"; + try { + tableOperation(handle, statement, + new TableLimits(10, 10, 10), + TableResult.State.ACTIVE, 10000); + if (!onprem) { + fail("Dropping namespaces not supported"); + } + } catch (Exception e) { + assertTrue(e.getMessage().toLowerCase().contains("namespace")); + } + } + + @Test + public void testJson() { + final String[] jsonRecords = { + "{" + + " \"id\":0," + + " \"info\":" + + " {" + + " \"firstName\":\"first0\", \"lastName\":\"last0\",\"age\":10," + + " \"address\":" + + " {" + + " \"city\": \"San Fransisco\"," + + " \"state\" : \"CA\"," + + " \"phones\" : [" + + " { \"areacode\" : 408, \"number\" : 50," + + " \"kind\" : \"home\" }," + + " { \"areacode\" : 650, \"number\" : 51," + + " \"kind\" : \"work\" }," + + " \"650-234-4556\"," + + " 650234455" + + " ]" + + " }," + + " \"children\":" + + " {" + + " \"Anna\" : { \"age\" : 10, \"school\" : \"sch_1\"," + + " \"friends\" : [\"Anna\", \"John\", \"Maria\"]}," + + " \"Lisa\" : { \"age\" : 12, \"friends\" : [\"Ada\"]}" + + " }" + + " }" + + "}", + + "{" + + " \"id\":1," + + " \"info\":" + + " {" + + " \"firstName\":\"first1\", \"lastName\":\"last1\",\"age\":11," + + " \"address\":" + + " {" + + " \"city\" : \"Boston\"," + + " \"state\" : \"MA\"," + + " \"phones\" : [ { \"areacode\" : 304, \"number\" : 30," + + " \"kind\" : \"work\" }," + + " { \"areacode\" : 318, \"number\" : 31," + + " \"kind\" : \"work\" }," + + " { \"areacode\" : 400, \"number\" : 41," + 
+ " \"kind\" : \"home\" }]" + + " }," + + " \"children\":" + + " {" + + " \"Anna\" : { \"age\" : 9, \"school\" : \"sch_1\"," + + " \"friends\" : [\"Bobby\", \"John\", null]}," + + " \"Mark\" : { \"age\" : 4, \"school\" : \"sch_1\"," + + " \"friends\" : [\"George\"]}," + + " \"Dave\" : { \"age\" : 15, \"school\" : \"sch_3\"," + + " \"friends\" : [\"Bill\", \"Sam\"]}" + + " }" + + " }" + + "}", + + "{" + + " \"id\":2," + + " \"info\":" + + " {" + + " \"firstName\":\"first2\", \"lastName\":\"last2\",\"age\":12," + + " \"address\":" + + " {" + + " \"city\" : \"Portland\"," + + " \"state\" : \"OR\"," + + " \"phones\" : [ { \"areacode\" : 104, \"number\" : 10," + + " \"kind\" : \"home\" }," + + " { \"areacode\" : 118, \"number\" : 11," + + " \"kind\" : \"work\" } ]" + + " }," + + " \"children\":" + + " {" + + " }" + + " }" + + "}", + + "{ " + + " \"id\":3," + + " \"info\":" + + " {" + + " \"firstName\":\"first3\", \"lastName\":\"last3\",\"age\":13," + + " \"address\":" + + " {" + + " \"city\" : \"Seattle\"," + + " \"state\" : \"WA\"," + + " \"phones\" : null" + + " }," + + " \"children\":" + + " {" + + " \"George\" : { \"age\" : 7, \"school\" : \"sch_2\"," + + " \"friends\" : [\"Bill\", \"Mark\"]}," + + " \"Matt\" : { \"age\" : 14, \"school\" : \"sch_2\"," + + " \"friends\" : [\"Bill\"]}" + + " }" + + " }" + + "}" + }; + + String query; + Map bindValues = new HashMap(); + + tableOperation(handle, createJsonTableDDL, + new TableLimits(15000, 15000, 50), + TableResult.State.ACTIVE, timeout); + + /* create an index on a field that won't exist to test GetIndexes */ + tableOperation(handle, "create index JsonIndex on jsonTable " + + "(info.a.b.c as integer, info.d as string)", null, + TableResult.State.ACTIVE, 20000); + + /* Simple test of GetIndexesRequest */ + GetIndexesRequest getIndexes = new GetIndexesRequest() + .setTableName("jsonTable"); + + /* GetIndexesRquest returns GetIndexesResult */ + GetIndexesResult giRes = handle.getIndexes(getIndexes); + /* there is 1 index with 2 fields, each with a type */ + assertEquals(1, giRes.getIndexes().length); + for (GetIndexesResult.IndexInfo info : giRes.getIndexes()) { + assertEquals(2, info.getFieldNames().length); + for (int i = 0; i < info.getFieldNames().length; i++) { + assertNotNull(info.getFieldNames()[i]); + if (!testV3) { + assertNotNull(info.getFieldTypes()[i]); + } + } + } + + loadRowsToTable(jsonTable, jsonRecords); + + /* Basic query on a table with JSON field */ + query = "select id, f.info from jsonTable f"; + executeQuery(query, null, 4, 0, false /* usePrepStmt */); + + /* Test JsonNull */ + query = "select id from jsonTable f where f.info.address.phones = null"; + executeQuery(query, null, 1, 0, false /* usePrepStmt */); + + /* Bind JsonNull value */ + query = "declare $phones json;" + + "select id, f.info.address.phones " + + "from jsonTable f " + + "where f.info.address.phones != $phones"; + bindValues.put("$phones", JsonNullValue.getInstance()); + executeQuery(query, bindValues, 3, 0, true /* usePrepStmt */); + + /* Bind 2 String values */ + query = "declare $city string;$name string;" + + "select id, f.info.address.city, f.info.children.keys() " + + "from jsonTable f " + + "where f.info.address.city = $city and " + + " not f.info.children.keys() =any $name"; + bindValues.clear(); + bindValues.put("$city", new StringValue("Portland")); + bindValues.put("$name", new StringValue("John")); + executeQuery(query, bindValues, 1, 0, true /* usePrepStmt */); + + /* Bind MapValue */ + query = "declare $child json;" + + "select id, 
f.info.children.values() " + + "from jsonTable f " + + "where f.info.children.values() =any $child"; + String json = "{\"age\":14, \"school\":\"sch_2\", " + + " \"friends\":[\"Bill\"]}"; + bindValues.clear(); + bindValues.put("$child", JsonUtils.createValueFromJson(json, null)); + executeQuery(query, bindValues, 1, 0, true /* usePrepStmt */); + + /* Bind ArrayValue */ + query = "declare $friends json;" + + "select id, f.info.children.values() " + + "from jsonTable f " + + "where f.info.children.values().friends =any $friends"; + + ArrayValue friends = new ArrayValue(); + friends.add("Bill"); + friends.add("Mark"); + bindValues.clear(); + bindValues.put("$friends", friends); + executeQuery(query, bindValues, 1, 0, true /* usePrepStmt */); + } + + @Test + public void testPrepare() { + String query; + PrepareRequest req; + PrepareResult ret; + + query = "select * from scanTable"; + req = new PrepareRequest().setStatement(query).setGetQueryPlan(true); + ret = getNextHandle().prepare(req); + assertNull(ret.getPreparedStatement().getQuerySchema()); + assertNotNull(ret.getPreparedStatement().getQueryPlan()); + + if (!onprem) { + assertEquals(ret.getReadKB(), getMinQueryCost()); + assertEquals(ret.getWriteKB(), 0); + } + + query = "declare $sval string; $sid integer; $id integer;" + + "update scanTable set longString = $sval " + + "where sid = $sid and id = $id"; + req = new PrepareRequest().setStatement(query); + ret = getNextHandle().prepare(req); + if (!onprem) { + assertEquals(ret.getReadKB(), getMinQueryCost()); + assertEquals(ret.getWriteKB(), 0); + } + } + + @Test + public void testOldPreparedPlan() { + + Class prepstmtClass = null; + + try { + prepstmtClass = Class.forName( + "oracle.kv.impl.api.query.PreparedStatementImpl"); + } catch (Throwable e) { + System.out.println("Could not find PreparedStatementImpl class:" + + e); + prepstmtClass = null; + } + + assertTrue(prepstmtClass != null); + + Method setVersionFunction = null; + + try { + setVersionFunction = prepstmtClass.getMethod("setTestSerialVersion", + short.class); + } catch (Throwable e) { + System.out.println( + "Warning: Could not find PreparedStatementImpl.setTestSerialVersion() " + + "function: skipping testOldPreparedPlan()"); + return; + } + + String tableDDL = + "create table if not exists users(" + + " acct_id integer," + + " user_id integer," + + " info json," + + " primary key(acct_id, user_id))"; + + String queries[] = { + + "select count(*) as cnt " + + "from users u " + + "where u.info.country = \"USA\" and " + + " u.info.shows.showId =any 16", + }; + + tableOperation(handle, tableDDL, + new TableLimits(15000, 15000, 50), + TableResult.State.ACTIVE, timeout); + + // serial version 21 is 19.3 + short[] versions = { 21, 22, 23, 24 }; + + try { + for (short v : versions) { + + try { + setVersionFunction.invoke(null, v); + } catch (Throwable e) { + System.out.println( + "Failed to invoke " + + "PreparedStatementImpl.setTestSerialVersion() " + + "function: " + e); + return; + } + + /* Prepare the queries */ + PreparedStatement[] prepStmts = + new PreparedStatement[queries.length]; + + for (int i = 0; i < queries.length; ++i) { + PrepareRequest req = new PrepareRequest() + .setStatement(queries[i]); + PrepareResult res = handle.prepare(req); + prepStmts[i] = res.getPreparedStatement(); + } + + /* Execute the queries */ + for (int i = 0; i < queries.length; ++i) { + + QueryRequest queryReq = new QueryRequest(); + queryReq.setPreparedStatement(prepStmts[i]); + do { + QueryResult queryRes = handle.query(queryReq); + List 
results = queryRes.getResults(); + assertTrue(results.size() == 1); + } while (!queryReq.isDone()); + } + } + } finally { + try { + short v = -1; + setVersionFunction.invoke(null, v); + } catch (Throwable e) { + System.out.println( + "Failed to invoke " + + "PreparedStatementImpl.setTestSerialVersion() " + + "function: " + e); + return; + } + } + } + + /** + * Prepare a query, use it, evolve table, try again. + */ + @Test + public void testEvolution() { + /* + * TODO: NOSQL-719 + * Enable this test in cloud test after fix it + */ + if (useCloudService) { + return; + } + + /* Load rows to table */ + loadRowsToScanTable(1, 10, 2); + String query = "select age from scanTable"; + PrepareRequest prepReq = new PrepareRequest().setStatement(query); + PrepareResult prepRet = getNextHandle().prepare(prepReq); + assertNotNull(prepRet.getPreparedStatement()); + + QueryRequest qreq = new QueryRequest().setPreparedStatement(prepRet); + QueryResult qres = getNextHandle().query(qreq); + assertEquals(10, qres.getResults().size()); + + /* + * add a column and try the query again. It should fail with + * a message including "prepared with a different version of the table" + * + * NOTE: in the future, this should "just work", when the proxy + * manages re-preparing queries automatically + */ + tableOperation(handle, "alter table scanTable (add username string)", + null, null, TableResult.State.ACTIVE, null); + try { + qres = handle.query(qreq); + fail("Query should have failed"); + } catch (PrepareQueryException iae) { + } + + prepRet = handle.prepare(prepReq); + assertNotNull(prepRet.getPreparedStatement()); + qreq.setPreparedStatement(prepRet); + + /* + * remove a field and try the query again. It will fail because the + * field referenced does not exist anymore. + */ + tableOperation(handle, "alter table scanTable(drop age)", + null, null, TableResult.State.ACTIVE, null); + try { + qres = getNextHandle().query(qreq); + fail("Query should have failed"); + } catch (PrepareQueryException iae) { + /* success */ + } + } + + @Test + public void testIdentityAndUUID() { + String idName = "testSG"; + String uuidName = "testUUID"; + String createTableId = + "CREATE TABLE " + idName + + "(id INTEGER GENERATED ALWAYS AS IDENTITY, " + + "name STRING, " + + "PRIMARY KEY(id))"; + String createTableUUID = + "CREATE TABLE " + uuidName + + "(id STRING AS UUID GENERATED BY DEFAULT, " + + "name STRING, " + + "PRIMARY KEY(id))"; + + tableOperation(handle, createTableId, new TableLimits(100, 100, 1), + null, TableResult.State.ACTIVE, 10000); + tableOperation(handle, createTableUUID, new TableLimits(100, 100, 1), + null, TableResult.State.ACTIVE, 10000); + + /* + * Putting a row with a value for "id" should fail because always + * generated identity column should not has value. + */ + MapValue value = new MapValue().put("id", 100).put("name", "abc"); + PutRequest putReq = new PutRequest().setTableName(idName); + try { + putReq.setValue(value); + getNextHandle().put(putReq); + fail("Expected IAE; a generated always identity " + + "column should not have a value"); + } catch (IllegalArgumentException iae) { + } + + /* + * Putting a row without "id" field should succeed. 
+ */ + value = new MapValue().put("name", "abc"); + putReq.setValue(value); + PutResult putRet = getNextHandle().put(putReq); + assertNotNull(putRet.getVersion()); + assertNotNull(putRet.getGeneratedValue()); + + /* + * Now the UUID table + */ + value = new MapValue().put("id", "abcde").put("name", "abc"); + putReq = new PutRequest().setTableName(uuidName); + try { + putReq.setValue(value); + getNextHandle().put(putReq); + fail("Expected IAE; the uuid value set was not a uuid"); + } catch (IllegalArgumentException iae) { + } + + /* + * Putting a row without "id" field should succeed. + */ + value = new MapValue().put("name", "abc"); + putReq.setValue(value); + putRet = getNextHandle().put(putReq); + assertNotNull(putRet.getVersion()); + assertNotNull(putRet.getGeneratedValue()); + } + + @Test + public void testQueryOrder() { + + final String[] declOrder = { + "sid", "id", "name", "age", "state","salary", "array", "longString" + }; + + /* Load rows to table */ + loadRowsToScanTable(10, 10, 1); + + QueryRequest queryReq = new QueryRequest(). + setStatement("select * from scanTable where id = 1 and sid = 1"); + + QueryResult queryRes = getNextHandle().query(queryReq); + + /* + * For each result, assert that the fields are all there and in the + * expected order. + */ + for (MapValue v : queryRes.getResults()) { + assertEquals(declOrder.length, v.size()); + int i = 0; + for (Map.Entry entry : v.entrySet()) { + assertEquals(declOrder[i++], entry.getKey()); + } + + /* perform a get and validate that it also is in decl order */ + GetRequest getReq = new GetRequest() + .setTableName(tableName) + .setKey(v); + GetResult getRes = getNextHandle().get(getReq); + i = 0; + for (Map.Entry entry : + getRes.getValue().entrySet()) { + assertEquals(declOrder[i++], entry.getKey()); + } + } + } + + @Test + public void testLowThroughput() { + final int numRows = 20; + String name = "testThroughput"; + String createTableDdl = + "CREATE TABLE " + name + + "(id INTEGER, bin binary, json json, primary key(id))"; + + tableOperation(handle, createTableDdl, new TableLimits(2, 20000, 1), + null, TableResult.State.ACTIVE, 10000); + + MapValue value = new MapValue() + .put("bin", new byte[500]) + .put("json", "abc"); + PutRequest putReq = new PutRequest().setTableName(name); + + /* add rows */ + for (int i = 0; i < numRows; i++) { + value.put("id", i); + putReq.setValue(value); + PutResult putRet = getNextHandle().put(putReq); + assertNotNull(putRet.getVersion()); + } + + /* + * Ensure that this query completes + */ + QueryRequest queryReq = new QueryRequest(). + setStatement("select * from " + name). 
+ setMaxReadKB(2); + int numRes = 0; + long stime = System.currentTimeMillis(); + int RUs = 0; + do { + QueryResult queryRes = getNextHandle().query(queryReq); + numRes += queryRes.getResults().size(); + RUs += queryRes.getReadUnits(); + verbose(" RUs=" + RUs); + } while (!queryReq.isDone()); + if (verbose) { + long diffMs = System.currentTimeMillis() - stime; + System.out.println("Got " + RUs + " RUs in " + diffMs + "ms"); + } + assertEquals(numRows, numRes); + } + + /* + * Tests that a query with a V2 sort (geo_near) can operate against + * query versions 2 and 3 + */ + @Test + public void testQueryCompat() { + final String geoTable = "create table points (id integer, " + + "info json, primary key(id))"; + final String geoIndex = + "create index idx_ptn on points(info.point as point)"; + final String geoQuery = + "select id from points p " + + "where geo_near(p.info.point, " + + "{ \"type\" : \"point\", \"coordinates\" : [24.0175, 35.5156 ]}," + + "5000)"; + + TableResult tres = tableOperation(handle, geoTable, + new TableLimits(4, 1, 1), + TableResult.State.ACTIVE, 10000); + assertEquals(TableResult.State.ACTIVE, tres.getTableState()); + + tres = tableOperation(handle, geoIndex, null, + TableResult.State.ACTIVE, 10000); + assertEquals(TableResult.State.ACTIVE, tres.getTableState()); + + PrepareRequest prepReq = new PrepareRequest().setStatement(geoQuery); + PrepareResult prepRet = getNextHandle().prepare(prepReq); + assertNotNull(prepRet.getPreparedStatement()); + } + + /* + * Test use of large query strings for insert/update/upsert + */ + @Test + public void testLargeQueryStrings() { + final String tableName = "LargeQuery"; + final String createTable = "create table " + tableName + + "(id integer, data json, primary key(id))"; + final int[] stringSizes = {10, 500, 5000, 20000, 500000}; + + tableOperation(handle, createTable, + new TableLimits(4, 1000, 1000), + TableResult.State.ACTIVE, 10000); + /* create a large JSON data string */ + for (int size : stringSizes) { + String data = createLargeJson(size); + String iquery = "insert into " + tableName + " values(1," + + data + ") returning id"; + String uquery = "update " + tableName + " t " + + "set t.data = " + data + "where id = 1 returning id"; + + /* insert, then update */ + QueryRequest req = new QueryRequest().setStatement(iquery); + QueryResult res = getNextHandle().query(req); + assertEquals(1, res.getResults().get(0).get("id").getInt()); + req = new QueryRequest().setStatement(uquery); + res = getNextHandle().query(req); + assertEquals(1, res.getResults().get(0).get("id").getInt()); + } + + /* validate that select fails */ + final String squery = "select * from " + tableName + + " t where t.data.data = " + makeString(15000); + QueryRequest req = new QueryRequest().setStatement(squery); + try { + getNextHandle().query(req); + fail("Query should have failed"); + } catch (IllegalArgumentException iae) { + // success + } + } + + @Test + public void testBindArrayValue() { + final String tableName = "testBindArrayValue"; + final String createTable = "create table if not exists " + tableName + + "(id integer, " + + "info record(name string, age integer, " + + "address record(street string, room integer)), " + + "primary key(id))"; + + tableOperation(handle, createTable, new TableLimits(100, 100, 1), + TableResult.State.ACTIVE, 10000); + + String stmt = "declare $id integer;" + + "$info record(name string, age integer, " + + "address record(street string, " + + "room integer));" + + "upsert into " + tableName + " values($id, $info)"; + 
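For reference, a minimal sketch of the prepare/bind/execute pattern this declare/upsert statement feeds into, assuming the shared test handle, the driver imports already present in this file, and a hypothetical pre-created exampleTable(id integer, name string, primary key(id)); the table name, variable names, and values are placeholders, not part of the patch:

    PrepareRequest prepReq = new PrepareRequest()
        .setStatement("declare $id integer; $name string; " +
                      "upsert into exampleTable values($id, $name)");
    PreparedStatement pstmt = handle.prepare(prepReq).getPreparedStatement();

    /* bind the declared variables, then execute the prepared statement */
    pstmt.setVariable("$id", new IntegerValue(7));
    pstmt.setVariable("$name", new StringValue("Jack"));

    QueryRequest upsertReq = new QueryRequest().setPreparedStatement(pstmt);
    QueryResult upsertRes = handle.query(upsertReq);

    /* an insert/upsert query reports its outcome as a single status row */
    assertEquals(1, upsertRes.getResults().size());

The same statement can then be re-executed with different bound values, which is what the positive and negative cases below rely on.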
PrepareRequest prepReq = new PrepareRequest().setStatement(stmt); + PrepareResult prepRet = getNextHandle().prepare(prepReq); + PreparedStatement pstmt = prepRet.getPreparedStatement(); + + MapValue mapVal; + int id = 0; + + /* Case1: all fields are specified with non-null value */ + ArrayValue adVal = new ArrayValue() + .add("35 Network drive") + .add(203); + ArrayValue arrVal = new ArrayValue() + .add("Jack Wang") + .add(40) + .add(adVal); + mapVal = new MapValue() + .put("name", arrVal.get(0)) + .put("age", arrVal.get(1)) + .put("address", + new MapValue().put("street", adVal.get(0)) + .put("room", adVal.get(1))); + execInsertAndCheckInfo(pstmt, ++id, arrVal, tableName, mapVal); + + /* Case2: address = NULL*/ + arrVal = new ArrayValue() + .add("Jack Wang") + .add(40) + .add(NullValue.getInstance()); + mapVal = new MapValue() + .put("name", arrVal.get(0)) + .put("age", arrVal.get(1)) + .put("address", NullValue.getInstance()); + execInsertAndCheckInfo(pstmt, ++id, arrVal, tableName, mapVal); + + /* + * Case3: age = "40" and address.room = "203" which are castable to + * integer + */ + adVal = new ArrayValue() + .add("35 Network drive") + .add("203"); + arrVal = new ArrayValue() + .add("Jack Wang") + .add("40") + .add(adVal); + mapVal = new MapValue() + .put("name", arrVal.get(0)) + .put("age", 40) + .put("address", + new MapValue().put("street", adVal.get(0)) + .put("room", 203)); + execInsertAndCheckInfo(pstmt, ++id, arrVal, tableName, mapVal); + + /* + * Negative cases + */ + /* info.name: Type mismatch on input. Expected STRING, got INTEGER */ + arrVal = new ArrayValue() + .add(40) + .add("Jack Wang") + .add(NullValue.getInstance()); + pstmt.setVariable("$id", new IntegerValue(id)); + pstmt.setVariable("$info", arrVal); + + QueryRequest req = new QueryRequest().setPreparedStatement(pstmt); + try { + getNextHandle().query(req); + fail("Expect fail with IAE but not"); + } catch(IllegalArgumentException ex) { + } + + /* + * Invalid Array value for Record Value, it has 1 element but + * the Record Value contains 3 fields + */ + arrVal = new ArrayValue() + .add("Jack Wang"); + pstmt.setVariable("$id", new IntegerValue(id)); + pstmt.setVariable("$info", arrVal); + + req = new QueryRequest().setPreparedStatement(pstmt); + try { + getNextHandle().query(req); + fail("Expect fail with IAE but not"); + } catch(IllegalArgumentException ex) { + } + } + + @Test + public void testCRDT() { + if (!onprem) { + return; + } + + final String setRegion = "set local region localRegion"; + getNextHandle().doSystemRequest(setRegion, 20000, 1000); + + /* Test reading different types of CRDT. 
*/ + FieldValue val = testCRDT(Type.INTEGER); + assertTrue(val.getInt() == 3); + + val = testCRDT(Type.LONG); + assertTrue(val.getLong() == 3); + + val = testCRDT(Type.NUMBER); + assertTrue(val.getNumber().equals(BigDecimal.valueOf(3))); + } + + @Test + public void testUpdateMultipleRows() { + + assumeKVVersion("testUpdateMultipleRows", 24, 4, 0); + + String tableName = "testUpdateQuery"; + String tableDdl = "CREATE TABLE IF NOT EXISTS " + tableName + "(" + + "sid INTEGER, " + + "id INTEGER, " + + "i INTEGER, " + + "s STRING, " + + "PRIMARY KEY(SHARD(sid), id))"; + TableLimits limits = new TableLimits(100, 100, 1); + + String updBySid = "UPDATE " + tableName + " SET i = i + 1 WHERE sid = 0"; + String updBySidRet = updBySid + " RETURNING *"; + String qryBySid = "SELECT id, i FROM " + tableName + " WHERE sid = 0"; + + int numSids = 2; + int numIdsPerSid = 10; + int updRKB = 10; + int updWKB = 20; + int ddlWaitMs = 10000; + String str = genString(512); + + tableOperation(handle, tableDdl, limits, ddlWaitMs); + + /* load rows */ + PutRequest preq = new PutRequest().setTableName(tableName); + PutResult pret; + for (int sid = 0; sid < numSids; sid++) { + for (int id = 0; id < numIdsPerSid; id++) { + MapValue row = new MapValue() + .put("sid", sid) + .put("id", id) + .put("i", id) + .put("s", str); + preq.setValue(row); + pret = handle.put(preq); + assertNotNull(pret.getVersion()); + } + } + + PreparedStatement update = + handle.prepare(new PrepareRequest().setStatement(updBySid)) + .getPreparedStatement(); + + PreparedStatement query = + handle.prepare(new PrepareRequest().setStatement(qryBySid)) + .getPreparedStatement(); + + QueryRequest req; + QueryResult ret; + int inc = 0; + TableLimits newLimits; + + req = new QueryRequest().setPreparedStatement(update); + ret = handle.query(req); + inc++; + assertEquals(1, ret.getResults().size()); + assertEquals("{\"NumRowsUpdated\":10}", + ret.getResults().get(0).toJson()); + if (!onprem) { + assertEquals(updRKB, ret.getReadKB()); + assertEquals(2 * updRKB, ret.getReadUnits()); + assertEquals(updWKB, ret.getWriteKB()); + } + + /* + * Test maxReadKB/maxWriteKB + * + * Update should fail if data read or write during query is less than + * maxReadKB or maxWriteKB + */ + req = new QueryRequest() + .setPreparedStatement(update) + .setMaxReadKB(updRKB - 1); + try { + ret = handle.query(req); + fail("expect to fail"); + } catch (IllegalArgumentException ex) { + req = req.copy().setMaxReadKB(updRKB); + ret = handle.query(req); + assertEquals("{\"NumRowsUpdated\":" + numIdsPerSid + "}", + ret.getResults().get(0).toJson()); + inc++; + } + + req = new QueryRequest() + .setPreparedStatement(update) + .setMaxWriteKB(updWKB - 3); + try { + handle.query(req); + fail("expect to fail"); + } catch (IllegalArgumentException ex) { + req = req.copy().setMaxWriteKB(updWKB); + ret = handle.query(req); + assertEquals("{\"NumRowsUpdated\":" + numIdsPerSid + "}", + ret.getResults().get(0).toJson()); + inc++; + } + + /* + * Returning clause is not supported if complete primary key is not + * provided. + */ + try { + handle.prepare(new PrepareRequest().setStatement(updBySidRet)); + fail("expect to fail"); + } catch (IllegalArgumentException ex) { + } + + /* Verify rows */ + req = new QueryRequest().setPreparedStatement(query); + ret = handle.query(req); + for (MapValue r : ret.getResults()) { + assertEquals(inc, r.get("i").getInt() - r.get("id").getInt()); + } + + /* + * Test the QueryRequest.limit on update query. 
+ */ + + /* + * Use QueryRequest.limit that is smaller than the number of rows to + * update: + * - In onprem, update query should fail. + * - In cloud, the limit doesn't apply, query should succeed. + */ + req = new QueryRequest() + .setPreparedStatement(update) + .setLimit(numIdsPerSid - 1); + if (onprem) { + try { + ret = handle.query(req); + fail("Query should have failed due to exceeding the limit " + + "for the max number of records can be updated"); + } catch (IllegalArgumentException ex) { + } + + /* Increase the limit, query succeed */ + req.setLimit(numIdsPerSid); + ret = handle.query(req); + inc++; + assertEquals(1, ret.getResults().size()); + assertEquals("{\"NumRowsUpdated\":" + numIdsPerSid + "}", + ret.getResults().get(0).toJson()); + } else { + /* The limit doesn't apply to cloud */ + ret = handle.query(req); + inc++; + assertEquals(1, ret.getResults().size()); + assertEquals("{\"NumRowsUpdated\":" + numIdsPerSid + "}", + ret.getResults().get(0).toJson()); + } + + if (onprem) { + return; + } + + /* + * Test throttling update query + */ + + /* + * Create a new handle configured with no retries + */ + NoSQLHandleConfig config = new NoSQLHandleConfig(getProxyEndpoint()); + setHandleConfig(config); + /* no retries */ + config.configureDefaultRetryHandler(0, 0); + NoSQLHandle handleNoRetry = getHandle(config); + + /* Small readUnits */ + newLimits = new TableLimits(1, 100, 1); + tableOperation(handle, null /* ddlStatement */, newLimits, tableName, + TableResult.State.ACTIVE, ddlWaitMs); + try { + Thread.sleep(2000); + } catch (InterruptedException e) { + } + + boolean throttled = false; + try { + for (int i = 0; i < 3; i++) { + req = new QueryRequest().setPreparedStatement(update); + ret = handleNoRetry.query(req); + inc++; + } + } catch (ReadThrottlingException te) { + throttled = true; + } + assertTrue(throttled); + + /* Small writeUnits */ + newLimits = new TableLimits(100, 1, 1); + tableOperation(handle, null /* ddlStatement */, newLimits, tableName, + TableResult.State.ACTIVE, ddlWaitMs); + try { + Thread.sleep(2000); + } catch (InterruptedException e) { + } + + throttled = false; + try { + for (int i = 0; i < 3; i++) { + req = new QueryRequest().setPreparedStatement(update); + ret = handleNoRetry.query(req); + inc++; + } + } catch (WriteThrottlingException te) { + throttled = true; + } + assertTrue(throttled); + + /* Verify rows */ + req = new QueryRequest().setPreparedStatement(query); + ret = handle.query(req); + for (MapValue r : ret.getResults()) { + assertEquals(inc, r.get("i").getInt() - r.get("id").getInt()); + } + } + + private FieldValue testCRDT(Type type) { + String tableName = "mrtable" + type; + final String createTable = "create table " + tableName + + "(id integer, count " + type + " as mr_counter" + + ", primary key(id)) in regions localRegion"; + + getNextHandle().doSystemRequest(createTable, 20000, 1000); + + /* Insert a row with CRDT. 
*/ + String insertStmt = "insert into " + tableName + + " values (1, default)"; + QueryRequest req = new QueryRequest().setStatement(insertStmt); + getNextHandle().query(req); + + String updateStmt = "Update " + tableName + + " set count = count + 3 where id = 1"; + req = new QueryRequest().setStatement(updateStmt); + getNextHandle().query(req); + + String selectStmt = "select * from " + tableName + " where id = 1"; + req = new QueryRequest().setStatement(selectStmt); + QueryResult ret = getNextHandle().query(req); + assertTrue(ret.getResults().size() == 1); + MapValue res = ret.getResults().get(0); + return res.get("count"); + } + + private void execInsertAndCheckInfo(PreparedStatement pstmt, + int id, + FieldValue info, + String tableName, + MapValue expInfo) { + + pstmt.setVariable("$id", new IntegerValue(id)); + pstmt.setVariable("$info", info); + + QueryRequest req; + QueryResult ret; + + req = new QueryRequest().setPreparedStatement(pstmt); + ret = getNextHandle().query(req); + assertEquals(1, ret.getResults().get(0).asMap() + .get("NumRowsInserted").getInt()); + + String stmt = "select info from " + tableName + " where id = " + id; + req = new QueryRequest().setStatement(stmt); + ret = getNextHandle().query(req); + assertEquals(1, ret.getResults().size()); + assertEquals(expInfo, ret.getResults().get(0).get("info")); + } + + private String createLargeJson(int size) { + MapValue map = new MapValue(); + map.put("data", makeString(size)); + return map.toString(); + } + + private void executeQuery(String statement, + boolean keyOnly, + boolean indexScan, + int expNumRows, + int expReadKB, + int numLimit, + int sizeLimit, + int recordKB) { + executeQuery(statement, keyOnly, indexScan, expNumRows, expReadKB, + numLimit, sizeLimit, recordKB, Consistency.EVENTUAL); + executeQuery(statement, keyOnly, indexScan, expNumRows, expReadKB, + numLimit, sizeLimit, recordKB, Consistency.ABSOLUTE); + } + + private void executeQuery(String statement, + boolean keyOnly, + boolean indexScan, + int expNumRows, + int expReadKB, + int numLimit, + int sizeLimit, + int recordKB, + Consistency consistency) { + + if (traceLevel >= 2) { + System.out.println("Executing query : " + statement); + } + + final int minRead = getMinRead(); + final boolean isAbsolute = (consistency == Consistency.ABSOLUTE); + boolean isDelete = statement.contains("delete"); + + final QueryRequest queryReq = new QueryRequest() + .setStatement(statement) + .setLimit(numLimit) + .setConsistency(consistency) + .setMaxReadKB(sizeLimit) + .setTraceLevel(traceLevel); + + if (consistency != null) { + queryReq.setConsistency(consistency); + } + + int expReadUnits = expReadKB; + int expBatchReadUnits = (sizeLimit > 0) ? sizeLimit : READ_KB_LIMIT; + if (checkKVVersion(21, 3, 6)) { + /* + * Query should suspend after read the table row or key + * (for key only query) if current read cost exceeded size limit, + * so at most the readKB over the size limit + */ + expBatchReadUnits += (!keyOnly) ? recordKB : minRead; + } else { + expBatchReadUnits += (indexScan && !keyOnly) ? recordKB : minRead; + } + expBatchReadUnits += (isDelete ? 
minRead : 0); + if (isAbsolute) { + expBatchReadUnits <<= 1; + expReadUnits <<= 1; + } + + int numRows = 0; + int readKB = 0; + int writeKB = 0; + int readUnits = 0; + int numBatches = 0; + int totalPrepCost = 0; + long startMs = System.currentTimeMillis(); + + do { + if (traceLevel >= 2) { + System.out.println("Starting BATCH " + numBatches); + } + + QueryResult queryRes = getNextHandle().query(queryReq); + + if (traceLevel >= 2) { + System.out.println(" BATCH " + numBatches + + " after handle.query(), calling getResults()"); + } + List results = queryRes.getResults(); + if (traceLevel >= 2) { + System.out.println(" BATCH " + numBatches + + " after getResults()"); + } + + int cnt = results.size(); + if (numLimit > 0) { + assertTrue("Unexpected number of rows returned, expect <= " + + numLimit + ", but get " + cnt + " rows", + cnt <= numLimit); + } + + int rkb = queryRes.getReadKB(); + int runits = queryRes.getReadUnits(); + int wkb = queryRes.getWriteKB(); + int prepCost = (numBatches == 0 ? getMinQueryCost() : 0); + + /* + * Make sure we didn't exceed the read limit. The "+ recordKB" is + * needed because at the RNs we allow the limit to be exceeded by + * 1 row, if we have already read the index entry for that row. The + * "+ 1" is needed for DELETE queries, because a row that satisfies + * the DELETE conditions, we read its primary-index once again to + * do the delete. + */ + assert(queryRes.getReadKB() <= + prepCost + getEffectiveMaxReadKB(queryReq) + recordKB + 1); + + if (showResults) { + for (int i = 0; i < results.size(); ++i) { + System.out.println("Result " + (numRows + i) + " :"); + System.out.println(results.get(i)); + } + } + + if (traceLevel >= 2) { + System.out.println("Batch ReadKB = " + rkb + + " Batch ReadUnits = " + runits + + " Batch WriteKB = " + wkb); + } + + numRows += cnt; + + assertTrue("Unexpected readUnits, expect <= " + + (expBatchReadUnits + prepCost) + ", but get " + runits, + runits <= (expBatchReadUnits + prepCost)); + readKB += rkb; + readUnits += runits; + writeKB += wkb; + totalPrepCost += prepCost; + + numBatches++; + } while (!queryReq.isDone()); + + if (traceLevel >= 2) { + System.out.println("Total ReadKB = " + readKB + + " Total ReadUnits = " + readUnits + + " Total WriteKB = " + writeKB); + } + + if (verbose) { + long diffMs = System.currentTimeMillis() - startMs; + if (diffMs == 0) { + diffMs = 1; /* avoid /0 error */ + } + System.out.println("query used " + readUnits + "RUs in " + + diffMs + "ms (" + + ((readUnits * 1000) / diffMs) + " RUs/s)" + + " maxReadKB=" + sizeLimit + + " limit=" + numLimit); + System.out.println(" query: " + statement); + } + + if (!onprem) { + assertTrue("Read KB and Read units should be > 0", + readKB > 0 && readUnits > 0); + } + assertEquals("Wrong number of rows returned, expect " + expNumRows + + ", but get " + numRows, expNumRows, numRows); + + if (expReadKB >= 0 && onprem == false) { + int delta = 0; + if (numBatches == 1) { + /* + * For ALL_PARTITIONS query, it might charge additional empty + * read cost(1KB) if all matched keys are all in the last shard + * but not 1st partition. + */ + delta = 1; + } else { + /* If a batch must resume after the resume key, but there are no + * more keys in the index range, an empty read is charged. 
*/ + delta = 1; + } + + if (isAbsolute) { + delta <<= 1; + } + + expReadUnits += totalPrepCost; + + if (traceLevel >= 2) { + queryReq.printTrace(System.out); + } + + assertTrue("Unexpected read units, exp in range[" + + expReadUnits + ", " + (expReadUnits + delta) + + "] actual " + readUnits, + readUnits >= expReadUnits && + readUnits <= expReadUnits + delta); + /* Verify the readKB with readUnits */ + assertQueryReadKB(readKB, readUnits, totalPrepCost, isAbsolute); + } + } + + private void executeQuery(String query, + Map bindValues, + int expNumRows, + int maxReadKB, + boolean usePrepStmt) { + + final QueryRequest queryReq; + + if (bindValues == null || !usePrepStmt) { + queryReq = new QueryRequest(). + setStatement(query). + setMaxReadKB(maxReadKB). + setTraceLevel(traceLevel); + } else { + PrepareRequest prepReq = new PrepareRequest().setStatement(query); + PrepareResult prepRes = getNextHandle().prepare(prepReq); + PreparedStatement prepStmt = prepRes.getPreparedStatement(); + if (bindValues != null) { + for (Entry entry : bindValues.entrySet()) { + prepStmt.setVariable(entry.getKey(), entry.getValue()); + } + } + + queryReq = new QueryRequest(). + setPreparedStatement(prepStmt). + setMaxReadKB(maxReadKB). + setTraceLevel(traceLevel); + } + + queryReq.setTraceLevel(traceLevel); + + QueryResult queryRes; + int numRows = 0; + int totalReadKB = 0; + int totalReadUnits = 0; + + long startMs = System.currentTimeMillis(); + + do { + queryRes = getNextHandle().query(queryReq); + numRows += queryRes.getResults().size(); + + if (showResults) { + List results = queryRes.getResults(); + for (int i = 0; i < results.size(); ++i) { + System.out.println("Result " + i + " :"); + System.out.println(results.get(i)); + } + System.out.println("ReadKB = " + queryRes.getReadKB() + + " ReadUnits = " + queryRes.getReadUnits()); + } + + /* + * Note: in some rare cases we may get zero readKB with 1 result. + * From Markos: + * + * When we do index/sort based group by, if we reach the read limit + * in the middle of computing a group, we include the partially + * computed group row in the continuation key. When we send the + * continuation key back, we may discover (without reading any + * bytes) that the group was actually fully computed, and we now + * send it back as a result. + * + * So we take this rare case into account by allowing zero readKB + * if the numresults is 1 and we've already accumulated readKBs. 
+ */ + if (!onprem && + (queryRes.getResults().size() > 1 || totalReadKB == 0)) { + assertTrue(queryRes.getReadKB() > 0); + } + + totalReadKB += queryRes.getReadKB(); + totalReadUnits += queryRes.getReadUnits(); + } while (!queryReq.isDone()); + + assertTrue("Wrong number of rows returned, expect " + expNumRows + + ", but get " + numRows, numRows == expNumRows); + + if (verbose) { + long diffMs = System.currentTimeMillis() - startMs; + if (diffMs == 0) { + diffMs = 1; /* avoid /0 error */ + } + System.out.println("query used " + totalReadUnits + "RUs in " + + diffMs + "ms (" + + ((totalReadUnits * 1000) / diffMs) + " RUs/s)"); + System.out.println(" maxReadKB=" + maxReadKB); + System.out.println(" query: " + query); + } + } + + private void loadRowsToScanTable(int numMajor, int numPerMajor, int nKB) { + + MapValue value = new MapValue(); + PutRequest putRequest = new PutRequest() + .setValue(value) + .setTableName(tableName); + + String states[] = { "CA", "OR", "WA", "VT", "NY" }; + int[] salaries = { 1000, 15000, 8000, 9000 }; + ArrayValue[] arrays = new ArrayValue[4]; + + for (int i = 0; i < 4; ++i) { + arrays[i] = new ArrayValue(4); + } + arrays[0].add(1).add(5).add(7).add(10); + arrays[1].add(4).add(7).add(7).add(11); + arrays[2].add(3).add(8).add(17).add(21); + arrays[3].add(3).add(8).add(12).add(14); + + int slen = (nKB - 1) * 1024; + /* Load rows */ + for (int i = 0; i < numMajor; i++) { + value.put("sid", i); + for (int j = 0; j < numPerMajor; j++) { + value.put("id", j); + value.put("name", "name_" + j); + value.put("age", j % 10); + value.put("state", states[j % 5]); + value.put("salary", salaries[j % 4]); + value.put("array", arrays[j % 4]); + value.put("longString", genString(slen)); + PutResult res = getNextHandle().put(putRequest); + assertNotNull("Put failed", res.getVersion()); + } + } + } + + private String genString(int len) { + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < len; i++) { + sb.append((char)('A' + i % 26)); + } + return sb.toString(); + } + + private void loadRowsToTable(String tabName, String[] jsons) { + + for (String json : jsons) { + MapValue value = (MapValue)JsonUtils.createValueFromJson(json, null); + PutRequest putRequest = new PutRequest() + .setValue(value) + .setTableName(tabName); + PutResult res = getNextHandle().put(putRequest); + assertNotNull("Put failed", res.getVersion()); + } + } + + private static int getMinQueryCost() { + return MIN_QUERY_COST; + } + + /* + * Note that the "expQueryKB" is the expected read KB for executing query, + * it doesn't include the cost of preparing query. + */ + private void assertQueryReadKB(int expQueryKB, + int actualKB, + int actualUnits, + int prepCost, + boolean isAbsolute) { + assertReadKB(expQueryKB, + (actualKB - prepCost), + (actualUnits - prepCost), + isAbsolute); + } + + /* + * Note that the "expQueryKB" is the expected read KB for executing query, + * it doesn't include the cost of preparing query. + */ + private void assertQueryReadKB(Result result, + int expQueryKB, + int prepCost, + boolean isAbsolute) { + assertQueryReadKB(expQueryKB, + result.getReadKBInternal(), + result.getReadUnitsInternal(), + prepCost, + isAbsolute); + } + + private void assertQueryReadKB(int actualKB, + int actualUnits, + int prepCost, + boolean isAbsolute) { + /* + * Check on actual readKB and actual read units with the specified + * isAsbolute only. 
+ */ + assertReadKBUnits((actualKB - prepCost), + (actualUnits - prepCost), + isAbsolute); + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/QueryThrottlingTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/QueryThrottlingTest.java new file mode 100644 index 00000000..e02618e0 --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/QueryThrottlingTest.java @@ -0,0 +1,152 @@ +/*- + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * See the file LICENSE for redistribution information. + * + */ + +package oracle.nosql.proxy; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeTrue; + +import java.util.ArrayList; +import java.util.List; + +import oracle.nosql.driver.NoSQLHandle; +import oracle.nosql.driver.NoSQLHandleConfig; +import oracle.nosql.driver.ReadThrottlingException; +import oracle.nosql.driver.ops.PutRequest; +import oracle.nosql.driver.ops.PutResult; +import oracle.nosql.driver.ops.QueryRequest; +import oracle.nosql.driver.ops.QueryResult; +import oracle.nosql.driver.ops.TableLimits; +import oracle.nosql.driver.ops.TableResult; +import oracle.nosql.driver.values.MapValue; +import oracle.nosql.proxy.security.SecureTestUtil; + +import org.junit.BeforeClass; +import org.junit.Test; + +/** + * Some test cases that exercise issues with queries, max read + * sizes, throttling and ability to make forward progress on + * small capacity tables. + * + * These tests only runs against a local server and not minicloud. + */ +public class QueryThrottlingTest extends ProxyTestBase { + + @BeforeClass + public static void staticSetUp() + throws Exception { + + /* This test is for cloudsim only */ + assumeTrue("Skip QueryThrottlingTest if not cloud sim test", + !Boolean.getBoolean(ONPREM_PROP) && + !Boolean.getBoolean(USEMC_PROP) && + !Boolean.getBoolean(USECLOUD_PROP)); + + ProxyTestBase.staticSetUp(); + } + + @Test + public void throttleTest() throws Exception { + + final String tableName = "testQueryThrottle"; + /* + * don't use too many records as it slows the test because of the + * delay in the query loop, but use enough to cause some looping. + */ + final int numRecords = 500; + final int recordSize = 1000; + final int readLimit = 100; + + /* + * Create a new handle configured with no retries + */ + NoSQLHandleConfig config = new NoSQLHandleConfig(getEndpoint()); + SecureTestUtil.setAuthProvider(config, isSecure(), getTenantId()); + config.configureDefaultRetryHandler(0, 0); + + /* + * Open the handle + */ + NoSQLHandle myhandle = getHandle(config); + + /* + * Use high write throughput for loading, read throughput is what + * is being tested. + */ + TableResult tres = tableOperation( + myhandle, + "create table " + tableName + "(id integer, " + + "load string, primary key(id))", + new TableLimits(readLimit, 30000, 50), + TableResult.State.ACTIVE, + 20000); + assertEquals(TableResult.State.ACTIVE, tres.getTableState()); + + /* + * Load the table + */ + MapValue value = new MapValue().put("load", makeString(recordSize)); + PutRequest putRequest = new PutRequest().setTableName(tableName). 
+ setValue(value); + for (int i = 0; i < numRecords; i++) { + value.put("id", i); + PutResult res = myhandle.put(putRequest); + assertNotNull(res.getVersion()); + } + + List res; + String query = "select count(*) from " + tableName; + QueryRequest qr = new QueryRequest().setStatement(query); + res = runQuery(myhandle, qr); + assertEquals(1, res.size()); + + delay(1000); + + query = "select * from " + tableName; + qr = new QueryRequest().setStatement(query); + res = runQuery(myhandle, qr); + assertEquals(numRecords, res.size()); + } + + /** + * Run the query in a loop until no more results. + * return the number of results. + */ + private List runQuery(NoSQLHandle myhandle, QueryRequest qr) { + + List results = new ArrayList(); + + try { + do { + QueryResult res = myhandle.query(qr); + int num = res.getResults().size(); + if (num > 0) { + results.addAll(res.getResults()); + } + /* + * Do approximate rate-limiting to prevent throttling. This test + * is intended to ensure forward progress and not directly + * test throttling behavior. + * + * This should ensure that throttling does not occur, + * assuming that the proxy has halved the default + * throughput for the table. While this test could modify + * the requested KBs itself that is not the point of the + * test -- the proxy should do the reduction + */ + delay(1000); + } while (!qr.isDone()); + } catch (ReadThrottlingException rte) { + fail("Test should not have been throttled. Check the delay as " + + "it may be an environmental issue"); + } + return results; + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/RowMetadataTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/RowMetadataTest.java new file mode 100644 index 00000000..68cab1f4 --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/RowMetadataTest.java @@ -0,0 +1,1621 @@ +/*- + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * See the file LICENSE for redistribution information. 
+ * + */ + +package oracle.nosql.proxy; + + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; + +import oracle.nosql.driver.Consistency; +import oracle.nosql.driver.Durability; +import oracle.nosql.driver.Durability.SyncPolicy; +import oracle.nosql.driver.Durability.ReplicaAckPolicy; +import oracle.nosql.driver.FieldRange; +import oracle.nosql.driver.SystemException; +import oracle.nosql.driver.TableNotFoundException; +import oracle.nosql.driver.TimeToLive; +import oracle.nosql.driver.Version; +import oracle.nosql.driver.ops.DeleteRequest; +import oracle.nosql.driver.ops.DeleteResult; +import oracle.nosql.driver.ops.GetRequest; +import oracle.nosql.driver.ops.GetResult; +import oracle.nosql.driver.ops.MultiDeleteRequest; +import oracle.nosql.driver.ops.MultiDeleteResult; +import oracle.nosql.driver.ops.PrepareRequest; +import oracle.nosql.driver.ops.PrepareResult; +import oracle.nosql.driver.ops.PreparedStatement; +import oracle.nosql.driver.ops.PutRequest; +import oracle.nosql.driver.ops.PutRequest.Option; +import oracle.nosql.driver.ops.PutResult; +import oracle.nosql.driver.ops.QueryRequest; +import oracle.nosql.driver.ops.QueryResult; +import oracle.nosql.driver.ops.TableLimits; +import oracle.nosql.driver.ops.TableResult; +import oracle.nosql.driver.ops.WriteMultipleRequest; +import oracle.nosql.driver.ops.WriteMultipleResult; +import oracle.nosql.driver.ops.WriteRequest; +import oracle.nosql.driver.ops.WriteResult; +import oracle.nosql.driver.values.IntegerValue; +import oracle.nosql.driver.values.JsonNullValue; +import oracle.nosql.driver.values.JsonUtils; +import oracle.nosql.driver.values.MapValue; +import oracle.nosql.driver.values.NullValue; + +import org.junit.FixMethodOrder; +import org.junit.Test; +import org.junit.runners.MethodSorters; + +/* + * The tests are ordered so that the zzz* test goes last so it picks up + * DDL history reliably. 
+ */ +@FixMethodOrder(MethodSorters.NAME_ASCENDING) +public class RowMetadataTest extends ProxyTestBase { + + private final static String RM1 = "{\"n\":1}"; + private final static String RM2 = "{\"n\":2}"; + private final static String RM3 = "{\"n\":3}"; + private final static String RM4 = "{\"n\":4}"; + private final static String RM5 = "{\"n\":5}"; + + @Test + public void smokeTest() { + + try { + + MapValue key = new MapValue().put("id", 10); + + MapValue value = new MapValue().put("id", 10).put("name", "jane"); + + /* drop a table */ + TableResult tres = tableOperation(handle, + "drop table if exists testusers", + null, TableResult.State.DROPPED, + 20000); + assertNotNull(tres.getTableName()); + assertTrue(tres.getTableState() == TableResult.State.DROPPED); + assertNull(tres.getTableLimits()); + + /* Create a table */ + tres = tableOperation( + handle, + "create table if not exists testusers(id integer, " + + "name string, primary key(id))", + new TableLimits(500, 500, 50), + 20000); + assertEquals(TableResult.State.ACTIVE, tres.getTableState()); + + /* Create an index */ + tres = tableOperation( + handle, + "create index if not exists Name on testusers(name)", + null, + TableResult.State.ACTIVE, + 20000); + assertEquals(TableResult.State.ACTIVE, tres.getTableState()); + + /* PUT */ + PutRequest putRequest = new PutRequest() + .setValue(value) + .setTableName("testusers") + .setRowMetadata(RM1); + + assertEquals(RM1, putRequest.getRowMetadata()); + + PutResult res = handle.put(putRequest); + assertNotNull("Put failed", res.getVersion()); + assertWriteKB(res); + + /* put another one. set TTL to test that path */ + putRequest.setTTL(TimeToLive.ofHours(2)); + putRequest.setRowMetadata(RM2); + value.put("id", 20); + handle.put(putRequest); + + /* + * Test ReturnRow for simple put of a row that exists. 2 cases: + * 1. unconditional (will return info) + * 2. 
if absent (will return info) + */ + value.put("id", 20); + putRequest.setReturnRow(true); + putRequest.setRowMetadata(RM3); + + PutResult pr = handle.put(putRequest); + assertNotNull(pr.getVersion()); /* success */ + assertNotNull(pr.getExistingVersion()); + assertNotNull(pr.getExistingValue()); + assertTrue(pr.getExistingModificationTime() != 0); + assertEquals(RM2, pr.getExistingRowMetadata()); + assertReadKB(pr); + assertWriteKB(pr); + + putRequest.setOption(Option.IfAbsent); + putRequest.setRowMetadata(RM4); + pr = handle.put(putRequest); + assertNull(pr.getVersion()); /* failure */ + assertNotNull(pr.getExistingVersion()); + assertNotNull(pr.getExistingValue()); + assertTrue(pr.getExistingModificationTime() != 0); + assertEquals(RM3, pr.getExistingRowMetadata()); + assertReadKB(pr); + + /* clean up */ + putRequest.setReturnRow(false); + putRequest.setOption(null); + + /* GET first row, id: 10 */ + GetRequest getRequest = new GetRequest() + .setKey(key) + .setTableName("testusers"); + + GetResult res1 = handle.get(getRequest); + assertNotNull("Get failed", res1.getJsonValue()); + assertReadKB(res1); + + assertNotNull(res1.getRowMetadata()); + assertEquals(RM1, res1.getRowMetadata()); + + /* DELETE same key, id: 10 */ + DeleteRequest delRequest = new DeleteRequest() + .setKey(key) + .setTableName("testusers") + .setReturnRow(true) + .setRowMetadata(RM2); + + DeleteResult del = handle.delete(delRequest); + assertTrue("Delete failed", del.getSuccess()); + assertWriteKB(del); + assertEquals( RM1, del.getExistingRowMetadata()); + + /* GET -- no row, it was removed above */ + getRequest.setTableName("testusers"); + res1 = handle.get(getRequest); + assertNull(res1.getValue()); + assertReadKB(res1); + + /* GET -- no table */ + try { + getRequest.setTableName("not_a_table"); + res1 = handle.get(getRequest); + fail("Attempt to access missing table should have thrown"); + } catch (TableNotFoundException nse) { + /* success */ + } + + /* PUT -- invalid row -- this will throw */ + try { + value.remove("id"); + value.put("not_a_field", 1); + res = handle.put(putRequest); + fail("Attempt to put invalid row should have thrown"); + } catch (IllegalArgumentException iae) { + /* success */ + } + } catch (Exception e) { + checkErrorMessage(e); + e.printStackTrace(); + fail("Exception in test"); + } + } + + @Test + public void testPutGetDelete() { + + final String tableName = "testusers"; + final int recordKB = 2; + + /* Create a table */ + TableResult tres = tableOperation( + handle, + "create table if not exists testusers(id integer, " + + "name string, primary key(id))", + new TableLimits(500, 500, 50), + 20000); + assertEquals(TableResult.State.ACTIVE, tres.getTableState()); + + final String name = genString((recordKB - 1) * 1024); + MapValue value = new MapValue().put("id", 10).put("name", name); + MapValue newValue = new MapValue().put("id", 11).put("name", name); + MapValue newValue1 = new MapValue().put("id", 12).put("name", name); + MapValue newValue2 = new MapValue().put("id", 13).put("name", name); + + /* Durability will be ignored unless run with -Donprem=true */ + Durability dur = new Durability(SyncPolicy.WRITE_NO_SYNC, + SyncPolicy.NO_SYNC, + ReplicaAckPolicy.NONE); + + /* Put a row with empty table name: should get illegal argument */ + PutRequest putReq = new PutRequest() + .setValue(value) + .setDurability(dur) + .setTableName("") + .setRowMetadata(RM1); + try { + handle.put(putReq); + fail("expected illegal argument exception on empty table name"); + } catch (IllegalArgumentException 
iae) { + /* success */ + } + + /* Put a row */ + putReq = new PutRequest() + .setValue(value) + .setDurability(dur) + .setTableName(tableName) + .setRowMetadata(RM2); + PutResult putRes = handle.put(putReq); + checkPutResult(putReq, putRes, + true /* shouldSucceed */, + false /* rowPresent */, + null /* expPrevValue */, + null /* expPrevVersion */, + false, /* modtime should be zero */ + recordKB, + false /* put overWrite */ ); + + /* Put a row again with SetReturnRow(false). + * expect no row returned + */ + putReq.setReturnRow(false); + putRes = handle.put(putReq); + checkPutResult(putReq, putRes, + true /* shouldSucceed */, + false /* rowPresent */, + null /* expPrevValue */, + null /* expPrevVersion */, + false, /* modtime should be zero */ + recordKB, + true /* put over write */); + Version oldVersion = putRes.getVersion(); + assertNull(putRes.getExistingRowMetadata()); + + /* + * Put row again with SetReturnRow(true), + * expect existing row returned. + */ + putReq.setReturnRow(true); + putRes = handle.put(putReq); + checkPutResult(putReq, putRes, + true /* shouldSucceed */, + true /* rowPresent */, + value /* expPrevValue */, + oldVersion /* expPrevVersion */, + true, /* modtime should be zero */ + recordKB, + true /* put overWrite */); + oldVersion = putRes.getVersion(); + assertEquals(RM2, putRes.getExistingRowMetadata()); + + /* + * Put a new row with SetReturnRow(true), + * expect no existing row returned. + */ + putReq = new PutRequest() + .setValue(newValue) + .setDurability(dur) + .setTableName(tableName) + .setReturnRow(true) + .setRowMetadata(RM3); + putRes = handle.put(putReq); + checkPutResult(putReq, putRes, + true /* shouldSucceed */, + false /* rowPresent */, + null /* expPrevValue */, + null /* expPrevVersion */, + false, /* modtime should be zero */ + recordKB, + false /* put overWrite */); + assertNull(putRes.getExistingRowMetadata()); + + /* PutIfAbsent an existing row, it should fail */ + putReq = new PutRequest() + .setOption(Option.IfAbsent) + .setValue(value) + .setDurability(dur) + .setTableName(tableName) + .setRowMetadata(RM4); + putRes = handle.put(putReq); + checkPutResult(putReq, putRes, + false /* shouldSucceed */, + false /* rowPresent */, + null /* expPrevValue */, + null /* expPrevVersion */, + false, /* modtime should be zero */ + recordKB, + false /* put overWrite */); + assertNull(putRes.getExistingRowMetadata()); + + /* + * PutIfAbsent fails + SetReturnRow(true), + * return existing value and version + */ + putReq.setReturnRow(true); + putRes = handle.put(putReq); + checkPutResult(putReq, putRes, + false /* shouldSucceed */, + true /* rowPresent */, + value /* expPrevValue */, + oldVersion /* expPrevVersion */, + true, /* modtime should be recent */ + recordKB, + false /* put overWrite */); + assertEquals(RM2, putRes.getExistingRowMetadata()); + + /* PutIfPresent an existing row, it should succeed */ + putReq = new PutRequest() + .setOption(Option.IfPresent) + .setValue(value) + .setDurability(dur) + .setTableName(tableName) + .setRowMetadata(RM5); + putRes = handle.put(putReq); + checkPutResult(putReq, putRes, + true /* shouldSucceed */, + false /* rowPresent */, + null /* expPrevValue */, + null /* expPrevVersion */, + false, /* modtime should be zero */ + recordKB, + false /* put overWrite */); + oldVersion = putRes.getVersion(); + assertNull(putRes.getExistingRowMetadata()); // no expPrevValue + + /* + * PutIfPresent succeed + SetReturnRow(true), + * expect existing row returned. 
+ */ + putReq.setReturnRow(true); + putRes = handle.put(putReq); + checkPutResult(putReq, putRes, + true /* shouldSucceed */, + true /* rowPresent */, + value /* expPrevValue */, + oldVersion /* expPrevVersion */, + true, /* modtime should be zero */ + recordKB, + false /* put overWrite */); + Version ifVersion = putRes.getVersion(); + assertEquals(RM5, putRes.getExistingRowMetadata()); + + /* PutIfPresent an new row, it should fail */ + putReq = new PutRequest() + .setOption(Option.IfPresent) + .setValue(newValue1) + .setDurability(dur) + .setTableName(tableName) + .setRowMetadata(RM1); + putRes = handle.put(putReq); + checkPutResult(putReq, putRes, + false /* shouldSucceed */, + false /* rowPresent */, + null /* expPrevValue */, + null /* expPrevVersion */, + false, /* modtime should be zero */ + recordKB, + false /* put overWrite */); + assertNull(putRes.getExistingRowMetadata()); + + /* + * PutIfPresent fail + SetReturnRow(true), + * expect no existing row returned. + */ + putReq.setReturnRow(true); + putRes = handle.put(putReq); + checkPutResult(putReq, putRes, + false /* shouldSucceed */, + false /* rowPresent */, + null /* expPrevValue */, + null /* expPrevVersion */, + false, /* modtime should be zero */ + recordKB, + false /* put overWrite */); + assertNull(putRes.getExistingRowMetadata()); + + /* PutIfAbsent an new row, it should succeed */ + putReq = new PutRequest() + .setOption(Option.IfAbsent) + .setValue(newValue1) + .setDurability(dur) + .setTableName(tableName) + .setRowMetadata(RM2); + putRes = handle.put(putReq); + checkPutResult(putReq, putRes, + true /* shouldSucceed */, + false /* rowPresent */, + null /* expPrevValue */, + null /* expPrevVersion */, + false, /* modtime should be zero */ + recordKB, + false /* put overWrite */); + assertNull(putRes.getExistingRowMetadata()); + + /* PutIfAbsent success + SetReturnRow(true) */ + putReq.setValue(newValue2).setReturnRow(true); + putRes = handle.put(putReq); + checkPutResult(putReq, putRes, + true /* shouldSucceed */, + false /* rowPresent */, + null /* expPrevValue */, + null /* expPrevVersion */, + false, /* modtime should be zero */ + recordKB, + false /* put overWrite */); + assertNull(putRes.getExistingRowMetadata()); + + /* + * PutIfVersion an existing row with unmatched version, it should fail. + */ + putReq = new PutRequest() + .setOption(Option.IfVersion) + .setMatchVersion(oldVersion) + .setValue(value) + .setDurability(dur) + .setTableName(tableName) + .setRowMetadata(RM3); + putRes = handle.put(putReq); + checkPutResult(putReq, putRes, + false /* shouldSucceed */, + false /* rowPresent */, + null /* expPrevValue */, + null /* expPrevVersion */, + false, /* modtime should be zero */ + recordKB, + false /* put overWrite */); + assertNull(putRes.getExistingRowMetadata()); + + /* + * PutIfVersion fails + SetReturnRow(true), + * expect existing row returned. + */ + putReq.setReturnRow(true); + putRes = handle.put(putReq); + checkPutResult(putReq, putRes, + false /* shouldSucceed */, + true /* rowPresent */, + value /* expPrevValue */, + ifVersion /* expPrevVersion */, + true, /* modtime should be recent */ + recordKB, + false /* put overWrite */); + assertEquals(RM5, putRes.getExistingRowMetadata()); + + /* + * Put an existing row with matching version, it should succeed. 
+ */ + putReq = new PutRequest() + .setOption(Option.IfVersion) + .setMatchVersion(ifVersion) + .setValue(value) + .setDurability(dur) + .setTableName(tableName) + .setRowMetadata(RM4); + putRes = handle.put(putReq); + checkPutResult(putReq, putRes, + true /* shouldSucceed */, + false /* rowPresent */, + null /* expPrevValue */, + null /* expPrevVersion */, + false, /* modtime should be zero */ + recordKB, + false /* put overWrite */); + ifVersion = putRes.getVersion(); + assertNull(putRes.getExistingRowMetadata()); + + /* + * PutIfVersion succeed + SetReturnRow(true), + * expect no existing row returned. + */ + putReq.setMatchVersion(ifVersion).setReturnRow(true); + putRes = handle.put(putReq); + checkPutResult(putReq, putRes, + true /* shouldSucceed */, + false /* rowPresent */, + null /* expPrevValue */, + null /* expPrevVersion */, + false, /* modtime should be zero */ + recordKB, + false /* put overWrite */); + Version newVersion = putRes.getVersion(); + assertNull(putRes.getExistingRowMetadata()); + + /* + * Put with IfVersion but no matched version is specified, put should + * fail. + */ + putReq = new PutRequest() + .setOption(Option.IfVersion) + .setValue(value) + .setDurability(dur) + .setTableName(tableName) + .setRowMetadata(RM5); + try { + putRes = handle.put(putReq); + fail("Put with IfVersion should fail"); + } catch (IllegalArgumentException iae) { + checkErrorMessage(iae); + } + + /* + * Get + */ + + /* Get a row with empty table name: should get illegal argument */ + MapValue key = new MapValue().put("id", 10); + GetRequest getReq = new GetRequest() + .setKey(key) + .setTableName(""); + try { + handle.get(getReq); + fail("expected illegal argument exception on empty table name"); + } catch (IllegalArgumentException iae) { + /* success */ + } + + /* Get a row */ + getReq = new GetRequest() + .setKey(key) + .setTableName(tableName); + GetResult getRes = handle.get(getReq); + checkGetResult(getReq, getRes, + true /* rowPresent*/, + value, + null, /* Don't check version if Consistency.EVENTUAL */ + true, /* modtime should be recent */ + recordKB); + assertEquals(RM4, getRes.getRowMetadata()); + + /* Get a row with ABSOLUTE consistency */ + getReq.setConsistency(Consistency.ABSOLUTE); + getRes = handle.get(getReq); + checkGetResult(getReq, getRes, + true /* rowPresent*/, + value, + newVersion, + true, /* modtime should be recent */ + recordKB); + assertEquals(RM4, getRes.getRowMetadata()); + + /* Put row with null row metadata, ie remove */ + putReq = new PutRequest() + .setValue(value) + .setDurability(dur) + .setTableName(tableName) + .setRowMetadata(null); + handle.put(putReq); + + getReq = new GetRequest() + .setKey(key) + .setTableName(tableName); + getRes = handle.get(getReq); + checkGetResult(getReq, getRes, + true /* rowPresent*/, + value, + null, /* Don't check version if Consistency.EVENTUAL */ + true, /* modtime should be recent */ + recordKB); + assertNull(getRes.getRowMetadata()); + + + /* Get non-existing row */ + key = new MapValue().put("id", 100); + getReq = new GetRequest() + .setKey(key) + .setTableName(tableName); + getRes = handle.get(getReq); + checkGetResult(getReq, getRes, + false /* rowPresent*/, + null /* expValue */, + null /* expVersion */, + false, /* modtime should be zero */ + recordKB); + assertNull(getRes.getRowMetadata()); + + /* Get a row with ABSOLUTE consistency */ + getReq.setConsistency(Consistency.ABSOLUTE); + getRes = handle.get(getReq); + checkGetResult(getReq, getRes, + false /* rowPresent*/, + null /* expValue */, + null /* 
expVersion */,
+                       false, /* modtime should be zero */
+                       recordKB);
+        assertNull(getRes.getRowMetadata());
+
+        /* Delete a row with empty table name: should get illegal argument */
+        key = new MapValue().put("id", 10);
+        DeleteRequest delReq = new DeleteRequest()
+            .setKey(key)
+            .setTableName("");
+        try {
+            handle.delete(delReq);
+            fail("expected illegal argument exception on empty table name");
+        } catch (IllegalArgumentException iae) {
+            /* success */
+        }
+
+        /* Delete a row */
+        delReq = new DeleteRequest()
+            .setKey(key)
+            .setTableName(tableName);
+        DeleteResult delRes = handle.delete(delReq);
+        checkDeleteResult(delReq, delRes,
+                          true /* shouldSucceed */,
+                          false /* rowPresent */,
+                          null /* expPrevValue */,
+                          null /* expPrevVersion */,
+                          false, /* modtime should be zero */
+                          recordKB);
+        assertNull(delRes.getExistingRowMetadata());
+
+        /* Put the row back to store */
+        putReq = new PutRequest()
+            .setValue(value)
+            .setTableName(tableName)
+            .setRowMetadata(RM5);
+        putRes = handle.put(putReq);
+        oldVersion = putRes.getVersion();
+        assertNotNull(oldVersion);
+        assertNull(putRes.getExistingRowMetadata());
+
+        /* Delete succeeds + setReturnRow(true), existing row returned. */
+        delReq.setReturnRow(true);
+        delRes = handle.delete(delReq);
+        checkDeleteResult(delReq, delRes,
+                          true /* shouldSucceed */,
+                          true /* rowPresent */,
+                          value /* expPrevValue */,
+                          oldVersion /* expPrevVersion */,
+                          true, /* modtime should be recent */
+                          recordKB);
+        assertEquals(RM5, delRes.getExistingRowMetadata());
+
+        /* Delete fails + setReturnRow(true), no existing row returned. */
+        delRes = handle.delete(delReq);
+        checkDeleteResult(delReq, delRes,
+                          false /* shouldSucceed */,
+                          false /* rowPresent */,
+                          null /* expPrevValue */,
+                          null /* expPrevVersion */,
+                          false, /* modtime should be zero */
+                          recordKB);
+        assertNull(delRes.getExistingRowMetadata());
+
+        /* Put the row back to store */
+        putReq = new PutRequest()
+            .setValue(value)
+            .setTableName(tableName)
+            .setRowMetadata(RM1);
+        putRes = handle.put(putReq);
+        ifVersion = putRes.getVersion();
+        assertNull(putRes.getExistingRowMetadata());
+
+        /* DeleteIfVersion with unmatched version, it should fail */
+        delReq = new DeleteRequest()
+            .setMatchVersion(oldVersion)
+            .setKey(key)
+            .setTableName(tableName);
+        delRes = handle.delete(delReq);
+        checkDeleteResult(delReq, delRes,
+                          false /* shouldSucceed */,
+                          false /* rowPresent */,
+                          null /* expPrevValue */,
+                          null /* expPrevVersion */,
+                          false, /* modtime should be zero */
+                          recordKB);
+        assertNull(delRes.getExistingRowMetadata());
+
+        /*
+         * DeleteIfVersion with unmatched version + setReturnRow(true),
+         * the existing row is returned.
+         */
+        delReq.setReturnRow(true);
+        delRes = handle.delete(delReq);
+        checkDeleteResult(delReq, delRes,
+                          false /* shouldSucceed */,
+                          true /* rowPresent */,
+                          value /* expPrevValue */,
+                          ifVersion /* expPrevVersion */,
+                          true, /* modtime should be recent */
+                          recordKB);
+        assertEquals(RM1, delRes.getExistingRowMetadata());
+
+        /* DeleteIfVersion with matching version, it should succeed.
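+         * (Without setReturnRow(true), no previous value, version or row
+         * metadata is expected, even though the delete succeeds.)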
*/ + delReq = new DeleteRequest() + .setMatchVersion(ifVersion) + .setKey(key) + .setTableName(tableName); + delRes = handle.delete(delReq); + checkDeleteResult(delReq, delRes, + true /* shouldSucceed */, + false /* rowPresent */, + null /* expPrevValue */, + null /* expPrevVersion */, + false, /* modtime should be zero */ + recordKB); + assertNull(delRes.getExistingRowMetadata()); + + /* Put the row back to store */ + putReq = new PutRequest() + .setValue(value) + .setTableName(tableName) + .setRowMetadata(RM2); + putRes = handle.put(putReq); + ifVersion = putRes.getVersion(); + assertNull(putRes.getExistingRowMetadata()); + + /* + * DeleteIfVersion with matched version + setReturnRow(true), + * it should succeed but no existing row returned. + */ + delReq.setMatchVersion(ifVersion).setReturnRow(true); + delRes = handle.delete(delReq); + checkDeleteResult(delReq, delRes, + true /* shouldSucceed */, + false /* returnRow */, + null /* expPrevValue */, + null /* expPrevVersion */, + false, /* modtime should be zero */ + recordKB); + assertNull(delRes.getExistingRowMetadata()); + + /* DeleteIfVersion with a key not existing, it should fail. */ + delReq = new DeleteRequest() + .setMatchVersion(ifVersion) + .setKey(key) + .setTableName(tableName); + delRes = handle.delete(delReq); + checkDeleteResult(delReq, delRes, + false /* shouldSucceed */, + false /* returnRow */, + null /* expPrevValue */, + null /* expPrevVersion */, + false, /* modtime should be zero */ + recordKB); + assertNull(delRes.getExistingRowMetadata()); + + /* + * DeleteIfVersion with a key not existing + setReturnRow(true), + * it should fail and no existing row returned. + */ + delReq.setReturnRow(true); + delRes = handle.delete(delReq); + checkDeleteResult(delReq, delRes, + false /* shouldSucceed */, + false /* returnRow */, + null /* expPrevValue */, + null /* expPrevVersion */, + false, /* modtime should be zero */ + recordKB); + assertNull(delRes.getExistingRowMetadata()); + } + + private void checkModTime(long modTime, boolean modTimeRecent) { + if (modTimeRecent) { + if (modTime < (System.currentTimeMillis() - 2000)) { + fail("Expected modtime to be recent, got " + modTime); + } + } else { + if (modTime != 0) { + fail("Expected modtime to be zero, got " + modTime); + } + } + } + + private void checkPutResult(PutRequest request, + PutResult result, + boolean shouldSucceed, + boolean rowPresent, + MapValue expPrevValue, + Version expPrevVersion, + boolean modTimeRecent, + int recordKB, + boolean putOverWrite) { + if (shouldSucceed) { + assertNotNull("Put should succeed", result.getVersion()); + } else { + assertNull("Put should fail", result.getVersion()); + } + checkExistingValueVersion(request, result, shouldSucceed, rowPresent, + expPrevValue, expPrevVersion); + + checkModTime(result.getExistingModificationTime(), modTimeRecent); + + int[] expCosts = getPutReadWriteCost(request, + shouldSucceed, + rowPresent, + recordKB, + putOverWrite); + + if (onprem == false) { + assertReadKB(result, expCosts[0], true /* isAbsolute */); + assertWriteKB(result, expCosts[1]); + } + } + + private void checkDeleteResult(DeleteRequest request, + DeleteResult result, + boolean shouldSucceed, + boolean rowPresent, + MapValue expPrevValue, + Version expPrevVersion, + boolean modTimeRecent, + int recordKB) { + + assertEquals("Delete should " + (shouldSucceed ? 
"succeed" : " fail"), + shouldSucceed, result.getSuccess()); + checkExistingValueVersion(request, result, shouldSucceed, rowPresent, + expPrevValue, expPrevVersion); + + checkModTime(result.getExistingModificationTime(), modTimeRecent); + + int[] expCosts = getDeleteReadWriteCost(request, + shouldSucceed, + rowPresent, + recordKB); + + if (onprem == false) { + assertReadKB(result, expCosts[0], true /* isAbsolute */); + assertWriteKB(result, expCosts[1]); + } + } + + private void checkGetResult(GetRequest request, + GetResult result, + boolean rowPresent, + MapValue expValue, + Version expVersion, + boolean modTimeRecent, + int recordKB) { + + + if (rowPresent) { + if (expValue != null) { + assertEquals("Unexpected value", expValue, result.getValue()); + } else { + assertNotNull("Unexpected value", expValue); + } + if (expVersion != null) { + assertArrayEquals("Unexpected version", + expVersion.getBytes(), + result.getVersion().getBytes()); + } else { + assertNotNull("Unexpected version", result.getVersion()); + } + } else { + assertNull("Unexpected value", expValue); + assertNull("Unexpected version", result.getVersion()); + } + + checkModTime(result.getModificationTime(), modTimeRecent); + + final int minRead = getMinRead(); + int expReadKB = rowPresent ? recordKB : minRead; + + if (onprem == false) { + assertReadKB(result, expReadKB, + (request.getConsistencyInternal() == Consistency.ABSOLUTE)); + assertWriteKB(result, 0); + } + } + + private void checkExistingValueVersion(WriteRequest request, + WriteResult result, + boolean shouldSucceed, + boolean rowPresent, + MapValue expPrevValue, + Version expPrevVersion) { + + boolean hasReturnRow = rowPresent; + if (hasReturnRow) { + assertNotNull("PrevValue should be non-null", + result.getExistingValueInternal()); + if (expPrevValue != null) { + assertEquals("Unexpected PrevValue", + expPrevValue, result.getExistingValueInternal()); + } + assertNotNull("PrevVersion should be non-null", + result.getExistingVersionInternal()); + if (expPrevVersion != null) { + assertNotNull(result.getExistingVersionInternal()); + assertArrayEquals("Unexpected PrevVersion", + expPrevVersion.getBytes(), + result.getExistingVersionInternal().getBytes()); + } + } else { + assertNull("PrevValue should be null", + result.getExistingValueInternal()); + assertNull("PrevVersion should be null", + result.getExistingVersionInternal()); + } + } + + @Test + public void testReadQuery() throws InterruptedException { + final String createTable1 = + "create table tjson(id integer, info json, primary key(id))"; + final String createTable2 = + "create table trecord(id integer, " + + "info record(name string, age integer), " + + "primary key(id))"; + + tableOperation(handle, createTable1, new TableLimits(10, 10, 1), + null, TableResult.State.ACTIVE, null); + tableOperation(handle, createTable2, new TableLimits(10, 10, 1), + null, TableResult.State.ACTIVE, null); + + MapValue rowNull = new MapValue() + .put("id", 0) + .put("info", + new MapValue() + .put("name", NullValue.getInstance()) + .put("age", 20)); + MapValue rowJsonNull = new MapValue() + .put("id", 0) + .put("info", + new MapValue() + .put("name", JsonNullValue.getInstance()) + .put("age", 20)); + + MapValue[] rows = new MapValue[] {rowNull, rowJsonNull}; + Map tableExpRows = new HashMap(); + tableExpRows.put("tjson", rowJsonNull); + tableExpRows.put("trecord", rowNull); + + /* + * Put rows with NullValue or JsonNullValue, they should be converted + * to the right value for the target type. 
+ */ + for (Map.Entry e : tableExpRows.entrySet()) { + String table = e.getKey(); + MapValue expRow = e.getValue(); + + for (MapValue row : rows) { + PutRequest putReq = new PutRequest() + .setTableName(table) + .setValue(row) + .setRowMetadata(RM1); + ; + PutResult putRet = handle.put(putReq); + Version pVersion = putRet.getVersion(); + assertNotNull(pVersion); + assertNull(putRet.getExistingRowMetadata()); + + MapValue key = new MapValue().put("id", row.get("id")); + GetRequest getReq = new GetRequest() + .setTableName(table) + .setConsistency(Consistency.ABSOLUTE) + .setKey(key); + GetResult getRet = handle.get(getReq); + assertEquals(expRow, getRet.getValue()); + assertNotNull(getRet.getVersion()); + assertTrue(Arrays.equals(pVersion.getBytes(), + getRet.getVersion().getBytes())); + assertEquals(RM1, getRet.getRowMetadata()); + } + } + + // add rmt field for checking the return of the query + rowNull.put("rmt", 1); + rowJsonNull.put("rmt", 1); + + /* + * Query with variable for json field and set NullValue or + * JsonNullValue to variable, the NullValue is expected to be converted + * to JsonNullValue. + */ + String query = "declare $name json;" + + "select id, info, row_metadata($t).n as rmt from tjson $t " + + "where $t.info.name = $name"; + PrepareRequest prepReq = new PrepareRequest().setStatement(query); + PrepareResult prepRet = handle.prepare(prepReq); + PreparedStatement prepStmt = prepRet.getPreparedStatement(); + + prepStmt.setVariable("$name", JsonNullValue.getInstance()); + QueryRequest queryReq = new QueryRequest() + .setPreparedStatement(prepStmt); + + boolean shouldRetry = false; + do { + try { + QueryResult queryRet = handle.query(queryReq); + assertEquals(1, queryRet.getResults().size()); + assertEquals(rowJsonNull, queryRet.getResults().get(0)); + + prepStmt.setVariable("$name", NullValue.getInstance()); + queryRet = handle.query(queryReq); + assertEquals(0, queryRet.getResults().size()); + } catch (SystemException e) { + shouldRetry = e.okToRetry(); + System.out.println("Caught " + (e.okToRetry() ? 
"retryable" : + "") + " ex: " + e.getMessage()); + System.out.println( + "Retrying query: " + queryReq.getStatement()); + e.printStackTrace(); + Thread.sleep(500); + } + } while (shouldRetry); + } + + @Test + public void testTableMultiWrite() { + final String createTable = + "create table tMW(s integer, id integer, info json, primary key(shard(s), id))"; + + tableOperation(handle, createTable, new TableLimits(10, 10, 1), + null, TableResult.State.ACTIVE, null); + + /* multi write */ + WriteMultipleRequest wmReq = new WriteMultipleRequest(); + String tableName = "tMW"; + + for (int i = 0; i < 10; i++) { + PutRequest pr = new PutRequest() + .setTableName(tableName) + .setRowMetadata("{\"n\":" + i + "}") + .setValue(new MapValue() + .put("s", 1) + .put("id", i) + .put("info", new MapValue().put("name", "John"))); + wmReq.add(pr, true); + } + + WriteMultipleResult wmRes = handle.writeMultiple(wmReq); + assertEquals(10, wmRes.getResults().size()); + + + QueryRequest queryReq = new QueryRequest() + .setStatement("select s, id, $t.info.name as name, row_metadata($t) as rmt from " + + tableName + " $t ORDER BY id ASC"); + QueryResult qRes = handle.query(queryReq); + + int i = 0; + for (MapValue v : qRes.getResults()) { + assertEquals(1, v.get("s").asInteger().getInt()); + assertEquals(i, v.get("id").asInteger().getInt()); + assertEquals("John", v.get("name").asString().getString()); + assertTrue(v.get("rmt").isMap()); + assertTrue(v.get("rmt").asMap().get("n").isInteger()); + assertEquals(i, v.get("rmt").asMap().get("n").asInteger().getInt()); + i++; + } + assertEquals(10, qRes.getResults().size()); + assertEquals(10, i); + + // do a multi delete operation + MultiDeleteRequest multiDeleteReq = new MultiDeleteRequest() + .setTableName(tableName) + .setKey(new MapValue().put("s", 1)) + .setRange(new FieldRange("id") + .setStart(new IntegerValue(0), true) + .setEnd(new IntegerValue(5), false)) + .setRowMetadata(RM1); + MultiDeleteResult multiDeleteRes = handle.multiDelete(multiDeleteReq); + + assertEquals(5, multiDeleteRes.getNumDeletions()); + + qRes = handle.query(queryReq); + assertEquals(5, qRes.getResults().size()); + } + + @Test + public void testNullJsonNull() throws InterruptedException { + final String createTable1 = + "create table tjson(id integer, info json, primary key(id))"; + final String createTable2 = + "create table trecord(id integer, " + + "info record(name string, age integer), " + + "primary key(id))"; + + tableOperation(handle, createTable1, new TableLimits(10, 10, 1), + null, TableResult.State.ACTIVE, null); + tableOperation(handle, createTable2, new TableLimits(10, 10, 1), + null, TableResult.State.ACTIVE, null); + + MapValue rowNull = new MapValue() + .put("id", 0) + .put("info", + new MapValue() + .put("name", NullValue.getInstance()) + .put("age", 20)); + MapValue rowJsonNull = new MapValue() + .put("id", 0) + .put("info", + new MapValue() + .put("name", JsonNullValue.getInstance()) + .put("age", 20)); + + MapValue[] rows = new MapValue[] {rowNull, rowJsonNull}; + Map tableExpRows = new HashMap(); + tableExpRows.put("tjson", rowJsonNull); + tableExpRows.put("trecord", rowNull); + + /* + * Put rows with NullValue or JsonNullValue, they should be converted + * to the right value for the target type. 
+ */ + for (Map.Entry e : tableExpRows.entrySet()) { + String table = e.getKey(); + MapValue expRow = e.getValue(); + + for (MapValue row : rows) { + PutRequest putReq = new PutRequest() + .setTableName(table) + .setValue(row); + ; + PutResult putRet = handle.put(putReq); + Version pVersion = putRet.getVersion(); + assertNotNull(pVersion); + + MapValue key = new MapValue().put("id", row.get("id")); + GetRequest getReq = new GetRequest() + .setTableName(table) + .setConsistency(Consistency.ABSOLUTE) + .setKey(key); + GetResult getRet = handle.get(getReq); + assertEquals(expRow, getRet.getValue()); + assertNotNull(getRet.getVersion()); + assertTrue(Arrays.equals(pVersion.getBytes(), + getRet.getVersion().getBytes())); + assertNull(getRet.getRowMetadata()); + } + } + + /* + * Query with variable for json field and set NullValue or + * JsonNullValue to variable, the NullValue is expected to be converted + * to JsonNullValue. + */ + String query = "declare $name json;" + + "select id, info, row_metadata($t) as rmt from tjson $t " + + "where $t.info.name = $name"; + PrepareRequest prepReq = new PrepareRequest().setStatement(query); + PrepareResult prepRet = handle.prepare(prepReq); + PreparedStatement prepStmt = prepRet.getPreparedStatement(); + + prepStmt.setVariable("$name", JsonNullValue.getInstance()); + QueryRequest queryReq = new QueryRequest() + .setPreparedStatement(prepStmt); + + rowNull.put("rmt", JsonNullValue.getInstance()); + rowJsonNull.put("rmt", JsonNullValue.getInstance()); + + boolean shouldRetry = false; + do { + try { + QueryResult queryRet = handle.query(queryReq); + assertEquals(1, queryRet.getResults().size()); + assertEquals(rowJsonNull, queryRet.getResults().get(0)); + + prepStmt.setVariable("$name", NullValue.getInstance()); + queryRet = handle.query(queryReq); + assertEquals(0, queryRet.getResults().size()); + } catch (SystemException e) { + shouldRetry = e.okToRetry(); + System.out.println("Caught " + (e.okToRetry() ? 
"retryable" : "") + " ex: " + e.getMessage()); + System.out.println("Retrying query: " + queryReq.getStatement()); + e.printStackTrace(); + Thread.sleep(500); + } + } while (shouldRetry); + } + + private String genString(int length) { + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < length; i++) { + sb.append((char)('A' + i % 26)); + } + return sb.toString(); + } + + @Test + public void testCollection() { + final String tableName = "testusersColl"; + + /* Create a table */ + TableResult tres = tableOperation( + handle, + "create table if not exists " + tableName + + "(id integer, primary key(id)) as json collection", + new TableLimits(500, 500, 50), + 20000); + assertEquals(TableResult.State.ACTIVE, tres.getTableState()); + + MapValue value = new MapValue().put("id", 10).put("name", "John"); + + /* Put row without mentioning row metadata */ + PutRequest putReq = new PutRequest() + .setValue(value) + .setTableName(tableName); + PutResult putRes = handle.put(putReq); + assertNull(putRes.getExistingRowMetadata()); + + /* Get the row back no row metadata expected */ + GetRequest getReq = new GetRequest() + .setTableName(tableName) + .setKey(value); + GetResult getRet = handle.get(getReq); + assertNull(getRet.getRowMetadata()); + + + /* Put row with row metadata */ + putReq = new PutRequest() + .setValue(value) + .setTableName(tableName) + .setRowMetadata(RM1); + putRes = handle.put(putReq); + assertNull(putRes.getExistingRowMetadata()); + + /* Get the row back check there is the expected row metadata */ + getReq = new GetRequest() + .setTableName(tableName) + .setKey(value); + getRet = handle.get(getReq); + assertEquals(RM1, getRet.getRowMetadata()); + + + /* Put row without row metadata */ + putReq = new PutRequest() + .setValue(value) + .setTableName(tableName) + .setReturnRow(true) + .setRowMetadata(null); + putRes = handle.put(putReq); + assertEquals(RM1, putRes.getExistingRowMetadata()); + + /* Get the row back check row metadata is null */ + getReq = new GetRequest() + .setTableName(tableName) + .setKey(value); + getRet = handle.get(getReq); + assertNull(getRet.getRowMetadata()); + + + /* Delete row check prev/existing is still null */ + DeleteRequest delReq = new DeleteRequest() + .setKey(value) + .setTableName(tableName) + .setReturnRow(true); + DeleteResult delRes = handle.delete(delReq); + assertNull(delRes.getExistingRowMetadata()); + + + /* Query */ + putReq = new PutRequest() + .setValue(value) + .setTableName(tableName) + .setRowMetadata(RM1); + putRes = handle.put(putReq); + assertNull(putRes.getExistingRowMetadata()); + + QueryRequest queryReq = new QueryRequest() + .setStatement("select id, name, row_metadata($t) as rmt from " + + tableName + " $t"); + QueryResult qRes = handle.query(queryReq); + + assertEquals(1, qRes.getResults().size()); + assertEquals(10, qRes.getResults().get(0).get("id").getInt()); + assertEquals("John", qRes.getResults().get(0).get("name").getString()); + assertEquals(RM1, qRes.getResults().get(0).get("rmt").toJson()); + assertTrue(qRes.getResults().get(0).get("rmt").isMap()); + assertTrue(qRes.getResults().get(0).get("rmt").asMap().get("n").isInteger()); + assertEquals(1, qRes.getResults().get(0).get("rmt").asMap().get("n").asInteger().getInt()); + } + + @Test + public void testCollectionMultiWrite() { + final String tableName = "testusersCollMWrite"; + + /* Create a table */ + TableResult tres = tableOperation( + handle, + "create table if not exists " + tableName + + "(s integer, id integer, primary key(shard(s), id)) as json 
collection", + new TableLimits(500, 500, 50), + 20000); + assertEquals(TableResult.State.ACTIVE, tres.getTableState()); + + /* multi write */ + WriteMultipleRequest wmReq = new WriteMultipleRequest(); + + for (int i = 0; i < 10; i++) { + PutRequest pr = new PutRequest() + .setTableName(tableName) + .setRowMetadata("{\"n\":" + i + "}") + .setValue(new MapValue() + .put("s", 1) + .put("id", i) + .put("name", "John")); + wmReq.add(pr, true); + } + WriteMultipleResult wmRes = handle.writeMultiple(wmReq); + assertEquals(10, wmRes.getResults().size()); + + /* query read metadata */ + QueryRequest queryReq = new QueryRequest() + .setStatement("select s, id, name, row_metadata($t) as rmt from " + + tableName + " $t ORDER BY id ASC"); + QueryResult qRes = handle.query(queryReq); + + int i = 0; + for (MapValue v : qRes.getResults()) { + assertEquals(1, v.get("s").asInteger().getInt()); + assertEquals(i, v.get("id").asInteger().getInt()); + assertEquals("John", v.get("name").asString().getString()); + assertTrue(v.get("rmt").isMap()); + assertTrue(v.get("rmt").asMap().get("n").isInteger()); + assertEquals(i, v.get("rmt").asMap().get("n").asInteger().getInt()); + i++; + } + assertEquals(10, qRes.getResults().size()); + assertEquals(10, i); + } + + @Test + public void testWriteQuery() throws InterruptedException { + final String tableName = "t"; + final String createTable1 = + "create table "+ tableName +" (s integer, id integer, info json, primary key(shard(s), id))"; + + tableOperation(handle, createTable1, new TableLimits(10, 10, 1), + null, TableResult.State.ACTIVE, null); + + // do a few inserts + String query = "declare $id integer; insert into " + tableName + " values( 0, $id, {})"; + for (int i = 0; i < 10; i++) { + PrepareRequest prepReq = new PrepareRequest().setStatement(query); + PrepareResult prepRet = handle.prepare(prepReq); + + PreparedStatement prepStmt = prepRet.getPreparedStatement(); + prepStmt.setVariable("$id", new IntegerValue(i)); + + QueryRequest queryReq = new QueryRequest() + .setPreparedStatement(prepStmt) + .setRowMetadata(RM1); + + QueryResult queryRes = handle.query(queryReq); + assertNotNull(queryRes); + assertEquals(1, queryRes.getResults().get(0).asMap().get("NumRowsInserted").asInteger().getInt()); + } + + // check they have the correct row metadata + query = + "select $t.s, $t.id, $t.info, row_metadata($t) as rmt from " + + tableName + " $t order by $t.id"; + PrepareRequest prepReq = new PrepareRequest().setStatement(query); + PrepareResult prepRet = handle.prepare(prepReq); + PreparedStatement prepStmt = prepRet.getPreparedStatement(); + + QueryRequest queryReq = new QueryRequest() + .setPreparedStatement(prepStmt); + + QueryResult qRes = handle.query(queryReq); + int i = 0; + for (MapValue v : qRes.getResults()) { + assertEquals(0, v.get("s").asInteger().getInt()); + assertEquals(i, v.get("id").asInteger().getInt()); + assertEquals(RM1, v.get("rmt").toJson()); + i++; + } + assertEquals(10, i); + + + // update many + query = "update " + tableName + " t SET t.info = t.info where t.s = 0"; + + queryReq = new QueryRequest() + .setStatement(query) + .setRowMetadata(RM2); + + qRes = handle.query(queryReq); + assertEquals(1, qRes.getResults().size()); + assertEquals(10, qRes.getResults().get(0).asMap().get("NumRowsUpdated").asInteger().getInt()); + + + // check they have the correct row metadata + query = + "select $t.s, $t.id, $t.info, row_metadata($t) as rmt from " + + tableName + " $t order by $t.id"; + prepReq = new PrepareRequest().setStatement(query); + prepRet = 
handle.prepare(prepReq); + prepStmt = prepRet.getPreparedStatement(); + + queryReq = new QueryRequest() + .setPreparedStatement(prepStmt); + + qRes = handle.query(queryReq); + i = 0; + for (MapValue v : qRes.getResults()) { + assertEquals(0, v.get("s").asInteger().getInt()); + assertEquals(i, v.get("id").asInteger().getInt()); + assertEquals(RM2, v.get("rmt").toJson()); + i++; + } + assertEquals(10, i); + } + + @Test + public void testWriteQueryCollection() throws InterruptedException { + final String tableName = "t"; + final String createTable1 = + "create table "+ tableName +" (s integer, id integer, primary key(shard(s), id)) as json collection"; + + tableOperation(handle, createTable1, new TableLimits(10, 10, 1), + null, TableResult.State.ACTIVE, null); + + // do a few inserts + String query = "declare $id integer; insert into " + tableName + " values( 0, $id, {\"info\":1})"; + for (int i = 0; i < 10; i++) { + PrepareRequest prepReq = new PrepareRequest().setStatement(query); + PrepareResult prepRet = handle.prepare(prepReq); + + PreparedStatement prepStmt = prepRet.getPreparedStatement(); + prepStmt.setVariable("$id", new IntegerValue(i)); + + QueryRequest queryReq = new QueryRequest() + .setPreparedStatement(prepStmt) + .setRowMetadata(RM1); + + QueryResult queryRes = handle.query(queryReq); + assertNotNull(queryRes); + assertEquals(1, queryRes.getResults().get(0).asMap().get("NumRowsInserted").asInteger().getInt()); + } + + // check they have the correct row metadata + query = + "select $t.s, $t.id, $t.info, row_metadata($t) as rmt from " + + tableName + " $t order by $t.id"; + PrepareRequest prepReq = new PrepareRequest().setStatement(query); + PrepareResult prepRet = handle.prepare(prepReq); + PreparedStatement prepStmt = prepRet.getPreparedStatement(); + + QueryRequest queryReq = new QueryRequest() + .setPreparedStatement(prepStmt); + + QueryResult qRes = handle.query(queryReq); + int i = 0; + for (MapValue v : qRes.getResults()) { + assertEquals(0, v.get("s").asInteger().getInt()); + assertEquals(i, v.get("id").asInteger().getInt()); + assertEquals(RM1, v.get("rmt").toJson()); + i++; + } + assertEquals(10, i); + + + // update many + query = "update " + tableName + " t SET t.info=3 where t.s = 0"; + + queryReq = new QueryRequest() + .setStatement(query) + .setRowMetadata(RM2); + + qRes = handle.query(queryReq); + assertEquals(1, qRes.getResults().size()); + assertEquals(10, qRes.getResults().get(0).asMap().get("NumRowsUpdated").asInteger().getInt()); + + + // check they have the correct row metadata + query = + "select $t.s, $t.id, $t.info, row_metadata($t) as rmt from " + + tableName + " $t order by $t.id"; + prepReq = new PrepareRequest().setStatement(query); + prepRet = handle.prepare(prepReq); + prepStmt = prepRet.getPreparedStatement(); + + queryReq = new QueryRequest() + .setPreparedStatement(prepStmt); + + qRes = handle.query(queryReq); + i = 0; + for (MapValue v : qRes.getResults()) { + assertEquals(0, v.get("s").asInteger().getInt()); + assertEquals(i, v.get("id").asInteger().getInt()); + assertEquals(RM2, v.get("rmt").toJson()); + i++; + } + assertEquals(10, i); + } + + @Test + public void testValidJSON() { + // invalid setRowMetadata values + String[] invalidJsons = new String[] { + "custom metadata", + "", + " ", + "\n", + "\t", + "'abc'", + "{{}}", + "{}{}", + "{}\n{}", + "{} {}", + "{},{}", + "{\"a\":'c'}", // single quoted string + "{'a':1}", // single quoted string + "NULL", + "Null", + "True", + "FALSE", + "{\"a\":\"Invalid string \u0000\"", + "\"abc\"\"def\"", + 
"1true2null", + "1,2,3", + "[][]", + "INF", // ??? since -INF is allowed + "Inf", + "-Inf", + "NAN", + "Not-A-Number", + }; + + for (String invalidJson : invalidJsons) { + assertThrows(IllegalArgumentException.class, () -> + JsonUtils.validateJsonConstruct(invalidJson)); + assertThrows(IllegalArgumentException.class, () -> + new PutRequest().setRowMetadata(invalidJson)); + assertThrows(IllegalArgumentException.class, () -> + new DeleteRequest().setRowMetadata(invalidJson)); + assertThrows(IllegalArgumentException.class, () -> + new QueryRequest().setRowMetadata(invalidJson)); + assertThrows(IllegalArgumentException.class, () -> + new MultiDeleteRequest().setRowMetadata(invalidJson)); + } + + // valid values for setRowMetadata + String[] validValues = new String[] { + null, + "{}", + "{\"a\":1}", + "{\"a\":2, \"b\":\"a\"}", + "{\"a\":[]}", + "{\"a\":[1, 2, 3]}", + " { } ", + "\n{\n}\n", + " \t\n{ \n \t } ", + "{\"a\": { \"b\":\"a\"}}", + "{\"a\":{ \"b\":{}}}", + "{\"a\":1}", + "{\"a\":true}", + "{\"a\":[null,1,\"c\", true, [[], {}, null]]}", + "\"abc\"", + "\"\"", + "123", + "123.456", + "null", + "true", + "false", + "[]", + "[1, \"s\", true]", + + // Non-numerical numbers are allowed + "NaN", + "{\"a\":NaN}", + "-INF", + "Infinity", + "-Infinity" + }; + + for (String v : validValues) { + new PutRequest().setRowMetadata(v); + new DeleteRequest().setRowMetadata(v); + new QueryRequest().setRowMetadata(v); + new MultiDeleteRequest().setRowMetadata(v); + } + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/SerializationTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/SerializationTest.java new file mode 100644 index 00000000..5d413fdb --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/SerializationTest.java @@ -0,0 +1,2295 @@ +/*- + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * See the file LICENSE for redistribution information. 
+ * + */ + +package oracle.nosql.proxy; + +import static oracle.nosql.proxy.ProxySerialization.readFieldValue; +import static oracle.nosql.proxy.ProxySerialization.writeFieldValue; +import static oracle.nosql.proxy.protocol.BinaryProtocol.TYPE_ARRAY; +import static oracle.nosql.proxy.protocol.BinaryProtocol.TYPE_BINARY; +import static oracle.nosql.proxy.protocol.BinaryProtocol.TYPE_BOOLEAN; +import static oracle.nosql.proxy.protocol.BinaryProtocol.TYPE_DOUBLE; +import static oracle.nosql.proxy.protocol.BinaryProtocol.TYPE_EMPTY; +import static oracle.nosql.proxy.protocol.BinaryProtocol.TYPE_INTEGER; +import static oracle.nosql.proxy.protocol.BinaryProtocol.TYPE_JSON_NULL; +import static oracle.nosql.proxy.protocol.BinaryProtocol.TYPE_LONG; +import static oracle.nosql.proxy.protocol.BinaryProtocol.TYPE_MAP; +import static oracle.nosql.proxy.protocol.BinaryProtocol.TYPE_NULL; +import static oracle.nosql.proxy.protocol.BinaryProtocol.TYPE_NUMBER; +import static oracle.nosql.proxy.protocol.BinaryProtocol.TYPE_STRING; +import static oracle.nosql.proxy.protocol.BinaryProtocol.TYPE_TIMESTAMP; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.IOException; +import java.math.BigDecimal; +import java.math.BigInteger; +import java.sql.Timestamp; +import java.util.Arrays; +import java.util.Random; +import java.util.UUID; + +import oracle.kv.Key; +import oracle.kv.Value; +import oracle.kv.ValueVersion; +import oracle.kv.Version; +import oracle.kv.impl.api.table.FieldDefFactory; +import oracle.kv.impl.api.table.IndexImpl; +import oracle.kv.impl.api.table.NumberValueImpl; +import oracle.kv.impl.api.table.RowImpl; +import oracle.kv.impl.api.table.TableBuilder; +import oracle.kv.impl.api.table.TableImpl; +import oracle.kv.impl.api.table.TimestampValueImpl; +import oracle.kv.impl.api.table.ValueReader; +import oracle.kv.impl.api.table.ValueSerializer.RowSerializer; +import oracle.kv.impl.topo.RepNodeId; +import oracle.kv.table.ArrayValue; +import oracle.kv.table.FieldDef; +import oracle.kv.table.FieldValue; +import oracle.kv.table.IndexKey; +import oracle.kv.table.MapValue; +import oracle.kv.table.RecordValue; +import oracle.kv.table.Row; +import oracle.nosql.driver.ops.serde.BinaryProtocol; +import oracle.nosql.driver.values.IntegerValue; +import oracle.nosql.driver.values.JsonUtils; +import oracle.nosql.driver.values.NullValue; +import oracle.nosql.proxy.ProxySerialization.RowReaderImpl; +import oracle.nosql.proxy.ValueSerializer.RowSerializerImpl; +import oracle.nosql.proxy.util.TestBase; +import oracle.nosql.proxy.protocol.ByteInputStream; +import oracle.nosql.proxy.protocol.ByteOutputStream; + +import org.junit.Test; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.util.internal.logging.InternalLoggerFactory; +import io.netty.util.internal.logging.JdkLoggerFactory; + +/** + * Test serialization and deserialization on FieldValue on both driver and + * proxy side. 
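+ * Roughly, each case serializes a driver-side FieldValue, deserializes it on
+ * the proxy side against a KV FieldDef, and checks that an equal value comes
+ * back (see roundTrip and doWriteToProxy below).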
+ */ +public class SerializationTest extends TestBase { + + /* Some const values for testing */ + private static final oracle.nosql.driver.values.NullValue nullValue = + oracle.nosql.driver.values.NullValue.getInstance(); + + private static final oracle.nosql.driver.values.JsonNullValue jsonNull = + oracle.nosql.driver.values.JsonNullValue.getInstance(); + + private static final oracle.nosql.driver.values.EmptyValue emptyValue = + oracle.nosql.driver.values.EmptyValue.getInstance(); + + private static final oracle.nosql.driver.values.BooleanValue trueValue = + oracle.nosql.driver.values.BooleanValue.trueInstance(); + + private static final oracle.nosql.driver.values.BinaryValue binaryValue = + new oracle.nosql.driver.values.BinaryValue(new byte[]{(byte)0x0}); + + private static final oracle.nosql.driver.values.DoubleValue doubleValue = + new oracle.nosql.driver.values.DoubleValue(1.234567E6d); + + private static final oracle.nosql.driver.values.IntegerValue intValue = + new oracle.nosql.driver.values.IntegerValue(9999999); + + private static final oracle.nosql.driver.values.LongValue longValue = + new oracle.nosql.driver.values.LongValue(1000000000L); + + private static final oracle.nosql.driver.values.StringValue stringValue = + new oracle.nosql.driver.values.StringValue("oracle nosql"); + + private static final oracle.nosql.driver.values.TimestampValue + timestampValue = new oracle.nosql.driver.values.TimestampValue(0); + + private static final oracle.nosql.driver.values.NumberValue numberValue = + new oracle.nosql.driver.values.NumberValue(BigDecimal.ZERO); + + static { + /* Configures Netty logging to use JDK logger */ + InternalLoggerFactory.setDefaultFactory(JdkLoggerFactory.INSTANCE); + } + + private final ByteBuf testBuf = Unpooled.buffer(); + + /** + * Test simple type values + */ + @Test + public void testSimpleTypeValues() { + + oracle.nosql.driver.values.FieldValue driverValue; + + /* Null */ + driverValue = nullValue; + roundTrip(driverValue, FieldDefFactory.createStringDef()); + + /* JsonNull */ + driverValue = jsonNull; + roundTrip(driverValue, FieldDefFactory.createJsonDef()); + + /* Binary */ + byte[][] bytesArray = new byte[][]{ + new byte[0], + new byte[]{(byte)0, (byte)0}, + genByteArray(1024) + }; + for (byte[] bytes : bytesArray) { + roundTrip(new oracle.nosql.driver.values.BinaryValue(bytes), + FieldDefFactory.createBinaryDef()); + } + + /* FixedBinary */ + roundTrip(new oracle.nosql.driver.values.BinaryValue(genByteArray(32)), + FieldDefFactory.createFixedBinaryDef(32)); + + /* Boolean */ + boolean[] booleans = new boolean[] {true, false}; + for (boolean val : booleans) { + roundTrip(oracle.nosql.driver.values.BooleanValue.getInstance(val), + FieldDefFactory.createBooleanDef()); + } + + /* Integer */ + int[] ints = new int[] { + Integer.MIN_VALUE, Integer.MAX_VALUE, 0, -123456789, 123456789 + }; + for (int val : ints) { + roundTrip(new oracle.nosql.driver.values.IntegerValue(val), + FieldDefFactory.createIntegerDef()); + } + + /* Long */ + long[] longs = new long[] { + Long.MIN_VALUE, Long.MAX_VALUE, 0L, -1234567890123456789L, + 1234567890123456789L + }; + for (long val : longs) { + roundTrip(new oracle.nosql.driver.values.LongValue(val), + FieldDefFactory.createLongDef()); + } + + /* Float */ + float[] floats = new float[] { + Float.MIN_VALUE, Float.MAX_VALUE, 0.0f, -1.1231421f, 132124.1f + }; + for (float val : floats) { + roundTrip(new oracle.nosql.driver.values.DoubleValue(val), + FieldDefFactory.createFloatDef()); + } + + /* Double */ + double[] doubles = new 
double[] { + Double.MIN_VALUE, Double.MAX_VALUE, 0.0d, -1.1231421132132132d, + 132124.132132132132d + }; + for (double val : doubles) { + roundTrip(new oracle.nosql.driver.values.DoubleValue(val), + FieldDefFactory.createDoubleDef()); + } + + /* String */ + String[] strings = new String[] {"", genString(10), genString(1024)}; + for (String val : strings) { + roundTrip(new oracle.nosql.driver.values.StringValue(val), + FieldDefFactory.createStringDef()); + } + + /* Enum */ + final String[] enumValues = new String[]{"red", "yellow", "blue"}; + for (String val : enumValues) { + roundTrip(new oracle.nosql.driver.values.StringValue(val), + FieldDefFactory.createEnumDef(enumValues)); + } + + /* Timestamp */ + String datetime = "2017-07-15T15:18:59"; + for (int i = 0; i <= 9; i++) { + String val = datetime; + if (i > 0) { + val += new String(".123456789").substring(0, i + 1); + } + roundTrip(new oracle.nosql.driver.values.TimestampValue(val), + FieldDefFactory.createTimestampDef(i)); + } + + /* Number */ + for (int ival : ints) { + BigDecimal val = BigDecimal.valueOf(ival); + roundTrip(new oracle.nosql.driver.values.NumberValue(val), + FieldDefFactory.createNumberDef()); + } + + for (long lval : longs) { + BigDecimal val = BigDecimal.valueOf(lval); + roundTrip(new oracle.nosql.driver.values.NumberValue(val), + FieldDefFactory.createNumberDef()); + } + + for (float fval : floats) { + BigDecimal val = BigDecimal.valueOf(fval); + roundTrip(new oracle.nosql.driver.values.NumberValue(val), + FieldDefFactory.createNumberDef()); + } + + for (double dval : doubles) { + BigDecimal val = BigDecimal.valueOf(dval); + roundTrip(new oracle.nosql.driver.values.NumberValue(val), + FieldDefFactory.createNumberDef()); + } + + BigDecimal[] decs = new BigDecimal [] { + BigDecimal.ZERO, + new BigDecimal("1.23456789E+1024"), + new BigDecimal(new BigInteger("9999999999"), Integer.MIN_VALUE + 10), + new BigDecimal(new BigInteger("9999999999"), Integer.MAX_VALUE) + }; + + for (BigDecimal val : decs) { + roundTrip(new oracle.nosql.driver.values.NumberValue(val), + FieldDefFactory.createNumberDef()); + } + } + + /* + * Test on compatible types from Driver to proxy + * Driver type KV type + * ----------- ------- + * Long/Double/Decimal/String Integer + * Integer/Double/Decimal/String Long + * Integer/Long/Double/Decimal/String Float + * Integer/Long/Decimal/String Double + * Integer/Long/Double/String Number + * String Boolean + */ + @Test + public void testCompatibleTypes() + throws Exception { + + oracle.kv.table.FieldDef def; + + Random rand = new Random(System.currentTimeMillis()); + + /* + * KV type: Integer + * Driver values: Long/Double/Decimal/String + */ + def = FieldDefFactory.createIntegerDef(); + + /* long to int */ + long[] longToInts = new long[] { + Integer.MAX_VALUE, + Integer.MIN_VALUE, + rand.nextInt() + }; + for (long val : longToInts) { + doWriteToProxy(new oracle.nosql.driver.values.LongValue(val), + def, def.createInteger((int)val)); + } + + /* double to int */ + double[] doubleToInts = new double[] { + Integer.MAX_VALUE, + Integer.MIN_VALUE, + 1.23456789E8d + }; + for (double val : doubleToInts) { + doWriteToProxy(new oracle.nosql.driver.values.DoubleValue(val), + def, def.createInteger((int)val)); + } + + /* decimal to int */ + int[] ints = new int[] { + Integer.MAX_VALUE, + Integer.MIN_VALUE, + rand.nextInt() + }; + for (int val : ints) { + doWriteToProxy( + new oracle.nosql.driver.values.NumberValue( + BigDecimal.valueOf(val)), def, def.createInteger(val)); + } + + /* string to int */ + for (int 
val : ints) { + doWriteToProxy( + new oracle.nosql.driver.values.StringValue(String.valueOf(val)), + def, def.createInteger(val)); + } + + /* + * KV type: Long + * Driver values: Integer/Double/Decimal/String + */ + def = FieldDefFactory.createLongDef(); + + /* int to long */ + int[] intToLongs = new int[] { + Integer.MAX_VALUE, + Integer.MIN_VALUE, + rand.nextInt() + }; + for (int val : intToLongs) { + doWriteToProxy(new oracle.nosql.driver.values.IntegerValue(val), + def, def.createLong(val)); + } + + /* double to long */ + double[] doubleToLongs = new double[] { + -9.007199254740992E15, + 9.007199254740992E15, + 1.234567890123456E15d + }; + for (double val : doubleToLongs) { + doWriteToProxy(new oracle.nosql.driver.values.DoubleValue(val), + def, def.createLong((long)val)); + } + + /* decimal to longs */ + long[] longs = new long[] { + Long.MAX_VALUE, + Long.MIN_VALUE, + 0L, + rand.nextLong() + }; + for (long val : longs) { + doWriteToProxy( + new oracle.nosql.driver.values.NumberValue( + BigDecimal.valueOf(val)), def, def.createLong(val)); + } + + /* string to long */ + for (long val : longs) { + doWriteToProxy( + new oracle.nosql.driver.values.StringValue(String.valueOf(val)), + def, def.createLong(val)); + } + + /* + * KV type: Float + * Driver values: Integer/Long/Double/Decimal/String + */ + def = FieldDefFactory.createFloatDef(); + + /* int to float */ + int[] intToFloats = new int[] { + -10000000, 0, 10000000, Integer.MAX_VALUE, Integer.MIN_VALUE + }; + for (int val : intToFloats) { + doWriteToProxy(new oracle.nosql.driver.values.IntegerValue(val), + def, def.createFloat(val)); + } + + /* long to float */ + long[] longToFloats = new long[] { + -10000000L, 0, 10000000L, Long.MIN_VALUE, Long.MAX_VALUE + }; + for (long val : longToFloats) { + doWriteToProxy(new oracle.nosql.driver.values.LongValue(val), + def, def.createFloat(val)); + } + + /* double to float */ + float[] floats = new float[] { + Float.MAX_VALUE, -Float.MAX_VALUE, + Float.MIN_VALUE, -Float.MIN_VALUE, + rand.nextFloat() + }; + for (float val : floats) { + doWriteToProxy(new oracle.nosql.driver.values.DoubleValue(val), + def, def.createFloat(val)); + } + + /* decimal to float */ + for (float val : floats) { + BigDecimal dec = BigDecimal.valueOf(val); + doWriteToProxy(new oracle.nosql.driver.values.NumberValue(dec), + def, def.createFloat(val)); + } + + /* string to float */ + for (float val : floats) { + String sval = Float.toString(val); + doWriteToProxy(new oracle.nosql.driver.values.StringValue(sval), + def, def.createFloat(val)); + } + + /* + * KV type: Double + * Driver values: Integer/Long/Decimal/String + */ + def = FieldDefFactory.createDoubleDef(); + + /* int to double */ + int[] intToDoubles = new int[] { + -10000000, 0, 10000000, Integer.MAX_VALUE, Integer.MIN_VALUE + }; + for (int val : intToDoubles) { + doWriteToProxy(new oracle.nosql.driver.values.IntegerValue(val), + def, def.createDouble(val)); + } + + /* long to double */ + long[] longToDoubles = new long[] { + -10000000L, 0, 10000000L, Long.MIN_VALUE, Long.MAX_VALUE + }; + for (long val : longToDoubles) { + doWriteToProxy(new oracle.nosql.driver.values.LongValue(val), + def, def.createDouble(val)); + } + + /* decimal to double */ + double[] doubles = new double[] { + Double.MAX_VALUE, -Double.MAX_VALUE, + Double.MIN_VALUE, -Double.MIN_VALUE, + rand.nextDouble() + }; + for (double val : doubles) { + BigDecimal dec = BigDecimal.valueOf(val); + doWriteToProxy(new oracle.nosql.driver.values.NumberValue(dec), + def, def.createDouble(val)); + } + + /* 
string to double */ + for (double val : doubles) { + String sval = Double.toString(val); + doWriteToProxy(new oracle.nosql.driver.values.StringValue(sval), + def, def.createDouble(val)); + } + + /* + * KV type: Number + * Driver values: Integer/Long/Double/String + */ + def = FieldDefFactory.createNumberDef(); + + /* int to number */ + for (int val : ints) { + doWriteToProxy(new oracle.nosql.driver.values.IntegerValue(val), + def, def.createNumber(val)); + } + + /* long to number */ + for (long val : longs) { + doWriteToProxy(new oracle.nosql.driver.values.LongValue(val), + def, def.createNumber(val)); + } + + /* double to number */ + for (double val : doubles) { + doWriteToProxy(new oracle.nosql.driver.values.DoubleValue(val), + def, def.createNumber(val)); + } + + /* string to number */ + String sval = "1.23456789E+1024"; + doWriteToProxy(new oracle.nosql.driver.values.StringValue(sval), def, + def.createNumber(new BigDecimal("1.23456789E+1024"))); + + /* string to boolean */ + def = FieldDefFactory.createBooleanDef(); + sval = "true"; + doWriteToProxy(new oracle.nosql.driver.values.StringValue(sval), + def, def.createBoolean(true)); + + sval = "abc"; + doWriteToProxy(new oracle.nosql.driver.values.StringValue(sval), + def, def.createBoolean(false)); + + /* + * Conversion fails. + */ + + /* + * long/double/decimal to int + */ + + def = FieldDefFactory.createIntegerDef(); + + /* long to int */ + long lval = (long)Integer.MAX_VALUE + 1; + doWriteToProxy(new oracle.nosql.driver.values.LongValue(lval), + def, false /* shoudSucceed */); + + /* double to int */ + double dval = lval; + doWriteToProxy(new oracle.nosql.driver.values.DoubleValue(dval), + def, false /* shoudSucceed */); + + /* decimal to int */ + BigDecimal decVal = BigDecimal.valueOf(lval); + doWriteToProxy(new oracle.nosql.driver.values.NumberValue(decVal), + def, false /* shoudSucceed */); + + /* + * double/decimal to long + */ + + def = FieldDefFactory.createLongDef(); + + /* double to long */ + dval = 9.2233720368547758E10d; + doWriteToProxy(new oracle.nosql.driver.values.DoubleValue(dval), + def, false /* shoudSucceed */); + + /* decimal to long */ + decVal = BigDecimal.valueOf(Long.MAX_VALUE).add(BigDecimal.ONE); + doWriteToProxy(new oracle.nosql.driver.values.NumberValue(decVal), + def, false /* shoudSucceed */); + + /* double to float */ + dval = Double.MAX_VALUE; + doWriteToProxy(new oracle.nosql.driver.values.DoubleValue(dval), + FieldDefFactory.createFloatDef(), + false /* shoudSucceed */); + + /* + * int/long/double/decimal to float + */ + + def = FieldDefFactory.createFloatDef(); + + /* int to float */ + int ival = 123456789; + doWriteToProxy(new oracle.nosql.driver.values.IntegerValue(ival), + def, false /* shoudSucceed */); + + /* long to float */ + lval = 1234567890123456789L; + doWriteToProxy(new oracle.nosql.driver.values.LongValue(lval), + def, false /* shoudSucceed */); + + /* double to float */ + dval = Double.MAX_VALUE; + doWriteToProxy(new oracle.nosql.driver.values.DoubleValue(dval), + def, false /* shoudSucceed */); + + /* decimal to float */ + decVal = BigDecimal.valueOf(123456789); + doWriteToProxy(new oracle.nosql.driver.values.NumberValue(decVal), + def, false /* shoudSucceed */); + + /* + * long/decimal to double + */ + def = FieldDefFactory.createDoubleDef(); + + /* long to double */ + lval = 1234567890123456789L; + doWriteToProxy(new oracle.nosql.driver.values.LongValue(lval), + def, false /* shoudSucceed */); + + /* decimal to double */ + decVal = BigDecimal.valueOf(lval); + doWriteToProxy(new 
oracle.nosql.driver.values.NumberValue(decVal), + def, false /* shoudSucceed */); + + /* Invalid string value for the target type*/ + String[] svals = new String[] {"_foo", "", "abc"}; + for (String val : svals) { + oracle.nosql.driver.values.StringValue value = + new oracle.nosql.driver.values.StringValue(val); + doWriteToProxy(value, FieldDefFactory.createIntegerDef(), + false /* shoudSucceed */); + doWriteToProxy(value, FieldDefFactory.createLongDef(), + false /* shoudSucceed */); + doWriteToProxy(value, FieldDefFactory.createFloatDef(), + false /* shoudSucceed */); + doWriteToProxy(value, FieldDefFactory.createDoubleDef(), + false /* shoudSucceed */); + doWriteToProxy(value, FieldDefFactory.createNumberDef(), + false /* shoudSucceed */); + } + } + + @Test + public void testArrayValue() { + FieldDef arrayDef; + oracle.nosql.driver.values.ArrayValue arrayValue; + int numElements = 3; + + /* Array(String) */ + arrayDef = TableBuilder.createArrayBuilder().addString().build(); + arrayValue = new oracle.nosql.driver.values.ArrayValue(numElements); + for (int i = 0; i < numElements; i++) { + arrayValue.add("name_" + i); + } + roundTrip(arrayValue, arrayDef); + + /* Array(Map(Record(rid Integer, rname String)) */ + FieldDef recordDef = TableBuilder.createRecordBuilder("rec") + .addInteger("rid").addString("rname").build(); + FieldDef mapDef = + TableBuilder.createMapBuilder().addField(recordDef).build(); + arrayDef = TableBuilder.createArrayBuilder().addField(mapDef).build(); + + oracle.nosql.driver.values.MapValue recValue; + oracle.nosql.driver.values.MapValue mapValue; + arrayValue = new oracle.nosql.driver.values.ArrayValue(numElements); + for (int i = 0; i < numElements; i++) { + mapValue = new oracle.nosql.driver.values.MapValue(); + for (int j = 0; j < numElements; j++) { + recValue = new oracle.nosql.driver.values.MapValue(); + recValue.put("rid", j); + recValue.put("rname", "name" + j); + mapValue.put("key" + j, recValue); + } + arrayValue.add(mapValue); + } + roundTrip(arrayValue, arrayDef); + + /* Empty array */ + roundTrip(new oracle.nosql.driver.values.ArrayValue(), arrayDef); + } + + @Test + public void testMapValue() { + FieldDef mapDef; + oracle.nosql.driver.values.MapValue mapValue; + int numElements = 3; + + /* Map(Long) */ + mapDef = TableBuilder.createMapBuilder().addLong().build(); + mapValue = new oracle.nosql.driver.values.MapValue(numElements); + long[] lvals = new long[] { + Long.MIN_VALUE, 0, Long.MAX_VALUE + }; + for (int i = 0; i < lvals.length; i++) { + mapValue.put("key" + i, lvals[i]); + } + roundTrip(mapValue, mapDef); + + /* Map(Array(Record(ri Integer, rf Float)) */ + mapDef = TableBuilder.createMapBuilder().addField + (TableBuilder.createArrayBuilder().addField + (TableBuilder.createRecordBuilder("rec") + .addInteger("ri") + .addFloat("rf") + .build()) + .build()) + .build(); + + float[] fvals = new float[] {Float.MIN_VALUE, 1.23145f, Float.MAX_VALUE}; + oracle.nosql.driver.values.ArrayValue arrayValue; + oracle.nosql.driver.values.MapValue recordValue; + for (int i = 0; i < lvals.length; i++) { + arrayValue = new oracle.nosql.driver.values.ArrayValue(fvals.length); + int j = 0; + for (float fval : fvals) { + recordValue = new oracle.nosql.driver.values.MapValue(); + recordValue.put("ri", j++).put("rf", fval); + arrayValue.add(recordValue); + } + mapValue.put("key" + i, arrayValue); + } + roundTrip(mapValue, mapDef); + + /* Empty map */ + roundTrip(new oracle.nosql.driver.values.MapValue(), mapDef); + } + + @Test + public void testRecordValue() { + FieldDef 
recDef = TableBuilder.createRecordBuilder("rec") + .addInteger("id") + .addBinary("bi") + .addFixedBinary("fbi", 30) + .addBoolean("bl") + .addDouble("d") + .addFloat("f") + .addLong("l") + .addNumber("n") + .addTimestamp("ts", 3) + .addString("s") + .addEnum("enum", new String[]{"red", "yellow", "blue"}, null) + .addJson("json", null) + .addField("array", + TableBuilder.createArrayBuilder().addString().build()) + .addField("map", + TableBuilder.createMapBuilder().addInteger().build()) + .addField("rec", + TableBuilder.createRecordBuilder("rec").addInteger("ri") + .addString("rs").build()) + .build(); + + oracle.nosql.driver.values.ArrayValue arrayValue = + new oracle.nosql.driver.values.ArrayValue(); + for (int i = 0; i < 3; i++) { + arrayValue.add("string" + i); + } + + oracle.nosql.driver.values.MapValue mapValue = + new oracle.nosql.driver.values.MapValue(); + for (int i = 0; i < 3; i++) { + mapValue.put("key" + i, i); + } + + oracle.nosql.driver.values.MapValue recValue = + new oracle.nosql.driver.values.MapValue(); + recValue.put("ri", 0).put("rs", "rs value"); + + oracle.nosql.driver.values.MapValue recordValue = + new oracle.nosql.driver.values.MapValue(); + recordValue.put("id", 1); + recordValue.put("bi", genByteArray(10)); + recordValue.put("fbi", genByteArray(30)); + recordValue.put("bl", true); + recordValue.put("d", 1.2321321321313131d); + recordValue.put("f", 1.2321f); + recordValue.put("l", 1234567890123456L); + recordValue.put("n", BigDecimal.valueOf(1231321321313213L, 10000)); + recordValue.put("ts", + new oracle.nosql.driver.values.TimestampValue( + "2017-08-21T13:34:35.123")); + recordValue.put("s", "this is a string"); + recordValue.put("enum", "blue"); + String json = "{\"a\": 1, \"b\": [23, 50, 60], \"map\":{\"m1\":6}}"; + recordValue.put("json", JsonUtils.createValueFromJson(json, null)); + recordValue.put("array", arrayValue); + recordValue.put("map", mapValue); + recordValue.put("rec", recValue); + roundTrip(recordValue, recDef); + + /* All fields are filled with NullValue */ + recordValue = new oracle.nosql.driver.values.MapValue(); + recordValue.put("id", nullValue); + recordValue.put("bi", nullValue); + recordValue.put("fbi", nullValue); + recordValue.put("bl", nullValue); + recordValue.put("d", nullValue); + recordValue.put("f", nullValue); + recordValue.put("l", nullValue); + recordValue.put("n", nullValue); + recordValue.put("ts", nullValue); + recordValue.put("s", nullValue); + recordValue.put("enum", nullValue); + recordValue.put("json", nullValue); + recordValue.put("array", nullValue); + recordValue.put("map", nullValue); + recordValue.put("rec", nullValue); + roundTrip(recordValue, recDef); + + /* Empty record */ + roundTrip(new oracle.nosql.driver.values.MapValue(), recDef); + } + + @Test + public void testDeserWithValueReader() { + TableImpl table = TableBuilder.createTableBuilder("test") + .addInteger("id") + .addBinary("bi") + .addFixedBinary("fbi", 30) + .addBoolean("bl") + .addDouble("d") + .addFloat("f") + .addLong("l") + .addNumber("n") + .addTimestamp("ts", 3) + .addString("s") + .addEnum("e", new String[]{"red", "yellow", "blue"}, null) + .addJson("json", null) + .addField("as", + TableBuilder.createArrayBuilder().addString().build()) + .addField("mi", + TableBuilder.createMapBuilder().addInteger().build()) + .addField("r", + TableBuilder.createRecordBuilder("r") + .addInteger("ri") + .addString("rs") + .build()) + .addField("a_r_ms", + TableBuilder.createArrayBuilder("a_r_ms") + .addField(TableBuilder.createRecordBuilder("r_ms") + 
.addInteger("ri") + .addField("rms", + TableBuilder.createMapBuilder("rms") + .addString() + .build()) + .build()) + .build()) + .addField("r_m_al", + TableBuilder.createRecordBuilder("r_m_al") + .addInteger("ri") + .addField("m_al", + TableBuilder.createMapBuilder("m_al") + .addField(TableBuilder.createArrayBuilder() + .addLong() + .build()) + .build()) + .build()) + .primaryKey("id") + .buildTable(); + + Row row = table.createRow(); + row.put("id", 0); + row.put("bi", genByteArray(50)); + row.putFixed("fbi", + genByteArray(table.getField("fbi").asFixedBinary().getSize())); + row.put("bl", true); + row.put("d", Double.MAX_VALUE); + row.put("f", Float.MAX_VALUE); + row.put("l", Long.MAX_VALUE); + row.putNumber("n", + new BigDecimal("1234567890123456789012345678901234567890")); + row.put("ts", new Timestamp(System.currentTimeMillis())); + row.put("s", genString(100)); + row.putEnum("e", "blue"); + row.putJson("json", + "{\"a\": 1, \"b\": [23, 50, 60], \"m\":{\"k1\":6,\"k2\":2}, " + + "\"d\": null}"); + + ArrayValue av = row.putArray("as"); + for (int i = 0; i < 3; i++) { + av.add("av_" + i); + } + + MapValue mv = row.putMap("mi"); + for (int i = 0; i < 3; i++) { + mv.put("k" + i, i); + } + + RecordValue rv = row.putRecord("r"); + rv.put("ri", 1); + rv.put("rs", "rs"); + av = row.putArray("a_r_ms"); + for (int i = 0; i < 3; i++) { + rv = av.addRecord(); + rv.put("ri", i); + mv = rv.putMap("rms"); + for (int j = 0; j < 3; j++) { + mv.put("k" + j, "ms_" + j); + } + } + + rv = row.putRecord("r_m_al"); + rv.put("ri", 1); + mv = rv.putMap("m_al"); + for (int i = 0; i < 3; i++) { + av = mv.putArray("k" + i); + for (int j = 0; j < 3; j++) { + av.add((long)j); + } + } + doDeserializeWithValueReader(row); + + row = table.createRow(); + row.put("id", 0); + ((RowImpl)row).addMissingFields(); + doDeserializeWithValueReader(row); + } + + /** + * Test invalid types for each target FieldDef.Type. 
+ */ + @Test + public void testInvalidTypes() { + + final oracle.nosql.driver.values.FieldValue[] values = + new oracle.nosql.driver.values.FieldValue[] { + + new oracle.nosql.driver.values.ArrayValue(), + binaryValue, + trueValue, + doubleValue, + intValue, + longValue, + new oracle.nosql.driver.values.MapValue(), + stringValue, + timestampValue, + numberValue, + jsonNull, + nullValue, + emptyValue + }; + + FieldDef def; + int[] validTypes; + + /* Binary */ + def = FieldDefFactory.createBinaryDef(); + validTypes = new int[] { + TYPE_BINARY, TYPE_JSON_NULL, TYPE_NULL, TYPE_EMPTY, + }; + doWriteValuesToProxy(values, validTypes, def); + + /* Fixed Binary */ + def = FieldDefFactory.createFixedBinaryDef(1); + validTypes = new int[] { + TYPE_BINARY, TYPE_JSON_NULL, TYPE_NULL, TYPE_EMPTY + }; + doWriteValuesToProxy(values, validTypes, def); + + /* Boolean */ + def = FieldDefFactory.createBooleanDef(); + values[TYPE_STRING] = new oracle.nosql.driver.values.StringValue("true"); + validTypes = new int[] { + TYPE_BOOLEAN, TYPE_STRING, TYPE_JSON_NULL, TYPE_NULL, TYPE_EMPTY + }; + doWriteValuesToProxy(values, validTypes, def); + + /* Float */ + def = FieldDefFactory.createFloatDef(); + values[TYPE_STRING] = + new oracle.nosql.driver.values.StringValue("1.2345E-10f"); + validTypes = new int[] { + TYPE_DOUBLE, TYPE_INTEGER, TYPE_LONG, TYPE_STRING, TYPE_NUMBER, + TYPE_JSON_NULL, TYPE_NULL, TYPE_EMPTY + }; + doWriteValuesToProxy(values, validTypes, def); + + /* Double */ + def = FieldDefFactory.createDoubleDef(); + values[TYPE_STRING] = + new oracle.nosql.driver.values.StringValue("1.2345678E100d"); + validTypes = new int[] { + TYPE_DOUBLE, TYPE_INTEGER, TYPE_LONG, TYPE_STRING, TYPE_NUMBER, + TYPE_JSON_NULL, TYPE_NULL, TYPE_EMPTY + }; + doWriteValuesToProxy(values, validTypes, def); + + /* Integer */ + def = FieldDefFactory.createIntegerDef(); + values[TYPE_STRING] = new oracle.nosql.driver.values.StringValue("123"); + validTypes = new int[] { + TYPE_DOUBLE, TYPE_INTEGER, TYPE_LONG, TYPE_STRING, TYPE_NUMBER, + TYPE_JSON_NULL, TYPE_NULL, TYPE_EMPTY + }; + doWriteValuesToProxy(values, validTypes, def); + + /* Long */ + def = FieldDefFactory.createLongDef(); + values[TYPE_STRING] = + new oracle.nosql.driver.values.StringValue("1234567890123"); + validTypes = new int[] { + TYPE_DOUBLE, TYPE_INTEGER, TYPE_LONG, TYPE_STRING, TYPE_NUMBER, + TYPE_JSON_NULL, TYPE_NULL, TYPE_EMPTY + }; + doWriteValuesToProxy(values, validTypes, def); + + /* String */ + def = FieldDefFactory.createStringDef(); + values[TYPE_STRING] = stringValue; + validTypes = new int[] { + TYPE_STRING, TYPE_JSON_NULL, TYPE_NULL, TYPE_EMPTY + }; + doWriteValuesToProxy(values, validTypes, def); + + /* Timestamp */ + def = FieldDefFactory.createTimestampDef(0); + values[TYPE_STRING] = + new oracle.nosql.driver.values.StringValue("1970-01-01"); + validTypes = new int[] { + TYPE_INTEGER, TYPE_LONG, TYPE_STRING, TYPE_TIMESTAMP, + TYPE_JSON_NULL, TYPE_NULL, TYPE_EMPTY + }; + doWriteValuesToProxy(values, validTypes, def); + + /* Number */ + def = FieldDefFactory.createNumberDef(); + values[TYPE_STRING] = + new oracle.nosql.driver.values.StringValue("1.213457E1024"); + validTypes = new int[] { + TYPE_DOUBLE, TYPE_INTEGER, TYPE_LONG, TYPE_STRING, TYPE_NUMBER, + TYPE_JSON_NULL, TYPE_NULL, TYPE_EMPTY + }; + doWriteValuesToProxy(values, validTypes, def); + + /* Json */ + def = FieldDefFactory.createJsonDef(); + values[TYPE_STRING] = stringValue; + validTypes = new int[] { + TYPE_ARRAY, TYPE_BINARY, TYPE_BOOLEAN, TYPE_DOUBLE, TYPE_INTEGER, + TYPE_LONG, 
TYPE_MAP, TYPE_STRING, TYPE_TIMESTAMP, TYPE_NUMBER, + TYPE_JSON_NULL, TYPE_NULL, TYPE_EMPTY + }; + doWriteValuesToProxy(values, validTypes, def); + + /* Array(Integer) */ + def = FieldDefFactory.createArrayDef(FieldDefFactory.createIntegerDef()); + validTypes = new int[] { + TYPE_ARRAY, TYPE_JSON_NULL, TYPE_NULL, TYPE_EMPTY + }; + doWriteValuesToProxy(values, validTypes, def); + + /* Map(Integer) */ + def = FieldDefFactory.createMapDef(FieldDefFactory.createIntegerDef()); + validTypes = new int[] { + TYPE_MAP, TYPE_JSON_NULL, TYPE_NULL, TYPE_EMPTY + }; + doWriteValuesToProxy(values, validTypes, def); + + /* Record */ + def = TableBuilder.createRecordBuilder("rec").addInteger("rid").build(); + validTypes = new int[] { + TYPE_MAP, TYPE_JSON_NULL, TYPE_NULL, TYPE_EMPTY + }; + doWriteValuesToProxy(values, validTypes, def); + } + + /** + * Test Null, Json Null or Empty values + */ + @Test + public void testSpecialValues() throws Exception { + + oracle.nosql.driver.values.MapValue row; + + TableImpl table = TableBuilder.createTableBuilder("jsonTest") + .addInteger("id") + .addString("name") + .addJson("json", null) + .primaryKey("id") + .buildTable(); + + IndexImpl idx1 = new IndexImpl("idx1", table, + Arrays.asList("json.children[].age", "json.address.zipcode", "name"), + Arrays.asList(FieldDef.Type.STRING, FieldDef.Type.STRING, null), + null); + + /* + * Record value: + * { + * "id" : 1, + * "name" : null + * "json" : { + * "address" : { + * "city" : "San Fransisco", + * "state" : "CA", + * "street" : + * }, + * "phones" : ["400-123-4567", , ], + * "children: [ * + * {"name": "Tomy", "school": }, + * + * ] + * "email" : + * }, + * } + */ + row = new oracle.nosql.driver.values.MapValue(); + row.put("id", 1); + + oracle.nosql.driver.values.MapValue jsonValue = + new oracle.nosql.driver.values.MapValue(); + String json = "{\"city\":\"San Fransisco\",\"state\":\"CA\"," + + " \"street\":null}"; + jsonValue.put("address", JsonUtils.createValueFromJson(json, null)) + .put("phones", + new oracle.nosql.driver.values.ArrayValue() + .add("400-123-4567") + .add(jsonNull) + .add(jsonNull)) + .put("children", + new oracle.nosql.driver.values.ArrayValue() + .add(new oracle.nosql.driver.values.MapValue() + .put("name", "Tomy") + .put("school", jsonNull)) + .add(jsonNull)) + .put("email", jsonNull); + row.put("json", jsonValue); + row.put("name", nullValue); + + RecordValue expValue = table.createRowFromJson(row.toJson(), false); + doWriteToProxy(row, expValue.getDefinition(), expValue); + + /* + * Index key record: + * { + * "json.user[].name":, + * "json.address.zipcode":null, + * "name":null + * } + */ + row = new oracle.nosql.driver.values.MapValue(); + row.put("json.children[].age", emptyValue); + row.put("json.address.zipcode", jsonNull); + row.put("name", nullValue); + + IndexKey expKey = idx1.createIndexKey(); + expKey.putEMPTY("json.children[].age"); + expKey.putJsonNull("json.address.zipcode"); + expKey.putNull("name"); + + doWriteToProxy(row, expKey.getDefinition(), expKey.asRecord()); + } + + /** + * Test on Json type value. 
+ */ + @Test + public void testJsonValue() { + FieldDef def = TableBuilder.createRecordBuilder("jsonTest") + .addInteger("id") + .addJson("json", null) + .build(); + + oracle.nosql.driver.values.MapValue record = + new oracle.nosql.driver.values.MapValue(); + oracle.nosql.driver.values.ArrayValue arrayValue = + new oracle.nosql.driver.values.ArrayValue(); + oracle.nosql.driver.values.MapValue mapValue = + new oracle.nosql.driver.values.MapValue(); + + record.put("id", 0); + + /* Simple type values */ + record.put("json", trueValue); + roundTrip(record, def); + + record.put("json", intValue); + roundTrip(record, def); + + record.put("json", longValue); + roundTrip(record, def); + + record.put("json", doubleValue); + roundTrip(record, def); + + record.put("json", stringValue); + roundTrip(record, def); + + record.put("json", numberValue); + roundTrip(record, def); + + record.put("json", nullValue); + roundTrip(record, def); + + record.put("json", jsonNull); + roundTrip(record, def); + + record.put("json", binaryValue); + roundTrip(record, def, + JsonUtils.createValueFromJson(record.toJson(), null)); + + /* Array */ + arrayValue.add(1).add(2).add(3).add(jsonNull); + record.put("json", arrayValue); + roundTrip(record, def); + + oracle.nosql.driver.values.MapValue nestedMap = + new oracle.nosql.driver.values.MapValue(); + nestedMap.put("f", 1.123f).put("bool", false).put("jn", jsonNull); + + /* Map with nested array and map */ + + mapValue.put("s", "string value") + .put("i", 1234) + .put("v", jsonNull) + .put("bi", binaryValue) + .put("array", arrayValue) + .put("map", nestedMap); + record.put("json", mapValue); + roundTrip(record, def, + JsonUtils.createValueFromJson(record.toJson(), null)); + + String complexJson = + "{"+ + "\"id\":0,"+ + "\"record\" : { \"int\" : 20, \"string\" : \"aef\"," + + " \"bool\" : true },"+ + "\"info\":"+ + "{"+ + "\"firstName\":\"first0\", \"lastName\":\"last0\",\"age\":10,"+ + "\"address\":"+ + "{"+ + "\"city\": \"San Fransisco\","+ + "\"state\" : \"CA\","+ + "\"phones\" : [ { \"areacode\" : 408, \"number\" : 50," + + " \"kind\" : \"home\" },"+ + "{ \"areacode\" : 650, \"number\" : 51," + + " \"kind\" : \"work\" },"+ + "{ \"areacode\" : null, \"number\" : 52," + + " \"kind\" : \"home\" },"+ + "{ \"areacode\" : 510, \"number\" : 53," + + " \"kind\" : \"home\" },"+ + "{ \"areacode\" : 415, \"number\" : 54 },"+ + "\"650-234-4556\","+ + "650234455"+ + "]"+ + "},"+ + "\"children\":"+ + "{"+ + "\"Anna\" : { \"age\" : 10, \"school\" : \"sch_1\"," + + " \"friends\" : [\"Anna\", \"John\", \"Maria\"]},"+ + "\"Lisa\" : { \"age\" : 12," + + " \"friends\" : [\"Ada\"]},"+ + "\"Mary\" : { \"age\" : 7, \"school\" : \"sch_3\"," + + " \"friends\" : [\"Anna\", \"Mark\"]}"+ + "}"+ + "}"+ + "}"; + + oracle.nosql.driver.values.FieldValue value; + value = JsonUtils.createValueFromJson(complexJson, null); + record.put("json", value); + roundTrip(record, def); + } + + /* + * Test utility methods for numeric types conversion in ProxySerialization. 
+ */ + @Test + public void testNumericConversion() { + + /* int to float */ + intToFloatTest(); + + /* long to int */ + longToIntTest(); + + /* long to float */ + longToFloatTest(); + + /* long to double */ + longToDoubleTest(); + + /* double to int */ + doubleToIntTest(); + + /* double to long */ + doubleToLongTest(); + + /* double to float */ + doubleToFloatTest(); + + /* decimal to float */ + decimalToFloatTest(); + + /* decimal to double */ + decimalToDoubleTest(); + } + + private void intToFloatTest() { + int[] valid = new int[] { + -8388608, + -7654321, + 0, + 8388608, + Integer.MAX_VALUE, + Integer.MIN_VALUE + }; + for (int val : valid) { + float fltVal = ProxySerialization.intToFloat(val); + assertTrue(Float.compare(val, fltVal) == 0); + } + + int[] invalid = new int[] { + -123456789, + 123456789, + Integer.MAX_VALUE - 1, + Integer.MIN_VALUE + 1 + }; + for (int val : invalid) { + try{ + ProxySerialization.intToFloat(val); + fail("intToFloat should fail: " + val); + } catch (Exception ex) { + } + } + } + + private void longToIntTest() { + long[] valid = new long[] { + Integer.MIN_VALUE, Integer.MAX_VALUE, 0L, 123456789L + }; + for (long val : valid) { + int ival = ProxySerialization.longToInt(val); + assertEquals(val, ival); + } + + long[] invalid = new long[] { + (long)Integer.MIN_VALUE - 1, + (long)Integer.MAX_VALUE + 1, + 12345678901L + }; + for (long val : invalid) { + try{ + ProxySerialization.longToInt(val); + fail("longToInt should fail: " + val); + } catch (Exception ex){ + } + } + } + + private void longToFloatTest() { + long[] valid = new long[] { + -8388608, + 0, + 1234567, + 8388608, + Long.MAX_VALUE, + Long.MIN_VALUE + }; + for (long val : valid) { + float fltVal = ProxySerialization.longToFloat(val); + assertTrue(Float.compare(val, fltVal) == 0); + } + + long[] invalid = new long[] { + -123456789, + 123456789, + Long.MAX_VALUE - 1, + Long.MIN_VALUE + 1 + }; + for (long val : invalid) { + try{ + ProxySerialization.longToFloat(val); + fail("longToFloat should fail: " + val); + } catch (Exception ex) { + } + } + } + + private void longToDoubleTest() { + long[] valid = new long[] { + 0x80000000000000L, + 0xFF80000000000000L, + 0L, + Long.MIN_VALUE, + Long.MAX_VALUE + }; + for (long val : valid) { + double dblVal = ProxySerialization.longToDouble(val); + assertTrue(Double.compare(dblVal, val) == 0); + } + long[] invalid = new long[] { + 0x80000000000001L, + Long.MIN_VALUE + 1, + Long.MAX_VALUE - 1 + }; + for (long val : invalid) { + try{ + ProxySerialization.longToDouble(val); + fail("longToDouble should fail: " + val); + } catch (Exception ex){ + } + } + } + + private void doubleToIntTest() { + double[] valid = new double[] { + Integer.MIN_VALUE, + Integer.MAX_VALUE, + 0.0d, + 1.23456789E8d, + -1234.0d + }; + for (double val : valid) { + int intVal = ProxySerialization.doubleToInt(val); + assertTrue(Double.compare(val, intVal) == 0); + } + + double[] invalid = new double[] { + 1.23456789E7, + (long)Integer.MAX_VALUE + 1, + Float.MIN_VALUE, + Double.MAX_VALUE + }; + for (double val : invalid) { + try{ + ProxySerialization.doubleToInt(val); + fail("doubleToInt should fail: " + val); + } catch (Exception ex){ + } + } + } + + private void doubleToLongTest() { + double[] valid = new double[] { + Long.MIN_VALUE, + Long.MAX_VALUE, + 0.0d, + 1.234567890123456789E18d, + -12345678901012.0d + }; + for (double val : valid) { + long longVal = ProxySerialization.doubleToLong(val); + assertTrue(Double.compare(val, longVal) == 0); + } + + double[] invalid = new double[] { + 
1.2345678901234567E15, + 1.1d, + Double.MIN_VALUE, + Double.MAX_VALUE + }; + for (double val : invalid) { + try{ + ProxySerialization.doubleToLong(val); + fail("doubleToLong should fail: " + val); + } catch (Exception ex){ + } + } + } + + private void doubleToFloatTest() { + float[] valid = new float[] { + Float.NaN, + Float.NEGATIVE_INFINITY, + Float.POSITIVE_INFINITY, + Float.MAX_VALUE, + -Float.MAX_VALUE, + Float.MIN_VALUE, + -Float.MIN_VALUE, + 0f, + 1.23456E-14f + }; + for (double val : valid) { + float fltVal = ProxySerialization.doubleToFloat(val); + assertTrue(Double.compare(val, fltVal) == 0); + } + + double[] invalid = new double[] { + Float.MAX_VALUE * 2d, + Float.MIN_VALUE / 2d + }; + for (double val : invalid) { + try{ + ProxySerialization.doubleToFloat(val); + fail("doubleToFloat should fail: " + val); + } catch (Exception ex){ + } + } + } + + private void decimalToFloatTest() { + BigDecimal[] valid = new BigDecimal[] { + new BigDecimal("1.4E-45"), + new BigDecimal("3.4028235E38"), + BigDecimal.ZERO, + BigDecimal.valueOf(Integer.MAX_VALUE), + new BigDecimal("1.23456E-14") + }; + for (BigDecimal val : valid) { + ProxySerialization.decimalToFloat(val); + } + + BigDecimal[] invalid = new BigDecimal[] { + BigDecimal.valueOf(Double.MAX_VALUE) + }; + for (BigDecimal val : invalid) { + try{ + ProxySerialization.decimalToFloat(val); + fail("decimalToFloat should fail: " + val); + } catch (Exception ex) { + } + } + } + + private void decimalToDoubleTest() { + BigDecimal[] valid = new BigDecimal[] { + new BigDecimal("1.234567890123456E15"), + new BigDecimal("1.12345678"), + BigDecimal.valueOf(Double.MIN_VALUE), + BigDecimal.valueOf(Double.MAX_VALUE), + BigDecimal.valueOf(Integer.MIN_VALUE), + BigDecimal.valueOf(9223372036854774L), + BigDecimal.ONE, + new BigDecimal("1.234567890123456E309"), /* Infinity */ + new BigDecimal("-1.234567890123456E309") /* -Infinity */ + }; + for (BigDecimal val : valid) { + ProxySerialization.decimalToDouble(val); + } + + BigDecimal[] invalid = new BigDecimal[] { + new BigDecimal("1.12345678E-325"), + new BigDecimal("123456789012345678901234567890") + }; + for (BigDecimal val : invalid) { + try{ + ProxySerialization.decimalToDouble(val); + fail("decimalToDouble should fail: " + val); + } catch (Exception ex) { + } + } + } + + @Test + public void testSerializeWithValueSerializer() { + TableImpl testTable = TableBuilder.createTableBuilder("foo") + .addInteger("i") + .addLong("l") + .addString("s") + .addDouble("d") + .addFloat("f") + .addBoolean("bl") + .addBinary("bi") + .addFixedBinary("fix", 20) + .addNumber("n") + .addTimestamp("ts", 3) + .addField("mi", + TableBuilder.createMapBuilder("mi").addInteger().build()) + .addField("as", + TableBuilder.createArrayBuilder("as").addString().build()) + .addField("r", TableBuilder.createRecordBuilder("r") + .addInteger("ri") + .addString("rs") + .addField("rma", + TableBuilder.createMapBuilder("mai") + .addField(TableBuilder.createArrayBuilder("ai") + .addInteger() + .build()) + .build()) + .build()) + .addInteger("k1") + .addInteger("m1") + .addJson("json", null) + .primaryKey("m1", "k1") + .primaryKeySize("k1", 4) + .shardKey("m1") + .buildTable(); + + oracle.nosql.driver.values.MapValue mv = initRow(); + doWriteRowToProxyWithValueSerializer(testTable, mv, true /* exact */); + + mv = new oracle.nosql.driver.values.MapValue(); + mv.put("m1", 1).put("k1", 2); + doWriteRowToProxyWithValueSerializer(testTable, mv, true /* exact */); + + for (int pos = 0; pos < testTable.getFields().size(); pos++) { + if 
(!testTable.isPrimKeyAtPos(pos)) { + mv.put(testTable.getFields().get(pos), nullValue); + } + } + doWriteRowToProxyWithValueSerializer(testTable, mv, true /* exact */); + + TableImpl jsonTable = TableBuilder.createTableBuilder("foo") + .addInteger("id") + .addJson("json", null) + .primaryKey("id") + .buildTable(); + + mv = new oracle.nosql.driver.values.MapValue(); + mv.put("id", 1); + + mv.put("json", 1); + doWriteRowToProxyWithValueSerializer(jsonTable, mv, true /* exact */); + + mv.put("json", "test"); + doWriteRowToProxyWithValueSerializer(jsonTable, mv, true /* exact */); + + mv.put("json", true); + doWriteRowToProxyWithValueSerializer(jsonTable, mv, true /* exact */); + + mv.put("json", genBytes(20)); + doWriteRowToProxyWithValueSerializer(jsonTable, mv, true /* exact */); + + mv.put("json", Double.MAX_VALUE); + doWriteRowToProxyWithValueSerializer(jsonTable, mv, true /* exact */); + + BigDecimal dec = + new BigDecimal("1234567890123456789012345678901234567890"); + mv.put("json", dec); + RowImpl expRow = jsonTable.createRow(); + expRow.put("id", 1); + expRow.putNumber("json", dec); + doWriteRowToProxyWithValueSerializer(jsonTable, mv, expRow, + true /* exact */, + true /* shouldSucceed */); + + mv.put("json", new Timestamp(System.currentTimeMillis())); + doWriteRowToProxyWithValueSerializer(jsonTable, mv, true /* exact */); + + mv.put("json", jsonNull); + doWriteRowToProxyWithValueSerializer(jsonTable, mv, true /* exact */); + + mv.put("json", nullValue); + doWriteRowToProxyWithValueSerializer(jsonTable, mv, true /* exact */); + + String json = "{\"i\":1," + + "\"r\":{" + + "\"s\":\"name\"," + + "\"map\":{" + + "\"m1r\":{\"m1s\":\"a string value\", \"m1null\":null}," + + "\"m2a\":[\"a1\",\"a2\"]," + + "\"m3m\":{\"m3k1\":1, \"m3k2\":2}}," + + "\"ar\":[-100, 0, -1234432443241431414]}, " + + "\"nv\":null}"; + mv.put("json", JsonUtils.createValueFromJson(json, null)); + doWriteRowToProxyWithValueSerializer(jsonTable, mv, true /* exact */); + + /* + * Invalid cases + */ + + testTable = TableBuilder.createTableBuilder("foo") + .addInteger("i") + .addString("s", null, false, "n/a") + .addInteger("k1") + .addInteger("m1") + .primaryKey("m1", "k1") + .primaryKeySize("k1", 4) + .shardKey("m1") + .buildTable(); + + mv = new oracle.nosql.driver.values.MapValue(); + writeRowToProxyWithValueSerializer(testTable, mv, + true /* exact */, + false /* shouldSucceed */); + + mv.put("k1", 1); + writeRowToProxyWithValueSerializer(testTable, mv, + true /* exact */, + false /* shouldSucceed */); + + mv.put("m1", 1); + mv.put("i", "String"); + writeRowToProxyWithValueSerializer(testTable, mv, + true /* exact */, + false /* shouldSucceed */); + + mv.put("i", 1); + mv.put("s", nullValue); + writeRowToProxyWithValueSerializer(testTable, mv, + true /* exact */, + false /* shouldSucceed */); + } + + @Test + public void testWriteRowToProxy() { + oracle.nosql.driver.values.MapValue mv; + TableImpl table; + String json; + + /* Case1: Row contains array(record) and map(record) */ + table = TableBuilder.createTableBuilder("foo") + .addInteger("id") + .addString("firstName") + .addString("lastName") + .addInteger("age") + .addField("address", + TableBuilder.createRecordBuilder("address") + .addString("city") + .addString("state") + .addField("phones", + TableBuilder.createArrayBuilder() + .addField( + TableBuilder.createRecordBuilder("prec") + .addInteger("number") + .addInteger("areaCode") + .addEnum("kind", + new String[]{"work", "home"}, null) + .build()) + .build()) + .build()) + .addField("children", + 
TableBuilder.createMapBuilder() + .addField(TableBuilder.createRecordBuilder("crec") + .addField("friends", + TableBuilder.createArrayBuilder() + .addString() + .build()) + .addString("school") + .addLong("age") + .build()) + .build()) + .primaryKey("id") + .buildTable(); + + json = "{ " + + " \"id\":0, " + + " \"firstName\":\"first0\", " + + " \"lastName\":\"last0\"," + + " \"age\":10," + + " \"address\": {" + + " \"city\": \"San Fransisco\"," + + " \"state\" :\"CA\"," + + " \"phones\":[" + + " { \"areacode\":408,\"number\":50, \"kind\":\"work\" }," + + " { \"areacode\":650,\"number\":51, \"kind\":\"work\" }," + + " { \"areacode\":650,\"number\":52, \"kind\":\"home\" }," + + " { \"areacode\":510,\"number\":53, \"kind\":\"home\" }," + + " { \"areacode\":415,\"number\":54, \"kind\":null }]" + + " }," + + " \"children\": {" + + " \"John\":{ \"age\":10, \"school\":\"sch_1\"," + + " \"friends\":[\"Anna\", \"John\", \"Maria\"]}," + + " \"Lisa\":{ \"age\":12, \"school\": null," + + " \"friends\":[\"Ada\"]}," + + " \"Mary\":{ \"age\":7, \"school\":\"sch_3\"," + + " \"friends\":[\"Anna\", \"Mark\"]}" + + " }" + + "}"; + + mv = (oracle.nosql.driver.values.MapValue) + oracle.nosql.driver.values.MapValue.createFromJson(json, null); + doWriteRowToProxyWithValueSerializer(table, mv, false /* exact */); + + /* + * Case2: Row contains unknown fields, they should be ignored. + */ + table = TableBuilder.createTableBuilder("foo") + .addInteger("id") + .addString("firstName") + .addString("lastName") + .addInteger("age") + .addField("address", + TableBuilder.createRecordBuilder("address") + .addString("city") + .addString("state") + .addString("street") + .build()) + .addField("phones", + TableBuilder.createArrayBuilder() + .addField(TableBuilder.createRecordBuilder("prec") + .addString("areacode") + .addString("number") + .build()) + .build()) + .addField("emails", + TableBuilder.createMapBuilder() + .addField(TableBuilder.createRecordBuilder("erec") + .addEnum("type", + new String[] {"work", "other"}, null) + .addString("email") + .build()) + .build()) + .primaryKey("id") + .buildTable(); + + json = "{" + + " \"id\": 0," + + " \"firstName\": \"first0\"," + + " \"lastName\": \"last0\"," + + " \"age\": 10," + + " \"invalidField0\": 0," + + " \"address\": {" + + " \"invalidField1\": 1," + + " \"street\": \"1 Oracle Way\"," + + " \"state\": \"CA\"," + + " \"city\": \"San Fransisco\"" + + " }," + + " \"phones\": [{" + + " \"number\": \"1231456\"," + + " \"invalidField2\": 2," + + " \"areacode\": \"781\"" + + " }," + + " {" + + " \"invalidField3\": 3," + + " \"areacode\": \"782\"," + + " \"number\": \"7654321\"" + + " }" + + " ]," + + " \"emails\": {" + + " \"email1\": {" + + " \"invalidField4\": 4," + + " \"email\": \"aaa@abc.com\"," + + " \"type\": \"work\"" + + " }," + + " \"email2\": {" + + " \"type\": \"other\"," + + " \"invalidField5\": 5," + + " \"email\": \"bbb@test.com\"" + + " }" + + " }" + + "}"; + mv = (oracle.nosql.driver.values.MapValue) + oracle.nosql.driver.values.MapValue.createFromJson(json, null); + /* + * Write to proxy using ValueSerializer to deserialize driver value + */ + doWriteRowToProxyWithValueSerializer(table, mv, false /* exact */); + + /* + * Write to proxy using readFieldValue() to deserialize driver value + * to FieldValue + */ + Row row = table.createRowFromJson(json, false); + doWriteToProxy(mv, table.getRowDef(), row, + true /* shouldSucceed */); + + /* + * Case3: key of map is empty string + */ + table = TableBuilder.createTableBuilder("Boo") + .addInteger("id") + 
.addField("expenses", + TableBuilder.createMapBuilder() + .addInteger() + .build()) + .primaryKey("id") + .buildTable(); + json = "{ \"id\":4, \"expenses\":{\"\":3, \"\\\"\":13}}"; + mv = (oracle.nosql.driver.values.MapValue) + oracle.nosql.driver.values.MapValue.createFromJson(json, null); + doWriteRowToProxyWithValueSerializer(table, mv, true /* exact */); + } + + @Test + public void testWriteArrayToRecord() { + TableImpl table = TableBuilder.createTableBuilder("foo") + .addInteger("id") + .addString("name") + .addInteger("age") + .addField("address", + TableBuilder.createRecordBuilder("address") + .addString("city") + .addString("street") + .addInteger("buildingNo") + .build()) + .primaryKey("id") + .buildTable(); + + oracle.nosql.driver.values.ArrayValue rowArr; + oracle.nosql.driver.values.MapValue rowMap; + oracle.nosql.driver.values.ArrayValue addressArr; + oracle.nosql.driver.values.MapValue addressMap; + + Row exp = table.createRow(); + exp.put("id", 1) + .put("name", "Jack Wang") + .put("age", 30) + .put("address", + table.getField("address").createRecord() + .put("city", "Burlington") + .put("street", "35 network drive") + .put("buildingNo", 95)); + + addressArr = new oracle.nosql.driver.values.ArrayValue() + .add("Burlington") + .add("35 network drive") + .add(95); + + addressMap = new oracle.nosql.driver.values.MapValue() + .put("city", "Burlington") + .put("street", "35 network drive") + .put("buildingNo", 95); + + /* + * Case1: Use array value for row, also use array for the nested record + * "address". + */ + rowArr = new oracle.nosql.driver.values.ArrayValue(); + rowArr.add(1) + .add("Jack Wang") + .add(30) + .add(addressArr); + doWriteRowToProxyWithValueSerializer(table, rowArr, exp, true, true); + + /* + * Case2: Use array value for row, use map value for the nested + * record "address". + */ + rowArr = new oracle.nosql.driver.values.ArrayValue(); + rowArr.add(1) + .add("Jack Wang") + .add(30) + .add(addressMap); + doWriteRowToProxyWithValueSerializer(table, rowArr, exp, true, true); + + /* + * Case3: Use map value for row, use array value for the nested + * record "address". 
+ */ + rowMap = new oracle.nosql.driver.values.MapValue(); + rowMap.put("id", 1) + .put("name", "Jack Wang") + .put("age", 30) + .put("address", addressArr); + doWriteRowToProxyWithValueSerializer(table, rowMap, exp, true, true); + + /* + * Case4: Test exact = false, use array value for row with an additional + * NULL value + */ + rowArr = new oracle.nosql.driver.values.ArrayValue(); + rowArr.add(1) + .add("Jack Wang") + .add(30) + .add(addressArr) + .add(NullValue.getInstance()); + doWriteRowToProxyWithValueSerializer(table, rowArr, exp, false, true); + + /* Case5: use array value with "id" field only */ + rowArr = new oracle.nosql.driver.values.ArrayValue() + .add(new IntegerValue(1)); + exp = table.createRow(); + exp.put("id", 1); + ((RowImpl)exp).addMissingFields(); + doWriteRowToProxyWithValueSerializer(table, rowArr, exp, false, true); + + /* + * Negative case + */ + + /* + * Case6: exact = true, array contains more or less elements than + * expected, expect 4 actual 1 + */ + rowArr = new oracle.nosql.driver.values.ArrayValue() + .add(1); + doWriteRowToProxyWithValueSerializer(table, rowArr, null, true, false); + + /* + * Case7: Invalid string for Integer: invalid + */ + rowArr = new oracle.nosql.driver.values.ArrayValue() + .add("invalidForId"); + doWriteRowToProxyWithValueSerializer(table, rowArr, null, false, false); + } + + private void doWriteRowToProxyWithValueSerializer( + TableImpl table, + oracle.nosql.driver.values.FieldValue value, + boolean exact) { + doWriteRowToProxyWithValueSerializer(table, value, exact, true); + } + + private void doWriteRowToProxyWithValueSerializer( + TableImpl table, + oracle.nosql.driver.values.FieldValue value, + boolean exact, + boolean shouldSucceed) { + doWriteRowToProxyWithValueSerializer(table, value, null /* expRow */, + exact, shouldSucceed); + } + + private void doWriteRowToProxyWithValueSerializer ( + TableImpl table, + oracle.nosql.driver.values.FieldValue value, + Row expRow, + boolean exact, + boolean shouldSucceed) { + + Row row = writeRowToProxyWithValueSerializer(table, value, + exact, shouldSucceed); + if (shouldSucceed && expRow == null) { + expRow = table.createRowFromJson(value.toJson(), false /* exact */); + ((RowImpl)expRow).addMissingFields(); + } + assertEquals(expRow, row); + } + + private oracle.nosql.driver.values.MapValue initRow() { + oracle.nosql.driver.values.MapValue row; + row = new oracle.nosql.driver.values.MapValue(); + row.put("m1", 0); + row.put("k1", 100); + row.put("i", Integer.MAX_VALUE); + row.put("l", Long.MAX_VALUE); + row.put("s", "this a test string"); + row.put("d", Double.MAX_VALUE); + row.put("f", Float.MAX_VALUE); + row.put("bl", true); + row.put("bi", genBytes(10)); + row.put("fix", genBytes(20)); + row.put("n", new BigDecimal("123456789012345678901234567890")); + row.put("ts", new Timestamp(System.currentTimeMillis())); + + oracle.nosql.driver.values.MapValue mv = + new oracle.nosql.driver.values.MapValue(); + for (int i = 0; i < 3; i++) { + mv.put("k" + i, i); + } + row.put("mi", mv); + + oracle.nosql.driver.values.ArrayValue av = + new oracle.nosql.driver.values.ArrayValue(); + for (int i = 0; i < 3; i++) { + av.add("av" + i); + } + row.put("as", av); + + oracle.nosql.driver.values.MapValue rv = + new oracle.nosql.driver.values.MapValue(); + rv.put("ri", 1); + rv.put("rs", "rsv"); + + mv = new oracle.nosql.driver.values.MapValue(); + for (int i = 0; i < 3; i++) { + av = new oracle.nosql.driver.values.ArrayValue(); + for (int j = 0; j < 3; j++) { + av.add(j * 100); + } + mv.put("k" + i, av); +
} + rv.put("rma", mv); + row.put("r", rv); + String json = "{\"i\":1,\"r\":{\"s\":\"name\",\"ar\":[0,1]}," + + " \"a\":null}"; + row.put("json", JsonUtils.createValueFromJson(json, null)); + return row; + } + + private byte[] genBytes(int len) { + byte[] bytes = new byte[len]; + for (int i = 0; i < len; i++) { + bytes[i] = (byte)(i % 256); + } + return bytes; + } + + private void doWriteValuesToProxy ( + oracle.nosql.driver.values.FieldValue[] values, + int[] validTypes, + FieldDef fieldDef) { + + int j = 0; + for (int i = 0; i < values.length; i++) { + boolean isValid = (j < validTypes.length && i == validTypes[j]); + doWriteToProxy(values[i], fieldDef, isValid); + if (isValid) { + j++; + } + } + } + + /** + * Value send from driver to proxy, then send back from proxy to driver. + */ + private void roundTrip(oracle.nosql.driver.values.FieldValue driverValue, + FieldDef fieldDef) { + roundTrip(driverValue, fieldDef, null); + } + + private void roundTrip(oracle.nosql.driver.values.FieldValue driverValue, + FieldDef fieldDef, + oracle.nosql.driver.values.FieldValue expValue) { + + oracle.nosql.driver.values.FieldValue driverValue1; + oracle.kv.table.FieldValue storeValue; + + storeValue = writeToProxy(driverValue, fieldDef, + true /* shouldSucceed */); + driverValue1 = writeToDriver(storeValue); + + /* + * Verify the input value from driver with the the one read from + * proxy. Use compareTo() instead of equals() because the latter + * is not correct for BigDecimal (Number) types + */ + oracle.nosql.driver.values.FieldValue exp = + (expValue != null) ? expValue : driverValue; + assertTrue("Wrong field value read\noriginal: " + + driverValue.toJson() + "\nread: " + driverValue1.toJson(), + (driverValue1.compareTo(exp) == 0)); + } + + private void doDeserializeWithValueReader(Row row) { + final ByteBuf buf = Unpooled.buffer(); + ByteOutputStream bos = new ByteOutputStream(buf); + + final TableImpl table = (TableImpl)row.getTable(); + final Version version = + new Version(UUID.randomUUID(), 1, new RepNodeId(1, 1), 0x1L); + final Value value = ((RowImpl)row).createValue(); + + oracle.nosql.driver.values.FieldValue fval0 = null; + oracle.nosql.driver.values.FieldValue fval1 = null; + oracle.nosql.driver.util.ByteInputStream bis; + + /* Serialize row using ValueReader */ + RowReaderImpl reader = new RowReaderImpl(bos, row.getTable()); + for (String fname : table.getPrimaryKeyInternal()) { + readKeyFieldValue(reader, fname, row.get(fname)); + } + table.readRowFromValueVersion(reader, new ValueVersion(value, version)); + assertTrue(reader.done() > 0); + assertEquals(reader.getVersion(), version); + /* Deserialize to driver value */ + try { + bis = new oracle.nosql.driver.util.NettyByteInputStream(buf); + fval0 = BinaryProtocol.readFieldValue(bis); + } catch (IOException ioe) { + fail("Failed to deserialize fval0: " + ioe.getMessage()); + } + + /* Serialize row using ValueReader */ + buf.clear(); + bos = new ByteOutputStream(buf); + try { + writeFieldValue(bos, row); + /* Deserialize to driver value */ + bis = new oracle.nosql.driver.util.NettyByteInputStream(buf); + fval1 = BinaryProtocol.readFieldValue(bis); + } catch (IOException ioe) { + fail("Failed to deserialize fval1: " + ioe.getMessage()); + } + + assertEquals(fval0, fval1); + } + + + private static void readKeyFieldValue(ValueReader reader, + String fname, + FieldValue value) { + + switch (value.getType()) { + case BOOLEAN: + reader.readBoolean(fname, value.asBoolean().get()); + break; + case DOUBLE: + reader.readDouble(fname, 
value.asDouble().get()); + break; + case ENUM: + reader.readEnum(fname, value.getDefinition(), + value.asEnum().getIndex()); + break; + case FLOAT: + reader.readFloat(fname, value.asFloat().get()); + break; + case INTEGER: + reader.readInteger(fname, value.asInteger().get()); + break; + case LONG: + reader.readLong(fname, value.asLong().get()); + break; + case STRING: + reader.readString(fname, value.asString().get()); + break; + case TIMESTAMP: + reader.readTimestamp(fname, value.getDefinition(), + ((TimestampValueImpl)value).getBytes()); + break; + case NUMBER: + reader.readNumber(fname, ((NumberValueImpl)value).getBytes()); + break; + default: + throw new IllegalStateException("Unexpected type: " + + value.getType()); + } + } + + /** + * Serialize value on driver, deserialize value on proxy side. + */ + private void doWriteToProxy( + oracle.nosql.driver.values.FieldValue driverValue, + FieldDef fieldDef, + boolean shouldSucceed) { + doWriteToProxy(driverValue, fieldDef, null, shouldSucceed); + } + + private void doWriteToProxy( + oracle.nosql.driver.values.FieldValue driverValue, + FieldDef fieldDef, + oracle.kv.table.FieldValue expValue) { + doWriteToProxy(driverValue, fieldDef, expValue, + true /* shouldSucceed */); + } + + private void doWriteToProxy( + oracle.nosql.driver.values.FieldValue driverValue, + FieldDef fieldDef, + oracle.kv.table.FieldValue expValue, + boolean shouldSucceed) { + + oracle.kv.table.FieldValue storeValue = + writeToProxy(driverValue, fieldDef, shouldSucceed); + if (expValue != null) { + assertTrue(storeValue.equals(expValue)); + } + } + + private oracle.kv.table.FieldValue + writeToProxy(oracle.nosql.driver.values.FieldValue driverValue, + FieldDef fieldDef, + boolean shouldSucceed) { + + testBuf.clear(); + try { + oracle.nosql.driver.util.ByteOutputStream bos = + new oracle.nosql.driver.util.NettyByteOutputStream(testBuf); + BinaryProtocol.writeFieldValue(bos, driverValue); + ByteInputStream bis = new ByteInputStream(testBuf); + oracle.kv.table.FieldValue storeValue = + readFieldValue(bis, fieldDef, true, false); + if (!shouldSucceed) { + fail("Expect to catch IAE but not, driver value's type is " + + driverValue.getType() + ", kv type is " + + fieldDef.getType()); + } + return storeValue; + } catch (IOException e) { + fail("Write value from driver to proxy failed: " + e); + } catch (IllegalArgumentException iae) { + if (shouldSucceed) { + fail("Expect to succeed but fail: " + iae.getMessage() + + ", driver value's type is " + driverValue.getType() + + ", kv type is " + fieldDef.getType()); + } + } + return null; + } + + private oracle.nosql.driver.values.FieldValue + writeToDriver(oracle.kv.table.FieldValue storeValue) { + + testBuf.clear(); + try { + ByteOutputStream bos = new ByteOutputStream(testBuf); + writeFieldValue(bos, storeValue); + oracle.nosql.driver.util.ByteInputStream bis = + new oracle.nosql.driver.util.NettyByteInputStream(testBuf); + return BinaryProtocol.readFieldValue(bis); + } catch (IOException e) { + fail("Write value from proxy to dirver failed: " + e); + } + return null; + } + + private Row writeRowToProxyWithValueSerializer( + TableImpl table, + oracle.nosql.driver.values.FieldValue driverRow, + boolean exact, + boolean shouldSucceed) { + + try { + testBuf.clear(); + oracle.nosql.driver.util.ByteOutputStream bos = + new oracle.nosql.driver.util.NettyByteOutputStream(testBuf); + BinaryProtocol.writeFieldValue(bos, driverRow); + + ByteInputStream bis = new ByteInputStream(testBuf); + byte type = bis.readByte(); + if (driverRow 
instanceof oracle.nosql.driver.values.MapValue) { + assertEquals(TYPE_MAP, type); + } else if (driverRow instanceof + oracle.nosql.driver.values.ArrayValue){ + assertEquals(TYPE_ARRAY, type); + } + RowSerializer serializer = + new RowSerializerImpl(bis, type, table, -1, -1, false, exact); + Key key = table.createKeyInternal(serializer, false); + Value value = table.createValueInternal(serializer); + Row row = table.createRowFromBytes(key.toByteArray(), + value.toByteArray(), + false); + if (!shouldSucceed) { + fail("Expect to fail but succeed"); + } + return row; + } catch (IOException e) { + fail("testRecordParser failed: " + e); + } catch (RuntimeException re) { + if (shouldSucceed) { + fail("Expect to succeed but failed: " + re.getMessage()); + } + } + return null; + } + + private byte[] genByteArray(int len) { + byte[] bytes = new byte[len]; + for (int i = 0; i < len; i++) { + bytes[i] = (byte)(i % 256); + } + return bytes; + } + + private String genString(int len) { + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < len; i++) { + sb.append('A' + (i % 26)); + } + return sb.toString(); + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/ServerlessNsonTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/ServerlessNsonTest.java new file mode 100644 index 00000000..d1bab359 --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/ServerlessNsonTest.java @@ -0,0 +1,439 @@ +/*- + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * See the file LICENSE for redistribution information. + * + */ + +package oracle.nosql.proxy; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import org.junit.Test; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.handler.codec.http.HttpResponseStatus; +import oracle.nosql.driver.TableNotFoundException; +import oracle.nosql.driver.Version; +import oracle.nosql.driver.ops.DeleteRequest; +import oracle.nosql.driver.ops.DeleteResult; +import oracle.nosql.driver.ops.GetRequest; +import oracle.nosql.driver.ops.GetResult; +import oracle.nosql.driver.ops.GetTableRequest; +import oracle.nosql.driver.ops.PutRequest; +import oracle.nosql.driver.ops.PutRequest.Option; +import oracle.nosql.driver.ops.PutResult; +import oracle.nosql.driver.ops.Request; +import oracle.nosql.driver.ops.Result; +import oracle.nosql.driver.ops.TableLimits; +import oracle.nosql.driver.ops.TableRequest; +import oracle.nosql.driver.ops.TableResult; +import oracle.nosql.driver.ops.WriteMultipleRequest; +import oracle.nosql.driver.ops.WriteMultipleRequest.OperationRequest; +import oracle.nosql.driver.ops.WriteMultipleResult; +import oracle.nosql.driver.ops.WriteMultipleResult.OperationResult; +import oracle.nosql.driver.ops.WriteRequest; +import oracle.nosql.driver.ops.serde.Serializer; +import oracle.nosql.driver.ops.serde.SerializerFactory; +import oracle.nosql.driver.ops.serde.nson.NsonSerializerFactory; +import oracle.nosql.driver.util.ByteInputStream; +import oracle.nosql.driver.util.ByteOutputStream; +import oracle.nosql.driver.util.NettyByteInputStream; +import oracle.nosql.driver.util.NettyByteOutputStream; +import oracle.nosql.driver.values.MapValue; + +/** + * Extends ServerlessTestBase to exercise NSON (V4) protocol + 
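+ * + * A typical round trip in these tests looks like the sketch below. This is + * an illustrative example only: it mirrors testCrud later in this class and + * uses the doRequest() overload defined at the end of the class, which + * selects the NSON (V4) serializer and deserializer. + * + * MapValue value = new MapValue().put("id", 10).put("name", "jane"); + * PutRequest putRequest = new PutRequest() + * .setValue(value) + * .setTableName("bar"); + * PutResult pres = (PutResult) doRequest(putRequest);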
*/ +public class ServerlessNsonTest extends ServerlessTestBase { + private static short v4ProtocolVersion = 4; + private static SerializerFactory v4Factory = new NsonSerializerFactory(); + private static RequestSerializer serializer = new NsonRequestSerializer(); + private static ResponseDeserializer deserializer = + new NsonResponseDeserializer(); + + /* + * Test create table, get, put, delete + */ + @Test + public void testCrud() throws Exception { + final String tableName = "bar"; + TableResult tres = tableOp( + "create table bar(id integer, name string, primary key(id))", + tableLimits, null); + + /* put */ + MapValue value = new MapValue().put("id", 10).put("name", "jane"); + PutRequest putRequest = new PutRequest() + .setValue(value) + .setTableName(tableName); + PutResult pres = (PutResult) doRequest(putRequest); + assertNotNull(pres.getVersion()); + value.put("id", 5).put("name", "joe"); + pres = (PutResult) doRequest(putRequest); + Version version = pres.getVersion(); + assertNotNull(version); + + /* put ifAbsent, with return row info */ + putRequest.setOption(PutRequest.Option.IfAbsent).setReturnRow(true); + pres = (PutResult) doRequest(putRequest); + assertEquals(version, pres.getExistingVersion()); + assertEquals(value, pres.getExistingValue()); + + /* get */ + MapValue key = new MapValue().put("id", 5); + GetRequest getRequest = new GetRequest() + .setKey(key) + .setTableName(tableName); + + GetResult gres = (GetResult) doRequest(getRequest); + assertEquals(value, gres.getValue()); + + /* delete */ + DeleteRequest delRequest = new DeleteRequest() + .setKey(key) + .setTableName(tableName); + DeleteResult dres = (DeleteResult) doRequest(delRequest); + assertTrue(dres.getSuccess()); + dres = (DeleteResult) doRequest(delRequest); + assertFalse(dres.getSuccess()); + + /* drop the table */ + tres = tableOp(("drop table " + tableName), null, null); + assertTrue(tres.getTableState() == TableResult.State.DROPPED); + + try { + tres = getTable(tableName, null); + fail("GetTable should have failed"); + } catch (TableNotFoundException tnfe) { + // success + } + } + + + + /* + * Test WriteMultiple + */ + @Test + public void testWriteMultiple() { + /* Create a table */ + final String createTableDDL = + "CREATE TABLE IF NOT EXISTS writeMultipleTable(" + + "sid INTEGER, id INTEGER, name STRING, longString STRING, " + + "PRIMARY KEY(SHARD(sid), id)) " + + "USING TTL 1 DAYS"; + + final String tableName = "writeMultipleTable"; + final int sid = 10; + final int recordKB = 2; + WriteMultipleRequest umRequest = new WriteMultipleRequest(); + List<Boolean> shouldSucceed = new ArrayList<Boolean>(); + List<Boolean> rowPresent = new ArrayList<Boolean>(); + + tableOp(createTableDDL, tableLimits, null); + + /* Put 10 rows */ + for (int i = 0; i < 10; i++) { + MapValue value = genRow(sid, i, recordKB); + PutRequest putRequest = new PutRequest() + .setValue(value) + .setTableName(tableName); + umRequest.add(putRequest, false); + rowPresent.add(false); + shouldSucceed.add(true); + } + + WriteMultipleResult umResult = + (WriteMultipleResult) doRequest(umRequest); + verifyResult(umResult, umRequest, shouldSucceed, rowPresent, recordKB); + Version versionId2 = umResult.getResults().get(2).getVersion(); + Version versionId7 = umResult.getResults().get(7).getVersion(); + + umRequest.clear(); + shouldSucceed.clear(); + rowPresent.clear(); + + /* PutIfAbsent, ReturnRow = true */ + MapValue value = genRow(sid, 0, recordKB, true); + PutRequest put = new PutRequest() + .setOption(Option.IfAbsent) + .setValue(value) + .setTableName(tableName) +
.setReturnRow(true); + umRequest.add(put, false); + rowPresent.add(true); + shouldSucceed.add(false); + + /* PutIfPresent, ReturnRow = true */ + value = genRow(sid, 1, recordKB, true); + put = new PutRequest() + .setOption(Option.IfPresent) + .setValue(value) + .setTableName(tableName) + .setReturnRow(true); + umRequest.add(put, false); + rowPresent.add(true); + shouldSucceed.add(true); + + /* PutIfVersion, ReturnRow = true */ + value = genRow(sid, 2, recordKB, true); + put = new PutRequest() + .setOption(Option.IfVersion) + .setMatchVersion(versionId2) + .setValue(value) + .setTableName(tableName) + .setReturnRow(true); + umRequest.add(put, false); + rowPresent.add(false); + shouldSucceed.add(true); + + /* PutIfAbsent, ReturnRow = false */ + value = genRow(sid, 10, recordKB, true); + put = new PutRequest() + .setOption(Option.IfAbsent) + .setValue(value) + .setTableName(tableName) + .setReturnRow(false); + umRequest.add(put, false); + rowPresent.add(false); + shouldSucceed.add(true); + + /* Put, ReturnRow = true */ + value = genRow(sid, 3, recordKB, true); + put = new PutRequest() + .setValue(value) + .setTableName(tableName) + .setReturnRow(true); + umRequest.add(put, false); + rowPresent.add(true); + shouldSucceed.add(true); + + /* Put, ReturnRow = false */ + value = genRow(sid, 4, recordKB, true); + put = new PutRequest() + .setValue(value) + .setTableName(tableName) + .setReturnRow(false); + umRequest.add(put, false); + rowPresent.add(false); + shouldSucceed.add(true); + + /* Delete, ReturnRow = true */ + value = genKey(sid, 5); + DeleteRequest delete = new DeleteRequest() + .setKey(value) + .setTableName(tableName) + .setReturnRow(true); + umRequest.add(delete, false); + rowPresent.add(true); + shouldSucceed.add(true); + + /* Delete, ReturnRow = false */ + value = genKey(sid, 6); + delete = new DeleteRequest() + .setKey(value) + .setTableName(tableName) + .setReturnRow(false); + umRequest.add(delete, false); + rowPresent.add(false); + shouldSucceed.add(true); + + /* DeleteIfVersion, ReturnRow = true */ + value = genKey(sid, 7); + delete = new DeleteRequest() + .setMatchVersion(versionId7) + .setKey(value) + .setTableName(tableName) + .setReturnRow(true); + umRequest.add(delete, false); + rowPresent.add(false); + shouldSucceed.add(true); + + /* DeleteIfVersion, ReturnRow = true */ + value = genKey(sid, 8); + delete = new DeleteRequest() + .setMatchVersion(versionId7) + .setKey(value) + .setTableName(tableName) + .setReturnRow(true); + umRequest.add(delete, false); + rowPresent.add(true); + shouldSucceed.add(false); + + /* Delete, ReturnRow = true */ + value = genKey(sid, 100); + delete = new DeleteRequest() + .setKey(value) + .setTableName(tableName) + .setReturnRow(true); + umRequest.add(delete, false); + rowPresent.add(false); + shouldSucceed.add(false); + + umResult = (WriteMultipleResult) doRequest(umRequest); + verifyResult(umResult, umRequest, shouldSucceed, rowPresent, recordKB); + } + + private MapValue genRow(int sid, int id, int recordKB) { + return genRow(sid, id, recordKB, false); + } + + private MapValue genRow(int sid, int id, int recordKB, boolean upd) { + return new MapValue().put("sid", sid).put("id", id) + .put("name", (upd ? 
"name_upd_" : "name_") + sid + "_" + id) + .put("longString", genString((recordKB - 1) * 1024)); + } + + private MapValue genKey(int sid, int id) { + return new MapValue().put("sid", sid).put("id", id); + } + + private void verifyResult(WriteMultipleResult umResult, + WriteMultipleRequest umRequest, + List shouldSucceedList, + List rowPresentList, + int recordKB) { + + assertTrue("The operation expected to succeed", + umResult.getSuccess()); + + List ops = umRequest.getOperations(); + assertTrue("Wrong number of results: expect " + + umRequest.getNumOperations() + ", actual " + umResult.size(), + umResult.size() == umRequest.getNumOperations()); + + int ind = 0; + + for (OperationResult result : umResult.getResults()) { + boolean shouldSucceed = shouldSucceedList.get(ind); + assertTrue("Operation expected to succeed, opIdx=" + ind, + result.getSuccess() == shouldSucceed); + + OperationRequest op = ops.get(ind); + WriteRequest request = op.getRequest(); + if (request instanceof PutRequest && shouldSucceed) { + assertTrue("Expected to get new version ", + result.getVersion() != null); + } else { + assertTrue("Expected to not get new version", + result.getVersion() == null); + } + + if (rowPresentList != null) { + boolean hasReturnRow = rowPresentList.get(ind); + + assertTrue("The existing value is expected to be " + + (hasReturnRow ? "not null: " : "null: "), + (hasReturnRow ? result.getExistingValue() != null : + result.getExistingValue() == null)); + assertTrue("The existing version is expected to be " + + (hasReturnRow ? "not null: " : "null: "), + (hasReturnRow ? result.getExistingVersion() != null : + result.getExistingVersion() == null)); + } + ind++; + } + } + + /** + * Serialize a Request returning a new ByteBuf + */ + static class NsonRequestSerializer implements RequestSerializer { + @Override + public ByteBuf serialize(Request req) throws IOException { + + ByteBuf content = Unpooled.buffer(); + ByteOutputStream bos = new NettyByteOutputStream(content); + short proto = v4ProtocolVersion; + Serializer ser = req.createSerializer(v4Factory); + if (ser == null) { + ser = req.createSerializer(binaryFactory); + proto = (short) 3; + } + bos.writeShort(proto); + try { + ser.serialize(req, proto, bos); + } catch (Exception e) { + e.printStackTrace(); + } + return content; + } + } + + /** + * Deserialize response + */ + static class NsonResponseDeserializer implements ResponseDeserializer { + + @Override + public Result deserialize(Request req, + ServerlessContext ctx) throws IOException { + + assertEquals(HttpResponseStatus.OK, ctx.status); + ByteBuf content = ctx.content; + try (ByteInputStream bis = new NettyByteInputStream(content)) { + short proto = v4ProtocolVersion; + Serializer ser = req.createDeserializer(v4Factory); + if (ser == null) { + ser = req.createDeserializer(binaryFactory); + proto = (short) 3; + } + Result res = ser.deserialize(req, bis, proto); + return res; + } finally { + /* release the response buffer */ + content.release(); + } + } + } + + private TableResult tableOp(String statement, + TableLimits limits, + String tableName) { + TableRequest tableRequest = new TableRequest() + .setStatement(statement) + .setTableLimits(tableLimits) + .setTableName(tableName); + TableResult res = (TableResult) doRequest(tableRequest); + return waitForCompletion(res.getTableName(), res.getOperationId()); + } + + private TableResult waitForCompletion(String tableName, String opId) { + + TableResult res = getTable(tableName, opId); + TableResult.State state = res.getTableState(); + 
while (!isTerminal(state)) { + try { + Thread.sleep(500); + } catch (Exception e) {} // ignore + res = getTable(tableName, opId); + state = res.getTableState(); + } + return res; + } + + private TableResult getTable(String tableName, String opId) { + + GetTableRequest getTable = + new GetTableRequest().setTableName(tableName). + setOperationId(opId); + return (TableResult) doRequest(getTable); + } + + /** + * Perform the request using V4 protocol and the NSON serializers + */ + private Result doRequest(Request request) { + return doRequest(request, serializer, deserializer, v4ProtocolVersion); + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/ServerlessTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/ServerlessTest.java new file mode 100644 index 00000000..513463ea --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/ServerlessTest.java @@ -0,0 +1,135 @@ +/*- + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * See the file LICENSE for redistribution information. + * + */ + +package oracle.nosql.proxy; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import org.junit.Test; + +import oracle.nosql.driver.TableNotFoundException; +import oracle.nosql.driver.ops.GetRequest; +import oracle.nosql.driver.ops.GetResult; +import oracle.nosql.driver.ops.GetTableRequest; +import oracle.nosql.driver.ops.PutRequest; +import oracle.nosql.driver.ops.PutResult; +import oracle.nosql.driver.ops.QueryRequest; +import oracle.nosql.driver.ops.QueryResult; +import oracle.nosql.driver.ops.Request; +import oracle.nosql.driver.ops.Result; +import oracle.nosql.driver.ops.TableLimits; +import oracle.nosql.driver.ops.TableRequest; +import oracle.nosql.driver.ops.TableResult; +import oracle.nosql.driver.ops.TableResult.State; +import oracle.nosql.driver.values.MapValue; + +/** + * An extension of ServerlessTestBase that uses the V3 binary protocol + */ +public class ServerlessTest extends ServerlessTestBase { + + @Test + public void testServerless() { + String tableName = "foo"; + String statement = "create table if not exists " + tableName + + "(id integer, primary key(id), name string)"; + TableResult res = tableOp(statement, tableLimits, null); + assertTrue(res.getTableState() == TableResult.State.ACTIVE); + + /* CRUD */ + MapValue value = new MapValue().put("id", 10).put("name", "jane"); + PutRequest putRequest = new PutRequest() + .setValue(value) + .setTableName(tableName); + PutResult pres = (PutResult) doRequest(putRequest); + assertNotNull(pres.getVersion()); + value.put("id", 5).put("name", "joe"); + pres = (PutResult) doRequest(putRequest); + assertNotNull(pres.getVersion()); + + MapValue key = new MapValue().put("id", 5); + GetRequest getRequest = new GetRequest() + .setKey(key) + .setTableName(tableName); + + GetResult gres = (GetResult) doRequest(getRequest); + assertEquals(value, gres.getValue()); + + QueryRequest queryReq = new QueryRequest() + .setStatement("select * from " + tableName); + /* Query */ + QueryResult qres = (QueryResult) doRequest(queryReq); + assertEquals(2, qres.getResults().size()); + + /* test error mapping */ + + /* + * test a table not found + */ + try { + getRequest.setTableName("abc"); + gres = (GetResult) doRequest(getRequest); + fail("Should have thrown"); + } catch (TableNotFoundException tnfe) { + // success + } + + /* + * test a bad DDL statement + */ + try { + res 
= tableOp("creat table(xxx)", tableLimits, null); + fail("Should have thrown"); + } catch (IllegalArgumentException iae) { + // success + } + } + + TableResult tableOp(String statement, + TableLimits limits, + String tableName) { + TableRequest tableRequest = new TableRequest() + .setStatement(statement) + .setTableLimits(tableLimits) + .setTableName(tableName); + TableResult res = (TableResult) doRequest(tableRequest); + return waitForCompletion(res.getTableName(), res.getOperationId()); + } + + TableResult waitForCompletion(String tableName, String opId) { + + TableResult res = getTable(tableName, opId); + TableResult.State state = res.getTableState(); + while (!isTerminal(state)) { + try { + Thread.sleep(500); + } catch (Exception e) {} // ignore + res = getTable(tableName, opId); + state = res.getTableState(); + } + return res; + } + + TableResult getTable(String tableName, String opId) { + + GetTableRequest getTable = + new GetTableRequest().setTableName(tableName). + setOperationId(opId); + return (TableResult) doRequest(getTable); + } + + static boolean isTerminal(TableResult.State state) { + return state == State.ACTIVE || state == State.DROPPED; + } + + private Result doRequest(Request request) { + return doV3Request(request); + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/ServerlessTestBase.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/ServerlessTestBase.java new file mode 100644 index 00000000..8f0e7742 --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/ServerlessTestBase.java @@ -0,0 +1,643 @@ +/*- + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * See the file LICENSE for redistribution information. + * + */ + +package oracle.nosql.proxy; + +import static oracle.nosql.driver.util.HttpConstants.ACCEPT; +import static oracle.nosql.driver.util.HttpConstants.CONNECTION; +import static oracle.nosql.driver.util.HttpConstants.CONTENT_LENGTH; +import static oracle.nosql.driver.util.HttpConstants.CONTENT_TYPE; +import static oracle.nosql.driver.util.HttpConstants.REQUEST_ID_HEADER; +import static oracle.nosql.driver.util.HttpConstants.REQUEST_SERDE_VERSION_HEADER; +import static oracle.nosql.driver.util.HttpConstants.USER_AGENT; +import static org.junit.Assert.assertEquals; +import static org.junit.Assume.assumeTrue; + +import java.io.IOException; +import java.util.Properties; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.locks.Condition; +import java.util.concurrent.locks.ReentrantLock; + +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Rule; +import org.junit.rules.TestRule; +import org.junit.rules.TestWatcher; +import org.junit.runner.Description; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.channel.ChannelHandlerContext; +import io.netty.handler.codec.http.DefaultFullHttpRequest; +import io.netty.handler.codec.http.FullHttpRequest; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpHeaders; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpResponseStatus; +import io.netty.handler.codec.http.HttpVersion; +import io.netty.util.internal.logging.InternalLoggerFactory; +import io.netty.util.internal.logging.JdkLoggerFactory; +import oracle.kv.util.kvlite.KVLite; +import oracle.nosql.common.contextlogger.LogContext; +import oracle.nosql.driver.NoSQLException; +import 
oracle.nosql.driver.RequestTimeoutException; +import oracle.nosql.driver.ops.GetTableRequest; +import oracle.nosql.driver.ops.PrepareRequest; +import oracle.nosql.driver.ops.QueryRequest; +import oracle.nosql.driver.ops.Request; +import oracle.nosql.driver.ops.Result; +import oracle.nosql.driver.ops.TableLimits; +import oracle.nosql.driver.ops.TableRequest; +import oracle.nosql.driver.ops.TableResult; +import oracle.nosql.driver.ops.TableResult.State; +import oracle.nosql.driver.ops.serde.BinaryProtocol; +import oracle.nosql.driver.ops.serde.BinarySerializerFactory; +import oracle.nosql.driver.ops.serde.Serializer; +import oracle.nosql.driver.ops.serde.SerializerFactory; +import oracle.nosql.driver.query.QueryDriver; +import oracle.nosql.driver.util.ByteInputStream; +import oracle.nosql.driver.util.ByteOutputStream; +import oracle.nosql.driver.util.NettyByteInputStream; +import oracle.nosql.driver.util.NettyByteOutputStream; +import oracle.nosql.driver.util.SerializationUtil; +import oracle.nosql.proxy.audit.ProxyAuditManager; +import oracle.nosql.proxy.kv.KVTenantManager; +import oracle.nosql.proxy.sc.LocalTenantManager; +import oracle.nosql.proxy.sc.TenantManager; +import oracle.nosql.proxy.security.AccessChecker; +import oracle.nosql.proxy.security.AccessCheckerFactory; +import oracle.nosql.proxy.util.KVLiteBase; +import oracle.nosql.util.tmi.TableRequestLimits; +import oracle.nosql.util.tmi.TenantLimits; + +/** + * This is a test base class that creates a kvlite instance and + * a proxy that runs in-process without an HTTP server. + * Test cases look like this: + * PutRequest putRequest = new PutRequest() + * .setValue(value) + * .setTableName(tableName); + * PutResult pres = (PutResult) doRequest(putRequest, ....); + * + * The doRequest method is the same as the various methods on the SDK's + * NoSQLHandle but abstracted to handle different serialization mechanisms and + * protocols. + */ +public class ServerlessTestBase extends KVLiteBase { + + /* + * Proxy state + */ + protected static boolean SECURITY_ENABLED = false; + protected final static String TEST_TENANT_ID = + System.getProperty("tenant.id", "ProxyTestTenant"); + protected static String PROXY_ASYNC_PROP = "test.async"; + protected static String PROXY_ERROR_LIMITING_PROP = "test.errorlimiting"; + protected static String KVLITE_USETHREADS_PROP = "test.usethreads"; + protected static String KVLITE_MULTISHARD_PROP = "test.multishard"; + protected static String KVLITE_MEMORYMB_PROP = "test.memorymb"; + + /* + * Tests don't need or use peak throughput information. Ideally there would + * be a way to disable it. + */ + protected static final int PEAK_THROUGHPUT_COLLECTION_PERIOD_DEFAULT_SEC = + Integer.MAX_VALUE; + protected static final int PEAK_THROUGHPUT_DEFAULT_TTL_DAY = 1; + + /* + * Operation throttling constants. 0 means reset to the default in the SC. + * A small number eliminates throttling by allowed an operation for every + * millisecond in that number (1 = 1 op/ms, 1000 means 1 op/second). 
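+     * For example, NO_OP_THROTTLE (1) effectively disables operation
+     * throttling for a test, while DEFAULT_OP_THROTTLE (0) restores the
+     * SC's default rate.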
+ */ + protected static final int NO_OP_THROTTLE = 1; + protected static final int DEFAULT_OP_THROTTLE = 0; + + protected static String hostName = getHostName(); + protected static final int startPort = 13240; + protected static KVLite kvlite; + protected static Proxy proxy; + protected static TenantManager tm = null; + protected static AccessChecker ac = null; + protected static ProxyAuditManager audit = null; + + /* set to true if running against an existing cloud proxy */ + protected static boolean onprem = false; + protected static boolean verbose = false; + protected static boolean multishard = false; + protected static int memoryMB = 0; + protected static boolean isAsync = Boolean.getBoolean(PROXY_ASYNC_PROP); + + protected static DataService dataService; + protected static TableLimits tableLimits = new TableLimits(10, 20, 1); + protected static LogContext lc; + + /* make these available to V4 tests */ + + protected static short v3ProtocolVersion = 3; + protected static SerializerFactory binaryFactory = + new BinarySerializerFactory(); + protected static RequestSerializer binarySerializer = + new BinaryRequestSerializer(); + protected static ResponseDeserializer binaryDeserializer = + new BinaryResponseDeserializer(); + + /* + * An instance with non-default limits to make tests run reasonably + */ + protected static TenantLimits tenantLimits = + TenantLimits.getNewDefault(); + static { + tenantLimits.setNumTables(10) + /* + * NOTE: the per-table read/write limits need to be >= + * 1/2 of the per-tenant limit in order for + * LimitsTest to work correctly. + * See testTableProvisioningLimits. + */ + .setDdlRequestsRate(400) + .setTableLimitReductionsRate(50) + .setNumFreeTables(3) + .setNumAutoScalingTables(3) + .setBillingModeChangeRate(2); + TableRequestLimits tableLimits = tenantLimits.getStandardTableLimits(); + tableLimits.setTableReadUnits(90000) + .setTableWriteUnits(30000) + .setSchemaEvolutions(6); + } + + protected static boolean TEST_TRACE = Boolean.getBoolean("test.trace"); + + @Rule + public final TestRule watchman = new TestWatcher() { + + @Override + protected void starting(Description description) { + if (TEST_TRACE) { + System.out.println("Starting test: " + + description.getMethodName()); + } + } + }; + + @BeforeClass + public static void staticSetUp() + throws Exception { + + assumeTrue("Skipping serverless tests for minicloud or cloud runs", + !Boolean.getBoolean("usemc") && + !Boolean.getBoolean("usecloud")); + + staticSetUp(tenantLimits); + } + + public static void staticSetUp(TenantLimits tl) + throws Exception { + doStaticSetup(); + + startup(tl); + } + + @AfterClass + public static void staticTearDown() + throws Exception { + + if (proxy != null) { + proxy.shutdown(3, TimeUnit.SECONDS); + } + + if (tm != null) { + tm.close(); + } + + if (kvlite != null) { + kvlite.stop(false); + } + + cleanupTestDir(); + } + + @Before + public void setUp() throws Exception { + /* RequestContext must use a couple of local overloads */ + dataService.setRequestContextFactory(new ServerlessContextFactory()); + lc = proxy.generateLogContext("none"); + } + + @After + public void tearDown() throws Exception { + } + + protected static void startup() throws Exception { + startup(tenantLimits); + } + + protected static void startup(TenantLimits pTenantLimts) + throws Exception { + + /* + * Determine if running against an existing cloud proxy such as the + * MiniCloud. If so, don't start KVLite or a proxy or the aggregation + * service. 
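+         * In that case the tests run against the already-running service.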
Also check for proxy host and port set in system properties + * to override the defaults. + */ + onprem = Boolean.getBoolean("onprem"); + verbose = Boolean.getBoolean("test.verbose"); + + if (verbose) { + System.out.println("Starting tests in verbose output mode"); + } + + Boolean securityEnabled = Boolean.getBoolean("security"); + if (securityEnabled) { + SECURITY_ENABLED = true; + } + + cleanupTestDir(); + + multishard = Boolean.getBoolean(KVLITE_MULTISHARD_PROP); + memoryMB = Integer.getInteger(KVLITE_MEMORYMB_PROP, 0); + boolean useThreads = Boolean.getBoolean(KVLITE_USETHREADS_PROP); + if (useThreads) { + multishard = false; + if (verbose) { + System.out.println("Starting kvlite using threads in this jvm"); + } + } else if (verbose) { + System.out.println("Starting kvlite using separate jvm process, " + + "multishard=" + multishard + + ", memoryMB=" + memoryMB); + } + + kvlite = startKVLite(hostName, + null, /* default store name */ + useThreads, + verbose, + multishard, + memoryMB, + false); /* secured */ + + /* + * Set Netty to use JDK logger factory. + * + * Since 19.1, KV added slf4j-api.jar on the class path. By default, + * Netty tries to instantiate slf4j logger first then JDK logger, so + * it will use slf4j-api by default because of that KV change. However, + * slf4j needs additional implementation jar to do actual logging, + * otherwise, it will only produce NOP warnings. + */ + InternalLoggerFactory.setDefaultFactory(JdkLoggerFactory.INSTANCE); + + /* + * Use Properties to create a Config object for Proxy + */ + Properties commandLine = new Properties(); + commandLine.setProperty(Config.STORE_NAME.paramName, + getStoreName()); + + commandLine.setProperty(Config.HELPER_HOSTS.paramName, + (hostName + ":" + getKVPort())); + + + Config.ProxyType ptype = (onprem ? 
Config.ProxyType.KVPROXY : + Config.ProxyType.CLOUDTEST); + + commandLine.setProperty(Config.PROXY_TYPE.paramName, ptype.name()); + + commandLine.setProperty(Config.VERBOSE.paramName, + Boolean.toString(verbose)); + commandLine.setProperty(Config.ASYNC.paramName, Boolean.toString( + Boolean.getBoolean(PROXY_ASYNC_PROP))); + + /* Error limiting configs */ + commandLine.setProperty(Config.ERROR_LIMITING_ENABLED.paramName, + Boolean.toString( + Boolean.getBoolean(PROXY_ERROR_LIMITING_PROP))); + + /* create config from commandLine properties */ + Config cfg = new Config(commandLine); + + /* create an appropriate TenantManager */ + if (onprem) { + /* note: in KVPROXY mode the proxy *requires* a KVTenantManager */ + tm = KVTenantManager.createTenantManager(cfg); + } else { + tm = LocalTenantManager.createTenantManager(cfg); + } + + /* create a simple access checker */ + ac = AccessCheckerFactory.createInsecureAccessChecker(); + + /* this creates and starts a proxy without the netty server */ + proxy = new Proxy(cfg, tm, ac, audit); + proxy.startServer(null, false); + dataService = (DataService) proxy.getService("ProxyData"); + } + + static TableResult tableOp(String statement, + TableLimits limits, + String tableName, + RequestSerializer ser, + ResponseDeserializer deser, + short protocol) { + + TableRequest tableRequest = new TableRequest() + .setStatement(statement) + .setTableLimits(tableLimits) + .setTableName(tableName); + TableResult res = + (TableResult) doRequest(tableRequest, ser, deser, protocol); + return waitForCompletion(res.getTableName(), + res.getOperationId(), + ser, deser, protocol); + } + + static TableResult waitForCompletion(String tableName, + String opId, + RequestSerializer ser, + ResponseDeserializer deser, + short protocol) { + + TableResult res = getTable(tableName, opId, ser, deser, protocol); + TableResult.State state = res.getTableState(); + while (!isTerminal(state)) { + try { + Thread.sleep(500); + } catch (Exception e) {} // ignore + res = getTable(tableName, opId, ser, deser, protocol); + state = res.getTableState(); + } + return res; + } + + static TableResult getTable(String tableName, String opId, + RequestSerializer ser, + ResponseDeserializer deser, + short protocol) { + + GetTableRequest getTable = + new GetTableRequest().setTableName(tableName). + setOperationId(opId); + return (TableResult) doRequest(getTable, ser, deser, protocol); + } + + static boolean isTerminal(TableResult.State state) { + return state == State.ACTIVE || state == State.DROPPED; + } + + interface RequestSerializer { + ByteBuf serialize(Request request) throws IOException; + } + + interface ResponseDeserializer { + Result deserialize(Request request, ServerlessContext ctx) + throws IOException; + } + + static Result doV3Request(Request request) { + return doRequest(request, binarySerializer, + binaryDeserializer, v3ProtocolVersion); + } + + /** + * Perform the request using direct proxy calls: + * 1. serialize the request + * 2. create a FullHttpRequest to encapsulate the payload + * 3. add HTTP headers expected by proxy + * 4. call the proxy's DataService.handleRequest method directly using + * a local ServerlessContext instance as an opaque reference so that the + * finishOp() method can grab the response + * 5. 
deserialize the response which is in the ServerlessContext object + */ + static Result doRequest(Request req, + RequestSerializer ser, + ResponseDeserializer deser, + short protocol) { + + try { + ServerlessContext ctx = new ServerlessContext(); + ByteBuf content = ser.serialize(req); + FullHttpRequest request = + new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, + HttpMethod.POST, + "http://direct", /* no URI */ + content); + /* infer version header from protocol */ + String versionHeader = (protocol >= 4 ? "v4" : null); + addRequestHeaders(req, request, versionHeader); + /* + * do the op + * NOTE: handleRequest always returns null. Responses are handled + * via DataService.finishOp() + */ + dataService.handleRequest(request, + null, + lc, + ctx); + + /* if not using an async proxy this is a no-op */ + ctx.await(req.getTimeoutInternal()); + + /* release request buffer */ + content.release(); + + return deser.deserialize(req, ctx); + } catch (IOException ioe) { + throw new NoSQLException("Exception in doRequest", ioe); + } + } + + static void addRequestHeaders(Request request, + FullHttpRequest httpRequest, + String versionHeader) { + HttpHeaders headers = httpRequest.headers(); + headers.set(CONTENT_TYPE, "application/octet-stream") + .set(CONNECTION, "keep-alive") + .set(ACCEPT, "application/octet-stream") + .set(USER_AGENT, "direct") + .set(REQUEST_ID_HEADER, "no-id") + .set(CONTENT_LENGTH, httpRequest.content().readableBytes()); + if (!onprem) { + headers.set("Authorization", "Bearer nobody"); + } + if (versionHeader != null) { + headers.set(REQUEST_SERDE_VERSION_HEADER, versionHeader); + } + } + + /** + * Serialize a Request returning a new ByteBuf + */ + static class BinaryRequestSerializer implements RequestSerializer { + @Override + public ByteBuf serialize(Request req) throws IOException { + ByteBuf content = Unpooled.buffer(); + ByteOutputStream bos = new NettyByteOutputStream(content); + bos.writeShort(v3ProtocolVersion); + Serializer ser = req.createSerializer(binaryFactory); + if (req instanceof QueryRequest || + req instanceof PrepareRequest) { + ser.serialize(req, + v3ProtocolVersion, + QueryDriver.QUERY_V3, + bos); + } else { + ser.serialize(req, + v3ProtocolVersion, + bos); + } + + return content; + } + } + + /** + * Deserialize response + */ + static class BinaryResponseDeserializer implements ResponseDeserializer { + + @Override + public Result deserialize(Request req, + ServerlessContext ctx) throws IOException { + + assertEquals(HttpResponseStatus.OK, ctx.status); + ByteBuf content = ctx.content; + try (ByteInputStream bis = new NettyByteInputStream(content)) { + int code = bis.readByte(); + if (code == 0) { + Result res; + Serializer ser = req.createDeserializer(binaryFactory); + if (req instanceof QueryRequest || + req instanceof PrepareRequest) { + res = ser.deserialize(req, + bis, + v3ProtocolVersion, + QueryDriver.QUERY_V3); + } else { + res = ser.deserialize(req, + bis, + v3ProtocolVersion); + } + return res; + } + String err = SerializationUtil.readString(bis); + throw BinaryProtocol.mapException(code, err); + } finally { + /* release the response buffer */ + content.release(); + } + } + } + + /* + * Used to get the proxy's response which would normally be sent + * directly to the client + */ + static class ServerlessContextFactory + implements DataServiceHandler.RequestContextFactory { + + @Override + public DataServiceHandler.RequestContext createRequestContext( + FullHttpRequest request, + ChannelHandlerContext ctx, + LogContext lc, + Object callerContext) { + 
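+            /*
+             * Return an anonymous RequestContext whose finishOp() hands the
+             * proxy's HTTP response to the ServerlessContext passed in as
+             * callerContext; there is no Netty channel to write it to in
+             * these in-process tests.
+             */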
+ return new DataServiceHandler.RequestContext( + request, ctx, lc, callerContext) { + + @Override + public void finishOp(FullHttpResponse response) { + if (callerContext instanceof ServerlessContext) { + ((ServerlessContext) callerContext).setResponse(response); + } + } + + @Override + public void resetBuffers() { + if (bbis != null) { + bbis.buffer().readerIndex(inputOffset); + } + if (bbos == null) { + ByteBuf resp = Unpooled.buffer(); + bbos = new oracle.nosql.proxy.protocol.ByteOutputStream( + resp); + } + bbos.buffer().writerIndex(0); + } + }; + } + } + + /** + * holds http response info for deserialization + */ + static class ServerlessContext { + public ByteBuf content; + public HttpResponseStatus status; + private ReentrantLock lock; + private Condition cond; + + public ServerlessContext() { + lock = new ReentrantLock(); + cond = lock.newCondition(); + } + + public void await(int timeoutMs) { + if (lock == null) { + return; // nothing to do, not async + } + if (timeoutMs == 0) { + timeoutMs = 5000; // default + } + lock.lock(); + try { + while (status == null) { + boolean ok = + cond.await(timeoutMs, TimeUnit.MILLISECONDS); + if (!ok) { + /* timeout */ + throw new RequestTimeoutException( + "Operation timed out after " + timeoutMs + + " milliseconds"); + } + } + } catch (InterruptedException ie) { + throw new IllegalStateException("Timeout waiting for response"); + } finally { + lock.unlock(); + } + } + + public void setResponse(FullHttpResponse response) { + if (lock == null) { + this.status = response.status(); + this.content = response.content(); + } else { + lock.lock(); + this.status = response.status(); + this.content = response.content(); + try { + cond.signal(); + } finally { + lock.unlock(); + } + } + } + } + + protected static String genString(int len) { + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < len; i++) { + sb.append((char)('A' + i % 26)); + } + return sb.toString(); + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/SslTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/SslTest.java new file mode 100644 index 00000000..c10cd25f --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/SslTest.java @@ -0,0 +1,162 @@ +/*- + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * See the file LICENSE for redistribution information. + * + */ + +package oracle.nosql.proxy; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; + +import java.net.URL; + +import org.junit.BeforeClass; +import org.junit.Test; + +import oracle.nosql.driver.NoSQLHandle; +import oracle.nosql.driver.NoSQLHandleConfig; +import oracle.nosql.driver.TimeToLive; +import oracle.nosql.driver.ops.PutRequest; +import oracle.nosql.driver.ops.PutResult; +import oracle.nosql.driver.ops.QueryRequest; +import oracle.nosql.driver.ops.QueryResult; +import oracle.nosql.driver.ops.TableLimits; +import oracle.nosql.driver.ops.TableResult; +import oracle.nosql.driver.values.MapValue; +import oracle.nosql.proxy.security.SecureTestUtil; + +public class SslTest extends ProxyTestBase { + + @BeforeClass + public static void staticSetUp() + throws Exception { + USE_SSL_HOOK = true; + ProxyTestBase.staticSetUp(); + USE_SSL_HOOK = false; + } + + @Override + public void setUp() throws Exception { + /* + * skip this test if onprem. 
The proxy equates SSL with secure, + * and expects an auth string, which is not presented + */ + org.junit.Assume.assumeTrue(!onprem); + super.setUp(); + } + + /* + * Use SSL handle + */ + @Override + protected NoSQLHandle configHandle(String endpoint) { + if (cloudRunning) { + return super.configHandle(endpoint); + } + NoSQLHandleConfig hconfig = + new NoSQLHandleConfig(getProxyHttpsEndpoint()); + return setupHandle(hconfig); + } + + @Test + public void sslTest() + throws Exception { + + /* SSL is not tested in cloud environments. See ProxyTestBase.setUp() */ + if (sslHandle == null) { + return; + } + sslTest(sslHandle); + + URL url = new URL("https", getProxyHost(), getProxyHttpsPort(), "/"); + + /* + * Get another handle with SSL configuration parameters to exercise + * a handle with them configured. + */ + sslTest(getSslHandle(url)); + + /* + * Test TLSv1.3 + */ + sslTest(getTls13SslHandle(url)); + } + + private NoSQLHandle getSslHandle(URL url) { + NoSQLHandleConfig hconfig = new NoSQLHandleConfig(url); + + /* + * 5 retries, default retry algorithm + */ + hconfig.configureDefaultRetryHandler(5, 0); + hconfig.setRequestTimeout(30000); + + hconfig.setSSLCipherSuites( + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"); + hconfig.setSSLSessionCacheSize(100); + hconfig.setSSLSessionTimeout(1200); + + SecureTestUtil.setAuthProvider(hconfig, isSecure(), + onprem, getTenantId()); + return getHandle(hconfig); + } + + private NoSQLHandle getTls13SslHandle(URL url) { + NoSQLHandleConfig hconfig = new NoSQLHandleConfig(url); + + /* + * 5 retries, default retry algorithm + */ + hconfig.configureDefaultRetryHandler(5, 0); + hconfig.setRequestTimeout(30000); + + hconfig.setSSLProtocols("TLSv1.3"); + + /* set TLSv1.3 cipher suite */ + hconfig.setSSLCipherSuites("TLS_AES_128_GCM_SHA256"); + hconfig.setSSLSessionCacheSize(100); + hconfig.setSSLSessionTimeout(1200); + + SecureTestUtil.setAuthProvider(hconfig, isSecure(), + onprem, getTenantId()); + return getHandle(hconfig); + } + + private void sslTest(NoSQLHandle testHandle) { + MapValue value = new MapValue().put("id", 10).put("name", "jane"); + + TableResult tres = tableOperation( + testHandle, + "create table if not exists users(id integer, " + + "name string, primary key(id))", + new TableLimits(500, 500, 50), + TableResult.State.ACTIVE, + 10000); + assertEquals(TableResult.State.ACTIVE, tres.getTableState()); + + /* PUT */ + PutRequest putRequest = new PutRequest() + .setValue(value) + .setTableName("users"); + + PutResult res = testHandle.put(putRequest); + assertNotNull("Put failed", res.getVersion()); + assertWriteKB(res); + /* put a few more. set TTL to test that path */ + putRequest.setTTL(TimeToLive.ofHours(2)); + for (int i = 20; i < 30; i++) { + value.put("id", i); + testHandle.put(putRequest); + } + + /* QUERY */ + QueryRequest queryRequest = + new QueryRequest().setStatement("select * from users"); + + QueryResult queryRes = testHandle.query(queryRequest); + assertEquals(11, queryRes.getResults().size()); + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/SyncLatencyTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/SyncLatencyTest.java new file mode 100644 index 00000000..8b5424dd --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/SyncLatencyTest.java @@ -0,0 +1,115 @@ +/*- + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * See the file LICENSE for redistribution information. 
+ * + */ + +package oracle.nosql.proxy; + +import static org.junit.Assume.assumeTrue; + +import org.junit.Test; +import org.junit.BeforeClass; + + +/** + * Verify that latencies remain stable for asynchronous operations when the + * number of concurrent requests is higher then the number of proxy + * worker threads. + * + * These tests only runs against a local server and not minicloud. + * + * The tests use a KVLite that has a test hook that injects long + * latencies into all requests + */ +public class SyncLatencyTest extends LatencyTestBase { + + /* + * This test manages its own kvlite/proxy startup to control specific + * setup properties to allow for a test hook that injects latency into + * kvlite, and start proxy with only 2 worker threads + * + * note this hides the superclass static method so it won't be called + */ + @BeforeClass + public static void staticSetUp() + throws Exception { + + // this test doesn't run on minicloud or cloud test + assumeTrue("Skip SyncLatencyTest in minicloud or cloud test", + !Boolean.getBoolean("usemc") && + !Boolean.getBoolean("usecloud")); + + latencySetUp(false /*useAsync*/, 100 /*delayMs*/); + } + + @Test + public void testSyncGetPutLatency() throws Exception { + + // skip this test if running on minicloud + assumeTrue(cloudRunning == false); + + // without async, we should see significantly higher latencies + // when using more client threads than proxy threads + testLatency("syncGetPutLatency", + 3 /*readThreads*/, + 3 /*writeThreads*/, + 3 /*rwThreads*/, + 0 /*qThreads*/, + 10 /*runSeconds*/, + 250 /*minReadLatencyMs*/, + 1000 /*maxReadLatencyMs*/, + 250 /*minWriteLatencyMs*/, + 1000 /*maxWriteLatencyMs*/, + 0 /*minQueryLatencyMs*/, + 0 /*maxQueryLatencyMs*/); + } + + + @Test + public void testSyncQueryLatency() throws Exception { + + // skip this test if running on minicloud + assumeTrue(cloudRunning == false); + + // without async, we should see significantly higher latencies + // when using more client threads than proxy threads + testLatency("syncQueryLatency", + 0 /*readThreads*/, + 0 /*writeThreads*/, + 0 /*rwThreads*/, + 8 /*qThreads*/, + 10 /*runSeconds*/, + 0 /*minReadLatencyMs*/, + 0 /*maxReadLatencyMs*/, + 0 /*minWriteLatencyMs*/, + 0 /*maxWriteLatencyMs*/, + 250 /*minQueryLatencyMs*/, + 1000 /*maxQueryLatencyMs*/); + } + + + @Test + public void testSyncGetPutQueryLatency() throws Exception { + + // skip this test if running on minicloud + assumeTrue(cloudRunning == false); + + // without async, we should see significantly higher latencies + // when using more client threads than proxy threads + testLatency("syncGetPutQueryLatency", + 2 /*readThreads*/, + 2 /*writeThreads*/, + 2 /*rwThreads*/, + 4 /*qThreads*/, + 10 /*runSeconds*/, + 250 /*minReadLatencyMs*/, + 1000 /*maxReadLatencyMs*/, + 250 /*minWriteLatencyMs*/, + 1000 /*maxWriteLatencyMs*/, + 250 /*minQueryLatencyMs*/, + 1000 /*maxQueryLatencyMs*/); + } + +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/TableCacheTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/TableCacheTest.java new file mode 100644 index 00000000..67f7159c --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/TableCacheTest.java @@ -0,0 +1,350 @@ +/*- + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle NoSQL + * Database made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/nosqldb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle NoSQL Database for a copy of the license and + * additional information. + */ + +package oracle.nosql.proxy; + +import static oracle.nosql.proxy.protocol.Protocol.SERVICE_UNAVAILABLE; +import static oracle.nosql.proxy.protocol.Protocol.TABLE_NOT_FOUND; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import oracle.kv.impl.api.KVStoreImpl; +import oracle.kv.impl.api.table.TableAPIImpl; +import oracle.nosql.common.contextlogger.LogContext; +import oracle.nosql.common.sklogger.SkLogger; +import oracle.nosql.proxy.util.TestBase; +import oracle.nosql.proxy.util.TableCache; +import org.junit.Test; + +public class TableCacheTest extends TestBase { + /* + * Creates a cache where entries expire after 1 second of inactivity, + * and are refreshed every 250ms + */ + private static long expirationMs = 1000L; + private static long refreshMs = 250L; + + private void checkEntryRefresh(TestCache cache, + String namespace, + String tableName) { + TableCache.TableEntry entry = + cache.get(namespace, tableName, null); + assertNotNull(entry); + long minRefTime = System.currentTimeMillis(); + /* entry should have been refreshed within the last */ + /* refreshMs, with a bit of room for thread scheduling */ + minRefTime -= (refreshMs + 60); + assertTrue(entry.getLastRefresh() > minRefTime); + } + + private void checkRefreshLess(long sTime, TableCache.TableEntry entry) { + assertNotNull(entry); + if (entry.getLastRefresh() >= sTime) { + fail("Expected refresh time less than " + sTime + ", got " + + entry.getLastRefresh()); + } + } + + @Test + public void testCache() throws Exception { + + TestCache cache = new TestCache(expirationMs, refreshMs); + + cache.getTableEntry("ns", "mytable", null); + Thread.sleep(50); + cache.getTableEntry("ns", "mytable1", null); + Thread.sleep(50); + cache.getTableEntry(null, "mytable", null); + Thread.sleep(50); + cache.getTableEntry(null, "mytable1", null); + + /* were they all cached? */ + assertEquals(4, cache.getCacheSize()); + + /* let refresh happen at least once */ + Thread.sleep(refreshMs); + + /* check that they all still exist */ + assertEquals(4, cache.getCacheSize()); + + /* check that all have been refreshed */ + checkEntryRefresh(cache, null, "mytable"); + checkEntryRefresh(cache, null, "mytable1"); + checkEntryRefresh(cache, "ns", "mytable"); + checkEntryRefresh(cache, "ns", "mytable1"); + + /* get all entries again */ + cache.getTableEntry("ns", "mytable", null); + cache.getTableEntry("ns", "mytable1", null); + cache.getTableEntry(null, "mytable", null); + cache.getTableEntry(null, "mytable1", null); + + /* + * The above accesses will reset the inactivity time for each. + * .5 seconds and verify none have been removed (expired). 
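+         * (Entries expire after expirationMs = 1000 ms of inactivity, so a
+         * 500 ms sleep after touching every entry should not expire any of
+         * them.)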
+ */ + Thread.sleep(500); + assertEquals(4, cache.getCacheSize()); + + /* + * get the first/last entry again + */ + cache.getTableEntry("ns", "mytable", null); + cache.getTableEntry(null, "mytable1", null); + + /* + * wait till two should have been removed + */ + Thread.sleep(600); + assertEquals(2, cache.getCacheSize()); + + /* check that the two not removed have recently been refreshed */ + checkEntryRefresh(cache, "ns", "mytable"); + checkEntryRefresh(cache, null, "mytable1"); + + /* remove one entry */ + cache.flushEntry("ns", "mytable"); + assertEquals(1, cache.getCacheSize()); + + /* do activity on the last entry */ + cache.getTableEntry(null, "mytable1", null); + + /* wait a bit longer */ + Thread.sleep(300); + + /* verify it's refreshed */ + checkEntryRefresh(cache, null, "mytable1"); + cache.shutDown(); + } + + @Test + public void testServiceUnavailable() throws Exception { + + TestCache cache = new TestCache(expirationMs, refreshMs); + + cache.getTableEntry("ns", "mytable", null); + /* simulate service down/unavailable: verify record not removed */ + cache.throwSU = true; + /* wait long enough to have refreshed entry */ + Thread.sleep(refreshMs + 60); + /* verify record still exists */ + assertNotNull(cache.get("ns", "mytable", null)); + /* delete entry */ + cache.flushEntry("ns", "mytable"); + /* verify trying to get entry throws SU exception */ + try { + cache.getTableEntry("ns", "mytable", null); + fail("Should have thrown RequestException"); + } catch (RequestException re) { + assertEquals(re.getErrorCode(), SERVICE_UNAVAILABLE); + } catch (Exception e) { + fail("Got unexpected exception: " + e); + } + /* check normal mode still works */ + cache.throwSU = false; + assertNotNull(cache.getTableEntry("ns", "mytable", null)); + cache.shutDown(); + } + + @Test + public void testTableNotFound() throws Exception { + + TestCache cache = new TestCache(expirationMs, refreshMs); + + cache.getTableEntry("ns", "mytable", null); + /* simulate table not found: verify record removed */ + cache.throwNF = true; + /* wait long enough to have refreshed entry */ + Thread.sleep(refreshMs + 60); + /* verify record still removed */ + assertNull(cache.get("ns", "mytable", null)); + /* verify trying to get entry throws NF exception */ + try { + cache.getTableEntry("ns", "mytable", null); + fail("Should have thrown RequestException"); + } catch (RequestException re) { + assertEquals(re.getErrorCode(), TABLE_NOT_FOUND); + } catch (Exception e) { + fail("Got unexpected exception: " + e); + } + /* check normal mode still works */ + cache.throwNF = false; + assertNotNull(cache.getTableEntry("ns", "mytable", null)); + cache.shutDown(); + } + + @Test + public void testUnknownException() throws Exception { + + TestCache cache = new TestCache(expirationMs, refreshMs); + + cache.getTableEntry("ns", "mytable", null); + /* simulate unknown exception: verify record removed */ + cache.throwUnknown = true; + /* wait long enough to have refreshed entry */ + Thread.sleep(refreshMs + 60); + /* verify record removed */ + assertNull(cache.get("ns", "mytable", null)); + /* verify trying to get entry throws unknown exception */ + try { + cache.getTableEntry("ns", "mytable", null); + fail("Should have thrown Exception"); + } catch (RequestException re) { + fail("Got unexpected exception: " + re); + } catch (Exception e) { + /* success */ + } + /* check normal mode still works */ + cache.throwUnknown = false; + assertNotNull(cache.getTableEntry("ns", "mytable", null)); + cache.shutDown(); + } + + @Test + public void 
testValidActivity() throws Exception { + + TestCache cache = new TestCache(expirationMs, refreshMs); + cache.getTableEntry("ns", "mytable", null); + /* + * verify that entry stays in cache as long as we keep + * accessing it within the inactivity time + */ + for (int x=0; x<10; x++) { + /* entry should exist without fetching again */ + TableCache.TableEntry entry = + cache.get("ns", "mytable", null); + assertNotNull(entry); + /* simulate "activity" */ + entry.setLastUsed(System.currentTimeMillis()); + Thread.sleep(300); + } + + cache.shutDown(); + } + + @Test + public void testNoRefresh() throws Exception { + /* + * Test expiration without refresh + */ + TestCache cache = new TestCache(500L, 0L); + cache.getTableEntry("ns", "mytable", null); + cache.getTableEntry("ns", "mytable1", null); + cache.getTableEntry(null, "mytable", null); + cache.getTableEntry(null, "mytable1", null); + long sTime = System.currentTimeMillis() + 1; + + Thread.sleep(400); + /* no entries should have been refreshed */ + checkRefreshLess(sTime, cache.get("ns", "mytable", null)); + checkRefreshLess(sTime, cache.get("ns", "mytable1", null)); + checkRefreshLess(sTime, cache.get(null, "mytable", null)); + checkRefreshLess(sTime, cache.get(null, "mytable1", null)); + + Thread.sleep(300); + /* cache should be empty after expiration */ + assertEquals(0, cache.getCacheSize()); + cache.shutDown(); + } + + @Test + public void testBadParams() throws Exception { + /* test bad constructor parameters */ + try { + new TestCache(1L, 1L); + fail("should have thrown"); + } catch (IllegalArgumentException iae) { + assertTrue(iae.getMessage().contains("expiration")); + } + try { + new TestCache(1L, -1L); + fail("should have thrown"); + } catch (IllegalArgumentException iae) { + assertTrue(iae.getMessage().contains("refresh")); + } + } + + public class TestCache extends TableCache { + + /* for specific tests */ + public boolean throwSU = false; + public boolean throwNF = false; + public boolean throwUnknown = false; + + public TestCache(long expirationMs, long refreshMs) { + /* create cache with 50ms thread interval */ + super(new SkLogger("oracle.nosql.proxy", "TestCache"), + expirationMs, refreshMs, 50L); + } + + @Override + protected TableEntry getTable(String namespace, + String tableName, + String nsname, + LogContext lc) { + if (throwSU) { + throw new RequestException(SERVICE_UNAVAILABLE, "unavailable"); + } + if (throwNF) { + throw new RequestException(TABLE_NOT_FOUND, "table not found"); + } + if (throwUnknown) { + throw new RuntimeException("unknown error"); + } + return new TestTableEntry(namespace, tableName, nsname); + } + + @Override + public void shutDown() { + super.shutDown(); + } + + @Override + public KVStoreImpl getStoreByName(String storeName) { + return null; + } + + private class TestTableEntry extends TableEntry { + + private TestTableEntry(String namespace, + String tableName, + String nsname) { + super(null); + } + + @Override + public KVStoreImpl getStore() { + return null; + } + + @Override + public TableAPIImpl getTableAPI() { + return null; + } + + @Override + public String getStoreName() { + return "nostore"; + } + + @Override + public RequestLimits getRequestLimits() { + return null; + } + } + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/TenantLogTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/TenantLogTest.java new file mode 100644 index 00000000..d35e9796 --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/TenantLogTest.java @@ -0,0 +1,392 @@ +/*- + * 
Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle NoSQL + * Database made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/nosqldb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle NoSQL Database for a copy of the license and + * additional information. + */ + +package oracle.nosql.proxy; + +import static oracle.nosql.proxy.protocol.HttpConstants.ENTRYPOINT; +import static oracle.nosql.proxy.protocol.HttpConstants.LOGCONTROL_PATH; +import static oracle.nosql.proxy.protocol.HttpConstants.LOG_LEVEL; +import static oracle.nosql.proxy.protocol.HttpConstants.TENANT_ID; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assume.assumeTrue; + +import java.util.ArrayList; +import java.util.List; +import java.util.logging.Handler; +import java.util.logging.Level; +import java.util.logging.LogRecord; + +import org.junit.BeforeClass; +import org.junit.Test; + +import oracle.kv.impl.api.table.NameUtils; +import oracle.nosql.common.sklogger.SkLogger; +import oracle.nosql.driver.NoSQLHandle; +import oracle.nosql.driver.ops.DeleteRequest; +import oracle.nosql.driver.ops.GetIndexesRequest; +import oracle.nosql.driver.ops.GetRequest; +import oracle.nosql.driver.ops.ListTablesRequest; +import oracle.nosql.driver.ops.MultiDeleteRequest; +import oracle.nosql.driver.ops.PrepareRequest; +import oracle.nosql.driver.ops.PrepareResult; +import oracle.nosql.driver.ops.PutRequest; +import oracle.nosql.driver.ops.QueryRequest; +import oracle.nosql.driver.ops.SystemResult; +import oracle.nosql.driver.ops.TableLimits; +import oracle.nosql.driver.ops.TableResult; +import oracle.nosql.driver.ops.WriteMultipleRequest; +import oracle.nosql.driver.util.BinaryProtocol.OpCode; +import oracle.nosql.driver.values.MapValue; +import oracle.nosql.util.HttpRequest; +import oracle.nosql.util.HttpResponse; + +public class TenantLogTest extends ProxyTestBase { + /* Used to enable LogControl service in KVProxy, just for test purpose */ + private static final String TEST_KV_LOGCONTROL_PROP = "test.kvlogcontrol"; + private final SkLogger logger = new SkLogger(Proxy.class.getName(), + "proxy", "proxy_worker.log"); + private final int WAIT_MS = 20000; + + private final TestLogHandler testLogHandler = new TestLogHandler(); + private final HttpRequest httpRequest = new HttpRequest(); + + @BeforeClass + public static void staticSetUp() throws Exception { + + assumeTrue("Skipping TenantLogTest for minicloud or cloud test", + !Boolean.getBoolean(USEMC_PROP) && + !Boolean.getBoolean(USECLOUD_PROP)); + + if (Boolean.getBoolean("onprem")) { + System.setProperty(TEST_KV_LOGCONTROL_PROP, "true"); + } + ProxyTestBase.staticSetUp(); + } + + @Override + public void setUp() throws Exception { + super.setUp(); + logger.addHandler(testLogHandler); + } + + + @Override + public void tearDown() throws Exception { + if (onprem) { + doSysOp(handle, "drop namespace if exists " + getTenantId()); + } + + logger.getLogger().removeHandler(testLogHandler); + super.tearDown(); + } + + @Test + public void testTenantLog() { + + final String namespace; + if (onprem) { + namespace = getTenantId(); + doSysOp(handle, "create namespace " + namespace); + } else { + namespace = "in.valid.iac.name.space"; + } + + /* Log all calls for specific tenant/namespace at FINE */ + setLogLevel(namespace, 
Level.FINE); + runOps(handle, namespace, true /*logEnabled*/, false /*nullEnabled*/); + + clearLogLevel(namespace, Level.FINE); + runOps(handle, namespace, false /*logEnabled*/, false /*nullEnabled*/); + } + + @Test + public void testNullTenantLog() { + + final String namespace; + if (onprem) { + namespace = getTenantId(); + doSysOp(handle, "create namespace " + namespace); + } else { + namespace = "in.valid.iac.name.space"; + } + + /* Log all calls only when they do not have a valid tenantId */ + setLogLevel("nullTenantId", Level.FINE); + runOps(handle, namespace, false /*logEnabled*/, true /*nullEnabled*/); + + clearLogLevel("nullTenantId", Level.FINE); + runOps(handle, namespace, false /*logEnabled*/, false /*nullEnabled*/); + } + + @Test + public void testDataPath() { + final String namespace; + if (onprem) { + namespace = getTenantId(); + doSysOp(handle, "create namespace " + namespace); + } else { + namespace = "in.valid.iac.name.space"; + } + + /* log all calls to /V2/nosql/data at FINE */ + setDataPathLevel(Level.FINE); + runOps(handle, namespace, true /*logEnabled*/, false /*nullEnabled*/); + + clearDataPathLevel(); + runOps(handle, namespace, false /*logEnabled*/, false /*nullEnabled*/); + } + + private void runOps(NoSQLHandle nosqlHandle, + String namespace, + boolean logEnabled, + boolean nullEnabled) { + + final int ddlWaitMs = WAIT_MS; + final String tableName = (onprem) ? + NameUtils.makeQualifiedName(namespace, "foo") : "foo"; + + testLogHandler.flush(); + + /* create table */ + String ddl = "create table " + tableName + + "(id integer, name string, primary key(id))"; + tableOperation(nosqlHandle, ddl, + (onprem ? null : new TableLimits(10, 10, 1)), ddlWaitMs); + checkLog(namespace, logEnabled, nullEnabled, + OpCode.CREATE_TABLE, OpCode.GET_TABLE); + + /* create Index */ + ddl = "create index idxName on " + tableName + "(name)"; + tableOperation(nosqlHandle, ddl, null, ddlWaitMs); + checkLog(namespace, logEnabled, nullEnabled, + OpCode.CREATE_INDEX, OpCode.GET_TABLE); + + /* alter table */ + ddl = "alter table " + tableName + "(add age integer)"; + tableOperation(nosqlHandle, ddl, null, ddlWaitMs); + checkLog(namespace, logEnabled, nullEnabled, + OpCode.ALTER_TABLE, OpCode.GET_TABLE); + + if (!onprem) { + /* update table limits */ + tableOperation(nosqlHandle, null, new TableLimits(11, 10, 1), + namespace, tableName, null /* matchETag */, + TableResult.State.ACTIVE, ddlWaitMs); + checkLog(namespace, logEnabled, nullEnabled, + OpCode.ALTER_TABLE, OpCode.GET_TABLE); + } + + /* list tables */ + ListTablesRequest listTables = new ListTablesRequest(); + if (onprem) { + listTables.setNamespace(namespace); + } + nosqlHandle.listTables(listTables); + checkLog(namespace, logEnabled, nullEnabled, OpCode.LIST_TABLES); + + /* get indexes */ + GetIndexesRequest getIndexes = new GetIndexesRequest() + .setTableName(tableName); + nosqlHandle.getIndexes(getIndexes); + checkLog(namespace, logEnabled, nullEnabled, OpCode.GET_INDEXES); + + /* drop Index */ + ddl = "drop index if exists idxName on " + tableName; + tableOperation(nosqlHandle, ddl, null, ddlWaitMs); + checkLog(namespace, logEnabled, nullEnabled, + OpCode.DROP_INDEX, OpCode.GET_TABLE); + + /* put */ + MapValue val = new MapValue(); + val.put("id", 1).put("name", "oracle"); + PutRequest put = new PutRequest() + .setTableName(tableName) + .setValue(val); + nosqlHandle.put(put); + checkLog(namespace, logEnabled, nullEnabled, OpCode.PUT); + + /* get */ + GetRequest get = new GetRequest() + .setTableName(tableName) + .setKey(val); + 
nosqlHandle.get(get); + checkLog(namespace, logEnabled, nullEnabled, OpCode.GET); + + /* delete */ + DeleteRequest delete = new DeleteRequest() + .setTableName(tableName) + .setKey(val); + nosqlHandle.delete(delete); + checkLog(namespace, logEnabled, nullEnabled, OpCode.DELETE); + + /* write-multiple */ + WriteMultipleRequest mput = new WriteMultipleRequest() + .add(put, false); + nosqlHandle.writeMultiple(mput); + checkLog(namespace, logEnabled, nullEnabled, OpCode.WRITE_MULTIPLE); + + /* multi-delete */ + MultiDeleteRequest mdel = new MultiDeleteRequest() + .setTableName(tableName) + .setKey(val); + nosqlHandle.multiDelete(mdel); + checkLog(namespace, logEnabled, nullEnabled, OpCode.MULTI_DELETE); + + /* prepare */ + String stmt = "select * from " + tableName; + PrepareRequest prep = new PrepareRequest().setStatement(stmt); + PrepareResult prepRet = nosqlHandle.prepare(prep); + checkLog(namespace, logEnabled, nullEnabled, OpCode.PREPARE); + + /* query */ + QueryRequest query = new QueryRequest().setStatement(stmt); + nosqlHandle.query(query); + checkLog(namespace, logEnabled, nullEnabled, OpCode.GET); + + /* query prepared stmt */ + query = new QueryRequest().setPreparedStatement(prepRet); + nosqlHandle.query(query); + checkLog(namespace, logEnabled, nullEnabled, OpCode.GET); + + /* drop table */ + ddl = "drop table " + tableName; + tableOperation(nosqlHandle, ddl, null, ddlWaitMs); + checkLog(namespace, logEnabled, nullEnabled, + OpCode.DROP_TABLE, OpCode.GET_TABLE); + } + + private void checkLog(String namespace, boolean logEnabled, + boolean nullEnabled, OpCode... ops) { + + if (!logEnabled && !nullEnabled) { + assertTrue(testLogHandler.getRecords().isEmpty()); + return; + } + + if (nullEnabled) { + boolean found = false; + final String expected = "handleRequest(), headers="; + for (LogRecord lr : testLogHandler.getRecords()) { + if (lr.getMessage().contains(expected)) { + found = true; + break; + } + } + assertTrue("Did not find \"" + expected + "\" in log", found); + } + + if (logEnabled) { + final String fmt = " handleRequest, op %s, " + + "namespace %s"; + boolean found; + String line; + for (OpCode op : ops) { + found = false; + line = String.format(fmt, op.name(), namespace); + for (LogRecord lr : testLogHandler.getRecords()) { + verbose("Log line: " + lr.getMessage()); + if (lr.getMessage().contains(line)) { + found = true; + break; + } + } + assertTrue("Did not find \"" + line + "\" in log", found); + } + } + + testLogHandler.flush(); + } + + private void doSysOp(NoSQLHandle handle, String ddl) { + SystemResult ret = handle.doSystemRequest(ddl, WAIT_MS, 1000); + assertEquals(SystemResult.State.COMPLETE, ret.getOperationState()); + } + + private void setDataPathLevel(Level level) { + String url = getDataPathUrl(level); + HttpResponse resp = httpRequest.doHttpPut(url, null); + assertEquals(200, resp.getStatusCode()); + } + + private void clearDataPathLevel() { + String url = getDataPathUrl(null); + HttpResponse resp = httpRequest.doHttpDelete(url, null); + assertEquals(200, resp.getStatusCode()); + } + + private void setLogLevel(String tenant, Level level) { + String url = getLogControlUrl(tenant, level); + HttpResponse resp = httpRequest.doHttpPut(url, null); + assertEquals(200, resp.getStatusCode()); + } + + private void clearLogLevel(String tenant, Level level) { + String url = getLogControlUrl(tenant, level); + HttpResponse resp = httpRequest.doHttpDelete(url, null); + assertEquals(200, resp.getStatusCode()); + } + + private String getLogControlUrl(String tenant, Level 
level) { + return getProxyEndpoint() + "/V2/" + LOGCONTROL_PATH + + "?" + TENANT_ID + "=" + tenant + + "&" + LOG_LEVEL + "=" + level.getName(); + } + + private String getDataPathUrl(Level level) { + /* "POST /V2/nosql/data" == "POST%20%2FV2%2Fnosql%2Fdata" */ + String url = getProxyEndpoint() + "/V2/" + LOGCONTROL_PATH + + "?" + ENTRYPOINT + "=" + "POST%20%2FV2%2Fnosql%2Fdata"; + if (level == null) { + return url; + } + return url + "&" + LOG_LEVEL + "=" + level.getName(); + } + + private static class TestLogHandler extends Handler { + private List records = new ArrayList<>(); + + TestLogHandler() { + setLevel(Level.ALL); + } + + @Override + public void publish(LogRecord lr) { + if (lr.getLevel() == Level.FINE) { + records.add(lr); + } + } + + public List getRecords() { + return records; + } + + @Override + public void flush() { + /* + System.out.println("\n==" + records.size() + "=="); + for (LogRecord lr : records) { + if (lr.getMessage().contains("[updateLogContext]")) { + System.out.println(lr.getLevel() + ": " + lr.getMessage()); + } + } + */ + records.clear(); + } + + @Override + public void close() + throws SecurityException { + } + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/TimeUtils.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/TimeUtils.java new file mode 100644 index 00000000..c0734a38 --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/TimeUtils.java @@ -0,0 +1,120 @@ +package oracle.nosql.proxy; + +import java.text.DateFormat; +import java.text.FieldPosition; +import java.text.ParseException; +import java.text.SimpleDateFormat; +import java.util.Calendar; +import java.util.Date; +import java.util.TimeZone; + +import oracle.nosql.util.fault.ErrorCode; +import oracle.nosql.util.fault.RequestFaultException; + +/** + * A collection of static utility methods for converting between timestamps + * represented as long and as String. + */ +class TimeUtils { + /** + * When specifying timestamps in the API, these formats are accepted. + */ + private static SimpleDateFormat[] dateFormats = { + new SimpleDateFormat("MM-dd-yy'T'HHmmss.SSS"), + new SimpleDateFormat("MM-dd-yy'T'HHmmss"), + new SimpleDateFormat("MM-dd-yy'T'HHmm"), + new SimpleDateFormat("MM-dd-yy"), + new SimpleDateFormat("HHmmss"), + new SimpleDateFormat("HHmm") + }; + + static { + final TimeZone tz = TimeZone.getTimeZone("UTC"); + for (SimpleDateFormat sdf : dateFormats) { + sdf.setTimeZone(tz); + sdf.setLenient(false); + } + } + + private static String getDateFormatsUsage() { + String usage = + "Timestamps can be given in the following formats," + + "which are interpreted in the UTC time zone:"; + + for (SimpleDateFormat sdf : dateFormats) { + usage += " " + sdf.toPattern(); + } + + return usage; + } + + /** + * Apply the above formats in sequence until one of them matches. + * Synchronize on the class object to serialize use of the SDF instances. + */ + public static synchronized long parseTimestamp(String s, String label) { + + if (s == null) { + return 0L; + } + + /* + * Accept long as string as well + */ + try { + return Long.parseLong(s); + } catch (NumberFormatException nfe) { + /* not an error, continue */ + } + + Date r = null; + for (SimpleDateFormat sdf : dateFormats) { + try { + r = sdf.parse(s); + break; + } catch (ParseException pe) /* CHECKSTYLE:OFF */ { + } /* CHECKSTYLE:ON */ + } + + if (r == null) { + throw new RequestFaultException + ("The " + label + " parameter could not be parsed. 
" + + getDateFormatsUsage(), + ErrorCode.ILLEGAL_ARGUMENT); + } + + /* + * If the date parsed is in the distant past (i.e., in January 1970) + * then the string lacked a year/month/day. We'll be friendly and + * interpret the time as being in the recent past, that is, today. + */ + + final Calendar rcal = Calendar.getInstance(TimeZone.getTimeZone("UTC")); + rcal.setTime(r); + + if (rcal.get(Calendar.YEAR) == 1970) { + final Calendar nowCal = Calendar.getInstance(); + nowCal.setTime(new Date()); + + rcal.set(nowCal.get(Calendar.YEAR), + nowCal.get(Calendar.MONTH), + nowCal.get(Calendar.DAY_OF_MONTH)); + + /* If the resulting time is in the future, subtract one day. */ + + if (rcal.after(nowCal)) { + rcal.add(Calendar.DAY_OF_MONTH, -1); + } + r = rcal.getTime(); + } + return r.getTime(); + } + + public synchronized static String getTimeStr(long timestamp) { + final DateFormat f = dateFormats[0]; + + return f.format(new Date(timestamp), + new StringBuffer(), + new FieldPosition(0)).toString(); + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/TimestampTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/TimestampTest.java new file mode 100644 index 00000000..52047c23 --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/TimestampTest.java @@ -0,0 +1,215 @@ +/*- + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * See the file LICENSE for redistribution information. + * + */ + +package oracle.nosql.proxy; + +import static oracle.nosql.driver.util.TimestampUtil.createTimestamp; +import static oracle.nosql.driver.util.TimestampUtil.getNanoSeconds; +import static oracle.nosql.driver.util.TimestampUtil.getSeconds; +import static oracle.nosql.driver.util.TimestampUtil.parseString; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.sql.Timestamp; + +import oracle.nosql.driver.ops.DeleteRequest; +import oracle.nosql.driver.ops.DeleteResult; +import oracle.nosql.driver.ops.GetRequest; +import oracle.nosql.driver.ops.GetResult; +import oracle.nosql.driver.ops.PutRequest; +import oracle.nosql.driver.ops.PutResult; +import oracle.nosql.driver.ops.TableLimits; +import oracle.nosql.driver.ops.TableResult; +import oracle.nosql.driver.values.MapValue; + +import org.junit.Test; + +/** + * Test put/get/delete on a table that contains Timestamp field. 
+ */ +public class TimestampTest extends ProxyTestBase { + private final String tableName = "testTimestamp"; + + private final String createTableDDL = + "create table if not exists " + tableName + "(" + + "ts0 timestamp(0), " + + "ts1 timestamp(1), " + + "ts2 timestamp(2), " + + "ts3 timestamp(3), " + + "ts4 timestamp(4), " + + "ts5 timestamp(5), " + + "ts6 timestamp(6), " + + "ts7 timestamp(7), " + + "ts8 timestamp(8), " + + "ts9 timestamp(9), " + + "primary key(shard(ts0), ts9))"; + + private final Timestamp epoc = new Timestamp(0); + private final Timestamp max = parseString("9999-12-31T23:59:59.999999999"); + private final Timestamp min = parseString("-6383-01-01"); + + @Override + public void setUp() throws Exception { + super.setUp(); + tableOperation(handle, createTableDDL, + new TableLimits(20000, 20000, 50), + TableResult.State.ACTIVE, 10000); + } + + @Test + public void testPutGetDelete() { + + /* A timestamp */ + String text = "2017-07-13T16:48:05.123456789"; + Timestamp ts = parseString(text); + doPutGetDeleteTest(ts); + + /* A timestamp with negative year */ + text = "-117-07-13T01:02:52.987654321"; + ts = parseString(text); + doPutGetDeleteTest(ts); + + /* Use the epoc value: 1970-01-01 */ + doPutGetDeleteTest(epoc); + + /* 1970-01-01T00:00:00.999999999 */ + ts = (Timestamp)epoc.clone(); + ts.setNanos(999999999); + doPutGetDeleteTest(ts); + + /* Use the minimum Timestamp value */ + doPutGetDeleteTest(min); + + /* Use the maximum Timestamp value */ + doPutGetDeleteTest(max); + + /* Timestamp value is less than minimum value, put should fail. */ + long seconds = getSeconds(min); + seconds--; + ts = createTimestamp(seconds, 999999999); + doPutGetDeleteTest(ts, false); + + /* Timestamp value is greater than minimum value, put should fail */ + seconds = getSeconds(max); + seconds++; + ts = createTimestamp(seconds, 0); + doPutGetDeleteTest(ts, false); + } + + private void doPutGetDeleteTest(Timestamp ts) { + doPutGetDeleteTest(ts, true); + } + + private void doPutGetDeleteTest(Timestamp ts, boolean putShouldSucceed) { + /* Put a row */ + MapValue value = new MapValue() + .put("ts0", ts) + .put("ts1", ts) + .put("ts2", ts) + .put("ts3", ts) + .put("ts4", ts) + .put("ts5", ts) + .put("ts6", ts) + .put("ts7", ts) + .put("ts8", ts) + .put("ts9", ts); + + PutRequest putRequest = new PutRequest() + .setValue(value) + .setTableName(tableName); + try { + PutResult putRes = handle.put(putRequest); + if (putShouldSucceed) { + assertNotNull("Put failed", putRes.getVersion()); + assertWriteKB(putRes); + } else { + fail("Put should fail but not"); + } + } catch (Exception ex) { + if (putShouldSucceed) { + fail("Put should succeed but fail"); + } + return; + } + + /* Get the row */ + MapValue key = new MapValue() + .put("ts0", ts) + .put("ts9", ts); + GetRequest getRequest = new GetRequest() + .setKey(key) + .setTableName(tableName); + GetResult getRes = handle.get(getRequest); + value = getRes.getValue(); + assertNotNull("Get failed", value); + assertReadKB(getRes); + + Timestamp val = value.get("ts0").getTimestamp(); + assertTrue("Wrong value of ts0", val.compareTo(roundUp(ts, 0)) == 0); + + val = value.get("ts1").getTimestamp(); + assertTrue("Wrong value of ts1", val.compareTo(roundUp(ts, 1)) == 0); + + val = value.get("ts2").getTimestamp(); + assertTrue("Wrong value of ts2", val.compareTo(roundUp(ts, 2)) == 0); + + val = value.get("ts3").getTimestamp(); + assertTrue("Wrong value of ts3", val.compareTo(roundUp(ts, 3)) == 0); + + val = value.get("ts4").getTimestamp(); + assertTrue("Wrong value 
of ts4", val.compareTo(roundUp(ts, 4)) == 0); + + val = value.get("ts5").getTimestamp(); + assertTrue("Wrong value of ts5", val.compareTo(roundUp(ts, 5)) == 0); + + val = value.get("ts6").getTimestamp(); + assertTrue("Wrong value of ts6", val.compareTo(roundUp(ts, 6)) == 0); + + val = value.get("ts7").getTimestamp(); + assertTrue("Wrong value of ts7", val.compareTo(roundUp(ts, 7)) == 0); + + val = value.get("ts8").getTimestamp(); + assertTrue("Wrong value of ts8", val.compareTo(roundUp(ts, 8)) == 0); + + val = value.get("ts9").getTimestamp(); + assertTrue("Wrong value of ts9", val.compareTo(roundUp(ts, 9)) == 0); + + /* Delete the row */ + DeleteRequest delRequest = new DeleteRequest() + .setKey(key) + .setTableName(tableName); + DeleteResult delRes = handle.delete(delRequest); + assertTrue("Delete failed", delRes.getSuccess()); + } + + /** + * Rounds the fractional second of Timestamp according to the specified + * precision. + */ + private Timestamp roundUp(Timestamp ts, int precision) { + if (precision == 9 || ts.getNanos() == 0) { + return ts; + } + + long seconds = getSeconds(ts); + int nanos = getNanoSeconds(ts); + double base = Math.pow(10, (9 - precision)); + nanos = (int)(Math.round(nanos / base) * base); + if (nanos == (int)Math.pow(10, 9)) { + seconds++; + nanos = 0; + } + Timestamp ts1 = createTimestamp(seconds, nanos); + if (ts1.compareTo(max) > 0 ) { + ts1 = (Timestamp)max.clone(); + nanos = (int)((int)(ts.getNanos() / base) * base); + ts1.setNanos((int)((int)(ts.getNanos() / base) * base)); + } + return ts1; + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/WarmupTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/WarmupTest.java new file mode 100644 index 00000000..b400eb53 --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/WarmupTest.java @@ -0,0 +1,416 @@ +/*- + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * See the file LICENSE for redistribution information. 
+ * + */ + +package oracle.nosql.proxy; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeTrue; + +import java.io.BufferedReader; +import java.io.BufferedWriter; +import java.io.File; +import java.io.FileReader; +import java.io.FileWriter; +import java.net.URL; +import java.util.HashSet; +import java.util.Set; +import java.util.Properties; +import java.util.concurrent.TimeUnit; +import java.util.logging.Logger; + +import oracle.kv.util.kvlite.KVLite; +import oracle.nosql.driver.NoSQLHandle; +import oracle.nosql.driver.NoSQLHandleConfig; +import oracle.nosql.driver.NoSQLHandleFactory; +import oracle.nosql.driver.ops.PutRequest; +import oracle.nosql.driver.ops.PutResult; +import oracle.nosql.driver.ops.TableLimits; +import oracle.nosql.driver.ops.TableRequest; +import oracle.nosql.driver.ops.TableResult; +import oracle.nosql.driver.values.MapValue; +import oracle.nosql.proxy.kv.KVTenantManager; +import oracle.nosql.proxy.sc.LocalTenantManager; +import oracle.nosql.proxy.sc.TenantManager; +import oracle.nosql.proxy.security.AccessChecker; +import oracle.nosql.proxy.security.AccessCheckerFactory; +import oracle.nosql.proxy.security.SecureTestUtil; +import oracle.nosql.proxy.util.KVLiteBase; + +import org.junit.After; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; + +public class WarmupTest extends KVLiteBase { + + /* + * Proxy state + */ + private static int PROXY_PORT = 8095; + protected static String KVLITE_MEMORYMB_PROP = "test.memorymb"; + + protected static String hostName = getHostName(); + protected static final int startPort = 13240; + protected static KVLite kvlite; + protected static Proxy proxy = null; + protected static TenantManager tm = null; + protected static AccessChecker ac = null; + + protected static boolean onprem = false; + protected static int memoryMB = 0; + protected static String warmupFilename; + protected static boolean miniCloud = false; + protected final static String prefix = getTestDir(); + + @BeforeClass + public static void staticSetUp() + throws Exception { + + assumeTrue("Skip WarmupTest in minicloud or cloud test", + !Boolean.getBoolean("usemc") && + !Boolean.getBoolean("usecloud")); + startup(); + } + + @AfterClass + public static void staticTearDown() + throws Exception { + + stopProxy(); + + if (kvlite != null) { + kvlite.stop(false); + } + + cleanupTestDir(); + } + + @After + public void tearDown() throws Exception { + stopProxy(); + if (warmupFilename != null && warmupFilename.isEmpty()==false) { + new File(warmupFilename).delete(); + } + } + + protected static void stopProxy() + throws Exception { + + if (proxy != null) { + proxy.shutdown(3, TimeUnit.SECONDS); + proxy = null; + } + + if (tm != null) { + tm.close(); + tm = null; + } + } + + protected static void startup() throws Exception { + + String proxyHost = System.getProperty("proxy.host"); + if (proxyHost != null) { + hostName = proxyHost; + } + Integer proxyPort = Integer.getInteger("proxy.port"); + if (proxyPort != null) { + PROXY_PORT = proxyPort; + } + + cleanupTestDir(); + + memoryMB = Integer.getInteger(KVLITE_MEMORYMB_PROP, 0); + kvlite = startKVLite(hostName, + null, // default store name + false, // useThreads = false + false, // verbose = false + false, // multishard + memoryMB, + false); // secured + } + + protected static void startProxy(String warmupFile, + int warmupTimeMs, + int 
warmupFileRecencyMs, + int warmupFileSaveIntervalMs) + throws Exception { + + + Properties commandLine = new Properties(); + commandLine.setProperty(Config.STORE_NAME.paramName, + getStoreName()); + commandLine.setProperty(Config.HELPER_HOSTS.paramName, + (hostName + ":" + getKVPort())); + Config.ProxyType ptype = (onprem ? Config.ProxyType.KVPROXY : + Config.ProxyType.CLOUDTEST); + commandLine.setProperty(Config.PROXY_TYPE.paramName, ptype.name()); + commandLine.setProperty(Config.HTTP_PORT.paramName, + Integer.toString(PROXY_PORT)); + + if (warmupFile != null) { + commandLine.setProperty(Config.WARMUP_FILE.paramName, + warmupFile); + commandLine.setProperty(Config.WARMUP_TIME_MS.paramName, + Integer.toString(warmupTimeMs)); + commandLine.setProperty(Config.WARMUP_FILE_RECENCY_MS.paramName, + Integer.toString(warmupFileRecencyMs)); + commandLine.setProperty( + Config.WARMUP_FILE_SAVE_INTERVAL_MS.paramName, + Integer.toString(warmupFileSaveIntervalMs)); + } + + commandLine.setProperty(Config.VERBOSE.paramName, + Boolean.toString( + Boolean.getBoolean("test.verbose"))); + + ac = AccessCheckerFactory.createInsecureAccessChecker(); + + /* make sure we use the table cache */ + System.setProperty("test.usetablecache", "true"); + /* create an appropriate TenantManager */ + Config cfg = new Config(commandLine); + if (onprem) { + tm = KVTenantManager.createTenantManager(cfg); + } else { + tm = LocalTenantManager.createTenantManager(cfg); + } + + proxy = ProxyMain.startProxy(commandLine, tm, ac, null); + } + + protected NoSQLHandle configHandle(String endpoint) { + NoSQLHandleConfig hconfig = new NoSQLHandleConfig(endpoint); + return setupHandle(hconfig); + } + + protected NoSQLHandle configHandle(URL url) { + NoSQLHandleConfig hconfig = new NoSQLHandleConfig(url); + return setupHandle(hconfig); + } + + /* Set configuration values for the handle */ + protected NoSQLHandle setupHandle(NoSQLHandleConfig hconfig) { + /* + * 5 retries, default retry algorithm + */ + hconfig.configureDefaultRetryHandler(5, 0); + hconfig.setRequestTimeout(30000); + SecureTestUtil.setAuthProvider(hconfig, false, + "TestTenant"); + return getHandle(hconfig); + } + + public static String getProxyEndpoint() { + try { + return "http://" + hostName + ":" + PROXY_PORT; + } catch (Exception e) { + } + return null; + } + + /** + * Allows classes to create a differently-configured NoSQLHandle. + */ + protected NoSQLHandle getHandle(NoSQLHandleConfig config) { + /* + * Create a Logger. Configuration for the logger is in proxy/build.xml + */ + Logger logger = Logger.getLogger(getClass().getName()); + config.setLogger(logger); + + /* + * Open the handle + */ + return NoSQLHandleFactory.createNoSQLHandle(config); + } + + /* + * Utility methods for use by subclasses + */ + + /** + * Simpler version of tableOperation. This will not support + * a change of limits as it doesn't accept a table name. 
+ */ + protected static TableResult tableOperation(NoSQLHandle handle, + String statement, + TableLimits limits, + int waitMillis) { + assertTrue(waitMillis > 500); + TableRequest tableRequest = new TableRequest() + .setStatement(statement) + .setTableLimits(limits) + .setTimeout(15000); + + return handle.doTableRequest(tableRequest, waitMillis, waitMillis/10); + } + + /** + * Delays for the specified number of milliseconds, ignoring exceptions + */ + static void delay(int delayMS) { + try { + Thread.sleep(delayMS); + } catch (Exception e) { + } + } + + protected static void createTable(NoSQLHandle handle, String tableName) { + String stmt = "create table if not exists " + tableName + + "(id integer, name string, primary key(id))"; + TableResult tres = tableOperation(handle, stmt, + new TableLimits(1000, 1000, 10), + 20000); + assertEquals(TableResult.State.ACTIVE, tres.getTableState()); + /* do a single put to get it into the cache */ + MapValue value = new MapValue().put("id", 10).put("name", "jane"); + PutRequest putRequest = new PutRequest() + .setValue(value) + .setTableName(tableName); + PutResult res = handle.put(putRequest); + assertNotNull("Put failed", res.getVersion()); + } + + protected static void createTables(NoSQLHandle handle, String[] tables) { + for (String tableName : tables) { + createTable(handle, tableName); + } + } + + protected static void dropTable(NoSQLHandle handle, String tableName) { + String stmt = "drop table " + tableName; + TableResult tres = tableOperation(handle, stmt, null, 20000); + assertEquals(TableResult.State.DROPPED, tres.getTableState()); + } + + protected static void dropTables(NoSQLHandle handle, String[] tables) { + for (String tableName : tables) { + dropTable(handle, tableName); + } + } + + protected void checkAllTables(String filename, + String[] tableNames) + throws Exception { + /* read nsname keys */ + BufferedReader reader = + new BufferedReader(new FileReader(filename)); + String nsname; + Set wTables = new HashSet(); + while ((nsname = reader.readLine()) != null) { + /* convert to namespace and tablename */ + if (nsname.isEmpty()) { + continue; + } + String[] arr = nsname.split(":"); + if (arr.length == 1) { + wTables.add(arr[0]); + } else { + wTables.add(arr[1]); + } + } + reader.close(); + for (String tName : tableNames) { + if (wTables.contains(tName) == false) { + fail("Warmupfile missing table '" + tName + "'"); + } + } + assertEquals(tableNames.length, wTables.size()); + } + + + protected static void writeFile(String fileName, String contents) + throws Exception { + BufferedWriter writer = new BufferedWriter(new FileWriter(fileName)); + writer.write(contents); + writer.close(); + } + + @Test + public void testEmptyWarmupFilename() + throws Exception { + assumeTrue(miniCloud == false); + startProxy("", 1000, 5000, 500); + } + + @Test + public void testInvalidWarmupFilename() + throws Exception { + assumeTrue(miniCloud == false); + warmupFilename = "sj % @~f\ndsf.. 
\b\\e sdjhj"; + startProxy(warmupFilename, 1000, 5000, 500); + } + + @Test + public void testOldWarmupFile() + throws Exception { + assumeTrue(miniCloud == false); + startProxy("/bin/bash", 1000, 5000, 500); + } + + @Test + public void testBinaryWarmupFile() + throws Exception { + assumeTrue(miniCloud == false); + warmupFilename = prefix + "/binary_warmup.test"; + writeFile(warmupFilename, "\u02cf\ucdc0ap\n\n\r\tgarbage\u0102"); + startProxy(warmupFilename, 1000, 5000, 500); + delay(2000); + } + + @Test + public void testUnwriteableWarmupFile() + throws Exception { + assumeTrue(miniCloud == false); + warmupFilename = prefix + "/unwriteable.test"; + writeFile(warmupFilename, "\n\n\n"); + File destFile = new File(warmupFilename); + destFile.setWritable(false); + startProxy(warmupFilename, 1000, 5000, 500); + delay(2000); + stopProxy(); + destFile.setWritable(true); + destFile.delete(); + } + + @Test + public void testBasicOperation() + throws Exception { + assumeTrue(miniCloud == false); + warmupFilename = prefix + "/tablecache.test"; + startProxy(warmupFilename, 1000, 5000, 500); + delay(600); + /* + * create a bunch of tables, then make sure the table + * names make it into the cache warmup file + */ + NoSQLHandle handle = configHandle(getProxyEndpoint()); + String[] tableNames = {"warm1", "foobar", "garbage", "audience"}; + createTables(handle, tableNames); + delay(600); + /* read cache warmup file, verify */ + checkAllTables(warmupFilename, tableNames); + /* drop a table, check the rest */ + dropTable(handle, "foobar"); + delay(600); + String[] remaining = {"warm1", "garbage", "audience"}; + checkAllTables(warmupFilename, remaining); + /* restart proxy, wait a bit, verify tables are in warmup file */ + stopProxy(); + delay(600); + startProxy(warmupFilename, 1000, 5000, 500); + delay(3000); + checkAllTables(warmupFilename, remaining); + /* drop tables */ + dropTables(handle, remaining); + } + +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/WriteMultipleTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/WriteMultipleTest.java new file mode 100644 index 00000000..cd3dae84 --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/WriteMultipleTest.java @@ -0,0 +1,1007 @@ +/*- + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * See the file LICENSE for redistribution information. 
+ * + */ + +package oracle.nosql.proxy; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import oracle.nosql.driver.BatchOperationNumberLimitException; +import oracle.nosql.driver.RowSizeLimitException; +import oracle.nosql.driver.TimeToLive; +import oracle.nosql.driver.Version; +import oracle.nosql.driver.ops.DeleteRequest; +import oracle.nosql.driver.ops.GetRequest; +import oracle.nosql.driver.ops.GetResult; +import oracle.nosql.driver.ops.PutRequest; +import oracle.nosql.driver.ops.PutRequest.Option; +import oracle.nosql.driver.ops.PutResult; +import oracle.nosql.driver.ops.Request; +import oracle.nosql.driver.ops.TableLimits; +import oracle.nosql.driver.ops.TableResult; +import oracle.nosql.driver.ops.WriteMultipleRequest; +import oracle.nosql.driver.ops.WriteMultipleRequest.OperationRequest; +import oracle.nosql.driver.ops.WriteMultipleResult; +import oracle.nosql.driver.ops.WriteMultipleResult.OperationResult; +import oracle.nosql.driver.ops.WriteRequest; +import oracle.nosql.driver.values.MapValue; + +import org.junit.Test; + +/** + * Test on WriteMultiple operation. + */ +public class WriteMultipleTest extends ProxyTestBase { + + final static int BATCH_OP_NUMBER_LIMIT = rlimits.getBatchOpNumberLimit(); + final static int ROW_SIZE_LIMIT = rlimits.getRowSizeLimit(); + final static String tableName = "writeMultipleTable"; + + /* Create a table */ + final static String createTableDDL = + "CREATE TABLE IF NOT EXISTS writeMultipleTable(" + + "sid INTEGER, id INTEGER, name STRING, longString STRING, " + + "PRIMARY KEY(SHARD(sid), id)) " + + "USING TTL 1 DAYS"; + + final static TimeToLive tableTTL = TimeToLive.ofDays(1); + + @Override + public void setUp() throws Exception { + super.setUp(); + tableOperation(handle, createTableDDL, + new TableLimits(5000, 5000, 50), + TableResult.State.ACTIVE, 10000); + } + + @Override + public void tearDown() throws Exception { + deleteTable(tableName); + super.tearDown(); + } + + /** + * Test operation succeed. 
+ */ + @Test + public void testOpSucceed() { + + final int sid = 10; + final int recordKB = 2; + WriteMultipleRequest umRequest = new WriteMultipleRequest(); + List shouldSucceed = new ArrayList(); + List rowPresent = new ArrayList(); + List putOverWrites = new ArrayList<>(); + + /* Put 10 rows */ + for (int i = 0; i < 10; i++) { + MapValue value = genRow(sid, i, recordKB); + PutRequest putRequest = new PutRequest() + .setValue(value) + .setTableName(tableName); + umRequest.add(putRequest, false); + rowPresent.add(false); + shouldSucceed.add(true); + putOverWrites.add(false); + } + + WriteMultipleResult umResult = handle.writeMultiple(umRequest); + verifyResult(umResult, umRequest, shouldSucceed, rowPresent, recordKB, + putOverWrites); + Version versionId2 = umResult.getResults().get(2).getVersion(); + Version versionId7 = umResult.getResults().get(7).getVersion(); + + umRequest.clear(); + shouldSucceed.clear(); + rowPresent.clear(); + putOverWrites.clear(); + + /* PutIfAbsent, ReturnRow = true */ + MapValue value = genRow(sid, 0, recordKB, true); + PutRequest put = new PutRequest() + .setOption(Option.IfAbsent) + .setValue(value) + .setTableName(tableName) + .setReturnRow(true); + umRequest.add(put, false); + rowPresent.add(true); + shouldSucceed.add(false); + putOverWrites.add(false); + + /* PutIfPresent, ReturnRow = true */ + value = genRow(sid, 1, recordKB, true); + put = new PutRequest() + .setOption(Option.IfPresent) + .setValue(value) + .setTableName(tableName) + .setReturnRow(true); + umRequest.add(put, false); + rowPresent.add(true); + shouldSucceed.add(true); + putOverWrites.add(false); + + /* PutIfVersion, ReturnRow = true */ + value = genRow(sid, 2, recordKB, true); + put = new PutRequest() + .setOption(Option.IfVersion) + .setMatchVersion(versionId2) + .setValue(value) + .setTableName(tableName) + .setReturnRow(true); + umRequest.add(put, false); + rowPresent.add(false); + shouldSucceed.add(true); + putOverWrites.add(false); + + /* PutIfAbsent, ReturnRow = false */ + value = genRow(sid, 10, recordKB, true); + put = new PutRequest() + .setOption(Option.IfAbsent) + .setValue(value) + .setTableName(tableName) + .setReturnRow(false); + umRequest.add(put, false); + rowPresent.add(false); + shouldSucceed.add(true); + putOverWrites.add(false); + + /* Put, ReturnRow = true */ + value = genRow(sid, 3, recordKB, true); + put = new PutRequest() + .setValue(value) + .setTableName(tableName) + .setReturnRow(true); + umRequest.add(put, false); + rowPresent.add(true); + shouldSucceed.add(true); + putOverWrites.add(true); + + /* Put, ReturnRow = false */ + value = genRow(sid, 4, recordKB, true); + put = new PutRequest() + .setValue(value) + .setTableName(tableName); + umRequest.add(put, false); + rowPresent.add(false); + shouldSucceed.add(true); + putOverWrites.add(true); + + /* Delete, ReturnRow = true */ + value = genKey(sid, 5); + DeleteRequest delete = new DeleteRequest() + .setKey(value) + .setTableName(tableName) + .setReturnRow(true); + umRequest.add(delete, false); + rowPresent.add(true); + shouldSucceed.add(true); + + /* Delete, ReturnRow = false */ + value = genKey(sid, 6); + delete = new DeleteRequest() + .setKey(value) + .setTableName(tableName) + .setReturnRow(false); + umRequest.add(delete, false); + rowPresent.add(false); + shouldSucceed.add(true); + + /* DeleteIfVersion, ReturnRow = true */ + value = genKey(sid, 7); + delete = new DeleteRequest() + .setMatchVersion(versionId7) + .setKey(value) + .setTableName(tableName) + .setReturnRow(true); + umRequest.add(delete, false); + 
rowPresent.add(false); + shouldSucceed.add(true); + + /* DeleteIfVersion, ReturnRow = true */ + value = genKey(sid, 8); + delete = new DeleteRequest() + .setMatchVersion(versionId7) + .setKey(value) + .setTableName(tableName) + .setReturnRow(true); + umRequest.add(delete, false); + rowPresent.add(true); + shouldSucceed.add(false); + + /* Delete, ReturnRow = true */ + value = genKey(sid, 100); + delete = new DeleteRequest() + .setKey(value) + .setTableName(tableName) + .setReturnRow(true); + umRequest.add(delete, false); + rowPresent.add(false); + shouldSucceed.add(false); + + umResult = handle.writeMultiple(umRequest); + verifyResult(umResult, umRequest, shouldSucceed, rowPresent, recordKB, + putOverWrites); + } + + /** + * Test operation aborted. The whole update multiple operation is aborted + * due to the failure of an operation with abortIfSuccessful set to true. + */ + @Test + public void testOpAborted() { + + final int sid = 20; + final int recordKB = 2; + + /* + * The whole writeMultiple operation aborted due to a failure of + * 2nd operation. + */ + MapValue oldVal101 = genRow(sid, 101, recordKB); + PutRequest req = new PutRequest() + .setValue(oldVal101) + .setTableName(tableName); + PutResult ret = handle.put(req); + Version oldVer101 = ret.getVersion(); + assert(oldVer101 != null); + + MapValue newVal101 = genRow(sid, 101, recordKB); + newVal101.put("name", newVal101.get("name").getString() + "_upd"); + req.setValue(newVal101); + ret = handle.put(req); + Version newVer101 = ret.getVersion(); + assert(newVer101 != null); + + PutRequest reqOK = new PutRequest() + .setOption(Option.IfAbsent) + .setValue(genRow(sid, 100, recordKB)) + .setTableName(tableName); + + PutRequest reqNotExec = new PutRequest() + .setOption(Option.IfAbsent) + .setValue(genRow(sid, 200, recordKB)) + .setTableName(tableName); + + boolean[] rowPresents = new boolean[] {false /* for reqOK */, + false /* for reqFail */, + false /* for reqNotExec */}; + boolean[] putOverWrites = new boolean[] {false /* for reqOK */, + false /* for reqFail */, + false /* for reqNotExec */}; + + /* 3 operations, fail at 2nd operation: PutIfAbsent */ + WriteRequest reqFail = new PutRequest() + .setOption(Option.IfAbsent) + .setValue(genRow(sid, 101, recordKB)) + .setTableName(tableName) + .setReturnRow(true); + + rowPresents[1] = true; + runOpAbortedTest(reqOK, reqFail, reqNotExec, recordKB, + rowPresents, newVer101, newVal101, putOverWrites); + + /* 3 operations, fail at 2nd operation: PutIfPresent */ + + reqFail = new PutRequest() + .setOption(Option.IfPresent) + .setValue(genRow(sid, 102, recordKB)) + .setTableName(tableName) + .setReturnRow(true); + + rowPresents[1] = false; + runOpAbortedTest(reqOK, reqFail, reqNotExec, recordKB, + rowPresents, null, null, putOverWrites); + + /* 3 operations, fail at 2nd operation: PutIfVersion */ + reqFail = new PutRequest() + .setOption(Option.IfVersion) + .setMatchVersion(oldVer101) + .setValue(genRow(sid, 101, recordKB)) + .setTableName(tableName) + .setReturnRow(true); + + rowPresents[1] = true; + runOpAbortedTest(reqOK, reqFail, reqNotExec, recordKB, + rowPresents, newVer101, newVal101, putOverWrites); + + /* 3 operations, fail at 2nd operation: Delete */ + reqFail = new DeleteRequest() + .setKey(genKey(sid, 102)) + .setTableName(tableName) + .setReturnRow(true); + + rowPresents[1] = false; + runOpAbortedTest(reqOK, reqFail, reqNotExec, recordKB, + rowPresents, null, null, putOverWrites); + + /* 3 operations, fail at 2nd operation: DeleteIfVersion */ + reqFail = new DeleteRequest() + 
.setMatchVersion(oldVer101) + .setKey(genKey(sid, 101)) + .setTableName(tableName) + .setReturnRow(true); + + rowPresents[1] = true; + runOpAbortedTest(reqOK, reqFail, reqNotExec, recordKB, + rowPresents, newVer101, newVal101, putOverWrites); + } + + private void runOpAbortedTest(WriteRequest reqOK, + WriteRequest reqFail, + WriteRequest reqNotExec, + int recordKB, + boolean[] rowPresents, + Version expFailOpPrevVersion, + MapValue expFailOpPrevValue, + boolean[] putOverWrites) { + WriteMultipleRequest umRequest = new WriteMultipleRequest(); + WriteMultipleResult umResult; + + /* 1st op: reqOK */ + umRequest.add(reqOK, true); + + /* 2nd op: reqFail */ + int failedOpIndex = umRequest.getNumOperations(); + umRequest.add(reqFail, true); + + /* 3rd op: reqNotExec */ + umRequest.add(reqNotExec, true); + + umResult = handle.writeMultiple(umRequest); + verifyResultAborted(umRequest, umResult, failedOpIndex, rowPresents, + recordKB, expFailOpPrevVersion, + expFailOpPrevValue, putOverWrites); + } + + /** + * Test operation failed because of invalid arguments. + */ + @Test + public void testOpFailed() { + + final int sid = 30; + final int recordKB = 2; + WriteMultipleRequest umRequest = new WriteMultipleRequest(); + + /** + * WriteMultiple operation failed due to invalid arguments + */ + /* case1: Only put or delete request are allowed */ + umRequest.clear(); + GetRequest get = new GetRequest() + .setKey(genKey(sid, 1)) + .setTableName(tableName); + try { + umRequest.add(get, false); + fail("Expected to fail but not"); + } catch(IllegalArgumentException iae) { + } + + /* case2: Two operations have different shard keys */ + umRequest.clear(); + /* PutIfAbsent, return row = true*/ + MapValue value = genRow(sid, 0, recordKB); + PutRequest put = new PutRequest() + .setOption(Option.IfAbsent) + .setValue(value) + .setTableName(tableName) + .setReturnRow(true); + umRequest.add(put, false); + + /* PutIfAbsent, return row = true*/ + value = genRow(sid + 1, 0, recordKB); + put = new PutRequest() + .setOption(Option.IfAbsent) + .setValue(value) + .setTableName(tableName) + .setReturnRow(true); + umRequest.add(put, false); + try { + handle.writeMultiple(umRequest); + fail("Expected to fail but not"); + } catch (IllegalArgumentException iae) { + } + + /* case3: More than one operation has the same Key */ + umRequest.clear(); + /* PutIfAbsent, return row = true*/ + value = genRow(sid, 0, recordKB); + put = new PutRequest() + .setOption(Option.IfAbsent) + .setValue(value) + .setTableName(tableName) + .setReturnRow(true); + umRequest.add(put, false); + + /* PutIfAbsent, return row = true*/ + value = genKey(sid, 0); + DeleteRequest delete = new DeleteRequest() + .setKey(value) + .setTableName(tableName) + .setReturnRow(true); + umRequest.add(delete, false); + + try { + handle.writeMultiple(umRequest); + fail("Expected to fail but not"); + } catch (IllegalArgumentException iae) { + } + + /* + * case4: the target table of a operation is different from that of + * others. 
+ */ + umRequest.clear(); + /* PutIfAbsent, return row = true*/ + value = genRow(sid, 0, recordKB); + put = new PutRequest() + .setOption(Option.IfAbsent) + .setValue(value) + .setTableName(tableName) + .setReturnRow(true); + umRequest.add(put, false); + + /* PutIfAbsent, return row = true*/ + value = genRow(sid, 1, recordKB); + put = new PutRequest() + .setOption(Option.IfAbsent) + .setValue(value) + .setTableName("test") + .setReturnRow(true); + try { + umRequest.add(put, false); + fail("Expected to fail but not"); + } catch (IllegalArgumentException iae) { + } + + if (!onprem && tenantLimits != null) { + /* case5: the number of operations exceeds the limit */ + int batchOpNumLimit = tenantLimits.getStandardTableLimits() + .getBatchOpNumberLimit(); + umRequest.clear(); + for (int i = 0; i < batchOpNumLimit + 1; i++) { + value = genRow(sid, i, recordKB); + put = new PutRequest() + .setOption(Option.IfAbsent) + .setValue(value) + .setTableName(tableName) + .setReturnRow(true); + umRequest.add(put, false); + } + try { + handle.writeMultiple(umRequest); + fail("Expected to fail but not"); + } catch (BatchOperationNumberLimitException ex) { + } + } + + if (!onprem && tenantLimits != null) { + /* + * Case6: the data size of 2nd operation exceeds the limit. + */ + int rowSizeLimit = tenantLimits.getStandardTableLimits() + .getRowSizeLimit(); + umRequest.clear(); + value = genRow(sid, 100, recordKB); + put = new PutRequest() + .setOption(Option.IfAbsent) + .setValue(value) + .setTableName(tableName) + .setReturnRow(true); + umRequest.add(put, false); + + value = new MapValue().put("sid", sid).put("id", 101) + .put("name", genString(rowSizeLimit)); + put = new PutRequest() + .setValue(value) + .setTableName(tableName) + .setReturnRow(true); + umRequest.add(put, false); + try { + handle.writeMultiple(umRequest); + fail("Expected to fail but not"); + } catch (RowSizeLimitException rsle) { + } + } + } + + @Test + public void testOpWithTTL() { + final int sid = 11; + final int num = 10; + final int recordKB = 2; + + /* + * Test TimeToLive.fromExpirationTime, the duration between + * referenceTime and expirationTime is less than one hour. + */ + TimeToLive ttl = TimeToLive.fromExpirationTime(1527483600000L, + 1527481131705L); + assertEquals(ttl.getValue(), 1); + assertEquals(ttl.getUnit(), TimeUnit.HOURS); + /* + * Test TimeToLive.fromExpirationTime, the duration between + * referenceTime and expirationTime is less than one day. 
+ */ + ttl = TimeToLive.fromExpirationTime(1527552000000L, 1527481131705L); + assertEquals(ttl.getValue(), 1); + assertEquals(ttl.getUnit(), TimeUnit.DAYS); + + /* Test WriteMultipleRequest with TTL */ + WriteMultipleRequest umRequest = new WriteMultipleRequest(); + List shouldSucceed = new ArrayList(); + + /* Put rows with TTL of DO_NOT_EXPIRE */ + ttl = TimeToLive.DO_NOT_EXPIRE; + for (int i = 0; i < num; i++) { + MapValue value = genRow(sid, i, recordKB); + PutRequest putReq = new PutRequest() + .setValue(value) + .setTableName(tableName) + .setReturnRow(false) + .setTTL(ttl); + umRequest.add(putReq, false); + shouldSucceed.add(true); + } + + WriteMultipleResult umResult = handle.writeMultiple(umRequest); + verifyResult(umResult, umRequest, shouldSucceed, null, recordKB, null); + + /* Verify expiration time */ + long oldExpirationTime = 0; + for (int i = 0; i < num; i++) { + MapValue key = genKey(sid, i); + GetRequest getReq = new GetRequest() + .setKey(key) + .setTableName(tableName); + GetResult getRet = handle.get(getReq); + assertTimeToLive(ttl, getRet.getExpirationTime(), 0); + if (oldExpirationTime == 0) { + oldExpirationTime = getRet.getExpirationTime(); + } + } + + /* Update rows with new TTL */ + umRequest.clear(); + shouldSucceed.clear(); + umRequest.add(new DeleteRequest() + .setKey(genKey(sid, 100)) + .setTableName(tableName), false); + shouldSucceed.add(false); + for (int i = 0; i < num; i++) { + MapValue value = genRow(sid, i, recordKB, true); + PutRequest putReq = new PutRequest() + .setValue(value) + .setTableName(tableName) + .setReturnRow(false); + ttl = genTTL(i); + if (ttl == tableTTL) { + putReq.setUseTableDefaultTTL(true); + } else { + putReq.setTTL(ttl); + } + umRequest.add(putReq, false); + shouldSucceed.add(true); + } + + umResult = handle.writeMultiple(umRequest); + verifyResult(umResult, umRequest, shouldSucceed, null, recordKB, null); + + /* Verify expiration time */ + for (int i = 0; i < num; i++) { + GetRequest getReq = new GetRequest() + .setKey(genKey(sid, i)) + .setTableName(tableName); + GetResult getRet = handle.get(getReq); + assertTimeToLive(genTTL(i), getRet.getExpirationTime(), + oldExpirationTime); + } + } + + @Test + public void testReturnRow() { + final int num = 5; + WriteMultipleResult ret; + Version[] orgVersions = new Version[num]; + Version[] updVersions = new Version[num]; + + /* + * Execute 5 PutIfPresent ops with returnRow = true, + * all ops fail, previous rows returned are all null. + */ + WriteMultipleRequest reqPutIfPresents = new WriteMultipleRequest(); + for (int i = 0; i < num; i++) { + MapValue mv = genRow(1, i, 1, true); + PutRequest put = new PutRequest() + .setTableName(tableName) + .setValue(mv) + .setOption(Option.IfPresent) + .setReturnRow(true); + reqPutIfPresents.add(put, false); + } + ret = handle.writeMultiple(reqPutIfPresents); + for (OperationResult opRet: ret.getResults()) { + assertFalse(opRet.getSuccess()); + assertNull(opRet.getExistingValue()); + assertNull(opRet.getExistingVersion()); + assertTrue(opRet.getExistingModificationTime() == 0); + } + + /* + * Execute 5 Put ops with returnRow = true, + * all ops succeed, previous rows returned are all null. 
+ */ + WriteMultipleRequest reqPuts = new WriteMultipleRequest(); + for (int i = 0; i < num; i++) { + MapValue mv = genRow(1, i, 1); + PutRequest put = new PutRequest() + .setTableName(tableName) + .setValue(mv) + .setReturnRow(true); + reqPuts.add(put, false); + } + ret = handle.writeMultiple(reqPuts); + for (int i = 0; i < ret.getResults().size(); i++) { + OperationResult opRet = ret.getResults().get(i); + assertTrue(opRet.getSuccess()); + assertNull(opRet.getExistingValue()); + assertNull(opRet.getExistingVersion()); + assertTrue(opRet.getExistingModificationTime() == 0); + + assertNotNull(opRet.getVersion()); + orgVersions[i] = opRet.getVersion(); + } + + /* + * Execute 5 PutIfPresent ops with returnRow = true + * all ops succeed, previous rows returned are not null. + */ + ret = handle.writeMultiple(reqPutIfPresents); + for (int i = 0; i < ret.getResults().size(); i++) { + OperationResult opRet = ret.getResults().get(i); + assertTrue(opRet.getSuccess()); + assertNotNull(opRet.getExistingValue()); + assertNotNull(opRet.getExistingVersion()); + assertTrue(opRet.getExistingModificationTime() > 0); + + assertNotNull(opRet.getVersion()); + updVersions[i] = opRet.getVersion(); + } + + /* + * Execute 5 PutIfAbsent ops with returnRow = true, + * all ops fail, previous rows returned are not null. + */ + WriteMultipleRequest reqPutIfAbsents = new WriteMultipleRequest(); + for (int i = 0; i < num; i++) { + MapValue mv = genRow(1, i, 1); + PutRequest put = new PutRequest() + .setTableName(tableName) + .setValue(mv) + .setOption(Option.IfAbsent) + .setReturnRow(true); + reqPutIfAbsents.add(put, false); + } + ret = handle.writeMultiple(reqPutIfAbsents); + for (int i = 0; i < ret.getResults().size(); i++) { + OperationResult opRet = ret.getResults().get(i); + assertFalse(opRet.getSuccess()); + assertNotNull(opRet.getExistingValue()); + + assertNotNull(opRet.getExistingVersion()); + assertTrue(Arrays.equals(updVersions[i].getBytes(), + opRet.getExistingVersion().getBytes())); + + assertTrue(opRet.getExistingModificationTime() > 0); + } + + /* + * Execute 5 PutIfVersion ops with returnRow = true, + * all ops fail, previous rows returned are not null. + */ + WriteMultipleRequest reqPutIfVersions = new WriteMultipleRequest(); + for (int i = 0; i < num; i++) { + MapValue mv = genRow(1, i, 1); + PutRequest put = new PutRequest() + .setTableName(tableName) + .setValue(mv) + .setMatchVersion(orgVersions[i]) + .setOption(Option.IfVersion) + .setReturnRow(true); + reqPutIfVersions.add(put, false); + } + ret = handle.writeMultiple(reqPutIfVersions); + for (int i = 0; i < ret.getResults().size(); i++) { + OperationResult opRet = ret.getResults().get(i); + assertFalse(opRet.getSuccess()); + assertNotNull(opRet.getExistingValue()); + + assertNotNull(opRet.getExistingVersion()); + assertTrue(Arrays.equals(updVersions[i].getBytes(), + opRet.getExistingVersion().getBytes())); + + assertTrue(opRet.getExistingModificationTime() > 0); + } + + /* + * Execute 5 DeleteIfVersion ops with returnRow = true, + * all ops fail, previous rows returned are not null. 
+ */ + WriteMultipleRequest reqDeleteIfVersions = new WriteMultipleRequest(); + for (int i = 0; i < num; i++) { + MapValue key = genKey(1, i); + DeleteRequest delete = new DeleteRequest() + .setTableName(tableName) + .setKey(key) + .setMatchVersion(orgVersions[i]) + .setReturnRow(true); + reqDeleteIfVersions.add(delete, false); + } + ret = handle.writeMultiple(reqDeleteIfVersions); + for (int i = 0; i < ret.getResults().size(); i++) { + OperationResult opRet = ret.getResults().get(i); + assertFalse(opRet.getSuccess()); + assertNotNull(opRet.getExistingValue()); + + assertNotNull(opRet.getExistingVersion()); + assertTrue(Arrays.equals(updVersions[i].getBytes(), + opRet.getExistingVersion().getBytes())); + + assertTrue(opRet.getExistingModificationTime() > 0); + } + + /* + * Execute 5 Delete ops with returnRow = true, + * all ops succeed, previous rows returned are not null. + */ + WriteMultipleRequest reqDeletes = new WriteMultipleRequest(); + for (int i = 0; i < num; i++) { + MapValue key = genKey(1, i); + DeleteRequest delete = new DeleteRequest() + .setTableName(tableName) + .setKey(key) + .setReturnRow(true); + reqDeletes.add(delete, false); + } + ret = handle.writeMultiple(reqDeletes); + for (OperationResult opRet: ret.getResults()) { + assertTrue(opRet.getSuccess()); + assertNotNull(opRet.getExistingValue()); + assertNotNull(opRet.getExistingVersion()); + assertTrue(opRet.getExistingModificationTime() > 0); + } + } + + private MapValue genRow(int sid, int id, int recordKB) { + return genRow(sid, id, recordKB, false); + } + + private MapValue genRow(int sid, int id, int recordKB, boolean upd) { + return new MapValue().put("sid", sid).put("id", id) + .put("name", (upd ? "name_upd_" : "name_") + sid + "_" + id) + .put("longString", genString((recordKB - 1) * 1024)); + } + + private MapValue genKey(int sid, int id) { + return new MapValue().put("sid", sid).put("id", id); + } + + private void verifyResult(WriteMultipleResult umResult, + WriteMultipleRequest umRequest, + List shouldSucceedList, + List rowPresentList, + int recordKB, + List putOverWriteList) { + + assertTrue("The operation should have succeeded", + umResult.getSuccess()); + + List ops = umRequest.getOperations(); + assertTrue("Wrong number of results: expect " + + umRequest.getNumOperations() + ", actual " + umResult.size(), + umResult.size() == umRequest.getNumOperations()); + + int ind = 0; + int expReadKB = 0; + int expWriteKB = 0; + + for (OperationResult result : umResult.getResults()) { + boolean shouldSucceed = shouldSucceedList.get(ind); + assertTrue("Unexpected operation success status, opIdx=" + ind, + result.getSuccess() == shouldSucceed); + + OperationRequest op = ops.get(ind); + WriteRequest request = op.getRequest(); + if (request instanceof PutRequest && shouldSucceed) { + assertTrue("Expected to get new version", + result.getVersion() != null); + } else { + assertTrue("Expected no new version", + result.getVersion() == null); + } + + if (rowPresentList != null) { + boolean rowPresent = rowPresentList.get(ind); + boolean hasReturnRow = rowPresent; + assertTrue("The existing value is expected to be " + + (hasReturnRow ? "not null" : "null") + " but not", + (hasReturnRow ? result.getExistingValue() != null : + result.getExistingValue() == null)); + assertTrue("The existing version is expected to be " + + (hasReturnRow ? "not null" : "null") + " but not", + (hasReturnRow ? 
result.getExistingVersion() != null : + result.getExistingVersion() == null)); + if (op.getRequest() instanceof PutRequest) { + PutRequest putReq = (PutRequest)op.getRequest(); + boolean putOverwrite = putOverWriteList != null ? + (putOverWriteList.get(ind)) : false; + int[] expCosts = getPutReadWriteCost(putReq, + shouldSucceed, + rowPresent, + recordKB, + putOverwrite); + expReadKB += expCosts[0]; + expWriteKB += expCosts[1]; + } else { + assertTrue(op.getRequest() instanceof DeleteRequest); + DeleteRequest deleteReq = (DeleteRequest)op.getRequest(); + int[] expCosts = getDeleteReadWriteCost(deleteReq, + shouldSucceed, + rowPresent, + recordKB); + expReadKB += expCosts[0]; + expWriteKB += expCosts[1]; + } + } + ind++; + } + + /* Verify read/write cost */ + if (rowPresentList != null) { + assertReadKB(umResult, expReadKB, true /* isAbsolute */); + assertWriteKB(umResult, expWriteKB); + } + } + + private void verifyResultAborted(WriteMultipleRequest request, + WriteMultipleResult result, + int failedOpIndex, + boolean[] rowPresents, + int recordKB, + Version expPrevVersion, + MapValue expPrevValue, + boolean[] putOverWrite) { + + assertTrue("The operation is expected to abort", + result.getSuccess() == false); + + assertTrue("Wrong failed operation index, expect " + failedOpIndex + + " but got " + result.getFailedOperationIndex(), + result.getFailedOperationIndex() == failedOpIndex); + + Request opReq = request.getRequest(failedOpIndex); + OperationResult opRet = result.getFailedOperationResult(); + + assertTrue("The failed operation result should not be null", + opRet != null); + + assertTrue("The version is expected to be null but not", + opRet.getVersion() == null); + + if (expPrevVersion != null) { + assertNotNull(expPrevValue); + + assertTrue("The existing version should be not null", + opRet.getExistingVersion() != null); + assertTrue("The existing version is wrong", + Arrays.equals(expPrevVersion.getBytes(), + opRet.getExistingVersion().getBytes())); + } else { + assertTrue("The existing version should be null", + opRet.getExistingVersion() == null); + } + + if (expPrevValue != null) { + assertNotNull(expPrevVersion); + assertTrue("The existing value should be not null", + opRet.getExistingValue() != null); + assertTrue("The existing value is wrong", + expPrevValue.equals(opRet.getExistingValue())); + } else { + assertTrue("The existing value should be null", + opRet.getExistingValue() == null); + } + + /* Verify the read/write cost */ + if (onprem) { + return; + } + + int expReadKB = 0; + int expWriteKB = 0; + for (int i = 0; i <= failedOpIndex; i++) { + opReq = request.getRequest(i); + boolean shouldSucceed = i < failedOpIndex; + int[] expCosts; + if (opReq instanceof PutRequest) { + expCosts = getPutReadWriteCost((PutRequest)opReq, + shouldSucceed, + rowPresents[i], + recordKB, + putOverWrite[i]); + + } else { + assertTrue(opReq instanceof DeleteRequest); + expCosts = getDeleteReadWriteCost((DeleteRequest)opReq, + shouldSucceed, + rowPresents[i], + recordKB); + } + expReadKB += expCosts[0]; + expWriteKB += expCosts[1]; + } + + assertEquals(expReadKB, result.getReadKB()); + assertEquals(expWriteKB, result.getWriteKB()); + } + + private String genString(int len) { + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < len; i++) { + sb.append((char)('A' + i % 26)); + } + return sb.toString(); + } + + private TimeToLive genTTL(int id) { + switch (id % 4) { + case 1: + return TimeToLive.ofDays(id + 1); + case 2: + return TimeToLive.ofHours(id + 1); + case 3: + return 
tableTTL; + case 0: + break; + } + return null; + } + + private void assertTimeToLive(TimeToLive ttl, + long actExTime, + long origExTime) { + + final long HOUR_IN_MILLIS = 60 * 60 * 1000; + final long DAY_IN_MILLIS = 24 * HOUR_IN_MILLIS; + + if (ttl == null || ttl.getValue() == 0) { + assertTrue("Expiration time should be " + origExTime + ": " + + actExTime, actExTime == origExTime); + } else { + assertTrue("Expiration time should be greater than 0", + actExTime > 0); + + boolean unitIsHour = ttl.unitIsHours(); + long unitInMs = (unitIsHour ? HOUR_IN_MILLIS : DAY_IN_MILLIS); + long expExTime = ttl.toExpirationTime(System.currentTimeMillis()); + + assertTrue("Actual TTL duration " + actExTime + "ms differs by " + + "more than one " + (unitIsHour ? "hour" : "day") + + " from expected duration of " + expExTime + "ms: " + ttl, + Math.abs(actExTime - expExTime) <= unitInMs); + } + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/filter/FilterTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/filter/FilterTest.java new file mode 100644 index 00000000..ed4677a9 --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/filter/FilterTest.java @@ -0,0 +1,722 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2011, 2018 Oracle and/or its affiliates. All rights reserved. + * + */ + +package oracle.nosql.proxy.filter; + +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeTrue; + +import org.junit.Test; + +import oracle.nosql.driver.NoSQLHandle; +import oracle.nosql.driver.NoSQLHandleConfig; +import oracle.nosql.driver.RequestTimeoutException; +import oracle.nosql.driver.SystemException; +import oracle.nosql.driver.ops.DeleteRequest; +import oracle.nosql.driver.ops.GetIndexesRequest; +import oracle.nosql.driver.ops.GetRequest; +import oracle.nosql.driver.ops.GetTableRequest; +import oracle.nosql.driver.ops.ListTablesRequest; +import oracle.nosql.driver.ops.PrepareRequest; +import oracle.nosql.driver.ops.PrepareResult; +import oracle.nosql.driver.ops.PreparedStatement; +import oracle.nosql.driver.ops.PutRequest; +import oracle.nosql.driver.ops.QueryRequest; +import oracle.nosql.driver.ops.TableLimits; +import oracle.nosql.driver.ops.TableRequest; +import oracle.nosql.driver.ops.TableResult; +import oracle.nosql.driver.ops.TableUsageRequest; +import oracle.nosql.driver.ops.WriteMultipleRequest; +import oracle.nosql.driver.values.MapValue; +import oracle.nosql.util.HttpRequest; +import oracle.nosql.util.HttpResponse; +import oracle.nosql.util.filter.Rule; + +/* + * Tests filtering requests based on rules. + * + * The 2 methods blockOps() and executeOps() are used to execute requests: + * o blockOps() expects to catch the specified exception because the request + * is blocked. + * o executeOps() expects the request to execute successfully. + * + * Basically, the test adds a rule and then runs operations using the above 2 + * methods to verify that operations matching the rule are blocked, while + * those that don't match the rule execute successfully. + */ +public class FilterTest extends FilterTestBase { + + private final int REQUEST_WAIT_MS = 3000; + + private final String tenantId = getTenantId(); + private final String userId = (isSecure() ? 
"testuser" : null); + + private final String tableName = "filterTest"; + private final String indexName = "idxName"; + + private final TableLimits limits = new TableLimits(10, 10, 1); + + private final String createTableDDL = "create table if not exists " + + tableName + "(sid integer, id integer, name string, " + + "primary key(shard(sid), id))"; + + private final String alterTableDdl = + "alter table " + tableName + "(add i1 integer)"; + + private final String createIndexDdl = + "create index if not exists " + indexName + " on " + + tableName + "(name)"; + + private final String dropIndexDdl = + "drop index if exists " + indexName + " on " + tableName; + + private final String selectStmt = "select * from " + tableName; + + private final String insertStmt = + "insert into " + tableName + " values (3, 1, 'abc')"; + + private final String deleteStmt = "delete from " + tableName + + " where sid = 1 and id = 1"; + + private final String updateStmt = "update " + tableName + + " set name = \"name_upd\" where sid = 1 and id = 1"; + + private final MapValue row = (MapValue)MapValue.createFromJson( + "{\"sid\":1, \"id\":1, \"name\":\"a\"}", null); + private final MapValue key = (MapValue)MapValue.createFromJson( + "{\"sid\":1, \"id\":2}", null); + + private String tableOcid; + private PreparedStatement selectPrepStmt; + private PreparedStatement insertPrepStmt; + private PreparedStatement updatePrepStmt; + private PreparedStatement deletePrepStmt; + + private static final Rule.Action dropRequest = Rule.DROP_REQUEST; + private static final Rule.Action returnError = + new Rule.ReturnErrorAction(102 /* SERVICE_UNAVAILABLE*/, + "server is undergoing maintenance"); + private static final Class returnErrorException = SystemException.class; + + /* + * Operations + */ + private final OpWrapper createTable = new OpWrapper("createTable") { + @Override + void execOp() { + tableOperation(handle, createTableDDL, limits, null); + } + }; + + private final OpWrapper alterTable = new OpWrapper("alterTable") { + @Override + void execOp() { + tableOperation(handle, alterTableDdl, null, null); + } + }; + + private final OpWrapper updateTableLimits = new OpWrapper("updateLimits") { + @Override + void execOp() { + tableOperation(handle, null, limits, tableName); + } + }; + + private final OpWrapper dropTable = new OpWrapper("dropTable") { + @Override + void execOp() { + String ddl = "drop table if exists " + tableName; + tableOperation(handle, ddl, null, null); + } + }; + + private final OpWrapper createIndex = new OpWrapper("createIndex") { + @Override + void execOp() { + tableOperation(handle, createIndexDdl, null, null); + } + }; + + private final OpWrapper dropIndex = new OpWrapper("dropIndex") { + @Override + void execOp() { + tableOperation(handle, dropIndexDdl, null, null); + } + }; + + private final OpWrapper getTable = new OpWrapper("getTable") { + @Override + void execOp() { + GetTableRequest req = new GetTableRequest() + .setTableName(tableName); + handle.getTable(req); + } + }; + + private final OpWrapper getIndexes = new OpWrapper("getIndexes") { + @Override + void execOp() { + GetIndexesRequest req = new GetIndexesRequest() + .setTableName(tableName); + handle.getIndexes(req); + } + }; + + private final OpWrapper listTables = new OpWrapper("listTables") { + @Override + void execOp() { + ListTablesRequest req = new ListTablesRequest() + .setLimit(1); + handle.listTables(req); + } + }; + + private final OpWrapper put = new OpWrapper("put") { + @Override + void execOp() { + PutRequest putReq = new 
PutRequest() + .setTableName(tableName) + .setValue(row); + handle.put(putReq); + } + }; + + private final OpWrapper delete = new OpWrapper("delete") { + @Override + void execOp() { + DeleteRequest deleteReq = new DeleteRequest() + .setTableName(tableName) + .setKey(key); + handle.delete(deleteReq); + } + }; + + private final OpWrapper get = new OpWrapper("get") { + @Override + void execOp() { + GetRequest req = new GetRequest() + .setTableName(tableName) + .setKey(key); + handle.get(req); + } + }; + + private final OpWrapper prepare = new OpWrapper("prepare") { + @Override + void execOp() { + PrepareRequest req = new PrepareRequest().setStatement(selectStmt); + handle.prepare(req); + } + }; + + private final OpWrapper selectQuery = new OpWrapper("selectQuery") { + @Override + void execOp() { + QueryRequest req = new QueryRequest().setStatement(selectStmt); + handle.query(req); + } + }; + + private final OpWrapper selectPrepQuery = new OpWrapper("selectPrepQuery") { + @Override + void execOp() { + QueryRequest req = new QueryRequest() + .setPreparedStatement(selectPrepStmt); + handle.query(req); + } + }; + + private final OpWrapper deleteQuery = new OpWrapper("deleteQuery") { + @Override + void execOp() { + QueryRequest req = new QueryRequest().setStatement(deleteStmt); + handle.query(req); + } + }; + + private final OpWrapper deletePrepQuery = new OpWrapper("deletePrepQuery") { + @Override + void execOp() { + QueryRequest req = new QueryRequest() + .setPreparedStatement(deletePrepStmt); + handle.query(req); + } + }; + + private final OpWrapper insertQuery = new OpWrapper("insertQuery") { + @Override + void execOp() { + QueryRequest req = new QueryRequest().setStatement(insertStmt); + handle.query(req); + } + }; + + private final OpWrapper insertPrepQuery = new OpWrapper("insertPrepQuery") { + @Override + void execOp() { + QueryRequest req = new QueryRequest() + .setPreparedStatement(insertPrepStmt); + handle.query(req); + } + }; + + private final OpWrapper updateQuery = new OpWrapper("updateQuery") { + @Override + void execOp() { + QueryRequest req = new QueryRequest().setStatement(updateStmt); + handle.query(req); + } + }; + + private final OpWrapper updatePrepQuery = new OpWrapper("updatePrepQuery") { + @Override + void execOp() { + QueryRequest req = new QueryRequest() + .setPreparedStatement(updatePrepStmt); + handle.query(req); + } + }; + + private final OpWrapper writeMultiple = new OpWrapper("writeMultiple") { + @Override + void execOp() { + WriteMultipleRequest req = new WriteMultipleRequest() + .add(new PutRequest() + .setTableName(tableName) + .setValue(row), + false) + .add(new DeleteRequest() + .setTableName(tableName) + .setKey(key), + false); + handle.writeMultiple(req); + } + }; + + private final OpWrapper getTableUsage = new OpWrapper("getTableUsage") { + @Override + void execOp() { + TableUsageRequest req = new TableUsageRequest() + .setTableName(tableName) + .setLimit(1); + handle.getTableUsage(req); + } + }; + + private final OpWrapper[] ddlOps = new OpWrapper[] { + createTable, + alterTable, + updateTableLimits, + createIndex, + dropIndex, + dropTable + }; + + private final OpWrapper[] ddlOps_existing_table = new OpWrapper[] { + alterTable, + updateTableLimits, + createIndex, + dropIndex, + dropTable + }; + + private final OpWrapper[] readOps = new OpWrapper[] { + getTable, + getIndexes, + getTableUsage, + listTables, + get, + prepare, + selectQuery + }; + + private final OpWrapper[] writeOps = new OpWrapper[] { + put, + delete, + insertQuery, + deleteQuery, + 
writeMultiple + }; + + @Override + public void setUp() throws Exception { + super.setUp(); + initTableAndPrepStmts(); + } + + private void initTableAndPrepStmts() { + tableOperation(handle, createTableDDL, limits, 60000); + tableOcid = getTableOcid(tableName); + selectPrepStmt = prepare(selectStmt); + insertPrepStmt = prepare(insertStmt); + updatePrepStmt = prepare(updateStmt); + deletePrepStmt = prepare(deleteStmt); + } + + /* + * Filter requests by the rule containing operations only + */ + @Test + public void testOpRule() { + /* + * all operations should be blocked by the rule: + * {"name":"block_ops", "operations":["ALL"]} + */ + Rule rule = addRule("block_ops", dropRequest, new String[] {"all"}); + blockOps(rule, ddlOps); + blockOps(rule, readOps); + blockOps(rule, writeOps); + + deleteRule("block_ops", false); + + /* + * ddl and write operations should be blocked by the rule: + * {"name":"block_ops", "operations":["DDL", "WRITE"]} + */ + rule = addRule("block_ops", returnError, new String[] {"ddl", "write"}); + blockOps(rule, createTable); + blockOps(rule, writeOps); + executeOps(readOps); + } + + /* + * Test filtering requests by the rule with principal tenant and/or user + * information + */ + @Test + public void testPrincipalRule() { + /* This test only runs with security enabled and minicloud */ + assumeTrue(useMiniCloud); + + /* + * Add rule to block all operations from the user of specified tenant: + * { + * "name":"block_test_tenant", + * "tenant": tenantId, + * "operations": ["ALL"] + * } + */ + Rule rule = addRule("block_test_tenant", + dropRequest, + tenantId, + null /* userId */, + null /* tableId */, + new String[] {"all"}, + false); + blockOps(rule, createTable, getTable, put, get); + + /* remove above rule "block_test_tenant", operations can proceed */ + deleteRule("block_test_tenant", false); + reloadPersistentRules(); + executeOps(createTable, getTable, put, get); + + /* + * Add rule to block ddl and write requests from the specified user. + * { + * "name":"block_test_user", + * "user": userId, + * "operations": ["DDL", "WRITE"] + * } + */ + rule = addRule("block_test_user", + dropRequest, + null, + userId, + null, + new String[] {"ddl", "write"}, + false); + blockOps(rule, createTable, put); + /* not block read operations. */ + executeOps(getTable, get); + + /* remove the above rule "block_test_user", operations can proceed */ + deleteRule("block_test_user", false); + reloadPersistentRules(); + executeOps(createTable, getTable, put, get); + } + + /* + * Filter requests by the rule containing table ocid. + */ + @Test + public void testTableRule() { + /* + * Add rule to block all requests to the specified target table from + * a user from the specified tenant: + * { + * "name":"block_table_xxx", + * "tenant": tenantId, + * "table": tableOcid, + * "operations": ["ALL"] + * } + */ + String ruleName = "block_table_" + tableName; + Rule rule = addRule(ruleName, + returnError, + (cloudRunning) ? tenantId : null, + null, + tableOcid, + new String[] {"all"}, + false); + blockOps(rule, ddlOps_existing_table); + blockOps(rule, writeOps); + deleteRule(ruleName, false); + + /* + * Update rule to block write requests to the specified target table + * from the specified user + * { + * "name":"block_table_xxx", + * "user": userId, + * "table": tableOcid, + * "operations": ["WRITE"] + * } + */ + rule = addRule(ruleName, + returnError, + null, + (cloudRunning) ? 
userId : null, + tableOcid, + new String[] {"write"}, + false); + blockOps(rule, writeOps); + executeOps(ddlOps); + } + + /* + * Test filtering query request. + * + * Query can be a read or write operation, the actual operation is deferred + * to determine after parse the statement in handleQuery(). + * + * This test is to verify query operation can be blocked as expected by the + * rule that blocks "read" or "write" operation. + */ + @Test + public void testQuery() { + /* + * Add rule to block write requests to the specified target table: + * { + * "name":"block_query", + * "table": tableOcid, + * "operations": ["WRITE"] + * } + * + * The insert/delete/update query should be blocked, and prepare and + * select query should be executed successfully. + */ + Rule rule = addRule("block_query", + dropRequest, + null, + null, + tableOcid, + new String[] {"write"}, + false); + blockOps(rule, insertQuery, deleteQuery, updateQuery); + if (cloudRunning) { + blockOps(rule, insertPrepQuery, deletePrepQuery, updatePrepQuery); + } + executeOps(prepare, selectQuery, selectPrepQuery); + deleteRule("block_query", false); + + /* + * Update rule to block read requests to the specified target table: + * { + * "name":"block_query", + * "table": tableOcid, + * "operations": ["READ"] + * } + * + * The prepare and select query should be blocked, and + * insert/update/delete query should be executed successfully. + */ + rule = addRule("block_query", + dropRequest, + null, + null, + tableOcid, + new String[] {"read"}, + false); + blockOps(rule, prepare, selectQuery, updateQuery, deleteQuery); + if (cloudRunning) { + blockOps(rule, selectPrepQuery, updatePrepQuery, deletePrepQuery); + } + executeOps(insertQuery, insertPrepQuery); + } + + @Test + public void testPersistentRule() { + assumeTrue(useMiniCloud); + /* + * Add rule to block all operations from the user of specified tenant: + * { + * "name":"block_test_tenant", + * "tenant": tenantId, + * "operations": ["ALL"] + * } + */ + Rule rule = addRule("block_test_tenant", + dropRequest, + tenantId, + null /* userId */, + null /* tableId */, + new String[] {"all"}, + true); + reloadPersistentRules(); + blockOps(rule, createTable, getTable, put, get); + + /* remove above rule "block_test_tenant", operations can proceed */ + deleteRule("block_test_tenant", true); + reloadPersistentRules(); + executeOps(createTable, getTable, put, get); + + /* + * Add rule to block ddl and write requests from the specified user. + * { + * "name":"block_test_user", + * "user": userId, + * "table": tableOcid + * "operations": ["DDL", "WRITE"] + * } + */ + rule = addRule("block_test_user_table", + returnError, + null, + userId, + tableOcid, + new String[] {"ddl", "write"}, + true); + reloadPersistentRules(); + blockOps(rule, alterTable, put); + /* not block read operations. 
*/ + executeOps(getTable, get); + + /* + * remove the above rule "block_test_user_table", + * operations can proceed + */ + deleteRule("block_test_user_table", true); + reloadPersistentRules(); + executeOps(createTable, getTable, put, get); + } + + @Override + protected void perTestHandleConfig(NoSQLHandleConfig hconfig) { + hconfig.configureDefaultRetryHandler(0, 0); + hconfig.setRequestTimeout(REQUEST_WAIT_MS); + hconfig.setTableRequestTimeout(REQUEST_WAIT_MS); + } + + private TableResult tableOperation(NoSQLHandle handle, + String ddl, + TableLimits limits, + String tableName) { + TableRequest req = new TableRequest(); + if (ddl != null) { + req.setStatement(ddl); + } + if (limits != null) { + req.setTableLimits(limits); + } + if (tableName != null) { + req.setTableName(tableName); + } + return handle.tableRequest(req); + } + + private void executeOps(OpWrapper... ops) { + for (OpWrapper op : ops) { + op.exec(); + } + } + + private void blockOps(Rule rule, OpWrapper... ops) { + Class expEx = getExpectedException(rule); + for (OpWrapper op : ops) { + op.exec(expEx); + } + } + + private Class getExpectedException(Rule rule) { + switch (rule.getActionType()) { + case DROP_REQUEST: + return RequestTimeoutException.class; + case RETURN_ERROR: + return returnErrorException; + default: + fail("Unexpected action type: " + rule.getAction()); + } + return null; + } + + private String getTableOcid(String tableName) { + if (!cloudRunning) { + return tableName; + } + + String url = tmUrlBase + "tables/" + tableName + + "?compartmentid=" + tenantId + + "&tenantid=" + tenantId; + HttpRequest httpRequest = new HttpRequest().disableRetry(); + HttpResponse response = httpRequest.doHttpGet(url); + if (200 != response.getStatusCode()) { + fail("getTable failed: " + response); + } + + /* Extract tableOcid from response */ + String output = response.getOutput(); + String field = "\"ocid\":"; + int pos = output.indexOf(field); + assertTrue(pos > 0); + pos += field.length() + 1; + assertTrue(pos < output.length()); + int to = output.indexOf("\"", pos); + String tableOcid = output.substring(pos, to); + return tableOcid.replace("_", "."); + } + + private PreparedStatement prepare(String query) { + PrepareRequest prep = new PrepareRequest().setStatement(query); + PrepareResult prepRet = handle.prepare(prep); + return prepRet.getPreparedStatement(); + } + + /** + * Run a code snippet and expect a specified error. + */ + private abstract class OpWrapper { + + private final String name; + + OpWrapper(String name) { + this.name = name; + } + + void exec() { + exec(null); + } + + void exec(Class expectedException) { + try { + execOp(); + if (expectedException != null) { + fail("Expected " + expectedException.getSimpleName() + + " on operation: " + name); + } + } catch (Exception e) { + if (expectedException == null) { + fail("Expected no exception, got " + e + ": " + name); + } else if (!expectedException.isInstance(e)) { + fail("Expected " + expectedException.getSimpleName() + + " but got " + e + ": " + name); + } + } + } + + abstract void execOp() throws Exception; + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/filter/FilterTestBase.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/filter/FilterTestBase.java new file mode 100644 index 00000000..f55fce66 --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/filter/FilterTestBase.java @@ -0,0 +1,227 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2011, 2018 Oracle and/or its affiliates. 
+ *
+ */
+
+package oracle.nosql.proxy.filter;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.junit.Assume.assumeTrue;
+import static oracle.nosql.proxy.protocol.HttpConstants.FILTERS_PATH;
+
+import java.lang.reflect.Type;
+import java.util.List;
+import java.util.Objects;
+
+import org.junit.BeforeClass;
+import com.google.gson.reflect.TypeToken;
+
+import io.netty.handler.codec.http.HttpResponseStatus;
+import io.netty.util.internal.logging.InternalLoggerFactory;
+import io.netty.util.internal.logging.JdkLoggerFactory;
+import oracle.nosql.proxy.ProxyTestBase;
+import oracle.nosql.proxy.filter.FilterHandler.RuleWrapper;
+import oracle.nosql.util.HttpRequest;
+import oracle.nosql.util.HttpResponse;
+import oracle.nosql.util.filter.Rule;
+import oracle.nosql.util.filter.Rule.Action;
+
+public class FilterTestBase extends ProxyTestBase {
+
+    private final String proxyFilterUrl =
+        getProxyEndpoint() + "/V0/" + FILTERS_PATH;
+    private final HttpRequest httpRequest = new HttpRequest().disableRetry();
+    private String scFilterUrl;
+
+    @BeforeClass
+    public static void staticSetUp()
+        throws Exception {
+
+        /*
+         * This test needs to call the proxy filter REST API to set up filter
+         * rules. The filter REST API is not supported onprem and is not
+         * accessible in the cloud service, so skip the test in onprem or cloud runs.
+         */
+        assumeTrue("Skip FilterTestBase in onprem or cloud test",
+                   !Boolean.getBoolean(ONPREM_PROP) &&
+                   !Boolean.getBoolean(USECLOUD_PROP));
+
+        ProxyTestBase.staticSetUp();
+    }
+
+    @Override
+    public void setUp() throws Exception {
+        /*
+         * Set Netty to use JDK logger factory.
+ */ + InternalLoggerFactory.setDefaultFactory(JdkLoggerFactory.INSTANCE); + removeAllRules(); + super.setUp(); + } + + @Override + public void tearDown() throws Exception { + removeAllRules(); + super.tearDown(); + } + + Rule addRule(String name, Action action, String[] operations) { + return addRule(name, action, null /* tenant */, null /* user */, + null /* table */, operations, false /* persist */); + } + + Rule addRule(String name, + Action action, + String tenantId, + String userId, + String tableId, + String[] operations, + boolean persist) { + + Rule rule = Rule.createRule(name, action, tenantId, userId, + tableId, operations); + addRule(rule.toJson(), persist); + + rule = getRule(name, persist); + assertNotNull(rule); + return rule; + } + + void addRule(String payload, boolean persist) { + addRule(payload, HttpResponseStatus.OK.code(), persist); + } + + void addRule(String payload, int statusCode, boolean persist) { + String url = getUrl(null, persist); + HttpResponse resp = httpRequest.doHttpPost(url, payload); + assertEquals(statusCode, resp.getStatusCode()); + } + + Rule getRule(String name, boolean persist) { + return getRule(name, HttpResponseStatus.OK.code(), persist); + } + + Rule getRule(String name, int statusCode, boolean persist) { + String url = getUrl(name, persist); + HttpResponse resp = httpRequest.doHttpGet(url); + assertEquals(statusCode, resp.getStatusCode()); + if (statusCode == HttpResponseStatus.OK.code()) { + return parseRuleFromResponse(resp); + } + return null; + } + + boolean deleteRule(String name, boolean persist) { + return deleteRule(name, HttpResponseStatus.OK.code(), persist); + } + + boolean deleteRule(String name, int statusCode, boolean persist) { + String url = getUrl(name, persist); + HttpResponse resp = httpRequest.doHttpDelete(url, null); + assertEquals(statusCode, resp.getStatusCode()); + if (statusCode == HttpResponseStatus.OK.code()) { + return resp.getOutput().contains("deleted"); + } + return false; + } + + List listRules(boolean persist) { + String url = getUrl(null, persist); + HttpResponse resp = httpRequest.doHttpGet(url); + assertEquals(HttpResponseStatus.OK.code(), resp.getStatusCode()); + return parseRulesFromResponse(resp); + } + + List listAllCacheRules() { + String url = getUrl(null, false) + "?all=true"; + HttpResponse resp = httpRequest.doHttpGet(url); + assertEquals(HttpResponseStatus.OK.code(), resp.getStatusCode()); + return parseRuleWrappersFromResponse(resp); + } + + void reloadPersistentRules() { + String url = getUrl("reload", false); + HttpResponse resp = httpRequest.doHttpPut(url, null); + assertEquals(HttpResponseStatus.OK.code(), resp.getStatusCode()); + } + + private String getUrl(String append, boolean persist) { + String url = persist ? 
getSCFilterUrl() : proxyFilterUrl;
+        if (url == null) {
+            fail("Filter url should not be null");
+        }
+        if (append != null) {
+            url += "/" + append;
+        }
+        return url;
+    }
+
+    private String getSCFilterUrl() {
+        if (cloudRunning) {
+            if (scFilterUrl == null && scHost != null && scPort != null) {
+                scFilterUrl = "http://" + scHost + ":" + scPort + "/V0/filters";
+            }
+            return scFilterUrl;
+        }
+        return null;
+    }
+
+    private void removeAllRules() {
+        removeAllRules(false);
+        if (cloudRunning) {
+            removeAllRules(true);
+        }
+        reloadPersistentRules();
+    }
+
+    void removeAllRules(boolean persist) {
+        List<Rule> rules = listRules(persist);
+        for (Rule rule : rules) {
+            assertTrue(deleteRule(rule.getName(), persist));
+        }
+        assertTrue(listRules(persist).isEmpty());
+    }
+
+    private List<Rule> parseRulesFromResponse(HttpResponse resp) {
+        String output = resp.getOutput().trim();
+        if (output.isEmpty()) {
+            return null;
+        }
+
+        Type type = new TypeToken<List<Rule>>(){}.getType();
+        return Rule.getGson().fromJson(output, type);
+    }
+
+    private Rule parseRuleFromResponse(HttpResponse resp) {
+        String output = resp.getOutput().trim();
+        if (output.isEmpty()) {
+            return null;
+        }
+
+        return Rule.fromJson(output);
+    }
+
+    private List<RuleWrapper> parseRuleWrappersFromResponse(HttpResponse resp) {
+        String output = resp.getOutput().trim();
+        if (output.isEmpty()) {
+            return null;
+        }
+
+        Type type = new TypeToken<List<RuleWrapper>>(){}.getType();
+        return Rule.getGson().fromJson(output, type);
+    }
+
+    void assertRulesEquals(Rule r1, Rule r2) {
+        assertTrue(r1.getName().equalsIgnoreCase(r2.getName()));
+        assertTrue(r1.getAction().equals(r2.getAction()));
+        assertTrue(Objects.equals(r1.getTenant(), r2.getTenant()));
+        assertTrue(Objects.equals(r1.getUser(), r2.getUser()));
+        assertTrue(Objects.equals(r1.getTable(), r2.getTable()));
+        assertTrue(r1.getOpTypes().equals(r2.getOpTypes()));
+    }
+}
diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/filter/RuleTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/filter/RuleTest.java
new file mode 100644
index 00000000..4672e093
--- /dev/null
+++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/filter/RuleTest.java
@@ -0,0 +1,372 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2011, 2018 Oracle and/or its affiliates. All rights reserved.
+ * + */ + +package oracle.nosql.proxy.filter; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assume.assumeTrue; + +import java.util.ArrayList; +import java.util.List; + +import org.junit.Test; + +import oracle.nosql.proxy.filter.FilterHandler.RuleWrapper; +import oracle.nosql.proxy.protocol.Protocol.OpCode; +import oracle.nosql.util.filter.Rule; + +/* + * Test Rule.match() and FilterService add/delete/get/list operations + */ +public class RuleTest extends FilterTestBase { + + private final String testUserId = + "ocid1.user.oc1..aaaaaaaaewwvvrm63ckh4e5an3q4use7tyieefx4qlpfxdkgezpujubfpp2a"; + private final String testTenantId = + "ocid1.tenancy.oc1..aaaaaaaattuxbj75pnn3nksvzyidshdbrfmmeflv4kkemajroz2thvca4kba"; + private final String testTableId = + "ocid1.nosqltable.oc1.phx.amaaaaaackmxu5iakh7m2e4uyfuyd66amaypeogtrcb5gu4iveqlcm567ppa"; + + /* Test FilterHandler.matchRule() */ + @Test + public void testMatchRule() { + Rule.OpType type; + boolean ret; + boolean exp; + Rule rule; + + rule = Rule.createRule("rule1", null, null, null, null, + new String[] {"all"}); + for (OpCode op : OpCode.values()) { + ret = FilterHandler.matchRule(rule, op, testTenantId, + testUserId, testTableId); + assertTrue(ret); + } + + rule = Rule.createRule("rule1", null, null, null, null, + new String[] {"ddl", "write"}); + for (OpCode op : OpCode.values()) { + type = FilterHandler.getOpType(op); + exp = (type == Rule.OpType.DDL) || (type == Rule.OpType.WRITE); + ret = FilterHandler.matchRule(rule, op, testTenantId, + testUserId, testTableId); + assertEquals(exp, ret); + } + + rule = Rule.createRule("rule1", null, testTenantId, null, null, + new String[]{"ddl"}); + assertTrue(FilterHandler.matchRule(rule, OpCode.CREATE_TABLE, + testTenantId, testUserId, + testTableId)); + assertFalse(FilterHandler.matchRule(rule, OpCode.CREATE_TABLE, + "invalidTenant", testUserId, + testTableId)); + + rule = Rule.createRule("rule1", null, null, testUserId, + null, new String[]{"ddl"}); + assertTrue(FilterHandler.matchRule(rule, OpCode.CREATE_TABLE, + testTenantId, testUserId, + testTableId)); + assertFalse(FilterHandler.matchRule(rule, OpCode.CREATE_TABLE, + testTenantId, "invalidUser", + testTableId)); + + rule = Rule.createRule("rule1", null, null, null, testTableId, + new String[]{"all"}); + assertTrue(FilterHandler.matchRule(rule, OpCode.CREATE_TABLE, + testTenantId, testUserId, + testTableId)); + assertFalse(FilterHandler.matchRule(rule, OpCode.CREATE_TABLE, + testTenantId, testUserId, + null)); + assertFalse(FilterHandler.matchRule(rule, OpCode.CREATE_TABLE, + testTenantId, testUserId, + "invalidTable")); + + rule = Rule.createRule("rule1", Rule.DROP_REQUEST, + testTenantId, testUserId, testTableId, + new String[] {"ddl"}); + assertTrue(FilterHandler.matchRule(rule, OpCode.ALTER_TABLE, + testTenantId, testUserId, + testTableId)); + assertFalse(FilterHandler.matchRule(rule, OpCode.QUERY, + testTenantId, testUserId, + testTableId)); + assertFalse(FilterHandler.matchRule(rule, OpCode.ALTER_TABLE, + "invalidTenant", testUserId, + testTableId)); + assertFalse(FilterHandler.matchRule(rule, OpCode.ALTER_TABLE, + testTenantId, "invalidUser", + testTableId)); + assertFalse(FilterHandler.matchRule(rule, OpCode.ALTER_TABLE, + testTenantId, testUserId, + "invalidTable")); + } + + /* Test add rule */ + @Test + public void testAddRule() { + assumeTrue(!onprem); + + /* Add filter all rule */ + Rule rule = 
Rule.createRule("rule_1", null, null, null, null, + new String[] {"all"}); + addRule(rule.toJson(), false); + assertRulesEquals(rule, getRule(rule.getName(), false)); + /* Add the rule again, should be rejected */ + addRule(rule.toJson(), 409 /* CONFLICT */, false); + + /* + * Adding a block all rule that includes all the operation types + * (read, write, ddl) which is equivalent to "ALL" should be rejected + * as well. + */ + rule = Rule.createRule("rule_2", null, null, null, null, + new String[] {"read", "write", "ddl", + "config_read", "config_update"}); + addRule(rule.toJson(), 409 /* CONFLICT */, false); + + /* + * Update the rule to block the specified user operates on + * the specified table. + */ + rule = Rule.createRule("rule_3", Rule.DROP_REQUEST, + null, testUserId, testTableId, + new String[] {"all"}); + addRule(rule.toJson(), false); + assertRulesEquals(rule, getRule(rule.getName(), false)); + + /* + * Add a duplicate rule which same user, table and equivalent + * operations, it should be rejected. + */ + rule = Rule.createRule("rule_4", Rule.DROP_REQUEST, + null, testUserId, testTableId, + new String[] {"read", "write", "ddl", + "config_read", "config_update"}); + addRule(rule.toJson(), 409 /* CONFLICT */, false); + + /* Invalid rules */ + String[] badPayloads = new String[] { + "", + "abc", + "{\"user\": \"" + testUserId + "\"}", + "{\"name\": \"rule1\"}", + "{\"name\": \"rule1\", \"user\":\" + testUserId + \"}", + }; + for (String bad : badPayloads) { + addRule(bad, 400, false); + } + } + + /* Test get and delete rule */ + @Test + public void testGetDeleteRule() { + final Rule[] rules = new Rule[] { + Rule.createRule("block_all", null, null, null, null, + new String[] {"all"}), + Rule.createRule("block_user_" + testUserId, Rule.DROP_REQUEST, + null, testUserId, null, new String[] {"ddl"}), + Rule.createRule("block_user_table", Rule.DROP_REQUEST, + null, testUserId, testTableId, + new String[] {"ddl", "write"}), + }; + + for (Rule rule : rules) { + addRule(rule.toJson(), false); + } + assertEquals(rules.length, listRules(false).size()); + + /* Delete the rule with case-insensitive name */ + boolean testUpperCase = false; + String name; + for (Rule rule : rules) { + name = rule.getName(); + if (testUpperCase) { + name = name.toUpperCase(); + } + testUpperCase = !testUpperCase; + + Rule ret = getRule(name, false); + assertRulesEquals(rule, ret); + + /* Delete the rule */ + assertTrue(deleteRule(name, false)); + + /* Get the rule, should get 404 NOT_FOUND */ + getRule(name, 404, false); + + /* Delete again, should get 404 NOT_FOUND */ + assertFalse(deleteRule(name, 404, false)); + } + assertTrue(listRules(false).isEmpty()); + + /* Invalid argument */ + deleteRule(null, 400, false); + deleteRule("", 400, false); + } + + /* Test list rules */ + @Test + public void testListRules() { + List rules = new ArrayList<>(); + + rules.add(Rule.createRule("block_all", null, null, null, null, + new String[] {"all"})); + rules.add(Rule.createRule("block_ddl_write", null, null, null, null, + new String[] {"ddl", "write"})); + rules.add(Rule.createRule("block_user", Rule.DROP_REQUEST, + null, testUserId, null, + new String[]{"read", "write"})); + rules.add(Rule.createRule("block_user_table", Rule.DROP_REQUEST, + null, testUserId, testTableId, + new String[] {"all"})); + for (Rule rule : rules) { + addRule(rule.toJson(), false); + } + + List results = listRules(false); + assertEquals(rules.size(), results.size()); + for (int i = 0; i < results.size(); i++) { + assertRulesEquals(rules.get(i), 
results.get(i)); + } + } + + /* Test persistent rules add/delete/get */ + @Test + public void testPersistentRule() { + assumeTrue(useMiniCloud); + + Rule[] rules = new Rule[] { + Rule.createRule("rule_1", Rule.DROP_REQUEST, + testTenantId, null, null, + new String[] {"all"}), + Rule.createRule("rule_2", Rule.DROP_REQUEST, + null, testUserId, null, + new String[] {"all"}), + Rule.createRule("rule_3", Rule.DROP_REQUEST, + null, null, testTableId, + new String[] {"all"}), + Rule.createRule("rule_4", Rule.DROP_REQUEST, + null, testUserId, testTableId, + new String[] {"read", "write", "ddl", + "config_read", "config_update"}) + }; + + for (Rule rule: rules) { + addRule(rule.toJson(), true /* persistent */); + assertRulesEquals(rule, getRule(rule.getName(), true)); + } + assertEquals(rules.length, listRules(true).size()); + + /* + * Add a duplicate rule with same name, it should be rejected. + */ + Rule rule = Rule.createRule("rule_2", Rule.DROP_REQUEST, + null, testUserId, testTableId, + new String[] {"write", "ddl"}); + addRule(rule.toJson(), 409 /* CONFLICT */, true /* persistent */); + + /* + * Add a duplicate rule with same attributes, it should be rejected. + */ + rule = Rule.createRule("rule_4_dup", Rule.DROP_REQUEST, + null, testUserId, testTableId, + new String[] {"all"}); + addRule(rule.toJson(), 409 /* CONFLICT */, true /* persistent */); + + /* Invalid rules */ + String[] badPayloads = new String[] { + "{\"user\": \"" + testUserId + "\"}", + "{\"name\": \"rule1\"}", + "{\"name\": \"rule1\", \"user\":\" + testUserId + \"}", + "{\"name\": \"rule1\", \"operations\":[\"ddl\",\"write\"]}", + }; + for (String bad : badPayloads) { + addRule(bad, 400, true); + } + + /* Delete the rule */ + rule = rules[0]; + String name = rule.getName().toUpperCase(); + assertRulesEquals(rule, getRule(name, true)); + assertTrue(deleteRule(name, true)); + + /* Get the rule, should get 404 NOT_FOUND */ + getRule(name, 404, true); + + /* Delete again, should get 404 NOT_FOUND */ + assertFalse(deleteRule(name, 404, true)); + } + + /* Test mixing transient and persistent rules in cache */ + @Test + public void testMixedTransientPersistentRules() { + assumeTrue(useMiniCloud); + + final String transientName = "t_block_usera"; + final String persistentName = "p_block_usera"; + + List rules; + RuleWrapper rw; + + /* Add a transient rule */ + Rule trule = Rule.createRule(transientName, + Rule.DROP_REQUEST, + null, + testUserId, + testTableId, + new String[] {"all"}); + addRule(trule.toJson(), false /* persist */); + rules = listAllCacheRules(); + assertEquals(1, rules.size()); + rw = rules.get(0); + assertTrue(transientName.equalsIgnoreCase(rw.getRuleName())); + assertTrue(rw.isTransient()); + + /* Add a persistent rule with same attributes as above transient rule */ + Rule prule = Rule.createRule(persistentName, + Rule.DROP_REQUEST, + null, + testUserId, + testTableId, + new String[] {"all"}); + addRule(prule.toJson(), true /* persist */); + reloadPersistentRules(); + rules = listAllCacheRules(); + assertEquals(2, rules.size()); + + /* Delete the transient rule */ + deleteRule(transientName, false /* persist */); + rules = listAllCacheRules(); + assertEquals(1, rules.size()); + rw = rules.get(0); + assertTrue(persistentName.equalsIgnoreCase(rw.getRuleName())); + assertFalse(rw.isTransient()); + + /* Add the transient rule back */ + addRule(trule.toJson(), false /* persist */); + rules = listAllCacheRules(); + assertEquals(2, rules.size()); + + /* Delete the persistent rule */ + deleteRule(persistentName, true /* 
persist */);
+        reloadPersistentRules();
+        rules = listAllCacheRules();
+        assertEquals(1, rules.size());
+        rw = rules.get(0);
+        assertTrue(transientName.equalsIgnoreCase(rw.getRuleName()));
+        assertTrue(rw.isTransient());
+
+        /* Delete the transient rule */
+        deleteRule(transientName, false /* persist */);
+        rules = listAllCacheRules();
+        assertEquals(0, rules.size());
+    }
+}
diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/kv/KVNonSecureProxyTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/kv/KVNonSecureProxyTest.java
new file mode 100644
index 00000000..c02136cf
--- /dev/null
+++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/kv/KVNonSecureProxyTest.java
@@ -0,0 +1,375 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2011, 2018 Oracle and/or its affiliates. All rights reserved.
+ *
+ */
+
+package oracle.nosql.proxy.kv;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.fail;
+import static org.junit.Assume.assumeTrue;
+
+import java.io.PrintWriter;
+import java.util.Properties;
+import java.util.concurrent.TimeUnit;
+import java.util.List;
+import java.util.logging.Logger;
+
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import oracle.kv.KVStoreConfig;
+import oracle.kv.KVStoreFactory;
+import oracle.kv.impl.test.TestStatus;
+import oracle.kv.impl.xregion.service.JsonConfig;
+import oracle.kv.impl.xregion.service.XRegionService;
+import oracle.kv.util.kvlite.KVLite;
+import oracle.nosql.driver.NoSQLHandle;
+import oracle.nosql.driver.NoSQLHandleConfig;
+import oracle.nosql.driver.NoSQLHandleFactory;
+import oracle.nosql.driver.kv.StoreAccessTokenProvider;
+import oracle.nosql.driver.ops.PrepareRequest;
+import oracle.nosql.driver.ops.PrepareResult;
+import oracle.nosql.driver.ops.QueryRequest;
+import oracle.nosql.driver.ops.QueryResult;
+import oracle.nosql.driver.ops.TableResult;
+import oracle.nosql.driver.values.ArrayValue;
+import oracle.nosql.driver.values.IntegerValue;
+import oracle.nosql.driver.values.JsonOptions;
+import oracle.nosql.driver.values.MapValue;
+import oracle.nosql.proxy.Config;
+import oracle.nosql.proxy.Proxy;
+import oracle.nosql.proxy.ProxyMain;
+import oracle.nosql.proxy.ProxyTestBase;
+
+
+/**
+ * The tests in KVProxyTest are run against a non-secure store and proxy
+ * using this class's setUp.
+ */
+public class KVNonSecureProxyTest extends KVProxyTest {
+
+    @BeforeClass
+    public static void staticSetUp() throws Exception {
+        assumeTrue(!Boolean.getBoolean(USEMC_PROP) &&
+                   !Boolean.getBoolean(USECLOUD_PROP));
+        cleanupTestDir();
+        kvlite = startKVLite(hostName,
+                             null,   // default store name
+                             false,  // useThreads = false
+                             false,  // verbose = false
+                             false,  // isMultiShard = false
+                             0,      // memoryMB = 0
+                             false); // isSecure = false
+
+        proxy = startKVProxy(getStoreName(),
+                             (hostName + ":" + getKVPort()),
+                             ProxyTestBase.getProxyPort(),
+                             false);
+
+        waitForStoreInit(45);
+
+        setAdmin(null);
+
+        KVStoreConfig config = new KVStoreConfig(getStoreName(),
+                                                 hostName+":"+
+                                                 getKVPort());
+        store = KVStoreFactory.getStore(config);
+
+    }
+
+    @AfterClass
+    public static void staticTearDown() throws Exception {
+        KVProxyTest.staticTearDown();
+    }
+
+    @Override
+    @Before
+    public void setUp() throws Exception {
+        endpoint = getProxyEndpoint();
+        authProvider = new StoreAccessTokenProvider();
+        getHandle();
+    }
+
+    /*
+     * Override this to avoid (expected) failures related to secure
+     * operations on a
not-secure store. + */ + @Test + @Override + public void testSecureSysOp() { + /* this fails in a not secure configuration */ + try { + doSysOp(kvhandle, + "create user newuser identified by 'ChrisToph \"_12&%'"); + fail("Should have failed with IAE"); + } catch (IllegalArgumentException iae) { + // success + } + } + + @Test + @Override + public void testSystemExceptions() { + /* don't run */ + } + + @Test + @Override + public void testTokenTimeout() { + /* secure only, don't run */ + } + + @Test + @Override + public void testNonSecureAccess() { + /* don't run */ + } + + @Override + @Test + public void testInvalidToken() + throws Exception { + /* secure only, don't run */ + } + + /* + * More complex MR testing - create 2 kvlite instances, each in their + * own region, start agents and try stuff... This is here to avoid the need + * for a secure version. The process to create 2 kvlite instances and agents + * is: + * 0. one kvlite instance has already been started by the test base + * 1. create a second kvlite instance using a different port, store + * name, etc. This test relies on those being available + * 2. start a second proxy against the second kvlite instance + * 3. set the local region for each kvlite instance + * 4. create MR table config files, one for each region. The format + * is based on the public documentation for MR tables. + * 5. create and start an XRegionService instance for each region, using + * the generated config files. These are the MR agents. + * 6. create remote regions for each region, pointing to each other. This + * is possible now because the agents are running + * At this point MR tests can be run. Cleanup needs to shut down all + * services that require it -- agents, proxy, kvlite + */ + @Test + public void testMultiRegion() throws Exception { + Proxy proxy2 = null; + KVLite kvlite2 = null; + NoSQLHandle kvhandle2 = null; + final String storeName2 = "kvstore2"; + final String portRange2 = "13271,13280"; + final int port2 = 13270; + final String root2 = getTestDir() + "/" + storeName2; + final int proxyPort2 = 8097; + final String endpoint2 = "http://localhost:" + proxyPort2; + final boolean verbose2 = false; + + /* region info */ + final String region1 = "region1"; + final String region2 = "region2"; + XRegionService service1 = null; + XRegionService service2 = null; + + TestStatus.setActive(true); + + try { + + kvlite2 = startKVLite( + "localhost", + storeName2, + false, // useThreads + false, // verbose + false, // multishard, + 0, //memoryMB + false, // secure + port2, // port + portRange2, // port range + root2); + + /* create a proxy for the new kvlite instance */ + proxy2 = startKVProxy(storeName2, + ("localhost:" + port2), + proxyPort2, + verbose2); + + proxy2.getTenantManager().waitForStoreInit(45); + + /* get a handle for the new store/proxy combo */ + NoSQLHandleConfig config = new NoSQLHandleConfig(endpoint2); + config.setAuthorizationProvider(authProvider); + kvhandle2 = NoSQLHandleFactory.createNoSQLHandle(config); + + /* + * have 2 kvlite, 2 proxies, 2 handles + * configure xregion... 
+ */ + doSysOp(kvhandle, "set local region " + region1); + doSysOp(kvhandle2, "set local region " + region2); + + /* region1 config file */ + MapValue cfg1 = new MapValue(); + cfg1.put("path", getTestDir()); + cfg1.put("agentGroupSize", 1); + cfg1.put("agentId", 0); + cfg1.put("region", region1); + cfg1.put("store", getStoreName()); + cfg1.put("helpers", new ArrayValue() + .add("localhost:" + getKVPort())); + cfg1.put("regions", new ArrayValue().add( + new MapValue().put("name", region2) + .put("store", storeName2) + .put("helpers", new ArrayValue() + .add("localhost:"+port2)))); + + /* region2 config file */ + MapValue cfg2 = new MapValue(); + cfg2.put("path", root2); + cfg2.put("agentGroupSize", 1); + cfg2.put("agentId", 0); + cfg2.put("region", region2); + cfg2.put("store", storeName2); + cfg2.put("helpers", new ArrayValue().add("localhost:" + port2)); + cfg2.put("regions", new ArrayValue().add( + new MapValue().put("name", region1) + .put("store", getStoreName()) + .put("helpers", new ArrayValue() + .add("localhost:"+getKVPort())))); + + String cfgFile1 = getTestDir() + "/config.json"; + String cfgFile2 = root2 + "/config.json"; + writeToFile(cfg1.toJson(JsonOptions.PRETTY), cfgFile1); + writeToFile(cfg2.toJson(JsonOptions.PRETTY), cfgFile2); + + Logger logger = Logger.getLogger(getClass().getName()); + + /* create and start xregion agents */ + service1 = + new XRegionService( + JsonConfig.readJsonFile(cfgFile1, logger), logger); + service2 = + new XRegionService( + JsonConfig.readJsonFile(cfgFile2, logger), logger); + service1.start(); + service2.start(); + + /* create remote regions */ + doSysOp(kvhandle, "create region " + region2); + doSysOp(kvhandle2, "create region " + region1); + + /* Now... create mr tables... */ + String createMRTable = + "create table mrtable(id integer, primary key(id)) in " + + "regions region1, region2"; + String createCounterTable = + "create table mr_counter_table(id integer, counter integer " + + " as mr_counter, primary key(id)) in regions region1, region2"; + String alterTable = + "alter table mrtable(add counter integer as mr_counter)"; + + TableResult tres = tableOperation(kvhandle, + createCounterTable, + null, + 10000); + assertNotNull(tres.getSchema()); + + tres = tableOperation(kvhandle, + createMRTable, + null, + 10000); + assertNotNull(tres.getSchema()); + + /* try adding a CRDT to test the evolution path */ + tres = tableOperation(kvhandle, + alterTable, + null, + 10000); + assertNotNull(tres.getSchema()); + + /* do an insert into the CRDT in original table */ + String insert = + "insert into mr_counter_table values (1, default)"; + QueryRequest req = new QueryRequest().setStatement(insert); + QueryResult ret = kvhandle.query(req); + assertEquals(1, ret.getResults().get(0).asMap() + .get("NumRowsInserted").getInt()); + + /* do an insert using a prepared query */ + String insertP = "declare $pkey integer; " + + "insert into mr_counter_table values ($pkey, default)"; + PrepareRequest prepReq = new PrepareRequest() + .setStatement(insertP); + PrepareResult prepRet = kvhandle.prepare(prepReq); + assertNotNull(prepRet.getPreparedStatement()); + prepRet.getPreparedStatement() + .setVariable("$pkey", new IntegerValue(0)); + + req = new QueryRequest().setPreparedStatement(prepRet); + ret = kvhandle.query(req); + List results = ret.getResults(); + assertEquals(results.size(), 1); + int num = results.get(0).get("NumRowsInserted").getInt(); + assertEquals(num, 1); + } finally { + if (kvhandle2 != null) { + kvhandle2.close(); + } + if (proxy2 != null) { 
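+                /* best-effort cleanup: ignore failures while shutting down the second proxy */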
+ try { + proxy2.shutdown(3, TimeUnit.SECONDS); + } catch (Exception e) {} + } + if (service1 != null) { + service1.shutdown(); + } + if (service2 != null) { + service2.shutdown(); + } + if (kvlite2 != null) { + kvlite2.stop(false); + } + } + } + + /* + * Write the string content to the file specified by path + */ + static void writeToFile(String content, String path) { + try (PrintWriter out = new PrintWriter(path)) { + out.println(content); + } catch (Exception e) { + fail("Exception writing to file: " + e); + } + } + + static Proxy startKVProxy(String storeName, + String helperHosts, + int proxyPort, + boolean verbose) { + + /* create a proxy */ + Properties commandLine = new Properties(); + commandLine.setProperty(Config.PROXY_TYPE.paramName, + Config.ProxyType.KVPROXY.name()); + + commandLine.setProperty(Config.STORE_NAME.paramName, + storeName); + + commandLine.setProperty(Config.HELPER_HOSTS.paramName, + helperHosts); + + commandLine.setProperty(Config.HTTP_PORT.paramName, + Integer.toString(proxyPort)); + + commandLine.setProperty(Config.VERBOSE.paramName, + Boolean.toString(verbose)); + + return ProxyMain.startProxy(commandLine); + } + +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/kv/KVProxyTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/kv/KVProxyTest.java new file mode 100644 index 00000000..401b581f --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/kv/KVProxyTest.java @@ -0,0 +1,1959 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2011, 2018 Oracle and/or its affiliates. All rights reserved. + * + */ + +package oracle.nosql.proxy.kv; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeTrue; + +import java.io.File; +import java.io.FileWriter; +import java.io.IOException; +import java.io.OutputStream; +import java.io.PrintStream; +import java.io.Writer; +import java.lang.reflect.Method; +import java.math.BigDecimal; +import java.util.HashSet; +import java.util.List; +import java.util.Properties; +import java.util.Set; +import java.util.concurrent.TimeUnit; + +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +import io.netty.handler.ssl.util.SelfSignedCertificate; +import io.netty.util.internal.logging.InternalLoggerFactory; +import io.netty.util.internal.logging.JdkLoggerFactory; +import oracle.kv.KVSecurityConstants; +import oracle.kv.KVStore; +import oracle.kv.KVStoreConfig; +import oracle.kv.KVStoreFactory; +import oracle.kv.KVVersion; +import oracle.kv.LoginCredentials; +import oracle.kv.impl.admin.CommandServiceAPI; +import oracle.kv.impl.api.table.FieldValueImpl; +import oracle.kv.impl.api.table.RecordValueImpl; +import oracle.kv.impl.api.table.RowImpl; +import oracle.kv.impl.api.table.TableAPIImpl; +import oracle.kv.impl.api.table.TableImpl; +import oracle.kv.impl.param.ParameterMap; +import oracle.kv.impl.param.ParameterState; +import oracle.kv.impl.security.RoleInstance; +import oracle.kv.impl.security.login.LoginManager; +import oracle.kv.impl.security.util.KVStoreLogin; +import oracle.kv.impl.util.registry.RegistryUtils; +import oracle.kv.table.FieldDef.Type; +import oracle.kv.table.FieldValueFactory; +import oracle.nosql.driver.BatchOperationNumberLimitException; +import 
oracle.nosql.driver.IndexExistsException; +import oracle.nosql.driver.IndexNotFoundException; +import oracle.nosql.driver.InvalidAuthorizationException; +import oracle.nosql.driver.NoSQLHandle; +import oracle.nosql.driver.NoSQLHandleConfig; +import oracle.nosql.driver.NoSQLHandleFactory; +import oracle.nosql.driver.OperationNotSupportedException; +import oracle.nosql.driver.RequestTimeoutException; +import oracle.nosql.driver.ResourceExistsException; +import oracle.nosql.driver.ResourceNotFoundException; +import oracle.nosql.driver.SystemException; +import oracle.nosql.driver.TableExistsException; +import oracle.nosql.driver.TableNotFoundException; +import oracle.nosql.driver.UserInfo; +import oracle.nosql.driver.http.NoSQLHandleImpl; +import oracle.nosql.driver.kv.StoreAccessTokenProvider; +import oracle.nosql.driver.ops.DeleteRequest; +import oracle.nosql.driver.ops.DeleteResult; +import oracle.nosql.driver.ops.GetRequest; +import oracle.nosql.driver.ops.GetResult; +import oracle.nosql.driver.ops.ListTablesRequest; +import oracle.nosql.driver.ops.ListTablesResult; +import oracle.nosql.driver.ops.MultiDeleteRequest; +import oracle.nosql.driver.ops.MultiDeleteResult; +import oracle.nosql.driver.ops.PutRequest; +import oracle.nosql.driver.ops.PutResult; +import oracle.nosql.driver.ops.QueryRequest; +import oracle.nosql.driver.ops.QueryResult; +import oracle.nosql.driver.ops.Request; +import oracle.nosql.driver.ops.Result; +import oracle.nosql.driver.ops.SystemRequest; +import oracle.nosql.driver.ops.SystemResult; +import oracle.nosql.driver.ops.TableRequest; +import oracle.nosql.driver.ops.TableResult; +import oracle.nosql.driver.ops.TableUsageRequest; +import oracle.nosql.driver.ops.WriteMultipleRequest; +import oracle.nosql.driver.ops.WriteMultipleResult; +import oracle.nosql.driver.values.MapValue; +import oracle.nosql.proxy.Config; +import oracle.nosql.proxy.ProxyMain; +import oracle.nosql.proxy.ProxyTestBase; + +public class KVProxyTest extends ProxyTestBase { + + final static int BATCH_OP_NUMBER_LIMIT = rlimits.getBatchOpNumberLimit(); + protected String endpoint; + protected StoreAccessTokenProvider authProvider; + protected NoSQLHandle kvhandle; + protected static CommandServiceAPI admin; + protected static KVStore store; + + private static void writeFile(String string, File file) + throws IOException { + + final Writer out = new FileWriter(file); + try { + out.write(string); + out.flush(); + } finally { + out.close(); + } + } + + @BeforeClass + public static void staticSetUp() throws Exception { + assumeTrue(!Boolean.getBoolean(USEMC_PROP) && + !Boolean.getBoolean(USECLOUD_PROP)); + + verbose = Boolean.getBoolean(VERBOSE_PROP); + + cleanupTestDir(); + /* + * Filter out the std output of kvlite. It prints to stdout + * when generating security information. 
+ */ + PrintStream printStreamOriginal = System.out; + System.setOut(new PrintStream(new OutputStream() { + @Override + public void write(int b) throws IOException {} + })); + kvlite = startKVLite(hostName, + null, // default store name + false, // useThreads = false + false, // verbose = false + false, // isMultiShard = false + 0, // memoryMB = 0 + true); // isSecure = true + System.setOut(printStreamOriginal); + + final String securityDir = getTestDir() + "/security"; + final String loginFile = securityDir + "/user.security"; + + /* create admin user */ + final KVStoreLogin storeLogin = + new KVStoreLogin( + "admin", loginFile); + storeLogin.loadSecurityProperties(); + storeLogin.prepareRegistryCSF(); + final LoginCredentials creds = + storeLogin.makeShellLoginCredentials(); + final LoginManager loginMgr = + KVStoreLogin.getAdminLoginMgr(hostName, getKVPort(), creds); + setAdmin(loginMgr); + KVStoreConfig config = new KVStoreConfig(getStoreName(), + hostName+":"+ + getKVPort()); + config.setSecurityProperties(storeLogin.getSecurityProperties()); + store = KVStoreFactory.getStore(config, creds, null); + + /* create proxy user with base privileges */ + int planId = admin.createCreateUserPlan( + "Create User", "proxy", true, false, + "NoSql00__123456".toCharArray()); + execPlan(planId); + + final File proxyLoginFile = new File(securityDir, "proxy.security"); + final File passwordFile = new File(securityDir, "proxy.passwd"); + writeFile("Password Store:\n" + + "secret.proxy=NoSql00__123456\n", + passwordFile); + writeFile("oracle.kv.auth.pwdfile.file=" + passwordFile + + "\noracle.kv.auth.username=proxy" + + "\noracle.kv.transport=ssl" + + "\noracle.kv.ssl.trustStore=" + securityDir + + "/client.trust", + proxyLoginFile); + + /* create test user with extra privileges */ + planId = admin.createCreateUserPlan( + "Create User", "test", true, false, + "NoSql00__123456".toCharArray()); + execPlan(planId); + + final Set roles = new HashSet(); + roles.add(RoleInstance.READWRITE_NAME); + roles.add(RoleInstance.DBADMIN_NAME); + roles.add(RoleInstance.SYSADMIN_NAME); + planId = admin.createGrantPlan( + "Grant User", "test", + roles); + execPlan(planId); + + InternalLoggerFactory.setDefaultFactory(JdkLoggerFactory.INSTANCE); + SelfSignedCertificate ssc = new SelfSignedCertificate(getHostName()); + prepareTruststore(ssc.certificate()); + + Properties commandLine = new Properties(); + commandLine.setProperty(Config.PROXY_TYPE.paramName, + Config.ProxyType.KVPROXY.name()); + commandLine.setProperty(Config.HTTPS_PORT.paramName, + Integer.toString( + ProxyTestBase.getProxyHttpsPort())); + commandLine.setProperty(Config.STORE_NAME.paramName, + getStoreName()); + commandLine.setProperty(Config.HELPER_HOSTS.paramName, + hostName + ":" + getKVPort()); + + commandLine.setProperty(Config.STORE_SECURITY_FILE.paramName, + proxyLoginFile.getAbsolutePath()); + + commandLine.setProperty(Config.SSL_CERTIFICATE.paramName, + ssc.certificate().getAbsolutePath()); + commandLine.setProperty(Config.SSL_PRIVATE_KEY.paramName, + ssc.privateKey().getAbsolutePath()); + + /* async now defaults to true */ + boolean async = true; + if (System.getProperty(PROXY_ASYNC_PROP) != null) { + async = Boolean.getBoolean(PROXY_ASYNC_PROP); + } + commandLine.setProperty(Config.ASYNC.paramName, Boolean.toString( + async)); + commandLine.setProperty(Config.VERBOSE.paramName, Boolean.toString( + verbose)); + + proxy = ProxyMain.startProxy(commandLine); + waitForStoreInit(20); + } + + protected static void setAdmin(LoginManager mgr) throws 
Exception { + admin = RegistryUtils.getAdmin(hostName, getKVPort(), mgr); + } + + @AfterClass + public static void staticTearDown() throws Exception { + if (tm != null) { + tm.close(); + tm = null; + } + if (proxy != null) { + proxy.shutdown(3, TimeUnit.SECONDS); + proxy = null; + } + if (kvlite != null) { + kvlite.stop(true); + kvlite = null; + } + System.clearProperty(KVSecurityConstants.SECURITY_FILE_PROPERTY); + System.clearProperty("javax.net.ssl.trustStore"); + } + + @Override + @Before + public void setUp() throws Exception { + endpoint = getProxyHttpsEndpoint(); + authProvider = new StoreAccessTokenProvider( + "test", "NoSql00__123456".toCharArray()); + getHandle(); + } + + @Override + @After + public void tearDown() throws Exception { + if (kvhandle != null) { + dropAllMetadata(kvhandle); + kvhandle.close(); + } + + if (authProvider != null) { + authProvider.close(); + authProvider = null; + } + } + + /** + * A method to drop tables, namespaces, and users + */ + static void dropAllMetadata(NoSQLHandle nosqlHandle) { + final Set exceptUsers = new HashSet(); + exceptUsers.add("test"); + exceptUsers.add("proxy"); + + if (nosqlHandle == null) { + return; + } + /* dropAllTables doesn't catch namespaced tables -- TODO */ + ProxyTestBase.dropAllTables(nosqlHandle, true); + dropAllNamespaces(nosqlHandle); // this uses cascade to drop tables + dropAllUsers(nosqlHandle, exceptUsers); + } + + static void dropAllNamespaces(NoSQLHandle nosqlHandle) { + String[] namespaces = nosqlHandle.listNamespaces(); + if (namespaces == null) { + return; + } + + for (String ns : namespaces) { + if (ns.equals("sysdefault")) { + continue; + } + /* use cascade to remove tables in namespaces */ + String statement ="drop namespace " + ns + " cascade"; + doSysOp(nosqlHandle, statement); + } + } + + static void dropAllUsers(NoSQLHandle nosqlHandle, + Set exceptUsers) { + + UserInfo[] uInfo = nosqlHandle.listUsers(); + if (uInfo == null) { + return; + } + for (UserInfo u : uInfo) { + if (u.getName().equals("admin") || + (exceptUsers != null && exceptUsers.contains(u.getName()))) { + continue; + } + String statement ="drop user " + u.getName(); + doSysOp(nosqlHandle, statement); + } + } + + protected void getHandle() { + NoSQLHandleConfig config = new NoSQLHandleConfig(endpoint); + + config.setAuthorizationProvider(authProvider); + + /* + * Open the handle + */ + kvhandle = NoSQLHandleFactory.createNoSQLHandle(config); + dropAllMetadata(kvhandle); + } + + @Test + public void testBasic() + throws Exception { + + final String tableName = "test"; + final String createTableStatement = + "CREATE TABLE IF NOT EXISTS " + tableName + + "(id INTEGER, " + + " pin INTEGER, " + + " name STRING, " + + " PRIMARY KEY(SHARD(pin), id))"; + + TableRequest tableRequest = new TableRequest() + .setStatement(createTableStatement); + TableResult tres = kvhandle.tableRequest(tableRequest); + tres.waitForCompletion(kvhandle, 60000, 1000); + assertEquals(tres.getTableState(), TableResult.State.ACTIVE); + /* limits can be null but if not, should be set to 0 */ + assertTrue(tres.getTableLimits() == null || + tres.getTableLimits().getReadUnits() == 0); + + /* + * PUT a row + */ + MapValue value = new MapValue().put("id", 1). 
+ put("pin", "654321").put("name", "test1"); + + PutRequest putRequest = new PutRequest() + .setValue(value) + .setTableName(tableName); + PutResult putResult = kvhandle.put(putRequest); + assertNotNull(putResult.getVersion()); + + /* + * GET the row + */ + MapValue key = new MapValue().put("id", 1).put("pin", "654321"); + + GetRequest getRequest = new GetRequest() + .setKey(key) + .setTableName(tableName); + GetResult getRes = kvhandle.get(getRequest); + assertEquals("test1", + getRes.getValue().get("name").asString().getValue()); + + /* + * PUT a second row using JSON + */ + String jsonString = + "{\"id\": 2, \"pin\": 123456, \"name\":\"test2\"}"; + + putRequest = new PutRequest() + .setValueFromJson(jsonString, null) + .setTableName(tableName); + putResult = kvhandle.put(putRequest); + assertNotNull(putResult.getVersion()); + + /* + * GET the second row + */ + key = new MapValue().put("id", 2).put("pin", "123456"); + getRequest = new GetRequest() + .setKey(key) + .setTableName(tableName); + assertEquals( + "test2", + kvhandle.get(getRequest).getValue().get("name"). + asString().getValue()); + + try { + QueryRequest queryRequest = new QueryRequest(). + setStatement("SELECT * from " + tableName + + " WHERE name= \"test2\""); + QueryResult qres = kvhandle.query(queryRequest); + List results = qres.getResults(); + assertEquals(results.size(), 1); + assertEquals(2, results.get(0).get("id").asInteger().getValue()); + } catch (RequestTimeoutException rte) { + if (KVVersion.CURRENT_VERSION.getMajor() >= 20 || + !(rte.getCause() instanceof SystemException)) { + throw rte; + } + /* ignore this exception for 19 for now; known bug */ + } + + /* + * Put in the third row, the pin/name field is the same as the + * second row + */ + jsonString = "{\"id\": 3, \"pin\": 123456, \"name\":\"test2\"}"; + + putRequest = new PutRequest() + .setValueFromJson(jsonString, null) // no options + .setTableName(tableName); + putResult = kvhandle.put(putRequest); + assertNotNull(putResult.getVersion()); + + /* + * Create index, test query by indexed field + */ + final String createIndexStatement = + "CREATE INDEX IF NOT EXISTS idx1 ON " + tableName + + " (name)"; + + tableRequest = + new TableRequest().setStatement(createIndexStatement); + kvhandle.tableRequest(tableRequest); + tres.waitForCompletion(kvhandle, 60000, 1000); + assertEquals(tres.getTableState(), TableResult.State.ACTIVE); + + QueryRequest queryRequest = new QueryRequest(). + setStatement("SELECT * from " + tableName + + " WHERE name= \"test2\""); + QueryResult qres = kvhandle.query(queryRequest); + List results = qres.getResults(); + assertEquals(results.size(), 2); + assertEquals("test2", + results.get(0).get("name").asString().getValue()); + + /* + * DELETE the first row + */ + key = new MapValue().put("id", 1).put("pin", "654321"); + DeleteRequest delRequest = new DeleteRequest() + .setKey(key) + .setTableName(tableName); + DeleteResult delResult = kvhandle.delete(delRequest); + assertTrue(delResult.getSuccess()); + + /* + * MultiDelete where name is test2 + */ + key = new MapValue().put("pin", "123456"); + MultiDeleteRequest multiDelRequest = new MultiDeleteRequest() + .setKey(key) + .setTableName(tableName); + + MultiDeleteResult mRes = kvhandle.multiDelete(multiDelRequest); + assertEquals(mRes.getNumDeletions(), 2); + + /* + * There should be no record in the table now + */ + queryRequest = new QueryRequest(). 
+ setStatement("SELECT * from " + tableName); + qres = kvhandle.query(queryRequest); + results = qres.getResults(); + assertEquals(results.size(), 0); + } + + /* + * Test data limits: + * key size + * index key size + * row size + */ + @Test + public void testDataLimits() + throws Exception { + + final String tableName = "limits"; + final String createTableStatement = + "CREATE TABLE IF NOT EXISTS " + tableName + + "(id STRING, " + + " idx STRING, " + + " name STRING, " + + " PRIMARY KEY(id))"; + final String addIndex = "create index idx on limits(idx)"; + + tableOperation(kvhandle, createTableStatement, null, + TableResult.State.ACTIVE, 20000); + tableOperation(kvhandle, addIndex, null, TableResult.State.ACTIVE, + 20000); + + /* + * PUT a row that exceeds cloud key and value limits. + */ + MapValue value = new MapValue().put("id", makeString(500)) + .put("idx", makeString(300)).put("name", makeString(600000)); + + PutRequest putRequest = new PutRequest() + .setValue(value) + .setTableName(tableName); + PutResult putResult = kvhandle.put(putRequest); + assertNotNull(putResult.getVersion()); + } + + /* + * Test table limits: + * # of indexes (cloud defaults to 5) + */ + @Test + public void testTableLimits() + throws Exception { + + final String createTableStatement = + "create table if not exists limits(" + + "id integer, i0 integer, i1 integer, i2 integer, i3 integer, " + + "i4 integer, i5 integer, i6 integer, primary key(id))"; + + tableOperation(kvhandle, createTableStatement, null, + TableResult.State.ACTIVE, 20000); + + for (int i = 0; i < 7; i++) { + String addIndex = "create index idx" + i + " on limits(i" + + i + ")"; + + tableOperation(kvhandle, addIndex, null, + TableResult.State.ACTIVE, 20000); + } + } + + @Test + public void testInvalidToken() + throws Exception { + + final String tableName = "test"; + final String createTableStatement = + "CREATE TABLE IF NOT EXISTS " + tableName + + "(id INTEGER, " + + " pin INTEGER, " + + " name STRING, " + + " PRIMARY KEY(SHARD(pin), id))"; + TableRequest tableRequest = new TableRequest() + .setStatement(createTableStatement); + TableResult tres = kvhandle.tableRequest(tableRequest); + tres.waitForCompletion(kvhandle, 60000, 1000); + + StoreAccessTokenProvider authProvider = + new MockProvider("test", "NoSql00__123456".toCharArray()); + NoSQLHandleConfig config = new NoSQLHandleConfig(endpoint); + config.setAuthorizationProvider(authProvider); + NoSQLHandle testHandle = NoSQLHandleFactory.createNoSQLHandle(config); + MapValue value = + new MapValue().put("id", 1).put("pin", "654321"). 
+ put("name", "test1"); + try { + PutRequest putRequest = new PutRequest() + .setValue(value) + .setTableName("test"); + testHandle.put(putRequest); + } catch (InvalidAuthorizationException e) { + /* Expect sec IAE here */ + return; + } finally { + testHandle.close(); + } + fail("Should not reach here"); + } + + private class MockProvider extends StoreAccessTokenProvider { + public MockProvider(String userName, char[] password) { + super(userName, password); + } + + @Override + public String getAuthorizationString(Request request) { + return "Bearer InvalidToken@#!"; + } + } + + @Test + public void testChildTables() + throws Exception { + + String tableName = "parent"; + String createTableStatement = + "CREATE TABLE IF NOT EXISTS " + tableName + + "(id INTEGER, " + + " pin INTEGER, " + + " name STRING, " + + " PRIMARY KEY(SHARD(pin), id))"; + + TableRequest tableRequest = new TableRequest() + .setStatement(createTableStatement); + TableResult tres = kvhandle.tableRequest(tableRequest); + tres.waitForCompletion(kvhandle, 60000, 1000); + assertEquals(tres.getTableState(), TableResult.State.ACTIVE); + + tableName = "parent.child"; + createTableStatement = + "CREATE TABLE IF NOT EXISTS " + tableName + + "(childId INTEGER, " + + " childName STRING, " + + " PRIMARY KEY(childId))"; + + tableRequest = new TableRequest() + .setStatement(createTableStatement); + tres = kvhandle.tableRequest(tableRequest); + tres.waitForCompletion(kvhandle, 60000, 1000); + assertEquals(tres.getTableState(), TableResult.State.ACTIVE); + + ListTablesRequest listTables = new ListTablesRequest(); + kvhandle.listTables(listTables); + + /* + * PUT a row + */ + MapValue value = new MapValue().put("id", 1). + put("pin", "654321").put("name", "test1"). + put("childId", 1).put("childName", "cName"); + + PutRequest putRequest = new PutRequest() + .setValue(value) + .setTableName(tableName); + PutResult putResult = kvhandle.put(putRequest); + assertNotNull(putResult.getVersion()); + + /* + * GET the row + */ + MapValue key = new MapValue().put("id", 1).put("pin", "654321"). + put("childId", 1); + + GetRequest getRequest = new GetRequest() + .setKey(key) + .setTableName(tableName); + GetResult getRes = kvhandle.get(getRequest); + assertEquals("cName", + getRes.getValue().get("childName"). + asString().getValue()); + + /* + * PUT a second row using JSON + */ + String jsonString = + "{\"id\": 2, \"pin\": 123456, \"name\":\"test2\"," + + "\"childId\": 2, \"childName\":\"cName2\"}"; + + putRequest = new PutRequest() + .setValueFromJson(jsonString, null) + .setTableName(tableName); + putResult = kvhandle.put(putRequest); + assertNotNull(putResult.getVersion()); + + /* + * GET the second row + */ + key = new MapValue().put("id", 2).put("pin", "123456"). + put("childId", 2); + getRequest = new GetRequest() + .setKey(key) + .setTableName(tableName); + assertEquals( + "cName2", + kvhandle.get(getRequest).getValue().get("childName"). + asString().getValue()); + + /* + * QUERY the table. The table name is inferred from the + * query statement. + */ + try { + QueryRequest queryRequest = new QueryRequest(). 
+ setStatement("SELECT * from " + tableName + + " WHERE childName= \"cName2\""); + QueryResult qres = kvhandle.query(queryRequest); + List results = qres.getResults(); + assertEquals(results.size(), 1); + assertEquals(2, results.get(0).get("id").asInteger().getValue()); + } catch (RequestTimeoutException rte) { + if (KVVersion.CURRENT_VERSION.getMajor() >= 20 || + !(rte.getCause() instanceof SystemException)) { + throw rte; + } + /* ignore this exception for 19 for now; known bug */ + } + + /* + * Put in the third row, the pin/name/childName field is the same + * as the second row + */ + jsonString = "{\"id\": 3, \"pin\": 123456, \"name\":\"test2\"," + + "\"childId\": 3, \"childName\":\"cName2\"}"; + + putRequest = new PutRequest() + .setValueFromJson(jsonString, null) // no options + .setTableName(tableName); + putResult = kvhandle.put(putRequest); + assertNotNull(putResult.getVersion()); + + /* + * Create index, test query by indexed field + */ + final String createIndexStatement = + "CREATE INDEX IF NOT EXISTS idx1 ON " + tableName + + " (childName)"; + + tableRequest = + new TableRequest().setStatement(createIndexStatement); + tres = kvhandle.tableRequest(tableRequest); + tres.waitForCompletion(kvhandle, 60000, 1000); + assertEquals(tres.getTableState(), TableResult.State.ACTIVE); + + QueryRequest queryRequest = new QueryRequest(). + setStatement("SELECT * from " + tableName + + " WHERE childName= \"cName2\""); + QueryResult qres = kvhandle.query(queryRequest); + List results = qres.getResults(); + assertEquals(results.size(), 2); + assertEquals("cName2", + results.get(0).get("childName").asString().getValue()); + + /* + * DELETE the first row + */ + key = new MapValue().put("id", 1).put("pin", "654321"). + put("childId", 1); + DeleteRequest delRequest = new DeleteRequest() + .setKey(key) + .setTableName(tableName); + DeleteResult delResult = kvhandle.delete(delRequest); + assertTrue(delResult.getSuccess()); + + /* + * MultiDelete where name is test2 + */ + key = new MapValue().put("pin", "123456"); + MultiDeleteRequest multiDelRequest = new MultiDeleteRequest() + .setKey(key) + .setTableName(tableName); + + MultiDeleteResult mRes = kvhandle.multiDelete(multiDelRequest); + assertEquals(mRes.getNumDeletions(), 2); + + /* + * There should be no record in the table now + */ + queryRequest = new QueryRequest(). 
+ setStatement("SELECT * from " + tableName); + qres = kvhandle.query(queryRequest); + results = qres.getResults(); + assertEquals(results.size(), 0); + + /* + * DROP the child table + */ + tableRequest = new TableRequest() + .setStatement("DROP TABLE IF EXISTS " + tableName); + tres = kvhandle.tableRequest(tableRequest); + tres.waitForCompletion(kvhandle, 20000, 1000); + assertEquals(tres.getTableState(), TableResult.State.DROPPED); + /* + * DROP the parent table + */ + tableName = "parent"; + tableRequest = new TableRequest() + .setStatement("DROP TABLE IF EXISTS " + tableName); + tres = kvhandle.tableRequest(tableRequest); + tres.waitForCompletion(kvhandle, 20000, 1000); + assertEquals(tres.getTableState(), TableResult.State.DROPPED); + } + + @Test + public void testSecureSysOp() + throws Exception { + + SystemResult dres = doSysOp(kvhandle, "create namespace myns"); + + /* create a user -- use a password with white space and quotes */ + dres = doSysOp(kvhandle, + "create user newuser " + + "identified by 'ChrisToph \"_12&%'"); + + dres = doSysOp(kvhandle, "show namespaces"); + assertNotNull(dres.getResultString()); + assertNull(dres.getOperationId()); + + dres = doSysOp(kvhandle, "show users"); + assertNotNull(dres.getResultString()); + assertNull(dres.getOperationId()); + + dres = doSysOp(kvhandle, "show as json roles"); + assertNotNull(dres.getResultString()); + assertNull(dres.getOperationId()); + + dres = doSysOp(kvhandle, "show as json user admin"); + assertNotNull(dres.getResultString()); + assertNull(dres.getOperationId()); + + String[] roles = kvhandle.listRoles(); + /* + * The number of default roles may vary with the kv release. + * Don't assume a specific number. This range is safe for now. + */ + assertTrue(roles.length > 2 && roles.length < 10); + } + + @Test + public void testSystemExceptions() + throws Exception { + + if (cloudRunning) { + return; + } + final String createTable = "Create table " + + "foo(id integer, name string, primary key(id))"; + final String createIndex = "create index idx on foo(name)"; + + doSysOp(kvhandle, "create namespace myns"); + + doSysOp(kvhandle, createTable); + doSysOp(kvhandle, createIndex); + + doSysOp(kvhandle, + "create user newuser identified by 'ChrisToph \"_12&%'"); + /* + * test error conditions + */ + try { + doSysOp(kvhandle, "drop namespace not_a_namespace"); + fail("operation should have failed"); + } catch (ResourceNotFoundException e) { + /* success */ + } + try { + doSysOp(kvhandle, "show as json user not_a_user"); + fail("operation should have failed"); + } catch (ResourceNotFoundException e) { + /* success */ + } + try { + doSysOp(kvhandle, "show as json role not_a_role"); + fail("operation should have failed"); + } catch (ResourceNotFoundException e) { + /* success */ + } + + try { + doSysOp(kvhandle, + "create user newuser identified by 'Chrioph \"_12&%'"); + fail("operation should have failed"); + } catch (ResourceExistsException e) { + /* success */ + } + + try { + doSysOp(kvhandle, "create namespace myns"); + fail("operation should have failed"); + } catch (ResourceExistsException e) { + /* success */ + } + + try { + doSysOp(kvhandle, "drop table not_a_table"); + fail("operation should have failed"); + } catch (TableNotFoundException e) { + /* success */ + } + + try { + doSysOp(kvhandle, "drop index no_index on foo"); + fail("operation should have failed"); + } catch (IndexNotFoundException e) { + /* success */ + } + + try { + doSysOp(kvhandle, createTable); + fail("operation should have failed"); + } catch 
(TableExistsException e) { + /* success */ + } + + try { + doSysOp(kvhandle, createIndex); + } catch (IndexExistsException e) { + /* success */ + } + } + + /* + * Avoid security-related operations + */ + @Test + public void testSystem() + throws Exception { + + SystemResult dres = doSysOp(kvhandle, "create namespace myns"); + + dres = doSysOp(kvhandle, "show namespaces"); + assertNotNull(dres.getResultString()); + assertNull(dres.getOperationId()); + + dres = doSysOp(kvhandle, "show users"); + assertNotNull(dres.getResultString()); + assertNull(dres.getOperationId()); + + dres = doSysOp(kvhandle, "show as json user admin"); + assertNotNull(dres.getResultString()); + assertNull(dres.getOperationId()); + + dres = doSysOp(kvhandle, "show as json roles"); + assertNotNull(dres.getResultString()); + assertNull(dres.getOperationId()); + + /* + * Create a table using this mechanism. + */ + dres = doSysOp(kvhandle, + "create table foo(id integer, primary key(id))"); + + dres = doSysOp(kvhandle, "show as json tables"); + assertNotNull(dres.getResultString()); + } + + @Test + public void testNamespaces() + throws Exception { + + final String parentName = "myns:parent"; + final String childName = "myns:parent.child"; + final int numParent = 30; + final int numChild = 40; + + doSysOp(kvhandle, "create namespace myns"); + + /* parent in myns */ + TableRequest treq = new TableRequest().setStatement( + "create table myns:parent(id integer, primary key(id))"); + TableResult tres = kvhandle.tableRequest(treq); + tres.waitForCompletion(kvhandle, 100000, 1000); + + /* child in myns */ + treq = new TableRequest().setStatement( + "create table myns:parent.child(cid integer, name string, " + + "primary key(cid))"); + tres = kvhandle.tableRequest(treq); + tres.waitForCompletion(kvhandle, 100000, 1000); + + /* put data in both tables */ + PutRequest preq = new PutRequest(); + MapValue value = new MapValue(); + for (int i = 0; i < numParent; i++) { + value.put("name", "myname"); // ignored in parent + value.put("id", i); + preq.setTableName(parentName).setValue(value); + PutResult pres = kvhandle.put(preq); + assertNotNull("Parent put failed", pres.getVersion()); + for (int j = 0; j < numChild; j++) { + value.put("cid", j); // ignored in parent + preq.setTableName(childName).setValue(value); + pres = kvhandle.put(preq); + assertNotNull("Child put failed", pres.getVersion()); + assertNoUnits(pres); + } + } + + /* get parent */ + GetRequest getReq = new GetRequest().setTableName(parentName) + .setKey(new MapValue().put("id", 1)); + GetResult getRes = kvhandle.get(getReq); + assertNotNull(getRes.getValue()); + + /* get child */ + getReq = new GetRequest().setTableName(childName) + .setKey(new MapValue().put("id", 1).put("cid", 1)); + getRes = kvhandle.get(getReq); + assertNotNull(getRes.getValue()); + assertNoUnits(getRes); + + try { + /* query parent */ + String query = "select * from " + parentName; + List res = doQuery(kvhandle, query); + assertEquals(numParent, res.size()); + + /* query child */ + query = "select * from " + childName; + res = doQuery(kvhandle, query); + assertEquals(numParent * numChild, res.size()); + + /* prepared query on child */ + res = doPreparedQuery(kvhandle, query); + assertEquals(numParent * numChild, res.size()); + } catch (RequestTimeoutException rte) { + if (KVVersion.CURRENT_VERSION.getMajor() >= 20 || + !(rte.getCause() instanceof SystemException)) { + throw rte; + } + /* ignore this exception for 19 for now; known bug */ + } + + /* test ListTables with namespace */ + 
ListTablesRequest listTables = + new ListTablesRequest().setNamespace("myns"); + ListTablesResult lres = kvhandle.listTables(listTables); + assertEquals(2, lres.getTables().length); + } + + @Test + public void testDefaultNamespacesImpl() + throws Exception { + + /* skip this test if using older SDKs */ + Class handleImplClass = null; + try { + handleImplClass = Class.forName( + "oracle.nosql.driver.http.NoSQLHandleImpl"); + } catch (Throwable e) { + System.out.println("Could not find NoSQLHandleImpl class:" + e); + handleImplClass = null; + } + assertNotNull(handleImplClass); + Method setDefaultNamespaceFunction = null; + try { + setDefaultNamespaceFunction = handleImplClass.getMethod( + "setDefaultNamespace", String.class); + verbose("Using NoSQLHandleImpl.setDefaultNamespace()"); + } catch (Throwable e) { + verbose("Could not find NoSQLHandleImpl.setDefaultNamespace(): " + + "Skipping test"); + setDefaultNamespaceFunction = null; + } + assumeTrue(setDefaultNamespaceFunction != null); + + + final String parentName = "parent"; + final String childName = "parent.child"; + final String nsParentName = "myns:parent"; + final String nsChildName = "myns:parent.child"; + final int numParent = 30; + final int numChild = 40; + + doSysOp(kvhandle, "create namespace myns"); + + /* ((NoSQLHandleImpl)kvhandle).setDefaultNamespace("myns"); */ + setDefaultNamespaceFunction.invoke((NoSQLHandleImpl)kvhandle, "myns"); + + /* parent in myns */ + TableRequest treq = new TableRequest().setStatement( + "create table parent(id integer, primary key(id))"); + TableResult tres = kvhandle.tableRequest(treq); + tres.waitForCompletion(kvhandle, 100000, 1000); + + /* child in myns */ + treq = new TableRequest().setStatement( + "create table parent.child(cid integer, name string, " + + "primary key(cid))"); + tres = kvhandle.tableRequest(treq); + tres.waitForCompletion(kvhandle, 100000, 1000); + + ListTablesRequest listTables; + ListTablesResult lres; + + /* test ListTables with no namespace: should get just myns tables */ + listTables = new ListTablesRequest(); + lres = kvhandle.listTables(listTables); + assertEquals(2, lres.getTables().length); + + /* test ListTables with explicit namespace */ + listTables = new ListTablesRequest().setNamespace("myns"); + lres = kvhandle.listTables(listTables); + assertEquals(2, lres.getTables().length); + + /* test ListTables with explicit invalid */ + listTables = new ListTablesRequest().setNamespace("invalid"); + lres = kvhandle.listTables(listTables); + assertEquals(0, lres.getTables().length); + + /* test that dropping table works correctly */ + treq = new TableRequest().setStatement("drop table parent.child"); + tres = kvhandle.tableRequest(treq); + tres.waitForCompletion(kvhandle, 100000, 1000); + + /* test that ns:tablename overrides invalid default ns in DDL */ + /* ((NoSQLHandleImpl)kvhandle).setDefaultNamespace("invalid"); */ + setDefaultNamespaceFunction.invoke( + (NoSQLHandleImpl)kvhandle, "invalid"); + treq = new TableRequest().setStatement( + "create table myns:parent.child(cid integer, name string, " + + "primary key(cid))"); + tres = kvhandle.tableRequest(treq); + tres.waitForCompletion(kvhandle, 100000, 1000); + + /* test ListTables with explicit namespace */ + listTables = new ListTablesRequest().setNamespace("myns"); + lres = kvhandle.listTables(listTables); + assertEquals(2, lres.getTables().length); + + /* test ListTables with default invalid namespace */ + listTables = new ListTablesRequest(); + lres = kvhandle.listTables(listTables); + assertEquals(0, 
lres.getTables().length); + + /* reset default namespace to valid namespace */ + /* ((NoSQLHandleImpl)kvhandle).setDefaultNamespace("myns"); */ + setDefaultNamespaceFunction.invoke( + (NoSQLHandleImpl)kvhandle, "myns"); + + /* put data in both tables */ + PutRequest preq = new PutRequest(); + MapValue value = new MapValue(); + for (int i = 0; i < numParent; i++) { + value.put("name", "myname"); // ignored in parent + value.put("id", i); + preq.setTableName(parentName).setValue(value); + PutResult pres = kvhandle.put(preq); + assertNotNull("Parent put failed", pres.getVersion()); + for (int j = 0; j < numChild; j++) { + value.put("cid", j); // ignored in parent + preq.setTableName(childName).setValue(value); + pres = kvhandle.put(preq); + assertNotNull("Child put failed", pres.getVersion()); + assertNoUnits(pres); + } + } + + /* get parent */ + GetRequest getReq = new GetRequest().setTableName(parentName) + .setKey(new MapValue().put("id", 1)); + GetResult getRes = kvhandle.get(getReq); + assertNotNull(getRes.getValue()); + + /* get child */ + getReq = new GetRequest().setTableName(childName) + .setKey(new MapValue().put("id", 1).put("cid", 1)); + getRes = kvhandle.get(getReq); + assertNotNull(getRes.getValue()); + assertNoUnits(getRes); + + try { + /* query parent */ + String query = "select * from " + parentName; + List res = doQuery(kvhandle, query); + assertEquals(numParent, res.size()); + + /* query child */ + query = "select * from " + childName; + res = doQuery(kvhandle, query); + assertEquals(numParent * numChild, res.size()); + + /* prepared query on child */ + res = doPreparedQuery(kvhandle, query); + assertEquals(numParent * numChild, res.size()); + + /* query parent with explicit namespace */ + query = "select * from " + nsParentName; + res = doQuery(kvhandle, query); + assertEquals(numParent, res.size()); + + /* query child with explicit namespace */ + query = "select * from " + nsChildName; + res = doQuery(kvhandle, query); + assertEquals(numParent * numChild, res.size()); + + /* set an invalid default namespace, check all again */ + /* ((NoSQLHandleImpl)kvhandle).setDefaultNamespace("invalid"); */ + setDefaultNamespaceFunction.invoke( + (NoSQLHandleImpl)kvhandle, "invalid"); + + /* query parent with explicit namespace: should work */ + query = "select * from " + nsParentName; + res = doQuery(kvhandle, query); + assertEquals(numParent, res.size()); + + /* query child with explicit namespace: should work */ + query = "select * from " + nsChildName; + res = doQuery(kvhandle, query); + assertEquals(numParent * numChild, res.size()); + + /* prepared query on child with explicit namespace: should work */ + res = doPreparedQuery(kvhandle, query); + assertEquals(numParent * numChild, res.size()); + + /* query parent with default invalid namespace: should fail */ + query = "select * from " + parentName; + try { + res = doQuery(kvhandle, query); + fail("Expected TableNotFoundException"); + } catch (TableNotFoundException tne) { + /* expected */ + } + + /* query child with default invalid namespace: should fail */ + query = "select * from " + childName; + try { + res = doQuery(kvhandle, query); + fail("Expected TableNotFoundException"); + } catch (TableNotFoundException tne) { + /* expected */ + } + + /* prepared query with default invalid namespace: should fail */ + try { + res = doPreparedQuery(kvhandle, query); + fail("Expected TableNotFoundException"); + } catch (TableNotFoundException tne) { + /* expected */ + } + + /* verify exception for DDL with invalid default namespace */ + 
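/* the unqualified child table must not resolve while the default namespace is "invalid" */ + 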
try { + treq = new TableRequest().setStatement( + "alter table parent.child using TTL 5 DAYS"); + tres = kvhandle.tableRequest(treq); + tres.waitForCompletion(kvhandle, 100000, 1000); + fail("Expected TableNotFoundException"); + } catch (TableNotFoundException tne) { + /* expected */ + } + + } catch (RequestTimeoutException rte) { + if (KVVersion.CURRENT_VERSION.getMajor() >= 20 || + !(rte.getCause() instanceof SystemException)) { + throw rte; + } + /* ignore this exception for 19 for now; known bug */ + } + + /* drop namespace - use cascade to remove tables */ + doSysOp(kvhandle, "drop namespace myns cascade"); + + /* verify that setting the namespace back to null works */ + /* ((NoSQLHandleImpl)kvhandle).setDefaultNamespace(null); */ + setDefaultNamespaceFunction.invoke((NoSQLHandleImpl)kvhandle, + (String)null); + testNamespaces(); + } + + @Test + public void testDefaultNamespacesConfig() + throws Exception { + + /* skip this test if using older SDKs */ + Class handleConfigClass = null; + try { + handleConfigClass = + Class.forName("oracle.nosql.driver.NoSQLHandleConfig"); + } catch (Throwable e) { + System.out.println("Could not find NoSQLHandleConfig class:" + e); + handleConfigClass = null; + } + assertNotNull(handleConfigClass); + Method setDefaultNamespaceFunction = null; + try { + setDefaultNamespaceFunction = handleConfigClass.getMethod( + "setDefaultNamespace", String.class); + } catch (Throwable e) { + verbose("Could not find NoSQLHandleConfig.setDefaultNamespace(): " + + "Skipping test"); + setDefaultNamespaceFunction = null; + } + assumeTrue(setDefaultNamespaceFunction != null); + + final String parentName = "parent"; + final String childName = "parent.child"; + final String nsParentName = "myns:parent"; + final String nsChildName = "myns:parent.child"; + final int numParent = 30; + final int numChild = 40; + + /* + * This test is the same as testDefaultnamespacesImpl except it + * depends on the (currently hidden) NoSQLHandleConfig setting. + * So it must create its own handle. 
+ */ + NoSQLHandleConfig config = new NoSQLHandleConfig(endpoint); + config.setAuthorizationProvider(authProvider); + + /* config.setDefaultNamespace("myns"); */ + setDefaultNamespaceFunction.invoke(config, "myns"); + + NoSQLHandle myhandle = NoSQLHandleFactory.createNoSQLHandle(config); + + doSysOp(myhandle, "create namespace myns"); + + /* parent in myns */ + TableRequest treq = new TableRequest().setStatement( + "create table parent(id integer, primary key(id))"); + TableResult tres = myhandle.tableRequest(treq); + tres.waitForCompletion(myhandle, 100000, 1000); + + /* child in myns */ + treq = new TableRequest().setStatement( + "create table parent.child(cid integer, name string, " + + "primary key(cid))"); + tres = myhandle.tableRequest(treq); + tres.waitForCompletion(myhandle, 100000, 1000); + + ListTablesRequest listTables; + ListTablesResult lres; + + /* test ListTables with no namespace: should get just myns tables */ + listTables = new ListTablesRequest(); + lres = myhandle.listTables(listTables); + assertEquals(2, lres.getTables().length); + + /* test ListTables with explicit namespace */ + listTables = new ListTablesRequest().setNamespace("myns"); + lres = myhandle.listTables(listTables); + assertEquals(2, lres.getTables().length); + + /* test ListTables with explicit invalid */ + listTables = new ListTablesRequest().setNamespace("invalid"); + lres = myhandle.listTables(listTables); + assertEquals(0, lres.getTables().length); + + /* put data in both tables */ + PutRequest preq = new PutRequest(); + MapValue value = new MapValue(); + for (int i = 0; i < numParent; i++) { + value.put("name", "myname"); // ignored in parent + value.put("id", i); + preq.setTableName(parentName).setValue(value); + PutResult pres = myhandle.put(preq); + assertNotNull("Parent put failed", pres.getVersion()); + for (int j = 0; j < numChild; j++) { + value.put("cid", j); // ignored in parent + preq.setTableName(childName).setValue(value); + pres = myhandle.put(preq); + assertNotNull("Child put failed", pres.getVersion()); + assertNoUnits(pres); + } + } + + /* get parent */ + GetRequest getReq = new GetRequest().setTableName(parentName) + .setKey(new MapValue().put("id", 1)); + GetResult getRes = myhandle.get(getReq); + assertNotNull(getRes.getValue()); + + /* get child */ + getReq = new GetRequest().setTableName(childName) + .setKey(new MapValue().put("id", 1).put("cid", 1)); + getRes = myhandle.get(getReq); + assertNotNull(getRes.getValue()); + assertNoUnits(getRes); + + try { + /* query parent */ + String query = "select * from " + parentName; + List res = doQuery(myhandle, query); + assertEquals(numParent, res.size()); + + /* query child */ + query = "select * from " + childName; + res = doQuery(myhandle, query); + assertEquals(numParent * numChild, res.size()); + + /* prepared query on child */ + res = doPreparedQuery(myhandle, query); + assertEquals(numParent * numChild, res.size()); + + /* query parent with explicit namespace */ + query = "select * from " + nsParentName; + res = doQuery(myhandle, query); + assertEquals(numParent, res.size()); + + /* query child with explicit namespace */ + query = "select * from " + nsChildName; + res = doQuery(myhandle, query); + assertEquals(numParent * numChild, res.size()); + + } catch (RequestTimeoutException rte) { + if (KVVersion.CURRENT_VERSION.getMajor() >= 20 || + !(rte.getCause() instanceof SystemException)) { + throw rte; + } + /* ignore this exception for 19 for now; known bug */ + } + + /* drop namespace - use cascade to remove tables */ + 
doSysOp(myhandle, "drop namespace myns cascade"); + } + + /** + * Test that the limit on # of operations in WriteMultiple isn't enforced + */ + @Test + public void writeMultipleTest() { + + doSysOp(kvhandle, "create table foo(id1 string, " + + "id2 integer, primary key(shard(id1), id2))"); + + WriteMultipleRequest wmReq = new WriteMultipleRequest(); + for (int i = 0; i < BATCH_OP_NUMBER_LIMIT +1; i++) { + PutRequest pr = new PutRequest().setTableName("foo"). + setValueFromJson("{\"id1\":\"a\", \"id2\":" + i + "}", null); + wmReq.add(pr, false); + } + try { + WriteMultipleResult wmRes = kvhandle.writeMultiple(wmReq); + assertEquals(BATCH_OP_NUMBER_LIMIT + 1, wmRes.size()); + } catch (BatchOperationNumberLimitException ex) { + fail("operation should have succeeded"); + } + } + + /** + * Tests synchronization of table metadata when mixing driver-based table + * operations and direct kv-based table changes made using the sql shell + * or admin CLI, or even a KVStore handle directly -- any operation that + * bypasses the proxy and any caching it might do. + */ + @Test + public void tableSync() throws Exception { + /* + * Case 1: + * create table + * drop it via KV + * create again using if exists + * try to put data + */ + final String table1 = + "create table if not exists mytable(id integer, primary key(id))"; + doTableRequest(kvhandle, table1, false); + dropTableUsingAdmin("mytable"); + doTableRequest(kvhandle, table1, false); + + doPut(kvhandle, "mytable", "{\"id\": 1}"); + doTableRequest(kvhandle, "drop table mytable", true); + } + + @Test + public void testUnsupportedOps() throws Exception { + + doSysOp(kvhandle, "create table foo(id integer, primary key(id))"); + try { + TableUsageRequest req = + new TableUsageRequest().setTableName("foo"); + kvhandle.getTableUsage(req); + fail("op should have failed"); + } catch (OperationNotSupportedException e) { + /* success */ + } + } + + /* + * Asserts that a non-secure provider cannot access a secure proxy + */ + @Test + public void testNonSecureAccess() + throws Exception { + + if (cloudRunning) { + return; + } + NoSQLHandle testHandle = null; + StoreAccessTokenProvider sap = new StoreAccessTokenProvider(); + try { + NoSQLHandleConfig config = new NoSQLHandleConfig(endpoint); + config.setAuthorizationProvider(sap); + sap.setAutoRenew(false); + testHandle = NoSQLHandleFactory.createNoSQLHandle(config); + final String tableName = "nonsecure"; + final String createTableStatement = + "CREATE TABLE IF NOT EXISTS " + tableName + + "(id INTEGER, " + + " pin INTEGER, " + + " name STRING, " + + " PRIMARY KEY(SHARD(pin), id))"; + + TableRequest tableRequest = new TableRequest() + .setStatement(createTableStatement); + TableResult tres = testHandle.tableRequest(tableRequest); + tres.waitForCompletion(testHandle, 60000, 1000); + assertEquals(tres.getTableState(), TableResult.State.ACTIVE); + } catch (IllegalArgumentException iae) { + /* Expected */ + if (iae.getMessage().contains("Illegal Argument: " + + "Missing authentication information") == false) { + fail("Expected , but got <" + + iae.getMessage() + ">"); + } + return; + } finally { + sap.close(); + if (testHandle != null) { + testHandle.close(); + } + } + fail("Should not reach here"); + } + + @Test + public void testTokenTimeout() + throws Exception { + + /* skip until kv auth issue in async path is fixed */ + assumeTrue(false); + + StoreAccessTokenProvider sap = + new StoreAccessTokenProvider("test", + "NoSql00__123456".toCharArray()); + NoSQLHandle testHandle = null; + + try { + /* Set the 
expiration time to 3 seconds */ + final ParameterMap map = new ParameterMap(); + map.setParameter(ParameterState.GP_SESSION_TIMEOUT, + "3 SECONDS"); + map.setParameter(ParameterState.GP_LOGIN_CACHE_TIMEOUT, + "3 SECONDS"); + int planId = + admin.createChangeGlobalSecurityParamsPlan("change timeout", + map); + execPlan(planId); + + /* Disable auto renew */ + sap.setAutoRenew(false); + + NoSQLHandleConfig config = new NoSQLHandleConfig(endpoint); + config.setAuthorizationProvider(sap); + testHandle = NoSQLHandleFactory.createNoSQLHandle(config); + + /* Get a login token then wait for expiration */ + sap.getAuthorizationString(null); + + /* Wait for the login token to expire */ + Thread.sleep(5000); + final String tableName = "timeout"; + final String createTableStatement = + "CREATE TABLE IF NOT EXISTS " + tableName + + "(id INTEGER, " + + " pin INTEGER, " + + " name STRING, " + + " PRIMARY KEY(SHARD(pin), id))"; + TableRequest tableRequest = new TableRequest() + .setStatement(createTableStatement); + TableResult tres = testHandle.tableRequest(tableRequest); + + /* Wait for expiration before getTable */ + Thread.sleep(5000); + tres.waitForCompletion(testHandle, 60000, 1000); + assertEquals(tres.getTableState(), TableResult.State.ACTIVE); + + /* Wait for expiration before put request */ + Thread.sleep(5000); + MapValue value = new MapValue().put("id", 1). + put("pin", "654321").put("name", "test1"); + PutRequest putRequest = new PutRequest() + .setValue(value) + .setTableName(tableName); + PutResult putResult = testHandle.put(putRequest); + assertNotNull(putResult.getVersion()); + + /* Wait for expiration before get request */ + Thread.sleep(5000); + MapValue key = new MapValue().put("id", 1).put("pin", "654321"); + GetRequest getRequest = new GetRequest() + .setKey(key) + .setTableName(tableName); + GetResult getRes = testHandle.get(getRequest); + Thread.sleep(5000); + assertEquals("test1", + getRes.getValue().get("name").asString().getValue()); + + /* Wait for expiration before query request */ + Thread.sleep(5000); + try { + QueryRequest queryRequest = new QueryRequest(). 
+ setStatement("SELECT * from " + tableName); + QueryResult qres = testHandle.query(queryRequest); + List results = qres.getResults(); + assertEquals(results.size(), 1); + } catch (RequestTimeoutException rte) { + if (KVVersion.CURRENT_VERSION.getMajor() >= 20 || + !(rte.getCause() instanceof SystemException)) { + throw rte; + } + /* ignore this exception for 19 for now; known bug */ + } + + /* Wait for expiration before create index request */ + Thread.sleep(5000); + final String createIndexStatement = + "CREATE INDEX IF NOT EXISTS idx1 ON " + tableName + + " (name)"; + tableRequest = + new TableRequest().setStatement(createIndexStatement); + testHandle.tableRequest(tableRequest); + + /* Wait for expiration before getTable */ + Thread.sleep(5000); + tres.waitForCompletion(testHandle, 60000, 1000); + assertEquals(tres.getTableState(), TableResult.State.ACTIVE); + + /* Wait for expiration before multi delete request */ + Thread.sleep(5000); + key = new MapValue().put("pin", "654321"); + MultiDeleteRequest multiDelRequest = new MultiDeleteRequest() + .setKey(key) + .setTableName(tableName); + + MultiDeleteResult mRes = testHandle.multiDelete(multiDelRequest); + assertEquals(mRes.getNumDeletions(), 1); + } finally { + /* Set the expiration time back to default */ + final ParameterMap map = new ParameterMap(); + map.setParameter(ParameterState.GP_SESSION_TIMEOUT, + ParameterState.GP_SESSION_TIMEOUT_DEFAULT); + map.setParameter(ParameterState.GP_LOGIN_CACHE_TIMEOUT, + ParameterState.GP_LOGIN_CACHE_TIMEOUT_DEFAULT); + int planId = + admin.createChangeGlobalSecurityParamsPlan("change back", + map); + execPlan(planId); + + sap.close(); + if (testHandle != null) { + testHandle.close(); + } + } + } + + @Test + public void testLargeRow() { + doLargeRow(kvhandle, true); + } + + /* + * Ensure that MR table DDL statements pass through the proxy + * properly + */ + @Test + public void testMultiRegionBasic() { + final String show = "show regions"; + final String createRegion = "create region remoteRegion"; + final String setRegion = "set local region localRegion"; + final String createTable = "create table mrtable(id integer, " + + "primary key(id)) in regions localRegion"; + + /* + * doSysOp will throw on any failures; there is no "error" return + * information in SystemResult. 
+ */ + SystemResult res = doSysOp(kvhandle, createRegion); + res = doSysOp(kvhandle, setRegion); + res = doSysOp(kvhandle, show); + String resString = res.getResultString(); + assertTrue(resString.contains("localRegion") && + resString.contains("remoteRegion")); + + /* count sys tables first */ + ListTablesRequest listTables = new ListTablesRequest(); + ListTablesResult lres = kvhandle.listTables(listTables); + int numSysTables = lres.getTables().length; + + res = doSysOp(kvhandle, createTable); + + lres = kvhandle.listTables(listTables); + assertEquals(numSysTables + 1, lres.getTables().length); + + /* this will throw if the table doesn't exist */ + getTable("mrtable", kvhandle); + + /* test a simple put */ + PutRequest putRequest = new PutRequest() + .setValue(new MapValue().put("id", 1)) + .setTableName("mrtable"); + PutResult putResult = kvhandle.put(putRequest); + assertNotNull(putResult.getVersion()); + } + + @Test + public void testQueryWithSmallLimit() { + final int recordKB = 2; + final int minRead = getMinRead(); + + String ddl = "create table smallLimitTest(id integer, " + + "longString string, primary key(id))"; + doTableRequest(kvhandle, ddl, false); + + /* Load 2 rows to table */ + PutRequest putReq = new PutRequest().setTableName("smallLimitTest"); + PutResult putRet; + MapValue row; + for (int i = 0; i < 2; i++) { + row = new MapValue() + .put("id", i) + .put("longString", genString((recordKB - 1) * 1024)); + putReq.setValue(row); + + putRet = kvhandle.put(putReq); + assertNotNull(putRet.getVersion()); + } + + String query; + QueryRequest req; + QueryResult ret; + + /* Query with maxReadKB of 1, expect an IAE */ + query = "select * from smallLimitTest"; + req = new QueryRequest().setStatement(query).setMaxReadKB(1); + int numExec = 0; + if (checkKVVersion(21, 3, 6)) { + /* Query should always make progress with small limit */ + int cnt = 0; + do { + numExec++; + ret = kvhandle.query(req); + cnt += ret.getResults().size(); + } while (!req.isDone()); + assertEquals(2, cnt); + } else { + try { + do { + numExec++; + ret = kvhandle.query(req); + } while (!req.isDone()); + fail("Expect to catch IAE but not"); + } catch (IllegalArgumentException iae) { + assertEquals(2, numExec); + } + } + + /* Update with number-based limit of 1 */ + int newRecordKB = 1; + String longString = genString((newRecordKB - 1) * 1024); + query = "update smallLimitTest set longString = \"" + longString + + "\" where id = 0"; + req = new QueryRequest().setStatement(query).setLimit(1); + ret = kvhandle.query(req); + assertNull(ret.getContinuationKey()); + + /* Update with maxReadKB of 1, expect an IAE */ + int expReadKB = dontDoubleChargeKey() ? recordKB : minRead + recordKB; + query = "update smallLimitTest set longString = \"" + longString + + "\" where id = 1"; + if (checkKVVersion(21, 3, 6)) { + /* Query should always make progress with small limit */ + req = new QueryRequest().setStatement(query).setMaxReadKB(1); + ret = kvhandle.query(req); + assertEquals(1, ret.getResults().size()); + } else { + for (int kb = 1; kb <= expReadKB; kb++) { + req = new QueryRequest().setStatement(query).setMaxReadKB(kb); + try { + ret = kvhandle.query(req); + if (kb < expReadKB) { + fail("Expect to catch IAE but not"); + } + } catch (IllegalArgumentException iae) { + assertTrue("Expect to succeed with maxReadKB of " + kb + + ", but fail: " + iae.getMessage(), + kb < expReadKB); + } + } + } + } + + /** + * Tests to ensure CRDT works properly on proxy: + * 1. 
Test inserting a row with a CRDT column and also updating + * the CRDT using UPDATE sql. + * 2. Use the direct driver to mimic the behavior of an mrtable agent + * to put a remote row with CRDT from other regions. Test reading + * the CRDT using the driver. + */ + @Test + public void testCRDT() { + TableAPIImpl tableApi = (TableAPIImpl)store.getTableAPI(); + + final String setRegion = "set local region localRegion"; + doSysOp(kvhandle, setRegion); + + /* Test reading different types of CRDT. */ + testCRDT(Type.INTEGER, tableApi); + + testCRDT(Type.LONG, tableApi); + + testCRDT(Type.NUMBER, tableApi); + + /* MR counter is not allowed in non MR table */ + String ddl = "create table foo(id integer, c INTEGER as mr_counter, " + + "primary key(id))"; + try { + doSysOp(kvhandle, ddl); + fail("operation should have failed"); + } catch (IllegalArgumentException ex) { + assertTrue(ex.getMessage() + .contains("MR_counters are not allowed in the table")); + } + } + + private void testCRDT(Type type, + TableAPIImpl tableApi) { + String tableName = "mrtable" + type; + final String createTable = "create table " + tableName + + "(id integer, count " + type + " as mr_counter" + + ", primary key(id)) in regions localRegion"; + doSysOp(kvhandle, createTable); + /* this will throw if the table doesn't exist */ + getTable(tableName, kvhandle); + + /* Insert a row with CRDT. */ + String insertStmt = "insert into " + tableName + + " values (1, default)"; + QueryRequest req = new QueryRequest().setStatement(insertStmt); + kvhandle.query(req); + + String updateStmt = "Update " + tableName + + " set count = count + 1 where id = 1"; + req = new QueryRequest().setStatement(updateStmt); + kvhandle.query(req); + + /* Read the CRDT. */ + MapValue key = new MapValue().put("id", 1); + + GetRequest getRequest = new GetRequest() + .setKey(key) + .setTableName(tableName); + checkGetRes(type, kvhandle.get(getRequest), 1); + + /* + * Use the direct driver to mimic the behavior of an mrtable agent. + * It puts a remote row which has non-zero counts for remote + * regions. + */ + TableImpl table = (TableImpl)tableApi.getTable(tableName); + RowImpl row = table.createRow(); + row.put("id", 2); + + row.setRegionId(2); + row.setModificationTime(System.currentTimeMillis()); + row.setExpirationTime(0); + + FieldValueImpl crdt = + table.getField("count"). + createCRDTValue(); + addCounts(type, crdt, 10, 12); + RecordValueImpl record = row; + record.putInternal(record.getFieldPos("count"), + crdt, false); + tableApi.putResolve(row, null, null); + + /* Read the CRDT in the remote row. 
*/ + key = new MapValue().put("id", 2); + + getRequest = new GetRequest() + .setKey(key) + .setTableName(tableName); + checkGetRes(type, kvhandle.get(getRequest), 22); + + } + + private void checkGetRes(Type type, GetResult getRes, int expected) { + if (type == Type.INTEGER) { + assertEquals(expected, + getRes.getValue().get("count").asInteger().getValue()); + } else if (type == Type.LONG) { + assertEquals(expected, + getRes.getValue().get("count").asLong().getValue()); + } else { + assertEquals(new BigDecimal(expected), + getRes.getValue().get("count").asNumber().getValue()); + } + } + + static void addCounts(Type type, + FieldValueImpl crdt, + int region1, + int region2) { + crdt.putMRCounterEntry(1, createValue(type, region1)); + crdt.putMRCounterEntry(2, createValue(type, region2)); + } + + static FieldValueImpl createValue(Type type, + int value) { + if (type == Type.INTEGER) { + return (FieldValueImpl)FieldValueFactory.createInteger(value); + } else if (type == Type.LONG) { + return (FieldValueImpl)FieldValueFactory.createLong(value); + } else { + return (FieldValueImpl)FieldValueFactory.createNumber(value); + } + } + + private String genString(int len) { + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < len; i++) { + sb.append((char)('A' + i % 26)); + } + return sb.toString(); + } + + protected static SystemResult doSysOp(NoSQLHandle nosqlHandle, + String statement) { + /* + * New kv code appears to take a while to return new + * plans if the store is fresh. Make the timeout long + * enough to cover that + */ + SystemRequest sreq = + new SystemRequest().setStatement(statement.toCharArray()). + setTimeout(20000); + SystemResult sres = nosqlHandle.systemRequest(sreq); + sres.waitForCompletion(nosqlHandle, 20000, 1000); + return sres; + } + + protected static void dropTableUsingAdmin(String tableName) + throws Exception { + int planId = admin.createRemoveTablePlan("dropTable", null, tableName); + execPlan(planId); + } + + protected static void execPlan(int planId) throws Exception { + admin.approvePlan(planId); + admin.executePlan(planId, false); + admin.awaitPlan(planId, 0, null); + admin.assertSuccess(planId); + } + + static void assertNoUnits(Result res) { + assertEquals(0, res.getReadKBInternal()); + assertEquals(0, res.getReadUnitsInternal()); + assertEquals(0, res.getWriteKBInternal()); + assertEquals(0, res.getWriteUnitsInternal()); + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/kv/KVSmoke.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/kv/KVSmoke.java new file mode 100644 index 00000000..d2095290 --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/kv/KVSmoke.java @@ -0,0 +1,174 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2011, 2018 Oracle and/or its affiliates. All rights reserved. 
+ * + */ + +package oracle.nosql.proxy.kv; + +import java.util.ArrayList; + +import oracle.nosql.driver.NoSQLHandle; +import oracle.nosql.driver.NoSQLHandleConfig; +import oracle.nosql.driver.NoSQLHandleFactory; +import oracle.nosql.driver.kv.StoreAccessTokenProvider; +import oracle.nosql.driver.ops.GetRequest; +import oracle.nosql.driver.ops.GetResult; +import oracle.nosql.driver.ops.ListTablesRequest; +import oracle.nosql.driver.ops.ListTablesResult; +import oracle.nosql.driver.ops.PutRequest; +import oracle.nosql.driver.ops.PutResult; +import oracle.nosql.driver.ops.TableRequest; +import oracle.nosql.driver.ops.TableResult; +import oracle.nosql.driver.values.MapValue; + +/** + * A small test to sanity check a proxy + kvstore that's been started + * by other, external means. + */ +public class KVSmoke { + + protected String endpoint; + protected StoreAccessTokenProvider authProvider; + protected NoSQLHandle myhandle; + + public static void main(String[] argv) { + KVSmoke smoke = new KVSmoke(); + if (argv.length == 0) { + System.err.println("Usage: java " + + "oracle.nosql.proxy.kv.KVSmoke <endpoint>"); + return; + } + + smoke.setupHandle(argv[0]); + smoke.doSmoke(); + smoke.shutdown(); + } + + + private void setupHandle(String endpoint) { + authProvider = new StoreAccessTokenProvider(); + NoSQLHandleConfig config = new NoSQLHandleConfig(endpoint); + config.setAuthorizationProvider(authProvider); + + /* + * Open the handle + */ + myhandle = NoSQLHandleFactory.createNoSQLHandle(config); + } + + private void doSmoke() { + final String tableName = "test"; + final String createTableStatement = + "CREATE TABLE IF NOT EXISTS " + tableName + + "(id INTEGER, " + + " pin INTEGER, " + + " name STRING, " + + " PRIMARY KEY(SHARD(pin), id))"; + + TableRequest tableRequest = new TableRequest() + .setStatement(createTableStatement); + TableResult tres = myhandle.tableRequest(tableRequest); + + tres.waitForCompletion(myhandle, 60000, 1000); + System.out.println("tres=" + tres + " " + tres.getTableState()); + + /* + * PUT a row + */ + MapValue value = new MapValue().put("id", 1). 
+ put("pin", "654321").put("name", "test1"); + + PutRequest putRequest = new PutRequest() + .setValue(value) + .setTableName(tableName); + PutResult putResult = myhandle.put(putRequest); + System.out.println("put version: " + putResult.getVersion()); + + /* + * GET the row + */ + MapValue key = new MapValue().put("id", 1).put("pin", "654321"); + + GetRequest getRequest = new GetRequest() + .setKey(key) + .setTableName(tableName); + GetResult getRes = myhandle.get(getRequest); + System.out.println("value: " + + getRes.getValue().get("name").asString().getValue()); + + /* + * PUT a second row using JSON + */ + String jsonString = + "{\"id\": 2, \"pin\": 123456, \"name\":\"test2\"}"; + + putRequest = new PutRequest() + .setValueFromJson(jsonString, null) + .setTableName(tableName); + putResult = myhandle.put(putRequest); + System.out.println("putResult = " + putResult); + } + + private void shutdown() { + if (myhandle != null) { + dropAllTables(myhandle); + myhandle.close(); + } + + if (authProvider != null) { + authProvider.close(); + authProvider = null; + } + } + + protected void dropAllTables(NoSQLHandle nosqlHandle) { + + /* get the names of all tables under this tenant */ + ListTablesRequest listTables = new ListTablesRequest(); + ListTablesResult lres = nosqlHandle.listTables(listTables); + ArrayList droppedTables = new ArrayList(); + /* clean up all the tables */ + for (String tableName: lres.getTables()) { + /* on-prem config may find system tables, which can't be dropped */ + if (tableName.startsWith("SYS$")) { + continue; + } + + /* ignore, but note exceptions */ + try { + TableResult tres = dropTableWithoutWait(nosqlHandle, tableName); + droppedTables.add(tres); + } catch (Exception e) { + System.err.println("DropAllTables: drop fail, table " + + tableName + ": " + e); + } + } + + /* + * don't wait for ACTIVE state. This may mean occasional + * failures but as long as tests pass that is ok. + */ + + /* wait for all tables dropped */ + for (TableResult tres: droppedTables) { + /* ignore, but note exceptions */ + try { + tres.waitForCompletion(nosqlHandle, 100000, 1000); + } catch (Exception e) { + System.err.println("DropAllTables: drop wait fail, table " + + tres + ": " + e); + } + } + } + + private TableResult dropTableWithoutWait(NoSQLHandle nosqlHandle, + String tableName) { + final String dropTableDdl = "drop table if exists " + tableName; + TableRequest tableRequest = new TableRequest() + .setStatement(dropTableDdl) + .setTimeout(100000); + return nosqlHandle.tableRequest(tableRequest); + } + +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/kv/MRTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/kv/MRTest.java new file mode 100644 index 00000000..3b435902 --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/kv/MRTest.java @@ -0,0 +1,347 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2011, 2023 Oracle and/or its affiliates. All rights reserved. 
+ * + */ + +package oracle.nosql.proxy.kv; + + +/* +import static org.junit.Assert.assertEquals; +*/ + +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.fail; + +import java.io.File; +import java.math.BigDecimal; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.logging.Logger; + +import oracle.kv.KVStore; +import oracle.kv.KVStoreFactory; +import oracle.kv.KVStoreConfig; +import oracle.kv.StatementResult; +import oracle.kv.impl.api.table.TableAPIImpl; +import oracle.kv.impl.test.TestStatus; +import oracle.kv.impl.xregion.service.JsonConfig; +import oracle.kv.impl.xregion.service.RegionInfo; +import oracle.kv.impl.xregion.service.XRegionService; +import oracle.kv.query.ExecuteOptions; +import oracle.kv.table.PrimaryKey; +import oracle.kv.table.RecordValue; +import oracle.kv.table.Row; + +import oracle.nosql.driver.NoSQLHandle; +import oracle.nosql.driver.NoSQLHandleConfig; +import oracle.nosql.driver.NoSQLHandleFactory; +import oracle.nosql.driver.kv.StoreAccessTokenProvider; +import oracle.nosql.driver.ops.GetRequest; +import oracle.nosql.driver.ops.GetTableRequest; +import oracle.nosql.driver.ops.GetResult; +import oracle.nosql.driver.ops.PutRequest; +import oracle.nosql.driver.ops.PutResult; +import oracle.nosql.driver.ops.SystemResult; +import oracle.nosql.driver.ops.TableResult; +import oracle.nosql.driver.values.MapValue; + +import oracle.nosql.proxy.Proxy; +import oracle.nosql.proxy.ProxyTestBase; +import oracle.nosql.proxy.util.CreateStore; +import oracle.nosql.proxy.util.TestBase; + +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Ignore; +import org.junit.Test; + +/* + * Multi-region test using CreateStore + * Steps to set up MR tables: + * 1. create local/remote stores + * 2. set local region names for each + * 3. configure the XRegion service + * a. create config for each region + * b. start service for each region (service == agent) + * 4. create remote regions (e.g. 
local needs to know about remote, vice versa) + */ +public class MRTest extends ProxyTestBase { + + private static final int startPort = 13250; + private static final String localStoreName = "JsonMR-local"; + private static final String remoteStoreName = "JsonMR-remote"; + private static final String localRegion = "LOC"; + private static final String remoteRegion = "REM"; + private static final int localProxyPort = 8095; + private static final int remoteProxyPort = 8096; + private static final String localProxyEndpoint = + "http://localhost:" + localProxyPort; + private static final String remoteProxyEndpoint = + "http://localhost:" + remoteProxyPort; + + private static CreateStore localStore; + private static CreateStore remoteStore; + + private static KVStore localKV; + private static KVStore remoteKV; + + private static TableAPIImpl localTableImpl; + private static TableAPIImpl remoteTableImpl; + + private static HashSet localHelpers; + private static HashSet remoteHelpers; + + private static Proxy localProxy; + private static Proxy remoteProxy; + + private static NoSQLHandle localHandle; + private static NoSQLHandle remoteHandle; + + private static RegionInfo localInfo; + private static RegionInfo remoteInfo; + + private static XRegionService localService; + private static XRegionService remoteService; + + private static StoreAccessTokenProvider authProvider = + new StoreAccessTokenProvider(); + + private static Logger testLogger = + Logger.getLogger(MRTest.class.getName()); + + @BeforeClass + public static void staticSetUp() throws Exception { + cleanupTestDir(); + + TestStatus.setManyRNs(true); + localStore = createStore((getTestDir() + "/" + localStoreName), + localStoreName, startPort); + localKV = KVStoreFactory.getStore(createKVConfig(localStore)); + localTableImpl = (TableAPIImpl)localKV.getTableAPI(); + String helperString = localStore.getHostname() + ":" + + localStore.getRegistryPort(); + localHelpers = new HashSet(); + localHelpers.add(helperString); + localInfo = new RegionInfo(localRegion, localStoreName, + new String[]{helperString}); + localProxy = KVNonSecureProxyTest.startKVProxy(localStoreName, + helperString, + localProxyPort, + false); + localHandle = getHandle(localProxyEndpoint); + assertNotNull(localHandle); + + remoteStore = createStore((getTestDir() + "/" + remoteStoreName), + remoteStoreName, startPort); + remoteKV = KVStoreFactory.getStore(createKVConfig(remoteStore)); + remoteTableImpl = (TableAPIImpl)remoteKV.getTableAPI(); + helperString = remoteStore.getHostname() + ":" + + remoteStore.getRegistryPort(); + remoteHelpers = new HashSet(); + remoteHelpers.add(helperString); + remoteInfo = new RegionInfo(remoteRegion, remoteStoreName, + new String[]{helperString}); + remoteProxy = KVNonSecureProxyTest.startKVProxy(remoteStoreName, + helperString, + remoteProxyPort, + false); + remoteHandle = getHandle(remoteProxyEndpoint); + assertNotNull(remoteHandle); + + /* defer system ops until fully initialized */ + localProxy.getTenantManager().waitForStoreInit(30); + remoteProxy.getTenantManager().waitForStoreInit(30); + doSysOp(localHandle, "set local region " + localRegion); + doSysOp(remoteHandle, "set local region " + remoteRegion); + + // create XR services + JsonConfig localConfig = + createXRConfig(getTestDir().toString(), + localRegion, + localStoreName, + localHelpers); + localConfig.addRegion(remoteInfo); + localService = createXRService(localConfig, testLogger); + Thread localServiceThread = new Thread(localService); + localServiceThread.start(); + + 
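/* mirror the local setup: build the remote region's XRegion config and start its service */ + 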
JsonConfig remoteConfig = + createXRConfig(getTestDir().toString(), + remoteRegion, + remoteStoreName, + remoteHelpers); + remoteConfig.addRegion(localInfo); + remoteService = createXRService(remoteConfig, testLogger); + Thread remoteServiceThread = new Thread(remoteService); + remoteServiceThread.start(); + while (!(remoteService.isRunning() && localService.isRunning())) { + Thread.sleep(1000); + } + + /* add "other" regions to each region */ + doSysOp(localHandle, "create region " + remoteRegion); + doSysOp(remoteHandle, "create region " + localRegion); + } + + @AfterClass + public static void staticTearDown() throws Exception { + /* shutdown agents first */ + if (localService != null) { + localService.shutdown(); + } + if (remoteService != null) { + remoteService.shutdown(); + } + if (localProxy != null) { + localProxy.shutdown(3, TimeUnit.SECONDS); + } + if (remoteProxy != null) { + remoteProxy.shutdown(3, TimeUnit.SECONDS); + } + + if (localStore != null) { + localKV.close(); + localStore.shutdown(); + } + if (remoteStore != null) { + remoteKV.close(); + remoteStore.shutdown(); + } + cleanupTestDir(); + } + + @Override + public void setUp() + throws Exception { + KVProxyTest.dropAllMetadata(localHandle); + KVProxyTest.dropAllMetadata(remoteHandle); + } + + @Override + public void tearDown() + throws Exception { + KVProxyTest.dropAllMetadata(localHandle); + KVProxyTest.dropAllMetadata(remoteHandle); + } + + @Test + public void testJsonCollection() throws Exception { + + doSysOp(localHandle, "create table foo(id number, primary key(id), " + + "counter as integer mr_counter) " + + "in regions LOC, REM as json collection"); + doSysOp(remoteHandle, "create table foo(id number, primary key(id), " + + "counter as integer mr_counter) " + + "in regions REM, LOC as json collection"); + + /* test fix for DDL generation for MR tables */ + GetTableRequest gtr = new GetTableRequest().setTableName("foo"); + TableResult tr = localHandle.getTable(gtr); + assertNotNull(tr.getDdl()); + + BigDecimal bd = new BigDecimal("1"); + + // put row in local + MapValue value = new MapValue(); + value.put("Id", bd); // use "Id" to test case-insensitivity of pkey + value.putFromJson("some_json", "{\"a\": {\"b\": {\"c\":1}}}", null); + PutRequest putRequest = new PutRequest() + .setValue(value) + .setTableName("foo"); + + /* initial local put */ + PutResult res = localHandle.put(putRequest); + + /* local get */ + MapValue key = new MapValue().put("id", bd); + GetRequest getRequest = new GetRequest() + .setKey(key) + .setTableName("foo"); + GetResult gres = localHandle.get(getRequest); + assertTrue(gres.getValue().get("counter").getInt() == 0); + + /* wait for replication to remote */ + gres = remoteHandle.get(getRequest); + while (gres.getValue() == null) { + Thread.sleep(500); + gres = remoteHandle.get(getRequest); + } + assertTrue(gres.getValue().get("counter").getInt() == 0); + + List queryRes = + doQuery(localHandle, "update foo $f set $f.counter = $ + 5 where " + + "id = 1"); + + /* wait for replication to remote, failure is an infinite loop */ + while(gres.getValue().get("counter").getInt() != 5) { + Thread.sleep(500); + gres = remoteHandle.get(getRequest); + } + + // update non-counter data, make sure it's not changed + value.putFromJson("some_json", "{\"a\": 25}", null); + res = localHandle.put(putRequest); + gres = localHandle.get(getRequest); + assertTrue(gres.getValue().get("counter").getInt() == 5); + assertTrue(gres.getValue(). 
+ get("some_json").asMap().get("a").getInt() == 25); + } + + private static CreateStore createStore(String rootDir, + String storeName, + int port) throws Exception { + + CreateStore cs = new CreateStore(rootDir, + storeName, + port, + 1, /* n SNs */ + 1, /* rf */ + 10, /* n partitions */ + 1, /* capacity per SN */ + CreateStore.MB_PER_SN, + false, /* use threads is false */ + null); + final File root = new File(rootDir); + root.mkdirs(); + cs.start(); + return cs; + } + + private static KVStoreConfig createKVConfig(CreateStore cs) { + final KVStoreConfig config = cs.createKVConfig(); + return config; + } + + private static JsonConfig createXRConfig(String testPath, + String regionName, + String storeName, + HashSet helpers) { + return new JsonConfig(testPath, 1, 0, regionName, storeName, helpers); + } + + private static XRegionService createXRService(JsonConfig conf, + Logger logger) { + return new XRegionService(conf, logger); + } + + private static NoSQLHandle getHandle(String endpoint) { + NoSQLHandleConfig config = new NoSQLHandleConfig(endpoint); + config.setAuthorizationProvider(authProvider); + + /* + * Open the handle + */ + return NoSQLHandleFactory.createNoSQLHandle(config); + } + + private static SystemResult doSysOp(NoSQLHandle handle, + String statement) { + return KVProxyTest.doSysOp(handle, statement); + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/rest/ChildTableTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/rest/ChildTableTest.java new file mode 100644 index 00000000..f0e28d0e --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/rest/ChildTableTest.java @@ -0,0 +1,833 @@ +/*- + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * See the file LICENSE for redistribution information. 
+ * + */ +package oracle.nosql.proxy.rest; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeTrue; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import com.oracle.bmc.model.BmcException; +import com.oracle.bmc.nosql.model.ChangeTableCompartmentDetails; +import com.oracle.bmc.nosql.model.Column; +import com.oracle.bmc.nosql.model.CreateTableDetails; +import com.oracle.bmc.nosql.model.Schema; +import com.oracle.bmc.nosql.model.Table; +import com.oracle.bmc.nosql.model.TableLimits; +import com.oracle.bmc.nosql.model.TableSummary; +import com.oracle.bmc.nosql.model.UpdateTableDetails; +import com.oracle.bmc.nosql.requests.ChangeTableCompartmentRequest; +import com.oracle.bmc.nosql.requests.CreateIndexRequest; +import com.oracle.bmc.nosql.requests.CreateTableRequest; +import com.oracle.bmc.nosql.requests.DeleteRowRequest; +import com.oracle.bmc.nosql.requests.DeleteTableRequest; +import com.oracle.bmc.nosql.requests.GetIndexRequest; +import com.oracle.bmc.nosql.requests.GetRowRequest; +import com.oracle.bmc.nosql.requests.ListTableUsageRequest; +import com.oracle.bmc.nosql.requests.ListTablesRequest; +import com.oracle.bmc.nosql.requests.ListTablesRequest.SortBy; +import com.oracle.bmc.nosql.requests.ListTablesRequest.SortOrder; +import com.oracle.bmc.nosql.requests.UpdateTableRequest; +import com.oracle.bmc.nosql.responses.DeleteRowResponse; +import com.oracle.bmc.nosql.responses.GetIndexResponse; +import com.oracle.bmc.nosql.responses.GetRowResponse; +import com.oracle.bmc.nosql.responses.GetTableResponse; +import com.oracle.bmc.nosql.responses.ListTablesResponse; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TestName; + +/* Child tables test */ +public class ChildTableTest extends RestAPITestBase { + @Rule + public final TestName test = new TestName(); + + private final static String createTDdl = + "create table t(id integer, s string, primary key(id))"; + private final static String createTADdl = + "create table t.a(ida integer, s string, primary key(ida))"; + private final static String createIfNotExistsTADdl = + "create table if not exists t.a(ida integer, s string, primary key(ida))"; + private final static String createTABDdl = + "create table t.a.b(idb integer, s string, primary key(idb))"; + private final static String createTGDdl = + "create table t.g(idg integer, s string, primary key(idg))"; + + private final String newCompartmentId = getCompartmentIdMoveTo(); + + @Override + public void tearDown() throws Exception { + if (test.getMethodName().equals("testMoveCompartment")) { + /* Cleanup the tables in newCompartmentId */ + removeAllTables(newCompartmentId); + } + super.tearDown(); + } + + /** + * Test child table related table operations: + * 1. create table + * 2. get table + * 3. list tables + * 4. create/drop index + * 5. alter table + * 6. 
drop table + */ + @Test + public void testBasicTableOps() { + /* + * Create table + */ + createTable("t", createTDdl); + createTable("t.a", createTADdl, null /* limits */); + + /* create table t.a again, expect to get TableExistsException */ + createChildTableFail("t.a", createTADdl, "TableAlreadyExists"); + + /* create table with if not exists should succeed */ + createTable("t.a", createIfNotExistsTADdl, null /* limits */); + + /* + * create table with non-null limits, expect to get InvalidParameter + * error + */ + CreateTableRequest ctReq = + buildCreateTableRequest(getCompartmentId(), "t.a.b", + createTABDdl, defaultLimits); + executeDdlFail(ctReq, "InvalidParameter"); + + createTable("t.a.b", createTABDdl, null /* limits */); + createTable("t.g", createTGDdl, null /* limits */); + + /* + * Get table + */ + GetTableResponse gtRet; + Map columns = new HashMap<>(); + gtRet = getTable("t.a"); + columns.clear(); + columns.put("id", "INTEGER"); + columns.put("ida", "INTEGER"); + columns.put("s", "STRING"); + checkTableInfo(gtRet.getTable(), + "t.a", + columns, + new String[] {"id", "ida"}, + new String[] {"id"}, + null); + + gtRet = getTable("t.a.b"); + columns.clear(); + columns.put("id", "INTEGER"); + columns.put("ida", "INTEGER"); + columns.put("idb", "INTEGER"); + columns.put("s", "STRING"); + checkTableInfo(gtRet.getTable(), + "t.a.b", + columns, + new String[] {"id", "ida", "idb"}, + new String[] {"id"}, + null); + + /* + * List tables + */ + ListTablesRequest ltReq; + ListTablesResponse ltRes; + ltReq = ListTablesRequest.builder() + .compartmentId(getCompartmentId()) + .sortBy(SortBy.Name) + .sortOrder(SortOrder.Asc) + .build(); + ltRes = client.listTables(ltReq); + String[] expTables = new String[] {"t", "t.a", "t.a.b", "t.g"}; + if (cloudRunning) { + int i = 0; + for (TableSummary ts : ltRes.getTableCollection().getItems()) { + assertEquals(expTables[i++], ts.getName()); + } + } else { + assertEquals(expTables.length, + ltRes.getTableCollection().getItems().size()); + } + + /* + * Create/Drop index + */ + createIndex("t.a.b", "idx1", new String[]{"s"}); + dropIndex("t.a.b", "idx1", true); + + /* + * Alter table + */ + String ddl = "alter table t.a (add i integer)"; + alterTable("t.a", ddl); + gtRet = getTable("t.a"); + columns.clear(); + columns.put("id", "INTEGER"); + columns.put("ida", "INTEGER"); + columns.put("s", "STRING"); + columns.put("i", "INTEGER"); + checkTableInfo(gtRet.getTable(), + "t.a", + columns, + new String[] {"id", "ida"}, + new String[] {"id"}, + null); + + ddl = "alter table t.g (drop s)"; + alterTable("t.g", ddl); + gtRet = getTable("t.g"); + columns.clear(); + columns.put("id", "INTEGER"); + columns.put("idg", "INTEGER"); + checkTableInfo(gtRet.getTable(), + "t.g", + columns, + new String[] {"id", "idg"}, + new String[] {"id"}, + null); + /* + * Drop table + */ + dropTable("t.a.b"); + dropTable("t.a.b", true /* ifExists */, true /* wait */); + } + + @Test + public void testOpsWithOcid() { + assumeTrue("Skipping testOpsWithOcid if not minicloud or cloud test", + cloudRunning); + + /* + * Create table + */ + createTable("t", createTDdl, defaultLimits); + createTable("t.a", createTADdl, null /* limits */); + + String tOcid = getTableId("t"); + String taOcid = getTableId("t.a"); + + /* create/drop index */ + createIndex(tOcid, "idxt1", new String[]{"s"}); + + /* create index idxa1 on t.a(s) */ + createIndex(taOcid, "idxa1", new String[]{"s"}); + + /* create index if not exists idxa1 on t.a(s) */ + createIndex(taOcid, "idxa1", new String[]{"s"}, true /* 
ifNotExists */, + true /* wait */); + + /* get index */ + GetIndexRequest giReq = GetIndexRequest.builder() + .tableNameOrId(taOcid) + .indexName("idxa1") + .build(); + GetIndexResponse giRes = client.getIndex(giReq); + assertEquals("idxa1", giRes.getIndex().getName()); + + /* IllegalArgument: Invalid index field 'invalid' */ + CreateIndexRequest ciReq = + buildCreateIndexRequest(taOcid, + "idxa2", + new String[] {"s", "invalid"}, + false /* ifNotExists */); + executeDdlFail(ciReq, "IllegalArgument"); + /* + * IllegalArgument: Index is a duplicate of an existing index with + * another name + */ + ciReq = buildCreateIndexRequest(taOcid, + "idxa2", + new String[] {"s"}, + false /* ifNotExists */); + executeDdlFail(ciReq, "IllegalArgument"); + + /* drop index idxa1 on t.a */ + dropIndex(taOcid, "idxa1", true /* wait */); + + /* drop index if exists idxa1 on t.a */ + dropIndex(taOcid, "idxa1", true /* ifExists */, true /* wait */); + + /* Alter table */ + GetTableResponse gtRes = getTable(taOcid); + + String ddl = "alter table t.a(add n1 integer)"; + alterTable(taOcid, ddl, true /* wait */); + + gtRes = getTable(taOcid); + assertTrue(gtRes.getTable() + .getDdlStatement() + .toLowerCase() + .contains("n1 integer")); + + /* Update table limits */ + UpdateTableRequest utReq = buildUpdateTableRequest(taOcid, defaultLimits); + executeDdlFail(utReq, "InvalidParameter"); + + /* Update table tags */ + Map ftags = new HashMap<>(); + ftags.put("name", "nosql"); + + UpdateTableDetails utInfo = UpdateTableDetails.builder() + .freeformTags(ftags) + .build(); + utReq = UpdateTableRequest.builder() + .tableNameOrId(taOcid) + .updateTableDetails(utInfo) + .build(); + executeDdl(utReq); + + gtRes = getTable(taOcid); + assertNotNull(gtRes.getTable().getFreeformTags()); + + /* Change compartment */ + ChangeTableCompartmentDetails info = + ChangeTableCompartmentDetails.builder() + .toCompartmentId(newCompartmentId) + .build(); + ChangeTableCompartmentRequest ctcReq = + ChangeTableCompartmentRequest.builder() + .tableNameOrId(taOcid) + .changeTableCompartmentDetails(info) + .build(); + executeDdlFail(ctcReq, "IllegalArgument"); + + /* list table usage */ + ListTableUsageRequest ltuReq = ListTableUsageRequest.builder() + .tableNameOrId(taOcid) + .build(); + try { + client.listTableUsage(ltuReq); + fail("Expect to fail but not"); + } catch (BmcException ex) { + assertEquals("InvalidParameter", ex.getServiceCode()); + } + + /* + * Row operations + */ + Map row = makeTARow(1, 1); + putRow(taOcid, row); + + List key = Arrays.asList(new String[] {"id:1", "ida:1"}); + GetRowRequest grReq = GetRowRequest.builder() + .tableNameOrId(taOcid) + .key(key) + .build(); + GetRowResponse grRes = client.getRow(grReq); + assertNotNull(grRes.getRow()); + + DeleteRowRequest drReq = DeleteRowRequest.builder() + .tableNameOrId(taOcid) + .key(key) + .build(); + DeleteRowResponse drRes = client.deleteRow(drReq); + assertTrue(drRes.getDeleteRowResult().getIsSuccess()); + + /* Drop table */ + String[] ocids = new String[] {taOcid, tOcid}; + for (String ocid : ocids) { + dropTable(ocid); + + /* + * TODO: NOSQL-715 + * Enable below case in cloud test after fix it + */ + if (!useCloudService) { + dropTable(ocid, true /* ifExists */, true /* wait */); + DeleteTableRequest dtReq; + dtReq = DeleteTableRequest.builder() + .tableNameOrId(ocid) + .build(); + executeDdlFail(dtReq, "TableNotFound"); + } + } + } + + @Test + public void testLimitTables() { + assumeTrue("Skipping testLimitTables if not minicloud or cloud test " + + "or tenantLimits is 
not provided", + cloudRunning && tenantLimits != null); + + final int tableLimit = tenantLimits.getNumTables(); + if (tableLimit > NUM_TABLES) { + /* + * To prevent this test from running too long, skip the test if the + * table number limit > ProxyTestBase.NUM_TABLES + */ + return; + } + + String ddl = "create table p(id integer, s string, primary key(id))"; + createTable("p", ddl); + + String fmt = "create table %s(%s integer, s string, primary key(%s))"; + String table; + for (int i = 0; i < tableLimit - 1; i++) { + table = "p.c" + i; + ddl = String.format(fmt, table, "ck", "ck"); + createTable(table, ddl, null /* limits */); + + if ((++i) < tableLimit - 1) { + table += ".d"; + ddl = String.format(fmt, table, "dk", "dk"); + createTable(table, ddl, null /* limits */); + } + } + + table = "p.c" + tableLimit; + ddl = String.format(fmt, table, "ck", "ck"); + createChildTableFail(table, ddl, "TableLimitExceeded"); + + /* + * List tables + */ + ListTablesRequest ltReq; + ListTablesResponse ltRes; + ltReq = ListTablesRequest.builder() + .compartmentId(getCompartmentId()) + .build(); + ltRes = client.listTables(ltReq); + assertEquals(tableLimit, ltRes.getTableCollection().getItems().size()); + } + + @Test + public void testLimitColumns() { + assumeTrue("Skipping testLimitColumns if not minicloud or cloud test " + + "or tenantLimits is not provided", + cloudRunning && tenantLimits != null); + + final int columnLimit = tenantLimits.getStandardTableLimits(). + getColumnsPerTable(); + + String ddl = "create table p(" + + " k1 integer, " + + " k2 integer, " + + " k3 integer, " + + " s string, " + + " primary key(k1, k2, k3))"; + createTable("p", ddl); + + /* + * Create table p.c with N columns, N is the number of column per table. + */ + StringBuilder sb; + sb = new StringBuilder("create table p.c(c1 integer, primary key(c1)"); + for (int i = 4; i < columnLimit; i++) { + sb.append(", s").append(i).append(" string"); + } + sb.append(")"); + createTable("p.c", sb.toString(), null /* limits */); + + /* + * Create table p.c.d with N + 1 columns, N is the number of column per + * table. + */ + sb = new StringBuilder("create table p.c.d(d1 integer, primary key(d1)"); + for (int i = 5; i < columnLimit + 1; i++) { + sb.append(", s").append(i).append(" string"); + } + sb.append(")"); + createChildTableFail("p.c.d", sb.toString(), "IllegalArgument"); + + /* + * Adding more field to p.c should fail as the columns number will + * exceed the limit + */ + UpdateTableDetails atInfo; + UpdateTableRequest atReq; + + ddl = "alter table p.c(add n1 integer)"; + atInfo = UpdateTableDetails.builder() + .compartmentId(getCompartmentId()) + .ddlStatement(ddl) + .build(); + atReq = UpdateTableRequest.builder() + .updateTableDetails(atInfo) + .tableNameOrId("p.c") + .build(); + executeDdlFail(atReq, "IllegalArgument"); + } + + /** + * Test invalid table operations on child table: + * 1. Can't set limits on child table when create table + * 2. Can't create table if its parent doesn't exist + * 3. Don't allow to update limits of child table + * 4. Can't drop the parent table if referenced by any child + * 5. Don't allow to get table usage of child table + */ + @Test + public void testInvalidTableOps() { + /* + * Cannot set limits on child table + * TODO: add this case after modify the TableLimits of + * CreateTableDetails to be optional + */ + + /* The parent table of t.a does not exist */ + createChildTableFail("t.a", createTADdl, + (cloudRunning ? 
"IllegalArgument" : "InvalidParameter")); + + createTable("t", createTDdl); + createTable("t.a", createTADdl, null /* limits */); + + /* Don't allow to update limits of child table */ + UpdateTableDetails utInfo = UpdateTableDetails.builder() + .compartmentId(getCompartmentId()) + .tableLimits(defaultLimits) + .build(); + UpdateTableRequest utReq = UpdateTableRequest.builder() + .updateTableDetails(utInfo) + .tableNameOrId("t.a") + .build(); + executeDdlFail(utReq, "InvalidParameter"); + + /* Cannot drop the parent table still referenced by child table */ + DeleteTableRequest req = DeleteTableRequest.builder() + .compartmentId(getCompartmentId()) + .tableNameOrId("t") + .build(); + executeDdlFail(req, + (cloudRunning ? "IllegalArgument": "InvalidParameter")); + + if (cloudRunning) { + /* Don't allow to get table usage of child table */ + ListTableUsageRequest ltuReq = ListTableUsageRequest.builder() + .tableNameOrId("t.a") + .compartmentId(getCompartmentId()) + .build(); + try { + client.listTableUsage(ltuReq); + } catch (BmcException ex) { + assertEquals("InvalidParameter" , ex.getServiceCode()); + checkErrorMessage(ex); + } + + /* + * The Child table can't not be auto-reclaimable if its parent table + * is not auto reclaimable. + */ + createChildTableFail("t.g", createTGDdl, + true /* isAutoReclaimable*/, + "IllegalArgument"); + } + } + + /** + * Test put/get/delete row of child table. + */ + @Test + public void testPutGetDelete() { + createTable("t", createTDdl); + createTable("t.a", createTADdl, null /* limits */); + createTable("t.a.b", createTABDdl, null /* limits */); + + Map row; + GetRowRequest grReq; + GetRowResponse grRes; + DeleteRowRequest drReq; + DeleteRowResponse drRes; + + /* put a row to table t */ + row = makeTRow(1); + putRow("t", row); + + /* put a row to table t.a */ + row = makeTARow(1, 2); + putRow("t.a", row); + + List key = Arrays.asList(new String[] {"id:1", "ida:2"}); + grReq = GetRowRequest.builder() + .tableNameOrId("t.a") + .compartmentId(getCompartmentId()) + .key(key) + .build(); + grRes = client.getRow(grReq); + assertNotNull(grRes.getRow()); + + drReq = DeleteRowRequest.builder() + .tableNameOrId("t.a") + .compartmentId(getCompartmentId()) + .key(key) + .build(); + drRes = client.deleteRow(drReq); + assertTrue(drRes.getDeleteRowResult().getIsSuccess()); + + /* put a row to table t.a.b */ + row = makeTABRow(1, 2, 3); + putRow("t.a.b", row); + + key = Arrays.asList(new String[] {"id:1", "ida:2", "idb:3"}); + grReq = GetRowRequest.builder() + .tableNameOrId("t.a.b") + .compartmentId(getCompartmentId()) + .key(key) + .build(); + grRes = client.getRow(grReq); + assertNotNull(grRes.getRow()); + + drReq = DeleteRowRequest.builder() + .tableNameOrId("t.a.b") + .compartmentId(getCompartmentId()) + .key(key) + .build(); + drRes = client.deleteRow(drReq); + assertTrue(drRes.getDeleteRowResult().getIsSuccess()); + } + + @Test + public void testTags() { + assumeTrue("Skipping testTags for non-minicloud test", cloudRunning); + + Map ftags = new HashMap<>(); + Map> dtags = new HashMap<>(); + Map dtProps = new HashMap<>(); + + /* freeform tags */ + ftags.put("name", "nosql"); + + /* predefined tags */ + dtProps = new HashMap<>(); + dtProps.put(DEFINED_TAG_PROP, "true"); + dtags.put(DEFINED_TAG_NAMESPACE, dtProps); + + /* create table t */ + createTable("t", createTDdl); + + GetTableResponse gtRet; + /* create table t.a with freeformTags/definedTags */ + createTable("t.a", createTADdl, null/* limits */, ftags, dtags); + gtRet = getTable("t.a"); + 
checkTableTags(gtRet.getTable(), ftags, dtags, null); + + ftags.put("company", "oracle"); + dtProps.put(DEFINED_TAG_PROP, "false"); + + /* Update tags */ + UpdateTableDetails utInfo = + UpdateTableDetails.builder() + .compartmentId(getCompartmentId()) + .freeformTags(ftags) + .definedTags(dtags) + .build(); + UpdateTableRequest utReq = + UpdateTableRequest.builder() + .tableNameOrId("t.a") + .updateTableDetails(utInfo) + .build(); + executeDdl(utReq); + + gtRet = getTable("t.a"); + checkTableTags(gtRet.getTable(), ftags, dtags, null); + } + + @Test + public void testMoveCompartment() { + assumeTrue("Skipping testMoveCompartment if not run against minicloud", + cloudRunning); + + final String[] tableNames = new String[] {"t", "t.a", "t.a.b", "t.g"}; + final String[] tableOcids = new String[4]; + String tOcid = null; + + /* Create table t and its descendant tables t.a, t.a.b and t.g */ + createTable("t", createTDdl); + tOcid = getTableId("t"); + tableOcids[0] = tOcid; + createTable("t.a", createTADdl, null /* limits */); + tableOcids[1] = getTableId("t.a"); + createTable("t.a.b", createTABDdl, null /* limits */); + tableOcids[2] = getTableId("t.a.b"); + createTable("t.g", createTGDdl, null /* limits */); + tableOcids[3] = getTableId("t.g"); + + ChangeTableCompartmentRequest req; + ChangeTableCompartmentDetails info; + + /* Cannot change child table's compartment */ + info = ChangeTableCompartmentDetails.builder() + .fromCompartmentId(getCompartmentId()) + .toCompartmentId(newCompartmentId) + .build(); + req = ChangeTableCompartmentRequest.builder() + .tableNameOrId("t.a") + .changeTableCompartmentDetails(info) + .build(); + executeDdlFail(req, "IllegalArgument"); + + /* + * Move the top table's compartment, its descendants table should be + * moved as well + */ + req = ChangeTableCompartmentRequest.builder() + .tableNameOrId("t") + .changeTableCompartmentDetails(info) + .build(); + executeDdl(req); + + GetTableResponse gtRes; + /* Get table using ocid */ + for (String ocid : tableOcids) { + gtRes = getTable(ocid); + assertEquals(newCompartmentId, gtRes.getTable().getCompartmentId()); + } + + /* + * Move compartment using table ocid from newCompartment to + * testCompartment. 
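+      * The descendant tables t.a, t.a.b and t.g are expected to move back with it.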
+ */ + info = ChangeTableCompartmentDetails.builder() + .toCompartmentId(getCompartmentId()) + .build(); + req = ChangeTableCompartmentRequest.builder() + .tableNameOrId(tOcid) + .changeTableCompartmentDetails(info) + .build(); + executeDdl(req); + + /* Get table using compartmentId + table name */ + for (String tname : tableNames) { + gtRes = getTable(getCompartmentId(), tname); + assertEquals(getCompartmentId(), gtRes.getTable().getCompartmentId()); + } + } + + private void createChildTableFail(String tableName, + String ddl, + String expError) { + createChildTableFail(tableName, ddl, false, expError); + } + + private void createChildTableFail(String tableName, + String ddl, + boolean isAutoReclaimable, + String expError) { + CreateTableDetails ctInfo; + CreateTableRequest ctReq; + + ctInfo = CreateTableDetails.builder() + .compartmentId(getCompartmentId()) + .name(tableName) + .ddlStatement(ddl) + .isAutoReclaimable(isAutoReclaimable) + .build(); + ctReq = CreateTableRequest.builder() + .createTableDetails(ctInfo) + .build(); + executeDdlFail(ctReq, expError); + } + + private Map makeTRow(int id) { + Map row = new HashMap<>(); + row.put("id", id); + row.put("s", "s" + id); + return row; + } + + private Map makeTARow(int id, int ida) { + Map row = new HashMap<>(); + row.put("id", id); + row.put("ida", ida); + row.put("s", "s" + id + "_" + ida); + return row; + } + + private Map makeTABRow(int id, int ida, int idb) { + Map row = new HashMap<>(); + row.put("id", id); + row.put("ida", ida); + row.put("idb", idb); + row.put("s", "s" + id + "_" + ida + "_" + idb); + return row; + } + + private void checkTableInfo(Table table, + String tableName, + Map columns, + String[] primaryKeys, + String[] shardKeys, + TableLimits limits) { + assertNotNull(table); + + assertEquals(getCompartmentId(), table.getCompartmentId()); + assertEquals(tableName, table.getName()); + assertNotNull(table.getTimeCreated()); + assertNotNull(table.getDdlStatement()); + + Schema schema = table.getSchema(); + assertNotNull(schema); + + if (columns != null) { + List cols = schema.getColumns(); + assertEquals(columns.size(), cols.size()); + for (Column col : cols) { + String name = col.getName(); + assertTrue(columns.containsKey(name)); + assertTrue(columns.get(name).toUpperCase() + .equalsIgnoreCase(col.getType())); + + if (schema.getPrimaryKey().contains(name)) { + assertFalse(col.getIsNullable()); + } else { + assertTrue(col.getIsNullable()); + } + } + } + + if (primaryKeys != null) { + assertEquals(primaryKeys.length, schema.getPrimaryKey().size()); + int i = 0; + for (String key : schema.getPrimaryKey()) { + assertTrue(key.equalsIgnoreCase(primaryKeys[i++])); + } + + String[] skeys = (shardKeys != null) ? 
shardKeys : primaryKeys; + assertEquals(skeys.length, schema.getShardKey().size()); + i = 0; + for (String key : schema.getShardKey()) { + assertTrue(key.equalsIgnoreCase(skeys[i++])); + } + } + + if (limits != null) { + assertEquals(limits, table.getTableLimits()); + } else { + assertNull(table.getTableLimits()); + } + /* TODO: more validation */ + } + + private void checkTableTags(Table table, + Map freeformTags, + Map> definedTags, + Map> systemTags) { + + if (freeformTags != null) { + assertEquals(freeformTags, table.getFreeformTags()); + } + + if (definedTags != null) { + assertDefinedTags(definedTags, table.getDefinedTags()); + } + + if (systemTags != null) { + assertEquals(systemTags, table.getSystemTags()); + } + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/rest/ConfigurationTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/rest/ConfigurationTest.java new file mode 100644 index 00000000..a2d767ae --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/rest/ConfigurationTest.java @@ -0,0 +1,327 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + */ +package oracle.nosql.proxy.rest; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import org.junit.After; +import org.junit.Assume; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +import com.oracle.bmc.model.BmcException; +import com.oracle.bmc.nosql.model.Configuration; +import com.oracle.bmc.nosql.model.HostedConfiguration; +import com.oracle.bmc.nosql.model.KmsKey; +import com.oracle.bmc.nosql.model.MultiTenancyConfiguration; +import com.oracle.bmc.nosql.model.UpdateHostedConfigurationDetails; +import com.oracle.bmc.nosql.model.WorkRequest.Status; +import com.oracle.bmc.nosql.requests.GetConfigurationRequest; +import com.oracle.bmc.nosql.requests.UnassignKmsKeyRequest; +import com.oracle.bmc.nosql.requests.UpdateConfigurationRequest; +import com.oracle.bmc.nosql.responses.GetConfigurationResponse; +import com.oracle.bmc.nosql.responses.UnassignKmsKeyResponse; +import com.oracle.bmc.nosql.responses.UpdateConfigurationResponse; + +/** + * Test configuration APIs: + * - get-configuration + * - update-configuration and + * - unassign-kms-key + */ +public class ConfigurationTest extends RestAPITestBase { + + private static final String testKeyId = + "ocid1.key.oc1.ca-montreal-1.gbtt5qeuaaa2c.ab4xkljr2eqgqifrmpxqddjdtic2lfj4owmln4dyfu4hhifw677hanrk5pna"; + private static final String testUpdateKeyId = + "ocid1.key.oc1.ca-montreal-1.gbtt5qeuaaa2c.ab4xkljrnljt7v4bebml7mqhonun4gwf2wcs5hjiejx6vni65f2vyht7wl6a"; + private static final String testVaultId = + "ocid1.vault.oc1.ca-montreal-1.gbtt5qeuaaa2c.ab4xkljrikzzvhy2uvlafsg3qy5hwmngs74sqbexnwsgxhj7qii5llb7f7vq"; + + private static final String testTenantId = TENANT_NOSQL_DEV; + + @BeforeClass + public static void staticSetUp() throws Exception { + cloudRunning = Boolean.getBoolean(USEMC_PROP); + Assume.assumeTrue( + "Skipping ConfigurationTest if not run against minicloud", + cloudRunning); + RestAPITestBase.staticSetUp(); + } + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + removeCmekAndDedicatedTenancy(testTenantId); + } + + @Override + @After + public void tearDown() throws Exception { + removeCmekAndDedicatedTenancy(testTenantId); + 
super.tearDown(); + } + + private void removeCmekAndDedicatedTenancy(String tenantId) { + Configuration config = getConfiguration(tenantId); + if (config instanceof HostedConfiguration) { + if (((HostedConfiguration) config).getKmsKey().getId() != null) { + unassignKmsKey(); + } + setDedicatedTenantId(null); + } + } + + @Test + public void basicTest() { + /* + * multi-tenancies pod + * + * Get configuration should return MultiTenancyConfiguration + */ + Configuration config = getConfiguration(testTenantId); + assertTrue(config instanceof MultiTenancyConfiguration); + + /* + * Assign the pod to testTenantId + */ + setDedicatedTenantId(testTenantId); + assertKmsKey(null /* keyId */); + + /* + * Assign key + */ + updateConfiguration(testKeyId, null); + assertKmsKey(testKeyId); + + /* + * Rotate key + */ + updateConfiguration(testUpdateKeyId, testVaultId); + assertKmsKey(testUpdateKeyId); + + /* + * Remove key + */ + unassignKmsKey(); + assertKmsKey(null /* keyId */); + } + + @Test + public void testDryRun() { + + String workRequestId; + + /* + * Assign the pod to testTenantId. + */ + setDedicatedTenantId(testTenantId); + assertKmsKey(null /* keyId */); + + /* + * Dry run: set key + */ + updateConfiguration(testTenantId, testKeyId, null /* vaultId */, + true /* dryRun */, true /* wait */); + assertKmsKey(null /* keyId */); + + /* + * Dry run: assign key + * + * IllegalArgument: Invalid key + */ + workRequestId = updateConfiguration(testTenantId, "invalidKey", + null, /* vaultId */ + true /* dryRun */, + false /* wait */); + waitForStatus(workRequestId, "IllegalArgument", Status.Failed); + + /* + * Dry run: assign key + * + * IllegalArgument: Invalid key + */ + workRequestId = updateConfiguration(testTenantId, testTenantId, + null, /* vaultId */ + true /* dryRun */, + false /* wait */); + waitForStatus(workRequestId, "IllegalArgument", Status.Failed); + + /* + * Dry run: assign key + * + * IllegalArgument: The Kms Key doesn't belong to the Vault 'invalidVaultId' + */ + workRequestId = updateConfiguration(testTenantId, testKeyId, + "invalidVaultId", + true /* dryRun */, + false /* wait */); + waitForStatus(workRequestId, "IllegalArgument", Status.Failed); + + /* + * Dry run: remove key + * + * IllegalArgument: No kms key is assigned to the service + */ + workRequestId = unassignKmsKey(testTenantId, true /* dryRun */, + false /* wait */); + waitForStatus(workRequestId, "IllegalArgument", Status.Failed); + + /* + * Set the kms key + */ + updateConfiguration(testKeyId, null); + assertKmsKey(testKeyId /* keyId */); + + /* + * Dry run: remove key + */ + unassignKmsKey(testTenantId, true /* dryRun */, true /* wait */); + assertKmsKey(testKeyId /* keyId */); + + + /* Invalid parameters */ + try { + updateConfiguration(null, testKeyId, null, false); + fail("updateConfiguration should fail with NPE"); + } catch (NullPointerException ex) { + } + + try { + updateConfiguration(testTenantId, "", null, false); + fail("updateConfiguration should fail with 400-InvalidParameter"); + } catch (BmcException ex) { + assertEquals(400, ex.getStatusCode()); + } + + try { + updateConfiguration(testTenantId, null, null, false); + fail("updateConfiguration should fail with 400-InvalidParameter"); + } catch (BmcException ex) { + assertEquals(400, ex.getStatusCode()); + } + } + + /* + * Get configuration + */ + private Configuration getConfiguration(String tenantId) { + GetConfigurationRequest req = GetConfigurationRequest.builder() + .compartmentId(tenantId) + .build(); + GetConfigurationResponse res = 
client.getConfiguration(req); + Configuration config = res.getConfiguration(); + /* Sleep 250ms to avoid throttling. */ + try { + Thread.sleep(250); + } catch (InterruptedException e) { + } + return config; + } + + /* + * Update kms key + */ + private String updateConfiguration(String keyId, String vaultId) { + return updateConfiguration(testTenantId, keyId, vaultId, false, true); + } + + private String updateConfiguration(String tenantId, + String keyId, + String vaultId, + boolean dryRun, + boolean wait) { + + UpdateConfigurationResponse res = + updateConfiguration(tenantId, keyId, vaultId, dryRun); + assertEquals(202, res.get__httpStatusCode__()); + + String workRequestId = res.getOpcWorkRequestId(); + assertNotNull(workRequestId); + + if (!wait) { + return workRequestId; + } + waitForStatus(workRequestId, Status.Succeeded); + return workRequestId; + } + + private UpdateConfigurationResponse updateConfiguration(String tenantId, + String keyId, + String vaultId, + boolean dryRun) { + + KmsKey.Builder key = KmsKey.builder().id(keyId); + if (vaultId != null) { + key.kmsVaultId(vaultId); + } + + UpdateHostedConfigurationDetails details = + UpdateHostedConfigurationDetails.builder() + .kmsKey(key.build()) + .build(); + + UpdateConfigurationRequest req = + UpdateConfigurationRequest.builder() + .compartmentId(tenantId) + .updateConfigurationDetails(details) + .isOpcDryRun(dryRun) + .build(); + + return client.updateConfiguration(req); + } + + /* + * Remove kms key + */ + private String unassignKmsKey() { + return unassignKmsKey(testTenantId, false /* dryRun */, true /* wait */); + } + + private String unassignKmsKey(String tenantId, + boolean dryRun, + boolean wait) { + UnassignKmsKeyRequest req = UnassignKmsKeyRequest + .builder() + .compartmentId(tenantId) + .isOpcDryRun(dryRun) + .build(); + UnassignKmsKeyResponse res = client.unassignKmsKey(req); + assertEquals(202, res.get__httpStatusCode__()); + + String workRequestId = res.getOpcWorkRequestId(); + assertNotNull(workRequestId); + + if (!wait) { + return workRequestId; + } + waitForStatus(workRequestId, Status.Succeeded); + return workRequestId; + } + + /* Validate the key Id in the configuration */ + private void assertKmsKey(String keyId) { + Configuration config = getConfiguration(testTenantId); + assertTrue(config instanceof HostedConfiguration); + + HostedConfiguration hconfig = (HostedConfiguration)config; + KmsKey key = hconfig.getKmsKey(); + assertNotNull(key); + assertEquals(keyId, key.getId()); + if (keyId != null) { + assertEquals(testVaultId, key.getKmsVaultId()); + assertNotNull(key.getTimeCreated()); + assertNotNull(key.getTimeUpdated()); + } + assertEquals(KmsKey.KmsKeyState.Active, key.getKmsKeyState()); + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/rest/FilterTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/rest/FilterTest.java new file mode 100644 index 00000000..0bf23365 --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/rest/FilterTest.java @@ -0,0 +1,1203 @@ +/*- + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * See the file LICENSE for redistribution information. 
+ * + */ + +package oracle.nosql.proxy.rest; + +import static oracle.nosql.proxy.protocol.HttpConstants.FILTERS_PATH; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.lang.reflect.Type; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import oracle.nosql.util.HttpRequest; +import oracle.nosql.util.HttpResponse; +import oracle.nosql.util.filter.Rule; +import oracle.nosql.util.filter.Rule.Action; + +import com.google.gson.reflect.TypeToken; +import com.oracle.bmc.ClientConfiguration.ClientConfigurationBuilder; +import com.oracle.bmc.model.BmcException; +import com.oracle.bmc.nosql.model.ChangeTableCompartmentDetails; +import com.oracle.bmc.nosql.model.CreateIndexDetails; +import com.oracle.bmc.nosql.model.IndexKey; +import com.oracle.bmc.nosql.model.KmsKey; +import com.oracle.bmc.nosql.model.PreparedStatement; +import com.oracle.bmc.nosql.model.QueryDetails; +import com.oracle.bmc.nosql.model.TableLimits; +import com.oracle.bmc.nosql.model.UpdateHostedConfigurationDetails; +import com.oracle.bmc.nosql.model.UpdateRowDetails; +import com.oracle.bmc.nosql.model.UpdateTableDetails; +import com.oracle.bmc.nosql.requests.ChangeTableCompartmentRequest; +import com.oracle.bmc.nosql.requests.CreateIndexRequest; +import com.oracle.bmc.nosql.requests.DeleteIndexRequest; +import com.oracle.bmc.nosql.requests.DeleteRowRequest; +import com.oracle.bmc.nosql.requests.DeleteTableRequest; +import com.oracle.bmc.nosql.requests.GetConfigurationRequest; +//import com.oracle.bmc.nosql.requests.GetConfigurationRequest; +import com.oracle.bmc.nosql.requests.GetIndexRequest; +import com.oracle.bmc.nosql.requests.GetRowRequest; +import com.oracle.bmc.nosql.requests.GetWorkRequestRequest; +import com.oracle.bmc.nosql.requests.ListIndexesRequest; +import com.oracle.bmc.nosql.requests.ListTableUsageRequest; +import com.oracle.bmc.nosql.requests.ListTablesRequest; +import com.oracle.bmc.nosql.requests.ListWorkRequestsRequest; +import com.oracle.bmc.nosql.requests.PrepareStatementRequest; +import com.oracle.bmc.nosql.requests.QueryRequest; +import com.oracle.bmc.nosql.requests.SummarizeStatementRequest; +import com.oracle.bmc.nosql.requests.UnassignKmsKeyRequest; +import com.oracle.bmc.nosql.requests.UpdateConfigurationRequest; +import com.oracle.bmc.nosql.requests.UpdateRowRequest; +import com.oracle.bmc.nosql.requests.UpdateTableRequest; +import com.oracle.bmc.nosql.responses.PrepareStatementResponse; +import com.oracle.bmc.nosql.responses.UnassignKmsKeyResponse; +import com.oracle.bmc.nosql.responses.UpdateConfigurationResponse; +import com.oracle.bmc.retrier.RetryConfiguration; +import com.oracle.bmc.util.CircuitBreakerUtils; + +import org.junit.Assume; +import org.junit.BeforeClass; +import org.junit.Test; + +import io.netty.handler.codec.http.HttpResponseStatus; +import io.netty.util.internal.logging.InternalLoggerFactory; +import io.netty.util.internal.logging.JdkLoggerFactory; + +/* + * This test suite is only for miniCloud test. + * + * Tests filtering request based on rules. + * + * The 2 methods blockOps() and executeOps() are used to execute request: + * o blockOps() expects to get specified status code due to being blocked. + * o executeOps() expect the request executed successfully. 
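+ * (A DROP_REQUEST rule produces no response at all, so blockOps() expects the client-side status code -1, i.e. a read timeout.)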
+ * + * Basically, the test add rule and then run operations using above 2 methods + * to verify that the operations matching the rule will be blocked, those that + * don't match the rule can execute successfully + */ +public class FilterTest extends RestAPITestBase { + + private final int REQUEST_WAIT_MS = 3000; + + private final String proxyFilterUrl = + getProxyEndpoint() + "/V0/" + FILTERS_PATH; + private String scFilterUrl; + private final HttpRequest httpRequest = new HttpRequest().disableRetry(); + + private final String tableName = "restFilterTest"; + private final String indexName = "idxName"; + + private final String createTableDdl = "create table if not exists " + + tableName + "(id integer, name string, primary key(id))"; + private final String alterTableDdl = "alter table " + tableName + + "(add i1 integer)"; + + private final TableLimits limits = TableLimits.builder() + .maxReadUnits(100) + .maxWriteUnits(20) + .maxStorageInGBs(1) + .build(); + + private static final Map> tags = + new HashMap<>(); + static { + Map props = new HashMap<>(); + props.put("type", "backup"); + props.put("purpose", "WebTier"); + tags.put("Operations", props); + } + + private static final Rule.Action dropRequest = Rule.DROP_REQUEST; + private static final Rule.Action returnError = + new Rule.ReturnErrorAction(102 /* SERVICE_UNAVAILABLE*/, + "server is undergoing maintenance"); + /* 503 Service Unavailable */ + private static final int returnErrorRespCode = 503; + + private final String selectStmt = "select * from " + tableName; + private final String insertStmt = "insert into " + tableName + + " values(3, 'abc')"; + private final String deleteStmt = "delete from " + tableName + + " where id = 3"; + private final String updateStmt = "update " + tableName + + " set name=\"name_upd\" where id = 3"; + private String workRequestId = null; + + private String selectPrepStmt; + private String insertPrepStmt; + private String updatePrepStmt; + private String deletePrepStmt; + private String tableOcid; + + /* + * Operations + */ + private final OpWrapper createTable = new OpWrapper("createTable") { + @Override + void execOp(String tableNameOrid, boolean isTableId) { + createTable(tableNameOrid, createTableDdl, limits, false /* wait */); + } + }; + + private final OpWrapper alterTable = new OpWrapper("alterTable") { + @Override + void execOp(String tableNameOrId, boolean isTableId) { + updateTable(tableNameOrId, isTableId, alterTableDdl, null, null); + } + }; + + private final OpWrapper updateTableLimits = new OpWrapper("updateLimits") { + @Override + void execOp(String tableNameOrId, boolean isTableId) { + updateTable(tableNameOrId, isTableId, null, null, limits); + } + }; + + private final OpWrapper updateTableTags = new OpWrapper("updateTableTags") { + @Override + void execOp(String tableNameOrId, boolean isTableId) { + updateTable(tableNameOrId, isTableId, null, tags, null); + } + }; + + private final OpWrapper dropTable = new OpWrapper("dropTable") { + @Override + void execOp(String tableNameOrId, boolean isTableId) { + deleteTable(tableNameOrId, isTableId); + } + }; + + private final OpWrapper getTable = new OpWrapper("getTable") { + @Override + void execOp(String tableNameOrid, boolean isTableId) { + getTable((isTableId ? 
null : getCompartmentId()), tableNameOrid); + } + }; + + private final OpWrapper listTables = new OpWrapper("listTables") { + @Override + void execOp(String tableNameOrid, boolean isTableId) { + ListTablesRequest req = ListTablesRequest.builder() + .compartmentId(getCompartmentId()) + .build(); + client.listTables(req); + } + }; + + private final OpWrapper createIndex = new OpWrapper("createIndex") { + @Override + void execOp(String tableNameOrId, boolean isTableId) { + createIndex(tableNameOrId, isTableId); + } + }; + + private final OpWrapper dropIndex = new OpWrapper("dropIndex") { + @Override + void execOp(String tableNameOrId, boolean isTableId) { + deleteIndex(tableNameOrId, isTableId); + } + }; + + private final OpWrapper listIndexes = new OpWrapper("listIndexes") { + @Override + void execOp(String tableNameOrId, boolean isTableId) { + listIndexes(tableNameOrId, isTableId); + } + }; + + private final OpWrapper getIndex = new OpWrapper("getIndex") { + @Override + void execOp(String tableNameOrId, boolean isTableId) { + getIndex(tableNameOrId, isTableId); + } + }; + + private final OpWrapper changeCompartment = new OpWrapper("changeCompt") { + @Override + void execOp(String tableNameOrId, boolean isTableId) { + changeCompartment(tableNameOrId, isTableId); + } + }; + + private final OpWrapper listWorkRequests = new OpWrapper("listWorkReqs") { + + @Override + void execOp(String tableNameOrId, boolean isTableId) { + ListWorkRequestsRequest req = ListWorkRequestsRequest.builder() + .compartmentId(getCompartmentId()) + .build(); + client.listWorkRequests(req); + } + }; + + private final OpWrapper getWorkRequest = new OpWrapper("getWorkRequest") { + + @Override + void execOp(String tableNameOrId, boolean isTableId) { + GetWorkRequestRequest req = GetWorkRequestRequest.builder() + .workRequestId(workRequestId) + .build(); + client.getWorkRequest(req); + } + }; + + private final OpWrapper getTableUsage = new OpWrapper("getTableUsage") { + @Override + void execOp(String tableNameOrId, boolean isTableId) { + listTableUsage(tableNameOrId, isTableId); + } + }; + + private final OpWrapper put = new OpWrapper("put") { + @Override + void execOp(String tableNameOrId, boolean isTableId) { + put(tableNameOrId, isTableId); + } + }; + + private final OpWrapper get = new OpWrapper("get") { + @Override + void execOp(String tableNameOrId, boolean isTableId) { + get(tableNameOrId, isTableId); + } + }; + + private final OpWrapper delete = new OpWrapper("delete") { + @Override + void execOp(String tableNameOrId, boolean isTableId) { + delete(tableNameOrId, isTableId); + } + }; + + private final OpWrapper prepare = new OpWrapper("prepare") { + @Override + void execOp(String tableNameOrId, boolean isTableId) { + PrepareStatementRequest req = PrepareStatementRequest.builder() + .compartmentId(getCompartmentId()) + .statement(selectStmt) + .build(); + client.prepareStatement(req); + } + }; + + private final OpWrapper selectQuery = new OpWrapper("selectQuery") { + @Override + void execOp(String tableNameOrId, boolean isTableId) { + runQuery(selectStmt); + } + }; + + private final OpWrapper selectPrepQuery = new OpWrapper("selectPrepQuery") { + @Override + void execOp(String tableNameOrId, boolean isTableId) { + runPreparedQuery(selectPrepStmt); + } + }; + + private final OpWrapper insertQuery = new OpWrapper("insertQuery") { + @Override + void execOp(String tableNameOrId, boolean isTableId) { + runQuery(insertStmt); + } + }; + + private final OpWrapper insertPrepQuery = new OpWrapper("insertPrepQuery") { + 
@Override + void execOp(String tableNameOrId, boolean isTableId) { + runPreparedQuery(insertPrepStmt); + } + }; + + private final OpWrapper deleteQuery = new OpWrapper("deleteQuery") { + @Override + void execOp(String tableNameOrId, boolean isTableId) { + runQuery(deleteStmt); + } + }; + + private final OpWrapper deletePrepQuery = new OpWrapper("deletePrepQuery") { + @Override + void execOp(String tableNameOrId, boolean isTableId) { + runPreparedQuery(deletePrepStmt); + } + }; + + private final OpWrapper updateQuery = new OpWrapper("updateQuery") { + @Override + void execOp(String tableNameOrId, boolean isTableId) { + runQuery(updateStmt); + } + }; + + private final OpWrapper updatePrepQuery = new OpWrapper("updatePrepQuery") { + @Override + void execOp(String tableNameOrId, boolean isTableId) { + runPreparedQuery(updatePrepStmt); + } + }; + + private final OpWrapper summarize = new OpWrapper("summarize") { + @Override + void execOp(String tableNameOrId, boolean isTableId) { + SummarizeStatementRequest req = SummarizeStatementRequest.builder() + .compartmentId(getCompartmentId()) + .statement(selectStmt) + .build(); + client.summarizeStatement(req); + } + }; + + private final OpWrapper getConfiguration = + new OpWrapper("getConfiguration") { + @Override + void execOp(String tableNameOrId, boolean isTableId) { + GetConfigurationRequest req = GetConfigurationRequest.builder() + .compartmentId(TENANT_NOSQL_DEV) + .build(); + client.getConfiguration(req); + } + }; + + private final OpWrapper updateConfiguration = + new OpWrapper("updateConfiguration") { + private final String testKeyId = + "ocid1.key.oc1.ca-montreal-1.gbtt5qeuaaa2c.ab4xkljr2eqgqifrmpxqddjdtic2lfj4owmln4dyfu4hhifw677hanrk5pna"; + + @Override + void execOp(String tableNameOrId, boolean isTableId) { + KmsKey.Builder key = KmsKey.builder().id(testKeyId); + + UpdateHostedConfigurationDetails details = + UpdateHostedConfigurationDetails.builder() + .kmsKey(key.build()) + .build(); + + UpdateConfigurationRequest req = + UpdateConfigurationRequest.builder() + .compartmentId(TENANT_NOSQL_DEV) + .updateConfigurationDetails(details) + .build(); + + client.updateConfiguration(req); + } + }; + + private final OpWrapper unassignKmsKey = new OpWrapper("unassignKmsKey") { + @Override + void execOp(String tableNameOrId, boolean isTableId) { + UnassignKmsKeyRequest req = UnassignKmsKeyRequest + .builder() + .compartmentId(TENANT_NOSQL_DEV) + .build(); + client.unassignKmsKey(req); + } + }; + + private OpWrapper[] ddlOps = new OpWrapper[] { + createTable, + alterTable, + updateTableLimits, + updateTableTags, + createIndex, + dropIndex, + changeCompartment, + dropTable + }; + + private OpWrapper[] writeOps = new OpWrapper[] { + put, + delete, + insertQuery, + insertPrepQuery, + deleteQuery, + deletePrepQuery, + updateQuery, + updatePrepQuery, + }; + + private OpWrapper[] readOps = new OpWrapper[] { + get, + summarize, + prepare, + selectQuery, + selectPrepQuery, + + getTable, + getIndex, + listTables, + listIndexes, + + getTableUsage, + getWorkRequest, + listWorkRequests + }; + + @BeforeClass + public static void staticSetUp() throws Exception { + Assume.assumeTrue( + "Skipping FilterTest if not minicloud test", + Boolean.getBoolean(USEMC_PROP)); + + RestAPITestBase.staticSetUp(); + } + + @Override + protected void configClient(ClientConfigurationBuilder builder) { + /* + * Disable circuit breaker(enabled by default) in this test, the circuit + * breaker is in client, it will block requests once the number of + * failed requests reaches 
threshold. The filter test is intended to + * test requests will be blocked by proxy, so disable the circuit + * breaker. + */ + super.configClient(builder); + builder.readTimeoutMillis(REQUEST_WAIT_MS) + .retryConfiguration(RetryConfiguration.NO_RETRY_CONFIGURATION) + .circuitBreakerConfiguration( + CircuitBreakerUtils.getNoCircuitBreakerConfiguration()); + } + + @Override + public void setUp() throws Exception { + /* + * Configures Netty logging to suppress the logging output when + * try to instantiate slf4j logger firstly. + */ + InternalLoggerFactory.setDefaultFactory(JdkLoggerFactory.INSTANCE); + removeAllRules(); + super.setUp(); + initTableAndPreparedStmts(); + } + + private void initTableAndPreparedStmts() { + workRequestId = createTable(tableName, createTableDdl, limits); + createIndex(tableName, indexName, new String[]{"name"}); + tableOcid = getTableId(tableName); + + selectPrepStmt = prepare(selectStmt); + insertPrepStmt = prepare(insertStmt); + updatePrepStmt = prepare(updateStmt); + deletePrepStmt = prepare(deleteStmt); + } + + @Override + public void tearDown() throws Exception { + removeAllRules(); + super.tearDown(); + } + + /* + * Filter requests by the rule containing operations only + */ + @Test + public void testOpRule() { + /* + * all operations should be blocked by the rule: + * {"name":"block_ops", "operations":["ALL"]} + */ + Rule rule = addRule("block_ops", + dropRequest, + new String[]{"all"}, + false /* persist */); + blockOps(rule, ddlOps); + blockOps(rule, writeOps); + blockOps(rule, readOps); + deleteRule("block_ops", false /* persist */); + + /* + * ddl and write operations should be blocked by the rule: + * {"name":"block_ops", "operations":["DDL", "WRITE"]} + */ + rule = addRule("block_ops", + returnError, + new String[] {"ddl", "write"}, + false /* persist */); + blockOps(rule, ddlOps); + blockOps(rule, writeOps); + executeOps(readOps); + } + + /* + * Test filtering requests by the rule with principal tenant and/or user + * information + */ + @Test + public void testPrincipalRule() { + String principalTenantId = getTenantId(); + String principalId = getUserId(); + + /* + * Add rule to block all operations from the user of specified tenant: + * { + * "name": "block_tenant", + * "tenant": , + * "operations":["ddl"] + * } + */ + Rule rule = addRule("block_tenant", + dropRequest, + principalTenantId, + null, /* user */ + null, /* table */ + new String[] {"ddl"}, + false /* persist */); + blockOps(rule, ddlOps); + executeOps(put, getTable); + assertTrue(deleteRule("block_tenant", false)); + + /* + * Add rule to block ddl and write requests from the specified user. + * { + * "name": "block_user", + * "user": , + * "table": , + * "operations":[write] + * } + */ + rule = addRule("block_user", + returnError, + null, /* tenant */ + principalId, + null, /* table */ + new String[]{"write"}, + false /* persist */); + blockOps(rule, writeOps); + executeOps(get, createTable); + } + + /* + * Filter requests by the rule containing table ocid. 
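+ * Requests issued with the table name and with the table OCID should both match the rule.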
+ */ + @Test + public void testTableRule() { + OpWrapper[] table_ddl_ops = new OpWrapper[] { + alterTable, + updateTableLimits, + updateTableTags, + createIndex, + dropIndex, + dropTable, + changeCompartment, + }; + + OpWrapper[] table_write_ops = new OpWrapper[] { + put, + delete, + insertQuery, + deleteQuery + }; + + OpWrapper[] table_read_ops = new OpWrapper[] { + getTable, + listIndexes, + getTableUsage, + + prepare, + selectQuery, + get + }; + + /* + * Add rule to block all requests to the specified target table: + * { + * "name": "block_table", + * "table": , + * "operations": ["ALL] + * } + */ + Rule rule = addRule("block_table", + returnError, + null, /* tenant */ + null, /* user */ + tableOcid, /* table */ + new String[] {"all"}, /* operations */ + false); /* persist */ + blockOps(rule, table_ddl_ops); + blockOps(rule, table_write_ops); + blockOps(rule, table_read_ops); + executeOps(listTables, listWorkRequests, getWorkRequest); + deleteRule("block_table", false /* persist */); + + /* + * Update the rule to block ddl requests to the specified target table: + * { + * "name": "block_table", + * "table": , + * "operations": ["dll"] + * } + */ + rule = addRule("block_table", + dropRequest, + null, /* tenant */ + null, /* user */ + tableOcid, + new String[]{"ddl"}, + false); /* persist */ + blockOps(rule, table_ddl_ops); + blockOpsWithTableOcid(rule, tableOcid, table_ddl_ops); + + executeOps(table_write_ops); + executeOps(table_read_ops); + executeOpsWithTableOcid(tableOcid, listIndexes, getTable, + getTableUsage, put, get, delete); + } + + /* + * Test filtering query request. + * + * Query can be a read or write operation, the actual operation is deferred + * to determine after parse the statement in handleQuery(). + * + * This test is to verify query operation can be blocked as expected by the + * rule that blocks "read" or "write" operation. + */ + @Test + public void testQuery() { + /* + * Add rule to block read requests to the specified target table: + * { + * "name":"block_query", + * "table": tableOcid, + * "operations": ["READ"] + * } + * + * The prepare, summarize and select query should be blocked, the + * insert/delete query should execute successfully. + */ + Rule rule = addRule("block_query", + returnError, + null, /* tenant */ + null, /* user */ + null, /* table */ + new String[]{"read"}, + false); /* persist */ + blockOps(rule, prepare, summarize, selectQuery, selectPrepQuery, + updateQuery, updatePrepQuery, deleteQuery, deletePrepQuery); + executeOps(insertQuery, insertPrepQuery); + deleteRule("block_query", false /* persist */); + + /* + * Update rule to block write requests to the specified target table: + * { + * "name":"block_query", + * "table": tableOcid, + * "operations": ["WRITE"] + * } + * + * The delete/insert query should be blocked, and prepare, summarize and + * select query should execute successfully. 
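+ * (Update and delete statements are blocked by both the read rule and the write rule, apparently because they read the target rows before modifying them.)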
+ */ + rule = addRule("block_query", + dropRequest, + null, /* tenant */ + null, /* user */ + tableOcid, + new String[]{"write"}, + false); /* persist */ + blockOps(rule, insertQuery, insertPrepQuery, deleteQuery, + deletePrepQuery, updateQuery, updatePrepQuery); + executeOps(prepare, summarize, selectQuery, selectPrepQuery); + } + + /* Test persistent rule */ + @Test + public void testPersistentRule() { + String principalTenantId = getTenantId(); + String principalId = getUserId(); + + /* + * Add rule to block all operations from the user of specified tenant: + * { + * "name": "block_tenant", + * "tenant": , + * "operations":["ddl"] + * } + */ + Rule rule = addRule("block_tenant", + dropRequest, + principalTenantId, + null, /* user */ + null, /* table */ + new String[] {"ddl"}, + true); /* persist */ + reloadPersistentRules(); + blockOps(rule, ddlOps); + executeOps(put, getTable); + + assertTrue(deleteRule("block_tenant", true /* persist */)); + reloadPersistentRules(); + executeOps(updateTableLimits, createIndex, dropIndex); + + /* + * Add rule to block ddl and write requests from the specified user. + * { + * "name": "block_user", + * "user": , + * "table": , + * "operations":[write] + * } + */ + rule = addRule("block_user", + returnError, + null, /* user */ + principalId, + tableOcid, + new String[]{"write"}, + true); /* persist */ + reloadPersistentRules(); + blockOps(rule, writeOps); + executeOps(get, createTable); + } + + @Test + public void testConfigurationOps() { + try { + setDedicatedTenantId(TENANT_NOSQL_DEV); + + /* + * all configuration operations should be blocked by the rule: + * {"name":"block_ops", "operations":["ALL"]} + */ + Rule rule = addRule("block_ops", + dropRequest, + new String[]{"config_read", "config_update"}, + false /* persist */); + blockOps(rule, + getConfiguration, + updateConfiguration, + unassignKmsKey); + + /* + * delete the block_ops, the configuration operations can be + * executed + */ + deleteRule("block_ops", false /* persist */); + executeOps(getConfiguration, updateConfiguration, unassignKmsKey); + + /* + * The updateConfiguration and unassignKmsKey should be blocked by + * the rule: + * {"name":"block_config_update", "operations":["config_update"]} + */ + rule = addRule("block_config_update", + returnError, + new String[]{"CONFIG_UPDATE"}, + false /* persist */); + blockOps(rule, updateConfiguration, unassignKmsKey); + executeOps(getConfiguration, getTable); + + /* + * delete the block_config_update, the configuration operations can + * be executed. + */ + deleteRule("block_config_update", false /* persist */); + executeOps(getConfiguration, updateConfiguration, unassignKmsKey); + } finally { + setDedicatedTenantId(null); + } + } + + private void executeOps(OpWrapper... ops) { + for (OpWrapper op : ops) { + op.exec(); + } + } + + private void blockOps(Rule rule, OpWrapper... ops) { + int expCode = getExpectedStatusCode(rule); + for (OpWrapper op : ops) { + op.exec(expCode); + } + } + + private void executeOpsWithTableOcid(String tableId, OpWrapper... ops) { + for (OpWrapper op : ops) { + op.execWithOcid(tableId); + } + } + + private void blockOpsWithTableOcid(Rule rule, + String tableId, + OpWrapper... 
ops) { + int expCode = getExpectedStatusCode(rule); + for (OpWrapper op : ops) { + op.execWithOcid(tableId, expCode); + } + } + + private int getExpectedStatusCode(Rule rule) { + switch (rule.getActionType()) { + case DROP_REQUEST: + return -1; + case RETURN_ERROR: + return returnErrorRespCode; + default: + fail("Unexpceted action type: " + rule.getAction()); + } + return 0; + } + + private void put(String tableNameOrId, boolean isTableId) { + Map row = new HashMap(); + row.put("id", 1); + row.put("name", "name1"); + UpdateRowDetails info = UpdateRowDetails.builder() + .compartmentId((isTableId ? null : getCompartmentId())) + .value(row) + .build(); + UpdateRowRequest req = UpdateRowRequest.builder() + .tableNameOrId(tableNameOrId) + .updateRowDetails(info) + .build(); + client.updateRow(req); + } + + private void get(String tableNameOrId, boolean isTableId) { + List key = new ArrayList(); + key.add("id:1"); + GetRowRequest req = GetRowRequest.builder() + .tableNameOrId(tableNameOrId) + .compartmentId((isTableId ? null : getCompartmentId())) + .key(key) + .build(); + client.getRow(req); + } + + private void delete(String tableNameOrId, boolean isTableId) { + List key = new ArrayList(); + key.add("id:1"); + DeleteRowRequest req = DeleteRowRequest.builder() + .tableNameOrId(tableNameOrId) + .compartmentId((isTableId ? null : getCompartmentId())) + .key(key) + .build(); + client.deleteRow(req); + } + + private void updateTable(String tableNameOrId, + boolean isTableId, + String ddl, + Map> defTags, + TableLimits tableLimits) { + + UpdateTableDetails.Builder builder = UpdateTableDetails.builder(); + if (!isTableId) { + builder.compartmentId(getCompartmentId()); + } + if (ddl != null) { + builder.ddlStatement(ddl); + } else if (defTags != null) { + builder.definedTags(defTags); + } else if (tableLimits != null) { + builder.tableLimits(tableLimits); + } else { + fail("One of ddl, definedTags and tableLimits should be specified"); + } + + UpdateTableDetails info = builder.build(); + UpdateTableRequest req = UpdateTableRequest.builder() + .tableNameOrId(tableNameOrId) + .updateTableDetails(info) + .build(); + client.updateTable(req); + } + + private void createIndex(String tableNameOrId, boolean isTableId) { + /* Create Index */ + List keys = new ArrayList(); + keys.add(IndexKey.builder().columnName("name").build()); + + CreateIndexDetails info = CreateIndexDetails.builder() + .name(indexName) + .compartmentId((isTableId ? null : getCompartmentId())) + .keys(keys) + .build(); + CreateIndexRequest req = CreateIndexRequest.builder() + .tableNameOrId(tableNameOrId) + .createIndexDetails(info) + .build(); + client.createIndex(req); + } + + private void deleteIndex(String tableNameOrId, boolean isTableId) { + /* Delete Index */ + DeleteIndexRequest req = DeleteIndexRequest.builder() + .tableNameOrId(tableNameOrId) + .compartmentId((isTableId ? null : getCompartmentId())) + .indexName(indexName) + .build(); + client.deleteIndex(req); + } + + private void listIndexes(String tableNameOrId, boolean isTableId) { + /* List Indexes */ + ListIndexesRequest req = ListIndexesRequest.builder() + .tableNameOrId(tableNameOrId) + .compartmentId((isTableId ? null : getCompartmentId())) + .build(); + client.listIndexes(req); + } + + private void getIndex(String tableNameOrId, boolean isTableId) { + /* Get Index */ + GetIndexRequest req = GetIndexRequest.builder() + .tableNameOrId(tableNameOrId) + .compartmentId((isTableId ? 
null : getCompartmentId())) + .indexName(indexName) + .build(); + client.getIndex(req); + } + + private void deleteTable(String tableNameOrId, boolean isTableId) { + DeleteTableRequest req = DeleteTableRequest.builder() + .compartmentId((isTableId ? null : getCompartmentId())) + .tableNameOrId(tableNameOrId) + .isIfExists(true) + .build(); + client.deleteTable(req); + } + + private void changeCompartment(String tableNameOrId, boolean isTableId) { + String toCompId = + "ocid1.compartment.oc1..aaaaaaaahy6aozjru5grkp2dhrhqfdwh4hihd6fpeafqdxvlfb6scf7hotnq"; + ChangeTableCompartmentDetails info = + ChangeTableCompartmentDetails.builder() + .fromCompartmentId((isTableId ? null : getCompartmentId())) + .toCompartmentId(toCompId) + .build(); + ChangeTableCompartmentRequest req = + ChangeTableCompartmentRequest.builder() + .tableNameOrId(tableNameOrId) + .changeTableCompartmentDetails(info) + .build(); + client.changeTableCompartment(req); + } + + private void listTableUsage(String tableNameOrId, boolean isTableId) { + ListTableUsageRequest req = ListTableUsageRequest.builder() + .tableNameOrId(tableNameOrId) + .compartmentId((isTableId ? null : getCompartmentId())) + .limit(1) + .build(); + client.listTableUsage(req); + } + + private Rule addRule(String name, + Action action, + String[] operations, + boolean persist) { + return addRule(name, action, null /* tenant */, null /* user */, + null /* table */, operations, persist); + } + + private Rule addRule(String name, + Action action, + String testTenantId, + String testUserId, + String tableId, + String[] operations, + boolean persist) { + + Rule rule = Rule.createRule(name, action, testTenantId, + testUserId, tableId, operations); + addRule(rule.toJson(), persist); + + rule = getRule(name, persist); + assertNotNull(rule); + return rule; + } + + private void addRule(String payload, boolean persist) { + addRule(payload, HttpResponseStatus.OK.code(), persist); + } + + private void addRule(String payload, int statusCode, boolean persist) { + String url = getUrl(null, persist); + HttpResponse resp = httpRequest.doHttpPost(url, payload); + assertEquals(statusCode, resp.getStatusCode()); + } + + private Rule getRule(String name, boolean persist) { + return getRule(name, HttpResponseStatus.OK.code(), persist); + } + + private Rule getRule(String name, int statusCode, boolean persist) { + String url = getUrl(name, persist); + HttpResponse resp = httpRequest.doHttpGet(url); + assertEquals(statusCode, resp.getStatusCode()); + if (statusCode == HttpResponseStatus.OK.code()) { + return parseRuleFromResponse(resp); + } + return null; + } + + private boolean deleteRule(String name, boolean persist) { + return deleteRule(name, HttpResponseStatus.OK.code(), persist); + } + + private boolean deleteRule(String name, int statusCode, boolean persist) { + String url = getUrl(name, persist); + HttpResponse resp = httpRequest.doHttpDelete(url, null); + assertEquals(statusCode, resp.getStatusCode()); + if (statusCode == HttpResponseStatus.OK.code()) { + return resp.getOutput().contains("deleted"); + } + return false; + } + + private List listRules(boolean persist) { + String url = getUrl(null, persist); + HttpResponse resp = httpRequest.doHttpGet(url); + assertEquals(HttpResponseStatus.OK.code(), resp.getStatusCode()); + return parseRulesFromResponse(resp); + } + + private void reloadPersistentRules() { + String url = getUrl("reload", false); + HttpResponse resp = httpRequest.doHttpPut(url, null); + assertEquals(HttpResponseStatus.OK.code(), resp.getStatusCode()); + } + 
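+    /*
+     * Illustrative sketch (not exercised by the tests above): managing a
+     * non-persistent rule is just a POST and a DELETE against the proxy
+     * filter endpoint, which is what addRule()/deleteRule() do under the
+     * hood. The rule name "sketch_rule" is arbitrary.
+     */
+    @SuppressWarnings("unused")
+    private void addAndRemoveRuleSketch() {
+        /* Build a rule that drops all DDL requests, with no tenant/user/table scope */
+        Rule rule = Rule.createRule("sketch_rule", dropRequest,
+                                    null /* tenant */, null /* user */,
+                                    null /* table */, new String[] {"ddl"});
+        /* POST the JSON payload to the proxy (non-persistent) endpoint */
+        HttpResponse resp = httpRequest.doHttpPost(proxyFilterUrl, rule.toJson());
+        assertEquals(HttpResponseStatus.OK.code(), resp.getStatusCode());
+        /* DELETE by name removes the rule again */
+        resp = httpRequest.doHttpDelete(proxyFilterUrl + "/sketch_rule", null);
+        assertEquals(HttpResponseStatus.OK.code(), resp.getStatusCode());
+    }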
+ private String getUrl(String append, boolean persist) { + String url = persist ? getSCFilterUrl() : proxyFilterUrl; + if (url == null) { + fail("Filter url should not be null"); + } + if (append != null) { + url += "/" + append; + } + return url; + } + + private String getSCFilterUrl() { + if (cloudRunning) { + if (scFilterUrl == null && scHost != null && scPort != null) { + scFilterUrl = "http://" + scHost + ":" + scPort + "/V0/filters"; + } + return scFilterUrl; + } + return null; + } + + private void removeAllRules() { + removeAllRules(false); + removeAllRules(true); + reloadPersistentRules(); + } + + private void removeAllRules(boolean persist) { + List rules = listRules(persist); + for (Rule rule : rules) { + assertTrue(deleteRule(rule.getName(), persist)); + } + + assertTrue(listRules(persist).isEmpty()); + } + + private List parseRulesFromResponse(HttpResponse resp) { + String output = resp.getOutput().trim(); + if (output.isEmpty()) { + return null; + } + + Type type = new TypeToken>(){}.getType(); + return Rule.getGson().fromJson(output, type); + } + + private Rule parseRuleFromResponse(HttpResponse resp) { + String output = resp.getOutput().trim(); + if (output.isEmpty()) { + return null; + } + + return Rule.fromJson(output); + } + + private void runQuery(String sql) { + QueryDetails info = QueryDetails.builder() + .compartmentId(getCompartmentId()) + .statement(sql) + .build(); + QueryRequest req = QueryRequest.builder() + .queryDetails(info) + .build(); + client.query(req); + } + + private void runPreparedQuery(String preparedStmt) { + QueryDetails info = QueryDetails.builder() + .compartmentId(getCompartmentId()) + .statement(preparedStmt) + .isPrepared(true) + .build(); + QueryRequest req = QueryRequest.builder() + .queryDetails(info) + .build(); + client.query(req); + } + + private String prepare(String query) { + PrepareStatementRequest prepReq = PrepareStatementRequest.builder() + .compartmentId(getCompartmentId()) + .statement(query) + .build(); + PrepareStatementResponse prepRet = client.prepareStatement(prepReq); + PreparedStatement prepStmt = prepRet.getPreparedStatement(); + assertNotNull(prepStmt); + return prepStmt.getStatement(); + } + + /** + * Run a code snippet and expect a specified status code. + */ + private abstract class OpWrapper { + private final String name; + + OpWrapper(String name) { + this.name = name; + } + + void exec() { + exec(tableName, false /* tableName */, 200); + } + + void exec(int statusCode) { + exec(tableName, false /* tableName */, statusCode); + } + + void execWithOcid(String tableId) { + execWithOcid(tableId, 200); + } + + void execWithOcid(String tableId, int statusCode) { + exec(tableId, true, statusCode); + } + + private void exec(String tableNameOrId, + boolean isTableId, + int statusCode) { + try { + execOp(tableNameOrId, isTableId); + if (statusCode != 200) { + fail("Expected get " + statusCode + ": " + name); + } + } catch (BmcException ex) { + if (ex.getStatusCode() != statusCode) { + fail("Didn't expect " + ex + ": " + name); + } + } + } + + abstract void execOp(String tableNameOrId, boolean isTableId); + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/rest/FreeTableTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/rest/FreeTableTest.java new file mode 100644 index 00000000..5e45b9fe --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/rest/FreeTableTest.java @@ -0,0 +1,659 @@ +/*- + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. 
+ * + * See the file LICENSE for redistribution information. + * + */ + +package oracle.nosql.proxy.rest; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static oracle.nosql.proxy.protocol.JsonProtocol.FREE_TIER_SYS_TAGS; + +import java.time.Instant; +import java.util.Collections; +import java.util.Date; +import java.util.HashMap; +import java.util.Map; + +import oracle.nosql.util.tmi.TableInfo.ActivityPhase; + +import com.oracle.bmc.nosql.model.CreateTableDetails; +import com.oracle.bmc.nosql.model.QueryDetails; +import com.oracle.bmc.nosql.model.Table; +import com.oracle.bmc.nosql.model.Table.LifecycleState; +import com.oracle.bmc.nosql.model.TableCollection; +import com.oracle.bmc.nosql.model.TableLimits; +import com.oracle.bmc.nosql.model.TableSummary; +import com.oracle.bmc.nosql.model.UpdateRowDetails; +import com.oracle.bmc.nosql.model.UpdateTableDetails; +import com.oracle.bmc.nosql.requests.CreateTableRequest; +import com.oracle.bmc.nosql.requests.ListTablesRequest; +import com.oracle.bmc.nosql.requests.QueryRequest; +import com.oracle.bmc.nosql.requests.UpdateRowRequest; +import com.oracle.bmc.nosql.requests.UpdateTableRequest; +import com.oracle.bmc.nosql.responses.GetTableResponse; +import com.oracle.bmc.nosql.responses.ListTablesResponse; +import com.oracle.bmc.nosql.responses.UpdateRowResponse; + +import org.junit.AfterClass; +import org.junit.Assume; +import org.junit.BeforeClass; +import org.junit.Test; + +/* + * This test suite is only for miniCloud test. + * + * Test free table operations including create/get/list, update table tags, + * table activity status change from IDEL to ACTIVE on data access operation + * and limit. + */ +public class FreeTableTest extends RestAPITestBase { + + private final String ddlFmt = + "create table %s(id integer, name string, primary key(id))"; + + private final TableLimits limits_50_50_1 = TableLimits.builder() + .maxReadUnits(50) + .maxWriteUnits(50) + .maxStorageInGBs(1) + .build(); + + @BeforeClass + public static void staticSetUp() throws Exception { + /* + * Run the test in minicloud only. 
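+ * (setFreeTableStore(true) below switches the store into free-table mode for this suite; staticTearDown() switches it back.)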
+ * + * Most of preprod envs do not support free table, so disable it in + * cloud test + */ + Assume.assumeTrue("Skipping FreeTableTest if not run against minicloud", + Boolean.getBoolean(USEMC_PROP)); + + RestAPITestBase.staticSetUp(); + setFreeTableStore(true); + } + + @AfterClass + public static void staticTearDown() throws Exception { + setFreeTableStore(false); + RestAPITestBase.staticTearDown(); + } + + /* + * Test basic free table operation: create, get and list + */ + @Test + public void testBasic() { + String tableName; + GetTableResponse gtRes; + Table table; + TableCollection tc; + String tableOcid; + + tableName = "freeTable"; + /* Create table */ + createTestTable(tableName, limits_50_50_1, + true /* isAutoReclaimable */); + + /* Get Table */ + gtRes = getTable(tableName); + tableOcid = gtRes.getTable().getId(); + assertFreeTableInfo(gtRes.getTable(), LifecycleState.Active); + + /* Sets the table's state to IDLE_P1 */ + setTableActivity(getTenantId(), tableOcid, + System.currentTimeMillis() - (daysToMs(30) + 5000), + ActivityPhase.IDLE_P1); + gtRes = getTable(tableName); + assertFreeTableInfo(gtRes.getTable(), LifecycleState.Inactive); + + /* List tables */ + tc = listTableInfos(); + assertEquals(1, tc.getItems().size()); + assertEquals(tenantLimits.getNumFreeTables(), + tc.getMaxAutoReclaimableTables().intValue()); + assertEquals(1, tc.getAutoReclaimableTables().intValue()); + assertFreeTableInfo(tc.getItems().get(0), LifecycleState.Inactive); + /* + * Test free table with tags + */ + tableName = "freeTableWithTags"; + /* freeform tags */ + Map freeTags = new HashMap<>(); + freeTags.put("createBy", "OracleNosql"); + freeTags.put("accountType", "IAMUser"); + + /* predefined tags */ + Map> definedTags = new HashMap<>(); + Map props = new HashMap<>(); + props.put("type", "backup"); + props.put("purpose", "WebTier"); + definedTags.put("Operations", props); + + /* Create table with tags */ + createTestTable(tableName, buildCreateTableDdl(tableName), + limits_50_50_1, true/* isAutoReclaimable*/, + freeTags, definedTags); + + /* Get Table */ + gtRes = getTable(tableName); + table = gtRes.getTable(); + assertFreeTableInfo(table, LifecycleState.Active); + assertTableTags(table, freeTags, definedTags); + + /* List Tables */ + tc = listTableInfos(); + assertEquals(2, tc.getItems().size()); + assertEquals(tenantLimits.getNumFreeTables(), + tc.getMaxAutoReclaimableTables().intValue()); + assertEquals(2, tc.getAutoReclaimableTables().intValue()); + for (TableSummary tbs : tc.getItems()) { + if (tbs.getName().equals(tableName)) { + assertTableTags(tbs, freeTags, definedTags); + assertFreeTableInfo(tbs, LifecycleState.Active); + } else { + assertFreeTableInfo(tbs, LifecycleState.Inactive); + } + } + } + + /* + * Test data access op will change table state from IDLE to ACTIVE + */ + @Test + public void testActivityState() throws Exception { + String tableName = "testActivityState"; + String query = "select * from " + tableName; + + /* Create a free table*/ + String ddl = buildCreateTableDdl(tableName); + String tableOcid = + scCreateTable(getTenantId(), getCompartmentId(), tableName, + buildCreateTableDdl(tableName), + new oracle.nosql.util.tmi.TableLimits(50, 50, 1), + true /* isFreeTable */); + + /* Sets the table's state to IDLE_P1 */ + setTableActivity(getTenantId(), tableOcid, + System.currentTimeMillis() - (daysToMs(30) + 5000), + ActivityPhase.IDLE_P1); + + /* Check table state is inactive */ + GetTableResponse gtRes = getTable(tableName); + assertEquals(LifecycleState.Inactive, + 
gtRes.getTable().getLifecycleState()); + + /* DDL operation will not change the table IDLE state */ + ddl = "alter table " + tableName + "(add age integer)"; + UpdateTableRequest utReq = buildUpdateTableRequest(tableName, ddl); + executeDdlFail(utReq, "TableDeploymentLimitExceeded"); + + /* Check table state is inactive */ + gtRes = getTable(tableName); + assertEquals(LifecycleState.Inactive, + gtRes.getTable().getLifecycleState()); + + /* Put a row */ + Map value = new HashMap(); + value.put("id", 0); + value.put("name", "nosql"); + UpdateRowDetails row = UpdateRowDetails.builder() + .compartmentId(getCompartmentId()) + .value(value) + .build(); + UpdateRowRequest putReq = UpdateRowRequest.builder() + .tableNameOrId(tableName) + .updateRowDetails(row) + .build(); + UpdateRowResponse putRet = client.updateRow(putReq); + assertNotNull(putRet.getUpdateRowResult().getVersion()); + + /* Data access op, table state should be changed to ACTIVE */ + gtRes = getTable(tableName); + assertEquals(LifecycleState.Active, + gtRes.getTable().getLifecycleState()); + + QueryDetails info = QueryDetails.builder() + .statement(query) + .compartmentId(getCompartmentId()) + .build(); + QueryRequest qryReq = QueryRequest.builder() + .queryDetails(info) + .build(); + client.query(qryReq); + assertEquals(LifecycleState.Active, + gtRes.getTable().getLifecycleState()); + + /* + * Test using binary java driver + */ + + /* Create a free table*/ + dropTable(tableName); + tableOcid = scCreateTable(getTenantId(), getCompartmentId(), + tableName, buildCreateTableDdl(tableName), + new oracle.nosql.util.tmi.TableLimits(50, 50, 1), + true /* isFreeTable */); + + /* Sets the table's state to IDLE_P1 */ + setTableActivity(getTenantId(), tableOcid, + System.currentTimeMillis() - (daysToMs(75) + 5000), + ActivityPhase.IDLE_P2); + gtRes = getTable(tableName); + assertEquals(LifecycleState.Inactive, + gtRes.getTable().getLifecycleState()); + + oracle.nosql.driver.ops.QueryRequest qreq = + new oracle.nosql.driver.ops.QueryRequest() + .setStatement(query) + .setCompartment(getCompartmentId()); + handle = configHandle(getProxyEndpoint()); + handle.query(qreq); + + /* Data access op, table state should be changed to ACTIVE */ + gtRes = getTable(tableName); + assertEquals(LifecycleState.Active, + gtRes.getTable().getLifecycleState()); + + /* + * Test child tables + * + * The activity state of child table is always same as its top parent, + * activating child table from IDLE will activate its top parent. 
+ */ + String childName = tableName + ".c"; + String childDdl = "create table " + childName + + "(idc integer, s string, primary key(idc))"; + String grandChildName = childName + ".g"; + String grandChildDdl = "create table " + grandChildName + + "(idg integer, s string, primary key(idg))"; + String childOcid; + String grandChildOcid; + + childOcid = scCreateTable(getTenantId(), getCompartmentId(), + childName, childDdl, + null /* tableLimits */, + true /* isFreeTable */); + grandChildOcid = scCreateTable(getTenantId(), getCompartmentId(), + grandChildName, grandChildDdl, + null /* tableLimits */, + true /* isFreeTable */); + + final String[] allTableOcids = new String[] { + tableOcid, + childOcid, + grandChildOcid + }; + + /* Set activity of the top table to IDLE_P2; the child tables follow it */ + setTableActivity(ActivityPhase.IDLE_P2, tableOcid); + checkTableActivity(LifecycleState.Inactive, true, allTableOcids); + + /* Query child table */ + query = "select * from " + childName; + qreq = new oracle.nosql.driver.ops.QueryRequest() + .setStatement(query) + .setCompartment(getCompartmentId()); + handle.query(qreq); + /* + * Verify that the activity of the top table and both child tables + * is ACTIVE + */ + checkTableActivity(LifecycleState.Active, false, allTableOcids); + + /* Set activity of the top table to IDLE_P2; the child tables follow it */ + setTableActivity(ActivityPhase.IDLE_P2, tableOcid); + checkTableActivity(LifecycleState.Inactive, true, allTableOcids); + + /* Query grandchild table */ + query = "select * from " + grandChildName; + qreq = new oracle.nosql.driver.ops.QueryRequest() + .setStatement(query) + .setCompartment(getCompartmentId()); + handle.query(qreq); + + /* + * Verify that the activity of the top table and both child tables + * is ACTIVE + */ + checkTableActivity(LifecycleState.Active, false, allTableOcids); + } + + private void setTableActivity(ActivityPhase activity, + String... tableOcids) { + final long estDdlMs = System.currentTimeMillis() - + (expirationTimeInMs(activity) + 5000); + for (String tableOcid : tableOcids) { + /* Set activity of each table to the given idle phase */ + setTableActivity(getTenantId(), tableOcid, estDdlMs, activity); + } + } + + private void checkTableActivity(LifecycleState expState, + boolean expireAtSameTime, + String... tableOcids) { + Table table; + Date expireTime = null; + for (String ocid : tableOcids) { + table = getTable(ocid).getTable(); + /* check activity state */ + assertEquals(expState, table.getLifecycleState()); + + /* check expiration time */ + if (expState == LifecycleState.Inactive) { + assertNotNull(table.getTimeOfExpiration()); + if (expireAtSameTime) { + if (expireTime == null) { + expireTime = table.getTimeOfExpiration(); + } else { + assertEquals(expireTime, table.getTimeOfExpiration()); + } + } + } else { + assertNull(table.getTimeOfExpiration()); + } + } + } + + private static long expirationTimeInMs(ActivityPhase activity) { + if (activity == ActivityPhase.IDLE_P1) { + return daysToMs(30); + } + if (activity == ActivityPhase.IDLE_P2) { + return daysToMs(75); + } + return 0; + } + + /* + * The system tags should be kept when altering the table or updating tags.
+ */ + @Test + public void testUpdateTableTags() { + String tableName = "testUpdateTableTags"; + /* freeform tags */ + Map<String, String> freeTags = new HashMap<>(); + freeTags.put("createBy", "OracleNosql"); + freeTags.put("accountType", "IAMUser"); + + /* predefined tags */ + Map<String, Map<String, Object>> definedTags = new HashMap<>(); + Map<String, Object> definedProps = new HashMap<>(); + definedProps.put(DEFINED_TAG_PROP, "v0"); + definedTags.put(DEFINED_TAG_NAMESPACE, definedProps); + + /* Create a free table and verify its system tags */ + createTestTable(tableName, buildCreateTableDdl(tableName), + limits_50_50_1, true, freeTags, definedTags); + GetTableResponse gtRes = getTable(tableName); + Table table = gtRes.getTable(); + assertEquals(FREE_TIER_SYS_TAGS, table.getSystemTags()); + assertTableTags(table, freeTags, definedTags); + + /* Alter table schema, system tags should be kept */ + freeTags.put("accountType", "free"); + definedProps.put(DEFINED_TAG_PROP, "v1"); + + UpdateTableDetails utInfo = UpdateTableDetails.builder() + .compartmentId(getCompartmentId()) + .ddlStatement("alter table " + tableName + "(add age integer)") + .build(); + UpdateTableRequest utReq = UpdateTableRequest.builder() + .tableNameOrId(tableName) + .updateTableDetails(utInfo) + .build(); + executeDdl(utReq); + gtRes = getTable(tableName); + table = gtRes.getTable(); + assertTableTags(table, table.getFreeformTags(), table.getDefinedTags()); + assertEquals(FREE_TIER_SYS_TAGS, table.getSystemTags()); + + /* Update table tags, system tags should be kept */ + utInfo = UpdateTableDetails.builder() + .compartmentId(getCompartmentId()) + .freeformTags(freeTags) + .build(); + utReq = UpdateTableRequest.builder() + .tableNameOrId(tableName) + .updateTableDetails(utInfo) + .build(); + executeDdl(utReq); + gtRes = getTable(tableName); + table = gtRes.getTable(); + assertTableTags(table, freeTags, Collections.emptyMap()); + assertEquals(FREE_TIER_SYS_TAGS, table.getSystemTags()); + + utInfo = UpdateTableDetails.builder() + .compartmentId(getCompartmentId()) + .freeformTags(freeTags) + .definedTags(definedTags) + .build(); + utReq = UpdateTableRequest.builder() + .tableNameOrId(tableName) + .updateTableDetails(utInfo) + .build(); + executeDdl(utReq); + gtRes = getTable(tableName); + table = gtRes.getTable(); + assertTableTags(table, freeTags, definedTags); + assertEquals(FREE_TIER_SYS_TAGS, table.getSystemTags()); + } + + /* + * Test the limit on the number of free tables and the table limits quota + */ + @Test + public void testLimits() { + final int maxNumFreeTables = tenantLimits.getNumFreeTables(); + String tableNamePrefix = "freeTable"; + /* Create maxNumFreeTables - 1 free tables */ + for (int i = 0; i < maxNumFreeTables - 1; i++) { + createTestTable(tableNamePrefix + i, limits_50_50_1, + true /* isAutoReclaimable */); + } + + /* + * read/write limits exceed the quota.
+ */ + String tableName = tableNamePrefix + (maxNumFreeTables - 1); + TableLimits exceedQuota = TableLimits.builder() + .maxWriteUnits(100) + .maxReadUnits(100) + .maxStorageInGBs(1).build(); + CreateTableDetails.Builder info = CreateTableDetails.builder() + .compartmentId(getCompartmentId()) + .name(tableName) + .ddlStatement(buildCreateTableDdl(tableName)) + .isAutoReclaimable(true) + .tableLimits(exceedQuota); + CreateTableRequest req = CreateTableRequest.builder() + .createTableDetails(info.build()) + .build(); + executeDdlFail(req, "TableDeploymentLimitExceeded"); + + /* create another free table */ + info = CreateTableDetails.builder() + .compartmentId(getCompartmentId()) + .name(tableName) + .ddlStatement(buildCreateTableDdl(tableName)) + .isAutoReclaimable(true) + .tableLimits(limits_50_50_1); + req = CreateTableRequest.builder() + .createTableDetails(info.build()) + .build(); + executeDdl(req); + + /* + * The number of free tables exceeds the limit. + */ + tableName = tableNamePrefix + maxNumFreeTables; + info = CreateTableDetails.builder() + .compartmentId(getCompartmentId()) + .name(tableName) + .ddlStatement(buildCreateTableDdl(tableName)) + .isAutoReclaimable(true) + .tableLimits(limits_50_50_1); + req = CreateTableRequest.builder() + .createTableDetails(info.build()) + .build(); + executeDdlFail(req, "TableLimitExceeded"); + } + + @Test + public void testChildTable() { + final int maxNumFreeTables = tenantLimits.getNumFreeTables(); + String pcDdl = "create table %s(" + + " %s integer, s string, " + + " primary key(%s))"; + + createTestTable("p", limits_50_50_1, true); + + CreateTableDetails info; + CreateTableRequest req; + + /* + * If the parent table is auto reclaimable, the child table must also be + * auto reclaimable + */ + String ddl = String.format(pcDdl, "p.c", "idc", "idc"); + info = CreateTableDetails.builder() + .compartmentId(getCompartmentId()) + .name("p.c") + .ddlStatement(ddl) + .isAutoReclaimable(false) + .build(); + req = CreateTableRequest.builder() + .createTableDetails(info) + .build(); + executeDdlFail(req, "IllegalArgument"); + + /* + * Test that the number of the parent and its child tables cannot exceed + * maxNumFreeTables.
+ */ + GetTableResponse gtRet; + String tableName = "p"; + String pkey; + for (int i = 0; i < maxNumFreeTables; i++) { + tableName += ".c" + i; + pkey = "idk" + String.valueOf(i); + ddl = String.format(pcDdl, tableName, pkey, pkey); + + info = CreateTableDetails.builder() + .compartmentId(getCompartmentId()) + .name(tableName) + .ddlStatement(ddl) + .isAutoReclaimable(true) + .build(); + req = CreateTableRequest.builder() + .createTableDetails(info) + .build(); + + if (i == maxNumFreeTables - 1) { + executeDdlFail(req, "TableLimitExceeded"); + } else { + executeDdl(req); + gtRet = getTable(tableName); + assertTrue(gtRet.getTable().getIsAutoReclaimable()); + } + } + } + + private void createTestTable(String tableName, + TableLimits limits, + boolean isAutoReclaimable) { + createTestTable(tableName, buildCreateTableDdl(tableName), + limits, isAutoReclaimable, null, null); + } + + private void createTestTable(String tableName, + String ddl, + TableLimits limits, + boolean isAutoReclaimable, + Map freeTags, + Map> defineTags) { + + CreateTableRequest req; + + /* Create table */ + CreateTableDetails.Builder payload = CreateTableDetails.builder() + .compartmentId(getCompartmentId()) + .name(tableName) + .ddlStatement(ddl) + .tableLimits(limits); + if (isAutoReclaimable) { + payload.isAutoReclaimable(isAutoReclaimable); + } + if (freeTags != null) { + payload.freeformTags(freeTags); + } + if (defineTags != null) { + payload.definedTags(defineTags); + } + req = CreateTableRequest.builder() + .createTableDetails(payload.build()) + .build(); + executeDdl(req); + } + + /* List tables */ + private TableCollection listTableInfos() { + ListTablesRequest lstReq = ListTablesRequest.builder() + .compartmentId(getCompartmentId()) + .build(); + ListTablesResponse lstRes = client.listTables(lstReq); + return lstRes.getTableCollection(); + } + + private String buildCreateTableDdl(String tableName) { + return String.format(ddlFmt, tableName); + } + + private void assertFreeTableInfo(Table table, LifecycleState state) { + assertTrue(table.getIsAutoReclaimable()); + assertEquals(FREE_TIER_SYS_TAGS, table.getSystemTags()); + assertEquals(state, table.getLifecycleState()); + if (state == LifecycleState.Inactive) { + assertNotNull(table.getTimeOfExpiration()); + assertTrue(table.getTimeOfExpiration() + .after(Date.from(Instant.now()))); + } else { + assertNull(table.getTimeOfExpiration()); + } + } + + private void assertTableTags(Table table, + Map freeTags, + Map> definedTags) { + assertEquals(freeTags, table.getFreeformTags()); + assertDefinedTags(definedTags, table.getDefinedTags()); + } + + private static long daysToMs(int days) { + return days * 24L * 3600L * 1000L; + } + + private void assertFreeTableInfo(TableSummary ts, LifecycleState state) { + assertTrue(ts.getIsAutoReclaimable()); + assertEquals(FREE_TIER_SYS_TAGS, ts.getSystemTags()); + assertEquals(state, ts.getLifecycleState()); + if (state == LifecycleState.Inactive) { + assertNotNull(ts.getTimeOfExpiration()); + assertTrue(ts.getTimeOfExpiration() + .after(Date.from(Instant.now()))); + } else { + assertNull(ts.getTimeOfExpiration()); + } + } + + private void assertTableTags(TableSummary ts, + Map freeTags, + Map> definedTags) { + assertEquals(freeTags, ts.getFreeformTags()); + assertEquals(definedTags, ts.getDefinedTags()); + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/rest/IndexTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/rest/IndexTest.java new file mode 100644 index 00000000..49277c02 --- /dev/null +++ 
b/httpproxy/tests/src/main/java/oracle/nosql/proxy/rest/IndexTest.java @@ -0,0 +1,945 @@ +/*- + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * See the file LICENSE for redistribution information. + * + */ + +package oracle.nosql.proxy.rest; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeTrue; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import org.junit.Test; + +import com.oracle.bmc.model.BmcException; +import com.oracle.bmc.nosql.model.CreateIndexDetails; +import com.oracle.bmc.nosql.model.Index; +import com.oracle.bmc.nosql.model.IndexKey; +import com.oracle.bmc.nosql.model.IndexSummary; +import com.oracle.bmc.nosql.requests.CreateIndexRequest; +import com.oracle.bmc.nosql.requests.DeleteIndexRequest; +import com.oracle.bmc.nosql.requests.GetIndexRequest; +import com.oracle.bmc.nosql.requests.ListIndexesRequest; +import com.oracle.bmc.nosql.requests.ListIndexesRequest.LifecycleState; +import com.oracle.bmc.nosql.responses.GetIndexResponse; +import com.oracle.bmc.nosql.responses.ListIndexesResponse; + +/** + * Indexes related APIs: + * o create index + * o drop index + * o get index + * o get indexes + */ +public class IndexTest extends RestAPITestBase { + + @Test + public void testIndexBasic() { + final String tableName = "testIndexBasic"; + final String indexName = "idxNamePhoneAge"; + final String ddl = "create table " + tableName + "(" + + "id integer, " + + "name string, " + + "age integer, " + + "info json, " + + "primary key(id))"; + + createTable(tableName, ddl); + + /* Create Index */ + List keys = new ArrayList(); + keys.add(IndexKey.builder().columnName("name").build()); + keys.add(IndexKey.builder() + .columnName("info") + .jsonPath("phone") + .jsonFieldType("string") + .build()); + keys.add(IndexKey.builder().columnName("age").build()); + keys.add(IndexKey.builder() + .columnName("info") + .jsonPath("address[].city.id") + .jsonFieldType("Integer") + .build()); + + CreateIndexDetails info = CreateIndexDetails.builder() + .name(indexName) + .compartmentId(getCompartmentId()) + .keys(keys) + .build(); + CreateIndexRequest ciReq = CreateIndexRequest.builder() + .tableNameOrId(tableName) + .createIndexDetails(info) + .build(); + executeDdl(ciReq); + + /* Get Index */ + GetIndexRequest giReq = GetIndexRequest.builder() + .tableNameOrId(tableName) + .compartmentId(getCompartmentId()) + .indexName(indexName) + .build(); + GetIndexResponse giRes = client.getIndex(giReq); + Index index = giRes.getIndex(); + assertNotNull(index); + assertEquals(indexName, index.getName()); + assertEquals(getCompartmentId(), index.getCompartmentId()); + assertEquals(tableName, index.getTableName()); + assertIndexKeys(keys, index.getKeys()); + + /* Create Index with if not exists */ + info = CreateIndexDetails.builder() + .name(indexName) + .compartmentId(getCompartmentId()) + .keys(keys) + .isIfNotExists(true) + .build(); + ciReq = CreateIndexRequest.builder() + .tableNameOrId(tableName) + .createIndexDetails(info) + .build(); + executeDdl(ciReq); + + /* Create Index but index already exists, get IndexAlreadyExists error */ + info = CreateIndexDetails.builder() + .name(indexName) + .compartmentId(getCompartmentId()) + .keys(keys) + .build(); + ciReq = CreateIndexRequest.builder() + .tableNameOrId(tableName) + 
.createIndexDetails(info) + .build(); + executeDdlFail(ciReq, "IndexAlreadyExists"); + + /* Drop Index */ + DeleteIndexRequest diReq = DeleteIndexRequest.builder() + .indexName(indexName) + .compartmentId(getCompartmentId()) + .tableNameOrId(tableName) + .build(); + executeDdl(diReq); + + /* Get Index, Index not found (404, NotAuthorizedOrNotFound, false) */ + try { + giRes = client.getIndex(giReq); + } catch (BmcException ex) { + assertEquals(404, ex.getStatusCode()); + } + + /* Drop Index with if not exists */ + diReq = DeleteIndexRequest.builder() + .indexName(indexName) + .isIfExists(true) + .compartmentId(getCompartmentId()) + .tableNameOrId(tableName) + .build(); + executeDdl(diReq); + + /* Drop Index but index not exists, get 404 (Index not found) error */ + diReq = DeleteIndexRequest.builder() + .indexName(indexName) + .isIfExists(false) + .compartmentId(getCompartmentId()) + .tableNameOrId(tableName) + .build(); + executeDdlFail(diReq, "IndexNotFound"); + } + + @Test + public void testIndexBasicWithTableOcid() { + + /* Run this test for minicloud only */ + assumeTrue("Skipping testIndexBasicWithTableOcid() if not minicloud " + + "test", cloudRunning); + + final String tableName = "testIndexBasicWithTableOcid"; + final String indexName = "idxNamePhoneAge"; + final String ddl = "create table " + tableName + "(" + + "id integer, " + + "name string, " + + "age integer, " + + "info json, " + + "primary key(id))"; + createTable(tableName, ddl); + + final String tableOcid = getTableId(tableName); + + /* Create Index */ + List keys = new ArrayList(); + keys.add(IndexKey.builder().columnName("name").build()); + keys.add(IndexKey.builder() + .columnName("info") + .jsonPath("phone") + .jsonFieldType("string") + .build()); + keys.add(IndexKey.builder().columnName("age").build()); + keys.add(IndexKey.builder() + .columnName("info") + .jsonPath("address[].city.id") + .jsonFieldType("Integer") + .build()); + + CreateIndexDetails info = CreateIndexDetails.builder() + .name(indexName) + .keys(keys) + .build(); + CreateIndexRequest ciReq = CreateIndexRequest.builder() + .tableNameOrId(tableOcid) + .createIndexDetails(info) + .build(); + executeDdl(ciReq); + + /* Get Index */ + GetIndexRequest giReq = GetIndexRequest.builder() + .tableNameOrId(tableOcid) + .indexName(indexName) + .build(); + GetIndexResponse giRes = client.getIndex(giReq); + Index index = giRes.getIndex(); + assertNotNull(index); + assertEquals(indexName, index.getName()); + assertEquals(getCompartmentId(), index.getCompartmentId()); + assertEquals(tableName, index.getTableName()); + assertIndexKeys(keys, index.getKeys()); + + /* Create Index with if not exists */ + info = CreateIndexDetails.builder() + .name(indexName) + .keys(keys) + .isIfNotExists(true) + .build(); + ciReq = CreateIndexRequest.builder() + .tableNameOrId(tableOcid) + .createIndexDetails(info) + .build(); + executeDdl(ciReq); + + /* Create Index but index already exists, get IndexAlreadyExists error */ + info = CreateIndexDetails.builder() + .name(indexName) + .keys(keys) + .build(); + ciReq = CreateIndexRequest.builder() + .tableNameOrId(tableOcid) + .createIndexDetails(info) + .build(); + executeDdlFail(ciReq, "IndexAlreadyExists"); + + /* Drop Index */ + DeleteIndexRequest diReq = DeleteIndexRequest.builder() + .indexName(indexName) + .tableNameOrId(tableOcid) + .build(); + executeDdl(diReq); + + /* Get Index, Index not found (404, NotAuthorizedOrNotFound, false) */ + try { + giRes = client.getIndex(giReq); + } catch (BmcException ex) { + assertEquals(404, 
ex.getStatusCode()); + } + + /* Drop Index with if not exists */ + diReq = DeleteIndexRequest.builder() + .indexName(indexName) + .isIfExists(true) + .tableNameOrId(tableOcid) + .build(); + executeDdl(diReq); + + /* Drop Index but the index does not exist, get 404 (IndexNotFound) error */ + diReq = DeleteIndexRequest.builder() + .indexName(indexName) + .isIfExists(false) + .tableNameOrId(tableOcid) + .build(); + executeDdlFail(diReq, "IndexNotFound"); + } + + @Test + public void testIndexNonExistentTableOcid() { + + /* Run this test for minicloud only */ + assumeTrue("Skipping testIndexNonExistentTableOcid() if not minicloud " + + "test", cloudRunning); + + String tableName = "testIndexNonExistentTableOcid"; + String ddl = "create table if not exists " + tableName + "(" + + "id integer, name String, age integer, " + + "primary key(id))"; + /* Create table */ + createTable(tableName, ddl); + + /* fake non-existent table ocid */ + String tableOcid = getTableId(tableName) + "notexist"; + + /* Create Index */ + List<IndexKey> keys = new ArrayList<>(); + String indexName = "idx_name"; + keys.add(IndexKey.builder().columnName("name").build()); + try { + createIndex(tableOcid, "idx", keys, true /* wait */); + fail("CreateIndex expected table-not-found but succeeded"); + } catch (BmcException ex) { + assertEquals(404 /* table not found */, ex.getStatusCode()); + } + + /* Get Index */ + try { + GetIndexRequest giReq = GetIndexRequest.builder() + .tableNameOrId(tableOcid) + .indexName(indexName) + .build(); + client.getIndex(giReq); + fail("GetIndex expected table-not-found but succeeded"); + } catch (BmcException ex) { + assertEquals(404 /* table not found */, ex.getStatusCode()); + } + + /* Delete Index */ + try { + DeleteIndexRequest diReq = DeleteIndexRequest.builder() + .indexName(indexName) + .tableNameOrId(tableOcid) + .build(); + executeDdl(diReq); + fail("DeleteIndex expected table-not-found but succeeded"); + } catch (BmcException ex) { + assertEquals(404 /* table not found */, ex.getStatusCode()); + } + } + + @Test + public void testCreateIndexBadRequest() { + + final String tableName = "foo"; + final String indexName = "idxName"; + List<IndexKey> keys = new ArrayList<>(); + + CreateIndexDetails info; + CreateIndexRequest req; + + /* Invalid name: name should not be empty or contain white space only */ + info = CreateIndexDetails.builder() + .name(" ") + .compartmentId(getCompartmentId()) + .keys(keys) + .build(); + req = CreateIndexRequest.builder() + .tableNameOrId(tableName) + .createIndexDetails(info) + .build(); + executeDdlFail(req, 400 /* bad request */, "InvalidParameter"); + + /* Invalid compartmentId: it should not be empty */ + info = CreateIndexDetails.builder() + .name(indexName) + .compartmentId("") + .keys(keys) + .build(); + req = CreateIndexRequest.builder() + .tableNameOrId(tableName) + .createIndexDetails(info) + .build(); + executeDdlFail(req, 400 /* bad request */, "InvalidParameter"); + + /* Invalid keys: the list should not be empty */ + info = CreateIndexDetails.builder() + .name(indexName) + .compartmentId(getCompartmentId()) + .keys(keys) + .build(); + req = CreateIndexRequest.builder() + .tableNameOrId(tableName) + .createIndexDetails(info) + .build(); + executeDdlFail(req, 400 /* bad request */, "InvalidParameter"); + + /* Invalid IndexKey */ + keys.clear(); + keys.add(IndexKey.builder() + .jsonPath("info.phone") + .jsonFieldType("string") + .build()); + executeDdlFail(req, 400 /* bad request */, "InvalidParameter"); + + keys.clear(); + keys.add(IndexKey.builder() + .columnName("name") +
.jsonPath("info.phone") + .build()); + executeDdlFail(req, 400 /* bad request */, "InvalidParameter"); + + keys.clear(); + keys.add(IndexKey.builder() + .columnName("name") + .jsonFieldType("string") + .build()); + executeDdlFail(req, 400 /* bad request */, "InvalidParameter"); + + /* Table not found */ + keys.clear(); + keys.add(IndexKey.builder().columnName("name").build()); + info = CreateIndexDetails.builder() + .name(indexName) + .compartmentId(getCompartmentId()) + .keys(keys) + .build(); + req = CreateIndexRequest.builder() + .tableNameOrId("invalid") + .createIndexDetails(info) + .build(); + if (cloudRunning) { + executeDdlFail(req, 404, "NotAuthorizedOrNotFound"); + } else { + executeDdlFail(req, 404, "TableNotFound"); + } + } + + @Test + public void testDropIndexWithMatchETag() { + + /* Run this test for minicloud only */ + assumeTrue("Skipping testDropIndexWithMatchETag() if not minicloud " + + "test", cloudRunning); + + final String tableName = "foo"; + final String indexName = "idxNamePhoneAge"; + final String ddl = "create table " + tableName + "(" + + "id integer, " + + "name string, " + + "age integer, " + + "info json, " + + "primary key(id))"; + + createTable(tableName, ddl); + + /* Create Index */ + List keys = new ArrayList(); + keys.add(IndexKey.builder().columnName("name").build()); + CreateIndexDetails info = CreateIndexDetails.builder() + .name(indexName) + .compartmentId(getCompartmentId()) + .keys(keys) + .build(); + CreateIndexRequest ciReq = CreateIndexRequest.builder() + .tableNameOrId(tableName) + .createIndexDetails(info) + .build(); + executeDdl(ciReq); + + /* Get index info and current ETag */ + GetIndexResponse giRes = getIndex(tableName, indexName); + String currentETag = giRes.getEtag(); + + /* Delete the index with ETag */ + DeleteIndexRequest diReq = DeleteIndexRequest.builder() + .indexName(indexName) + .compartmentId(getCompartmentId()) + .tableNameOrId(tableName) + .ifMatch(currentETag) + .build(); + executeDdl(diReq); + + /* Create index again with different columns */ + keys.clear(); + keys.add(IndexKey.builder().columnName("name").build()); + keys.add(IndexKey.builder().columnName("age").build()); + info = CreateIndexDetails.builder() + .name(indexName) + .compartmentId(getCompartmentId()) + .keys(keys) + .build(); + ciReq = CreateIndexRequest.builder() + .tableNameOrId(tableName) + .createIndexDetails(info) + .build(); + executeDdl(ciReq); + + /* Get index info and current ETag */ + giRes = getIndex(tableName, indexName); + String oldETag = currentETag; + currentETag = giRes.getEtag(); + + /* Delete the index with mismatched ETag, get ETagMismatch error */ + diReq = DeleteIndexRequest.builder() + .indexName(indexName) + .compartmentId(getCompartmentId()) + .tableNameOrId(tableName) + .ifMatch(oldETag) + .build(); + executeDdlFail(diReq, "ETagMismatch"); + + /* Delete the index with ETag, expect to succeed */ + diReq = DeleteIndexRequest.builder() + .indexName(indexName) + .compartmentId(getCompartmentId()) + .tableNameOrId(tableName) + .ifMatch(currentETag) + .build(); + executeDdl(diReq); + + /* Get Index, Index not found (404, NotAuthorizedOrNotFound, false) */ + try { + giRes = getIndex(tableName, indexName); + } catch (BmcException ex) { + assertEquals(404, ex.getStatusCode()); + } + + /* + * Delete the index with ifExists and mismatched ETag, expect to succeed + * + * For delete-if-exists, if index does not exists the ETag will be + * ignored, it expects to succeed. 
+ */ + diReq = DeleteIndexRequest.builder() + .indexName(indexName) + .compartmentId(getCompartmentId()) + .tableNameOrId(tableName) + .isIfExists(true) + .ifMatch(oldETag) + .build(); + executeDdl(diReq); + } + + @Test + public void testGetIndexes() { + final String tableName = "foo"; + String ddl = "create table if not exists " + tableName + "(" + + "id integer, " + + "name string, " + + "age integer, " + + "zipCode string, " + + "primary key(id))"; + createTable(tableName, ddl); + + /* List all indexes, no index exists */ + List indexes = + runGetIndexes(tableName, + false /* isTableOcid */, + null /* namePattern */, + null /* LifecycleState */, + null /* SortBy */, + null /* SortOrder */, + 0 /* limit */); + assertEquals(0, indexes.size()); + + List timeCreatedAsc = new ArrayList(); + + /* Create Indexes */ + List keys = new ArrayList(); + String indexName = "idx_name"; + keys.add(IndexKey.builder().columnName("name").build()); + createIndexAndVerify(tableName, indexName, keys); + timeCreatedAsc.add(indexName); + + indexName = "idx_name_age"; + keys.clear(); + keys.add(IndexKey.builder().columnName("name").build()); + keys.add(IndexKey.builder().columnName("age").build()); + createIndexAndVerify(tableName, indexName, keys); + timeCreatedAsc.add(indexName); + + indexName = "idx_zipcode"; + keys.clear(); + keys.add(IndexKey.builder().columnName("zipcode").build()); + createIndexAndVerify(tableName, indexName, keys); + timeCreatedAsc.add(indexName); + + indexName = "idx_name_zipcode"; + keys.clear(); + keys.add(IndexKey.builder().columnName("name").build()); + keys.add(IndexKey.builder().columnName("zipcode").build()); + createIndexAndVerify(tableName, indexName, keys); + timeCreatedAsc.add(indexName); + + List timeCreatedDesc = new ArrayList(timeCreatedAsc); + Collections.reverse(timeCreatedDesc); + List nameAsc = new ArrayList(timeCreatedAsc); + Collections.sort(nameAsc); + + /* List all indexes */ + indexes = runGetIndexes(tableName, + false /* isTableOcid */, + null /* namePattern */, + null /* LifecycleState */, + null /* SortBy */, + null /* SortOrder */, + 0 /* limit */); + assertEquals(timeCreatedAsc.size(), indexes.size()); + if (cloudRunning) { + assertSortedIndex(indexes, timeCreatedDesc); + } + + /* List all indexes */ + indexes = runGetIndexes(tableName, + false /* isTableOcid */, + null /* namePattern */, + null /* LifecycleState */, + null /* SortBy */, + null /* SortOrder */, + 1 /* limit */); + assertEquals(timeCreatedAsc.size(), indexes.size()); + + /* Below tests for advanced parameters are for minicloud only: + * 1. Filtering with namePattern/start + * 2. Sorting by timeCreated or name */ + if (!cloudRunning) { + return; + } + + /* List those indexes whose name's prefix is idx_name */ + String namePattern = "*_name*"; + indexes = runGetIndexes(tableName, + false /* isTableOcid */, + namePattern, + null /* LifecycleState */, + null /* SortBy */, + null /* SortOrder */, + 0 /* limit */); + assertEquals(3, indexes.size()); + for (IndexSummary index : indexes) { + assertTrue(index.getName().startsWith("idx_name")); + } + + /* No matched index, should return 0 index. 
*/ + indexes = runGetIndexes(tableName, + false /* isTableOcid */, + "*not*", + null /* LifecycleState */, + null /* SortBy */, + null /* SortOrder */, + 0 /* limit */); + assertEquals(0, indexes.size()); + + /* List indexes filtered by state */ + indexes = runGetIndexes(tableName, + false /* isTableOcid */, + null, + LifecycleState.All, + null /* SortBy */, + null /* SortOrder */, + 0 /* limit */); + assertEquals(4, indexes.size()); + + indexes = runGetIndexes(tableName, + false /* isTableOcid */, + null, + LifecycleState.Creating, + null /* SortBy */, + null /* SortOrder */, + 0 /* limit */); + assertEquals(0, indexes.size()); + + /* List indexes sorted by createdTime asc */ + indexes = runGetIndexes(tableName, + false /* isTableOcid */, + null /* namePattern */, + LifecycleState.Active, + ListIndexesRequest.SortBy.TimeCreated, + ListIndexesRequest.SortOrder.Asc, + 1); + assertEquals(4, indexes.size()); + assertSortedIndex(indexes, timeCreatedAsc); + + /* List indexes sorted by createdTime desc */ + indexes = runGetIndexes(tableName, + false /* isTableOcid */, + null /* namePattern */, + null /* LifecycleState */, + ListIndexesRequest.SortBy.TimeCreated, + ListIndexesRequest.SortOrder.Desc, + 2); + assertEquals(4, indexes.size()); + assertSortedIndex(indexes, timeCreatedDesc); + + /* List indexes sorted by name asc */ + indexes = runGetIndexes(tableName, + false /* isTableOcid */, + null /* namePattern */, + null /* LifecycleState */, + ListIndexesRequest.SortBy.Name, + ListIndexesRequest.SortOrder.Asc, + 3); + assertEquals(4, indexes.size()); + assertSortedIndex(indexes, nameAsc); + + /* List indexes using tableOcid, for miniCloud test only */ + if (cloudRunning) { + String tableOcid = getTableId(tableName); + indexes = runGetIndexes(tableOcid, + true /* isTableOcid */, + null /* namePattern */, + null /* LifecycleState */, + null /* SortBy */, + null /* SortOrder */, + 0 /* limit */); + assertEquals(timeCreatedAsc.size(), indexes.size()); + } + } + + @Test + public void testIndexTableNameMapping() + throws Exception { + + /* + * Run this test in minicloud only + * + * This test bypasses the proxy and calls the SC API to create tables in + * order to test the proxy cache, so it can only be run against minicloud.
+ */ + assumeTrue("Skipping testIndexTableNameMapping() if not minicloud " + + "test", useMiniCloud); + + String tableName = "testIndexTableNameMapping"; + String ddl = "create table " + tableName + "(" + + "id integer, name String, age integer, " + + "primary key(id))"; + + /* drop non-existing table */ + dropTable(tableName); + + /* re-create table */ + scRecreateTable(getTenantId(), getCompartmentId(), tableName, ddl); + + /* Create Index to cache mapping */ + List keys = new ArrayList(); + keys.add(IndexKey.builder().columnName("name").build()); + CreateIndexDetails info = CreateIndexDetails.builder() + .compartmentId(getCompartmentId()) + .name("idx1") + .keys(keys) + .build(); + CreateIndexRequest ciReq = CreateIndexRequest.builder() + .tableNameOrId(tableName) + .createIndexDetails(info) + .build(); + executeDdl(ciReq); + + /* re-create table */ + scRecreateTable(getTenantId(), getCompartmentId(), tableName, ddl); + + /* Create Index */ + keys = new ArrayList(); + keys.add(IndexKey.builder().columnName("name").build()); + info = CreateIndexDetails.builder() + .compartmentId(getCompartmentId()) + .name("idx1") + .keys(keys) + .build(); + ciReq = CreateIndexRequest.builder() + .tableNameOrId(tableName) + .createIndexDetails(info) + .build(); + executeDdl(ciReq); + + /* Get Index to cache mapping */ + GetIndexRequest giReq = GetIndexRequest.builder() + .tableNameOrId(tableName) + .compartmentId(getCompartmentId()) + .indexName("idx1") + .build(); + client.getIndex(giReq); + + /* re-create table */ + scRecreateTable(getTenantId(), getCompartmentId(), tableName, ddl); + + /* Get Index */ + giReq = GetIndexRequest.builder() + .tableNameOrId(tableName) + .compartmentId(getCompartmentId()) + .indexName("idx1") + .build(); + try { + client.getIndex(giReq); + fail("expected IndexNotFound"); + } catch (BmcException be) { + assertEquals(be.getStatusCode(), 404); + assertEquals(be.getServiceCode(), "IndexNotFound"); + } + + /* Drop Index to cache mapping */ + DeleteIndexRequest diReq = DeleteIndexRequest.builder() + .indexName("idx1") + .tableNameOrId(tableName) + .compartmentId(getCompartmentId()) + .build(); + client.deleteIndex(diReq); + + /* re-create table */ + scRecreateTable(getTenantId(), getCompartmentId(), tableName, ddl); + + /* Drop Index */ + diReq = DeleteIndexRequest.builder() + .indexName("idx1") + .tableNameOrId(tableName) + .compartmentId(getCompartmentId()) + .build(); + client.deleteIndex(diReq); + } + + @Test + public void testIndexInvalidCompartmentId() { + + /* Run this test for minicloud only */ + assumeTrue("Skipping testIndexInvalidCompartmentId() if not minicloud " + + "test", cloudRunning); + + String tableName = "testIndexInvalidCompartmentId"; + /* Create Index */ + List keys = new ArrayList(); + String indexName = "idx_name"; + keys.add(IndexKey.builder().columnName("name").build()); + try { + CreateIndexDetails info = CreateIndexDetails.builder() + .name(indexName) + .keys(keys) + .build(); + CreateIndexRequest req = CreateIndexRequest.builder() + .tableNameOrId(tableName) + .createIndexDetails(info) + .build(); + client.createIndex(req); + fail("CreateIndex expect 404 but not"); + } catch (BmcException ex) { + assertEquals(404, ex.getStatusCode()); + assertTrue(ex.getMessage().contains("compartment id")); + } + + /* Get Index */ + try { + GetIndexRequest giReq = GetIndexRequest.builder() + .tableNameOrId(tableName) + .indexName(indexName) + .build(); + client.getIndex(giReq); + fail("GetIndex expect 404 but not"); + } catch (BmcException ex) { + assertEquals(404, 
ex.getStatusCode()); + assertTrue(ex.getMessage().contains("compartment id")); + } + + /* Delete Index */ + try { + DeleteIndexRequest diReq = DeleteIndexRequest.builder() + .indexName(indexName) + .tableNameOrId(tableName) + .build(); + executeDdl(diReq); + fail("DeleteIndex expect 404 but not"); + } catch (BmcException ex) { + assertEquals(404 , ex.getStatusCode()); + assertTrue(ex.getMessage().contains("compartment id")); + } + } + + private void assertSortedIndex(List indexes, + List expSorted) { + assertEquals(expSorted.size(), indexes.size()); + for (int i = 0; i < indexes.size(); i++) { + assertEquals(expSorted.get(i), indexes.get(i).getName()); + } + } + + private void createIndexAndVerify(String tableName, + String indexName, + List keys) { + createIndex(tableName, indexName, keys, true /* wait */); + + GetIndexResponse res = getIndex(tableName, indexName); + Index index = res.getIndex(); + assertIndexKeys(keys, index.getKeys()); + assertEquals(Index.LifecycleState.Active, index.getLifecycleState()); + } + + private GetIndexResponse getIndex(String tableName, String indexName) { + GetIndexRequest req = GetIndexRequest.builder() + .indexName(indexName) + .tableNameOrId(tableName) + .compartmentId(getCompartmentId()) + .build(); + return client.getIndex(req); + } + + private List + runGetIndexes(String tableNameOrId, + boolean isTableOcid, + String namePattern, + ListIndexesRequest.LifecycleState state, + ListIndexesRequest.SortBy sortBy, + ListIndexesRequest.SortOrder sortOrder, + int limit) { + + ListIndexesRequest.Builder builder = ListIndexesRequest.builder() + .tableNameOrId(tableNameOrId) + .limit(limit); + if (!isTableOcid) { + builder.compartmentId(getCompartmentId()); + } + if (namePattern != null) { + builder.name(namePattern); + } + if (state != null) { + builder.lifecycleState(state); + } + if (sortBy != null) { + builder.sortBy(sortBy); + } + if (sortOrder != null) { + builder.sortOrder(sortOrder); + } + + ListIndexesResponse liRes; + String page = null; + List indexes = new ArrayList<>(); + + while (true) { + if (page != null) { + builder.page(page); + } + + liRes = client.listIndexes(builder.build()); + assertNotNull(liRes); + + List items = liRes.getIndexCollection().getItems(); + if (limit > 0) { + assertTrue(items.size() <= limit); + } + indexes.addAll(items); + page = liRes.getOpcNextPage(); + if (page == null) { + break; + } + } + return indexes; + } + + private void assertIndexKeys(List expKeys, List keys) { + assertEquals(expKeys.size(), keys.size()); + int i = 0; + for (IndexKey key : keys) { + assertIndexKey(expKeys.get(i++), key); + } + } + + private void assertIndexKey(IndexKey expKey, IndexKey key) { + assertEquals(expKey.getColumnName(), key.getColumnName()); + if (expKey.getJsonPath() == null) { + assertNull(key.getJsonPath()); + assertNull(key.getJsonFieldType()); + } else { + assertEquals(expKey.getJsonPath(), key.getJsonPath()); + assertTrue(expKey.getJsonFieldType() + .equalsIgnoreCase(key.getJsonFieldType())); + } + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/rest/JsonPayloadTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/rest/JsonPayloadTest.java new file mode 100644 index 00000000..b74a8c16 --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/rest/JsonPayloadTest.java @@ -0,0 +1,890 @@ +/*- + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * See the file LICENSE for redistribution information. 
+ * + */ + +package oracle.nosql.proxy.rest; + +import static oracle.nosql.proxy.protocol.JsonProtocol.ON_DEMAND; +import static oracle.nosql.proxy.protocol.JsonProtocol.PROVISIONED; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.IOException; +import java.math.BigDecimal; +import java.sql.Timestamp; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.junit.Test; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import oracle.kv.impl.api.table.RowImpl; +import oracle.kv.impl.api.table.TableBuilder; +import oracle.kv.impl.api.table.TableImpl; +import oracle.kv.table.ArrayValue; +import oracle.kv.table.FieldValueFactory; +import oracle.kv.table.MapValue; +import oracle.kv.table.RecordValue; +import oracle.kv.table.Row; +import oracle.kv.table.TimestampValue; +import oracle.nosql.common.JsonBuilder; +import oracle.nosql.common.json.JsonUtils; +import oracle.nosql.proxy.protocol.ByteInputStream; +import oracle.nosql.proxy.protocol.JsonProtocol; +import oracle.nosql.proxy.protocol.JsonProtocol.JsonArray; +import oracle.nosql.proxy.protocol.JsonProtocol.JsonObject; +import oracle.nosql.proxy.protocol.JsonProtocol.JsonPayload; + +/** + * Test JSON payload parser and builder + */ +public class JsonPayloadTest { + + @Test + public void testJsonPayload() throws Exception { + int id = 1; + boolean ifNotExists = true; + String compartmentId = "ocid1.compartment.oc1..aaaaaaaagaqos5k"; + String ddlStatement = "create table if not exists foo1(" + + "id integer, name string, age integer, " + + "info json, primary key(id))"; + int maxReadUnits = 100; + int maxWriteUnits = 200; + int maxStorageInGBs = 3; + String capacityMode = PROVISIONED; + String[] states = new String[] { + "CREATING", "ACTIVE", "UPDATING" + }; + + String fmt = "{" + + " \"id\": %s, " + + " \"ifNotExists\": %s, " + + " \"compartmentId\": %s," + + " \"ddlStatement\": %s," + + " \"tableLimits\": {" + + " \"maxReadUnits\": %s," + + " \"maxWriteUnits\": %s," + + " \"maxStorageInGBs\": %s," + + " \"capacityMode\": %s" + + " }," + + " \"state\": [%s, %s, %s]" + + "}"; + + String[] jsons = new String[3]; + + jsons[0] = String.format(fmt, String.valueOf(id), + String.valueOf(ifNotExists), + appendQuotes(compartmentId), + appendQuotes(ddlStatement), + String.valueOf(maxReadUnits), + String.valueOf(maxWriteUnits), + String.valueOf(maxStorageInGBs), + appendQuotes(capacityMode), + appendQuotes(states[0]), + appendQuotes(states[1]), + appendQuotes(states[2])); + + /* Field values are all string */ + jsons[1] = String.format(fmt, appendQuotes(String.valueOf(id)), + appendQuotes(String.valueOf(ifNotExists)), + appendQuotes(compartmentId), + appendQuotes(ddlStatement), + appendQuotes(String.valueOf(maxReadUnits)), + appendQuotes(String.valueOf(maxWriteUnits)), + appendQuotes(String.valueOf(maxStorageInGBs)), + appendQuotes(capacityMode), + appendQuotes(states[0]), + appendQuotes(states[1]), + appendQuotes(states[2])); + + /* Field name in lower case */ + jsons[2] = String.format(fmt.toLowerCase(), String.valueOf(id), + String.valueOf(ifNotExists), + appendQuotes(compartmentId), + appendQuotes(ddlStatement), + String.valueOf(maxReadUnits), + String.valueOf(maxWriteUnits), + String.valueOf(maxStorageInGBs), + 
appendQuotes(capacityMode), + appendQuotes(states[0]), + appendQuotes(states[1]), + appendQuotes(states[2])); + + JsonPayload pl; + for (String json : jsons) { + validateJson(json); + pl = new JsonPayload(json); + while (pl.hasNext()) { + if (pl.isField("id")) { + assertEquals(id, pl.readInt()); + } else if (pl.isField("ifNotExists")) { + assertEquals(ifNotExists, pl.readBool()); + } else if (pl.isField("compartmentId")) { + assertEquals(compartmentId, pl.readString()); + } else if (pl.isField("ddlStatement")) { + assertEquals(ddlStatement, pl.readString()); + } else if (pl.isField("tableLimits")) { + JsonObject jo = pl.readObject(); + while (jo.hasNext()) { + if (jo.isField("maxReadUnits")) { + assertEquals(maxReadUnits, jo.readInt()); + } else if (jo.isField("maxWriteUnits")) { + assertEquals(maxWriteUnits, jo.readInt()); + } else if (jo.isField("maxStorageInGBs")) { + assertEquals(maxStorageInGBs, jo.readInt()); + } else if (jo.isField("capacityMode")) { + assertEquals(capacityMode, jo.readString()); + } else { + fail("Unexpected field: " + jo.getCurrentField()); + } + } + } else if (pl.isField("state")) { + JsonArray ja = pl.readArray(); + int i = 0; + while (ja.hasNext()) { + String value = ja.readString(); + if (i > 3) { + fail("Unexpected value: " + value); + } + assertEquals(states[i++], value); + } + assertEquals(3, i); + } else { + fail("Unexpected field: " + pl.getCurrentField()); + } + } + pl.close(); + } + } + + @Test + public void testJsonPayloadSkipValue() throws Exception { + final String[] allFields = new String[] { + "name", "ifNotExists", "compartmentId", "count", + "tableLimits", "state" + }; + final String payload = "{" + + " \"name\": \"users\", " + + " \"ifNotExists\": true, " + + " \"compartmentId\": \"testCompartment\"," + + " \"count\": 1, " + + " \"tableLimits\": {" + + " \"maxReadUnits\": 100," + + " \"maxWriteUnits\": 100," + + " \"maxStorageInGBs\": 1" + + " }," + + " \"state\": [\"ACTIVE\", \"INACTIVE\"]" + + "}"; + validateJson(payload); + + JsonPayload pl = new JsonPayload(payload); + + /* Skip all fields */ + List fields = new ArrayList<>(); + while (pl.hasNext()) { + fields.add(pl.getCurrentField()); + pl.skipValue(); + } + pl.close(); + + assertEquals(Arrays.asList(allFields), fields); + + /* Skip unknown fields */ + pl = new JsonPayload(payload); + while (pl.hasNext()) { + if (pl.isField("name")) { + assertEquals("users", pl.readString()); + } else if (pl.isField("ifNotExists")) { + assertEquals(true, pl.readBool()); + } else if (pl.isField("compartmentId")) { + assertEquals("testCompartment", pl.readString()); + } else { + pl.skipValue(); + } + } + pl.close(); + } + + @Test + public void testEmptyObjectArray() { + JsonBuilder jb = JsonBuilder.create(); + String json = jb.startObject("o") + .startArray("a1") + .startArray(null) + .endArray() + .endArray() + .append("id", 1) + .endObject() + .startArray("a") + .startObject(null) + .startArray("a1") + .endArray() + .append("id", 1) + .endObject() + .endArray() + .append("id", 1) + .toString(); + validateJson(json); + } + + @Test + public void testTypeCasting() throws Exception { + JsonPayload pl; + + /* Integer */ + String json = "{\"field\": 1}"; + pl = new JsonPayload(json); + assertTrue(pl.hasNext()); + assertEquals("field", pl.getCurrentField()); + assertEquals(1, pl.readInt()); + assertEquals("1", pl.readString()); + try { + pl.readBool(); + fail("Expect to fail but not"); + } catch (Exception ex) { + assertTrue(ex instanceof IllegalArgumentException); + } + try { + pl.readArray(); + fail("Expect to 
fail but not"); + } catch (Exception ex) { + assertTrue(ex instanceof IllegalArgumentException); + } + try { + pl.readObject(); + fail("Expect to fail but not"); + } catch (Exception ex) { + assertTrue(ex instanceof IllegalArgumentException); + } + pl.close(); + + /* Boolean */ + json = "{\"field\": false}"; + pl = new JsonPayload(json); + assertTrue(pl.hasNext()); + assertEquals(false, pl.readBool()); + assertEquals("false", pl.readString()); + try { + pl.readInt(); + fail("Expect to fail but not"); + } catch (Exception ex) { + assertTrue(ex instanceof IllegalArgumentException); + } + try { + pl.readArray(); + fail("Expect to fail but not"); + } catch (Exception ex) { + assertTrue(ex instanceof IllegalArgumentException); + } + try { + pl.readObject(); + fail("Expect to fail but not"); + } catch (Exception ex) { + assertTrue(ex instanceof IllegalArgumentException); + } + pl.close(); + + /* Integer overflow */ + json = "{\"field\": " + Long.MAX_VALUE + "}"; + pl = new JsonPayload(json); + assertTrue(pl.hasNext()); + try { + pl.readInt(); + fail("Expect to fail but not"); + } catch (Exception ex) { + assertTrue(ex instanceof IllegalArgumentException); + } + pl.close(); + + /* String */ + json = "{\"field\": \"abc\"}"; + pl = new JsonPayload(json); + assertTrue(pl.hasNext()); + assertEquals(false, pl.readBool()); + assertEquals("abc", pl.readString()); + try { + pl.readInt(); + fail("Expect to fail but not"); + } catch (Exception ex) { + assertTrue(ex instanceof IllegalArgumentException); + } + try { + pl.readArray(); + fail("Expect to fail but not"); + } catch (Exception ex) { + assertTrue(ex instanceof IllegalArgumentException); + } + try { + pl.readObject(); + fail("Expect to fail but not"); + } catch (Exception ex) { + assertTrue(ex instanceof IllegalArgumentException); + } + pl.close(); + + json = "{\"field\": \"100\"}"; + pl = new JsonPayload(json); + assertTrue(pl.hasNext()); + assertEquals(100, pl.readInt()); + pl.close(); + + /* Array type */ + json = "{\"field\": [1, 2, 3]}"; + pl = new JsonPayload(json); + assertTrue(pl.hasNext()); + try { + pl.readBool(); + fail("Expect to fail but not"); + } catch (Exception ex) { + assertTrue(ex instanceof IllegalArgumentException); + } + try { + pl.readString(); + fail("Expect to fail but not"); + } catch (Exception ex) { + assertTrue(ex instanceof IllegalArgumentException); + } + try { + pl.readInt(); + fail("Expect to fail but not"); + } catch (Exception ex) { + assertTrue(ex instanceof IllegalArgumentException); + } + try { + pl.readObject(); + fail("Expect to fail but not"); + } catch (Exception ex) { + assertTrue(ex instanceof IllegalArgumentException); + } + JsonArray ja = pl.readArray(); + int i = 0; + while (ja.hasNext()) { + assertEquals(++i, ja.readInt()); + } + pl.close(); + + /* Object type */ + json = "{\"field\": {\"key\":1}}"; + pl = new JsonPayload(json); + assertTrue(pl.hasNext()); + try { + pl.readBool(); + fail("Expect to fail but not"); + } catch (Exception ex) { + assertTrue(ex instanceof IllegalArgumentException); + } + try { + pl.readString(); + fail("Expect to fail but not"); + } catch (Exception ex) { + assertTrue(ex instanceof IllegalArgumentException); + } + try { + pl.readInt(); + fail("Expect to fail but not"); + } catch (Exception ex) { + assertTrue(ex instanceof IllegalArgumentException); + } + try { + pl.readArray(); + fail("Expect to fail but not"); + } catch (Exception ex) { + assertTrue(ex instanceof IllegalArgumentException); + } + JsonObject jo = pl.readObject(); + assertTrue(jo.hasNext()); + 
assertEquals("key", jo.getCurrentField()); + assertEquals(1, jo.readInt()); + pl.close(); + } + + @Test + public void testNullValue() throws IOException { + String json = "{\"i\": null, " + + "\"s\": null, " + + "\"b\": null, " + + "\"ar\": null, " + + "\"obj\":null}"; + JsonPayload pl = new JsonPayload(json); + while (pl.hasNext()) { + if (pl.isField("i")) { + assertEquals(0, pl.readInt()); + } else if (pl.isField("b")) { + assertFalse(pl.readBool()); + } else if (pl.isField("s")) { + assertNull(pl.readString()); + } else if (pl.isField("ar")) { + assertNull(pl.readArray()); + } else if (pl.isField("obj")) { + assertNull(pl.readObject()); + } else { + fail("Unexpected field: " + pl.getCurrentField()); + } + } + pl.close(); + } + + /* Test JsonPayload.readValueAsJson() */ + @Test + public void testReadValueAsJson() throws Exception { + String json = "{\n" + + " \"compartmentId\": \"ocid.iad.xxx\"," + + " \"id\": 1," + + " \"phone\": [123, 456]," + + " \"name\": \"Jack Ma\"," + + " \"address\": {" + + " \"info\": {" + + " \"state\":\"MA\"," + + " \"city\":\"Burlington\"," + + " \"street\":\"35 network drive\"" + + " }," + + " \"type\": \"WORK\"," + + " \"zipCode\":\"01803\"," + + " \"lot\": 123456.7," + + " \"inUsed\": true" + + " }," + + " \"option\": \"IF_ABSENT\"," + + " \"isGetReturnRow\": false," + + " \"timeoutInMs\": 5000" + + "}"; + + JsonPayload pl = new JsonPayload(json); + runReadValueAsJsonTest(pl); + pl.close(); + + ByteBuf buf = Unpooled.wrappedBuffer(json.getBytes()); + pl = new JsonPayload(new ByteInputStream(buf)); + runReadValueAsJsonTest(pl); + pl.close(); + } + + private void runReadValueAsJsonTest(JsonPayload pl) throws IOException { + String addressJson = null; + String field = null; + int n = 0; + while (pl.hasNext()) { + field = pl.getCurrentField(); + assertNotNull(field); + if (field.equals("address")) { + addressJson = pl.readValueAsJson(); + } else if (field.equals("phone")) { + assertEquals("[123, 456]", pl.readValueAsJson()); + } else { + assertNotNull(pl.readValue()); + } + n++; + } + assertEquals(8, n); + pl.close(); + + assertNotNull(addressJson); + pl = new JsonPayload(addressJson); + assertTrue(pl.hasNext()); + assertEquals("info", pl.getCurrentField()); + assertTrue(pl.readValueAsJson() + .contains("\"street\":\"35 network drive\"")); + + assertTrue(pl.hasNext()); + assertEquals("type", pl.getCurrentField()); + assertEquals("\"WORK\"", pl.readValueAsJson()); + + assertTrue(pl.hasNext()); + assertEquals("zipCode", pl.getCurrentField()); + assertEquals("\"01803\"", pl.readValueAsJson()); + + assertTrue(pl.hasNext()); + assertEquals("lot", pl.getCurrentField()); + assertEquals("123456.7", pl.readValueAsJson()); + + assertTrue(pl.hasNext()); + assertEquals("inUsed", pl.getCurrentField()); + assertEquals("true", pl.readValueAsJson()); + + assertFalse(pl.hasNext()); + pl.close(); + } + + private String appendQuotes(String str) { + return "\"" + str + "\""; + } + + private void validateJson(String json) { + try { + JsonUtils.parseJsonNode(json); + } catch (Exception ex) { + fail("Invalid json: " + json); + } + } + + @Test + public void testJsonBuilder() throws IOException { + + int id = -1; + boolean ifNotExists = true; + String compartmentId = "ocid1.compartment.oc1..aaaaaaaagaqos5k"; + String ddlStatement = "create table if not exists foo1(\n" + + "\tid integer, \n" + + "\tname string default \"n/a\", \n" + + "age integer default -1,\n " + + "info json,\n" + + "primary key(id))"; + int maxReadUnits = 100; + int maxWriteUnits = 200; + int maxStorageInGBs = 3; + 
String capacityMode = ON_DEMAND; + String[] states = new String[] { + "CREATING", "ACTIVE", "UPDATING" + }; + + JsonBuilder jb = JsonBuilder.create(); + jb.append("id", id); + jb.append("ifNotExists", ifNotExists); + jb.append("compartmentId", compartmentId); + jb.append("ddlStatement", ddlStatement); + jb.startObject("tableLimits"); + jb.append("maxReadUnits", maxReadUnits); + jb.append("maxWriteUnits", maxWriteUnits); + jb.append("maxStorageInGBs", maxStorageInGBs); + jb.append("capacityMode", capacityMode); + jb.endObject(); + jb.startArray("states"); + for (String state : states) { + jb.append(state); + } + jb.endArray(); + + String json = jb.toString(); + validateJson(json); + + JsonPayload pl = new JsonPayload(json); + while (pl.hasNext()) { + if (pl.isField("id")) { + assertEquals(id, pl.readInt()); + } else if (pl.isField("ifNotExists")) { + assertEquals(ifNotExists, pl.readBool()); + } else if (pl.isField("compartmentId")) { + assertEquals(compartmentId, pl.readString()); + } else if (pl.isField("ddlStatement")) { + assertEquals(ddlStatement, pl.readString()); + } else if (pl.isField("tableLimits")) { + JsonObject jo = pl.readObject(); + while (jo.hasNext()) { + if (jo.isField("maxReadUnits")) { + assertEquals(maxReadUnits, jo.readInt()); + } else if (jo.isField("maxWriteUnits")) { + assertEquals(maxWriteUnits, jo.readInt()); + } else if (jo.isField("maxStorageInGBs")) { + assertEquals(maxStorageInGBs, jo.readInt()); + } else if (jo.isField("capacityMode")) { + assertEquals(capacityMode, jo.readString()); + } else { + fail("Unexpected field: " + jo.getCurrentField()); + } + } + } else if (pl.isField("states")) { + JsonArray ja = pl.readArray(); + int i = 0; + while (ja.hasNext()) { + assertTrue(i < states.length); + assertEquals(states[i++], ja.readString()); + } + assertEquals(states.length, i); + } else { + fail("Unexpected field: " + pl.getCurrentField()); + } + } + + pl.close(); + } + + @Test + public void testEscapeCharacter() throws Exception { + final String value = "\tt\\\bb\"c\"\r\n"; + + JsonBuilder jb = JsonBuilder.create(); + jb.append("key", value); + String json = jb.toString(); + validateJson(json); + + JsonPayload pl = new JsonPayload(json); + assertTrue(pl.hasNext()); + assertEquals("key", pl.getCurrentField()); + String value1 = pl.readString(); + assertEquals(value, value1); + pl.close(); + } + + @Test + public void testTagsToJson() throws Exception { + /* freeform tags: Map<String, String> */ + Map<String, String> freeformTags = new HashMap<>(); + freeformTags.put("createBy", "OracleNosql"); + freeformTags.put("accountType", "IAMUser"); + + String json = JsonProtocol.tagsToJson(freeformTags); + Map tags = JsonUtils.readValue(json, Map.class); + assertEquals(freeformTags, tags); + + /* + * predefined tags: Map<String, Map<String, Object>> + * Object can be Integer, String or Boolean + */ + Map<String, Map<String, Object>> definedTags = new HashMap<>(); + Map<String, Object> props = new HashMap<>(); + props.put("Standby", true); + props.put("Purpose", "WebTier"); + definedTags.put("Operations", props); + props = new HashMap<>(); + props.put("Operator", "user1"); + props.put("number", 10.0); + definedTags.put("ZOperations", props); + + json = JsonProtocol.tagsToJson(definedTags); + tags = JsonUtils.readValue(json, Map.class); + assertEquals(definedTags, tags); + } + + /** + * Test JsonProtocol.buildFieldValue(), which is used to output a FieldValue as JSON.
+ */ + @Test + public void testBuildFieldValue() { + TableImpl table = TableBuilder.createTableBuilder("foo") + .addInteger("id") + .addString("name") + .addBoolean("valid") + .addBinary("photo") + .addFixedBinary("code", 16) + .addLong("count") + .addFloat("height") + .addDouble("income") + .addEnum("color", new String[]{"red", "yellow", "blue"}, null) + .addNumber("storage") + .addTimestamp("time", 3) + .addField("ar", + TableBuilder.createArrayBuilder("ar") + .addField(TableBuilder.createRecordBuilder("rec") + .addInteger("ari") + .addString("ars") + .build()) + .build()) + .addField("ma", TableBuilder.createMapBuilder("ma") + .addField(TableBuilder.createArrayBuilder() + .addInteger() + .build()) + .build()) + .addField("rma", + TableBuilder.createRecordBuilder("rma") + .addInteger("rid") + .addField("rm", TableBuilder.createMapBuilder() + .addString() + .build()) + .addField("ra", TableBuilder.createArrayBuilder() + .addTimestamp(6) + .build()) + .build()) + .primaryKey("id") + .buildTable(); + + /* Test null values */ + RowImpl row = table.createRow(); + row.put("id", 1); + row.addMissingFields(); + roundTrip(row); + + row.clear(); + row.put("id", 2); + row.put("name","name1"); + row.put("valid", true); + row.put("photo", "this is a phone".getBytes()); + row.putFixed("code", genBytes(16)); + row.put("count", 1234567890123456789L); + row.put("height", (float)12.3); + row.put("income", 11313213.123412414); + row.putEnum("color", "blue"); + row.putNumber("storage", new BigDecimal("9999999999999999999999")); + row.put("time", new Timestamp(System.currentTimeMillis())); + + ArrayValue av = row.putArray("ar"); + RecordValue rv = av.addRecord(); + rv.put("ari", -1000); + rv.put("ars", "hello ndcs"); + rv = av.addRecord(); + rv.put("ari", 1001); + rv.put("ars", "hello oci"); + + MapValue mv = row.putMap("ma"); + av = mv.putArray("k1"); + av.add(1).add(200).add(-200); + + RecordValue rma = row.putRecord("rma"); + rma.put("rid", 1); + mv = rma.putMap("rm"); + mv.put("k1", "v1").put("k2", "v2").put("k3", ""); + av = rma.putArray("ra"); + long ms = System.currentTimeMillis(); + for (int i = 0; i < 3; i++ ) { + Timestamp ts = new Timestamp(ms + i); + av.add(ts); + } + roundTrip(row); + + /* Test timestamp string format */ + Timestamp ts = new Timestamp(System.currentTimeMillis()); + ts.setNanos(987654321); + TimestampValue tsv; + TimestampValue tsv1; + String dateStr; + for (int i = 0; i < 10; i++) { + JsonBuilder jb = JsonBuilder.create(); + tsv = FieldValueFactory.createTimestamp(ts, i); + + /* Build Json string for TimestampValue */ + JsonProtocol.buildFieldValue(jb, null, tsv); + dateStr = jb.toString(); + + /* Remove '{' and '}' surrounded the timestamp string */ + dateStr = dateStr.substring(1, dateStr.length() - 1); + assertTrue(dateStr.endsWith("Z\"")); + + /* Create TimestampValue from the Json String */ + tsv1 = FieldValueFactory.createValueFromJson( + tsv.getDefinition(), dateStr).asTimestamp(); + assertEquals(tsv, tsv1); + } + + /* Test json null */ + table = TableBuilder.createTableBuilder("foo") + .addInteger("id") + .addJson("info", null) + .primaryKey("id") + .buildTable(); + row = table.createRow(); + row.put("id", 1); + row.putJson("info", + "{" + + "\"name\": null, " + + "\"phones\":[123, null, 456], " + + "\"addresses\": [" + + "{\"city\": \"boston\", \"zipcode\": null}, " + + "null," + + "{\"city\": null, \"zipcode\":01803}" + + "]" + + "}"); + roundTrip(row); + + /* + * Test the 3 double special values NaN, Infinity and -Infinity are + * strings in the returning json of 
JsonProtocol.buildFieldValue(). + */ + table = TableBuilder.createTableBuilder("foo") + .addInteger("id") + .addDouble("d0") + .addDouble("d1") + .addDouble("d2") + .addDouble("d3") + .primaryKey("id") + .buildTable(); + row = table.createRow(); + row.put("id", 1) + .put("d0", Double.MAX_VALUE) + .put("d1", Double.NaN) + .put("d2", Double.POSITIVE_INFINITY) + .put("d3", Double.NEGATIVE_INFINITY); + + JsonBuilder jb = JsonBuilder.create(); + JsonProtocol.buildFieldValue(jb, null, row); + String json = jb.toString(); + + assertTrue(json.contains("\"d0\":" + Double.MAX_VALUE)); + assertTrue(json.contains("\"d1\":\"NaN\"")); + assertTrue(json.contains("\"d2\":\"Infinity\"")); + assertTrue(json.contains("\"d3\":\"-Infinity\"")); + } + + /* + * This test is in response to Jira NOSQL-8154 that requires timestamps + * to be accepted in RFC 3339 Nano format and reject certain strings with + * a message that mentions RFC 3339. The strings used come from the Jira + * and associated Confluence document. + */ + @Test + public void testParseTimestamp() { + String[] strings = new String[] { + "2020-07-01T13:01:25.123", + "2020-07-01T13:01:25.123Z", + "2020-07-01T10:01:25.123-03:00", + "2020-07-01T13:01:25.123456789", + "2020-07-01T13:01:25.123456789Z", + "2020-07-01T21:01:25.123456789+08:00" + }; + + String[] goodStrings = new String[] { + "2020-07-14t02:40:00z", + "2020-01-01T12:00:27.87+00:20", + "2020-01-01T12:00:27.873+00:20", + "2020-01-01T12:00:27.873834+00:20", + "2020-01-01T12:00:27.873834939+00:20" + }; + + String[] badStrings = new String[] { + "1937-01-01T12", + "1937-01-01T12:", + "1937-01-01T12Z", + "1937-01-01T12:Z", + "1937-01-01T12+20:00", + "1937-01-01T12:+20:00", + "1937-01-01T12Z+20:00", + "1937-01-01T12:Z+20:00" + }; + + long expEpochMs = 1593608485123L; + for (String ts : strings) { + assertEquals(expEpochMs, RestDataService.parseTimestamp(ts)); + } + + for (String ts : goodStrings) { + RestDataService.parseTimestamp(ts); + } + + for (String ts : badStrings) { + try { + RestDataService.parseTimestamp(ts); + } catch (Exception e) { + assertTrue(e.getMessage().contains("not in RFC")); + } + } + } + + private void roundTrip(Row row) { + JsonBuilder jb = JsonBuilder.create(); + JsonProtocol.buildFieldValue(jb, null, row); + String json = jb.toString(); + + /* Remove outmost '{' and '}' */ + json = json.substring(1, json.length() - 1); + + Row row1 = row.getTable().createRowFromJson(json, true); + assertEquals(row, row1); + } + + private static byte[] genBytes(int length) { + byte[] bytes = new byte[length]; + for (int i = 0; i < length; i++) { + bytes[i] = (byte)(i % 256); + } + return bytes; + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/rest/QueryTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/rest/QueryTest.java new file mode 100644 index 00000000..5dd6d029 --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/rest/QueryTest.java @@ -0,0 +1,1044 @@ +/*- + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * See the file LICENSE for redistribution information. 
+ * + */ + +package oracle.nosql.proxy.rest; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeTrue; + +import java.util.HashMap; +import java.util.Map; + +import org.junit.Test; + +import com.oracle.bmc.model.BmcException; +import com.oracle.bmc.nosql.model.PreparedStatement; +import com.oracle.bmc.nosql.model.QueryDetails; +import com.oracle.bmc.nosql.model.QueryResultCollection; +import com.oracle.bmc.nosql.model.RequestUsage; +import com.oracle.bmc.nosql.model.StatementSummary; +import com.oracle.bmc.nosql.model.TableLimits; +import com.oracle.bmc.nosql.model.StatementSummary.Operation; +import com.oracle.bmc.nosql.requests.PrepareStatementRequest; +import com.oracle.bmc.nosql.requests.QueryRequest; +import com.oracle.bmc.nosql.requests.SummarizeStatementRequest; +import com.oracle.bmc.nosql.responses.PrepareStatementResponse; +import com.oracle.bmc.nosql.responses.QueryResponse; +import com.oracle.bmc.nosql.responses.SummarizeStatementResponse; + +/** + * Test query APIs: + * o prepare + * o query + * o summarize + */ +public class QueryTest extends RestAPITestBase { + + @Test + public void testQuery() { + final String tableName = "foo"; + createTestTable(tableName); + + final int numRows = 31; + for (int i = 0; i < numRows; i++) { + Map value = createValue(i); + putRow(tableName, value); + } + + String query = "select * from foo"; + + PrepareStatementRequest prepReq = PrepareStatementRequest.builder() + .compartmentId(getCompartmentId()) + .statement(query) + .build(); + PrepareStatementResponse prepRes = client.prepareStatement(prepReq); + assertNotNull(prepRes.getPreparedStatement()); + String prepStmt = prepRes.getPreparedStatement().getStatement(); + assertNotNull(prepStmt); + + /* Query with prepred statment */ + QueryDetails infoPrep = QueryDetails.builder() + .compartmentId(getCompartmentId()) + .statement(prepStmt) + .isPrepared(true) + .build(); + + /* Query */ + QueryDetails info = QueryDetails.builder() + .compartmentId(getCompartmentId()) + .statement(query) + .build(); + + /* Run query */ + int totalCost1 = runQuery(info, numRows, this::validateSimpleRow); + int totalCost2 = runQuery(infoPrep, numRows, this::validateSimpleRow); + assertEquals(totalCost1, totalCost2 + 2); + + /* Run query with limit */ + int limit = 10; + totalCost1 = runQueryWithLimit(info, limit, numRows, + this::validateSimpleRow); + totalCost2 = runQueryWithLimit(infoPrep, limit, numRows, + this::validateSimpleRow); + int batches = (numRows + (limit - 1))/ limit; + assertEquals(totalCost1, totalCost2 + batches * 2); + + /* Run query with maxReadKB */ + int maxReadKB = 15; + info = QueryDetails.builder() + .compartmentId(getCompartmentId()) + .statement(query) + .maxReadInKBs(maxReadKB) + .build(); + + infoPrep = QueryDetails.builder() + .compartmentId(getCompartmentId()) + .statement(prepStmt) + .isPrepared(true) + .maxReadInKBs(maxReadKB) + .build(); + + totalCost1 = runQueryWithMaxReadKB(info, maxReadKB, numRows, + this::validateSimpleRow); + totalCost2 = runQueryWithMaxReadKB(infoPrep, maxReadKB, numRows, + this::validateSimpleRow); + assertTrue(totalCost1 > totalCost2); + + /* 0 row returned */ + query = "select * from foo where id = -1"; + info = QueryDetails.builder() + .compartmentId(getCompartmentId()) + .statement(query) + .build(); + 
runQuery(info, 0, null); + } + + @Test + public void testQueryBadRequest() { + QueryDetails info; + QueryRequest req; + + String query = "select * from foo"; + info = QueryDetails.builder() + .compartmentId(getCompartmentId()) + .statement(query) + .build(); + + /* Invalid limit, it should not be negative value */ + req = QueryRequest.builder() + .limit(-1) + .queryDetails(info) + .build(); + runQueryFail(req, 400 /* bad request */); + + /* Invalid page: page should not be empty or contain white space only */ + req = QueryRequest.builder() + .page("") + .queryDetails(info) + .build(); + runQueryFail(req, 400 /* bad request */); + + /* Invalid page, Cannot deserialize value of type `byte[]` */ + req = QueryRequest.builder() + .page("invalid") + .queryDetails(info) + .build(); + runQueryFail(req, 400 /* bad request */); + + /* Invalid compartmentId: compartmentId should not be null */ + info = QueryDetails.builder() + .statement(query) + .build(); + req = QueryRequest.builder() + .queryDetails(info) + .build(); + runQueryFail(req, 400 /* bad request */); + + /* + * Invalid compartmentId: compartmentId should not be empty or contain + * white space only + */ + info = QueryDetails.builder() + .compartmentId(" ") + .statement(query) + .build(); + req = QueryRequest.builder() + .queryDetails(info) + .build(); + runQueryFail(req, 400 /* bad request */); + + /* + * Invalid statement: statement should not be null + */ + info = QueryDetails.builder() + .compartmentId(getCompartmentId()) + .build(); + req = QueryRequest.builder() + .queryDetails(info) + .build(); + runQueryFail(req, 400 /* bad request */); + + /* + * Invalid statement: statement should not be empty or contain white + * space only + */ + info = QueryDetails.builder() + .compartmentId(getCompartmentId()) + .statement(" ") + .build(); + req = QueryRequest.builder() + .queryDetails(info) + .build(); + runQueryFail(req, 400 /* bad request */); + + /* + * Invalid maxReadInKBs, it should not be negative value: -1 + */ + info = QueryDetails.builder() + .compartmentId(getCompartmentId()) + .statement(query) + .maxReadInKBs(-1) + .build(); + req = QueryRequest.builder() + .queryDetails(info) + .build(); + runQueryFail(req, 400 /* bad request */); + + /* + * Invalid timeoutInMs, it should not be negative value: -1 + */ + info = QueryDetails.builder() + .compartmentId(getCompartmentId()) + .statement(query) + .timeoutInMs(-1) + .build(); + req = QueryRequest.builder() + .queryDetails(info) + .build(); + runQueryFail(req, 400 /* bad request */); + + /* + * Table not found + */ + info = QueryDetails.builder() + .compartmentId(getCompartmentId()) + .statement(query) + .build(); + req = QueryRequest.builder() + .queryDetails(info) + .build(); + runQueryFail(req, 404 /* Table not found */); + + /* + * Invalid serialized prepared statement + */ + info = QueryDetails.builder() + .compartmentId(getCompartmentId()) + .statement(query) + .isPrepared(true) + .build(); + req = QueryRequest.builder() + .queryDetails(info) + .build(); + runQueryFail(req, 400 /* bad request */); + + /* Complex query is not supported */ + createTestTable("foo"); + query = "select * from foo order by name"; + info = QueryDetails.builder() + .compartmentId(getCompartmentId()) + .statement(query) + .build(); + req = QueryRequest.builder() + .queryDetails(info) + .build(); + runQueryFail(req, 400); + } + + private void runQueryFail(QueryRequest req, int expCode) { + try { + client.query(req); + fail("expect to fail but not"); + } catch (BmcException ex) { + assertEquals(expCode , 
ex.getStatusCode()); + } + } + + /* TODO: fix below commented type, add test for complex types */ + @Test + public void testBindVariables() { + String tableName = "foo"; + String ddl = "create table if not exists " + tableName + "(" + + "id integer, s String, i integer, l long, f float, d double, " + + "bl boolean, n number, t timestamp(3), bi binary, " + + "primary key(id))"; + + /* Create table */ + createTable(tableName, ddl); + + /* Put single row */ + int id = 1; + String s = "abc"; + int i = Integer.MIN_VALUE; + long l = Long.MIN_VALUE; + float f = Float.MIN_VALUE; + double d = Double.MIN_VALUE; + boolean bl = false; + //BigDecimal n = BigDecimal.valueOf(Long.MAX_VALUE, 10); + long n = Long.MAX_VALUE; + String dt = "2019-08-20T12:12:39.123Z"; + String bi = "AAECAw=="; + Map row = new HashMap(); + row.put("id", id); + row.put("s", s); + row.put("i", i); + row.put("l", l); + row.put("f", f); + row.put("d", d); + row.put("bl", bl); + row.put("n", n); + row.put("t", dt); + row.put("bi", bi); + putRow(tableName, row); + + PrepareStatementRequest prepReq; + PrepareStatementResponse prepRes; + QueryDetails qinfo; + Map variables = new HashMap(); + + String query = "declare $i integer; $l long; $f float; $d double; " + + "$bl boolean; $n number;" + + "select * from " + tableName + + " where i = $i and l = $l and f = $f and d = $d" + + " and bl = $bl and n = $n"; + + prepReq = PrepareStatementRequest.builder() + .compartmentId(getCompartmentId()) + .statement(query) + .build(); + prepRes = client.prepareStatement(prepReq); + String prepStmt = prepRes.getPreparedStatement().getStatement(); + assertNotNull(prepStmt); + + variables.clear(); + variables.put("$i", i); + variables.put("$l", l); + variables.put("$f", f); + variables.put("$d", d); + variables.put("$bl", bl); + variables.put("$n", n); + //variables.put("$t", dt); + //variables.put("$bi", bi); + qinfo = QueryDetails.builder() + .compartmentId(getCompartmentId()) + .statement(prepStmt) + .variables(variables) + .isPrepared(true) + .build(); + runQuery(qinfo, 1, + new RowValidator() { + @Override + public void check(Map value) { + assertEquals(row.size(), value.size()); + for (String key : row.keySet()) { + Object exp = row.get(key); + Object val = value.get(key); + + if (key.equals("f")) { + assertTrue(val instanceof Double); + assertTrue(Float.compare(((Float)exp), + ((Double)val).floatValue()) == 0); + } else { + assertEquals(exp, val); + } + } + } + }); + } + + @Test + public void testPrepare() { + final String tableName = "foo"; + createTestTable(tableName); + + PrepareStatementRequest req; + PrepareStatementResponse res; + PreparedStatement pstmt; + + String query = "select * from foo"; + req = PrepareStatementRequest.builder() + .compartmentId(getCompartmentId()) + .statement(query) + .build(); + res = client.prepareStatement(req); + pstmt = res.getPreparedStatement(); + assertNotNull(pstmt); + assertNotNull(pstmt.getStatement()); + assertNull(pstmt.getQueryPlan()); + RequestUsage usage = pstmt.getUsage(); + assertNotNull(usage); + assertTrue(usage.getReadUnitsConsumed() == 2); + assertTrue(usage.getWriteUnitsConsumed() == 0); + + query = "select * from foo where id > 10 and contains(name, \"abc\")"; + req = PrepareStatementRequest.builder() + .compartmentId(getCompartmentId()) + .statement(query) + .isGetQueryPlan(true) + .build(); + res = client.prepareStatement(req); + pstmt = res.getPreparedStatement(); + assertNotNull(pstmt.getQueryPlan()); + } + + @Test + public void testPrepareBadRequest() { + + PrepareStatementRequest req; 
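+        /*
+         * Each case below builds an invalid prepare request and expects the
+         * service to reject it with the HTTP status passed to
+         * runPrepareStatementFail().
+         */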
+ + /* + * CompartmentId should not be null or empty + */ + req = PrepareStatementRequest.builder() + .compartmentId("") + .statement("select * from foo") + .build(); + runPrepareStatementFail(req, 400 /* bad request */); + + /* + * Statement should not be empty + */ + req = PrepareStatementRequest.builder() + .compartmentId(getCompartmentId()) + .statement("") + .build(); + runPrepareStatementFail(req, 400 /* bad request */); + + /* + * Table not found + */ + req = PrepareStatementRequest.builder() + .compartmentId(getCompartmentId()) + .statement("select * from notExists") + .build(); + runPrepareStatementFail(req, 404 /* Table not found */); + + /* + * The query and prepare methods can not be used for DDL statements + */ + req = PrepareStatementRequest.builder() + .compartmentId(getCompartmentId()) + .statement("create table foo(id integer, name string, " + + "primary key(id))") + .build(); + runPrepareStatementFail(req, 400 /* bad request */); + + /* + * Unsupported query: queries with order by expressions + */ + createTestTable("test"); + req = PrepareStatementRequest.builder() + .compartmentId(getCompartmentId()) + .statement("select * from test order by id") + .build(); + runPrepareStatementFail(req, 400 /* bad request */); + } + + private void runPrepareStatementFail(PrepareStatementRequest req, + int expCode) { + try { + client.prepareStatement(req); + fail("expect to fail but not"); + } catch (BmcException ex) { + assertEquals(expCode , ex.getStatusCode()); + checkErrorMessage(ex); + } + } + + @Test + public void testSummarize() { + String tableName = "foo"; + String tableDdl = "create table if not exists foo (" + + "id integer, name string, primary key(id))"; + + SummarizeStatementRequest req; + SummarizeStatementResponse res; + StatementSummary info; + + /* Create table */ + req = SummarizeStatementRequest.builder() + .compartmentId(getCompartmentId()) + .statement(tableDdl) + .build(); + res = client.summarizeStatement(req); + assertNotNull(res.getStatementSummary()); + info = res.getStatementSummary(); + assertEquals(Operation.CreateTable, info.getOperation()); + assertEquals(tableName, info.getTableName()); + assertNull(info.getIndexName()); + assertTrue(info.getIsIfNotExists()); + assertNull(info.getIsIfExists()); + assertNull(info.getSyntaxError()); + + /* Create index */ + String ddl = "create index idxName on foo(name)"; + req = SummarizeStatementRequest.builder() + .compartmentId(getCompartmentId()) + .statement(ddl) + .build(); + res = client.summarizeStatement(req); + assertNotNull(res.getStatementSummary()); + info = res.getStatementSummary(); + assertEquals(Operation.CreateIndex, info.getOperation()); + assertEquals(tableName, info.getTableName()); + assertEquals("idxName", info.getIndexName()); + assertFalse(info.getIsIfNotExists()); + assertNull(info.getIsIfExists()); + assertNull(info.getSyntaxError()); + + /* Drop table */ + ddl = "drop table if exists foo"; + req = SummarizeStatementRequest.builder() + .compartmentId(getCompartmentId()) + .statement(ddl) + .build(); + res = client.summarizeStatement(req); + assertNotNull(res.getStatementSummary()); + info = res.getStatementSummary(); + assertEquals(Operation.DropTable, info.getOperation()); + assertEquals(tableName, info.getTableName()); + assertNull(info.getIndexName()); + assertTrue(info.getIsIfExists()); + assertNull(info.getIsIfNotExists()); + assertNull(info.getSyntaxError()); + + /* Drop index */ + ddl = "drop index idxName on foo"; + req = SummarizeStatementRequest.builder() + 
.compartmentId(getCompartmentId()) + .statement(ddl) + .build(); + res = client.summarizeStatement(req); + assertNotNull(res.getStatementSummary()); + info = res.getStatementSummary(); + assertEquals(Operation.DropIndex, info.getOperation()); + assertEquals(tableName, info.getTableName()); + assertEquals("idxName", info.getIndexName()); + assertFalse(info.getIsIfExists()); + assertNull(info.getIsIfNotExists()); + assertNull(info.getSyntaxError()); + + /* + * Create the table before alter table if run locally, this is to + * satisfy LocalTenantManager.createPrepareCB() that provides + * TableMetadataHelper to check the existence of the table. + */ + if (!cloudRunning) { + createTable(tableName, tableDdl); + } + + /* Alter table */ + ddl = "alter table foo (add address string)"; + req = SummarizeStatementRequest.builder() + .compartmentId(getCompartmentId()) + .statement(ddl) + .build(); + res = client.summarizeStatement(req); + assertNotNull(res.getStatementSummary()); + info = res.getStatementSummary(); + assertEquals(Operation.AlterTable, info.getOperation()); + assertEquals(tableName, info.getTableName()); + assertNull(info.getIndexName()); + assertNull(info.getIsIfNotExists()); + assertNull(info.getIsIfExists()); + assertNull(info.getSyntaxError()); + + /* Query */ + String query = "select * from foo where id = 1"; + req = SummarizeStatementRequest.builder() + .compartmentId(getCompartmentId()) + .statement(query) + .build(); + res = client.summarizeStatement(req); + assertNotNull(res.getStatementSummary()); + info = res.getStatementSummary(); + assertEquals(Operation.Select, info.getOperation()); + assertEquals(tableName, info.getTableName()); + assertNull(info.getIndexName()); + assertNull(info.getIsIfNotExists()); + assertNull(info.getIsIfExists()); + assertNull(info.getSyntaxError()); + + /* Insert */ + query = "insert into foo values(1, 'test')"; + req = SummarizeStatementRequest.builder() + .compartmentId(getCompartmentId()) + .statement(query) + .build(); + res = client.summarizeStatement(req); + assertNotNull(res.getStatementSummary()); + info = res.getStatementSummary(); + assertEquals(Operation.Insert, info.getOperation()); + assertEquals(tableName, info.getTableName()); + assertNull(info.getIndexName()); + assertNull(info.getIsIfNotExists()); + assertNull(info.getIsIfExists()); + assertNull(info.getSyntaxError()); + + /* Update */ + query = "update foo set name = 'test' where id = 1"; + req = SummarizeStatementRequest.builder() + .compartmentId(getCompartmentId()) + .statement(query) + .build(); + res = client.summarizeStatement(req); + assertNotNull(res.getStatementSummary()); + info = res.getStatementSummary(); + assertEquals(Operation.Update, info.getOperation()); + assertEquals(tableName, info.getTableName()); + assertNull(info.getIndexName()); + assertNull(info.getIsIfNotExists()); + assertNull(info.getIsIfExists()); + assertNull(info.getSyntaxError()); + + /* Delete */ + query = "delete from foo where id = 1"; + req = SummarizeStatementRequest.builder() + .compartmentId(getCompartmentId()) + .statement(query) + .build(); + res = client.summarizeStatement(req); + assertNotNull(res.getStatementSummary()); + info = res.getStatementSummary(); + assertEquals(Operation.Delete, info.getOperation()); + assertEquals(tableName, info.getTableName()); + assertNull(info.getIndexName()); + assertNull(info.getIsIfNotExists()); + assertNull(info.getIsIfExists()); + assertNull(info.getSyntaxError()); + + /* Syntax error */ + query = "create table foo (id integer, name string)"; + req = 
SummarizeStatementRequest.builder() + .compartmentId(getCompartmentId()) + .statement(query) + .build(); + res = client.summarizeStatement(req); + assertNotNull(res.getStatementSummary()); + info = res.getStatementSummary(); + assertNotNull(info.getSyntaxError()); + } + + @Test + public void testSummarizeBadRequest() { + + SummarizeStatementRequest req; + + /* + * CompartmentId should not be null or empty + */ + req = SummarizeStatementRequest.builder() + .compartmentId("") + .statement("select * from foo") + .build(); + runSummarizeStatementFail(req, 400); + + /* + * Statement should not be empty + */ + req = SummarizeStatementRequest.builder() + .compartmentId(getCompartmentId()) + .statement("") + .build(); + runSummarizeStatementFail(req, 400); + } + + @Test + public void testQueryTableNameMapping() + throws Exception { + + /* + * Run this test for minicloud only + * + * This test directly calls SC API to create table to test proxy cache, + * it can only be run in minicloud. + */ + assumeTrue("Skipping testQueryTableNameMapping() if not minicloud test", + useMiniCloud); + + String tableName = "testQueryTableNameMapping"; + String ddl = "create table " + tableName + "(" + + "id integer, name String, age integer, " + + "primary key(id))"; + String ddl2 = "create table " + tableName + "(" + + "id1 integer, name String, age integer, " + + "primary key(id1))"; + + /* drop non-existing table */ + dropTable(tableName); + + /* re-create table */ + scRecreateTable(getTenantId(), getCompartmentId(), tableName, ddl); + + String query = "select * from " + tableName + " where id = '1'"; + String query2 = "select * from " + tableName + " where id1 = '1'"; + + /* Prepared query to cache mapping */ + PrepareStatementRequest prepReq = PrepareStatementRequest.builder() + .compartmentId(getCompartmentId()) + .statement(query) + .build(); + PrepareStatementResponse prepRes = client.prepareStatement(prepReq); + assertNotNull(prepRes.getPreparedStatement()); + String prepStmt = prepRes.getPreparedStatement().getStatement(); + assertNotNull(prepStmt); + + QueryDetails infoPrep = QueryDetails.builder() + .compartmentId(getCompartmentId()) + .statement(prepStmt) + .isPrepared(true) + .build(); + + /* Run query */ + QueryRequest qryReq = QueryRequest.builder() + .queryDetails(infoPrep) + .build(); + client.query(qryReq); + client.query(qryReq); + + /* re-create table */ + scRecreateTable(getTenantId(), getCompartmentId(), tableName, ddl2); + + prepReq = PrepareStatementRequest.builder() + .compartmentId(getCompartmentId()) + .statement(query2) + .build(); + prepRes = client.prepareStatement(prepReq); + assertNotNull(prepRes.getPreparedStatement()); + prepStmt = prepRes.getPreparedStatement().getStatement(); + assertNotNull(prepStmt); + + /* Query with prepred statment */ + infoPrep = QueryDetails.builder() + .compartmentId(getCompartmentId()) + .statement(prepStmt) + .isPrepared(true) + .build(); + + /* Run query */ + qryReq = QueryRequest.builder() + .queryDetails(infoPrep) + .build(); + client.query(qryReq); + + /* Query to cache mapping */ + QueryDetails info = QueryDetails.builder() + .compartmentId(getCompartmentId()) + .statement(query2) + .build(); + qryReq = QueryRequest.builder() + .queryDetails(info) + .build(); + client.query(qryReq); + client.query(qryReq); + + /* re-create table */ + scRecreateTable(getTenantId(), getCompartmentId(), tableName, ddl); + + /* Query */ + info = QueryDetails.builder() + .compartmentId(getCompartmentId()) + .statement(query) + .build(); + qryReq = QueryRequest.builder() 
+ .queryDetails(info) + .build(); + client.query(qryReq); + client.query(qryReq); + + /* re-create table */ + scRecreateTable(getTenantId(), getCompartmentId(), tableName, ddl2); + + info = QueryDetails.builder() + .compartmentId(getCompartmentId()) + .statement(query2) + .build(); + qryReq = QueryRequest.builder() + .queryDetails(info) + .build(); + client.query(qryReq); + + /* Summarize to cache mapping */ + SummarizeStatementRequest req = SummarizeStatementRequest.builder() + .compartmentId(getCompartmentId()) + .statement(query) + .build(); + SummarizeStatementResponse res = client.summarizeStatement(req); + assertNotNull(res.getStatementSummary()); + + /* re-create table */ + scRecreateTable(getTenantId(), getCompartmentId(), tableName, ddl); + + req = SummarizeStatementRequest.builder() + .compartmentId(getCompartmentId()) + .statement(query) + .build(); + res = client.summarizeStatement(req); + assertNotNull(res.getStatementSummary()); + } + + @Test + public void testQueryChildTable() { + final String createTDdl = + "create table t(id integer, s string, primary key(id))"; + final String createTADdl = + "create table t.a(ida integer, s string, primary key(ida))"; + final String createTABDdl = + "create table t.a.b(idb integer, s string, primary key(idb))"; + final TableLimits limits = TableLimits.builder() + .maxReadUnits(1000) + .maxWriteUnits(200) + .maxStorageInGBs(1) + .build(); + + createTable("t", createTDdl, limits); + createTable("t.a", createTADdl, null /* limits */); + createTable("t.a.b", createTABDdl, null /* limits */); + + int numId = 3; + int numIdaPerId = 3; + int numIdbPerIda = 3; + for (int i = 0; i < numId; i++) { + putRow("t", makeTRow(i)); + for (int j = 0; j < numIdaPerId; j++) { + putRow("t.a", makeTARow(i, j)); + for (int k = 0; k < numIdbPerIda; k++) { + putRow("t.a.b", makeTABRow(i, j, k)); + } + } + } + + String stmt; + QueryDetails qryInfo; + int maxReadKB; + + stmt = "select * from t.a"; + qryInfo = QueryDetails.builder() + .compartmentId(getCompartmentId()) + .statement(stmt) + .build(); + runQuery(qryInfo, 9, null); + + stmt = "select * from t.a.b"; + qryInfo = QueryDetails.builder() + .compartmentId(getCompartmentId()) + .statement(stmt) + .build(); + runQuery(qryInfo, 27, null); + runQueryWithLimit(qryInfo, 10, 27, null); + + maxReadKB = 15; + qryInfo = QueryDetails.builder() + .compartmentId(getCompartmentId()) + .statement(stmt) + .maxReadInKBs(maxReadKB) + .build(); + runQueryWithMaxReadKB(qryInfo, maxReadKB, 27, null); + + stmt = "select * from nested tables(t descendants(t.a a, t.a.b b))"; + qryInfo = QueryDetails.builder() + .compartmentId(getCompartmentId()) + .statement(stmt) + .build(); + runQuery(qryInfo, 27, null); + runQueryWithLimit(qryInfo, 10, 27, null); + + maxReadKB = 15; + qryInfo = QueryDetails.builder() + .compartmentId(getCompartmentId()) + .statement(stmt) + .maxReadInKBs(maxReadKB) + .build(); + runQueryWithMaxReadKB(qryInfo, maxReadKB, 27, null); + } + + private Map makeTRow(int id) { + Map row = new HashMap<>(); + row.put("id", id); + row.put("s", "s" + id); + return row; + } + + private Map makeTARow(int id, int ida) { + Map row = new HashMap<>(); + row.put("id", id); + row.put("ida", ida); + row.put("s", "s" + id + "_" + ida); + return row; + } + + private Map makeTABRow(int id, int ida, int idb) { + Map row = new HashMap<>(); + row.put("id", id); + row.put("ida", ida); + row.put("idb", idb); + row.put("s", "s" + id + "_" + ida + "_" + idb); + return row; + } + + private void 
runSummarizeStatementFail(SummarizeStatementRequest req, + int expCode) { + try { + client.summarizeStatement(req); + fail("expect to fail but not"); + } catch (BmcException ex) { + assertEquals(expCode , ex.getStatusCode()); + checkErrorMessage(ex); + } + } + + private int runQuery(QueryDetails info, + int expCount, + RowValidator validator) { + return runQueryWithLimit(info, 0, expCount, validator); + } + + private int runQueryWithLimit(QueryDetails info, + int limit, + int expCount, + RowValidator rowValidator) { + int total = 0; + QueryResponse res = null; + QueryRequest qryReq; + String nextPage = null; + int count = 0; + int totalKB = 0; + do { + qryReq = QueryRequest.builder() + .queryDetails(info) + .page(nextPage) + .limit(limit) + .build(); + + res = client.query(qryReq); + + QueryResultCollection qrc = res.getQueryResultCollection(); + if (rowValidator != null) { + for (Map e : qrc.getItems()) { + rowValidator.check(e); + } + } + count = qrc.getItems().size(); + total += count; + totalKB += qrc.getUsage().getReadUnitsConsumed(); + + if (count > 0) { + assertTrue(qrc.getUsage().getReadUnitsConsumed() > 0); + } + assertTrue(qrc.getUsage().getWriteUnitsConsumed() == 0); + + nextPage = res.getOpcNextPage(); + if (limit > 0) { + if (nextPage != null) { + assertEquals(count, limit); + } else { + assertTrue(count <= limit); + } + } + } while (nextPage!= null); + + assertEquals(expCount, total); + return totalKB; + } + + private int runQueryWithMaxReadKB(QueryDetails info, + int maxReadKB, + int expCount, + RowValidator rowValidator) { + final int prepCost = 2; + boolean isPrepared = info.getIsPrepared() != null && info.getIsPrepared(); + + int total = 0; + QueryResponse res = null; + QueryRequest qryReq; + String nextPage = null; + + int readKB = 0; + int totalKB = 0; + do { + qryReq = QueryRequest.builder() + .queryDetails(info) + .page(nextPage) + .build(); + res = client.query(qryReq); + + QueryResultCollection qrc = res.getQueryResultCollection(); + if (rowValidator != null) { + for (Map e : qrc.getItems()) { + rowValidator.check(e); + } + } + + total += qrc.getItems().size(); + readKB = qrc.getUsage().getReadUnitsConsumed(); + totalKB += readKB; + + if (maxReadKB > 0) { + if (isPrepared) { + assertTrue(readKB <= maxReadKB + 2); + } else { + assertTrue(readKB - prepCost <= maxReadKB + 2); + } + } + assertTrue(qrc.getUsage().getWriteUnitsConsumed() == 0); + + nextPage = res.getOpcNextPage(); + } while (nextPage!= null); + + assertEquals(expCount, total); + return totalKB; + } + + private void createTestTable(String tableName) { + String ddl = "create table if not exists " + tableName + "(" + + "id integer, name String, age integer, " + + "primary key(id))"; + + TableLimits limits = TableLimits.builder() + .maxReadUnits(2000) + .maxWriteUnits(1000) + .maxStorageInGBs(1) + .build(); + + createTable(tableName, ddl, limits); + } + + private Map createValue(int i) { + Map value = new HashMap(); + value.put("id", i); + value.put("name", "name" + i); + value.put("age", 20 + i % 40); + return value; + } + + private void validateSimpleRow(Map value) { + assertNotNull(value.get("id")); + Integer id = (Integer)value.get("id"); + Map exp = createValue(id.intValue()); + assertEquals(exp, value); + } + + @FunctionalInterface + private interface RowValidator { + void check(Map value); + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/rest/RestAPITestBase.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/rest/RestAPITestBase.java new file mode 100644 index 
00000000..35858f7f --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/rest/RestAPITestBase.java @@ -0,0 +1,967 @@ +/*- + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * See the file LICENSE for redistribution information. + * + */ + +package oracle.nosql.proxy.rest; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.fail; + +import java.io.IOException; +import java.net.HttpURLConnection; +import java.security.KeyPair; +import java.security.KeyPairGenerator; +import java.security.NoSuchAlgorithmException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Base64; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeSet; + +import org.junit.After; +import org.junit.Assume; +import org.junit.Before; +import org.junit.BeforeClass; + +import com.oracle.bmc.ClientConfiguration; +import com.oracle.bmc.ConfigFileReader; +import com.oracle.bmc.ClientConfiguration.ClientConfigurationBuilder; +import com.oracle.bmc.ConfigFileReader.ConfigFile; +import com.oracle.bmc.auth.AbstractAuthenticationDetailsProvider; +import com.oracle.bmc.auth.ConfigFileAuthenticationDetailsProvider; +import com.oracle.bmc.auth.SimpleAuthenticationDetailsProvider; +import com.oracle.bmc.auth.StringPrivateKeySupplier; +import com.oracle.bmc.model.BmcException; +import com.oracle.bmc.nosql.NosqlClient; +import com.oracle.bmc.nosql.model.CreateIndexDetails; +import com.oracle.bmc.nosql.model.CreateTableDetails; +import com.oracle.bmc.nosql.model.IndexKey; +import com.oracle.bmc.nosql.model.TableCollection; +import com.oracle.bmc.nosql.model.TableLimits; +import com.oracle.bmc.nosql.model.TableLimits.CapacityMode; +import com.oracle.bmc.nosql.model.TableSummary; +import com.oracle.bmc.nosql.model.UpdateRowDetails; +import com.oracle.bmc.nosql.model.UpdateTableDetails; +import com.oracle.bmc.nosql.model.WorkRequest; +import com.oracle.bmc.nosql.model.WorkRequestError; +import com.oracle.bmc.nosql.requests.ChangeTableCompartmentRequest; +import com.oracle.bmc.nosql.requests.CreateIndexRequest; +import com.oracle.bmc.nosql.requests.CreateTableRequest; +import com.oracle.bmc.nosql.requests.DeleteIndexRequest; +import com.oracle.bmc.nosql.requests.DeleteTableRequest; +import com.oracle.bmc.nosql.requests.GetRowRequest; +import com.oracle.bmc.nosql.requests.GetTableRequest; +import com.oracle.bmc.nosql.requests.GetWorkRequestRequest; +import com.oracle.bmc.nosql.requests.ListTablesRequest; +import com.oracle.bmc.nosql.requests.ListWorkRequestErrorsRequest; +import com.oracle.bmc.nosql.requests.UpdateRowRequest; +import com.oracle.bmc.nosql.requests.UpdateTableRequest; +import com.oracle.bmc.nosql.responses.ChangeTableCompartmentResponse; +import com.oracle.bmc.nosql.responses.CreateIndexResponse; +import com.oracle.bmc.nosql.responses.CreateTableResponse; +import com.oracle.bmc.nosql.responses.DeleteIndexResponse; +import com.oracle.bmc.nosql.responses.DeleteTableResponse; +import com.oracle.bmc.nosql.responses.GetRowResponse; +import com.oracle.bmc.nosql.responses.GetTableResponse; +import com.oracle.bmc.nosql.responses.GetWorkRequestResponse; +import com.oracle.bmc.nosql.responses.ListTablesResponse; +import com.oracle.bmc.nosql.responses.ListWorkRequestErrorsResponse; +import com.oracle.bmc.nosql.responses.UpdateRowResponse; +import com.oracle.bmc.nosql.responses.UpdateTableResponse; +import 
com.oracle.bmc.requests.BmcRequest; +import com.oracle.bmc.retrier.RetryConfiguration; + +import oracle.nosql.common.json.ArrayNode; +import oracle.nosql.common.json.JsonUtils; +import oracle.nosql.common.json.ObjectNode; +import oracle.nosql.driver.RequestTimeoutException; +import oracle.nosql.proxy.ProxyTestBase; +import oracle.nosql.util.HttpRequest; +import oracle.nosql.util.HttpResponse; +import oracle.nosql.util.tmi.DropInputs; +import oracle.nosql.util.tmi.TableDDLInputs; +import oracle.nosql.util.tmi.TableInfo; +import oracle.nosql.util.tmi.TableInfo.ActivityPhase; +import oracle.nosql.util.tmi.TableInfo.TableState; + +public class RestAPITestBase extends ProxyTestBase { + protected static final String TENANT_NOSQL_DEV = + "ocid1.tenancy.oc1..aaaaaaaattuxbj75pnn3nksvzyidshdbrfmmeflv4kkemajroz2thvca4kba"; + /* default compartmentId used in mincloud test */ + private static final String MC_TEST_COMPARTMENT_ID = + "ocid1.compartment.oc1..aaaaaaaaw2774bxkk4kndya4pl43ols5z263iupqvcpcjkoz52oieg5czvtq"; + /* default another compartmentId used in mincloud test */ + protected static String MC_TEST_COMPRATMENT_ID_FOR_UPDATE = + "ocid1.compartment.oc1..aaaaaaaahy6aozjru5grkp2dhrhqfdwh4hihd6fpeafqdxvlfb6scf7hotnq"; + private static final String LOCAL_COMPARTMENT_ID = "test.compartment"; + + private static final String USER_OCID = "ocid1.user.oc1..dummyuser"; + private static final String FINGER_PRTINT = + "01:02:03:04:05:06:07:08:09:0A:0B:0C:0D:0E:0F:10"; + private static final String PRIVATE_KEY = genPrivateKey(); + + private final static int DEFAULT_WAIT_MS = 20_000; + private final static int DEFAULT_DELAY_MS = 500; + + protected final static TableLimits defaultLimits = + TableLimits.builder() + .maxReadUnits(100) + .maxWriteUnits(100) + .maxStorageInGBs(1) + .capacityMode(CapacityMode.Provisioned) + .build(); + + protected NosqlClient client; + + @BeforeClass + public static void staticSetUp() throws Exception { + /* + * The rest interface is not yet enabled for onprem, disable rest API + * related tests for onprem test + */ + onprem = Boolean.getBoolean(ONPREM_PROP); + Assume.assumeTrue("Skipping rest api test if onprem test", !onprem); + + /* + * Set test tenantId for minicloud test to tenancy "nosqldev", this is + * to work with MockIAMService that mimics to resolve 2 test + * compartments (COMPRATMENT_ID and COMPARTMENT_ID_TO_MOVE) to + * tenancy "nosqldev", see the methods getTenantId() of + * proxy/src/main/java/oracle/nosql/proxy/security/iam/MockIAMService.java + * in spartakv repo. + */ + cloudRunning = Boolean.getBoolean(USEMC_PROP); + if (Boolean.getBoolean(USEMC_PROP)) { + System.setProperty(TENANT_ID_PROP, TENANT_NOSQL_DEV); + } + + staticSetUp(tenantLimits); + } + + static String getUserId() { + return USER_OCID; + } + + protected static String getCompartmentId() { + if (TEST_COMPARTMENT_ID != null) { + return TEST_COMPARTMENT_ID; + } + return (useMiniCloud ? MC_TEST_COMPARTMENT_ID : LOCAL_COMPARTMENT_ID); + } + + static String getCompartmentIdMoveTo() { + return (TEST_COMPARTMENT_ID_FOR_UPDATE != null) ? 
+ TEST_COMPARTMENT_ID_FOR_UPDATE : + MC_TEST_COMPRATMENT_ID_FOR_UPDATE; + } + + @Override + @Before + public void setUp() throws Exception { + client = getNoSQLClient(); + setOpThrottling(getTenantId(), NO_OP_THROTTLE); + removeAllTables(getCompartmentId()); + } + + protected NosqlClient getNoSQLClient() { + + ClientConfigurationBuilder cfg = ClientConfiguration.builder(); + configClient(cfg); + + NosqlClient.Builder builder = NosqlClient.builder(); + builder.configuration(cfg.build()); + + AbstractAuthenticationDetailsProvider provider; + if (useCloudService) { + ConfigFile config = null; + try { + config = ConfigFileReader.parse(OCI_CONFIG_FILE, OCI_PROFILE); + } catch (IOException e) { + fail("Unable to read config file: " + OCI_CONFIG_FILE); + } + provider = new ConfigFileAuthenticationDetailsProvider(config); + } else { + provider = SimpleAuthenticationDetailsProvider.builder() + .userId(getUserId()) + .fingerprint(FINGER_PRTINT) + .tenantId(getTenantId()) + .privateKeySupplier( + new StringPrivateKeySupplier(PRIVATE_KEY)) + .build(); + } + + return builder.endpoint(getProxyEndpoint()).build(provider); + } + + protected void configClient(ClientConfigurationBuilder builder) { + /* + * Now retries on below operations are enabled by default, disable + * retries in the rest API test. + * o ListTables + * o GetTable + * o ListIndexes + * o GetIndex + * o GetRow + * o ListTableUsage + * o PrepareStatement + * o SummarizeStatement + * o ListWorkRequests + * o GetWorkRequest + * o ListWorkRequestErrors + * o ListWorkRequestLogs + * o CreateTable + * o CreateIndex + * o ChangeTableCompartment + */ + builder.retryConfiguration(RetryConfiguration.NO_RETRY_CONFIGURATION); + } + + @Override + @After + public void tearDown() throws Exception { + removeAllTables(getCompartmentId()); + if (client != null) { + client.close(); + } + setOpThrottling(getTenantId(), DEFAULT_OP_THROTTLE); + } + + void removeAllTables(String comptId) { + ListTablesRequest ltReq = ListTablesRequest.builder() + .compartmentId(comptId) + .build(); + ListTablesResponse ltRet = client.listTables(ltReq); + TableCollection tables = ltRet.getTableCollection(); + + /* + * Sorted the tables in reverse order of name to make sure that the + * child table will be dropped before its parent + */ + Set sorted = + new TreeSet<>(String.CASE_INSENSITIVE_ORDER.reversed()); + for (TableSummary table : tables.getItems()) { + if (table.getName().startsWith("SYS$")) { + continue; + } + sorted.add(table.getName()); + } + for (String name : sorted) { + dropTable(comptId, name, true, true); + } + } + + GetTableResponse getTable(String tableNameOrId) { + String comptId = isValidTableOcid(tableNameOrId) ? 
+ null : getCompartmentId(); + return getTable(comptId, tableNameOrId); + } + + GetTableResponse getTable(String cmptId, String tableNameOrId) { + GetTableRequest req = GetTableRequest.builder() + .compartmentId(cmptId) + .tableNameOrId(tableNameOrId) + .build(); + return client.getTable(req); + } + + String getTableId(String tableName) { + GetTableResponse gtRes = getTable(tableName); + if (gtRes.getTable() != null) { + return gtRes.getTable().getId(); + } + return null; + } + + /* + * create table + */ + void createTable(String tableName, String ddl) { + createTable(tableName, ddl, defaultLimits); + } + + String createTable(String tableName, String ddl, TableLimits limits) { + return createTable(tableName, ddl, limits, true /* wait */); + } + + String createTable(String tableName, + String ddl, + TableLimits limits, + boolean wait) { + CreateTableRequest req = buildCreateTableRequest(getCompartmentId(), + tableName, + ddl, + limits); + return executeDdl(req, wait); + } + + CreateTableRequest buildCreateTableRequest(String cmptId, + String tableName, + String ddl, + TableLimits limits) { + CreateTableDetails.Builder payload = CreateTableDetails.builder() + .compartmentId(cmptId) + .name(tableName) + .ddlStatement(ddl); + if (limits != null) { + payload.tableLimits(limits); + } + + return CreateTableRequest.builder() + .createTableDetails(payload.build()) + .build(); + } + + void createTable(String tableName, + String ddl, + TableLimits limits, + Map freeformTags, + Map> definedTags) { + + CreateTableDetails.Builder payload = CreateTableDetails.builder() + .compartmentId(getCompartmentId()) + .name(tableName) + .ddlStatement(ddl); + if (limits != null) { + payload.tableLimits(limits); + } + if (freeformTags != null) { + payload.freeformTags(freeformTags); + } + if (definedTags != null) { + payload.definedTags(definedTags); + } + + CreateTableRequest ctReq = CreateTableRequest.builder() + .createTableDetails(payload.build()) + .build(); + executeDdl(ctReq); + } + + /* + * drop table + */ + void dropTable(String tableNameOrId) { + dropTable(tableNameOrId, true /* ifExists */, true /* wait */); + } + + String dropTable(String tableNameOrId, boolean ifExists, boolean wait) { + String comptId = isValidTableOcid(tableNameOrId) ? + null : getCompartmentId(); + return dropTable(comptId, tableNameOrId, ifExists, wait); + } + + String dropTable(String cmptId, + String tableNameOrId, + boolean ifExists, + boolean wait) { + DeleteTableRequest req = DeleteTableRequest.builder() + .compartmentId(cmptId) + .tableNameOrId(tableNameOrId) + .isIfExists(ifExists) + .build(); + return executeDdl(req, wait); + } + + /* + * alter table schema + */ + + void alterTable(String tableNameOrId, String ddl) { + alterTable(tableNameOrId, ddl, true /* wait */); + } + + String alterTable(String tableNameOrId, String ddl, boolean wait) { + UpdateTableRequest req = buildUpdateTableRequest(tableNameOrId, ddl); + return executeDdl(req, wait); + } + + UpdateTableRequest buildUpdateTableRequest(String tableNameOrId, String ddl) { + String comptId = isValidTableOcid(tableNameOrId) ? 
+ null : getCompartmentId(); + UpdateTableDetails info = UpdateTableDetails.builder() + .compartmentId(comptId) + .ddlStatement(ddl) + .build(); + return UpdateTableRequest.builder() + .tableNameOrId(tableNameOrId) + .updateTableDetails(info) + .build(); + } + + /* + * update table limits + */ + void updateTable(String tableNameOrId, TableLimits limits) { + updateTable(tableNameOrId, limits, true /* wait */); + } + + String updateTable(String tableNameOrId, TableLimits limits, boolean wait) { + UpdateTableRequest req = buildUpdateTableRequest(tableNameOrId, limits); + return executeDdl(req, wait); + } + + UpdateTableRequest buildUpdateTableRequest(String tableNameOrId, + TableLimits limits) { + String comptId = isValidTableOcid(tableNameOrId) ? + null : getCompartmentId(); + UpdateTableDetails info = UpdateTableDetails.builder() + .compartmentId(comptId) + .tableLimits(limits) + .build(); + return UpdateTableRequest.builder() + .tableNameOrId(tableNameOrId) + .updateTableDetails(info) + .build(); + } + + /* + * create index + */ + + void createIndex(String tableNameOrId, String indexName, String[] fields) { + createIndex(tableNameOrId, indexName, fields, false /* ifNotExists */, + true /* wait */); + } + + String createIndex(String tableNameOrId, + String indexName, + String[] fields, + boolean ifNotExists, + boolean wait) { + CreateIndexRequest req = + buildCreateIndexRequest(tableNameOrId, indexName, fields, ifNotExists); + return executeDdl(req, wait); + } + + String createIndex(String tableNameOrId, + String indexName, + List keys, + boolean wait) { + CreateIndexRequest req = + buildCreateIndexRequest(tableNameOrId, indexName, keys, false); + return executeDdl(req, wait); + } + + CreateIndexRequest buildCreateIndexRequest(String tableNameOrId, + String indexName, + String[] fields, + boolean ifNotExists) { + List keys = new ArrayList<>(); + for (String field : fields) { + IndexKey key = IndexKey.builder() + .columnName(field) + .build(); + keys.add(key); + } + return buildCreateIndexRequest(tableNameOrId, indexName, keys, + ifNotExists); + } + + private CreateIndexRequest buildCreateIndexRequest(String tableNameOrId, + String indexName, + List keys, + boolean ifNotExists) { + + String comptId = isValidTableOcid(tableNameOrId) ? + null : getCompartmentId(); + CreateIndexDetails info = CreateIndexDetails.builder() + .name(indexName) + .compartmentId(comptId) + .isIfNotExists(ifNotExists) + .keys(keys) + .build(); + return CreateIndexRequest.builder() + .tableNameOrId(tableNameOrId) + .createIndexDetails(info) + .build(); + } + + /* + * drop index + */ + + String dropIndex(String tableNameOrId, String indexName, boolean wait) { + return dropIndex(tableNameOrId, indexName, false /* ifExists */, wait); + } + + String dropIndex(String tableNameOrId, + String indexName, + boolean ifExists, + boolean wait) { + String comptId = isValidTableOcid(tableNameOrId) ? 
+ null : getCompartmentId(); + DeleteIndexRequest req = DeleteIndexRequest.builder() + .tableNameOrId(tableNameOrId) + .compartmentId(comptId) + .isIfExists(ifExists) + .indexName(indexName) + .build(); + return executeDdl(req, wait); + } + + String executeDdl(BmcRequest request) { + return executeDdl(request, true /* wait */); + } + + String executeDdl(BmcRequest request, boolean wait) { + String workRequestId = runDdlOp(request); + if (workRequestId == null) { + return null; + } + if (wait) { + waitForStatus(workRequestId, WorkRequest.Status.Succeeded); + } + return workRequestId; + } + + void executeDdlFail(BmcRequest request, String errorType) { + executeDdlFail(request, 0, errorType); + } + + void executeDdlFail(BmcRequest request, int errCode, String errType) { + String workRequestId; + try { + workRequestId = runDdlOp(request); + if (cloudRunning) { + waitForStatus(workRequestId, WorkRequest.Status.Failed); + assertEquals(errType, getWorkRequestError(workRequestId)); + } else { + waitForComplete(workRequestId); + fail("expect to fail but not"); + } + } catch (BmcException ex) { + if (errCode > 0) { + assertEquals(errCode, ex.getStatusCode()); + } + assertEquals(errType , ex.getServiceCode()); + checkErrorMessage(ex); + } + } + + void waitForComplete(String workRequestId) { + waitForStatus(workRequestId, + WorkRequest.Status.Succeeded, + WorkRequest.Status.Failed); + } + + void waitForStatus(String workRequestId, WorkRequest.Status... states) { + waitForStatus(workRequestId, null, states); + } + + void waitForStatus(String workRequestId, + String errorCode, + WorkRequest.Status... states) { + final int waitMs = DEFAULT_WAIT_MS; + final int delayMs = DEFAULT_DELAY_MS; + + GetWorkRequestRequest req; + GetWorkRequestResponse res; + WorkRequest workReq; + + req = GetWorkRequestRequest.builder() + .workRequestId(workRequestId) + .build(); + long start = System.currentTimeMillis(); + while(true) { + res = client.getWorkRequest(req); + workReq = res.getWorkRequest(); + for (WorkRequest.Status state : states) { + if (workReq.getStatus() == state) { + if (state == WorkRequest.Status.Failed) { + String error = getWorkRequestError(workRequestId); + if (errorCode != null) { + assertEquals(errorCode, error); + } else { + assertNotNull(error); + } + } + return; + } + } + + if (isWorkRequestCompleted(workReq.getStatus())) { + String error = null; + if (workReq.getStatus() == WorkRequest.Status.Failed) { + error = getWorkRequestError(workRequestId); + } + fail("WorkRequest done with state = " + workReq.getStatus() + + " but not the expected " + Arrays.toString(states) + + ": error=" + error + ", workRequestId=" + workRequestId); + } + + if (System.currentTimeMillis() - start > waitMs) { + fail("Not reach the specified status after wait " + waitMs + + "ms " + Arrays.toString(states)); + break; + } + try { + Thread.sleep(delayMs); + } catch (InterruptedException ignored) { + } + } + } + + private static boolean isWorkRequestCompleted(WorkRequest.Status state) { + return state == WorkRequest.Status.Succeeded || + state == WorkRequest.Status.Failed || + state == WorkRequest.Status.Canceled; + } + + protected String getWorkRequestError(String workRequestId) { + ListWorkRequestErrorsRequest req = + ListWorkRequestErrorsRequest.builder() + .workRequestId(workRequestId) + .build(); + + ListWorkRequestErrorsResponse res = client.listWorkRequestErrors(req); + List errors = + res.getWorkRequestErrorCollection().getItems(); + if (errors.isEmpty()) { + return null; + } + return errors.get(0).getCode(); + } + + 
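+    /*
+     * Dispatches a DDL request to the matching NosqlClient API (create,
+     * update or delete table, create or delete index, change table
+     * compartment) and returns the opc work-request id used to track its
+     * completion; fails the test for any other request type.
+     */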
private String runDdlOp(BmcRequest req) { + if (req instanceof CreateTableRequest) { + CreateTableResponse res = + client.createTable((CreateTableRequest)req); + return res.getOpcWorkRequestId(); + } + + if (req instanceof UpdateTableRequest) { + UpdateTableResponse res = + client.updateTable((UpdateTableRequest)req); + return res.getOpcWorkRequestId(); + } + + if (req instanceof DeleteTableRequest) { + DeleteTableResponse res = + client.deleteTable((DeleteTableRequest)req); + return res.getOpcWorkRequestId(); + } + + if (req instanceof CreateIndexRequest) { + CreateIndexResponse res = + client.createIndex((CreateIndexRequest)req); + return res.getOpcWorkRequestId(); + } + + if (req instanceof DeleteIndexRequest) { + DeleteIndexResponse res = + client.deleteIndex((DeleteIndexRequest)req); + return res.getOpcWorkRequestId(); + } + + if (req instanceof ChangeTableCompartmentRequest) { + ChangeTableCompartmentResponse res = + client.changeTableCompartment( + (ChangeTableCompartmentRequest)req); + return res.getOpcWorkRequestId(); + } + + fail("Invalid ddl operation request: " + req); + return null; + } + + /* + * dml ops + */ + + String putRow(String tableNameOrId, Map row) { + String comptId = isValidTableOcid(tableNameOrId) ? + null : getCompartmentId(); + UpdateRowDetails info = UpdateRowDetails.builder() + .compartmentId(comptId) + .value(row) + .build(); + UpdateRowRequest req = UpdateRowRequest.builder() + .tableNameOrId(tableNameOrId) + .updateRowDetails(info) + .build(); + UpdateRowResponse res = client.updateRow(req); + assertNotNull(res); + assertNotNull(res.getUpdateRowResult()); + assertNotNull(res.getUpdateRowResult().getVersion()); + return res.getUpdateRowResult().getVersion(); + } + + Map getRow(String tableNameOrId, List key) { + String comptId = isValidTableOcid(tableNameOrId) ? + null : getCompartmentId(); + GetRowRequest req = GetRowRequest.builder() + .tableNameOrId(tableNameOrId) + .compartmentId(comptId) + .key(key) + .build(); + GetRowResponse res = client.getRow(req); + assertNotNull(res); + assertNotNull(res.getRow()); + return res.getRow().getValue(); + } + + private static boolean isValidTableOcid(String tableNameOrId) { + if (cloudRunning && tableNameOrId != null) { + return tableNameOrId.startsWith("ocid1") && + tableNameOrId.contains("nosqltable"); + } + return false; + } + + protected static void checkErrorMessage(BmcException ex) { + if (ex == null || ex.getMessage() == null) { + return; + } + assertFalse(ex.getMessage().contains("ocid1_nosqltable_")); + } + + /* + * methods directly call SC APIs. + */ + + /* + * Sets the data store for free table if set is true, otherwise revoke the + * setting. + */ + protected static void setFreeTableStore(boolean set) { + if (!useMiniCloud) { + return; + } + + final String pl = "[\n" + + " {\n" + + " \"version\": 2,\n" + + " \"storeName\": \"DStore1\",\n" + + " \"storeAttrs\": {\n" + + " \"freeStore\": " + (set ? 
true : false) + ",\n" + + " \"tenantId\": null,\n" + + " \"compartmentIds\": null\n" + + " }\n" + + " }\n" + + "]"; + + HttpResponse resp = new HttpRequest().doHttpPost(scDSConfigBase, pl); + if (200 != resp.getStatusCode()) { + fail("setFreeTableStore failed: " + resp); + } + } + + /* + * Sets table's TableActivity state using SC rest call + */ + protected static void setTableActivity(String tenantId, + String tableOcid, + long dmlMs, + ActivityPhase phase) { + + if (!useMiniCloud) { + return; + } + + StringBuilder sb = new StringBuilder(tmUrlBase) + .append("tables/") + .append(tableOcid) + .append("/actions/setActivity") + .append("?tenantid=").append(tenantId) + .append("&dmlms=").append(dmlMs) + .append("&phase=").append(phase.name()); + String url = sb.toString(); + + HttpResponse res = new HttpRequest().doHttpPut(url, null); + if (res.getStatusCode() != HttpURLConnection.HTTP_OK) { + throw new IllegalStateException("setTableActivity failed: " + + res.getOutput()); + } + } + + /* + * Creates table using SC rest call + */ + protected static String scCreateTable(String tenantId, + String compartmentId, + String tableName, + String ddl, + oracle.nosql.util.tmi.TableLimits limits, + boolean isFreeTable) + throws Exception { + + if (!cloudRunning) { + return null; + } + + HttpRequest httpRequest = new HttpRequest().disableRetry(); + String url = tmUrlBase + "tables/" + tableName; + + /* re-create the table */ + TableDDLInputs tdi = new TableDDLInputs(ddl, tenantId, compartmentId, + null, /* matchETag */ + true, /* ifNotExists */ + limits, null /* tags */, + isFreeTable /* freeTable */, + null /* retryToken*/); + String payload = JsonUtils.print(tdi); + HttpResponse res = httpRequest.doHttpPost(url, payload); + if (res.getStatusCode() != HttpURLConnection.HTTP_OK) { + throw new IllegalStateException( + "Recreate failed to create table " + res.getOutput()); + } + TableInfo tif = JsonUtils.readValue(res.getOutput(), TableInfo.class); + String operationId = tif.getOperationId(); + waitForCompletion(httpRequest, tenantId, compartmentId, tableName, + operationId, TableState.ACTIVE, 20000); + return tif.getTableOcid(); + } + + protected static void scRecreateTable(String tenantId, + String compartmentId, + String tableName, + String ddl) + throws Exception { + + if (!useMiniCloud) { + return; + } + + HttpRequest httpRequest = new HttpRequest().disableRetry(); + + /* drop existing table */ + DropInputs di = new DropInputs(true, tenantId, compartmentId, null); + String payload = JsonUtils.print(di); + String url = tmUrlBase + "tables/" + tableName; + + HttpResponse res = httpRequest.doHttpDelete(url, payload); + if (res.getStatusCode() != HttpURLConnection.HTTP_OK) { + throw new IllegalStateException( + "Recreate failed to drop existing table " + res.getOutput()); + } + + TableInfo tif = JsonUtils.readValue(res.getOutput(), TableInfo.class); + String operationId = tif.getOperationId(); + waitForCompletion(httpRequest, tenantId, compartmentId, tableName, + operationId, TableState.DROPPED, 20000); + + scCreateTable(tenantId, compartmentId, tableName, ddl, + new oracle.nosql.util.tmi.TableLimits(100, 100, 100), + false /* isFreeTable */); + } + + private static void waitForCompletion(HttpRequest httpRequest, + String tenantId, + String compartmentId, + String tableName, + String operationId, + TableState state, + int waitMillis) + throws Exception { + + int delayMS = 500; + long startTime = System.currentTimeMillis(); + + String url = tmUrlBase + "tables/" + tableName + + "?tenantid=" + tenantId + + 
"&compartmentid=" + compartmentId + + "&operationid=" + operationId; + + while (true) { + + long curTime = System.currentTimeMillis(); + if ((curTime - startTime) > waitMillis) { + throw new RequestTimeoutException( + waitMillis, + "Operation not completed in expected time"); + } + + HttpResponse res = httpRequest.doHttpGet(url); + if (res.getStatusCode() == 200) { + TableInfo tif = JsonUtils.readValue(res.getOutput(), + TableInfo.class); + if (state == tif.getStateEnum()) { + return; + } + } else if (res.getStatusCode() == 404) { + if (state == TableState.DROPPED) { + return; + } + throw new IllegalStateException("Table not found " + tableName); + } + Thread.sleep(delayMS); + } + } + + protected static void assertDefinedTags( + Map> exp, + Map> tags) { + + if (useCloudService) { + /* ignore the default defined tag implicitly added in cloud */ + tags.remove("Oracle-Tags"); + } + assertEquals(exp, tags); + } + + /* + * Set or clear the dedicated tenantId of the pod + * + * If the given tenantId is not null, assign the pod to the given tenantId. + * Otherwise, clear the pod's dedicated tenantId. + */ + protected static void setDedicatedTenantId(String tenantId) { + + if (!cloudRunning) { + return; + } + + final String url = "http://" + scHost + ":" + scPort + + "/V0/service/dsconfig"; + + /* Get the current store configuration */ + HttpResponse res = new HttpRequest().doHttpGet(url, null); + assertEquals(HttpURLConnection.HTTP_OK, res.getStatusCode()); + + ArrayNode dsconfig = JsonUtils.parseJsonNode(res.getOutput()).asArray(); + ObjectNode storeAttrs = dsconfig.get(0).asObject() + .get("storeAttrs").asObject(); + + /* + * Change the dedicated tenantId and compartmentIds in the store + * configuration + */ + if (tenantId != null) { + storeAttrs.put("tenantId", tenantId) + .putArray("compartmentIds").add(tenantId); + } else { + storeAttrs.putNull("tenantId") + .putNull("compartmentIds"); + } + + /* Update the store configuration */ + res = new HttpRequest().doHttpPost(url, JsonUtils.toJson(dsconfig)); + assertEquals(HttpURLConnection.HTTP_OK, res.getStatusCode()); + } + + private static String genPrivateKey() { + try { + KeyPairGenerator kpg = KeyPairGenerator.getInstance("RSA"); + kpg.initialize(2048); + KeyPair kp = kpg.generateKeyPair(); + + StringBuilder sb = new StringBuilder(); + sb.append("-----BEGIN PRIVATE KEY-----\n") + .append(Base64.getEncoder().encodeToString( + kp.getPrivate().getEncoded())) + .append("\n-----END PRIVATE KEY-----"); + return sb.toString(); + + } catch (NoSuchAlgorithmException e) { + throw new IllegalArgumentException( + "Generate RSA private key file failed: " + e); + } + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/rest/RestCurlTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/rest/RestCurlTest.java new file mode 100644 index 00000000..e803909d --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/rest/RestCurlTest.java @@ -0,0 +1,61 @@ +/*- + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * See the file LICENSE for redistribution information. 
+ * + */ + +package oracle.nosql.proxy.rest; + +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeTrue; + +import java.lang.ProcessBuilder; + +import org.junit.Assume; +import org.junit.BeforeClass; +import org.junit.Test; + +import oracle.nosql.proxy.ProxyTestBase; + +public class RestCurlTest extends ProxyTestBase { + private static final String shell = getProxyBase() + "/oracle/nosql/proxy/rest/curl_smoke_test.sh "; + + /* Note this overrides the parent BeforeClass method */ + @BeforeClass + public static void staticSetUp() + throws Exception { + + /* don't start kvlite/proxy if not in cloudsim mode */ + Assume.assumeTrue( + "Skipping RestCurlTest if not cloudsim test", + !Boolean.getBoolean(ONPREM_PROP) && + !Boolean.getBoolean(USEMC_PROP) && + !Boolean.getBoolean(USECLOUD_PROP)); + + staticSetUp(tenantLimits); + } + + @Test + public void restSmokeTest() throws Exception { + + /* this only runs in cloudsim mode */ + assumeTrue(onprem == false); + assumeTrue(cloudRunning == false); + /* should output go to stdout? default no/silent */ + // boolean curl_verbose = Boolean.getBoolean("test.curl.verbose"); + + /* invoke a shell script that uses curl to do REST tests */ + /* TODO: check for executable first */ + final String verb = (verbose) ? "-v " : ""; + String sh = shell + verb + getProxyPort(); + Process p = new ProcessBuilder() + .inheritIO() + .command("/bin/bash", "-c", sh) + .start(); + int retCode = p.waitFor(); + if (retCode != 0) { + fail("Error executing rest smoke test"); + } + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/rest/RowTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/rest/RowTest.java new file mode 100644 index 00000000..ac98778c --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/rest/RowTest.java @@ -0,0 +1,1199 @@ +/*- + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * See the file LICENSE for redistribution information. 
+ * + */ + +package oracle.nosql.proxy.rest; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeTrue; + +import java.util.ArrayList; +import java.util.Date; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.junit.Test; + +import com.oracle.bmc.model.BmcException; +import com.oracle.bmc.nosql.model.DeleteRowResult; +import com.oracle.bmc.nosql.model.RequestUsage; +import com.oracle.bmc.nosql.model.Row; +import com.oracle.bmc.nosql.model.UpdateRowDetails; +import com.oracle.bmc.nosql.model.UpdateRowDetails.Option; +import com.oracle.bmc.nosql.model.UpdateRowResult; +import com.oracle.bmc.nosql.requests.DeleteRowRequest; +import com.oracle.bmc.nosql.requests.GetRowRequest; +import com.oracle.bmc.nosql.requests.UpdateRowRequest; +import com.oracle.bmc.nosql.responses.DeleteRowResponse; +import com.oracle.bmc.nosql.responses.GetRowResponse; +import com.oracle.bmc.nosql.responses.UpdateRowResponse; + +import oracle.kv.Version; +import oracle.nosql.common.json.JsonUtils; + +/** + * Test simple CRUD APIs: + * o put + * o get + * o delete + */ +public class RowTest extends RestAPITestBase { + + private final String tableName = "foo"; + private String tableDdl = "create table if not exists " + tableName + + "(id integer, name String, age integer, " + + "primary key(id))"; + + @Test + public void testPut() { + + createTable(tableName, tableDdl); + + UpdateRowRequest req; + UpdateRowResponse res; + + Map value = createValue(1); + + /* + * Test isExactMatch, default = false + */ + value.put("notExistsField", 1); + UpdateRowDetails row = UpdateRowDetails.builder() + .compartmentId(getCompartmentId()) + .value(value) + .build(); + req = UpdateRowRequest.builder() + .tableNameOrId(tableName) + .updateRowDetails(row) + .build(); + res = client.updateRow(req); + checkUpdateRowResponse(res, true, null, null); + + /* + * Set isExactMatch = false + */ + row = UpdateRowDetails.builder() + .compartmentId(getCompartmentId()) + .value(value) + .isExactMatch(false) + .build(); + req = UpdateRowRequest.builder() + .tableNameOrId(tableName) + .updateRowDetails(row) + .build(); + res = client.updateRow(req); + checkUpdateRowResponse(res, true, null, null); + String etag = res.getEtag(); + + /* + * Test etag + */ + value = createValue(1); + value.put("name", value.get("name") + "_upd"); + row = UpdateRowDetails.builder() + .compartmentId(getCompartmentId()) + .value(value) + .build(); + req = UpdateRowRequest.builder() + .tableNameOrId(tableName) + .updateRowDetails(row) + .ifMatch(etag) + .build(); + res = client.updateRow(req); + etag = res.getEtag(); + checkUpdateRowResponse(res, true, null, null); + + /* verify row */ + List key = new ArrayList(); + key.add("id:1"); + GetRowRequest getReq = GetRowRequest.builder() + .tableNameOrId(tableName) + .compartmentId(getCompartmentId()) + .key(key) + .build(); + GetRowResponse getRet = client.getRow(getReq); + checkGetRowResponse(getRet, value); + assertETagEquals(etag, getRet.getEtag()); + + /* + * Invalid comparmentId, compartmentId should not be empty or contain + * white space only + */ + row = UpdateRowDetails.builder() + .compartmentId(" ") + .value(value) + .build(); + req = UpdateRowRequest.builder() + .tableNameOrId(tableName) + .updateRowDetails(row) + .build(); + updateRowFail(req, 400 /* bad request */); + + /* + * 
Invalid value, it should not be empty. + */ + value.clear(); + row = UpdateRowDetails.builder() + .compartmentId(getCompartmentId()) + .value(value) + .build(); + req = UpdateRowRequest.builder() + .tableNameOrId(tableName) + .updateRowDetails(row) + .build(); + updateRowFail(req, 400 /* bad request */); + + /* + * Invalid identityCacheSize, it should not be negative value. + */ + value = createValue(0); + row = UpdateRowDetails.builder() + .compartmentId(getCompartmentId()) + .value(value) + .identityCacheSize(-1) + .build(); + req = UpdateRowRequest.builder() + .tableNameOrId(tableName) + .updateRowDetails(row) + .build(); + updateRowFail(req, 400 /* bad request */); + + /* + * Invalid ttl, it should not be negative value. + */ + value = createValue(0); + row = UpdateRowDetails.builder() + .compartmentId(getCompartmentId()) + .value(value) + .ttl(-1) + .build(); + req = UpdateRowRequest.builder() + .tableNameOrId(tableName) + .updateRowDetails(row) + .build(); + updateRowFail(req, 400 /* bad request */); + + /* + * Invalid timeoutInMs, it should not be negative value. + */ + value = createValue(0); + row = UpdateRowDetails.builder() + .compartmentId(getCompartmentId()) + .value(value) + .timeoutInMs(-1) + .build(); + req = UpdateRowRequest.builder() + .tableNameOrId(tableName) + .updateRowDetails(row) + .build(); + updateRowFail(req, 400 /* bad request */); + + /* + * Invalid matchVersion. + */ + value = createValue(0); + row = UpdateRowDetails.builder() + .compartmentId(getCompartmentId()) + .value(value) + .build(); + req = UpdateRowRequest.builder() + .tableNameOrId(tableName) + .updateRowDetails(row) + .ifMatch("invalid") + .build(); + updateRowFail(req, 400 /* bad request */); + + /* + * Table not found. + */ + row = UpdateRowDetails.builder() + .compartmentId(getCompartmentId()) + .value(value) + .build(); + req = UpdateRowRequest.builder() + .tableNameOrId("notFound") + .updateRowDetails(row) + .build(); + updateRowFail(req, 404 /* Table not found */); + + /* + * isExactMatch = true, row contains unexpected field + */ + value.put("notExistsField", 1); + row = UpdateRowDetails.builder() + .compartmentId(getCompartmentId()) + .value(value) + .isExactMatch(true) + .build(); + req = UpdateRowRequest.builder() + .tableNameOrId(tableName) + .updateRowDetails(row) + .build(); + updateRowFail(req, 400 /* bad request */); + + /* + * Invalid value, missing primary key field + */ + value = createValue(1); + value.remove("id"); + row = UpdateRowDetails.builder() + .compartmentId(getCompartmentId()) + .value(value) + .build(); + req = UpdateRowRequest.builder() + .tableNameOrId(tableName) + .updateRowDetails(row) + .build(); + updateRowFail(req, 400 /* bad request */); + } + + private void updateRowFail(UpdateRowRequest req, int expCode) { + try { + client.updateRow(req); + fail("expect to fail but not"); + } catch (BmcException ex) { + assertEquals(expCode , ex.getStatusCode()); + checkErrorMessage(ex); + } + } + + @Test + public void testPutWithOption() { + + createTable(tableName, tableDdl); + + /* Put a row */ + Map value = createValue(0); + UpdateRowDetails row = UpdateRowDetails.builder() + .compartmentId(getCompartmentId()) + .value(value) + .build(); + UpdateRowRequest putReq = UpdateRowRequest.builder() + .tableNameOrId(tableName) + .updateRowDetails(row) + .build(); + UpdateRowResponse putRet = client.updateRow(putReq); + checkUpdateRowResponse(putRet); + + /* PutIfAbsent failed */ + row = UpdateRowDetails.builder() + .compartmentId(getCompartmentId()) + .option(Option.IfAbsent) + 
.value(value) + .build(); + putReq = UpdateRowRequest.builder() + .tableNameOrId(tableName) + .updateRowDetails(row) + .build(); + putRet = client.updateRow(putReq); + checkUpdateRowResponse(putRet, false, null, null); + + /* PutIfAbsent with returnRow failed. */ + Map prevValue = value; + String prevVersion = putRet.getUpdateRowResult().getVersion(); + + value = createValue(0); + value.put("name", value.get("name") + "_upd"); + row = UpdateRowDetails.builder() + .compartmentId(getCompartmentId()) + .option(Option.IfAbsent) + .value(value) + .isGetReturnRow(true) + .build(); + putReq = UpdateRowRequest.builder() + .tableNameOrId(tableName) + .updateRowDetails(row) + .build(); + putRet = client.updateRow(putReq); + checkUpdateRowResponse(putRet, prevValue, prevVersion); + + /* PutIfAbsent OK */ + value = createValue(1); + row = UpdateRowDetails.builder() + .compartmentId(getCompartmentId()) + .option(Option.IfAbsent) + .value(value) + .build(); + putReq = UpdateRowRequest.builder() + .tableNameOrId(tableName) + .updateRowDetails(row) + .build(); + putRet = client.updateRow(putReq); + checkUpdateRowResponse(putRet); + + /* PutIfPresent OK */ + value.put("name", value.get("name") + "_upd"); + row = UpdateRowDetails.builder() + .compartmentId(getCompartmentId()) + .option(Option.IfPresent) + .value(value) + .build(); + putReq = UpdateRowRequest.builder() + .tableNameOrId(tableName) + .updateRowDetails(row) + .build(); + putRet = client.updateRow(putReq); + checkUpdateRowResponse(putRet); + prevValue = value; + prevVersion = putRet.getUpdateRowResult().getVersion(); + + /* PutIfPresent failed */ + value = createValue(2); + row = UpdateRowDetails.builder() + .compartmentId(getCompartmentId()) + .option(Option.IfPresent) + .value(value) + .build(); + putReq = UpdateRowRequest.builder() + .tableNameOrId(tableName) + .updateRowDetails(row) + .build(); + putRet = client.updateRow(putReq); + checkUpdateRowResponse(putRet, false, null, null); + + /* PutIfVersion OK */ + value = prevValue; + value.put("name", value.get("name")+ "_putifVersion"); + row = UpdateRowDetails.builder() + .compartmentId(getCompartmentId()) + .value(value) + .build(); + putReq = UpdateRowRequest.builder() + .tableNameOrId(tableName) + .ifMatch(prevVersion) + .updateRowDetails(row) + .build(); + putRet = client.updateRow(putReq); + checkUpdateRowResponse(putRet); + + int id = ((Integer)value.get("id")).intValue(); + Map exp = getRow(tableName, createKey(id)); + assertEquals(exp, value); + + /* + * PutIfVersion with unmatched version, get 412 error + */ + + prevValue = exp; + String oldPrevVersion = prevVersion; + prevVersion = putRet.getUpdateRowResult().getVersion(); + value.put("name", value.get("name")+ "_putifVersion_failed"); + row = UpdateRowDetails.builder() + .compartmentId(getCompartmentId()) + .value(value) + .isGetReturnRow(true) + .build(); + putReq = UpdateRowRequest.builder() + .tableNameOrId(tableName) + .ifMatch(oldPrevVersion) + .updateRowDetails(row) + .build(); + updateRowFail(putReq, 412 /* Precondition Failed */); + } + + @Test + public void testTTL() { + String tableName = "foo"; + String ddl = "create table if not exists " + tableName + "(" + + "id integer, name String, age integer, " + + "primary key(id)) " + + "using ttl 2 days"; + + /* Create table */ + createTable(tableName, ddl); + + /* Put row */ + int id = 0; + Map value = createValue(id); + UpdateRowDetails row = UpdateRowDetails.builder() + .compartmentId(getCompartmentId()) + .value(value) + .build(); + UpdateRowRequest putReq = 
UpdateRowRequest.builder() + .tableNameOrId(tableName) + .updateRowDetails(row) + .build(); + UpdateRowResponse putRet = client.updateRow(putReq); + checkUpdateRowResponse(putRet, true, null, null); + + /* Get row and check ttl */ + List key = createKey(id); + GetRowRequest getReq = GetRowRequest.builder() + .compartmentId(getCompartmentId()) + .tableNameOrId(tableName) + .key(key) + .build(); + GetRowResponse getRet = client.getRow(getReq); + checkGetRowResponse(getRet, value); + Date defExpTime = getRet.getRow().getTimeOfExpiration(); + assertTrue(defExpTime!= null); + + /* Put row with TTL = 3 && isTtlUseTableDefault(false) */ + row = UpdateRowDetails.builder() + .compartmentId(getCompartmentId()) + .value(value) + .ttl(3) + .isTtlUseTableDefault(false) + .build(); + putReq = UpdateRowRequest.builder() + .tableNameOrId(tableName) + .updateRowDetails(row) + .build(); + putRet = client.updateRow(putReq); + checkUpdateRowResponse(putRet, true, null, null); + + getRet = client.getRow(getReq); + checkGetRowResponse(getRet, value); + Date expTime = getRet.getRow().getTimeOfExpiration(); + assertTrue(expTime.getTime() > defExpTime.getTime()); + + /* Put row with TTL = 4 && isTtlUseTableDefault(true) */ + row = UpdateRowDetails.builder() + .compartmentId(getCompartmentId()) + .value(value) + .ttl(4) + .isTtlUseTableDefault(true) + .build(); + putReq = UpdateRowRequest.builder() + .tableNameOrId(tableName) + .updateRowDetails(row) + .build(); + putRet = client.updateRow(putReq); + checkUpdateRowResponse(putRet, true, null, null); + + getRet = client.getRow(getReq); + checkGetRowResponse(getRet, value); + expTime = getRet.getRow().getTimeOfExpiration(); + assertTrue(expTime.getTime() == defExpTime.getTime()); + } + + @Test + public void testIdentityValue() { + String tableName = "foo"; + String ddl = "create table if not exists " + tableName + "(" + + "id integer generated always as identity, " + + "name String, " + + "age integer, " + + "primary key(id))"; + + /* Create table */ + createTable(tableName, ddl); + + /* Put row */ + Map value = new HashMap(); + value.put("name", "jack"); + value.put("age", 21); + UpdateRowDetails row = UpdateRowDetails.builder() + .compartmentId(getCompartmentId()) + .value(value) + .build(); + UpdateRowRequest putReq = UpdateRowRequest.builder() + .tableNameOrId(tableName) + .updateRowDetails(row) + .build(); + UpdateRowResponse putRet = client.updateRow(putReq); + checkUpdateRowResponse(putRet, true, null, null); + assertEquals("1", putRet.getUpdateRowResult().getGeneratedValue()); + } + + @Test + public void testDelete() { + + createTable(tableName, tableDdl); + + /* Put a row */ + int id = 1; + String version = putRow(tableName, createValue(id)); + + /* Delete a row */ + List key = createKey(id); + DeleteRowRequest req = DeleteRowRequest.builder() + .tableNameOrId(tableName) + .compartmentId(getCompartmentId()) + .key(key) + .build(); + DeleteRowResponse res = client.deleteRow(req); + checkDeleteRowResponse(res); + + /* Delete again, failed */ + res = client.deleteRow(req); + checkDeleteRowResponse(res, false, null, null); + + /* + * DeleteIfVersion with old version, get 412 error + */ + String matchVersion = version; + version = putRow(tableName, createValue(id)); + + req = DeleteRowRequest.builder() + .tableNameOrId(tableName) + .compartmentId(getCompartmentId()) + .ifMatch(matchVersion) + .isGetReturnRow(true) + .key(key) + .build(); + deleteRowFail(req, 412 /* Precondition Failed */); + + /* DeleteIfVersion OK */ + matchVersion = version; + req = 
DeleteRowRequest.builder() + .tableNameOrId(tableName) + .compartmentId(getCompartmentId()) + .ifMatch(version) + .isGetReturnRow(true) + .key(key) + .build(); + res = client.deleteRow(req); + checkDeleteRowResponse(res, true, null, null); + + /* Invalid key */ + key.clear(); + req = DeleteRowRequest.builder() + .tableNameOrId(tableName) + .compartmentId(getCompartmentId()) + .key(key) + .build(); + deleteRowFail(req, 400 /* bad request */); + + /* Invalid key: key element should not be empty */ + key.add("id:1"); + key.add(""); + req = DeleteRowRequest.builder() + .tableNameOrId(tableName) + .compartmentId(getCompartmentId()) + .key(key) + .build(); + deleteRowFail(req, 400 /* bad request */); + + /* Invalid timeoutInMs, it should not be negative */ + key.clear(); + key.add("id:1"); + req = DeleteRowRequest.builder() + .tableNameOrId(tableName) + .compartmentId(getCompartmentId()) + .timeoutInMs(-1) + .key(key) + .build(); + deleteRowFail(req, 400 /* bad request */); + + /* Table not found */ + key.clear(); + key.add("id:1"); + req = DeleteRowRequest.builder() + .tableNameOrId("invalid") + .compartmentId(getCompartmentId()) + .key(key) + .build(); + deleteRowFail(req, 404 /* Table not found */); + + /* Invalid primary key, invalid format 'column-name:value' */ + key.clear(); + key.add("id1"); + req = DeleteRowRequest.builder() + .tableNameOrId(tableName) + .compartmentId(getCompartmentId()) + .key(key) + .build(); + deleteRowFail(req, 400 /* bad request */); + + /* Invalid primary key, invalid type */ + key.clear(); + key.add("id:test"); + req = DeleteRowRequest.builder() + .tableNameOrId(tableName) + .compartmentId(getCompartmentId()) + .key(key) + .build(); + deleteRowFail(req, 400 /* bad request */); + + /* Invalid primary key, field not exist */ + key.clear(); + key.add("id:1"); + key.add("invalid:1"); + req = DeleteRowRequest.builder() + .tableNameOrId(tableName) + .compartmentId(getCompartmentId()) + .key(key) + .build(); + deleteRowFail(req, 400 /* bad request */); + } + + private void deleteRowFail(DeleteRowRequest req, int expCode) { + try { + client.deleteRow(req); + fail("expect to fail but not"); + } catch (BmcException ex) { + assertEquals(expCode , ex.getStatusCode()); + checkErrorMessage(ex); + } + } + + @Test + public void testCaseInsensitive() { + + createTable(tableName, tableDdl); + + Map value = new HashMap(); + value.put("ID", 0); + value.put("NaMe", "name0"); + value.put("AgE", 20); + + UpdateRowDetails row = UpdateRowDetails.builder() + .compartmentId(getCompartmentId()) + .value(value) + .build(); + UpdateRowRequest putReq = UpdateRowRequest.builder() + .tableNameOrId(tableName) + .updateRowDetails(row) + .build(); + UpdateRowResponse putRet = client.updateRow(putReq); + checkUpdateRowResponse(putRet); + + Map retVal = getRow(tableName, createKey(0)); + assertEquals(createValue(0), retVal); + } + + @Test + public void testGet() { + final String tableName = "foo"; + String ddl = "create table if not exists " + tableName + "(" + + "k1 string, k2 integer, name String, age integer, " + + "primary key(shard(k1), k2))"; + /* Create table */ + createTable(tableName, ddl); + + /* Put a row */ + Map value = new HashMap(); + value.put("k1", "t1"); + value.put("k2", 1); + value.put("name", "name1"); + value.put("age", 20); + String version = putRow(tableName, value); + + GetRowRequest req; + GetRowResponse ret; + + /* Get row */ + List key = new ArrayList(); + key.add("k1:t1"); + key.add("k2:1"); + req = GetRowRequest.builder() + .tableNameOrId(tableName) + 
.compartmentId(getCompartmentId()) + .key(key) + .build(); + ret = client.getRow(req); + checkGetRowResponse(ret, value); + assertETagEquals(version, ret.getEtag()); + + /* Get row but not exists */ + String opcRequestId = "get-req-1"; + key.clear(); + key.add("k1:t2"); + key.add("k2:1"); + req = GetRowRequest.builder() + .tableNameOrId(tableName) + .compartmentId(getCompartmentId()) + .key(key) + .opcRequestId(opcRequestId) + .build(); + ret = client.getRow(req); + checkGetRowResponse(ret, null); + assertNull(ret.getRow().getValue()); + assertEquals(opcRequestId, ret.getOpcRequestId()); + + /* Test the value of key field contains ":" */ + value.clear(); + value.put("k1", "t2:id"); + value.put("k2", 2); + value.put("name", "name2"); + value.put("age", 21); + version = putRow(tableName, value); + + key.clear(); + key.add("k1:t2:id"); + key.add("k2:2"); + req = GetRowRequest.builder() + .tableNameOrId(tableName) + .compartmentId(getCompartmentId()) + .key(key) + .build(); + ret = client.getRow(req); + checkGetRowResponse(ret, value); + assertETagEquals(version, ret.getEtag()); + + /* Invalid key */ + key.clear(); + req = GetRowRequest.builder() + .tableNameOrId(tableName) + .compartmentId(getCompartmentId()) + .key(key) + .build(); + getRowFail(req, 400 /* bad request */); + + /* Invalid key: key element should not be empty */ + key.add("k1:1"); + key.add(""); + req = GetRowRequest.builder() + .tableNameOrId(tableName) + .compartmentId(getCompartmentId()) + .key(key) + .build(); + getRowFail(req, 400 /* bad request */); + + /* Invalid timeoutInMs, it should not be negative */ + key.clear(); + key.add("k1:t1"); + key.add("k2:1"); + req = GetRowRequest.builder() + .tableNameOrId(tableName) + .compartmentId(getCompartmentId()) + .timeoutInMs(-1) + .key(key) + .build(); + getRowFail(req, 400 /* bad request */); + + /* Table not found */ + key.clear(); + key.add("k1:1"); + req = GetRowRequest.builder() + .tableNameOrId("invalid") + .compartmentId(getCompartmentId()) + .key(key) + .build(); + getRowFail(req, 404 /* Table not found */); + + /* Invalid primary key, invalid format 'column-name:value' */ + key.clear(); + key.add("k1"); + req = GetRowRequest.builder() + .tableNameOrId(tableName) + .compartmentId(getCompartmentId()) + .key(key) + .build(); + getRowFail(req, 400 /* bad request */); + + /* Invalid primary key, invalid type */ + key.clear(); + key.add("k1:t1"); + key.add("k2:abc"); + req = GetRowRequest.builder() + .tableNameOrId(tableName) + .compartmentId(getCompartmentId()) + .key(key) + .build(); + getRowFail(req, 400 /* bad request */); + + /* Invalid primary key, field not exist */ + key.clear(); + key.add("k1:t1"); + key.add("invalid:1"); + req = GetRowRequest.builder() + .tableNameOrId(tableName) + .compartmentId(getCompartmentId()) + .key(key) + .build(); + getRowFail(req, 400 /* bad request */); + + /* Invalid primary key, miss primary key field */ + key.clear(); + key.add("k1:t1"); + req = GetRowRequest.builder() + .tableNameOrId(tableName) + .compartmentId(getCompartmentId()) + .key(key) + .build(); + getRowFail(req, 400 /* bad request */); + } + + /** + * Test put/get/delete row using tableOcid. 
+ */ + @Test + public void testWithTableOcid() { + if (!cloudRunning) { + return; + } + + createTable(tableName, tableDdl); + + final String tableOcid = getTableId(tableName); + + /* Put a row */ + Map value = createValue(0); + UpdateRowDetails row = UpdateRowDetails.builder() + .value(value) + .build(); + UpdateRowRequest putReq = UpdateRowRequest.builder() + .tableNameOrId(tableOcid) + .updateRowDetails(row) + .build(); + UpdateRowResponse putRet = client.updateRow(putReq); + checkUpdateRowResponse(putRet); + + /* Get row */ + List key = new ArrayList(); + key.add("id:0"); + GetRowRequest gtReq = GetRowRequest.builder() + .tableNameOrId(tableOcid) + .key(key) + .build(); + GetRowResponse gtRet = client.getRow(gtReq); + checkGetRowResponse(gtRet, value); + + /* Delete row */ + DeleteRowRequest delReq = DeleteRowRequest.builder() + .tableNameOrId(tableOcid) + .key(key) + .build(); + DeleteRowResponse delRet = client.deleteRow(delReq); + checkDeleteRowResponse(delRet); + } + + @Test + public void testRowNonExistentTableOcid() { + if (!cloudRunning) { + return; + } + + String tableName = "testIndexNonExistentTableOcid"; + String ddl = "create table if not exists " + tableName + "(" + + "id integer, name String, age integer, " + + "primary key(id))"; + /* Create table */ + createTable(tableName, ddl); + + /* fake non-existent table ocid */ + String tableOcid = getTableId(tableName) + "notexist"; + + /* Put a row */ + Map value = createValue(0); + UpdateRowDetails row = UpdateRowDetails.builder() + .value(value) + .build(); + UpdateRowRequest putReq = UpdateRowRequest.builder() + .tableNameOrId(tableOcid) + .updateRowDetails(row) + .build(); + try { + client.updateRow(putReq); + fail("UpdateRow expect table-not-found but not"); + } catch (BmcException ex) { + assertEquals(404 /* table not found */, ex.getStatusCode()); + } + + /* Get row */ + List key = new ArrayList(); + key.add("id:0"); + GetRowRequest gtReq = GetRowRequest.builder() + .tableNameOrId(tableOcid) + .key(key) + .build(); + try { + client.getRow(gtReq); + fail("GetRow expect table-not-found but not"); + } catch (BmcException ex) { + assertEquals(404 /* table not found */, ex.getStatusCode()); + } + + /* Delete row */ + DeleteRowRequest delReq = DeleteRowRequest.builder() + .tableNameOrId(tableOcid) + .key(key) + .build(); + try { + client.deleteRow(delReq); + fail("DeleteRow expect table-not-found but not"); + } catch (BmcException ex) { + assertEquals(404 /* table not found */, ex.getStatusCode()); + } + } + + @Test + public void testRowTableNameMapping() + throws Exception { + + /* + * Run this test for minicloud only + * + * This test directly calls SC api to create table to test proxy cache, + * it can only be run in minicloud. 
+ */ + assumeTrue("Skipping testRowTableNameMapping() if not minicloud test", + useMiniCloud); + + String tableName = "testRowTableNameMapping"; + String ddl = "create table " + tableName + "(" + + "id integer, name String, age integer, " + + "primary key(id))"; + String ddl2 = "create table " + tableName + "(" + + "id1 integer, name String, age integer, " + + "primary key(id1))"; + + /* drop non-existing table */ + dropTable(tableName); + + /* re-create table */ + scRecreateTable(getTenantId(), getCompartmentId(), tableName, ddl); + + /* Put a row to see if it can fetch table info correctly */ + Map value = createValue(0); + UpdateRowDetails row = UpdateRowDetails.builder() + .value(value) + .compartmentId(getCompartmentId()) + .build(); + UpdateRowRequest putReq = UpdateRowRequest.builder() + .tableNameOrId(tableName) + .updateRowDetails(row) + .build(); + client.updateRow(putReq); + client.updateRow(putReq); + + /* re-create table */ + scRecreateTable(getTenantId(), getCompartmentId(), tableName, ddl2); + + /* Put a row */ + value = new HashMap(); + value.put("id1", 2); + value.put("name", "name2"); + row = UpdateRowDetails.builder() + .value(value) + .compartmentId(getCompartmentId()) + .build(); + putReq = UpdateRowRequest.builder() + .tableNameOrId(tableName) + .updateRowDetails(row) + .build(); + client.updateRow(putReq); + + /* Get a row to cache mapping */ + List key = new ArrayList(); + key.add("id1:0"); + GetRowRequest gtReq = GetRowRequest.builder() + .tableNameOrId(tableName) + .compartmentId(getCompartmentId()) + .key(key) + .build(); + client.getRow(gtReq); + client.getRow(gtReq); + + /* re-create table */ + scRecreateTable(getTenantId(), getCompartmentId(), tableName, ddl); + + /* Get row */ + key = new ArrayList(); + key.add("id:0"); + gtReq = GetRowRequest.builder() + .tableNameOrId(tableName) + .compartmentId(getCompartmentId()) + .key(key) + .build(); + client.getRow(gtReq); + + /* Delete row to cache mapping */ + DeleteRowRequest delReq = DeleteRowRequest.builder() + .tableNameOrId(tableName) + .compartmentId(getCompartmentId()) + .key(key) + .build(); + client.deleteRow(delReq); + client.deleteRow(delReq); + + /* re-create table */ + scRecreateTable(getTenantId(), getCompartmentId(), tableName, ddl2); + + /* Delete row */ + key = new ArrayList(); + key.add("id1:0"); + delReq = DeleteRowRequest.builder() + .tableNameOrId(tableName) + .compartmentId(getCompartmentId()) + .key(key) + .build(); + client.deleteRow(delReq); + } + + @Test + public void testRowInvalidCompartmentId() { + String tableName = "testRowInvalidCompartmentId"; + /* Put a row */ + Map value = createValue(0); + UpdateRowDetails row = UpdateRowDetails.builder() + .value(value) + .build(); + UpdateRowRequest putReq = UpdateRowRequest.builder() + .tableNameOrId(tableName) + .updateRowDetails(row) + .build(); + try { + client.updateRow(putReq); + fail("UpdateRow expect 404 but not"); + } catch (BmcException ex) { + assertEquals(404, ex.getStatusCode()); + if (cloudRunning) { + assertTrue(ex.getMessage().contains("compartment id")); + } + } + + /* Get row */ + List key = new ArrayList(); + key.add("id:0"); + GetRowRequest gtReq = GetRowRequest.builder() + .tableNameOrId(tableName) + .key(key) + .build(); + try { + client.getRow(gtReq); + fail("GetRow expect 404 but not"); + } catch (BmcException ex) { + assertEquals(404, ex.getStatusCode()); + if (cloudRunning) { + assertTrue(ex.getMessage().contains("compartment id")); + } + } + + /* Delete row */ + DeleteRowRequest delReq = DeleteRowRequest.builder() + 
.tableNameOrId(tableName) + .key(key) + .build(); + try { + client.deleteRow(delReq); + fail("DeleteRow expect 404 but not"); + } catch (BmcException ex) { + assertEquals(404 , ex.getStatusCode()); + if (cloudRunning) { + assertTrue(ex.getMessage().contains("compartment id")); + } + } + } + + private void getRowFail(GetRowRequest req, int expCode) { + try { + client.getRow(req); + fail("expect to fail but not"); + } catch (BmcException ex) { + assertEquals(expCode , ex.getStatusCode()); + checkErrorMessage(ex); + } + } + + private Map createValue(int i) { + Map value = new HashMap(); + value.put("id", i); + value.put("name", "name" + i); + value.put("age", 20 + i % 40); + return value; + } + + private List createKey(int i) { + List key = new ArrayList(); + key.add("id:" + i); + return key; + } + + private void checkUpdateRowResponse(UpdateRowResponse res) { + checkUpdateRowResponse(res, true, null, null); + } + + private void checkUpdateRowResponse(UpdateRowResponse res, + Map prevRow, + String prevVersion) { + checkUpdateRowResponse(res, false, prevRow, prevVersion); + } + + private void checkUpdateRowResponse(UpdateRowResponse res, + boolean succeed, + Map prevRow, + String prevVersion) { + assertNotNull(res); + + UpdateRowResult result = res.getUpdateRowResult(); + assertNotNull(result); + + RequestUsage usage = result.getUsage(); + assertNotNull(usage); + if (succeed) { + assertNotNull(result.getVersion()); + assertTrue(usage.getWriteUnitsConsumed() > 0); + assertNotNull(res.getEtag()); + } else { + assertNull(result.getVersion()); + assertTrue(usage.getWriteUnitsConsumed() == 0); + assertNull(res.getEtag()); + } + + if (prevRow != null) { + assertEquals(prevRow, result.getExistingValue()); + assertTrue(usage.getReadUnitsConsumed() > 0); + } + if (prevVersion != null) { + assertEquals(prevVersion, result.getExistingVersion()); + } + } + + private void checkGetRowResponse(GetRowResponse res, + Map exp) { + assertNotNull(res); + assertNotNull(res.getOpcRequestId()); + + Row row = res.getRow(); + assertNotNull(row); + + RequestUsage usage = row.getUsage(); + assertNotNull(usage); + assertTrue(usage.getReadUnitsConsumed() > 0); + assertTrue(usage.getWriteUnitsConsumed() == 0); + + if (row.getValue() != null) { + assertNotNull(res.getEtag()); + } + if (exp != null) { + assertEquals(exp, row.getValue()); + } + } + + private void checkDeleteRowResponse(DeleteRowResponse res) { + checkDeleteRowResponse(res, true, null, null); + } + + private void checkDeleteRowResponse(DeleteRowResponse res, + boolean succeed, + Map prevValue, + String prevVersion) { + assertNotNull(res); + + DeleteRowResult result = res.getDeleteRowResult(); + assertNotNull(result); + assertTrue(result.getIsSuccess() == succeed); + + RequestUsage usage = result.getUsage(); + assertNotNull(usage); + if (succeed) { + assertTrue(usage.getWriteUnitsConsumed() > 0); + } else { + assertTrue(usage.getWriteUnitsConsumed() == 0); + } + assertTrue(usage.getReadUnitsConsumed() > 0); + + if (prevValue != null) { + assertEquals(prevValue, result.getExistingValue()); + assertEquals(prevVersion, result.getExistingVersion()); + } + } + + private void assertETagEquals(String exp, String etag) { + Version v0 = Version.fromByteArray(JsonUtils.decodeBase64(exp)); + Version v1 = Version.fromByteArray(JsonUtils.decodeBase64(etag)); + assertEquals(v0.getRepGroupUUID(), v1.getRepGroupUUID()); + assertEquals(v0.getVLSN(), v1.getVLSN()); + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/rest/TableTest.java 
b/httpproxy/tests/src/main/java/oracle/nosql/proxy/rest/TableTest.java new file mode 100644 index 00000000..844726b3 --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/rest/TableTest.java @@ -0,0 +1,2502 @@ +/*- + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * See the file LICENSE for redistribution information. + * + */ + +package oracle.nosql.proxy.rest; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeTrue; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Date; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TestName; + +import com.oracle.bmc.model.BmcException; +import com.oracle.bmc.nosql.model.ChangeTableCompartmentDetails; +import com.oracle.bmc.nosql.model.Column; +import com.oracle.bmc.nosql.model.CreateIndexDetails; +import com.oracle.bmc.nosql.model.CreateTableDetails; +import com.oracle.bmc.nosql.model.Identity; +import com.oracle.bmc.nosql.model.IndexKey; +import com.oracle.bmc.nosql.model.Schema; +import com.oracle.bmc.nosql.model.Table; +import com.oracle.bmc.nosql.model.Table.LifecycleState; +import com.oracle.bmc.nosql.model.TableCollection; +import com.oracle.bmc.nosql.model.TableLimits; +import com.oracle.bmc.nosql.model.TableLimits.CapacityMode; +import com.oracle.bmc.nosql.model.TableSummary; +import com.oracle.bmc.nosql.model.TableUsageSummary; +import com.oracle.bmc.nosql.model.UpdateTableDetails; +import com.oracle.bmc.nosql.requests.ChangeTableCompartmentRequest; +import com.oracle.bmc.nosql.requests.CreateIndexRequest; +import com.oracle.bmc.nosql.requests.CreateTableRequest; +import com.oracle.bmc.nosql.requests.DeleteTableRequest; +import com.oracle.bmc.nosql.requests.GetTableRequest; +import com.oracle.bmc.nosql.requests.ListTableUsageRequest; +import com.oracle.bmc.nosql.requests.ListTablesRequest; +import com.oracle.bmc.nosql.requests.UpdateTableRequest; +import com.oracle.bmc.nosql.responses.GetTableResponse; +import com.oracle.bmc.nosql.responses.ListTableUsageResponse; +import com.oracle.bmc.nosql.responses.ListTablesResponse; + +/** + * Test table related APIs: + * o list tables + * o create table + * o alter table + * o drop table + * o get table + * o get table usage + * o move compartment + */ +public class TableTest extends RestAPITestBase { + @Rule + public final TestName test = new TestName(); + + private final String newCompartmentId = getCompartmentIdMoveTo(); + + @Override + public void tearDown() throws Exception { + if (test.getMethodName().equals("testChangeCompartment")) { + removeAllTables(newCompartmentId); + } + super.tearDown(); + } + + @Test + public void testCreateTable() throws Exception { + String tableName = "foo"; + String ddl = "create table " + tableName + "(" + + "sid integer, " + + "id integer, " + + "name string, " + + "age integer, " + + "primary key(shard(sid), id))"; + + TableLimits limits = TableLimits.builder() + .maxReadUnits(200) + .maxWriteUnits(200) + .maxStorageInGBs(2) + .build(); + + CreateTableRequest req; + CreateTableDetails info; + + /* Create table */ + info = CreateTableDetails.builder() + .compartmentId(getCompartmentId()) + .name(tableName) + 
.ddlStatement(ddl) + .tableLimits(limits) + .build(); + req = CreateTableRequest.builder() + .createTableDetails(info) + .build(); + executeDdl(req); + + /* Check table */ + GetTableRequest getReq = GetTableRequest.builder() + .compartmentId(getCompartmentId()) + .tableNameOrId(tableName) + .build(); + GetTableResponse getRes = client.getTable(getReq); + Table table = getRes.getTable(); + HashMap columns = new HashMap(); + columns.put("sid", "integer"); + columns.put("id", "integer"); + columns.put("name", "string"); + columns.put("age", "integer"); + String[] pkeys = new String[] {"sid","id"}; + String[] sKeys = new String[] {"sid"}; + validateTable(table, tableName, columns, pkeys, sKeys, limits, + (cloudRunning ? ddl : null), -1 /* ttl */); + if (cloudRunning) { + assertEquals(ddl, table.getDdlStatement()); + } + + /* Create table with if not exists */ + String ddlIfNotExists = ddl.replace(tableName, + "if not exists " + tableName); + info = CreateTableDetails.builder() + .compartmentId(getCompartmentId()) + .name(tableName) + .ddlStatement(ddlIfNotExists) + .tableLimits(limits) + .build(); + req = CreateTableRequest.builder() + .createTableDetails(info) + .build(); + executeDdl(req); + + /* Create table but table already exists, get TableAleadyExists error */ + info = CreateTableDetails.builder() + .compartmentId(getCompartmentId()) + .name(tableName) + .ddlStatement(ddl) + .tableLimits(limits) + .build(); + req = CreateTableRequest.builder() + .createTableDetails(info) + .build(); + executeDdlFail(req, "TableAlreadyExists"); + } + + @Test + public void testCreateTableBadRequest() { + String tableName = "foo1"; + String ddl = "create table if not exists " + tableName + "(" + + "id integer, " + + "name string, " + + "primary key(id))"; + + TableLimits limits = TableLimits.builder() + .maxReadUnits(200) + .maxWriteUnits(200) + .maxStorageInGBs(2) + .build(); + + CreateTableRequest req; + CreateTableDetails info; + + /* Invalid name: name should not be empty or contain white space only */ + info = CreateTableDetails.builder() + .compartmentId(getCompartmentId()) + .name("") + .ddlStatement(ddl) + .tableLimits(limits) + .build(); + req = CreateTableRequest.builder() + .createTableDetails(info) + .build(); + executeDdlFail(req, 400, "InvalidParameter"); + + /* + * Invalid compartmentId: compartmentId should not be empty or + * contain white space only + */ + info = CreateTableDetails.builder() + .compartmentId("") + .name(tableName) + .ddlStatement(ddl) + .tableLimits(limits) + .build(); + req = CreateTableRequest.builder() + .createTableDetails(info) + .build(); + executeDdlFail(req, 400, "InvalidParameter"); + + /* + * Invalid ddlStatement: ddlStatement should not be empty or contain + * white space only + */ + info = CreateTableDetails.builder() + .compartmentId(getCompartmentId()) + .name(tableName) + .ddlStatement("") + .tableLimits(limits) + .build(); + req = CreateTableRequest.builder() + .createTableDetails(info) + .build(); + executeDdlFail(req, 400, "InvalidParameter"); + + /* + * Invalid TableLimits + */ + TableLimits[] limitsBad = new TableLimits[] { + TableLimits.builder().maxReadUnits(0).maxWriteUnits(1) + .maxStorageInGBs(1).build(), + TableLimits.builder().maxReadUnits(1).maxWriteUnits(0) + .maxStorageInGBs(1).build(), + TableLimits.builder().maxReadUnits(1).maxWriteUnits(1) + .maxStorageInGBs(0).build(), + }; + for (TableLimits limit : limitsBad) { + info = CreateTableDetails.builder() + .compartmentId(getCompartmentId()) + .name(tableName) + .ddlStatement(ddl) + 
.tableLimits(limit) + .build(); + req = CreateTableRequest.builder() + .createTableDetails(info) + .build(); + executeDdlFail(req, 400, "InvalidParameter"); + } + + /* Invalid DDL, it is not CREATE TABLE statement */ + info = CreateTableDetails.builder() + .compartmentId(getCompartmentId()) + .name(tableName) + .ddlStatement("alter table " + tableName + "(add n1 integer)") + .tableLimits(limits) + .build(); + req = CreateTableRequest.builder() + .createTableDetails(info) + .build(); + executeDdlFail(req, 400, "InvalidParameter"); + + /* Table name provided doesn't match the name in ddl statement */ + info = CreateTableDetails.builder() + .compartmentId(getCompartmentId()) + .name(tableName) + .ddlStatement("create table abc(id integer, primary key(id))") + .tableLimits(limits) + .build(); + req = CreateTableRequest.builder() + .createTableDetails(info) + .build(); + executeDdlFail(req, 400, "InvalidParameter"); + } + + @Test + public void testUpdateTable() { + + final String tableName = "foo"; + String ddl = "create table if not exists " + tableName + "(" + + "id integer, name String, age integer, " + + "primary key(id))"; + + /* Create table */ + TableLimits limits = TableLimits.builder() + .maxReadUnits(1000) + .maxWriteUnits(1000) + .maxStorageInGBs(1) + .build(); + createTable(tableName, ddl, limits); + + /* Alter table */ + ddl = "alter table " + tableName + "(add i1 integer)"; + alterTable(tableName, ddl); + + GetTableResponse gtRes = getTable(tableName); + HashMap columns = new HashMap(); + columns.put("id", "integer"); + columns.put("name", "string"); + columns.put("age", "integer"); + columns.put("i1", "integer"); + validateTable(gtRes.getTable(), tableName, columns, new String[] {"id"}, + null /* shardKey */, limits, null /* tableDdl */, + -1 /* ttl */); + if (cloudRunning) { + assertTrue(gtRes.getTable().getDdlStatement() + .toLowerCase().contains("if not exists")); + } + + /* Alter TableLimits */ + limits = TableLimits.builder() + .maxReadUnits(1500) + .maxWriteUnits(1500) + .maxStorageInGBs(2) + .build(); + updateTable(tableName, limits); + + gtRes = getTable(tableName); + validateTable(gtRes.getTable(), tableName, limits); + } + + @Test + public void testUpdateTableWithMatchETag() { + assumeTrue("Skipping testUpdateTableWithMatchETag() if not minicloud " + + "test", cloudRunning); + + final String tableName = "foo"; + String ddl = "create table if not exists " + tableName + "(" + + "id integer, name String, age integer, " + + "primary key(id))"; + + UpdateTableDetails info; + UpdateTableRequest req; + + /* Create table */ + TableLimits limits = TableLimits.builder() + .maxReadUnits(1000) + .maxWriteUnits(1000) + .maxStorageInGBs(1) + .build(); + createTable(tableName, ddl, limits); + + GetTableResponse gtRes = getTable(tableName); + String oldETag = gtRes.getEtag(); + assertNotNull(oldETag); + + /* Alter table schema */ + ddl = "alter table " + tableName + "(add i1 integer)"; + info = UpdateTableDetails.builder() + .compartmentId(getCompartmentId()) + .ddlStatement(ddl) + .build(); + req = UpdateTableRequest.builder() + .tableNameOrId(tableName) + .updateTableDetails(info) + .build(); + executeDdl(req); + + /* Verify table information after alter table */ + gtRes = getTable(tableName); + String currentETag = gtRes.getEtag(); + assertNotNull(currentETag); + + /* + * Alter table with mismatched ETag, expect to get ETagMismatch error + */ + req = UpdateTableRequest.builder() + .tableNameOrId(tableName) + .updateTableDetails(info) + .ifMatch(oldETag) + .build(); + 
executeDdlFail(req, "ETagMismatch"); + + /* + * Update table limits + */ + + /* Update limits with matched ETag */ + limits = TableLimits.builder() + .maxReadUnits(1500) + .maxWriteUnits(1500) + .maxStorageInGBs(2) + .build(); + info = UpdateTableDetails.builder() + .compartmentId(getCompartmentId()) + .tableLimits(limits) + .build(); + req = UpdateTableRequest.builder() + .tableNameOrId(tableName) + .ifMatch(currentETag) + .updateTableDetails(info) + .build(); + executeDdl(req); + oldETag = currentETag; + + /* Verify table information after update table limits */ + gtRes = getTable(tableName); + currentETag = gtRes.getEtag(); + assertNotNull(currentETag); + validateTable(gtRes.getTable(), tableName, null /* columns */, + new String[] {"id"}, null /* shardKeys */, + limits, null /* tableDdl */, -1 /* ttl */); + + /* + * Update limits with mismatched ETag, get ETagMismatch error + */ + req = UpdateTableRequest.builder() + .tableNameOrId(tableName) + .updateTableDetails(info) + .ifMatch(oldETag) + .build(); + executeDdlFail(req, "ETagMismatch"); + + /* + * Update tags + */ + + /* update tags with matched ETag */ + Map freeformTags = new HashMap<>(); + freeformTags.put("createBy", "OracleNosql"); + freeformTags.put("accountType", "IAMUser"); + + info = UpdateTableDetails.builder() + .compartmentId(getCompartmentId()) + .freeformTags(freeformTags) + .build(); + req = UpdateTableRequest.builder() + .tableNameOrId(tableName) + .updateTableDetails(info) + .ifMatch(currentETag) + .build(); + executeDdl(req); + oldETag = currentETag; + + /* verify table information after update tags */ + gtRes = getTable(tableName); + assertEquals(freeformTags, gtRes.getTable().getFreeformTags()); + assertDefinedTags(Collections.emptyMap(), + gtRes.getTable().getDefinedTags()); + + /* update tags with mismatched ETag, get ETagMismatch error */ + req = UpdateTableRequest.builder() + .tableNameOrId(tableName) + .updateTableDetails(info) + .ifMatch(oldETag) + .build(); + executeDdlFail(req, "ETagMismatch"); + } + + @Test + public void testUpdateTableBadRequest() { + final String tableName = "foo"; + + UpdateTableDetails info; + UpdateTableRequest req; + + /* + * Invalid compartmentId: compartmentId should not be empty or contain + * white space only + */ + String ddl = "alter table " + tableName + "(add i1 integer)"; + info = UpdateTableDetails.builder() + .compartmentId("") + .ddlStatement(ddl) + .build(); + req = UpdateTableRequest.builder() + .tableNameOrId(tableName) + .updateTableDetails(info) + .build(); + executeDdlFail(req, 400, "InvalidParameter"); + + /* + * Invalid ddlStatement: ddlStatement should not be empty or contain + * white space only + */ + info = UpdateTableDetails.builder() + .compartmentId(getCompartmentId()) + .ddlStatement(" ") + .build(); + req = UpdateTableRequest.builder() + .tableNameOrId(tableName) + .updateTableDetails(info) + .build(); + executeDdlFail(req, 400, "InvalidParameter"); + + /* + * Invalid TableLimits, readUnits/writeUnits/StorageInGBs should be + * great than 0 + */ + TableLimits[] limitsBad = new TableLimits[] { + TableLimits.builder().maxReadUnits(0).maxWriteUnits(1) + .maxStorageInGBs(1).build(), + TableLimits.builder().maxReadUnits(1).maxWriteUnits(0) + .maxStorageInGBs(1).build(), + TableLimits.builder().maxReadUnits(1).maxWriteUnits(1) + .maxStorageInGBs(0).build(), + }; + for (TableLimits limit : limitsBad) { + info = UpdateTableDetails.builder() + .compartmentId(getCompartmentId()) + .tableLimits(limit) + .build(); + req = UpdateTableRequest.builder() + 
.tableNameOrId(tableName) + .updateTableDetails(info) + .build(); + executeDdlFail(req, 400, "InvalidParameter"); + } + + /* Either ddlStatement or tableLimits should not be null */ + info = UpdateTableDetails.builder() + .compartmentId(getCompartmentId()) + .build(); + req = UpdateTableRequest.builder() + .tableNameOrId(tableName) + .updateTableDetails(info) + .build(); + executeDdlFail(req, 400, "InvalidParameter"); + + /* Only one of either ddlStatement or tableLimits may be specified */ + TableLimits limits = TableLimits.builder() + .maxReadUnits(100) + .maxWriteUnits(200) + .maxStorageInGBs(2) + .build(); + info = UpdateTableDetails.builder() + .compartmentId(getCompartmentId()) + .ddlStatement(ddl) + .tableLimits(limits) + .build(); + req = UpdateTableRequest.builder() + .tableNameOrId(tableName) + .updateTableDetails(info) + .build(); + executeDdlFail(req, 400, "InvalidParameter"); + + /* Only one of either tableLimits or tags may be specified */ + Map freeformTags = new HashMap<>(); + freeformTags.put("k1", "v1"); + info = UpdateTableDetails.builder() + .compartmentId(getCompartmentId()) + .tableLimits(limits) + .freeformTags(freeformTags) + .build(); + req = UpdateTableRequest.builder() + .tableNameOrId(tableName) + .updateTableDetails(info) + .build(); + executeDdlFail(req, 400, "InvalidParameter"); + + /* Table not found */ + info = UpdateTableDetails.builder() + .compartmentId(getCompartmentId()) + .ddlStatement(ddl) + .build(); + req = UpdateTableRequest.builder() + .tableNameOrId(tableName) + .updateTableDetails(info) + .build(); + if (cloudRunning) { + executeDdlFail(req, 404, "NotAuthorizedOrNotFound"); + } else { + executeDdlFail(req, 400, "InvalidParameter"); + } + + /* Invalid ddl, ddl is not ALTER TABLE statement */ + ddl = "create table " + tableName + "(id integer, primary key(id))"; + info = UpdateTableDetails.builder() + .compartmentId(getCompartmentId()) + .ddlStatement(ddl) + .build(); + req = UpdateTableRequest.builder() + .tableNameOrId(tableName) + .updateTableDetails(info) + .build(); + executeDdlFail(req, 404, + (cloudRunning ? 
"NotAuthorizedOrNotFound" : "TableNotFound")); + + /* Table name given doesn't match the name in ddl statement */ + ddl = "alter table abc(add i1 integer)"; + info = UpdateTableDetails.builder() + .compartmentId(getCompartmentId()) + .ddlStatement(ddl) + .build(); + req = UpdateTableRequest.builder() + .tableNameOrId(tableName) + .updateTableDetails(info) + .build(); + executeDdlFail(req, 400, "InvalidParameter"); + } + + @Test + public void testDeleteTable() { + final String tableName = "foo"; + String ddl = "create table if not exists " + tableName + "(" + + "id integer, name String, age integer, " + + "primary key(id))"; + + /* Create table */ + createTable(tableName, ddl); + + DeleteTableRequest req; + + /* Delete table */ + req = DeleteTableRequest.builder() + .compartmentId(getCompartmentId()) + .tableNameOrId(tableName) + .build(); + executeDdl(req); + + try { + getTable(tableName); + fail("Expect table-not-found but not"); + } catch (BmcException ex) { + assertEquals(404 /* table not found */, ex.getStatusCode()); + checkErrorMessage(ex); + } + + /* Delete table if exists */ + req = DeleteTableRequest.builder() + .compartmentId(getCompartmentId()) + .tableNameOrId(tableName) + .isIfExists(true) + .build(); + executeDdl(req); + + /* Delete a non-exists table using if exists */ + req = DeleteTableRequest.builder() + .compartmentId(getCompartmentId()) + .tableNameOrId("invalid") + .isIfExists(true) + .build(); + executeDdl(req); + + /* Delete a non-exists table */ + req = DeleteTableRequest.builder() + .compartmentId(getCompartmentId()) + .tableNameOrId("invalid") + .isIfExists(false) + .build(); + executeDdlFail(req, "TableNotFound"); + } + + @Test + public void testDeleteTableWithMatchETag() { + assumeTrue("Skipping testDeleteTableWithMatchETag() if not minicloud " + + "test", cloudRunning); + + final String tableName = "foo"; + String ddl = "create table if not exists " + tableName + "(" + + "id integer, name String, age integer, " + + "primary key(id))"; + + /* Create table */ + createTable(tableName, ddl); + GetTableResponse gtRes = getTable(tableName); + String currentETag = gtRes.getEtag(); + assertNotNull(currentETag); + + /* Alter table schema */ + ddl = "alter table " + tableName + "(add i1 integer)"; + UpdateTableDetails info = UpdateTableDetails.builder() + .compartmentId(getCompartmentId()) + .ddlStatement(ddl) + .build(); + UpdateTableRequest utReq = UpdateTableRequest.builder() + .tableNameOrId(tableName) + .updateTableDetails(info) + .build(); + executeDdl(utReq); + String oldETag = currentETag; + + /* Verify table information after alter table */ + gtRes = getTable(tableName); + currentETag = gtRes.getEtag(); + + DeleteTableRequest req; + + /* Delete table with mismatched ETag */ + req = DeleteTableRequest.builder() + .compartmentId(getCompartmentId()) + .tableNameOrId(tableName) + .ifMatch(oldETag) + .build(); + executeDdlFail(req, "ETagMismatch"); + + /* Delete table with matched ETag */ + req = DeleteTableRequest.builder() + .compartmentId(getCompartmentId()) + .tableNameOrId(tableName) + .ifMatch(currentETag) + .build(); + executeDdl(req); + + try { + getTable(tableName); + fail("Expect table-not-found but not"); + } catch (BmcException ex) { + assertEquals(404 /* table not found */, ex.getStatusCode()); + checkErrorMessage(ex); + } + + /* + * Delete non-existing table with if exists and ETag, expect to succeed. 
+ */ + req = DeleteTableRequest.builder() + .compartmentId(getCompartmentId()) + .tableNameOrId(tableName) + .isIfExists(true) + .ifMatch(currentETag) + .build(); + executeDdl(req); + } + + @Test + public void testGetTable() { + String tableName = "foo"; + String ddl = "create table " + tableName + "(" + + "sid integer, " + + "id integer, " + + "name string, " + + "age integer, " + + "address json, " + + "dateTime timestamp(6), " + + "bin binary, " + + "bin20 binary(20), " + + "color enum(YELLOW, BLUE, RED), " + + "hobbies map(string), " + + "numbers array(number), " + + "info record(ri integer, rs string, rm map(integer)), " + + "primary key(shard(sid), id)) using ttl 3 days"; + TableLimits limits = TableLimits.builder() + .maxReadUnits(200) + .maxWriteUnits(200) + .maxStorageInGBs(2) + .build(); + + createTable(tableName, ddl, limits); + + GetTableRequest req; + GetTableResponse ret; + + req = GetTableRequest.builder() + .compartmentId(getCompartmentId()) + .tableNameOrId(tableName) + .build(); + ret = client.getTable(req); + assertNotNull(ret); + assertNotNull(ret.getEtag()); + + Table table = ret.getTable(); + Schema schema = table.getSchema(); + assertEquals(getCompartmentId(), table.getCompartmentId()); + assertNotNull(schema.getTtl()); + assertEquals(3, schema.getTtl().intValue()); + assertNotNull(table.getDdlStatement()); + assertTableOcid(table.getId()); + HashMap columns = new HashMap(); + columns.put("sid", "integer"); + columns.put("id", "integer"); + columns.put("name", "string"); + columns.put("age", "integer"); + columns.put("address", "json"); + columns.put("dateTime", "timestamp(6)"); + columns.put("bin", "binary"); + columns.put("bin20", "binary(20)"); + columns.put("color", "enum(YELLOW, BLUE, RED)"); + columns.put("hobbies", "map(string)"); + columns.put("numbers", "array(number)"); + columns.put("info", "record(ri integer, rs string, rm map(integer))"); + String[] pkeys = new String[] {"sid","id"}; + String[] sKeys = new String[] {"sid"}; + validateTable(table, tableName, columns, pkeys, sKeys, limits, + (cloudRunning ? 
ddl : null), 3 /* ttl */); + + /* + * Test identity and uuid + */ + + /* + * seqNo INTEGER GENERATED ALWAYS AS IDENTITY + * guid0 STRING AS UUID + */ + ddl = "ALTER TABLE " + tableName + + "(ADD seqNo INTEGER GENERATED ALWAYS AS IDENTITY," + + " ADD guid0 STRING AS UUID)"; + alterTable(tableName, ddl, true); + ret = client.getTable(req); + assertNotNull(ret); + schema = ret.getTable().getSchema(); + assertNotNull(schema); + checkIdenity(schema.getIdentity(), "seqNo", true, false); + int uuidCols = 0; + for (Column col : schema.getColumns()) { + if (col.getName().equals("guid0")) { + assertTrue(col.getIsAsUuid()); + assertFalse(col.getIsGenerated()); + uuidCols++; + } else { + assertFalse(col.getIsAsUuid()); + assertFalse(col.getIsGenerated()); + } + } + assertEquals(1, uuidCols); + + /* + * seqNo INTEGER GENERATED BY DEFAULT AS IDENTITY + * guid0 STRING AS UUID + */ + ddl = "ALTER TABLE " + tableName + + "(MODIFY seqNo GENERATED BY DEFAULT AS IDENTITY)"; + alterTable(tableName, ddl, true); + ret = client.getTable(req); + assertNotNull(ret); + checkIdenity(ret.getTable().getSchema().getIdentity(), "seqNo", + false, false); + + /* + * seqNo INTEGER GENERATED BY DEFAULT ON NULL AS IDENTITY + * guid0 STRING AS UUID + */ + ddl = "ALTER TABLE " + tableName + + "(MODIFY seqNo GENERATED BY DEFAULT ON NULL AS IDENTITY)"; + alterTable(tableName, ddl, true); + ret = client.getTable(req); + assertNotNull(ret); + checkIdenity(ret.getTable().getSchema().getIdentity(), "seqNo", + false, true); + + /* + * seqNo INTEGER + * guid0 STRING AS UUID + * guid1 STRING AS UUID GENERATED BY DEFAULT + */ + ddl = "ALTER TABLE " + tableName + + "(MODIFY seqNo DROP IDENTITY," + + " ADD guid1 STRING AS UUID GENERATED BY DEFAULT)"; + alterTable(tableName, ddl, true); + ret = client.getTable(req); + assertNotNull(ret); + schema = ret.getTable().getSchema(); + assertNull(schema.getIdentity()); + uuidCols = 0; + for (Column col : schema.getColumns()) { + if (col.getName().equals("guid0")) { + assertTrue(col.getIsAsUuid()); + assertFalse(col.getIsGenerated()); + uuidCols++; + } else if (col.getName().equals("guid1")) { + assertTrue(col.getIsAsUuid()); + assertTrue(col.getIsGenerated()); + uuidCols++; + } else { + assertFalse(col.getIsAsUuid()); + assertFalse(col.getIsGenerated()); + } + } + assertEquals(2, uuidCols); + + /* Table not found */ + req = GetTableRequest.builder() + .compartmentId(getCompartmentId()) + .tableNameOrId("tableNotExists") + .build(); + try { + client.getTable(req); + fail("expect to fail but not"); + } catch (BmcException ex) { + assertEquals(404 /* Table not found */ , ex.getStatusCode()); + checkErrorMessage(ex); + } + } + + /** + * Test get/update/delete table using table ocid.
+ */ + @Test + public void testWithTableOcid() { + assumeTrue("Skipping testWithTableOcid() if not minicloud test ", + cloudRunning); + + final String tableName = "testGetUpdateDeleteTableWithTableOcid"; + String ddl = "create table if not exists " + tableName + "(" + + "id integer, name String, age integer, " + + "primary key(id))"; + /* Create table */ + createTable(tableName, ddl); + + final String tableOcid = getTableId(tableName); + + /* Get table */ + GetTableRequest gtReq = GetTableRequest.builder() + .tableNameOrId(tableOcid) + .build(); + GetTableResponse gtRet = client.getTable(gtReq); + assertNotNull(gtRet); + assertNotNull(gtRet.getEtag()); + + /* Alter table */ + ddl = "alter table " + tableName + "(add i1 integer)"; + UpdateTableDetails info = UpdateTableDetails.builder() + .ddlStatement(ddl) + .build(); + UpdateTableRequest utReq = UpdateTableRequest.builder() + .tableNameOrId(tableOcid) + .updateTableDetails(info) + .build(); + executeDdl(utReq); + + GetTableResponse gtRes = getTable(tableName); + HashMap columns = new HashMap(); + columns.put("id", "integer"); + columns.put("name", "string"); + columns.put("age", "integer"); + columns.put("i1", "integer"); + validateTable(gtRes.getTable(), tableName, columns, new String[] {"id"}, + null /* shardKeys */, defaultLimits, null /* tableDdl */, + -1 /* ttl*/); + + /* Invalid table ocid: update table with mismatched table ocid. */ + String createTableFoo = "create table foo (id integer, primary key(id))"; + createTable("foo", createTableFoo); + String fooOcid = getTableId("foo"); + + ddl = "alter table " + tableName + "(drop i1)"; + info = UpdateTableDetails.builder() + .ddlStatement(ddl) + .build(); + utReq = UpdateTableRequest.builder() + .tableNameOrId(fooOcid) + .updateTableDetails(info) + .build(); + executeDdlFail(utReq, 400, "InvalidParameter"); + + /* Update TableLimits */ + TableLimits limits = TableLimits.builder() + .maxReadUnits(101) + .maxWriteUnits(101) + .maxStorageInGBs(2) + .build(); + info = UpdateTableDetails.builder() + .tableLimits(limits) + .build(); + utReq = UpdateTableRequest.builder() + .tableNameOrId(tableOcid) + .updateTableDetails(info) + .build(); + executeDdl(utReq); + + gtRes = getTable(tableName); + validateTable(gtRes.getTable(), tableName, limits); + + /* Delete table */ + DeleteTableRequest dtReq = DeleteTableRequest.builder() + .tableNameOrId(tableOcid) + .build(); + executeDdl(dtReq); + + try { + getTable(tableName); + fail("Expect table-not-found but not"); + } catch (BmcException ex) { + assertEquals(404 /* table not found */, ex.getStatusCode()); + checkErrorMessage(ex); + } + } + + @Test + public void testNonExistentTableOcid() { + assumeTrue("Skipping testNonExistentTableOcid() if not minicloud test", + cloudRunning); + + String tableName = "testNonExistentTableOcid"; + String ddl = "create table if not exists " + tableName + "(" + + "id integer, name String, age integer, " + + "primary key(id))"; + /* Create table */ + createTable(tableName, ddl); + + /* fake non-existent table ocid */ + String tableOcid = getTableId(tableName) + "notexist"; + + /* Get table */ + try { + getTable(tableOcid); + fail("GetTable expect table-not-found but not"); + } catch (BmcException ex) { + assertEquals(404 /* table not found */, ex.getStatusCode()); + } + + /* Alter table */ + try { + ddl = "alter table " + tableName + "(add i1 integer)"; + UpdateTableDetails info = UpdateTableDetails.builder() + .ddlStatement(ddl) + .build(); + UpdateTableRequest utReq = UpdateTableRequest.builder() + 
.tableNameOrId(tableOcid) + .updateTableDetails(info) + .build(); + executeDdl(utReq); + fail("AlterTable expect table-not-found but not"); + } catch (BmcException ex) { + assertEquals(404 /* table not found */, ex.getStatusCode()); + } + + /* List table usage */ + try { + ListTableUsageRequest.Builder builder = + ListTableUsageRequest.builder() + .tableNameOrId(tableOcid); + client.listTableUsage(builder.build()); + fail("ListTableUsage expect table-not-found but not"); + } catch (BmcException ex) { + assertEquals(404 /* table not found */, ex.getStatusCode()); + } + + /* Delete a non-exists table using if exists */ + DeleteTableRequest req = DeleteTableRequest.builder() + .compartmentId(getCompartmentId()) + .tableNameOrId("invalid") + .isIfExists(true) + .build(); + executeDdl(req); + + /* Delete a non-exists table */ + req = DeleteTableRequest.builder() + .compartmentId(getCompartmentId()) + .tableNameOrId("invalid") + .isIfExists(false) + .build(); + executeDdlFail(req, "TableNotFound"); + } + + /* + * Test list tables + * the cases with advanced parameters are for minicloud test only + */ + @Test + public void testListTables() { + + final String[] tableNames = new String[] { + "nosqlUsers9", + "Emails", + "Addresses", + "andcUsers10" + }; + final int numTables = tableNames.length; + + /* freeform tags */ + final Map freeformTags = new HashMap<>(); + freeformTags.put("createBy", "OracleNosql"); + freeformTags.put("accountType", "IAMUser"); + final List tablesWithFreeformTags = + Arrays.asList("nosqlUsers9", "Emails"); + + /* predefined tags */ + final Map> definedTags = new HashMap<>(); + Map props = new HashMap<>(); + props.put(DEFINED_TAG_PROP, "WebTier"); + definedTags.put(DEFINED_TAG_NAMESPACE, props); + + final List tablesWithDefinedTags = + Arrays.asList("nosqlUsers9", "Addresses"); + + final List timeCreatedAsc = new ArrayList(); + Map freeform; + Map> defined; + /* + * Create tables for testing list tables. One of them is auto scaling + * table. + */ + for (String tableName : tableNames) { + freeform = tablesWithFreeformTags.contains(tableName) ? + freeformTags : null; + defined = tablesWithDefinedTags.contains(tableName) ? + definedTags : null; + TableLimits limits; + if (tableName.equals(tableNames[0])) { + /* create one auto scaling table */ + limits = TableLimits.builder() + .maxStorageInGBs(100) + .capacityMode(CapacityMode.OnDemand) + .build(); + } else { + limits = defaultLimits; + } + createTestTable(tableName, limits, freeform, defined); + timeCreatedAsc.add(tableName); + } + if (cloudRunning) { + ListTablesRequest listRequest = + ListTablesRequest.builder(). 
+ compartmentId(getCompartmentId()).build(); + ListTablesResponse listResponse = client.listTables(listRequest); + TableCollection tc = listResponse.getTableCollection(); + assertEquals(0, tc.getAutoReclaimableTables().intValue()); + assertEquals(1, tc.getOnDemandCapacityTables().intValue()); + if (tenantLimits != null) { + assertEquals(tenantLimits.getNumFreeTables(), + tc.getMaxAutoReclaimableTables().intValue()); + assertEquals(tenantLimits.getNumAutoScalingTables(), + tc.getMaxOnDemandCapacityTables().intValue()); + } + } + final List timeCreatedDesc = new ArrayList<>(timeCreatedAsc); + Collections.reverse(timeCreatedDesc); + final List nameAsc = new ArrayList<>(timeCreatedAsc); + Collections.sort(nameAsc); + + List tables; + ListTablesRequest.LifecycleState state = null; + ListTablesRequest.SortBy sortBy = null; + ListTablesRequest.SortOrder sortOrder = null; + int limit = 0; + String namePattern = null; + + /* list table with defaults for all parameters */ + tables = doListTables(null /* namePattern */, + null /* LifecycleState */, + null /* SortBy */, + null /* sortOrder */, + 0 /* limit */, + -1); + + for (TableSummary table : tables) { + assertTrue(timeCreatedAsc.contains(table.getName())); + assertTableOcid(table.getId()); + } + assertEquals(numTables, tables.size()); + if (cloudRunning) { + assertSortedTable(tables, timeCreatedDesc); + } + + /* list table with state = ACTIVE and limit = 3 */ + state = ListTablesRequest.LifecycleState.Active; + limit = 3; + tables = doListTables(null /* namePattern */, + state /* LifecycleState */, + null /* SortBy */, + null /* sortOrder */, + limit /* limit */, + numTables); + for (TableSummary table : tables) { + assertEquals(LifecycleState.Active, table.getLifecycleState()); + assertTrue(timeCreatedAsc.contains(table.getName())); + + if (cloudRunning) { + /* verify freeformTags and definedTags */ + if (tablesWithFreeformTags.contains(table.getName())) { + assertEquals(freeformTags, table.getFreeformTags()); + } + if (tablesWithDefinedTags.contains(table.getName())) { + assertDefinedTags(definedTags, table.getDefinedTags()); + } + } + } + + /* The tests below use advanced parameters; they are for minicloud only */ + if (!cloudRunning) { + return; + } + + /* list table with defaults for all parameters */ + tables = doListTables(null /* namePattern */, + null /* LifecycleState */, + null /* SortBy */, + null /* sortOrder */, + 0 /* limit */, + -1); + assertSortedTable(tables, timeCreatedDesc); + + /* list table with state = "CREATING", returns 0 rows */ + state = ListTablesRequest.LifecycleState.Creating; + limit = 3; + tables = doListTables(null /* namePattern */, + state /* LifecycleState */, + null /* SortBy */, + null /* sortOrder */, + limit /* limit */, + 0); + + /* list table with state = "ACTIVE" order by timeCreated asc */ + state = ListTablesRequest.LifecycleState.Active; + sortBy = ListTablesRequest.SortBy.TimeCreated; + sortOrder = ListTablesRequest.SortOrder.Asc; + limit = 3; + tables = doListTables(null /* namePattern */, + state /* LifecycleState */, + sortBy /* SortBy */, + sortOrder /* sortOrder */, + limit /* limit */, + numTables); + for (TableSummary table : tables) { + assertEquals(LifecycleState.Active, table.getLifecycleState()); + } + assertSortedTable(tables, timeCreatedAsc); + + /* list table with state = "ACTIVE" order by name */ + state = ListTablesRequest.LifecycleState.Active; + sortBy = ListTablesRequest.SortBy.Name; + sortOrder = ListTablesRequest.SortOrder.Asc; + limit = 4; + tables = doListTables(null /* namePattern
*/, + state /* LifecycleState */, + sortBy /* SortBy */, + sortOrder /* sortOrder */, + limit /* limit */, + numTables); + for (TableSummary table : tables) { + assertEquals(LifecycleState.Active, table.getLifecycleState()); + } + assertSortedTable(tables, nameAsc); + + /* + * list table: + * namepattern = "*users*" + * state = "ACTIVE" + * order by timeCreated asc + */ + namePattern = "*Users*"; + state = ListTablesRequest.LifecycleState.Active; + sortBy = ListTablesRequest.SortBy.TimeCreated; + sortOrder = ListTablesRequest.SortOrder.Asc; + limit = 1; + tables = doListTables(namePattern /* namePattern */, + state /* LifecycleState */, + sortBy /* SortBy */, + sortOrder /* sortOrder */, + limit /* limit */, + 2); + int prevIndex = -1; + for (TableSummary table : tables) { + assertEquals(LifecycleState.Active, table.getLifecycleState()); + int i = timeCreatedAsc.lastIndexOf(table.getName()); + assertTrue(prevIndex < i); + prevIndex = i; + } + + /* + * list table: + * namepattern = "*Users?" + * state = "ACTIVE" + * order by timeCreated asc + */ + namePattern = "*Users?"; + state = ListTablesRequest.LifecycleState.Active; + sortBy = ListTablesRequest.SortBy.TimeCreated; + sortOrder = ListTablesRequest.SortOrder.Asc; + limit = 3; + tables = doListTables(namePattern /* namePattern */, + state /* LifecycleState */, + sortBy /* SortBy */, + sortOrder /* sortOrder */, + limit /* limit */, + 1); + TableSummary table = tables.get(0); + assertEquals(LifecycleState.Active, table.getLifecycleState()); + assertEquals("nosqlUsers9", table.getName()); + } + + private List + doListTables(String namePattern, + ListTablesRequest.LifecycleState state, + ListTablesRequest.SortBy sortBy, + ListTablesRequest.SortOrder sortOrder, + int limit, + int expCount) { + + String nextPage = null; + int count = 0; + ListTablesRequest req; + ListTablesResponse res; + TableCollection tc; + + ListTablesRequest.Builder builder = + ListTablesRequest.builder() + .compartmentId(getCompartmentId()) + .limit(limit); + if (namePattern != null) { + builder.name(namePattern); + } + if (state != null) { + builder.lifecycleState(state); + } + if (sortBy != null) { + builder.sortBy(sortBy); + } + if (sortOrder != null) { + builder.sortOrder(sortOrder); + } + + List results = new ArrayList(); + do { + if (nextPage != null) { + builder.page(nextPage); + } + + req = builder.build(); + res = client.listTables(req); + tc = res.getTableCollection(); + count += tc.getItems().size(); + if (limit > 0) { + assertTrue(tc.getItems().size() <= limit); + } + for (TableSummary table : tc.getItems()) { + assertNotNull(table.getName()); + results.add(table); + } + nextPage = res.getOpcNextPage(); + } while (nextPage != null); + + if (expCount >= 0) { + assertEquals(expCount, count); + } + return results; + } + + private void assertSortedTable(List tables, + List expSorted) { + assertEquals(expSorted.size(), tables.size()); + for (int i = 0; i < tables.size(); i++) { + assertEquals(expSorted.get(i), tables.get(i).getName()); + } + } + + @Test + public void testTableTags() { + assumeTrue("Skipping testTableTags() if not minicloud test", + cloudRunning); + + String tableName = "foo"; + String ddl = "create table " + tableName + "(" + + "sid integer, " + + "id integer, " + + "name string, " + + "age integer, " + + "primary key(shard(sid), id))"; + + TableLimits limits = TableLimits.builder() + .maxReadUnits(200) + .maxWriteUnits(200) + .maxStorageInGBs(2) + .build(); + + /* freeform tags */ + Map freeformTags = new HashMap<>(); + 
freeformTags.put("createBy", "OracleNosql"); + freeformTags.put("accountType", "IAMUser"); + + Map freeformTags1 = new HashMap<>(); + freeformTags1.put("createBy", "ANDC"); + freeformTags1.put("accountType", "testUser"); + + /* predefined tags */ + Map> definedTags = null; + definedTags = new HashMap<>(); + Map props = new HashMap<>(); + props.put(DEFINED_TAG_PROP, "definedTags"); + definedTags.put(DEFINED_TAG_NAMESPACE, props); + + /* Create table */ + createTable(tableName, ddl, limits, freeformTags, definedTags); + + /* Get table and check tags */ + GetTableResponse gtRes = getTable(tableName); + Table table = gtRes.getTable(); + assertEquals(freeformTags, table.getFreeformTags()); + assertDefinedTags(definedTags, table.getDefinedTags()); + + /* Update tags */ + props.put(DEFINED_TAG_PROP, "updatedKey"); + UpdateTableDetails utInfo = UpdateTableDetails.builder() + .compartmentId(getCompartmentId()) + .definedTags(definedTags) + .freeformTags(freeformTags1) + .build(); + UpdateTableRequest utReq = UpdateTableRequest.builder() + .tableNameOrId(tableName) + .updateTableDetails(utInfo) + .build(); + executeDdl(utReq); + + gtRes = getTable(tableName); + table = gtRes.getTable(); + assertEquals(freeformTags1, table.getFreeformTags()); + assertDefinedTags(definedTags, table.getDefinedTags()); + + /* Remove freeformTags */ + utInfo = UpdateTableDetails.builder() + .compartmentId(getCompartmentId()) + .freeformTags(Collections.emptyMap()) + .definedTags(definedTags) + .build(); + utReq = UpdateTableRequest.builder() + .tableNameOrId(tableName) + .updateTableDetails(utInfo) + .build(); + executeDdl(utReq); + + gtRes = getTable(tableName); + table = gtRes.getTable(); + assertTrue(table.getFreeformTags().isEmpty()); + assertDefinedTags(definedTags, table.getDefinedTags()); + + /* Remove definedTags */ + utInfo = UpdateTableDetails.builder() + .compartmentId(getCompartmentId()) + .definedTags(Collections.emptyMap()) + .freeformTags(freeformTags) + .build(); + utReq = UpdateTableRequest.builder() + .tableNameOrId(tableName) + .updateTableDetails(utInfo) + .build(); + executeDdl(utReq); + + gtRes = getTable(tableName); + table = gtRes.getTable(); + assertDefinedTags(Collections.emptyMap(), table.getDefinedTags()); + assertEquals(freeformTags, table.getFreeformTags()); + + /* + * Invalid tags + */ + + /* The value of 'k1' in freeformTags is null */ + freeformTags1 = new HashMap<>(); + freeformTags1.put("k1", null); + utInfo = UpdateTableDetails.builder() + .compartmentId(getCompartmentId()) + .freeformTags(freeformTags1) + .build(); + utReq = UpdateTableRequest.builder() + .tableNameOrId(tableName) + .updateTableDetails(utInfo) + .build(); + executeDdlFail(utReq, 400, "InvalidParameter"); + + /* The tags of a namespace in definedTags is null */ + definedTags.put("andc", null); + utInfo = UpdateTableDetails.builder() + .compartmentId(getCompartmentId()) + .definedTags(definedTags) + .build(); + utReq = UpdateTableRequest.builder() + .tableNameOrId(tableName) + .updateTableDetails(utInfo) + .build(); + executeDdlFail(utReq, 400, "InvalidParameter"); + + /* The value of andc.k1 in definedTags is null */ + Map kvs = new HashMap<>(); + kvs.put("k1", null); + definedTags.put("andc", kvs); + utInfo = UpdateTableDetails.builder() + .compartmentId(getCompartmentId()) + .definedTags(definedTags) + .build(); + utReq = UpdateTableRequest.builder() + .tableNameOrId(tableName) + .updateTableDetails(utInfo) + .build(); + executeDdlFail(utReq, 400, "InvalidParameter"); + + /* + * The value of andc.k1 in definedTags is 
invalid type, valid type is + * INTEGER/STRING/BOOLEAN + */ + kvs.clear(); + kvs.put("k1", new ArrayList()); + definedTags.put("andc", kvs); + utInfo = UpdateTableDetails.builder() + .compartmentId(getCompartmentId()) + .definedTags(definedTags) + .build(); + utReq = UpdateTableRequest.builder() + .tableNameOrId(tableName) + .updateTableDetails(utInfo) + .build(); + executeDdlFail(utReq, 400, "InvalidParameter"); + } + + @Test + public void testChangeCompartment() throws Exception { + assumeTrue("Skipping testChangeCompartment() if not minicloud test", + cloudRunning); + + final String tableName = "changeCompartmentTest"; + final String ddl = "create table if not exists " + tableName + "(" + + "id integer, name String, age integer, " + + "primary key(id))"; + + String origCompt = getCompartmentId(); + + /* Create table */ + createTable(tableName, ddl); + GetTableResponse gtRes = getTable(tableName); + String origETag = gtRes.getEtag(); + String tableOcid = gtRes.getTable().getId(); + + /* Move the table to newCompt */ + execChangeCompartment(origCompt, newCompartmentId, tableName); + + /* Get table using ocid */ + gtRes = getTable(tableOcid); + assertEquals(newCompartmentId, gtRes.getTable().getCompartmentId()); + + /* Get table using compartment + tableName */ + gtRes = getTable(newCompartmentId, tableName); + assertEquals(newCompartmentId, gtRes.getTable().getCompartmentId()); + + /* Find table with origCompt, should not found */ + try { + getTable(origCompt, tableName); + fail("Expect to get 404(TableNotFound) but not"); + } catch (BmcException ex) { + assertEquals(404, ex.getStatusCode()); + checkErrorMessage(ex); + } + + /* Move the table back to origCompt */ + execChangeCompartment(newCompartmentId, origCompt, tableName); + + /* Get table using ocid */ + gtRes = getTable(tableOcid); + assertEquals(origCompt, gtRes.getTable().getCompartmentId()); + + /* Get table using compartment + tableName */ + gtRes = getTable(origCompt, tableName); + assertEquals(origCompt, gtRes.getTable().getCompartmentId()); + String currentETag = gtRes.getEtag(); + + /* + * Move table to newCompt with unmatched ETag, should fail with + * ETagMismatch error. 
+ */ + execChangeCompartment(origCompt, newCompartmentId, tableName, origETag, + true /* expFail */, "ETagMismatch"); + + /* + * Move table to newCompt with matched ETag, should succeed + */ + execChangeCompartment(origCompt, newCompartmentId, tableName, + currentETag, false /* expFail */, null); + + /* + * Move table to newCompt again, it should have failed with + * 404(NotAuthorizedOrNotFound) + */ + execChangeCompartment(origCompt, newCompartmentId, tableName, + null /* matchETag */, true /* expFail */, + "NotAuthorizedOrNotFound"); + + /* Create table in origCompt */ + createTable(tableName, ddl); + /* + * Move table to newCompt, the table already existed in newCompt, + * it should have failed with TableAlreadyExists error + */ + execChangeCompartment(origCompt, newCompartmentId, tableName, + null /* matchETag */, true /* expFail */, + "TableAlreadyExists"); + + /* Drop the table in newCompt */ + dropTable(newCompartmentId, tableName, false /* ifExists */, + true /* wait */); + + /* Bad parameter, toCompartment can not be null or empty */ + execChangeCompartment(origCompt, "", tableName, null /* matchETag */, + true /* expFail */, "InvalidParameter"); + + /* Change table compartment using tableOcid: origCompt -> newCompt */ + tableOcid = getTableId(tableName); + execChangeCompartment(null, newCompartmentId, tableOcid); + + gtRes = getTable(tableOcid); + assertEquals(newCompartmentId, gtRes.getTable().getCompartmentId()); + } + + private void execChangeCompartment(String fromCompt, + String toCompt, + String tableNameOrId) { + execChangeCompartment(fromCompt, toCompt, tableNameOrId, null, + false, null); + } + + private void execChangeCompartment(String fromCompt, + String toCompt, + String tableNameOrId, + String matchETag, + boolean expFail, + String errorCode) { + ChangeTableCompartmentRequest req; + ChangeTableCompartmentDetails info; + + info = ChangeTableCompartmentDetails.builder() + .fromCompartmentId(fromCompt) + .toCompartmentId(toCompt) + .build(); + req = ChangeTableCompartmentRequest.builder() + .tableNameOrId(tableNameOrId) + .changeTableCompartmentDetails(info) + .ifMatch(matchETag) + .build(); + + if (expFail) { + executeDdlFail(req, errorCode); + } else { + executeDdl(req); + } + } + + @Test + public void testGetTableUsage() throws Exception { + assumeTrue("Skipping testGetTableUsage() if not minicloud test", + cloudRunning); + + final String tableName = "testUsage"; + final String ddl = "create table if not exists " + tableName + "(" + + "id integer, name String, " + + "primary key(id))"; + /* Create table */ + createTable(tableName, ddl); + + long startTime = System.currentTimeMillis(); + long millsInSlice0 = startTime % 60_000L; + int delayMsToNextSlice = (int)(60_000L - millsInSlice0); + /* Start put/get at slice0 + 3sec to avoid edge case */ + Thread.sleep(delayMsToNextSlice + 3000); + long slice0Start = startTime + delayMsToNextSlice; + + /* Put a row */ + Map row = new HashMap<>(); + row.put("id", 0); + row.put("name", "name0"); + putRow(tableName, row); + + /* Wait for another minute to slice1 */ + Thread.sleep(60_000); + + /* Get a row */ + getRow(tableName, Arrays.asList("id:0")); + /* + * Wait for another minute to make sure the usage data for slice0 and + * slice1 are collected and write to store. 
+ */ + Thread.sleep(60_000); + + List results; + /* List all usages from slice0 */ + results = doListTableUsage(tableName, false /*isTableOcid */, + slice0Start, 0, 0, 0); + assertTrue(!results.isEmpty()); + assertUsages(results, 1, 1); + + /* List table usages with limit = 1 */ + results = doListTableUsage(tableName, false /*isTableOcid */, + slice0Start, 0, 1, 0); + assertTrue(!results.isEmpty()); + assertUsages(results, 1, 1); + + long endTime = System.currentTimeMillis(); + /* List table usages with time range and limit = 1 */ + results = doListTableUsage(tableName, false /*isTableOcid */, + slice0Start, endTime, 1, 0); + assertTrue(!results.isEmpty()); + assertUsages(results, 1, 1); + + /* List table usages with time <= endTime and limit = 1 */ + results = doListTableUsage(tableName, false /*isTableOcid */, 0, + endTime, 1, 4); + assertTrue(!results.isEmpty()); + assertUsages(results, 1, 1); + + /* List usages using tableOcid */ + final String tableOcid = getTableId(tableName); + results = doListTableUsage(tableOcid, true /*isTableOcid */, + slice0Start, 0, 0, 0); + assertTrue(!results.isEmpty()); + assertUsages(results, 1, 1); + } + + @Test + public void testTableNameMapping() + throws Exception { + + /* + * Run this test in minicloud only + * + * This test directly calls SC API to create table to test proxy cache, + * it can only be run in minicloud. + */ + assumeTrue("Skipping testTableNameMapping() if not minicloud test", + useMiniCloud); + + String tableName = "testTableNameMapping"; + String ddl = "create table " + tableName + "(" + + "id integer, name String, age integer, " + + "primary key(id))"; + + /* drop non-existing table */ + dropTable(tableName); + + /* re-create table */ + scRecreateTable(getTenantId(), getCompartmentId(), tableName, ddl); + + /* get table to cache mapping */ + getTable(tableName); + + /* re-create table */ + scRecreateTable(getTenantId(), getCompartmentId(), tableName, ddl); + + /* get table */ + getTable(tableName); + + /* list table usage to cache mapping */ + ListTableUsageRequest.Builder builder = + ListTableUsageRequest.builder() + .compartmentId(getCompartmentId()) + .tableNameOrId(tableName); + client.listTableUsage(builder.build()); + + /* re-create table */ + scRecreateTable(getTenantId(), getCompartmentId(), tableName, ddl); + + /* list table usage */ + builder = ListTableUsageRequest.builder() + .compartmentId(getCompartmentId()) + .tableNameOrId(tableName); + client.listTableUsage(builder.build()); + + /* alter table to cache mapping */ + String alterddl = "alter table " + tableName + "(add i1 integer)"; + UpdateTableDetails info = UpdateTableDetails.builder() + .ddlStatement(alterddl) + .compartmentId(getCompartmentId()) + .build(); + UpdateTableRequest utReq = UpdateTableRequest.builder() + .tableNameOrId(tableName) + .updateTableDetails(info) + .build(); + executeDdl(utReq); + + /* re-create table */ + scRecreateTable(getTenantId(), getCompartmentId(), tableName, ddl); + + /* alter table */ + info = UpdateTableDetails.builder() + .ddlStatement(alterddl) + .compartmentId(getCompartmentId()) + .build(); + utReq = UpdateTableRequest.builder() + .tableNameOrId(tableName) + .updateTableDetails(info) + .build(); + executeDdl(utReq); + + /* drop table to cache mapping */ + DeleteTableRequest req = DeleteTableRequest.builder() + .compartmentId(getCompartmentId()) + .tableNameOrId(tableName) + .isIfExists(true) + .build(); + executeDdl(req); + + /* re-create table */ + scRecreateTable(getTenantId(), getCompartmentId(), tableName, ddl); + + /* 
drop table */ + req = DeleteTableRequest.builder() + .compartmentId(getCompartmentId()) + .tableNameOrId(tableName) + .isIfExists(true) + .build(); + executeDdl(req); + } + + @Test + public void testInvalidCompartmentId() { + assumeTrue("Skipping testInvalidCompartmentId() if not minicloud test", + cloudRunning); + + String tableName = "testInvalidCompartmentId"; + String ddl = "create table if not exists " + tableName + "(" + + "id integer, name String, age integer, " + + "primary key(id))"; + + /* Get table */ + try { + GetTableRequest gtr = GetTableRequest.builder() + .tableNameOrId(tableName) + .build(); + client.getTable(gtr); + fail("GetTable expect 404 but not"); + } catch (BmcException ex) { + assertEquals(404, ex.getStatusCode()); + assertTrue(ex.getMessage().contains("compartment id")); + } + + /* Alter table */ + try { + ddl = "alter table " + tableName + "(add i1 integer)"; + UpdateTableDetails utd = UpdateTableDetails.builder() + .ddlStatement(ddl) + .build(); + UpdateTableRequest utReq = UpdateTableRequest.builder() + .tableNameOrId(tableName) + .updateTableDetails(utd) + .build(); + client.updateTable(utReq); + fail("AlterTable expect 404 but not"); + } catch (BmcException ex) { + assertEquals(404, ex.getStatusCode()); + assertTrue(ex.getMessage().contains("compartment id")); + } + + /* List table usage */ + try { + ListTableUsageRequest.Builder builder = + ListTableUsageRequest.builder() + .tableNameOrId(tableName); + client.listTableUsage(builder.build()); + fail("ListTableUsage expect 404 but not"); + } catch (BmcException ex) { + assertEquals(404, ex.getStatusCode()); + assertTrue(ex.getMessage().contains("compartment id")); + } + + /* Delete a non-existent table using if exists */ + DeleteTableRequest dtr = DeleteTableRequest.builder() + .tableNameOrId("invalid") + .isIfExists(true) + .build(); + try { + client.deleteTable(dtr); + fail("DeleteTable expect 404 but not"); + } catch (BmcException ex) { + assertEquals(404, ex.getStatusCode()); + assertTrue(ex.getMessage().contains("compartment id")); + } + } + + /** + * Test the tenant's max auto scaling table count and the max number of + * limits mode changes allowed per day. + */ + @Test + public void testAutoScalingTableLimits() { + assumeTrue("Skipping testAutoScalingTableLimits() if not minicloud " + + "or cloud test", cloudRunning); + + /* Create auto scaling table */ + String tableName = "testusers"; + String ddl = "create table if not exists testusers(" + + "id integer, name string, primary key(id))"; + TableLimits limits = TableLimits.builder() + .maxStorageInGBs(20) + .capacityMode(CapacityMode.OnDemand) + .build(); + CreateTableDetails info = CreateTableDetails.builder() + .compartmentId(getCompartmentId()) + .name(tableName) + .ddlStatement(ddl) + .tableLimits(limits) + .build(); + CreateTableRequest req = CreateTableRequest.builder() + .createTableDetails(info) + .build(); + executeDdl(req); + + GetTableRequest getReq = GetTableRequest.builder() + .compartmentId(getCompartmentId()) + .tableNameOrId(tableName) + .build(); + GetTableResponse getRes = client.getTable(getReq); + validateTable(getRes.getTable(), tableName, limits); + + /* + * Create 2 more auto scaling tables. + */ + createTestTable("tableName1", limits, null, null); + createTestTable("tableName2", limits, null, null); + + /* + * Cannot create more than 3 auto scaling tables.
+ */ + ddl = "create table if not exists tableName3(" + + "id integer, name string, primary key(id))"; + info = CreateTableDetails.builder() + .compartmentId(getCompartmentId()) + .name("tableName3") + .ddlStatement(ddl) + .tableLimits(limits) + .build(); + req = CreateTableRequest.builder() + .createTableDetails(info) + .build(); + executeDdlFail(req, "TableLimitExceeded"); + + /* + * Alter the table limits mode from AUTO_SCALING to PROVISIONED + */ + limits = TableLimits.builder() + .maxReadUnits(10) + .maxWriteUnits(20) + .maxStorageInGBs(30) + .capacityMode(CapacityMode.Provisioned) + .build(); + updateTable(tableName, limits); + getRes = client.getTable(getReq); + validateTable(getRes.getTable(), tableName, limits); + + if (tenantLimits != null) { + limits = null; + if (tenantLimits.getBillingModeChangeRate() == 2) { + /* + * Alter the table limits mode from PROVISIONED to AUTO_SCALING + */ + limits = TableLimits.builder() + .maxStorageInGBs(50) + .capacityMode(CapacityMode.OnDemand) + .build(); + updateTable(tableName, limits); + getRes = client.getTable(getReq); + validateTable(getRes.getTable(), tableName, limits); + + /* + * Cannot change the limits mode any more after reaching mode max + * allowed changes per day. + */ + limits = TableLimits.builder() + .maxReadUnits(10) + .maxWriteUnits(20) + .maxStorageInGBs(30) + .capacityMode(CapacityMode.Provisioned) + .build(); + } else if (tenantLimits.getBillingModeChangeRate() == 1) { + limits = TableLimits.builder() + .maxStorageInGBs(50) + .capacityMode(CapacityMode.OnDemand) + .build(); + } + + if (limits != null) { + UpdateTableRequest updateReq = + buildUpdateTableRequest(tableName, limits); + executeDdlFail(updateReq, "OperationRateLimitExceeded"); + } + } + } + + @Test + public void testRetryToken() { + /* + * TODO: NOSQL-718 + * Enable this in cloud test after fix it + */ + assumeTrue("Skipping testRetryToken() if not minicloud test", + useMiniCloud); + + final String tableName = "testRetryToken"; + final String indexName = "idxName"; + final String ddl = "create table " + tableName + + "(id integer, name string, primary key(id))"; + final TableLimits limits = TableLimits.builder() + .maxReadUnits(50) + .maxWriteUnits(50) + .maxStorageInGBs(1) + .build(); + + final long now = System.currentTimeMillis(); + final String createTableToken = "token_create_table_" + now; + final String createIndexToken = "token_create_index_" + now; + + String origRequestId; + String workRequestId; + + /* + * Create table + */ + CreateTableRequest createTableReq; + CreateTableDetails createTableInfo; + + /* Create table with a retry token */ + createTableInfo = CreateTableDetails.builder() + .compartmentId(getCompartmentId()) + .name(tableName) + .ddlStatement(ddl) + .tableLimits(limits) + .build(); + createTableReq = CreateTableRequest.builder() + .createTableDetails(createTableInfo) + .opcRetryToken(createTableToken) + .build(); + origRequestId = executeDdl(createTableReq); + /* + * Create table with retry token again, should get the original request + * id. + */ + workRequestId = executeDdl(createTableReq, false); + assertEquals(origRequestId, workRequestId); + + /* + * The request does not match the original request, should get + * 409(InvalidatedRetryToken) error. 
+ */ + Map tags = new HashMap<>(); + tags.put("production", "NDCS"); + createTableInfo = CreateTableDetails.builder() + .compartmentId(getCompartmentId()) + .name(tableName) + .ddlStatement(ddl) + .tableLimits(limits) + .freeformTags(tags) + .build(); + createTableReq = CreateTableRequest.builder() + .createTableDetails(createTableInfo) + .opcRetryToken(createTableToken) + .build(); + executeDdlFail(createTableReq, 409, "InvalidatedRetryToken"); + + /* + * Create index + */ + CreateIndexRequest createIndexReq; + CreateIndexDetails createIndexInfo; + + List keys = new ArrayList(); + keys.add(IndexKey.builder().columnName("name").build()); + createIndexInfo = CreateIndexDetails.builder() + .name(indexName) + .compartmentId(getCompartmentId()) + .keys(keys) + .build(); + createIndexReq = CreateIndexRequest.builder() + .tableNameOrId(tableName) + .createIndexDetails(createIndexInfo) + .opcRetryToken(createIndexToken) + .build(); + origRequestId = executeDdl(createIndexReq); + /* + * Create index with retry token again, should get the original request + * id. + */ + workRequestId = executeDdl(createIndexReq, false); + assertEquals(origRequestId, workRequestId); + + /* + * Creating another index with the retry token used for previous index + * should get 409(InvalidatedRetryToken). + */ + keys.clear(); + keys.add(IndexKey.builder().columnName("name").build()); + keys.add(IndexKey.builder().columnName("id").build()); + createIndexInfo = CreateIndexDetails.builder() + .name("idxNameId") + .compartmentId(getCompartmentId()) + .keys(keys) + .build(); + createIndexReq = CreateIndexRequest.builder() + .tableNameOrId(tableName) + .createIndexDetails(createIndexInfo) + .opcRetryToken(createIndexToken) + .build(); + executeDdlFail(createIndexReq, 409, "InvalidatedRetryToken"); + + /* + * Create index with the retry token of create-table operation, it + * should get 409(InvalidatedRetryToken). 
+ */ + createIndexReq = CreateIndexRequest.builder() + .tableNameOrId(tableName) + .createIndexDetails(createIndexInfo) + .opcRetryToken(createTableToken) + .build(); + executeDdlFail(createIndexReq, 409, "InvalidatedRetryToken"); + + /* Invalid retry token */ + String invalidRetryToken1 = "abc!"; + createTableInfo = CreateTableDetails.builder() + .compartmentId(getCompartmentId()) + .name(tableName) + .ddlStatement(ddl) + .tableLimits(limits) + .build(); + createTableReq = CreateTableRequest.builder() + .createTableDetails(createTableInfo) + .opcRetryToken(invalidRetryToken1) + .build(); + executeDdlFail(createTableReq, 400, "InvalidParameter"); + + StringBuilder sb = new StringBuilder(65); + for (int i = 0; i < 65; i++) { + sb.append("A"); + } + String invalidRetryToken2 = sb.toString(); + createTableReq = CreateTableRequest.builder() + .createTableDetails(createTableInfo) + .opcRetryToken(invalidRetryToken2) + .build(); + executeDdlFail(createTableReq, 400, "InvalidParameter"); + } + + @Test + public void testUpdateTableWithCreateDdl() { + String tableName = "foo"; + String ddl = "create table " + tableName + "(" + + "id integer, " + + "name string, " + + "primary key(id))"; + TableLimits limits = TableLimits.builder() + .maxReadUnits(10) + .maxWriteUnits(10) + .maxStorageInGBs(1) + .build(); + createTable(tableName, ddl, limits); + + GetTableResponse gtRes; + String[] pkeys = new String[] {"id"}; + Map columns = new HashMap(); + columns.put("id", "INTEGER"); + columns.put("name", "STRING"); + + /* + * ALTER TABLE foo(ADD info JSON) + */ + ddl = "create table " + tableName + "(" + + "id integer, " + + "name string, " + + "info json, " + + "primary key(id))"; + alterTable(tableName, ddl); + + columns.put("info", "JSON"); + gtRes = getTable(tableName); + validateTable(gtRes.getTable(), tableName, columns, pkeys, null, limits, + (cloudRunning ? ddl : null), -1 /* ttl */); + + /* + * ALTER TABLE foo(MODIFY id GENERATED ALWAYS AS IDENTITY) + */ + ddl = "create table " + tableName + "(" + + "id integer generated always as identity, " + + "name string, " + + "info json, " + + "primary key(id))"; + alterTable(tableName, ddl); + + gtRes = getTable(tableName); + validateTable(gtRes.getTable(), tableName, columns, pkeys, null, limits, + (cloudRunning ? ddl : null), -1 /* ttl */); + checkIdenity(gtRes.getTable().getSchema().getIdentity(), + "id", true /* isAlways */, false /* isOnNull */); + + /* + * ALTER TABLE foo(ADD uid0 STRING AS UUID, + * DROP name, + * DROP info, + * MODIFY id GENERATED ALWAYS AS IDENTITY + * (START WITH 2 INCREMENT BY 2 MAXVALUE 100 + * CACHE 10 CYCLE)) + */ + ddl = "create table " + tableName + "(" + + "id integer generated always as identity(" + + "start with 2 increment by 2 maxvalue 100 cache 10 cycle), " + + "uid0 string as UUID, " + + "primary key(id))"; + alterTable(tableName, ddl); + + columns.remove("name"); + columns.remove("info"); + columns.put("uid0", "STRING_UUID"); + gtRes = getTable(tableName); + validateTable(gtRes.getTable(), tableName, columns, pkeys, null, limits, + (cloudRunning ? 
ddl : null), -1 /* ttl */); + checkIdenity(gtRes.getTable().getSchema().getIdentity(), + "id", true /* isAlways */, false /* isOnNull */); + + + /* + * ALTER TABLE foo(ADD uid1 STRING AS UUID GENERATED BY DEFAULT, + * ADD address RECORD( + * line STRING, city STRING, zipcode STRING), + * DROP uid0, + * MODIFY id DROP IDENTITY) + */ + ddl = "create table " + tableName + "(" + + "id integer, " + + "uid1 string as UUID GENERATED BY DEFAULT, " + + "address record(line string, city string, zipcode string), " + + "primary key(id))"; + alterTable(tableName, ddl); + + columns.remove("uid0"); + columns.put("uid1", "STRING_UUID_GENERATED"); + columns.put("address", "RECORD(line STRING, city STRING, zipcode STRING)"); + gtRes = getTable(tableName); + validateTable(gtRes.getTable(), tableName, columns, pkeys, null, limits, + (cloudRunning ? ddl : null), -1 /* ttl */); + assertNull(gtRes.getTable().getSchema().getIdentity()); + + /* + * ALTER TABLE foo(ADD address.lines ARRAY(STRING), + * ADD address.country STRING, + * DROP address.line) + */ + ddl = "create table " + tableName + "(" + + "id integer, " + + "uid1 string as UUID GENERATED BY DEFAULT, " + + "address record(city string, zipcode string, " + + "lines array(string), country string), " + + "primary key(id))"; + alterTable(tableName, ddl); + + columns.put("address", + "RECORD(city STRING, zipcode STRING, " + + "lines ARRAY(STRING), country STRING)"); + gtRes = getTable(tableName); + validateTable(gtRes.getTable(), tableName, columns, pkeys, null, limits, + (cloudRunning ? ddl : null), -1 /* ttl */); + + /* + * ALTER TABLE foo USING TTL 30 DAYS + */ + String ddlWithTtl = ddl + " using TTL 30 days"; + alterTable(tableName, ddlWithTtl); + gtRes = getTable(tableName); + validateTable(gtRes.getTable(), tableName, columns, pkeys, null, limits, + (cloudRunning ? ddlWithTtl : null), 30 /* ttl */); + + /* + * ALTER TABLE foo USING TTL 90 DAYS + */ + ddlWithTtl = ddl + " using TTL 90 days"; + alterTable(tableName, ddlWithTtl); + gtRes = getTable(tableName); + validateTable(gtRes.getTable(), tableName, columns, pkeys, null, limits, + (cloudRunning ? ddlWithTtl : null), 90 /* ttl */); + + /* + * ALTER TABLE foo USING TTL 0 DAYS + */ + alterTable(tableName, ddl); + gtRes = getTable(tableName); + validateTable(gtRes.getTable(), tableName, columns, pkeys, null, limits, + (cloudRunning ? ddl : null), 0 /* ttl */); + + /* + * Using semantical equivalent ddl, update-table does nothing and + * the ddl statement of table should be changed. + */ + ddl = "create table " + tableName + "(" + + "id integer, " + + "uid1 string as UUID GENERATED BY DEFAULT, " + + "address record(city string, zipcode string, " + + "lines array(string), country string), " + + "primary key(id))"; + alterTable(tableName, ddl); + gtRes = getTable(tableName); + validateTable(gtRes.getTable(), tableName, columns, pkeys, null, limits, + (cloudRunning ? ddl : null), 0 /* ttl */); + + /* + * Bad requests + */ + + /* + * Can't modify primary key fields + */ + ddl = "create table if not exists " + tableName + "(" + + "id integer, " + + "name string, " + + "primary key(id, name))"; + UpdateTableRequest req = buildUpdateTableRequest(tableName, ddl); + executeDdlFail(req, 400, + (cloudRunning ? "IllegalArgument" : "InvalidParameter")); + + /* + * Multiple alter table operations are needed to evolve to target table, + * only single alter table is supported at this time. 
+ */ + ddl = "create table if not exists " + tableName + "(" + + "id integer, " + + "name string, " + + "age integer, " + + "primary key(id)) using ttl 10 days"; + req = buildUpdateTableRequest(tableName, ddl); + executeDdlFail(req, 400, + (cloudRunning ? "IllegalArgument" : "InvalidParameter")); + + /* The table name 'foo' does not match the table name in statement */ + req = buildUpdateTableRequest("foo", ddl); + executeDdlFail(req, 400, + (cloudRunning ? "IllegalArgument" : "InvalidParameter")); + + /* Table not found */ + ddl = "create table foo1(id integer, s string, primary key(id))"; + req = buildUpdateTableRequest("foo1", ddl); + executeDdlFail(req, 404, + (cloudRunning ? "NotAuthorizedOrNotFound" : "TableNotFound")); + } + + private List doListTableUsage(String tableNameOrId, + boolean isTableOcid, + long startTime, + long endTime, + int limit, + int maxEntries) { + ListTableUsageRequest req; + ListTableUsageResponse res; + + ListTableUsageRequest.Builder builder = ListTableUsageRequest.builder() + .tableNameOrId(tableNameOrId); + if (!isTableOcid) { + builder.compartmentId(getCompartmentId()); + } + if (startTime > 0) { + builder.timeStart(new Date(startTime)); + } + if (endTime > 0) { + builder.timeEnd(new Date(endTime)); + } + if (limit > 0) { + builder.limit(limit); + } + + String nextPage = null; + List results = new ArrayList<>(); + List usages; + do { + req = builder.page(nextPage).build(); + res = client.listTableUsage(req); + nextPage = res.getOpcNextPage(); + usages = res.getTableUsageCollection().getItems(); + if (!usages.isEmpty()) { + results.addAll(res.getTableUsageCollection().getItems()); + } + if (limit > 0 && nextPage != null) { + assertEquals(limit, usages.size()); + } + if (maxEntries > 0 && results.size() >= maxEntries) { + break; + } + } while (nextPage != null); + return results; + } + + private void assertUsages(List results, + int expWriteUnits, + int expReadUnits) { + int writeUnits = 0; + int readUnits = 0; + for (TableUsageSummary tus : results) { + + assertNotNull(tus.getWriteUnits()); + assertNotNull(tus.getReadUnits()); + assertNotNull(tus.getReadThrottleCount()); + assertNotNull(tus.getWriteThrottleCount()); + assertNotNull(tus.getStorageThrottleCount()); + assertNotNull(tus.getStorageInGBs()); + assertNotNull(tus.getMaxShardSizeUsageInPercent()); + + if (tus.getWriteUnits() > 0) { + writeUnits += tus.getWriteUnits(); + } + if (tus.getReadUnits() > 0) { + readUnits += tus.getReadUnits(); + } + } + assertEquals(expWriteUnits, writeUnits); + assertEquals(expReadUnits, readUnits); + } + + private void createTestTable(String tableName, + TableLimits limits, + Map freeformTags, + Map> definedTags) { + String ddl = "create table if not exists " + tableName + "(" + + "id integer, " + + "name string, " + + "age integer, " + + "primary key(id))"; + + /* Create table */ + CreateTableDetails ctInfo = CreateTableDetails.builder() + .compartmentId(getCompartmentId()) + .name(tableName) + .ddlStatement(ddl) + .tableLimits(limits) + .freeformTags(freeformTags) + .definedTags(definedTags) + .build(); + CreateTableRequest ctReq = CreateTableRequest.builder() + .createTableDetails(ctInfo) + .build(); + executeDdl(ctReq); + } + + private void validateTable(Table table, + String tableName, + TableLimits limits) { + validateTable(table, tableName, null /* columns */, + null /* primaryKeys */, null /* shardKeys */, + limits, null /* tableDdl */, -1 /* ttlInDays */); + } + + private void validateTable(Table table, + String tableName, + Map columns, + String[] primaryKeys, 
+ String[] shardKeys, + TableLimits limits, + String tableDdl, + int ttlInDays) { + assertNotNull(table); + + assertTableOcid(table.getId()); + + assertEquals(getCompartmentId(), table.getCompartmentId()); + assertEquals(tableName, table.getName()); + assertNotNull(table.getTimeCreated()); + assertNotNull(table.getDdlStatement()); + + Schema schema = table.getSchema(); + assertNotNull(schema); + + if (columns != null) { + List cols = schema.getColumns(); + assertEquals(columns.size(), cols.size()); + for (Column col : cols) { + String name = col.getName(); + assertTrue(columns.containsKey(name)); + String type = columns.get(name).toUpperCase(); + if (type.startsWith("STRING")) { + assertEquals(type.contains("UUID"), col.getIsAsUuid()); + assertEquals(type.contains("GENERATED"), + col.getIsGenerated()); + type = "STRING"; + } + assertTrue(type.equalsIgnoreCase(col.getType())); + + if (schema.getPrimaryKey().contains(name)) { + assertFalse(col.getIsNullable()); + } else { + assertTrue(col.getIsNullable()); + } + } + } + + if (primaryKeys != null) { + assertEquals(primaryKeys.length, schema.getPrimaryKey().size()); + int i = 0; + for (String key : schema.getPrimaryKey()) { + assertTrue(key.equalsIgnoreCase(primaryKeys[i++])); + } + + String[] skeys = (shardKeys != null) ? shardKeys : primaryKeys; + assertEquals(skeys.length, schema.getShardKey().size()); + i = 0; + for (String key : schema.getShardKey()) { + assertTrue(key.equalsIgnoreCase(skeys[i++])); + } + } + + if (limits != null) { + TableLimits resLimits = table.getTableLimits(); + assertEquals(limits.getMaxStorageInGBs(), + resLimits.getMaxStorageInGBs()); + if (limits.getCapacityMode() == null || + limits.getCapacityMode() == CapacityMode.Provisioned) { + assertEquals(CapacityMode.Provisioned, + resLimits.getCapacityMode()); + assertEquals(limits.getMaxReadUnits(), + resLimits.getMaxReadUnits()); + assertEquals(limits.getMaxWriteUnits(), + resLimits.getMaxWriteUnits()); + } else { + assertEquals(CapacityMode.OnDemand, + resLimits.getCapacityMode()); + } + } + + if (tableDdl != null) { + assertEquals(tableDdl, table.getDdlStatement()); + } + + Integer ttl = table.getSchema().getTtl(); + if (ttlInDays >= 0) { + assertNotNull(ttl); + assertEquals(ttlInDays, ttl.intValue()); + } else { + assertNull(ttl); + } + + /* TODO: more validation */ + } + + private void checkIdenity(Identity identity, + String colName, + boolean isAlways, + boolean isOnNull) { + if (checkKVVersion(22, 2, 7)) { + assertNotNull(identity); + assertEquals(colName, identity.getColumnName()); + assertEquals(isAlways, identity.getIsAlways()); + assertEquals(isOnNull, identity.getIsNull()); + } + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/rest/ThrottleLimitTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/rest/ThrottleLimitTest.java new file mode 100644 index 00000000..70012e77 --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/rest/ThrottleLimitTest.java @@ -0,0 +1,899 @@ +/*- + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * See the file LICENSE for redistribution information. 
+ * + */ + +package oracle.nosql.proxy.rest; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeTrue; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import com.oracle.bmc.model.BmcException; +import com.oracle.bmc.nosql.model.PreparedStatement; +import com.oracle.bmc.nosql.model.QueryDetails; +import com.oracle.bmc.nosql.model.TableLimits; +import com.oracle.bmc.nosql.model.TableLimits.CapacityMode; +import com.oracle.bmc.nosql.model.UpdateRowDetails; +import com.oracle.bmc.nosql.requests.CreateIndexRequest; +import com.oracle.bmc.nosql.requests.CreateTableRequest; +import com.oracle.bmc.nosql.requests.GetRowRequest; +import com.oracle.bmc.nosql.requests.GetWorkRequestRequest; +import com.oracle.bmc.nosql.requests.ListWorkRequestsRequest; +import com.oracle.bmc.nosql.requests.PrepareStatementRequest; +import com.oracle.bmc.nosql.requests.QueryRequest; +import com.oracle.bmc.nosql.requests.UpdateRowRequest; +import com.oracle.bmc.nosql.requests.UpdateTableRequest; +import com.oracle.bmc.nosql.responses.GetRowResponse; +import com.oracle.bmc.nosql.responses.PrepareStatementResponse; +import com.oracle.bmc.nosql.responses.QueryResponse; +import com.oracle.bmc.nosql.responses.UpdateRowResponse; +import com.oracle.bmc.retrier.DefaultRetryCondition; +import com.oracle.bmc.retrier.RetryConfiguration; + +import oracle.nosql.proxy.ProxyTestBase; + +import org.junit.Test; + +/** + * Throttling and limits test: + * o Read/write throttling + * o Size limit + * 1. Key/value size + * 2. Query size + * 3. Request size + * o Ddl limits + * 1. number of columns + * 2. number of tables + * 3. number of indexes + * 4. number of evolution + * 5. table provisioning limits + * 6. 
operation limits (TODO) + */ +public class ThrottleLimitTest extends RestAPITestBase { + + /* Test read/write throttling */ + @Test + public void testReadWriteThrottling() throws Exception { + + final RetryConfiguration retryConfiguration = + RetryConfiguration.builder() + .retryCondition(new TestRetryCondition()) + .build(); + + /* Create a table with small throughput */ + final String tableName = "testThrottle"; + String ddl = "create table if not exists testThrottle (" + + "id integer, name String, age integer, " + + "primary key(id))"; + + TableLimits limits = TableLimits.builder() + .maxReadUnits(1) + .maxWriteUnits(1) + .maxStorageInGBs(50) + .build(); + + createTable(tableName, ddl, limits); + + /* PUT */ + Map value = new HashMap(); + value.put("id", 1); + value.put("name", "jack"); + value.put("age", 21); + UpdateRowDetails row = UpdateRowDetails.builder() + .compartmentId(getCompartmentId()) + .value(value) + .build(); + UpdateRowRequest putReq = UpdateRowRequest.builder() + .tableNameOrId(tableName) + .retryConfiguration(retryConfiguration) + .updateRowDetails(row) + .build(); + int num = 0; + try { + while (true) { + client.updateRow(putReq); + num++; + if (num > 1000) { + fail("Throttling exception should have been thrown"); + } + } + } catch (BmcException ex) { + /* success */ + assertError(ex, 429, "TooManyRequests"); + checkErrorMessage(ex); + } + + /* GET */ + List key = new ArrayList(); + key.add("id:1"); + num = 0; + try { + while (true) { + GetRowRequest getReq = GetRowRequest.builder() + .compartmentId(getCompartmentId()) + .tableNameOrId(tableName) + .retryConfiguration(retryConfiguration) + .key(key) + .build(); + GetRowResponse getRes = client.getRow(getReq); + assertNotNull(getRes.getRow()); + num++; + if (num > 1000) { + fail("Throttling exception should have been thrown"); + } + } + } catch (BmcException ex) { + /* success */ + assertError(ex, 429, "TooManyRequests"); + } + + Thread.sleep(2000); /* try to avoid previous throttling */ + + /* Query based on single partition scanning */ + String query = "select * from testThrottle where id = 1"; + PrepareStatementRequest prepReq = + PrepareStatementRequest.builder() + .compartmentId(getCompartmentId()) + .statement(query) + .build(); + PrepareStatementResponse prepRes = client.prepareStatement(prepReq); + assertTrue("Prepare statement failed", + prepRes.getPreparedStatement() != null); + + /* Query with size limit */ + QueryDetails queryDetails = QueryDetails.builder() + .compartmentId(getCompartmentId()) + .statement(prepRes.getPreparedStatement().getStatement()) + .isPrepared(true) + .maxReadInKBs(3) + .build(); + QueryRequest queryReq = QueryRequest.builder() + .queryDetails(queryDetails) + .retryConfiguration(retryConfiguration) + .build(); + num = 0; + try { + while (true) { + /* Query */ + QueryResponse queryRes = client.query(queryReq); + assertEquals(1, queryRes.getQueryResultCollection() + .getItems().size()); + num++; + if (num > 1000) { + fail("Throttling exception should have been thrown"); + } + } + } catch (BmcException ex) { + /* success */ + assertError(ex, 429, "TooManyRequests"); + checkErrorMessage(ex); + } + + /* Alter table limit to increase write limit */ + limits = TableLimits.builder() + .maxReadUnits(1) + .maxWriteUnits(200) + .maxStorageInGBs(50) + .build(); + updateTable(tableName, limits); + + /* Put 200 rows */ + UpdateRowResponse putRes; + for (int i = 0; i < 200; i++) { + value.put("id", 100 + i); + row = UpdateRowDetails.builder() + .compartmentId(getCompartmentId()) + .value(value) + 
.build(); + putReq = UpdateRowRequest.builder() + .updateRowDetails(row) + .tableNameOrId(tableName) + .build(); + putRes = client.updateRow(putReq); + assertNotNull(putRes.getUpdateRowResult().getVersion()); + } + + /* Query based on all partitions scanning */ + Thread.sleep(2000); /* try to avoid previous throttling */ + query = "select * from testThrottle"; + prepReq = PrepareStatementRequest.builder() + .statement(query) + .compartmentId(getCompartmentId()) + .build(); + prepRes = client.prepareStatement(prepReq); + assertTrue("Prepare statement failed", + prepRes.getPreparedStatement() != null); + + queryDetails = QueryDetails.builder() + .compartmentId(getCompartmentId()) + .statement(prepRes.getPreparedStatement().getStatement()) + .isPrepared(true) + .maxReadInKBs(20) + .build(); + queryReq = QueryRequest.builder() + .queryDetails(queryDetails) + .retryConfiguration(retryConfiguration) + .build(); + QueryResponse queryRes; + num = 0; + try { + while (true) { + queryRes = client.query(queryReq); + num++; + if (queryRes.getOpcNextPage() == null) { + fail("Throttling exception should have been thrown"); + break; + } + queryReq = QueryRequest.builder() + .queryDetails(queryDetails) + .page(queryRes.getOpcNextPage()) + .build(); + } + } catch (BmcException ex) { + /* success */ + assertError(ex, 429, "TooManyRequests"); + checkErrorMessage(ex); + } + assertTrue(num > 0); + + /* Query without limits */ + Thread.sleep(1000); + num = 0; + queryDetails = QueryDetails.builder() + .compartmentId(getCompartmentId()) + .statement(prepRes.getPreparedStatement().getStatement()) + .isPrepared(true) + .build(); + queryReq = QueryRequest.builder() + .queryDetails(queryDetails) + .retryConfiguration(retryConfiguration) + .build(); + try { + while (true) { + /* Query */ + queryRes = client.query(queryReq); + assertTrue(queryRes.getQueryResultCollection() + .getItems().size() > 0); + num++; + if (num > 1000) { + fail("Throttling exception should have been thrown"); + } + } + } catch (BmcException ex) { + /* success */ + assertError(ex, 429, "TooManyRequests"); + checkErrorMessage(ex); + } + } + + @Test + public void testKeyValueSizeLimit() { + final int keySizeLimit = 64; + final int valueSizeLimit = 512 * 1024; + + final String tableName = "foo"; + final String ddl = "create table if not exists " + tableName + "(" + + "id string, name String, " + + "primary key(id))"; + createTable(tableName, ddl); + + Map row = new HashMap<>(); + row.put("id", genString(keySizeLimit)); + row.put("name", genString(valueSizeLimit - 5));/* 5 bytes for overhead */ + putRow("foo", row); + + row.put("id", genString(keySizeLimit + 1)); + try { + putRow("foo", row); + fail("Expect to get 400(KeySizeLimitExceeded) but not"); + } catch (BmcException ex) { + assertError(ex, 400, "KeySizeLimitExceeded"); + checkErrorMessage(ex); + } + + row.put("id", genString(1)); + row.put("name", genString(valueSizeLimit - 4)); + try { + putRow("foo", row); + fail("Expect to get 400(RowSizeLimitExceeded) but not"); + } catch (BmcException ex) { + assertError(ex, 400, "RowSizeLimitExceeded"); + checkErrorMessage(ex); + } + } + + @Test + public void testInsertKeyValueSize() { + final String tableName = "testSize"; + String ddl = "create table if not exists " + tableName + "(" + + "sid string, id string, s String, " + + "primary key(shard(sid), id))"; + createTable(tableName, ddl, + TableLimits.builder() + .maxReadUnits(100) + .maxWriteUnits(10000) + .maxStorageInGBs(1) + .capacityMode(CapacityMode.Provisioned) + .build()); + + 
PrepareStatementRequest prep; + PrepareStatementResponse pret; + PreparedStatement pstmt; + + QueryDetails qd; + QueryRequest query; + QueryResponse qret; + + final String s32 = genString(32); + final String s512K = genString(512 * 1024 - 5); /* 5 - overhead */ + + String fmt = "insert into " + tableName + + " (sid, id, s) values(\"%s\", \"%s\", \"%s\")"; + String insert = String.format(fmt, s32, s32, s32); + + prep = PrepareStatementRequest.builder() + .compartmentId(getCompartmentId()) + .statement(insert) + .build(); + pret = client.prepareStatement(prep); + + qd = QueryDetails.builder() + .compartmentId(getCompartmentId()) + .statement(insert) + .build(); + query = QueryRequest.builder() + .queryDetails(qd) + .build(); + qret = client.query(query); + assertEquals(1, qret.getQueryResultCollection().getItems().size()); + + /* Key size exceeded limit */ + insert = String.format(fmt, s32 + "a", s32, s32); + prep = PrepareStatementRequest.builder() + .compartmentId(getCompartmentId()) + .statement(insert) + .build(); + try { + pret = client.prepareStatement(prep); + } catch (BmcException ex) { + assertError(ex, 400, "KeySizeLimitExceeded"); + checkErrorMessage(ex); + } + + insert = String.format(fmt, s32, s32 + "a", s32); + qd = QueryDetails.builder() + .compartmentId(getCompartmentId()) + .statement(insert) + .build(); + query = QueryRequest.builder() + .queryDetails(qd) + .build(); + try { + qret = client.query(query); + } catch (BmcException ex) { + assertError(ex, 400, "KeySizeLimitExceeded"); + checkErrorMessage(ex); + } + + insert = "declare $sid string; $id string; $s string; insert into " + + tableName + " values($sid, $id, $s)"; + prep = PrepareStatementRequest.builder() + .compartmentId(getCompartmentId()) + .statement(insert) + .build(); + pret = client.prepareStatement(prep); + pstmt = pret.getPreparedStatement(); + assertNotNull(pstmt.getStatement()); + + /* Key size exceeded limit */ + Map values = new HashMap<>(); + values.put("$sid", s32); + values.put("$id", s32); + values.put("$s", s512K); + + qd = QueryDetails.builder() + .compartmentId(getCompartmentId()) + .isPrepared(true) + .statement(pstmt.getStatement()) + .variables(values) + .build(); + query = QueryRequest.builder() + .queryDetails(qd) + .build(); + qret = client.query(query); + assertEquals(1, qret.getQueryResultCollection().getItems().size()); + + values.clear(); + values.put("$sid", s32 + "a"); + values.put("$id", s32); + values.put("$s", s512K); + + /* Key size exceeded limit */ + try { + qret = client.query(query); + } catch (BmcException ex) { + assertError(ex, 400, "KeySizeLimitExceeded"); + checkErrorMessage(ex); + } + + values.clear(); + values.put("$sid", s32); + values.put("$id", s32); + values.put("$s", s512K + "a"); + /* Value size exceeded limit */ + try { + qret = client.query(query); + } catch (BmcException ex) { + assertError(ex, 400, "RowSizeLimitExceeded"); + checkErrorMessage(ex); + } + } + + @Test + public void testQuerySize() { + final int querySizeLimit = 10 * 1024; + final String tableName = "foo"; + final String ddl = "create table if not exists " + tableName + "(" + + "id string, name String, " + + "primary key(id))"; + createTable(tableName, ddl); + + /* Test edge size: query string length = 10240 */ + StringBuilder sb = new StringBuilder(querySizeLimit); + sb.append("select * from foo where name = \""); + int maxStrLen = (querySizeLimit - sb.length() - 1); + sb.append(genString(maxStrLen)); + sb.append("\""); + String query = sb.toString(); + QueryDetails info = QueryDetails.builder() 
+ .compartmentId(getCompartmentId()) + .statement(query) + .build(); + QueryRequest queryReq = QueryRequest.builder() + .queryDetails(info) + .build(); + client.query(queryReq); + + /* query string length = 10241 */ + sb.append("select * from foo where name = \""); + sb.append(genString(maxStrLen + 1)); + sb.append("\""); + query = sb.toString(); + info = QueryDetails.builder() + .compartmentId(getCompartmentId()) + .statement(query) + .build(); + queryReq = QueryRequest.builder() + .queryDetails(info) + .build(); + try { + client.query(queryReq); + fail("Expect to get 400 (InvalidParameter, " + + "Query statement too long) but not"); + } catch (BmcException ex) { + assertError(ex, 400, "InvalidParameter"); + checkErrorMessage(ex); + } + } + + @Test + public void testRequestSize() { + assumeTrue("Skipping testLimitTables if not minicloud or cloud test" + + " or tenantLimits is not provided", + cloudRunning && tenantLimits != null); + + final int requestSizeLimit = tenantLimits.getStandardTableLimits(). + getRequestSizeLimit(); + final String tableName = "testRequestSize"; + + String ddl = "create table " + tableName + + "(id integer, name string, primary key(id))"; + createTable(tableName, ddl); + + String query = "declare $id integer; $name string;" + + "insert into " + tableName + " values($id, $name)"; + + PrepareStatementRequest prepReq = PrepareStatementRequest.builder() + .compartmentId(getCompartmentId()) + .statement(query) + .build(); + PrepareStatementResponse prepRes = client.prepareStatement(prepReq); + PreparedStatement pstmt = prepRes.getPreparedStatement(); + + Map variables = new HashMap<>(); + variables.put("$id", 1); + variables.put("$name", genString(requestSizeLimit)); + + QueryDetails info = QueryDetails.builder() + .compartmentId(getCompartmentId()) + .statement(pstmt.getStatement()) + .isPrepared(true) + .variables(variables) + .build(); + + QueryRequest qryReq; + try { + qryReq = QueryRequest.builder() + .queryDetails(info) + .build(); + + client.query(qryReq); + fail("Expect to get 400 (RequestSizeLimitExceeded, " + + "Query statement too long) but not"); + } catch (BmcException ex) { + assertError(ex, 400, "RequestSizeLimitExceeded"); + checkErrorMessage(ex); + } + } + + @Test + public void testColumnNumLimit() { + assumeTrue("Skipping testLimitTables if not minicloud or cloud test" + + " or tenantLimits is not provided", + cloudRunning && tenantLimits != null); + + final int numFields = tenantLimits.getStandardTableLimits(). 
+ getColumnsPerTable(); + + String tableName = "testColumnNumLimit"; + String ddl = makeCreateTableDdl(tableName, numFields); + createTable(tableName, ddl); + + ddl = "alter table " + tableName + "(add a1 integer)"; + UpdateTableRequest utReq = buildUpdateTableRequest(tableName, ddl); + executeDdlFail(utReq, "IllegalArgument"); + + tableName = "testColumnLimitBad"; + ddl = makeCreateTableDdl(tableName, numFields + 1); + createTableFail(tableName, ddl, defaultLimits, "IllegalArgument"); + } + + @Test + public void testTableNumLimit() { + assumeTrue("Skipping testTableNumLimit if not minicloud or cloud test" + + " or tenantLimits is not provided", + cloudRunning && tenantLimits != null); + + + final int numTables = tenantLimits.getNumTables(); + if (numTables > NUM_TABLES) { + /* + * To prevent this test from running too long, skip the test if the + * table number limit > ProxyTestBase.NUM_TABLES + */ + return; + } + + for (int i = 0; i < numTables; i++) { + String tableName = "testTableNumLimit" + i; + String ddl = makeCreateTableDdl(tableName, 1); + createTable(tableName, ddl); + } + + String tableName = "testTableNumLimit" + numTables; + String ddl = makeCreateTableDdl(tableName, 1); + createTableFail(tableName, ddl, defaultLimits, "TableLimitExceeded"); + } + + @Test + public void testIndexNumLimit() { + assumeTrue("Skipping testIndexNumLimit if not minicloud or cloud test" + + " or tenantLimits is not provided", + cloudRunning && tenantLimits != null); + + final String tableName = "testIndexNumLimit"; + final int numIndexesPerTable = tenantLimits.getStandardTableLimits(). + getIndexesPerTable(); + + final String ddl = makeCreateTableDdl(tableName, numIndexesPerTable + 1); + createTable(tableName, ddl); + + for (int i = 0; i < numIndexesPerTable; i++) { + String indexName = "idx" + i; + createIndex(tableName, indexName, new String[]{makeFieldName(i)}); + } + + createIndexFail(tableName, "idxBad", makeFieldName(numIndexesPerTable), + "IndexLimitExceeded"); + } + + @Test + public void testEvolutionNumLimit() { + assumeTrue("Skipping testEvolutionNumLimit if not minicloud or cloud test " + + "tenantLimits is not provided", + cloudRunning && tenantLimits != null); + + final int numEvolutions = tenantLimits.getStandardTableLimits(). + getSchemaEvolutions(); + if (numEvolutions > ProxyTestBase.NUM_SCHEMA_EVOLUTIONS) { + /* + * To prevent this test from running too long, skip the test if the + * table evolution times limit > ProxyTestBase.NUM_SCHEMA_EVOLUTIONS + */ + return; + } + + final String tableName = "testEvolutionNumLimit"; + String ddl = makeCreateTableDdl(tableName, 0); + createTable(tableName, ddl); + + for (int i = 0; i < numEvolutions; i++) { + ddl = makeAlterTableDdl(tableName, i); + alterTable(tableName, ddl); + } + + ddl = makeAlterTableDdl(tableName, numEvolutions); + alterTableFail(tableName, ddl, "EvolutionLimitExceeded"); + } + + /** + * Tests limits on total size and throughput allowed per-table and + * per-tenant. + */ + @Test + public void testTableProvisioningLimits() { + /* + * This test aims to create tables exceeds the tenant capacity, it is + * not applicable in cloud test + */ + assumeTrue("Skipping testTableProvisioningLimits if not minicloud test", + useMiniCloud); + + final int maxRead = tenantLimits.getStandardTableLimits(). + getTableReadUnits(); + final int maxWrite = tenantLimits.getStandardTableLimits(). + getTableWriteUnits(); + final int maxSize = tenantLimits.getStandardTableLimits(). 
+ getTableSize(); + + /* TODO: when per-tenant limits are available get them */ + final int maxTenantRead = tenantLimits.getTenantReadUnits(); + final int maxTenantWrite = tenantLimits.getTenantWriteUnits(); + final int maxTenantSize = tenantLimits.getTenantSize(); + + String tableName = "testLimits"; + String ddl = makeCreateTableDdl(tableName, 1); + + /* ReadUnits > maxTableReadUnits */ + TableLimits limits = TableLimits.builder() + .maxReadUnits(maxRead + 1) + .maxWriteUnits(1) + .maxStorageInGBs(1) + .build(); + createTableFail(tableName, ddl, limits, "TableDeploymentLimitExceeded"); + + /* WriteUnits > maxTableWriteUnits */ + limits = TableLimits.builder() + .maxReadUnits(1) + .maxWriteUnits(maxWrite + 1) + .maxStorageInGBs(1) + .build(); + + createTableFail(tableName, ddl, limits, "TableDeploymentLimitExceeded"); + + /* tableSize > maxTableSize */ + limits = TableLimits.builder() + .maxReadUnits(1) + .maxWriteUnits(1) + .maxStorageInGBs(maxSize + 1) + .build(); + createTableFail(tableName, ddl, limits, "TableDeploymentLimitExceeded"); + + /* make a table and try to evolve it past read limit */ + limits = TableLimits.builder() + .maxReadUnits(maxRead) + .maxWriteUnits(maxWrite) + .maxStorageInGBs(maxSize) + .build(); + createTable(tableName, ddl, limits); + + limits = TableLimits.builder() + .maxReadUnits(maxRead + 1) + .maxWriteUnits(maxWrite) + .maxStorageInGBs(maxSize) + .build(); + updateTableLimitsFail(tableName, limits, "TableDeploymentLimitExceeded"); + + /* + * Test per-tenant limits by trying to create another table. If it's one + * table this only works if the per-table limit is >= 1/2 of the + * tenant limit. See ProxyTestBase's TenantLimits. + */ + tableName = "testLimits1"; + ddl = makeCreateTableDdl(tableName, 1); + limits = TableLimits.builder() + .maxReadUnits(maxTenantRead - maxRead + 1) + .maxWriteUnits(1) + .maxStorageInGBs(1) + .build(); + createTableFail(tableName, ddl, limits, "TableDeploymentLimitExceeded"); + + limits = TableLimits.builder() + .maxReadUnits(1) + .maxWriteUnits(maxTenantWrite - maxWrite + 1) + .maxStorageInGBs(1) + .build(); + createTableFail(tableName, ddl, limits, "TableDeploymentLimitExceeded"); + + limits = TableLimits.builder() + .maxReadUnits(1) + .maxWriteUnits(1) + .maxStorageInGBs(maxTenantSize - maxSize + 1) + .build(); + createTableFail(tableName, ddl, limits, "TableDeploymentLimitExceeded"); + } + + @Test + public void testOpThrottling() { + /* This test adjusts op throttling rate, it is for minicloud only */ + assumeTrue("Skipping testOpThrottling if not minicloud test", + useMiniCloud); + + final String tableName = "testOpRate"; + String workReqId; + + String ddl = "create table if not exists " + tableName + + "(id integer, name string, primary key(id))"; + TableLimits limits = TableLimits.builder() + .maxReadUnits(100) + .maxWriteUnits(100) + .maxStorageInGBs(1) + .build(); + + setOpThrottling(getTenantId(), DEFAULT_OP_THROTTLE); + + try { + /* create table */ + workReqId = createTable(tableName, ddl, limits, false); + + GetWorkRequestRequest req = GetWorkRequestRequest.builder() + .workRequestId(workReqId) + .build(); + int num = 0; + try { + while (true) { + client.getWorkRequest(req); + num++; + if (num > 100) { + fail("Op Throttling exception should have been thrown"); + } + } + } catch (BmcException ex) { + /* success */ + assertError(ex, 429, "TooManyRequests"); + checkErrorMessage(ex); + } + + ListWorkRequestsRequest lwReq = ListWorkRequestsRequest.builder() + .compartmentId(getCompartmentId()) + .build(); + num = 0; + try 
{ + while (true) { + client.listWorkRequests(lwReq); + num++; + if (num > 100) { + fail("Op Throttling exception should have been thrown"); + } + } + } catch (BmcException ex) { + /* success */ + assertError(ex, 429, "TooManyRequests"); + checkErrorMessage(ex); + } + + } finally { + setOpThrottling(getTenantId(), NO_OP_THROTTLE); + } + } + + void createTableFail(String tableName, + String ddl, + int expHttpStatusCode, + String expErrorCode) { + CreateTableRequest req = buildCreateTableRequest(getCompartmentId(), + tableName, ddl, + defaultLimits); + executeDdlFail(req, expHttpStatusCode, expErrorCode); + } + + void createTableFail(String tableName, + String ddl, + TableLimits limits, + String expErrorCode) { + CreateTableRequest req = buildCreateTableRequest(getCompartmentId(), + tableName, + ddl, + limits); + executeDdlFail(req, expErrorCode); + } + + void alterTableFail(String tableName, String ddl, String expErrorCode) { + UpdateTableRequest req = buildUpdateTableRequest(tableName, ddl); + executeDdlFail(req, expErrorCode); + } + + void createIndexFail(String tableName, + String indexName, + String field, + String expErrorCode) { + CreateIndexRequest req = buildCreateIndexRequest(tableName, + indexName, + new String[]{field}, + false /* ifNotExists*/); + executeDdlFail(req, expErrorCode); + } + + void updateTableLimitsFail(String tableName, + TableLimits limits, + String expErrorCode) { + UpdateTableRequest req = buildUpdateTableRequest(tableName, limits); + executeDdlFail(req, expErrorCode); + } + + private void assertError(BmcException ex, + int statusCode, + String serviceCode) { + + assertEquals(statusCode, ex.getStatusCode()); + assertEquals(serviceCode, ex.getServiceCode()); + } + + private String makeCreateTableDdl(String name, int numFields) { + StringBuilder sb = new StringBuilder("CREATE TABLE IF NOT EXISTS "); + sb.append(name); + sb.append("(id INTEGER, "); + for (int i = 0; i < numFields - 1; i++) { + sb.append(makeFieldName(i)); + sb.append(" STRING, "); + } + sb.append("PRIMARY KEY(id))"); + return sb.toString(); + } + + private String makeAlterTableDdl(String tableName, int idxField) { + final StringBuilder sb = new StringBuilder("ALTER TABLE "); + sb.append(tableName); + sb.append(" (ADD "); + sb.append(makeFieldName(idxField)); + sb.append(" STRING)"); + return sb.toString(); + } + + private String makeFieldName(int idxField) { + return "c" + idxField; + } + + private static String genString(int length) { + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < length; i++) { + sb.append((char)('A' + i % 10)); + } + return sb.toString(); + } + + private static class TestRetryCondition extends DefaultRetryCondition { + TestRetryCondition() { + super(); + } + + @Override + public boolean shouldBeRetried(final BmcException ex) { + boolean shouldBeRetried = super.shouldBeRetried(ex); + if (ex.getStatusCode() == 429) { + assertTrue(shouldBeRetried); + } + return shouldBeRetried; + } + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/rest/WorkRequestTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/rest/WorkRequestTest.java new file mode 100644 index 00000000..bd9e7b29 --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/rest/WorkRequestTest.java @@ -0,0 +1,453 @@ +/*- + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * See the file LICENSE for redistribution information. 
+ * + */ +package oracle.nosql.proxy.rest; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import com.oracle.bmc.model.BmcException; +import com.oracle.bmc.nosql.model.TableLimits; +import com.oracle.bmc.nosql.model.WorkRequest; +import com.oracle.bmc.nosql.model.WorkRequest.OperationType; +import com.oracle.bmc.nosql.model.WorkRequest.Status; +import com.oracle.bmc.nosql.model.WorkRequestError; +import com.oracle.bmc.nosql.model.WorkRequestLogEntry; +import com.oracle.bmc.nosql.model.WorkRequestResource; +import com.oracle.bmc.nosql.model.WorkRequestResource.ActionType; +import com.oracle.bmc.nosql.model.WorkRequestSummary; +import com.oracle.bmc.nosql.requests.DeleteWorkRequestRequest; +import com.oracle.bmc.nosql.requests.GetWorkRequestRequest; +import com.oracle.bmc.nosql.requests.ListWorkRequestErrorsRequest; +import com.oracle.bmc.nosql.requests.ListWorkRequestLogsRequest; +import com.oracle.bmc.nosql.requests.ListWorkRequestsRequest; +import com.oracle.bmc.nosql.responses.GetWorkRequestResponse; +import com.oracle.bmc.nosql.responses.ListWorkRequestErrorsResponse; +import com.oracle.bmc.nosql.responses.ListWorkRequestLogsResponse; +import com.oracle.bmc.nosql.responses.ListWorkRequestsResponse; + +import org.junit.Assume; +import org.junit.BeforeClass; +import org.junit.Test; + +/** + * This test suite is only for miniCloud test. + * + * WorkRequest related APIs: + * o getWorkRequest + * o listWorkRequests + * o listWorkRequestLogs + * o listWorkRequestErrors + * o cancelWorkRequest(NYI) + */ +public class WorkRequestTest extends RestAPITestBase { + + @BeforeClass + public static void staticSetUp() throws Exception { + Assume.assumeTrue( + "Skipping WorkRequestTest if not minicloud or cloud test", + Boolean.getBoolean(USEMC_PROP) || + Boolean.getBoolean(USECLOUD_PROP)); + + RestAPITestBase.staticSetUp(); + } + + @Test + public void testInvalidWorkRequest() { + String invalidWorkRequestId = + "ocid1.nosqltableworkrequest.oc1.iad.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"; + try { + GetWorkRequestRequest req = GetWorkRequestRequest.builder() + .workRequestId(invalidWorkRequestId) + .build(); + client.getWorkRequest(req); + } catch (BmcException e) { + assertEquals(404, e.getStatusCode()); + assertEquals(ErrorCode.RESOURCE_NOT_FOUND.getErrorCode(), + e.getServiceCode()); + } + + try { + ListWorkRequestErrorsRequest lwrer = ListWorkRequestErrorsRequest + .builder().workRequestId(invalidWorkRequestId).build(); + client.listWorkRequestErrors(lwrer); + } catch (BmcException e) { + assertEquals(404, e.getStatusCode()); + assertEquals(ErrorCode.RESOURCE_NOT_FOUND.getErrorCode(), + e.getServiceCode()); + } + + try { + ListWorkRequestLogsRequest lwrlr = ListWorkRequestLogsRequest + .builder().workRequestId(invalidWorkRequestId).build(); + client.listWorkRequestLogs(lwrlr); + } catch (BmcException e) { + assertEquals(404, e.getStatusCode()); + assertEquals(ErrorCode.RESOURCE_NOT_FOUND.getErrorCode(), + e.getServiceCode()); + } + + try { + DeleteWorkRequestRequest dwrr = DeleteWorkRequestRequest + .builder().workRequestId(invalidWorkRequestId).build(); + client.deleteWorkRequest(dwrr); + } catch (BmcException e) { + assertEquals(404, e.getStatusCode()); + assertEquals(ErrorCode.RESOURCE_NOT_FOUND.getErrorCode(), + 
e.getServiceCode()); + } + } + + @Test + public void testGetWorkRequest() { + final String tableName = "testGetWorkRequest"; + String workReqId; + + String ddl = genCreateTableDdl(tableName); + TableLimits limits = TableLimits.builder() + .maxReadUnits(100) + .maxWriteUnits(100) + .maxStorageInGBs(1) + .build(); + + /* create table */ + workReqId = createTable(tableName, ddl, limits, false); + checkWorkRequest(workReqId, + getCompartmentId(), + tableName, + OperationType.CreateTable); + + /* create index */ + workReqId = createIndex(tableName, "idxNameAge", + new String[] {"name", "age"}, + false /* ifNotExists */, + false /* wait */); + checkWorkRequest(workReqId, + getCompartmentId(), + tableName, + OperationType.UpdateTable); + + /* alter table */ + ddl = genAlterTableDdl(tableName); + workReqId = alterTable(tableName, ddl, false /* wait */); + checkWorkRequest(workReqId, + getCompartmentId(), + tableName, + OperationType.UpdateTable); + + /* alter tableLimits */ + limits = TableLimits.builder() + .maxReadUnits(200) + .maxWriteUnits(200) + .maxStorageInGBs(2) + .build(); + workReqId = updateTable(tableName, limits, false /* wait */); + checkWorkRequest(workReqId, + getCompartmentId(), + tableName, + OperationType.UpdateTable); + + /* drop table */ + workReqId = dropTable(tableName, + false /* isIfExists */, + false /* wait */); + checkWorkRequest(workReqId, + getCompartmentId(), + tableName, + OperationType.DeleteTable); + } + + @Test + public void testListWorkRequestLogError() { + final String tableName = "testListWorkRequestLogError"; + String workReqId; + + String ddl = genCreateTableDdl(tableName); + TableLimits limits = TableLimits.builder() + .maxReadUnits(100) + .maxWriteUnits(100) + .maxStorageInGBs(1) + .build(); + + /* create table */ + workReqId = createTable(tableName, ddl, limits, false); + checkWorkRequest(workReqId, + getCompartmentId(), + tableName, + OperationType.CreateTable); + + /* Get workRequest Log */ + getWorkRequestLog(workReqId); + getWorkRequestError(workReqId, false /* expFail */, null); + + ddl = "alter table " + tableName + "(add date Timestamp)"; + /* Get workRequest Error */ + workReqId = alterTable(tableName, ddl, false); + waitForStatus(workReqId, WorkRequest.Status.Failed); + /* Get workRequest error */ + getWorkRequestError(workReqId, true /* expFail */, "IllegalArgument"); + getWorkRequestLog(workReqId); + + /* To verify that the error message should not contain table ocid */ + ddl = "alter table " + tableName + "(drop id)"; + workReqId = alterTable(tableName, ddl, false); + waitForStatus(workReqId, WorkRequest.Status.Failed); + getWorkRequestError(workReqId, true /* expFail */, "IllegalArgument"); + } + + @Test + public void testCancelWorkRequest() { + final String tableName = "testCancelWorkRequest"; + TableLimits limits = TableLimits.builder() + .maxReadUnits(100) + .maxWriteUnits(100) + .maxStorageInGBs(1) + .build(); + String ddl = genCreateTableDdl(tableName); + createTable(tableName, ddl, limits); + + /* Create index */ + String workReqId = createIndex(tableName, "idx1", + new String[]{"name"}, + false /* ifNotExists */, + false /* wait */); + /* + * Cancel the work request of create index, expect to get + * 404/CantCancelWorkRequest + */ + DeleteWorkRequestRequest req = DeleteWorkRequestRequest.builder() + .workRequestId(workReqId) + .build(); + try { + client.deleteWorkRequest(req); + fail("Expect to get 404/CantCancelWorkRequest but not"); + } catch (BmcException ex) { + assertEquals(404, ex.getStatusCode()); + 
assertEquals(ex.getServiceCode(), + ErrorCode.CANNOT_CANCEL_WORK_REQUEST.getErrorCode()); + } + } + + private void getWorkRequestLog(String workRequestId) { + ListWorkRequestLogsRequest req; + ListWorkRequestLogsResponse res; + WorkRequestLogEntry entry; + + /* Get workRequest Log */ + req = ListWorkRequestLogsRequest.builder() + .workRequestId(workRequestId) + .build(); + res = client.listWorkRequestLogs(req); + assertNull(res.getOpcNextPage()); + assertNotNull(res.getWorkRequestLogEntryCollection().getItems()); + assertEquals(1, res.getWorkRequestLogEntryCollection() + .getItems().size()); + entry = res.getWorkRequestLogEntryCollection().getItems().get(0); + assertNotNull(entry); + assertNotNull(entry.getTimestamp()); + } + + private void getWorkRequestError(String workRequestId, + boolean expFail, + String expErrCode) { + ListWorkRequestErrorsRequest req; + ListWorkRequestErrorsResponse res; + WorkRequestError entry; + + /* Get workRequest error */ + req = ListWorkRequestErrorsRequest.builder() + .workRequestId(workRequestId) + .build(); + res = client.listWorkRequestErrors(req); + + assertNull(res.getOpcNextPage()); + assertNotNull(res.getWorkRequestErrorCollection().getItems()); + if (expFail) { + assertEquals(1, res.getWorkRequestErrorCollection() + .getItems().size()); + entry = res.getWorkRequestErrorCollection().getItems().get(0); + assertNotNull(entry); + if (expErrCode != null) { + assertEquals(expErrCode, entry.getCode()); + } + assertNotNull(entry.getMessage()); + /* + * TODO: enable this after move proxy to 5.4.9 + * assertTrue(!entry.getMessage().contains("ocid1_nosqltable_")); + */ + assertNotNull(entry.getTimestamp()); + } else { + assertTrue(res.getWorkRequestErrorCollection() + .getItems().isEmpty()); + } + } + + @Test + public void testListWorkRequest() { + final String tableName = "testListWorkRequest"; + + List workReqIds = new ArrayList<>(); + TableLimits limits = TableLimits.builder() + .maxReadUnits(100) + .maxWriteUnits(100) + .maxStorageInGBs(1) + .build(); + + /* Executes 4 ddl operations for each table */ + String ddl = genCreateTableDdl(tableName); + workReqIds.add(createTable(tableName, ddl, limits, false /* wait */)); + workReqIds.add(createIndex(tableName, "idxName", + new String[] {"name"}, + false /* ifNotExists */, + false /* wait */)); + + ddl = genAlterTableDdl(tableName); + workReqIds.add(alterTable(tableName, ddl, false /* wait */)); + workReqIds.add(dropIndex(tableName, "idxName", false /* wait */)); + + List reverseIds = new ArrayList<>(workReqIds); + Collections.reverse(reverseIds); + + /* List workRequests */ + List results; + results = listWorkRequests(getCompartmentId(), 2, 4); + assertEquals(4, results.size()); + int i = 0; + for (WorkRequestSummary wrs : results) { + assertEquals(reverseIds.get(i++), wrs.getId()); + } + + /* List workRequests again after the above workRequests complete */ + for (String workReqId : workReqIds) { + waitForComplete(workReqId); + } + + results = listWorkRequests(getCompartmentId(), 4, 4); + assertEquals(4, results.size()); + i = 0; + for (WorkRequestSummary wrs : results) { + assertEquals(reverseIds.get(i++), wrs.getId()); + assertEquals(Float.valueOf(100), wrs.getPercentComplete()); + WorkRequestResource table = wrs.getResources().get(0); + assertNotNull(table); + assertTableOcid(table.getIdentifier()); + } + } + + private List listWorkRequests(String cmptId, + int limit, + int stopCount) { + ListWorkRequestsRequest req; + ListWorkRequestsResponse res; + String nextPage = null; + int count = 0; + List results = 
new ArrayList<>(); + while(true) { + req = ListWorkRequestsRequest.builder() + .compartmentId(cmptId) + .page(nextPage) + .limit(limit) + .build(); + + res = client.listWorkRequests(req); + + int num = res.getWorkRequestCollection().getItems().size(); + results.addAll(res.getWorkRequestCollection().getItems()); + count += num; + nextPage = res.getOpcNextPage(); + if (nextPage == null || count >= stopCount) { + break; + } + } + return results; + } + + private void checkWorkRequest(String workRequestId, + String cmptId, + String tableName, + OperationType opType) { + + GetWorkRequestRequest req = GetWorkRequestRequest.builder() + .workRequestId(workRequestId) + .build(); + + GetWorkRequestResponse res; + while (true) { + res = client.getWorkRequest(req); + + WorkRequest workReq = res.getWorkRequest(); + assertEquals(opType, workReq.getOperationType()); + assertEquals(cmptId, workReq.getCompartmentId()); + assertEquals(workRequestId, workReq.getId()); + assertNotNull(workReq.getTimeAccepted()); + + List resources = workReq.getResources(); + assertNotNull(resources); + assertEquals(1, resources.size()); + WorkRequestResource resource = resources.get(0); + assertEquals("TABLE", resource.getEntityType()); + assertNotNull(resource.getIdentifier()); + assertTableOcid(resource.getIdentifier()); + assertNotNull(resource.getEntityUri()); + assertTrue(resource.getEntityUri().contains(tableName)); + Status status = workReq.getStatus(); + if (status == Status.InProgress) { + assertEquals(ActionType.InProgress, resource.getActionType()); + assertNotNull(workReq.getTimeStarted()); + assertNull(workReq.getTimeFinished()); + assertTrue(workReq.getPercentComplete() < 100); + } else if (status == Status.Succeeded || + status == Status.Failed) { + assertEquals(getActionType(opType), resource.getActionType()); + assertNotNull(workReq.getTimeStarted()); + assertNotNull(workReq.getTimeFinished()); + assertEquals(Float.valueOf(100f), workReq.getPercentComplete()); + break; + } + + if (useCloudService) { + /* + * In cloud test, sleep 250ms to avoid too frequent + * get-work-request calls + */ + try { + Thread.sleep(250); + } catch (InterruptedException e) { + } + } + } + } + + private ActionType getActionType(OperationType opType) { + switch(opType) { + case CreateTable: + return ActionType.Created; + case DeleteTable: + return ActionType.Deleted; + case UpdateTable: + return ActionType.Updated; + default: + fail("Unexpected OperationType: " + opType); + } + return null; + } + + private static String genCreateTableDdl(String tableName) { + return "create table if not exists " + tableName + + "(id integer, name string, age integer, primary key(id))"; + } + + private static String genAlterTableDdl(String tableName) { + return "alter table " + tableName + "(add status string)"; + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/security/IAMRetryTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/security/IAMRetryTest.java new file mode 100644 index 00000000..36412833 --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/security/IAMRetryTest.java @@ -0,0 +1,324 @@ +/*- + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * See the file LICENSE for redistribution information. 
+ * + */ + +package oracle.nosql.proxy.security; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeTrue; + +import java.util.Properties; +import java.util.concurrent.TimeUnit; +import java.util.logging.Logger; + +import oracle.kv.util.kvlite.KVLite; +import oracle.nosql.driver.NoSQLHandle; +import oracle.nosql.driver.NoSQLHandleConfig; +import oracle.nosql.driver.NoSQLHandleFactory; +import oracle.nosql.driver.ops.PutRequest; +import oracle.nosql.driver.ops.PutResult; +import oracle.nosql.driver.ops.TableLimits; +import oracle.nosql.driver.ops.TableRequest; +import oracle.nosql.driver.ops.TableResult; +import oracle.nosql.driver.values.MapValue; +import oracle.nosql.proxy.Config; +import oracle.nosql.proxy.Proxy; +import oracle.nosql.proxy.ProxyMain; +import oracle.nosql.proxy.sc.LocalTenantManager; +import oracle.nosql.proxy.sc.TenantManager; +import oracle.nosql.proxy.util.KVLiteBase; + +import org.junit.After; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; + +public class IAMRetryTest extends KVLiteBase { + + /* + * Proxy state + */ + private static int PROXY_PORT = 8095; + protected static String KVLITE_MEMORYMB_PROP = "test.memorymb"; + protected static String SIMULATE_IAM_PROP = "test.simulateiam"; + + protected static String hostName = getHostName(); + protected static final int startPort = 13240; + protected static KVLite kvlite; + protected static Proxy proxy = null; + protected static TenantManager tm = null; + protected static AccessChecker ac = null; + + protected static int memoryMB = 0; + protected static String prevSimValue = null; + + @BeforeClass + public static void staticSetUp() + throws Exception { + startup(); + } + + @AfterClass + public static void staticTearDown() + throws Exception { + + stopProxy(); + + if (kvlite != null) { + kvlite.stop(false); + } + + cleanupTestDir(); + + /* reset system properties we set */ + if (prevSimValue == null) { + System.clearProperty(SIMULATE_IAM_PROP); + } else { + System.setProperty(SIMULATE_IAM_PROP, prevSimValue); + } + } + + @After + public void tearDown() throws Exception { + stopProxy(); + } + + protected static void stopProxy() + throws Exception { + + if (proxy != null) { + proxy.shutdown(3, TimeUnit.SECONDS); + proxy = null; + } + + if (tm != null) { + tm.close(); + tm = null; + } + } + + protected static void startup() throws Exception { + assumeTrue("Skip IAMRetryTest if not cloudsim test", + !Boolean.getBoolean("onprem") && + !Boolean.getBoolean("usemc") && + !Boolean.getBoolean("usecloud")); + + prevSimValue = System.getProperty(SIMULATE_IAM_PROP); + + String proxyHost = System.getProperty("proxy.host"); + if (proxyHost != null) { + hostName = proxyHost; + } + Integer proxyPort = Integer.getInteger("proxy.port"); + if (proxyPort != null) { + PROXY_PORT = proxyPort; + } + + cleanupTestDir(); + + memoryMB = Integer.getInteger(KVLITE_MEMORYMB_PROP, 0); + kvlite = startKVLite(hostName, + null, // default store name + false, // useThreads = false + false, // verbose = false + false, // multishard + memoryMB, + false); // secured + } + + protected static void startProxy(boolean allowInternalRetries, + int retryDelayMs, + int maxRetriesPerRequest, + int maxActiveRetryCount) + throws Exception { + + + Properties commandLine = new Properties(); + commandLine.setProperty(Config.STORE_NAME.paramName, + getStoreName()); + 
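+        /*
+         * Point the proxy at the local KVLite store; the properties set
+         * further below configure the internal auth-retry knobs
+         * (AUTH_RETRIES_ENABLED, RETRY_DELAY_MS, MAX_RETRIES_PER_REQUEST,
+         * MAX_ACTIVE_RETRY_COUNT) that each test in this class varies.
+         */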
commandLine.setProperty(Config.HELPER_HOSTS.paramName, + (hostName + ":" + getKVPort())); + commandLine.setProperty(Config.PROXY_TYPE.paramName, "CLOUDSIM"); + commandLine.setProperty(Config.HTTP_PORT.paramName, + Integer.toString(PROXY_PORT)); + + commandLine.setProperty(Config.VERBOSE.paramName, + Boolean.toString( + Boolean.getBoolean("test.verbose"))); + + commandLine.setProperty(Config.AUTH_RETRIES_ENABLED.paramName, + Boolean.toString(allowInternalRetries)); + commandLine.setProperty(Config.MAX_ACTIVE_RETRY_COUNT.paramName, + Integer.toString(maxActiveRetryCount)); + commandLine.setProperty(Config.MAX_RETRIES_PER_REQUEST.paramName, + Integer.toString(maxRetriesPerRequest)); + commandLine.setProperty(Config.RETRY_DELAY_MS.paramName, + Integer.toString(retryDelayMs)); + + /* this will make IAM checks delay like real IAM */ + System.setProperty(SIMULATE_IAM_PROP, "true"); + + ac = AccessCheckerFactory.createInsecureAccessChecker(); + + /* create an appropriate TenantManager */ + Config cfg = new Config(commandLine); + tm = LocalTenantManager.createTenantManager(cfg); + + proxy = ProxyMain.startProxy(commandLine, tm, ac, null); + } + + protected NoSQLHandle configHandle(String endpoint) { + NoSQLHandleConfig hconfig = new NoSQLHandleConfig(endpoint); + hconfig.configureDefaultRetryHandler(50, 10); + hconfig.setRequestTimeout(30000); + SecureTestUtil.setAuthProvider(hconfig, true, + "TestTenant"); + return getHandle(hconfig); + } + + public static String getProxyEndpoint() { + try { + return "http://" + hostName + ":" + PROXY_PORT; + } catch (Exception e) { + } + return null; + } + + /** + * Allows classes to create a differently-configured NoSQLHandle. + */ + protected NoSQLHandle getHandle(NoSQLHandleConfig config) { + /* + * Create a Logger. Configuration for the logger is in proxy/build.xml + */ + Logger logger = Logger.getLogger(getClass().getName()); + config.setLogger(logger); + + /* + * Open the handle + */ + return NoSQLHandleFactory.createNoSQLHandle(config); + } + + /* + * Utility methods for use by subclasses + */ + + /** + * Simpler version of tableOperation. This will not support + * a change of limits as it doesn't accept a table name. 
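+     * <p>Used as in createTable/dropTable below, for example
+     * {@code tableOperation(handle, ddl, new TableLimits(1000, 1000, 10), 20000)}:
+     * the call waits up to waitMillis for the operation to complete,
+     * polling every waitMillis/10 milliseconds.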
+ */ + protected static TableResult tableOperation(NoSQLHandle handle, + String statement, + TableLimits limits, + int waitMillis) { + assertTrue(waitMillis > 500); + TableRequest tableRequest = new TableRequest() + .setStatement(statement) + .setTableLimits(limits) + .setTimeout(15000); + + return handle.doTableRequest(tableRequest, waitMillis, waitMillis/10); + } + + /** + * Delays for the specified number of milliseconds, ignoring exceptions + */ + static void delay(int delayMS) { + try { + Thread.sleep(delayMS); + } catch (Exception e) { + } + } + + protected static int createTable(NoSQLHandle handle, String tableName) { + String stmt = "create table if not exists " + tableName + + "(id integer, name string, primary key(id))"; + int retries = 0; + TableResult tres = tableOperation(handle, stmt, + new TableLimits(1000, 1000, 10), + 20000); + assertEquals(TableResult.State.ACTIVE, tres.getTableState()); + if (tres.getRetryStats() != null) { + retries += tres.getRetryStats().getRetries(); + } + /* do a single put to get it into the cache */ + MapValue value = new MapValue().put("id", 10).put("name", "jane"); + PutRequest putRequest = new PutRequest() + .setValue(value) + .setTableName(tableName); + PutResult res = handle.put(putRequest); + assertNotNull("Put failed", res.getVersion()); + if (res.getRetryStats() != null) { + retries += res.getRetryStats().getRetries(); + } + return retries; + } + + protected static int dropTable(NoSQLHandle handle, String tableName) { + String stmt = "drop table " + tableName; + TableResult tres = tableOperation(handle, stmt, null, 20000); + assertEquals(TableResult.State.DROPPED, tres.getTableState()); + if (tres.getRetryStats() == null) { + return 0; + } + return tres.getRetryStats().getRetries(); + } + + @Test + public void testNormalRetries() + throws Exception { + /* start proxy with internal retries disabled */ + startProxy(false, 10, 1, 1); + /* run test, see auth retries in client */ + NoSQLHandle handle = configHandle(getProxyEndpoint()); + int retries = createTable(handle, "retryTestTable"); + retries += dropTable(handle, "retryTestTable"); + /* expect that we had auth retries */ + assertTrue("expected at least one retry, got zero", retries > 0); + } + + @Test + public void testInternalRetries() + throws Exception { + /* start proxy with internal retries enabled */ + startProxy(true, 30, 10, 10); + /* run test, see zero auth retries in client */ + NoSQLHandle handle = configHandle(getProxyEndpoint()); + int retries = createTable(handle, "retryTestTable"); + retries += dropTable(handle, "retryTestTable"); + /* expect that we had no auth retries */ + assertTrue("expected zero retries, got " + retries, retries == 0); + } + + private void checkInvalidConfig(int retryDelayMs, + int maxRetriesPerRequest, + int maxActiveRetries) { + try { + startProxy(true, retryDelayMs, + maxRetriesPerRequest, + maxActiveRetries); + stopProxy(); + fail("Invalid config should have failed"); + } catch (RuntimeException re) { + } catch (Exception e) { + fail("Expected RuntimeException, got " + e); + } + } + + @Test + public void testInvalidConfigParams() + throws Exception { + checkInvalidConfig(0, 0, 0); + checkInvalidConfig(-1, -1, -1); + checkInvalidConfig(1, 20, 20); + checkInvalidConfig(100, 1000, 20); + checkInvalidConfig(100, 20, 1001); + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/security/SecureTestUtil.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/security/SecureTestUtil.java new file mode 100644 index 00000000..25fbd50b --- 
/dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/security/SecureTestUtil.java @@ -0,0 +1,95 @@ +/*- + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle NoSQL + * Database made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/nosqldb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle NoSQL Database for a copy of the license and + * additional information. + */ + +package oracle.nosql.proxy.security; + +import static oracle.nosql.proxy.protocol.HttpConstants.TOKEN_PREFIX; + +import java.io.IOException; + +import oracle.nosql.driver.AuthorizationProvider; +import oracle.nosql.driver.NoSQLHandleConfig; +import oracle.nosql.driver.iam.SignatureProvider; +import oracle.nosql.driver.kv.StoreAccessTokenProvider; +import oracle.nosql.driver.ops.Request; + +public class SecureTestUtil { + /** + * Configure NoSQLHandleConfig with authorization provider. + */ + public static void setAuthProvider(NoSQLHandleConfig config, + boolean security, + String tenant) { + setAuthProvider(config, security, false, tenant); + } + + public static void setAuthProvider(NoSQLHandleConfig config, + boolean security, + boolean onprem, + String tenant) { + if (security) { + if (onprem) { + throw new IllegalArgumentException( + "setAuthProvider not supported (yet) for secure onprem"); + } + config.setAuthorizationProvider( + new TestSignatureProvider().setTenantId(tenant)); + return; + } + if (onprem) { + config.setAuthorizationProvider(new StoreAccessTokenProvider()); + } else { + config.setAuthorizationProvider(new AuthorizationProvider() { + @Override + public String getAuthorizationString(Request request) { + return getAuthHeader(tenant); + } + + @Override + public void close() { + } + }); + } + } + + public static void setAuthProvider(NoSQLHandleConfig config, + String configFile, + String profile) { + try { + config.setAuthorizationProvider( + new SignatureProvider(configFile, profile)); + } catch (IOException ioe) { + throw new IllegalArgumentException("Unable to load " + profile + + " from configFile: " + ioe); + } + } + + /** + * Get an authorization header with service access token. + */ + public static String getAuthHeader(String tenantId, boolean security) { + if (security) { + return new TestSignatureProvider().setTenantId(tenantId) + .getAuthorizationString(null); + } + return getAuthHeader(tenantId); + } + + /** + * Get an authorization header with access token. + * @param needAccountAT whether need account access token + */ + public static String getAuthHeader(String tenantId) { + return TOKEN_PREFIX + tenantId; + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/security/TestSignatureProvider.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/security/TestSignatureProvider.java new file mode 100644 index 00000000..576271b9 --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/security/TestSignatureProvider.java @@ -0,0 +1,96 @@ +/*- + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle NoSQL + * Database made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/nosqldb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle NoSQL Database for a copy of the license and + * additional information. + */ + +package oracle.nosql.proxy.security; + +import static oracle.nosql.driver.util.HttpConstants.REQUEST_COMPARTMENT_ID; +import static oracle.nosql.driver.util.HttpConstants.AUTHORIZATION; + +import oracle.nosql.driver.NoSQLHandleConfig; +import oracle.nosql.driver.iam.SignatureProvider; +import oracle.nosql.driver.ops.Request; + +import io.netty.handler.codec.http.HttpHeaders; + +public class TestSignatureProvider extends SignatureProvider { + + private String tenantId; + private String userId; + + public TestSignatureProvider() { + super(null, 0, 0); + tenantId = "TestTenant"; + userId = "TestUser"; + } + + public TestSignatureProvider setTenantId(String tenantId) { + this.tenantId = tenantId; + return this; + } + + @Override + public String getAuthorizationString(Request request) { + /* + * IAM signature format: + * Signature version="%s",headers="%s",keyId="%s", + * algorithm="rsa-sha256",signature="%s" + * + * Note that the tenantId/compartmentId are not inherently + * easily available from the auth string ("keyId" may include the + * tenancy, but may also be different for, say, instance + * principals) + * + * example real header: + * Signature headers="(request-target) host date",keyId="ocid1.tenancy.oc1..aaaaaaaaba3pv6wuzr4h25vqstifsfdsq/ocid1.user.oc1..aaaaaaaa65vwl75tewwm32rgqvm6i34unq/9b:39:03:07:c6:fa:5c:58:7d:60:85:d8:3e:5c:be:7e",algorithm="rsa-sha256",signature="LLszR7k+iORqsLNOVXdPVjRupFDnV99PhByYqWGxsJi6/04xWD0jVA4hnawCG5ciyXA4O2eUH+Ggh/glEnbLht3yowdLelPDnI6nQ9fC7tsQjIM5YsFka0k9AzPPRkpX6l2Ic3/CWvonf9zjeR6KM1ICcakCrYj6Xjmla5tapbJJ5AOv1r5jzCiIAq6avZSS+rRHrFjFVbgKkGekFJKJjh4CPA1beO1YYBF+ZcIGwxL7ItvWkV2AFTEv/0L15W4hEkEbDjQq5eeCvJdLUD8VfLYt1ELLmMZdnUvPXVfYrCHM1qQWLKS6KSerIjdaSKvzYD71idCDDQ+FGFYxcOPA8Q==",version="1" + * + * This needs to at least be in the format above such that the + * cloudsim tests run with "-Dsecurity=true" will pass + */ + return "Signature headers=\"(request-target) host date\",keyId=\"" + + tenantId + "/" + userId + "/dummy\"," + + "algorithm=\"rsa-sha256\",signature=\"dummy\",version=\"1\""; + } + + @Override + public void setRequiredHeaders(String authString, + Request request, + HttpHeaders headers, + byte[] content) { + String compartment = request.getCompartment(); + if (compartment == null) { + /* + * If request doesn't has compartment id, set the tenant id as the + * default compartment, which is the root compartment in IAM if + * using user principal. + */ + compartment = tenantId; + } + if (compartment != null) { + headers.add(REQUEST_COMPARTMENT_ID, compartment); + } + headers.add(AUTHORIZATION, getAuthorizationString(null)); + } + + @Override + public void close() { + } + + /** + * @since 5.2.27, prepare would throw NPE without specifying + * AuthenticationProfileProvider to SignatureProvider. 
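+     * Returning {@code this} is sufficient for this stub, which only emits
+     * the canned signature string above and never contacts IAM.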
+ */ + @Override + public SignatureProvider prepare(NoSQLHandleConfig config) { + return this; + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/util/CreateStore.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/util/CreateStore.java new file mode 100644 index 00000000..1e41dcca --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/util/CreateStore.java @@ -0,0 +1,908 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2011, 2023 Oracle and/or its affiliates. All rights reserved. + * + */ + +package oracle.nosql.proxy.util; + +import static oracle.kv.impl.security.PasswordManager.FILE_STORE_MANAGER_CLASS; +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.rmi.RemoteException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Formatter; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.Set; +import java.util.SortedMap; +import java.util.TreeMap; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.BiConsumer; +import java.util.logging.Logger; + +import oracle.kv.KVStore; +import oracle.kv.KVStoreConfig; +import oracle.kv.KVStoreException; +import oracle.kv.impl.admin.AdminStatus; +import oracle.kv.impl.admin.CommandServiceAPI; +import oracle.kv.impl.admin.client.CommandShell; +import oracle.kv.impl.admin.param.BootstrapParams; +import oracle.kv.impl.api.ClientId; +import oracle.kv.impl.param.Parameter; +import oracle.kv.impl.param.ParameterMap; +import oracle.kv.impl.param.ParameterState; +import oracle.kv.impl.rep.admin.RepNodeAdminAPI; +import oracle.kv.impl.sna.StorageNodeAgent; +import oracle.kv.impl.sna.StorageNodeAgentAPI; +import oracle.kv.impl.topo.AdminId; +import oracle.kv.impl.topo.DatacenterId; +import oracle.kv.impl.topo.DatacenterType; +import oracle.kv.impl.topo.RepNode; +import oracle.kv.impl.topo.RepNodeId; +import oracle.kv.impl.topo.StorageNode; +import oracle.kv.impl.topo.StorageNodeId; +import oracle.kv.impl.topo.Topology; +import oracle.kv.impl.util.ConfigUtils; +import oracle.kv.impl.util.ConfigurableService.ServiceStatus; +import oracle.kv.impl.util.PollCondition; +import oracle.kv.impl.util.registry.RegistryUtils; + +/** + * Start, stop, reconfigure nodes. + */ +public class CreateStore { + + private static final Logger logger = + Logger.getLogger("oracle.kv.CreateStore"); + + private static final int portsPerFinder = 20; + private static final int haRange = 8; + + //In some cases it took more than 20 seconds. + private static final int deployWaitSeconds = + Integer.getInteger("test.deploywaitseconds", 20); + + /** + * The min amount of memory with which to configure a capacity=1 SN in a + * unit test. At this size we can accommodate a 3X3 config in 2.25G. + * + * Larger values risk making the machine thrash to the point where the + * tests can produce spurious errors or cause vm thrashing on machines + * with 3-4GB of memory. + * + * Smaller values risk cache eviction and JE level slowdowns for large + * data sets. + */ + public static final int MB_PER_SN = 256; + public static final String MB_PER_SN_STRING = String.valueOf(MB_PER_SN); + + public static final String STORAGE_NODE_POOL_NAME = "CreateStorePool"; + + /** + * The default properties that should be specified when creating JE + * environments. 
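+     * Sourced from the {@code test.je.props} system property and
+     * normalized by {@code canonicalJeParams}.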
+ */ + public static final String JE_DEFAULT_PROPERTIES = + canonicalJeParams(System.getProperty("test.je.props", "")); + + private String storeName; + private String rootDir; + private String hostname; + private List zones; + private int numPartitions; + private int numStorageNodes; + private int numStorageNodesInPool; + private int capacity = 1; + private int totalPrimaryRepFactor; + private int totalRepFactor; + private int numShards; + private int startPort; + private final int memoryMB; + private StorageNodeAgent[] snas; + private StorageNodeAgent[] expansionSnas; + private PortFinder[] portFinders; + private CommandServiceAPI cs; + private int csIndex = -1; + private RepNodeAdminAPI[] rns; + private boolean useThreads; + private boolean verbose; + private String mgmtImpl; + private ParameterMap policyMap; + private Set extraParams; + + /* Hang on to per-SN information for unit test usage. */ + private final Map portFinderMap = + new HashMap(); + + private final Map snaMap = + new HashMap(); + + private final Map> snToRNs = + new HashMap>(); + + private final Set snsWithAdmins = + new HashSet(); + + /* + * AdminDeployed and adminLocations provide a mechanism for placing admins + * on particular SNs, for specific test cases. The SNs specified by the ids + * in adminLocations should host Admins. Defaults to SNs 1, 2, 3. + */ + private final AtomicBoolean adminDeployed = new AtomicBoolean(false); + private Set adminLocations = new HashSet<>(); + + /** Information about a zone. */ + public static class ZoneInfo { + public final int repFactor; + public final DatacenterType zoneType; + public boolean masterAffinity; + public ZoneInfo(int repFactor) { + this(repFactor, DatacenterType.PRIMARY); + } + public ZoneInfo(int repFactor, DatacenterType zoneType) { + if (repFactor < 0) { + throw new IllegalArgumentException( + "The repFactor must be greater than 0"); + } + this.repFactor = repFactor; + if (zoneType == null) { + throw new IllegalArgumentException( + "The zone type must not be null"); + } + this.zoneType = zoneType; + } + public static List primaries(int... 
replicationFactors) { + final List result = + new ArrayList(replicationFactors.length); + for (int replicationFactor : replicationFactors) { + result.add(new ZoneInfo(replicationFactor)); + } + return result; + } + + public ZoneInfo setMasterAffinity(boolean masterAffinity) { + this.masterAffinity = masterAffinity; + return this; + } + } + + public CreateStore(String rootDir, + String storeName, + int startPort, + int numStorageNodes, + int replicationFactor, + int numPartitions, + int capacity, + int memoryMB, + boolean useThreads, + String mgmtImpl) + throws Exception { + + + this.useThreads = useThreads; + this.storeName = storeName; + this.zones = ZoneInfo.primaries(replicationFactor); + this.numPartitions = numPartitions; + this.numStorageNodes = numStorageNodes; + this.numStorageNodesInPool = numStorageNodes; + this.startPort = startPort - 1; + this.rootDir = rootDir; + this.hostname = "localhost"; + this.capacity = capacity; + this.memoryMB = memoryMB; + this.mgmtImpl = null; + this.extraParams = extraParams; + cs = null; + verbose = false; + policyMap = null; + + /* Default SNs which host Admins */ + adminLocations.add(1); + adminLocations.add(2); + adminLocations.add(3); + } + + public void setExpansionSnas(StorageNodeAgent[] expSnas) { + expansionSnas = expSnas; + } + + public void setPolicyMap(ParameterMap map) { + policyMap = map; + } + + public ParameterMap getPolicyMap() { + return policyMap; + } + + public void setPoolSize(int size) { + numStorageNodesInPool = size; + } + + public void setMasterAffinities(boolean masterAffinity) { + for (ZoneInfo zone : zones) { + zone.setMasterAffinity(masterAffinity); + } + } + + public void setVerbose(boolean verbose) { + this.verbose = verbose; + } + + /** + * Compute the admin locations needed to create RF admins in each current + * zone. + * + * @return the indices of SNs with admins + */ + public int[] computeAdminsForZones() { + initNumShards(); + + /* Allocate RF admins per zone */ + final int[] adminSNs = new int[totalRepFactor]; + int adminSNsOffset = 0; + int zoneId = 0; + int zoneRemainingRepFactor = 0; + int zoneRemainingCapacity = 0; + for (int i = 1; i <= numStorageNodesInPool; i++) { + if (zoneRemainingCapacity == 0) { + if (++zoneId > zones.size()) { + break; + } + zoneRemainingRepFactor = zones.get(zoneId-1).repFactor; + zoneRemainingCapacity = numShards * zoneRemainingRepFactor; + } + if (zoneRemainingRepFactor > 0) { + adminSNs[adminSNsOffset++] = i; + zoneRemainingRepFactor--; + } + + if (zoneRemainingCapacity > 0) { + zoneRemainingCapacity -= capacity; + } + } + return adminSNs; + } + + /** + * Create a series of unregistered SNs, to mimic the initial deployment + * of software on a storage node. These SNs do not yet have a storage node + * id. + */ + public void initStorageNodes() + throws Exception { + + /* + * Allow this to be called more than once. 
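+         * Repeat calls are no-ops once snas has been initialized.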
+ */ + if (snas != null) { + return; + } + + initNumShards(); + + snas = new StorageNodeAgent[numStorageNodes]; + portFinders = new PortFinder[numStorageNodes]; + verbose("Creating " + numStorageNodes + + " storage nodes in root " + rootDir); + + for (int i = 0; i < numStorageNodes; i++) { + PortFinder pf = new PortFinder(startPort, haRange, hostname); + + snas[i] = CreateStoreUtils.createUnregisteredSNA + (rootDir, + pf, + capacity, + "config" + i + ".xml", + useThreads, + i == 0, /* Create Admin for first one */ + memoryMB, + extraParams); + + verbose("Created Storage Node " + i + " on host:port " + + hostname + ":" + pf.getRegistryPort()); + startPort += portsPerFinder; + portFinders[i] = pf; + } + verbose("Done creating storage nodes"); + } + + /** + * Initialize the numShards, numStorageNodesInPool, and totalRepFactor + * fields. + */ + private void initNumShards() { + if (totalPrimaryRepFactor != 0) { + return; + } + + if (numStorageNodesInPool == 0) { + numStorageNodesInPool = numStorageNodes; + } + for (final ZoneInfo zone : zones) { + if (zone.zoneType == DatacenterType.PRIMARY) { // RESOLVE why count secondaries + totalPrimaryRepFactor += zone.repFactor; + } + totalRepFactor += zone.repFactor; + } + int totalPoolCapacity = numStorageNodesInPool * capacity; + if (totalPoolCapacity < totalRepFactor) { + throw new IllegalStateException( + "SN pool capacity is too low: need " + totalRepFactor + + ", found " + totalPoolCapacity); + } + // RESOLVE the number of shards takes into account secondaries? + numShards = totalPoolCapacity / totalRepFactor; + } + + private void initSNMaps(StorageNodeId snId, + PortFinder pf, + StorageNodeAgent sna) { + snaMap.put(snId, sna); + portFinderMap.put(snId, pf); + snToRNs.put(snId, new ArrayList()); + } + + private void verbose(String msg) { + if (verbose) { + System.out.println(msg); + } + } + + public String getRootDir() { + return rootDir; + } + + public void setRootDir(String newRootDir) { + rootDir = newRootDir; + } + + public String getHostname() { + return hostname; + } + + public String getStoreName() { + return storeName; + } + + public int getRegistryPort(StorageNodeId snId) { + PortFinder pf = portFinderMap.get(snId); + if (pf == null) { + throw new IllegalStateException("No SNA for id " + snId); + } + return pf.getRegistryPort(); + } + + public void start() throws Exception { + + initStorageNodes(); + + verbose("Creating plans for store " + storeName); + cs = CreateStoreUtils.waitForAdmin(snas[0].getHostname(), + snas[0].getRegistryPort(), + 10, logger); + csIndex = 0; + + /* + * If we are configured for security, upgrade to a real login + */ + cs.configure(storeName); + + /* + * Deploy Datacenters. + */ + int zoneId = 1; + for (final ZoneInfo zone : zones) { + int planId = cs.createDeployDatacenterPlan( + "DCPlan" + zoneId, "Zone" + zoneId, zone.repFactor, + zone.zoneType, false /* no arbiters */, false); + cs.approvePlan(planId); + cs.executePlan(planId, false); + cs.awaitPlan(planId, 0, null); + cs.assertSuccess(planId); + zoneId++; + } + + /* + * Set policy map if non-null or if default JE properties were + * specified + */ + final ParameterMap mergedPolicyMap = + mergeParameterMapDefaults(policyMap); + if (mergedPolicyMap != null) { + cs.setPolicies(mergedPolicyMap); + } + + /* + * Deploy first SN. 
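+         * The first admin and the CreateStorePool storage node pool are
+         * bootstrapped on this SN before the remaining SNs are deployed.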
+ */ + int planId = cs.createDeploySNPlan + ("Deploy SN", new DatacenterId(1), snas[0].getHostname(), + snas[0].getRegistryPort(), "comment"); + cs.approvePlan(planId); + cs.executePlan(planId, false); + cs.awaitPlan(planId, 0, null); + cs.assertSuccess(planId); + initSNMaps(snas[0].getStorageNodeId(), portFinders[0], snas[0]); + + /* + * Deploy admin + */ + StorageNodeId adminSNId = snas[0].getStorageNodeId(); + planId = cs.createDeployAdminPlan + ("Deploy admin", + adminSNId); + cs.approvePlan(planId); + cs.executePlan(planId, false); + cs.awaitPlan(planId, 0, null); + cs.assertSuccess(planId); + snsWithAdmins.add(snas[0].getStorageNodeId()); + + /* + * Need a storage pool for the store deployment. + */ + cs.addStorageNodePool(STORAGE_NODE_POOL_NAME); + cs.addStorageNodeToPool(STORAGE_NODE_POOL_NAME, + snas[0].getStorageNodeId()); + + /* + * Deploy the rest of the Storage Nodes + */ + verbose("Deploying the storage nodes and admin replicas"); + adminDeployed.set(true); + zoneId = 1; + int zoneRemainingCapacity = zones.get(zoneId-1).repFactor * numShards; + for (int i = 1; i < snas.length; i++) { + zoneRemainingCapacity -= capacity; + if ((zoneRemainingCapacity <= 0) && (zoneId < zones.size())) { + zoneId++; + zoneRemainingCapacity = + zones.get(zoneId-1).repFactor * numShards; + } + + planId = cs.createDeploySNPlan + ("Deploy SN", new DatacenterId(zoneId), snas[i].getHostname(), + snas[i].getRegistryPort(), "comment"); + cs.approvePlan(planId); + cs.executePlan(planId, false); + cs.awaitPlan(planId, 0, null); + cs.assertSuccess(planId); + if (i < numStorageNodesInPool) { + cs.addStorageNodeToPool(STORAGE_NODE_POOL_NAME, + snas[i].getStorageNodeId()); + } else if ((zoneId < zones.size()) || + (zoneRemainingCapacity > 0)) { + throw new IllegalStateException( + "Pool is too small to provide sufficient SN capacity for" + + " all zones. Remaining zones: " + + (zones.size() - zoneId) + + ", remaining rep factor for current zone: " + + zoneRemainingCapacity); + } + + initSNMaps(snas[i].getStorageNodeId(), portFinders[i], snas[i]); + + /* + * Create Admins on the SNs specified by adminLocation. + */ + adminSNId = snas[i].getStorageNodeId(); + if (adminLocations.contains(adminSNId.getStorageNodeId())) { + verbose("Deploying Admin Replica"); + adminSNId = snas[i].getStorageNodeId(); + planId = cs.createDeployAdminPlan( + "Deploy Admin", + adminSNId, + null /* Default admin type to zone type */); + cs.approvePlan(planId); + cs.executePlan(planId, false); + cs.awaitPlan(planId, 0, null); + cs.assertSuccess(planId); + snsWithAdmins.add(snas[i].getStorageNodeId()); + } + } + + /* + * The store + */ + verbose("Deploying the store"); + cs.createTopology("_CreateStoreTopo", + STORAGE_NODE_POOL_NAME, + numPartitions, + false); + planId = cs.createDeployTopologyPlan("Deploy CreateStore", + "_CreateStoreTopo", null); + cs.approvePlan(planId); + cs.executePlan(planId, false); + cs.awaitPlan(planId, deployWaitSeconds, TimeUnit.SECONDS); + cs.assertSuccess(planId); + + verbose("Done with plans, waiting for RepNodes"); + + /* + * Wait for the RepNodes to come up. Store the RepNodeAdminAPI + * interfaces for later retrieval. 
+ */ + Topology topo = cs.getTopology(); + List repNodes = topo.getSortedRepNodes(); + + int i = 0; + rns = new RepNodeAdminAPI[repNodes.size()]; + for (RepNode rn : repNodes) { + RegistryUtils ru = new RegistryUtils(topo, null, logger); + RepNodeId rnId = rn.getResourceId(); + StorageNode sn = topo.get(rn.getStorageNodeId()); + rns[i] = + CreateStoreUtils.waitForRepNodeAdmin(topo.getKVStoreName(), + sn.getHostname(), + sn.getRegistryPort(), + rnId, + null, + 10, ServiceStatus.RUNNING, + logger); + snToRNs.get(rn.getStorageNodeId()).add(rnId); + } + + /* wait for system tables to be ready */ + new PollCondition(500, 40000) { + @Override + protected boolean condition() { + try { + return cs.isStoreReady(true /* tableOnly */); + } catch (RemoteException e) { + return false; + } + } + }.await(); + + verbose("Store deployment complete"); + } + + public KVStoreConfig createKVConfig() { + return new KVStoreConfig(storeName, hostname + ":" + getRegistryPort()); + } + + public CommandServiceAPI getAdmin() { + return cs; + } + + public int getAdminIndex() { + return csIndex; + } + + public RepNodeAdminAPI getRepNodeAdmin(int index) { + return rns[index]; + } + + public CommandServiceAPI getAdmin(int index) + throws Exception { + + return getAdmin(index, 10); + } + + public CommandServiceAPI getAdmin(int index, int timeoutSec) + throws Exception { + + StorageNodeAgent sna = snas[index]; + return CreateStoreUtils.waitForAdmin(sna.getHostname(), + sna.getRegistryPort(), + timeoutSec, + logger); + } + + public CommandServiceAPI getAdminMaster() { + + /** + * Retry if a failover is happening. + */ + for (int i = 0; i < 10; i++ ) { + for (int j = 0; j < snas.length; j++) { + try { + CommandServiceAPI newcs = getAdmin(j); + AdminStatus adminStatus = newcs.getAdminStatus(); + if (adminStatus.getIsAuthoritativeMaster()) { + cs = newcs; + csIndex = j; + return newcs; + } + } catch (Exception ignored) { + } + } + try { + Thread.sleep(1000); + } catch (InterruptedException e) { + } + } + return null; + } + + public void shutdown() { + shutdown(true); + } + + public void shutdown(boolean force) { + shutdownSnas(snas, force); + shutdownSnas(expansionSnas, force); + snas = null; + expansionSnas = null; + } + + private void shutdownSnas(StorageNodeAgent[] snAgents, boolean force) { + if (snAgents != null) { + for (int i = 0; i < snAgents.length; i++) { + if (snAgents[i] != null) { + snAgents[i].shutdown(true, force); + snAgents[i] = null; + } + } + } + } + + public void shutdownSNA(int snaIdx, boolean force) { + snas[snaIdx].shutdown(true, force); + } + + public PortFinder[] getPortFinders() { + return portFinders; + } + + public int getRegistryPort() { + return getRegistryPort(csIndex); + } + + /** + * @param index identifies an SN + */ + public int getRegistryPort(int index) { + return portFinders[index].getRegistryPort(); + } + + public PortFinder getPortFinder(StorageNodeId snId) { + return portFinderMap.get(snId); + } + + public StorageNodeAgent getStorageNodeAgent(int index) { + return snas[index]; + } + + public StorageNodeId[] getStorageNodeIds() { + StorageNodeId[] snids = new StorageNodeId[snas.length]; + for (int i = 0; i < snas.length; i++) { + snids[i] = snas[i].getStorageNodeId(); + } + return snids; + } + + /* + * The following methods are used to query the topology of the store for + * testing reasons. + */ + + /** + * Return true if this SN hosts an admin. 
+ */ + public boolean hasAdmin(StorageNodeId snId) { + return snsWithAdmins.contains(snId); + } + + /** + * Returns the admin ID for specified SN, or null if the specified SN does + * not have an admin. + */ + public AdminId getAdminId(int index) { + if (!snsWithAdmins.contains(snas[index].getStorageNodeId())) { + return null; + } + int adminId = 0; + for (int i = 0; i <= index; i++) { + final StorageNodeId snId = snas[i].getStorageNodeId(); + if (snsWithAdmins.contains(snId)) { + adminId++; + } + } + return new AdminId(adminId); + } + + /** + * Returns the storage Id of the SN that hosts the admin of adminId, null + * if no match. + */ + public StorageNodeId getStorageNodeId(AdminId adminId) { + int count = 0; + for (StorageNodeAgent sna : snas) { + StorageNodeId snId = sna.getStorageNodeId(); + if (!snsWithAdmins.contains(snId)) { + continue; + } + count ++; + if ((new AdminId(count)).equals(adminId)) { + return snId; + } + } + return null; + } + + /** + * Support for security: grant roles for the given user. + */ + public void grantRoles(String user, String... roles) + throws Exception { + + final Set roleSet = new HashSet(); + Collections.addAll(roleSet, roles); + final int planId = + cs.createGrantPlan("Grant roles", user, roleSet); + cs.approvePlan(planId); + cs.executePlan(planId, false); + cs.awaitPlan(planId, 0, null); + cs.assertSuccess(planId); + } + + /** + * Support for security: remove the specified roles from the given user. + */ + public void revokeRoles(String user, String... roles) + throws Exception { + + final Set roleSet = new HashSet(); + Collections.addAll(roleSet, roles); + final int planId = + cs.createRevokePlan("Revoke roles", user, roleSet); + cs.approvePlan(planId); + cs.executePlan(planId, false); + cs.awaitPlan(planId, 0, null); + cs.assertSuccess(planId); + } + + /* + * Return the bootstrap config file for the number SN + */ + public File getBootstrapConfigFile(int snNum) { + return new File(rootDir + File.separator + "config" + snNum + ".xml"); + } + + public void shutdownStore(boolean force) { + + verbose("Shutting down store: " + storeName + + " in rootDir: " + rootDir); + for (int i = 0; i < numStorageNodes; i++) { + File configFile = + new File(rootDir + File.separator + "config" + i + ".xml"); + verbose("Shutting down storage node " + i + " config file " + + configFile); + if (!configFile.exists()) { + System.err.println("Cannot find configuration file: " + + configFile); + return; + } + try { + BootstrapParams bp = ConfigUtils.getBootstrapParams(configFile); + String name = + RegistryUtils.bindingName + (bp.getStoreName(), + new StorageNodeId(bp.getStorageNodeId()).getFullName(), + RegistryUtils.InterfaceType.MAIN); + verbose("Attempting to contact Storage Node : " + + bp.getHostname() + ":" + bp.getRegistryPort()); + StorageNodeAgentAPI snai = + RegistryUtils.getStorageNodeAgent + (bp.getHostname(), bp.getRegistryPort(), name, + snas[i].getLoginManager(), logger); + System.err.println("Shutting down SNA " + i + + ", config file: " + configFile); + snai.shutdown(true, force); + } catch (Exception e) { + System.err.println("Exception in shutdown: " + e); + } + } + } + + /** + * Return the list of RNs that are hosted on this SN. + */ + public List getRNs(StorageNodeId snId) { + return snToRNs.get(snId); + } + + /** + * Return the number of RNs that are hosted on this SN. 
+ */ + public int numRNs(StorageNodeId snId) { + return snToRNs.get(snId).size(); + } + + public StorageNodeAgent getStorageNodeAgent(StorageNodeId snId) { + return snaMap.get(snId); + } + + /** + * Returns the parameter map that should be used to set store policy or + * service parameters given the desired test-specific policy map passed as + * an argument. This method merges in any additional default parameters. + * Returns null if no parameters are needed. If map is null, then no + * test-specific parameters will be used, but the return value may still + * return a map that represents default parameters. Does not modify the + * supplied map. + */ + public static ParameterMap mergeParameterMapDefaults(ParameterMap map) { + ParameterMap mergedMap = null; + + /* Set JE_MISC defaults */ + if ((JE_DEFAULT_PROPERTIES != null) && + !JE_DEFAULT_PROPERTIES.equals("")) { + if (map == null) { + if (mergedMap == null) { + mergedMap = new ParameterMap(); + } + mergedMap.setParameter(ParameterState.JE_MISC, + JE_DEFAULT_PROPERTIES); + } else { + if (mergedMap == null) { + mergedMap = map.copy(); + } + String jeMiscDefault = JE_DEFAULT_PROPERTIES; + if (!jeMiscDefault.endsWith(";")) { + jeMiscDefault += ";"; + } + final String jeMisc = + map.getOrDefault(ParameterState.JE_MISC).asString(); + mergedMap.setParameter(ParameterState.JE_MISC, + mergeJeParams(jeMiscDefault, jeMisc)); + } + } + + return (mergedMap != null) ? mergedMap : map; + } + + /** + * Applies the consumer to each JE parameter key and value specified in the + * string. + */ + public static void forEachJeParams(String params, + BiConsumer func) { + for (String item : params.split(";")) { + item = item.trim(); + if ("".equals(item)) { + continue; + } + final String[] split = item.split("[ =]", 2); + func.accept(split[0], split[1]); + } + } + + /** Returns a String representing JE parameters in a canonical form. */ + public static String canonicalJeParams(String params) { + final StringBuilder sb = new StringBuilder(); + try (final Formatter fmt = new Formatter(sb)) { + forEachJeParams(params, (k, v) -> fmt.format("%s %s;", k, v)); + return sb.toString(); + } + } + + /** + * Merges two sets of JE parameters, represented as string values, and + * returns the result with any duplicate settings removed. + */ + public static String mergeJeParams(String initial, String update) { + final StringBuilder sb = new StringBuilder(); + try (final Formatter fmt = new Formatter(sb)) { + final SortedMap initialMap = new TreeMap<>(); + forEachJeParams(initial, initialMap::put); + final SortedMap updateMap = new TreeMap<>(); + forEachJeParams(update, updateMap::put); + initialMap.forEach((k, v) -> { + if (!updateMap.containsKey(k)) { + fmt.format("%s %s;", k, v); + } + }); + updateMap.forEach((k, v) -> fmt.format("%s %s;", k, v)); + return sb.toString(); + } + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/util/CreateStoreUtils.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/util/CreateStoreUtils.java new file mode 100644 index 00000000..74b0218d --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/util/CreateStoreUtils.java @@ -0,0 +1,332 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2011, 2023 Oracle and/or its affiliates. All rights reserved. 
+ * + */ + +package oracle.nosql.proxy.util; + +import java.io.File; +import java.io.FileOutputStream; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.rmi.NoSuchObjectException; +import java.rmi.NotBoundException; +import java.rmi.RemoteException; +import java.util.ArrayList; +import java.util.List; +import java.util.Set; +import java.util.logging.Logger; + +import oracle.kv.impl.admin.CommandServiceAPI; +import oracle.kv.impl.admin.param.BootstrapParams; +import oracle.kv.impl.param.Parameter; +import oracle.kv.impl.rep.admin.RepNodeAdminAPI; +import oracle.kv.impl.sna.StorageNodeAgent; +import oracle.kv.impl.sna.StorageNodeAgentAPI; +import oracle.kv.impl.sna.StorageNodeAgentImpl; +import oracle.kv.impl.topo.RepNodeId; +import oracle.kv.impl.topo.StorageNodeId; +import oracle.kv.impl.util.CommandParser; +import oracle.kv.impl.util.ConfigUtils; +import oracle.kv.impl.util.ConfigurableService.ServiceStatus; +import oracle.kv.impl.util.FileNames; +import oracle.kv.impl.util.registry.RegistryUtils; +import oracle.kv.impl.util.server.LoggerUtils; + +/** + * Utility methods for testing related to Storage Nodes + */ +public class CreateStoreUtils { + + private static final Logger logger = + LoggerUtils.getLogger(CreateStoreUtils.class, "test"); + + public static StorageNodeAgent + createUnregisteredSNA(String rootDir, + PortFinder portFinder, + int capacity, + String configFileName, + boolean useThreads, + boolean createAdmin, + int memoryMB, + Set extraParams) + throws Exception { + + final String SEC_POLICY_STRING = + "grant {\n permission java.security.AllPermission;\n};"; + + /* generate bootstrap dir */ + File configFile = new File(rootDir + File.separator + configFileName); + + BootstrapParams bp = + new BootstrapParams(rootDir, + portFinder.getHostname(), + portFinder.getHostname(), + portFinder.getHaRange(), + null, /* servicePortRange */ + null, /* storeName */ + portFinder.getRegistryPort(), + -1, + capacity, + null, /* storageType */ + null, // sec dir + true, // runBootAdmin + null); + if (memoryMB != 0) { + bp.setMemoryMB(memoryMB*capacity); + } + ConfigUtils.createBootstrapConfig(bp, configFile.toString()); + + generateSecurityPolicyFile(rootDir); + + StorageNodeAgent sna = + startSNA(rootDir, configFileName, useThreads, createAdmin); + return sna; + } + + /** + * Start an instance of SNA assuming the bootstrap directory and file have + * been created, and specifying whether to disable services. + */ + public static StorageNodeAgent startSNA(String bootstrapDir, + String bootstrapFile, + boolean useThreads, + boolean createAdmin) + throws Exception { + + final List snaArgs = new ArrayList(); + snaArgs.add(CommandParser.ROOT_FLAG); + snaArgs.add(bootstrapDir); + snaArgs.add(StorageNodeAgent.CONFIG_FLAG); + snaArgs.add(bootstrapFile); + if (useThreads) { + snaArgs.add(StorageNodeAgent.THREADS_FLAG); + } + + StorageNodeAgentImpl sna = new StorageNodeAgentImpl(createAdmin); + sna.parseArgs(snaArgs.toArray(new String[snaArgs.size()])); + /* + * In testing environments we sometimes run into this exception + * java.rmi.NoSuchObjectException: no such object in table. + * cf. [#22835] + * It appears that retrying several times works around the bug. 
+ */ + int nretries = 0; + boolean started = false; + while (!started) { + try { + sna.start(); + started = true; + } catch (IOException e) { + if (nretries++ > 100 || + !(e instanceof NoSuchObjectException || + e.getCause() instanceof NoSuchObjectException)) { + + throw e; + } + } + } + + return sna.getStorageNodeAgent(); + } + + + public static void deleteDirs(File f) { + if (f.isDirectory()) { + for (File c : f.listFiles()) { + deleteDirs(c); + } + } + f.delete(); + } + + public static void cleanStoreDir(String testDir, String kvstorename) + throws Exception { + + File storeDir = new File(testDir + File.separator + kvstorename); + deleteDirs(storeDir); + } + + /** + * Get and wait for a RepNodeAdmin handle to reach one of the states in + * the ServiceStatus array parameter. + */ + @SuppressWarnings("null") + public static RepNodeAdminAPI + waitForRepNodeAdmin(String storeName, + String hostName, + int port, + RepNodeId rnid, + StorageNodeId snid, + long timeoutSec, + ServiceStatus targetStatus, + Logger logger) + throws RemoteException, NotBoundException { + + RemoteException remoteException = null; + NotBoundException notBoundException = null; + RepNodeAdminAPI rnai = null; + ServiceStatus status = null; + + long limitMs = System.currentTimeMillis() + 1000 * timeoutSec; + + while (System.currentTimeMillis() <= limitMs) { + + /** + * The stub may be stale, get it again on exception. + */ + if (remoteException != null || notBoundException != null) { + rnai = null; + } + try { + if (rnai == null) { + rnai = RegistryUtils.getRepNodeAdmin( + storeName, hostName, port, rnid, null, logger); + } + status = rnai.ping().getServiceStatus(); + if (status == targetStatus) { + return rnai; + } + remoteException = null; + notBoundException = null; + } catch (RemoteException e) { + remoteException = e; + } catch (NotBoundException e) { + notBoundException = e; + } + + /* + * Check now for any process startup problems before + * sleeping. + */ + if (snid != null) { + RegistryUtils.checkForStartupProblem(storeName, + hostName, + port, + rnid, + snid, + null, + logger); + } + + try { + Thread.sleep(1000); + } catch (InterruptedException ignored) { + throw new IllegalStateException("unexpected interrupt"); + } + } + + if (status != null) { + throw new IllegalStateException + ("RN current status: " + status + " target status: " + + targetStatus); + } + if (remoteException != null) { + throw remoteException; + } + throw notBoundException; + } + /** + * Get and wait for a CommandService handle to reach the requested status. + * Treat UNREACHABLE as "any" and return once the handle is acquired. + */ + @SuppressWarnings("null") + public static CommandServiceAPI waitForAdmin(String hostname, + int registryPort, + long timeoutSec, + Logger logger) + throws RemoteException, NotBoundException { + + ServiceStatus targetStatus = ServiceStatus.RUNNING; + + RemoteException remoteException = null; + NotBoundException notBoundException = null; + CommandServiceAPI admin = null; + ServiceStatus status = null; + + long limitMs = System.currentTimeMillis() + 1000 * timeoutSec; + + while (System.currentTimeMillis() <= limitMs) { + + /** + * The stub may be stale, get it again on exception. + */ + if (notBoundException != null || remoteException != null) { + admin = null; + } + try { + if (admin == null) { + admin = RegistryUtils.getAdmin(hostname, registryPort, + null, logger); + } + + status = admin.ping() ; + + /** + * Treat UNREACHABLE as "any". 
+ */ + if (targetStatus == ServiceStatus.UNREACHABLE) { + return admin; + } + if (status == targetStatus) { + return admin; + } + remoteException = null; + notBoundException = null; + } catch (RemoteException e) { + remoteException = e; + } catch (NotBoundException e) { + notBoundException = e; + } + + try { + Thread.sleep(1000); + } catch (InterruptedException ignored) { + throw new IllegalStateException("unexpected interrupt"); + } + } + + if (status != null) { + throw new IllegalStateException("Admin status: " + status + + " Target status: " + targetStatus); + } + if (remoteException != null) { + throw remoteException; + } + throw notBoundException; + } + + protected static void delay(int seconds) + throws Exception { + Thread.sleep(seconds*1000); + } + + + private static void generateSecurityPolicyFile(String rootDir) { + final String SEC_POLICY_STRING = + "grant {\n permission java.security.AllPermission;\n};"; + + File dest = new File + (rootDir + File.separator + FileNames.JAVA_SECURITY_POLICY_FILE); + if (!dest.exists()) { + FileOutputStream output = null; + try { + + dest.createNewFile(); + output = new FileOutputStream(dest); + output.write(SEC_POLICY_STRING.getBytes()); + } catch (FileNotFoundException fnf) { + } catch (IOException ie) { + } finally { + if (output != null) { + try { + output.close(); + } catch (IOException ignored) { + } + } + } + } + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/util/ElasticityTestSetup.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/util/ElasticityTestSetup.java new file mode 100644 index 00000000..64b6ef68 --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/util/ElasticityTestSetup.java @@ -0,0 +1,323 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2011, 2023 Oracle and/or its affiliates. All rights reserved. + * + */ + +package oracle.nosql.proxy.util; + +import java.io.File; +import java.util.ArrayList; +import java.util.Scanner; +import java.util.concurrent.TimeUnit; +import java.util.logging.Logger; + +import oracle.kv.impl.admin.CommandServiceAPI; +import oracle.kv.impl.admin.param.BootstrapParams; +import oracle.kv.impl.sna.StorageNodeAgent; +import oracle.kv.impl.sna.StorageNodeAgentAPI; +import oracle.kv.impl.topo.DatacenterId; +import oracle.kv.impl.topo.StorageNodeId; +import oracle.kv.impl.util.ConfigUtils; +import oracle.kv.impl.util.FileUtils; +import oracle.kv.impl.util.registry.RegistryUtils; +import oracle.kv.impl.test.TestStatus; +import oracle.nosql.proxy.ProxyTestBase; +import oracle.nosql.proxy.util.CreateStore; +import oracle.nosql.proxy.util.CreateStoreUtils; +import oracle.nosql.proxy.util.PortFinder; + +/** + * This utility is for testing query with elasticity when using non-Java + * SDKs. Non-Java SDK tests can launch it as a separate process. It will + * deploy a store according to parameters on the command line and wait for + * commands on standard input. The commands currently supported are + * expand, contract and exit/quit. Expand and contract commands can + * take optional parameter indicating number of SNs to be added/removed. + * Note that currently doing contraction requires doing expansion first + * (done with the same number of SNs), so after the test the store ends + * up in its initial state. This is the same as done in ElasticityTest.java, + * but perhaps can be improved to do contraction independent of expansion. 
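+ * A typical interaction (hypothetical invocation; the exact classpath and
+ * sandbox location depend on the test harness) might look like:
+ *   java -Dtestsandbox=/tmp/sandbox \
+ *       oracle.nosql.proxy.util.ElasticityTestSetup -numsns 3 -capacity 1
+ *   expand 2    (wait for the "expanded" status message on stderr)
+ *   contract 2  (wait for the "contracted" status message on stderr)
+ *   quit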
+ * This utility also uses its stderr to write status messages for non-Java
+ * tests to read, mainly to know when expansion/contraction has finished.
+ * Stdout is used for logging/debugging messages. The non-Java driver test
+ * will look for status messages in stderr starting with a certain prefix
+ * (see STATUS_PREFIX); apart from that, stderr can still be used normally.
+ * As with other tests, the test files will be stored in the directory pointed
+ * to by the "testsandbox" system property.
+ *
+ * The store is started according to built-in test parameters:
+ *   store - localhost:13250, name: kvstore
+ *   proxy - localhost:8095, type: cloudsim
+ *
+ * Command line options:
+ * -subdir - subdirectory within the sandbox directory to store files for a
+ * given run. Defaults to none (use the sandbox itself).
+ * -numsns - initial number of SNs in the store. Default is 3.
+ * -capacity - SN capacity. Extra SNs for expansion/contraction will use the
+ * same capacity. Default is 1.
+ * -repFactor - Replication factor. Default is 1. The number of replicas is
+ * not essential for testing the driver side, but it is needed for testing
+ * the proxy/kv side.
+ * -numPartitions - Number of partitions in the store, defaults to 10.
+ */
+public class ElasticityTestSetup extends ProxyTestBase {
+
+    private static final int START_PORT = 5000;
+    private static final int HA_RANGE = 5;
+    private static final int DEFAULT_EXTRA_SNS = 3;
+    private static final String STATUS_PREFIX =
+        "Elasticity Test Store Status: ";
+
+    private final Logger logger = Logger.getLogger(getClass().getName());
+    private File rootDir;
+    private int numSNs = 3;
+    private int capacity = 1;
+    private int repFactor = 1;
+    private int numPartitions = 10;
+    private CreateStore createStore;
+    private ArrayList<StorageNodeAgent> extraSNAs;
+    private int topoSeqNum;
+
+    static {
+        verbose = Boolean.getBoolean(VERBOSE_PROP);
+        TestStatus.setActive(true);
+    }
+
+    private static void verbose(String msg, Object...
args) { + ProxyTestBase.verbose(String.format(msg, args)); + } + + private void startStore() throws Exception { + if (rootDir.exists()) { + clearDirectory(rootDir); + } + + int port = getKVPort(); + + createStore = new CreateStore(rootDir.getAbsolutePath(), + getStoreName(), port, numSNs, repFactor, numPartitions, capacity, + 512, false, null); + + rootDir.mkdirs(); + createStore.start(); + + String endpoint = String.format("%s:%s", getHostName(), port); + verbose("Started kvstore on %s", endpoint); + + proxy = startProxy(); + verbose("Started proxy on %s", getProxyEndpoint()); + System.err.println(STATUS_PREFIX + "started"); + } + + private void shutdownStore() throws Exception { + verbose("Shutting down..."); + if (proxy != null) { + proxy.shutdown(3, TimeUnit.SECONDS); + proxy = null; + verbose("Shutdown proxy."); + } + + if (createStore != null) { + createStore.shutdown(); + createStore = null; + verbose("Shutdown kvstore."); + } + } + + private void runPlan(CommandServiceAPI cs, int planId) throws Exception { + cs.approvePlan(planId); + cs.executePlan(planId, false); + cs.awaitPlan(planId, 0, null); + cs.assertSuccess(planId); + } + + private void expandStore(int addSNCnt) throws Exception { + final String hostName = getHostName(); + final CommandServiceAPI cs = createStore.getAdmin(); + + verbose("Starting expansion..."); + + final int portsPerFinder = 20; + if (extraSNAs == null) { + extraSNAs = new ArrayList(); + } + final String poolName = CreateStore.STORAGE_NODE_POOL_NAME; + + for (int i = 0; i < addSNCnt; ++i) { + int snIdx = numSNs + extraSNAs.size(); + PortFinder pf = new PortFinder( + START_PORT + snIdx * portsPerFinder, HA_RANGE, + getHostName()); + int port = pf.getRegistryPort(); + + StorageNodeAgent sna = CreateStoreUtils.createUnregisteredSNA( + rootDir.getAbsolutePath(), + pf, + capacity, + String.format("config%s.xml", snIdx), + false /* useThreads */, + false /* createAdmin */, + 512 /* mb */, + null /* extra params */); + + extraSNAs.add(sna); + + CreateStoreUtils.waitForAdmin(hostName, port, 20, logger); + + int planId = cs.createDeploySNPlan( + String.format("deploy sn%s", snIdx + 1), + new DatacenterId(1), + hostName, + port, + "comment"); + + runPlan(cs, planId); + + StorageNodeId snId = sna.getStorageNodeId(); + verbose("Deployed SN %d", snId.getStorageNodeId()); + + cs.addStorageNodeToPool(poolName, snId); + verbose("Added %s to %s", snId.toString(), poolName); + } + + String expandTopoName = String.format("expand-%d", topoSeqNum++); + cs.copyCurrentTopology(expandTopoName); + cs.redistributeTopology(expandTopoName, poolName); + verbose("Created expanded topology %s", expandTopoName); + + int planId = cs.createDeployTopologyPlan( + "deploy expansion", expandTopoName, null); + System.err.println(STATUS_PREFIX + "start expand"); + runPlan(cs, planId); + verbose("Deployed topology %s", expandTopoName); + + createStore.setExpansionSnas( + extraSNAs.toArray(new StorageNodeAgent[0])); + verbose("Expansion done."); + System.err.println(STATUS_PREFIX + "expanded"); + } + + private void contractStore(int delSNCnt) throws Exception { + int extraSNCnt = extraSNAs != null ? 
extraSNAs.size() : 0; + if (delSNCnt <= 0 || delSNCnt > extraSNCnt) { + throw new IllegalArgumentException(String.format( + "Cannot contract by %d SNs, extra SN cnt: %d", + delSNCnt, extraSNCnt)); + } + + verbose("Starting contraction..."); + + final CommandServiceAPI cs = createStore.getAdmin(); + final String poolName = CreateStore.STORAGE_NODE_POOL_NAME; + + for (int i = 0; i < delSNCnt; ++i) { + StorageNodeId snId = new StorageNodeId( + numSNs + extraSNCnt - delSNCnt + i + 1); + cs.removeStorageNodeFromPool(poolName, snId); + verbose("Removed %s from %s", snId.toString(), + poolName); + } + + String contractTopoName = String.format("contract-%d", topoSeqNum++); + cs.copyCurrentTopology(contractTopoName); + cs.contractTopology(contractTopoName, poolName); + verbose("Created contracted topology %s", contractTopoName); + + int planId = cs.createDeployTopologyPlan( + "deploy contraction", contractTopoName, null); + System.err.println(STATUS_PREFIX + "start contract"); + runPlan(cs, planId); + verbose("Deployed topology %s", contractTopoName); + + for (int i = 0; i < delSNCnt; ++i) { + int snIdx = numSNs + extraSNCnt - delSNCnt + i; + StorageNodeId snId = new StorageNodeId(snIdx + 1); + planId = cs.createRemoveSNPlan( + String.format("remove sn%s", snId.getStorageNodeId()), snId); + runPlan(cs, planId); + verbose("Removed %s", snId.toString()); + + StorageNodeAgent sna = extraSNAs.get(snIdx - numSNs); + verbose("Shutting down %s", sna.getStorageNodeId().toString()); + sna.shutdown(true, true, "contration"); + verbose("Shutdown %s", snId.toString()); + + new File(rootDir, String.format("config%s.xml", snIdx)).delete(); + FileUtils.deleteDirectory(new File(rootDir, getStoreName())); + verbose("Removed data for %s", snId.toString()); + } + + extraSNAs.subList(extraSNCnt - delSNCnt, extraSNCnt).clear(); + createStore.setExpansionSnas( + extraSNAs.toArray(new StorageNodeAgent[0])); + verbose("Contraction done."); + System.err.println(STATUS_PREFIX + "contracted"); + } + + public ElasticityTestSetup(String[] args) { + String subDir = null; + + for(int i = 0; i < args.length; i++) { + final String arg = args[i]; + if (arg.equalsIgnoreCase("-subdir")) { + subDir = args[++i]; + } else if (arg.equalsIgnoreCase("-numsns")) { + numSNs = Integer.parseInt(args[++i]); + } else if (arg.equalsIgnoreCase("-capacity")) { + capacity = Integer.parseInt(args[++i]); + } else if(arg.equalsIgnoreCase("-repFactor")) { + repFactor = Integer.parseInt(args[++i]); + } else if (arg.equalsIgnoreCase("-numPartitions")) { + numPartitions = Integer.parseInt(args[++i]); + } + } + + rootDir = subDir != null ? + new File(getTestDir(), subDir) : new File(getTestDir()); + } + + public void run() throws Exception { + startStore(); + Scanner scanner = null; + try { + scanner = new Scanner(System.in); + while(scanner.hasNextLine()) { + String line = scanner.nextLine(); + String[] words = line.split("\\s+"); + if (words.length == 0) { + continue; + } + boolean done = false; + switch(words[0].toLowerCase()) { + case "expand": + expandStore(words.length > 1 ? + Integer.parseInt(words[1]) : DEFAULT_EXTRA_SNS); + break; + case "contract": + contractStore(words.length > 1 ? 
+ Integer.parseInt(words[1]) : DEFAULT_EXTRA_SNS); + break; + case "exit": case "quit": + done = true; + break; + } + if (done) { + break; + } + } + } finally { + if (scanner != null) { + scanner.close(); + } + } + + shutdownStore(); + } + + public static void main(String[] args) throws Exception { + ElasticityTestSetup setup = new ElasticityTestSetup(args); + setup.run(); + } + +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/util/FreePortLocator.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/util/FreePortLocator.java new file mode 100644 index 00000000..806c2d67 --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/util/FreePortLocator.java @@ -0,0 +1,178 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2011, 2023 Oracle and/or its affiliates. All rights reserved. + * + */ + +package oracle.nosql.proxy.util; + +import java.io.IOException; +import java.net.DatagramSocket; +import java.net.InetSocketAddress; +import java.net.ServerSocket; + +/** + * An iterator to iterate over the free ports on an interface. + */ +public class FreePortLocator { + + /** + * Whether to print debugging messages -- use this to find tests that are + * not closing ports. + */ + private static final boolean debug = + Boolean.getBoolean("test.debugFreePortLocator"); + + private final String hostname; + private final int portStart; + private final int portEnd; + + private int currPort; + + /** + * Constructor identifying the interface and the port range within which + * to look for free ports. The port range specified by the arguments + * must be < 32768, that is, it should be outside the dynamic port range + * that is typically configured on most machines. + */ + public FreePortLocator(String hostname, int portStart, int portEnd) { + super(); + assert portStart < portEnd; + + if ((portStart > 0x7fff) || (portEnd > 0x7fff)) { + throw new IllegalArgumentException + ("Invalid port range:" + portStart + " - " + portEnd + ". " + + "The port range must not extend past:" + 0x7fff + + " since the allocated ports could then overlap with " + + "dynamically assigned ports used by other services. "); + } + + this.hostname = hostname; + this.portStart = portStart; + this.portEnd = portEnd; + currPort = portStart; + } + + public int getPortStart() { + return portStart; + } + + public int getPortEnd() { + return portEnd; + } + + /** + * Returns the next free port. Note that it's possible that on a busy + * machine another process may grab the "free" port before it's actually + * used. + * + * There is somewhat AIsh aspect to the code below. In general it tries to + * be very conservative, using different techniques so that it works + * reasonably well on Linux, Mac OS and Windows. + * + * Note: The use of setReuseAddress after a bind operation may look + * dubious, since it runs counter to the API doc, but it helps based on + * actual tests. It's also the idiom used by Apache Camel to find a + * free port. It, at least, can't hurt. 
+ */ + public int next() { + while (++currPort < portEnd) { + + /* Try without a hostname */ + ServerSocket ss = null; + DatagramSocket ds = null; + try { + ss = new ServerSocket(currPort); + ss.setReuseAddress(true); + ds = new DatagramSocket(currPort); + ds.setReuseAddress(true); + } catch (IOException e) { + if (debug) { + System.err.println( + "FreePortLocator: " + currPort + + " busy - server, datagram: " + e); + e.printStackTrace(); + } + continue; + } finally { + if (ds != null) { + ds.close(); + } + + if (ss != null) { + try { + ss.close(); + } catch (IOException e) { + if (debug) { + System.err.println( + "FreePortLocator: " + currPort + + " busy - server close: " + e); + e.printStackTrace(); + } + continue; + } + } + } + + ss = null; + ds = null; + + /* try with a hostname */ + final InetSocketAddress sa = + new InetSocketAddress(hostname, currPort); + try { + ss = new ServerSocket(); + ss.setReuseAddress(true); + ss.bind(sa); + + ds = new DatagramSocket(sa); + ds.setReuseAddress(true); + } catch (IOException e) { + if (debug) { + System.err.println( + "FreePortLocator: " + currPort + + " busy - server, datagram hostname: " + e); + e.printStackTrace(); + } + continue; + } finally { + if (ds != null) { + ds.close(); + } + + if (ss != null) { + try { + ss.close(); + } catch (IOException e) { + if (debug) { + System.err.println( + "FreePortLocator: " + currPort + + " busy - server hostname close: " + e); + e.printStackTrace(); + } + continue; + } + } + } + + /* Survived port test gauntlet, return it. */ + if (debug) { + System.err.println( + "FreePortLocator: " + currPort + " free"); + } + return currPort; + } + + throw new IllegalStateException + ("No more ports available in the range: " + + portStart + " - " + portEnd); + } + + /** + * Skip a number of ports. + */ + public void skip(int num) { + currPort += num; + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/util/KVLiteBase.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/util/KVLiteBase.java new file mode 100644 index 00000000..fd53dbb2 --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/util/KVLiteBase.java @@ -0,0 +1,125 @@ +package oracle.nosql.proxy.util; + +import oracle.kv.impl.param.ParameterMap; +import oracle.kv.impl.param.ParameterState; +import oracle.kv.util.kvlite.KVLite; + +/** + * A base class for tests that start and stop KVLite instances. 
+ */ +public abstract class KVLiteBase extends TestBase { + static protected int multishardShards = 3; + static protected int multishardPartitions = 10 * getNumPartitions(); + + public static KVLite startKVLite(String hostName, + String storeName, + boolean useThreads, + boolean verbose, + boolean multishard, + int memoryMB, + boolean isSecure) { + return startKVLite(hostName, + storeName, + useThreads, + verbose, + multishard, + memoryMB, + isSecure, + getKVPort(), // default port + getPortRange(), // default port range + getTestDir()); // root + } + + /* + * Allow kv port, port range, and root to be specified in order to + * allow multiple kvlite instances in the same test + */ + public static KVLite startKVLite(String hostName, + String storeName, + boolean useThreads, + boolean verbose, + boolean multishard, + int memoryMB, + boolean isSecure, + int port, + String rangestr, + String rootDir) { + if (storeName == null) { + storeName = getStoreName(); + } + int capacity = 1; + int numStorageNodes = 1; + int repfactor = 1; + int partitions = getNumPartitions(); + String portstr = Integer.toString(port); + if (multishard) { + if (memoryMB == 0 || memoryMB < 768) { + memoryMB = 768; /* need extra space for multi-shard */ + } + capacity = multishardShards; + numStorageNodes = multishardShards; + repfactor = 3; + partitions = multishardPartitions; + portstr = Integer.toString(port) + KVLite.DEFAULT_SPLIT_STR + + Integer.toString(port + 30) + KVLite.DEFAULT_SPLIT_STR + + Integer.toString(port + 60); + rangestr = getPortRange() + KVLite.DEFAULT_SPLIT_STR + + Integer.toString(port + 35) + "," + + Integer.toString(port + 40) + KVLite.DEFAULT_SPLIT_STR + + Integer.toString(port + 65) + "," + + Integer.toString(port + 70); + } + + + KVLite kvlite = new KVLite(rootDir, + storeName, + portstr, + true, /* run bootadmin */ + hostName, + rangestr, + null, /* service port range */ + partitions, + null, /* mount point */ + useThreads, + isSecure, + null, /* no backup to restore */ + -1, + numStorageNodes, + repfactor, + capacity); + kvlite.setVerbose(verbose); + kvlite.setTableOnly(true); + + if (memoryMB == 0 || memoryMB < 256) { + /* use 256 if not multi-shard and not explicitly set by caller */ + memoryMB = 256; + } + kvlite.setMemoryMB(memoryMB); + + ParameterMap policies = new ParameterMap(); + policies.setParameter(ParameterState.COMMON_HIDE_USERDATA, "false"); + kvlite.setPolicyMap(policies); + + try { + kvlite.start(true); + } catch (Throwable t) { + /* + * Display any setup problems, which may happen in particular + * when debugging standalone, outside a nightly build run. + */ + System.err.println("problems starting up KvLite"); + t.printStackTrace(); + throw t; + } + + return kvlite; + } + + public static KVLite startKVLite(String hostName, + String storeName, + boolean useThreads, + boolean verbose) { + return startKVLite(hostName, storeName, useThreads, + verbose, false, 0, false); + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/util/PortFinder.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/util/PortFinder.java new file mode 100644 index 00000000..2c081051 --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/util/PortFinder.java @@ -0,0 +1,114 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2011, 2023 Oracle and/or its affiliates. All rights reserved. 
+ * + */ + +package oracle.nosql.proxy.util; + +/** + * A utility to find available ports for the RMI registry, Admin HTTP port, + * and a range of ports for use by RepNodes. + */ +public class PortFinder { + + private int registryPort; + private String hostname; + private String haRange; + private int haFirstPort; + private int haNextPort; + private final int haRangeSize; + private final FreePortLocator locator; + private int mgmtTrapPort; + private int mgmtPollPort; + private static final int RANGE = 100; + + public PortFinder(int startingPort, int haRangeSize) { + this(startingPort, haRangeSize, "localhost"); + } + + public PortFinder(int startingPort, int haRangeSize, String hostname) { + this.haRangeSize = haRangeSize; + haFirstPort = 0; + registryPort = 0; + haRange = null; + this.hostname = hostname; + + locator = + new FreePortLocator(hostname, startingPort, startingPort + RANGE); + findPorts(); + } + + public int getRegistryPort() { + return registryPort; + } + + public String getHostname() { + return hostname; + } + + public String getHaRange() { + return haRange; + } + + public int getHaRangeSize() { + return haRangeSize; + } + + public int getHaFirstPort() { + return haFirstPort; + } + + public int getMgmtPollPort() { + return mgmtPollPort; + } + + public int getMgmtTrapPort() { + return mgmtTrapPort; + } + + /** + * Supply a sequence of port numbers, starting with HaFirstPort. + */ + public int getHaNextPort() { + int nextPort; + nextPort = haNextPort++; + assert nextPort < haFirstPort + haRangeSize; + return nextPort; + } + + private void findPorts() { + /* + * When adding new ports to this class, allocate them at the end of + * this method, so that the sequence is preserved. This matters for + * cross-release testing. + */ + + registryPort = locator.next(); + + /** + * Need haRangeSize contiguous available ports... + */ + int startRange = locator.next(); + int previous = startRange; + int num = 0; + while (num <= haRangeSize) { + int current = locator.next(); + if (current != previous + 1) { + /* start over with current */ + num = 0; + startRange = previous = current; + } else { + previous = current; + ++num; + } + } + haRange = (startRange + "," + (startRange + haRangeSize - 1)); + haFirstPort = startRange; + haNextPort = haFirstPort; + + mgmtPollPort = locator.next(); + mgmtTrapPort = locator.next(); + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/util/TestBase.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/util/TestBase.java new file mode 100644 index 00000000..baf940dd --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/util/TestBase.java @@ -0,0 +1,377 @@ +package oracle.nosql.proxy.util; + +import static org.junit.Assert.fail; + +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.nio.channels.FileChannel; + +import oracle.nosql.util.HttpRequest; +import oracle.nosql.util.HttpResponse; +import oracle.nosql.common.json.JsonUtils; +import oracle.nosql.util.tmi.TenantLimits; + +import org.junit.Rule; +import org.junit.rules.TestName; +import org.junit.rules.TestRule; +import org.junit.rules.TestWatcher; +import org.junit.runner.Description; + +/** + * The base class for all spartakv unit tests. 
+ */ +public abstract class TestBase { + + /** + * root directory of the store + */ + private final static String ROOT_DIR = "kvroot"; + + /** + * store name + */ + private final static String STORE_NAME = "kvstore"; + + /** + * host name + */ + private final static String HOSTNAME = "localhost"; + + /** + * port number of the store + */ + private final static int KV_PORT = 13250; + + /** + * number of the partitions + */ + private final static int NUM_PARTITIONS = 10; + + /** + * HA port range + */ + private final static String HA_PORT_RANGE = "13255,13260"; + + /** + * port range + */ + private final static String PORT_RANGE = "13255,13260"; + + /** + * system property pointing to the test directory + */ + private final static String TEST_DIR_PROP = "testsandbox"; + + /** + * system property pointing to the directory stores logs of failure tests + */ + private final static String FAILURE_DIR = "failurecopydir"; + + /** + * default directory stores the logs of failure tests + */ + private final static String DEFAULT_FAIL_DIR = "build/failures"; + + private final static boolean TEST_TRACE = Boolean.getBoolean("test.trace"); + + protected static String scHost; + protected static Integer scPort; + protected static String scUrlBase; + protected static String scTierBase; + protected static String scDSConfigBase; + static { + doStaticSetup(); + } + + /** + * Set up URLs for talking to the SC to create and delete test tiers + * and tenants. This allows tests to modify default TenantLimits. + */ + protected static void doStaticSetup() { + scHost = System.getProperty("sc.host"); + if (scHost != null) { + scPort = Integer.parseInt(System.getProperty("sc.port", "13600")); + scUrlBase = "http://" + scHost + ":" + scPort + "/V0/service/"; + scTierBase = scUrlBase + "tier/"; + scDSConfigBase = scUrlBase + "dsconfig/"; + } + } + + /** + * The rule we use to control every test case, this rule is primarily to + * copy the testing environment, files, sub directories to another place + * for future investigation, if any of test failed. + */ + @Rule + public TestRule watchman = new TestWatcher() { + + /* Copy Environments when the test failed. */ + @Override + protected void failed(Throwable t, Description desc) { + String dirName = makeFileName(desc); + try { + copyEnvironments(dirName); + } catch (Exception e) { + throw new RuntimeException( + "Can't copy env dir to " + " after failure", e); + } + } + + @Override + protected void succeeded(Description desc){ + } + + @Override + protected void starting(Description description) { + if (TEST_TRACE) { + System.out.println("Starting test: " + + description.getMethodName()); + } + } + }; + + /** Provides the name of the current test. */ + @Rule + public final TestName testName = new TestName(); + + /** + * Copy the testing directory to other place. + */ + protected static void copyEnvironments(String path) throws Exception{ + File failureDir = getFailureCopyDir(); + + if (failureDir == null || failureDir.list() == null) { + return; + } + + /* If the testsandbox is not set, do not copy. */ + if (System.getProperty(TEST_DIR_PROP) == null || + System.getProperty(TEST_DIR_PROP).length() == 0) { + return; + } + + copyDir(getTestDirFile(), new File(failureDir, path)); + } + + /** + * Allow to set up self defined directory store failure copy. 
+ */ + private static File getFailureCopyDir() { + String dir = System.getProperty(FAILURE_DIR, DEFAULT_FAIL_DIR); + File file = new File(dir); + if (!file.isDirectory()) { + file.mkdir(); + } + + return file; + } + + /** + * get the testing directory + */ + private static File getTestDirFile() { + String dir = System.getProperty(TEST_DIR_PROP); + if (dir == null || dir.length() == 0) { + throw new IllegalArgumentException + ("System property must be set to test data directory: " + + TEST_DIR_PROP); + } + + return new File(dir); + } + + /** + * Copy everything in test destination directory to another place for + * future evaluation when test failed. + */ + private static void copyDir(File fromDir, File toDir) throws IOException { + + if (fromDir == null || toDir == null) { + throw new NullPointerException("File location error"); + } + + if (!fromDir.isDirectory()) { + throw new IllegalStateException(fromDir + " should be a directory"); + } + + if (!fromDir.exists()) { + throw new IllegalStateException(fromDir + " does not exist"); + } + + if (!toDir.exists() && !toDir.mkdirs()) { + throw new IllegalStateException("Unable to create copy dest dir:" + + toDir); + } + + File [] fileList = fromDir.listFiles(); + if (fileList != null && fileList.length != 0) { + for (File file : fileList) { + if (file.isDirectory()) { + copyDir(file, new File(toDir, file.getName())); + } else { + copyFile(file, new File(toDir, file.getName())); + } + } + } + } + + /** + * Copy a file + * @param sourceFile the file to copy from, which must exist + * @param destFile the file to copy to. The file is created if it does + * not yet exist. + */ + private static void copyFile(File sourceFile, File destFile) + throws IOException { + + if (!destFile.exists()) { + destFile.createNewFile(); + } + + try (final FileInputStream source = new FileInputStream(sourceFile); + final FileOutputStream dest = new FileOutputStream(destFile)) { + final FileChannel sourceChannel = source.getChannel(); + dest.getChannel().transferFrom(sourceChannel, 0, + sourceChannel.size()); + } + } + + /** + * Get failure copy directory name. + */ + private String makeFileName(Description desc) { + String name = desc.getClassName() + "-" + desc.getMethodName(); + return name; + } + + /** + * get the string of testing directory + */ + protected static String getTestDir() { + String dir = System.getProperty(TEST_DIR_PROP); + if (dir == null) { + fail("System property \"testsandbox\" must be set"); + } + return dir; + } + + /** + * remove all files and directories from the test instance directory. 
+ */ + public static void cleanupTestDir() { + File testDir = new File(getTestDir()); + if (!testDir.exists()) { + return; + } + clearDirectory(testDir); + } + + /** + * clears out the contents of the directory, recursively + */ + public static void clearDirectory(File dir) { + for (File file : dir.listFiles()) { + if (file.isDirectory()) { + clearDirectory(file); + } + boolean deleteDone = file.delete(); + assert deleteDone: "Couldn't delete " + file; + } + } + + /** + * return the name of the root directory + */ + public static String getRootDir() { + return ROOT_DIR; + } + + /** + * return the store name + */ + public static String getStoreName() { + return STORE_NAME; + } + + /** + * return the host name + */ + public static String getHostName() { + return HOSTNAME; + } + + /** + * return the port number of the store + */ + public static int getKVPort() { + return KV_PORT; + } + + /** + * return the number of the partitions of the store + */ + public static int getNumPartitions() { + return NUM_PARTITIONS; + } + + /** + * return the HA port range of the store + */ + public static String getHAPortRange() { + return HA_PORT_RANGE; + } + + /** + * return the port range of the store + */ + public static String getPortRange() { + return PORT_RANGE; + } + + /** + * return the name of the system property that + * points to the testing directory + */ + public static String getTestDirProp() { + return TEST_DIR_PROP; + } + + /* + * Add a tier + */ + protected static void addTier(String tenantId, TenantLimits limits) { + if (scTierBase == null) { + return; + } + + final String tierUrl = scTierBase + tenantId; + + HttpRequest httpRequest = new HttpRequest().disableRetry(); + HttpResponse response = + httpRequest.doHttpPost(tierUrl, JsonUtils.print(limits)); + if (200 != response.getStatusCode()) { + fail("addTier failed: " + response); + } + } + + /* + * Delete a tier + */ + protected static void deleteTier(String tenantId) { + if (scTierBase == null) { + return; + } + + final String tierUrl = scTierBase + tenantId; + + HttpRequest httpRequest = new HttpRequest().disableRetry(); + HttpResponse response = httpRequest.doHttpDelete(tierUrl, null); + /* allow 404 -- not found -- in this path */ + if (response.getStatusCode() != 200 && + response.getStatusCode() != 404) { + fail("deleteTier failed: " + response); + } + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/query/JsonLoaderCloud.java b/httpproxy/tests/src/main/java/oracle/nosql/query/JsonLoaderCloud.java new file mode 100644 index 00000000..63ab65a0 --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/query/JsonLoaderCloud.java @@ -0,0 +1,206 @@ +/*- + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * See the file LICENSE for redistribution information. 
+ *
+ */
+package oracle.nosql.query;
+
+import java.io.IOException;
+import java.util.Map;
+
+import oracle.nosql.common.qtf.JsonLoader;
+import oracle.nosql.driver.NoSQLHandle;
+import oracle.nosql.driver.ops.GetTableRequest;
+import oracle.nosql.driver.ops.PutRequest;
+import oracle.nosql.driver.ops.PutResult;
+import oracle.nosql.driver.ops.TableResult;
+import oracle.nosql.driver.values.FieldValue;
+import oracle.nosql.driver.values.JsonNullValue;
+import oracle.nosql.driver.values.JsonOptions;
+
+public class JsonLoaderCloud extends JsonLoader {
+
+    private final NoSQLHandle handle;
+
+    private PutRequest.Option option;
+
+    private String dataFile;
+
+    /**
+     * Load JSON records from a file into one or more tables. The target
+     * table(s) should be specified in the file with a "Table: " line before
+     * the records of that table.
+     *
+     * @param handle the NoSQLHandle interface.
+     * @param fileName the file that contains JSON records.
+     * @param options PutRequest.Option used for the put operation.
+     *
+     * @return a map of table name to the number of records loaded.
+     */
+    public static Map<String, Long> loadJsonFromFile(final NoSQLHandle handle,
+                                                     final String fileName,
+                                                     final PutRequest.Option
+                                                     options)
+        throws IllegalArgumentException, IOException {
+
+        return loadJsonFromFile(handle, null, fileName, options);
+    }
+
+    /**
+     * Load JSON records from a file to the specified table.
+     *
+     * An overloaded version of
+     * {@link #loadJsonFromFile(NoSQLHandle, String, PutRequest.Option)};
+     * the difference is that the 2nd argument "table" specifies the target
+     * table to load records into.
+     */
+    public static Map<String, Long> loadJsonFromFile(final NoSQLHandle handle,
+                                                     final TableResult table,
+                                                     final String fileName,
+                                                     final PutRequest.Option
+                                                     options)
+        throws IllegalArgumentException, IOException {
+
+        return new JsonLoaderCloud(handle).loadJsonToTables(table, fileName,
+                                                            options, true);
+    }
+
+    public JsonLoaderCloud(final NoSQLHandle handle) {
+        this.handle = handle;
+    }
+
+    private void setWriteOptions(PutRequest.Option option) {
+        this.option = option;
+    }
+    /**
+     * Load JSON records from a file to tables.
+     *
+     * @param table the initial table to which JSON records are loaded.
+     * @param fileName the file that contains JSON records.
+     * @param options PutRequest.Option used to put records.
+     * @param exitOnFailure whether to stop if a record fails to be put.
+     *
+     * @return a map of table name to the number of records loaded.
+     */
+    public Map<String, Long> loadJsonToTables(TableResult table,
+                                              String fileName,
+                                              PutRequest.Option options,
+                                              boolean exitOnFailure)
+        throws IllegalArgumentException, IOException {
+
+        setWriteOptions(options);
+        dataFile = fileName;
+
+        return loadRecordsFromFile(table, fileName, Type.JSON,
+                                   false, exitOnFailure);
+    }
+
+    /**
+     * Load JSON records from a file to the specified table.
+     *
+     * @param table the target table to which JSON records are loaded; records
+     * for other tables will be skipped.
+     * @param fileName the file that contains JSON records.
+     * @param options the PutRequest.Option used to put records.
+     * @param exitOnFailure whether to stop if a record fails to be put.
+     *
+     * @return The total number of records loaded into the target table.
+ */ + public long loadJsonToTable(TableResult table, + String fileName, + PutRequest.Option options, + boolean exitOnFailure) + throws IllegalArgumentException, IOException { + + setWriteOptions(options); + Map results = loadRecordsFromFile(table, + fileName, + Type.JSON, + true, + exitOnFailure); + if (results.isEmpty()) { + return 0; + } + final String tableName = table.getTableName(); + assert(results.containsKey(tableName)); + return results.get(tableName); + } + + @Override + public void tallyCount(final Map result, + final Object table, + final long count) { + if (table == null || count == 0) { + return; + } + final String name = ((TableResult)table).getTableName(); + if (result.containsKey(name)) { + long total = result.get(name) + count; + result.put(name, total); + } else { + result.put(name, count); + } + } + + @Override + public Object getTargetTable(String name) { + try { + GetTableRequest tableReq = + new GetTableRequest().setTableName(name); + TableResult tableRes = handle.getTable(tableReq); + return tableRes; + } catch(Exception e) { + return null; + } + } + + @Override + public boolean checkSkipTable(Object table, String name) { + boolean ret = false; + if (table != null) { + ret = !((TableResult)table).getTableName().equalsIgnoreCase(name); + } + return ret; + } + + @Override + public void checkValidFieldForCSV(Object tableObj) + throws IllegalArgumentException { + throw new IllegalArgumentException("spartakv tool does not support " + + "CSV type for now"); + } + + @Override + public boolean putRecord(Object target, String rowLine, Type type) + throws RuntimeException { + + JsonOptions options = + new JsonOptions().setAllowNonNumericNumbers(true); + + String tableName = ((TableResult)target).getTableName(); + PutRequest putReq = new PutRequest(). + setTableName(tableName). + setValueFromJson(rowLine, options); + + if (option != null) { + putReq = putReq.setOption(option); + } + + if (dataFile.contains("row_metadata")) { + FieldValue info = putReq.getValue().get("info"); + if (info == null) { + info = JsonNullValue.getInstance(); + } + String metadata = info.toJson(); + putReq.setRowMetadata(metadata); + } + + PutResult putRes = handle.put(putReq); + //On a successful operation the value returned by getVersion() + //is non-null. On failure that value is null. + return putRes.getVersion() == null ? false: true; + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/query/PrimIndexSetup.java b/httpproxy/tests/src/main/java/oracle/nosql/query/PrimIndexSetup.java new file mode 100644 index 00000000..fc667b44 --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/query/PrimIndexSetup.java @@ -0,0 +1,72 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2011, 2018 Oracle and/or its affiliates. All rights reserved. + * + */ + +package oracle.nosql.query; + +import oracle.nosql.driver.http.NoSQLHandleImpl; +import oracle.nosql.driver.ops.PutRequest; +import oracle.nosql.driver.ops.PutResult; +import oracle.nosql.driver.values.MapValue; + +import java.util.ArrayList; +import java.util.List; + +import org.junit.Assert; + +/** + * Setup implementation for cases that depend on //data1. 
+ */ +public class PrimIndexSetup extends QTDefaultImpl { + + String tableStatement = + "CREATE TABLE Foo( \n" + + " id1 INTEGER, \n" + + " id2 DOUBLE, \n" + + " id3 ENUM(tok0, tok1, tok2), \n" + + " firstName STRING, \n" + + " lastName STRING, \n" + + " age INTEGER, \n" + + " id4 STRING, \n" + + "primary key (id1, id2, id3, id4))"; + + @Override + public void before() { + opts.verbose("Run Before: PrimIndexSetup"); + + List stmts = new ArrayList(); + stmts.add(tableStatement); + executeStatements(stmts); + + NoSQLHandleImpl handle = (NoSQLHandleImpl)QTest.getHandle(); + + for (int i = 1; i < 6; i++) { + + for (int j = 0; j < 3; ++j) { + MapValue mv = new MapValue().put("id1", i) + .put("id2", i * 10.0 + j) + .put("id3", "tok" + (i % 3)) + .put("id4", ("id4-" + i)) + .put("firstName", ("first" + i)) + .put("lastName", ("last" + i)) + .put("age", i+10); + PutRequest req = new PutRequest().setValue(mv) + .setTableName("Foo"); + PutResult res = handle.put(req); + Assert.assertNotNull(res); + } + } + } + + @Override + public void after() { + opts.verbose("Run After: PrimIndexSetup"); + + List stmts = new ArrayList(); + stmts.add("DROP TABLE Foo"); + executeStatements(stmts); + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/query/PrimIndexSetup2.java b/httpproxy/tests/src/main/java/oracle/nosql/query/PrimIndexSetup2.java new file mode 100644 index 00000000..24b00790 --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/query/PrimIndexSetup2.java @@ -0,0 +1,86 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2011, 2018 Oracle and/or its affiliates. All rights reserved. + * + */ + +package oracle.nosql.query; + +import java.util.ArrayList; +import java.util.List; +import java.util.Random; + +import oracle.nosql.driver.http.NoSQLHandleImpl; +import oracle.nosql.driver.ops.PutRequest; +import oracle.nosql.driver.ops.PutResult; +import oracle.nosql.driver.values.MapValue; + +import org.junit.Assert; + +/** + * Setup implementation for cases that depend on //data1. 
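+ * Creates table Foo with a shard key of (id1, id2) and loads 300 rows
+ * generated from a fixed-seed Random, so the data set is reproducible.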
+ */ +public class PrimIndexSetup2 extends QTDefaultImpl { + + String tableStatement = + "CREATE TABLE Foo( \n" + + " id1 INTEGER, \n" + + " id2 INTEGER, \n" + + " id3 INTEGER, \n" + + " firstName STRING, \n" + + " lastName STRING, \n" + + " age INTEGER, \n" + + " id4 STRING, \n" + + "primary key (shard(id1, id2), id3, id4))"; + + Random rand = new Random(1); + + int num1 = 20; + int num2 = 5; + int num3 = 3; + + public PrimIndexSetup2() { + } + + @Override + public void before() { + opts.verbose("Run Before: PrimIndexSetup2"); + + List stmts = new ArrayList(); + stmts.add(tableStatement); + executeStatements(stmts); + + NoSQLHandleImpl handle = (NoSQLHandleImpl)QTest.getHandle(); + + for (int i = 0; i < num1; i++) { + + for (int j = 0; j < num2; ++j) { + + for (int k = 0; k < num3; ++k) { + + MapValue mv = new MapValue().put("id1", rand.nextInt(20)) + .put("id2", rand.nextInt(5)) + .put("id3", rand.nextInt(5)) + .put("id4", ("id4-" + i)) + .put("firstName", ("first" + i)) + .put("lastName", ("last" + i)) + .put("age", i+10); + PutRequest req = new PutRequest().setValue(mv) + .setTableName("Foo"); + PutResult res = handle.put(req); + Assert.assertNotNull(res); + } + } + } + } + + @Override + public void after() { + opts.verbose("Run After: PrimIndexSetup2"); + + List stmts = new ArrayList(); + stmts.add("DROP TABLE Foo"); + executeStatements(stmts); + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/query/PrimIndexSetup3.java b/httpproxy/tests/src/main/java/oracle/nosql/query/PrimIndexSetup3.java new file mode 100644 index 00000000..9d2bf3de --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/query/PrimIndexSetup3.java @@ -0,0 +1,33 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2011, 2018 Oracle and/or its affiliates. All rights reserved. + * + */ + +package oracle.nosql.query; + +import java.util.ArrayList; +import java.util.List; +/** + * Setup implementation for cases that depend on //data1. + */ +public class PrimIndexSetup3 extends PrimIndexSetup2 { + + public PrimIndexSetup3() { + num2 = 15; + } + + @Override + public void before() { + + super.before(); + + String indexStatement = + "CREATE INDEX idx on Foo (age)"; + + List stmts = new ArrayList(); + stmts.add(indexStatement); + executeStatements(stmts); + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/query/ProxyOperation.java b/httpproxy/tests/src/main/java/oracle/nosql/query/ProxyOperation.java new file mode 100644 index 00000000..951923cc --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/query/ProxyOperation.java @@ -0,0 +1,129 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2011, 2018 Oracle and/or its affiliates. All rights reserved. 
+ * + */ + +package oracle.nosql.query; + +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; + +import oracle.nosql.driver.NoSQLHandle; +import oracle.nosql.driver.NoSQLHandleConfig; +import oracle.nosql.driver.NoSQLHandleFactory; +import oracle.nosql.proxy.ProxyTestBase; +import oracle.nosql.proxy.security.SecureTestUtil; +import oracle.nosql.util.tmi.TableRequestLimits; +import oracle.nosql.util.tmi.TenantLimits; +/** + * Class extends ProxyTestBase for set up proxy test environment + */ +public class ProxyOperation extends ProxyTestBase{ + protected static NoSQLHandle staticHandle; + static NoSQLHandle sslHandle; + + /* + * An instance with non-default limits to make tests run reasonably + */ + + protected static TenantLimits qtfTenantLimits = + TenantLimits.getNewDefault(); + static { + qtfTenantLimits.setNumTables(100) + .setTenantSize(500000) + .setTenantReadUnits(8000000) + .setTenantWriteUnits(4000000) + .setDdlRequestsRate(4000) + .setTableLimitReductionsRate(40); + TableRequestLimits tableLimits = + qtfTenantLimits.getStandardTableLimits(); + tableLimits.setTableSize(500000) + .setTableReadUnits(200000) + .setTableWriteUnits(200000) + .setIndexesPerTable(40) + .setSchemaEvolutions(6); + } + + @BeforeClass + public static void staticSetUp() + throws Exception { + System.setProperty(ProxyTestBase.KVLITE_MULTISHARD_PROP, "true"); + ProxyTestBase.staticSetUp(qtfTenantLimits); + + staticHandle = configHandleStatic(getProxyEndpoint()); + setOpThrottling(getTenantId(), NO_OP_THROTTLE); + /* + * Only configure https if not running in minicloud, for now. + */ + if (!cloudRunning && SSLRunning) { + sslHandle = configHandleStatic("https://"+ hostName + ":" + + PROXY_HTTPS_PORT); + } + } + + @AfterClass + public static void staticTearDown() + throws Exception { + if (staticHandle != null) { + staticHandle.close(); + } + + setOpThrottling(getTenantId(), DEFAULT_OP_THROTTLE); + + if (sslHandle != null) { + sslHandle.close(); + } + String path = "oracle.nosql.query.QTest.test"; + copyEnvironments(path); + ProxyTestBase.staticTearDown(); + } + + @Override + @Before + public void setUp() throws Exception { + } + + @Override + @After + public void tearDown() throws Exception { + } + + public static NoSQLHandle getNosqlHandle() { + return staticHandle; + } + + private static NoSQLHandle configHandleStatic(String endpoint) { + return configHandleStatic(endpoint, onprem); + } + + protected static NoSQLHandle configHandleStatic(String endpoint, + boolean onprem) { + + NoSQLHandleConfig hconfig = new NoSQLHandleConfig(endpoint); + + /* + * 5 retries, default retry algorithm + */ + hconfig.configureDefaultRetryHandler(5, 0); + hconfig.setRequestTimeout(30000); + + SecureTestUtil.setAuthProvider(hconfig, isSecure(), + onprem, getTenantId()); + return getHandleStatic(hconfig); + } + + /** + * Allows classes to create a differently-configured NoSQLHandle. + */ + protected static NoSQLHandle getHandleStatic(NoSQLHandleConfig config) { + /* + * Open the handle + */ + return NoSQLHandleFactory.createNoSQLHandle(config); + } + +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/query/QTCaseCloud.java b/httpproxy/tests/src/main/java/oracle/nosql/query/QTCaseCloud.java new file mode 100644 index 00000000..25a78950 --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/query/QTCaseCloud.java @@ -0,0 +1,780 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2011, 2023 Oracle and/or its affiliates. 
All rights reserved. + * + */ + +package oracle.nosql.query; + +import static oracle.nosql.proxy.ProxyTestBase.getEffectiveMaxReadKB; +import static org.junit.Assert.assertTrue; + +import java.io.ByteArrayInputStream; +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.math.BigDecimal; +import java.util.List; +import java.util.Map; + +import org.junit.Assert; + +import oracle.nosql.common.qtf.FileUtils; +import oracle.nosql.common.qtf.QTCase; +import oracle.nosql.driver.Consistency; +import oracle.nosql.driver.NoSQLHandle; +import oracle.nosql.driver.ops.PrepareRequest; +import oracle.nosql.driver.ops.PrepareResult; +import oracle.nosql.driver.ops.PreparedStatement; +import oracle.nosql.driver.ops.QueryRequest; +import oracle.nosql.driver.ops.QueryResult; +import oracle.nosql.driver.values.BinaryValue; +import oracle.nosql.driver.values.BooleanValue; +import oracle.nosql.driver.values.DoubleValue; +import oracle.nosql.driver.values.FieldValue; +import oracle.nosql.driver.values.FieldValue.Type; +import oracle.nosql.driver.values.IntegerValue; +import oracle.nosql.driver.values.JsonNullValue; +import oracle.nosql.driver.values.JsonOptions; +import oracle.nosql.driver.values.JsonUtils; +import oracle.nosql.driver.values.LongValue; +import oracle.nosql.driver.values.MapValue; +import oracle.nosql.driver.values.NullValue; +import oracle.nosql.driver.values.NumberValue; +import oracle.nosql.driver.values.StringValue; +import oracle.nosql.driver.values.TimestampValue; +import oracle.nosql.proxy.ProxyTestBase; + +/** + * Class representing a single test case. + */ +public class QTCaseCloud extends QTCase { + + NoSQLHandle handle; + + boolean isOnPrem; + + void setHandle(NoSQLHandle handle) { + this.handle = handle; + } + + void setIsOnPrem() { + isOnPrem = true; + } + + @Override + public void run() + throws IOException { + boolean r = runQuery(); + Assert.assertTrue("QTCase failure: " + this, r); + } + + @Override + public boolean runQuery() + throws IOException { + + String query = FileUtils.readFileToString(queryFile).trim(); + String queryName = opt.relativize(getQueryFile()); + + String updQuery = null; + int updPos = query.indexOf("update"); + int insPos = query.indexOf("insert"); + int delPos = query.indexOf("delete"); + int selPos = query.indexOf("select"); + boolean isUpdateQuery = (updPos >= 0); + + updPos = (insPos >= 0 ? insPos : (delPos >= 0 ? 
delPos : updPos)); + + if (updPos >= 0 && selPos >= 0 && selPos > updPos) { + updQuery = query.substring(0, selPos); + query = query.substring(selPos); + } + + String expectedResult = null; + boolean haveExpectedResult = false; + boolean generatedExpectedResult = false; + + if (resultFile.exists()) { + haveExpectedResult = true; + expectedResult = skipComments( + FileUtils.readFileToString(getResultFile())); + } + + boolean checkQueryPlan = run.compileOnly; + + //System.out.println("Executing query: " + queryName); + opt.progress("Executing query: " + queryName); + opt.verbose(" Query: '" + query); + + if (handle == null) { + throw new IllegalStateException("No handle available."); + } + + boolean ret = false; + boolean finished = false; + + PreparedStatement selPrep = null; + PreparedStatement updPrep = null; + PreparedStatement prep = null; + + QueryRequest selReq = new QueryRequest(); + selReq.setConsistency(Consistency.ABSOLUTE); + selReq.setQueryName(queryName); + selReq.setTraceLevel(3/*opt.getTraceLevel()*/); + selReq.setLogFileTracing(false); + selReq.setInTestMode(true); + selReq.setMaxServerMemoryConsumption(200*1024); + /* + if (queryFile.getPath().contains("inner_joins/q/q23")) { + selReq.setLogFileTracing(true); + selReq.setTraceLevel(3); + } + */ + if (isOnPrem) { + /* + * The fix to KVSTORE-1228 was included into 21.3.1 but not in + * earlier versions, the configured kv in proxy is still 21.2.19, + * setting the batch limit may lead to some unnest test failures, + * so set the limit only when run against kv >= 21.3.1 + */ + if (ProxyTestBase.checkKVVersion(21, 3, 1)) { + if (!isUpdateQuery) { + selReq.setLimit(opt.getBatchSize()); + } + } + } else if (opt.getReadKBLimit() == 0) { + /* Use specific read limit, which exhibits the bug fixed in + * SR [#27735] */ + if (queryFile.getPath().contains("prim_index_sort/q/sort1")) { + selReq.setMaxReadKB(135); + } else { + if (!isUpdateQuery) { + selReq.setMaxReadKB(15); + } + } + } else { + if (!isUpdateQuery) { + selReq.setMaxReadKB(opt.getReadKBLimit()); + } + //System.out.println("Executing query: " + queryName + + // " with readKBLimit " + opt.getReadKBLimit()); + } + + if (queryFile.getPath().contains("offsetlimit")) { + selReq.setMaxMemoryConsumption(600000); + } else if (queryFile.getPath().contains("prim_index_sort")) { + selReq.setMaxMemoryConsumption(250000); + } else if (queryFile.getPath().contains("nonulls_idx")) { + selReq.setMaxMemoryConsumption(1000000); + } else if (queryFile.getPath().contains("idc_geojson")) { + selReq.setMaxMemoryConsumption(650000); + } else if (queryFile.getPath().contains("join")) { + selReq.setMaxMemoryConsumption(600000); + selReq.setTimeout(10000); + } else { + selReq.setMaxMemoryConsumption(60000); + } + + QueryRequest updReq = null; + if (updQuery != null) { + updReq = new QueryRequest(); + updReq.setTraceLevel(opt.getTraceLevel()); + if (!isUpdateQuery) { + updReq.setMaxReadKB(opt.getReadKBLimit()); + } + selReq.setConsistency(Consistency.ABSOLUTE); + } + + PrepareRequest preq = new PrepareRequest(); + preq.setGetQueryPlan(true); + PrepareResult pres; + + try { + try { + if (updQuery != null && updReq != null) { + preq.setStatement(updQuery); + pres = handle.prepare(preq); + updPrep = pres.getPreparedStatement(); + updReq.setPreparedStatement(updPrep); + + preq.setStatement(query); + pres = handle.prepare(preq); + selPrep = pres.getPreparedStatement(); + selReq.setPreparedStatement(selPrep); + + prep = updPrep; + } else { + preq.setStatement(query); + pres = handle.prepare(preq); + 
selPrep = pres.getPreparedStatement(); + selReq.setPreparedStatement(selPrep); + + prep = selPrep; + } + } catch (Exception e) { + if (haveExpectedResult) { + ret = checkException(true, expectedResult, e); + finished = true; + } else { + opt.failure("Missing expected result file and exception " + + "during prepare: " + e.getMessage() + + "\n" + getStackTrace(e)); + throw e; + } + } + + if (prep != null && checkQueryPlan) { + + if (!haveExpectedResult) { + String queryPlan = prep.getQueryPlan(); + + genTmpFile(COMPILED_QUERY_PLAN, queryPlan); + + generatedExpectedResult = true; + } else { + ret = checkQueryPlan(prep, expectedResult); + } + + finished = true; + } + + if (!finished) { + bindVars(updQuery, query, prep); + + if (!haveExpectedResult) { + StringBuffer sb = new StringBuffer(); + + do { + QueryResult res = handle.query(selReq); + List list = res.getResults(); + for (MapValue val : list) { + sb.append(val.toString()).append("\n"); + } + } while (!selReq.isDone()); + + genTmpFile(UNORDERED_RESULT, sb.toString()); + + generatedExpectedResult = true; + } else { + ret = checkResults(updReq, selReq, expectedResult); + } + } + + if (ret) { + opt.progress(" Passed"); + + } else if (generatedExpectedResult) { + File tmpResultFile = + new File(resultFile.getAbsolutePath() + ".tmp"); + opt.failure("Missing result file: " + + opt.relativize(resultFile) + + " generated expected file " + + opt.relativize(tmpResultFile) + "."); + + } else { + opt.failure(" FAILED !!! results don't match."); + if (prep != null && !checkQueryPlan) { + opt.failure("\tQuery Plan:\n" + prep); + } + if (!checkQueryPlan) { + if (updReq != null) { + updReq.printTrace(System.out); + } else { + selReq.printTrace(System.out); + } + } + } + + return ret; + + } catch (Exception e) { + String st = getStackTrace(e); + opt.failure(" FAILED !!! with exception."); + opt.failure(" Exception: " + e.getMessage() + "\n" + st + + "\n\n\tQuery plan:\n" + prep); + genActualFile("FAILED !!! with exception:", "Exception: " + + e.getMessage() + "\n" + st + "\n\n\tQuery plan:\n" + prep); + return false; + } catch (Throwable e) { + opt.failure(" FAILED !!! 
with throwable."); + opt.failure(" Throwable: " + e.getMessage() + + "\n\n\tQuery plan:\n" + prep); + e.printStackTrace(); + return false; + } finally { + if (queryFile.getPath().contains("delete")) { + getRun().suite.after(); + getRun().suite.before(); + } + } + } + + private void bindVars( + String updQuery, + String query, + PreparedStatement prep) { + + if (updQuery != null) { + query = updQuery; + } + + int insPos = query.indexOf("insert"); + if (insPos < 0) { + insPos = query.indexOf("update"); + } + + for (Map.Entry entry : run.suite.vars.entrySet()) { + + String qtfName = entry.getKey(); + String varName = qtfName; + int varPos = -1; + + if (varName.startsWith("$$")) { + + int pos = varName.indexOf('_'); + String queryName = varName.substring(pos+1); + + if (!queryName.equals(queryFile.getName())) { + continue; + } + + String varPosStr = varName.substring(2, pos); + varPos = Integer.parseInt(varPosStr); + + varName = varName.substring(0, pos); + + } else { + int idx = query.indexOf(varName); + if (idx < 0 || + (insPos < 0 && + query.charAt(idx + varName.length()) != ' ')) { + continue; + } + } + + String strValue = run.suite.vars.get(qtfName); + String strType = ((QTSuiteCloud)run.suite).varsType.get(qtfName); + String strDeclType = run.suite.varsDeclType.get(qtfName); + FieldValue.Type type = null; + FieldValue value; + + if (strDeclType != null) { + if (strDeclType.equals("int")) { + type = Type.INTEGER; + } else if (strDeclType.equals("long")) { + type = Type.LONG; + } else if (strDeclType.equals("double")) { + type = Type.DOUBLE; + } else if (strDeclType.equals("number")) { + type = Type.NUMBER; + } else if (strDeclType.equals("string")) { + type = Type.STRING; + } else if (strDeclType.equals("boolean")){ + type = Type.BOOLEAN; + } else if (strDeclType.equals("json")) { + type = Type.MAP; + } else { + throw new IllegalArgumentException( + "Unknown bind variable type in test.config file. 
" + + "Variable name: " + varName + " type: " + strDeclType); + } + + value = createValueFromString(strValue, type); + + } else { + if (strType == null) { + type = Type.NULL; + } else if (strType.equals("jnull")) { + type = Type.JSON_NULL; + } else if (strType.equals("integer")){ + type = Type.INTEGER; + } else if (strType.equals("long")){ + type = Type.LONG; + } else if (strType.equals("double")){ + type = Type.DOUBLE; + } else if (strType.equals("number")) { + type = Type.NUMBER; + } else if (strType.equals("json")) { + type = Type.MAP; + } else if (strType.equals("array")) { + type = Type.ARRAY; + } else if (strType.equals("boolean")){ + type = Type.BOOLEAN; + } else { + type = Type.STRING; + } + + value = createValueFromString(strValue, type); + } + + opt.verbose(" Bind var: " + varName + " = " + value); + + try { + if (varPos >= 0) { + prep.setVariable(varPos + 1, value); + } else { + prep.setVariable(varName, value); + } + } catch (IllegalArgumentException e) { + continue; + } + } + } + + private boolean checkQueryPlan( + PreparedStatement ps, + String expectedResult) throws IOException { + + expectedResult = skipComments(expectedResult); + + // find first line of the rest + String resultType; + + do { + int i = expectedResult.indexOf(EOL); + if (i < 0) { + throw new IllegalArgumentException("Invalid result file: " + + getOpt().relativize(resultFile)); + } + + resultType = expectedResult.substring(0, i).trim().toLowerCase(); + expectedResult = expectedResult.substring(i + 1); + } while (resultType.length() == 0); + + String expected = expectedResult.trim(); + + if (!COMPILED_QUERY_PLAN.equals(resultType)) { + throw new IllegalStateException("checkQueryPlan() should be " + + "called only for results with 'compiled-query-plan'"); + } + + String actual = ps.getQueryPlan().trim(); + boolean pass = expected.equals(actual); + String act = actual; + String exp = expected; + String aLine = null; + String eLine = null; + + if (!pass) { + pass = true; + // check line by line + do { + int aIndex = act.indexOf(EOL); + int eIndex = exp.indexOf(EOL); + + if (aIndex == eIndex && aIndex < 0) { + break; + } else if (aIndex < 0 || eIndex < 0) { + pass = false; + break; + } + + aLine = act.substring(0, aIndex); + eLine = exp.substring(0, eIndex); + + if (!aLine.trim().equals(eLine.trim())) { + pass = false; + break; + } + act = act.substring(aIndex + 1); + exp = exp.substring(eIndex + 1); + } while (true); + + if (!pass) { + + if (opt.isUpdateQueryPlans()) { + updateQueryPlanInResultFile(actual); + pass = true; + } else { + opt.failure("Test FAILED: " + opt.relativize(queryFile)); + + opt.failure("Query compilation plan not matching: " + + "\nEXPECTED:\n" + expected + "\n" + + "\nACTUAL:\n" + actual + "\n"); + + if (aLine != null && eLine != null) { + opt.failure("\nexpected line: " + eLine + + "\nactual line: " + aLine + "\n"); + } + genActualFile(COMPILED_QUERY_PLAN, actual); + } + } + } + + return pass; + } + + private boolean checkResults( + QueryRequest updReq, + QueryRequest selReq, + String expectedResult) { + + expectedResult = skipComments(expectedResult); + + // find first line of the rest + int i = expectedResult.indexOf(EOL); + if (i < 0) { + throw new IllegalArgumentException("Invalid result file: " + + getOpt().relativize(resultFile)); + } + + String resultType = expectedResult.substring(0, i).trim() + .toLowerCase(); + String results = expectedResult.substring(i + 1).trim(); + + if (UNORDERED_RESULT.equals(resultType)) { + return checkUnorderedResults(updReq, selReq, results); + + } else 
if (ORDERED_RESULT.equals(resultType)) { + return checkOrderedResults(updReq, selReq, results); + + } else if (RUNTIME_EXCEPTION.equals(resultType)) { + + QueryRequest req = (updReq != null ? updReq : selReq); + + do { + try { + QueryResult res = handle.query(req); + res.getResults(); + } catch (Exception e) { + return checkException(false, expectedResult, e); + } + } while (!req.isDone()); + + opt.failure(" Test " + this + " FAILED:" + + "\nA runtime exception was expected, but none was raised."); + return false; + + } else { + throw new IllegalArgumentException( + "Invalid result file: " + + getOpt().relativize(resultFile) + + "\nResult type: " + resultType); + } + } + + private boolean checkUnorderedResults( + QueryRequest updReq, + QueryRequest selReq, + String expectedResult) { + return checkEachResult(updReq, selReq, expectedResult, false); + } + + private boolean checkOrderedResults( + QueryRequest updReq, + QueryRequest selReq, + String expectedResult) { + return checkEachResult(updReq, selReq, expectedResult, true); + } + + private boolean checkEachResult( + QueryRequest updReq, + QueryRequest selReq, + String expectedResult, + boolean ordered) { + + String[] expected = expectedResult.split(EOL); + int numExpected = expected.length; + + if (expected.length == 1 && expected[0].equals("")) { + expected[0] = null; + numExpected = 0; + } + + MapValue[] expectedVals = new MapValue[expected.length]; + + JsonOptions options = new JsonOptions(); + options.setAllowNonNumericNumbers(true); + options.setMaintainInsertionOrder(true); + + for (int i = 0; i < expected.length; i++) { + if (expected[i] != null) { + expectedVals[i] = (MapValue) + JsonUtils.createValueFromJson(expected[i], options); + } + } + + String completeResult = ""; + int numMatched = 0; + boolean failed = false; + int totalReadKB = 0; + int j = 0; + + if (updReq != null) { + do { + QueryResult res = handle.query(updReq); + res.getResults(); + } while (!updReq.isDone()); + } + + /* These tests have non-deterministic results. We can only check that + * the number of results is the expected one */ + if (queryFile.getPath().contains("gb/q/noidx09") || + queryFile.getPath().contains("gb/q/noidx12") || + queryFile.getPath().contains("gb/q/noidx15") || + queryFile.getPath().contains("gb/q/distinct02")) { + + int numActual = 0; + + do { + QueryResult res = handle.query(selReq); + List list = res.getResults(); + numActual += list.size(); + } while (!selReq.isDone()); + + if (numActual != numExpected) { + opt.failure( + " Test " + this + " FAILED:" + + "\n Unexpected number of results: " + + "\n expected size: " + numExpected + + "\n actual size: " + numMatched); + failed = true; + } + + return !failed; + } + + do { + QueryResult res = handle.query(selReq); + List list = res.getResults(); + + totalReadKB += res.getReadKB(); + + /* This assert assumes that the max row size is 3KB. 
If larger rows + * are ever used, the assert needs to be modified */ + int maxReadKB; + if (queryFile.getPath().contains("join")) { + maxReadKB = getEffectiveMaxReadKB(selReq) + 5; + } else if (queryFile.getPath().contains("idc_geojson")) { + /* The max row size of idc_geojson is 13KB */ + maxReadKB = getEffectiveMaxReadKB(selReq) + 13; + } else { + maxReadKB = getEffectiveMaxReadKB(selReq) + 3; + } + assertTrue("maxReadKB=" + maxReadKB + ", readKB=" + res.getReadKB(), + res.getReadKB() <= maxReadKB); + + for (MapValue val1 : list) { + + MapValue val2 = (MapValue)JsonUtils.createValueFromJson( + val1.toString(), options); + + boolean found = false; + + if (!ordered) { + for (int i = 0; i < expectedVals.length; ++i) { + + if (expectedVals[i] != null) { + int ret = -1; + try { + ret = val2.compareTo(expectedVals[i]); + } catch (Exception e) {} + + if (ret == 0) { + expectedVals[i] = null; + ++numMatched; + found = true; + break; + } + } + } + } else { + int ret = -1; + try { + ret = val2.compareTo(expectedVals[j]); + } catch (Exception e) {} + + if (ret == 0) { + expectedVals[j] = null; + ++numMatched; + found = true; + } + ++j; + } + + if (!failed && !found) { + opt.failure( + " Test " + this + " FAILED:" + + "\n unexpected actual result: '" + val2 + "'" ); + failed = true; + } + + completeResult = + completeResult + + (completeResult.length() == 0 ? "" : "\n") + + val2; + } + + } while (!selReq.isDone()); +/* + if (!opt.getOnPrem() && + (queryFile.getPath().contains("sec_index/q/sort11") || + queryFile.getPath().contains("sec_index/q/q16") || + queryFile.getPath().contains("prim_index_sort/q/sort5") || + queryFile.getPath().contains("prim_index/q/q18"))) { + assert(totalReadKB == 1); + } +*/ + if (opt.getTraceLevel() > 0 && !selReq.getLogFileTracing()) { + selReq.printTrace(System.out); + } + + if (!failed && numMatched != numExpected) { + opt.failure( + " Test " + this + " FAILED:" + + "\n Fewer than expected results: " + + "\n expected size: " + numExpected + + "\n actual size: " + numMatched); + failed = true; + } + + if (failed) { + opt.failure( + " expected unordered: '\n" + expectedResult + "'" + + "\n actual: '\n" + completeResult + "'" + + "\n not matching: " + (numExpected - numMatched)); + genActualFile(UNORDERED_RESULT, completeResult); + } + + return !failed; + } + + public static FieldValue createValueFromString( + String value, + final Type type) { + + final InputStream jsonInput; + + switch (type) { + case JSON_NULL: + return JsonNullValue.getInstance(); + case NULL: + return NullValue.getInstance(); + case STRING: + return new StringValue(value); + case INTEGER: + return new IntegerValue(Integer.parseInt(value)); + case LONG: + return new LongValue(Long.parseLong(value)); + case DOUBLE: + return new DoubleValue(Double.parseDouble(value)); + case NUMBER: + return new NumberValue(new BigDecimal(value)); + case BOOLEAN: + return BooleanValue.getInstance(Boolean.parseBoolean(value)); + case BINARY: + return new BinaryValue(value.getBytes()); + case TIMESTAMP: + return new TimestampValue(value); + case ARRAY: + case MAP: + jsonInput = new ByteArrayInputStream(value.getBytes()); + return FieldValue.createFromJson(jsonInput, new JsonOptions(). 
+ setMaintainInsertionOrder(true)); + default: + throw new IllegalArgumentException( + "Type not yet implemented: " + type); + } + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/query/QTDefaultImpl.java b/httpproxy/tests/src/main/java/oracle/nosql/query/QTDefaultImpl.java new file mode 100644 index 00000000..40aeb301 --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/query/QTDefaultImpl.java @@ -0,0 +1,317 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2011, 2018 Oracle and/or its affiliates. All rights reserved. + * + */ + +package oracle.nosql.query; + +import java.io.File; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Properties; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import oracle.nosql.common.qtf.FileUtils; +import oracle.nosql.common.qtf.QTAfter; +import oracle.nosql.common.qtf.QTBefore; +import oracle.nosql.common.qtf.QTOptions; +import oracle.nosql.driver.IndexExistsException; +import oracle.nosql.driver.NoSQLHandle; +import oracle.nosql.driver.RequestTimeoutException; +import oracle.nosql.driver.TableExistsException; +import oracle.nosql.driver.TableNotFoundException; +import oracle.nosql.driver.ops.SystemResult; +import oracle.nosql.driver.ops.TableLimits; +import oracle.nosql.driver.ops.TableRequest; +import oracle.nosql.driver.ops.TableResult; + +/** + * Class implements QTBefore and QTAfter. + */ +public class QTDefaultImpl + implements QTBefore, QTAfter { + + private static Pattern tableNamePattern = Pattern.compile( + "(?is)\\b(?:create+\\s+table+\\s+if+\\s+not+\\s+exists|" + + "create+\\s+table|alter+\\s+table)\\s+" + + "(.*?)(?=\\s|\\(|$)"); + + protected QTOptions opts; + protected File configFile; + protected Properties configProperties; + + private enum DdlType { + CREATE_TABLE, + DROP_TABLE, + ALTER_TABLE, + CREATE_INDEX + } + + @Override + public void setOptions(QTOptions opts) { + this.opts = opts; + } + + @Override + public void setConfigFile(File configFile) { + this.configFile = configFile; + } + + @Override + public void setConfigProperties(Properties configProperties) { + this.configProperties = configProperties; + } + + @Override + public void before() { + opts.verbose("Before: default impl for " + + opts.relativize(configFile)); + + System.out.println("Executing: " + opts.relativize(configFile)); + try { + String beforeDmlProp = + configProperties.getProperty("before-ddl-file"); + if (beforeDmlProp != null) { + + File dmlFile = new File(configFile.getParentFile(), + beforeDmlProp); + if (!dmlFile.exists() || !dmlFile.isFile()) + throw new IllegalArgumentException("Property " + + " before-ddl-file doesn't reference a valid file."); + + opts.verbose("Executing before-ddl-file: " + beforeDmlProp); + + List stmts = extractStatements(dmlFile); + executeStatements(stmts); + } + + String beforeDataProp = + configProperties.getProperty("before-data-file"); + if (beforeDataProp != null) { + + File dataFile = new File(configFile.getParentFile(), + beforeDataProp); + if (!dataFile.exists() || !dataFile.isFile()) + throw new IllegalArgumentException("Property " + + "before-data-file doesn't reference a valid file."); + + opts.verbose("Executing before-data-file: " + beforeDataProp); + JsonLoaderCloud.loadJsonFromFile(QTest.getHandle(), + dataFile.getCanonicalPath(), + null); + } + } + catch (FileNotFoundException e) { + e.printStackTrace(); + throw new IllegalArgumentException(e); + } + 
catch (IOException e) { + e.printStackTrace(); + throw new IllegalArgumentException(e); + } + catch (Throwable t) { + t.printStackTrace(); + throw new IllegalArgumentException(t); + } + } + + protected void executeStatements(List stmts) { + final int waitMillis = 100000; + final int delayMillis = 1000; + final TableLimits defLimits = new TableLimits(200000, 200000, 30); + final int maxRetries = 3; + + for(String stmt : stmts) { + opts.verbose("Executing: " + stmt); + String s = stmt.toLowerCase(); + + if (s.contains("namespace")) { + doSysOp(QTest.getHandle(), stmt, waitMillis, delayMillis); + } else { + TableLimits limits = defLimits; + /* + * This is a hack to make sure that limits are not attached + * to anything but create table ops. QTF does not support + * altering limits so alter table doesn't have them either. + */ + DdlType type = null; + if (s.contains("table")) { + if (s.contains("drop") || s.contains("alter")) { + limits = null; + type = s.contains("drop") ? DdlType.DROP_TABLE : + DdlType.ALTER_TABLE; + } else { + type = DdlType.CREATE_TABLE; + if (isCreateChildTable(stmt)) { + limits = null; + } + } + } + if (s.contains("index")) { + limits = null; + type = DdlType.CREATE_INDEX; + } + + retryTableOp(QTest.getHandle(), stmt, type, limits, waitMillis, + delayMillis, maxRetries); + } + + opts.verbose(" Successful"); + } + } + + private void retryTableOp(NoSQLHandle nosqlHanel, + String stmt, + DdlType type, + TableLimits limits, + int waitMs, + int delayMs, + int maxRetries) { + int retries = 0; + while (true) { + try { + doTableOp(QTest.getHandle(), stmt, limits, waitMs, delayMs); + break; + } catch (RequestTimeoutException ex) { + if (retries++ < maxRetries) { + opts.verbose(" retry executing '" + stmt + "' " + + (retries + 1) + " times"); + continue; + } + throw ex; + } catch (TableNotFoundException ex) { + if (retries > 0 && type == DdlType.DROP_TABLE) { + opts.verbose(" got TableNotFoundException, ignore this " + + "error when retry dropping table: " + stmt); + break; + } + throw ex; + } catch (TableExistsException ex) { + if (retries > 0 && type == DdlType.CREATE_TABLE) { + opts.verbose(" got TableExistsException, ignore this " + + "error when retry creating table: " + stmt); + break; + } + throw ex; + } catch (IndexExistsException ex) { + if (retries > 0 && type == DdlType.CREATE_INDEX) { + opts.verbose(" got IndexExistsException, ignore this " + + "error when retry creating index: " + stmt); + break; + } + throw ex; + } + } + } + + private static TableResult doTableOp(NoSQLHandle nosqlHanel, + String stmt, + TableLimits limits, + int waitMs, + int delayMs) { + + TableRequest tableRequest = new TableRequest() + .setStatement(stmt) + .setTimeout(60000); + if (limits != null) { + tableRequest = tableRequest.setTableLimits(limits); + } + + TableResult tres = QTest.getHandle().tableRequest(tableRequest); + tres.waitForCompletion(QTest.getHandle(), waitMs, delayMs); + return tres; + } + + private static SystemResult doSysOp(NoSQLHandle handle, + String stmt, + int waitMs, + int delayMs) { + + return handle.doSystemRequest(stmt, waitMs, delayMs); + } + + protected List extractStatements(File dmlFile) + throws IOException { + + // a statement can be on multiple lines, statements are split by an + // empty line (using String.trim() definition). 
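+        // For example (illustrative only): a ddl file with a CREATE TABLE
+        // statement, a blank line, and a CREATE INDEX statement yields two
+        // entries in the returned list; lines starting with '#' are ignored.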
+ + String dmlStr = FileUtils.readFileToString(dmlFile).trim(); + String[] dmlLines = dmlStr.split("\n"); + List stmts = new ArrayList(); + String currentStmt = ""; + for (int i = 0; i < dmlLines.length; i++) { + String dmlLine = dmlLines[i].trim(); + if (dmlLine.startsWith("#")) + continue; + + if (dmlLine.length() > 0) { + currentStmt = currentStmt + dmlLine; + } else { + if (currentStmt.length() > 0) + stmts.add(currentStmt); + currentStmt = ""; + } + } + if (currentStmt.length() > 0) + stmts.add(currentStmt); + return stmts; + } + + protected List extractLines(File dataFile) + throws IOException { + // use the same technique + return extractStatements(dataFile); + } + + @Override + public void after() { + opts.verbose("After: default impl for " + + opts.relativize(configFile)); + + String afterDmlProp = configProperties.getProperty("after-ddl-file"); + if (afterDmlProp != null) { + try { + File dmlFile = new File(configFile.getParentFile(), + afterDmlProp); + if (!dmlFile.exists() || !dmlFile.isFile()) + throw new IllegalArgumentException("Property " + + "after-ddl-file" + + " doesn't reference a valid file."); + + List stmts = extractStatements(dmlFile); + executeStatements(stmts); + } + catch (FileNotFoundException e) { + e.printStackTrace(); + throw new IllegalArgumentException(e); + } + catch (IOException e) { + e.printStackTrace(); + throw new IllegalArgumentException(e); + } + } + } + + /* Check if the statement is to create child table */ + private boolean isCreateChildTable(String statement) { + Matcher matcher = tableNamePattern.matcher(statement.toLowerCase()); + if (!matcher.find() || matcher.groupCount() != 1) { + /* + * unable to find table name or multiple occurrences in statement, + * return the original statement. + */ + return false; + } + + int start = matcher.start(1); + int end = matcher.end(1); + String tableName = statement.substring(start, end); + return tableName.contains("."); + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/query/QTFactoryCloud.java b/httpproxy/tests/src/main/java/oracle/nosql/query/QTFactoryCloud.java new file mode 100644 index 00000000..f3b56035 --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/query/QTFactoryCloud.java @@ -0,0 +1,34 @@ +/*- + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * See the file LICENSE for redistribution information. + * + */ + +package oracle.nosql.query; + +import java.io.File; + +import oracle.nosql.common.qtf.QTCase; +import oracle.nosql.common.qtf.QTFactory; +import oracle.nosql.common.qtf.QTOptions; +import oracle.nosql.common.qtf.QTRun; +import oracle.nosql.common.qtf.QTSuite; + +class QTFactoryCloud implements QTFactory { + + @Override + public QTSuite createQTSuite(QTOptions opts, File configFile) { + return new QTSuiteCloud(opts, configFile); + } + + @Override + public QTRun createQTRun() { + return new QTRun(); + } + + @Override + public QTCase createQTCase() { + return new QTCaseCloud(); + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/query/QTSuiteCloud.java b/httpproxy/tests/src/main/java/oracle/nosql/query/QTSuiteCloud.java new file mode 100644 index 00000000..e2e3a89c --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/query/QTSuiteCloud.java @@ -0,0 +1,29 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2011, 2018 Oracle and/or its affiliates. All rights reserved. 
+ * + */ + +package oracle.nosql.query; + +import java.io.File; +import oracle.nosql.common.qtf.QTOptions; +import oracle.nosql.common.qtf.QTSuite; + +/** + * Class representing test suite. + */ +public class QTSuiteCloud extends QTSuite { + + public static final String PKGNAME = "oracle.nosql.query."; + + QTSuiteCloud(QTOptions opts, File configFile) { + super(opts, configFile, PKGNAME); + } + + @Override + protected boolean getRunCase(String prop) { + return prop.startsWith(RUN_PREFIX); + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/query/QTest.java b/httpproxy/tests/src/main/java/oracle/nosql/query/QTest.java new file mode 100644 index 00000000..b8872e1d --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/query/QTest.java @@ -0,0 +1,187 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2011, 2018 Oracle and/or its affiliates. All rights reserved. + * + */ + +package oracle.nosql.query; + +import static org.junit.Assert.fail; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; + +import oracle.kv.impl.test.TestStatus; +import oracle.nosql.common.qtf.QTCase; +import oracle.nosql.common.qtf.QTestBase; +import oracle.nosql.common.qtf.QTOptions; +import oracle.nosql.common.qtf.QTSuite; +import oracle.nosql.driver.NoSQLHandle; + +import oracle.nosql.proxy.Proxy; + +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import com.fasterxml.jackson.core.StreamReadConstraints; + +/** + * Use JUnit's parametrized testing + */ +@RunWith(Parameterized.class) +public class QTest extends QTestBase { + + private static final String QTF_PROXY_HOST = + System.getProperty("test.qtf.proxy.host"); + + private static final String QTF_PROXY_PORT = + System.getProperty("test.qtf.proxy.port"); + + static { + /* + * Since Jackson 2.15, the constraints StreamReadConstraints was added + * to guard against malicious input by preventing processing of + * "too big" input constructs. + * + * The StreamReadConstraints.DEFAULT_MAX_NUM_LEN(default 1000) was + * added since jackson 2.16 to limit the maximum number length. + * See details in https://docshoster.org/p/com.fasterxml.jackson/jackson-core/2.18.2/com/fasterxml/jackson/core/StreamReadConstraints.html + * + * The qtf test "gb2" uses the number values with max length of 3087, + * increase the maxNumberLength to be able to read those number values + * when parsing from JSON string. 
+ */ + int maxNumberLength = 4000; + StreamReadConstraints constraints = StreamReadConstraints.builder() + .maxNumberLength(maxNumberLength) + .build(); + StreamReadConstraints.overrideDefaultStreamReadConstraints(constraints); + } + + private static Proxy proxy; + + private static NoSQLHandle handle; + + public QTCaseCloud testCase; + + public static void main(String args[]) { + org.junit.runner.JUnitCore.main(QTest.class.getName()); + } + + @BeforeClass + public static void beforeAll() + throws Exception { + + /* Use external proxy */ + if (QTF_PROXY_HOST != null && QTF_PROXY_PORT != null) { + String endpoint = QTF_PROXY_HOST + ":" + QTF_PROXY_PORT; + handle = ProxyOperation.configHandleStatic(endpoint, + Boolean.getBoolean("onprem")); + return; + } + + TestStatus.setActive(true); + ProxyOperation.staticSetUp(); + + proxy = ProxyOperation.getProxy(); + } + + @AfterClass + public static void afterAll() + throws Exception { + + if (previousRun != null) { + previousSuite.after(); + previousRun = null; + previousSuite = null; + } + + ProxyOperation.staticTearDown(); + } + + @Parameterized.Parameters(name = "{index}: case({0})") + public static Collection data() + throws Exception { + + factory = new QTFactoryCloud(); + + QTOptions opts = parseCommandLine(); + opts.setVerbose(Boolean.getBoolean("test.verbose")); + + String filter = opts.getFilter(); + String filterOut = System.getProperty("test.filterOutList"); + if ( !"idc_".equals(filter) && filterOut == null ) { + filterOut = "idc_"; + } + opts.setFilterOut(filterOut); + + // Create QTSuites and QTRuns per QTSuite + List suites = createQTSuites(opts); + + List cases = new ArrayList(); + for(QTSuite s : suites) { + s.addCases(cases); + } + + List params = new ArrayList(); + for (QTCase c : cases) { + params.add(new Object[]{c, null}); + } + + return params; + } + + public static NoSQLHandle getHandle() { + + if (handle == null) { + handle = ProxyOperation.getNosqlHandle(); + } + return handle; + } + + public QTest(Object param1, Object param2) { + + testCase = (QTCaseCloud)param1; + + if (testCase != null) { + testCase.setHandle(getHandle()); + if (proxy != null && proxy.isOnPrem()) { + testCase.setIsOnPrem(); + } + } + } + + @Before + public void before() { + } + + @After + public void after() { + } + + @Test + public void test() + throws IOException { + + try { + checkDeps(testCase); + } catch (Exception e) { + /* + * This was added to catch unexpected failures in checkDeps. + * Without this, such exceptions are silent and cause QTF to + * misbehave (see checkDeps -- it won't properly change + * previous suite and handle before/after calls). + */ + fail("Exception from checkDeps: " + e); + } + testCase.run(); + } +} diff --git a/httpproxy/tests/src/main/java/oracle/nosql/query/RowPropsSetup.java b/httpproxy/tests/src/main/java/oracle/nosql/query/RowPropsSetup.java new file mode 100644 index 00000000..4824f3c6 --- /dev/null +++ b/httpproxy/tests/src/main/java/oracle/nosql/query/RowPropsSetup.java @@ -0,0 +1,35 @@ +/*- + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * See the file LICENSE for redistribution information. + * + */ +package oracle.nosql.query; + +import java.util.Properties; + +/* + * The RowPropsSetup is the setup class of test rowprops in qtf test. + * + * Note: + * This RowPropsSetup in KV qtf test creates dummy tables before create test + * table to make sure the test table can be assigned with fixed table id(s). 
+ *
+ * When running through the proxy, no table id is returned to the client, so
+ * the same approach as the KV qtf test cannot be used. The rowprops test is
+ * in fact excluded from the proxy-based qtf run; this class exists only so
+ * that the initialization of QTSuiteCloud, which parses the test
+ * configuration of all tests, can complete.
+ */
+public class RowPropsSetup extends QTDefaultImpl {
+
+    @Override
+    public void setConfigProperties(Properties properties) {
+        super.setConfigProperties(properties);
+        if (!configProperties.containsKey("before-ddl-file")) {
+            configProperties.setProperty("before-ddl-file", "before.ddl");
+        }
+        if (!configProperties.containsKey("after-ddl-file")) {
+            configProperties.setProperty("after-ddl-file", "after.ddl");
+        }
+    }
+}
diff --git a/httpproxy/tests/src/main/java/oracle/nosql/query/RunQueryTests.java b/httpproxy/tests/src/main/java/oracle/nosql/query/RunQueryTests.java
new file mode 100644
index 00000000..540943b2
--- /dev/null
+++ b/httpproxy/tests/src/main/java/oracle/nosql/query/RunQueryTests.java
@@ -0,0 +1,66 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2011, 2018 Oracle and/or its affiliates. All rights reserved.
+ *
+ */
+
+package oracle.nosql.query;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import oracle.nosql.common.qtf.QTCase;
+import oracle.nosql.common.qtf.QTestBase;
+import oracle.nosql.common.qtf.QTOptions;
+import oracle.nosql.common.qtf.QTSuite;
+
+/**
+ * Class that extends oracle.nosql.common.qtf.RunQueryTests.
+ */
+public class RunQueryTests extends oracle.nosql.common.qtf.RunQueryTests {
+
+    public QTOptions opt = new QTOptions();
+
+    public List suites = new ArrayList();
+
+    public static void main(String[] args)
+        throws Exception {
+
+        RunQueryTests rqt = new RunQueryTests();
+
+        int initRes = rqt.init(args);
+        if (initRes < 0) {
+            return;
+        }
+
+        rqt.opt.setVerbose(Boolean.getBoolean("test.verbose"));
+
+        try {
+            ProxyOperation.staticSetUp();
+
+            rqt.opt.verbose("\nQuery Test Framework\n");
+
+            rqt.suites = QTestBase.createQTSuites(rqt.opt);
+
+            rqt.opt.verbose("\nFind test cases\n");
+            List cases = rqt.getCases();
+
+            rqt.opt.verbose("\nRun\n");
+
+            // Run only the tests that were filtered and only their run and
+            // suite before/after code.
+            QTest qTest = new QTest(null, null);
+            for (QTCase qtCase : cases) {
+                qTest.testCase = (QTCaseCloud)qtCase;
+                qTest.testCase.setHandle(QTest.getHandle());
+                qTest.before();
+                qTest.test();
+                qTest.after();
+            }
+
+        } finally {
+            ProxyOperation.staticTearDown();
+        }
+    }
+}
diff --git a/httpproxy/tests/src/main/java/oracle/nosql/query/UserTable.java b/httpproxy/tests/src/main/java/oracle/nosql/query/UserTable.java
new file mode 100644
index 00000000..219fbb52
--- /dev/null
+++ b/httpproxy/tests/src/main/java/oracle/nosql/query/UserTable.java
@@ -0,0 +1,75 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2011, 2018 Oracle and/or its affiliates. All rights reserved.
+ *
+ */
+
+package oracle.nosql.query;
+
+import oracle.nosql.driver.http.NoSQLHandleImpl;
+import oracle.nosql.driver.ops.PutRequest;
+import oracle.nosql.driver.ops.PutResult;
+import oracle.nosql.driver.values.JsonOptions;
+import oracle.nosql.driver.values.MapValue;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.junit.Assert;
+
+/**
+ * Setup implementation for //simpleUserTable cases.
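+ * Creates table Users, whose address and children columns exercise nested
+ * RECORD, ARRAY and MAP types, and loads ten rows in before(); after()
+ * drops the table.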
+ */ +public class UserTable extends QTDefaultImpl { + + String userTableStatement = + "CREATE TABLE Users" + + "(id INTEGER, firstName STRING, lastName STRING, age INTEGER," + + " address RECORD( \n" + + " city STRING,\n" + + " state STRING,\n" + + " phones ARRAY( RECORD( work INTEGER, home INTEGER ) ),\n" + + " ptr STRING), \n" + + " children MAP( RECORD( age LONG, friends ARRAY(STRING) ) ),\n" + + "primary key (id))"; + + @Override + public void before() { + opts.verbose("Run Before: UsersTable"); + + List stmts = new ArrayList(); + stmts.add(userTableStatement); + executeStatements(stmts); + + NoSQLHandleImpl handle = (NoSQLHandleImpl)QTest.getHandle(); + JsonOptions options = new JsonOptions(); + for (int i = 0; i < 10; i++) { + MapValue mv = new MapValue().put("id", i) + .put("firstName", ("first" + i)) + .put("lastName", ("last" + i)) + .put("age", i + 10) + .putFromJson("address", + "{\"city\":\"Boston\", \"state\":\"MA\", " + + "\"phones\":[{\"work\" : 111,\"home\" : 222}], \"ptr\" :" + + " null }", options) + .putFromJson("children", + "{\"john\": {\"age\" : 3, \"friends\":[\"f1\"]}, " + + "\"cory\": {\"age\" : 4, \"friends\":[\"f2\"]} }", + options); + PutRequest req = new PutRequest().setValue(mv) + .setTableName("Users"); + PutResult res = handle.put(req); + Assert.assertNotNull(res); + } + } + + @Override + public void after() { + opts.verbose("Run After: UsersTable"); + + List stmts = new ArrayList(); + stmts.add("DROP TABLE Users"); + executeStatements(stmts); + } +} diff --git a/httpproxy/tests/src/main/resources/oracle/nosql/proxy/rest/curl_smoke_test.sh b/httpproxy/tests/src/main/resources/oracle/nosql/proxy/rest/curl_smoke_test.sh new file mode 100755 index 00000000..17fa812c --- /dev/null +++ b/httpproxy/tests/src/main/resources/oracle/nosql/proxy/rest/curl_smoke_test.sh @@ -0,0 +1,97 @@ +#!/bin/bash + +VERBOSE=0 +if [ "$1" = "-v" ] ; then + VERBOSE=1 + shift +fi + +PORT=$1 +[ "$PORT" = "" ] && PORT=8080 + + +URL="http://localhost:$PORT/20190828" +#COMPARTMENT="ocid1.compartment.oc1..aaaaaaaagaqos5k" +COMPARTMENT="testCompartment" +TABLENAME="testTable" +AUTH="Authorization: Bearer foo" +CTYPE="Content-Type: application/json" + +trap "/bin/rm -f *.json.$$ *.out.$$" EXIT + +function docurl() { + curl -s -S -vvv -H "$AUTH" "$@" > curl.out.$$ 2>&1 + [ $? -ne 0 ] && cat curl.out.$$ && exit 1 + # grep for 200 OK in verbose output + egrep 'HTTP.* 200 OK|HTTP.* 202 Accepted' curl.out.$$ > /dev/null + [ $? -ne 0 ] && egrep '^{|^< HTTP/' curl.out.$$ && exit 1 + [ $VERBOSE -eq 1 ] && grep '^{' curl.out.$$ | grep -v 'bytes data' + [ $VERBOSE -eq 1 ] && echo "" + [ $VERBOSE -eq 1 ] && echo "" +} + +cat > create.json.$$ << EOT +{ + "id": 1, + "ifNotExists": true, + "name": "$TABLENAME", + "compartmentId": "$COMPARTMENT", + "ddlStatement": "create table if not exists $TABLENAME(id integer, name string, age integer, info json, primary key(id))", + "tableLimits": {"maxReadUnits": 500, "maxWriteUnits": 500, "maxStorageInGBs": 5, "capacityMode": "PROVISIONED"} +} +EOT +# Create the table +[ $VERBOSE -eq 1 ] && echo "creating table $TABLENAME..." 
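+# docurl (defined above) fails the script unless the response is HTTP
+# 200 OK or 202 Accepted, so each request below doubles as an assertion.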
+docurl "$URL/tables" -H "$CTYPE" --data-binary @create.json.$$ +# wait a bit for it to be created + +sleep 10 +# see if the table exists +docurl "$URL/tables/$TABLENAME?compartmentId=$COMPARTMENT" -X GET + +ID=5 +cat > put.json.$$ << EOT +{ + "compartmentId": "$COMPARTMENT", + "value": { + "id": $ID, + "name": "John", + "age": 54, + "info": "{\"lastname\":\"Smith\", \"street\":\"1234 main street\"}" + } +} +EOT +# Put a row +[ $VERBOSE -eq 1 ] && echo "Putting a row with id=$ID" +docurl "$URL/tables/$TABLENAME/rows" -H "$CTYPE" -X PUT --data-binary @put.json.$$ + +# get the row +[ $VERBOSE -eq 1 ] && echo "Getting row with id=$ID" +docurl "$URL/tables/$TABLENAME/rows?compartmentId=$COMPARTMENT&key=id:$ID" -X GET + +cat > query.json.$$ << EOT +{ + "compartmentId": "$COMPARTMENT", + "statement":"select * from $TABLENAME where age > 50 and age < 60", + "timeoutInMs": 1000 +} +EOT + +# query for the row +[ $VERBOSE -eq 1 ] && echo "Querying for rows in age range" +docurl "$URL/query" -H "$CTYPE" --data-binary @query.json.$$ + +#delete the row +[ $VERBOSE -eq 1 ] && echo "Deleting row with id=$ID" +docurl "$URL/tables/$TABLENAME/rows?compartmentId=$COMPARTMENT&key=id:$ID" -X DELETE + +# query again +[ $VERBOSE -eq 1 ] && echo "Querying for rows in age range" +docurl "$URL/query" -H "$CTYPE" --data-binary @query.json.$$ + +# delete the table +[ $VERBOSE -eq 1 ] && echo "Deleting table $TABLENAME" +docurl "$URL/tables/$TABLENAME?compartmentId=$COMPARTMENT" -X DELETE + +[ $VERBOSE -eq 1 ] && echo "Test successful." +exit 0 diff --git a/httpproxy/tests/src/main/resources/oracle/nosql/proxy/utf16be.json b/httpproxy/tests/src/main/resources/oracle/nosql/proxy/utf16be.json new file mode 100644 index 0000000000000000000000000000000000000000..b37765113c989a6f347b2f9584824604246f0861 GIT binary patch literal 58 ycmZRmW>8|tWJm#$RtyRZMhrSYQi&mtA(0`MAr&kN;-xZ_1Ib5Fv04UR1}*^eIS9xA literal 0 HcmV?d00001 diff --git a/httpproxy/tests/src/main/resources/oracle/nosql/proxy/utf16bebom.json b/httpproxy/tests/src/main/resources/oracle/nosql/proxy/utf16bebom.json new file mode 100644 index 0000000000000000000000000000000000000000..8dfb0b43fd47ab0890e554c8bb4f3cab0ca6ae1e GIT binary patch literal 60 zcmezOpP`yTi6N6A1xQ*kC@>f?=m1G2hCGHuhFpeJuqcR^%1{m@A3?=x8F(4E0BM&B A#Q*>R literal 0 HcmV?d00001 diff --git a/httpproxy/tests/src/main/resources/oracle/nosql/proxy/utf16le.json b/httpproxy/tests/src/main/resources/oracle/nosql/proxy/utf16le.json new file mode 100644 index 0000000000000000000000000000000000000000..73ff4ccf9bae92c3ae537b6b00ba15dc4c7904f8 GIT binary patch literal 58 ycmbo3lD07)f=JcdMuT!vJzD2SKJP!1#?LB(nrcp11D0CHan A#Q*>R literal 0 HcmV?d00001 diff --git a/httpproxy/tests/src/main/resources/oracle/nosql/proxy/utf8.json b/httpproxy/tests/src/main/resources/oracle/nosql/proxy/utf8.json new file mode 100644 index 00000000..017e2519 --- /dev/null +++ b/httpproxy/tests/src/main/resources/oracle/nosql/proxy/utf8.json @@ -0,0 +1 @@ +{"id": 2, "name":"newnâme"} diff --git a/httpproxy/tests/src/main/resources/oracle/nosql/proxy/utf8_jsondata.txt b/httpproxy/tests/src/main/resources/oracle/nosql/proxy/utf8_jsondata.txt new file mode 100644 index 00000000..69a6f794 --- /dev/null +++ b/httpproxy/tests/src/main/resources/oracle/nosql/proxy/utf8_jsondata.txt @@ -0,0 +1,170 @@ +{ + "id":1151612, + "name":"Perry's Union Street_çé", + "address":"1944 Union Street_çé", + "city":"San Francisco", + "state":"CA", + "area":"San Francisco Bay Area", + "postal_code":"94123", + "country":"US", + "phone":"4159229022", + 
"lat":37.797904, + "lng":-122.431332, + "price":2, + "reserve_url":"http://www.opentable.com/single.aspx?rid=151612", + "mobile_reserve_url":"http://mobile.opentable.com/opentable/?restId=151612", + "image_url":"https://www.opentable.com/img/restimages/151612.jpg" +}, +{ + "id":2141049, + "name":"ノルマンディ", + "address":"140ニューモンゴメリーストリート", + "city":"サンフランシスコ", + "state":"CA", + "area":"サンフランシスコ", + "postal_code":"94103", + "country":"US", + "phone":"4159750876", + "lat":37.786664, + "lng":-122.399773, + "price":2, + "reserve_url":"http://www.opentable.com/single.aspx?rid=141049", + "mobile_reserve_url":"http://mobile.opentable.com/opentable/?restId=141049", + "image_url":"https://www.opentable.com/img/restimages/141049.jpg" +}, +{ + "id":322792, + "name":"渔民的石窟", + "address":"泰勒街2847号", + "city":"旧金山", + "state":"CA", + "area":"旧金山湾区", + "postal_code":"94133", + "country":"美国", + "phone":"4156737025", + "lat":37.808407, + "lng":-122.415845, + "price":2, + "reserve_url":"http://www.opentable.com/single.aspx?rid=22792", + "mobile_reserve_url":"http://mobile.opentable.com/opentable/?restId=22792", + "image_url":"https://www.opentable.com/img/restimages/22792.jpg" +}, +{ + "id":450962, + "name":"Sons and Daughters_çé", + "address":"708 Bush Street_çé", + "city":"San Francisco", + "state":"CA", + "area":"San Francisco Bay Area", + "postal_code":"94108", + "country":"US", + "phone":"4153918311", + "lat":37.790138, + "lng":-122.40908, + "price":4, + "reserve_url":"http://www.opentable.com/single.aspx?rid=50962", + "mobile_reserve_url":"http://mobile.opentable.com/opentable/?restId=50962", + "image_url":"https://www.opentable.com/img/restimages/50962.jpg" +}, +{ + "id":54121, + "name":"Ristobar_çé", + "address":"2300 Chestnut St_çé", + "city":"San Francisco", + "state":"CA", + "area":"San Francisco Bay Area", + "postal_code":"94123", + "country":"US", + "phone":"4159236464x", + "lat":37.80035, + "lng":-122.441486, + "price":2, + "reserve_url":"http://www.opentable.com/single.aspx?rid=4121", + "mobile_reserve_url":"http://mobile.opentable.com/opentable/?restId=4121", + "image_url":"https://www.opentable.com/img/restimages/4121.jpg" +}, +{ + "id":6001, + "name":"Mamacita_çé", + "address":"2317 Chestnut St_çé", + "city":"San Francisco", + "state":"CA", + "area":"San Francisco Bay Area", + "postal_code":"94123", + "country":"US", + "phone":"4153468494", + "lat":37.799953, + "lng":-122.44147, + "price":2, + "reserve_url":"http://www.opentable.com/single.aspx?rid=6001", + "mobile_reserve_url":"http://mobile.opentable.com/opentable/?restId=6001", + "image_url":"https://www.opentable.com/img/restimages/6001.jpg" +}, +{ + "id":7149539, + "name":"KUSAKABE_çé", + "address":"584 Washington Street_çé", + "city":"San Francisco", + "state":"CA", + "area":"San Francisco Bay Area", + "postal_code":"94111", + "country":"US", + "phone":"4157570155", + "lat":37.795597, + "lng":-122.402963, + "price":4, + "reserve_url":"http://www.opentable.com/single.aspx?rid=149539", + "mobile_reserve_url":"http://mobile.opentable.com/opentable/?restId=149539", + "image_url":"https://www.opentable.com/img/restimages/149539.jpg" +}, +{ + "id":87742, + "name":"The Oak Room - The Westin St. 
Francis_çé", + "address":"335 Powell St_çé", + "city":"San Francisco", + "state":"CA", + "area":"San Francisco Bay Area", + "postal_code":"94102", + "country":"US", + "phone":"4157740264", + "lat":37.787978, + "lng":-122.408365, + "price":2, + "reserve_url":"http://www.opentable.com/single.aspx?rid=7742", + "mobile_reserve_url":"http://mobile.opentable.com/opentable/?restId=7742", + "image_url":"https://www.opentable.com/img/restimages/7742.jpg" +}, +{ + "id":9022126, + "name":"BIN 38_çé", + "address":"3232 Scott Street_çé", + "city":"San Francisco", + "state":"CA", + "area":"San Francisco Bay Area", + "postal_code":"94123", + "country":"US", + "phone":"4155673838x", + "lat":37.799744, + "lng":-122.441095, + "price":2, + "reserve_url":"http://www.opentable.com/single.aspx?rid=22126", + "mobile_reserve_url":"http://mobile.opentable.com/opentable/?restId=22126", + "image_url":"https://www.opentable.com/img/restimages/22126.jpg" +}, +{ + "id":10916, + "name":"çé_Benu", + "address":"çé_22 Hawthorne Street", + "city":"San Francisco", + "state":"CA", + "area":"San Francisco Bay Area", + "postal_code":"94105", + "country":"US", + "phone":"4156854860x", + "lat":37.785402, + "lng":-122.399068, + "price":4, + "reserve_url":"http://www.opentable.com/single.aspx?rid=45916", + "mobile_reserve_url":"http://mobile.opentable.com/opentable/?restId=45916", + "image_url":"https://www.opentable.com/img/restimages/45916.jpg" +} diff --git a/httpproxy/tests/src/main/resources/oracle/nosql/proxy/utf8_testdata.txt b/httpproxy/tests/src/main/resources/oracle/nosql/proxy/utf8_testdata.txt new file mode 100644 index 00000000..d438a526 --- /dev/null +++ b/httpproxy/tests/src/main/resources/oracle/nosql/proxy/utf8_testdata.txt @@ -0,0 +1,16 @@ +zh{ + 中文姓名 + 中文姓名2 +} + +fr{ +1: newnâme +2:{"id":1,"name":"çé_myname"} +3:{"id":2,"name":"çé_newname"} +4:Perry's Union Street_çé +} + +jp{ + ネームの名 + ネームの名2 +} diff --git a/httpproxy/tests/src/main/resources/oracle/nosql/proxy/utf8bom.json b/httpproxy/tests/src/main/resources/oracle/nosql/proxy/utf8bom.json new file mode 100644 index 00000000..d75aad07 --- /dev/null +++ b/httpproxy/tests/src/main/resources/oracle/nosql/proxy/utf8bom.json @@ -0,0 +1 @@ +{"id": 2, "name":"newnâme"} diff --git a/kvclient/pom.xml b/kvclient/pom.xml index e18dd309..0ef1c85f 100644 --- a/kvclient/pom.xml +++ b/kvclient/pom.xml @@ -7,7 +7,7 @@ com.oracle.nosql kv - 25.1.13 + 25.3.21 kvclient diff --git a/kvmain/pom.xml b/kvmain/pom.xml index dcfab1eb..a765d9b5 100644 --- a/kvmain/pom.xml +++ b/kvmain/pom.xml @@ -7,7 +7,7 @@ com.oracle.nosql kv - 25.1.13 + 25.3.21 kvmain diff --git a/kvmain/src/main/java/com/sleepycat/bind/serial/TupleSerialKeyCreator.java b/kvmain/src/main/java/com/sleepycat/bind/serial/TupleSerialKeyCreator.java index f0f22797..7ba84357 100644 --- a/kvmain/src/main/java/com/sleepycat/bind/serial/TupleSerialKeyCreator.java +++ b/kvmain/src/main/java/com/sleepycat/bind/serial/TupleSerialKeyCreator.java @@ -76,6 +76,7 @@ public TupleSerialKeyCreator(SerialBinding dataBinding) { public boolean createSecondaryKey(SecondaryDatabase db, DatabaseEntry primaryKeyEntry, DatabaseEntry dataEntry, + long creationTime, long modificationTime, long expirationTime, int storageSize, diff --git a/kvmain/src/main/java/com/sleepycat/collections/DataView.java b/kvmain/src/main/java/com/sleepycat/collections/DataView.java index 2e144925..cdd38ffd 100644 --- a/kvmain/src/main/java/com/sleepycat/collections/DataView.java +++ b/kvmain/src/main/java/com/sleepycat/collections/DataView.java @@ -520,7 +520,7 @@ 
boolean useKey(Object key, Object value, DatabaseEntry keyThang, DatabaseEntry valueThang = new DatabaseEntry(); entityBinding.objectToData(value, valueThang); secKeyCreator.createSecondaryKey(secDb, primaryKeyThang, - valueThang, 0, 0, 0, keyThang); + valueThang, 0, 0, 0, 0, keyThang); } else { entityBinding.objectToKey(value, keyThang); } diff --git a/kvmain/src/main/java/com/sleepycat/je/Cursor.java b/kvmain/src/main/java/com/sleepycat/je/Cursor.java old mode 100644 new mode 100755 index 9e014d19..d5ccec49 --- a/kvmain/src/main/java/com/sleepycat/je/Cursor.java +++ b/kvmain/src/main/java/com/sleepycat/je/Cursor.java @@ -13,6 +13,9 @@ package com.sleepycat.je; +import java.io.IOException; +import java.util.HashSet; +import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.Collection; import java.util.logging.Level; @@ -36,6 +39,7 @@ import com.sleepycat.je.dbi.TriggerManager; import com.sleepycat.je.dbi.WriteParams; import com.sleepycat.je.latch.LatchSupport; +import com.sleepycat.je.log.ErasedException; import com.sleepycat.je.log.ReplicationContext; import com.sleepycat.je.log.entry.LNLogEntry; import com.sleepycat.je.tree.BIN; @@ -44,11 +48,15 @@ import com.sleepycat.je.tree.LN; import com.sleepycat.je.txn.LockType; import com.sleepycat.je.txn.Locker; +import com.sleepycat.je.txn.Txn; +import com.sleepycat.je.txn.WriteLockInfo; import com.sleepycat.je.txn.LockerFactory; import com.sleepycat.je.utilint.DatabaseUtil; import com.sleepycat.je.utilint.InternalException; import com.sleepycat.je.utilint.LoggerUtils; import com.sleepycat.je.utilint.NotSerializable; +import com.sleepycat.je.utilint.TestHook; +import com.sleepycat.je.utilint.TestHookExecute; /** * A database cursor. Cursors are used for operating on collections of records, @@ -165,6 +173,8 @@ public class Cursor implements ForwardCursor { */ CursorConfig config; + public volatile TestHook semaphoreHook; + /* User Transacational, or null if none. */ private Transaction transaction; @@ -497,17 +507,6 @@ private void setPrefixConstraint(final Cursor c, key2Data, key2Off, key2Size) == 0; } - private boolean checkRangeConstraint(final DatabaseEntry key) { - assert key.getOffset() == 0; - assert key.getData().length == key.getSize(); - - if (rangeConstraint == null) { - return true; - } - - return rangeConstraint.inBounds(key.getData()); - } - /** * Discards the cursor. 
* @@ -631,7 +630,9 @@ public OperationResult delete(final WriteOptions options) { BeforeImageContext bImgCtx = null; if (options != null && options.getBeforeImageTTL() > 0) { bImgCtx = new BeforeImageContext( - options.getBeforeImageTTL(), + TTL.ttlToExpiration( + options.getBeforeImageTTL(), + options.getBeforeImageTTLUnit()), options.getBeforeImageTTLUnit() == TimeUnit.HOURS); } return deleteInternal(dbImpl.getRepContext(), cacheMode, bImgCtx); @@ -644,6 +645,10 @@ static void checkDeleteWriteOptions(final WriteOptions options) { throw new IllegalArgumentException( "modificationTime must be zero for a delete op."); } + if (options.getCreationTime() != 0) { + throw new IllegalArgumentException( + "CreationTime must be zero for a delete op."); + } if (options.isTombstone()) { throw new IllegalArgumentException( "Tombstone property must be false for a delete op."); @@ -651,6 +656,24 @@ static void checkDeleteWriteOptions(final WriteOptions options) { } } + static void checkWriteOptions(final WriteOptions options) { + if (options != null) { + if (options.getModificationTime() != 0 + && options.getCreationTime() != 0) { + if (options.getModificationTime() < options.getCreationTime()) { + throw new IllegalArgumentException( + "modification time cannot be less than creation time"); + } + } + if (options.getCreationTime() != 0 + && options.getModificationTime() == 0) { + throw new IllegalArgumentException( + "creation time cannot be explicitly specified " + + "without the modification time"); + } + } + } + /** * Deletes the record to which the cursor refers. When the database has * associated secondary databases, this method also deletes the associated @@ -829,6 +852,7 @@ OperationResult putInternal( final Put putType, WriteOptions options) { + checkWriteOptions(options); DatabaseUtil.checkForNullParam(putType, "putType"); if (putType == Put.CURRENT) { @@ -2391,6 +2415,7 @@ private OperationResult deleteNotify(final ReplicationContext repContext, final OperationResult readResult; long oldModificationTime = 0; + long oldCreationTime = 0; int oldStorageSize = 0; if (needOldData || hasAssociations) { @@ -2402,6 +2427,7 @@ private OperationResult deleteNotify(final ReplicationContext repContext, return null; } oldModificationTime = readResult.getModificationTime(); + oldCreationTime = readResult.getCreationTime(); oldStorageSize = readResult.getStorageSize(); } else { readResult = null; @@ -2441,6 +2467,8 @@ private OperationResult deleteNotify(final ReplicationContext repContext, nWrites += secDb.updateSecondary( locker, null /*cursor*/, dbImpl, cursorImpl, key, oldData, null /*newData*/, cacheMode, + 0 /*newCreationTime*/, + oldCreationTime, 0 /*newModificationTime*/, oldModificationTime, 0 /*expirationTime*/, @@ -2539,6 +2567,7 @@ OperationResult putForReplay( writeParams = new WriteParams(null /* cacheMode */, preprocessor, repContext, lnEntry.getExpiration(), lnEntry.isExpirationInHours(), true /* updateExpiration */, + lnEntry.getCreationTime(), lnEntry.getModificationTime(), lnEntry.isTombstone(), null, null, null, lnEntry.isBeforeImageEnabled(), ((BeforeImageLNLogEntry) lnEntry) @@ -2549,6 +2578,7 @@ OperationResult putForReplay( writeParams = new WriteParams(null /* cacheMode */, preprocessor, repContext, lnEntry.getExpiration(), lnEntry.isExpirationInHours(), true /* updateExpiration */, + lnEntry.getCreationTime(), lnEntry.getModificationTime(), lnEntry.isTombstone(), null/* allIndexDbNames */, null/* allIndexIds */, null/* indexesToUpdate */, lnEntry.isBeforeImageEnabled()); @@ -2626,6 
+2656,10 @@ private OperationResult putHandleDups( throw new IllegalArgumentException( "modificationTime must be zero for a put op."); } + if (writeParams.creationTime != 0) { + throw new IllegalArgumentException( + "creationTime must be zero for a put op."); + } switch (putMode) { case OVERWRITE: @@ -2907,6 +2941,8 @@ private OperationResult putNotify( nWrites += secDb.updateSecondary( locker, null, dbImpl, cursorImpl, key, oldData, newData, writeParams.cacheMode, + result.getCreationTime(), + result.getOldCreationTime(), result.getModificationTime(), result.getOldModificationTime(), result.getExpirationTime(), @@ -3073,6 +3109,12 @@ private OperationResult putCurrentNoNotify( final OperationResult result = cursorImpl.updateCurrentRecord( key, data, writeParams, returnOldData, returnNewData); + + if (result != null && result.getBeforeImageDBEntry() != null) { + final EnvironmentImpl envImpl = dbImpl.getEnv(); + envImpl.getBeforeImageIndex() + .put(result.getBeforeImageDBEntry()); + } if (result != null && includeInOpStats) { dbImpl.getEnv().incUpdateOps(dbImpl); @@ -4016,14 +4058,17 @@ private OperationResult searchExact( beginMoveCursor(false /*samePosition*/, cacheMode); + LockStanding standing = null; + try { /* * Search for a BIN slot whose key is == the search key. If such a * slot is found, lock it and check whether it is valid. */ - if (cursorImpl.searchExact( - key, preprocessor, lockType, excludeTombstones, - dirtyReadAll, dataRequested) == null) { + standing = cursorImpl.searchExact( + key, preprocessor, lockType, excludeTombstones, + dirtyReadAll, dataRequested); + if (standing == null) { success = true; return null; } @@ -4035,8 +4080,51 @@ private OperationResult searchExact( * the found slot if a partial key comparator is used, since then * it may be different than the given key. */ - result = cursorImpl.getCurrent( - dbImpl.allowsKeyUpdates() ? key : null, data); + if (standing.readCommittedData()) { + try { + result = cursorImpl.readLastCommitted( + standing.getLockResult().getWriteLockInfo(), + dataRequested, dbImpl.allowsKeyUpdates() ? key : null, + data); + } catch (ErasedException | IOException e) { + /* + * Optimistic read using abortLSN failed, retrying with + * read-committed mode. A read lock will be added to the + * cursor and the locker. + */ + Txn tempLockerRef = (Txn) getCursorImpl().getLocker(); + tempLockerRef.setOptimisticReadIsolation(false); + tempLockerRef.setReadCommittedIsolation(true); + + /* + * searchExact() requires the cursor + * must initially be uninitialized. + */ + cursorImpl.releaseBIN(); + cursorImpl.reset(); + + assert TestHookExecute.doHookIfSet(semaphoreHook); + + standing = cursorImpl.searchExact( + key, preprocessor, lockType, excludeTombstones, + dirtyReadAll, dataRequested); + + tempLockerRef.setOptimisticReadIsolation(true); + tempLockerRef.setReadCommittedIsolation(false); + + if (standing == null) { + success = true; + return null; + } + + result = cursorImpl.getCurrent( + dbImpl.allowsKeyUpdates() ? key : null, data); + } + } else { + result = cursorImpl.getCurrent( + dbImpl.allowsKeyUpdates() ? key : null, data); + } + /* Check for data match, if asked so. 
*/ if (result != null && @@ -4049,6 +4137,8 @@ private OperationResult searchExact( } finally { + cursorImpl.releaseLockForOptimisticRead(standing, lockType); + if (success && !dbImpl.isInternalDb() && cursorImpl.getBIN() != null && @@ -4475,7 +4565,7 @@ private long countEstimateHandleDups(final boolean excludeTombstones) { * avoid deadlocks (this is done when the user's isolation mode is * READ_COMMITTED or REPEATABLE_READ). * - * @param lockPrimaryOnly If false, then we are not using dirty-read for + * @param nonDirtyReadPrimary If false, then we are not using dirty-read for * secondary deadlock avoidance. If true, this secondary cursor's * reference to the primary will be checked after the primary record has * been locked. @@ -4514,7 +4604,7 @@ OperationResult readPrimaryAfterGet( DatabaseEntry data, final LockMode lockMode, final boolean secDirtyRead, - final boolean lockPrimaryOnly, + final boolean nonDirtyReadPrimary, final boolean verifyPrimary, final Locker locker, final Database secDb, @@ -4524,13 +4614,18 @@ OperationResult readPrimaryAfterGet( final DatabaseImpl priDbImpl = priDb.getDbImpl(); /* - * If we only lock the primary (and check the sec cursor), we must be - * using sec dirty-read for deadlock avoidance (whether or not the user - * requested dirty-read). Otherwise, we should be using sec dirty-read + * If we perform nonDirtyReadPrimary without OptimisticReadIsolation, + * we must be using sec dirty-read for deadlock avoidance + * (whether or not the user requested dirty-read). + * Otherwise, we should be using sec dirty-read * iff the user requested it. */ - if (lockPrimaryOnly) { - assert secDirtyRead && !priDirtyRead; + if (nonDirtyReadPrimary) { + if (!cursorImpl.getLocker().isOptimisticReadIsolation()) { + assert secDirtyRead && !priDirtyRead; + } else { + assert !secDirtyRead && !priDirtyRead; + } } else { assert secDirtyRead == priDirtyRead; } @@ -4551,6 +4646,8 @@ OperationResult readPrimaryAfterGet( data = new DatabaseEntry(); } + assert TestHookExecute.doHookIfSet(semaphoreHook); + /* * Do not release non-transactional locks when reading the primary * cursor. They are held until all locks for this operation are @@ -4571,90 +4668,160 @@ OperationResult readPrimaryAfterGet( pKey, null, priLockType, true /*excludeTombstones*/, dirtyReadAll, dataRequested); + + assert TestHookExecute.doHookIfSet(semaphoreHook); + OperationResult result = null; try { if (priLockStanding != null) { - result = priCursor.getCurrent(null, data); - if (result == null) { - priCursor.revertLock(priLockStanding); - priLockStanding = null; + if (priLockStanding.readCommittedData()) { + try { + result = priCursor.readLastCommitted( + priLockStanding.getLockResult().getWriteLockInfo(), + dataRequested, null, data); + if (result == null) { + priLockStanding = null; + } + } catch (ErasedException | IOException e) { + + /* + * Optimistic read using abortLSN failed, retrying with + * read-committed mode. A read lock will be added to the + * cursor and the locker. + */ + Txn tempLockerRef = (Txn) priCursor.getLocker(); + tempLockerRef.setOptimisticReadIsolation(false); + tempLockerRef.setReadCommittedIsolation(true); + + /* + * searchExact() requires the cursor + * must initially be uninitialized. 
+ */ + priCursor.releaseBIN(); + priCursor.reset(); + + priLockStanding = priCursor.searchExact( + pKey, null, priLockType, + true /*excludeTombstones*/, + dirtyReadAll, dataRequested); + + tempLockerRef.setOptimisticReadIsolation(true); + tempLockerRef.setReadCommittedIsolation(false); + + result = priCursor.getCurrent(null, data); + if (result == null) { + priCursor.revertLock(priLockStanding); + priLockStanding = null; + } + } + } else { + result = priCursor.getCurrent(null, data); + if (result == null) { + priCursor.revertLock(priLockStanding); + priLockStanding = null; + } } } } finally { priCursor.releaseBIN(); } - if (priLockStanding != null && lockPrimaryOnly) { - if (!ensureReferenceToPrimary(pKey, priLockType)) { - priCursor.revertLock(priLockStanding); - priLockStanding = null; - } - } - + /* + * If priLockStanding == null, which means the corresponding record + * in primaryDB is not found. + */ if (priLockStanding == null) { + handlePrimaryRecordNotFound(secDirtyRead, secAssoc, priDb, secDb, + pKey, key, verifyPrimary, locker); + return null; + } else { + /* - * If using read-uncommitted and the primary is deleted, the - * primary must have been deleted after reading the secondary. - * We cannot verify this by checking if the secondary is - * deleted, because it may have been reinserted. [#22603] + * Post-validation phase to check consistency between primary + * and secondary DB records. * - * If the secondary is expired (within TTL clock tolerance), - * then the record must have expired after the secondary read. + * The primary was found, there are two cases here, and we have + * different ways to do the post validation. * - * In either case, return false to skip this record. - */ - if (secDirtyRead || cursorImpl.isProbablyExpired()) { - return null; - } - - /* - * TODO: whether we need to do the following check for all - * usage scenarios of readPrimaryAfterGet. If true, we - * may get the SecondaryAssociation by the secDb. + * 1. If using dirtyRead on primaryDB(which also implies + * dirtyRead is used on secondaryDB), check to + * see if primary was updated so that it no longer contains the + * secondary key. If it has been, return null to skip the record + * This is checked by re-generating secondaryKey. + * checkForPrimaryUpdate is not called for tombstones (excluded + * above). We cannot pass a tombstone to a secondary key creator. + * + * 2. Non-dirty read is used on primaryDB(read-committed, + * repeatable read, optimisticRead). + * See ensureReferenceToPrimary for the validation rules. * - * If secDb has been removed from SecondaryAssociation, the - * operations on the primary database after removing it - * may cause an inconsistency between the secondary record and - * the corresponding primary record. For this case, just return - * false to skip this record. */ - if (secAssoc != null) { - boolean stillExist = false; - for (SecondaryDatabase db : secAssoc.getSecondaries(pKey)) { - if (db == secDb) { - stillExist = true; - break; - } - } - if (!stillExist) { + + if (priDirtyRead) { + /* post-validation case 1, dirty-Read on primary */ + if (checkForPrimaryUpdate(key, pKey, data, + result.getCreationTime(), + result.getModificationTime(), + result.getExpirationTime(), + cursorImpl.getStorageSize())) { return null; } - } + } else { - /* - * When the primary is deleted, secondary keys are deleted - * first. So if the above check fails, we know the secondary - * reference is corrupt. 
- */ - throw secDb.secondaryRefersToMissingPrimaryKey( - !verifyPrimary, locker, priDb, key, pKey, - cursorImpl.getExpirationTime()); - } + WriteLockInfo wli = null; + boolean regenerateSecKey = false; + if (priLockStanding.readCommittedData()) { - /* - * If using read-uncommitted and the primary was found, check to - * see if primary was updated so that it no longer contains the - * secondary key. If it has been, return null to skip the record. - * - * checkForPrimaryUpdate is not called for tombstones (excluded - * above). We cannot pass a tombstone to a secondary key creator. - */ - if (priDirtyRead && - checkForPrimaryUpdate(key, pKey, data, - result.getModificationTime(), - result.getExpirationTime(), - cursorImpl.getStorageSize())) { - return null; + wli = priLockStanding.getLockResult(). + getWriteLockInfo(); + regenerateSecKey = true; + } + + /* post-validation case 2, non-dirty Read on primary. */ + boolean validationResult = ensureReferenceToPrimary(pKey, + priLockType, data, key, (SecondaryDatabase) secDb, + wli, regenerateSecKey); + + if (!validationResult) { + + /* validation failed, revert the lock if obtained previously */ + priCursor.revertLock(priLockStanding); + + handlePrimaryRecordNotFound(secDirtyRead, secAssoc, + priDb, secDb, pKey, key, verifyPrimary, locker); + return null; + } else { + /* + * Normally, for optimisticRead, read lock should be + * release right after getting the data, but here, + * wait until the post-validation ends to release the + * read lock to prevent any write operations from + * happening between reading Primary and post-validation. + * + * Consider this case: + * Initially (A, 1) is in primaryDB, + * (1-A) is in secondaryDB. And we want to find record + * with value == 1. + * + * a. optimisticRead read on secDB, found 1-A, + * next step is use A as the primaryKey to search + * in primaryDB. + * b. optimisticRead read on priDB got (A-1) + * c. A writeTxn sneaks in, update (A-1) to (A-2), + * writeTxn commits. + * d. so now post-validation found nothing + * since (1-A) is deleted in step c, + * so a SecondaryIntegrityException will be thrown. + * + * A read lock is needed to guard between step b and + * step d. + * drop read lock for optimisticRead after + * post-validation ends + */ + priCursor.releaseLockForOptimisticRead( + priLockStanding, priLockType); + } + } } /* @@ -4679,23 +4846,98 @@ OperationResult readPrimaryAfterGet( } } + private void handlePrimaryRecordNotFound(final boolean secDirtyRead, + final SecondaryAssociation secAssoc, + final Database priDb, + final Database secDb, + final DatabaseEntry pKey, + final DatabaseEntry key, + final boolean verifyPrimary, + final Locker locker) { + /* + * If using read-uncommitted and the primary is deleted, the + * primary must have been deleted after reading the secondary. + * We cannot verify this by checking if the secondary is + * deleted, because it may have been reinserted. [#22603] + * + * If using optimisticRead and the primary is deleted, since before + * read on primaryDB ,and after using optimisticRead read on + * secondaryDB, read lock on secondaryDB will be released, so what happens + * between is uncertain. + * + * + * If the secondary is expired (within TTL clock tolerance), + * then the record must have expired after the secondary read. + * + * In either case, return false to skip this record. 
+ */ + if (secDirtyRead || cursorImpl.isProbablyExpired() || + cursorImpl.getLocker().isOptimisticReadIsolation()) { + return; + } + + /* + * TODO: whether we need to do the following check for all + * usage scenarios of readPrimaryAfterGet. If true, we + * may get the SecondaryAssociation by the secDb. + * + * If secDb has been removed from SecondaryAssociation, the + * operations on the primary database after removing it + * may cause an inconsistency between the secondary record and + * the corresponding primary record. For this case, just return + * false to skip this record. + */ + if (secAssoc != null) { + boolean stillExist = false; + for (SecondaryDatabase db : secAssoc.getSecondaries(pKey)) { + if (db == secDb) { + stillExist = true; + break; + } + } + if (!stillExist) { + return; + } + } + + /* + * When the primary is deleted, secondary keys are deleted + * first. So if the above check fails, we know the secondary + * reference is corrupt. + */ + throw secDb.secondaryRefersToMissingPrimaryKey( + !verifyPrimary, locker, priDb, key, pKey, + cursorImpl.getExpirationTime()); + } + /** * Checks whether this secondary cursor still refers to the primary key, * and locks the secondary record if necessary. * * This is used for deadlock avoidance with secondary DBs. The initial - * secondary index read is done without locking. After the primary has - * been locked, we check here to insure that the primary/secondary - * relationship is still in place. There are two cases: - * - * 1. If the secondary DB has duplicates, the key contains the sec/pri - * relationship and the presence of the secondary record (that is not - * deleted) is sufficient to insure the sec/pri relationship. - * - * 2. If the secondary DB does not allow duplicates, then the primary key - * (the data of the secondary record) must additionally be compared to - * the original search key. This detects the case where the secondary - * record was updated to refer to a different primary key. + * secondary index read is done without locking. + * After reading the primary, there are two cases: + * + * 1. primaryDB hasn't been locked only because optimisticRead is used + * and got blocked when read the primaryDB, so the most recent committed + * data is returned and no lock is obtained. + * In this case, we regenerate the secKey using the most recent committed + * primary data, compare it with the secKey passed in. regenerateSecKey + * is an indicator of this case. + * + * 2. primaryDB has been locked, we check here to ensure that + * the primary/secondary relationship is still in place. + * There are two cases: + * + * (1). If the secondary DB has duplicates, the key contains the sec/pri + * relationship and the presence of the secondary record (that is + * not deleted) is sufficient to insure the sec/pri relationship. + * + * (2). If the secondary DB does not allow duplicates, then the primary + * key (the data of the secondary record) must additionally be + * compared to the original search key. This detects the case + * where the secondary record was updated to refer to a different + * primary key. * * In addition, this method locks the secondary record if it would expire * within {@link EnvironmentParams#ENV_TTL_MAX_TXN_TIME}. 
This is needed to @@ -4703,7 +4945,12 @@ OperationResult readPrimaryAfterGet( */ private boolean ensureReferenceToPrimary( final DatabaseEntry matchPriKey, - final LockType lockType) { + final LockType lockType, + final DatabaseEntry primaryData, + final DatabaseEntry secKey, + SecondaryDatabase secDB, + final WriteLockInfo wli, + boolean regenerateSecKey) { assert lockType != LockType.NONE; @@ -4719,8 +4966,19 @@ private boolean ensureReferenceToPrimary( final BIN bin = cursorImpl.getBIN(); final int index = cursorImpl.getIndex(); + /* + * If regenerateSecKey == true, read lock is not obtained + * on primaryDB, so it's possible there is a writeTxn deleting + * current record, and bin.isDeleted(index) == true. + * But we regenerate the secKey using the most recent committed + * primary data, compare it with the secKey passed in, so no need to + * return false here. + */ + if (bin.isDeleted(index)) { - return false; + if (!regenerateSecKey) { + return false; + } } final EnvironmentImpl envImpl = dbImpl.getEnv(); @@ -4730,7 +4988,16 @@ private boolean ensureReferenceToPrimary( bin.getExpiration(index), bin.isExpirationInHours()); if (envImpl.expiresWithin( - expirationTime, envImpl.getTtlMaxTxnTime())) { + expirationTime, envImpl.getTtlMaxTxnTime()) + && !cursorImpl.getLocker().isOptimisticReadIsolation()) { + + /* + * For optimisticRead, lock is not required here, + * to ensure the lock is indeed acquired. + * Since lockLN() is needed to support repeatable-read. + * The lock prevents expiration of the secondary. + * See comment above ensureReferenceToPrimary(). + */ cursorImpl.lockLN( lockType, false /*excludeTombstones*/, false /*allowUncontended*/, false /*noWait*/); @@ -4739,22 +5006,76 @@ private boolean ensureReferenceToPrimary( cursorImpl.releaseBIN(); } + /* - * If there are no duplicates, check the secondary data (primary key). - * No need to actually lock (use LockType.NONE) since the primary lock - * protects the secondary from changes. + * Also ensure that the key in the secondaryDB matches the data + * in the primaryDB. This is necessary because we use + * READ_UNCOMMITTED_ALL when reading from the + * secondaryDB, but a read-committed isolation level(can it be RMW?) + * when reading from the primaryDB. + * As a result, it's possible that the primaryDB returns the + * latest committed data, which may not correspond to the secondary + * key used in the search. 
*/ - if (!cursorImpl.hasDuplicates()) { - final DatabaseEntry secData = new DatabaseEntry(); - if (cursorImpl.lockAndGetCurrent( - null, secData, LockType.NONE, - false /*excludeTombstones*/) == null) { - return false; + if (regenerateSecKey) { + SecondaryConfig secConfig = secDB.getPrivateSecondaryConfig(); + SecondaryKeyCreator keyCreator = secConfig.getKeyCreator(); + SecondaryMultiKeyCreator multiKeyCreator = secConfig.getMultiKeyCreator(); + + long priModificationTime = wli.getAbortModificationTime(); + long priCreationTime = wli.getAbortCreationTime(); + long priExpirationTime = wli.getAbortExpiration(); + int priStorageSize = wli.getAbortLogSize(); + + assert keyCreator != null || multiKeyCreator != null; + + if (keyCreator != null) { + + DatabaseEntry newSecKey = new DatabaseEntry(); + + if (!keyCreator.createSecondaryKey(secDB, matchPriKey, + primaryData, + priCreationTime, + priModificationTime, + priExpirationTime, + priStorageSize, + newSecKey) || !newSecKey.equals(secKey)) { + return false; + } + } else { + Set newSecKeys = new HashSet<>(); + multiKeyCreator.createSecondaryKeys(secDB, matchPriKey, + primaryData, + priCreationTime, + priModificationTime, + priExpirationTime, + priStorageSize, + newSecKeys); + return newSecKeys.contains(secKey); } + } else { + /* + * Also ensure that the key in the secondaryDB matches the data + * in the primaryDB. This is necessary because we use + * READ_UNCOMMITTED_ALL when reading from the + * secondaryDB, but a read-committed isolation level(can it be RMW?) + * when reading from the primaryDB. + * + * If there are no duplicates, check the secondary data (primary key). + * No need to actually lock (use LockType.NONE) since the primary lock + * protects the secondary from changes. + */ + if (!cursorImpl.hasDuplicates()) { + final DatabaseEntry secData = new DatabaseEntry(); - if (!secData.equals(matchPriKey)) { - return false; + if (cursorImpl.lockAndGetCurrent( + null, secData, LockType.NONE, + false /*excludeTombstones*/) == null) { + return false; + } + + return secData.equals(matchPriKey); } } @@ -4774,6 +5095,7 @@ boolean checkForPrimaryUpdate( final DatabaseEntry key, final DatabaseEntry pKey, final DatabaseEntry data, + final long creationTime, final long modificationTime, final long expirationTime, final int storageSize) { @@ -4908,7 +5230,7 @@ private LockType getLockType(final LockMode lockMode) { } else if (lockMode == null || lockMode == LockMode.DEFAULT) { return LockType.READ; } else if (lockMode == LockMode.RMW) { - return LockType.WRITE; + return LockType.WRITE_RMW; } else { assert false : lockMode; return LockType.NONE; @@ -5014,6 +5336,10 @@ void checkOpen() { } } + public void setSemaphoreHook(TestHook hook) { + this.semaphoreHook = hook; + } + /** * @throws EnvironmentFailureException if the underlying environment is * invalid. 
diff --git a/kvmain/src/main/java/com/sleepycat/je/Database.java b/kvmain/src/main/java/com/sleepycat/je/Database.java old mode 100644 new mode 100755 index 3b407502..3561efd4 --- a/kvmain/src/main/java/com/sleepycat/je/Database.java +++ b/kvmain/src/main/java/com/sleepycat/je/Database.java @@ -30,6 +30,7 @@ import com.sleepycat.je.dbi.EnvironmentImpl; import com.sleepycat.je.dbi.GetMode; import com.sleepycat.je.dbi.SearchMode; +import com.sleepycat.je.dbi.TTL; import com.sleepycat.je.dbi.TriggerManager; import com.sleepycat.je.rep.DatabasePreemptedException; import com.sleepycat.je.txn.HandleLocker; @@ -884,6 +885,7 @@ Cursor newDbcInstance(final Transaction txn, public void populateSecondaries(final Transaction txn, final DatabaseEntry key, final DatabaseEntry data, + final long creationTime, final long modificationTime, final long expirationTime, final int storageSize, @@ -918,6 +920,8 @@ public void populateSecondaries(final Transaction txn, null /*priCursor*/, key /*priKey*/, null /*oldData*/, data /*newData*/, cacheMode, + creationTime, + 0,/*oldCreationTime*/ modificationTime, 0 /*oldModificationTime*/, expirationTime, @@ -989,7 +993,8 @@ public OperationResult delete(final Transaction txn, BeforeImageContext bImgCtx = null; if (options != null && options.getBeforeImageTTL() > 0) { bImgCtx = new BeforeImageContext( - options.getBeforeImageTTL(), + TTL.ttlToExpiration(options.getBeforeImageTTL(), + options.getBeforeImageTTLUnit()), options.getBeforeImageTTLUnit() == TimeUnit.HOURS); } diff --git a/kvmain/src/main/java/com/sleepycat/je/DbInternal.java b/kvmain/src/main/java/com/sleepycat/je/DbInternal.java old mode 100644 new mode 100755 index 14495128..b4a8ab50 --- a/kvmain/src/main/java/com/sleepycat/je/DbInternal.java +++ b/kvmain/src/main/java/com/sleepycat/je/DbInternal.java @@ -46,6 +46,7 @@ public class DbInternal { public static OperationResult DEFAULT_RESULT = new OperationResult( 0 /*expirationTime*/, false /*update*/, + 0L /*creationTime*/, 0L /*modificationTime*/, 0 /*storageSize*/, false /*tombstone*/); @@ -488,6 +489,21 @@ public static void setCheckpointUP(final EnvironmentConfig config, public static boolean getCheckpointUP(final EnvironmentConfig config) { return config.getCheckpointUP(); } + + /** + * Proxy to EnvironmentConfig.setBImgIdx() + */ + public static void setCreateBImgIdx(final EnvironmentConfig config, + final boolean bImgIdx) { + config.setCreateBImgIdx(bImgIdx); + } + + /** + * Proxy to EnvironmentConfig.getBImgIdx() + */ + public static boolean getBImgIdx(final EnvironmentConfig config) { + return config.getBImgIdx(); + } /** * Proxy to EnvironmentConfig.setTxnReadCommitted() @@ -597,12 +613,13 @@ public static Txn getTxn(final Transaction transaction) { public static OperationResult makeResult( final long expirationTime, + final long creationTime, final long modificationTime, final int storageSize, final boolean tombstone) { return new OperationResult( - expirationTime, false /*update*/, + expirationTime, false /*update*/, creationTime, modificationTime, storageSize, tombstone); } @@ -610,13 +627,14 @@ public static OperationResult makeResult( final int expiration, final boolean expirationInHours, final boolean update, + final long creationTime, final long modificationTime, final int storageSize, final boolean tombstone) { return new OperationResult( TTL.expirationToSystemTime(expiration, expirationInHours), - update, modificationTime, storageSize, tombstone); + update, creationTime,modificationTime, storageSize, tombstone); } public static 
ReadOptions getReadOptions(LockMode lockMode) { diff --git a/kvmain/src/main/java/com/sleepycat/je/Environment.java b/kvmain/src/main/java/com/sleepycat/je/Environment.java index 8b46e1b4..b3067209 100644 --- a/kvmain/src/main/java/com/sleepycat/je/Environment.java +++ b/kvmain/src/main/java/com/sleepycat/je/Environment.java @@ -22,9 +22,11 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicReference; import java.util.logging.Level; +import java.util.stream.Stream; import com.sleepycat.je.Durability.ReplicaAckPolicy; import com.sleepycat.je.cleaner.ExtinctionScanner; +import com.sleepycat.je.config.EnvironmentParams; import com.sleepycat.je.dbi.DatabaseImpl; import com.sleepycat.je.dbi.DbConfigManager; import com.sleepycat.je.dbi.DbEnvPool; @@ -33,6 +35,7 @@ import com.sleepycat.je.dbi.EnvironmentImpl; import com.sleepycat.je.dbi.StartupTracker.Phase; import com.sleepycat.je.dbi.TriggerManager; +import com.sleepycat.je.rep.NetworkRestore; import com.sleepycat.je.rep.ReplicationConfig; import com.sleepycat.je.txn.HandleLocker; import com.sleepycat.je.txn.Locker; @@ -42,6 +45,7 @@ import com.sleepycat.je.utilint.LoggerUtils; import com.sleepycat.je.utilint.Pair; +import com.sleepycat.je.utilint.PropUtil; import org.checkerframework.checker.nullness.qual.NonNull; import org.checkerframework.checker.nullness.qual.Nullable; @@ -226,7 +230,7 @@ public Environment(File envHome, EnvironmentConfig configuration) IllegalArgumentException { this(envHome, configuration, null /*repConfig*/, - null /*envImplParam*/); + null /*envImplParam*/, false); } /** @@ -239,7 +243,8 @@ public Environment(File envHome, EnvironmentConfig configuration) protected Environment(File envHome, EnvironmentConfig envConfig, ReplicationConfig repConfig, - EnvironmentImpl envImpl) { + EnvironmentImpl envImpl, + boolean joinGroup) { initEnvImpl(); @@ -248,6 +253,41 @@ protected Environment(File envHome, final EnvironmentConfig useEnvConfig = resolveConfig(envHome, envConfig, repConfig); + /* + * Verify if the environment is in network restore. If True, the + * opening of this environment handle is locked until the network + * restore finishes. This will be only applied if the environment + * to be opened is aimed to join a group. + */ + if (joinGroup && NetworkRestore.isEnvInRestore(envHome, this)) { + synchronized (this) { + try { + /* + * timeout to avoid to stuck hanging forever. + */ + String val = DbConfigManager.getVal(useEnvConfig.props, + EnvironmentParams.ENV_NETWORK_RESTORE_LOCK_TIMEOUT); + int timeout = PropUtil.parseDuration(val); + wait(timeout); + } catch (InterruptedException e) { + /* + * Do not make anything. Because of a network restore + * is in progress, likely a EnvironmentFailureException + * will be thrown. + */ + LoggerUtils.envLogMsg(Level.WARNING, envImpl, + "A timeout occurred because a network " + + "restore on " + envHome.getAbsolutePath() + + " has not finished. Thus, the environment " + + "handle on that directory was not opened. " + + "See value of the " + + EnvironmentConfig. + ENV_NETWORK_RESTORE_LOCK_TIMEOUT + + " parameter to increase this timeout."); + } + } + } + if (envImpl != null) { /* We're creating an InternalEnvironment in EnvironmentImpl. 
*/ environmentImpl = envImpl; @@ -1456,11 +1496,13 @@ private void checkTxnConfig(TransactionConfig txnConfig) if (txnConfig == null) { return; } - if ((txnConfig.getReadUncommitted() && - txnConfig.getReadCommitted())) { + + if (Stream.of(txnConfig.getReadUncommitted(), + txnConfig.getReadCommitted(), + txnConfig.getOptimisticRead()).filter(x -> x).count() > 1) { throw new IllegalArgumentException ("Only one may be specified: " + - "ReadCommitted or ReadUncommitted"); + "ReadCommitted or ReadUncommitted or OptimisticRead"); } } diff --git a/kvmain/src/main/java/com/sleepycat/je/EnvironmentConfig.java b/kvmain/src/main/java/com/sleepycat/je/EnvironmentConfig.java old mode 100644 new mode 100755 index 90c3b722..6020294c --- a/kvmain/src/main/java/com/sleepycat/je/EnvironmentConfig.java +++ b/kvmain/src/main/java/com/sleepycat/je/EnvironmentConfig.java @@ -1121,6 +1121,45 @@ public class EnvironmentConfig extends EnvironmentMutableConfig { */ public static final String ENV_LATCH_TIMEOUT = "je.env.latchTimeout"; + /** + * For locking the opening of an {@link Environment} handle if its directory is + * in network restore. When an environment handle is being opening to join a + * group, it is verified if a network restore is in progress on its directory. + * If True, the opening of the handle is locked until the network restore + * finishes. If the network restore does not finish, a timeout will occur after + * the duration specified by this parameter to avoid stuck hanging forever. + * When this timeout occurs: + *

+     * <ul>
+     * <li>The Environment is not opened.</li>
+     * <li>An {@link EnvironmentFailureException} is thrown.</li>
+     * <li>A message is logged at level WARNING.</li>
+     * </ul>
+     *
+     * <p>Most applications should not change this parameter. The default value, 2
+     * minutes, should be much longer than the duration of a network restore.
+     *
+     * <p><table border="1">
+     * <caption>Information about configuration option</caption>
+     * <tr><td>Name</td><td>Type</td><td>Mutable</td>
+     * <td>Default</td><td>Minimum</td><td>Maximum</td></tr>
+     * <tr><td>{@value}</td><td>Duration</td><td>No</td>
+     * <td>2 min</td><td>1 min</td><td>-none-</td></tr>
+     * </table>
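+     *
+     * <p>For illustration only (not part of this change): assuming the
+     * standard {@link EnvironmentMutableConfig#setConfigParam} API and the
+     * usual JE time-duration syntax, an application that expects long
+     * network restores might raise this timeout as follows ({@code envHome}
+     * is a placeholder for the environment directory).
+     * <pre>{@code
+     * File envHome = new File("/path/to/env");   // placeholder path
+     * EnvironmentConfig envConfig = new EnvironmentConfig();
+     * envConfig.setAllowCreate(true);
+     * envConfig.setTransactional(true);
+     * // Wait up to 5 minutes for an in-progress network restore to finish
+     * // before giving up on opening this environment handle.
+     * envConfig.setConfigParam("je.env.networkRestoreLockTimeout", "5 min");
+     * Environment env = new Environment(envHome, envConfig);
+     * }</pre>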
    + * + * @see Time Duration + * Properties + * + * @since 25.3 + */ + public static final String ENV_NETWORK_RESTORE_LOCK_TIMEOUT = + "je.env.networkRestoreLockTimeout"; + /** * The interval added to the system clock time for determining that a * record may have expired. Used when an internal integrity error may be @@ -4190,6 +4229,12 @@ public class EnvironmentConfig extends EnvironmentMutableConfig { * For unit testing, to prevent writing utilization data during checkpoint. */ private transient boolean checkpointUP = true; + + /** + * For unit testing, to prevent use of the Before Image DB. + */ + private transient boolean createBImg = true; + private boolean allowCreate = false; @@ -4681,7 +4726,21 @@ void setCheckpointUP(boolean checkpointUP) { boolean getCheckpointUP() { return checkpointUP; } + + /** + * For unit testing, to prevent use of the Before Image DB. + */ + void setCreateBImgIdx(boolean createBImg) { + this.createBImg = createBImg; + } + /** + * For unit testing, to prevent use of the Before Image DB. + */ + boolean getBImgIdx() { + return createBImg; + } + /** * Returns a copy of this configuration object. */ diff --git a/kvmain/src/main/java/com/sleepycat/je/EnvironmentStats.java b/kvmain/src/main/java/com/sleepycat/je/EnvironmentStats.java index ff2c4290..831597c8 100644 --- a/kvmain/src/main/java/com/sleepycat/je/EnvironmentStats.java +++ b/kvmain/src/main/java/com/sleepycat/je/EnvironmentStats.java @@ -2490,11 +2490,17 @@ public List getStatGroups() { DbiStatDefinition.BACKUP_GROUP_DESC); } - if (bImgStats == null) { - bImgStats = new StatGroup( - BeforeImageIndexStatDefinition.GROUP_NAME, - BeforeImageIndexStatDefinition.GROUP_DESC); - } + if (dbVerifyStats == null) { + dbVerifyStats = new StatGroup( + DbVerifyStatDefinition.DB_VERIFY_GROUP_NAME, + DbVerifyStatDefinition.DB_VERIFY_GROUP_DESC); + } + + if (bImgStats == null) { + bImgStats = new StatGroup( + BeforeImageIndexStatDefinition.GROUP_NAME, + BeforeImageIndexStatDefinition.GROUP_DESC); + } return Arrays.asList( logStats, cacheStats, cleanerStats, incompStats, diff --git a/kvmain/src/main/java/com/sleepycat/je/JEVersion.java b/kvmain/src/main/java/com/sleepycat/je/JEVersion.java index 3649080d..e186b1f9 100644 --- a/kvmain/src/main/java/com/sleepycat/je/JEVersion.java +++ b/kvmain/src/main/java/com/sleepycat/je/JEVersion.java @@ -31,7 +31,7 @@ public class JEVersion implements Comparable, Serializable { * Release version. */ public static final JEVersion CURRENT_VERSION = - new JEVersion(25, 1, 11, null); + new JEVersion(25, 3, 17, null); private final int majorNum; private final int minorNum; diff --git a/kvmain/src/main/java/com/sleepycat/je/OperationResult.java b/kvmain/src/main/java/com/sleepycat/je/OperationResult.java old mode 100644 new mode 100755 index 4f55e7d8..8298fd23 --- a/kvmain/src/main/java/com/sleepycat/je/OperationResult.java +++ b/kvmain/src/main/java/com/sleepycat/je/OperationResult.java @@ -13,7 +13,8 @@ package com.sleepycat.je; -import com.sleepycat.je.beforeimage.BeforeImageIndex; +import com.sleepycat.je.beforeimage.BeforeImageIndex; +import com.sleepycat.je.util.TimeSupplier; /** * The result of an operation that successfully reads or writes a record. 
@@ -35,6 +36,8 @@ public class OperationResult { private final long expirationTime; private final long modificationTime; + private final long creationTime; + private long oldCreationTime; private long oldModificationTime; private final int storageSize; private int oldStorageSize; @@ -44,16 +47,30 @@ public class OperationResult { OperationResult(final long expirationTime, final boolean update, + final long creationTime, final long modificationTime, final int storageSize, final boolean tombstone) { this.expirationTime = expirationTime; + this.creationTime = creationTime; this.modificationTime = modificationTime; this.storageSize = storageSize; this.tombstone = tombstone; this.update = update; } + @Override + public String toString() { + return "OperationResult{" + + "expirationTime=" + expirationTime + + ", update=" + update + + ", creationTime=" + TimeSupplier.formatCurrentTimeToDate(creationTime) + + ", modificationTime=" + TimeSupplier.formatCurrentTimeToDate(modificationTime) + + ", storageSize=" + storageSize + + ", tombstone=" + tombstone + + '}'; + } + /** * Returns whether the operation was an update, for distinguishing inserts * and updates performed by a {@link Put#OVERWRITE} operation. @@ -122,6 +139,43 @@ public long getModificationTime() { return modificationTime; } + /** + * Returns the creationTime of the record, or zero. + * + *

+     * <p>For write operations, non-zero is returned for records belonging to
+     * primary databases and zero is returned when the record belongs to a
+     * secondary (duplicates) database.
+     *
+     * <p>For read operations, zero is returned in the following cases and
+     * non-zero is returned in all other cases.
+     * <ul>
+     * <li>When the record belongs to a secondary (duplicates) database,
+     * zero is always returned. Creation times are not maintained in
+     * secondary databases.</li>
+     *
+     * <li>When the record data is not requested, i.e., the {@code data}
+     * param is null or {@link DatabaseEntry#setPartial} was called, then
+     * zero may be returned. This is because the creation time is
+     * stored with the record's data if it is an update or delete,
+     * so to obtain the creation time the record's LN may need to be
+     * fetched from disk. If the LN happens to be cached or is embedded
+     * in the parent BIN (see {@link
+     * EnvironmentConfig#TREE_MAX_EMBEDDED_LN}), then non-zero is
+     * returned; but to guarantee that it is returned, the data should be
+     * requested.</li>
+     *
+     * <li>When the record was last written using JE 19.3 or earlier,
+     * zero is always returned. Storage of creation times was added in
+     * JE 25.2.</li>
+     * </ul>
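+     *
+     * <p>A minimal, illustrative sketch (not part of this change; {@code db}
+     * is assumed to be an already-open primary {@link Database}): the data
+     * is requested so that a stored creation time can be returned.
+     * <pre>{@code
+     * DatabaseEntry key = new DatabaseEntry("someKey".getBytes());
+     * DatabaseEntry data = new DatabaseEntry();
+     * OperationResult result = db.get(null, key, data, Get.SEARCH, null);
+     * long creationTime =
+     *     (result == null) ? 0L : result.getCreationTime();
+     * // Zero means the creation time is unknown (see the cases above).
+     * }</pre>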
    + * + * @see WriteOptions#setCreationTime + * @since 25.2 + */ + public long getCreationTime() { + return creationTime; + } + public long getOldModificationTime() { return oldModificationTime; } @@ -130,6 +184,17 @@ public void setOldModificationTime(long t) { oldModificationTime = t; } + /* + * Used while indexing secondary databases + */ + public long getOldCreationTime() { + return oldCreationTime; + } + + public void setOldCreationTime(long t) { + oldCreationTime = t; + } + public int getStorageSize() { return storageSize; } @@ -174,10 +239,10 @@ public boolean hasBeforeImage() { /** * Returns if Before Image was enabled when this record was created. * The record may not have a Before Image, even if it is enabled, if no - * previous entry for the key of the record existed. Use + * previous entry for the key of the record existed. Use * {@link OperationResult#hasBeforeImage()} to check if a Before Image * exists for the record. - * @since 24.3 + * @since 25.1 Currently not used. */ public boolean beforeImageEnabled() { // TODO check this necessity @@ -190,7 +255,7 @@ public boolean beforeImageEnabled() { /** * Returns the bytes of the Before Image of this record. Only non-null * if the record has a Before Image and it was requested in - * {@link ReadOptions#setIncludeBeforeImage()} in the get operation. + * in the get operation. Currently not used. * @since 25.1 */ public byte[] beforeImage() { @@ -203,8 +268,8 @@ public byte[] beforeImage() { /** * Returns the expiration time of the Before Image of this record. * Only non-zero if the record has a Before Image and it was requested in - * {@link ReadOptions#setIncludeBeforeImage()} in the get operation. - * @since 25.1 + * in the get operation. + * @since 25.1 Currently not used */ public long beforeImageExpiration() { if (bImgEntry != null) { diff --git a/kvmain/src/main/java/com/sleepycat/je/SecondaryCursor.java b/kvmain/src/main/java/com/sleepycat/je/SecondaryCursor.java old mode 100644 new mode 100755 index b238ac74..eed513da --- a/kvmain/src/main/java/com/sleepycat/je/SecondaryCursor.java +++ b/kvmain/src/main/java/com/sleepycat/je/SecondaryCursor.java @@ -594,7 +594,7 @@ OperationResult getInternal( return retrieveNext( key, pKey, data, lockMode, cacheMode, getMode, - getLockPrimaryOnly(lockMode, data)); + getNonDirtyReadPrimary(lockMode, data)); } if (getType == Get.CURRENT) { @@ -608,7 +608,7 @@ OperationResult getInternal( return position( key, pKey, data, lockMode, cacheMode, getType == Get.FIRST, - getLockPrimaryOnly(lockMode, data)); + getNonDirtyReadPrimary(lockMode, data)); } /** @@ -1479,10 +1479,11 @@ private OperationResult getCurrentInternal(final DatabaseEntry key, final DatabaseEntry data, final LockMode lockMode, final CacheMode cacheMode) { - final boolean lockPrimaryOnly = getLockPrimaryOnly(lockMode, data); + final boolean nonDirtyReadPrimary = + getNonDirtyReadPrimary(lockMode, data); final LockMode searchLockMode = - lockPrimaryOnly ? 
LockMode.READ_UNCOMMITTED_ALL : lockMode; + getSearchModeOnSecondary(nonDirtyReadPrimary, lockMode); final OperationResult result = getCurrentInternal( key, pKey, searchLockMode, false /*excludeTombstones*/, cacheMode); @@ -1493,7 +1494,7 @@ private OperationResult getCurrentInternal(final DatabaseEntry key, return readPrimaryAfterGet( key, pKey, data, lockMode, isReadUncommittedMode(searchLockMode), - lockPrimaryOnly, result); + nonDirtyReadPrimary, result); } /** @@ -1509,10 +1510,12 @@ OperationResult search(final DatabaseEntry key, final CacheMode cacheMode, final SearchMode searchMode) { - final boolean lockPrimaryOnly = getLockPrimaryOnly(lockMode, data); + final boolean nonDirtyReadPrimary = + getNonDirtyReadPrimary(lockMode, data); final LockMode searchLockMode = - lockPrimaryOnly ? LockMode.READ_UNCOMMITTED_ALL : lockMode; + getSearchModeOnSecondary(nonDirtyReadPrimary, lockMode); + final OperationResult result1 = search( key, pKey, searchLockMode, false /*excludeTombstones*/, @@ -1526,7 +1529,7 @@ OperationResult search(final DatabaseEntry key, final OperationResult result2 = readPrimaryAfterGet( key, pKey, data, lockMode, isReadUncommittedMode(searchLockMode), - lockPrimaryOnly, result1); + nonDirtyReadPrimary, result1); if (result2 != null) { return result2; @@ -1545,12 +1548,12 @@ key, pKey, data, lockMode, isReadUncommittedMode(searchLockMode), /* Find exact sec key and next primary key. */ return retrieveNext( key, pKey, data, lockMode, cacheMode, GetMode.NEXT_DUP, - lockPrimaryOnly); + nonDirtyReadPrimary); case SET_RANGE: /* Find next sec key or primary key. */ return retrieveNext( key, pKey, data, lockMode, cacheMode, GetMode.NEXT, - lockPrimaryOnly); + nonDirtyReadPrimary); default: throw EnvironmentFailureException.unexpectedState(); } @@ -1568,10 +1571,10 @@ private OperationResult position(final DatabaseEntry key, final LockMode lockMode, final CacheMode cacheMode, final boolean first, - final boolean lockPrimaryOnly) { + final boolean nonDirtyReadPrimary) { final LockMode searchLockMode = - lockPrimaryOnly ? LockMode.READ_UNCOMMITTED_ALL : lockMode; + getSearchModeOnSecondary(nonDirtyReadPrimary, lockMode); final OperationResult result1 = position( key, pKey, searchLockMode, false /*excludeTombstones*/, @@ -1583,7 +1586,7 @@ private OperationResult position(final DatabaseEntry key, final OperationResult result2 = readPrimaryAfterGet( key, pKey, data, lockMode, isReadUncommittedMode(searchLockMode), - lockPrimaryOnly, result1); + nonDirtyReadPrimary, result1); if (result2 != null) { return result2; @@ -1592,7 +1595,7 @@ key, pKey, data, lockMode, isReadUncommittedMode(searchLockMode), /* Advance over the unavailable record. */ return retrieveNext( key, pKey, data, lockMode, cacheMode, - first ? GetMode.NEXT : GetMode.PREV, lockPrimaryOnly); + first ? GetMode.NEXT : GetMode.PREV, nonDirtyReadPrimary); } /** @@ -1608,10 +1611,10 @@ private OperationResult retrieveNext( final LockMode lockMode, final CacheMode cacheMode, GetMode getMode, - final boolean lockPrimaryOnly) { + final boolean nonDirtyReadPrimary) { final LockMode searchLockMode = - lockPrimaryOnly ? 
LockMode.READ_UNCOMMITTED_ALL : lockMode; + getSearchModeOnSecondary(nonDirtyReadPrimary, lockMode); GetMode savedMode = null; @@ -1633,7 +1636,7 @@ private OperationResult retrieveNext( final OperationResult result2 = readPrimaryAfterGet( key, pKey, data, lockMode, isReadUncommittedMode(searchLockMode), - lockPrimaryOnly, result1); + nonDirtyReadPrimary, result1); if (result2 != null) { return result2; @@ -1662,8 +1665,8 @@ private OperationResult retrieveNext( * + When the primary data is not requested we must lock the secondary * because we do not read or lock the primary. */ - private boolean getLockPrimaryOnly(final LockMode lockMode, - final DatabaseEntry data) { + private boolean getNonDirtyReadPrimary(final LockMode lockMode, + final DatabaseEntry data) { final boolean dataRequested = data != null && @@ -1672,6 +1675,16 @@ private boolean getLockPrimaryOnly(final LockMode lockMode, return dataRequested && !isReadUncommittedMode(lockMode); } + private LockMode getSearchModeOnSecondary(boolean nonDirtyReadPrimary, + LockMode lockMode) { + if (nonDirtyReadPrimary) { + return this.getCursorImpl().getLocker().isOptimisticReadIsolation() ? + lockMode : LockMode.READ_UNCOMMITTED_ALL; + } else { + return lockMode; + } + } + /** * Reads the primary record associated with a secondary record. * @@ -1693,6 +1706,9 @@ private boolean getLockPrimaryOnly(final LockMode lockMode, * then used to read and lock the associated primary record. At this * point only the primary record is locked. * + * For optimisticRead, both secondary and primary will be read using + * optimisticRead configuration. + * * Then, the secondary reference is checked (see checkReferenceToPrimary in * Cursor). Note that there is no need to lock the secondary before * checking its reference to the primary, because during the check the @@ -1727,7 +1743,7 @@ private OperationResult readPrimaryAfterGet( final DatabaseEntry data, final LockMode lockMode, final boolean secDirtyRead, - final boolean lockPrimaryOnly, + final boolean nonDirtyReadPrimary, final OperationResult origResult) { final Database primaryDb = secondaryDb.getPrimary(pKey); @@ -1742,7 +1758,7 @@ private OperationResult readPrimaryAfterGet( if (dataRequested) { return readPrimaryAfterGet( primaryDb, key, pKey, data, lockMode, secDirtyRead, - lockPrimaryOnly, false /*verifyPrimary*/, + nonDirtyReadPrimary, false /*verifyPrimary*/, cursorImpl.getLocker() /*locker*/, secondaryDb, null /*secAssoc*/); } else { @@ -1754,6 +1770,7 @@ private OperationResult readPrimaryAfterGet( return secDirtyRead ? 
DbInternal.makeResult( cursorImpl.getExpirationTime(), + 0L /*creationTime*/, 0L /*modificationTime*/, cursorImpl.getStorageSize(), false /*tombstone*/) : @@ -1768,6 +1785,7 @@ private OperationResult readPrimaryAfterGet( boolean checkForPrimaryUpdate(final DatabaseEntry key, final DatabaseEntry pKey, final DatabaseEntry data, + final long creationTime, final long modificationTime, final long expirationTime, final int storageSize) { @@ -1790,7 +1808,7 @@ boolean checkForPrimaryUpdate(final DatabaseEntry key, */ final DatabaseEntry secKey = new DatabaseEntry(); if (!conf.getKeyCreator().createSecondaryKey - (secondaryDb, pKey, data, + (secondaryDb, pKey, data, creationTime, modificationTime, expirationTime, storageSize, secKey) || !secKey.equals(key)) { @@ -1804,7 +1822,7 @@ boolean checkForPrimaryUpdate(final DatabaseEntry key, */ final Set results = new HashSet(); conf.getMultiKeyCreator().createSecondaryKeys - (secondaryDb, pKey, data, + (secondaryDb, pKey, data, creationTime, modificationTime, expirationTime, storageSize, results); if (!results.contains(key)) { diff --git a/kvmain/src/main/java/com/sleepycat/je/SecondaryDatabase.java b/kvmain/src/main/java/com/sleepycat/je/SecondaryDatabase.java old mode 100644 new mode 100755 index bda9c099..bf150e6c --- a/kvmain/src/main/java/com/sleepycat/je/SecondaryDatabase.java +++ b/kvmain/src/main/java/com/sleepycat/je/SecondaryDatabase.java @@ -332,6 +332,8 @@ private void init(final Locker locker) { priCursor.getCursorImpl(), key /*priKey*/, null /*oldData*/, data /*newData*/, null /*cacheMode*/, + result.getCreationTime(), + 0, /*old creation time*/ result.getModificationTime(), 0, /*old modification time*/ result.getExpirationTime(), @@ -601,6 +603,7 @@ private boolean deleteObsoletePrimaryKeysInternal(final Cursor cursor, public void populateSecondaries(final Transaction txn, final DatabaseEntry key, final DatabaseEntry data, + final long creationTime, final long modificationTime, final long expirationTime, final int storageSize, @@ -1229,6 +1232,8 @@ int updateSecondary(final Locker locker, final DatabaseEntry oldData, final DatabaseEntry newData, final CacheMode cacheMode, + final long newCreationTime, + final long oldCreationTime, final long newModificationTime, final long oldModificationTime, final long expirationTime, @@ -1272,6 +1277,7 @@ int updateSecondary(final Locker locker, if ((oldData != null || newData == null) && !oldTombstone) { oldSecKey = new DatabaseEntry(); if (!keyCreator.createSecondaryKey(this, priKey, oldData, + oldCreationTime, oldModificationTime, oldExpirationTime, oldStorageSize, @@ -1283,6 +1289,7 @@ int updateSecondary(final Locker locker, if (newData != null && !newTombstone) { newSecKey = new DatabaseEntry(); if (!keyCreator.createSecondaryKey(this, priKey, newData, + newCreationTime, newModificationTime, expirationTime, storageSize, @@ -1397,6 +1404,7 @@ int updateSecondary(final Locker locker, oldKeys = new HashSet<>(); multiKeyCreator.createSecondaryKeys( this, priKey, oldData, + newCreationTime, oldModificationTime, oldExpirationTime, oldStorageSize, @@ -1409,6 +1417,7 @@ int updateSecondary(final Locker locker, newKeys = new HashSet<>(); multiKeyCreator.createSecondaryKeys( this, priKey, newData, + newCreationTime, newModificationTime, expirationTime, storageSize, diff --git a/kvmain/src/main/java/com/sleepycat/je/SecondaryKeyCreator.java b/kvmain/src/main/java/com/sleepycat/je/SecondaryKeyCreator.java index f80c6890..f5d4277d 100644 --- a/kvmain/src/main/java/com/sleepycat/je/SecondaryKeyCreator.java 
+++ b/kvmain/src/main/java/com/sleepycat/je/SecondaryKeyCreator.java @@ -124,6 +124,9 @@ public interface SecondaryKeyCreator { * or non-null, and the implementation is expected to ignore it; otherwise, * this parameter is always non-null. * + * @param creationTime the creation time of the record. It is + * needed only if the secondary index indexes the creation time. + * * @param modificationTime the last modification time of the record. It is * needed only if the secondary index indexes the modification time. * @@ -142,6 +145,7 @@ public interface SecondaryKeyCreator { public boolean createSecondaryKey(SecondaryDatabase secondary, DatabaseEntry key, DatabaseEntry data, + long creationTime, long modificationTime, long expirationTime, int storageSize, diff --git a/kvmain/src/main/java/com/sleepycat/je/SecondaryMultiKeyCreator.java b/kvmain/src/main/java/com/sleepycat/je/SecondaryMultiKeyCreator.java index 44003a82..b37e4d10 100644 --- a/kvmain/src/main/java/com/sleepycat/je/SecondaryMultiKeyCreator.java +++ b/kvmain/src/main/java/com/sleepycat/je/SecondaryMultiKeyCreator.java @@ -148,6 +148,7 @@ public interface SecondaryMultiKeyCreator { public void createSecondaryKeys(SecondaryDatabase secondary, DatabaseEntry key, DatabaseEntry data, + long creationTime, long modificationTime, long expirationTime, int storageSize, diff --git a/kvmain/src/main/java/com/sleepycat/je/TransactionConfig.java b/kvmain/src/main/java/com/sleepycat/je/TransactionConfig.java index 49b0db8a..e1dc01c7 100644 --- a/kvmain/src/main/java/com/sleepycat/je/TransactionConfig.java +++ b/kvmain/src/main/java/com/sleepycat/je/TransactionConfig.java @@ -34,6 +34,7 @@ public class TransactionConfig implements Cloneable { private boolean noWait = false; private boolean readUncommitted = false; private boolean readCommitted = false; + private boolean optimisticRead = false; private boolean readOnly = false; private boolean localWrite = false; private int txnTimeout = -1; @@ -221,6 +222,34 @@ public TransactionConfig setReadCommitted(final boolean readCommitted) { return this; } + /** + * Configures the transaction for optimistic read isolation. + * + *

    The major difference between Optimistic Read and + * Read Committed/Repeatable Read lies in how they handle read locks + * in the presence of a write lock. + * In Read Committed and Repeatable Read, a read operation will block and + * wait for the write lock to be released if it is held by an open + * transaction on the LN. In contrast, Optimistic Read does not wait for the + * lock; instead, it attempts to read the most recently committed version of + * the data (i.e., the latest version that was committed before the + * uncommitted transaction). If it fails to do so, it falls back to + * Read Committed semantics.

    + * + *

    Additionally, Optimistic Read does not guarantee cursor stability. + * The read lock is released immediately after reading the LN, before + * the BIN latch is released, ensuring that the read does not block + * subsequent write operations.
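+ *
+ * A minimal usage sketch (illustrative only; it assumes an open,
+ * transactional Environment handle named env):
+ * <pre>
+ *     TransactionConfig txnConfig = new TransactionConfig();
+ *     txnConfig.setOptimisticRead(true);
+ *     Transaction txn = env.beginTransaction(null, txnConfig);
+ *     // Reads in this transaction do not wait on write locks held by
+ *     // other transactions; they attempt to return the most recently
+ *     // committed version of the data instead.
+ *     txn.commit();
+ * </pre>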

    + * + * @param optimisticRead If true, configures the transaction for + * optimistic read isolation. + * @return this + */ + public TransactionConfig setOptimisticRead(final boolean optimisticRead) { + this.optimisticRead = optimisticRead; + return this; + } + /** * Returns true if the transaction is configured for read-committed * isolation. @@ -235,6 +264,19 @@ public boolean getReadCommitted() { return readCommitted; } + /** + * Returns true if the transaction is configured for optimistic read + * isolation. + * + * @return true if the transaction is configured for optimistic read + * isolation. + * + * @see #setOptimisticRead + */ + public boolean getOptimisticRead() { + return optimisticRead; + } + /** * Configures this transaction to disallow write operations, regardless of * whether writes are allowed for the {@link Environment} or the @@ -392,6 +434,7 @@ public String toString() { "\nnoWait=" + noWait + "\nreadUncommitted=" + readUncommitted + "\nreadCommitted=" + readCommitted + + "\noptimisticRead=" + optimisticRead + "\n"; } diff --git a/kvmain/src/main/java/com/sleepycat/je/WriteOptions.java b/kvmain/src/main/java/com/sleepycat/je/WriteOptions.java old mode 100644 new mode 100755 index 6685c3c6..cca06ae9 --- a/kvmain/src/main/java/com/sleepycat/je/WriteOptions.java +++ b/kvmain/src/main/java/com/sleepycat/je/WriteOptions.java @@ -325,11 +325,18 @@ public class WriteOptions implements Cloneable { */ public static final int TTL_MAX_DAYS = TTL_MAX_HOURS / 24; + /** + * The value of Creation Time when it is unknown, for records created + * before release 25.4 or before Creation Time was enabled. + */ + public static final long CREATION_TIME_NOT_SET = 0L; + private CacheMode cacheMode = null; private int ttl = 0; private TimeUnit ttlUnit = TimeUnit.DAYS; private long expirationTime = 0; private boolean updateTtl = false; + private long creationTime = 0; private long modificationTime = 0; private boolean tombstone = false; private TimeUnit beforeImageTTLUnit = TimeUnit.DAYS; @@ -627,6 +634,34 @@ public long getModificationTime() { return modificationTime; } + /** + * Sets the creation time to be associated with a record that is + * inserted or updated, in order to override the default creation time. + * + * This feature is currently disabled and this method is a no-op. + * + *
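+ * A minimal sketch (illustrative; with the setter disabled, the getter
+ * below returns {@link #CREATION_TIME_NOT_SET} unless the creation time
+ * was recorded internally):
+ * <pre>
+ *     WriteOptions options = new WriteOptions();
+ *     options.setCreationTime(System.currentTimeMillis()); // currently ignored
+ *     if (options.getCreationTime() == WriteOptions.CREATION_TIME_NOT_SET) {
+ *         // creation time is unknown for this record
+ *     }
+ * </pre>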

    + * + * @since 25.3 + */ + public WriteOptions setCreationTime(final long creationTime) { + //no-op + return this; + } + + /** + * Returns the last creation time to be associated with a record that + * is inserted, updated or deleted by the operation, or + * {@link WriteOptions#CREATION_TIME_NOT_SET} if creation time was not + * enabled at record creation. + * + * @see #setCreationTime + * @since 25.3 + */ + public long getCreationTime() { + return creationTime; + } + /** * Sets the tombstone property to be associated with a record that is * inserted or updated. @@ -680,7 +715,7 @@ public TimeUnit getBeforeImageTTLUnit() { } /** - * If set to true, {@link OperationResult#isBeforeImageEnabled()} will + * If set to true, {@link #isBeforeImageEnabled()} will * return true. * @since 25.1 */ diff --git a/kvmain/src/main/java/com/sleepycat/je/beforeimage/BeforeImageContext.java b/kvmain/src/main/java/com/sleepycat/je/beforeimage/BeforeImageContext.java old mode 100644 new mode 100755 index 7b0225f1..2292bb36 --- a/kvmain/src/main/java/com/sleepycat/je/beforeimage/BeforeImageContext.java +++ b/kvmain/src/main/java/com/sleepycat/je/beforeimage/BeforeImageContext.java @@ -20,7 +20,7 @@ /** * {@literal * BeforeImageContext provides info on existence of the beforeimage and - * its TTL, this is the data which is stored with the current + * its TTL, this is the data which is stored with the current * image of primary database. so don't add any metadata which is not added * to current record. * } @@ -29,19 +29,25 @@ public class BeforeImageContext { private final int expTime; private final boolean expTimeInHrs; - + + //dummy ctx for inserts + public BeforeImageContext() { + this.expTime = 0; + this.expTimeInHrs = true; + } + public BeforeImageContext(final int expTime, - final boolean expTimeInHrs) { + final boolean expTimeInHrs) { this.expTime = expTime; this.expTimeInHrs = expTimeInHrs; } - //user defined + //user defined public int getExpTime() { return expTime; } - //logged time + //logged time public int getLoggedExpTime() { return TTL.ttlToExpiration(expTime, expTimeInHrs ? 
TimeUnit.HOURS @@ -51,7 +57,7 @@ public int getLoggedExpTime() { public boolean isExpTimeInHrs() { return expTimeInHrs; } - + @Override public String toString() { StringBuilder sb = new StringBuilder(); @@ -59,4 +65,4 @@ public String toString() { sb.append(" expInHrs=").append(expTimeInHrs); return sb.toString(); } -} \ No newline at end of file +} diff --git a/kvmain/src/main/java/com/sleepycat/je/beforeimage/BeforeImageIndex.java b/kvmain/src/main/java/com/sleepycat/je/beforeimage/BeforeImageIndex.java old mode 100644 new mode 100755 index feb6e566..67d9b412 --- a/kvmain/src/main/java/com/sleepycat/je/beforeimage/BeforeImageIndex.java +++ b/kvmain/src/main/java/com/sleepycat/je/beforeimage/BeforeImageIndex.java @@ -39,6 +39,7 @@ import com.sleepycat.je.dbi.DbTree; import com.sleepycat.je.dbi.DbType; import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.dbi.TTL; import com.sleepycat.je.log.LogUtils; import com.sleepycat.je.txn.BasicLocker; import com.sleepycat.je.txn.Locker; @@ -53,7 +54,7 @@ /** * {@literal * This class manages the beforeimage database which stores previous records of - * all the database records configured with enableBeforeImage + * all the database records configured with enableBeforeImage * } */ public class BeforeImageIndex { @@ -136,7 +137,7 @@ public PutContext getPutCtx() { public static class BeforeImagePayLoad { - /*this is the data stored as value + /*this is the data stored as value * in beforeimage database */ private final byte[] bImgData; @@ -163,7 +164,7 @@ public byte[] getbImgData() { public byte[] marshalData() { int totLength = Integer.BYTES + bImgData.length + Long.BYTES; - // totLength += version >= 26 ? Long.BYTES + // totLength += version >= 26 ? Long.BYTES ByteBuffer buf = ByteBuffer.allocate(totLength); LogUtils.writePackedInt(buf, version); LogUtils.writeByteArray(buf, bImgData); @@ -206,8 +207,8 @@ public Builder setVersion(int version) { return this; } /* - * template to add new field - * public Builder setBImgTxnId(long txnId) { + * template to add new field + * public Builder setBImgTxnId(long txnId) { * this.txnId = txnId; * return this; * } @@ -215,7 +216,7 @@ public Builder setVersion(int version) { public BeforeImagePayLoad build() { if (version == 0) { /* - * Feature introduced version + * Feature introduced version * TODO map this to logentryversion? */ version = 25; @@ -248,7 +249,12 @@ public BeforeImageIndex(EnvironmentImpl envImpl) throws DatabaseException { BeforeImageIndexStatDefinition.N_BIMG_RECORDS_BY_DELETES); nBImgByTombs = new IntStat(statistics, BeforeImageIndexStatDefinition.N_BIMG_RECORDS_BY_TOMBSTONES); - + if (!openBeforeImageDatabase()) { + if (!envImpl.isReadOnly()) { + throw EnvironmentFailureException + .unexpectedState("Unable to create the before image database "); + } + } } public static void setBeforeImageHook(TestHook hook) { @@ -274,13 +280,25 @@ public StatGroup getStats(StatsConfig config) { * operation which can be concurrent. * */ - private synchronized void openBeforeImageDatabase() + private synchronized boolean openBeforeImageDatabase() throws DatabaseException { - // We cannot use the locker from user operation to avoid issues - // when other user, get the db, and the user operation which - // created the db was aborted. isn't this simpler to just initialize - // after env creation when we are single threaded. + /* + Not able to create this internal db as part of user operation + due to following reasons. + 1. 
If we use the same locker as the user txn, then until that txn + commits, no other user's put operation can proceed. + 2. An aborted txn might leave another txn, blocked on this + monitor, to carry over the same task. + 3. If we use a different locker, i.e. nested transactions, the + commit of that locker in replica replay may cause the dtvlsn + of the master to update while a subscriber is blocked in the waitforDtvlsn + code, causing a partial, non-durable transaction entry to be + streamed. (TODO: check this) + Due to the above issues, we initialize the database + during env creation, while we are still single threaded. + */ + final Locker locker = Txn.createLocalAutoTxn(envImpl, new TransactionConfig()); @@ -290,10 +308,9 @@ private synchronized void openBeforeImageDatabase() DbType.BEFORE_IMAGE.getInternalName(), null /* databaseHandle */, false); if (db == null) { + if (envImpl.isReadOnly()) { - /* This should have been caught earlier. */ - throw EnvironmentFailureException.unexpectedState( - "A replicated environment can't be opened read only."); + return false; } DatabaseConfig dbConfig = new DatabaseConfig(); dbConfig.setReplicated(false); @@ -301,6 +318,7 @@ private synchronized void openBeforeImageDatabase() DbType.BEFORE_IMAGE.getInternalName(), dbConfig); } beforeImageDbImpl = db; + return true; } finally { locker.operationEnd(true); } @@ -327,7 +345,7 @@ private void updateStats(final DBEntry entry) { } /** - * + * * @param entry to be inserted to beforeimage database * @return true if successfully added the entry */ @@ -337,15 +355,49 @@ public boolean put(final DBEntry entry) { "beforeImageIndex put " + entry.getAbortLsn() + " " + entry.getExpTime() + " " + entry.isExpInHours()); if (beforeImageDbImpl == null) { - openBeforeImageDatabase(); + throw EnvironmentFailureException + .unexpectedState("No BeforeImage Database Exists"); } + + if (TTL.isExpired(entry.getExpTime(), entry.isExpInHours())) { + // to handle cases where the replica is down until expiry of + // beforeimage + return false; + } + + /* The computation below recovers the user-defined TTL in a + * way that is common to master and replica. + * After the user specifies a TTL through the options, we convert the + * TTL to currentTime + TTL in the specified time unit (days or hours). + * This represents the number of hours since the Unix epoch plus the TTL + * in hours, i.e. the time at which the before image expires (if the time + * unit is hours). + * For example, on May 6, 2025 the number of hours passed since the epoch + * is approximately 487850; with a user-specified TTL of 2 hrs, we store + * 487852 in the log. + * We need the user TTL again when inserting into the beforeimage index, + * so we calculate the number of time units passed until now and subtract + * it from the logged expiration to get back the user-specified TTL. + * For the example above, currentTime on the same hour boundary gives us + * 487850, and subtracting it from the logged time of the before image, + * which is 487852, gives us the 2 hrs TTL back. + * This avoids an extra TTL conversion + * when we are logging. 
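+ *
+ * In code form (an illustrative recap of the example above, using the
+ * same numbers; not additional logic):
+ *     int loggedExp = 487852;          // stored expiration, hours since epoch
+ *     int now       = 487850;          // current hours since epoch at insert time
+ *     int userTTL   = loggedExp - now; // == 2 hours, the user-specified TTL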
+ */ + + int currentTime = TTL.systemTimeToExpiration( + TTL.currentSystemTime(), entry.isExpInHours()); + int userTTL = entry.getExpTime() - currentTime; + assert userTTL >= 0; + TestHookExecute.doHookIfSet(beforeImageHook); DatabaseEntry key = new DatabaseEntry(); LongBinding.longToEntry(entry.getAbortLsn(), key); Cursor c = makeCursor(entry.getLock()); try { OperationResult res = c.put(key, entry.getData(), Put.NO_OVERWRITE, - new WriteOptions().setTTL(entry.getExpTime(), + new WriteOptions().setTTL(userTTL, entry.isExpInHours() ? TimeUnit.HOURS : TimeUnit.DAYS)); @@ -374,12 +426,10 @@ public boolean put(final DBEntry entry) { public DatabaseEntry get(long abortLsn, Locker lck) { LoggerUtils.fine(logger, envImpl, "beforeImageIndex get " + abortLsn); - - if (beforeImageDbImpl == null) { - // todo throw an exception - throw EnvironmentFailureException - .unexpectedState("No BeforeImage Database Exists "); - } + if (beforeImageDbImpl == null) { + throw EnvironmentFailureException + .unexpectedState("No BeforeImage Database Exists"); + } DatabaseEntry key = new DatabaseEntry(); DatabaseEntry data = new DatabaseEntry(); @@ -423,8 +473,6 @@ public DatabaseImpl getDatabaseImpl() { /** * For debugging and unit tests - * - * @throws DatabaseException */ public void dumpDb(boolean display, List idxDataList) { diff --git a/kvmain/src/main/java/com/sleepycat/je/beforeimage/BeforeImageLN.java b/kvmain/src/main/java/com/sleepycat/je/beforeimage/BeforeImageLN.java old mode 100644 new mode 100755 index 6c8a0e0a..7ab7fc01 --- a/kvmain/src/main/java/com/sleepycat/je/beforeimage/BeforeImageLN.java +++ b/kvmain/src/main/java/com/sleepycat/je/beforeimage/BeforeImageLN.java @@ -108,11 +108,13 @@ protected LNLogEntry createLogEntry(LogEntryType entryType, int abortExpiration, boolean abortExpirationInHours, long abortModificationTime, + long abortCreationTime, boolean abortTombstone, byte[] newKey, boolean newEmbeddedLN, int newExpiration, boolean newExpirationInHours, + long creationTime, long newModTime, boolean newTombstone, boolean newBlindDeletion, @@ -127,9 +129,10 @@ protected LNLogEntry createLogEntry(LogEntryType entryType, abortExpiration, abortExpirationInHours, abortModificationTime, + abortCreationTime, abortTombstone, newKey, this, newEmbeddedLN, newExpiration, - newExpirationInHours, newModTime, + newExpirationInHours, creationTime, newModTime, newTombstone, newBlindDeletion, priorSize, priorLsn, bImgCtx); diff --git a/kvmain/src/main/java/com/sleepycat/je/beforeimage/BeforeImageLNLogEntry.java b/kvmain/src/main/java/com/sleepycat/je/beforeimage/BeforeImageLNLogEntry.java old mode 100644 new mode 100755 index dfd759cf..a6093f32 --- a/kvmain/src/main/java/com/sleepycat/je/beforeimage/BeforeImageLNLogEntry.java +++ b/kvmain/src/main/java/com/sleepycat/je/beforeimage/BeforeImageLNLogEntry.java @@ -14,6 +14,8 @@ package com.sleepycat.je.beforeimage; import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Collection; import com.sleepycat.je.dbi.DatabaseId; import com.sleepycat.je.dbi.EnvironmentImpl; import com.sleepycat.je.dbi.TTL; @@ -21,20 +23,21 @@ import com.sleepycat.je.log.LogEntryHeader; import com.sleepycat.je.log.LogEntryType; import com.sleepycat.je.log.LogUtils; +import com.sleepycat.je.log.VersionedWriteLoggable; import com.sleepycat.je.txn.Txn; /** * BeforeImageLNLogEntry contains all the regular LNLogEntry fields and * additional information about the before image. 
- * This additional information is used to support replication + * This additional information is used to support replication * of beforeimage information to other replicas. * * The extra fields which follow the usual {@link * com.sleepycat.je.log.entry.LNLogEntry} fields introduced in version 25 are: - * + * * beforeImageExpiration - beforeImage Expiration time * beforeImageExpirationInHours - beforeImage Expiration time in days or hours. - * + * */ public class BeforeImageLNLogEntry extends LNLogEntry { @@ -73,12 +76,14 @@ public BeforeImageLNLogEntry( int abortExpiration, boolean abortExpirationInHours, long abortModificationTime, + long abortCreationTime, boolean abortTombstone, byte[] key, BeforeImageLN ln, boolean embeddedLN, int expiration, boolean expirationInHours, + long creationTime, long modificationTime, boolean tombstone, boolean blindDeletion, @@ -87,13 +92,14 @@ public BeforeImageLNLogEntry( BeforeImageContext befImageContext) { super(entryType, dbId, txn, abortLsn, abortKD, abortKey, abortData, - abortVLSN, abortExpiration, abortExpirationInHours, - abortModificationTime, abortTombstone, key, ln, embeddedLN, - expiration, expirationInHours, modificationTime, tombstone, - blindDeletion, priorSize, priorLsn, true); + abortVLSN, abortExpiration, abortExpirationInHours, + abortModificationTime, abortCreationTime, + abortTombstone, key, ln, embeddedLN, + expiration, expirationInHours, creationTime, modificationTime, + tombstone, blindDeletion, priorSize, priorLsn, true); if (befImageContext != null) { - beforeImageExpiration = befImageContext.getLoggedExpTime(); + beforeImageExpiration = befImageContext.getExpTime(); beforeImageExpirationInHours = befImageContext.isExpTimeInHrs(); } } @@ -115,7 +121,7 @@ public void readEntry(EnvironmentImpl envImpl, readBaseLNEntry(envImpl, header, entryBuffer, false /*keyIsLastSerializedField*/); - + /* * The BeforeImageLNLogEntry was introduced in version LAST_FORMAT_CHANGE. */ @@ -147,18 +153,27 @@ public StringBuilder dumpEntry(StringBuilder sb, boolean verbose) { @Override public int getLastFormatChange() { - return Math.max(LAST_FORMAT_CHANGE, super.getLastFormatChange()); + return LAST_FORMAT_CHANGE; + } + + @Override + public Collection getEmbeddedLoggables() { + final Collection list = + new ArrayList<>(super.getEmbeddedLoggables()); + list.add(new BeforeImageLN()); + return list; } /** - * returns the before image expiration as user specified + * returns the before image expiration as user specified */ public int getBeforeImageExpiration() { - if (isBeforeImageExpired()) { - return 0; - } - return TTL.systemTimeToExpiration(beforeImageExpiration, - beforeImageExpirationInHours); + return beforeImageExpiration; + } + + public long getBeforeImageExpirationTime() { + return TTL.expirationToSystemTime(getBeforeImageExpiration(), + beforeImageExpirationInHours); } /** @@ -170,7 +185,7 @@ public String getBeforeImageStoredExpiration() { } /** - * returns true if before image expired to the record + * returns true if before image expired to the record */ public boolean isBeforeImageExpired() { return TTL.isExpired(beforeImageExpiration, @@ -205,7 +220,7 @@ public void writeEntry(final ByteBuffer destBuffer, false /*keyIsLastSerializedField*/, forReplication); if (logVersion >= LAST_FORMAT_CHANGE) { LogUtils.writePackedInt(destBuffer, - beforeImageExpirationInHours ? + beforeImageExpirationInHours ? 
(-beforeImageExpiration) : beforeImageExpiration); } } diff --git a/kvmain/src/main/java/com/sleepycat/je/beforeimage/BeforeImageOutputWireRecord.java b/kvmain/src/main/java/com/sleepycat/je/beforeimage/BeforeImageOutputWireRecord.java old mode 100644 new mode 100755 index b48e45fc..a207027b --- a/kvmain/src/main/java/com/sleepycat/je/beforeimage/BeforeImageOutputWireRecord.java +++ b/kvmain/src/main/java/com/sleepycat/je/beforeimage/BeforeImageOutputWireRecord.java @@ -21,12 +21,15 @@ import com.sleepycat.je.DatabaseException; import com.sleepycat.je.EnvironmentFailureException; import com.sleepycat.je.dbi.EnvironmentImpl; +import com.sleepycat.je.dbi.TTL; import com.sleepycat.je.log.LogEntryHeader; import com.sleepycat.je.log.LogEntryType; import com.sleepycat.je.log.LogItem; import com.sleepycat.je.log.LogUtils; import com.sleepycat.je.log.entry.ReplicableLogEntry; import com.sleepycat.je.rep.stream.OutputWireRecord; +import com.sleepycat.je.utilint.TestHook; +import com.sleepycat.je.utilint.TestHookExecute; import com.sleepycat.je.rep.stream.InputWireRecord; /** @@ -38,6 +41,7 @@ public class BeforeImageOutputWireRecord extends OutputWireRecord { private byte[] bImgData; + private static volatile TestHook verifyHook; private synchronized ReplicableLogEntry instantiateEntryWithBeforeImage(LogEntryHeader currentEntryHeader) { @@ -48,9 +52,14 @@ public class BeforeImageOutputWireRecord extends OutputWireRecord { if (logItem != null) { logEntry = logItem.cachedEntry; if (logEntry != null) { - bImgData = logItem.getBeforeImageData(); - if (bImgData != null) { - return logEntry; + if (logItem.getBeforeImageCtx() != null) { + if (!TTL.isExpired(logItem.getBeforeImageCtx().getExpTime(), + logItem.getBeforeImageCtx().isExpTimeInHrs())) { + bImgData = logItem.getBeforeImageData(); + } + if (bImgData != null) { + return logEntry; + } } } } @@ -72,16 +81,20 @@ public class BeforeImageOutputWireRecord extends OutputWireRecord { if (logItem != null) { logItem.cachedEntry = logEntry; } - if (envImpl.getBeforeImageIndex() != null) { - DatabaseEntry bImgEntry = envImpl.getBeforeImageIndex() - .get(entry.getAbortLsn(), null); - if (bImgEntry != null) { - bImgData = bImgEntry.getData(); - } - if (logItem != null) { - logItem.setBeforeImageData(bImgData); - } - } + if (envImpl.getBeforeImageIndex() != null) { + DatabaseEntry bImgEntry = envImpl.getBeforeImageIndex() + .get(entry.getAbortLsn(), null); + if (bImgEntry != null) { + bImgData = bImgEntry.getData(); + if (logItem != null) { + logItem.setBeforeImageData(bImgData); + BeforeImageContext bImgCtx = new BeforeImageContext( + entry.getBeforeImageExpiration(), + entry.isBeforeImageExpirationInHours()); + logItem.setBeforeImageCtx(bImgCtx); + } + } + } return logEntry; } @@ -107,7 +120,18 @@ public BeforeImageOutputWireRecord(final EnvironmentImpl envImpl, public BeforeImageOutputWireRecord(final EnvironmentImpl envImpl, final LogItem logItem) { super(envImpl, logItem); - this.bImgData = logItem.getBeforeImageData(); + if (logItem.getBeforeImageCtx() != null) { + if (!TTL.isExpired(logItem.getBeforeImageCtx().getExpTime(), + logItem.getBeforeImageCtx().isExpTimeInHrs())) { + this.bImgData = logItem.getBeforeImageData(); + } else { + TestHookExecute.doHookIfSet(verifyHook, true); + } + } + } + + public static void setInputWireRecordHook(TestHook hook) { + verifyHook = hook; } /* For unit test support. 
*/ diff --git a/kvmain/src/main/java/com/sleepycat/je/cleaner/Cleaner.java b/kvmain/src/main/java/com/sleepycat/je/cleaner/Cleaner.java old mode 100644 new mode 100755 index a6dd7c37..8ce535b8 --- a/kvmain/src/main/java/com/sleepycat/je/cleaner/Cleaner.java +++ b/kvmain/src/main/java/com/sleepycat/je/cleaner/Cleaner.java @@ -1903,6 +1903,7 @@ private boolean migratePendingLN( env, db, null /*locker*/, null /*writeLockInfo*/, false /*newEmbeddedLN*/, bin.getKey(index), bin.getExpiration(index), bin.isExpirationInHours(), + ln.getCreationTime(), ln.getModificationTime(), bin.isTombstone(index), false /*newBlindDeletion*/, false /*currEmbeddedLN*/, treeLsn, bin.getLastLoggedSize(index), diff --git a/kvmain/src/main/java/com/sleepycat/je/cleaner/DataEraser.java b/kvmain/src/main/java/com/sleepycat/je/cleaner/DataEraser.java old mode 100644 new mode 100755 index f6b4c40e..a3bde9f0 --- a/kvmain/src/main/java/com/sleepycat/je/cleaner/DataEraser.java +++ b/kvmain/src/main/java/com/sleepycat/je/cleaner/DataEraser.java @@ -1848,6 +1848,7 @@ private void eraseFile(final Long file) { DbLsn.getFileOffset(logLsn), new LNInfo( targetLN, dbId, key, expirationTime, + lnEntry.getCreationTime(), lnEntry.getModificationTime(), headerSize, itemSize)); diff --git a/kvmain/src/main/java/com/sleepycat/je/cleaner/ExtinctionScanner.java b/kvmain/src/main/java/com/sleepycat/je/cleaner/ExtinctionScanner.java old mode 100644 new mode 100755 index a1048055..b66ac005 --- a/kvmain/src/main/java/com/sleepycat/je/cleaner/ExtinctionScanner.java +++ b/kvmain/src/main/java/com/sleepycat/je/cleaner/ExtinctionScanner.java @@ -217,8 +217,8 @@ public ExtinctionScanner(@NonNull final EnvironmentImpl envImpl) { threadPool = envImpl.isReadOnly() ? null : new ThreadPoolExecutor( - 1 /*corePoolSize*/, 1 /*maxPoolThreads*/, - 0 /*keepAliveTime*/, TimeUnit.MILLISECONDS, + 2 /*corePoolSize*/, 4 /*maxPoolThreads*/, + 2000 /*keepAliveTime*/, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<>(), new StoppableThreadFactory( envImpl, "JEExtinctRecordScanner", logger, @@ -1998,6 +1998,7 @@ public boolean isExtinctionForDb(final DatabaseId dbId) { return dbId.equals(this.dbId); } + @SuppressWarnings("deprecation") @Override public void run() { assert TestHookExecute.doHookIfSet(dbBeforeExecTaskHook); @@ -2066,7 +2067,7 @@ public void run() { logger, envImpl, "Start DB remove/truncate scan, id=" + id + " dbId=" + dbImpl.getId() + - " dbName=" + dbImpl.getName()); + " dbName=" + dbImpl.getName() + Thread.currentThread().getId()); /* * At this point, it's possible for the evictor to find @@ -2191,7 +2192,9 @@ private ObsoleteTreeWalker(final DatabaseImpl dbImpl, * FUTURE: Set mem limit to remainder of JE cache size during * recovery. For now, use a smallish value (50 MB). 
*/ - setInternalMemoryLimit(50L * 1024L * 1024L); + long freeMemory = (int)Runtime.getRuntime().freeMemory() / 10; //todo make configurable or is this too much + + setInternalMemoryLimit(Math.max(50L * 1024L * 1024L, freeMemory)); } @Override diff --git a/kvmain/src/main/java/com/sleepycat/je/cleaner/FileProcessor.java b/kvmain/src/main/java/com/sleepycat/je/cleaner/FileProcessor.java old mode 100644 new mode 100755 index e22ffda7..8791e474 --- a/kvmain/src/main/java/com/sleepycat/je/cleaner/FileProcessor.java +++ b/kvmain/src/main/java/com/sleepycat/je/cleaner/FileProcessor.java @@ -918,6 +918,7 @@ private boolean processFile(Long fileNum, dbId, lnEntry.getKey(), expirationTime, + lnEntry.getCreationTime(), lnEntry.getModificationTime(), 0, 0)); @@ -997,6 +998,7 @@ private boolean processFile(Long fileNum, DbLsn.getFileOffset(logLsn), new LNInfo( targetLN, dbId, key, expirationTime, + lnEntry.getCreationTime(), lnEntry.getModificationTime(), 0, 0)); if (lookAheadCache.isFull()) { @@ -1278,6 +1280,7 @@ private LNInfo processFoundLN( final LN lnFromLog = info.getLN(); final byte[] key = info.getKey(); + final long creationTime = info.getCreatedTime(); final long modificationTime = info.getModificationTime(); final DatabaseImpl db = bin.getDatabase(); @@ -1346,7 +1349,8 @@ private LNInfo processFoundLN( return new LNInfo( null /*LN*/, db.getId(), key, - info.getExpirationTime(), info.getModificationTime(), + info.getExpirationTime(), info.getCreatedTime(), + info.getModificationTime(), 0, 0); } @@ -1409,6 +1413,7 @@ private LNInfo processFoundLN( envImpl, db, null /*locker*/, null /*writeLockInfo*/, false /*newEmbeddedLN*/, bin.getKey(index), bin.getExpiration(index), bin.isExpirationInHours(), + creationTime, modificationTime, bin.isTombstone(index), false /*newBlindDeletion*/, false /*newEmbeddedLN*/, logLsn, bin.getLastLoggedSize(index), diff --git a/kvmain/src/main/java/com/sleepycat/je/cleaner/LNInfo.java b/kvmain/src/main/java/com/sleepycat/je/cleaner/LNInfo.java old mode 100644 new mode 100755 index 55810654..2045868e --- a/kvmain/src/main/java/com/sleepycat/je/cleaner/LNInfo.java +++ b/kvmain/src/main/java/com/sleepycat/je/cleaner/LNInfo.java @@ -30,6 +30,7 @@ public final class LNInfo { private final DatabaseId dbId; private final byte[] key; private final long expirationTime; + private final long creationTime; private final long modificationTime; private final int headerSize; private final int itemSize; @@ -38,6 +39,7 @@ public LNInfo(final LN ln, final DatabaseId dbId, final byte[] key, final long expirationTime, + final long creationTime, final long modificationTime, int headerSize, int itemSize) { @@ -46,6 +48,7 @@ public LNInfo(final LN ln, this.key = key; this.expirationTime = expirationTime; this.modificationTime = modificationTime; + this.creationTime = creationTime; this.headerSize = headerSize; this.itemSize = itemSize; } @@ -70,6 +73,10 @@ long getModificationTime() { return modificationTime; } + public long getCreatedTime() { + return creationTime; + } + int getHeaderSize() { return headerSize; } int getItemSize() { return itemSize; } diff --git a/kvmain/src/main/java/com/sleepycat/je/config/EnvironmentParams.java b/kvmain/src/main/java/com/sleepycat/je/config/EnvironmentParams.java index 894301a6..1e6c8c2f 100644 --- a/kvmain/src/main/java/com/sleepycat/je/config/EnvironmentParams.java +++ b/kvmain/src/main/java/com/sleepycat/je/config/EnvironmentParams.java @@ -307,6 +307,14 @@ public class EnvironmentParams { false, // mutable false); // forReplication + public static 
final DurationConfigParam ENV_NETWORK_RESTORE_LOCK_TIMEOUT = + new DurationConfigParam(EnvironmentConfig.ENV_NETWORK_RESTORE_LOCK_TIMEOUT, + "1 min", // min + null, // max + "2 min", // default + false, // mutable + false); // forReplication + public static final DurationConfigParam ENV_TTL_CLOCK_TOLERANCE = new DurationConfigParam(EnvironmentConfig.ENV_TTL_CLOCK_TOLERANCE, "1 ms", // min diff --git a/kvmain/src/main/java/com/sleepycat/je/config/RemovedProperties.java b/kvmain/src/main/java/com/sleepycat/je/config/RemovedProperties.java index 66fd5676..acba7093 100644 --- a/kvmain/src/main/java/com/sleepycat/je/config/RemovedProperties.java +++ b/kvmain/src/main/java/com/sleepycat/je/config/RemovedProperties.java @@ -83,6 +83,8 @@ public class RemovedProperties { constants.add("java.util.logging.level.recovery"); constants.add("java.util.logging.level.evictor"); constants.add("java.util.logging.level.cleaner"); + constants.add("je.rep.txnRollbackLimit"); + constants.add("je.rep.txnRollbackDisabled"); properties = Collections.unmodifiableSet(constants); } diff --git a/kvmain/src/main/java/com/sleepycat/je/dbi/CursorImpl.java b/kvmain/src/main/java/com/sleepycat/je/dbi/CursorImpl.java old mode 100644 new mode 100755 index b6c26e69..cdad844d --- a/kvmain/src/main/java/com/sleepycat/je/dbi/CursorImpl.java +++ b/kvmain/src/main/java/com/sleepycat/je/dbi/CursorImpl.java @@ -16,6 +16,8 @@ import static com.sleepycat.je.EnvironmentFailureException.assertState; import static com.sleepycat.je.utilint.VLSN.NULL_VLSN; +import java.io.FileNotFoundException; +import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Set; @@ -23,7 +25,6 @@ import com.sleepycat.je.beforeimage.BeforeImageContext; import com.sleepycat.je.beforeimage.BeforeImageLN; -import com.sleepycat.je.beforeimage.BeforeImageIndex; import com.sleepycat.je.beforeimage.BeforeImageIndex.DBEntry; import com.sleepycat.je.beforeimage.BeforeImageIndex.BeforeImagePayLoad; import com.sleepycat.je.CacheMode; @@ -41,6 +42,8 @@ import com.sleepycat.je.log.LogUtils; import com.sleepycat.je.log.ReplicationContext; import com.sleepycat.je.log.entry.LNLogEntry; +import com.sleepycat.je.log.WholeEntry; +import com.sleepycat.je.log.ErasedException; import com.sleepycat.je.tree.BIN; import com.sleepycat.je.tree.BINBoundary; import com.sleepycat.je.tree.IN; @@ -58,6 +61,7 @@ import com.sleepycat.je.txn.LockResult; import com.sleepycat.je.txn.LockType; import com.sleepycat.je.txn.Locker; +import com.sleepycat.je.txn.Txn; import com.sleepycat.je.txn.LockerFactory; import com.sleepycat.je.txn.WriteLockInfo; import com.sleepycat.je.util.TimeSupplier; @@ -157,6 +161,8 @@ public class CursorImpl implements Cloneable { private TestHook testHook; + private volatile TestHook fileNotFoundExpHook; + /** * Creates a cursor with retainNonTxnLocks=true, isSecondaryCursor=false. * These are the standard settings for an internal cursor. 
@@ -1115,7 +1121,7 @@ assert assertCursorState( final LogItem logItem = ln.log( envImpl, dbImpl, locker, wli, false /*newEmbeddedLN*/, - key, expiration, expirationInHours, + key, expiration, expirationInHours, 0L /*creation time*/, 0L /*modificationTime*/, false /*newTombstone*/, true /*newBlindDeletion*/, false /*currEmbeddedLN*/, DbLsn.NULL_LSN /*currLsn*/, 0 /*currSize*/, @@ -1138,7 +1144,7 @@ assert assertCursorState( return DbInternal.makeResult( expiration, expirationInHours, false /*update*/, - 0L /*modificationTime*/, + 0L/*creationTime*/, 0L /*modificationTime*/, getStorageSize(), false /*tombstone*/); @@ -1209,6 +1215,7 @@ assert assertCursorState( final byte[] currKey = bin.getKey(index); byte[] currData = null; long bImgModTime = 0L; + long creationTime = 0L; final int expiration = bin.getExpiration(index); final boolean expirationInHours = bin.isExpirationInHours(); @@ -1216,7 +1223,7 @@ assert assertCursorState( * Must fetch LN if the LN is not embedded and any of the following * are true: * - CLEANER_FETCH_OBSOLETE_SIZE is configured and lastLoggedSize - * is ucurrEmbeddedLNnknown + * is unknown * - this database does not use the standard LN class and we * cannot call DbType.createdDeletedLN further below * For other cases, we are careful not to fetch, in order to avoid @@ -1227,25 +1234,33 @@ assert assertCursorState( if ((currLoggedSize == 0 && !currEmbeddedLN && envImpl.getCleaner().getFetchObsoleteSize(dbImpl)) || - !dbType.mayCreateDeletedLN()) { + !dbType.mayCreateDeletedLN() || + (!dbType.isInternal() && (!bin.isEmbeddedLN(index) + && !dbImpl.isLNImmediatelyObsolete()))) { ln = bin.fetchLN(index, cacheMode); currData = (ln != null ? ln.getData() : null); bImgModTime = (ln != null) ? ln.getModificationTime() : 0L; + creationTime = (ln != null) ? ln.getCreationTime() : 0L; + if (ln == null) { /* An expired LN was purged. */ revertLock(lockStanding); success = true; - return null; } } else { ln = bin.getLN(index, cacheMode); currData = (ln != null ? ln.getData() : null); bImgModTime = (ln != null) ? ln.getModificationTime() : 0L; + if (currEmbeddedLN) { + creationTime = bin.getCreationTime(index); + } else { + creationTime = (ln != null) ? ln.getCreationTime() : 0L; + } } - + long oldLNMemSize = 0; /* * BeforeImage Support during delete */ @@ -1260,6 +1275,7 @@ assert assertCursorState( // the current LN LN lnTemp = bin.fetchLN(index, cacheMode); + oldLNMemSize = lnTemp.getMemorySizeIncludedByParent(); currData = (lnTemp != null ? lnTemp.getData() : null); bImgModTime = (lnTemp != null) ? lnTemp.getModificationTime() @@ -1273,7 +1289,6 @@ assert assertCursorState( * deleted LN (with ln.data == null), but do not attach it to the * tree yet. */ - long oldLNMemSize = 0; if (ln != null) { oldLNMemSize = ln.getMemorySizeIncludedByParent(); ln.delete(); @@ -1282,7 +1297,8 @@ assert assertCursorState( } /* Get a wli to log. */ - final WriteLockInfo wli = lockStanding.prepareForUpdate(bin, index); + final WriteLockInfo wli = + lockStanding.prepareForUpdate(bin, index, true); /* Modification time is only specified for replica replay. 
*/ if (modificationTime == 0 && !dbImpl.getSortedDuplicates()) { @@ -1302,7 +1318,7 @@ assert assertCursorState( logItem = ln.log( envImpl, dbImpl, locker, wli, currEmbeddedLN /*newEmbeddedLN*/, currKey /*newKey*/, - expiration, expirationInHours, modificationTime, + expiration, expirationInHours, creationTime, modificationTime, false /*newTombstone*/, false /*newBlindDeletion*/, currEmbeddedLN, currLsn, currLoggedSize, false/*isInsertion*/, false /*backgroundIO*/, repContext, @@ -1339,6 +1355,7 @@ assert assertCursorState( trace(Level.FINER, TRACE_DELETE, bin, index, currLsn, logItem.lsn); OperationResult res = DbInternal.makeResult( expiration, expirationInHours, false /*update*/, + creationTime, modificationTime, getStorageSize(), false /*tombstone*/); @@ -1358,8 +1375,9 @@ assert assertCursorState( new DatabaseEntry(bImgData), bImgCtx, DBEntry.PutContext.DELETE)); logItem.setBeforeImageData(bImgData); + logItem.setBeforeImageCtx(bImgCtx); } - + return res; } finally { @@ -1407,6 +1425,7 @@ public void logDeletionForReplay(final LNLogEntry lnEntry, false /*newEmbeddedLN*/, lnEntry.getKey() /*newKey*/, lnEntry.getExpiration(), lnEntry.isExpirationInHours(), + lnEntry.getCreationTime(), lnEntry.getModificationTime(), lnEntry.isTombstone(), false /*newBlindDeletion*/, false /*currEmbeddedLN*/, DbLsn.NULL_LSN /*currLsn*/, @@ -1627,7 +1646,7 @@ assert assertCursorState( success = true; return null; } - + /* * Update the non-defunct record at the cursor position. We have * optimized by preferring to take an uncontended lock. The @@ -1822,7 +1841,7 @@ private Pair insertRecordInternal( * this LSN. The abortLSN and abortKD fields of the wli will be * included in the new logrec. */ - wli = lockStanding.prepareForUpdate(bin, index); + wli = lockStanding.prepareForUpdate(bin, index, true); } else { /* @@ -1844,8 +1863,9 @@ private Pair insertRecordInternal( BeforeImageContext bImgCtx = null; //dummy object to enablebeforeimage without ctx if (writeParams.isBeforeImageEnabled()) { - bImgCtx = new BeforeImageContext(0, true); + bImgCtx = new BeforeImageContext(); } + long creationTime = writeParams.creationTime; /* * If treeLn is non-null we can use it to log the LN. If it is null, @@ -1859,12 +1879,13 @@ private Pair insertRecordInternal( * because the old defunct LN is counted obsolete by other means. */ LogItem logItem = null; - + try { logItem = loggingLn.log( envImpl, dbImpl, locker, wli, newEmbeddedLN, key, writeParams.expiration, writeParams.expirationInHours, + creationTime, writeParams.modificationTime, writeParams.tombstone, false /*newBlindDeletion*/, currEmbeddedLN, currLsn, 0 /*currSize*/, true/*isInsertion*/, false /*backgroundIO*/, @@ -1893,10 +1914,13 @@ private Pair insertRecordInternal( bin.updateEntry(index, logItem.lsn, vlsn, logItem.size); bin.setExpiration( - index, writeParams.expiration, writeParams.expirationInHours); + index, writeParams.expiration, writeParams.expirationInHours); bin.setModificationTime( - index, newEmbeddedLN ? writeParams.modificationTime : 0); + index, newEmbeddedLN ? writeParams.modificationTime : 0); + + bin.setCreationTime( + index, newEmbeddedLN ? creationTime : 0); bin.setTombstone(index, writeParams.tombstone); @@ -1931,6 +1955,7 @@ private Pair insertRecordInternal( index, shouldCache ? 
treeLn : null, logItem.lsn, vlsn, logItem.size, key, embeddedData, writeParams.expiration, writeParams.expirationInHours, + creationTime, writeParams.modificationTime, writeParams.tombstone); } @@ -1947,7 +1972,8 @@ private Pair insertRecordInternal( lockStanding, DbInternal.makeResult( writeParams.expiration, writeParams.expirationInHours, - false /*update*/, writeParams.modificationTime, + false /*update*/, creationTime, + writeParams.modificationTime, getStorageSize(), writeParams.tombstone)); } @@ -2008,6 +2034,7 @@ private OperationResult updateRecordInternal( final boolean newEmbeddedLN; final LogItem logItem; long bImgModTime = 0L; + long creationTime = 0L; /* * Must fetch LN if it is not embedded and any of the following @@ -2019,8 +2046,14 @@ private OperationResult updateRecordInternal( * - this database does not use the standard LN class and we * cannot call DbType.createdUpdatedLN further below (this is * the case for NameLNs, MapLNs, and FileSummaryLNs). + * - It is a user database that may have a creation time that needs + * to be transfered to the new record and the LN is not obsolete + * and as such may not exist. * For other cases, we are careful not to fetch, in order to avoid * a random read during an update operation. + * + * TODO: Find a way to get the creation time that does not require + * fetching the LN. */ LN treeLn; if (returnOldData != null || @@ -2028,7 +2061,9 @@ private OperationResult updateRecordInternal( (currLoggedSize == 0 && !currEmbeddedLN && envImpl.getCleaner().getFetchObsoleteSize(dbImpl)) || - !dbType.mayCreateUpdatedLN()) { + !dbType.mayCreateUpdatedLN() || + (!dbType.isInternal() && (!bin.isEmbeddedLN(index) + && !dbImpl.isLNImmediatelyObsolete()))) { if (currEmbeddedLN) { /* * TODO: If treeLn is null, avoid unnecessary allocation by @@ -2037,6 +2072,7 @@ private OperationResult updateRecordInternal( currData = bin.getEmbeddedData(index); treeLn = bin.getLN(index, cacheMode); bImgModTime = bin.getModificationTime(index); + creationTime = bin.getCreationTime(index); } else { /* * TODO: If treeLn is null, avoid unnecessary allocation by @@ -2046,16 +2082,26 @@ private OperationResult updateRecordInternal( currData = (treeLn != null ? treeLn.getData() : null); bImgModTime = (treeLn != null ? treeLn.getModificationTime() : 0L); + creationTime = (treeLn != null ? treeLn.getCreationTime() : 0L); } } else { treeLn = bin.getLN(index, cacheMode); currData = (treeLn != null ? treeLn.getData() : null); - bImgModTime = (treeLn != null ? treeLn.getModificationTime() : 0L); + bImgModTime = (treeLn != null ? treeLn.getModificationTime() : 0L); + if (currEmbeddedLN) { + creationTime = bin.getCreationTime(index); + } else { + creationTime = (treeLn != null ? treeLn.getCreationTime() : 0L); + } } - final boolean isCached = (treeLn != null); + if (writeParams.creationTime != 0) { + creationTime = writeParams.creationTime; + } - long oldModificationTime = (isCached ? treeLn.getModificationTime() : 0); + final boolean isCached = (treeLn != null); + long oldModificationTime = (isCached ? treeLn.getModificationTime() : bin.getModificationTime(index)); + long oldCreationTime = (isCached ? treeLn.getCreationTime() : bin.getCreationTime(index)); if (returnOldData != null) { assert currData != null; @@ -2164,7 +2210,8 @@ private OperationResult updateRecordInternal( * this LSN. The abortLSN and abortKD fields of the wli will be * included in the new logrec. 
*/ - final WriteLockInfo wli = lockStanding.prepareForUpdate(bin, index); + final WriteLockInfo wli = + lockStanding.prepareForUpdate(bin, index, true); /* * If the tree LN and replay LN do not apply, create an UncachedLN @@ -2204,7 +2251,8 @@ private OperationResult updateRecordInternal( logItem = loggingLn.log( envImpl, dbImpl, locker, wli, newEmbeddedLN, (key != null ? key : currKey), - expiration, expirationInHours, writeParams.modificationTime, + expiration, expirationInHours, creationTime, + writeParams.modificationTime, writeParams.tombstone, false /*newBlindDeletion*/, currEmbeddedLN, currLsn, currLoggedSize, false /*isInsertion*/, false /*backgroundIO*/, writeParams.repContext, bImgCtx); @@ -2229,7 +2277,8 @@ private OperationResult updateRecordInternal( bin.updateRecord( index, oldLNMemSize, logItem.lsn, vlsn, logItem.size, key, (newEmbeddedLN ? newData : null), - expiration, expirationInHours, writeParams.modificationTime, + expiration, expirationInHours, creationTime, + writeParams.modificationTime, writeParams.tombstone); /* Cache record version/size for update operation. */ @@ -2238,10 +2287,10 @@ private OperationResult updateRecordInternal( trace(Level.FINER, TRACE_MOD, bin, index, currLsn, logItem.lsn); OperationResult res = DbInternal.makeResult( - expiration, expirationInHours, true /*update*/, + expiration, expirationInHours, true /*update*/, creationTime, writeParams.modificationTime, getStorageSize(), writeParams.tombstone); - + /* * BeforeImage Support during update */ @@ -2260,9 +2309,11 @@ writeParams.modificationTime, getStorageSize(), writeParams.tombstone ? DBEntry.PutContext.TOMBSTONE : DBEntry.PutContext.UPDATE)); logItem.setBeforeImageData(bImgData); + logItem.setBeforeImageCtx(bImgCtx); } res.setOldModificationTime(oldModificationTime); + res.setOldCreationTime(oldCreationTime); res.setOldStorageSize(oldStorageSize); return res; } @@ -2446,6 +2497,13 @@ assert assertCursorState( } public boolean searchExact(DatabaseEntry searchKey, LockType lockType) { + /* + * Caller of this method won't be configured with OptimisticRead && + * lockType == LockType.READ, so + * special handling is not required for the caller, i.e, release the + * read lock immediately and distinguish the source of the data. + */ + assert !(locker.isOptimisticReadIsolation() && !lockType.isWriteLock()); return searchExact( searchKey, null, lockType, false, false, false) != null; } @@ -2567,6 +2625,7 @@ public OperationResult lockAndGetCurrent( /* Used in the finally to indicate whether exception was raised. */ boolean success = false; + LockStanding standing = null; try { assert assertCursorState( @@ -2625,9 +2684,10 @@ assert assertCursorState( (!foundData.getPartial() || foundData.getPartialLength() != 0)); - if (lockLNAndCheckDefunct( - lockType, excludeTombstones, - dirtyReadAll, dataRequested) == null) { + standing = + lockLNAndCheckDefunct(lockType, excludeTombstones, + dirtyReadAll, dataRequested); + if (standing == null) { if (treeStatsAccumulator != null) { treeStatsAccumulator.incrementDeletedLNCount(); } @@ -2635,12 +2695,53 @@ assert assertCursorState( return null; } - final OperationResult result = getCurrent(foundKey, foundData); + OperationResult result; + if (standing.readCommittedData()) { + try { + result = readLastCommitted( + standing.getLockResult().getWriteLockInfo(), + dataRequested, foundKey, foundData); + } catch (ErasedException | IOException e) { + + /* + * Optimistic read using abortLSN failed, retrying with + * read-committed mode. 
A read lock will be added to the + * cursor and the locker. + */ + Txn tempLockerRef = (Txn) locker; + tempLockerRef.setOptimisticReadIsolation(false); + tempLockerRef.setReadCommittedIsolation(true); + + /* try to acquire the read lock with bin latched.*/ + standing = + lockLNAndCheckDefunct(lockType, excludeTombstones, + dirtyReadAll, dataRequested); + + tempLockerRef.setOptimisticReadIsolation(true); + tempLockerRef.setReadCommittedIsolation(false); + + if (standing == null) { + if (treeStatsAccumulator != null) { + treeStatsAccumulator.incrementDeletedLNCount(); + } + + success = true; + return null; + } + + result = getCurrent(foundKey, foundData); + } + } else { + result = getCurrent(foundKey, foundData); + } success = true; return result; } finally { + + releaseLockForOptimisticRead(standing, lockType); + if (unlatch || !success) { releaseBIN(); } @@ -2706,7 +2807,35 @@ private LockStanding lockLNAndCheckDefunct( lockType, excludeTombstones, false /*allowUncontended*/, false /*noWait*/); + /* + * For transactional locker, + * lockLN() creates a writeLockInfo only when a write lock + * is truly granted. + * In unContended cases, no lock is granted and no writeLockInfo + * is created. + * If created, the writeLockInfo is not initialized - the caller + * of lockLN is responsible for initializing it if desired. + * For example, deleteCurrentRecord(), updateCurrentRecord() and + * insertRecordInternal() will call LockStanding.prepareForUpdate() + * or LockStanding.prepareForInsert() to initialize the wli object. + * But lockAndGetCurrentLN() does not need to do so, since + * lockAndGetCurrentLN() will only be used in LockType.NONE setting. + * + * Here before returning standing, prepareForUpdate is called to + * initialize the wli object, so it can be accessed by potential + * optimisticRead txn. And actually, here it corresponds to + * the case that a read operation configured with LockMode.RMW. + */ if (standing.recordExists()) { + if (lockType.isWriteLock() && locker.isTransactional()) { + assert locker.getWriteLockInfo(standing.lsn) != null; + assert standing.lockResult.getWriteLockInfo() != null; + + standing.prepareForUpdate(bin, index, + (lockType.isRMW()? false : true)); + standing.lockResult.getWriteLockInfo(). + setAbortLogSize(bin.getLastLoggedSize(index)); + } return standing; } @@ -2736,7 +2865,23 @@ private LockStanding lockLNAndCheckDefunct( * Although there is some redundant processing in the sense that lockLN * is called more than once (above and below), this is not considered a * performance issue because the first call does not actually lock. + * + * Here we truly want to acquire the read lock, but if the locker is + * configured as optimisticRead initially, the read lock request could + * be blocked by an existing write lock and a writeLockInfo created by the + * write lock will be returned in advance, instead of waiting for the + * read lock to be granted, so here temporarily + * set the OptimisticReadIsolation to false in order to break to behavior + * of OptimisticRead. */ + Txn tempLockerRef = null; + if (locker.isOptimisticReadIsolation()) { + tempLockerRef = (Txn)locker; + } + if (tempLockerRef != null) { + tempLockerRef.setOptimisticReadIsolation(false); + tempLockerRef.setReadCommittedIsolation(true); + } standing = lockLN( LockType.READ, excludeTombstones, false /*allowUncontended*/, !dataRequested /*noWait*/); @@ -2757,6 +2902,11 @@ private LockStanding lockLNAndCheckDefunct( /* We have acquired a temporary read lock. 
*/ revertLock(standing); + if (tempLockerRef != null) { + tempLockerRef.setOptimisticReadIsolation(true); + tempLockerRef.setReadCommittedIsolation(false); + } + if (standing.recordExists()) { /* * Another txn aborted the deletion or expiration time change while @@ -2772,6 +2922,112 @@ private LockStanding lockLNAndCheckDefunct( return null; } + + /** + * This method is triggered when a read operation configured with + * OptimisticRead is blocked by write operation. + * @param wli the WriteLockInfo object + * @param dataRequested whether data is requested from the caller + * @param key DatabaseEntry object to load the key + * @param data DatabaseEntry object to load the data + * @return OperationResult, will return null if + * read is blocked by an insertion that hasn't been committed, + * or the most recent committed data is expired. + */ + public OperationResult readLastCommitted(WriteLockInfo wli, + boolean dataRequested, + DatabaseEntry key, + DatabaseEntry data) + throws ErasedException, IOException { + + assert TestHookExecute.doIOHookIfSet(fileNotFoundExpHook); + + if (wli.getAbortLsn() == DbLsn.NULL_LSN) { + /* + * Read is blocked by an insertion that hasn't been committed, + * the most recent committed record doesn't exist. + */ + return null; + } + + if (wli.getAbortKnownDeleted()) { + return null; + } + + /* + * If the most recent committed data is expired, return null + */ + if (dbImpl.getEnv().isExpired(wli.getAbortExpiration(), + wli.isAbortExpirationInHours())) { + return null; + } + + if (key != null) { + LN.setEntry(key, bin.getKey(index)); + } + + if (dataRequested) { + /* + * The last committed data is embedded in BIN, and it is stored inside + * wli. + */ + if (wli.getAbortData() != null) { + /* + * Data is requested. + * Copying data is desired here instead of giving up the + * ownership of the data. + */ + LN.setEntry(data, wli.getAbortData()); + } else { + getDataFromAbortLSN(wli.getAbortLsn(), wli.getAbortLogSize(), data); + } + } + + //TODO is this correct? creationTime is not right here. + return DbInternal.makeResult( + wli.getAbortExpiration(), + wli.isAbortExpirationInHours(), + false, + wli.getAbortCreationTime(), + wli.getAbortModificationTime(), + wli.getAbortLogSize(), + wli.getAbortTombstone()); + } + + /** + * Retrieve LN from log files using abortLSN and load data into foundData. + * @param abortLsn the lsn of the most recent committed data + * @param foundData DatabaseEntry object to hold the data + */ + private void getDataFromAbortLSN(long abortLsn, + int abortLogSize, + final DatabaseEntry foundData) + throws ErasedException, FileNotFoundException { + + assert(bin.isLatchExclusiveOwner()); + assert(foundData != null); + + /* + * We don't need to fetch the LN if the user has not requested that we + * return the data, or if we know for sure that the LN is empty. + */ + + final EnvironmentImpl envImpl = dbImpl.getEnv(); + + //fetch the entry + final WholeEntry wholeEntry = envImpl.getLogManager(). + getWholeLogEntry(abortLsn, abortLogSize); + + LNLogEntry lnEntry = (LNLogEntry) wholeEntry.getEntry(); + lnEntry.postFetchInit(dbImpl); + //TODO I think this is unnecessary, needs confirmation. + //BtreeVerifier.verifyDataRecord(lnEntry, bin, idx); + + LN ln = (LN) lnEntry.getResolvedItem(dbImpl); + byte[] data = ln.getData(); + foundData.setData(data); + } + /** * Copy current record into the key and data DatabaseEntry. * @@ -2855,6 +3111,9 @@ public OperationResult getCurrent( bin.getExpiration(index), bin.isExpirationInHours(), false, + (ln != null) ? 
+ ln.getCreationTime() : + bin.getCreationTime(index), (ln != null) ? ln.getModificationTime() : bin.getModificationTime(index), @@ -3586,6 +3845,15 @@ public static class LockStanding { private long lsn; private boolean defunct; + + /** + * readCommittedData == true indicating that: + * A read-txn configured with OptimisticRead tries to read a record but + * blocked by a write-txn on a LN, after the attempt to lock the LN, + * read-txn should use the abortLSN obtained from the write-txn's + * locker to read the committed version of the LN. + */ + private boolean readCommittedData; private LockResult lockResult; /** @@ -3614,7 +3882,7 @@ boolean recordExists() { * case, the abortLsn and abortKD have been set already and should not * be overwritten here. */ - WriteLockInfo prepareForUpdate(BIN bin, int idx) { + WriteLockInfo prepareForUpdate(BIN bin, int idx, boolean obsolete) { DatabaseImpl db = bin.getDatabase(); boolean abortKD = !recordExists(); @@ -3624,6 +3892,7 @@ WriteLockInfo prepareForUpdate(BIN bin, int idx) { int abortExpiration = bin.getExpiration(idx); boolean abortExpirationInHours = bin.isExpirationInHours(); long abortModificationTime = 0L; + long abortCreationTime = 0L; boolean abortTombstone = bin.isTombstone(idx); if (bin.isEmbeddedLN(idx)) { @@ -3638,6 +3907,7 @@ WriteLockInfo prepareForUpdate(BIN bin, int idx) { } abortModificationTime = bin.getModificationTime(idx); + abortCreationTime = bin.getCreationTime(idx); } WriteLockInfo wri = (lockResult == null ? @@ -3652,13 +3922,17 @@ WriteLockInfo prepareForUpdate(BIN bin, int idx) { wri.setAbortVLSN(abortVLSN); wri.setAbortExpiration(abortExpiration, abortExpirationInHours); wri.setAbortModificationTime(abortModificationTime); + wri.setAbortCreationTime(abortCreationTime); wri.setAbortTombstone(abortTombstone); wri.setDb(db); + wri.setObsolete(obsolete); } else { lockResult.setAbortInfo( lsn, abortKD, abortKey, abortData, abortVLSN, abortExpiration, abortExpirationInHours, - abortModificationTime, abortTombstone, db); + abortModificationTime, abortCreationTime, + abortTombstone, db, obsolete); + wri.setObsolete(obsolete || wri.getObsolete()); } return wri; } @@ -3673,8 +3947,26 @@ WriteLockInfo prepareForUpdate(BIN bin, int idx) { static WriteLockInfo prepareForInsert(BIN bin) { WriteLockInfo wri = new WriteLockInfo(); wri.setDb(bin.getDatabase()); + //For the sake of optimisticRead + wri.setAbortLsn(DbLsn.NULL_LSN); return wri; } + + public boolean readCommittedData() { + return readCommittedData; + } + + public void setReadCommittedData(boolean readCommittedData) { + this.readCommittedData = readCommittedData; + } + + public LockResult getLockResult() { + return lockResult; + } + + public long getLockLSN() { + return lsn; + } } /** @@ -3751,6 +4043,14 @@ static WriteLockInfo prepareForInsert(BIN bin) { * LSN for locking, guaranteeing that two conflicting locks cannot be * granted on the old and new LSNs. * + * For transactional locker, + * lockLN() creates a writeLockInfo only when a write lock is truly granted. + * In uncontended cases, no lock is granted and no writeLockInfo is created. + * If created, the writeLockInfo is not initialized - the caller is + * responsible for initializing it. + * Subsequent write-type calls on the same lsn do not overwrite + * the existing writeLockInfo object. 
+ * * Cleaner Migration Locking * ------------------------- * The cleaner takes a non-blocking read lock on the old LSN before @@ -3896,6 +4196,15 @@ public LockStanding lockLN( standing.lsn, lockType, true /*noWait*/, dbImpl, this); } catch (LockNotAvailableException e) { + /* + * non-blocking lock was denied but qualified to read + * committed data + */ + if (standing.lockResult != null && + standing.lockResult.getWriteLockInfo() != null) { + standing.setReadCommittedData(true); + return standing; + } releaseBIN(); throw e; @@ -3919,6 +4228,14 @@ public LockStanding lockLN( bin.isDefunct(index, excludeTombstones); return standing; + } else if (standing.lockResult.getWriteLockInfo() != null) { + /* + * Txn doing a read in optimisticRead mode, blocked by a write lock, + * but got the writeLockInfo from an active write-Txn. We can use the + * writeLockInfo to get the committed data. + */ + standing.setReadCommittedData(true); + return standing; } if (noWait) { @@ -4131,6 +4448,19 @@ private boolean verifyPendingDeleted(LockType lockType) { return true; } + /** + * Read lock will always be released after reading the LN. + */ + public void releaseLockForOptimisticRead(LockStanding standing, + LockType lockType) { + if (lockType.equals(LockType.READ) && + getLocker().isOptimisticReadIsolation() && + standing != null && + !standing.readCommittedData()) { + revertLock(standing); + } + } + public void revertLock(LockStanding standing) { if (standing.lockResult != null) { @@ -4366,10 +4696,15 @@ private void traceInsert( } /* For unit testing only. */ - public void setTestHook(TestHook hook) { + public void setTestHook(TestHook hook) { testHook = hook; } + /* For unit testing only. */ + public void setFileNotFoundExpHook(TestHook hook) { + fileNotFoundExpHook = hook; + } + /* Check that the target bin is latched. For use in assertions. */ private boolean checkAlreadyLatched(boolean isLatched) { if (isLatched) { diff --git a/kvmain/src/main/java/com/sleepycat/je/dbi/DbTree.java b/kvmain/src/main/java/com/sleepycat/je/dbi/DbTree.java index d25c5a47..d7801ec4 100755 --- a/kvmain/src/main/java/com/sleepycat/je/dbi/DbTree.java +++ b/kvmain/src/main/java/com/sleepycat/je/dbi/DbTree.java @@ -1655,9 +1655,24 @@ public DatabaseId getDbIdFromName(final Locker nameLocker, final DatabaseEntry keyDbt = new DatabaseEntry(StringUtils.toUTF8(databaseName)); - if (!nameCursor.searchExact( - keyDbt, writeLock ? LockType.WRITE : LockType.READ)) { - return null; + /* + * A read lock is needed here and will be hold until the + * nameLocker closes. + */ + Txn tempLocker = null; + if (nameLocker.isOptimisticReadIsolation() && !writeLock) { + tempLocker = (Txn) nameLocker; + tempLocker.setOptimisticReadIsolation(false); + } + try { + if (!nameCursor.searchExact( + keyDbt, writeLock ? 
LockType.WRITE : LockType.READ)) { + return null; + } + } finally { + if (tempLocker != null) { + tempLocker.setOptimisticReadIsolation(true); + } } final NameLN nameLN = (NameLN) nameCursor.getCurrentLN( diff --git a/kvmain/src/main/java/com/sleepycat/je/dbi/EnvironmentImpl.java b/kvmain/src/main/java/com/sleepycat/je/dbi/EnvironmentImpl.java old mode 100644 new mode 100755 index 057ea046..c35b6e3e --- a/kvmain/src/main/java/com/sleepycat/je/dbi/EnvironmentImpl.java +++ b/kvmain/src/main/java/com/sleepycat/je/dbi/EnvironmentImpl.java @@ -939,7 +939,10 @@ public synchronized void finishInit(EnvironmentConfig envConfig) open(); runOrPauseDaemons(configManager); - beforeImageIndex = new BeforeImageIndex(this); + if (DbInternal.getBImgIdx(envConfig)) { + beforeImageIndex = new BeforeImageIndex(this); + } + success = true; } finally { if (!success) { @@ -3461,7 +3464,7 @@ public InternalEnvironment(File envHome, VersionMismatchException, DatabaseException, IllegalArgumentException { - super(envHome, configuration, null /*repConfig*/, envImpl); + super(envHome, configuration, null /*repConfig*/, envImpl, false); } @Override diff --git a/kvmain/src/main/java/com/sleepycat/je/dbi/MemoryBudget.java b/kvmain/src/main/java/com/sleepycat/je/dbi/MemoryBudget.java old mode 100644 new mode 100755 diff --git a/kvmain/src/main/java/com/sleepycat/je/dbi/WriteParams.java b/kvmain/src/main/java/com/sleepycat/je/dbi/WriteParams.java old mode 100644 new mode 100755 index 1fb9a27c..821e846c --- a/kvmain/src/main/java/com/sleepycat/je/dbi/WriteParams.java +++ b/kvmain/src/main/java/com/sleepycat/je/dbi/WriteParams.java @@ -14,11 +14,11 @@ package com.sleepycat.je.dbi; import java.util.concurrent.TimeUnit; +import com.sleepycat.je.util.TimeSupplier; import com.sleepycat.je.CacheMode; import com.sleepycat.je.WriteOptions; import com.sleepycat.je.log.ReplicationContext; -import com.sleepycat.je.util.TimeSupplier; /** * A struct for passing and returning certain params to/from 'put' and 'delete' @@ -33,7 +33,8 @@ public class WriteParams { public final boolean expirationInHours; public final boolean updateExpiration; public final boolean tombstone; - public final long modificationTime; + public long creationTime; + public long modificationTime; private final String[] allIndexDbNames; private final long[] allIndexIds; @@ -59,6 +60,7 @@ public WriteParams( final int expiration, final boolean expirationInHours, final boolean updateExpiration, + final long creationTime, final long modificationTime, final boolean tombstone, final String[] allIndexDbNames, @@ -72,6 +74,7 @@ public WriteParams( this.expiration = expiration; this.expirationInHours = expirationInHours; this.updateExpiration = updateExpiration; + this.creationTime = creationTime; this.modificationTime = modificationTime; this.tombstone = tombstone; this.allIndexDbNames = allIndexDbNames; @@ -91,6 +94,7 @@ public WriteParams( final int expiration, final boolean expirationInHours, final boolean updateExpiration, + final long creationTime, final long modificationTime, final boolean tombstone, final String[] allIndexDbNames, @@ -101,8 +105,9 @@ public WriteParams( final boolean beforeImageExpirationInHours) { this(cacheMode, preprocessor, repContext, expiration, expirationInHours, - updateExpiration, modificationTime, tombstone, allIndexDbNames, - allIndexIds, indexesToUpdate, enableBeforeImage); + updateExpiration, creationTime, modificationTime, + tombstone, allIndexDbNames, + allIndexIds, indexesToUpdate, enableBeforeImage); this.beforeImageExpiration = 
beforeImageExpiration; this.beforeImageExpirationInHours = beforeImageExpirationInHours; } @@ -125,6 +130,9 @@ public WriteParams(final WriteOptions options, options.getTTLUnit()), options.getTTLUnit() == TimeUnit.HOURS, options.getUpdateTTL(), + dbImpl.getSortedDuplicates() ? + options.getCreationTime() + : getCreateTime(options.getCreationTime()), dbImpl.getSortedDuplicates() ? options.getModificationTime() : getModTime(options.getModificationTime()), @@ -133,7 +141,9 @@ public WriteParams(final WriteOptions options, options.getAllIndexIds(), options.getIndexesToUpdate(), options.getBeforeImageTTL() > 0, - options.getBeforeImageTTL(), + TTL.ttlToExpiration( + options.getBeforeImageTTL(), + options.getBeforeImageTTLUnit()), options.getBeforeImageTTLUnit() == TimeUnit.HOURS); } @@ -153,7 +163,7 @@ public WriteParams( final boolean tombstone) { this( cacheMode, null /*preprocessor*/, repContext, - expiration, expirationInHours, updateExpiration, + expiration, expirationInHours, updateExpiration, 0L, 0L /*modificationTime*/, tombstone, null, null, null, false); } @@ -164,7 +174,7 @@ public WriteParams(final ReplicationContext repContext) { this( null /*cacheMode*/, null /*preprocessor*/, repContext, 0 /*expiration*/, false /*expirationInHours*/, - false /*updateExpiration*/, getModTime(0L), + false /*updateExpiration*/, getCreateTime(0L), getModTime(0L), false /*tombstone*/, null, null, null, false); } @@ -172,6 +182,30 @@ private static long getModTime(final long modTimeParam) { return modTimeParam != 0 ? modTimeParam : TimeSupplier.currentTimeMillis(); } + private static long getCreateTime(final long createTimeParam) { + /* if insert it would take modtime + * Otherwise its no-op unless user explicitly sets it + * same goes for WriteParams which uses replication context + */ + return createTimeParam != 0 ? createTimeParam : 0; + } + + public void setModificationTime(long val) { + modificationTime = val; + } + + public long getModificationTime() { + return modificationTime; + } + + public void setCreationTime(long val) { + creationTime = val; + } + + public long getCreationTime() { + return creationTime; + } + public void setExpirationUpdated(boolean val) { expirationUpdated = val; } diff --git a/kvmain/src/main/java/com/sleepycat/je/log/LogEntryType.java b/kvmain/src/main/java/com/sleepycat/je/log/LogEntryType.java old mode 100644 new mode 100755 index 1f6834e6..1ffc467f --- a/kvmain/src/main/java/com/sleepycat/je/log/LogEntryType.java +++ b/kvmain/src/main/java/com/sleepycat/je/log/LogEntryType.java @@ -280,12 +280,16 @@ public class LogEntryType { * ----------------------- * [KVSTORE-2316] Remove support for OldBINDelta * - * Version 25 (in JE 24.2) + * Version 25 (in JE 25.1) * ----------------------- * [KVSTORE-2302] Add support for Before Images * + * Version 26 (in JE 25.2) + * ----------------------- + * [KVSTORE-2587] Add support for CreationTime + * */ - public static final int LOG_VERSION = 25; + public static final int LOG_VERSION = 26; /** * The latest log version for which the replicated log format of any @@ -296,7 +300,7 @@ public class LogEntryType { * non-replicable entries, or only to the local, not replicated, form of * replicable entries, the as was the case for log versions 9, 10, and 11. */ - public static final int LOG_VERSION_HIGHEST_REPLICABLE = 25; + public static final int LOG_VERSION_HIGHEST_REPLICABLE = 26; /** * Log versions prior to 8 (JE 5.0) are no longer supported as of JE 20.1. 
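Note: the new creationTime field is gated on the log-version bump; the reader and writer sides of LNLogEntry both key off LOG_VERSION_CREATION_TIME (26). A condensed reader-side view of the gate, for clarity only (constants and helpers are the ones added by this patch; the surrounding variables are assumed to be in scope as in readBaseLNEntry()):

    modificationTime = LogUtils.readPackedLong(entryBuffer);
    if (logVersion >= LogEntryType.LOG_VERSION_CREATION_TIME
            && LogEntryType.isExplicitCreateTimeLNType(header.getType())) {
        /* Updates and deletes written at version 26+ always carry creationTime. */
        creationTime = LogUtils.readPackedLong(entryBuffer);
    } else if (logVersion >= LogEntryType.LOG_VERSION_CREATION_TIME
            && haveInsertsCreationTime) {
        /* Inserts carry it only when flags3 marks it as explicitly set. */
        creationTime = LogUtils.readPackedLong(entryBuffer);
    } else {
        /* Pre-26 entries and ordinary inserts: the field is absent. */
        creationTime = WriteOptions.CREATION_TIME_NOT_SET;
    }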
@@ -335,6 +339,12 @@ public class LogEntryType { */ public static final int LOG_VERSION_CKPT_SCAN_IDS = 23; + /* + * The log version that introduced creation time to the update and delete + * log entries + */ + public static final int LOG_VERSION_CREATION_TIME = 26; + /** * Should be used for reading the entry header of the file header, since * the actual version is not known until the FileHeader item is read. @@ -1014,9 +1024,23 @@ boolean isINType() { return nodeType == NodeType.IN; } - public static boolean isBeforeImageType(byte typeNum) { - LogEntryType t = findType(typeNum); - return t != null && (t == LOG_UPD_LN_TRANSACTIONAL_WITH_BEFORE_IMAGE - || t == LOG_DEL_LN_TRANSACTIONAL_WITH_BEFORE_IMAGE); - } + public static boolean isBeforeImageType(byte typeNum) { + LogEntryType t = findType(typeNum); + return t != null && (t == LOG_UPD_LN_TRANSACTIONAL_WITH_BEFORE_IMAGE + || t == LOG_DEL_LN_TRANSACTIONAL_WITH_BEFORE_IMAGE); + } + + public static boolean isExplicitCreateTimeLNType(byte typeNum) { + LogEntryType t = findType(typeNum); + return t != null && (t == LOG_UPD_LN_TRANSACTIONAL_WITH_BEFORE_IMAGE + || t == LOG_UPD_LN_TRANSACTIONAL || t == LOG_UPD_LN + || t == LOG_DEL_LN_TRANSACTIONAL_WITH_BEFORE_IMAGE + || t == LOG_DEL_LN_TRANSACTIONAL || t == LOG_DEL_LN); + } + + public static boolean isExplicitCreateTimeInsertLNType(byte typeNum) { + LogEntryType t = findType(typeNum); + return t != null && (t == LOG_INS_LN_TRANSACTIONAL + || t == LOG_INS_LN); + } } diff --git a/kvmain/src/main/java/com/sleepycat/je/log/LogItem.java b/kvmain/src/main/java/com/sleepycat/je/log/LogItem.java old mode 100644 new mode 100755 index e46ad5b0..6e297e27 --- a/kvmain/src/main/java/com/sleepycat/je/log/LogItem.java +++ b/kvmain/src/main/java/com/sleepycat/je/log/LogItem.java @@ -15,6 +15,7 @@ import java.nio.ByteBuffer; +import com.sleepycat.je.beforeimage.BeforeImageContext; import com.sleepycat.je.log.entry.ReplicableLogEntry; import com.sleepycat.je.utilint.DbLsn; @@ -66,6 +67,7 @@ public class LogItem { ByteBuffer nonCachedBuffer = null; byte[] bImgData = null; + BeforeImageContext bImgCtx = null; public ByteBuffer getBuffer() { return (cachedBuffer != null) ? @@ -76,6 +78,14 @@ public void setBeforeImageData(byte[] bImgData) { this.bImgData = bImgData; } + public void setBeforeImageCtx(BeforeImageContext bImgCtx) { + this.bImgCtx = bImgCtx; + } + + public BeforeImageContext getBeforeImageCtx() { + return bImgCtx; + } + public byte[] getBeforeImageData() { return bImgData; } diff --git a/kvmain/src/main/java/com/sleepycat/je/log/entry/LNEntryInfo.java b/kvmain/src/main/java/com/sleepycat/je/log/entry/LNEntryInfo.java old mode 100644 new mode 100755 index e517237a..46411a93 --- a/kvmain/src/main/java/com/sleepycat/je/log/entry/LNEntryInfo.java +++ b/kvmain/src/main/java/com/sleepycat/je/log/entry/LNEntryInfo.java @@ -48,6 +48,13 @@ public class LNEntryInfo { */ public long modificationTime; + /** + * The last creation time of the log entry, or zero if the LN belongs + * to a secondary (duplicates) database or was originally written using + * JE 25.2 or earlier. + */ + public long creationTime; + /** * The tombstone property of the record. 
*/ @@ -83,6 +90,8 @@ public class LNEntryInfo { */ public int dataLength; + + public void getKey(final DatabaseEntry entry) { entry.setData(key, keyOffset, keyLength); } diff --git a/kvmain/src/main/java/com/sleepycat/je/log/entry/LNLogEntry.java b/kvmain/src/main/java/com/sleepycat/je/log/entry/LNLogEntry.java old mode 100644 new mode 100755 index 98858889..4f4a4d70 --- a/kvmain/src/main/java/com/sleepycat/je/log/entry/LNLogEntry.java +++ b/kvmain/src/main/java/com/sleepycat/je/log/entry/LNLogEntry.java @@ -23,6 +23,7 @@ import com.sleepycat.je.DatabaseEntry; import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.WriteOptions; import com.sleepycat.je.dbi.DatabaseId; import com.sleepycat.je.dbi.DatabaseImpl; import com.sleepycat.je.dbi.DupKeyData; @@ -233,6 +234,55 @@ * havePriorSize, priorSize, havePriorFile, priorFile, * haveAbortModificationTime, abortModificationTime, abortTombstone. * + * {@literal + * 26 <= version : + * + * 1-byte flags + * abortKnownDeleted + * embeddedLN + * haveAbortKey + * haveAbortData + * haveAbortVLSN + * haveAbortLSN + * haveAbortExpiration + * haveExpiration + * 1-byte flags2 + * havePriorSize + * havePriorFile + * enableBeforeImage + * tombstone + * abortTombstone + * haveAbortModificationTime + * haveAbortCreationTime << new + * 1-byte flags3 << new + * haveInsertsCreationTime << new + * databaseid + * expiration -- if haveExpiration + * modificationTime + * creationTime << new + * key + * data + * abortLsn -- if transactional and haveAbortLSN + * txn id -- if transactional + * prev LSN of same txn -- if transactional + * abort key -- if haveAbortKey + * abort data -- if haveAbortData + * abort vlsn -- if haveAbortVLSN + * abort expiration -- if haveAbortExpiration + * abort modification time -- if haveAbortModificationTime + * abort creation time -- if haveAbortCreationTime << new + * priorSize -- if havePriorSize + * priorFile -- if havePriorFile + * } + * + * In forReplication mode, these flags and fields are omitted: + * abortKnownDeleted, embeddedLN, haveAbortKey, haveAbortData, + * haveAbortVLSN, abort key, abort data, abort vlsn, + * haveAbortLSN, abortLsn, haveAbortExpiration, abort expiration, + * havePriorSize, priorSize, havePriorFile, priorFile, + * haveAbortModificationTime, abortModificationTime, + * haveAbortCreationTime, abortCreationTime, abortTombstone. + * * NOTE: LNLogEntry is sub-classed by NameLNLogEntry, which adds some extra * fields after the fields shown above. NameLNLogEntry never has a * Before Image. @@ -256,13 +306,17 @@ public class LNLogEntry extends BaseReplicableEntry { private static final byte HAVE_ABORT_MODIFICATION_TIME_MASK = 0x10; private static final byte BLIND_DELETION_MASK = 0x20; private static final byte ENABLE_BEFORE_IMAGE_MASK = 0x40; + private static final byte HAVE_ABORT_CREATION_TIME_MASK = (byte) 0x80; + /* flags3 */ + private static final byte HAVE_INSERTS_CREATION_TIME_MASK = (byte) 0x1; /** * Used for computing the minimum log space used by an LNLogEntry. */ - public static final int MIN_LOG_SIZE = 2 + // Flags + public static final int MIN_LOG_SIZE = 3 + // Flags 1 + // DatabaseId 5 + // ModificationTime + 5 + // CreationTime 1 + // LN with zero-length data LogEntryHeader.MIN_HEADER_SIZE; @@ -273,7 +327,7 @@ public class LNLogEntry extends BaseReplicableEntry { * * @see #getLastFormatChange */ - private static final int LAST_FORMAT_CHANGE = 19; + private static final int LAST_FORMAT_CHANGE = LogEntryType.LOG_VERSION_CREATION_TIME; /* * Persistent fields. 
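Note: HAVE_ABORT_CREATION_TIME_MASK takes the last free bit (0x80) of flags2, which is presumably why the per-insert creation-time indicator had to move to a new third flag byte; flags3 currently carries only HAVE_INSERTS_CREATION_TIME_MASK (0x1). A minimal check of the new bit, mirroring Flags.setFlags():

    /* flags3 exists only for entries written at log version >= LOG_VERSION_CREATION_TIME. */
    boolean haveInsertsCreationTime =
        (flags3 & HAVE_INSERTS_CREATION_TIME_MASK) != 0;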
@@ -333,6 +387,9 @@ public class LNLogEntry extends BaseReplicableEntry { /* Abort modification time in millis. */ private long abortModificationTime = 0; + /* Abort creation time in millis. */ + private long abortCreationTime = 0; + /* Abort tombstone property. */ private boolean abortTombstone = false; @@ -376,7 +433,14 @@ public class LNLogEntry extends BaseReplicableEntry { * version was embedded in the BIN. */ private boolean haveAbortModificationTime; - + + /* + * True if the logrec stores an abort creation time, which is the case + * only if (a) this is a transactional logrec, and (b) the record's abort + * version was embedded in the BIN. + */ + private boolean haveAbortCreationTime; + /* * True if the logrec stores the before image which would be empty for * inserts but non-empty for updates and deletes @@ -436,6 +500,9 @@ public class LNLogEntry extends BaseReplicableEntry { /* Modification time as Java time in millis. */ private long modificationTime; + /* Creation time as Java time in millis. */ + private long creationTime; + /* Whether the record is a tombstone. */ private boolean tombstone; @@ -448,6 +515,12 @@ public class LNLogEntry extends BaseReplicableEntry { /* Transient field for getUserKeyData. Is null if status is unknown. */ private Boolean dupStatus; + + /* For cases when creationtime differs from the modification time like in + * the partition migration , where a update is actually insert to a new shard + * database + */ + private boolean haveInsertsCreationTime; /** * Creates an instance to read an entry. @@ -478,12 +551,14 @@ public LNLogEntry( int abortExpiration, boolean abortExpirationInHours, long abortModificationTime, + long abortCreationTime, boolean abortTombstone, byte[] key, T ln, boolean embeddedLN, int expiration, boolean expirationInHours, + long creationTime, long modificationTime, boolean tombstone, boolean blindDeletion, @@ -502,6 +577,7 @@ public LNLogEntry( this.abortExpiration = abortExpiration; this.abortExpirationInHours = abortExpirationInHours; this.abortModificationTime = abortModificationTime; + this.abortCreationTime = abortCreationTime; this.abortTombstone = abortTombstone; this.enableBeforeImage = enableBeforeImage; @@ -512,12 +588,17 @@ public LNLogEntry( haveAbortExpiration = (abortExpiration != 0); haveExpiration = (expiration != 0); haveAbortModificationTime = (abortModificationTime != 0); + haveAbortCreationTime = (abortCreationTime != 0); + haveInsertsCreationTime = (LogEntryType + .isExplicitCreateTimeInsertLNType(entryType.getTypeNum())) + && (creationTime != modificationTime); this.embeddedLN = embeddedLN; this.key = key; this.ln = ln; this.expiration = expiration; this.expirationInHours = expirationInHours; + this.creationTime = creationTime; this.modificationTime = modificationTime; this.tombstone = tombstone; this.blindDeletion = blindDeletion; @@ -551,6 +632,7 @@ protected void reset() { abortExpiration = 0; abortExpirationInHours = false; abortModificationTime = 0; + abortCreationTime = 0; abortTombstone = false; haveAbortLSN = false; @@ -560,6 +642,7 @@ protected void reset() { haveAbortExpiration = false; haveExpiration = false; haveAbortModificationTime = false; + haveAbortCreationTime = false; havePriorSize = false; havePriorFile = false; @@ -568,13 +651,15 @@ protected void reset() { ln = null; expiration = 0; expirationInHours = false; + creationTime = 0; modificationTime = 0; tombstone = false; blindDeletion = false; priorSize = 0; priorFile = DbLsn.MAX_FILE_NUM; - dupStatus = null; + + haveInsertsCreationTime = 
false; } /** @@ -592,6 +677,7 @@ public void getLNEntryInfo(final LNEntryInfo lnInfo) { lnInfo.databaseId = getDbId().getId(); lnInfo.transactionId = (txn != null) ? txn.getId() : -1; + lnInfo.creationTime = getCreationTime(); lnInfo.modificationTime = getModificationTime(); lnInfo.tombstone = isTombstone(); lnInfo.data = ln.getData(); @@ -627,7 +713,11 @@ public static void parseEntry(final ByteBuffer buffer, if (logVersion >= 12) { byte flags = buffer.get(); byte flags2 = (logVersion >= 16) ? buffer.get() : (byte) 0; - flag.setFlags(flags,flags2); + byte flags3 = (logVersion >= LogEntryType.LOG_VERSION_CREATION_TIME) + ? buffer.get() + : (byte) 0; + + flag.setFlags(flags, flags2, flags3); } lnInfo.databaseId = LogUtils.readPackedLong(buffer); @@ -637,7 +727,19 @@ public static void parseEntry(final ByteBuffer buffer, if (flag.haveExpiration) { LogUtils.readPackedInt(buffer); } + lnInfo.modificationTime = LogUtils.readPackedLong(buffer); + if (logVersion >= LogEntryType.LOG_VERSION_CREATION_TIME && LogEntryType + .isExplicitCreateTimeLNType(header.getType())) { + lnInfo.creationTime = LogUtils.readPackedLong(buffer); + } else if (logVersion >= LogEntryType.LOG_VERSION_CREATION_TIME && + flag.haveInsertsCreationTime) { + lnInfo.creationTime = LogUtils.readPackedLong(buffer); + } else { + //for older version logs and inserts + lnInfo.creationTime = WriteOptions.CREATION_TIME_NOT_SET; + } + lnInfo.key = buffer.array(); /* @@ -671,13 +773,13 @@ public static void parseEntry(final ByteBuffer buffer, * flag.haveAbortLSN is not used from here, * so no need to change it as readBaseLNEntry does */ - flag.setFlags(buffer.get(), (byte) 0); + flag.setFlags(buffer.get(), (byte) 0, (byte) 0); } lnInfo.transactionId = LogUtils.readPackedLong(buffer); LogUtils.readPackedLong(buffer); } else if (logVersion == 11) { - flag.setFlags(buffer.get(), (byte) 0); + flag.setFlags(buffer.get(), (byte) 0, (byte) 0); } if (logVersion >= 11) { @@ -704,6 +806,12 @@ public static void parseEntry(final ByteBuffer buffer, } } + if (logVersion >= LogEntryType.LOG_VERSION_CREATION_TIME) { + if (flag.haveAbortCreationTime) { + LogUtils.readPackedLong(buffer); + } + } + if (logVersion >= 12 && logVersion < 19) { if (flag.haveExpiration) { LogUtils.readPackedInt(buffer); @@ -743,7 +851,6 @@ public static void parseEntry(final ByteBuffer buffer, } lnInfo.tombstone = flag.tombstone; - buffer.position(recStartPosition); } @@ -756,12 +863,15 @@ static class Flags { boolean haveAbortVLSN = false; boolean haveAbortExpiration = false; boolean haveAbortModificationTime = false; + boolean haveAbortCreationTime = false; boolean enableBeforeImage = false; boolean havePriorSize = false; boolean havePriorFile = false; boolean tombstone = false; + boolean haveInsertsCreationTime = false; - public void setFlags(byte flags, byte flags2) { + + public void setFlags(byte flags, byte flags2, byte flags3) { haveExpiration = ((flags & HAVE_EXPIRATION_MASK) != 0); haveAbortLSN = ((flags & HAVE_ABORT_LSN_MASK) != 0); haveAbortKey = ((flags & HAVE_ABORT_KEY_MASK) != 0); @@ -772,10 +882,14 @@ public void setFlags(byte flags, byte flags2) { havePriorSize = ((flags2 & HAVE_PRIOR_SIZE_MASK) != 0); havePriorFile = ((flags2 & HAVE_PRIOR_FILE_MASK) != 0); haveAbortModificationTime = - ((flags2 & HAVE_ABORT_MODIFICATION_TIME_MASK) != 0); + ((flags2 & HAVE_ABORT_MODIFICATION_TIME_MASK) != 0); + haveAbortCreationTime = + ((flags2 & HAVE_ABORT_CREATION_TIME_MASK) != 0); enableBeforeImage = ((flags2 & ENABLE_BEFORE_IMAGE_MASK) != 0); tombstone = ((flags2 & 
TOMBSTONE_MASK) != 0); + haveInsertsCreationTime = ((flags3 + & HAVE_INSERTS_CREATION_TIME_MASK) != 0); } } @@ -823,7 +937,11 @@ protected final void readBaseLNEntry( if (logVersion >= 12) { byte flags = entryBuffer.get(); byte flags2 = (logVersion >= 16) ? entryBuffer.get() : (byte) 0; - setFlags(flags, flags2); + byte flags3 = (logVersion >= LogEntryType.LOG_VERSION_CREATION_TIME) + ? entryBuffer.get() + : (byte) 0; + + setFlags(flags, flags2, flags3); } ln = newInstanceOfType(); @@ -839,7 +957,17 @@ protected final void readBaseLNEntry( expirationInHours = true; } } - modificationTime = LogUtils.readPackedLong(entryBuffer); + modificationTime = LogUtils.readPackedLong(entryBuffer); + if (logVersion >= LogEntryType.LOG_VERSION_CREATION_TIME && + LogEntryType.isExplicitCreateTimeLNType(header.getType())) { + creationTime = LogUtils.readPackedLong(entryBuffer); + } else if (logVersion >= LogEntryType.LOG_VERSION_CREATION_TIME && + haveInsertsCreationTime) { + creationTime = LogUtils.readPackedLong(entryBuffer); + } else { + creationTime = WriteOptions.CREATION_TIME_NOT_SET; + } + key = LogUtils.readByteArray(entryBuffer); ln.readFromLog(envImpl, entryBuffer, logVersion); } @@ -859,7 +987,7 @@ protected final void readBaseLNEntry( } if (logVersion < 12) { - setFlags(entryBuffer.get(), (byte) 0); + setFlags(entryBuffer.get(), (byte) 0, (byte) 0); haveAbortLSN = (abortLsn != DbLsn.NULL_LSN); } @@ -867,7 +995,7 @@ protected final void readBaseLNEntry( txn.readFromLog(envImpl, entryBuffer, logVersion); } else if (logVersion == 11) { - setFlags(entryBuffer.get(), (byte) 0); + setFlags(entryBuffer.get(), (byte) 0, (byte) 0); } if (logVersion >= 11) { @@ -898,6 +1026,12 @@ protected final void readBaseLNEntry( } } + if (logVersion >= LogEntryType.LOG_VERSION_CREATION_TIME) { + if (haveAbortCreationTime) { + abortCreationTime = LogUtils.readPackedLong(entryBuffer); + } + } + if (logVersion >= 12 && logVersion < 19) { if (haveExpiration) { expiration = LogUtils.readPackedInt(entryBuffer); @@ -931,6 +1065,8 @@ protected final void readBaseLNEntry( /* Save cached values after read. */ ln.setModificationTime(modificationTime); + ln.setCreationTime(creationTime); + ln.setVLSNSequence( (header.getVLSN() != INVALID_VLSN) ? header.getVLSN() : NULL_VLSN); @@ -938,7 +1074,7 @@ protected final void readBaseLNEntry( dupStatus = null; } - private void setFlags(final byte flags, final byte flags2) { + private void setFlags(final byte flags, final byte flags2, final byte flags3) { /* First flags byte. 
*/ embeddedLN = ((flags & EMBEDDED_LN_MASK) != 0); @@ -958,8 +1094,12 @@ private void setFlags(final byte flags, final byte flags2) { abortTombstone = ((flags2 & ABORT_TOMBSTONE_MASK) != 0); haveAbortModificationTime = ((flags2 & HAVE_ABORT_MODIFICATION_TIME_MASK) != 0); + haveAbortCreationTime = + ((flags2 & HAVE_ABORT_CREATION_TIME_MASK) != 0); enableBeforeImage = ((flags2 & ENABLE_BEFORE_IMAGE_MASK) != 0); + haveInsertsCreationTime = + ((flags3 & HAVE_INSERTS_CREATION_TIME_MASK) != 0); } @Override @@ -1027,6 +1167,18 @@ public StringBuilder dumpEntry(StringBuilder sb, boolean verbose) { sb.append(""); } + if (creationTime != 0) { + sb.append(""); + } + + if (haveInsertsCreationTime) { + sb.append(""); + } + if (modificationTime != 0) { sb.append(""); } + if (haveAbortCreationTime) { + sb.append(""); + } if (enableBeforeImage) { sb.append(""); } } + return sb; } @@ -1178,6 +1336,10 @@ protected final int getBaseLNEntrySize( if (logVersion >= 16) { size += 1; // flags2 } + + if (logVersion >= LogEntryType.LOG_VERSION_CREATION_TIME) { + size += 1; // flags3 + } if (entryType.isTransactional()) { if (logVersion < 12 || (haveAbortLSN && !forReplication)) { @@ -1219,6 +1381,11 @@ protected final int getBaseLNEntrySize( abortModificationTime); } } + if (logVersion >= LogEntryType.LOG_VERSION_CREATION_TIME) { + if (haveAbortCreationTime) { + size += LogUtils.getPackedLongLogSize(abortCreationTime); + } + } } if (logVersion >= 12) { @@ -1228,6 +1395,13 @@ protected final int getBaseLNEntrySize( } } + if (logVersion >= LogEntryType.LOG_VERSION_CREATION_TIME && + LogEntryType.isExplicitCreateTimeLNType(entryType.getTypeNum())) { + size += LogUtils.getPackedLongLogSize(creationTime); + } else if (logVersion >= LogEntryType.LOG_VERSION_CREATION_TIME + && haveInsertsCreationTime) { + size += LogUtils.getPackedLongLogSize(creationTime); + } return size; } @@ -1264,6 +1438,8 @@ protected final void writeBaseLNEntry( */ byte flags = 0; byte flags2 = 0; + byte flags3 = 0; + if (entryType.isTransactional() && (logVersion < 12 || !forReplication)) { @@ -1312,6 +1488,11 @@ protected final void writeBaseLNEntry( flags2 |= ABORT_TOMBSTONE_MASK; } } + if (logVersion >= LogEntryType.LOG_VERSION_CREATION_TIME) { + if (haveAbortCreationTime) { + flags2 |= HAVE_ABORT_CREATION_TIME_MASK; + } + } } if (logVersion >= 19) { @@ -1331,6 +1512,12 @@ protected final void writeBaseLNEntry( flags2 |= ENABLE_BEFORE_IMAGE_MASK; } } + + if (logVersion >= LogEntryType.LOG_VERSION_CREATION_TIME) { + if (haveInsertsCreationTime) { + flags3 |= HAVE_INSERTS_CREATION_TIME_MASK; + } + } if (logVersion >= 12) { if (haveExpiration) { @@ -1342,6 +1529,10 @@ protected final void writeBaseLNEntry( if (logVersion >= 16) { destBuffer.put(flags2); } + + if (logVersion >= LogEntryType.LOG_VERSION_CREATION_TIME) { + destBuffer.put(flags3); + } dbId.writeToLog(destBuffer, logVersion, forReplication); @@ -1352,6 +1543,15 @@ protected final void writeBaseLNEntry( expirationInHours ? 
(-expiration) : expiration); } LogUtils.writePackedLong(destBuffer, modificationTime); + if (logVersion >= LogEntryType.LOG_VERSION_CREATION_TIME + && LogEntryType.isExplicitCreateTimeLNType( + entryType.getTypeNum())) { + LogUtils.writePackedLong(destBuffer, creationTime); + } else if (logVersion >= LogEntryType.LOG_VERSION_CREATION_TIME + && haveInsertsCreationTime) { + LogUtils.writePackedLong(destBuffer, creationTime); + } + LogUtils.writeByteArray(destBuffer, key); ln.writeToLog(destBuffer, logVersion, forReplication); } @@ -1398,6 +1598,12 @@ protected final void writeBaseLNEntry( destBuffer, abortModificationTime); } } + if (logVersion >= LogEntryType.LOG_VERSION_CREATION_TIME) { + if (haveAbortCreationTime) { + LogUtils.writePackedLong( + destBuffer, abortCreationTime); + } + } } if (logVersion >= 12 && logVersion < 19) { @@ -1457,6 +1663,7 @@ public void postLogWork( } /* Save cached values after write. */ + ln.setCreationTime(creationTime); ln.setModificationTime(modificationTime); ln.setVLSNSequence( (vlsn != INVALID_VLSN) ? vlsn : NULL_VLSN); @@ -1571,6 +1778,17 @@ public long getModificationTime() { return modificationTime; } + /** + * Returns the LN's creation time, or zero if the LN belongs to a + * secondary (duplicates) database or was originally written using JE 25.1 + * or earlier. + * + * @since 25.2 + */ + public long getCreationTime() { + return creationTime; + } + /** * Returns the tombstone property of the record. * @@ -1676,6 +1894,10 @@ public long getAbortModificationTime() { return abortModificationTime; } + public long getAbortCreationTime() { + return abortCreationTime; + } + public boolean getAbortTombstone() { return abortTombstone; } diff --git a/kvmain/src/main/java/com/sleepycat/je/log/entry/NameLNLogEntry.java b/kvmain/src/main/java/com/sleepycat/je/log/entry/NameLNLogEntry.java old mode 100644 new mode 100755 index 5b018889..140174a0 --- a/kvmain/src/main/java/com/sleepycat/je/log/entry/NameLNLogEntry.java +++ b/kvmain/src/main/java/com/sleepycat/je/log/entry/NameLNLogEntry.java @@ -122,6 +122,7 @@ public NameLNLogEntry( long abortModificationTime, byte[] key, NameLN nameLN, + long creationTime, long modificationTime, int priorSize, long priorLsn, @@ -133,9 +134,10 @@ public NameLNLogEntry( null/*abortKey*/, null/*abortData*/, NULL_VLSN/*abortVLSN*/, 0 /*abortExpiration*/, false /*abortExpirationInHours*/, - abortModificationTime, false /*abortTombstone*/, + abortModificationTime, 0 /*abortCreationTime*/, + false /*abortTombstone*/, key, nameLN, false/*newEmbeddedLN*/, - 0 /*expiration*/, false /*expirationInHours*/, + 0 /*expiration*/, false /*expirationInHours*/, creationTime, modificationTime, false /*tombstone*/, false /*blindDeletion*/, priorSize, priorLsn, false); diff --git a/kvmain/src/main/java/com/sleepycat/je/recovery/RecoveryManager.java b/kvmain/src/main/java/com/sleepycat/je/recovery/RecoveryManager.java old mode 100644 new mode 100755 index da7bb273..6b46c0c2 --- a/kvmain/src/main/java/com/sleepycat/je/recovery/RecoveryManager.java +++ b/kvmain/src/main/java/com/sleepycat/je/recovery/RecoveryManager.java @@ -2002,6 +2002,7 @@ private void recoveryUndo( lnEntry.getAbortKey(), lnEntry.getAbortData(), lnEntry.getAbortVLSN(), lnEntry.getAbortExpiration(), lnEntry.isAbortExpirationInHours(), + lnEntry.getCreationTime(), lnEntry.getModificationTime(), lnEntry.isTombstone()); } @@ -2019,6 +2020,7 @@ public static void abortUndo( lnEntry.getAbortKey(), lnEntry.getAbortData(), lnEntry.getAbortVLSN(), lnEntry.getAbortExpiration(), 
lnEntry.isAbortExpirationInHours(), + lnEntry.getAbortCreationTime(), lnEntry.getAbortModificationTime(), lnEntry.getAbortTombstone()); } @@ -2036,6 +2038,7 @@ public static void rollbackUndo( revertTo.revertLsn, revertTo.revertKD, revertTo.revertPD, revertTo.revertKey, revertTo.revertData, revertTo.revertVLSN, revertTo.revertExpiration, revertTo.revertExpirationInHours, + revertTo.revertCreationTime, revertTo.revertModificationTime, revertTo.revertTombstone); } @@ -2054,6 +2057,7 @@ private static void undo( long revertVLSN, int revertExpiration, boolean revertExpirationInHours, + long revertCreationTime, long revertModificationTime, boolean revertTombstone) throws DatabaseException { @@ -2141,6 +2145,7 @@ private static void undo( slotIdx, revertLsn, revertKD, revertPD, revertKey, revertEmbData, revertVLSN, revertLogrecSize, revertExpiration, revertExpirationInHours, + revertCreationTime, revertModificationTime, revertTombstone); replaced = true; @@ -2531,8 +2536,9 @@ private void relock( logrec.getAbortLsn(), logrec.getAbortKnownDeleted(), logrec.getAbortKey(), logrec.getAbortData(), logrec.getAbortVLSN(), logrec.getAbortExpiration(), logrec.isAbortExpirationInHours(), - logrec.getAbortModificationTime(), logrec.getAbortTombstone(), - db); + logrec.getAbortModificationTime(), + logrec.getAbortCreationTime(), logrec.getAbortTombstone(), + db, true); final WriteLockInfo wli = result.getWriteLockInfo(); @@ -2608,6 +2614,8 @@ private long redo( int expiration = logrec.getExpiration(); boolean expirationInHours = logrec.isExpirationInHours(); long modificationTime = logrec.getModificationTime(); + long creationTime = logrec.getCreationTime(); + boolean tombstone = logrec.isTombstone(); long treeLsn = DbLsn.NULL_LSN; @@ -2719,7 +2727,8 @@ private long redo( bin.recoverRecord( index, logrecLsn, redoKD, redoPD, logrecKey, logrecEmbData, logrecVLSN, logrecSize, - expiration, expirationInHours, modificationTime, + expiration, expirationInHours, creationTime, + modificationTime, tombstone); replaced = true; @@ -2761,7 +2770,7 @@ private long redo( bin.recoverRecord( index, logrecLsn, false/*KD*/, false/*PD*/, logrecKey, logrecEmbData, logrecVLSN, logrecSize, - expiration, expirationInHours, modificationTime, + expiration, expirationInHours, creationTime, modificationTime, tombstone); inserted = true; @@ -2797,6 +2806,9 @@ private long redo( bin.setModificationTime( index, (logrecEmbData != null) ? modificationTime : 0); + bin.setCreationTime( + index, (logrecEmbData != null) ? creationTime : 0); + bin.setTombstone(index, tombstone); bin.setCachedVLSN( @@ -2844,6 +2856,9 @@ private long redo( bin.setModificationTime( index, (logrecEmbData != null) ? modificationTime : 0); + bin.setCreationTime( + index, (logrecEmbData != null) ? creationTime : 0); + bin.setTombstone(index, tombstone); bin.setCachedVLSN( diff --git a/kvmain/src/main/java/com/sleepycat/je/rep/BinaryProtocolException.java b/kvmain/src/main/java/com/sleepycat/je/rep/BinaryProtocolException.java new file mode 100644 index 00000000..26e94763 --- /dev/null +++ b/kvmain/src/main/java/com/sleepycat/je/rep/BinaryProtocolException.java @@ -0,0 +1,31 @@ +/*- + * Copyright (C) 2002, 2025, Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle NoSQL + * Database made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/nosqldb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle NoSQL Database for a copy of the license and + * additional information. + */ + +package com.sleepycat.je.rep; + +import com.sleepycat.je.utilint.NotSerializable; + +import java.io.IOException; + +/** + * Thrown to indicate that an error happened in the reading of channel + * at {@link com.sleepycat.je.rep.utilint.BinaryProtocol}. + */ +@SuppressWarnings("serial") +public class BinaryProtocolException extends IOException + implements NotSerializable { + + public BinaryProtocolException(String message) { + super(message); + } +} diff --git a/kvmain/src/main/java/com/sleepycat/je/rep/NetworkRestore.java b/kvmain/src/main/java/com/sleepycat/je/rep/NetworkRestore.java index 938f79f9..494e30c0 100644 --- a/kvmain/src/main/java/com/sleepycat/je/rep/NetworkRestore.java +++ b/kvmain/src/main/java/com/sleepycat/je/rep/NetworkRestore.java @@ -17,17 +17,11 @@ import java.io.IOException; import java.net.ConnectException; import java.net.InetSocketAddress; -import java.util.Arrays; -import java.util.Collections; -import java.util.Comparator; -import java.util.HashSet; -import java.util.LinkedList; -import java.util.List; -import java.util.Random; -import java.util.Set; +import java.util.*; import java.util.logging.Logger; import com.sleepycat.je.DatabaseException; +import com.sleepycat.je.Environment; import com.sleepycat.je.EnvironmentFailureException; import com.sleepycat.je.log.RestoreMarker; import com.sleepycat.je.rep.impl.RepImpl; @@ -91,6 +85,16 @@ public class NetworkRestore { private static final long MAX_VLSN = Long.MAX_VALUE; + /* + * Singleton instance aimed to control what environments are in network + * restore + */ + private static final EnvironmentsInNetworkRestore envsInNetworkRestore; + + static { + envsInNetworkRestore = new EnvironmentsInNetworkRestore(); + } + /* * The log provider last used or null. May be queried by other threads * during the backup. @@ -105,8 +109,12 @@ public class NetworkRestore { private final Logger logger; + /* + * For testing only. + */ private TestHook testInterruptHook; private boolean failDuringRestore = false; + private boolean delayRestore = false; /** * Creates an instance of NetworkRestore suitable for restoring the logs at @@ -177,7 +185,7 @@ private long initServerList(final RepImpl repImpl, final List logProviders; if ((config.getLogProviders() != null) && - (config.getLogProviders().size() > 0)) { + (!config.getLogProviders().isEmpty())) { final Set memberNames = new HashSet<>(); for (ReplicationNode node : logException.getLogProviders()) { memberNames.add(node.getName()); @@ -237,12 +245,101 @@ private long resetServerList(final List serverList, return Math.max(0, maxVlsnServer.rangeEnd - maxLag); } + /* + * Return if the directory of an environment handle is in network restore. + * An 'Environment' object corresponding the handle to be opened could + * be given as parameter. So, when the network restore finishes, this + * object will be notified that can continue the opening of the handle, + * avoiding the corresponding timeout ('je.env.networkRestoreLockTimeout'). 
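Note: the EnvironmentsInNetworkRestore registry added in this hunk only exposes the bookkeeping side here (isEnvInRestore, addEnvInRestore/removeEnvInRestore and the env.notify() wake-up); the waiting side lives in the environment-open path, which is not part of this hunk. The following is an assumption about the intended handshake, based on the notify() call in removeEnvInRestore() and the 'je.env.networkRestoreLockTimeout' timeout mentioned in the comment:

    /* Hypothetical caller in the environment-open path (not code from this patch). */
    if (NetworkRestore.isEnvInRestore(envHome, env)) {
        synchronized (env) {
            try {
                /* Woken by env.notify() when the restore completes, or by the timeout. */
                env.wait(networkRestoreLockTimeoutMs);   /* assumed timeout variable */
            } catch (InterruptedException ie) {
                Thread.currentThread().interrupt();
            }
        }
    }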
+ */ + static public boolean isEnvInRestore(File envHome, Environment env) { + return envsInNetworkRestore.isEnvInRestore(envHome, env); + } + + /* + * Used in testing to clear the cache of environments that + * are in network restore when switching between test methods. + */ + static public void cleanEnvsInRestore() { + envsInNetworkRestore.clearEnvsInRestore(); + } + + /* + * Class aimed to control the environments that are in network restore. + */ + static private class EnvironmentsInNetworkRestore { + private final List envsInRestore = new ArrayList<>(); + /* + * The first idea was using Map instead + * Map>. But several unit tests failed. It + * led to think if several environment handles can be opened on a + * same directory in production, or the opening of environment + * handles can be similar as implemented in the unit tests. To avoid + * possible outages, it was used List. + */ + private final Map> envsForKey = new HashMap<>(); + + synchronized boolean isEnvInRestore(File envHome, Environment env) { + String envKey = makeKeyForEnv(envHome); + if (envsInRestore.contains(envKey)) { + /* + * It True, save the environment handle that is being + * opened but its directory is in network restore. + */ + if (env != null) { + List envList = envsForKey.computeIfAbsent( + envKey, k -> new ArrayList<>()); + envList.add(env); + } + return true; + } + return false; + } + + synchronized void addEnvInRestore(File envHome) { + String envKey = makeKeyForEnv(envHome); + if (!envsInRestore.contains(envKey)) { + envsInRestore.add(envKey); + } + } + + synchronized void removeEnvInRestore(File envHome) { + String envKey = makeKeyForEnv(envHome); + envsInRestore.remove(envKey); + List envList = envsForKey.remove(envKey); + if (envList != null) { + /* + * Notify to all environment handles that the + * corresponding network restore finished. + */ + for (Environment env : envList) { + synchronized (env) { + env.notify(); + } + } + } + } + + /* + * Used in testing to clear the cache of environments in + * network restore when switching between test methods. + */ + synchronized void clearEnvsInRestore() { + envsInRestore.clear(); + envsForKey.clear(); + } + + private String makeKeyForEnv(File envHome) { + return envHome.getAbsolutePath(); + } + } + /** * Restores the log files from one of the members of the replication group. *

    * If config.getLogProviders() returns null, or an empty list, * it uses the member that is least busy as the provider of the log files. - * Otherwise it selects a member from the list, choosing the first member + * Otherwise, it selects a member from the list, choosing the first member * that's available, to provide the log files. If the members in this list * are not present in logException.getLogProviders(), it will * result in an IllegalArgumentException being thrown. @@ -253,7 +350,7 @@ private long resetServerList(final List serverList, *

    * Log files that are currently at the node will be retained if they are * part of a consistent set of log files. Obsolete log files are either - * deleted, or are renamed based on the the configuration of + * deleted, or are renamed based on the configuration of * config.getRetainLogFiles(). *

    * If the InsufficientLogException was caught by the @@ -279,9 +376,30 @@ public synchronized void execute( throws EnvironmentFailureException, IllegalArgumentException { - RepImpl repImpl = null; + final RepImpl repImpl = logException.openRepImpl(config. + getLoggingHandler()); + final File envHome = repImpl.getEnvironmentHome(); + boolean isCompletedRestore = false; try { - repImpl = logException.openRepImpl(config.getLoggingHandler()); + envsInNetworkRestore.addEnvInRestore(envHome); + + /* + * For testing only. + */ + if (delayRestore) { + /* + * Delay the starting of a network restore to ensure that + * the opening of an environment handle is locked because + * its directory should be restored. + */ + try { + wait(1000 * 10); + } catch (InterruptedException ex) { + /* + * Do not make anything. + */ + } + } /* See 'Algorithm'. */ final int maxLag = repImpl.getConfigManager().getInt( @@ -317,7 +435,6 @@ public synchronized void execute( */ while (!serverList.isEmpty()) { final List newServerList = new LinkedList<>(); - final File envHome = repImpl.getEnvironmentHome(); for (Server server : serverList) { final InetSocketAddress serverSocket = @@ -350,6 +467,7 @@ public synchronized void execute( backup.setInterruptHook(testInterruptHook); backup.setFailDuringRestore(failDuringRestore); backup.execute(); + isCompletedRestore = true; LoggerUtils.info(logger, repImpl, String.format( "Network restore completed from: %s. " + "Elapsed time: %,d s.", @@ -411,8 +529,9 @@ public synchronized void execute( throw EnvironmentFailureException.unexpectedState ("Tried and failed with every node"); } finally { - if (repImpl != null) { - repImpl.close(); + repImpl.close(); + if (isCompletedRestore) { + envsInNetworkRestore.removeEnvInRestore(envHome); } } } @@ -467,11 +586,24 @@ public String toString() { } } + /* + * For testing only + */ public void setTestInterruptHook(final TestHook hook) { testInterruptHook = hook; } + /* + * For testing only + */ public void setFailDuringRestore(boolean fail) { failDuringRestore = fail; } + + /* + * For testing only + */ + public void setDelayRestore(boolean delayRestore) { + this.delayRestore = delayRestore; + } } diff --git a/kvmain/src/main/java/com/sleepycat/je/rep/ReplicaConnectRetryException.java b/kvmain/src/main/java/com/sleepycat/je/rep/ReplicaConnectRetryException.java index 424ac693..07b83fab 100644 --- a/kvmain/src/main/java/com/sleepycat/je/rep/ReplicaConnectRetryException.java +++ b/kvmain/src/main/java/com/sleepycat/je/rep/ReplicaConnectRetryException.java @@ -15,13 +15,46 @@ import com.sleepycat.je.utilint.NotSerializable; +/** + * Thrown to indicate that the Replica must retry connecting to the same + * master, after some period of time. 
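Note: with ReplicaRetryException removed, ReplicaConnectRetryException now carries the retry budget itself (nRetries, retrySleepMs). The loop in ArbiterAcker.runArbiterAckLoop() further down consumes it roughly as in this condensed, illustrative sketch (the give-up path is simplified, and the enclosing method is assumed to declare InterruptedException):

    int retryCount = 0;
    while (true) {
        try {
            runArbiterAckLoopInternal();
            break;                                   /* normal exit */
        } catch (ReplicaConnectRetryException e) {
            if (retryCount >= e.getNRetries()) {
                throw new IllegalStateException(e);  /* placeholder for the real give-up handling */
            }
            retryCount++;
            Thread.sleep(e.getRetrySleepMs());       /* back off before reconnecting */
        }
    }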
+ */ @SuppressWarnings("serial") -public class ReplicaConnectRetryException extends ReplicaRetryException -implements NotSerializable { +public class ReplicaConnectRetryException extends Exception + implements NotSerializable { + + final int nRetries; + final int retrySleepMs; public ReplicaConnectRetryException(String message, - int retries, - int retrySleepMs) { - super(message, retries, retrySleepMs); + int nRetries, + int retrySleepMs) { + super(message); + this.nRetries = nRetries; + this.retrySleepMs = retrySleepMs; + } + + /** + * Get thread sleep time before retry + * + * @return sleep time in ms + */ + public long getRetrySleepMs() { + return retrySleepMs; + } + + /** + * Get number of nRetries + * + * @return number of nRetries + */ + public int getNRetries() { + return nRetries; + } + + @Override + public String getMessage() { + return "Failed after nRetries: " + nRetries + + " with retry interval: " + retrySleepMs + "ms. with message " + super.getMessage(); } } diff --git a/kvmain/src/main/java/com/sleepycat/je/rep/ReplicaRetryException.java b/kvmain/src/main/java/com/sleepycat/je/rep/ReplicaRetryException.java deleted file mode 100644 index 9c776f51..00000000 --- a/kvmain/src/main/java/com/sleepycat/je/rep/ReplicaRetryException.java +++ /dev/null @@ -1,57 +0,0 @@ -/*- - * Copyright (C) 2002, 2025, Oracle and/or its affiliates. All rights reserved. - * - * This file was distributed by Oracle as part of a version of Oracle NoSQL - * Database made available at: - * - * http://www.oracle.com/technetwork/database/database-technologies/nosqldb/downloads/index.html - * - * Please see the LICENSE file included in the top-level directory of the - * appropriate version of Oracle NoSQL Database for a copy of the license and - * additional information. - */ - -package com.sleepycat.je.rep; - -/** - * Thrown to indicate that the Replica must retry connecting to the same - * master, after some period of time. - */ - -@SuppressWarnings("serial") -public abstract class ReplicaRetryException extends Exception { - final int nRetries; - final int retrySleepMs; - - public ReplicaRetryException(String message, - int nRetries, - int retrySleepMs) { - super(message); - this.nRetries = nRetries; - this.retrySleepMs = retrySleepMs; - } - - /** - * Get thread sleep time before retry - * - * @return sleep time in ms - */ - public long getRetrySleepMs() { - return retrySleepMs; - } - - /** - * Get number of nRetries - * - * @return number of nRetries - */ - public int getNRetries() { - return nRetries; - } - - @Override - public String getMessage() { - return "Failed after nRetries: " + nRetries + - " with retry interval: " + retrySleepMs + "ms. 
with message " + super.getMessage(); - } -} diff --git a/kvmain/src/main/java/com/sleepycat/je/rep/ReplicatedEnvironment.java b/kvmain/src/main/java/com/sleepycat/je/rep/ReplicatedEnvironment.java index ceffb8bc..b7e844cc 100644 --- a/kvmain/src/main/java/com/sleepycat/je/rep/ReplicatedEnvironment.java +++ b/kvmain/src/main/java/com/sleepycat/je/rep/ReplicatedEnvironment.java @@ -603,7 +603,7 @@ protected ReplicatedEnvironment(File envHome, ReplicaConsistencyException, IllegalArgumentException { - super(envHome, envConfig, repConfig, envImplParam); + super(envHome, envConfig, repConfig, envImplParam, joinGroup); repEnvironmentImpl = (RepImpl) DbInternal.getNonNullEnvImpl(this); nameIdPair = repEnvironmentImpl.getNameIdPair(); diff --git a/kvmain/src/main/java/com/sleepycat/je/rep/ReplicationConfig.java b/kvmain/src/main/java/com/sleepycat/je/rep/ReplicationConfig.java index a8b4d4e7..8c1e5f60 100644 --- a/kvmain/src/main/java/com/sleepycat/je/rep/ReplicationConfig.java +++ b/kvmain/src/main/java/com/sleepycat/je/rep/ReplicationConfig.java @@ -383,7 +383,7 @@ public class ReplicationConfig extends ReplicationMutableConfig { * IllegalArgumentException is thrown. * * When moving to 24.2, we decide to remove numerous deprecated - * configParams. Although this one marked as deprecared, this configuration + * configParams. Although this one marked as deprecated, this configuration * could still be used by on-prem users through configuration files, and it * could alter cleaner behavior. */ @@ -392,6 +392,49 @@ public class ReplicationConfig extends ReplicationMutableConfig { public static final String REPLAY_FREE_DISK_PERCENT = EnvironmentParams.REP_PARAM_PREFIX + "replayFreeDiskPercent"; + /** + * Number of messages that should be logged when {@link #TXN_TRACK_ACK_INFO} is set to + * True. These messages correspond to what nodes in a replication group acknowledged a + * transaction when it reaches the required number of acks to be counted as durable. + * + * + * + * + * + * + * + * + * + * + * + * + * + *
     Information about configuration option
     Name: {@value}   Type: Integer   Mutable: No   Default: 100   Minimum: 1   Maximum: 500
    + */ + public static final String TXN_TRACK_ACK_INFO_LIMIT = + "je.txn.trackAckInfoLimit"; + + /** + * Set this parameter to true to track information of what nodes in a replication group + * acknowledged a transaction when it reaches the required number of acks to be counted + * as durable. The tracking of this information will show which transaction ID received + * a quorum of acknowledging and what were the nodes. The default is false, and true + * should only be used during debugging. + * + * + * + * + * + * + * + * + * + * + *
     Information about configuration option
     Name: {@value}   Type: Boolean   Mutable: No   Default: false
    + */ + public static final String TXN_TRACK_ACK_INFO = + "je.txn.trackAckInfo"; + /** * The maximum amount of time for a replay transaction to wait for a lock. * @@ -829,6 +872,8 @@ public class ReplicationConfig extends ReplicationMutableConfig { EnvironmentParams.REP_PARAM_PREFIX + "electionsRebroadcastPeriod"; /** + * @deprecated as of 25.2. + * * In rare cases, a node may need to rollback committed transactions in * order to rejoin a replication group. This parameter limits the number of * durable transactions that may be rolled back. Durable transactions are @@ -859,11 +904,16 @@ public class ReplicationConfig extends ReplicationMutableConfig { * * * @see RollbackProhibitedException + * */ + @SuppressWarnings("deprecation") + @Deprecated public static final String TXN_ROLLBACK_LIMIT = EnvironmentParams.REP_PARAM_PREFIX + "txnRollbackLimit"; /** + * @deprecated as 25.2 + * * In rare cases, a node may need to rollback committed transactions in * order to rejoin a replication group. If this parameter is set to true * and a rollback is necessary to rejoin the group, a {@link @@ -893,7 +943,10 @@ public class ReplicationConfig extends ReplicationMutableConfig { * False * * + * */ + @SuppressWarnings("deprecation") + @Deprecated public static final String TXN_ROLLBACK_DISABLED = EnvironmentParams.REP_PARAM_PREFIX + "txnRollbackDisabled"; diff --git a/kvmain/src/main/java/com/sleepycat/je/rep/arbiter/impl/ArbiterAcker.java b/kvmain/src/main/java/com/sleepycat/je/rep/arbiter/impl/ArbiterAcker.java index a489cad4..84588450 100644 --- a/kvmain/src/main/java/com/sleepycat/je/rep/arbiter/impl/ArbiterAcker.java +++ b/kvmain/src/main/java/com/sleepycat/je/rep/arbiter/impl/ArbiterAcker.java @@ -40,7 +40,6 @@ import com.sleepycat.je.log.entry.LogEntry; import com.sleepycat.je.rep.GroupShutdownException; import com.sleepycat.je.rep.NodeType; -import com.sleepycat.je.rep.ReplicaRetryException; import com.sleepycat.je.rep.ReplicaConnectRetryException; import com.sleepycat.je.rep.ReplicatedEnvironment; import com.sleepycat.je.rep.UnknownMasterException; @@ -72,7 +71,6 @@ import com.sleepycat.je.util.TimeSupplier; import com.sleepycat.je.utilint.LoggerUtils; import com.sleepycat.je.utilint.LongStat; -import com.sleepycat.je.utilint.NotSerializable; import com.sleepycat.je.utilint.StatGroup; import com.sleepycat.je.utilint.StoppableThread; import com.sleepycat.je.utilint.StringStat; @@ -229,15 +227,44 @@ private enum RequestExitType { private void initializeConnection() throws ReplicaConnectRetryException, - IOException, - ReplicaConnectRetryException { + IOException { createArbiterFeederChannel(); + /* + * Invoked to refresh the Helper Host information in the + * Replication Group Admin. In this way, it is avoided that the + * 'refreshCachedGroup' method repeatedly fails due to: + * + * 'Arbiter exception: com.sleepycat.je.rep.UnknownMasterException: + * Could not determine master from helpers at: [here it will be + * listed the Helper Hosts, for instance, localhost/127.0.0.1:5001, + * localhost/127.0.0.1:5002]'. + * + * That repeated exception could lead to delaying the start of the + * handshake and to cause a quorum loss [KVSTORE-2654]. + */ + arbiterImpl.refreshHelperHosts(); + /* + * Invoked to update the information of the Replication Group + * before performing the handshake. If it is not called, the + * creation of RepFeederHandshakeConfig fails because that + * information does not exist, or that information is not + * current. 
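Note: TXN_TRACK_ACK_INFO and TXN_TRACK_ACK_INFO_LIMIT, introduced earlier in ReplicationConfig and registered in RepParams further down, are ordinary named parameters, so they would be enabled by name before the environment is opened. A hedged usage sketch, not taken from this patch:

    ReplicationConfig repConfig = new ReplicationConfig();
    /* Both parameters are immutable, so set them before opening the environment. */
    repConfig.setConfigParam(ReplicationConfig.TXN_TRACK_ACK_INFO, "true");
    repConfig.setConfigParam(ReplicationConfig.TXN_TRACK_ACK_INFO_LIMIT, "250");  /* allowed range 1..500 */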
+ */ arbiterImpl.refreshCachedGroup(); + LoggerUtils.fine(logger, repImpl, + "Cache group updated before the handshake"); ReplicaFeederHandshake handshake = new ReplicaFeederHandshake(new RepFeederHandshakeConfig()); protocol = handshake.execute(); - + /* + * Invoked to update the information of the Replication Group + * after performing the handshake. Otherwise, a + * com.sleepycat.je.rep.UnknownMasterException is repeatedly + * thrown when calling this method again. + */ arbiterImpl.refreshCachedGroup(); + LoggerUtils.fine(logger, repImpl, + "Cache group updated after the handshake"); /* read heartbeat and respond */ masterHeartbeatId = protocol. @@ -288,16 +315,15 @@ void runArbiterAckLoop() DatabaseException, GroupShutdownException { - Class retryExceptionClass = null; + Class retryExceptionClass = null; int retryCount = 0; try { - while (true) { try { runArbiterAckLoopInternal(); /* Normal exit */ break; - } catch (ReplicaRetryException e) { + } catch (ReplicaConnectRetryException e) { if (!arbiterImpl.getMasterStatus().inSync()) { LoggerUtils.fine(logger, repImpl, "Retry terminated, out of sync."); @@ -358,7 +384,7 @@ void shutdown() { private void runArbiterAckLoopInternal() throws InterruptedException, - ReplicaRetryException { + ReplicaConnectRetryException { shutdownException = null; LoggerUtils.info(logger, repImpl, @@ -387,9 +413,11 @@ private void runArbiterAckLoopInternal() * it and return to the outer node level loop. */ LoggerUtils.fine(logger, repImpl, - "Arbiter exception: " + e.getMessage() + - "\n" + LoggerUtils.getStackTrace(e)); - } catch (ReplicaRetryException e) { + "Arbiter exception: " + e + " channel: " + + ((arbiterFeederChannel == null) ? "null" + : arbiterFeederChannel.getChannel().toString()) + + "\n" + LoggerUtils.getStackTrace(e)); + } catch (ReplicaConnectRetryException e) { /* Propagate it outwards. Node does not need to shutdown. */ throw e; } catch (GroupShutdownException e) { @@ -521,7 +549,7 @@ StatGroup loadStats(StatsConfig config) private void loopExitCleanup() { if (shutdownException != null) { - if (shutdownException instanceof ReplicaRetryException) { + if (shutdownException instanceof ReplicaConnectRetryException) { LoggerUtils.fine(logger, repImpl, "Retrying connection to feeder. 
Message: " + shutdownException.getMessage()); @@ -574,8 +602,12 @@ private void createArbiterFeederChannel() dataChannel, timeoutMs); + LoggerUtils.fine(logger, repImpl, + "starting service dispatcher handshake"); ServiceDispatcher.doServiceHandshake (dataChannel, FeederManager.FEEDER_SERVICE); + LoggerUtils.fine(logger, repImpl, + "service dispatcher handshake done"); } catch (ConnectException e) { /* diff --git a/kvmain/src/main/java/com/sleepycat/je/rep/arbiter/impl/ArbiterImpl.java b/kvmain/src/main/java/com/sleepycat/je/rep/arbiter/impl/ArbiterImpl.java index 7aea932d..1901fde6 100644 --- a/kvmain/src/main/java/com/sleepycat/je/rep/arbiter/impl/ArbiterImpl.java +++ b/kvmain/src/main/java/com/sleepycat/je/rep/arbiter/impl/ArbiterImpl.java @@ -493,7 +493,7 @@ public void refreshHelperHosts() { } } - RepGroupImpl refreshCachedGroup() + void refreshCachedGroup() throws DatabaseException { RepGroupImpl repGroupImpl; repGroupImpl = repGroupAdmin.getGroup().getRepGroupImpl(); @@ -510,7 +510,6 @@ RepGroupImpl refreshCachedGroup() helpers.addAll(repGroupImpl.getAllHelperSockets()); helperSockets = helpers; cachedRepGroupImpl = repGroupImpl; - return cachedRepGroupImpl; } void updateNameIdPair(NameIdPair other) { diff --git a/kvmain/src/main/java/com/sleepycat/je/rep/impl/RepImpl.java b/kvmain/src/main/java/com/sleepycat/je/rep/impl/RepImpl.java index c6848882..0eda20bd 100644 --- a/kvmain/src/main/java/com/sleepycat/je/rep/impl/RepImpl.java +++ b/kvmain/src/main/java/com/sleepycat/je/rep/impl/RepImpl.java @@ -2464,10 +2464,6 @@ private boolean useArbiter(MasterTxn txn) { return false; } - public void setAuthenticator(StreamAuthenticator authenticator) { - this.authenticator = authenticator; - } - public StreamAuthenticator getAuthenticator() { return authenticator; } diff --git a/kvmain/src/main/java/com/sleepycat/je/rep/impl/RepParams.java b/kvmain/src/main/java/com/sleepycat/je/rep/impl/RepParams.java index 08d409c1..ef86a0f5 100644 --- a/kvmain/src/main/java/com/sleepycat/je/rep/impl/RepParams.java +++ b/kvmain/src/main/java/com/sleepycat/je/rep/impl/RepParams.java @@ -21,6 +21,7 @@ import java.util.HashSet; import java.util.StringTokenizer; +import com.sleepycat.je.EnvironmentConfig; import com.sleepycat.je.config.BooleanConfigParam; import com.sleepycat.je.config.ConfigParam; import com.sleepycat.je.config.DurationConfigParam; @@ -200,6 +201,26 @@ private boolean isValid(char c) { true, // mutable true); // forReplication + /** + * @see ReplicationConfig#TXN_TRACK_ACK_INFO_LIMIT + */ + public static final IntConfigParam TXN_TRACK_ACK_INFO_LIMIT = + new IntConfigParam(ReplicationConfig.TXN_TRACK_ACK_INFO_LIMIT, + 1, // min + 500, // max + 100, // default + false, // mutable + true); // forReplication + + /** + * @see ReplicationConfig#TXN_TRACK_ACK_INFO + */ + public static final BooleanConfigParam TXN_TRACK_ACK_INFO = + new BooleanConfigParam(ReplicationConfig.TXN_TRACK_ACK_INFO, + false, // default + false, // mutable + true); // forReplication + /** * The lock timeout for replay transactions. */ diff --git a/kvmain/src/main/java/com/sleepycat/je/rep/impl/node/DurabilityQuorum.java b/kvmain/src/main/java/com/sleepycat/je/rep/impl/node/DurabilityQuorum.java index 1ba5c687..be8f376b 100644 --- a/kvmain/src/main/java/com/sleepycat/je/rep/impl/node/DurabilityQuorum.java +++ b/kvmain/src/main/java/com/sleepycat/je/rep/impl/node/DurabilityQuorum.java @@ -127,8 +127,12 @@ public void ensureSufficientAcks(MasterTxn txn) * period since the pre-log hook. 
Note that in this case we merely * want to check; we don't want to switch into active arbitration * unless/until we actually lose the connection to the replica at - * commit time. TODO: this doesn't seem right! Shouldn't we require - * activation at this point!!! + * commit time. + * This code is used by DesignatedPrimaryProvider. In a two node plus + * one Arbiter replication group, one node can be designated to remain + * the master if the other node fails; in that situation it only + * requires a single ack from itself to count a transaction as + * durable. */ if (repImpl.getRepNode().getArbiter().activationPossible()) { String msg = "txn " + txn.getId() + diff --git a/kvmain/src/main/java/com/sleepycat/je/rep/impl/node/ElectionStatesContinuation.java b/kvmain/src/main/java/com/sleepycat/je/rep/impl/node/ElectionStatesContinuation.java index 3a215c55..c9790f5c 100644 --- a/kvmain/src/main/java/com/sleepycat/je/rep/impl/node/ElectionStatesContinuation.java +++ b/kvmain/src/main/java/com/sleepycat/je/rep/impl/node/ElectionStatesContinuation.java @@ -23,6 +23,7 @@ import java.nio.file.StandardCopyOption; import java.util.logging.Level; +import com.sleepycat.je.DatabaseException; import com.sleepycat.je.dbi.EnvironmentFailureReason; import com.sleepycat.je.rep.ReplicationConfig; import com.sleepycat.je.rep.elections.Proposer.DefaultFormattedProposal; @@ -529,6 +530,16 @@ private void persist() { */ private void persist(JsonObject obj) { ensureHoldLock(); + try { + /* + * Flush the log synchronously before persisting the election information + * so that it can be recovered if a crash occurs right after the election. + */ + envImpl.getLogManager().flushSync(); + } catch (DatabaseException e) { + LoggerUtils.info(envImpl.getLogger(), envImpl, + "Cannot fsync the log before persisting election state: " + e.getMessage()); + } try (final FileOutputStream fos = new FileOutputStream(tempFile)) { try (final PrintWriter writer = new PrintWriter(fos)) { writer.write(obj.toJson()); diff --git a/kvmain/src/main/java/com/sleepycat/je/rep/impl/node/Feeder.java b/kvmain/src/main/java/com/sleepycat/je/rep/impl/node/Feeder.java index 70691738..ac752827 100644 --- a/kvmain/src/main/java/com/sleepycat/je/rep/impl/node/Feeder.java +++ b/kvmain/src/main/java/com/sleepycat/je/rep/impl/node/Feeder.java @@ -206,9 +206,6 @@ final public class Feeder { /* The filter to be used for records written to the replication stream.*/ private FeederFilter feederFilter; - /* feeder authenticator */ - private final StreamAuthenticator authenticator; - /* security check interval in ms */ private final long securityChkIntvMs; @@ -423,8 +420,12 @@ private NamedChannelWithTimeout configureChannel(DataChannel channel) feederFilter = null; /* get authenticator from containing rn */ - authenticator = repNode.getAuthenticator(); securityChkIntvMs = repNode.getSecurityCheckInterval(); + LoggerUtils.info(logger, repImpl, + "Feeder created with nameId=" + nameIdPair + + ", channel authenticator=" + + dataChannel.getStreamAuthenticator() + + ", security check interval ms=" + securityChkIntvMs); } void startFeederThreads() { @@ -452,7 +453,6 @@ public Feeder() { vlsnRate = null; writeMessageHook = initialWriteMessageHook; feederFilter = null; - authenticator = null; securityChkIntvMs = 0; } @@ -673,12 +673,15 @@ public void shutdown(Exception shutdownException) { feederManager.getReplicaVLSNRateMap().removeStat(replicaName); LoggerUtils.info(logger, repImpl, - "Shutting down feeder for replica " + replicaName + + "Shutting down 
feeder for replica=" + replicaName + ((shutdownException == null) ? "" : - (" Reason: " + - shutdownException.getMessage() + " ")) + - RepUtils.writeTimesString(pstats)); + (" reason=" + shutdownException + + ", protocol stats= ")) + + RepUtils.writeTimesString(pstats) + + ((shutdownException == null) ? "" : + ", shutdown exception stack=\n" + + LoggerUtils.getStackTrace(shutdownException))); final String feederState = String.format("Feeder state at exit -- " + @@ -760,8 +763,8 @@ public MasterFeederSource getMasterFeederSource() { return null; } - public StreamAuthenticator getAuthenticator() { - return authenticator; + private StreamAuthenticator getAuthenticator() { + return feederReplicaChannel.getChannel().getStreamAuthenticator(); } /** @@ -951,6 +954,11 @@ public void run() { * thread is the only one to notice a problem. The Replica can * decide to re-establish the connection */ + LoggerUtils.info(logger, repImpl, + "Feeder input thread to shut down feeder in " + + "its entirety" + + ", replica=" + replicaNameIdPair.getName() + + ", exception=" + shutdownException) ; shutdown(shutdownException); cleanup(); } @@ -1586,10 +1594,14 @@ public void run() { * Feeder but this is the safe course of action. */ LoggerUtils.severe(logger, repImpl, - "Unexpected exception: " + e.getMessage() + + "Unexpected runtime exception=" + e + + ", stack=\n" + LoggerUtils.getStackTraceForSevereLog(e)); throw e; } catch (Error e) { + LoggerUtils.severe(logger, repImpl, + "Unexpected error=" + e + ", stack=\n" + + LoggerUtils.getStackTraceForSevereLog(e)); feederOutputError = e; repNode.getRepImpl().invalidate(e); } finally { @@ -2153,12 +2165,13 @@ public static void setInitialWriteMessageHook( public boolean needSecurityChecks() { /* no check for non-secure store without an authenticator */ + final StreamAuthenticator authenticator = getAuthenticator(); if (authenticator == null) { return false; } final DataChannel channel = feederReplicaChannel.getChannel(); - return channel.isTrustCapable() && !channel.isTrusted(); + return DataChannel.needSecurityCheck(channel); } /** @@ -2172,8 +2185,8 @@ private boolean doSecurityCheck() { return true; } + final StreamAuthenticator authenticator = getAuthenticator(); final long curr = TimeSupplier.currentTimeMillis(); - if ((curr - authenticator.getLastCheckTimeMs()) >= securityChkIntvMs) { /* both authentication and authorization */ return authenticator.checkAccess(); @@ -2196,6 +2209,7 @@ private boolean processReauthenticate(Message msg) { } /* ignore the message if no authentication is enabled */ + final StreamAuthenticator authenticator = getAuthenticator(); if (authenticator == null) { return true; } @@ -2203,7 +2217,12 @@ private boolean processReauthenticate(Message msg) { final Protocol.ReAuthenticate reauth = (Protocol.ReAuthenticate)msg; authenticator.setToken(reauth.getTokenBytes()); /* both authentication and authorization */ - return authenticator.checkAccess(); + final boolean ret = authenticator.checkAccess(); + LoggerUtils.info(logger, repImpl, + "Feeder=" + nameIdPair + " reauthenticate" + + " result=" + (ret ? 
"success" : "failure") + + ", channel id=" + authenticator.getChannelId()); + return ret; } /** diff --git a/kvmain/src/main/java/com/sleepycat/je/rep/impl/node/FeederManager.java b/kvmain/src/main/java/com/sleepycat/je/rep/impl/node/FeederManager.java index 4859e63d..a6859979 100644 --- a/kvmain/src/main/java/com/sleepycat/je/rep/impl/node/FeederManager.java +++ b/kvmain/src/main/java/com/sleepycat/je/rep/impl/node/FeederManager.java @@ -1106,6 +1106,9 @@ void runFeeders() LoggerUtils.warning(logger, repNode.getRepImpl(), "Feeder manager unexpected interrupt"); } finally { + LoggerUtils.info(logger, repNode.getRepImpl(), + "Start shutting down feeder manager" + + ", reason=" + feederShutdownException); repNode.resetReadyLatch(feederShutdownException); repNode.getServiceDispatcher().cancel(FEEDER_SERVICE); shutdownFeeders(feederShutdownException); @@ -1142,8 +1145,10 @@ private void shutdownFeeders(Exception feederShutdownException) { LoggerUtils.info(logger, repNode.getRepImpl(), "Feeder Manager shutting down feeders." + - " Active and nascent feeders:" + feederSet.size() + - " Managed feeders:" + managedFeederCount.get()); + " Active and nascent feeders=" + feederSet.size() + + " Managed feeders=" + managedFeederCount.get() + + (feederShutdownException == null ? "" : + ", reason=" + feederShutdownException)); feederSet.forEach((feeder) -> { nFeedersShutdown.increment(); @@ -1492,7 +1497,7 @@ void flush() { } final RepImpl repImpl = repNode.getRepImpl(); final MasterTxn nullTxn = - MasterTxn.createNullTxn(repImpl); + MasterTxn.createNullTxn(repImpl, false); /* * We don't want to wait for any reason, if the txn fails, * we can try later. @@ -1524,4 +1529,36 @@ void decrementManagedFeederCount() { managedFeederCount.getAndDecrement(); } + /** + * As a way to ensure that any inflight non-durable txns at the time of + * failure in the preceding term are made durable in a new term without + * having to wait for the application itself to create a durable txn. That + * is, it speeds up the final state of these non-durable txns, since the + * ack of the null txn advances the dtvlsn. + * + * This satisfies the one described in Section 5.4.2 and Figure 8 in the + * Raft manuscript + */ + void sendNullTxnWhenNodeBecomesMaster() { + /* It is created a sync NullTxn to ensure durability. */ + final MasterTxn nullTxn = + MasterTxn.createNullTxn(repNode.getRepImpl(), true); + nullTxn.setTxnTimeout(1); + try { + nullTxn.commit(); + LoggerUtils.fine(logger, repNode.getRepImpl(), + "Success to write null txn when " + + repNode.getNodeName() + + " was elected as master. " + + nullTxn.logString()); + } catch (Exception e) { + nullTxn.abort(); + LoggerUtils.warning(logger, repNode.getRepImpl(), + "Failed to write null txn when " + + repNode.getNodeName() + + " was elected as master. 
" + + nullTxn.logString() + " Reason:" + + e.getMessage()); + } + } } diff --git a/kvmain/src/main/java/com/sleepycat/je/rep/impl/node/RepNode.java b/kvmain/src/main/java/com/sleepycat/je/rep/impl/node/RepNode.java index 5ee03ff4..18b5a49e 100644 --- a/kvmain/src/main/java/com/sleepycat/je/rep/impl/node/RepNode.java +++ b/kvmain/src/main/java/com/sleepycat/je/rep/impl/node/RepNode.java @@ -17,10 +17,6 @@ import static com.sleepycat.je.rep.ReplicatedEnvironment.State.REPLICA; import static com.sleepycat.je.rep.ReplicatedEnvironment.State.UNKNOWN; import static com.sleepycat.je.rep.impl.RepImpl.CAPPED_DTVLSN; -import static com.sleepycat.je.rep.impl.node.ChannelTimeoutStatDefinition.N_CHANNEL_TIMEOUT_MAP; -import static com.sleepycat.je.rep.impl.node.MasterTransferStatDefinition.N_MASTER_TRANSFERS; -import static com.sleepycat.je.rep.impl.node.MasterTransferStatDefinition.N_MASTER_TRANSFERS_SUCCESS; -import static com.sleepycat.je.rep.impl.node.MasterTransferStatDefinition.N_MASTER_TRANSFERS_FAILURE; import static com.sleepycat.je.rep.impl.RepParams.DBTREE_CACHE_CLEAR_COUNT; import static com.sleepycat.je.rep.impl.RepParams.ENV_CONSISTENCY_TIMEOUT; import static com.sleepycat.je.rep.impl.RepParams.GROUP_NAME; @@ -29,6 +25,10 @@ import static com.sleepycat.je.rep.impl.RepParams.NODE_TYPE; import static com.sleepycat.je.rep.impl.RepParams.RESET_REP_GROUP_RETAIN_UUID; import static com.sleepycat.je.rep.impl.RepParams.SECURITY_CHECK_INTERVAL; +import static com.sleepycat.je.rep.impl.node.ChannelTimeoutStatDefinition.N_CHANNEL_TIMEOUT_MAP; +import static com.sleepycat.je.rep.impl.node.MasterTransferStatDefinition.N_MASTER_TRANSFERS; +import static com.sleepycat.je.rep.impl.node.MasterTransferStatDefinition.N_MASTER_TRANSFERS_FAILURE; +import static com.sleepycat.je.rep.impl.node.MasterTransferStatDefinition.N_MASTER_TRANSFERS_SUCCESS; import static com.sleepycat.je.utilint.VLSN.NULL_VLSN; import static com.sleepycat.je.utilint.VLSN.UNINITIALIZED_VLSN; @@ -42,10 +42,12 @@ import java.util.Timer; import java.util.UUID; import java.util.concurrent.TimeUnit; +import java.util.concurrent.locks.Condition; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; import java.util.function.Consumer; import java.util.logging.Logger; -import com.sleepycat.je.rep.ReplicaConnectRetryException; import com.sleepycat.je.CheckpointConfig; import com.sleepycat.je.DatabaseException; import com.sleepycat.je.Durability.ReplicaAckPolicy; @@ -70,6 +72,7 @@ import com.sleepycat.je.rep.NodeType; import com.sleepycat.je.rep.QuorumPolicy; import com.sleepycat.je.rep.RepInternal; +import com.sleepycat.je.rep.ReplicaConnectRetryException; import com.sleepycat.je.rep.ReplicaConsistencyException; import com.sleepycat.je.rep.ReplicaStateException; import com.sleepycat.je.rep.ReplicatedEnvironment; @@ -108,7 +111,6 @@ import com.sleepycat.je.rep.stream.MasterChangeListener; import com.sleepycat.je.rep.stream.MasterStatus; import com.sleepycat.je.rep.stream.MasterSuggestionGenerator; -import com.sleepycat.je.rep.subscription.StreamAuthenticator; import com.sleepycat.je.rep.txn.ReplayTxn; import com.sleepycat.je.rep.util.AtomicLongMax; import com.sleepycat.je.rep.util.ldiff.LDiffService; @@ -167,6 +169,9 @@ public class RepNode extends StoppableThread { /* Used when the node is a feeder. */ private FeederManager feederManager; + /* For testing only. */ + private volatile static boolean sendNullTxnWhenNodeBecomesMaster = true; + /* * The status of the Master. 
Note that this is the leading state as * communicated to this node via the Listener. The node itself may not as @@ -372,7 +377,8 @@ public class RepNode extends StoppableThread { private final AtomicLongMax localDurableVLSN = new AtomicLongMax(NULL_VLSN); /** latch to wait for a given VLSN to be durable */ - private volatile VLSNIndex.VLSNAwaitLatch dtVLSNLatch = null; + private final Lock durableLock = new ReentrantLock(); + private final Condition dtVLSNLatch = durableLock.newCondition(); /** * If not null, a test hook that is called with the name of the current @@ -976,14 +982,6 @@ int getSecurityCheckInterval() { return getConfigManager().getInt(SECURITY_CHECK_INTERVAL); } - StreamAuthenticator getAuthenticator() { - if (repImpl == null) { - return null; - } - - return repImpl.getAuthenticator(); - } - /** * Starts up the thread in which the node does its processing as a master * or replica. It then waits for the newly started thread to transition it @@ -2272,8 +2270,19 @@ public void run() { Thread.sleep(wait); elections.incrementElectionsDelayed(); } + + /* + * Just after a node becomes master, we immediately send + * out a NullTxn to ensure that any inflight non-durable + * txns at the time of failure in the preceding term are + * made durable in a new term without having to wait for + * the application itself to create a durable txn. + */ + if (sendNullTxnWhenNodeBecomesMaster) { + feederManager.sendNullTxnWhenNodeBecomesMaster(); + } + feederManager.runFeeders(); - prevTermEndVLSN = VLSN.NULL_VLSN; /* * At this point, the feeder manager has been shutdown. @@ -2289,6 +2298,7 @@ public void run() { * initialized for replica state, the node will NPE if it * attempts execute replicated writes. */ + prevTermEndVLSN = VLSN.NULL_VLSN; nodeState.changeToUnknownAndNotify(); repImpl.getVLSNIndex().initAsReplica(); assert runConvertHooks(); @@ -3168,8 +3178,11 @@ public void incrementChannelTimeout(String name) { private synchronized long updateDTVLSNMax(long vlsn) { final long ret = dtvlsn.updateMax(vlsn); - if (dtVLSNLatch != null) { - dtVLSNLatch.countDown(vlsn); + durableLock.lock(); + try { + dtVLSNLatch.signalAll(); + } finally { + durableLock.unlock(); } return ret; } @@ -3181,26 +3194,18 @@ public synchronized void updateLocalDurableVLSN(long vlsn) { public void waitForDTVLSN(long vlsn, long waitNs) throws InterruptedException, VLSNIndex.WaitTimeOutException { - synchronized (this) { - if (durable(vlsn)) { - /* already durable */ - return; - } - if (dtVLSNLatch == null) { - dtVLSNLatch = new VLSNIndex.VLSNAwaitLatch(vlsn); - } - } - - /* - * Do any waiting outside the synchronization section. If the - * waited-for VLSN has already arrived, the waitLatch will have been - * counted down, and we'll go through. - */ - if (!dtVLSNLatch.await(waitNs, TimeUnit.NANOSECONDS) || - dtVLSNLatch.isTerminated()) { - /* Timed out waiting for a durable VLSN, or was terminated. */ - throw new VLSNIndex.WaitTimeOutException(); - } + // dont sync this as the updatedtvlsnmax always gets the intrinsic locks + // in different ways might delay this + durableLock.lock(); + try { + while (!durable(vlsn)) { + if (dtVLSNLatch.awaitNanos(waitNs) <= 0) { + throw new VLSNIndex.WaitTimeOutException(); + } + } + } finally { + durableLock.unlock(); + } } /** @@ -3214,4 +3219,10 @@ public boolean durable(long vlsn) { final long dtVLSN = dtvlsn.get(); return (dtVLSN != NULL_VLSN) && (vlsn <= dtVLSN); } + + /* For testing only. 
*/ + public static void setSendNullTxnWhenNodeBecomesMaster( + boolean sendNullTxn) { + sendNullTxnWhenNodeBecomesMaster = sendNullTxn; + } } diff --git a/kvmain/src/main/java/com/sleepycat/je/rep/impl/node/Replica.java b/kvmain/src/main/java/com/sleepycat/je/rep/impl/node/Replica.java index d8ec50b6..908a1cf5 100644 --- a/kvmain/src/main/java/com/sleepycat/je/rep/impl/node/Replica.java +++ b/kvmain/src/main/java/com/sleepycat/je/rep/impl/node/Replica.java @@ -50,7 +50,6 @@ import com.sleepycat.je.rep.NodeType; import com.sleepycat.je.rep.ReplicaConsistencyException; import com.sleepycat.je.rep.ReplicaConnectRetryException; -import com.sleepycat.je.rep.ReplicaRetryException; import com.sleepycat.je.rep.ReplicatedEnvironment.State; import com.sleepycat.je.rep.RestartRequiredException; import com.sleepycat.je.rep.TimeConsistencyPolicy; @@ -442,7 +441,7 @@ void runReplicaLoop() DatabaseException, GroupShutdownException { - Class retryExceptionClass = null; + Class retryExceptionClass = null; int retryCount = 0; try { @@ -451,7 +450,7 @@ void runReplicaLoop() runReplicaLoopInternal(); /* Normal exit */ break; - } catch (ReplicaRetryException e) { + } catch (ReplicaConnectRetryException e) { assert TestHookExecute.doHookIfSet(retryDuplicatedNodeHook, 0); if (!repNode.getMasterStatus().inSync()) { @@ -527,7 +526,7 @@ void runReplicaLoop() private void runReplicaLoopInternal() throws RestartRequiredException, InterruptedException, - ReplicaRetryException, + ReplicaConnectRetryException, InsufficientLogException { shutdownException = null; @@ -569,7 +568,7 @@ private void runReplicaLoopInternal() ("\n" + LoggerUtils.getStackTrace(e)) : "")); } catch (SyncUpFailedException e) { LoggerUtils.info(logger, repImpl, e.getMessage()); - } catch (ReplicaRetryException|DiskLimitException e) { + } catch (ReplicaConnectRetryException|DiskLimitException e) { /* Propagate it outwards. Node does not need to shutdown. */ throw e; } catch (GroupShutdownException e) { @@ -884,7 +883,7 @@ private void processHeartbeat(Heartbeat heartbeat, private void loopExitCleanup() { if (shutdownException != null) { - if (shutdownException instanceof ReplicaRetryException) { + if (shutdownException instanceof ReplicaConnectRetryException) { LoggerUtils.info(logger, repImpl, "Retrying connection to feeder. 
Message: " + shutdownException.getMessage()); diff --git a/kvmain/src/main/java/com/sleepycat/je/rep/net/DataChannel.java b/kvmain/src/main/java/com/sleepycat/je/rep/net/DataChannel.java index 3dc76fa7..f944fa97 100644 --- a/kvmain/src/main/java/com/sleepycat/je/rep/net/DataChannel.java +++ b/kvmain/src/main/java/com/sleepycat/je/rep/net/DataChannel.java @@ -26,6 +26,8 @@ import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutorService; +import com.sleepycat.je.rep.subscription.StreamAuthenticator; + /** * An interface that associates a delegate socketChannel for network I/O, which * provides ByteChannel, GatheringByteChannel, and ScatteringByteChannel, @@ -310,5 +312,33 @@ public enum ContinueAction { * @return the future completed when the task completes */ CompletableFuture executeTasks(ExecutorService executor); + + /** + * Sets stream authenticator for the channel + * @param authenticator stream authenticator + */ + void setStreamAuthenticator(StreamAuthenticator authenticator); + + /** + * Gets a channel stream authenticator, or null if not available + * @return a chanenel stream authenticator, or null + */ + StreamAuthenticator getStreamAuthenticator(); + + /** + * Returns a channel ID + * @return channel ID + */ + String getChannelId(); + + /** + * Helper method, return true if the data channel needs security check, + * false otherwise + * @param channel data channel + * @return true if the data channel needs security check + */ + static boolean needSecurityCheck(DataChannel channel) { + return channel.isTrustCapable() && !channel.isTrusted(); + } } diff --git a/kvmain/src/main/java/com/sleepycat/je/rep/stream/FeederFilter.java b/kvmain/src/main/java/com/sleepycat/je/rep/stream/FeederFilter.java index 712ccaed..1d5c41e2 100644 --- a/kvmain/src/main/java/com/sleepycat/je/rep/stream/FeederFilter.java +++ b/kvmain/src/main/java/com/sleepycat/je/rep/stream/FeederFilter.java @@ -128,4 +128,11 @@ default boolean durableEntriesOnly() { default boolean includeBeforeImage() { return false; } + + /** + * Sets the feeder filter id if implemented + * @param filterId feeder filter id + */ + default void setFeederFilterId(String filterId) { + } } diff --git a/kvmain/src/main/java/com/sleepycat/je/rep/stream/FeederReplicaHandshake.java b/kvmain/src/main/java/com/sleepycat/je/rep/stream/FeederReplicaHandshake.java index 04d6fa3d..0e3abe30 100644 --- a/kvmain/src/main/java/com/sleepycat/je/rep/stream/FeederReplicaHandshake.java +++ b/kvmain/src/main/java/com/sleepycat/je/rep/stream/FeederReplicaHandshake.java @@ -669,9 +669,11 @@ private Protocol negotiateProtocol() */ if (dup != null && dup.getChannel() != null && !dup.getChannel().isOpen() && !dup.isShutdown()) { - dup.shutdown(new IOException("Feeder's channel for node " + - replicaNameIdPair + - " is already closed")); + final String msg = "Feeder's channel for node=" + + replicaNameIdPair + " is already closed" + + ", shut down feeder with exception"; + LoggerUtils.fine(logger, repNode.getRepImpl(), () -> msg); + dup.shutdown(new IOException(msg)); } dup = repNode.feederManager().getFeeder(replicaNameIdPair.getName()); diff --git a/kvmain/src/main/java/com/sleepycat/je/rep/stream/FeederReplicaSyncup.java b/kvmain/src/main/java/com/sleepycat/je/rep/stream/FeederReplicaSyncup.java index a23cee23..95c5ec80 100644 --- a/kvmain/src/main/java/com/sleepycat/je/rep/stream/FeederReplicaSyncup.java +++ b/kvmain/src/main/java/com/sleepycat/je/rep/stream/FeederReplicaSyncup.java @@ -187,6 +187,11 @@ public void execute() if 
(filter != null) { filter.setStartVLSN(startVLSN); filter.setLogger(logger); + final String filterId = + feeder.getReplicaNameIdPair().getName() + + ", channel=" + + namedChannel.getChannel().getChannelId(); + filter.setFeederFilterId(filterId); } feeder.setFeederFilter(filter); @@ -198,7 +203,8 @@ public void execute() break; } - final StreamAuthenticator auth = feeder.getAuthenticator(); + final StreamAuthenticator auth = + namedChannel.getChannel().getStreamAuthenticator(); /* if security check is needed, auth cannot be null */ assert (auth != null); /* remember table id strings of subscribed tables */ diff --git a/kvmain/src/main/java/com/sleepycat/je/rep/stream/FeederTxns.java b/kvmain/src/main/java/com/sleepycat/je/rep/stream/FeederTxns.java index 8815ac85..959b3152 100644 --- a/kvmain/src/main/java/com/sleepycat/je/rep/stream/FeederTxns.java +++ b/kvmain/src/main/java/com/sleepycat/je/rep/stream/FeederTxns.java @@ -28,6 +28,7 @@ import com.sleepycat.je.AsyncAckHandler; import com.sleepycat.je.StatsConfig; +import com.sleepycat.je.config.EnvironmentParams; import com.sleepycat.je.rep.InsufficientAcksException; import com.sleepycat.je.rep.impl.RepImpl; import com.sleepycat.je.rep.impl.RepNodeImpl; @@ -81,10 +82,19 @@ public class FeederTxns { private final LongAvgStat ackTxnAvgNs; private final LongAvgStat localTxnAvgNs; + /* + * Variables to control how many messages have been logged containing + * what nodes acknowledged a transaction when it reaches the required + * number of acks to be counted as durable. + */ + private final boolean trackAckInfo; + private final int trackAckInfoLimit; + private volatile int txnTrackAckInfoCounter; public FeederTxns(RepImpl repImpl) { // TODO: increase size of active TXN map for async and parameterize it. this.repImpl = repImpl; + txnMap = new AckExpiringMap(1024); statistics = new StatGroup(FeederTxnStatDefinition.GROUP_NAME, FeederTxnStatDefinition.GROUP_DESC); @@ -99,6 +109,12 @@ public FeederTxns(RepImpl repImpl) { statistics, VLSN_RATE, MOVING_AVG_PERIOD_MILLIS, TimeUnit.MINUTES); localTxnAvgNs = new LongAvgStat(statistics, LOCAL_TXN_AVG_NS); ackTxnAvgNs = new LongAvgStat(statistics, ACK_TXN_AVG_NS); + + txnTrackAckInfoCounter = 0; + trackAckInfo = Boolean.parseBoolean(repImpl.getConfig() + .getConfigParam(RepParams.TXN_TRACK_ACK_INFO.getName())); + trackAckInfoLimit = Integer.parseInt(repImpl.getConfig() + .getConfigParam(RepParams.TXN_TRACK_ACK_INFO_LIMIT.getName())); } public void shutdown() { @@ -185,6 +201,38 @@ public MasterTxn noteReplicaAck(final RepNodeImpl replica, return null; } txn.countdownAck(); + + /* + * If True means that the transaction with ID 'txnId' received + * the required number of acks. + */ + if (txn.getPendingAcks() <= 0) { + /* + * If True means that the debugging tool was activated + * to track what nodes in the replication group + * acknowledged a transaction when it reaches the required + * number of acks to be counted as durable. + */ + if (trackAckInfo) { + /* + * Verify that the number of messages to be logged is under + * the maximum limit. 
+ */ + if (txnTrackAckInfoCounter <= trackAckInfoLimit) { + StringBuilder msg = new StringBuilder("Transaction id " + + txnId + " received a quorum of acks from the " + + "following nodes:"); + for (RepNodeImpl node : repImpl.getRepNode().getRepGroupDB() + .getGroup().getElectableMembers()) { + msg.append(" ").append(node.getName()); + } + LoggerUtils.info(repImpl.getLogger(), repImpl, + msg.toString()); + txnTrackAckInfoCounter++; + } + } + } + if (!txn.usesAsyncAcks() || (txn.getPendingAcks() > 0)) { return txn; } @@ -407,7 +455,8 @@ public void run() { if (repNode == null) { return; } - /** + + /* * It's ok for the "now" time to be slightly inaccurate. in * order to minimize nanoTime() calls. */ diff --git a/kvmain/src/main/java/com/sleepycat/je/rep/stream/InputWireRecord.java b/kvmain/src/main/java/com/sleepycat/je/rep/stream/InputWireRecord.java old mode 100644 new mode 100755 index 6e427d05..5a6433dc --- a/kvmain/src/main/java/com/sleepycat/je/rep/stream/InputWireRecord.java +++ b/kvmain/src/main/java/com/sleepycat/je/rep/stream/InputWireRecord.java @@ -42,7 +42,7 @@ public class InputWireRecord extends WireRecord { private boolean hasBeforeImage; private boolean enabledBeforeImage; private BeforeImageIndex.BeforeImagePayLoad beforeImage; - private int beforeImageExp; + private long beforeImageExp; /** @@ -77,7 +77,7 @@ public class InputWireRecord extends WireRecord { if (entry instanceof BeforeImageLNLogEntry) { beforeImageExp = ((BeforeImageLNLogEntry) entry) - .getBeforeImageExpiration(); + .getBeforeImageExpirationTime(); } if (msgBuffer.hasRemaining()) { diff --git a/kvmain/src/main/java/com/sleepycat/je/rep/stream/MatchpointSearchResults.java b/kvmain/src/main/java/com/sleepycat/je/rep/stream/MatchpointSearchResults.java index 424f4a81..5f1d8e95 100644 --- a/kvmain/src/main/java/com/sleepycat/je/rep/stream/MatchpointSearchResults.java +++ b/kvmain/src/main/java/com/sleepycat/je/rep/stream/MatchpointSearchResults.java @@ -105,8 +105,7 @@ public MatchpointSearchResults(EnvironmentImpl envImpl) { */ matchpointLSN = DbLsn.makeLsn(0, 0); - passedTxnLimit = - envImpl.getConfigManager().getInt(RepParams.TXN_ROLLBACK_LIMIT); + passedTxnLimit = 10; // hardcoded value for debugging purposes passedTxns = new ArrayList<>(); numPassedCommits = 0; numPassedDurableCommits = 0; diff --git a/kvmain/src/main/java/com/sleepycat/je/rep/stream/ReplicaFeederHandshake.java b/kvmain/src/main/java/com/sleepycat/je/rep/stream/ReplicaFeederHandshake.java index efd3f5a9..65360d6c 100644 --- a/kvmain/src/main/java/com/sleepycat/je/rep/stream/ReplicaFeederHandshake.java +++ b/kvmain/src/main/java/com/sleepycat/je/rep/stream/ReplicaFeederHandshake.java @@ -26,11 +26,11 @@ import com.sleepycat.je.dbi.DbConfigManager; import com.sleepycat.je.dbi.EnvironmentFailureReason; import com.sleepycat.je.log.LogEntryType; +import com.sleepycat.je.rep.BinaryProtocolException; import com.sleepycat.je.rep.NodeType; import com.sleepycat.je.rep.ReplicaConnectRetryException; import com.sleepycat.je.rep.impl.RepGroupImpl; import com.sleepycat.je.rep.impl.RepImpl; -import com.sleepycat.je.rep.impl.RepParams; import com.sleepycat.je.rep.impl.node.NameIdPair; import com.sleepycat.je.rep.stream.Protocol.CacheSizeResponse; import com.sleepycat.je.rep.stream.Protocol.DuplicateNodeReject; @@ -206,7 +206,35 @@ private Protocol negotiateProtocol() * Returns the highest level the feeder can support, or the version we * just sent, if it can support that version */ - Message message = defaultProtocol.read(namedChannel); + Message message 
= null; + try { + message = defaultProtocol.read(namedChannel); + } + catch (BinaryProtocolException e) { + /* + * An error was encountered while reading from the channel. + * + * This is most likely because the channel is no longer alive, so + * throw a retry exception so that the handshake can succeed on + * another attempt. Otherwise, exceptions like the ones reported + * in KVSTORE-2654 are repeatedly thrown, which could delay the + * completion of a successful handshake. + */ + LoggerUtils.info(logger, repImpl, + "Error reading channel during protocol " + + "negotiation: " + LoggerUtils.getStackTrace(e)); + + LoggerUtils.info(logger, repImpl, + "Throwing ReplicaConnectRetryException to retry the handshake"); + + throw new ReplicaConnectRetryException( + "Retrying the handshake after a protocol read " + + "error so that it can succeed on another " + + "attempt with the feeder", + REPLICA_CONNECTION_RETRIES, + REPLICA_CONNECTION_RETRY_SLEEP_MS); + } + if (message instanceof DuplicateNodeReject) { /* When the feeder messages with DuplicateNodeReject to the replica @@ -279,7 +307,7 @@ public Protocol execute() /* Ensure that software versions are compatible. */ verifyVersions(); - /** + /* * Note whether log entries with later log versions need to be * converted to log version 12 to work around [#25222]. */ diff --git a/kvmain/src/main/java/com/sleepycat/je/rep/stream/ReplicaFeederSyncup.java b/kvmain/src/main/java/com/sleepycat/je/rep/stream/ReplicaFeederSyncup.java index 39842cf5..1f583935 100644 --- a/kvmain/src/main/java/com/sleepycat/je/rep/stream/ReplicaFeederSyncup.java +++ b/kvmain/src/main/java/com/sleepycat/je/rep/stream/ReplicaFeederSyncup.java @@ -406,51 +406,33 @@ private void verifyRollback(VLSNRange range) /* * We're planning on rolling back past a commit or abort, and we know - * that we have not passed a barrier checkpoint. See if we have - * exceeded the number of rolledback commits limit. + * that we have not passed a barrier checkpoint. */ - EnvironmentImpl envImpl = repNode.getRepImpl(); - DbConfigManager configMgr = envImpl.getConfigManager(); - final int rollbackTxnLimit = - configMgr.getInt(RepParams.TXN_ROLLBACK_LIMIT); - final boolean rollbackDisabled = - configMgr.getBoolean(RepParams.TXN_ROLLBACK_DISABLED); - final int numPassedDurableCommits = searchResults.getNumPassedDurableCommits(); final int numPassedCommits = searchResults.getNumPassedCommits(); final long dtvlsn = searchResults.getDTVLSN(); + + String str = "Rollback Message.\n Node " + repNode.getRepImpl().getName() + + " must rollback " + searchResults.getRollbackMsg() + + " in order to rejoin the replication group"; + LoggerUtils.info(logger, repImpl, str); + LoggerUtils.info(logger, repImpl, - String.format("Rollback info. " + - "Number of passed commits:%,d. " + - "(durable commits:%,d). " + - "Durable commit VLSN:%,d " + - "matchpoint VLSN:%,d " + - "lastTxnEnd VLSN:%,d " + - "lastSync VLSN:%,d " + - "Rollback transaction limit:%,d", - numPassedCommits, - numPassedDurableCommits, - dtvlsn, - matchpointVLSN, - lastTxnEnd, - lastSync, - rollbackTxnLimit)); - - if (numPassedDurableCommits > rollbackTxnLimit || rollbackDisabled) { - - LoggerUtils.severe(logger, repImpl, - "Limited list of transactions that would " + - " be truncated for hard recovery:\n" + - searchResults.dumpPassedTxns()); - - throw new RollbackProhibitedException(repNode.getRepImpl(), - rollbackTxnLimit, - rollbackDisabled, - matchpointVLSN, - searchResults); - } + String.format("Rollback info. " + + "Number of passed commits:%,d. 
" + + "(durable commits:%,d). " + + "Durable commit VLSN:%,d " + + "matchpoint VLSN:%,d " + + "lastTxnEnd VLSN:%,d " + + "lastSync VLSN:%,d ", + numPassedCommits, + numPassedDurableCommits, + dtvlsn, + matchpointVLSN, + lastTxnEnd, + lastSync)); /* * After passing all the earlier qualifications, do a truncation and @@ -776,16 +758,26 @@ public RollbackException setupHardRecovery() RollbackException r = new RollbackException(repImpl, matchpointVLSN, searchResults); - Level level = Level.WARNING; + /* + * Rolling back over non-durable (vlsn greater than the dtvlsn) + * transactions should be logged as a normal event, at the info + * level. + */ + Level level = Level.INFO; if (searchResults.getNumPassedDurableCommits() > 0) { + /* + * rolling back over the dtvlsn should be logged as a severe + * event + */ level = Level.SEVERE; } final String passedTxn = searchResults.dumpPassedTxns(); - if (passedTxn != null && passedTxn.length() > 0) { + if (passedTxn != null && !passedTxn.isEmpty()) { LoggerUtils.logMsg(logger, repImpl, level, - "Limited list of transactions truncated for " + - "hard recovery:\n" + - searchResults.dumpPassedTxns()); + "Limited list of transactions " + + "truncated for hard recovery " + + "(only for debugging purpose):\n" + + searchResults.dumpPassedTxns()); } else { LoggerUtils.info(logger, repImpl, r.getMessage()); } diff --git a/kvmain/src/main/java/com/sleepycat/je/rep/subscription/ServerAuthMethod.java b/kvmain/src/main/java/com/sleepycat/je/rep/subscription/ServerAuthMethod.java index 80566360..0eca50d8 100644 --- a/kvmain/src/main/java/com/sleepycat/je/rep/subscription/ServerAuthMethod.java +++ b/kvmain/src/main/java/com/sleepycat/je/rep/subscription/ServerAuthMethod.java @@ -13,12 +13,16 @@ package com.sleepycat.je.rep.subscription; +import static java.util.logging.Level.INFO; + import java.io.IOException; import java.nio.ByteBuffer; import com.sleepycat.je.log.LogUtils; import com.sleepycat.je.rep.net.DataChannel; +import com.sleepycat.je.rep.net.InstanceLogger; import com.sleepycat.je.rep.utilint.ServiceHandshake; +import com.sleepycat.je.rep.utilint.net.SSLDataChannel; /** * Object represents a subscription authentication method used in service @@ -68,9 +72,10 @@ static class ServerTokenOp extends ServiceHandshake.ServerInitOp { private final StreamAuthenticator auth; ServerTokenOp(ServiceHandshake.ServerHandshake initState, - StreamAuthenticator auth) { + StreamAuthenticator configuredAuth) { super(initState); - this.auth = auth; + /* create a clone of configured authenticator */ + auth = configuredAuth.getInstance(initState.getChannel()); } @Override @@ -110,15 +115,32 @@ public ServiceHandshake.InitResult processOp(DataChannel channel) if (!auth.authenticate()) { return ServiceHandshake.InitResult.REJECT; } + + /* set authenticator in the channel */ + channel.setStreamAuthenticator(auth); + if (channel instanceof SSLDataChannel) { + /* seems only SSL channel has a logger */ + final SSLDataChannel sslDataChannel = (SSLDataChannel) channel; + final InstanceLogger logger = sslDataChannel.getLogger(); + logger.log(INFO, lm("Set channel stream authenticator, " + + "channelId=" + auth.getChannelId() + + ", authenticator=" + auth)); + } return ServiceHandshake.InitResult.DONE; } } + @Override + public String toString() { + return "ServerAuthMethod=" + getMechanismName() + + ", authenticator=" + serverAuth; + } + /** * Client side authentication, effectively no-op except rejecting * handshake and it is not supposed to be called at client-side. 
*/ - class ClientTokenOp extends ServiceHandshake.ClientInitOp { + static class ClientTokenOp extends ServiceHandshake.ClientInitOp { ClientTokenOp(ServiceHandshake.ClientHandshake initState) { super(initState); @@ -126,7 +148,7 @@ class ClientTokenOp extends ServiceHandshake.ClientInitOp { @Override public ServiceHandshake.InitResult processOp( - ServiceHandshake.IOAdapter ioAdapter) throws IOException { + ServiceHandshake.IOAdapter ioAdapter) { return ServiceHandshake.InitResult.REJECT; } } diff --git a/kvmain/src/main/java/com/sleepycat/je/rep/subscription/StreamAuthenticator.java b/kvmain/src/main/java/com/sleepycat/je/rep/subscription/StreamAuthenticator.java index 3401c707..b0dcca0e 100644 --- a/kvmain/src/main/java/com/sleepycat/je/rep/subscription/StreamAuthenticator.java +++ b/kvmain/src/main/java/com/sleepycat/je/rep/subscription/StreamAuthenticator.java @@ -14,6 +14,8 @@ package com.sleepycat.je.rep.subscription; +import com.sleepycat.je.rep.net.DataChannel; + /** * Object represents an interface to authenticate stream consumer and check its * access privilege. @@ -69,4 +71,17 @@ public interface StreamAuthenticator { * check has been performed. */ long getLastCheckTimeMs(); + + /** + * Gets a copy instance from existing stream authenticator + * @param channel data channel that owns the authenticator + * @return a new instance + */ + StreamAuthenticator getInstance(DataChannel channel); + + /** + * Returns channel id associated with the authenticator + * @return channel id + */ + String getChannelId(); } diff --git a/kvmain/src/main/java/com/sleepycat/je/rep/subscription/SubscriptionCallback.java b/kvmain/src/main/java/com/sleepycat/je/rep/subscription/SubscriptionCallback.java index fc17040b..3e75d6a5 100644 --- a/kvmain/src/main/java/com/sleepycat/je/rep/subscription/SubscriptionCallback.java +++ b/kvmain/src/main/java/com/sleepycat/je/rep/subscription/SubscriptionCallback.java @@ -69,16 +69,18 @@ void processDel(long vlsn, byte[] key, byte[] val, * * @param vlsn VLSN of commit entry * @param txnId id of txn to commit + * @param timestamp timestamp of the commit entry */ - void processCommit(long vlsn, long txnId); + void processCommit(long vlsn, long txnId, long timestamp); /** * Process an abort entry from stream * * @param vlsn VLSN of abort entry * @param txnId id of txn to abort + * @param timestamp timestamp of the abort entry */ - void processAbort(long vlsn, long txnId); + void processAbort(long vlsn, long txnId, long timestamp); /** * Process the exception from stream. 
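Taken together, the DataChannel, ServerAuthMethod and StreamAuthenticator changes above move the stream authenticator from the Feeder onto the channel itself: ServerTokenOp clones the configured authenticator with getInstance() and attaches it via setStreamAuthenticator(), and later consumers fetch it back from the channel. A minimal usage sketch of that flow follows; the helper class and method are illustrative only and are not part of this patch.

import com.sleepycat.je.rep.net.DataChannel;
import com.sleepycat.je.rep.subscription.StreamAuthenticator;

/* Illustrative sketch, not part of this patch. */
final class ChannelAuthSketch {

    private ChannelAuthSketch() {
    }

    /*
     * Attach a per-channel clone of the configured authenticator (as
     * ServerTokenOp now does) and run an access check if the channel
     * actually needs one. Returns false only when the check fails.
     */
    static boolean attachAndCheck(DataChannel channel,
                                  StreamAuthenticator configuredAuth) {

        if (configuredAuth != null &&
            channel.getStreamAuthenticator() == null) {
            /* getInstance() clones the configured authenticator for this
             * specific channel. */
            channel.setStreamAuthenticator(
                configuredAuth.getInstance(channel));
        }

        final StreamAuthenticator auth = channel.getStreamAuthenticator();
        if (auth == null || !DataChannel.needSecurityCheck(channel)) {
            /* Non-secure store, or the channel is already trusted. */
            return true;
        }

        /* Both authentication and authorization, keyed to this channel. */
        return auth.checkAccess();
    }
}

Feeder.needSecurityChecks(), Feeder.processReauthenticate() and FeederReplicaSyncup.execute() now follow this pattern, reading the authenticator from feederReplicaChannel or namedChannel rather than caching it in the Feeder.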
diff --git a/kvmain/src/main/java/com/sleepycat/je/rep/subscription/SubscriptionConfig.java b/kvmain/src/main/java/com/sleepycat/je/rep/subscription/SubscriptionConfig.java index 295f0d04..b08bc0e2 100644 --- a/kvmain/src/main/java/com/sleepycat/je/rep/subscription/SubscriptionConfig.java +++ b/kvmain/src/main/java/com/sleepycat/je/rep/subscription/SubscriptionConfig.java @@ -776,12 +776,12 @@ public void processDel(long vlsn, byte[] key, byte[] value, long txnId, } @Override - public void processCommit(long vlsn, long txnid) { + public void processCommit(long vlsn, long txnid, long ts) { } @Override - public void processAbort(long vlsn, long txnid) { + public void processAbort(long vlsn, long txnid, long ts) { } diff --git a/kvmain/src/main/java/com/sleepycat/je/rep/subscription/SubscriptionProcessMessageThread.java b/kvmain/src/main/java/com/sleepycat/je/rep/subscription/SubscriptionProcessMessageThread.java index b69b5f79..a33b4d0b 100644 --- a/kvmain/src/main/java/com/sleepycat/je/rep/subscription/SubscriptionProcessMessageThread.java +++ b/kvmain/src/main/java/com/sleepycat/je/rep/subscription/SubscriptionProcessMessageThread.java @@ -20,12 +20,16 @@ import java.util.concurrent.TimeUnit; import java.util.logging.Logger; +import com.sleepycat.je.log.entry.AbortLogEntry; +import com.sleepycat.je.log.entry.CommitLogEntry; import com.sleepycat.je.log.entry.LNLogEntry; import com.sleepycat.je.log.entry.LogEntry; import com.sleepycat.je.rep.GroupShutdownException; import com.sleepycat.je.rep.impl.RepImpl; import com.sleepycat.je.rep.stream.InputWireRecord; import com.sleepycat.je.rep.stream.Protocol; +import com.sleepycat.je.txn.TxnAbort; +import com.sleepycat.je.txn.TxnCommit; import com.sleepycat.je.utilint.LoggerUtils; import com.sleepycat.je.utilint.StoppableThread; import com.sleepycat.je.utilint.TestHook; @@ -183,16 +187,22 @@ public void run() { stats.setHighVLSN(vlsn); stats.getNumOpsProcessed().increment(); - /* call different proc depending on entry type */ + /* call different callbacks depending on entry type */ if (LOG_TXN_COMMIT.equalsType(type)) { + final CommitLogEntry ce = (CommitLogEntry) entry; + final TxnCommit tc = ce.getMainItem(); + final long ts = tc.getTime().getTime(); stats.getNumTxnCommitted().increment(); - callBack.processCommit(vlsn, txnId); + callBack.processCommit(vlsn, txnId, ts); continue; } if (LOG_TXN_ABORT.equalsType(type)) { - stats.getNumTxnAborted().increment(); - callBack.processAbort(vlsn, txnId); + final AbortLogEntry ae = (AbortLogEntry) entry; + final TxnAbort ta = ae.getMainItem(); + final long ts = ta.getTime().getTime(); + stats.getNumTxnAborted().increment(); + callBack.processAbort(vlsn, txnId, ts); continue; } diff --git a/kvmain/src/main/java/com/sleepycat/je/rep/subscription/SubscriptionThread.java b/kvmain/src/main/java/com/sleepycat/je/rep/subscription/SubscriptionThread.java index 47a9e670..962e417b 100644 --- a/kvmain/src/main/java/com/sleepycat/je/rep/subscription/SubscriptionThread.java +++ b/kvmain/src/main/java/com/sleepycat/je/rep/subscription/SubscriptionThread.java @@ -844,17 +844,8 @@ private void openChannel() throws ConnectionException, * as yet. For example, the transition to the master may not have * been completed. */ - //TODO: The INVALID code may be caused by connection problems - //or authentication issues. The former should lead to a retry but - // the latter should not. Today the response code does not - // distinguish these two cases and we just retry for both. 
- // After [KVSTORE-224]/[#27504] is done, hopefully we will have - // better response code to tell if the failure is caused by - // security check for which case the ReplicationSecurityException - // should be thrown instead of ConnectionException Response response = se.getResponse(); - if (response == ServiceDispatcher.Response.UNKNOWN_SERVICE || - response == ServiceDispatcher.Response.INVALID) { + if (response == ServiceDispatcher.Response.UNKNOWN_SERVICE) { final long ts = config.getSleepBeforeRetryMs(); final String msg = "Fail to open channel to node=" + config.getFeederHostPort() + @@ -866,6 +857,21 @@ private void openChannel() throws ConnectionException, throw new ConnectionException(msg, ts, se); } + /* + * No retry since INVALID response is returned only on security + * check failure + */ + if (response == ServiceDispatcher.Response.INVALID) { + final String msg = "Fail to open channel to node=" + + config.getFeederHostPort() + + ", fail to authenticate" + + ", dispatcher response=" + response.name() + + ", service exception=" + se; + LoggerUtils.info(logger, repImpl, lm(msg)); + throw new ReplicationSecurityException( + msg, config.getSubNodeName(), se); + } + final String msg = "Subscription=" + config.getSubNodeName() + "failed to handshake for service=" + FeederManager.FEEDER_SERVICE + " with node=" + diff --git a/kvmain/src/main/java/com/sleepycat/je/rep/txn/MasterTxn.java b/kvmain/src/main/java/com/sleepycat/je/rep/txn/MasterTxn.java index 8c81486f..41cd6c0c 100644 --- a/kvmain/src/main/java/com/sleepycat/je/rep/txn/MasterTxn.java +++ b/kvmain/src/main/java/com/sleepycat/je/rep/txn/MasterTxn.java @@ -739,6 +739,10 @@ public interface MasterTxnFactory { default MasterTxn createNullTxn(EnvironmentImpl envImpl) { return new NullTxn(envImpl); } + + default MasterTxn createSyncNullTxn(EnvironmentImpl envImpl) { + return new NullTxn(envImpl, true); + } } /* The method used to create user Master Txns via the factory. */ @@ -747,8 +751,10 @@ public static MasterTxn create(EnvironmentImpl envImpl, return factory.create(envImpl, config); } - public static MasterTxn createNullTxn(EnvironmentImpl envImpl) { - return factory.createNullTxn(envImpl); + public static MasterTxn createNullTxn(EnvironmentImpl envImpl, boolean sync) { + return sync + ? factory.createSyncNullTxn(envImpl) + : factory.createNullTxn(envImpl); } /** diff --git a/kvmain/src/main/java/com/sleepycat/je/rep/txn/NullTxn.java b/kvmain/src/main/java/com/sleepycat/je/rep/txn/NullTxn.java index ef5e9c4a..2f4f339d 100644 --- a/kvmain/src/main/java/com/sleepycat/je/rep/txn/NullTxn.java +++ b/kvmain/src/main/java/com/sleepycat/je/rep/txn/NullTxn.java @@ -68,17 +68,28 @@ public class NullTxn extends MasterTxn { * uses it to process its acknowledgments, effectively making it ack * processing async. */ - private static TransactionConfig NULL_TXN_CONFIG = new TransactionConfig(); + private final static TransactionConfig NULL_TXN_CONFIG = + new TransactionConfig(); + private final static TransactionConfig NULL_SYNC_TXN_CONFIG = + new TransactionConfig(); static { NULL_TXN_CONFIG. setDurability(new Durability(SyncPolicy.WRITE_NO_SYNC, SyncPolicy.WRITE_NO_SYNC, ReplicaAckPolicy.SIMPLE_MAJORITY)); + NULL_SYNC_TXN_CONFIG. + setDurability(new Durability(SyncPolicy.SYNC, + SyncPolicy.SYNC, + ReplicaAckPolicy.SIMPLE_MAJORITY)); } public NullTxn(EnvironmentImpl envImpl) { - super(envImpl, NULL_TXN_CONFIG); + this(envImpl, false); + } + + public NullTxn(EnvironmentImpl envImpl, boolean sync) { + super(envImpl, sync ? 
NULL_SYNC_TXN_CONFIG : NULL_TXN_CONFIG); } @Override diff --git a/kvmain/src/main/java/com/sleepycat/je/rep/utilint/BinaryProtocol.java b/kvmain/src/main/java/com/sleepycat/je/rep/utilint/BinaryProtocol.java index a23e31ca..45c6c93e 100644 --- a/kvmain/src/main/java/com/sleepycat/je/rep/utilint/BinaryProtocol.java +++ b/kvmain/src/main/java/com/sleepycat/je/rep/utilint/BinaryProtocol.java @@ -45,6 +45,7 @@ import com.sleepycat.je.dbi.DbConfigManager; import com.sleepycat.je.dbi.EnvironmentImpl; import com.sleepycat.je.log.LogUtils; +import com.sleepycat.je.rep.BinaryProtocolException; import com.sleepycat.je.rep.ReplicationConfig; import com.sleepycat.je.rep.impl.RepParams; import com.sleepycat.je.rep.impl.node.NameIdPair; @@ -502,10 +503,13 @@ private void fillBuffer(final ReadableByteChannel channel, while (buffer.position() < buffer.limit()) { final int numRead = channel.read(buffer); if (numRead <= 0) { - throw new IOException("Expected bytes: " + buffer.limit() + - " read bytes: " + buffer.position() + - " messageOp: " + - (op == null ? "unknown" : op)); + throw new BinaryProtocolException( + "Expected bytes: " + buffer.limit() + + " read bytes: " + buffer.position() + + " numRead: " + numRead + + " messageOp: " + + (op == null ? "unknown" : op) + ); } } nReadNanos.add(System.nanoTime() - start); diff --git a/kvmain/src/main/java/com/sleepycat/je/rep/utilint/ServiceDispatcher.java b/kvmain/src/main/java/com/sleepycat/je/rep/utilint/ServiceDispatcher.java index cf793492..40647dbc 100644 --- a/kvmain/src/main/java/com/sleepycat/je/rep/utilint/ServiceDispatcher.java +++ b/kvmain/src/main/java/com/sleepycat/je/rep/utilint/ServiceDispatcher.java @@ -1347,7 +1347,7 @@ public String getMessage() { return "Service was busy"; case INVALID: - return "Invalid response supplied"; + return "Invalid response supplied, security failure"; case PROCEED: return "Protocol continuation requested"; diff --git a/kvmain/src/main/java/com/sleepycat/je/rep/utilint/ServiceHandshake.java b/kvmain/src/main/java/com/sleepycat/je/rep/utilint/ServiceHandshake.java index ef8a836f..ebaf6d75 100644 --- a/kvmain/src/main/java/com/sleepycat/je/rep/utilint/ServiceHandshake.java +++ b/kvmain/src/main/java/com/sleepycat/je/rep/utilint/ServiceHandshake.java @@ -13,15 +13,19 @@ package com.sleepycat.je.rep.utilint; +import static com.sleepycat.je.rep.subscription.SubscriptionConfig.SERVICE_HANDSHAKE_AUTH_METHOD; + import java.io.DataInputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.nio.ByteBuffer; import java.nio.channels.ByteChannel; +import java.util.Arrays; import java.util.logging.Level; import com.sleepycat.je.EnvironmentFailureException; +import com.sleepycat.je.rep.impl.node.FeederManager; import com.sleepycat.je.rep.net.DataChannel; import com.sleepycat.je.rep.net.DataChannel.AsyncIO; import com.sleepycat.je.rep.utilint.ServiceDispatcher.Response; @@ -115,7 +119,7 @@ public static class ServerHandshake { currentOp = new ReceiveNameOp(this); } - DataChannel getChannel() { + public DataChannel getChannel() { return channel; } @@ -159,9 +163,21 @@ InitResult process() throws IOException { */ return InitResult.DONE; } - logMsg(Level.WARNING, - false, // noteError - "DataChannel is trust-capable but is not trusted"); + + if (subscriptionHandshake()) { + /* a subscription handshake, OK */ + logMsg(Level.INFO, + false, // noteError + lm("DataChannel (id=" + channel.getChannelId() + + ") for subscription, OK it is trust-capable but" + + " not trusted")); + } else { 
+ /* likely an error in channel configuration */ + logMsg(Level.WARNING, + false, // noteError + lm("DataChannel (id=" + channel.getChannelId() + + ") is trust-capable but is not trusted")); + } /* * Defer rejecting the connection until the @@ -180,13 +196,41 @@ InitResult process() throws IOException { } /* Initiate the authentication step. */ + logMsg(Level.FINE, false, + lm("Start RequireAuthenticateOp" + + ", authInfo=" + Arrays.toString(authInfo) + + ", current op=" + currentOp.getClass().getSimpleName())); currentOp = new RequireAuthenticateOp(this, authInfo); return currentOp.processOp(channel); } void logMsg(Level level, boolean noteError, String msg) { + if (dispatcher == null) { + return; + } dispatcher.logMsg(level, noteError, msg); } + + private String lm(String msg) { + return "[ServiceHandshake][Service=" + serviceName + "] " + msg; + } + + private boolean subscriptionHandshake() { + if (!FeederManager.FEEDER_SERVICE.equals(serviceName)) { + /* not a feeder service */ + return false; + } + + if (authInfo == null || authInfo.length == 0) { + /* no authentication method */ + return false; + } + + /* return true if any subscription auth method */ + return Arrays.stream(authInfo) + .anyMatch(t -> SERVICE_HANDSHAKE_AUTH_METHOD + .equals(t.getMechanismName())); + } } /** @@ -243,6 +287,10 @@ protected ServerInitOp(ServerHandshake initState) { this.initState = initState; } + protected String lm(String msg) { + return "[" + this.getClass().getSimpleName() + "] " + msg; + } + /** * Incrementally process the operation. The operation may require * multiple passes since we are presumed to be in non-blocking mode. diff --git a/kvmain/src/main/java/com/sleepycat/je/rep/utilint/net/AbstractDataChannel.java b/kvmain/src/main/java/com/sleepycat/je/rep/utilint/net/AbstractDataChannel.java index 9f415e0c..6adea70f 100644 --- a/kvmain/src/main/java/com/sleepycat/je/rep/utilint/net/AbstractDataChannel.java +++ b/kvmain/src/main/java/com/sleepycat/je/rep/utilint/net/AbstractDataChannel.java @@ -14,12 +14,12 @@ package com.sleepycat.je.rep.utilint.net; import java.io.IOException; -import java.net.InetSocketAddress; import java.net.Socket; import java.net.SocketAddress; import java.nio.channels.SocketChannel; import com.sleepycat.je.rep.net.DataChannel; +import com.sleepycat.je.rep.subscription.StreamAuthenticator; /** * An abstract class that utilizes a delegate socketChannel for network @@ -46,6 +46,10 @@ abstract public class AbstractDataChannel implements DataChannel { */ protected final SocketChannel socketChannel; protected final String addressPair; + /** + * Stream authenticator for secure store, or null otherwise + */ + private volatile StreamAuthenticator authenticator; /** * Constructor for sub-classes. @@ -57,6 +61,8 @@ protected AbstractDataChannel(SocketChannel socketChannel) { this.socketChannel = socketChannel; this.configuredBlocking = socketChannel.isBlocking(); this.addressPair = getAddressPair(); + /* no authenticator by default */ + authenticator = null; } /** @@ -113,6 +119,20 @@ public SocketChannel getSocketChannel() { return socketChannel; } + @Override + public void setStreamAuthenticator(StreamAuthenticator auth) { + if (authenticator != null) { + throw new IllegalStateException( + "StreamAuthenticator already set to=" + authenticator); + } + authenticator = auth; + } + + @Override + public StreamAuthenticator getStreamAuthenticator() { + return authenticator; + } + /** * Ensures this channel is in blocking mode when calling close. 
*/ @@ -133,6 +153,11 @@ protected void ensureCloseAsyncForNonBlocking() { } } + @Override + public String getChannelId() { + return addressPair; + } + /** * Returns the address pair string. */ diff --git a/kvmain/src/main/java/com/sleepycat/je/rep/utilint/net/SSLDataChannel.java b/kvmain/src/main/java/com/sleepycat/je/rep/utilint/net/SSLDataChannel.java index 1adb5058..2a46ff03 100644 --- a/kvmain/src/main/java/com/sleepycat/je/rep/utilint/net/SSLDataChannel.java +++ b/kvmain/src/main/java/com/sleepycat/je/rep/utilint/net/SSLDataChannel.java @@ -297,6 +297,10 @@ public SSLDataChannel(SocketChannel socketChannel, toShortString())); } + public InstanceLogger getLogger() { + return logger; + } + /** Returns the ssl engine. */ public SSLEngine getSSLEngine() { return sslEngine; diff --git a/kvmain/src/main/java/com/sleepycat/je/rep/utilint/net/SimpleDataChannel.java b/kvmain/src/main/java/com/sleepycat/je/rep/utilint/net/SimpleDataChannel.java index 09c48741..7211be64 100644 --- a/kvmain/src/main/java/com/sleepycat/je/rep/utilint/net/SimpleDataChannel.java +++ b/kvmain/src/main/java/com/sleepycat/je/rep/utilint/net/SimpleDataChannel.java @@ -21,6 +21,9 @@ import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutorService; +import com.sleepycat.je.utilint.TestHook; +import com.sleepycat.je.utilint.TestHookExecute; + /** * A basic concrete extension of DataChannel. @@ -28,6 +31,11 @@ */ public class SimpleDataChannel extends AbstractDataChannel { + /** + * Test hook executed before channel read and write. + */ + public static volatile TestHook ioHook = null; + /** * A wrapped socket channel so that read can be timed out. * @@ -112,6 +120,8 @@ public synchronized void configureBlocking(boolean block) @Override public int read(ByteBuffer dst) throws IOException { + assert TestHookExecute.doIOHookIfSet(ioHook, this); + synchronized(wrappedReadChannel) { return wrappedReadChannel.read(dst); } @@ -126,6 +136,8 @@ public long read(ByteBuffer[] dsts) throws IOException { public long read(ByteBuffer[] dsts, int offset, int length) throws IOException { + assert TestHookExecute.doIOHookIfSet(ioHook, this); + SSLDataChannel.checkParams(dsts, offset, length); long nbytes = 0; synchronized(wrappedReadChannel) { @@ -153,11 +165,13 @@ public long read(ByteBuffer[] dsts, int offset, int length) @Override public int write(ByteBuffer src) throws IOException { + assert TestHookExecute.doIOHookIfSet(ioHook, this); return socketChannel.write(src); } @Override public long write(ByteBuffer[] srcs) throws IOException { + assert TestHookExecute.doIOHookIfSet(ioHook, this); return socketChannel.write(srcs); } @@ -165,6 +179,7 @@ public long write(ByteBuffer[] srcs) throws IOException { public long write(ByteBuffer[] srcs, int offset, int length) throws IOException { + assert TestHookExecute.doIOHookIfSet(ioHook, this); return socketChannel.write(srcs, offset, length); } diff --git a/kvmain/src/main/java/com/sleepycat/je/tree/BIN.java b/kvmain/src/main/java/com/sleepycat/je/tree/BIN.java old mode 100644 new mode 100755 index 552b3624..4063072f --- a/kvmain/src/main/java/com/sleepycat/je/tree/BIN.java +++ b/kvmain/src/main/java/com/sleepycat/je/tree/BIN.java @@ -200,6 +200,9 @@ public class BIN extends IN { private static final INLongRep.EmptyRep EMPTY_MODIFICATION_TIMES = new INLongRep.EmptyRep(3, true); + private static final INLongRep.EmptyRep EMPTY_CREATION_TIMES = + new INLongRep.EmptyRep(3, true); + /** * The set of cursors that are currently referring to this BIN. 
* This field is set to null when there are no cursors on this BIN. @@ -274,6 +277,9 @@ public class BIN extends IN { private INLongRep modificationTimes = EMPTY_MODIFICATION_TIMES; private long modificationTimesBase = -1; + private INLongRep creationTimes = EMPTY_CREATION_TIMES; + private long creationTimesBase = -1; + public BIN() { } @@ -578,7 +584,7 @@ public void setModificationTime(int idx, long value) { setModificationTimeOffset(idx, value - modificationTimesBase + 1); } - + /** * Returns the modification time for an embedded record. Returns zero if * the record is not embedded or the record was written with JE 19.3 or @@ -608,6 +614,72 @@ void setModificationTimeOffset(int idx, long offset) { modificationTimes = modificationTimes.set(idx, offset, this); } + public void setCreationTime(int idx, long value) { + + if (value == 0) { + creationTimes = creationTimes.set(idx, 0, this); + return; + } + + /* + * If this is the first slot with an expiration, initialize the base to + * the value and set the offset (slot value) to one. + */ + if (creationTimesBase == -1 || nEntries == 1) { + creationTimesBase = value; + setCreationTimeOffset(idx, 1); + return; + } + + /* + * Slot's creation time must not be less than the base. If it is, + * decrease the base and increase the offset in other slots + * accordingly. + */ + if (value < creationTimesBase) { + final long adjustment = creationTimesBase - value; + creationTimesBase = value; + + for (int i = 0; i < nEntries; ++i) { + if (i == idx) { + continue; + } + final long offset = creationTimes.get(i); + if (offset == 0) { + continue; + } + setCreationTimeOffset(i, offset + adjustment); + } + } + + setCreationTimeOffset(idx, value - creationTimesBase + 1); + } + + public long getCreationTime(int idx) { + final long offset = creationTimes.get(idx); + + if (offset == 0) { + return 0; + } + return offset - 1 + creationTimesBase; + } + + long getCreationTimesBase() { + return creationTimesBase; + } + + long getCreationTimeOffset(int idx) { + return creationTimes.get(idx); + } + + void setCreationTimesBase(long base) { + creationTimesBase = base; + } + + void setCreationTimeOffset(int idx, long offset) { + creationTimes = creationTimes.set(idx, offset, this); + } + /** * Returns whether the slot is known-deleted, pending-deleted, or expired. 
*/ @@ -685,6 +757,7 @@ void appendEntryFromOtherNode(IN from, int fromIdx) { fromBin.isExpirationInHours()); setModificationTime(idx, fromBin.getModificationTime(fromIdx)); + setCreationTime(idx, fromBin.getCreationTime(fromIdx)); } /** @@ -695,6 +768,7 @@ void copyEntries(int from, int to, int n) { super.copyEntries(from, to, n); vlsnCache = vlsnCache.copy(from, to, n, this); expirationValues = expirationValues.copy(from, to, n, this); + creationTimes = creationTimes.copy(from, to, n, this); modificationTimes = modificationTimes.copy(from, to, n, this); } @@ -707,6 +781,7 @@ void clearEntry(int idx) { setCachedVLSNUnconditional(idx, NULL_VLSN); setExpiration(idx, 0, false); setModificationTime(idx, 0); + setCreationTime(idx, 0); } /* @@ -1228,6 +1303,8 @@ public long compactMemory() { final long oldSize = inMemorySize; super.compactMemory(); expirationValues = expirationValues.compact(this, EMPTY_EXPIRATION); + creationTimes = + creationTimes.compact(this, EMPTY_CREATION_TIMES); modificationTimes = modificationTimes.compact(this, EMPTY_MODIFICATION_TIMES); return oldSize - inMemorySize; @@ -1255,6 +1332,10 @@ public long computeMemorySize() { size += expirationValues.getMemorySize(); } + if (creationTimes != null) { + size += creationTimes.getMemorySize(); + } + if (modificationTimes != null) { size += modificationTimes.getMemorySize(); } @@ -1272,16 +1353,18 @@ protected long printMemorySize() { final long inTotal = super.printMemorySize(); final long vlsnCacheOverhead = vlsnCache.getMemorySize(); final long expirationOverhead = expirationValues.getMemorySize(); + final long createTimeOverhead = creationTimes.getMemorySize(); final long modTimeOverhead = modificationTimes.getMemorySize(); final long binTotal = inTotal + - vlsnCacheOverhead + expirationOverhead + modTimeOverhead; - - System.out.format( - "BIN: %d vlsns: %d expiration: %d" + - " modTimeOverhead: %d %n", - binTotal, vlsnCacheOverhead, expirationOverhead, modTimeOverhead); - + vlsnCacheOverhead + expirationOverhead + modTimeOverhead + + createTimeOverhead; + + System.out.format("BIN: %d vlsns: %d expiration: %d" + + " createTimeOverhead: %d" + + " modTimeOverhead: %d %n", + binTotal, vlsnCacheOverhead, expirationOverhead, + createTimeOverhead, modTimeOverhead); return binTotal; } @@ -1522,7 +1605,7 @@ private void logEvictedLN(final int idx, final LN ln) { final LogItem logItem = ln.log( envImpl, dbImpl, null /*locker*/, null /*writeLockInfo*/, isEmbeddedLN(idx), getKey(idx), - getExpiration(idx), isExpirationInHours(), + getExpiration(idx), isExpirationInHours(), ln.getCreationTime(), ln.getModificationTime(), isTombstone(idx), false /*newBlindDeletion*/, isEmbeddedLN(idx), currLsn, getLastLoggedSize(idx), @@ -1811,6 +1894,7 @@ private void initBINDelta( Node[] targets = null; int[] expiration = null; long[] modTimes = null; + long[] createTimes = null; if (copyTargets) { targets = new Node[nDeltas]; @@ -1824,6 +1908,10 @@ private void initBINDelta( modTimes = new long[nDeltas]; } + if (creationTimesBase != -1) { + createTimes = new long[nDeltas]; + } + int j = 0; for (int i = 0; i < getNEntries(); i += 1) { @@ -1850,6 +1938,9 @@ private void initBINDelta( modTimes[j] = getModificationTime(i); } + if (createTimes != null) { + createTimes[j] = getCreationTime(i); + } j += 1; } @@ -1865,6 +1956,7 @@ private void initBINDelta( states, keyPrefix, keys, targets, sizes, vlsns, expiration, isExpirationInHours(), + createTimes, modTimes); destBIN.setBINDelta(true); @@ -1889,6 +1981,7 @@ private void resetContent( final long[] 
vlsns, final int[] expiration, final boolean expirationInHours, + final long[] createTimes, final long[] modTimes) { updateRepCacheStats(false); @@ -1904,7 +1997,8 @@ private void resetContent( expirationBase = -1; modificationTimes = EMPTY_MODIFICATION_TIMES; modificationTimesBase = -1; - + creationTimes = EMPTY_CREATION_TIMES; + creationTimesBase = -1; updateRepCacheStats(true); entryStates = new byte[capacity]; @@ -1927,6 +2021,9 @@ private void resetContent( setModificationTime(i, modTimes[i]); } + if (createTimes != null) { + setCreationTime(i, createTimes[i]); + } setLastLoggedSizeUnconditional(i, loggedSizes[i]); setCachedVLSNUnconditional(i, vlsns[i]); } @@ -2142,6 +2239,9 @@ private void resetContent(final BIN other) { expirationBase = other.expirationBase; setExpirationInHours(other.isExpirationInHours()); + creationTimes = other.creationTimes; + creationTimesBase = other.creationTimesBase; + modificationTimes = other.modificationTimes; modificationTimesBase = other.modificationTimesBase; @@ -2183,6 +2283,8 @@ private void resize(final int newCapacity) { lastLoggedSizes = lastLoggedSizes.resize(newCapacity); expirationValues = expirationValues.resize(newCapacity); modificationTimes = modificationTimes.resize(newCapacity); + creationTimes = creationTimes.resize(newCapacity); + vlsnCache = vlsnCache.resize(newCapacity); updateMemorySize(inMemorySize, computeMemorySize()); @@ -2297,6 +2399,7 @@ public void reconstituteBIN( getLastLoggedSize(i), getCachedVLSN(i), getTarget(i), getExpiration(i), isExpirationInHours(), + getCreationTime(i), getModificationTime(i)); } @@ -2325,6 +2428,7 @@ void applyDelta( final Node child, final int expiration, final boolean expirationInHours, + final long creationTime, final long modificationTime) { /* @@ -2370,6 +2474,7 @@ void applyDelta( setCachedVLSNUnconditional(foundIndex, vlsn); setExpiration(foundIndex, expiration, expirationInHours); + setCreationTime(foundIndex, creationTime); setModificationTime(foundIndex, modificationTime); } diff --git a/kvmain/src/main/java/com/sleepycat/je/tree/IN.java b/kvmain/src/main/java/com/sleepycat/je/tree/IN.java old mode 100644 new mode 100755 index aab86ba6..98e4ff53 --- a/kvmain/src/main/java/com/sleepycat/je/tree/IN.java +++ b/kvmain/src/main/java/com/sleepycat/je/tree/IN.java @@ -29,6 +29,7 @@ import com.sleepycat.je.DatabaseException; import com.sleepycat.je.EnvironmentFailureException; import com.sleepycat.je.VerifyError; +import com.sleepycat.je.WriteOptions; import com.sleepycat.je.cleaner.PackedObsoleteInfo; import com.sleepycat.je.dbi.DatabaseId; import com.sleepycat.je.dbi.DatabaseImpl; @@ -898,6 +899,7 @@ public final boolean equals(Object obj) { /** * Sort based on equality key. */ + @Override public final int compareTo(IN argIN) { long argNodeId = argIN.getNodeId(); long myNodeId = getNodeId(); @@ -1928,7 +1930,7 @@ private boolean checkKeyOrder(byte[] key, int idx) { } /** - * + * * See KVSTORE-1323. */ private void updateChildKeys() { @@ -3633,6 +3635,7 @@ public final void insertRecord( byte[] newData, int expiration, boolean expirationInHours, + long creationTime, long modificationTime, boolean tombstone) { @@ -3668,6 +3671,8 @@ public final void insertRecord( bin.setExpiration(idx, expiration, expirationInHours); bin.setModificationTime(idx, embeddedData ? modificationTime : 0); + bin.setCreationTime(idx, embeddedData ?
creationTime : 0); + bin.setTombstone(idx, tombstone); if (multiSlotChange) { @@ -3721,6 +3726,7 @@ public final void updateRecord( byte[] newEmbData, int expiration, boolean expirationInHours, + long creationTime, long modificationTime, boolean tombstone) { @@ -3753,6 +3759,9 @@ public final void updateRecord( bin.setModificationTime( idx, (newEmbData != null) ? modificationTime : 0); + bin.setCreationTime( + idx, (newEmbData != null) ? creationTime : 0); + bin.setTombstone(idx, tombstone); if (multiSlotChange) { @@ -3812,6 +3821,7 @@ public final void deleteRecord( } bin.setModificationTime(idx, 0); + bin.setCreationTime(idx, 0); bin.setTombstone(idx, false); Node newLN = entryTargets.get(idx); @@ -3879,6 +3889,7 @@ public final void recoverRecord( int logrecSize, int expiration, boolean expirationInHours, + long creationTime, long modificationTime, boolean tombstone) { @@ -3975,6 +3986,7 @@ public final void recoverRecord( bin.setExpiration(idx, expiration, expirationInHours); bin.setModificationTime(idx, (embData != null) ? modificationTime : 0); + bin.setCreationTime(idx, (embData != null) ? creationTime : 0); bin.setTombstone(idx, tombstone); if (multiSlotChange) { @@ -5671,7 +5683,8 @@ public final int getLogSize(boolean deltasOnly) { final int expBase = bin.getExpirationBase(); haveExpiration = (expBase != -1); size += LogUtils.getPackedIntLogSize(expBase); - + final long createTimesBase = bin.getCreationTimesBase(); + size += LogUtils.getPackedLongLogSize(createTimesBase); final long modTimesBase = bin.getModificationTimesBase(); size += LogUtils.getPackedLongLogSize(modTimesBase); } @@ -5731,6 +5744,8 @@ public final int getLogSize(boolean deltasOnly) { } if (isEmbedded) { + size += LogUtils.getPackedLongLogSize( + bin.getCreationTimeOffset(i)); size += LogUtils.getPackedLongLogSize( bin.getModificationTimeOffset(i)); } @@ -5777,7 +5792,8 @@ public final void serialize(ByteBuffer logBuffer, final int expBase = bin.getExpirationBase(); haveExpiration = (expBase != -1); LogUtils.writePackedInt(logBuffer, expBase); - + LogUtils.writePackedLong( + logBuffer, bin.getCreationTimesBase()); LogUtils.writePackedLong( logBuffer, bin.getModificationTimesBase()); } else { @@ -5879,6 +5895,8 @@ assert checkForNullLSN(i) : } if (isEmbedded) { + LogUtils.writePackedLong( + logBuffer, bin.getCreationTimeOffset(i)); LogUtils.writePackedLong( logBuffer, bin.getModificationTimeOffset(i)); } @@ -5974,8 +5992,15 @@ public final void materialize( } if (bin != null && entryVersion >= 19) { + if (entryVersion >= LogEntryType.LOG_VERSION_CREATION_TIME) { + long base = LogUtils.readPackedLong(itemBuffer); + bin.setCreationTimesBase(base); + } long base = LogUtils.readPackedLong(itemBuffer); bin.setModificationTimesBase(base); + if (entryVersion < LogEntryType.LOG_VERSION_CREATION_TIME) { + bin.setCreationTimesBase(base); + } } nodeId = LogUtils.readPackedLong(itemBuffer); @@ -6086,8 +6111,16 @@ public final void materialize( } if (entryVersion >= 19 && isEmbedded) { + if (entryVersion >= LogEntryType.LOG_VERSION_CREATION_TIME) { + bin.setCreationTimeOffset( + i, LogUtils.readPackedLong(itemBuffer)); + } bin.setModificationTimeOffset( i, LogUtils.readPackedLong(itemBuffer)); + if (entryVersion < LogEntryType.LOG_VERSION_CREATION_TIME) { + bin.setCreationTimeOffset( + i , WriteOptions.CREATION_TIME_NOT_SET); + } } } @@ -6164,6 +6197,10 @@ public final void dumpLog(StringBuilder sb, boolean verbose) { sb.append("\" expirationIn=\""); sb.append(bin.isExpirationInHours() ? 
"hours" : "days"); } + if (bin != null && bin.getCreationTimesBase() != -1) { + sb.append("\" baseCreateTime=\""); + sb.append(bin.getCreationTimesBase()); + } if (bin != null && bin.getModificationTimesBase() != -1) { sb.append("\" baseModTime=\""); sb.append(bin.getModificationTimesBase()); @@ -6344,6 +6381,11 @@ private void dumpSlotState(StringBuilder sb, int i, BIN bin) { sb.append(" off:").append(bin.getExpirationOffset(i)); } sb.append("\" tombstone=\"").append(bin.isTombstone(i)); + long createTime = bin.getCreationTime(i); + if (createTime != 0) { + sb.append("\" creationTime=\"") + .append(StatUtils.getDate(createTime)); + } long modTime = bin.getModificationTime(i); if (modTime != 0) { sb.append("\" modTime=\"").append(StatUtils.getDate(modTime)); diff --git a/kvmain/src/main/java/com/sleepycat/je/tree/LN.java b/kvmain/src/main/java/com/sleepycat/je/tree/LN.java old mode 100644 new mode 100755 index 164e950a..91650fda --- a/kvmain/src/main/java/com/sleepycat/je/tree/LN.java +++ b/kvmain/src/main/java/com/sleepycat/je/tree/LN.java @@ -78,6 +78,7 @@ public class LN extends Node implements VersionedWriteLoggable { */ private long vlsnSequence = NULL_VLSN; private long modificationTime = 0; + private long creationTime = 0; /** * Create an empty LN, to be filled in from the log. @@ -253,6 +254,24 @@ public void setModificationTime(long time) { modificationTime = time; } + /** + * Returns the cached creation time that was copied from the + * LNLogEntry. + * @see #creationTime + */ + public long getCreationTime() { + return creationTime; + } + + /** + * Caches a copy of the creation time from the LNLogEntry after a read + * or write. + * @see #creationTime + */ + public void setCreationTime(long time) { + creationTime = time; + } + /* * If you get to an LN, this subtree isn't valid for delete. True, the LN * may have been deleted, but you can't be sure without taking a lock, and @@ -471,6 +490,7 @@ public LogItem log( final byte[] newKey, final int newExpiration, final boolean newExpirationInHours, + final long creationTime, final long newModificationTime, final boolean newTombstone, final boolean newBlindDeletion, @@ -573,6 +593,7 @@ public LogItem log( int abortExpiration = 0; boolean abortExpirationInHours = false; long abortModificationTime = 0; + long abortCreationTime = 0; boolean abortTombstone = false; LogParams params = new LogParams(); @@ -592,6 +613,7 @@ public LogItem log( abortExpiration = writeLockInfo.getAbortExpiration(); abortExpirationInHours = writeLockInfo.isAbortExpirationInHours(); abortModificationTime = writeLockInfo.getAbortModificationTime(); + abortCreationTime = writeLockInfo.getAbortCreationTime(); abortTombstone = writeLockInfo.getAbortTombstone(); params.obsoleteDupsAllowed = locker.isRolledBack(); @@ -624,9 +646,9 @@ public LogItem log( entryType, dbImpl, txn, abortLsn, abortKD, abortKey, abortData, abortVLSN, abortExpiration, abortExpirationInHours, - abortModificationTime, abortTombstone, + abortModificationTime, abortCreationTime, abortTombstone, newKey, newEmbeddedLN, newExpiration, newExpirationInHours, - newModificationTime, newTombstone, newBlindDeletion, + creationTime, newModificationTime, newTombstone, newBlindDeletion, priorSize, priorLsn, repContext, bImgCtx); /* LNs are never provisional. 
*/ @@ -725,6 +747,7 @@ public LogItem log( final byte[] newKey, final int newExpiration, final boolean newExpirationInHours, + final long creationTime, final long newModificationTime, final boolean newTombstone, final boolean newBlindDeletion, @@ -736,7 +759,7 @@ public LogItem log( ReplicationContext repContext) throws DatabaseException { return log(envImpl, dbImpl, locker, writeLockInfo, newEmbeddedLN, - newKey, newExpiration, newExpirationInHours, + newKey, newExpiration, newExpirationInHours, creationTime, newModificationTime, newTombstone, newBlindDeletion, currEmbeddedLN, currLsn, currSize, isInsertion, backgroundIO, repContext, null); @@ -759,11 +782,13 @@ protected LNLogEntry createLogEntry( int abortExpiration, boolean abortExpirationInHours, long abortModificationTime, + long abortCreationTime, boolean abortTombstone, byte[] newKey, boolean newEmbeddedLN, int newExpiration, boolean newExpirationInHours, + long creationTime, long newModificationTime, boolean newTombstone, boolean newBlindDeletion, @@ -776,8 +801,9 @@ protected LNLogEntry createLogEntry( entryType, dbImpl.getId(), txn, abortLsn, abortKD, abortKey, abortData, abortVLSN, abortExpiration, abortExpirationInHours, abortModificationTime, + abortCreationTime, abortTombstone, newKey, this, newEmbeddedLN, - newExpiration, newExpirationInHours, + newExpiration, newExpirationInHours, creationTime, newModificationTime, newTombstone, newBlindDeletion, priorSize, priorLsn, (bImgCtx != null)); } diff --git a/kvmain/src/main/java/com/sleepycat/je/tree/NameLN.java b/kvmain/src/main/java/com/sleepycat/je/tree/NameLN.java old mode 100644 new mode 100755 index df4bd8f1..6c90faad --- a/kvmain/src/main/java/com/sleepycat/je/tree/NameLN.java +++ b/kvmain/src/main/java/com/sleepycat/je/tree/NameLN.java @@ -215,11 +215,13 @@ protected LNLogEntry createLogEntry( int abortExpiration, boolean abortExpirationInHours, long abortModificationTime, + long abortCreationTime, boolean abortTombstone, byte[] newKey, boolean newEmbeddedLN, int newExpiration, boolean newExpirationInHours, + long creationTime, long newModTime, boolean newTombstone, boolean newBlindDeletion, @@ -229,7 +231,8 @@ protected LNLogEntry createLogEntry( BeforeImageContext bImgCtx) { return new NameLNLogEntry( - entryType, dbImpl.getId(), txn, abortLsn, abortKD, abortModificationTime, - newKey, this, newModTime, priorSize, priorLsn, repContext); + entryType, dbImpl.getId(), txn, abortLsn, abortKD, + abortModificationTime, newKey, this, + creationTime, newModTime, priorSize, priorLsn, repContext); } } diff --git a/kvmain/src/main/java/com/sleepycat/je/txn/BasicLocker.java b/kvmain/src/main/java/com/sleepycat/je/txn/BasicLocker.java index ed282126..0584dfa4 100644 --- a/kvmain/src/main/java/com/sleepycat/je/txn/BasicLocker.java +++ b/kvmain/src/main/java/com/sleepycat/je/txn/BasicLocker.java @@ -149,6 +149,11 @@ public boolean isReadCommittedIsolation() { return false; } + @Override + public boolean isOptimisticReadIsolation() { + return false; + } + /** * No transactional locker is available. 
*/ diff --git a/kvmain/src/main/java/com/sleepycat/je/txn/LockResult.java b/kvmain/src/main/java/com/sleepycat/je/txn/LockResult.java index 6973e73f..069dd251 100644 --- a/kvmain/src/main/java/com/sleepycat/je/txn/LockResult.java +++ b/kvmain/src/main/java/com/sleepycat/je/txn/LockResult.java @@ -56,8 +56,10 @@ public void setAbortInfo( int abortExpiration, boolean abortExpirationInHours, long abortModificationTime, + long abortCreationTime, boolean abortTombstone, - DatabaseImpl db) { + DatabaseImpl db, + boolean obsolete) { /* * Do not overwrite abort info if this locker has logged the @@ -72,9 +74,11 @@ public void setAbortInfo( wli.setAbortVLSN(abortVLSN); wli.setAbortExpiration(abortExpiration, abortExpirationInHours); wli.setAbortModificationTime(abortModificationTime); + wli.setAbortCreationTime(abortCreationTime); wli.setAbortTombstone(abortTombstone); wli.setDb(db); } + wli.setObsolete(obsolete || wli.getObsolete()); wli.setNeverLocked(false); } } diff --git a/kvmain/src/main/java/com/sleepycat/je/txn/LockType.java b/kvmain/src/main/java/com/sleepycat/je/txn/LockType.java index 3513ce00..5acd1f9b 100644 --- a/kvmain/src/main/java/com/sleepycat/je/txn/LockType.java +++ b/kvmain/src/main/java/com/sleepycat/je/txn/LockType.java @@ -25,16 +25,23 @@ public class LockType { * below. */ public static final LockType READ = - new LockType(0, false, "READ"); + new LockType(0, false, "READ", false); public static final LockType WRITE = - new LockType(1, true, "WRITE"); + new LockType(1, true, "WRITE", false); + /* + * WRITE_RMW is used for LockMode.RMW and is identical to WRITE + * except its abort LSN is not made obsolete when the transaction + * commits. + */ + public static final LockType WRITE_RMW = + new LockType(1, true, "WRITE_RMW", true); /** * NONE is used for requesting a dirty read and does not appear in the * conflict or upgrade matrices. */ public static final LockType NONE = - new LockType(2, false, "NONE"); + new LockType(2, false, "NONE", false); /** * Lock conflict matrix. @@ -66,17 +73,19 @@ public class LockType { }, }; - private int index; - private boolean write; - private String name; + final private int index; + final private boolean write; + final private String name; + final private boolean rmw; /** * No lock types can be defined outside this class. */ - private LockType(int index, boolean write, String name) { + private LockType(int index, boolean write, String name, boolean rmw) { this.index = index; this.write = write; this.name = name; + this.rmw = rmw; } /** @@ -88,6 +97,13 @@ public final boolean isWriteLock() { return write; } + /** + * Returns if this is a WRITE lock gotten on a read using the LockMode.RMW. + */ + public final boolean isRMW() { + return rmw; + } + /** * Returns the LockConfict that results when this lock type is held and the * given lock type is requested by another locker. diff --git a/kvmain/src/main/java/com/sleepycat/je/txn/Locker.java b/kvmain/src/main/java/com/sleepycat/je/txn/Locker.java index 52478a87..f7e657b3 100644 --- a/kvmain/src/main/java/com/sleepycat/je/txn/Locker.java +++ b/kvmain/src/main/java/com/sleepycat/je/txn/Locker.java @@ -605,6 +605,11 @@ public void disallowReplicaWrite() { */ public abstract boolean isReadCommittedIsolation(); + /** + * Returns whether the isolation level of this locker is optimistic read. + */ + public abstract boolean isOptimisticReadIsolation(); + /** * Returns the underlying Txn if the locker is transactional, or null if * the locker is non-transactional. 
For a Txn-based locker, this method diff --git a/kvmain/src/main/java/com/sleepycat/je/txn/PreparedTxn.java b/kvmain/src/main/java/com/sleepycat/je/txn/PreparedTxn.java deleted file mode 100644 index fe671663..00000000 --- a/kvmain/src/main/java/com/sleepycat/je/txn/PreparedTxn.java +++ /dev/null @@ -1,58 +0,0 @@ -/*- - * Copyright (C) 2002, 2025, Oracle and/or its affiliates. All rights reserved. - * - * This file was distributed by Oracle as part of a version of Oracle NoSQL - * Database made available at: - * - * http://www.oracle.com/technetwork/database/database-technologies/nosqldb/downloads/index.html - * - * Please see the LICENSE file included in the top-level directory of the - * appropriate version of Oracle NoSQL Database for a copy of the license and - * additional information. - */ - -package com.sleepycat.je.txn; - -import com.sleepycat.je.DatabaseException; -import com.sleepycat.je.TransactionConfig; -import com.sleepycat.je.dbi.EnvironmentImpl; -import com.sleepycat.je.log.ReplicationContext; - -/** - * A PreparedTxn is used at recovery for processing a TXN_PREPARE log entry. It - * is provides essentially the same functionality as a TXN but lets the calling - * code set the transaction id. - */ -public class PreparedTxn extends Txn { - - private PreparedTxn(EnvironmentImpl envImpl, - TransactionConfig config, - long mandatedId) - throws DatabaseException { - - super(envImpl, config, ReplicationContext.NO_REPLICATE, mandatedId); - } - - public static PreparedTxn createPreparedTxn(EnvironmentImpl envImpl, - TransactionConfig config, - long mandatedId) - throws DatabaseException { - - PreparedTxn ret = null; - try { - ret = new PreparedTxn(envImpl, config, mandatedId); - } catch (DatabaseException DE) { - ret.close(false); - throw DE; - } - return ret; - } - - /** - * PrepareTxns use the mandated id. - */ - @Override - protected long generateId(TxnManager txnManager, long mandatedId) { - return mandatedId; - } -} diff --git a/kvmain/src/main/java/com/sleepycat/je/txn/Txn.java b/kvmain/src/main/java/com/sleepycat/je/txn/Txn.java index b3209086..538e0d0f 100644 --- a/kvmain/src/main/java/com/sleepycat/je/txn/Txn.java +++ b/kvmain/src/main/java/com/sleepycat/je/txn/Txn.java @@ -224,7 +224,9 @@ public class Txn extends Locker implements VersionedWriteLoggable { private Durability commitDurability; /* Whether to use Read-Committed isolation. */ - private final boolean readCommittedIsolation; + private volatile boolean readCommittedIsolation; + + private volatile boolean optimisticReadIsolation; protected volatile boolean isGroupCommitted = false; @@ -280,8 +282,9 @@ public class Txn extends Locker implements VersionedWriteLoggable { private final boolean readOnly; - private TestHook preCommitExceptionHook; - private TestHook masterTransferHook; + private volatile TestHook preCommitExceptionHook; + private volatile TestHook masterTransferHook; + private volatile TestHook semaphoreHook; /** * Constructor for reading from log. 
@@ -289,6 +292,7 @@ public class Txn extends Locker implements VersionedWriteLoggable { public Txn() { defaultDurability = null; readCommittedIsolation = false; + optimisticReadIsolation = false; repContext = null; readOnly = false; } @@ -321,6 +325,7 @@ protected Txn(EnvironmentImpl envImpl, this.repContext = repContext; readCommittedIsolation = config.getReadCommitted(); + optimisticReadIsolation = config.getOptimisticRead(); if (config.getDurability() == null) { defaultDurability = config.getDefaultDurability(envImpl); } else { @@ -520,6 +525,35 @@ protected LockResult lockInternal(long lsn, (lsn, this, lockType, timeout, useNoWait, jumpAheadOfWaiters, database); + /* TestHook, release semB and acquire semA */ + assert(TestHookExecute.doHookIfSet(semaphoreHook)); + + /* + * DENIED and in optimisticRead mode, get the writeLockInfo from + * writeOwnerLocker. + */ + if (grant == LockGrantType.DENIED && + isOptimisticReadIsolation() && + lockType.equals(LockType.READ)) { + + Locker writeOwnerLocker = lockManager.getWriteOwnerLocker(lsn); + if (writeOwnerLocker != null) { + return new LockResult(grant, + writeOwnerLocker.getWriteLockInfo(lsn)); + } else { + /* + * The blocking writeTxn commits after lockManager.lock(), + * before lockManager.getWriteOwnerLocker(), + * So lockManager.getWriteOwnerLocker() will return null. + * Return LockResult without the WriteLockInfo object, so that + * in cursorImpl.lockLN(), the optimisticRead txn will request + * a blocking lock. + */ + return new LockResult(grant, null); + } + } + + WriteLockInfo info = null; if (writeInfo != null && database != null) { if (grant != LockGrantType.DENIED && lockType.isWriteLock()) { @@ -531,7 +565,7 @@ protected LockResult lockInternal(long lsn, } } - if (readCommittedIsolation && + if ((optimisticReadIsolation || readCommittedIsolation) && grant != LockGrantType.DENIED && !lockType.isWriteLock() && cursor != null) { @@ -545,7 +579,12 @@ protected LockResult lockInternal(long lsn, public synchronized void releaseLock(long lsn, CursorImpl cursor) throws DatabaseException { - if (readCommittedIsolation && !removeCursorLock(lsn, cursor)) { + /* + * read lock obtained in optimisticReadIsolation + * will also have the cursor-lock association, see Txn.lockInternal(). + */ + if ((readCommittedIsolation || optimisticReadIsolation) + && !removeCursorLock(lsn, cursor)) { return; } @@ -1177,7 +1216,8 @@ private boolean isObsoleteLSN(WriteLockInfo info) { ((info.getDb() != null) && info.getDb().isLNImmediatelyObsolete()) || /* Was already counted obsolete during logging. */ - (info.getAbortData() != null)); + (info.getAbortData() != null) || + !info.getObsolete()); } /** @@ -1687,7 +1727,12 @@ protected synchronized void addLock(Long lsn, if (type.isWriteLock()) { ensureWriteInfo(); - writeInfo.put(lsn, new WriteLockInfo()); + + WriteLockInfo info = new WriteLockInfo(); + if (type.isRMW()) { + info.setObsolete(false); + } + writeInfo.put(lsn, info); int delta = WRITE_LOCK_OVERHEAD; @@ -1829,6 +1874,19 @@ public boolean isReadCommittedIsolation() { return readCommittedIsolation; } + @Override + public boolean isOptimisticReadIsolation() { + return optimisticReadIsolation; + } + + public void setReadCommittedIsolation(boolean readCommittedIsolation) { + this.readCommittedIsolation = readCommittedIsolation; + } + + public void setOptimisticReadIsolation(boolean optimisticReadIsolation) { + this.optimisticReadIsolation = optimisticReadIsolation; + } + /** * This is a transactional locker. 
*/ @@ -2625,7 +2683,7 @@ public synchronized void transferCursorLocks(final CursorImpl oldCursor, */ private synchronized void addCursorLock(final long lsn, final CursorImpl cursor) { - assert readCommittedIsolation; + assert readCommittedIsolation || optimisticReadIsolation; assert lsn != NULL_LSN; assert cursor != null; @@ -2679,7 +2737,7 @@ private synchronized boolean removeAllCursorLocks( */ private synchronized boolean removeCursorLock(final long lsn, final CursorImpl cursor) { - assert readCommittedIsolation; + assert readCommittedIsolation || optimisticReadIsolation; assert lsn != NULL_LSN; boolean noneRemaining = true; @@ -2732,4 +2790,8 @@ public void setPreCommitExceptionHook(TestHook hook) { public void setMasterTransferHook(TestHook hook) { masterTransferHook = hook; } + + public void setSemaphoreHook(TestHook hook) { + semaphoreHook = hook; + } } diff --git a/kvmain/src/main/java/com/sleepycat/je/txn/TxnChain.java b/kvmain/src/main/java/com/sleepycat/je/txn/TxnChain.java old mode 100644 new mode 100755 index e5969f94..51e42c20 --- a/kvmain/src/main/java/com/sleepycat/je/txn/TxnChain.java +++ b/kvmain/src/main/java/com/sleepycat/je/txn/TxnChain.java @@ -250,6 +250,7 @@ public TxnChain( currLogrec.getAbortVLSN(), currLogrec.getAbortExpiration(), currLogrec.isAbortExpirationInHours(), + currLogrec.getCreationTime(), currLogrec.getModificationTime(), currLogrec.isTombstone()); @@ -349,6 +350,7 @@ public static class RevertInfo { public long revertVLSN; public int revertExpiration; public boolean revertExpirationInHours; + public long revertCreationTime; public long revertModificationTime; public boolean revertTombstone; @@ -360,6 +362,7 @@ public static class RevertInfo { long revertVLSN, int revertExpiration, boolean revertExpirationInHours, + long revertCreationTime, long revertModificationTime, boolean revertTombstone) { @@ -371,6 +374,7 @@ public static class RevertInfo { this.revertVLSN = revertVLSN; this.revertExpiration = revertExpiration; this.revertExpirationInHours = revertExpirationInHours; + this.revertCreationTime = revertCreationTime; this.revertModificationTime = revertModificationTime; this.revertTombstone = revertTombstone; } @@ -416,6 +420,7 @@ private CompareSlot(DatabaseImpl dbImpl, byte[] key) { this.key = key; } + @Override public int compareTo(CompareSlot other) { int dbCompare = dbImpl.getId().compareTo(other.dbImpl.getId()); if (dbCompare != 0) { diff --git a/kvmain/src/main/java/com/sleepycat/je/txn/WriteLockInfo.java b/kvmain/src/main/java/com/sleepycat/je/txn/WriteLockInfo.java index ab4ae860..a092ae4b 100644 --- a/kvmain/src/main/java/com/sleepycat/je/txn/WriteLockInfo.java +++ b/kvmain/src/main/java/com/sleepycat/je/txn/WriteLockInfo.java @@ -94,6 +94,9 @@ public class WriteLockInfo { /* Abort modification time. */ private long abortModificationTime; + /* Abort creation time. */ + private long abortCreationTime; + /* Abort tombstone property. */ private boolean abortTombstone; @@ -109,10 +112,18 @@ public class WriteLockInfo { */ private boolean neverLocked = true; + /* + * Write locks obtained when doing a read with LockMode.RMW should not + * mark the abortLSN obsolete on transaction commit, because the abortLSN + * is the record being read, which is not obsolete.
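The obsolete flag introduced here records why these write locks are special: per the comment above, a write lock taken by a read with LockMode.RMW must not cause the abort LSN to be counted obsolete at commit, since that LSN is the version that was merely read. A hedged application-level sketch of the read-modify-write pattern this concerns (the helper class, key layout, and value encoding are assumptions, not part of this patch):

import java.nio.ByteBuffer;

import com.sleepycat.je.Database;
import com.sleepycat.je.DatabaseEntry;
import com.sleepycat.je.Environment;
import com.sleepycat.je.LockMode;
import com.sleepycat.je.OperationStatus;
import com.sleepycat.je.Transaction;

public class ReadModifyWrite {
    /* Reads a counter under RMW and writes it back in the same transaction. */
    static void increment(Environment env, Database db, byte[] keyBytes) {
        final Transaction txn = env.beginTransaction(null, null);
        try {
            final DatabaseEntry key = new DatabaseEntry(keyBytes);
            final DatabaseEntry data = new DatabaseEntry();
            /* RMW acquires the write lock at read time to avoid a later upgrade. */
            if (db.get(txn, key, data, LockMode.RMW) == OperationStatus.SUCCESS) {
                final long counter = ByteBuffer.wrap(data.getData()).getLong() + 1;
                data.setData(ByteBuffer.allocate(8).putLong(counter).array());
                db.put(txn, key, data);
            }
            txn.commit();
        } catch (RuntimeException e) {
            txn.abort();
            throw e;
        }
    }
}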
+ */ + private boolean obsolete; + static final WriteLockInfo basicWriteLockInfo = new WriteLockInfo(); // public for Sizeof public WriteLockInfo() { + obsolete = true; } public boolean getAbortKnownDeleted() { @@ -183,6 +194,14 @@ public void setAbortModificationTime(long v) { abortModificationTime = v; } + public long getAbortCreationTime() { + return abortCreationTime; + } + + public void setAbortCreationTime(long v) { + abortCreationTime = v; + } + public boolean getAbortTombstone() { return abortTombstone; } @@ -207,6 +226,14 @@ public void setNeverLocked(boolean neverLocked) { this.neverLocked = neverLocked; } + public boolean getObsolete() { + return obsolete; + } + + public void setObsolete(boolean obsolete) { + this.obsolete = obsolete; + } + /* * Copy all the information needed to create a clone of the lock. */ @@ -219,9 +246,11 @@ public void copyAllInfo(WriteLockInfo source) { abortLogSize = source.abortLogSize; abortExpiration = source.abortExpiration; abortModificationTime = source.abortModificationTime; + abortCreationTime = source.abortCreationTime; abortTombstone = source.abortTombstone; db = source.db; neverLocked = source.neverLocked; + obsolete = source.obsolete; } @Override @@ -236,7 +265,9 @@ public String toString() { " abortExpiration=" + getAbortExpiration() + " abortExpirationInHours=" + isAbortExpirationInHours() + " abortModificationTime=" + getAbortModificationTime() + + " abortCreationTime=" + getAbortCreationTime() + " abortTombstone=" + getAbortTombstone() + - " neverLocked=" + neverLocked; + " neverLocked=" + neverLocked + + " obsolete=" + obsolete; } } diff --git a/kvmain/src/main/java/com/sleepycat/je/util/TimeSupplier.java b/kvmain/src/main/java/com/sleepycat/je/util/TimeSupplier.java old mode 100644 new mode 100755 index 39ab3ebe..cefa838a --- a/kvmain/src/main/java/com/sleepycat/je/util/TimeSupplier.java +++ b/kvmain/src/main/java/com/sleepycat/je/util/TimeSupplier.java @@ -13,9 +13,13 @@ package com.sleepycat.je.util; +import java.text.SimpleDateFormat; import java.time.Clock; import java.time.Duration; import java.time.Instant; +import java.time.temporal.ChronoUnit; +import java.util.Date; +import java.util.TimeZone; import java.util.concurrent.TimeUnit; /* @@ -61,6 +65,32 @@ public static Instant now() { return clock.instant(); } + public static String formatCurrentTimeToDate() { + final Date date = new Date(); + date.setTime(TimeSupplier.currentTimeMillis()); + final TimeZone tz = TimeZone.getTimeZone("UTC"); + final SimpleDateFormat df = + new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS z"); + df.setTimeZone(tz); + return df.format(date); + } + + public static String formatCurrentTimeToDate(long time) { + final Date date = new Date(); + date.setTime(time); + final TimeZone tz = TimeZone.getTimeZone("UTC"); + final SimpleDateFormat df = + new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS z"); + df.setTimeZone(tz); + return df.format(date); + } + + //as we store the ttl in hours granularity since epoch + public static long hoursSinceEpoch() { + return ChronoUnit.HOURS.between(Instant.EPOCH, + clock.instant()); + } + public static void flashBack(long amount, TimeUnit unit) { setClockOffset(-amount, unit); } diff --git a/kvmain/src/main/java/com/sleepycat/je/util/verify/BtreeVerifier.java b/kvmain/src/main/java/com/sleepycat/je/util/verify/BtreeVerifier.java index b44831e8..45b5f218 100644 --- a/kvmain/src/main/java/com/sleepycat/je/util/verify/BtreeVerifier.java +++ b/kvmain/src/main/java/com/sleepycat/je/util/verify/BtreeVerifier.java @@ -737,6 +737,7 @@ 
private boolean verifyBatch( if (checkPriToSecRefs && !result.isTombstone()) { verifyPriToSecRefs( priDb, cursor, keyEntry, dataEntry, + result.getCreationTime(), result.getModificationTime(), result.getExpirationTime(), result.getStorageSize(), @@ -1689,6 +1690,7 @@ private void verifyPriToSecRefs( final Cursor cursor, final DatabaseEntry key, final DatabaseEntry data, + final long creationTime, final long modificationTime, final long expirationTime, final int storageSize, @@ -1756,7 +1758,7 @@ private void verifyPriToSecRefs( } if (!checkSecondaryKeysExist( - priDb, secDb, key, data, + priDb, secDb, key, data, creationTime, modificationTime, expirationTime, storageSize, dbCache, secAssoc, DbInternal.getCursorImpl(cursor))) { @@ -1780,6 +1782,7 @@ private boolean checkSecondaryKeysExist( final SecondaryDatabase secDb, final DatabaseEntry priKey, final DatabaseEntry priData, + final long creationTime, final long modificationTime, final long expirationTime, final int storageSize, @@ -1818,7 +1821,7 @@ private boolean checkSecondaryKeysExist( DatabaseEntry secKey = new DatabaseEntry(); if (!keyCreator.createSecondaryKey( - secDb, priKey, priData, + secDb, priKey, priData, creationTime, modificationTime, expirationTime, storageSize, secKey)) { /* This primary record has no secondary keys. */ return true; @@ -1832,6 +1835,7 @@ private boolean checkSecondaryKeysExist( final Set secKeys = new HashSet<>(); multiKeyCreator.createSecondaryKeys(secDb, priKey, priData, + creationTime, modificationTime, expirationTime, storageSize, diff --git a/kvmain/src/main/java/com/sleepycat/je/utilint/TestHook.java b/kvmain/src/main/java/com/sleepycat/je/utilint/TestHook.java index b072669b..d2c477d2 100644 --- a/kvmain/src/main/java/com/sleepycat/je/utilint/TestHook.java +++ b/kvmain/src/main/java/com/sleepycat/je/utilint/TestHook.java @@ -39,6 +39,10 @@ default void doIOHook() throws IOException { throw new UnsupportedOperationException(); } + default void doIOHook(T obj) throws IOException { + throw new UnsupportedOperationException(); + } + default void doHook() { throw new UnsupportedOperationException(); } diff --git a/kvmain/src/main/java/com/sleepycat/je/utilint/TestHookAdapter.java b/kvmain/src/main/java/com/sleepycat/je/utilint/TestHookAdapter.java index dd50c333..6beb7223 100644 --- a/kvmain/src/main/java/com/sleepycat/je/utilint/TestHookAdapter.java +++ b/kvmain/src/main/java/com/sleepycat/je/utilint/TestHookAdapter.java @@ -27,6 +27,11 @@ public void doIOHook() throws IOException { throw new UnsupportedOperationException(); } + @Override + public void doIOHook(T obj) throws IOException { + throw new UnsupportedOperationException(); + } + @Override public void doExceptionHook() throws Exception { throw new UnsupportedOperationException(); diff --git a/kvmain/src/main/java/com/sleepycat/je/utilint/TestHookExecute.java b/kvmain/src/main/java/com/sleepycat/je/utilint/TestHookExecute.java index 76bf5849..3871e767 100644 --- a/kvmain/src/main/java/com/sleepycat/je/utilint/TestHookExecute.java +++ b/kvmain/src/main/java/com/sleepycat/je/utilint/TestHookExecute.java @@ -38,6 +38,15 @@ public static boolean doIOHookIfSet(TestHook testHook) return true; } + public static boolean doIOHookIfSet(TestHook testHook, T obj) + throws IOException { + + if (testHook != null) { + testHook.doIOHook(obj); + } + return true; + } + public static boolean doHookIfSet(TestHook testHook) { if (testHook != null) { testHook.doHook(); diff --git a/kvmain/src/main/java/oracle/kv/KVStoreFactory.java 
b/kvmain/src/main/java/oracle/kv/KVStoreFactory.java index 918c3c0b..27054d4e 100644 --- a/kvmain/src/main/java/oracle/kv/KVStoreFactory.java +++ b/kvmain/src/main/java/oracle/kv/KVStoreFactory.java @@ -335,7 +335,7 @@ private static KVStore getStoreInternal(KVStoreConfig config, public static int getEndpointGroupNumThreads() { final int numThreads = Integer.getInteger( ENDPOINT_GROUP_NUM_THREADS_PROPERTY, - 2 * Runtime.getRuntime().availableProcessors()); + Runtime.getRuntime().availableProcessors()); if (numThreads < 1) { throw new IllegalStateException( "The " + ENDPOINT_GROUP_NUM_THREADS_PROPERTY + diff --git a/kvmain/src/main/java/oracle/kv/KVVersion.java b/kvmain/src/main/java/oracle/kv/KVVersion.java index 0eef6e8a..df9507ad 100644 --- a/kvmain/src/main/java/oracle/kv/KVVersion.java +++ b/kvmain/src/main/java/oracle/kv/KVVersion.java @@ -208,7 +208,13 @@ public class KVVersion implements Comparable, Serializable { new KVVersion(24, 4, 4, null); /* R24.4 11/2024 */ /** @hidden */ public static final KVVersion R25_1 = - new KVVersion(25, 1, 13, null); /* R25.1 4/2025, prerequisite: 22.3 */ + new KVVersion(25, 1, 10, null); /* R25.1 4/2025, prerequisite: 22.3 */ + /** @hidden */ + public static final KVVersion R25_2 = + new KVVersion(25, 2, 0, null); /* R25.2 (for cloud) prerequisite: 22.3 */ + /** @hidden */ + public static final KVVersion R25_3 = + new KVVersion(25, 3, 21, null); /* R25.3 8/2025 */ /** * The current software version. @@ -218,7 +224,7 @@ public class KVVersion implements Comparable, Serializable { * WHEN YOU BUMP THIS VERSION, BE SURE TO BUMP THE VERSIONS IN * misc/rpm/*.spec. */ - R25_1; + R25_3; /** * The current prerequisite version. Nodes can only join the cluster if diff --git a/kvmain/src/main/java/oracle/kv/KeyValueVersion.java b/kvmain/src/main/java/oracle/kv/KeyValueVersion.java index 6e2c3576..0169af71 100644 --- a/kvmain/src/main/java/oracle/kv/KeyValueVersion.java +++ b/kvmain/src/main/java/oracle/kv/KeyValueVersion.java @@ -26,6 +26,7 @@ public class KeyValueVersion { private final Key key; private final Value value; private final Version version; + private final long creationTime; private final long modificationTime; /** @@ -42,6 +43,7 @@ public KeyValueVersion(final Key key, this.key = key; this.value = value; this.version = version; + this.creationTime = 0; this.modificationTime = 0; } @@ -54,6 +56,7 @@ public KeyValueVersion(final Key key, public KeyValueVersion(final Key key, final Value value, final Version version, + final long creationTime, final long modificationTime) { assert key != null; assert value != null; @@ -61,6 +64,7 @@ public KeyValueVersion(final Key key, this.key = key; this.value = value; this.version = version; + this.creationTime = creationTime; this.modificationTime = modificationTime; } @@ -76,6 +80,7 @@ public KeyValueVersion(final Key key, this.key = key; this.value = value; this.version = null; + this.creationTime = 0; this.modificationTime = 0; } @@ -112,6 +117,25 @@ public long getExpirationTime() { return 0L; } + /** + * Returns the creation time of the record. This method throws an + * {@link UnsupportedOperationException} for records created when the store + * was running at a version earlier than version 25.2, or if the creation + * time is not available because the return value was not requested. + * + * @return the creation time + * @hidden make it public when this method is added to other non-java + * drivers. 
+ * @since 25.3 + */ + public long getCreationTime() { + if (creationTime <= 0) { + throw new UnsupportedOperationException("Creation " + + "time is not available."); + } + return creationTime; + } + /** * Returns the modification time of the record. This method throws an * {@link UnsupportedOperationException} for records created when the store @@ -137,4 +161,4 @@ public long getModificationTime() { public String toString() { return key.toString() + ' ' + value + ' ' + version; } -} +} \ No newline at end of file diff --git a/kvmain/src/main/java/oracle/kv/OperationResult.java b/kvmain/src/main/java/oracle/kv/OperationResult.java index 294e3c99..fb7c9ba9 100644 --- a/kvmain/src/main/java/oracle/kv/OperationResult.java +++ b/kvmain/src/main/java/oracle/kv/OperationResult.java @@ -124,6 +124,16 @@ public interface OperationResult { */ FieldValue getGeneratedValue(); + + /** + * Internal use only + * @hidden + * + * For a put or delete operation, returns the creation time of the + * previous record in milliseconds. + */ + long getPreviousCreationTime(); + /** * Internal use only * @hidden @@ -133,6 +143,16 @@ public interface OperationResult { */ long getPreviousModificationTime(); + + /** + * Internal use only + * @hidden + * + * For a put or delete operation, returns the creation time of the + * record in milliseconds. + */ + long getNewCreationTime(); + /** * Internal use only * @hidden diff --git a/kvmain/src/main/java/oracle/kv/Value.java b/kvmain/src/main/java/oracle/kv/Value.java index b967babb..5effa285 100644 --- a/kvmain/src/main/java/oracle/kv/Value.java +++ b/kvmain/src/main/java/oracle/kv/Value.java @@ -23,14 +23,18 @@ import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.util.Arrays; +import java.util.BitSet; +import com.sleepycat.util.PackedInteger; import oracle.kv.impl.api.table.Region; +import oracle.kv.impl.api.table.RowImpl; +import oracle.kv.impl.api.table.TableJsonUtils; import oracle.kv.impl.util.FastExternalizable; +import oracle.kv.impl.util.SerialVersion; import oracle.kv.impl.util.SerializationUtil; -import com.sleepycat.util.PackedInteger; - /** * The Value in a Key/Value store. * @@ -117,7 +121,21 @@ public enum Format implements FastExternalizable { * information for multi-region table, including region id and * tombstone. 
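KeyValueVersion.getCreationTime added above follows the same contract as getModificationTime: it throws UnsupportedOperationException when no creation time is available for the record. A caller-side sketch that reads it defensively while scanning a store (the helper class and the scan itself are illustrative; only getCreationTime comes from this patch):

import java.util.Iterator;

import oracle.kv.Direction;
import oracle.kv.KVStore;
import oracle.kv.KeyValueVersion;

public class CreationTimeScan {
    /* Prints creation times where available and skips records without one. */
    static void scan(KVStore store) {
        final Iterator<KeyValueVersion> iter =
            store.storeIterator(Direction.UNORDERED, 0 /* default batch size */);
        while (iter.hasNext()) {
            final KeyValueVersion kvv = iter.next();
            try {
                System.out.println(kvv.getKey() + " created at " +
                                   kvv.getCreationTime());
            } catch (UnsupportedOperationException e) {
                /* Record predates creation-time support or the time was not returned. */
            }
        }
    }
}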
*/ - MULTI_REGION_TABLE(4); + MULTI_REGION_TABLE(4), + + /** + * Format that contains: + * - 1st byte: format version + * - 2nd byte: bitset + * - bit 0: 1 if row has regionId, 0 otherwise + * - bit 1: 1 if row has row-metadata, 0 otherwise + * - bits 2-7: unused + * - if it has regionId next bytes are a packed int + * - if it has metadata next bytes are the metadata string length and string + * - rest bytes are the row data + */ + TABLE_V5(5); + private static final Format[] VALUES = values(); public static Format valueOf(int ordinal) { @@ -181,16 +199,21 @@ public static Format fromFirstByte(int firstByte) { public static boolean isTableFormat(Format format) { int ordinal = format.ordinal(); return ordinal >= Format.TABLE.ordinal() && - ordinal <= Format.MULTI_REGION_TABLE.ordinal(); + ordinal <= Format.TABLE_V5.ordinal(); } public static boolean isTableFormat(int firstByte) { int ordinal = firstByte + 1; return ordinal >= Format.TABLE.ordinal() && - ordinal <= Format.MULTI_REGION_TABLE.ordinal(); + ordinal <= Format.TABLE_V5.ordinal(); } } + private static int TABLEV5_REGIONID_BIT = 0; + private static int TABLEV5_ROWMETADATA_BIT = 1; + // private static int TABLEV5_EXTRABYTE_BIT = 7; + /* NOTE: When adding the 8th new property, must add another byte !!! */ + /** * An instance that represents an empty value for key-only records. */ @@ -199,14 +222,16 @@ public static boolean isTableFormat(int firstByte) { private final byte[] val; private final Format format; private final int regionId; + private final String rowMetadata; private Value(byte[] val, Format format) { - this(val, format, Region.NULL_REGION_ID); + this(val, format, Region.NULL_REGION_ID, null); } private Value(byte[] val, Format format, - int regionId) { + int regionId, + String rowMetadata) { checkNull("val", val); checkNull("format", format); if ((format == Format.MULTI_REGION_TABLE) && @@ -215,7 +240,7 @@ private Value(byte[] val, "The region id cannot be " + Region.NULL_REGION_ID + " for multi-region table"); } - if ((format != Format.MULTI_REGION_TABLE) && + if ((format.ordinal() < Format.MULTI_REGION_TABLE.ordinal()) && (regionId != Region.NULL_REGION_ID)) { throw new IllegalArgumentException( "The region id must be " + Region.NULL_REGION_ID + @@ -225,9 +250,17 @@ private Value(byte[] val, throw new IllegalArgumentException( "Illegal region ID: " + regionId); } + if (rowMetadata != null && format != Format.TABLE_V5) { + throw new IllegalArgumentException("Format must be " + + Format.TABLE_V5 + " for non-null metadata. Format: " + format + " rmtd: " + rowMetadata); + } this.val = val; this.format = format; this.regionId = regionId; + if (rowMetadata != null) { + TableJsonUtils.validateJsonConstruct(rowMetadata); + } + this.rowMetadata = rowMetadata; } /** @@ -254,16 +287,46 @@ public Value(DataInput in, short serialVersion) * Both NONE and TABLE formats skip the first byte. 
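The TABLE_V5 layout described above is: one format byte (the enum ordinal minus one), one bitset byte (bit 0 set when a region id is present, bit 1 set when row metadata is present), an optional packed-int region id, an optional packed-int byte length followed by UTF-8 row metadata, and then the row payload. A standalone decoding sketch over a raw byte[] (an illustrative holder class, mirroring rather than replacing the parsing code that follows):

import java.nio.charset.StandardCharsets;
import java.util.BitSet;

import com.sleepycat.util.PackedInteger;

public class TableV5Layout {
    /* Decoded pieces of a TABLE_V5-encoded value. */
    public int regionId;       /* left at 0 when absent */
    public String rowMetadata; /* null when absent */
    public byte[] payload;

    public static TableV5Layout parse(byte[] bytes) {
        final TableV5Layout out = new TableV5Layout();
        /* bytes[0] is the format byte; bytes[1] is the option bitset. */
        final BitSet options = BitSet.valueOf(new byte[] {bytes[1]});
        int pos = 2;
        if (options.get(0)) {                 /* region id present */
            out.regionId = PackedInteger.readInt(bytes, pos);
            pos += PackedInteger.getReadIntLength(bytes, pos);
        }
        if (options.get(1)) {                 /* row metadata present */
            final int mdLen = PackedInteger.readInt(bytes, pos);
            pos += PackedInteger.getReadIntLength(bytes, pos);
            out.rowMetadata = new String(bytes, pos, mdLen, StandardCharsets.UTF_8);
            pos += mdLen;
        }
        out.payload = new byte[bytes.length - pos];
        System.arraycopy(bytes, pos, out.payload, 0, out.payload.length);
        return out;
    }
}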
*/ if (format == Format.NONE || Format.isTableFormat(format)) { - if (format == Format.MULTI_REGION_TABLE) { + if (format == Format.TABLE_V5) { + byte bitsetByte = in.readByte(); + BitSet options = BitSet.valueOf(new byte[] { bitsetByte }); + int alreadyRead = 2; + + if (options.get(TABLEV5_REGIONID_BIT)) { // contains multi-region + regionId = readPackedInt(in); + alreadyRead += PackedInteger.getWriteIntLength(regionId); + } else { + regionId = Region.NULL_REGION_ID; + } + + if (options.get(TABLEV5_ROWMETADATA_BIT)) { // contains metadata + int metadataLen = readPackedInt(in); + byte[] mdba = new byte[metadataLen]; + in.readFully(mdba, 0, metadataLen); + rowMetadata = new String(mdba, StandardCharsets.UTF_8); + // rowMetadata should have been checked before serialization + // it is valid JSON Object + // assert TableJsonUtils.validateJsonObject(rowMetadata); + alreadyRead += PackedInteger.getWriteIntLength(metadataLen) + metadataLen; + } else { + rowMetadata = null; + } + + val = new byte[len - (alreadyRead)]; + + } else if (format == Format.MULTI_REGION_TABLE) { /* read compressed region id. */ regionId = readPackedInt(in); final int regionIdLen = PackedInteger.getWriteIntLength(regionId); val = new byte[len - (regionIdLen + 1)]; + rowMetadata = null; } else { - this.regionId = 0; + this.regionId = Region.NULL_REGION_ID; + rowMetadata = null; val = new byte[len - 1]; } + in.readFully(val); return; } @@ -273,6 +336,8 @@ public Value(DataInput in, short serialVersion) * record's schema ID. */ regionId = Region.NULL_REGION_ID; + // null value means there is no row-metadata set for this Value + rowMetadata = null; val = new byte[len]; val[0] = (byte) firstByte; in.readFully(val, 1, len - 1); @@ -317,7 +382,7 @@ public static byte[] readFastExternal(DataInput in, @Override public void writeFastExternal(DataOutput out, short serialVersion) throws IOException { - final int prefixLength; + int prefixLength; switch (format) { case AVRO: prefixLength = 0; @@ -331,6 +396,25 @@ public void writeFastExternal(DataOutput out, short serialVersion) final int regionIdLen = PackedInteger.getWriteIntLength(regionId); prefixLength = regionIdLen + 1; break; + case TABLE_V5: + if (serialVersion < SerialVersion.ROW_METADATA_VERSION) { + throw new IllegalArgumentException("Serial version " + + serialVersion + " does not support setting row metadata, " + + "must be " + SerialVersion.ROW_METADATA_VERSION + " or greater"); + } + prefixLength = 2; // format and bitset + + if (regionId != Region.NULL_REGION_ID) { + final int regionIdLen2 = PackedInteger.getWriteIntLength(regionId); + prefixLength += regionIdLen2; + } + if (rowMetadata != null) { + byte[] mdba = rowMetadata.getBytes(StandardCharsets.UTF_8); + final int metadataLenLen = PackedInteger.getWriteIntLength( + rowMetadata.length()); + prefixLength += metadataLenLen + mdba.length; + } + break; default: throw new AssertionError(); } @@ -342,6 +426,19 @@ public void writeFastExternal(DataOutput out, short serialVersion) if (format == Format.MULTI_REGION_TABLE) { /* write the compressed region id. */ writePackedInt(out, regionId); + } else if (format == Format.TABLE_V5) { + BitSet options = new BitSet(8); + options.set(TABLEV5_REGIONID_BIT, regionId != Region.NULL_REGION_ID); // has regionId + options.set(TABLEV5_ROWMETADATA_BIT, rowMetadata != null); // has metadata + out.write(options.isEmpty() ? 
new byte[]{0} : options.toByteArray()); + if (regionId != Region.NULL_REGION_ID) { + writePackedInt(out, regionId); + } + if (rowMetadata != null) { + byte[] mdba = rowMetadata.getBytes(StandardCharsets.UTF_8); + writePackedInt(out, mdba.length); + out.write(mdba); + } } } out.write(val); @@ -398,7 +495,46 @@ public byte[] toByteArray() { if (format == Format.NONE || Format.isTableFormat(format)) { final byte[] bytes; - if (format == Format.MULTI_REGION_TABLE) { + if (format == Format.TABLE_V5) { + + int prefixLength = 2; // format and bitset + + if (regionId != Region.NULL_REGION_ID) { + final int regionIdLen2 = PackedInteger.getWriteIntLength(regionId); + prefixLength += regionIdLen2; + } + if (rowMetadata != null) { + byte[] mdba = rowMetadata.getBytes(StandardCharsets.UTF_8); + final int metadataLenLen = PackedInteger.getWriteIntLength( + rowMetadata.length()); + prefixLength += metadataLenLen + mdba.length; + } + + bytes = new byte[prefixLength + val.length]; + + bytes[0] = (byte)(format.ordinal() - 1); + int alreadyWritten = 1; + + BitSet options = new BitSet(8); + options.set(TABLEV5_REGIONID_BIT, regionId != Region.NULL_REGION_ID); // has regionId + options.set(TABLEV5_ROWMETADATA_BIT, rowMetadata != null); // has metadata + byte[] bitsetBytes = options.isEmpty() ? new byte[]{0} : options.toByteArray(); + System.arraycopy(bitsetBytes, 0, bytes, 1, bitsetBytes.length); + alreadyWritten += bitsetBytes.length; + if (regionId != Region.NULL_REGION_ID) { + PackedInteger.writeInt(bytes, alreadyWritten, regionId); + alreadyWritten += PackedInteger.getWriteIntLength(regionId); + } + if (rowMetadata != null) { + byte[] mdba = rowMetadata.getBytes(StandardCharsets.UTF_8); + PackedInteger.writeInt(bytes, alreadyWritten, mdba.length); + alreadyWritten += PackedInteger.getWriteIntLength(mdba.length); + System.arraycopy(mdba, 0, bytes, alreadyWritten, mdba.length); + alreadyWritten += mdba.length; + } + System.arraycopy(val, 0, bytes, alreadyWritten, val.length); + + } else if (format == Format.MULTI_REGION_TABLE) { final int regionIdLen = PackedInteger.getWriteIntLength(regionId); bytes = new byte[val.length + regionIdLen + 1]; @@ -448,22 +584,48 @@ public static Value fromByteArray(byte[] bytes) { final Format format = Format.fromFirstByte(bytes[0]); + if (format == Format.AVRO) { + return new Value(bytes, format); + } + + if (format == Format.NONE || Format.MULTI_REGION_TABLE.compareTo(format) > 0) { + final byte[] val = new byte[bytes.length - 1]; + System.arraycopy(bytes, 1, val, 0, val.length); + return new Value(val, format); + } + if (format == Format.MULTI_REGION_TABLE) { final int regionIdLen = PackedInteger.getReadIntLength(bytes, 1); final int regionId = PackedInteger.readInt(bytes, 1); final byte[] val = new byte[bytes.length - regionIdLen - 1]; System.arraycopy(bytes, regionIdLen + 1, val, 0, val.length); - return new Value(val, format, regionId); + return new Value(val, format, regionId, null); } - if (format == Format.NONE || Format.isTableFormat(format)) { - final byte[] val = new byte[bytes.length - 1]; - System.arraycopy(bytes, 1, val, 0, val.length); - return new Value(val, format); + if (format == Format.TABLE_V5) { + int regionId = Region.NULL_REGION_ID; + String metadata = null; + BitSet options = BitSet.valueOf(new byte[] {bytes[1]}); + int alreadyRead = 2; + + if (options.get(TABLEV5_REGIONID_BIT)) { // contains multi-region + regionId = PackedInteger.readInt(bytes, alreadyRead); + alreadyRead += PackedInteger.getWriteIntLength(regionId); + } + + if 
(options.get(TABLEV5_ROWMETADATA_BIT)) { // contains metadata + int metadataLen = PackedInteger.readInt(bytes, alreadyRead); + alreadyRead += PackedInteger.getWriteIntLength(metadataLen); + metadata = new String(bytes, alreadyRead, metadataLen, StandardCharsets.UTF_8); + alreadyRead += metadataLen; + } + final byte[] val = new byte[bytes.length - alreadyRead]; + System.arraycopy(bytes, alreadyRead, val, 0, val.length); + return new Value(val, format, regionId, metadata); } - final byte[] val = bytes; - return new Value(val, format); + + throw new IllegalStateException("Unknown format: " + format); } /** @@ -479,28 +641,25 @@ public static Value createValue(byte[] val) { * For internal use only. * @hidden * - * Creates a value with a given format. + * Creates a value with a given format and region */ - public static Value internalCreateValue(byte[] val, Format format) { - /* - * Create a value with local region id for multi_region tables, or - * null region id for local tables. */ - if (format == Format.MULTI_REGION_TABLE) { - return new Value(val, format, Region.LOCAL_REGION_ID); - } - return new Value(val, format); + public static Value internalCreateValue(byte[] val, + Format format, + int regionId) { + return new Value(val, format, regionId, null); } /** * For internal use only. * @hidden * - * Creates a value with a given format and region + * Creates a value with a given format, region and rowMetadata */ public static Value internalCreateValue(byte[] val, - Format format, - int regionId) { - return new Value(val, format, regionId); + Format format, + int regionId, + String rowMetadata) { + return new Value(val, format, regionId, rowMetadata); } /** @@ -527,6 +686,14 @@ public int getRegionId() { return regionId; } + /** + * Returns the row metadata of this value. + * @hidden For internal use only + */ + public String getRowMetadata() { + return rowMetadata; + } + @Override public boolean equals(Object other) { if (!(other instanceof Value)) { @@ -553,6 +720,12 @@ public String toString() { if (format == Format.MULTI_REGION_TABLE) { sb.append(" region ID:"); sb.append(regionId); + } else if (format == Format.TABLE_V5) { + sb.append(" region ID:"); + sb.append(regionId); + sb.append(" metadata:'"); + sb.append(rowMetadata); + sb.append("'"); } sb.append(" bytes:"); for (int i = 0; i < 100 && i < val.length; i += 1) { @@ -567,23 +740,200 @@ public String toString() { } /** - * Create a tombstone value with Format.NONE + * Create a tombstone value which only contains the format, region id, + * row metadata and an empty byte array. * - * @hidden For internal cloud use only + * @hidden For internal use only */ - public static Value createTombstoneNoneValue() { - return internalCreateValue(new byte[0], Format.NONE); + public static Value createTombstoneValue(int regionId, String rowMetadata) { + if (regionId == Region.NULL_REGION_ID && rowMetadata == null) { + return internalCreateValue(new byte[0], Format.NONE, + Region.NULL_REGION_ID, null /* rowMetadata */); + } + if (rowMetadata == null) { + return internalCreateValue(new byte[0], Format.MULTI_REGION_TABLE, + regionId, null /* rowMetadata */); + } + return internalCreateValue(new byte[0], Format.TABLE_V5, regionId, + rowMetadata); } /** - * Create a tombstone value which only contains the format, region id - * and an empty byte array. - * - * @hidden For internal use only + * Returns true if tombstone, i.e. it can contain regionId or rowMetadata + * but payload (row data) is empty. 
+ */ + public static boolean isTombstone(byte[] bytes) { + return getValueOffset(bytes) == bytes.length; + } + + /** + * Returns true if there is a regionId in the entire encoded row, otherwise false. */ - public static Value createTombstoneValue(int regionId) { - return internalCreateValue(new byte[0], - Format.MULTI_REGION_TABLE, - regionId); + public static boolean hasRegionId(byte[] bytes) { + if (bytes == null || bytes.length == 0) { + return false; + } + + final Value.Format format = Value.Format.fromFirstByte(bytes[0]); + + if (Value.Format.MULTI_REGION_TABLE.compareTo(format) > 0) { + return false; + } + + if (format == Value.Format.MULTI_REGION_TABLE) { + return true; + } + + if (format == Format.TABLE_V5) { + BitSet options = BitSet.valueOf(new byte[]{bytes[1]}); + + return options.get(TABLEV5_REGIONID_BIT); + } + + throw new IllegalStateException("Invalid format: " + format); + } + + /** + * Returns true if there is row metadata in the entire encoded row, otherwise false. + */ + public static boolean hasRowMetadata(byte[] bytes) { + if (bytes == null || bytes.length == 0) { + return false; + } + final Value.Format format = Value.Format.fromFirstByte(bytes[0]); + + if (Format.TABLE_V5.compareTo(format) > 0) { + return false; + } + + if (format == Format.TABLE_V5) { + BitSet options = BitSet.valueOf(new byte[]{bytes[1]}); + + return options.get(TABLEV5_ROWMETADATA_BIT); + } + + throw new IllegalStateException("Invalid format: " + format); + } + + /** + * Returns the regionId given the entire encoded row or + * {@link Region#NULL_REGION_ID} if not present. + */ + public static int getRegionIdFromByteArray(byte[] bytes) { + if (bytes == null || bytes.length == 0) { + return Region.NULL_REGION_ID; + } + + final Value.Format format = Value.Format.fromFirstByte(bytes[0]); + + // all formats before MULTI_REGION_FORMAT + if (Value.Format.MULTI_REGION_TABLE.compareTo(format) > 0) { + return Region.NULL_REGION_ID; + } + + if (format == Value.Format.MULTI_REGION_TABLE) { + final int regionId = PackedInteger.readInt(bytes, 1); + return regionId; + } + + if (format == Format.TABLE_V5) { + int regionId = Region.NULL_REGION_ID; + BitSet options = BitSet.valueOf(new byte[] {bytes[1]}); + int alreadyRead = 2; + + if (options.get(TABLEV5_REGIONID_BIT)) { // contains regionId + regionId = PackedInteger.readInt(bytes, alreadyRead); + } + return regionId; + } + + throw new IllegalStateException("Invalid format: " + format); + } + + /** + * Returns the offset index (starts with 0) of the row value given the + * entire encoded row.
+ */ + public static int getValueOffset(byte[] bytes) { + if (bytes == null || bytes.length == 0) { + throw new IllegalStateException("Invalid bytes value: " + Arrays.toString(bytes)); + } + + final Value.Format format = Value.Format.fromFirstByte(bytes[0]); + + // all formats before MULTI_REGION_FORMAT + if (Value.Format.MULTI_REGION_TABLE.compareTo(format) > 0) { + return 1; + } + + if (format == Value.Format.MULTI_REGION_TABLE) { + /* skip bytes of region id */ + final int regionIdLen = PackedInteger.getReadIntLength(bytes, 1); + return regionIdLen + 1; + } + + if (format == Format.TABLE_V5) { + BitSet options = BitSet.valueOf(new byte[] {bytes[1]}); + int offset = 2; + + if (options.get(TABLEV5_REGIONID_BIT)) { // contains regionId + int regionId = PackedInteger.readInt(bytes, offset); + offset += PackedInteger.getWriteIntLength(regionId); + } + + if (options.get(TABLEV5_ROWMETADATA_BIT)) { // contains metadata + int metadataLen = PackedInteger.readInt(bytes, offset); + offset += PackedInteger.getWriteIntLength(metadataLen); + offset += metadataLen; + } + return offset; + } + + throw new IllegalStateException("Invalid format: " + format); + } + + /** + * Sets regionId and rowMetadata if available and returns the offset of row data. + */ + public static int setRegionIdAndRowMetadata(byte[] bytes, RowImpl row) { + if (bytes == null || bytes.length == 0) { + throw new IllegalStateException("Invalid bytes value: " + Arrays.toString(bytes)); + } + + final Value.Format format = Value.Format.fromFirstByte(bytes[0]); + + // all formats before MULTI_REGION_FORMAT + if (Value.Format.MULTI_REGION_TABLE.compareTo(format) > 0) { + return 1; + } + + if (format == Value.Format.MULTI_REGION_TABLE) { + final int regionIdLen = PackedInteger.getReadIntLength(bytes, 1); + final int regionId = PackedInteger.readInt(bytes, 1); + row.setRegionId(regionId); + return regionIdLen + 1; + } + + if (format == Format.TABLE_V5) { + BitSet options = BitSet.valueOf(new byte[] {bytes[1]}); + int offset = 2; /* 1 format, 1 bitset */ + + if (options.get(TABLEV5_REGIONID_BIT)) { // contains regionId + int regionId = PackedInteger.readInt(bytes, offset); + row.setRegionId(regionId); + offset += PackedInteger.getWriteIntLength(regionId); + } + + if (options.get(TABLEV5_ROWMETADATA_BIT)) { // contains metadata + int metadataLen = PackedInteger.readInt(bytes, offset); + offset += PackedInteger.getWriteIntLength(metadataLen); + String metadata = new String(bytes, offset, metadataLen, StandardCharsets.UTF_8); + row.setRowMetadata(metadata); + offset += metadataLen; + } + return offset; + } + + throw new IllegalStateException("Invalid format: " + format); } } diff --git a/kvmain/src/main/java/oracle/kv/impl/admin/CommandService.java b/kvmain/src/main/java/oracle/kv/impl/admin/CommandService.java index 047ca2e9..7afdf71e 100644 --- a/kvmain/src/main/java/oracle/kv/impl/admin/CommandService.java +++ b/kvmain/src/main/java/oracle/kv/impl/admin/CommandService.java @@ -990,7 +990,9 @@ public int createRemoveIndexPlan(String planName, /** * @since 20.1 + * @deprecated since 25.3 */ + @Deprecated public int createEvolveTablePlan(String planName, String namespace, String tableName, @@ -1002,6 +1004,22 @@ public int createEvolveTablePlan(String planName, short serialVersion) throws RemoteException; + /** + * @since 25.3 + * @param beforeImgTTL specify before image ttl, added in 25.3 + */ + public int createEvolveTablePlan(String planName, + String namespace, + String tableName, + int tableVersion, + FieldMap fieldMap, + TimeToLive ttl, + 
TimeToLive beforeImgTTL, + Set regions, + AuthContext authCtx, + short serialVersion) + throws RemoteException; + /** * Creates a new plan to perform network restore from source node to target * node. diff --git a/kvmain/src/main/java/oracle/kv/impl/admin/CommandServiceAPI.java b/kvmain/src/main/java/oracle/kv/impl/admin/CommandServiceAPI.java index 22057aaf..000a329c 100644 --- a/kvmain/src/main/java/oracle/kv/impl/admin/CommandServiceAPI.java +++ b/kvmain/src/main/java/oracle/kv/impl/admin/CommandServiceAPI.java @@ -13,6 +13,7 @@ package oracle.kv.impl.admin; +import static oracle.kv.impl.util.SerialVersion.BEFORE_IMAGE_VERSION; import static oracle.kv.impl.util.SerialVersion.JSON_COLLECTION_VERSION; import static oracle.kv.impl.util.SerialVersion.SHUTDOWN_REASON_VERSION; import static oracle.kv.impl.util.SerialVersion.TABLE_MD_IN_STORE_VERSION; @@ -926,6 +927,7 @@ public int createEvolveTablePlan(String planName, int tableVersion, FieldMap fieldMap, TimeToLive ttl, + TimeToLive beforeImgTTL, Set regions) throws RemoteException { @@ -933,11 +935,25 @@ public int createEvolveTablePlan(String planName, final short serialVersion = getSerialVersion(); + if (serialVersion < BEFORE_IMAGE_VERSION) { + return proxyRemote.createEvolveTablePlan(planName, + namespace, + tableName, + tableVersion, fieldMap, + ttl, + /* ignore before img ttl */ + null, + regions, + NULL_CTX, + serialVersion); + } + return proxyRemote.createEvolveTablePlan(planName, namespace, tableName, tableVersion, fieldMap, ttl, + beforeImgTTL, regions, NULL_CTX, serialVersion); diff --git a/kvmain/src/main/java/oracle/kv/impl/admin/CommandServiceImpl.java b/kvmain/src/main/java/oracle/kv/impl/admin/CommandServiceImpl.java index 64b9df41..72fd040c 100644 --- a/kvmain/src/main/java/oracle/kv/impl/admin/CommandServiceImpl.java +++ b/kvmain/src/main/java/oracle/kv/impl/admin/CommandServiceImpl.java @@ -1436,6 +1436,7 @@ public Integer execute() { shardKey, fieldMap, ttl, + null, /*beforeImageTTL*/ limits, r2compat, schemaId, @@ -1605,6 +1606,24 @@ public int createEvolveTablePlan(final String planName, final Set regions, AuthContext authCtx, short serialVersion) { + return createEvolveTablePlan(planName, namespace, tableName, + tableVersion, fieldMap, ttl, + null /* ignore before image ttl */, + regions, authCtx, serialVersion); + } + + @Override + @SecureInternalMethod + public int createEvolveTablePlan(final String planName, + final String namespace, + final String tableName, + final int tableVersion, + final FieldMap fieldMap, + final TimeToLive ttl, + final TimeToLive beforeImgTTL, + final Set regions, + AuthContext authCtx, + short serialVersion) { return aservice.getFaultHandler().execute (new ProcessFaultHandler.SimpleOperation() { @@ -1627,6 +1646,7 @@ public Integer execute() { tableVersion, fieldMap, ttl, + beforeImgTTL, table.getIdentityColumnInfo(), regions); } diff --git a/kvmain/src/main/java/oracle/kv/impl/admin/SysTableMonitor.java b/kvmain/src/main/java/oracle/kv/impl/admin/SysTableMonitor.java index 478c291a..c920f8fd 100644 --- a/kvmain/src/main/java/oracle/kv/impl/admin/SysTableMonitor.java +++ b/kvmain/src/main/java/oracle/kv/impl/admin/SysTableMonitor.java @@ -233,6 +233,7 @@ private boolean checkTables() { table.getTableVersion(), newTable.getFieldMap(), newTable.getDefaultTTL(), + newTable.getBeforeImageTTL(), newTable.getDescription(), true /* systemTable */, null /*identityColumnInfo*/, diff --git a/kvmain/src/main/java/oracle/kv/impl/admin/TableDdlOperation.java 
b/kvmain/src/main/java/oracle/kv/impl/admin/TableDdlOperation.java index 9758a481..b704e64d 100644 --- a/kvmain/src/main/java/oracle/kv/impl/admin/TableDdlOperation.java +++ b/kvmain/src/main/java/oracle/kv/impl/admin/TableDdlOperation.java @@ -187,7 +187,9 @@ public void perform(DdlHandler ddlHandler) { ddlHandler.getAdmin().getPlanner().createEvolveTablePlan( "AlterTable", table.getInternalNamespace(), table.getFullName(), tableVersion, table.getFieldMap(), - table.getDefaultTTL(), null, false, + table.getDefaultTTL(), + table.getBeforeImageTTL(), + null, false, table.getIdentityColumnInfo(), table.getIdentitySequenceDef(), table.isChild() ? null : table.getRemoteRegions()); diff --git a/kvmain/src/main/java/oracle/kv/impl/admin/client/CommandShell.java b/kvmain/src/main/java/oracle/kv/impl/admin/client/CommandShell.java index 09489944..dedfef1b 100644 --- a/kvmain/src/main/java/oracle/kv/impl/admin/client/CommandShell.java +++ b/kvmain/src/main/java/oracle/kv/impl/admin/client/CommandShell.java @@ -1624,7 +1624,13 @@ protected String run(String name, String[] args, String line) final ShellCommandResult result = command.executeJsonOutput(cmdArgs, this); - exitCode = command.getExitCode(); + /* + * Do not overwrite exitCode in case of exit command, so that the + * exitCode of previous command is retained. + */ + if (!(command instanceof ExitCommand)) { + exitCode = command.getExitCode(); + } return CommandJsonUtils.handleConversionFailure( (CommandJsonUtils.JsonConversionTask)() -> { diff --git a/kvmain/src/main/java/oracle/kv/impl/admin/client/PlanCommand.java b/kvmain/src/main/java/oracle/kv/impl/admin/client/PlanCommand.java index b1d0abd2..a63ab74f 100644 --- a/kvmain/src/main/java/oracle/kv/impl/admin/client/PlanCommand.java +++ b/kvmain/src/main/java/oracle/kv/impl/admin/client/PlanCommand.java @@ -3405,7 +3405,8 @@ public String exec(String[] args, Shell shell) te.getTable().getFullName(), te.getTableVersion(), te.getFieldMap(), - te.getDefaultTTL(), + te.getNewTableTTL(), + te.getNewBeforeImageTTL(), te.getRegions()); shell.removeVariable(tableName); return executePlan(planId, cs, shell); diff --git a/kvmain/src/main/java/oracle/kv/impl/admin/plan/Planner.java b/kvmain/src/main/java/oracle/kv/impl/admin/plan/Planner.java index b2de927d..6f8c29ca 100644 --- a/kvmain/src/main/java/oracle/kv/impl/admin/plan/Planner.java +++ b/kvmain/src/main/java/oracle/kv/impl/admin/plan/Planner.java @@ -537,6 +537,7 @@ public int createEvolveTablePlan(String planName, int tableVersion, FieldMap fieldMap, TimeToLive ttl, + TimeToLive beforeImgTTL, IdentityColumnInfo identityColumnInfo, Set regions) { return createEvolveTablePlan(planName, @@ -545,6 +546,7 @@ public int createEvolveTablePlan(String planName, tableVersion, fieldMap, ttl, + beforeImgTTL, null, /* description (no change) */ false /* systemTable */, identityColumnInfo /*identityColumnInfo*/, @@ -562,6 +564,7 @@ public synchronized int createEvolveTablePlan( int tableVersion, FieldMap fieldMap, TimeToLive ttl, + TimeToLive beforeImgTTL, String description, boolean systemTable, IdentityColumnInfo newIdentityColumnInfo, @@ -574,6 +577,7 @@ public synchronized int createEvolveTablePlan( tableName, tableVersion, fieldMap, ttl, + beforeImgTTL, description, systemTable, newIdentityColumnInfo, diff --git a/kvmain/src/main/java/oracle/kv/impl/admin/plan/TablePlanGenerator.java b/kvmain/src/main/java/oracle/kv/impl/admin/plan/TablePlanGenerator.java index 57f3ac27..8ee0604f 100644 --- 
a/kvmain/src/main/java/oracle/kv/impl/admin/plan/TablePlanGenerator.java +++ b/kvmain/src/main/java/oracle/kv/impl/admin/plan/TablePlanGenerator.java @@ -220,6 +220,7 @@ private static void checkStoreVersion(Admin admin, int tableVersion, FieldMap newFieldMap, TimeToLive ttl, + TimeToLive beforeImgTTL, String description, boolean systemTable, IdentityColumnInfo newIdentityColumnInfo, @@ -251,6 +252,7 @@ private static void checkStoreVersion(Admin admin, tableVersion, newFieldMap, ttl, + beforeImgTTL, description, systemTable, newIdentityColumnInfo, diff --git a/kvmain/src/main/java/oracle/kv/impl/admin/plan/task/AddTable.java b/kvmain/src/main/java/oracle/kv/impl/admin/plan/task/AddTable.java index 5dd0299a..0b08ad84 100644 --- a/kvmain/src/main/java/oracle/kv/impl/admin/plan/task/AddTable.java +++ b/kvmain/src/main/java/oracle/kv/impl/admin/plan/task/AddTable.java @@ -91,6 +91,9 @@ public class AddTable extends UpdateMetadata { private final Map jsonCollectionMRCounters; + private final int beforeImageTTL; + private final TimeUnit beforeImageTTLUnit; + /** */ public AddTable(MetadataPlan plan, @@ -119,6 +122,14 @@ public AddTable(MetadataPlan plan, this.ttl = 0; this.ttlUnit = null; } + if (table.getBeforeImageTTL() != null) { + this.beforeImageTTL = (int) table.getBeforeImageTTL().getValue(); + this.beforeImageTTLUnit = table.getBeforeImageTTL().getUnit(); + } else { + this.beforeImageTTL = 0; + this.beforeImageTTLUnit = null; + } + limits = (parentName == null) ? table.getTableLimits() : null; this.fieldMap = table.getFieldMap(); this.r2compat = table.isR2compatible(); @@ -216,6 +227,11 @@ protected TableMetadata updateMetadata(TableMetadata md, Transaction txn) { /* If the table does not exist, add it */ final TableImpl existing = md.getTable(namespace, tableName, parentName); + + TimeToLive bittl = (beforeImageTTLUnit == null ? + null : + TimeToLive.createTimeToLive(beforeImageTTL, + beforeImageTTLUnit)); if (existing == null) { TableImpl table = md.addTable(namespace, tableName, @@ -227,6 +243,7 @@ protected TableMetadata updateMetadata(TableMetadata md, Transaction txn) { (ttlUnit == null) ? 
null : TimeToLive.createTimeToLive(ttl, ttlUnit), + bittl, limits, r2compat, schemaId, diff --git a/kvmain/src/main/java/oracle/kv/impl/admin/plan/task/EvolveTable.java b/kvmain/src/main/java/oracle/kv/impl/admin/plan/task/EvolveTable.java index d1a68fc9..2fc1824e 100644 --- a/kvmain/src/main/java/oracle/kv/impl/admin/plan/task/EvolveTable.java +++ b/kvmain/src/main/java/oracle/kv/impl/admin/plan/task/EvolveTable.java @@ -63,6 +63,7 @@ public class EvolveTable extends UpdateMetadata { private final int tableVersion; private final FieldMap fieldMap; private final TimeToLive ttl; + private final TimeToLive beforeImgTTL; private final String description; private final boolean systemTable; private final IdentityColumnInfo identityColumnInfo; @@ -87,6 +88,7 @@ public EvolveTable(MetadataPlan plan, int tableVersion, FieldMap fieldMap, TimeToLive ttl, + TimeToLive beforeImgTTL, String description, boolean systemTable, IdentityColumnInfo identityColumnInfo, @@ -102,6 +104,7 @@ public EvolveTable(MetadataPlan plan, this.fieldMap = fieldMap; this.tableVersion = tableVersion; this.ttl = ttl; + this.beforeImgTTL = beforeImgTTL; this.description = description; this.systemTable = systemTable; this.identityColumnInfo = identityColumnInfo; @@ -161,8 +164,9 @@ protected TableMetadata updateMetadata(TableMetadata md, Transaction txn) { final KVStoreImpl store = (KVStoreImpl)admin.getInternalKVStore(); /* From this point the table is evolved */ - if (md.evolveTable(table, tableVersion, fieldMap, ttl, description, - systemTable, identityColumnInfo, regions)) { + if (md.evolveTable(table, tableVersion, fieldMap, ttl, beforeImgTTL, + description, systemTable, identityColumnInfo, + regions)) { if (!oldTableHasIdentity && table.hasIdentityColumn() && sequenceDefChange != null) { addIdentityColumn(table, store); diff --git a/kvmain/src/main/java/oracle/kv/impl/admin/plan/task/RepairShardQuorum.java b/kvmain/src/main/java/oracle/kv/impl/admin/plan/task/RepairShardQuorum.java index a59e692c..1c1f20e0 100644 --- a/kvmain/src/main/java/oracle/kv/impl/admin/plan/task/RepairShardQuorum.java +++ b/kvmain/src/main/java/oracle/kv/impl/admin/plan/task/RepairShardQuorum.java @@ -470,6 +470,13 @@ private static boolean repairQuorum(AbstractPlan plan, * rollback limit error. */ if (rollbackSecondaries.size() > 0) { + /* + * Suppress deprecation warning for TXN_ROLLBACK_LIMIT, which has + * been deprecated since JE 25.3.7. Retain the logic to avoid + * upgrade issues; setting this parameter is a no-op in JE + * 25.3.7 and later releases. + */ + @SuppressWarnings("deprecation") final String jeParams = ReplicationConfig.TXN_ROLLBACK_LIMIT + "=" + Integer.MAX_VALUE; @@ -740,7 +747,12 @@ private static void errorFindingVLSN(RepNodeId rnId, /** * Detect if given RN will encounter rollback limit error. + * + * Suppress deprecation warning for TXN_ROLLBACK_LIMIT, which has been + * deprecated since JE 25.3.7. Retain the logic to avoid upgrade issues; + * setting this parameter is a no-op in JE 25.3.7 and later releases.
*/ + @SuppressWarnings("deprecation") private static boolean detectRollbackLimitError(RNInfo rnInfo, long rollbackNum) { final RepNodeParams rnParams = rnInfo.params; diff --git a/kvmain/src/main/java/oracle/kv/impl/api/AsyncRequestDispatcherImpl.java b/kvmain/src/main/java/oracle/kv/impl/api/AsyncRequestDispatcherImpl.java index 006260ef..d34315b5 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/AsyncRequestDispatcherImpl.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/AsyncRequestDispatcherImpl.java @@ -904,7 +904,7 @@ public CompletableFuture executeNOPAsync(RepNodeState rns, response); return response; }, - endpointGroup.getBackupSchedExecService()); + endpointGroup.getBackupExecService()); }), (response, exception) -> { /* diff --git a/kvmain/src/main/java/oracle/kv/impl/api/KVStoreImpl.java b/kvmain/src/main/java/oracle/kv/impl/api/KVStoreImpl.java index fb74bee2..64c8ba81 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/KVStoreImpl.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/KVStoreImpl.java @@ -40,6 +40,7 @@ import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.TimeUnit; import java.util.function.BiConsumer; +import java.util.function.Supplier; import java.util.logging.Logger; import javax.net.ssl.SSLHandshakeException; @@ -70,7 +71,6 @@ import oracle.kv.OperationResult; import oracle.kv.ParallelScanIterator; import oracle.kv.ReauthenticateHandler; -import oracle.kv.RequestTimeoutException; import oracle.kv.ReturnValueVersion; import oracle.kv.StatementResult; import oracle.kv.StoreIteratorConfig; @@ -800,6 +800,7 @@ KeyValueVersion[] getMoreElements() { stringKeyResults[i] = new KeyValueVersion (keySerializer.fromByteArray(entry.getKeyBytes()), entry.getValue(), entry.getVersion(), + entry.getCreationTime(), entry.getModificationTime()); } return stringKeyResults; @@ -1071,11 +1072,12 @@ KeyValueVersion[] getMoreElements() { for (int i = 0; i < stringKeyResults.length; i += 1) { final ResultKeyValueVersion entry = byteKeyResults.get(i); - stringKeyResults[i] = createKeyValueVersion - (keySerializer.fromByteArray(entry.getKeyBytes()), - entry.getValue(), entry.getVersion(), - entry.getExpirationTime(), - entry.getModificationTime()); + stringKeyResults[i] = createKeyValueVersion( + keySerializer.fromByteArray(entry.getKeyBytes()), + entry.getValue(), entry.getVersion(), + entry.getExpirationTime(), + entry.getCreationTime(), + entry.getModificationTime()); } return stringKeyResults; } @@ -1981,7 +1983,8 @@ public Request makePutResolveRequest( long expTime, boolean updateTTL, boolean isTombstone, - long timestamp, + long creationTime, + long lastModification, int regionId) { final byte[] keyBytes = keySerializer.toByteArray(key); final PartitionId partitionId = dispatcher.getPartitionId(keyBytes); @@ -1989,7 +1992,9 @@ public Request makePutResolveRequest( returnChoice, expTime, updateTTL, - isTombstone, timestamp, + isTombstone, + creationTime, + lastModification, regionId); return makeWriteRequest(putResolve, partitionId, durability, timeout, @@ -2041,7 +2046,8 @@ public Result deleteInternalResult(Key key, final Request req = makeDeleteRequest(key, getReturnChoice(prevValue), durability, timeout, timeoutUnit, - tableId, false /* doTombstone */); + tableId, false /* doTombstone */, + null /* rowMetadata */); return executeRequestWithPrev(req, prevValue); } @@ -2051,11 +2057,12 @@ public Request makeDeleteRequest(Key key, long timeout, TimeUnit timeoutUnit, long tableId, - boolean doTombstone) { + boolean doTombstone, + String 
rowMetadata) { final byte[] keyBytes = keySerializer.toByteArray(key); final PartitionId partitionId = dispatcher.getPartitionId(keyBytes); final Delete del = new Delete(keyBytes, returnChoice, tableId, - doTombstone); + doTombstone, rowMetadata); return makeWriteRequest(del, partitionId, durability, timeout, timeoutUnit); } @@ -2110,7 +2117,8 @@ public Result deleteIfVersionInternalResult(Key key, final Request req = makeDeleteIfVersionRequest( key, matchVersion, getReturnChoice(prevValue), durability, - timeout, timeoutUnit, tableId, false /* doTombstone */); + timeout, timeoutUnit, tableId, false /* doTombstone */, + null /* rowMetadata */); return executeRequestWithPrev(req, prevValue); } @@ -2122,13 +2130,14 @@ public Request makeDeleteIfVersionRequest( long timeout, TimeUnit timeoutUnit, long tableId, - boolean doTombstone) + boolean doTombstone, + String rowMetadata) { final byte[] keyBytes = keySerializer.toByteArray(key); final PartitionId partitionId = dispatcher.getPartitionId(keyBytes); final Delete del = new DeleteIfVersion(keyBytes, returnChoice, matchVersion, tableId, - doTombstone); + doTombstone, rowMetadata); return makeWriteRequest(del, partitionId, durability, timeout, timeoutUnit); } @@ -2719,10 +2728,9 @@ private Result getExecuteResult(Request request, Response response) { private Response executeRequestInternal(Request request) throws FaultException { - final LoginManager requestLoginMgr = this.loginMgr; final boolean isUserSuppliedAuth = request.getAuthContext() != null; - try { - return dispatcher.execute( + return withReauthenticate( + () -> dispatcher.execute( request, /* * If AuthContext is already supplied, which means externally @@ -2731,22 +2739,8 @@ private Response executeRequestInternal(Request request) * For other cases, use login manager to handle auth retry * within request dispatcher. */ - isUserSuppliedAuth ? null : loginMgr); - } catch (AuthenticationRequiredException are) { - /* - * Try to reauthenticate, but if the AuthContext was provided by - * the caller it needs to handle the exception and reauthenticate. - */ - if (!tryReauthenticate(requestLoginMgr, isUserSuppliedAuth)) { - throw are; - } - - /* - * If the authentication completed, we assume we are ready to - * retry the operation. No retry on the authentication here. - */ - return dispatcher.execute(request, loginMgr); - } + isUserSuppliedAuth ? 
null : loginMgr), + isUserSuppliedAuth); } /** @@ -2873,8 +2867,7 @@ public KVStats getMonitorStats() { @Override public void login(LoginCredentials creds) - throws RequestTimeoutException, AuthenticationFailureException, - FaultException { + throws AuthenticationFailureException, FaultException { if (creds == null) { throw new IllegalArgumentException("No credentials provided"); @@ -2956,7 +2949,7 @@ public void login(LoginCredentials creds) @Override public void logout() - throws RequestTimeoutException, FaultException { + throws FaultException { synchronized(loginLock) { if (loginMgr == null) { @@ -3322,35 +3315,12 @@ public Publisher executeAsync(Statement statement, */ private ExecutionFuture executeDdl(PreparedDdlStatementImpl statement) throws IllegalArgumentException, FaultException { - final LoginManager requestLoginMgr = this.loginMgr; - final ExecuteOptions options = statement.getExecuteOptions(); - - try { - return statementExecutor.executeDdl(statement.getQuery(), - statement.getNamespace(), - options, - null, /* TableLimits */ - getLoginManager(this)); - } catch (AuthenticationRequiredException are) { - /* - * Try to reauthenticate, but if the AuthContext was provided by - * the caller it needs to handle the exception and reauthenticate. - */ - if (!tryReauthenticate(requestLoginMgr, - options.getAuthContext() != null)) { - throw are; - } - /* - * If the authentication completed, we assume we are ready to - * retry the operation. No retry on the authentication here. - */ - return statementExecutor.executeDdl(statement.getQuery(), - statement.getNamespace(), - options, - null, /* TableLimits */ - getLoginManager(this)); - } + return withReauthenticate( + () -> statementExecutor.executeDdl( + statement.getQuery(), statement.getNamespace(), + statement.getExecuteOptions(), null /* TableLimits */), + statement.getExecuteOptions().getAuthContext() != null); } /** @@ -3384,27 +3354,11 @@ public ExecutionFuture setTableLimits(String namespace, TableLimits limits) throws IllegalArgumentException, FaultException { - final LoginManager requestLoginMgr = this.loginMgr; - try { - return statementExecutor.setTableLimits(namespace, - tableName, - limits, - getLoginManager(this)); - } catch (AuthenticationRequiredException are) { - if (!tryReauthenticate(requestLoginMgr, - false /* isUserSuppliedAuth */)) { - throw are; - } - - /* - * If the authentication completed, we assume we are ready to - * retry the operation. No retry on the authentication here. - */ - return statementExecutor.setTableLimits(namespace, - tableName, - limits, - getLoginManager(this)); - } + return withReauthenticate( + () -> statementExecutor.setTableLimits( + namespace, tableName, limits), + false /* isUserSuppliedAuth */ + ); } @Override @@ -3507,31 +3461,16 @@ public StatementResult executeSync(Statement statement, if (options == null) { options = new ExecuteOptions(); } + final ExecuteOptions executeOptions = options; if (statement instanceof PreparedDdlStatementImpl || !getDispatcher().isAsync() || !options.isAsync()) { - final LoginManager requestLoginMgr = this.loginMgr; - try { - return ((InternalStatement)statement).executeSync(this, - options); - } catch (AuthenticationRequiredException are) { - /* - * Try to reauthenticate, but if the AuthContext was provided - * by the caller it needs to handle the exception and - * reauthenticate. 
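The retry-on-AuthenticationRequiredException blocks removed here and in the methods below are now centralized in the withReauthenticate helper added later in this file's diff. A minimal usage sketch, assuming a caller inside KVStoreImpl and reusing the variable names from the surrounding code:

    /* Wrap any call that may throw AuthenticationRequiredException. */
    Response resp = withReauthenticate(
        () -> dispatcher.execute(request, loginMgr),    /* guarded operation */
        request.getAuthContext() != null);              /* caller-supplied auth? */
    /* On the first failure the store re-authenticates (unless the caller
       supplied its own AuthContext) and the operation is retried once. */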
- */ - if (!tryReauthenticate(requestLoginMgr, - options.getAuthContext() != null)) { - throw are; - } - /* - * If the authentication completed, we assume we are ready to - * retry the operation. No retry on the authentication here. - */ - return ((InternalStatement)statement).executeSync(this, - options); - } + + return withReauthenticate( + () -> ((InternalStatement)statement).executeSync( + this, executeOptions), + options.getAuthContext() != null); } /* @@ -3622,28 +3561,11 @@ public StatementResult executeSyncShards(Statement statement, if (options == null) { options = new ExecuteOptions(); } - - final LoginManager requestLoginMgr = this.loginMgr; - try { - return ((InternalStatement)statement). - executeSyncShards(this, options, shards); - } catch (AuthenticationRequiredException are) { - /* - * Try to reauthenticate, but if the AuthContext was provided by - * the caller it needs to handle the exception and reauthenticate. - */ - if (!tryReauthenticate(requestLoginMgr, - options.getAuthContext() != null)) { - throw are; - } - - /* - * If the authentication completed, we assume we are ready to - * retry the operation. No retry on the authentication here. - */ - return ((InternalStatement)statement). - executeSyncShards(this, options, shards); - } + final ExecuteOptions executeOptions = options; + return withReauthenticate( + () -> ((InternalStatement)statement).executeSyncShards( + this, executeOptions, shards), + executeOptions.getAuthContext() != null); } /** @@ -3722,28 +3644,43 @@ public ExecutionFuture execute(char[] statement, throws FaultException, IllegalArgumentException { checkClosed(); - if (options == null) { options = new ExecuteOptions(); } - + final ExecuteOptions exeOptions = options; PreparedStatement ps = prepare(statement, options); - if (ps instanceof PreparedDdlStatementImpl) { - String namespace = null; - if (options != null) { - namespace = options.getNamespace(); - } - return statementExecutor.executeDdl(statement, - namespace, - options, - limits, - getLoginManager(this)); + return withReauthenticate( + () -> statementExecutor.executeDdl( + statement, exeOptions.getNamespace(), exeOptions, limits), + options.getAuthContext() != null); } throw new IllegalArgumentException( "Execute with TableLimits is restricted to DDL operations"); } + private T withReauthenticate(Supplier operation, + boolean isUserSuppliedAuth) { + final LoginManager requestLoginMgr = this.loginMgr; + try { + return operation.get(); + } catch (AuthenticationRequiredException are) { + /* + * Try to reauthenticate, but if the AuthContext was provided by + * the caller it needs to handle the exception and reauthenticate. + */ + if (!tryReauthenticate(requestLoginMgr, isUserSuppliedAuth)) { + throw are; + } + + /* + * If the authentication completed, we assume we are ready to + * retry the operation. No retry on the authentication here. + */ + return operation.get(); + } + } + /** * Utility method to create a KeyValueVersion. If expiration time is * non-zero it creates KeyValueVersionInternal to hold it. 
This allows @@ -3755,15 +3692,17 @@ public static KeyValueVersion createKeyValueVersion( final Value value, final Version version, final long expirationTime, + final long creationTime, final long modificationTime) { if (expirationTime == 0) { return new KeyValueVersion(key, value, version, - modificationTime); + creationTime, modificationTime); } return new KeyValueVersionInternal(key, value, version, expirationTime, + creationTime, modificationTime); } diff --git a/kvmain/src/main/java/oracle/kv/impl/api/KeyValueVersionInternal.java b/kvmain/src/main/java/oracle/kv/impl/api/KeyValueVersionInternal.java index 003d7837..d178f361 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/KeyValueVersionInternal.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/KeyValueVersionInternal.java @@ -34,8 +34,9 @@ public KeyValueVersionInternal(final Key key, final Value value, final Version version, final long expirationTime, + final long creationTime, final long modificationTime) { - super(key, value, version, modificationTime); + super(key, value, version, creationTime, modificationTime); this.expirationTime = expirationTime; } diff --git a/kvmain/src/main/java/oracle/kv/impl/api/RequestHandlerImpl.java b/kvmain/src/main/java/oracle/kv/impl/api/RequestHandlerImpl.java index d5980c57..10fbefac 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/RequestHandlerImpl.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/RequestHandlerImpl.java @@ -405,7 +405,7 @@ public class RequestHandlerImpl extends VersionedRemoteImpl * Test hook to be invoked immediately before initiating a request * transaction commit. */ - private TestHook preCommitTestHook; + private TestHook preCommitTestHook; /** * Test hook to be invoked before the response is returned. @@ -610,7 +610,7 @@ public void setTestNOPHook(TestHook hook) { requestNOPExecute = hook; } - public void setPreCommitTestHook(TestHook hook) { + public void setPreCommitTestHook(TestHook hook) { preCommitTestHook = hook; } @@ -895,7 +895,7 @@ private CompletableFuture executeInternal(Request request) { * Holds the context needed to execute a request, particularly for when the * request is completed by a JE async acknowledgements handler. */ - private class ExecuteRequest { + public class ExecuteRequest { private final Request request; private final InternalOperation internalOp; @@ -1126,9 +1126,7 @@ CompletableFuture executeOnce() { if (txn.isValid()) { streamHandle.prepare(); /* If testing SR21210, throw InsufficientAcksException. 
*/ - assert TestHookExecute.doHookIfSet - (preCommitTestHook, - RepInternal.getRepImpl(repEnv)); + assert TestHookExecute.doHookIfSet(preCommitTestHook, this); /* * Grab the internal txn before the commit since the commit * clears that field but we need to check for async acks @@ -1428,6 +1426,13 @@ public void onException(MasterTxn txnIgnore, Exception ex) { } } } + + /** + * Unit test only + */ + public InternalOperation getInternalOp() { + return internalOp; + } } /** diff --git a/kvmain/src/main/java/oracle/kv/impl/api/bulk/BulkMultiGet.java b/kvmain/src/main/java/oracle/kv/impl/api/bulk/BulkMultiGet.java index 7a85d1be..b772783c 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/bulk/BulkMultiGet.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/bulk/BulkMultiGet.java @@ -154,6 +154,7 @@ protected void convertResult(Result result, entry.getValue(), entry.getVersion(), entry.getExpirationTime(), + entry.getCreationTime(), entry.getModificationTime()); elementList.add(value); } diff --git a/kvmain/src/main/java/oracle/kv/impl/api/bulk/BulkPut.java b/kvmain/src/main/java/oracle/kv/impl/api/bulk/BulkPut.java index 70434206..6581ea0a 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/bulk/BulkPut.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/bulk/BulkPut.java @@ -14,6 +14,7 @@ package oracle.kv.impl.api.bulk; import static oracle.kv.impl.util.SerialVersion.BULK_PUT_RESOLVE; +import static oracle.kv.impl.util.SerialVersion.CREATION_TIME_VER; import static oracle.kv.impl.util.SerializationUtil.readNonNullByteArray; import static oracle.kv.impl.util.SerializationUtil.readPackedLong; import static oracle.kv.impl.util.SerializationUtil.writeNonNullByteArray; @@ -911,6 +912,7 @@ public Long call() throws Exception { final Key pk = getKey(e); final Value value = getValue(e); final long tableId = getTableId(e); + final long creationTime = getCreationTime(e); final long modTime = getModificationTime(e); final boolean isTombstone = getIsTombstone(e); final byte[] keyBytes = serializer.toByteArray(pk); @@ -921,6 +923,7 @@ public Long call() throws Exception { streamId, tableId, ttl, + creationTime, modTime, isTombstone); } @@ -988,6 +991,17 @@ protected long getTableId(@SuppressWarnings("unused") E entry) { return 0; } + /** + * Returns the creation time of the entry if available (e.g. from + * a restore/migration), BulkPut for table should override this method. + * The value for creation time will only be used if the value of + * {@link BulkWriteOptions#setUsePutResolve} is true + */ + protected long getCreationTime( + @SuppressWarnings("unused") E entry) { + return 0; + } + /** * Returns the modification time of the entry if available (e.g. from * a restore/migration), BulkPut for table should override this method. @@ -1065,13 +1079,12 @@ public void setShardPutTask(ShardPutTask rgPutThread) { @SuppressWarnings("unchecked") synchronized void put(byte[] key, byte[] value, int streamId, long tableId, - TimeToLive ttl, long modTime, + TimeToLive ttl, long creationTime, long modTime, boolean isTombstone) throws InterruptedException { - final WrappedValue wv = new WrappedValue(value, streamId, - tableId, ttl, modTime, - isTombstone); + final WrappedValue wv = new WrappedValue(value, streamId, tableId, + ttl, creationTime, modTime, isTombstone); final Object old = kvPairs.put(key, wv); if (old != null) { List list; @@ -1166,7 +1179,7 @@ private void flush(boolean force) } /** - * Adds a entry to KVPair list, return the size of the entry. 
+ * Adds an entry to KVPair list, return the size of the entry. */ private int addEntry(byte[] key, WrappedValue wv, List kvpairs, Set tableIds, @@ -1179,6 +1192,7 @@ private int addEntry(byte[] key, WrappedValue wv, value, wv.getTTLVal(), wv.getTTLUnitOrdinal(), + wv.getCreationTime(), wv.getModificationTime(), wv.isTombstone(), streamId)); @@ -1214,15 +1228,18 @@ private static class WrappedValue { private final long tableId; private final int ttlVal; private final byte ttlUnitOrdinal; + private final long creationTime; private final long modificationTime; private final boolean isTombstone; WrappedValue(byte[] value, int streamId, - long tableId, TimeToLive ttl, long modificationTime, - boolean isTombstone) { + long tableId, TimeToLive ttl, long creationTime, + long modificationTime, boolean isTombstone) { + this.value = value; this.streamId = streamId; this.tableId = tableId; + this.creationTime = creationTime; this.modificationTime = modificationTime; this.isTombstone = isTombstone; if (ttl != null) { @@ -1242,6 +1259,10 @@ long getTableId() { return tableId; } + long getCreationTime() { + return creationTime; + } + long getModificationTime() { return modificationTime; } @@ -1284,17 +1305,20 @@ public static class KVPair implements FastExternalizable { final int ttlVal; final TimeUnit ttlUnit; final int streamId; + final long creationTime; final long modificationTime; final boolean isTombstone; public KVPair(byte[] key, byte[] value, int ttlVal, byte ttlUnitOrdinal, - long modificationTime, boolean isTombstone, - int streamId) { + long creationTime, long modificationTime, boolean isTombstone, + int streamId) { + this.key = key; this.value = value; this.ttlVal = ttlVal; ttlUnit = TimeToLive.convertTimeToLiveUnit(ttlVal, ttlUnitOrdinal); this.streamId = streamId; + this.creationTime = creationTime; this.modificationTime = modificationTime; this.isTombstone = isTombstone; } @@ -1316,6 +1340,11 @@ public KVPair(DataInput in, modificationTime = 0L; isTombstone = false; } + if (serialVersion >= CREATION_TIME_VER) { + creationTime = readPackedLong(in); + } else { + creationTime = 0L; + } } /** @@ -1350,6 +1379,9 @@ public void writeFastExternal(DataOutput out, short serialVersion) "Cannot use tombstones in BulkPut in serial version: " + serialVersion); } + if (serialVersion >= CREATION_TIME_VER) { + writePackedLong(out, creationTime); + } } @Override @@ -1365,6 +1397,7 @@ public boolean equals(Object obj) { Arrays.equals(value, other.value) && (ttlVal == other.ttlVal) && (ttlUnit == other.ttlUnit) && + (creationTime == other.creationTime) && (modificationTime == other.modificationTime) && (isTombstone == other.isTombstone) && (streamId == other.streamId); @@ -1373,7 +1406,7 @@ public boolean equals(Object obj) { @Override public int hashCode() { return Objects.hash(key, value, ttlVal, ttlUnit, streamId, - modificationTime, isTombstone); + creationTime, modificationTime, isTombstone); } public byte[] getKey() { @@ -1392,6 +1425,10 @@ public int getTTLVal() { return ttlVal; } + public long getCreationTime() { + return creationTime; + } + public long getModificationTime() { return modificationTime; } diff --git a/kvmain/src/main/java/oracle/kv/impl/api/ops/BasicDeleteHandler.java b/kvmain/src/main/java/oracle/kv/impl/api/ops/BasicDeleteHandler.java index 0d775cf6..eda2fb2c 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/ops/BasicDeleteHandler.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/ops/BasicDeleteHandler.java @@ -15,6 +15,7 @@ import java.util.Collections; import java.util.List; 
+ import oracle.kv.impl.api.ops.InternalOperation.OpCode; import oracle.kv.impl.security.KVStorePrivilege; import oracle.kv.impl.security.NamespacePrivilege; diff --git a/kvmain/src/main/java/oracle/kv/impl/api/ops/BasicPutHandler.java b/kvmain/src/main/java/oracle/kv/impl/api/ops/BasicPutHandler.java index 6cbbf526..c1a89fb9 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/ops/BasicPutHandler.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/ops/BasicPutHandler.java @@ -13,9 +13,12 @@ package oracle.kv.impl.api.ops; -import java.util.Arrays; +import static oracle.kv.impl.security.KVStorePrivilegeLabel.DELETE_TABLE; + import java.util.Collections; +import java.util.EnumSet; import java.util.List; + import oracle.kv.UnauthorizedException; import oracle.kv.impl.api.ops.InternalOperation.OpCode; import oracle.kv.impl.api.table.TableImpl; @@ -24,6 +27,7 @@ import oracle.kv.impl.security.NamespacePrivilege; import oracle.kv.impl.security.SystemPrivilege; import oracle.kv.impl.security.TablePrivilege; + import com.sleepycat.je.Cursor; import com.sleepycat.je.DatabaseEntry; import com.sleepycat.je.OperationResult; @@ -35,13 +39,6 @@ abstract class BasicPutHandler extends SingleKeyOperationHandler { - /** - * Whether this put operation has valid TTL setting that would introduce - * implicit deletion of this record. If true, performing this operation - * would require user having DELETE_TABLE privilege. - */ - boolean hasValidTTLSetting = false; - BasicPutHandler(OperationHandler handler, OpCode opCode, Class operationType) { @@ -54,16 +51,15 @@ void verifyDataAccess(T op) /* * Check if the operation has a valid TTL. If so, this operation is - * an implicit delete and requires DELETE_TABLE privilege. See - * tableAccessPrivileges(), below. + * an implicit delete and requires DELETE_TABLE privilege. * - * The check for null handles older clients that may not include a - * TTL. + * The check for null handles older clients that may not include a TTL. */ - if (((Put)op).getTTL() != null) { - hasValidTTLSetting = (((Put)op).getTTL().getValue() != 0); + if (op.getTTL() != null && op.getTTL().getValue() != 0) { + super.verifyDataAccess(op, EnumSet.of(DELETE_TABLE)); + } else { + super.verifyDataAccess(op); } - super.verifyDataAccess(op); } @Override @@ -79,11 +75,6 @@ List generalAccessPrivileges() { @Override public List tableAccessPrivileges(long tableId) { - - if (hasValidTTLSetting) { - return Arrays.asList(new TablePrivilege.InsertTable(tableId), - new TablePrivilege.DeleteTable(tableId)); - } return Collections.singletonList( new TablePrivilege.InsertTable(tableId)); } @@ -91,12 +82,6 @@ List generalAccessPrivileges() { @Override public List namespaceAccessPrivileges(String namespace) { - - if (hasValidTTLSetting) { - return Arrays.asList( - new NamespacePrivilege.InsertInNamespace(namespace), - new NamespacePrivilege.DeleteInNamespace(namespace)); - } return Collections.singletonList( new NamespacePrivilege.InsertInNamespace(namespace)); } @@ -113,7 +98,6 @@ static OperationResult putEntry(Cursor cursor, } } - /* * Returns true if the server needs to copy MR counters. This is only * used by classes in this hierarchy. 
In this case MR counters may diff --git a/kvmain/src/main/java/oracle/kv/impl/api/ops/Delete.java b/kvmain/src/main/java/oracle/kv/impl/api/ops/Delete.java index 70fbda19..e688657c 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/ops/Delete.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/ops/Delete.java @@ -14,6 +14,7 @@ package oracle.kv.impl.api.ops; import static oracle.kv.impl.util.SerialVersion.CLOUD_MR_TABLE; +import static oracle.kv.impl.util.SerialVersion.ROW_METADATA_VERSION; import java.io.DataInput; import java.io.DataOutput; @@ -22,6 +23,7 @@ import oracle.kv.ReturnValueVersion.Choice; import oracle.kv.impl.api.lob.KVLargeObjectImpl; +import oracle.kv.impl.util.SerializationUtil; /** * The delete operation deletes the key/value pair associated with the key. @@ -45,11 +47,16 @@ public class Delete extends SingleKeyOperation { */ private final boolean doTombstone; + /** + * The row metadata for the op. + */ + private final String rowMetadata; + /** * Constructs a delete operation. */ public Delete(byte[] keyBytes, Choice prevValChoice) { - this(keyBytes, prevValChoice, 0, false); + this(keyBytes, prevValChoice, 0, false, null); } /** @@ -58,8 +65,9 @@ public Delete(byte[] keyBytes, Choice prevValChoice) { public Delete(byte[] keyBytes, Choice prevValChoice, long tableId, - boolean doTombstone) { - this(OpCode.DELETE, keyBytes, prevValChoice, tableId, doTombstone); + boolean doTombstone, + String rowMetadata) { + this(OpCode.DELETE, keyBytes, prevValChoice, tableId, doTombstone, rowMetadata); } /** @@ -69,11 +77,13 @@ public Delete(byte[] keyBytes, byte[] keyBytes, Choice prevValChoice, long tableId, - boolean doTombstone) { + boolean doTombstone, + String rowMetadata) { super(opCode, keyBytes); this.prevValChoice = prevValChoice; this.tableId = tableId; this.doTombstone = doTombstone; + this.rowMetadata = rowMetadata; } /** Constructor to implement deserializedForm */ @@ -86,12 +96,23 @@ public Delete(byte[] keyBytes, } else { if (other.doTombstone) { throw new IllegalStateException("Serial version " + - serialVersion + " does not support for external " + + serialVersion + " does not have support for external " + "multi-region table, must be " + CLOUD_MR_TABLE + " or greater"); } doTombstone = false; } + if (serialVersion >= ROW_METADATA_VERSION) { + rowMetadata = other.rowMetadata; + } else { + if (other.rowMetadata != null) { + throw new IllegalStateException("Serial version " + + serialVersion + " does not have support for row metadata, " + + "must be " + ROW_METADATA_VERSION + + " or greater"); + } + rowMetadata = null; + } } /** @@ -124,6 +145,11 @@ public Delete(byte[] keyBytes, doTombstone = false; } + if (serialVersion >= ROW_METADATA_VERSION) { + rowMetadata = SerializationUtil.readString(in, serialVersion); + } else { + rowMetadata = null; + } } /** @@ -156,6 +182,16 @@ public void writeFastExternal(DataOutput out, short serialVersion) "must be " + CLOUD_MR_TABLE + " or greater"); } } + + if (serialVersion >= ROW_METADATA_VERSION) { + SerializationUtil.writeString(out, serialVersion, rowMetadata); + } else { + if (rowMetadata != null) { + throw new IllegalStateException("Serial version " + + serialVersion + " does not support setting row metadata, " + + "must be " + ROW_METADATA_VERSION + " or greater"); + } + } } public Choice getReturnValueVersionChoice() { @@ -191,6 +227,10 @@ public boolean doTombstone() { return doTombstone; } + public String getRowMetadata() { + return rowMetadata; + } + @Override public String toString() { StringBuilder sb = new 
StringBuilder(); @@ -203,6 +243,11 @@ public String toString() { sb.append(tableId); sb.append(" "); } + if (rowMetadata != null) { + sb.append("RowMetadata '") + .append(rowMetadata) + .append("' "); + } sb.append(super.toString()); return sb.toString(); } @@ -220,13 +265,14 @@ public boolean equals(Object obj) { final Delete other = (Delete) obj; return (prevValChoice == other.prevValChoice) && (tableId == other.tableId) && - (doTombstone == other.doTombstone); + (doTombstone == other.doTombstone) && + Objects.equals(rowMetadata, other.rowMetadata); } @Override public int hashCode() { return Objects.hash(super.hashCode(), prevValChoice, tableId, - doTombstone); + doTombstone, rowMetadata); } @Override diff --git a/kvmain/src/main/java/oracle/kv/impl/api/ops/DeleteHandler.java b/kvmain/src/main/java/oracle/kv/impl/api/ops/DeleteHandler.java index 8073f532..cf19b234 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/ops/DeleteHandler.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/ops/DeleteHandler.java @@ -30,6 +30,7 @@ import com.sleepycat.je.Get; import com.sleepycat.je.OperationResult; import com.sleepycat.je.Transaction; +import com.sleepycat.je.WriteOptions; /** * Server handler for {@link Delete}. @@ -93,10 +94,11 @@ private Result insertTombstone(Delete op, Transaction txn, PartitionId partitionId, ReturnResultValueVersion prevVal, - boolean isMultiRegion) { - Value value = isMultiRegion ? - Value.createTombstoneValue(Region.LOCAL_REGION_ID) : - Value.createTombstoneNoneValue(); + boolean isMultiRegion, + String rowMetadata) { + int regionId = isMultiRegion ? Region.LOCAL_REGION_ID : + Region.NULL_REGION_ID; + Value value = Value.createTombstoneValue(regionId, rowMetadata); Put put = new Put(op.getKeyBytes(), value, op.getReturnValueVersionChoice(), @@ -120,6 +122,7 @@ private Result insertTombstone(Delete op, returnValueBytes, putResult.getPreviousVersion(), putResult.getPreviousExpirationTime(), + putResult.getPreviousCreationTime(), putResult.getPreviousModificationTime(), putResult.getPreviousStorageSize()); reserializeResultValue(op, prevVal.getValueVersion()); @@ -152,8 +155,8 @@ private Result delete(Delete op, try { final DatabaseEntry prevData = prevValue.getReturnChoice().needValue() ? - new DatabaseEntry() : - NO_DATA; + new DatabaseEntry() : + NO_DATA; final OperationResult result = cursor.get(keyEntry, prevData, @@ -166,13 +169,15 @@ private Result delete(Delete op, exist = false; } else { final TableImpl tbl = getAndCheckTable(op.getTableId()); - if (tbl != null && (tbl.isMultiRegion() || op.doTombstone())) { + if (tbl != null && (tbl.isMultiRegion() || op.doTombstone() || + op.getRowMetadata() != null)) { /* - * It's a multi-region table, so insert tombstone instead of - * deleting. + * The table is multi-region, the op requests a tombstone, or + * it carries rowMetadata, so insert a tombstone instead of deleting.
*/ return insertTombstone(op, txn, partitionId, prevValue, - tbl.isMultiRegion()); + tbl.isMultiRegion(), + op.getRowMetadata()); } final int recordSize = getStorageSize(cursor); @@ -188,7 +193,12 @@ private Result delete(Delete op, op.addReadBytes(MIN_READ); } - cursor.delete(null); + final WriteOptions jeOptions = makeOption(null, + false, + op.getTableId(), + operationHandler, + false); + cursor.delete(jeOptions); op.addWriteBytes(recordSize, getNIndexWrites(cursor), partitionId, -recordSize); MigrationStreamHandle.get().addDelete(keyEntry, cursor); diff --git a/kvmain/src/main/java/oracle/kv/impl/api/ops/DeleteIfVersion.java b/kvmain/src/main/java/oracle/kv/impl/api/ops/DeleteIfVersion.java index 72e5b6b3..907a2636 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/ops/DeleteIfVersion.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/ops/DeleteIfVersion.java @@ -38,7 +38,7 @@ public DeleteIfVersion(byte[] keyBytes, ReturnValueVersion.Choice prevValChoice, Version matchVersion) { this(keyBytes, prevValChoice, matchVersion, 0 /* tableId */, - false /* doTombstone */); + false /* doTombstone */, null /* rowMetadata */); } /** @@ -49,9 +49,10 @@ public DeleteIfVersion(byte[] keyBytes, ReturnValueVersion.Choice prevValChoice, Version matchVersion, long tableId, - boolean doTombstone) { + boolean doTombstone, + String rowMetadata) { super(OpCode.DELETE_IF_VERSION, keyBytes, prevValChoice, tableId, - doTombstone); + doTombstone, rowMetadata); checkNull("matchVersion", matchVersion); this.matchVersion = matchVersion; } diff --git a/kvmain/src/main/java/oracle/kv/impl/api/ops/DeleteIfVersionHandler.java b/kvmain/src/main/java/oracle/kv/impl/api/ops/DeleteIfVersionHandler.java index a2a8b31b..665b356e 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/ops/DeleteIfVersionHandler.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/ops/DeleteIfVersionHandler.java @@ -30,6 +30,7 @@ import com.sleepycat.je.OperationResult; import com.sleepycat.je.ReadOptions; import com.sleepycat.je.Transaction; +import com.sleepycat.je.WriteOptions; /** * Server handler for {@link DeleteIfVersion}. 
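A short usage sketch of the extended Delete constructor above; the key, table id and metadata string are illustrative values, not part of the patch. As the handler logic above shows, a non-null rowMetadata makes the server insert a tombstone rather than physically deleting, so the metadata is preserved with the tombstone.

    Delete del = new Delete(keyBytes,
                            Choice.NONE,            /* ReturnValueVersion.Choice */
                            tableId,
                            false /* doTombstone */,
                            "{\"origin\":\"region-a\"}" /* hypothetical rowMetadata */);
    /* DeleteHandler.delete() turns this into a tombstone because rowMetadata
       is non-null, even when the table is not multi-region. */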
@@ -123,9 +124,13 @@ private boolean deleteIfVersion(DeleteIfVersion op, Transaction txn, MultiDeleteTableHandler.insertTombstone(cursor, operationHandler, keyEntry, getTombstoneTTL(), op, oldRecordSize, partitionId, - tbl.isMultiRegion()); + tbl.isMultiRegion(), op.getTableId(), + op.getRowMetadata()); } else { - cursor.delete(null); + final WriteOptions jeOptions = + makeOption(null, false, op.getTableId(), + operationHandler, false); + cursor.delete(jeOptions); MigrationStreamHandle.get().addDelete(keyEntry, cursor); op.addReadBytes(MIN_READ); op.addWriteBytes(oldRecordSize, getNIndexWrites(cursor), diff --git a/kvmain/src/main/java/oracle/kv/impl/api/ops/Execute.java b/kvmain/src/main/java/oracle/kv/impl/api/ops/Execute.java index be2ea59c..0e6318ea 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/ops/Execute.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/ops/Execute.java @@ -377,14 +377,15 @@ public Operation createDelete(Key key, ReturnValueVersion.Choice prevReturn, boolean abortIfUnsuccessful) { return createDelete(key, prevReturn, abortIfUnsuccessful, 0L, - false /* doTombstone */); + false /* doTombstone */, null /* rowMetadata */); } public Operation createDelete(Key key, ReturnValueVersion.Choice prevReturn, boolean abortIfUnsuccessful, long tableId, - boolean doTombstone) { + boolean doTombstone, + String rowMetadata) { return new OperationImpl (key, abortIfUnsuccessful, new Delete(keySerializer.toByteArray(key), @@ -392,7 +393,8 @@ public Operation createDelete(Key key, prevReturn : ReturnValueVersion.Choice.NONE, tableId, - doTombstone)); + doTombstone, + rowMetadata)); } @Override @@ -408,7 +410,8 @@ public Operation createDeleteIfVersion(Key key, Version version) { boolean abortIfUnsuccessful) { return createDeleteIfVersion(key, version, prevReturn, abortIfUnsuccessful, 0L, - false /* doTombstone */); + false /* doTombstone */, + null /* rowMetadata */); } public Operation @@ -417,7 +420,8 @@ public Operation createDeleteIfVersion(Key key, Version version) { ReturnValueVersion.Choice prevReturn, boolean abortIfUnsuccessful, long tableId, - boolean doTombstone) { + boolean doTombstone, + String rowMetadata) { return new OperationImpl (key, abortIfUnsuccessful, new DeleteIfVersion(keySerializer.toByteArray(key), @@ -426,7 +430,8 @@ public Operation createDeleteIfVersion(Key key, Version version) { ReturnValueVersion.Choice.NONE, version, tableId, - doTombstone)); + doTombstone, + rowMetadata)); } } } diff --git a/kvmain/src/main/java/oracle/kv/impl/api/ops/IndexIterateHandler.java b/kvmain/src/main/java/oracle/kv/impl/api/ops/IndexIterateHandler.java index 8453c7cd..3df5ad6d 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/ops/IndexIterateHandler.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/ops/IndexIterateHandler.java @@ -93,6 +93,7 @@ Result execute(IndexIterate op, valBytes, valVers.getVersion(), valVers.getExpirationTime(), + valVers.getCreationTime(), valVers.getModificationTime())); /* diff --git a/kvmain/src/main/java/oracle/kv/impl/api/ops/IndexScanner.java b/kvmain/src/main/java/oracle/kv/impl/api/ops/IndexScanner.java index ec5a79f3..1a1b3566 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/ops/IndexScanner.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/ops/IndexScanner.java @@ -223,6 +223,18 @@ public long getModificationTime() { return (result != null ? result.getModificationTime() : 0); } + /** + * Returns the creation time of the current valid result if non-null, + * otherwise 0. 
+ * + * This means that the caller must have received a true result from + * one of the navigational interfaces indicating there's a current + * record. + */ + public long getCreationTime() { + return (result != null ? result.getCreationTime() : 0); + } + /** * Returns the current OperationResult. If the most recent operation failed * to find a record this will be null. diff --git a/kvmain/src/main/java/oracle/kv/impl/api/ops/InternalOperationHandler.java b/kvmain/src/main/java/oracle/kv/impl/api/ops/InternalOperationHandler.java index 4da40089..0dafa0d7 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/ops/InternalOperationHandler.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/ops/InternalOperationHandler.java @@ -17,6 +17,7 @@ import java.util.Collections; import java.util.Comparator; +import java.util.EnumSet; import java.util.HashSet; import java.util.List; import java.util.Set; @@ -36,6 +37,8 @@ import oracle.kv.impl.security.AccessCheckUtils; import oracle.kv.impl.security.ExecutionContext; import oracle.kv.impl.security.KVStorePrivilege; +import oracle.kv.impl.security.KVStorePrivilegeLabel; +import oracle.kv.impl.security.NamespacePrivilege; import oracle.kv.impl.security.SystemPrivilege; import oracle.kv.impl.security.TablePrivilege; import oracle.kv.impl.systables.TableMetadataDesc; @@ -50,6 +53,7 @@ import com.sleepycat.je.OperationResult; import com.sleepycat.je.ReadOptions; import com.sleepycat.je.Transaction; +import com.sleepycat.je.WriteOptions; import com.sleepycat.je.dbi.CursorImpl; import com.sleepycat.je.dbi.RecordVersion; import com.sleepycat.je.utilint.VLSN; @@ -232,25 +236,38 @@ TableImpl findTableByKeyBytes(byte[] keyBytes) { } /** - * Creates JE write options with given TTL arguments. + * Creates JE write options with given TTL, before image TTL if enabled, + * and tombstone. */ - static com.sleepycat.je.WriteOptions makeOption(TimeToLive ttl, - boolean updateTTL) { - int ttlVal = ttl != null ? (int) ttl.getValue() : 0; - TimeUnit ttlUnit = ttl != null ? ttl.getUnit() : null; - return new com.sleepycat.je.WriteOptions() - .setTTL(ttlVal, ttlUnit) - .setUpdateTTL(updateTTL); + static WriteOptions makeOption(TimeToLive ttl, + boolean updateTTL, + long tableId, + OperationHandler handler, + boolean tombstone) { + final int ttlVal = ttl != null ? (int) ttl.getValue() : 0; + final TimeUnit ttlUnit = ttl != null ? ttl.getUnit() : null; + final WriteOptions ret = new WriteOptions(); + /* set TTL */ + ret.setTTL(ttlVal, ttlUnit).setUpdateTTL(updateTTL); + /* set before image TTL */ + handler.setBeforeImageTTL(ret, tableId); + /* set tombstone */ + ret.setTombstone(tombstone); + return ret; } /** - * Creates JE write options with given expiration time + * Creates JE write options with given expiration time, and before image + * TTL if enabled */ - static com.sleepycat.je.WriteOptions makeExpirationTimeOption( - long expiration, boolean updateTTL) { - return new com.sleepycat.je.WriteOptions() - .setExpirationTime(expiration, null) - .setUpdateTTL(updateTTL); + static WriteOptions makeExpirationTimeOption(long expiration, + boolean updateTTL, + long tableId, + OperationHandler handler) { + final WriteOptions ret = new WriteOptions(); + ret.setExpirationTime(expiration, null).setUpdateTTL(updateTTL); + handler.setBeforeImageTTL(ret, tableId); + return ret; } /** @@ -278,14 +295,17 @@ void getPrevValueVersion(Cursor cursor, OperationResult result) { long expirationTime = (result != null ? result.getExpirationTime() : 0); + long creationTime = (result != null ? 
result.getCreationTime() : 0); long modificationTime = (result != null ? result.getModificationTime() : 0); + switch (prevValue.getReturnChoice()) { case VALUE: assert !prevData.getPartial(); prevValue.setValueVersion(prevData.getData(), null, expirationTime, + creationTime, modificationTime, getStorageSize(cursor)); break; @@ -298,6 +318,7 @@ void getPrevValueVersion(Cursor cursor, getVersion(cursor), expirationTime, 0L, + 0L, -1); break; case ALL: @@ -305,11 +326,12 @@ void getPrevValueVersion(Cursor cursor, prevValue.setValueVersion(prevData.getData(), getVersion(cursor), expirationTime, + creationTime, modificationTime, getStorageSize(cursor)); break; case NONE: - prevValue.setValueVersion(null, null, 0L, 0L, -1); + prevValue.setValueVersion(null, null, 0L, 0L, 0L, -1); break; default: throw new IllegalStateException @@ -326,6 +348,7 @@ static ResultValueVersion getBeforeUpdateInfo(Choice choice, long expirationTime = (result != null ? result.getExpirationTime() : 0); long modificationTime = (result != null ? result.getModificationTime(): 0); + long creationTime = (result != null ? result.getCreationTime(): 0); switch (choice) { case VALUE: @@ -333,6 +356,7 @@ static ResultValueVersion getBeforeUpdateInfo(Choice choice, return new ResultValueVersion(prevData.getData(), null, expirationTime, + creationTime, modificationTime, getStorageSize(cursor)); case VERSION: @@ -344,16 +368,18 @@ static ResultValueVersion getBeforeUpdateInfo(Choice choice, handler.getVersion(cursor), expirationTime, 0L, + 0L, -1); case ALL: assert !prevData.getPartial(); return new ResultValueVersion(prevData.getData(), handler.getVersion(cursor), expirationTime, + creationTime, modificationTime, getStorageSize(cursor)); case NONE: - return new ResultValueVersion(null, null, 0L, 0L, -1); + return new ResultValueVersion(null, null, 0L, 0L, 0L,-1); default: throw new IllegalStateException(choice.toString()); } @@ -531,8 +557,14 @@ static class Keyspace { private static final byte[] AVRO_SCHEMA_KEY_PREFIX = new byte[] { 0, 0x73, 0x63, 0x68 }; /* Keybytes of "//sch" */ - static interface KeyAccessChecker { + interface KeyAccessChecker { boolean allowAccess(byte[] key); + + @SuppressWarnings("unused") + default boolean allowAccess( + byte[] key, EnumSet tablePrivs) { + return allowAccess(key); + } } static final KeyAccessChecker privateKeyAccessChecker = @@ -696,6 +728,12 @@ static class TableAccessChecker implements KeyAccessChecker { @Override public boolean allowAccess(byte[] key) { + return allowAccess(key, null); + } + + @Override + public boolean allowAccess(byte[] key, + EnumSet tablePrivs) { if (!Keyspace.isGeneralAccess(key)) { return true; } @@ -707,7 +745,13 @@ public boolean allowAccess(byte[] key) { return false; } - return internalCheckTableAccess(possibleTable); + if (!internalCheckTableAccess(possibleTable)) { + return false; + } + if (tablePrivs != null) { + return hasTablePrivileges(possibleTable, tablePrivs); + } + return true; } boolean internalCheckTableAccess(TableImpl table) { @@ -765,6 +809,49 @@ boolean internalCheckTableAccess(TableImpl table) { } } + /** + * Whether current session has the required table privileges. 
+ */ + static boolean hasTablePrivileges(TableImpl table, + EnumSet privs) { + final ExecutionContext exeCtx = ExecutionContext.getCurrent(); + if (exeCtx == null) { + return true; + } + + for (KVStorePrivilegeLabel priv : privs) { + /* Current session has privileges on the namespace of the table */ + if (exeCtx.hasPrivilege( + NamespacePrivilege.get( + TablePrivilege.implyingNamespacePrivLabel(priv), + table.getInternalNamespace()))) { + continue; + } + + /* Current session doesn't have the required table privileges */ + if (!exeCtx.hasPrivilege( + TablePrivilege.get(priv, + table.getId(), + table.getInternalNamespace(), + table.getName()))) { + return false; + } + } + return true; + } + + /* + * Verify if current session has the required table privileges. + */ + protected static void verifyTablePrivileges( + TableImpl table, EnumSet tablePrivs) { + if (!hasTablePrivileges(table, tablePrivs)) { + throw new UnauthorizedException( + "Insufficient access rights granted on table, id: " + + table.getId() + " name: " + table.getFullNamespaceName()); + } + } + /** * Returns privilege for the specified table ID. If the ID is for the table * metadata system table, the USRVIEW privilege is required. Otherwise, diff --git a/kvmain/src/main/java/oracle/kv/impl/api/ops/MultiDeleteTable.java b/kvmain/src/main/java/oracle/kv/impl/api/ops/MultiDeleteTable.java index 868b0c31..41c197ec 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/ops/MultiDeleteTable.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/ops/MultiDeleteTable.java @@ -23,6 +23,8 @@ import oracle.kv.KeyRange; import oracle.kv.impl.api.table.TargetTables; +import oracle.kv.impl.util.SerialVersion; +import oracle.kv.impl.util.SerializationUtil; /** * A multi-delete table operation over table(s) in the same partition. 
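Not part of the patch, a minimal sketch of how the hasTablePrivileges/verifyTablePrivileges hook added above is meant to be used by a write handler that treats a nonzero TTL as an implicit delete (PutBatchHandler later in this patch does the equivalent through KVAuthorizer.allowAccess). The method and its parameters are stand-ins and assume the code lives inside InternalOperationHandler:

    /* Illustrative only: gate an implicit-delete put on DELETE_TABLE. */
    static void checkImplicitDelete(TableImpl table, int ttlVal) {
        final EnumSet<KVStorePrivilegeLabel> extraPrivs =
            (ttlVal != 0) ? EnumSet.of(KVStorePrivilegeLabel.DELETE_TABLE)
                          : null;
        if (extraPrivs != null) {
            /* Throws UnauthorizedException unless the session holds
             * DELETE_TABLE on the table or the implying privilege on the
             * table's namespace. */
            verifyTablePrivileges(table, extraPrivs);
        }
    }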
@@ -47,6 +49,7 @@ public class MultiDeleteTable extends MultiTableOperation { private final int batchSize; private final int maxWriteKB; private final boolean doTombstone; + private final String rowMetadata; /* * This is only used on the server side by table data removal to track the @@ -61,7 +64,7 @@ public MultiDeleteTable(byte[] parentKey, TargetTables targetTables, KeyRange subRange) { this(parentKey, targetTables, subRange, null, 0, - false /* doTombstone */); + false /* doTombstone */, null /* rowMetadata */); } /** @@ -72,13 +75,16 @@ public MultiDeleteTable(byte[] parentKey, KeyRange subRange, byte[] resumeKey, int maxWriteKB, - boolean doTombstone) { + boolean doTombstone, + String rowMetadata) { super(OpCode.MULTI_DELETE_TABLE, parentKey, targetTables, subRange); this.resumeKey = resumeKey; this.maxWriteKB = maxWriteKB; this.majorPathComplete = true; this.batchSize = 0; - this.doTombstone = doTombstone; + /* Must use tombstones if rowMetadata is present */ + this.doTombstone = doTombstone || (rowMetadata != null); + this.rowMetadata = rowMetadata; } /** Constructor to implement deserializedForm */ @@ -99,6 +105,12 @@ private MultiDeleteTable(MultiDeleteTable other, short serialVersion) { } doTombstone = false; } + + if (serialVersion >= SerialVersion.ROW_METADATA_VERSION) { + rowMetadata = other.rowMetadata; + } else { + rowMetadata = null; + } } /** @@ -119,6 +131,12 @@ protected MultiDeleteTable(DataInput in, short serialVersion) } else { doTombstone = false; } + + if (serialVersion >= SerialVersion.ROW_METADATA_VERSION) { + rowMetadata = SerializationUtil.readString(in, serialVersion); + } else { + rowMetadata = null; + } } @Override @@ -139,6 +157,10 @@ public void writeFastExternal(DataOutput out, short serialVersion) "must be " + CLOUD_MR_TABLE + " or greater"); } } + + if (serialVersion >= SerialVersion.ROW_METADATA_VERSION) { + SerializationUtil.writeString(out, serialVersion, rowMetadata); + } } /** @@ -161,6 +183,7 @@ public MultiDeleteTable(byte[] parentKey, this.resumeKey = resumeKey; this.maxWriteKB = 0; this.doTombstone = doTombstone; + this.rowMetadata = null; } byte[] getResumeKey() { @@ -187,6 +210,10 @@ int getMaxWriteKB() { return maxWriteKB; } + String getRowMetadata() { + return rowMetadata; + } + /** * Return whether it is an external multi-region table. */ diff --git a/kvmain/src/main/java/oracle/kv/impl/api/ops/MultiDeleteTableHandler.java b/kvmain/src/main/java/oracle/kv/impl/api/ops/MultiDeleteTableHandler.java index d5c16501..f7030cf3 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/ops/MultiDeleteTableHandler.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/ops/MultiDeleteTableHandler.java @@ -24,6 +24,7 @@ import oracle.kv.Version; import oracle.kv.impl.api.ops.InternalOperation.OpCode; import oracle.kv.impl.api.table.Region; +import oracle.kv.impl.api.table.TableImpl; import oracle.kv.impl.rep.migration.MigrationStreamHandle; import oracle.kv.impl.security.KVStorePrivilege; import oracle.kv.impl.security.NamespacePrivilege; @@ -110,11 +111,13 @@ Result execute(MultiDeleteTable op, * and here a duplicate tombstone will be inserted. 
* This is harmless and it's ok to overcount the * deletions.*/ + final long tid = tableInfo.currentTable.getId(); insertTombstoneHelper(cursor, keyEntry, op, partitionId, - isMR); + isMR, + tid); nDeletions++; /* delete ancestor keys if needed */ nDeletions += @@ -125,7 +128,8 @@ Result execute(MultiDeleteTable op, ancestorKey, op, partitionId, - isMR)); + isMR, + tid)); } else { /* @@ -133,7 +137,12 @@ Result execute(MultiDeleteTable op, * in the delete path. If the record is gone the * delete below will fail. */ - if (cursor.delete(null) != null) { + final TableImpl tb = tableInfo.getCurrentTable(); + final long tid = tb.getId(); + final WriteOptions jeOptions = + makeOption(null, false, tid, operationHandler, + false); + if (cursor.delete(jeOptions) != null) { nDeletions++; /* * Gets the migration stream to forward the @@ -186,7 +195,8 @@ private OperationResult insertTombstoneHelper(Cursor cursor, DatabaseEntry key, MultiDeleteTable op, PartitionId partitionId, - boolean isMR) { + boolean isMR, + long tableId) { final int oldRecordSize = getStorageSize(cursor); final TimeToLive ttl = getTombstoneTTL(); return insertTombstone(cursor, @@ -196,7 +206,9 @@ private OperationResult insertTombstoneHelper(Cursor cursor, op, oldRecordSize, partitionId, - isMR); + isMR, + tableId, + op.getRowMetadata()); } @Override @@ -216,21 +228,24 @@ private OperationResult insertTombstoneHelper(Cursor cursor, /** * Put a tombstone at current position of the cursor. */ - protected static OperationResult - insertTombstone(Cursor cursor, - OperationHandler operationHandler, - DatabaseEntry keyEntry, - TimeToLive tombstoneTTL, - InternalOperation op, - int oldRecordSize, - PartitionId partitionId, - boolean isMultiRegion) { - WriteOptions jeOptions = makeOption(tombstoneTTL, true); - jeOptions.setTombstone(true); + static OperationResult insertTombstone(Cursor cursor, + OperationHandler operationHandler, + DatabaseEntry keyEntry, + TimeToLive tombstoneTTL, + InternalOperation op, + int oldRecordSize, + PartitionId partitionId, + boolean isMultiRegion, + long tableId, + String rowMetadata) { + final WriteOptions jeOptions = + makeOption(tombstoneTTL, true, tableId, operationHandler, + true/* tombstone */); - Value value = isMultiRegion ? - Value.createTombstoneValue(Region.LOCAL_REGION_ID) : - Value.createTombstoneNoneValue(); + int regionId = isMultiRegion ? 
Region.LOCAL_REGION_ID : + Region.NULL_REGION_ID; + Value value = Value.createTombstoneValue(regionId, + rowMetadata); byte[] valueBytes = value.toByteArray(); final DatabaseEntry dataEntry = valueDatabaseEntry(valueBytes); @@ -242,6 +257,7 @@ private OperationResult insertTombstoneHelper(Cursor cursor, Version version = operationHandler.getVersion(cursor); MigrationStreamHandle.get().addPut(keyEntry, dataEntry, version.getVLSN(), + result.getCreationTime(), result.getModificationTime(), expTime, true /*isTombstone*/); diff --git a/kvmain/src/main/java/oracle/kv/impl/api/ops/MultiKeyOperationHandler.java b/kvmain/src/main/java/oracle/kv/impl/api/ops/MultiKeyOperationHandler.java index 1c22edc0..79bab68a 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/ops/MultiKeyOperationHandler.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/ops/MultiKeyOperationHandler.java @@ -13,6 +13,7 @@ package oracle.kv.impl.api.ops; +import java.util.EnumSet; import java.util.HashSet; import java.util.List; import java.util.Set; @@ -27,6 +28,7 @@ import oracle.kv.impl.api.ops.OperationHandler.KVAuthorizer; import oracle.kv.impl.security.ExecutionContext; import oracle.kv.impl.security.KVStorePrivilege; +import oracle.kv.impl.security.KVStorePrivilegeLabel; import oracle.kv.impl.security.SystemPrivilege; import oracle.kv.impl.topo.PartitionId; @@ -58,7 +60,9 @@ public boolean allowAccess(DatabaseEntry keyEntry) { } @Override - public boolean allowFullAccess() { + public boolean + allowAccess(DatabaseEntry keyEntry, + EnumSet tablePrivs) { return true; } }; @@ -142,6 +146,7 @@ boolean iterate(MultiKeyOperation op, valVers.getValueBytes(), valVers.getVersion(), valVers.getExpirationTime(), + valVers.getCreationTime(), valVers.getModificationTime(), false /* isTombstone */)); @@ -399,19 +404,20 @@ private KeyspaceAccessAuthorizer(Set keyCheckers) { @Override public boolean allowAccess(DatabaseEntry keyEntry) { + return allowAccess(keyEntry, null); + } + + @Override + public boolean allowAccess(DatabaseEntry keyEntry, + EnumSet tablePrivs) { final byte[] key = keyEntry.getData(); for (final KeyAccessChecker checker : keyCheckers) { - if (!checker.allowAccess(key)) { + if (!checker.allowAccess(key, tablePrivs)) { return false; } } return true; } - - @Override - public boolean allowFullAccess() { - return false; - } } static void addKeyResult(List results, diff --git a/kvmain/src/main/java/oracle/kv/impl/api/ops/MultiTableOperationHandler.java b/kvmain/src/main/java/oracle/kv/impl/api/ops/MultiTableOperationHandler.java index 38e0c494..365f9ba2 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/ops/MultiTableOperationHandler.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/ops/MultiTableOperationHandler.java @@ -52,6 +52,7 @@ import com.sleepycat.je.LockMode; import com.sleepycat.je.OperationResult; import com.sleepycat.je.Transaction; +import com.sleepycat.je.WriteOptions; /** * Base server handler for subclasses of MultiTableOperation. @@ -113,6 +114,7 @@ static void addValueResult(final OperationHandler operationHandler, valBytes, valVers.getVersion(), result.getExpirationTime(), + result.getCreationTime(), result.getModificationTime(), result.isTombstone())); } @@ -382,8 +384,8 @@ public void verifyTableAccess( } checkKeyspacePermission(parentKey); - new TargetTableAccessChecker(operationHandler, this, tables). 
- checkAccess(); + new TargetTableAccessChecker(operationHandler, this, tables) + .checkAccess(); } /** @@ -682,12 +684,13 @@ int addAncestorIndexValues(Database db, valVers.getValueBytes(), opSerialVersion); - results.add(new ResultIndexRows - (indexKeyBytes, + results.add(new ResultIndexRows( + indexKeyBytes, ancestorKey.getData(), valBytes, valVers.getVersion(), valVers.getExpirationTime(), + valVers.getCreationTime(), valVers.getModificationTime())); } } @@ -767,7 +770,11 @@ int deleteAncestorKeys( if (putTombstone == null) { /* regular delete */ if (result != null) { - if (ancestorCursor.delete(null) != null) { + final long tid = entry.getTable().getId(); + final WriteOptions jeOpt = + makeOption(null, false, tid, + operationHandler, false); + if (ancestorCursor.delete(jeOpt) != null) { numAncestors++; MigrationStreamHandle .get().addDelete(ancestorKey, @@ -953,12 +960,11 @@ void checkAccess() throws FaultException, UnauthorizedException { private void internalCheckAccess(long tableId) throws FaultException, UnauthorizedException { - final TableImpl table = - operationHandler.getAndCheckTable(tableId); + final TableImpl table = operationHandler.getAndCheckTable(tableId); if (!internalCheckTableAccess(table)) { throw new UnauthorizedException ( "Insufficient access rights granted on table, id:" + - tableId + " name:" + table.getFullNamespaceName()); + table.getId() + " name:" + table.getFullNamespaceName()); } } } diff --git a/kvmain/src/main/java/oracle/kv/impl/api/ops/OperationHandler.java b/kvmain/src/main/java/oracle/kv/impl/api/ops/OperationHandler.java index 79328f27..1b9eab0e 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/ops/OperationHandler.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/ops/OperationHandler.java @@ -13,6 +13,7 @@ package oracle.kv.impl.api.ops; +import java.util.EnumSet; import java.util.List; import java.util.UUID; import java.util.logging.Logger; @@ -28,9 +29,11 @@ import oracle.kv.impl.rep.RepNode; import oracle.kv.impl.rep.RepNodeService; import oracle.kv.impl.security.KVStorePrivilege; +import oracle.kv.impl.security.KVStorePrivilegeLabel; import oracle.kv.impl.topo.PartitionId; import oracle.kv.impl.util.SortableString; import oracle.kv.impl.util.server.LoggerUtils; +import oracle.kv.table.TimeToLive; import com.sleepycat.je.Cursor; import com.sleepycat.je.CursorConfig; @@ -38,6 +41,7 @@ import com.sleepycat.je.DbInternal; import com.sleepycat.je.OperationResult; import com.sleepycat.je.Transaction; +import com.sleepycat.je.WriteOptions; import com.sleepycat.je.dbi.CursorImpl; import com.sleepycat.je.dbi.RecordVersion; import com.sleepycat.je.rep.impl.RepImpl; @@ -82,12 +86,18 @@ interface KVAuthorizer { /** * Should access to the entry be allowed? */ - public boolean allowAccess(DatabaseEntry keyEntry); + boolean allowAccess(DatabaseEntry keyEntry); /** - * Will the authorizer return true for all entries? + * Should access to the entry be allowed and whether the current session + * has given table privileges? + * + * Note that the table privileges specified should also imply the same + * privileges in the generalAccessPrivileges of the operation using + * this authorizer. */ - public boolean allowFullAccess(); + boolean allowAccess(DatabaseEntry keyEntry, + EnumSet tablePrivs); } public OperationHandler(RepNode repNode, RepNodeService.Params params) { @@ -295,6 +305,7 @@ ResultValueVersion makeValueVersion(Cursor c, dataEntry.getData(), getVersion(c), result != null ? result.getExpirationTime() : 0, + result != null ? 
result.getCreationTime() : 0, result != null ? result.getModificationTime() : 0, InternalOperationHandler.getStorageSize(c)); } @@ -323,6 +334,36 @@ TableImpl getAndCheckTable(final long tableId) { return table; } + /** + * Sets the before image TTL in JE {@link WriteOptions} + * @param jeOptions JE write options + * @param tableId table id + */ + void setBeforeImageTTL(WriteOptions jeOptions, long tableId) { + if (tableId <= 0) { + /* not valid table id, no table associated w/ the op */ + return; + } + final TableImpl table = getAndCheckTable(tableId); + setBeforeImageTTLFromTable(jeOptions, table); + + } + + /** + * Sets the before image TTL in JE write options from table instance + * @param jeOptions je write options + * @param table table instance + */ + private void setBeforeImageTTLFromTable(WriteOptions jeOptions, + TableImpl table) { + final TimeToLive beforeImgTTL = table.getBeforeImageTTL(); + if (beforeImgTTL != null) { + /* before image is enabled for the table */ + jeOptions.saveBeforeImage((int) beforeImgTTL.getValue(), + beforeImgTTL.getUnit()); + } + } + /** * Returns the table associated with the table id, or null if the * table with the id does not exist. diff --git a/kvmain/src/main/java/oracle/kv/impl/api/ops/PutBatchHandler.java b/kvmain/src/main/java/oracle/kv/impl/api/ops/PutBatchHandler.java index 40d42eef..b478f9c6 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/ops/PutBatchHandler.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/ops/PutBatchHandler.java @@ -14,10 +14,12 @@ package oracle.kv.impl.api.ops; import static oracle.kv.impl.api.ops.OperationHandler.CURSOR_DEFAULT; +import static oracle.kv.impl.security.KVStorePrivilegeLabel.DELETE_TABLE; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.EnumSet; import java.util.HashSet; import java.util.List; import java.util.Set; @@ -31,11 +33,13 @@ import oracle.kv.impl.api.ops.InternalOperation.OpCode; import oracle.kv.impl.api.ops.OperationHandler.KVAuthorizer; import oracle.kv.impl.api.ops.Result.PutBatchResult; +import oracle.kv.impl.api.table.Region; import oracle.kv.impl.api.table.RowImpl; import oracle.kv.impl.api.table.TableImpl; import oracle.kv.impl.api.table.TableMetadata; import oracle.kv.impl.rep.migration.MigrationStreamHandle; import oracle.kv.impl.security.KVStorePrivilege; +import oracle.kv.impl.security.KVStorePrivilegeLabel; import oracle.kv.impl.security.NamespacePrivilege; import oracle.kv.impl.security.SystemPrivilege; import oracle.kv.impl.security.TablePrivilege; @@ -51,7 +55,7 @@ import com.sleepycat.je.OperationResult; import com.sleepycat.je.Put; import com.sleepycat.je.Transaction; -import com.sleepycat.util.PackedInteger; +import com.sleepycat.je.WriteOptions; /** * Server handler for {@link PutBatch}. @@ -71,13 +75,6 @@ */ class PutBatchHandler extends MultiKeyOperationHandler { - /** - * Whether the operation has implicit deletion because of its TTL setting. - * This flag is reset for each operation in a batch, and if true, the - * operation requires DELETE_TABLE privilege. 
- */ - boolean hasValidTTLSetting = false; - Set tablesWithCRDT = new HashSet<>(); PutBatchHandler(OperationHandler handler) { @@ -107,10 +104,11 @@ private List putBatch(PutBatch op, List kvPairs, KVAuthorizer kvAuth) { - final com.sleepycat.je.WriteOptions noExpiry = - makeOption(TimeToLive.DO_NOT_EXPIRE, false); + final WriteOptions noExpiry = + makeOption(TimeToLive.DO_NOT_EXPIRE, false, op.getTableId(), + getOperationHandler(), false/* not tombstone */); - final List keysPresent = new ArrayList(); + final List keysPresent = new ArrayList<>(); final Database db = getRepNode().getPartitionDB(partitionId); final DatabaseEntry keyEntry = new DatabaseEntry(); @@ -144,13 +142,7 @@ private List putBatch(PutBatch op, DatabaseEntry dataEntryToUse = valueDatabaseEntry(dataEntry, e.getValue()); - /* - * Check if this put has valid TTL setting. This must be done - * before the access check. - */ - hasValidTTLSetting = (e.getTTLVal() != 0); - - if (!kvAuth.allowAccess(keyEntry)) { + if (!kvAuth.allowAccess(keyEntry, getTablePrivileges(e))) { throw new UnauthorizedException("Insufficient access " + "rights granted"); } @@ -173,15 +165,7 @@ private List putBatch(PutBatch op, boolean replaceCRDT = false; TableImpl table = null; if (tablesWithCRDT.size() > 0) { - long[] tableIds = op.getTableIds(); - /* - * Get the table using tableId if the op is - * against only one table, otherwise find - * the table by key bytes. - */ - table = tableIds.length > 1 ? - findTableByKeyBytes(e.getKey()) : - getAndCheckTable(tableIds[0]); + table = findTable(op, e.getKey()); if (op.getOverwrite() && table != null && tablesWithCRDT.contains(table.getId())) { prevData = new DatabaseEntry(); @@ -196,7 +180,7 @@ private List putBatch(PutBatch op, while (true) { - final com.sleepycat.je.WriteOptions jeOptions; + final WriteOptions jeOptions; int ttlVal = e.getTTLVal(); if (ttlVal != 0) { jeOptions = makeJEWriteOptions(ttlVal, e.getTTLUnit()); @@ -204,6 +188,13 @@ private List putBatch(PutBatch op, jeOptions = noExpiry; } + /* set before image TTL */ + final TableImpl tb = findTable(op, e.getKey()); + if (tb != null) { + operationHandler.setBeforeImageTTL(jeOptions, + tb.getId()); + } + if (replaceCRDT) { dataEntryToUse = PutHandler.copyCRDTFromPrevRow(table, e.getKey(), e.getValue(), prevData, getRepNode()); @@ -222,6 +213,7 @@ private List putBatch(PutBatch op, MigrationStreamHandle.get(). addPut(keyEntry, dataEntryToUse, v.getVLSN(), + result.getCreationTime(), result.getModificationTime(), result.getExpirationTime(), false /*isTombstone*/); @@ -239,6 +231,19 @@ private List putBatch(PutBatch op, return keysPresent; } + /** + * Get the table using tableId if the op is against only one table, + * otherwise find the table by key bytes. + */ + private TableImpl findTable(PutBatch op, byte[] keyBytes) { + final long[] tableIds = op.getTableIds(); + if (tableIds == null) { + return null; + } + return tableIds.length > 1 ? + findTableByKeyBytes(keyBytes) : getAndCheckTable(tableIds[0]); + } + /* * Use PutResolve semantics. This path handles both true MR merging * as well as simple restore from backup. 
This is doing similar work @@ -262,22 +267,32 @@ private void handlePutResolve(Cursor cursor, KVPair e, /* state for migration stream */ Version version = null; + long creationTime = 0l; long modTime = 0l; long expiration = 0L; com.sleepycat.je.WriteOptions jeOptions = null; + + final long tableId = op.getTableId(); + final OperationHandler opHandler = getOperationHandler(); + final boolean tombstone = e.isTombstone(); + final TimeToLive ttl; + if (e.isTombstone()) { - jeOptions = makeOption(getRepNode().getRepNodeParams(). - getTombstoneTTL(), true); - jeOptions.setTombstone(true); + ttl = getRepNode().getRepNodeParams().getTombstoneTTL(); } else if (e.getTTLVal() != 0) { /* using overwrite, update the TTL if it's set */ - jeOptions = new com.sleepycat.je.WriteOptions() - .setTTL(e.getTTLVal(), e.getTTLUnit()).setUpdateTTL(true); + ttl = TimeToLive.createTimeToLive(e.getTTLVal(), e.getTTLUnit()); } else { - jeOptions = makeOption(TimeToLive.DO_NOT_EXPIRE, true); + ttl = TimeToLive.DO_NOT_EXPIRE; } + /* create JE write option */ + jeOptions = makeOption(ttl, true, tableId, opHandler, tombstone); + + /* if creationTime is 0 it's the same as the default */ + jeOptions.setCreationTime(e.getCreationTime()); + /* if mod time is 0 it's the same as the default */ jeOptions.setModificationTime(e.getModificationTime()); @@ -290,6 +305,7 @@ private void handlePutResolve(Cursor cursor, KVPair e, */ version = getVersion(cursor); expiration = opres.getExpirationTime(); + creationTime = opres.getCreationTime(); modTime = opres.getModificationTime(); final int storageSize = getStorageSize(cursor); @@ -400,6 +416,9 @@ private void handlePutResolve(Cursor cursor, KVPair e, mergeCRDT(remoteRow, localRow, table, store, Value.Format.fromFirstByte( localDataEntry.getData()[0])); + + /* use existing creation time */ + jeOptions.setCreationTime(opres.getCreationTime()); /* use existing mod time */ jeOptions. setModificationTime(opres.getModificationTime()); @@ -422,6 +441,7 @@ private void handlePutResolve(Cursor cursor, KVPair e, Put.CURRENT, jeOptions); version = getVersion(cursor); expiration = opres.getExpirationTime(); + creationTime = opres.getCreationTime(); modTime = opres.getModificationTime(); final int storageSize = getStorageSize(cursor); op.addWriteBytes(storageSize, getNIndexWrites(cursor), @@ -433,6 +453,7 @@ private void handlePutResolve(Cursor cursor, KVPair e, MigrationStreamHandle.get().addPut(keyEntry, dataEntry, version.getVLSN(), + creationTime, modTime, expiration, e.isTombstone()); @@ -440,17 +461,15 @@ private void handlePutResolve(Cursor cursor, KVPair e, } /* - * 1st byte is format, after that, if in MULTI_REGION_TABLE format, - * is region id. If not in MRT format, return localRegionId + * Gets region id if one exists otherwise return localRegionId. */ private static int getRegionId(DatabaseEntry entry, int localRegionId) { byte[] valueBytes = entry.getData(); - Value.Format format = (valueBytes.length > 0) ? 
- Value.Format.fromFirstByte(valueBytes[0]) : null; - if (format != Value.Format.MULTI_REGION_TABLE) { + int regionId = Value.getRegionIdFromByteArray(valueBytes); + if (regionId == Region.NULL_REGION_ID) { return localRegionId; } - return PackedInteger.readInt(valueBytes, 1); + return regionId; } /* @@ -489,10 +508,6 @@ List generalAccessPrivileges() { @Override public List tableAccessPrivileges(long tableId) { - if (hasValidTTLSetting) { - return Arrays.asList(new TablePrivilege.InsertTable(tableId), - new TablePrivilege.DeleteTable(tableId)); - } return Collections.singletonList( new TablePrivilege.InsertTable(tableId)); } @@ -500,11 +515,6 @@ List tableAccessPrivileges(long tableId) { @Override public List namespaceAccessPrivileges(String namespace) { - if (hasValidTTLSetting) { - return Arrays.asList( - new NamespacePrivilege.InsertInNamespace(namespace), - new NamespacePrivilege.DeleteInNamespace(namespace)); - } return Collections.singletonList( new NamespacePrivilege.InsertInNamespace(namespace)); } @@ -529,4 +539,17 @@ private com.sleepycat.je.WriteOptions makeJEWriteOptions( .setTTL(ttlVal, ttlUnit) .setUpdateTTL(false); } + + /* + * Get additional table privileges required to perform this put operation. + * + * When an operation has a valid TTL, it is an implicit delete and + * requires DELETE_TABLE privilege. + */ + private EnumSet getTablePrivileges(KVPair kvPair) { + if (kvPair.getTTLVal() != 0) { + return EnumSet.of(DELETE_TABLE); + } + return null; + } } diff --git a/kvmain/src/main/java/oracle/kv/impl/api/ops/PutHandler.java b/kvmain/src/main/java/oracle/kv/impl/api/ops/PutHandler.java index a29452bb..b7536f38 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/ops/PutHandler.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/ops/PutHandler.java @@ -37,6 +37,7 @@ import oracle.kv.impl.rep.migration.MigrationStreamHandle; import oracle.kv.impl.topo.PartitionId; import oracle.kv.impl.util.TxnUtil; +import oracle.kv.table.TimeToLive; import com.sleepycat.je.Cursor; import com.sleepycat.je.Database; @@ -46,7 +47,6 @@ import com.sleepycat.je.OperationResult; import com.sleepycat.je.Transaction; import com.sleepycat.je.WriteOptions; -import com.sleepycat.util.PackedInteger; /** * Server handler for {@link Put}. 
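For reference (not patch code): the TTL that handlePutResolve above now feeds into the new makeOption(ttl, updateTTL, tableId, handler, tombstone) is chosen in three steps, and centralizing it this way is what lets makeOption attach the table's before-image TTL and the tombstone flag in one place instead of at every call site. A sketch under those assumptions, with stand-in parameters:

    /* Illustrative only: TTL selection for a resolve-style put. */
    static TimeToLive resolveTtl(boolean isTombstone,
                                 long ttlVal,
                                 TimeUnit ttlUnit,
                                 TimeToLive tombstoneTTL) {
        if (isTombstone) {
            /* tombstones always carry the store's tombstone TTL */
            return tombstoneTTL;
        }
        if (ttlVal != 0) {
            /* honor the per-record TTL carried by the incoming KVPair */
            return TimeToLive.createTimeToLive(ttlVal, ttlUnit);
        }
        return TimeToLive.DO_NOT_EXPIRE;
    }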
@@ -97,6 +97,7 @@ protected static Result.PutResult put(Put op, ResultValueVersion prevVal = null; long expTime = 0L; Version version = null; + long creationTime = 0L; long modificationTime = 0L; int storageSize = -1; boolean wasUpdate; @@ -105,21 +106,25 @@ protected static Result.PutResult put(Put op, byte[] valueBytes = op.getValueBytes(); assert (keyBytes != null) && (valueBytes != null); - checkTombstoneLength(tombstone, valueBytes.length); + checkTombstone(tombstone, valueBytes); final DatabaseEntry keyEntry = new DatabaseEntry(keyBytes); DatabaseEntry dataEntry = valueDatabaseEntry(valueBytes); OperationResult opres = null; - WriteOptions jeOptions; + + final TimeToLive ttl; + final boolean updateTTL; if (tombstone) { - jeOptions = makeOption(repnode.getRepNodeParams().getTombstoneTTL(), - true /* updateTTL */); - jeOptions.setTombstone(true); + ttl = repnode.getRepNodeParams().getTombstoneTTL(); + updateTTL = true; } else { - jeOptions = makeOption(op.getTTL(), op.getUpdateTTL()); + ttl = op.getTTL(); + updateTTL = op.getUpdateTTL(); } - + final WriteOptions jeOptions = + makeOption(ttl, updateTTL, op.getTableId(), operationHandler, + tombstone); jeOptions.setAllQueryIndexes(op.getAllIndexes(), op.getAllIndexIds()); jeOptions.setIndexesToUpdate(op.getIndexesToUpdate()); @@ -150,6 +155,7 @@ protected static Result.PutResult put(Put op, version = operationHandler.getVersion(cursor); expTime = opres.getExpirationTime(); + creationTime = opres.getCreationTime(); modificationTime = opres.getModificationTime(); storageSize = getStorageSize(cursor); wasUpdate = false; @@ -230,6 +236,7 @@ protected static Result.PutResult put(Put op, expTime = opres.getExpirationTime(); version = operationHandler.getVersion(cursor); + creationTime = opres.getCreationTime(); modificationTime = opres.getModificationTime(); storageSize = getStorageSize(cursor); @@ -245,6 +252,7 @@ protected static Result.PutResult put(Put op, MigrationStreamHandle.get().addPut(keyEntry, dataEntry, version.getVLSN(), + creationTime, modificationTime, expTime, tombstone); @@ -256,6 +264,7 @@ protected static Result.PutResult put(Put op, version, expTime, wasUpdate, + creationTime, modificationTime, storageSize, repnode.getRepNodeId(). @@ -266,11 +275,11 @@ protected static Result.PutResult put(Put op, } } - protected static void checkTombstoneLength(boolean tombstone, int length) { - /* tombstone layout: FORMAT(1 byte) | REGION_ID(packed int) */ - if (tombstone && length > (PackedInteger.MAX_LENGTH + 1)) { + protected static void checkTombstone(boolean tombstone, byte[] valueBytes) { + if (tombstone && !Value.isTombstone(valueBytes)) { throw new FaultException("The value for tombstone " + - "must be empty. length=" + length, true); + "must be empty. 
length=" + + (valueBytes.length - Value.getValueOffset(valueBytes)), true); } } diff --git a/kvmain/src/main/java/oracle/kv/impl/api/ops/PutIfAbsentHandler.java b/kvmain/src/main/java/oracle/kv/impl/api/ops/PutIfAbsentHandler.java index 0468617f..5a02750d 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/ops/PutIfAbsentHandler.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/ops/PutIfAbsentHandler.java @@ -68,6 +68,7 @@ Result execute(PutIfAbsent op, Transaction txn, PartitionId partitionId) { ResultValueVersion prevVal = null; long expTime = 0L; Version version = null; + long creationTime = 0L; long modificationTime = 0L; int storageSize = -1; @@ -80,7 +81,11 @@ Result execute(PutIfAbsent op, Transaction txn, PartitionId partitionId) { final DatabaseEntry dataEntry = valueDatabaseEntry(valueBytes); OperationResult opres; - WriteOptions jeOptions = makeOption(op.getTTL(), op.getUpdateTTL()); + final WriteOptions jeOptions = makeOption(op.getTTL(), + op.getUpdateTTL(), + op.getTableId(), + getOperationHandler(), + false /* not tombstone */); final Database db = getRepNode().getPartitionDB(partitionId); @@ -94,6 +99,7 @@ Result execute(PutIfAbsent op, Transaction txn, PartitionId partitionId) { if (opres != null) { version = getVersion(cursor); expTime = opres.getExpirationTime(); + creationTime = opres.getCreationTime(); modificationTime = opres.getModificationTime(); storageSize = getStorageSize(cursor); @@ -104,6 +110,7 @@ Result execute(PutIfAbsent op, Transaction txn, PartitionId partitionId) { MigrationStreamHandle.get().addPut(keyEntry, dataEntry, version.getVLSN(), + creationTime, modificationTime, expTime, false /*isTombstone*/); @@ -131,12 +138,14 @@ Result execute(PutIfAbsent op, Transaction txn, PartitionId partitionId) { partitionId, storageSize); version = getVersion(cursor); expTime = opres.getExpirationTime(); + creationTime = opres.getCreationTime(); modificationTime = opres.getModificationTime(); MigrationStreamHandle.get(). addPut(keyEntry, dataEntry, version.getVLSN(), + creationTime, modificationTime, expTime, false /*isTombstone*/); @@ -170,6 +179,7 @@ Result execute(PutIfAbsent op, Transaction txn, PartitionId partitionId) { version, expTime, false /*wasUpdate*/, + creationTime, modificationTime, storageSize, getRepNode().getRepNodeId(). 
diff --git a/kvmain/src/main/java/oracle/kv/impl/api/ops/PutIfPresentHandler.java b/kvmain/src/main/java/oracle/kv/impl/api/ops/PutIfPresentHandler.java index f75c88d8..a7847142 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/ops/PutIfPresentHandler.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/ops/PutIfPresentHandler.java @@ -69,6 +69,7 @@ Result execute(PutIfPresent op, Transaction txn, PartitionId partitionId) { ResultValueVersion prevVal = null; long expTime = 0L; + long creationTime = 0L; long modificationTime = 0L; int storageSize = -1; Version version = null; @@ -83,7 +84,11 @@ Result execute(PutIfPresent op, Transaction txn, PartitionId partitionId) { DatabaseEntry dataEntry = valueDatabaseEntry(valueBytes); OperationResult opres; - WriteOptions jeOptions = makeOption(op.getTTL(), op.getUpdateTTL()); + final WriteOptions jeOptions = makeOption(op.getTTL(), + op.getUpdateTTL(), + op.getTableId(), + getOperationHandler(), + false /* not a tombstone*/); final Database db = getRepNode().getPartitionDB(partitionId); @@ -147,6 +152,7 @@ Result execute(PutIfPresent op, Transaction txn, PartitionId partitionId) { expTime = opres.getExpirationTime(); version = getVersion(cursor); wasUpdate = true; + creationTime = opres.getCreationTime(); modificationTime = opres.getModificationTime(); storageSize = getStorageSize(cursor); @@ -158,6 +164,7 @@ Result execute(PutIfPresent op, Transaction txn, PartitionId partitionId) { MigrationStreamHandle.get().addPut(keyEntry, dataEntry, version.getVLSN(), + creationTime, modificationTime, expTime, false /*isTombstone*/); @@ -170,6 +177,7 @@ Result execute(PutIfPresent op, Transaction txn, PartitionId partitionId) { version, expTime, wasUpdate, + creationTime, modificationTime, storageSize, getRepNode().getRepNodeId(). 
diff --git a/kvmain/src/main/java/oracle/kv/impl/api/ops/PutIfVersionHandler.java b/kvmain/src/main/java/oracle/kv/impl/api/ops/PutIfVersionHandler.java index 09c28db6..f34aec3c 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/ops/PutIfVersionHandler.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/ops/PutIfVersionHandler.java @@ -71,6 +71,7 @@ Result execute(PutIfVersion op, Transaction txn, PartitionId partitionId) { ResultValueVersion prevVal = null; long expTime = 0L; + long creationTime = 0L; long modificationTime = 0L; int storageSize = -1; Version version = null; @@ -87,7 +88,11 @@ Result execute(PutIfVersion op, Transaction txn, PartitionId partitionId) { DatabaseEntry dataEntry = valueDatabaseEntry(valueBytes); OperationResult opres; - WriteOptions options = makeOption(op.getTTL(), op.getUpdateTTL()); + final WriteOptions options = makeOption(op.getTTL(), + op.getUpdateTTL(), + op.getTableId(), + getOperationHandler(), + false /* not a tombstone*/); final Database db = getRepNode().getPartitionDB(partitionId); @@ -135,6 +140,7 @@ Result execute(PutIfVersion op, Transaction txn, PartitionId partitionId) { version = getVersion(cursor); expTime = opres.getExpirationTime(); + creationTime = opres.getCreationTime(); modificationTime = opres.getModificationTime(); storageSize = getStorageSize(cursor); wasUpdate = true; @@ -145,6 +151,7 @@ Result execute(PutIfVersion op, Transaction txn, PartitionId partitionId) { MigrationStreamHandle.get().addPut(keyEntry, dataEntry, version.getVLSN(), + creationTime, modificationTime, expTime, false /*isTombstone*/); @@ -179,6 +186,7 @@ Result execute(PutIfVersion op, Transaction txn, PartitionId partitionId) { version, expTime, wasUpdate, + creationTime, modificationTime, storageSize, getRepNode().getRepNodeId(). 
diff --git a/kvmain/src/main/java/oracle/kv/impl/api/ops/PutResolve.java b/kvmain/src/main/java/oracle/kv/impl/api/ops/PutResolve.java index 24680443..3de37a04 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/ops/PutResolve.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/ops/PutResolve.java @@ -14,6 +14,7 @@ package oracle.kv.impl.api.ops; import static oracle.kv.impl.util.SerialVersion.CLOUD_MR_TABLE; +import static oracle.kv.impl.util.SerialVersion.CREATION_TIME_VER; import java.io.DataInput; import java.io.DataOutput; @@ -25,11 +26,11 @@ import oracle.kv.impl.api.table.Region; import oracle.kv.table.TimeToLive; -import com.sleepycat.util.PackedInteger; public class PutResolve extends Put { private final boolean isTombstone; - private final long timestamp; + private final long creationTime; + private final long lastModificationTime; /* expiration time in system time */ private final long expirationTimeMs; @@ -47,7 +48,8 @@ public PutResolve(byte[] keyBytes, long expirationTimeMs, boolean updateTTL, boolean isTombstone, - long timestamp, + long creationTime, + long lastModificationTime, int localRegionId) { super(OpCode.PUT_RESOLVE, keyBytes, value, prevValChoice, tableId, TimeToLive.DO_NOT_EXPIRE, updateTTL, @@ -56,7 +58,8 @@ public PutResolve(byte[] keyBytes, null, /*allIndexIds*/ null /*indexesToUpdate*/); this.isTombstone = isTombstone; - this.timestamp = timestamp; + this.creationTime = creationTime; + this.lastModificationTime = lastModificationTime; this.expirationTimeMs = expirationTimeMs; if (localRegionId != Region.NULL_REGION_ID) { Region.checkId(localRegionId, true /* isExternalRegion */); @@ -68,7 +71,8 @@ public PutResolve(byte[] keyBytes, private PutResolve(PutResolve other, short serialVersion) { super(other, serialVersion); isTombstone = other.isTombstone; - timestamp = other.timestamp; + creationTime = other.creationTime; + lastModificationTime = other.lastModificationTime; expirationTimeMs = other.expirationTimeMs; if (includeCloudMRTable(serialVersion)) { localRegionId = other.localRegionId; @@ -91,13 +95,18 @@ private PutResolve(PutResolve other, short serialVersion) { super(OpCode.PUT_RESOLVE, in, serialVersion); isTombstone = in.readBoolean(); - timestamp = in.readLong(); + lastModificationTime = in.readLong(); expirationTimeMs = in.readLong(); if (includeCloudMRTable(serialVersion)) { localRegionId = in.readInt(); } else { localRegionId = Region.NULL_REGION_ID; } + if (serialVersion >= CREATION_TIME_VER) { + creationTime = in.readLong(); + } else { + creationTime = 0; + } } @Override @@ -114,8 +123,12 @@ public byte[] getValueBytes() { return requestValue.getBytes(); } + public long getCreationTime() { + return creationTime; + } + public long getTimestamp() { - return timestamp; + return lastModificationTime; } /** @@ -166,15 +179,7 @@ boolean keyOnlyPut(int offset) { */ int computeOffset() { final byte[] valueBytes = getValueBytes(); - final Value.Format format = Value.Format.fromFirstByte(valueBytes[0]); - /* should always pass, cheap check for safety */ - if (format != Value.Format.MULTI_REGION_TABLE) { - throw new IllegalArgumentException("Invalid format=" + format); - } - - /* skip bytes of region id */ - final int regionIdLen = PackedInteger.getReadIntLength(valueBytes, 1); - return regionIdLen + 1; + return Value.getValueOffset(valueBytes); } /** @@ -191,7 +196,7 @@ public void writeFastExternal(DataOutput out, short serialVersion) throws IOException { super.writeFastExternal(out, serialVersion); out.writeBoolean(isTombstone); - 
out.writeLong(timestamp); + out.writeLong(lastModificationTime); out.writeLong(expirationTimeMs); if (includeCloudMRTable(serialVersion)) { out.writeInt(localRegionId); @@ -202,6 +207,9 @@ public void writeFastExternal(DataOutput out, short serialVersion) "region Id , must be " + CLOUD_MR_TABLE + " or greater"); } } + if (serialVersion >= CREATION_TIME_VER) { + out.writeLong(creationTime); + } } /** @@ -228,15 +236,16 @@ public boolean equals(Object obj) { } final PutResolve other = (PutResolve) obj; return (isTombstone == other.isTombstone) && - (timestamp == other.timestamp) && + (creationTime == other.creationTime) && + (lastModificationTime == other.lastModificationTime) && (expirationTimeMs == other.expirationTimeMs) && (localRegionId == other.localRegionId); } @Override public int hashCode() { - return Objects.hash(super.hashCode(), isTombstone, timestamp, - expirationTimeMs, localRegionId); + return Objects.hash(super.hashCode(), isTombstone, creationTime, + lastModificationTime, expirationTimeMs, localRegionId); } @Override diff --git a/kvmain/src/main/java/oracle/kv/impl/api/ops/PutResolveHandler.java b/kvmain/src/main/java/oracle/kv/impl/api/ops/PutResolveHandler.java index ac9100f2..cebaf02d 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/ops/PutResolveHandler.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/ops/PutResolveHandler.java @@ -22,7 +22,6 @@ import oracle.kv.ReturnValueVersion.Choice; import oracle.kv.UnauthorizedException; import oracle.kv.Value; -import oracle.kv.Value.Format; import oracle.kv.Version; import oracle.kv.impl.api.KVStoreImpl; import oracle.kv.impl.api.ops.InternalOperation.OpCode; @@ -50,11 +49,10 @@ import com.sleepycat.je.Put; import com.sleepycat.je.Transaction; import com.sleepycat.je.WriteOptions; -import com.sleepycat.util.PackedInteger; /** * Server handler for {@link PutResolve}. - * + *

    * Throughput calculation * +------------------------------------------------------------------------------+ * | Op | Choice | # | Read | * | Write | @@ -100,12 +98,13 @@ Result execute(PutResolve op, byte[] valueBytes = op.getValueBytes(); assert (keyBytes != null) && (valueBytes != null); - PutHandler.checkTombstoneLength(op.isTombstone(), valueBytes.length); + PutHandler.checkTombstone(op.isTombstone(), valueBytes); final DatabaseEntry keyEntry = new DatabaseEntry(keyBytes); final DatabaseEntry dataEntry = valueDatabaseEntry(valueBytes); final WriteOptions writeOptions = getWriteOptions(op); + writeOptions.setCreationTime(op.getCreationTime()); writeOptions.setModificationTime(op.getTimestamp()); final TableImpl table = getAndCheckTable(op.getTableId()); @@ -207,14 +206,16 @@ private void versionCheck(PutResolve op, TableImpl table) { private WriteOptions getWriteOptions(PutResolve op) { WriteOptions writeOptions; + final long tableId = op.getTableId(); if (op.isTombstone()) { writeOptions = makeOption(getRepNode().getRepNodeParams().getTombstoneTTL(), - true); - writeOptions.setTombstone(true); + true, tableId, getOperationHandler(), true); } else { writeOptions = makeExpirationTimeOption(op.getExpirationTimeMs(), - op.getUpdateTTL()); + op.getUpdateTTL(), + tableId, + getOperationHandler()); } return writeOptions; } @@ -236,6 +237,7 @@ private Result.PutResult put(Cursor cursor, ResultValueVersion prevVal = null; Version version = null; long expTime = 0L; + long creationTime = 0L; long modificationTime = 0L; int storageSize = -1; boolean wasUpdate = false; @@ -244,8 +246,10 @@ private Result.PutResult put(Cursor cursor, OperationResult opres = putEntry(cursor, keyEntry, dataEntry, NO_OVERWRITE, writeOptions); if (opres != null) { + /* This is when there is no row in the local store */ version = getVersion(cursor); expTime = opres.getExpirationTime(); + creationTime = opres.getCreationTime(); modificationTime = opres.getModificationTime(); storageSize = getStorageSize(cursor); @@ -253,6 +257,7 @@ private Result.PutResult put(Cursor cursor, op.addWriteBytes(storageSize, getNIndexWrites(cursor), partitionId, storageSize); } else { + /* This is when there is a row in the local store */ final Choice choice = op.getReturnValueVersionChoice(); final DatabaseEntry localDataEntry = new DatabaseEntry(); @@ -313,10 +318,15 @@ private Result.PutResult put(Cursor cursor, Value.Format.fromFirstByte( remoteValue[0])); } else { + /* local row wins */ /* Only merge the CRDTs. */ dataEntry = mergeCRDT(remoteRow, localRow, table, store, Value.Format.fromFirstByte( localValue[0])); + + /* This set the creation time of the winning row which + is local, basically keeping the same creation time */ + writeOptions.setCreationTime(opres.getCreationTime()); /* * The modification time should not be changed * since except the CRDTs, other fields are not changed. 
@@ -342,6 +352,7 @@ private Result.PutResult put(Cursor cursor, version = getVersion(cursor); expTime = opres.getExpirationTime(); wasUpdate = true; + creationTime = opres.getCreationTime(); modificationTime = opres.getModificationTime(); storageSize = getStorageSize(cursor); @@ -349,13 +360,13 @@ private Result.PutResult put(Cursor cursor, partitionId, storageSize); } reserializeResultValue(op, prevVal); - } if (version != null) { MigrationStreamHandle.get().addPut(keyEntry, dataEntry, version.getVLSN(), + creationTime, modificationTime, expTime, op.isTombstone()); @@ -367,6 +378,7 @@ private Result.PutResult put(Cursor cursor, version, expTime, wasUpdate, + creationTime, modificationTime, storageSize, getRepNode().getRepNodeId(). @@ -382,9 +394,7 @@ private PrimaryKeyMetadata getMRMeta(long updateTime, * Local value can be empty value for tombstone put locally or value * with non MULTI_REGION_TABLE format. */ - Value.Format format = (valueBytes.length > 0) ? - Value.Format.fromFirstByte(valueBytes[0]) : null; - if (format != Format.MULTI_REGION_TABLE) { + if (!Value.hasRegionId(valueBytes)) { if (Region.isMultiRegionId(localRegionId)) { /* * If local row is not multi-region format, then it is a local @@ -395,7 +405,7 @@ private PrimaryKeyMetadata getMRMeta(long updateTime, throw new IllegalArgumentException("This is not a record of " + "multiregion tables."); } - int regionId = PackedInteger.readInt(valueBytes, 1); + int regionId = Value.getRegionIdFromByteArray(valueBytes); return new PrimaryKeyMetadata(updateTime, regionId); } diff --git a/kvmain/src/main/java/oracle/kv/impl/api/ops/RequestValue.java b/kvmain/src/main/java/oracle/kv/impl/api/ops/RequestValue.java index 8c15f2c7..1c4dc5ec 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/ops/RequestValue.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/ops/RequestValue.java @@ -26,7 +26,6 @@ import oracle.kv.impl.util.FastExternalizable; import oracle.kv.impl.util.UserDataControl; -import com.sleepycat.util.PackedInteger; /** * Holds a Value for a request, optimized to avoid array allocations/copies. 
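The new creationTime field follows the same cross-version serialization pattern everywhere it appears in this patch (PutResolve above; Result, ResultKeyValueVersion and related classes below): it is written and read only when the negotiated serial version is at least CREATION_TIME_VER, and defaults to 0 when talking to an older peer. A self-contained sketch of that gate, illustrative only (the class and the constant's value are stand-ins, not patch code):

    import java.io.DataInput;
    import java.io.DataOutput;
    import java.io.IOException;

    class CreationTimeGateSketch {
        static final short CREATION_TIME_VER = 100;   /* stand-in value */
        long creationTime;

        void writeFastExternal(DataOutput out, short serialVersion)
                throws IOException {
            /* pre-existing fields are written unconditionally, then: */
            if (serialVersion >= CREATION_TIME_VER) {
                out.writeLong(creationTime);           /* newer peers only */
            }
        }

        void readFastExternal(DataInput in, short serialVersion)
                throws IOException {
            creationTime = (serialVersion >= CREATION_TIME_VER)
                    ? in.readLong()
                    : 0;                               /* older peer: default */
        }
    }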
@@ -106,11 +105,18 @@ byte[] getBytes() { int getRegionId() { if (value == null) { - return PackedInteger.readInt(bytes, 1); + return Value.getRegionIdFromByteArray(bytes); } return value.getRegionId(); } + String getRowMetadata() { + if (value == null) { + return Value.fromByteArray(bytes).getRowMetadata(); + } + return value.getRowMetadata(); + } + Format getValueFormat() { if (value == null) { return Format.fromFirstByte(bytes[0]); diff --git a/kvmain/src/main/java/oracle/kv/impl/api/ops/Result.java b/kvmain/src/main/java/oracle/kv/impl/api/ops/Result.java index eb192d81..9717ca78 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/ops/Result.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/ops/Result.java @@ -14,6 +14,7 @@ package oracle.kv.impl.api.ops; import static oracle.kv.impl.util.ObjectUtil.checkNull; +import static oracle.kv.impl.util.SerialVersion.CREATION_TIME_VER; import static oracle.kv.impl.util.SerialVersion.QUERY_VERSION_14; import static oracle.kv.impl.util.SerializationUtil.toDeserializedForm; import static oracle.kv.impl.util.SerializationUtil.readByteArray; @@ -262,6 +263,12 @@ public long getPreviousExpirationTime() { "previous expiration time"); } + @Override + public long getPreviousCreationTime() { + throw new IllegalStateException("result of type: " + getClass() + + " does not contain a previous creation time"); + } + @Override public long getPreviousModificationTime() { throw new IllegalStateException("result of type: " + getClass() + @@ -269,11 +276,18 @@ public long getPreviousModificationTime() { "previous modification time"); } + @Override + public long getNewCreationTime() { + throw new IllegalStateException("result of type: " + getClass() + + " does not contain a " + + "new creation time"); + } + @Override public long getNewModificationTime() { throw new IllegalStateException("result of type: " + getClass() + - " does not contain a " + - "new modification time"); + " does not contain a " + + "new modification time"); } @Override @@ -511,6 +525,8 @@ public static class PutResult extends ValueVersionResult { private final Version newVersion; /* of the new record */ private final long newExpirationTime; /* of the new record */ + // There is creationTime in prevValue, they should both be the same. 
+ private final long newCreationTime; private final long newModificationTime; private final int newStorageSize; private final int shard; @@ -543,6 +559,7 @@ public static class PutResult extends ValueVersionResult { Version version, long expTime, boolean wasUpdate, + long creationTime, long modificationTime, int storageSize, int shard) { @@ -552,6 +569,7 @@ public static class PutResult extends ValueVersionResult { this.wasUpdate = wasUpdate; newVersion = version; newExpirationTime = expTime; + newCreationTime = creationTime; newModificationTime = modificationTime; newStorageSize = storageSize; this.shard = shard; @@ -563,6 +581,7 @@ private PutResult(PutResult other, short serialVersion) { wasUpdate = other.wasUpdate; newVersion = other.newVersion; newExpirationTime = other.newExpirationTime; + newCreationTime = other.newCreationTime; newModificationTime = other.newModificationTime; newStorageSize = other.newStorageSize; shard = other.shard; @@ -591,6 +610,11 @@ private PutResult(PutResult other, short serialVersion) { newStorageSize = in.readInt(); shard = in.readInt(); + if (serialVersion >= CREATION_TIME_VER) { + newCreationTime = in.readLong(); + } else { + newCreationTime = 0; + } } /** @@ -609,6 +633,8 @@ private PutResult(PutResult other, short serialVersion) { * whether newModificationTime is present *

  • [Optional]({@link DataOutput#writeLong long}) * {@link #getNewModificationTime newModificationTime} + *
  • [Contingent on serialVersion]({@link DataOutput#writeLong long}) + * {@link #getNewCreationTime creationTime} * */ @Override @@ -618,8 +644,8 @@ public void writeFastExternal(DataOutput out, short serialVersion) writeFastExternalOrNull(out, serialVersion, newVersion); writeTimestamp(out, - newExpirationTime, - serialVersion); + newExpirationTime, + serialVersion); out.writeBoolean(wasUpdate); writeTimestamp(out, newModificationTime, @@ -627,6 +653,9 @@ public void writeFastExternal(DataOutput out, short serialVersion) out.writeInt(newStorageSize); out.writeInt(shard); + if (serialVersion >= CREATION_TIME_VER) { + out.writeLong(newCreationTime); + } } @Override @@ -658,6 +687,11 @@ public boolean getWasUpdate() { return wasUpdate; } + @Override + public long getNewCreationTime() { + return newCreationTime; + } + @Override public long getNewModificationTime() { return newModificationTime; @@ -681,6 +715,7 @@ public boolean equals(Object obj) { final PutResult other = (PutResult) obj; return Objects.equals(newVersion, other.newVersion) && (newExpirationTime == other.newExpirationTime) && + (newCreationTime == other.newCreationTime) && (newModificationTime == other.newModificationTime) && (newStorageSize == other.newStorageSize) && (shard == other.shard) && @@ -690,7 +725,7 @@ public boolean equals(Object obj) { @Override public int hashCode() { - return Objects.hash(newVersion, newExpirationTime, + return Objects.hash(newVersion, newExpirationTime, newCreationTime, newModificationTime, newStorageSize, shard, wasUpdate, generatedValue); } @@ -881,6 +916,7 @@ public static abstract class ValueVersionResult extends Result { private final ResultValue resultValue; protected Version version; private final long expirationTime; + protected long creationTime; private final long modificationTime; private final int storageSize; @@ -896,12 +932,14 @@ public static abstract class ValueVersionResult extends Result { null); version = valueVersion.getVersion(); expirationTime = valueVersion.getExpirationTime(); + creationTime = valueVersion.getCreationTime(); modificationTime = valueVersion.getModificationTime(); storageSize = valueVersion.getStorageSize(); } else { resultValue = null; version = null; expirationTime = 0; + creationTime = 0; modificationTime = 0; storageSize = -1; } @@ -917,6 +955,7 @@ public static abstract class ValueVersionResult extends Result { resultValue = toDeserializedForm(other.resultValue, serialVersion); version = other.version; expirationTime = other.expirationTime; + creationTime = other.creationTime; modificationTime = other.modificationTime; storageSize = other.storageSize; } @@ -945,6 +984,11 @@ public static abstract class ValueVersionResult extends Result { modificationTime = readTimestamp(in, serialVersion); storageSize = in.readInt(); + if (serialVersion >= CREATION_TIME_VER) { + creationTime = in.readLong(); + } else { + creationTime = 0; + } } /** @@ -974,14 +1018,12 @@ public void writeFastExternal(DataOutput out, short serialVersion) super.writeFastExternal(out, serialVersion); writeFastExternalOrNull(out, serialVersion, resultValue); writeFastExternalOrNull(out, serialVersion, version); - writeTimestamp(out, - expirationTime, - serialVersion); - writeTimestamp(out, - modificationTime, - serialVersion); - + writeTimestamp(out, expirationTime, serialVersion); + writeTimestamp(out, modificationTime, serialVersion); out.writeInt(storageSize); + if (serialVersion >= CREATION_TIME_VER) { + out.writeLong(creationTime); + } } @Override @@ -1003,6 +1045,11 @@ public long 
getPreviousExpirationTime() { return expirationTime; } + @Override + public long getPreviousCreationTime() { + return creationTime; + } + @Override public long getPreviousModificationTime() { return modificationTime; @@ -1024,13 +1071,15 @@ public boolean equals(Object obj) { Objects.equals(version, other.version) && (expirationTime == other.expirationTime) && (modificationTime == other.modificationTime) && + (creationTime == other.creationTime) && (storageSize == other.storageSize); } @Override public int hashCode() { return Objects.hash(super.hashCode(), resultValue, version, - expirationTime, modificationTime, storageSize); + expirationTime, modificationTime, creationTime, + storageSize); } } diff --git a/kvmain/src/main/java/oracle/kv/impl/api/ops/ResultIndexKeys.java b/kvmain/src/main/java/oracle/kv/impl/api/ops/ResultIndexKeys.java index 8a50d9f7..d32868ad 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/ops/ResultIndexKeys.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/ops/ResultIndexKeys.java @@ -28,7 +28,7 @@ /** * Holds results of an index key iteration over a table. This result includes - * primary key and index key byte arrays. This is all of the information that + * primary key and index key byte arrays. This is all the information that * is available in a single secondary scan without doing an additional database * read of the primary data. * diff --git a/kvmain/src/main/java/oracle/kv/impl/api/ops/ResultIndexRows.java b/kvmain/src/main/java/oracle/kv/impl/api/ops/ResultIndexRows.java index eb25f875..37cef51a 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/ops/ResultIndexRows.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/ops/ResultIndexRows.java @@ -50,9 +50,10 @@ public ResultIndexRows(byte[] indexKeyBytes, byte[] valueBytes, Version version, long expirationTime, + long creationTime, long modificationTime) { super(primaryKeyBytes, valueBytes, version, expirationTime, - modificationTime, false/* isTombstone */); + creationTime, modificationTime, false/* isTombstone */); this.indexKeyBytes = indexKeyBytes; } diff --git a/kvmain/src/main/java/oracle/kv/impl/api/ops/ResultKeyValueVersion.java b/kvmain/src/main/java/oracle/kv/impl/api/ops/ResultKeyValueVersion.java index 1163c812..d0fc951a 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/ops/ResultKeyValueVersion.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/ops/ResultKeyValueVersion.java @@ -14,6 +14,7 @@ package oracle.kv.impl.api.ops; import static oracle.kv.impl.util.ObjectUtil.checkNull; +import static oracle.kv.impl.util.SerialVersion.CREATION_TIME_VER; import static oracle.kv.impl.util.SerialVersion.TABLE_ITERATOR_TOMBSTONES_VER; import static oracle.kv.impl.util.SerializationUtil.toDeserializedForm; import static oracle.kv.impl.util.SerializationUtil.readNonNullByteArray; @@ -44,12 +45,14 @@ public class ResultKeyValueVersion implements FastExternalizable { private final Version version; private final long expirationTime; private final long modificationTime; + private final long creationTime; private final boolean isTombstone; public ResultKeyValueVersion(byte[] keyBytes, byte[] valueBytes, Version version, long expirationTime, + long creationTime, long modificationTime, boolean isTombstone) { checkNull("keyBytes", keyBytes); @@ -58,6 +61,7 @@ public ResultKeyValueVersion(byte[] keyBytes, this.resultValue = new ResultValue(valueBytes); this.version = version; this.expirationTime = expirationTime; + this.creationTime = creationTime; this.modificationTime = modificationTime; this.isTombstone = 
isTombstone; } @@ -68,6 +72,7 @@ public ResultKeyValueVersion(byte[] keyBytes, resultValue = toDeserializedForm(other.resultValue, serialVersion); version = other.version; expirationTime = other.expirationTime; + creationTime = other.creationTime; modificationTime = other.modificationTime; isTombstone = other.isTombstone; } @@ -89,6 +94,11 @@ public ResultKeyValueVersion(DataInput in, short serialVersion) } else { isTombstone = false; } + if (serialVersion >= CREATION_TIME_VER) { + creationTime = in.readLong(); + } else { + creationTime = 0; + } } /** @@ -124,12 +134,15 @@ public void writeFastExternal(DataOutput out, short serialVersion) if (serialVersion >= TABLE_ITERATOR_TOMBSTONES_VER) { out.writeBoolean(isTombstone); } else if (isTombstone) { - throw new IllegalStateException("Result is a tombstone while its" + + throw new IllegalStateException("Result is a tombstone while its " + "serial version=" + serialVersion + " is less than the minimum " + "required version=" + TABLE_ITERATOR_TOMBSTONES_VER); } + if (serialVersion >= CREATION_TIME_VER) { + out.writeLong(creationTime); + } } @Override @@ -145,6 +158,7 @@ public boolean equals(Object obj) { resultValue.equals(other.resultValue) && version.equals(other.version) && (expirationTime == other.expirationTime) && + (creationTime == other.creationTime) && (modificationTime == other.modificationTime) && (isTombstone == other.isTombstone); } @@ -152,7 +166,7 @@ public boolean equals(Object obj) { @Override public int hashCode() { return Objects.hash(keyBytes, resultValue, version, expirationTime, - modificationTime, isTombstone); + creationTime, modificationTime, isTombstone); } public byte[] getKeyBytes() { @@ -175,6 +189,10 @@ public long getExpirationTime() { return expirationTime; } + public long getCreationTime() { + return creationTime; + } + public long getModificationTime() { return modificationTime; } diff --git a/kvmain/src/main/java/oracle/kv/impl/api/ops/ResultValueVersion.java b/kvmain/src/main/java/oracle/kv/impl/api/ops/ResultValueVersion.java index 4fcab3bc..322ad77d 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/ops/ResultValueVersion.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/ops/ResultValueVersion.java @@ -23,6 +23,7 @@ public class ResultValueVersion { private byte[] valueBytes; private final Version version; private final long expirationTime; + private final long creationTime; private final long modificationTime; private final int storageSize; @@ -32,11 +33,13 @@ public class ResultValueVersion { public ResultValueVersion(byte[] valueBytes, Version version, long expirationTime, + long creationTime, long modificationTime, int storageSize) { this.valueBytes = valueBytes; this.version = version; this.expirationTime = expirationTime; + this.creationTime = creationTime; this.modificationTime = modificationTime; this.storageSize = storageSize; } @@ -57,6 +60,10 @@ public long getExpirationTime() { return expirationTime; } + public long getCreationTime() { + return creationTime; + } + public long getModificationTime() { return modificationTime; } diff --git a/kvmain/src/main/java/oracle/kv/impl/api/ops/ReturnResultValueVersion.java b/kvmain/src/main/java/oracle/kv/impl/api/ops/ReturnResultValueVersion.java index 60b00bb0..ae438b09 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/ops/ReturnResultValueVersion.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/ops/ReturnResultValueVersion.java @@ -37,11 +37,13 @@ Choice getReturnChoice() { void setValueVersion(byte[] valueBytes, Version version, long 
expirationTime, + long creationTime, long modificationTime, int storageSize) { this.valueVersion = new ResultValueVersion(valueBytes, version, expirationTime, + creationTime, modificationTime, storageSize); } diff --git a/kvmain/src/main/java/oracle/kv/impl/api/ops/Scanner.java b/kvmain/src/main/java/oracle/kv/impl/api/ops/Scanner.java index a8287662..509b73e3 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/ops/Scanner.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/ops/Scanner.java @@ -50,24 +50,23 @@ * A class to encapsulate iteration over JE records for KV methods that * require iteration. * - * The usage pattern is: + * The usage pattern is:
      *   Scanner scanner = new Scanner(...);
      *   DatabaseEntry keyEntry = scanner.getKey();
    - *   DatabaseEntry dateEntry = scanner.getDate();
    + *   DatabaseEntry dataEntry = scanner.getData();
      *   try {
      *     while (scanner.next()) {
      *        // do things with keyEntry, dataEntry
      *     }
      *   } finally {
      *     scanner.close();  // closes cursor
    - *   }
    + *   }
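
For reference, the corrected usage pattern above expands to the following caller-side sketch. It is illustrative only: the Scanner constructor arguments are elided ("...") in the javadoc and stay elided here, the local variable names are hypothetical, and it assumes oracle.kv.impl.api.ops.Scanner and com.sleepycat.je.DatabaseEntry are in scope. getCreationTime() is the accessor this patch adds; per the hunk below it returns 0 when there is no current valid result.

    Scanner scanner = new Scanner(/* ... iteration parameters ... */);
    DatabaseEntry keyEntry = scanner.getKey();
    DatabaseEntry dataEntry = scanner.getData();
    try {
        while (scanner.next()) {
            /* keyEntry and dataEntry now refer to the current record */
            long createTime = scanner.getCreationTime();     /* added by this patch; 0 if no valid result */
            long modTime = scanner.getModificationTime();
            /* process the record ... */
        }
    } finally {
        scanner.close();   /* closes the underlying cursor */
    }
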
    * * Any security-relevant operations are performed by callers. Batching is * also handled by callers. * * See below for more information on state. */ - public class Scanner { /** Lowest possible value for a serialized key character. */ @@ -328,6 +327,14 @@ public long getExpirationTime() { return (result != null ? result.getExpirationTime() : 0); } + /** + * Returns the creation time of the current valid result if non-null, + * otherwise 0. + */ + public long getCreationTime() { + return (result != null ? result.getCreationTime() : 0); + } + public long getModificationTime() { return (result != null ? result.getModificationTime() : 0); } diff --git a/kvmain/src/main/java/oracle/kv/impl/api/ops/SingleKeyOperationHandler.java b/kvmain/src/main/java/oracle/kv/impl/api/ops/SingleKeyOperationHandler.java index 24670d67..17c9cd52 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/ops/SingleKeyOperationHandler.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/ops/SingleKeyOperationHandler.java @@ -13,6 +13,7 @@ package oracle.kv.impl.api.ops; +import java.util.EnumSet; import java.util.List; import oracle.kv.UnauthorizedException; @@ -23,6 +24,7 @@ import oracle.kv.impl.security.AccessCheckUtils; import oracle.kv.impl.security.ExecutionContext; import oracle.kv.impl.security.KVStorePrivilege; +import oracle.kv.impl.security.KVStorePrivilegeLabel; import oracle.kv.impl.security.SystemPrivilege; import oracle.kv.impl.security.TablePrivilege; @@ -84,6 +86,39 @@ List getRequiredPrivileges(T op) { void verifyDataAccess(T op) throws UnauthorizedException { + verifyDataAccess(op, null); + } + + /** + * Verifies the data access in general keyspace for operation. If the + * tableId for the operation is non-zero, table existence will be checked + * first. Then if the security is enabled, the legitimacy of data access + * will be checked further: + *

    + * 1. if tableId is non-zero, the access privileges on the table specified + * by the id are needed;
    + * 2. if tableId is zero but the key falls in a table's keyspace, the + * access privileges on the table are needed;
    + * 3. if tableId is zero and the key is not in any table keyspace, the + * access privileges on general keyspace are needed; + *

    + * If it's a table operation and requires additional table privileges to + * perform, we also check if current session has required access privileges. + * + * Here we only check if current session has the required access + * privileges for efficiency, since the authentication checking and subject + * identification have been done while processing the request. + * + * @param op the operation + * @param tablePrivs the additional required table privileges. Note that the + * table privileges specified should be able to imply the same privileges + * in the generalAccessPrivileges of the operation. + * @throws UnauthorizedException if the permission check for data access + * fails. + */ + void verifyDataAccess(T op, EnumSet tablePrivs) + throws UnauthorizedException { + TableImpl accessedTable = null; /* For table operation, check if table exists */ @@ -144,6 +179,14 @@ void verifyDataAccess(T op) /* Check the privileges on the namespace */ if ( !accessedTable.isSystemTable() && exeCtx.hasAllPrivileges( namespaceAccessPrivileges(accessedTable.getInternalNamespace()))) { + + /* + * The operation may require additional privileges, verify if + * current session has the required privileges. + */ + if (tablePrivs != null) { + verifyTablePrivileges(accessedTable, tablePrivs); + } return; } @@ -156,7 +199,8 @@ void verifyDataAccess(T op) throw new UnauthorizedException( "Insufficient access rights granted on table, id: " + accessedTable.getId() + - " name: " + accessedTable.getFullNamespaceName()); + " name: " + accessedTable.getFullNamespaceName() + ); } /* Ensure at least read privileges on all parent tables */ @@ -180,6 +224,14 @@ void verifyDataAccess(T op) * table access. */ verifySystemTableAccess(accessedTable); + + /* + * The operation may require additional privileges, verify if + * current session has the required privileges. 
+ */ + if (tablePrivs != null) { + verifyTablePrivileges(accessedTable, tablePrivs); + } } void reserializeResultValue(SingleKeyOperation op, ResultValueVersion rvv) { diff --git a/kvmain/src/main/java/oracle/kv/impl/api/ops/TableQuery.java b/kvmain/src/main/java/oracle/kv/impl/api/ops/TableQuery.java index 024cf50e..e698a03e 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/ops/TableQuery.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/ops/TableQuery.java @@ -17,6 +17,7 @@ import static oracle.kv.impl.util.SerialVersion.QUERY_VERSION_14; import static oracle.kv.impl.util.SerialVersion.QUERY_VERSION_16; import static oracle.kv.impl.util.SerialVersion.CLOUD_MR_TABLE; +import static oracle.kv.impl.util.SerialVersion.QUERY_VERSION_18; import java.io.ByteArrayOutputStream; import java.io.DataInput; @@ -151,6 +152,8 @@ public class TableQuery extends InternalOperation { private final boolean performsWrite; + private final String rowMetadata; + public TableQuery( String queryName, DistributionKind distKind, @@ -175,7 +178,8 @@ public TableQuery( int localRegionId, boolean doTombstone, long maxServerMemoryConsumption, - boolean performsWrite) { + boolean performsWrite, + String rowMetadata) { /* * The distinct OpCodes are primarily for a finer granularity of @@ -212,6 +216,7 @@ public TableQuery( this.doTombstone = doTombstone; this.maxServerMemoryConsumption = maxServerMemoryConsumption; this.performsWrite = performsWrite; + this.rowMetadata = rowMetadata; } FieldDefImpl getResultDef() { @@ -346,6 +351,10 @@ public long getMaxServerMemoryConsumption() { return maxServerMemoryConsumption; } + public String getRowMetadata() { + return rowMetadata; + } + @Override public boolean performsWrite() { return performsWrite; @@ -439,6 +448,10 @@ public void writeFastExternal(DataOutput out, short serialVersion) out.writeBoolean(performsWrite); out.writeInt(updateLimit); } + + if (serialVersion >= QUERY_VERSION_18) { + SerializationUtil.writeString(out, serialVersion, rowMetadata); + } } /** @@ -512,6 +525,12 @@ protected TableQuery(OpCode opCode, DataInput in, short serialVersion) updateLimit = 0; } + if (serialVersion >= QUERY_VERSION_18) { + rowMetadata = SerializationUtil.readString(in, serialVersion); + } else { + rowMetadata = null; + } + } catch (IOException e) { e.printStackTrace(); throw e; diff --git a/kvmain/src/main/java/oracle/kv/impl/api/ops/TableQueryHandler.java b/kvmain/src/main/java/oracle/kv/impl/api/ops/TableQueryHandler.java index f166d002..433b3647 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/ops/TableQueryHandler.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/ops/TableQueryHandler.java @@ -112,9 +112,10 @@ Result execute(TableQuery op, PartitionId partitionId) { TableMetadataHelper mdHelper = getMetadataHelper(); - ExecuteOptions options = new ExecuteOptions(). - setRegionId(op.getLocalRegionId()). - setDoTombstone(op.doTombstone()); + ExecuteOptions options = new ExecuteOptions() + .setRegionId(op.getLocalRegionId()) + .setDoTombstone(op.doTombstone()) + .setRowMetadata(op.getRowMetadata()); /* * Save the ResumeInfo before execution. 
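
The TableQuery hunks above gate the new rowMetadata field on QUERY_VERSION_18 in the same way the ResultKeyValueVersion hunks gate creationTime on CREATION_TIME_VER: the field is written only when the negotiated serial version is new enough, and the reader falls back to a neutral default (null, or 0) when the sender predates it. A condensed sketch of that idiom follows; the class is hypothetical and the constant value is a placeholder standing in for the real entry in oracle.kv.impl.util.SerialVersion.

    import java.io.DataInput;
    import java.io.DataOutput;
    import java.io.IOException;

    /* Illustrative only; mirrors the gating applied to creationTime and rowMetadata. */
    final class VersionGatedField {

        /* Placeholder value; the real constant lives in oracle.kv.impl.util.SerialVersion. */
        static final short CREATION_TIME_VER = 99;

        static void write(DataOutput out, short serialVersion, long creationTime)
            throws IOException {
            if (serialVersion >= CREATION_TIME_VER) {
                out.writeLong(creationTime);   /* only newer peers see the field */
            }
        }

        static long read(DataInput in, short serialVersion) throws IOException {
            if (serialVersion >= CREATION_TIME_VER) {
                return in.readLong();
            }
            return 0;                          /* default when the sender predates the field */
        }
    }

This keeps mixed-version stores interoperable during upgrade: an upgraded node can still exchange requests and results with a client or replica that has not yet learned the new field.
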
diff --git a/kvmain/src/main/java/oracle/kv/impl/api/parallelscan/ParallelScan.java b/kvmain/src/main/java/oracle/kv/impl/api/parallelscan/ParallelScan.java index db69c800..aa5b6769 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/parallelscan/ParallelScan.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/parallelscan/ParallelScan.java @@ -255,6 +255,7 @@ protected void convertResult(Result result, entry.getValue(), entry.getVersion(), entry.getExpirationTime(), + entry.getCreationTime(), entry.getModificationTime())); } } diff --git a/kvmain/src/main/java/oracle/kv/impl/api/table/AddTable.java b/kvmain/src/main/java/oracle/kv/impl/api/table/AddTable.java index c38a3c27..bbac04e6 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/table/AddTable.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/table/AddTable.java @@ -23,6 +23,7 @@ import static oracle.kv.impl.util.SerializationUtil.writeString; import static oracle.kv.impl.util.SerialVersion.JSON_COLLECTION_VERSION; import static oracle.kv.impl.util.SerialVersion.SCHEMALESS_TABLE_VERSION; +import static oracle.kv.impl.util.SerialVersion.BEFORE_IMAGE_VERSION; import java.io.DataInput; import java.io.DataOutput; @@ -64,6 +65,7 @@ class AddTable extends TableChange { private final Set regionIds; private final boolean schemaless; private final Map jsonCollectionMRCounters; + private final TimeToLive beforeImageTTL; AddTable(TableImpl table, int seqNum) { super(seqNum); @@ -76,6 +78,7 @@ class AddTable extends TableChange { shardKey = table.getShardKey(); fields = table.getFieldMap(); ttl = table.getDefaultTTL(); + beforeImageTTL = table.getBeforeImageTTL(); r2compat = table.isR2compatible(); schemaId = table.getSchemaId(); description = table.getDescription(); @@ -103,6 +106,12 @@ class AddTable extends TableChange { fields = new FieldMap(in, serialVersion); ttl = readFastExternalOrNull(in, serialVersion, TimeToLive::readFastExternal); + if (serialVersion >= BEFORE_IMAGE_VERSION) { + beforeImageTTL = readFastExternalOrNull(in, serialVersion, + TimeToLive::readFastExternal); + } else { + beforeImageTTL = null; + } r2compat = in.readBoolean(); schemaId = in.readInt(); description = readString(in, serialVersion); @@ -142,6 +151,9 @@ public void writeFastExternal(DataOutput out, short serialVersion) WriteFastExternal::writeString); fields.writeFastExternal(out, serialVersion); writeFastExternalOrNull(out, serialVersion, ttl); + if (serialVersion >= BEFORE_IMAGE_VERSION) { + writeFastExternalOrNull(out, serialVersion, beforeImageTTL); + } out.writeBoolean(r2compat); out.writeInt(schemaId); writeString(out, serialVersion, description); @@ -174,9 +186,9 @@ TableImpl apply(TableMetadata md) { TableImpl ret = md.insertTable(namespace, name, parentName, primaryKey, primaryKeySizes, shardKey, fields, - ttl, limits, r2compat, schemaId, description, - owner, sysTable, identityColumnInfo, regionIds, - schemaless, jsonCollectionMRCounters); + ttl, beforeImageTTL, limits, r2compat, schemaId, + description, owner, sysTable, identityColumnInfo, + regionIds, schemaless, jsonCollectionMRCounters); return ret; } diff --git a/kvmain/src/main/java/oracle/kv/impl/api/table/DDLGenerator.java b/kvmain/src/main/java/oracle/kv/impl/api/table/DDLGenerator.java index 3031a894..e20012c6 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/table/DDLGenerator.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/table/DDLGenerator.java @@ -21,7 +21,6 @@ import java.util.Map; import java.util.Set; -import oracle.kv.impl.api.table.IndexImpl.IndexField; import 
oracle.kv.table.ArrayDef; import oracle.kv.table.EnumDef; import oracle.kv.table.FieldDef; @@ -161,13 +160,19 @@ public String[] genAlterDdl(Table newTable) { } /* set ttl */ - if (!ttlEquals(oldT, newT)) { + if (!ttlEquals(oldT, newT, true)) { ddl = getAlterTtlDDL(newT); if (ddl != null) { ddls.add(ddl); } } + if (!ttlEquals(oldT, newT, false)) { + ddl = getAlterBeforeImageTtlDDL(newT); + assert(ddl != null); + ddls.add(ddl); + } + /* add | drop , */ if (!TableImpl.equalsRemoteRegions(oldT.getRemoteRegions(), newT.getRemoteRegions())) { @@ -243,11 +248,15 @@ private String generateDDL(boolean withIfNotExists) { * Generate the DDL statement for the TTL if it exists */ TimeToLive defaultTTL = table.getDefaultTTL(); - if (defaultTTL != null) { appendTTL(defaultTTL); } + TimeToLive beforeTTL = table.getBeforeImageTTL(); + if (beforeTTL != null) { + appendBeforeImageTTL(beforeTTL); + } + if (table.isMultiRegion() && table.isTop()) { if (regionMapper == null) { throw new IllegalArgumentException( @@ -466,6 +475,18 @@ private void appendTTL(TimeToLive ttl) { .append(ttl.getUnit().name()); } + private void appendBeforeImageTTL(TimeToLive ttl) { + + if (ttl != null) { + tableSb.append(" ENABLE BEFORE IMAGE USING TTL "). + append(ttl.getValue()). + append(" "). + append(ttl.getUnit().name()); + } else { + tableSb.append(" DISABLE BEFORE IMAGE"); + } + } + /** * Generate the DDL for the generic table field. * @@ -588,59 +609,32 @@ private String getIndexDDL(Index index, boolean withIfNotExists) { .append(" ON ").append(table.getFullNamespaceName()).append("("); /* Append the fields */ - List fields = ((IndexImpl)index).getFields(); - int numFields = fields.size(); - - for (int i = 0; i < numFields; i++) { - - String field = fields.get(i); - sb.append(field); - - FieldDef.Type type = ((IndexImpl)index).getFieldType(i); - if (type != null) { - sb.append(" as "); - /* - * POINT and GEOMETRY are special and handled directly - */ - if (type == FieldDef.Type.POINT) { - sb.append("POINT"); - } else if (type == FieldDef.Type.GEOMETRY) { - /* - * Note: GEOMETRY indexes can have additional - * properties but they are not currently being - * maintained or used (see KVSTORE-1978) - */ - sb.append("GEOMETRY"); - } else { - sb.append(IndexField.getDDLTypeString(type)); - } + IndexImpl indexImpl = (IndexImpl)index; + for (int i = 0; i < indexImpl.numFields(); i++) { + if (i > 0) { + sb.append(", "); } + sb.append(TableJsonUtils.toExternalIndexField(indexImpl, i, true)); /* * Append the text index field annotation */ if (index.getType() == Index.IndexType.TEXT) { - + String field = indexImpl.getFields().get(i); String annotationField = index.getAnnotationForField(field); if (annotationField != null) { sb.append(" ").append(annotationField); } } - - if (i == numFields - 1) { - break; - } - sb.append(","); } sb.append(")"); if (index.getType() != Index.IndexType.TEXT) { - IndexImpl idx = (IndexImpl)index; - if (!idx.indexesNulls()) { + if (!indexImpl.indexesNulls()) { sb.append(" WITH NO NULLS"); } - if (idx.isUnique()) { + if (indexImpl.isUnique()) { sb.append(" WITH UNIQUE KEYS PER ROW"); } } @@ -649,8 +643,6 @@ private String getIndexDDL(Index index, boolean withIfNotExists) { * Append the text index properties */ if (index.getType() == Index.IndexType.TEXT) { - - IndexImpl indexImpl = (IndexImpl) index; Map properties = indexImpl.getProperties(); for (Map.Entry entry : properties.entrySet()) { @@ -1228,6 +1220,14 @@ private String getAlterTtlDDL(TableImpl newT) { return null; } + private String 
getAlterBeforeImageTtlDDL(TableImpl newT) { + + tableSb.setLength(0); + appendAlterTablePerfix(); + appendBeforeImageTTL(newT.getBeforeImageTTL()); + return tableSb.toString(); + } + /* * Returns modify region ddl: * alter table [add | drop] , @@ -1364,9 +1364,13 @@ private static boolean sequenceDefEquals(SequenceDef def0, return (def1 == null); } - private static boolean ttlEquals(TableImpl t0, TableImpl t1) { - TimeToLive ttl0 = t0.getDefaultTTL(); - TimeToLive ttl1 = t1.getDefaultTTL(); + private static boolean ttlEquals( + TableImpl t0, + TableImpl t1, + boolean tableTTL) { + + TimeToLive ttl0 = (tableTTL ? t0.getDefaultTTL() : t0.getBeforeImageTTL()); + TimeToLive ttl1 = (tableTTL ? t1.getDefaultTTL() : t1.getBeforeImageTTL()); if (isTtlNotExpire(ttl0) && isTtlNotExpire(ttl1)) { return true; } diff --git a/kvmain/src/main/java/oracle/kv/impl/api/table/EvolveTable.java b/kvmain/src/main/java/oracle/kv/impl/api/table/EvolveTable.java index 0ce7b967..659a1c87 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/table/EvolveTable.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/table/EvolveTable.java @@ -13,6 +13,7 @@ package oracle.kv.impl.api.table; +import static oracle.kv.impl.util.SerialVersion.BEFORE_IMAGE_VERSION; import static oracle.kv.impl.util.SerializationUtil.readCollection; import static oracle.kv.impl.util.SerializationUtil.readFastExternalOrNull; import static oracle.kv.impl.util.SerializationUtil.readPackedInt; @@ -40,6 +41,7 @@ class EvolveTable extends TableChange { private final String namespace; private final FieldMap fields; private final TimeToLive ttl; + private final TimeToLive beforeImgTTL; private final String description; private final IdentityColumnInfo identityColumnInfo; private final Set regions; @@ -50,6 +52,7 @@ class EvolveTable extends TableChange { namespace = table.getInternalNamespace(); fields = table.getFieldMap(); ttl = table.getDefaultTTL(); + beforeImgTTL = table.getBeforeImageTTL(); description = table.getDescription(); identityColumnInfo = table.getIdentityColumnInfo(); regions = table.isChild() ? 
null : table.getRemoteRegions(); @@ -62,6 +65,12 @@ class EvolveTable extends TableChange { fields = new FieldMap(in, serialVersion); ttl = readFastExternalOrNull(in, serialVersion, TimeToLive::readFastExternal); + if (serialVersion >= BEFORE_IMAGE_VERSION) { + beforeImgTTL = readFastExternalOrNull( + in, serialVersion, TimeToLive::readFastExternal); + } else { + beforeImgTTL = null; + } description = readString(in, serialVersion); identityColumnInfo = readFastExternalOrNull(in, serialVersion, IdentityColumnInfo::new); @@ -78,6 +87,9 @@ public void writeFastExternal(DataOutput out, short serialVersion) writeString(out, serialVersion, namespace); fields.writeFastExternal(out, serialVersion); writeFastExternalOrNull(out, serialVersion, ttl); + if (serialVersion >= BEFORE_IMAGE_VERSION) { + writeFastExternalOrNull(out, serialVersion, beforeImgTTL); + } writeString(out, serialVersion, description); writeFastExternalOrNull(out, serialVersion, identityColumnInfo); writeCollection(out, serialVersion, regions, @@ -86,7 +98,8 @@ public void writeFastExternal(DataOutput out, short serialVersion) @Override TableImpl apply(TableMetadata md) { - return md.evolveTable(namespace, tableName, fields, ttl, description, + return md.evolveTable(namespace, tableName, fields, ttl, + beforeImgTTL, description, identityColumnInfo, regions); } diff --git a/kvmain/src/main/java/oracle/kv/impl/api/table/FieldValueImpl.java b/kvmain/src/main/java/oracle/kv/impl/api/table/FieldValueImpl.java index 25be597f..c8bf5ebc 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/table/FieldValueImpl.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/table/FieldValueImpl.java @@ -31,6 +31,7 @@ import oracle.kv.impl.api.table.ValueSerializer.MapValueSerializer; import oracle.kv.impl.api.table.ValueSerializer.RecordValueSerializer; import oracle.kv.impl.query.QueryStateException; +import oracle.kv.impl.query.compiler.FuncRowMetadata; import oracle.kv.impl.query.types.TypeManager; import oracle.kv.impl.util.FastExternalizable; import oracle.kv.table.ArrayValue; @@ -41,6 +42,7 @@ import oracle.kv.table.FieldDef; import oracle.kv.table.FieldDef.Type; import oracle.kv.table.FieldValue; +import oracle.kv.table.FieldValueFactory; import oracle.kv.table.FixedBinaryValue; import oracle.kv.table.FloatValue; import oracle.kv.table.IndexKey; @@ -912,7 +914,20 @@ public FieldValueImpl evaluateScalarPath(List path, int pathPos) { case RECORD: { RecordValueImpl rec = (RecordValueImpl)this; String next = path.get(pathPos).getStep(); - FieldValueImpl fv = rec.get(next); + FieldValueImpl fv; + + if (this instanceof RowImpl && + next.equals(FuncRowMetadata.COL_NAME)) { + String rowMetadata = ((RowImpl)rec).getRowMetadata(); + if (rowMetadata != null) { + fv = (FieldValueImpl)FieldValueFactory. 
+ createValueFromJson(rowMetadata); + } else { + return EmptyValueImpl.getInstance(); + } + } else { + fv = rec.get(next); + } if (fv == null) { diff --git a/kvmain/src/main/java/oracle/kv/impl/api/table/FieldValueReaderImpl.java b/kvmain/src/main/java/oracle/kv/impl/api/table/FieldValueReaderImpl.java index 60d6e821..395c76ea 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/table/FieldValueReaderImpl.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/table/FieldValueReaderImpl.java @@ -246,10 +246,17 @@ public void setTableVersion(int tableVersion) { public void setExpirationTime(long expirationTime) { } + @Override public void setRowMetadata(String rowMetadata) { + } + @Override public void setModificationTime(long modificationTime) { } + @Override + public void setCreationTime(long creationTime) { + } + @Override public void setVersion(Version version) { } diff --git a/kvmain/src/main/java/oracle/kv/impl/api/table/IndexImpl.java b/kvmain/src/main/java/oracle/kv/impl/api/table/IndexImpl.java index 156d9c14..ddabf384 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/table/IndexImpl.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/table/IndexImpl.java @@ -49,6 +49,7 @@ import oracle.kv.impl.query.compiler.ExprFuncCall; import oracle.kv.impl.query.compiler.ExprUtils; import oracle.kv.impl.query.compiler.Function; +import oracle.kv.impl.query.compiler.FuncRowMetadata; import oracle.kv.impl.query.compiler.QueryControlBlock; import oracle.kv.impl.query.runtime.PlanIter; import oracle.kv.impl.tif.esclient.jsonContent.ESJsonUtil; @@ -60,6 +61,7 @@ import oracle.kv.table.FieldDef.Type; import oracle.kv.table.FieldRange; import oracle.kv.table.FieldValue; +import oracle.kv.table.FieldValueFactory; import oracle.kv.table.Index; import oracle.kv.table.IndexKey; import oracle.kv.table.RecordValue; @@ -2279,6 +2281,7 @@ private void checkIsMapBothIndex() { public byte[] extractIndexKey( byte[] key, byte[] data, + long creationTime, long modTime, long expTime, int size, @@ -2289,6 +2292,7 @@ public byte[] extractIndexKey( return null; } + row.setCreationTime(creationTime); row.setModificationTime(modTime); row.setExpirationTime(expTime); row.setStorageSize(size); @@ -2348,6 +2352,7 @@ public byte[] extractIndexKey(RowImpl row) { public List extractIndexKeys( byte[] key, byte[] data, + long creationTime, long modTime, long expTime, int size, @@ -2359,6 +2364,7 @@ public List extractIndexKeys( return null; } + row.setCreationTime(creationTime); row.setModificationTime(modTime); row.setExpirationTime(expTime); row.setStorageSize(size); @@ -2467,6 +2473,8 @@ public void extractIndexKeys(MultiKeyExtractionContext ctx) { case REC_FIELD: case MAP_FIELD: { ++ctx.pathPos; + /* Keep walking down the data guide. We are sure to find a + * BRACKETS or VALUES step. */ extractIndexKeys(ctx); --ctx.pathPos; return; @@ -2613,7 +2621,21 @@ public void extractIndexKeys(MultiKeyExtractionContext ctx) { case REC_FIELD: case MAP_FIELD: { String fname = si.getStep(); - FieldValueImpl fv = rec.get(fname); + FieldValueImpl fv; + + if (rec instanceof RowImpl && + fname.equals(FuncRowMetadata.COL_NAME)) { + String metadataStr = ((RowImpl)rec).getRowMetadata(); + if (metadataStr == null) { + // evaluate to EmptyValue when rowMetadata is null. + fv = EmptyValueImpl.getInstance(); + } else { + fv = (FieldValueImpl) FieldValueFactory. 
+ createValueFromJson(metadataStr); + } + } else { + fv = rec.get(fname); + } if (fv == null && !(rec instanceof JsonCollectionRowImpl)) { throw new IllegalArgumentException( @@ -2954,6 +2976,16 @@ private byte[] serializeIndexKey(MultiKeyExtractionContext ctx) { if (func != null) { switch (func.getCode()) { + case FN_CREATION_TIME: + long creationTime = ctx.row.getCreationTime(); + fval = FieldDefImpl.Constants.timestampDefs[3]. + createTimestamp(new Timestamp(creationTime)); + break; + case FN_CREATION_TIME_MILLIS: + long creationTimeMillis = ctx.row.getCreationTime(); + fval = FieldDefImpl.Constants.longDef. + createLong(creationTimeMillis); + break; case FN_MOD_TIME: long modTime = ctx.row.getLastModificationTime(); fval = FieldDefImpl.Constants.timestampDefs[3]. @@ -3944,10 +3976,37 @@ void compileFunctionalField(FieldDefImpl intype) { * Returns the DDL type string for the type. It must be a valid * JSON index type (see getFieldDefForTypecode). */ - static String getDDLTypeString(FieldDef.Type code) { + public static String getDDLTypeString(FieldDef.Type code) { + /* + * POINT and GEOMETRY are special and handled directly because they + * don't have a corresponding FieldDef class. + */ + if (code == FieldDef.Type.POINT) { + return "Point"; + } + if (code == FieldDef.Type.GEOMETRY) { + return "Geometry"; + } return getFieldDefForTypecode(code).getDDLString(); } + /* + * Returns the FieldDef.Type from the DDL type String, the type + * should be a valid JSON index type, see types in + * getFieldDefForTypecode, GEOMETRY and POINT. + */ + public static FieldDef.Type fromDdlTypeString(String typeString) { + try { + return FieldDef.Type.valueOf(typeString.toUpperCase()); + } catch (IllegalArgumentException iae) { + if (typeString.equalsIgnoreCase("AnyAtomic")) { + return FieldDef.Type.ANY_ATOMIC; + } + throw new IllegalArgumentException( + "Invalid type for JSON index field: " + typeString); + } + } + private static FieldDefImpl getFieldDefForTypecode(FieldDef.Type code) { switch (code) { case INTEGER: @@ -4084,25 +4143,12 @@ public String formatIndex(boolean asJson) { if (asJson) { TableImpl.JsonFormatter handler = TableImpl.createJsonFormatter(true); - List itypes = null; - if (types != null) { - itypes = new ArrayList(types.size()); - for (FieldDef.Type type : types) { - if (type == null) { - itypes.add("null"); - } else { - itypes.add(type.toString()); - } - } - } - handler.index(table, 1, getName(), getDescription(), getType().toString().toLowerCase(), - getFields(), - itypes, + TableJsonUtils.collectIndexFieldInfos(this), indexesNulls(), isUnique, getAnnotationsInternal(), @@ -4285,4 +4331,4 @@ RowImpl getRow(final TableImpl table, return row; } } -} +} \ No newline at end of file diff --git a/kvmain/src/main/java/oracle/kv/impl/api/table/IndexScan.java b/kvmain/src/main/java/oracle/kv/impl/api/table/IndexScan.java index 850200fe..cedce7bc 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/table/IndexScan.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/table/IndexScan.java @@ -347,7 +347,7 @@ protected InternalOperation createOp(byte[] resumeSecondaryKey, /** * Convert the results to KeyPair instances. Note that in the * case where ancestor and/or child table returns are requested - * the IndexKey returned is based on the the index and the table + * the IndexKey returned is based on the index and the table * containing the index, but the PrimaryKey returned may be from * a different, ancestor or child table. 
*/ @@ -564,6 +564,7 @@ private static Row convertRow(TableAPIImpl apiImpl, vv, fullKey, rowResult.getExpirationTime(), + rowResult.getCreationTime(), rowResult.getModificationTime(), false, false); diff --git a/kvmain/src/main/java/oracle/kv/impl/api/table/JsonCollectionReturnRowImpl.java b/kvmain/src/main/java/oracle/kv/impl/api/table/JsonCollectionReturnRowImpl.java index 2f87df4d..b03df6c2 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/table/JsonCollectionReturnRowImpl.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/table/JsonCollectionReturnRowImpl.java @@ -75,14 +75,15 @@ void init(TableAPIImpl impl, ReturnValueVersion rvv, RowSerializer key, long prevExpirationTime, + long creationTime, long prevModificationTime, ValueReader reader) { if (returnChoice == Choice.VALUE || returnChoice == Choice.ALL) { if (rvv.getValue() != null) { table.readKeyFields(reader, key); impl.getRowFromValueVersion(rvv, key, prevExpirationTime, - prevModificationTime, false, - false, reader); + creationTime, prevModificationTime, + false,false, reader); } } diff --git a/kvmain/src/main/java/oracle/kv/impl/api/table/JsonCollectionRowImpl.java b/kvmain/src/main/java/oracle/kv/impl/api/table/JsonCollectionRowImpl.java index cb9da1fd..df7cb8ca 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/table/JsonCollectionRowImpl.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/table/JsonCollectionRowImpl.java @@ -403,6 +403,7 @@ public void addPrimKeyAndPropertyFields(int indexSize) { public void addPrimKeyAndPropertyFields( long expTime, + long creationTime, long modTime, int partitionId, int rowSize, @@ -413,7 +414,7 @@ public void addPrimKeyAndPropertyFields( map.put(names.get(i), get(i)); } - map.addProperties(expTime, modTime, partitionId, rowSize, indexSize); + map.addProperties(expTime, creationTime, modTime, partitionId, rowSize, indexSize); } /** @@ -436,12 +437,14 @@ public JsonCollectionRowImpl getJColl() { } private void addProperties(long expTime, + long creationTime, long modTime, int partitionId, int rowSize, int indexSize) { JsonCollectionRowImpl row = getJColl(); row.setExpirationTime(expTime); + row.setCreationTime(creationTime); row.setModificationTime(modTime); row.setPartition(partitionId); row.setStorageSize(rowSize); @@ -470,6 +473,10 @@ public long getExpirationTime() { return getJColl().getExpirationTime(); } + public long getCreationTime() { + return getJColl().getCreationTime(); + } + public int getPartitionId() { return getJColl().getPartition(); } diff --git a/kvmain/src/main/java/oracle/kv/impl/api/table/NsonRow.java b/kvmain/src/main/java/oracle/kv/impl/api/table/NsonRow.java index dc42cb1a..e9d0269b 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/table/NsonRow.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/table/NsonRow.java @@ -16,13 +16,14 @@ import oracle.nosql.nson.Nson; /** - * NsonRow represents a Row along with row metadata but with the key and value + * NsonRow represents a Row along with extra row metadata but with the key and value * in NSON format. It is not really a "Row" in that it does not implement that * interface. 
It's primary use is for serialization/deserialization of rows * for streaming, replication and backup/restore */ public class NsonRow { + private final long creationTime; private final long modificationTime; private final long expirationTime; private final int regionId; @@ -36,7 +37,8 @@ public class NsonRow { */ private final byte[] nsonValue; - public NsonRow(long modificationTime, + public NsonRow(long creationTime, + long modificationTime, long expirationTime, int regionId, byte[] nsonKey, @@ -46,6 +48,7 @@ public NsonRow(long modificationTime, if (nsonKey == null) { throw new IllegalArgumentException("Primary key can not be null"); } + this.creationTime = creationTime; this.modificationTime = modificationTime; this.expirationTime = expirationTime; this.regionId = regionId; @@ -59,6 +62,10 @@ public TableImpl getTable() { return table; } + public long getCreationTime() { + return creationTime; + } + public long getModificationTime() { return modificationTime; } diff --git a/kvmain/src/main/java/oracle/kv/impl/api/table/NsonUtil.java b/kvmain/src/main/java/oracle/kv/impl/api/table/NsonUtil.java index 860ba8f0..8dc9dee7 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/table/NsonUtil.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/table/NsonUtil.java @@ -53,7 +53,6 @@ import oracle.nosql.nson.util.NioByteOutputStream; import oracle.nosql.nson.values.TimestampValue; -import com.sleepycat.util.PackedInteger; /** * NsonUtil is a class of static methods that encapsulate serialization and @@ -118,13 +117,7 @@ public static boolean createNsonFromValueBytes(TableImpl table, Value.Format format = Value.Format.fromFirstByte(valueBytes[0]); if (Format.isTableFormat(format)) { - int offset = 1; - if (format == Value.Format.MULTI_REGION_TABLE) { - final int regionIdLen = - PackedInteger.getReadIntLength(valueBytes, 1); - PackedInteger.readInt(valueBytes, 1); - offset = regionIdLen + 1; - } + int offset = Value.getValueOffset(valueBytes); if (table.isJsonCollection()) { try { /* @@ -235,10 +228,13 @@ public static Value createValueFromNsonBytes(TableImpl table, "Failed to re-serialize NSON with MR counter: " + ioe); } } + Value.Format rowFormat = Format.TABLE_V1; + if (regionId != Region.NULL_REGION_ID) { + rowFormat = Format.MULTI_REGION_TABLE; + } return Value.internalCreateValue( nsonValueBytes, - regionId == Region.NULL_REGION_ID ? - Value.Format.TABLE_V1 : Value.Format.MULTI_REGION_TABLE, + rowFormat, regionId); } ByteInputStream nsonInput = @@ -264,13 +260,12 @@ public static Value createValueFromNsonBytes(TableImpl table, writeAvroRecordFromNson(table, e, nsonInput, table.getFieldMap(), true); e.flush(); - /* - * If non Null regionId is specified, it should use MRTable format. - */ + return Value.internalCreateValue( outputStream.toByteArray(), regionId == Region.NULL_REGION_ID ? Value.Format.TABLE_V1 : Value.Format.MULTI_REGION_TABLE, +// Format.TABLE_V5, regionId); } catch (IOException ioe) { diff --git a/kvmain/src/main/java/oracle/kv/impl/api/table/NumberUtils.java b/kvmain/src/main/java/oracle/kv/impl/api/table/NumberUtils.java index cbe3a208..1e72af25 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/table/NumberUtils.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/table/NumberUtils.java @@ -317,15 +317,15 @@ private static void writeByte(byte[] buffer, int offset, int sign, if (sign < 0) { value = -1 * value; } - buffer[offset] = toUnsignedByte(value); + buffer[offset] = toUnsignedByte(sign, value); } /** * Converts the value with sign to a unsigned byte. 
If the value is * negative, subtract 2. */ - private static byte toUnsignedByte(int value) { - if (value < 0) { + private static byte toUnsignedByte(int sign, int value) { + if (sign < 0) { value -= 2; } return excess128(value); diff --git a/kvmain/src/main/java/oracle/kv/impl/api/table/ReturnRowImpl.java b/kvmain/src/main/java/oracle/kv/impl/api/table/ReturnRowImpl.java index ac0a6796..2094017f 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/table/ReturnRowImpl.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/table/ReturnRowImpl.java @@ -102,14 +102,15 @@ void init(TableAPIImpl impl, ReturnValueVersion rvv, RowSerializer key, long prevExpirationTime, + long creationTime, long prevModificationTime, ValueReader reader) { if (returnChoice == Choice.VALUE || returnChoice == Choice.ALL) { if (rvv.getValue() != null) { table.readKeyFields(reader, key); impl.getRowFromValueVersion(rvv, key, prevExpirationTime, - prevModificationTime, - false, false, reader); + creationTime, prevModificationTime, + false, false, reader); if (reader instanceof RowReaderImpl) { RowImpl newRow = (RowImpl)reader.getValue(); if (newRow.getTableVersion() != getTableVersion()) { @@ -131,6 +132,7 @@ void init(TableAPIImpl impl, * set is simpler than a complex conditional. */ reader.setExpirationTime(prevExpirationTime); + reader.setCreationTime(creationTime); reader.setModificationTime(prevModificationTime); reader.setVersion(rvv.getVersion()); } diff --git a/kvmain/src/main/java/oracle/kv/impl/api/table/RowImpl.java b/kvmain/src/main/java/oracle/kv/impl/api/table/RowImpl.java index be0df5eb..059f37ae 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/table/RowImpl.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/table/RowImpl.java @@ -57,11 +57,18 @@ public class RowImpl extends RecordValueImpl implements Row, RowSerializer { private TimeToLive ttl; + private String rowMetadata; + /** * Last update timestamp in ms. */ private long lastUpdateTimeMs; + /** + * Creation timestamp in ms. + */ + private long creationTimeMs; + private int storageSize = -1; private int partition = -1; @@ -91,11 +98,13 @@ protected RowImpl(RowImpl other) { this.expirationTime = other.expirationTime; this.ttl = other.ttl; this.lastUpdateTimeMs = other.lastUpdateTimeMs; + this.creationTimeMs = other.creationTimeMs; this.regionId = other.regionId; this.storageSize = other.storageSize; this.partition = other.partition; this.shard = other.shard; this.isTombstone = other.isTombstone; + this.rowMetadata = other.rowMetadata; } /* @@ -376,6 +385,24 @@ TimeToLive getTTLAndClearExpiration() { return retVal; } + /** + * @see Row#setRowMetadata(String) + */ + @Override + public void setRowMetadata(String rowMetadata) { + // we don't validate it is a valid JSON Object here, validation is done + // when Value instance in created. + this.rowMetadata = rowMetadata; + } + + /** + * @see Row#getRowMetadata() + */ + @Override + public String getRowMetadata() { + return rowMetadata; + } + public void removeValueFields() { if (table.hasValueFields()) { /* remove non-key fields if present */ @@ -423,6 +450,18 @@ public void setModificationTime(long modificationTime) { this.lastUpdateTimeMs = modificationTime; } + @Override + public long getCreationTime() { + return creationTimeMs; + } + + /** + * Set the creation timestamp for the row. + */ + public void setCreationTime(long creationTime) { + this.creationTimeMs = creationTime; + } + /** * Returns the region id associated with the row. 
The region id indicates * the region where the row is originally persisted. diff --git a/kvmain/src/main/java/oracle/kv/impl/api/table/RowReaderImpl.java b/kvmain/src/main/java/oracle/kv/impl/api/table/RowReaderImpl.java index f9ae6e2e..96b4b512 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/table/RowReaderImpl.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/table/RowReaderImpl.java @@ -60,8 +60,18 @@ public void setModificationTime(long modificationTime) { getValue().setModificationTime(modificationTime); } + @Override + public void setCreationTime(long creationTime) { + getValue().setCreationTime(creationTime); + } + @Override public void setTombstone(boolean isTombstone) { getValue().setTombstone(isTombstone); } + + @Override + public void setRowMetadata(String rowMetadata) { + getValue().setRowMetadata(rowMetadata); + } } diff --git a/kvmain/src/main/java/oracle/kv/impl/api/table/TableAPIImpl.java b/kvmain/src/main/java/oracle/kv/impl/api/table/TableAPIImpl.java index 6e19bb45..717f7fdb 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/table/TableAPIImpl.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/table/TableAPIImpl.java @@ -859,6 +859,7 @@ public void createRowFromGetResult(Result result, } ((TableImpl)rowKey.getTable()).readKeyFields(reader, rowKey); getRowFromValueVersion(vv, rowKey, result.getPreviousExpirationTime(), + result.getPreviousCreationTime(), result.getPreviousModificationTime(), false, false, reader); } @@ -943,6 +944,7 @@ public Version processPutResult(Result result, ReturnRow prevRowArg) { if (result.getSuccess()) { row.setExpirationTime(result.getNewExpirationTime()); + row.setCreationTime(result.getNewCreationTime()); row.setModificationTime(result.getNewModificationTime()); } @@ -1504,12 +1506,9 @@ protected Value getValue(Row row) { */ if (getIsTombstone(row)) { /* tombstone */ - int regionId = ((RowImpl)row).getRegionId(); - if (Region.isMultiRegionId(regionId)) { - return Value.createTombstoneValue( - ((RowImpl)row).getRegionId()); - } - return Value.createTombstoneNoneValue(); + return Value.createTombstoneValue( + ((RowImpl)row).getRegionId(), + row.getRowMetadata()); } /* * if using putResolve MR counter values should @@ -1534,6 +1533,12 @@ protected long getTableId(Row row) { return table.getId(); } + @Override + protected long getCreationTime(Row row) { + return options.getUsePutResolve() ? + row.getCreationTime() : 0L; + } + @Override protected long getModificationTime(Row row) { return options.getUsePutResolve() ? 
@@ -1675,12 +1680,8 @@ protected Value getValue(NsonRow row) { */ if (row.getIsTombstone()) { /* tombstone */ - int regionId = row.getRegionId(); - if (Region.isMultiRegionId(regionId)) { - return Value.createTombstoneValue( - row.getRegionId()); - } - return Value.createTombstoneNoneValue(); + return Value.createTombstoneValue( + row.getRegionId(), null /* rowMetadata */); } if (row.getNsonValue() != null) { /* @@ -1707,6 +1708,11 @@ protected long getTableId(NsonRow row) { return row.getTable().getId(); } + @Override + protected long getCreationTime(NsonRow row) { + return row.getCreationTime(); + } + @Override protected long getModificationTime(NsonRow row) { return row.getModificationTime(); @@ -2349,7 +2355,8 @@ private Request makeDeleteRequest(RowSerializer rowKey, getTimeout(writeOptions), getTimeoutUnit(writeOptions), table.getId(), - doTombstone(writeOptions)); + doTombstone(writeOptions), + rowKey.getRowMetadata()); setContextFromOptions(req, writeOptions); return req; } @@ -2442,7 +2449,8 @@ key, matchVersion, getReturnChoice(prevRow), getTimeout(writeOptions), getTimeoutUnit(writeOptions), table.getId(), - doTombstone(writeOptions)); + doTombstone(writeOptions), + rowKey.getRowMetadata()); setContextFromOptions(req, writeOptions); return req; } @@ -2556,7 +2564,8 @@ private Request makeMultiDeleteTableRequest(RowSerializer rowKey, keyRange, continuationKey, getMaxWriteKB(writeOptions), - doTombstone(writeOptions)); + doTombstone(writeOptions), + rowKey.getRowMetadata()); final Request req = store.makeWriteRequest(del, partitionId, getDurability(writeOptions), @@ -3154,18 +3163,20 @@ public CompletableFuture executeAsyncInternal( RowImpl getRowFromValueVersion(ValueVersion vv, RowImpl row, long expirationTime, + long creationTime, long modificationTime, boolean keyOnly, boolean isTombstone) { ValueReader reader = row.initRowReader(); - getRowFromValueVersion(vv, row, expirationTime, modificationTime, - keyOnly, isTombstone, reader); + getRowFromValueVersion(vv, row, expirationTime, creationTime, + modificationTime, keyOnly, isTombstone, reader); return reader.getValue(); } void getRowFromValueVersion(ValueVersion vv, RowSerializer row, long expirationTime, + long creationTime, long modificationTime, boolean keyOnly, boolean isTombstone, @@ -3182,6 +3193,7 @@ void getRowFromValueVersion(ValueVersion vv, } } reader.setExpirationTime(expirationTime); + reader.setCreationTime(creationTime); reader.setModificationTime(modificationTime); reader.setTombstone(isTombstone); if (!table.readRowFromValueVersion(reader, vv)) { @@ -3223,6 +3235,7 @@ void getRowFromValueVersion(ValueVersion vv, newTable.readKeyFields(reader, row); reader.setExpirationTime(expirationTime); reader.setModificationTime(modificationTime); + reader.setCreationTime(creationTime); reader.setTombstone(isTombstone); if (!newTable.readRowFromValueVersion(reader, vv)) { reader.reset(); @@ -3420,7 +3433,8 @@ private Operation getOperation(WriteOptions writeOptions) { op = factory.createDelete(key, choice, abortIfUnsuccessful, t.getId(), - false /* doTombstone */); + false /* doTombstone */, + record.getRowMetadata()); break; case DELETE_IF_VERSION: key = t.createKeyInternal(record, false); @@ -3428,7 +3442,8 @@ private Operation getOperation(WriteOptions writeOptions) { choice, abortIfUnsuccessful, t.getId(), - false /* doTombstone */); + false /* doTombstone */, + record.getRowMetadata()); break; } @@ -3512,14 +3527,15 @@ public boolean getPreviousRow(ValueReader reader) { if (value != null && key != null) { 
PrimaryKeyImpl rowKey = (PrimaryKeyImpl)key; ((TableImpl)key.getTable()).readKeyFields(reader, rowKey); - impl.getRowFromValueVersion - (new ValueVersion(value, version), - rowKey, - opRes.getPreviousExpirationTime(), - opRes.getPreviousModificationTime(), - false, - false, - reader); + impl.getRowFromValueVersion( + new ValueVersion(value, version), + rowKey, + opRes.getPreviousExpirationTime(), + opRes.getPreviousCreationTime(), + opRes.getPreviousModificationTime(), + false, + false, + reader); return true; } return false; @@ -3782,10 +3798,12 @@ public TableOperation createDeleteInternal(RowSerializer rowKey, ReturnRowImpl.mapChoice(prevReturn); TableImpl table = (TableImpl)rowKey.getTable(); Key key = table.createKeyInternal(rowKey, false); - Operation op = factory.createDelete(key, choice, + Operation op = factory.createDelete(key, + choice, abortIfUnsuccessful, table.getId(), - doTombstone); + doTombstone, + rowKey.getRowMetadata()); return new OpWrapper(op, TableOperation.Type.DELETE, rowKey, null); } @@ -3821,10 +3839,8 @@ public TableOperation createDeleteInternal(RowSerializer rowKey, TableImpl table = (TableImpl)rowKey.getTable(); Key key = table.createKeyInternal(rowKey, false); Operation op = factory.createDeleteIfVersion(key, versionMatch, - choice, - abortIfUnsuccessful, - table.getId(), - doTombstone); + choice, abortIfUnsuccessful, table.getId(), doTombstone, + rowKey.getRowMetadata()); return new OpWrapper (op, TableOperation.Type.DELETE_IF_VERSION, rowKey, null); } @@ -3955,11 +3971,12 @@ public Result putDelResolveNsonInternal(TableImpl table, byte[] nsonValueBytes, int regionId, long expirationTime, + long creationTime, long lastModificationTime, WriteOptions writeOptions) { return store.executeRequest( makePutDelResolveRequest(table, nsonKeyBytes, nsonValueBytes, - regionId, expirationTime, + regionId, expirationTime, creationTime, lastModificationTime, writeOptions)); } @@ -3970,11 +3987,13 @@ public Result putDelResolveNsonInternal(TableImpl table, byte[] nsonValueBytes, int regionId, long expirationTime, + // todo add creationTime on the callers of this method + // long creationTime, long lastModificationTime, WriteOptions writeOptions) { return store.executeRequestAsync( makePutDelResolveRequest(table, nsonKeyBytes, nsonValueBytes, - regionId, expirationTime, + regionId, expirationTime, 0 /* creationTime*/, lastModificationTime, writeOptions)); } @@ -3987,11 +4006,17 @@ private Request makePutDelResolveRequest(RowImpl row, final Value value; if (row.isPrimaryKey()) { - value = Value.createTombstoneValue(row.getRegionId()); + value = Value.createTombstoneValue(row.getRegionId(), + row.getRowMetadata()); } else { + Value.Format format = Value.Format.MULTI_REGION_TABLE; + if (row.getRowMetadata() != null) { + format = Value.Format.TABLE_V5; + } value = table.createValueInternal(row, - Value.Format.MULTI_REGION_TABLE, - row.getRegionId(), store, + format, + row.getRegionId(), + store, null /* genInfo */, false /* replaceCRDT */); } @@ -3999,6 +4024,7 @@ private Request makePutDelResolveRequest(RowImpl row, return makePutDelResolveRequest(table, prevRow, key, value, row.isPrimaryKey(), row.getExpirationTime(), + row.getCreationTime(), row.getLastModificationTime(), writeOptions); } @@ -4008,6 +4034,7 @@ private Request makePutDelResolveRequest(TableImpl table, byte[] nsonValueBytes, int regionId, long expirationTime, + long creationTime, long lastModificationTime, WriteOptions writeOptions) { @@ -4015,14 +4042,14 @@ private Request makePutDelResolveRequest(TableImpl 
table, boolean isTombstone = nsonValueBytes == null; final Value value; if (isTombstone) { - value = Value.createTombstoneValue(regionId); + value = Value.createTombstoneValue(regionId, null /* rowMetadata */); } else { value = NsonUtil.createValueFromNsonBytes(table, nsonValueBytes, regionId, false); } return makePutDelResolveRequest(table, null, key, value, isTombstone, - expirationTime, lastModificationTime, + expirationTime, creationTime, lastModificationTime, writeOptions); } @@ -4032,6 +4059,7 @@ private Request makePutDelResolveRequest(TableImpl table, Value value, boolean isTombstone, long expirationTime, + long creationTime, long lastModificationTime, WriteOptions writeOptions) { @@ -4041,7 +4069,9 @@ private Request makePutDelResolveRequest(TableImpl table, "PutDelResolve is not supported for non multi-region table"); } - if (value.getFormat() != Value.Format.MULTI_REGION_TABLE) { + if ( + !Region.isMultiRegionId(value.getRegionId()) + ) { throw new IllegalArgumentException( "PutDelResolve is not supported for non multi-region row"); } @@ -4059,6 +4089,7 @@ private Request makePutDelResolveRequest(TableImpl table, /* delete if row is primary key */ isTombstone, /* pass key metadata */ + creationTime, lastModificationTime, getRegionId(writeOptions)); setContextFromOptions(req, writeOptions); @@ -4099,11 +4130,13 @@ private void initReturnRow(ReturnRow rr, if (rr instanceof ReturnRowImpl) { ((ReturnRowImpl)rr).init(this, rvv, row, result.getPreviousExpirationTime(), + result.getPreviousCreationTime(), result.getPreviousModificationTime(), rowReader); } else { ((JsonCollectionReturnRowImpl)rr).init(this, rvv, row, result.getPreviousExpirationTime(), + result.getPreviousCreationTime(), result.getPreviousModificationTime(), rowReader); } @@ -4211,6 +4244,7 @@ public static KeyRange createKeyRange(FieldRange range, boolean forQuery) { list.add(getRowFromValueVersion(vv, row, rkvv.getExpirationTime(), + rkvv.getCreationTime(), rkvv.getModificationTime(), false, false)); diff --git a/kvmain/src/main/java/oracle/kv/impl/api/table/TableBuilder.java b/kvmain/src/main/java/oracle/kv/impl/api/table/TableBuilder.java index 14aad5ee..e11c1fbc 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/table/TableBuilder.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/table/TableBuilder.java @@ -399,6 +399,7 @@ public TableImpl buildTable() { getShardKey(), fields, ttl, + beforeImageTTL, null, /*limits*/ r2compat, 0, @@ -460,6 +461,7 @@ public String toJsonString(boolean pretty) { getShardKey(), fields, ttl, + beforeImageTTL, null, /*limits*/ r2compat, 0, diff --git a/kvmain/src/main/java/oracle/kv/impl/api/table/TableBuilderBase.java b/kvmain/src/main/java/oracle/kv/impl/api/table/TableBuilderBase.java index 1d8f74b9..5d7fa2c2 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/table/TableBuilderBase.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/table/TableBuilderBase.java @@ -38,6 +38,7 @@ public class TableBuilderBase { protected FieldMap fields; protected TimeToLive ttl; + protected TimeToLive beforeImageTTL; protected Set regions = null; private boolean skipNullableDefaultValidation = false; @@ -813,6 +814,14 @@ public void setDefaultTTL(TimeToLive ttl) { this.ttl = ttl; } + public TimeToLive getBeforeImageTTL() { + return beforeImageTTL; + } + + public void setBeforeImageTTL(TimeToLive ttl) { + this.beforeImageTTL = ttl; + } + public void setIdentity( String identityColumn, boolean always, diff --git a/kvmain/src/main/java/oracle/kv/impl/api/table/TableEventHandler.java 
b/kvmain/src/main/java/oracle/kv/impl/api/table/TableEventHandler.java index e29c2548..63faed7d 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/table/TableEventHandler.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/table/TableEventHandler.java @@ -32,6 +32,8 @@ default void tableId(long tableId) {} public void ttl(TimeToLive ttl); + public void beforeImageTTL(TimeToLive ttl); + public void systemTable(boolean value); public void description(String description); @@ -59,6 +61,21 @@ public void identity(String columnName, boolean onNull, SequenceDef sequenceDef); + /* + * Map the table name and parent table name + * + * They are currently overridden in the cloud for map the KV table name + * (ocid.xyz) to the user table name (the name specified by the user in the + * original DDL) + */ + default String mapTableName(String tableName) { + return tableName; + } + + default String mapParentTableName(String parentName) { + return parentName; + } + /* * Use this to list child table names */ @@ -76,13 +93,11 @@ public void index(Table table, /* can be null */ String indexName, String description, String type, - List fields, - List types, + List fields, boolean indexesNulls, boolean isUnique, Map annotations, Map properties); - public void endIndexes(); /* @@ -120,4 +135,65 @@ public void fieldInfo(String name, public void endFields(); public void end(); -} + + /* + * The class represents an index Field used in table JSON metadata. + */ + public static class IndexFieldInfo { + /* + * The path to the indexed field, it is null if it is a function + */ + private String path; + + /* + * Index field type. + * + * The declared type string if it is a Json field, or the return type + * of the function if it is a function. Otherwise, it is null. + */ + private String type; + + /* + * Function name, it is null if the field is not a function. + */ + private String function; + + /* + * Function arguments + * + * A comma-separated string containing all arguments, or null if the + * field is not a function. + */ + private String arguments; + + public IndexFieldInfo(String path, String type) { + this.path = path; + this.type = type; + function = null; + arguments = null; + } + + public IndexFieldInfo(String function, String arguments, String type) { + this.function = function; + this.arguments = arguments; + this.type = type; + path = null; + } + + public String getPath() { + return path; + } + + public String getType() { + return type; + } + + public String getFunction() { + return function; + } + + public String getArguments() { + return arguments; + } + } +} \ No newline at end of file diff --git a/kvmain/src/main/java/oracle/kv/impl/api/table/TableEvolver.java b/kvmain/src/main/java/oracle/kv/impl/api/table/TableEvolver.java index decb5030..d9214c26 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/table/TableEvolver.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/table/TableEvolver.java @@ -16,6 +16,7 @@ import java.util.HashSet; import oracle.kv.table.Table; +import oracle.kv.table.TimeToLive; /** * TableEvolver is a class used to evolve existing tables. 
It has accessors @@ -55,20 +56,30 @@ * */ public class TableEvolver extends TableBuilderBase { + private final TableImpl table; + /* May be null */ + private final RegionMapper regionMapper; + private final int evolvedVersion; + private String description; - /* May be null */ - private final RegionMapper regionMapper; + private boolean updateTTL; + + private boolean enableBeforeImage; + + private boolean disableBeforeImage; private TableEvolver(TableImpl table, RegionMapper regionMapper) { + super(table.getFieldMap().clone()); + this.table = table; this.regionMapper = regionMapper; - ttl = table.getDefaultTTL(); - description = table.getDescription(); evolvedVersion = table.getTableVersion(); + description = table.getDescription(); + if (evolvedVersion != table.numTableVersions()) { throw new IllegalArgumentException ("Table evolution must be performed on the latest version"); @@ -129,6 +140,37 @@ public TableBuilderBase setDescription(String description) { return this; } + public void setUpdateTableTTL() { + assert(!enableBeforeImage); + assert(!disableBeforeImage); + updateTTL = true; + } + + public void setEnableBeforeImage() { + assert(!updateTTL); + assert(!disableBeforeImage); + enableBeforeImage = true; + } + + public void setDisableBeforeImage() { + assert(!updateTTL); + assert(!enableBeforeImage); + disableBeforeImage = true; + } + + public TimeToLive getNewTableTTL() { + return (updateTTL ? ttl : table.getDefaultTTL()); + } + + public TimeToLive getNewBeforeImageTTL() { + + return (enableBeforeImage ? + (beforeImageTTL == null ? + TableImpl.DEFAULT_BEFORE_IMAGE_TTL : + beforeImageTTL) : + (disableBeforeImage ? null : table.getBeforeImageTTL())); + } + @Override public RegionMapper getRegionMapper() { return regionMapper; @@ -192,7 +234,10 @@ public void dropRegion(String regionName) { public TableImpl evolveTable() { if (hasSetIdentity) { - table.evolve(fields, ttl, description, + table.evolve(fields, + getNewTableTTL(), + getNewBeforeImageTTL(), + description, getIdentityColumnInfo(), sequenceDef, regions); } else { if (table.hasIdentityColumn()) { @@ -201,18 +246,31 @@ public TableImpl evolveTable() { if (!fields.exists(idColName)) { /* column with IDENTITY was dropped */ - table.evolve(fields, ttl, description, null, null, regions); + table.evolve(fields, + getNewTableTTL(), + getNewBeforeImageTTL(), + description, + null, null, regions); } else { /* column with IDENTITY still exists but index might have changed */ - table.evolve(fields, ttl, description, - new IdentityColumnInfo(fields.getFieldPos(idColName), - table.getIdentityColumnInfo().isIdentityGeneratedAlways(), - table.getIdentityColumnInfo().isIdentityOnNull()), - sequenceDef, regions); + IdentityColumnInfo ici = table.getIdentityColumnInfo(); + table.evolve(fields, + getNewTableTTL(), + getNewBeforeImageTTL(), + description, + new IdentityColumnInfo(fields.getFieldPos(idColName), + ici.isIdentityGeneratedAlways(), + ici.isIdentityOnNull()), + sequenceDef, + regions); } } else { - table.evolve(fields, ttl, description, null, null, regions); + table.evolve(fields, + getNewTableTTL(), + getNewBeforeImageTTL(), + description, + null, null, regions); } } @@ -231,8 +289,11 @@ public String toJsonString(boolean pretty) { TableImpl t = table.clone(); - t.evolve(fields, ttl, description, - getIdentityColumnInfo(), sequenceDef, regions); + t.evolve(fields, + getNewTableTTL(), + getNewBeforeImageTTL(), + description, + getIdentityColumnInfo(), sequenceDef, regions); return t.toJsonString(pretty, regionMapper); } diff --git 
a/kvmain/src/main/java/oracle/kv/impl/api/table/TableImpl.java b/kvmain/src/main/java/oracle/kv/impl/api/table/TableImpl.java index 6ef574a8..698b73e4 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/table/TableImpl.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/table/TableImpl.java @@ -15,6 +15,7 @@ import static oracle.kv.impl.api.table.TableJsonUtils.ALWAYS; import static oracle.kv.impl.api.table.TableJsonUtils.ANNOTATIONS; +import static oracle.kv.impl.api.table.TableJsonUtils.ARGUMENTS; import static oracle.kv.impl.api.table.TableJsonUtils.ASUUID; import static oracle.kv.impl.api.table.TableJsonUtils.CACHE; import static oracle.kv.impl.api.table.TableJsonUtils.CHILDREN; @@ -25,6 +26,7 @@ import static oracle.kv.impl.api.table.TableJsonUtils.DEFAULT; import static oracle.kv.impl.api.table.TableJsonUtils.DESC; import static oracle.kv.impl.api.table.TableJsonUtils.FIELDS; +import static oracle.kv.impl.api.table.TableJsonUtils.FUNCTION; import static oracle.kv.impl.api.table.TableJsonUtils.GENERATED; import static oracle.kv.impl.api.table.TableJsonUtils.IDENTITY; import static oracle.kv.impl.api.table.TableJsonUtils.INCREMENT; @@ -43,6 +45,7 @@ import static oracle.kv.impl.api.table.TableJsonUtils.NULLABLE; import static oracle.kv.impl.api.table.TableJsonUtils.OWNER; import static oracle.kv.impl.api.table.TableJsonUtils.PARENT; +import static oracle.kv.impl.api.table.TableJsonUtils.PATH; import static oracle.kv.impl.api.table.TableJsonUtils.PKEY_SIZES; import static oracle.kv.impl.api.table.TableJsonUtils.PRIMARYKEY; import static oracle.kv.impl.api.table.TableJsonUtils.PROPERTIES; @@ -60,6 +63,7 @@ import static oracle.kv.impl.api.table.TableJsonUtils.UNIQUE; import static oracle.kv.impl.api.table.TableJsonUtils.WITH_NO_NULLS; import static oracle.kv.impl.api.table.TableJsonUtils.WRITE_LIMIT; +import static oracle.kv.impl.util.SerialVersion.BEFORE_IMAGE_VERSION; import static oracle.kv.impl.util.SerialVersion.JSON_COLLECTION_VERSION; import static oracle.kv.impl.util.SerialVersion.MULTI_REGION_CHILD_TABLE_VERSION_DEPRECATED_REMOVE_AFTER_PREREQ_25_1; import static oracle.kv.impl.util.SerialVersion.SCHEMALESS_TABLE_VERSION; @@ -231,6 +235,11 @@ public class TableImpl implements Table, MetadataInfo, Ownable, private static final long serialVersionUID = 1L; + /** + * Default TTL of before image + */ + public static final TimeToLive DEFAULT_BEFORE_IMAGE_TTL = + TimeToLive.ofHours(24); /** * A thread-local byte array used as the initial buffer in the * ByteArrayOutputStreams used by createValueInternal. @@ -338,6 +347,11 @@ public class TableImpl implements Table, MetadataInfo, Ownable, */ private Map jsonCollectionMRCounters; + /** + * TTL of before image, null if before image is disabled + */ + private TimeToLive beforeImgTTL; + /* * transient, cached values */ @@ -374,10 +388,6 @@ public class TableImpl implements Table, MetadataInfo, Ownable, /* sequence definition of the identity column */ private transient SequenceDef identitySequenceDef = null; - /* - * The value format used for this table. - */ - private transient Value.Format valueFormat; /* * The column position of the "STRING AS UUID GENERATED BY DEFAULT" field. 
@@ -527,6 +537,7 @@ public void writeFastExternal(DataOutput out, short serialVersion) * * TODO: remove schemaId, r2compat */ + public TableImpl(final String namespace, final String name, final TableImpl parent, @@ -535,6 +546,7 @@ public TableImpl(final String namespace, final List shardKey, final FieldMap fields, final TimeToLive ttl, + TimeToLive beforeImageTTL, TableLimits limits, boolean r2compat, int schemaId, @@ -578,6 +590,7 @@ public TableImpl(final String namespace, versions = new ArrayList<>(); versions.add(fields); this.ttl = ttl; + this.beforeImgTTL = beforeImageTTL; setVersion(INITIAL_TABLE_VERSION); validateTableName(name, sysTable); @@ -681,6 +694,7 @@ private TableImpl(TableImpl t) { versions = new ArrayList<>(t.versions); ttl = t.ttl; + beforeImgTTL = t.beforeImgTTL; setVersion(t.version); /* this constructor uses the same Comparator as t.indexes */ indexes = new TreeMap<>(t.indexes); @@ -739,6 +753,13 @@ public TableImpl(DataInput in, short serialVersion, TableImpl parent) } ttl = readFastExternalOrNull(in, serialVersion, TimeToLive::readFastExternal); + if (serialVersion >= BEFORE_IMAGE_VERSION) { + beforeImgTTL = readFastExternalOrNull(in, serialVersion, + TimeToLive::readFastExternal); + } else { + beforeImgTTL = null; + } + status = TableStatus.readFastExternal(in, serialVersion); r2compat = in.readBoolean(); schemaId = readPackedInt(in); @@ -887,6 +908,9 @@ public void writeFastExternal(DataOutput out, short serialVersion) fm.writeFastExternal(out, serialVersion); } writeFastExternalOrNull(out, serialVersion, ttl); + if (serialVersion >= BEFORE_IMAGE_VERSION) { + writeFastExternalOrNull(out, serialVersion, beforeImgTTL); + } status.writeFastExternal(out, serialVersion); out.writeBoolean(r2compat); writePackedInt(out, schemaId); @@ -944,16 +968,6 @@ private void readObject(java.io.ObjectInputStream in) setIdString(); initGeneratedUUIDPos(); initializeVersionInfo(true); - if (isTop() && isMultiRegion()) { - /* - * Child tables need the regions of the top table to - * determine the format, which indicates if the table - * is a multi-region table. Thus, the format should be - * reset after the top table reads and initializes - * the regions. - */ - resetMRChildTableValueFormat(); - } } /* @@ -976,19 +990,6 @@ static void writeMRCounters(Map counters, } - /* - * Reset the format to multi-region table for all child - * tables in the hierarchy. - */ - private void resetMRChildTableValueFormat() { - if (isChild()) { - valueFormat = Format.MULTI_REGION_TABLE; - } - for (Table t : getChildTables().values()) { - ((TableImpl) t).resetMRChildTableValueFormat(); - } - } - @Override public TableImpl clone() { return new TableImpl(this); @@ -1646,6 +1647,9 @@ public boolean equals(Object other) { if (!equalsTTL(ttl, otherDef.ttl)) { return false; } + if (!equalsTTL(beforeImgTTL, otherDef.beforeImgTTL)) { + return false; + } if (!equalsPKSizes(primaryKeySizes, otherDef.primaryKeySizes)) { return false; @@ -2077,7 +2081,7 @@ public RowImpl createRowFromBytes(byte[] keyBytes, * If createRowFromKeyBytes returns null, then the serialized key * doesn't match the table's key. It may, however, return a false * positive if the key belongs to a descendent in the parent-child - * table hierarchy. Hence the extra test for matching table Ids. + * table hierarchy. Hence, the extra test for matching table Ids. */ if (fullKey != null && getId() == fullKey.getTableImpl().getId()) { @@ -2088,12 +2092,19 @@ public RowImpl createRowFromBytes(byte[] keyBytes, valueBytes.length == 0) ? 
null : Value.Format.fromFirstByte(valueBytes[0]); int offset = (format == null ? 0 : 1); - int regionId = -1; + int regionId = Region.UNKNOWN_REGION_ID; + String rowMetadata = null; if (format == Value.Format.MULTI_REGION_TABLE) { /* Set region ID */ offset += PackedInteger.getReadIntLength(valueBytes, 1); regionId = PackedInteger.readInt(valueBytes, 1); fullKey.setRegionId(regionId); + } else if (format == Format.TABLE_V5) { + /* Set region ID and row metadata if necessary */ + Value v = Value.fromByteArray(valueBytes); + regionId = v.getRegionId(); + rowMetadata = v.getRowMetadata(); + offset = Value.getValueOffset(valueBytes); } /* Handle rows with no value data */ @@ -2123,6 +2134,9 @@ public RowImpl createRowFromBytes(byte[] keyBytes, } else if (format == Value.Format.MULTI_REGION_TABLE) { /* id was set on RowImpl but not ValueReader */ reader.setRegionId(regionId); + } else if (format == Format.TABLE_V5) { + reader.setRegionId(regionId); + reader.setRowMetadata(rowMetadata); } if (initRowFromByteValue(reader, valueBytes, @@ -2337,7 +2351,8 @@ public Value createValueInternal(RowSerializer row, final short opSerialVersion = (testCurrentSerialVersion != 0) ? testCurrentSerialVersion : SerialVersion.CURRENT; - Format valFormat = getValueFormat(opSerialVersion); + Format valFormat = getValueFormat(opSerialVersion, row); + final int regionId; if (isMultiRegion()) { @@ -2366,8 +2381,10 @@ public Value createValueInternal(RowSerializer row, regionId, replaceCRDT); } if (retVal == null) { - retVal = Value.internalCreateValue(new byte[0], valFormat, - regionId); + retVal = Value.internalCreateValue(new byte[0], + valFormat, + regionId, + row.getRowMetadata()); } return retVal; } @@ -2412,9 +2429,10 @@ public Value createValueInternal(RowSerializer row, writeAvroRecord(e, row, true, valFormat, store, genInfo, replaceCRDT); e.flush(); - return Value.internalCreateValue - (outputStream.toByteArray(), - isAvro ? Value.Format.AVRO : valFormat, regionId); + return Value.internalCreateValue(outputStream.toByteArray(), + isAvro ? Value.Format.AVRO : valFormat, + regionId, + row.getRowMetadata()); } catch (IOException ioe) { throw new IllegalCommandException("Failed to serialize Avro: " + ioe); @@ -2442,7 +2460,7 @@ static class ByteArrayOutputStreamWithInitialBuffer /** * Deserialize the record value that is encoded in Avro. * - * Offset is requires because on the client side the byte offset is 0 but + * Offset is required because on the client side the byte offset is 0 but * on the server side a "raw" database record is used which includes an * empty first byte added by the system. * @@ -2475,7 +2493,7 @@ public boolean initRowFromByteValue(RowImpl row, /** * This method is used by the query runtime code (ServerTableIter) to - * fillin a table row from the binary key and value of the row. In this + * fill in a table row from the binary key and value of the row. In this * case we know that the binary key belongs to "this" table, so there is * no need to call findTargetTable(). */ @@ -2483,6 +2501,7 @@ public boolean initRowFromKeyValueBytes( byte[] keyBytes, byte[] valueBytes, long expTime, + long creationTime, long modTime, Version vers, int partition, @@ -2498,8 +2517,8 @@ public boolean initRowFromKeyValueBytes( reader)) { return false; } - row = initRowFromValueBytes(row, valueBytes, expTime, modTime, - vers, partition, shard, storageSize); + row = initRowFromValueBytes(row, valueBytes, expTime, creationTime, + modTime, vers, partition, shard, storageSize); return (row != null ? 
true : false); } @@ -2510,6 +2529,7 @@ public RowImpl initRowFromValueBytes( RowImpl row, byte[] data, long expTime, + long creationTime, long modTime, Version vers, int partition, @@ -2534,6 +2554,7 @@ public RowImpl initRowFromValueBytes( } row.setExpirationTime(expTime); + row.setCreationTime(creationTime); row.setModificationTime(modTime); row.setVersion(vers); row.setPartition(partition); @@ -2549,16 +2570,11 @@ public RowImpl initRowFromValueBytes( return null; } - /* multi-region table */ - if (Format.MULTI_REGION_TABLE.equals(format)) { - final int regionIdLen = PackedInteger.getReadIntLength(data, 1); - int regionId = PackedInteger.readInt(data, 1); - offset = regionIdLen + 1; - row.setRegionId(regionId); - } + offset = Value.setRegionIdAndRowMetadata(data, row); if (initRowFromByteValue(row, data, format, offset)) { row.setExpirationTime(expTime); + row.setCreationTime(creationTime); row.setModificationTime(modTime); row.setPartition(partition); row.setShard(shard); @@ -2800,6 +2816,9 @@ public boolean readRowFromValueVersion(ValueReader reader, if (format == Value.Format.MULTI_REGION_TABLE) { reader.setRegionId(vv.getValue().getRegionId()); + } else if (format == Format.TABLE_V5) { + reader.setRegionId(vv.getValue().getRegionId()); + reader.setRowMetadata(vv.getValue().getRowMetadata()); } /* @@ -2917,6 +2936,7 @@ private void validateNewLimits(TableLimits newLimits) { */ void evolve(FieldMap newFields, TimeToLive newTTL, + TimeToLive newBeforeImgTTL, String newDescription, IdentityColumnInfo identityInfo, SequenceDef sequenceDef, @@ -2966,6 +2986,7 @@ void evolve(FieldMap newFields, } ttl = newTTL; + beforeImgTTL = newBeforeImgTTL; if (newDescription != null) { setDescription(newDescription); } @@ -3148,6 +3169,15 @@ public String toJsonString(boolean pretty, return formatter.toString(); } + /* For unit test */ + String toJsonString(boolean pretty, int jsonVersion) { + JsonFormatter formatter = + (pretty) ? new JsonPrettyFormatter(jsonVersion) : + new JsonFormatter(jsonVersion); + walkTableInfo(formatter, false, null); + return formatter.toString(); + } + /** * Formats the table. If fields is null format the entire * table, otherwise, just use the specified fields. The field names @@ -4246,6 +4276,15 @@ public TimeToLive getDefaultTTL() { return ttl; } + /** + * Returns the before image TTL. If null, the before image is disabled + * for the table. + * @return the before image TTL, or null. + */ + public TimeToLive getBeforeImageTTL() { + return beforeImgTTL; + } + /** * Whether this table is system table, internal use only. */ @@ -5019,7 +5058,6 @@ private void initializeVersionInfo(boolean validate) { getValueRecordDef(); createPrimKeyDef(); } - valueFormat = getValueFormat(SerialVersion.CURRENT); } /** @@ -5040,13 +5078,31 @@ public byte[] reserializeToOldValue(byte[] keyBytes, * Returns the value format for the specified serial version. 
*/ public Format getValueFormat(short serialVersion) { + return getValueFormat(serialVersion, null); + } + + public Format getValueFormat(short serialVersion, RowSerializer row) { assert serialVersion >= SerialVersion.MINIMUM; - if (valueFormat != null && serialVersion == SerialVersion.CURRENT) { - return valueFormat; - } - if (isMultiRegion()) { + + if (isMultiRegion() && serialVersion < SerialVersion.ROW_METADATA_VERSION) { return Format.MULTI_REGION_TABLE; } + if (serialVersion >= SerialVersion.ROW_METADATA_VERSION) { + if (row != null && row.getRowMetadata() != null) { + return Format.TABLE_V5; + } + if (isMultiRegion()) { + return Format.MULTI_REGION_TABLE; + } + } else { + // if rowMetadata not supported in serialVersion + if (row != null && row.getRowMetadata() != null) { + throw new IllegalArgumentException("Row metadata not " + + "supported by the server version. Server version is " + + serialVersion + ", required version is " + + SerialVersion.ROW_METADATA_VERSION); + } + } /* * If the table contains JSON field including a nested JSON field and @@ -5103,7 +5159,7 @@ private class TableVersionInfo { private boolean isKeyOnly; /* - * This isfor schema fields defined as MR counters and + * This is for schema fields defined as MR counters and * JSON MR counters. It is not used for MR counters on * JSON Collection tables. */ @@ -5501,6 +5557,10 @@ public void walkTableInfo(TableEventHandler handler, handler.ttl(getDefaultTTL()); } + if (getBeforeImageTTL() != null) { + handler.beforeImageTTL(getBeforeImageTTL()); + } + if (owner != null) { handler.owner(owner.toString()); } @@ -5576,24 +5636,12 @@ public void walkTableInfo(TableEventHandler handler, int indexNum = 1; for (Map.Entry indexEntry : indexes.entrySet()) { IndexImpl impl = (IndexImpl) indexEntry.getValue(); - List types = null; - if (impl.getTypes() != null) { - types = new ArrayList(impl.getTypes().size()); - for (FieldDef.Type type : impl.getTypes()) { - if (type == null) { - types.add(null); - } else { - types.add(type.toString()); - } - } - } handler.index(this, indexNum++, impl.getName(), impl.getDescription(), impl.getType().toString().toLowerCase(), - impl.getFields(), - types, + TableJsonUtils.collectIndexFieldInfos(impl), impl.indexesNulls(), impl.isUnique(), impl.getAnnotationsInternal(), @@ -5751,6 +5799,18 @@ static JsonFormatter createJsonFormatter(boolean pretty) { * A JSON formatter class that implements TableEventHandler */ public static class JsonFormatter implements TableEventHandler { + + /* + * V1: initial version. 
+ */ + public static final int V1 = 1; + /* + * V2: Modify the index representation to better support function index, + * see details in KVSTORE-2724 + */ + public static final int V2 = 2; + public static final int CURRENT_VERSION = V2; + protected final StringBuilder sb; static final char SEP = ':'; static final char COMMA = ','; @@ -5760,8 +5820,15 @@ public static class JsonFormatter implements TableEventHandler { static final char START_ARRAY = '['; static final char END_ARRAY = ']'; + private final int jsonVersion; + public JsonFormatter() { + this(CURRENT_VERSION); + } + + public JsonFormatter(int jsonVersion) { sb = new StringBuilder(); + this.jsonVersion = jsonVersion; } /* @@ -5804,7 +5871,7 @@ protected void appendString(String value) { CharTypes.appendQuoted(sb, value); sb.append(QUOT); } else { - sb.append("null"); + sb.append(NULL); } } @@ -5825,7 +5892,7 @@ protected void arrayOfString(String key, List list) { comma(); sb.append(QUOT).append(key).append(QUOT); sep(); - sb.append("["); + startArray(); for (String fieldName : list) { if (!first) { arrayComma(); @@ -5834,7 +5901,7 @@ protected void arrayOfString(String key, List list) { } appendString(fieldName); } - sb.append("]"); + endArray(); } protected void startObject() { @@ -5852,7 +5919,7 @@ protected void endObject() { @Override public void start(String namespace, String tableName) { startObject(); - nonStringField(JSON_VERSION, "1"); + nonStringField(JSON_VERSION, String.valueOf(jsonVersion)); comma(); stringField(TYPE, "table"); if (namespace != null) { @@ -5860,7 +5927,7 @@ public void start(String namespace, String tableName) { stringField("namespace", namespace); } comma(); - stringField("name", tableName); + stringField("name", mapTableName(tableName)); } @Override @@ -5905,6 +5972,14 @@ public void ttl(TimeToLive ttl) { } } + @Override + public void beforeImageTTL(TimeToLive ttl) { + if (ttl != null) { + comma(); + stringField("beforeImageTTL", ttl.toString()); + } + } + @Override public void tableId(long tableId) { comma(); @@ -5930,7 +6005,7 @@ public void description(String description) { @Override public void parent(String parentName) { comma(); - stringField(PARENT, parentName); + stringField(PARENT, mapParentTableName(parentName)); } @Override @@ -6139,8 +6214,7 @@ public void index(Table table, String indexName, String description, String type, - List fields, - List types, + List fields, boolean indexesNulls, boolean isUnique, Map annotations, @@ -6157,10 +6231,62 @@ public void index(Table table, comma(); stringField(DESC, description); } - arrayOfString(FIELDS, fields); - if (types != null) { - arrayOfString(TYPES, types); + /* + * The format of the json representation of the schema has several + * formats, add the ability to parse any version. 
+ */ + if (jsonVersion < V2) { + IndexImpl index = (IndexImpl)table.getIndex(indexName); + List types = null; + if (index.getTypes() != null) { + types = new ArrayList(index.getTypes().size()); + for (FieldDef.Type ftype : index.getTypes()) { + if (ftype == null) { + types.add(null); + } else { + types.add(ftype.toString()); + } + } + } + arrayOfString(FIELDS, index.getFields()); + if (types != null) { + arrayOfString(TYPES, types); + } + } else { + comma(); + sb.append(QUOT).append(FIELDS).append(QUOT); + sep(); + startArray(); + boolean first = true; + for (IndexFieldInfo info : fields) { + if (!first) { + comma(); + } else { + first = false; + } + + startObject(); + if (info.getPath() != null) { + stringField(PATH, info.getPath()); + if (info.getType() != null) { + comma(); + stringField(TYPE, info.getType()); + } + } else if (info.getFunction() != null) { + stringField(FUNCTION, info.getFunction()); + if (info.getArguments() != null) { + comma(); + stringField(ARGUMENTS, info.getArguments()); + } + if (info.getType() != null) { + comma(); + stringField(TYPE, info.getType()); + } + } + endObject(); + } + endArray(); } booleanField(WITH_NO_NULLS, !indexesNulls); @@ -6367,6 +6493,14 @@ private static class JsonPrettyFormatter extends JsonFormatter { private String indent; private int currentIndent = 0; + JsonPrettyFormatter() { + super(); + } + + JsonPrettyFormatter(int jsonVersion) { + super(jsonVersion); + } + private void changeIndent(int num) { currentIndent += num; StringBuilder isb = new StringBuilder(); @@ -6460,7 +6594,7 @@ private Value createJsonCollectionValue(RowSerializer rowArg, writeJsonCollectionMap(map, ns, null, replaceCRDT); byte[] bytes = Arrays.copyOfRange(out.array(), 0, out.getOffset()); - return Value.internalCreateValue(bytes, valFormat, regionId); + return Value.internalCreateValue(bytes, valFormat, regionId, rowArg.getRowMetadata()); } catch (IOException ioe) { throw new IllegalStateException( "IOException while serializing a JSON collection row",ioe); @@ -6637,10 +6771,10 @@ private boolean readJsonCollectionRow(AvroRowReader reader, /* * copy MR counter values in "from" to "to." This is used by put operations - * to prevent inadvertant modification of MR counters via a put call. + * to prevent inadvertent modification of MR counters via a put call. * NSON processing makes this relatively simple. * 1. generate events on the "to" data - * 2. when an MR counter field is found, find that data in the from data + * 2. when an MR counter field is found, find that data in the "from" data * and copy it */ public Value copyJsonCollectionMRCounters(byte[] toRow, byte[] fromRow) { @@ -6653,8 +6787,8 @@ public Value copyJsonCollectionMRCounters(byte[] toRow, byte[] fromRow) { ) { /* get past format and region id */ - int toOffset = offsetToData(toRow); - int fromOffset = offsetToData(fromRow); + int toOffset = Value.getValueOffset(toRow); + int fromOffset = Value.getValueOffset(fromRow); JsonCollectionReserializer jcr = new JsonCollectionReserializer(from, fromOffset, out); @@ -6676,16 +6810,6 @@ public Value copyJsonCollectionMRCounters(byte[] toRow, byte[] fromRow) { } } - private static int offsetToData(byte[] value) { - Value.Format format = Value.Format.fromFirstByte(value[0]); - int offset = (format == null ? 
0 : 1); - if (format == Value.Format.MULTI_REGION_TABLE) { - /* add region id */ - offset += PackedInteger.getReadIntLength(value, 1); - } - return offset; - } - private class JsonCollectionReserializer extends NsonSerializer { private final ByteInputStream from; private final int offsetToData; @@ -6937,4 +7061,4 @@ private void addMRCounterValue(FieldValueImpl value) { counterValue.putMRCounterEntry(regionId, value); } } -} +} \ No newline at end of file diff --git a/kvmain/src/main/java/oracle/kv/impl/api/table/TableJsonUtils.java b/kvmain/src/main/java/oracle/kv/impl/api/table/TableJsonUtils.java index 5f529301..7e5706f2 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/table/TableJsonUtils.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/table/TableJsonUtils.java @@ -23,9 +23,15 @@ import java.util.Map; import java.util.concurrent.TimeUnit; +import oracle.kv.impl.api.table.IndexImpl.IndexField; +import oracle.kv.impl.api.table.TableEventHandler.IndexFieldInfo; +import oracle.kv.impl.api.table.TableImpl.JsonFormatter; import oracle.kv.impl.api.table.serialize.DecoderFactory; +import oracle.kv.impl.query.compiler.CompilerAPI; +import oracle.kv.impl.query.compiler.QueryControlBlock; import oracle.kv.impl.query.compiler.Translator.IdentityDefHelper; import oracle.kv.impl.security.ResourceOwner; +import oracle.kv.query.PrepareCallback; import oracle.kv.table.ArrayValue; import oracle.kv.table.FieldDef; import oracle.kv.table.FieldDef.Type; @@ -39,6 +45,7 @@ import com.fasterxml.jackson.core.JsonGenerator; import com.fasterxml.jackson.core.JsonParseException; import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.core.JsonToken; /** * This class provides utilities for interaction with JSON processing @@ -87,6 +94,9 @@ public class TableJsonUtils { final static String TABLE = "table"; final static String WITH_NO_NULLS = "withNoNulls"; final static String UNIQUE = "withUniqueKeysPerRow"; + final static String ARGUMENTS = "arguments"; + final static String FUNCTION = "function"; + final static String PATH = "path"; /* related to limits */ final static String LIMITS = "limits"; @@ -150,6 +160,7 @@ static DecoderFactory getDecoderFactory() { } static private JsonFactory jsonFactory; + static private JsonFactory jsonSpecFactory; /* * common code to create a JSON parser @@ -184,6 +195,17 @@ public static JsonParser createJsonParser(Reader in) return jsonFactory.createParser(in); } + /* This creates a JSON parser inline with the JSON spec. This is used for + validating the row metadata values. */ + public static JsonParser createSpecJsonParser(String in) + throws IOException { + if (jsonSpecFactory == null) { + jsonSpecFactory = new JsonFactory(); + } + + return jsonSpecFactory.createParser(in); + } + /** * Translate the specified Base64 string into a byte array. */ @@ -389,27 +411,20 @@ static FieldDefImpl fromJson(MapValue node) { /** * Adds an index definition to the table. 
*/ - static void indexFromNode(MapValue node, TableImpl table) { + static void indexFromNode(MapValue node, TableImpl table, int jsonVersion) { - ArrayValue fields = node.get(FIELDS).asArray(); - ArrayList fieldStrings = new ArrayList(fields.size()); - for (int i = 0; i < fields.size(); i++) { - fieldStrings.add(fields.get(i).asString().get()); + + ArrayList fieldStrings = new ArrayList<>(); + ArrayList types = new ArrayList<>(); + + if (jsonVersion < JsonFormatter.V2) { + indexFieldsFromNodeV1(node, fieldStrings, types); + } else { + indexFieldsFromNode(node, fieldStrings, types); } - ArrayList typeStrings = null; - if (node.get(TYPES) != null) { - ArrayValue types = node.get(TYPES).asArray(); - typeStrings = new ArrayList(types.size()); - for (int i = 0; i < fields.size(); i++) { - FieldValue typeNode = types.get(i); - if (typeNode.isJsonNull()) { - typeStrings.add(null); - } else { - typeStrings.add(FieldDef.Type.valueOf( - typeNode.asString().get())); - } - } + if (types.isEmpty()) { + types = null; } String name = getStringFromNode(node, NAME, true); @@ -429,7 +444,7 @@ static void indexFromNode(MapValue node, TableImpl table) { Map annotations = getMapFromNode(node, ANNOTATIONS); Map properties = getMapFromNode(node, PROPERTIES); - table.addIndex(new IndexImpl(name, table, fieldStrings, typeStrings, + table.addIndex(new IndexImpl(name, table, fieldStrings, types, indexNulls, isUnique, annotations, properties, desc)); } @@ -437,7 +452,6 @@ static void indexFromNode(MapValue node, TableImpl table) { /** * Build Table from JSON string * - * NOTE: this format was test-only in R3, but export/import has made use * of it for R4. This means that changes must be made carefully and if * state is added to a table or index it needs to be reflected in the JSON @@ -453,6 +467,17 @@ public static TableImpl fromJsonString(String jsonString, private static TableImpl fromJson(MapValue rootNode, TableImpl parent) { + + /* + * The version of this JSON schema. KVSTORE-2724 introduced a version + * change and previously the json object had no version field. If the + * version field not present, this json object is older, and fill in + * with CURRENT_VERSION. + */ + Integer val = getIntFromNode(rootNode, JSON_VERSION); + int jsonVersion = (val != null) ? val.intValue() : + JsonFormatter.CURRENT_VERSION; + /* * Create a TableBuilder for the table. */ @@ -461,8 +486,7 @@ private static TableImpl fromJson(MapValue rootNode, TableBuilder tb = null; String tname = getStringFromNode(rootNode, NAME, true); if (rootNode.get(SYSTABLE) != null) { - tb = - TableBuilder.createSystemTableBuilder(tname); + tb = TableBuilder.createSystemTableBuilder(tname); } else { tb = TableBuilder.createTableBuilder (namespace, @@ -627,7 +651,7 @@ private static TableImpl fromJson(MapValue rootNode, arrayNode = rootNode.get(INDEXES).asArray(); for (int i = 0; i < arrayNode.size(); i++) { MapValue map = arrayNode.get(i).asMap(); - indexFromNode(map, newTable); + indexFromNode(map, newTable, jsonVersion); } } @@ -709,6 +733,18 @@ static Boolean getBooleanFromMap(MapValue map, String field) { return Boolean.valueOf(fieldNode.asBoolean().get()); } + /** + * Returns the integer value of the named field in the MapValue + * if it exists, otherwise null. 
+ */ + static Integer getIntFromNode(MapValue node, String field) { + final FieldValue fieldNode = node.get(field); + if ((fieldNode == null) || !fieldNode.isInteger()) { + return null; + } + return Integer.valueOf(fieldNode.asInteger().get()); + } + /** * Returns a Map of the named field in the MapValue * if it exists, otherwise null. @@ -737,4 +773,342 @@ private static Map getMapFromNode(MapValue node, } return map; } -} + + /* + * Returns the external format string for the specified index field, used + * by DDLGenerator to generate the create index ddl and by TableImpl to + * build the JSON string of Table object. + * + * Converts the internal representation of functional index field that uses + * character '#' and '@' as indicators of start of the field name and the + * function arguments to its external format. + * e.g. substring#name@,1,3 -> substring(s,1,3) + * + * Also append the declared type if requested. + */ + public static String toExternalIndexField(IndexImpl index, + int fieldPos, + boolean includeType) { + String field = index.getFields().get(fieldPos); + IndexField ifield = index.getIndexPath(fieldPos); + StringBuilder sb = new StringBuilder(); + + boolean isFunctional = (ifield.getFunction() != null); + if (isFunctional) { + int pos = field.indexOf("#"); + /* Append the function name and the parenthesis '(' */ + sb.append(field.substring(0, pos)) + .append("("); + + /* + * Append the field path only without arguments, as it should be + * followed by the declared type. + * + * Note that we can't simply search for the next '@' to locate the + * boundary of the field path, because '@' could be a special + * character in the field path(e.g. info."@".name) + */ + int end = field.length(); + if (ifield.getFunctionArgs() != null) { + /* + * The index field format: #@,arg1[,arg2..] + * + * For example, substring(info.name,1,3) + * The internal format string is "substring#info.name@,1,3", the + * IndexField.functionArgs is ",1,3". The field path "info.name" + * is the part between the "#" and the "@" before the function + * arguments. + */ + end -= 1 + ifield.getFunctionArgs().length(); + } + sb.append(field.substring(pos + 1, end)); + } else { + sb.append(field); + } + + /* Append the declared type: AS */ + if (includeType) { + FieldDef.Type type = index.getFieldType(fieldPos); + if (type != null) { + sb.append(" AS ") + .append(IndexField.getDDLTypeString(type)); + } + } + + /* + * If field is function, append the function arguments and the right + * parenthesis ')'. + */ + if (isFunctional) { + if (ifield.getFunctionArgs() != null) { + sb.append(ifield.getFunctionArgs()); + } + sb.append(")"); + } + return sb.toString(); + } + + /** + * Collects information of the index fields + * + * Public for cloud use. 
+ */ + public static List collectIndexFieldInfos(IndexImpl index) { + final List fields = new ArrayList<>(); + + for (int i = 0; i < index.numFields(); i++) { + IndexField idxField = index.getIndexPath(i); + + /* declared type */ + String declaredType = null; + if (index.getTypes() != null) { + Type ftype = index.getFieldType(i); + if (ftype != null) { + declaredType = IndexField.getDDLTypeString(ftype); + } + } + + IndexFieldInfo info; + if (idxField.getFunction() == null) { + /* not a function */ + info = new IndexFieldInfo(index.getFields().get(i), + declaredType); + } else { + /* function */ + String function = idxField.getFunction().getName(); + String returnType = idxField.getType().getDDLString(); + String arguments = null; + String path = idxField.getPathName(); + path = path.substring(function.length() + 1); + if (!path.isEmpty()) { + arguments = path; + if (declaredType != null) { + arguments += " AS " + declaredType; + } + if (idxField.getFunctionArgs() != null) { + arguments += idxField.getFunctionArgs(); + } + } + info = new IndexFieldInfo(function, arguments, returnType); + } + fields.add(info); + } + return fields; + } + + /* + * Parses V2 Index JSON representation + * + * "fields": [{ + * "path": # normal field + * }, { + * "path": , # json field + * "type": , + * }, { + * "function": , # function field + * "arguments": + * "type": , + * }] + * + * Reads the "fields" of the index node in JSON metadata and populates the index + * field strings and their declared types into fieldStrings and types. + * + * The field string is the internal representation of index field: + * - If field is not a function, it is the path of indexed field. + * - If field is a function, function#[field_path[,arg[,arg..]]], see + * {@link oracle.kv.impl.query.compiler.Translator#getIndexFieldNames()} + * for details. + * e.g. substring(info.name as string, 0, 3), field string is + * "substring#info.name@0,3" + */ + private static void indexFieldsFromNode(MapValue node, + List fieldStrings, + List types) { + + boolean hasFunction = false; + boolean hasType = false; + FieldValue prop; + + ArrayValue fields = node.get(FIELDS).asArray(); + for (int i = 0; i < fields.size(); i++) { + MapValue obj = fields.get(i).asMap(); + if (obj.getFields().keySet().contains(FUNCTION)) { + /* + * The index contains a functional field; stop traversing the index + * fields. The field strings will be collected by compiling + * the index ddl later. + */ + hasFunction = true; + fieldStrings.clear(); + types.clear(); + break; + } + fieldStrings.add(obj.get(PATH).asString().get()); + + prop = obj.get(TYPE); + if (prop != null) { + if (!hasType) { + for (int n = 0; n < i; n++) { + types.add(null); + } + hasType = true; + } + types.add(IndexField.fromDdlTypeString(prop.asString().get())); + } else { + if (hasType) { + types.add(null); + } + } + } + + /* Not a functional index */ + if (!hasFunction) { + return; + } + + /* + * Functional index + * + * The parser converts the index fields to their internal representation. + * We build a create-index ddl containing all the index fields and then + * compile it using the PrepareCallback to get the internal field + * strings.
+ */ + final StringBuilder sb = new StringBuilder("create index idx on tbl("); + for (int i = 0; i < fields.size(); i++) { + if (i > 0) { + sb.append(","); + } + MapValue field = fields.get(i).asMap(); + prop = field.get(FUNCTION); + if (prop != null) { + sb.append(prop.asString().get()).append("("); + prop = field.get(ARGUMENTS); + if (prop != null) { + sb.append(prop.asString()); + } + sb.append(")"); + } else { + prop = field.get(PATH); + assert(prop != null); + sb.append(prop.asString().get()); + + prop = field.get(TYPE); + if (prop != null) { + sb.append(" AS ").append(prop.asString().get()); + } + } + } + sb.append(")"); + + PrepareCallback prepCB = new PrepareCallback() { + @Override + public void indexFields(String[] idxFields) { + for (String field : idxFields) { + fieldStrings.add(field); + } + } + @Override + public void indexFieldTypes(Type[] idxFieldTypes) { + if (idxFieldTypes != null) { + for (Type type : idxFieldTypes) { + types.add(type); + } + } + } + }; + + QueryControlBlock qcb = CompilerAPI.compile(sb.toString().toCharArray(), + null, null, null, + null, prepCB); + if (!qcb.succeeded()) { + throw new IllegalArgumentException( + "Unable to parse index field in table JSON metadata: " + + qcb.getErrorMessage(), qcb.getException()); + } + } + + /* + * Parses V1 Index JSON representation. + * + * "fields": ["path1", "path2", ...], + * "types": ["type1", "type2", ...] + * + * Reads the "fields" and "types" of index node in JSON metadata, populate + * the index field strings and their declared types into fieldStrings and + * types. + */ + private static void indexFieldsFromNodeV1(MapValue node, + List fieldStrings, + List types) { + + ArrayValue fieldsNode = node.get(FIELDS).asArray(); + for (int i = 0; i < fieldsNode.size(); i++) { + fieldStrings.add(fieldsNode.get(i).asString().get()); + } + + if (node.get(TYPES) != null) { + ArrayValue typesNode = node.get(TYPES).asArray(); + for (int i = 0; i < fieldsNode.size(); i++) { + FieldValue typeNode = typesNode.get(i); + if (typeNode.isJsonNull()) { + types.add(null); + } else { + types.add(FieldDef.Type.valueOf(typeNode.asString().get())); + } + } + } + } + + /** + * Validates input is a valid JSON construct: object, array, string, number, + * true, false or null. Throws IllegalArgumentException if not valid. + * Multiple JSON Objects are not allowed. Strings must use only double + * quotes ("). + */ + public static void validateJsonConstruct(String jsonInput) { + try (JsonParser jp = createSpecJsonParser(jsonInput)) { + + int s = 0; + int i = 0; + JsonToken token = jp.nextToken(); + if (token == null) { + throw new IllegalArgumentException( + "Value is not a valid JSON construct."); + } else if (JsonToken.START_OBJECT.equals(token) || + JsonToken.START_ARRAY.equals(token)) { + s += 1; + } else { + i += ( s == 0 ? 1 : 0); + } + + while (!jp.isClosed()) { + token = jp.nextToken(); + if (token != null) { + if(JsonToken.FIELD_NAME.equals(token)) { + // skip + } else if (JsonToken.START_OBJECT.equals(token) || + JsonToken.START_ARRAY.equals(token) ) { + if (s == 0) { + throw new IllegalArgumentException( + "Multiple JSON " + + "Objects not allowed."); + } + s++; + } else if (JsonToken.END_OBJECT.equals(token) || + JsonToken.END_ARRAY.equals(token)) { + s--; + i += ( s == 0 ? 1 : 0); + } else { + i += ( s == 0 ? 
1 : 0); + } + if (i > 1) { + throw new IllegalArgumentException("Multiple top level JSON constructs not allowed"); + } + } + } + } catch (IOException ioe) { + throw new IllegalArgumentException("JSON parse failed: " + ioe); + } + } +} \ No newline at end of file diff --git a/kvmain/src/main/java/oracle/kv/impl/api/table/TableMetadata.java b/kvmain/src/main/java/oracle/kv/impl/api/table/TableMetadata.java index ee86a04d..24c9885b 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/table/TableMetadata.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/table/TableMetadata.java @@ -389,6 +389,7 @@ public TableImpl addTable(String namespace, List shardKey, FieldMap fieldMap, TimeToLive ttl, + TimeToLive beforeImageTTL, TableLimits limits, boolean r2compat, int schemaId, @@ -396,7 +397,8 @@ public TableImpl addTable(String namespace, ResourceOwner owner) { return addTable(namespace, name, parentName, - primaryKey, primaryKeySizes, shardKey, fieldMap, ttl, + primaryKey, primaryKeySizes, shardKey, fieldMap, + ttl, beforeImageTTL, limits, r2compat, schemaId, description, owner, false, null, null, false, null); } @@ -409,6 +411,7 @@ public TableImpl addTable(String namespace, List shardKey, FieldMap fieldMap, TimeToLive ttl, + TimeToLive beforeImageTTL, TableLimits limits, boolean r2compat, int schemaId, @@ -423,7 +426,9 @@ public TableImpl addTable(String namespace, primaryKey, primaryKeySizes, shardKey, fieldMap, - ttl, limits, + ttl, + beforeImageTTL, + limits, r2compat, schemaId, description, owner, sysTable, @@ -486,6 +491,7 @@ public TableImpl dropTable(String namespace, */ public boolean evolveTable(TableImpl table, int tableVersion, FieldMap fieldMap, TimeToLive ttl, + TimeToLive beforeImgTTL, String description, boolean systemTable, IdentityColumnInfo newIdentityColumnInfo, @@ -513,6 +519,7 @@ public boolean evolveTable(TableImpl table, int tableVersion, /* Exit if nothing has changed */ // TODO - what about description? if (fieldMap.equals(table.getFieldMap()) && compareTTL(ttl, table.getDefaultTTL()) && + compareTTL(beforeImgTTL, table.getBeforeImageTTL()) && compareIdentityColumn(table.getIdentityColumnInfo(), newIdentityColumnInfo) && compareRegions(table.isChild() ? 
null : table.getRemoteRegions(), @@ -527,7 +534,8 @@ public boolean evolveTable(TableImpl table, int tableVersion, table.numTableVersions()); } - table.evolve(fieldMap, ttl, description, newIdentityColumnInfo, null, + table.evolve(fieldMap, ttl, beforeImgTTL, description, + newIdentityColumnInfo, null, regions); bumpSeqNum(); table.setSequenceNumber(seqNum); @@ -655,6 +663,7 @@ TableImpl insertTable(String namespace, List shardKey, FieldMap fields, TimeToLive ttl, + TimeToLive beforeImageTTL, TableLimits limits, boolean r2compat, int schemaId, @@ -719,7 +728,8 @@ TableImpl insertTable(String namespace, parent.checkChildLimit(name); table = new TableImpl(namespace, name, parent, primaryKey, primaryKeySizes, shardKey, - fields, ttl, limits, r2compat, schemaId, + fields, ttl, beforeImageTTL, + limits, r2compat, schemaId, description, true, owner, sysTable, identityColumnInfo, regionIds, jsonCollection, mrCounters); @@ -734,7 +744,8 @@ TableImpl insertTable(String namespace, } table = new TableImpl(namespace, name, null, primaryKey, primaryKeySizes, shardKey, - fields, ttl, limits, r2compat, schemaId, + fields, ttl, beforeImageTTL, + limits, r2compat, schemaId, description, true, owner, sysTable, identityColumnInfo, regionIds, jsonCollection, mrCounters); @@ -753,11 +764,13 @@ TableImpl evolveTable(String namespace, String tableName, FieldMap fields, TimeToLive ttl, + TimeToLive beforeImgTTL, String description, IdentityColumnInfo identityColumnInfo, Set regions) { final TableImpl table = getTable(namespace, tableName, true); - table.evolve(fields, ttl, description, identityColumnInfo, null, regions); + table.evolve(fields, ttl, beforeImgTTL, description, + identityColumnInfo, null, regions); return table; } diff --git a/kvmain/src/main/java/oracle/kv/impl/api/table/TableMultiGetBatch.java b/kvmain/src/main/java/oracle/kv/impl/api/table/TableMultiGetBatch.java index e3216e32..67e32830 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/table/TableMultiGetBatch.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/table/TableMultiGetBatch.java @@ -223,6 +223,7 @@ protected void convertResult(Result result, vv, row, entry.getExpirationTime(), + entry.getCreationTime(), entry.getModificationTime(), false, false); diff --git a/kvmain/src/main/java/oracle/kv/impl/api/table/TablePath.java b/kvmain/src/main/java/oracle/kv/impl/api/table/TablePath.java index 72b4ba12..f8b2a763 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/table/TablePath.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/table/TablePath.java @@ -20,6 +20,7 @@ import oracle.kv.impl.query.compiler.CompilerAPI; import oracle.kv.impl.query.compiler.EscapeUtil; import oracle.kv.impl.query.compiler.Function; +import oracle.kv.impl.query.compiler.FuncRowMetadata; import oracle.kv.table.FieldDef.Type; /** @@ -182,6 +183,18 @@ void addBranch(ArrayList branch) { ArrayList> getBranches() { return branches; } + + @Override + public String toString() { + + StringBuilder sb = new StringBuilder("{\n"); + sb.append(" kind : ").append(kind).append("\n"); + sb.append(" step : ").append(step).append("\n"); + sb.append(" fieldPos : ").append(fieldPos).append("\n"); + sb.append(" keysPos : ").append(keysPos).append("\n"); + sb.append("}"); + return sb.toString(); + } } final private FieldMap fieldMap; @@ -206,6 +219,11 @@ public TablePath(TableImpl table, String path) { return; } + if (!steps.isEmpty() && + steps.get(0).step.equals(FuncRowMetadata.COL_NAME)) { + return; + } + for (StepInfo step : steps) { if (step.isRecStep()) { 
step.setKind(StepKind.MAP_FIELD); @@ -434,7 +452,12 @@ public final String getLastStep() { * component of the field. */ public FieldDefImpl getFirstDef() { - return fieldMap.getFieldDef(steps.get(0).step); + + String firstStep = steps.get(0).step; + if (firstStep.equals(FuncRowMetadata.COL_NAME)) { + return FieldDefImpl.Constants.jsonDef; + } + return fieldMap.getFieldDef(firstStep); } public static List parsePathName(TableImpl table, @@ -492,7 +515,7 @@ public List parsePathName(String pathname) { * to start parsing the path. */ String funcName = sb.toString(); - function = CompilerAPI.getFuncLib().getFunc(funcName, 1); + function = CompilerAPI.getFuncLib().getFunc(funcName, -1); sb.delete(0, sb.length()); if (i + 1 == pathname.length()) { diff --git a/kvmain/src/main/java/oracle/kv/impl/api/table/TableScan.java b/kvmain/src/main/java/oracle/kv/impl/api/table/TableScan.java index 4a9d5328..f2b9e720 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/table/TableScan.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/table/TableScan.java @@ -423,6 +423,7 @@ protected void convertResult(Result result, entry.getValue(), entry.getVersion(), entry.getExpirationTime(), + entry.getCreationTime(), entry.getModificationTime())); } } @@ -506,6 +507,7 @@ private static Row convertToRow(TableAPIImpl apiImpl, return apiImpl.getRowFromValueVersion(vv, fullKey, rkvv.getExpirationTime(), + rkvv.getCreationTime(), rkvv.getModificationTime(), false, rkvv.getIsTombstone()); diff --git a/kvmain/src/main/java/oracle/kv/impl/api/table/TabularFormatter.java b/kvmain/src/main/java/oracle/kv/impl/api/table/TabularFormatter.java index b72d977d..425d43ec 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/table/TabularFormatter.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/table/TabularFormatter.java @@ -22,6 +22,7 @@ import java.util.Map; import java.util.Map.Entry; +import oracle.kv.impl.api.table.IndexImpl.IndexField; import oracle.kv.impl.query.shell.output.ResultOutputFactory; import oracle.kv.impl.query.shell.output.ResultOutputFactory.OutputMode; import oracle.kv.impl.query.shell.output.ResultOutputFactory.ResultOutput; @@ -43,6 +44,7 @@ public class TabularFormatter { private final static String FIELD_NAME = "name"; private final static String FIELD_TTL = "ttl"; + private final static String FIELD_BEFORE_IMAGE_TTL = "beforeImageTTL"; private final static String FIELD_OWNER = "owner"; private final static String FIELD_NAMESPACE = "namespace"; private final static String FIELD_SYSTABLE = "sysTable"; @@ -64,7 +66,7 @@ public class TabularFormatter { private final static String FIELD_TABLE = "table"; private final static String FIELD_FIELDS = "fields"; private final static String FIELD_MULTI_KEY = "multiKey"; - private final static String FIELD_TYPES = "declaredType"; + private final static String FIELD_TYPES = "fieldType"; private final static String FIELD_ANNOTATIONS = "annotations"; private final static String FIELD_PROPERTIES = "properties"; @@ -155,6 +157,7 @@ private static RecordValue generateTableInfoRecord( row.put(FIELD_NAMESPACE, emptyIfNull(table.getInternalNamespace())); row.put(FIELD_NAME, table.getFullName()); row.put(FIELD_TTL, emptyIfNull(table.getDefaultTTL())); + row.put(FIELD_BEFORE_IMAGE_TTL, emptyIfNull(table.getBeforeImageTTL())); row.put(FIELD_OWNER, emptyIfNull(table.getOwner())); row.put(FIELD_SYSTABLE, booleanYesNo(table.isSystemTable())); row.put(FIELD_JSON_COLLECTION, booleanYesNo(table.isJsonCollection())); @@ -289,16 +292,22 @@ private static RecordValue 
generateIndexRecord(IndexImpl index) { row.put(FIELD_MULTI_KEY, booleanYesNo(index.isMultiKey())); ArrayValue av = row.putArray(FIELD_FIELDS); - for (String field : index.getFields()) { + for (int i = 0; i < index.numFields(); i++) { + String field = TableJsonUtils.toExternalIndexField(index, i, true); av.add(field); } if (!isTextIndex) { av = row.putArray(FIELD_TYPES); - if (index.getTypes() != null) { - for (Type t : index.getTypes()) { - av.add(emptyIfNull(t)); + for (int i = 0; i < index.numFields(); i++) { + IndexField ifield = index.getIndexPath(i); + Type t; + if (ifield.getFunction() != null) { + t = ifield.getType().getType(); + } else { + t = index.getFieldType(i); } + av.add(emptyIfNull(t)); } } else { av = row.putArray(FIELD_ANNOTATIONS); @@ -362,6 +371,7 @@ private static TableImpl buildTableInfoTable() { tb.addString(FIELD_NAMESPACE); tb.addString(FIELD_NAME); tb.addString(FIELD_TTL); + tb.addString(FIELD_BEFORE_IMAGE_TTL); tb.addString(FIELD_OWNER); tb.addString(FIELD_JSON_COLLECTION); tb.addString(FIELD_SYSTABLE); diff --git a/kvmain/src/main/java/oracle/kv/impl/api/table/TupleValue.java b/kvmain/src/main/java/oracle/kv/impl/api/table/TupleValue.java index 366a6a64..ff473121 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/table/TupleValue.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/table/TupleValue.java @@ -50,6 +50,8 @@ public class TupleValue extends FieldValueImpl { long theExpirationTime; + long theCreationTime; + long theModificationTime; int thePartition; @@ -60,6 +62,9 @@ public class TupleValue extends FieldValueImpl { Version theRowVersion; + String rowMetadata; + + public TupleValue( RecordDefImpl def, FieldValueImpl[] regs, @@ -259,6 +264,14 @@ public long getExpirationTime() { return theExpirationTime; } + public void setCreationTime(long t) { + theCreationTime = t; + } + + public long getCreationTime() { + return theCreationTime; + } + public void setModificationTime(long t) { theModificationTime = t; } @@ -299,6 +312,14 @@ public Version getVersion() { return theRowVersion; } + public void setRowMetadata(String rowMetadata) { + this.rowMetadata = rowMetadata; + } + + public String getRowMetadata() { + return rowMetadata; + } + /* * FieldValueImpl internal api methods */ diff --git a/kvmain/src/main/java/oracle/kv/impl/api/table/ValueReader.java b/kvmain/src/main/java/oracle/kv/impl/api/table/ValueReader.java index abb693b9..b0038fb4 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/table/ValueReader.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/table/ValueReader.java @@ -28,18 +28,32 @@ public interface ValueReader extends AvroRowReader { * that's a thought for a future refactor. 
*/ void setTableVersion(int tableVersion); + void setExpirationTime(long expirationTime); + + @SuppressWarnings(value = "unused") + default void setCreationTime(long creationTime) {} + @SuppressWarnings("unused") default void setModificationTime(long modificationTime) {} + + @SuppressWarnings("unused") + default void setRowMetadata(String rowMetadata) {} + void setVersion(Version version); + @SuppressWarnings("unused") default void setRegionId(int regionId) {} + @SuppressWarnings("unused") default void setTombstone(boolean isTombstone) {} T getValue(); + Table getTable(); + void reset(); + /* this doesn't belong here at all, but is kept for compat for now */ void setValue(T value); } diff --git a/kvmain/src/main/java/oracle/kv/impl/api/table/ValueSerializer.java b/kvmain/src/main/java/oracle/kv/impl/api/table/ValueSerializer.java index 53fccfc9..c7d42a3c 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/table/ValueSerializer.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/table/ValueSerializer.java @@ -46,6 +46,17 @@ public interface RowSerializer extends RecordValueSerializer { */ TimeToLive getTTL(); + /** + * Returns the metadata associated with the row. + * A null value represents the absence of metadata. + */ + String getRowMetadata(); + + /** + * Sets the rowMetadata + */ + void setRowMetadata(String rowMetadata); + /** * Returns true if the RowSerializer is for a Primary Key. */ diff --git a/kvmain/src/main/java/oracle/kv/impl/api/table/serialize/AvroEncoder.java b/kvmain/src/main/java/oracle/kv/impl/api/table/serialize/AvroEncoder.java index 1f9b014e..0a12e27a 100644 --- a/kvmain/src/main/java/oracle/kv/impl/api/table/serialize/AvroEncoder.java +++ b/kvmain/src/main/java/oracle/kv/impl/api/table/serialize/AvroEncoder.java @@ -324,7 +324,7 @@ public void writeString(Utf8 utf8) throws IOException { * expected */ public void writeString(String string) throws IOException { - final int charLength = string.length(); + final int charLength = string != null ? string.length() : 0; if (0 == charLength) { writeZero(); return; diff --git a/kvmain/src/main/java/oracle/kv/impl/async/AbstractEndpointGroup.java b/kvmain/src/main/java/oracle/kv/impl/async/AbstractEndpointGroup.java index 95630a2c..3ee597ac 100644 --- a/kvmain/src/main/java/oracle/kv/impl/async/AbstractEndpointGroup.java +++ b/kvmain/src/main/java/oracle/kv/impl/async/AbstractEndpointGroup.java @@ -22,8 +22,10 @@ import java.util.Iterator; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.ScheduledThreadPoolExecutor; +import java.util.concurrent.ThreadFactory; import java.util.concurrent.TimeUnit; import java.util.logging.Logger; @@ -38,11 +40,6 @@ */ public abstract class AbstractEndpointGroup implements EndpointGroup { - /** - * The maximum thread size of the back up executor. - */ - private static final int BACKUP_EXECUTOR_NUM_CORE_THREADS = 1024; - private final Logger logger; private final RateLimitingLogger rateLimitingLogger; /** @@ -72,7 +69,12 @@ public abstract class AbstractEndpointGroup implements EndpointGroup { * TODO: In the future when we provide thread-pool service for each * process, this backup executor should be replaced. */ - private final ScheduledThreadPoolExecutor backupExecutor; + private final ExecutorService backupExecutor; + /** + * The schedueld back up executor. The purpose is the same as the + * backupExecutor. 
+ */ + private final ScheduledExecutorService backupSchedExecutor; /* * Creator and responder endpoints. Use ConcurrentHashMap for thread-safety * and high concurrency. Entries are removed when the underlying @@ -144,49 +146,35 @@ protected AbstractEndpointGroup(Logger logger, logger); this.dialogResourceManager = new DialogResourceManager( numPermits, perfTracker.getDialogResourceManagerPerfTracker()); + final ThreadFactory threadFactory = new KVThreadFactory( + String.format("%s.backup", getClass().getSimpleName()), logger) { + @Override + public UncaughtExceptionHandler makeUncaughtExceptionHandler() { + return exceptionHandler; + } + }; /* - * Creates a ScheduledThreadPoolExecutor with - * BACKUP_EXECUTOR_NUM_CORE_THREADS core threads. Although a subclass of - * ThreadPoolExecutor, the ScheduledThreadPoolExecutor is a bit special. - * The executor has unbounded queue and its max thread size cannot be - * configured. Therefore the executor will create new threads when there - * are more tasks than the number of threads until the thread count - * reaches BACKUP_EXECUTOR_NUM_CORE_THREADS (i.e., 1024). After that, if - * more tasks are scheduled, they are enqueued until OOME happens. - * - * We also configure keep-alive time and allow core thread to timeout. - * This is because the back up executor is only used occasionally for - * various tasks that should not block the nio channel executor and - * therefore it seems undesirable to keep a lot of unused the threads - * around. The javadoc of ScheduledThreadPoolExecutor warns about such - * configuration. However, I think the reason for those warnings is for - * timely execution of the tasks: we need to spin out core threads for - * new tasks if they can be timed out. Since we do not expect back up - * executor to be used to its full capacity with timing sensitive tasks, - * such configuration should be OK. - * - * TODO: Currently using ScheduledThreadPoolExecutor but it is not - * satisfactory since its max thread number cannot be configured - * properly and its queue is unbounded. We want an executor that has a - * max thread size and bounded queue size with reject policy of - * restarting the process [KVSTORE-2265]. + * Creates the backupExecutor with the newCachedThreadPool. This will + * reuse an idle thread when a new task is submitted, but create a new one + * if none exists. There is no bound on the maximum number of threads. This + * setup can prevent deadlock issues in the presence of blocking tasks + * ([KVSTORE-2260]). Furthermore, it will not start too many threads if + * not necessary ([KVSTORE-2719]). The only downside is that this will + * lead to OOM, which is less satisfactory because ideally we want to cap + * the thread count and restart the RN earlier. */ - this.backupExecutor = - new ScheduledThreadPoolExecutor( - BACKUP_EXECUTOR_NUM_CORE_THREADS, - new KVThreadFactory( - String.format("%s.backup", getClass().getSimpleName()), - logger) - { - @Override - public UncaughtExceptionHandler - makeUncaughtExceptionHandler() - { - return exceptionHandler; - } - }); - backupExecutor.setKeepAliveTime(1, TimeUnit.MINUTES); - backupExecutor.allowCoreThreadTimeOut(true); + this.backupExecutor = Executors.newCachedThreadPool(threadFactory); + /* + * Creates the backupSchedExecutor with the newScheduledThreadPool. Set + * the core thread count to eight. Currently only + * NioEndpointHandler$ChannelInputCloseOrRetryAfterRejection and + * NioChannelThreadPoolPerfTracker$HeartbeatCheckTask use this + * executor.
The tasks are non-blocking, not frequent and not + * performance critical. Therefore, eight core threads should be more + * than sufficient. + */ + this.backupSchedExecutor = + Executors.newScheduledThreadPool(8, threadFactory); } public Logger getLogger() { @@ -463,10 +451,15 @@ public boolean getIsShutdown() { } @Override - public ScheduledExecutorService getBackupSchedExecService() { + public ExecutorService getBackupExecService() { return backupExecutor; } + @Override + public ScheduledExecutorService getBackupSchedExecService() { + return backupSchedExecutor; + } + /** * Removes a listener. */ @@ -673,12 +666,17 @@ synchronized void shutdown() { /* For testing */ /** - * Shuts down the backup executor, waits for termination. + * Shuts down the backup executors, waits for termination. */ public boolean awaitBackupExecutorQuiescence(long timeout, TimeUnit unit) throws InterruptedException { + final long deadlineNanos = System.nanoTime() + unit.toNanos(timeout); backupExecutor.shutdown(); - return backupExecutor.awaitTermination(timeout, unit); + backupSchedExecutor.shutdown(); + return backupExecutor.awaitTermination( + deadlineNanos - System.nanoTime(), TimeUnit.NANOSECONDS) && + backupSchedExecutor.awaitTermination( + deadlineNanos - System.nanoTime(), TimeUnit.NANOSECONDS); } } diff --git a/kvmain/src/main/java/oracle/kv/impl/async/EndpointGroup.java b/kvmain/src/main/java/oracle/kv/impl/async/EndpointGroup.java index 9ce09310..ce27b998 100644 --- a/kvmain/src/main/java/oracle/kv/impl/async/EndpointGroup.java +++ b/kvmain/src/main/java/oracle/kv/impl/async/EndpointGroup.java @@ -14,6 +14,7 @@ package oracle.kv.impl.async; import java.io.IOException; +import java.util.concurrent.ExecutorService; import java.util.concurrent.ScheduledExecutorService; import oracle.kv.impl.async.exception.ConnectionNotEstablishedException; @@ -133,6 +134,17 @@ ListenHandle listen(ListenerConfig listenerConfig, * * @return the executor service */ + ExecutorService getBackupExecService(); + + /** + * Returns the back-up scheduled executor services associated with this + * group. + * + * Note that the current implementation creates a small core size. Therefore + * tasks submitted to this executor must be non-blocking. 
+ * + * @return the executor service + */ ScheduledExecutorService getBackupSchedExecService(); /** diff --git a/kvmain/src/main/java/oracle/kv/impl/async/InetNetworkAddress.java b/kvmain/src/main/java/oracle/kv/impl/async/InetNetworkAddress.java index 9f573f87..6bead430 100644 --- a/kvmain/src/main/java/oracle/kv/impl/async/InetNetworkAddress.java +++ b/kvmain/src/main/java/oracle/kv/impl/async/InetNetworkAddress.java @@ -121,8 +121,7 @@ public InetNetworkAddress(String hostname, int port) { } return new InetNetworkAddress(hostname, port); }, - AsyncRegistryUtils.getEndpointGroup() - .getBackupSchedExecService()); + AsyncRegistryUtils.getEndpointGroup().getBackupExecService()); } static CompletableFuture @@ -142,8 +141,7 @@ public InetNetworkAddress(String hostname, int port) { } return new InetNetworkAddress(hostname, address.getPort()); }, - AsyncRegistryUtils.getEndpointGroup() - .getBackupSchedExecService()); + AsyncRegistryUtils.getEndpointGroup().getBackupExecService()); } /** @@ -212,8 +210,7 @@ public CompletableFuture resolveSocketAddress() { */ return CompletableFuture.supplyAsync( () -> new InetSocketAddress(hostname, port), - AsyncRegistryUtils.getEndpointGroup() - .getBackupSchedExecService()); + AsyncRegistryUtils.getEndpointGroup().getBackupExecService()); } @Override diff --git a/kvmain/src/main/java/oracle/kv/impl/async/dialog/nio/NioChannelThreadPool.java b/kvmain/src/main/java/oracle/kv/impl/async/dialog/nio/NioChannelThreadPool.java index 4742d816..eb38f714 100644 --- a/kvmain/src/main/java/oracle/kv/impl/async/dialog/nio/NioChannelThreadPool.java +++ b/kvmain/src/main/java/oracle/kv/impl/async/dialog/nio/NioChannelThreadPool.java @@ -31,7 +31,7 @@ public class NioChannelThreadPool { private final int id; private final AtomicInteger childSequencer = new AtomicInteger(0); private final AtomicReferenceArray executors; - private final ScheduledExecutorService backupExecutor; + private final ScheduledExecutorService backupSchedExecutor; private final int maxQuiescentSeconds; private final AtomicInteger index = new AtomicInteger(0); private final KVThreadFactory threadFactory; @@ -52,7 +52,7 @@ public NioChannelThreadPool(Logger logger, int num, int maxQuiescentSeconds, AsyncEndpointGroupFaultHandler faultHandler, - ScheduledExecutorService backupExecutor) { + ScheduledExecutorService backupSchedExecutor) { if (num <= 0) { throw new IllegalArgumentException(String.format( @@ -61,13 +61,13 @@ public NioChannelThreadPool(Logger logger, this.logger = logger; this.id = poolSequencer.getAndIncrement(); this.executors = new AtomicReferenceArray(num); - this.backupExecutor = backupExecutor; + this.backupSchedExecutor = backupSchedExecutor; this.maxQuiescentSeconds = maxQuiescentSeconds; this.threadFactory = new KVThreadFactory( NioChannelThreadPool.class.getName(), logger); this.faultHandler = faultHandler; - this.perfTracker = - new NioChannelThreadPoolPerfTracker(this, logger, backupExecutor); + this.perfTracker = new NioChannelThreadPoolPerfTracker(this, logger, + backupSchedExecutor); /* * TODO: should we have core threads that we start at the beginning and * keep active? 
If we do care about efficiency of using threads and @@ -193,8 +193,8 @@ private NioChannelExecutor createExecutor(int childIndex) { } } - public ScheduledExecutorService getBackupExecutor() { - return backupExecutor; + public ScheduledExecutorService getBackupSchedExecutor() { + return backupSchedExecutor; } /** diff --git a/kvmain/src/main/java/oracle/kv/impl/async/dialog/nio/NioChannelThreadPoolPerfTracker.java b/kvmain/src/main/java/oracle/kv/impl/async/dialog/nio/NioChannelThreadPoolPerfTracker.java index c316c9af..4dc2f482 100644 --- a/kvmain/src/main/java/oracle/kv/impl/async/dialog/nio/NioChannelThreadPoolPerfTracker.java +++ b/kvmain/src/main/java/oracle/kv/impl/async/dialog/nio/NioChannelThreadPoolPerfTracker.java @@ -134,7 +134,7 @@ public class NioChannelThreadPoolPerfTracker { public NioChannelThreadPoolPerfTracker( NioChannelThreadPool pool, Logger logger, - ScheduledExecutorService backupExecutor) + ScheduledExecutorService backupSchedExecutor) { this.pool = pool; this.rateLimitingLogger = new RateLimitingLogger( @@ -143,7 +143,7 @@ public NioChannelThreadPoolPerfTracker( logger); this.heartbeatTimes = new AtomicLongArray(pool.getExecutors().length()); - backupExecutor.scheduleWithFixedDelay( + backupSchedExecutor.scheduleWithFixedDelay( new HeartbeatCheckTask(), 1, heartbeatCheckIntervalMillis, TimeUnit.MILLISECONDS); } diff --git a/kvmain/src/main/java/oracle/kv/impl/async/dialog/nio/NioCreatorEndpoint.java b/kvmain/src/main/java/oracle/kv/impl/async/dialog/nio/NioCreatorEndpoint.java index ebc6566d..812d4eb9 100644 --- a/kvmain/src/main/java/oracle/kv/impl/async/dialog/nio/NioCreatorEndpoint.java +++ b/kvmain/src/main/java/oracle/kv/impl/async/dialog/nio/NioCreatorEndpoint.java @@ -74,6 +74,7 @@ private EndpointHandler setupHandler(SocketChannel socketChannel) perfName, remoteAddress, executor, + endpointGroup.getBackupExecService(), endpointGroup.getBackupSchedExecService(), getDialogHandlerFactoryMap(), socketChannel); diff --git a/kvmain/src/main/java/oracle/kv/impl/async/dialog/nio/NioEndpointHandler.java b/kvmain/src/main/java/oracle/kv/impl/async/dialog/nio/NioEndpointHandler.java index b2db7567..e4bed6ee 100644 --- a/kvmain/src/main/java/oracle/kv/impl/async/dialog/nio/NioEndpointHandler.java +++ b/kvmain/src/main/java/oracle/kv/impl/async/dialog/nio/NioEndpointHandler.java @@ -17,6 +17,7 @@ import java.nio.ByteBuffer; import java.nio.channels.SocketChannel; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutorService; import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; @@ -69,7 +70,8 @@ public class NioEndpointHandler private static final int CLOSE_INPUT_IN_BACKUP_EXECUTOR_NUM_RETRIES = 10; private final NioChannelExecutor channelExecutor; - private final ScheduledExecutorService backupExecutor; + private final ExecutorService backupExecutor; + private final ScheduledExecutorService backupSchedExecutor; private final EndpointConfig endpointConfig; private final SocketChannel socketChannel; /* @@ -138,7 +140,8 @@ public NioEndpointHandler( String perfName, NetworkAddress remoteAddress, NioChannelExecutor channelExecutor, - ScheduledExecutorService backupExecutor, + ExecutorService backupExecutor, + ScheduledExecutorService backupSchedExecutor, DialogHandlerFactoryMap dialogHandlerFactories, SocketChannel socketChannel, DialogResourceManager concurrentDialogsManager, @@ -149,6 +152,7 @@ public NioEndpointHandler( dialogHandlerFactories, 
concurrentDialogsManager); this.channelExecutor = channelExecutor; this.backupExecutor = backupExecutor; + this.backupSchedExecutor = backupSchedExecutor; this.endpointConfig = endpointConfig; this.socketChannel = socketChannel; this.channelInput = new NioChannelInput(trackersManager, this); @@ -1186,7 +1190,7 @@ public void run() { return; } scheduleOrLogReject( - () -> backupExecutor.schedule( + () -> backupSchedExecutor.schedule( new ChannelInputCloseOrRetryAfterRejection(remaining), 1, TimeUnit.SECONDS), ChannelInputCloseOrRetryAfterRejection.class.getSimpleName() diff --git a/kvmain/src/main/java/oracle/kv/impl/async/dialog/nio/NioResponderEndpoint.java b/kvmain/src/main/java/oracle/kv/impl/async/dialog/nio/NioResponderEndpoint.java index a210ee9c..3035f6f1 100644 --- a/kvmain/src/main/java/oracle/kv/impl/async/dialog/nio/NioResponderEndpoint.java +++ b/kvmain/src/main/java/oracle/kv/impl/async/dialog/nio/NioResponderEndpoint.java @@ -36,7 +36,9 @@ class NioResponderEndpoint extends AbstractResponderEndpoint { super(endpointGroup, remoteAddress, listenerConfig, listener); this.handler = new PreReadWrappedEndpointHandler( this, endpointConfig, remoteAddress, - executor, getEndpointGroup().getBackupSchedExecService(), + executor, + getEndpointGroup().getBackupExecService(), + getEndpointGroup().getBackupSchedExecService(), listener, socketChannel); } diff --git a/kvmain/src/main/java/oracle/kv/impl/async/dialog/nio/PreReadWrappedEndpointHandler.java b/kvmain/src/main/java/oracle/kv/impl/async/dialog/nio/PreReadWrappedEndpointHandler.java index 8aa0e811..2444b59f 100644 --- a/kvmain/src/main/java/oracle/kv/impl/async/dialog/nio/PreReadWrappedEndpointHandler.java +++ b/kvmain/src/main/java/oracle/kv/impl/async/dialog/nio/PreReadWrappedEndpointHandler.java @@ -17,6 +17,7 @@ import java.nio.ByteBuffer; import java.nio.channels.SocketChannel; import java.util.Arrays; +import java.util.concurrent.ExecutorService; import java.util.concurrent.ScheduledExecutorService; import java.util.logging.Level; import java.util.logging.Logger; @@ -63,7 +64,8 @@ class PreReadWrappedEndpointHandler EndpointConfig endpointConfig, NetworkAddress remoteAddress, NioChannelExecutor channelExecutor, - ScheduledExecutorService backupExecutor, + ExecutorService backupExecutor, + ScheduledExecutorService backupSchedExecutor, NioEndpointGroup.NioListener listener, SocketChannel socketChannel) { super(responderEndpoint, endpointConfig, remoteAddress); @@ -78,7 +80,7 @@ class PreReadWrappedEndpointHandler this.innerEndpointHandler = new NioEndpointHandler( logger, this, endpointConfig, false, listener.getLocalAddress().toString(), - remoteAddress, channelExecutor, backupExecutor, + remoteAddress, channelExecutor, backupExecutor, backupSchedExecutor, listener.getDialogHandlerFactoryMap(), socketChannel, responderEndpoint.getEndpointGroup().getDialogResourceManager(), diff --git a/kvmain/src/main/java/oracle/kv/impl/async/dialog/nio/PreWriteWrappedEndpointHandler.java b/kvmain/src/main/java/oracle/kv/impl/async/dialog/nio/PreWriteWrappedEndpointHandler.java index 79897aa9..04d3d593 100644 --- a/kvmain/src/main/java/oracle/kv/impl/async/dialog/nio/PreWriteWrappedEndpointHandler.java +++ b/kvmain/src/main/java/oracle/kv/impl/async/dialog/nio/PreWriteWrappedEndpointHandler.java @@ -17,6 +17,7 @@ import java.nio.ByteBuffer; import java.nio.channels.SelectionKey; import java.nio.channels.SocketChannel; +import java.util.concurrent.ExecutorService; import java.util.concurrent.ScheduledExecutorService; import 
java.util.logging.Level; import java.util.logging.Logger; @@ -44,7 +45,8 @@ class PreWriteWrappedEndpointHandler String perfName, NetworkAddress remoteAddress, NioChannelExecutor channelExecutor, - ScheduledExecutorService backupExecutor, + ExecutorService backupExecutor, + ScheduledExecutorService backupSchedExecutor, DialogHandlerFactoryMap dialogHandlerFactories, SocketChannel socketChannel) { @@ -55,7 +57,7 @@ class PreWriteWrappedEndpointHandler this.innerEndpointHandler = new NioEndpointHandler( logger, this, endpointConfig, true, String.format("%s(%s)", perfName, remoteAddress.toString()), - remoteAddress, channelExecutor, backupExecutor, + remoteAddress, channelExecutor, backupExecutor, backupSchedExecutor, dialogHandlerFactories, socketChannel, creatorEndpoint.getEndpointGroup().getDialogResourceManager(), creatorEndpoint.getEndpointGroup().getEventTrackersManager()); diff --git a/kvmain/src/main/java/oracle/kv/impl/client/admin/DdlStatementExecutor.java b/kvmain/src/main/java/oracle/kv/impl/client/admin/DdlStatementExecutor.java index 29cb5cbd..40eaec29 100644 --- a/kvmain/src/main/java/oracle/kv/impl/client/admin/DdlStatementExecutor.java +++ b/kvmain/src/main/java/oracle/kv/impl/client/admin/DdlStatementExecutor.java @@ -24,6 +24,7 @@ import java.util.concurrent.TimeUnit; import java.util.logging.Logger; +import oracle.kv.AuthenticationRequiredException; import oracle.kv.ExecutionFuture; import oracle.kv.FaultException; import oracle.kv.KVSecurityException; @@ -37,6 +38,8 @@ import oracle.kv.impl.fault.WrappedClientException; import oracle.kv.impl.security.AuthContext; import oracle.kv.impl.security.login.LoginManager; +import oracle.kv.impl.test.TestHook; +import oracle.kv.impl.test.TestHookExecute; import oracle.kv.impl.topo.Topology; import oracle.kv.impl.util.KVThreadFactory; import oracle.kv.impl.util.registry.Protocols; @@ -111,6 +114,8 @@ public class DdlStatementExecutor { private final Map> notificationTargets; private final Topology topo; + + /* All access must be made while synchronized on this object. */ private LoginManager loginManager; private final Logger logger; private final ClientId clientId; @@ -125,6 +130,9 @@ public class DdlStatementExecutor { /* The polling interval for status check tasks. */ private final long checkIntervalMillis; + /* Test hook invoked before executing DDL */ + public static volatile TestHook beforeExecuteHook = null; + public DdlStatementExecutor(KVStoreImpl store) { this(store.getDispatcher().getTopologyManager().getTopology(), KVStoreImpl.getLoginManager(store), @@ -197,9 +205,17 @@ public synchronized void renewLoginManager(LoginManager loginManger) { clientAdminService = finder.getDDLService(); } + /* + * For unit test. + */ + public synchronized LoginManager getLoginManager() { + return loginManager; + } + /** * Establish an RMI connection to the admin master. Ensure that the proper - * credentials are set up. + * credentials are set up. Except for the constructor, any other invocation + * of this method must be made while synchronized on this object. * * A note about login managers: * ---------------------------- @@ -247,7 +263,8 @@ private void ensureClientAdminService() * This method can be used if there isn't any storage node available * in the topology, typically the bootstrap admin case. 
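To make the caching behavior around ensureClientAdminService and canHandleDDL easier to follow, here is a library-free sketch under assumed names; it is not the actual class, only the invalidate-on-failure pattern this hunk relies on: the handle is confined to synchronized methods and is discarded when a call fails (for example with RemoteException or AuthenticationRequiredException), so the next call resolves a fresh handle to the current admin master.

import java.util.function.Supplier;

class CachedServiceSketch<S> {
    private final Supplier<S> resolver; /* locates the current admin master */
    private S cached;                   /* guarded by "this" */

    CachedServiceSketch(Supplier<S> resolver) {
        this.resolver = resolver;
    }

    /* Returns the cached handle, resolving a new one if needed. */
    synchronized S get() {
        if (cached == null) {
            cached = resolver.get();
        }
        return cached;
    }

    /* Invoked when a call on the cached handle fails. */
    synchronized void invalidate() {
        cached = null;
    }
}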
*/ - public void findClientAdminService(String adminHost, int adminPort) + public synchronized void findClientAdminService(String adminHost, + int adminPort) throws FaultException { if (canHandleDDL()) { @@ -284,13 +301,14 @@ private boolean canHandleDDL() { if (clientAdminService.canHandleDDL()) { return true; } - } catch (RemoteException e) { + } catch (RemoteException | AuthenticationRequiredException e) { logger.fine("Ensuring connection, got " + e); } - /* - * Either the RMI service is down, or the admin is no longer a - * master. Null out the cached connection so we can find another. + * Either the RMI service is down, the admin is no longer a master, or + * the cached login token is no longer valid and requires + * re-authentication. Null out the cached connection so we can find + * another on the next retry. */ clientAdminService = null; return false; @@ -407,11 +425,10 @@ void taskFailed(int planId, RemoteException e) { public ExecutionFuture executeDdl(char[] statement, String namespace, ExecuteOptions options, - TableLimits limits, - LoginManager login) + TableLimits limits) throws IllegalArgumentException, FaultException { try { - loginManager = login; + assert TestHookExecute.doHookIfSet(beforeExecuteHook, null); final AuthContext authCtx = options == null ? null : options.getAuthContext(); ExecutionInfo info = getClientAdminService().execute(statement, @@ -475,11 +492,10 @@ public ExecutionFuture executeDdl(char[] statement, */ public ExecutionFuture setTableLimits(String namespace, String tableName, - TableLimits limits, - LoginManager login) + TableLimits limits) throws IllegalArgumentException, FaultException { try { - loginManager = login; + assert TestHookExecute.doHookIfSet(beforeExecuteHook, null); ExecutionInfo info = getClientAdminService(). setTableLimits(namespace, tableName, limits); return new DdlFuture(info.getPlanId(), this); diff --git a/kvmain/src/main/java/oracle/kv/impl/param/LoadParameters.java b/kvmain/src/main/java/oracle/kv/impl/param/LoadParameters.java index 2fceb527..821fdb03 100644 --- a/kvmain/src/main/java/oracle/kv/impl/param/LoadParameters.java +++ b/kvmain/src/main/java/oracle/kv/impl/param/LoadParameters.java @@ -178,7 +178,8 @@ public void saveParameters(File file, boolean readonly, Logger logger) { writer.printf("\n"); } catch (Exception e) { throw new IllegalStateException("Problem creating config file: " + - temp + ": " + e); + file + " (working dir: " + System.getProperty("user.dir") + + "): " + e); } finally { if (writer != null) { writer.close(); } diff --git a/kvmain/src/main/java/oracle/kv/impl/param/ParameterUtils.java b/kvmain/src/main/java/oracle/kv/impl/param/ParameterUtils.java index e66918ec..6a855e19 100644 --- a/kvmain/src/main/java/oracle/kv/impl/param/ParameterUtils.java +++ b/kvmain/src/main/java/oracle/kv/impl/param/ParameterUtils.java @@ -177,7 +177,6 @@ public ParameterUtils(ParameterMap map) { /* Replication. 
Not all of these are documented publicly */ ReplicationConfig.ENV_UNKNOWN_STATE_TIMEOUT + "=10 s;" + - ReplicationConfig.TXN_ROLLBACK_LIMIT + "=10;" + ReplicationConfig.REPLICA_ACK_TIMEOUT + "=5 s;" + ReplicationConfig.CONSISTENCY_POLICY + "=NoConsistencyRequiredPolicy;" + ReplicationMutableConfig.REPLAY_MAX_OPEN_DB_HANDLES + "=100;" + diff --git a/kvmain/src/main/java/oracle/kv/impl/pubsub/CheckpointTableManager.java b/kvmain/src/main/java/oracle/kv/impl/pubsub/CheckpointTableManager.java index 347a463a..4fd6b90c 100644 --- a/kvmain/src/main/java/oracle/kv/impl/pubsub/CheckpointTableManager.java +++ b/kvmain/src/main/java/oracle/kv/impl/pubsub/CheckpointTableManager.java @@ -81,11 +81,11 @@ public class CheckpointTableManager { /** * retry interval in ms */ - private final static int RETRY_INTERVAL_MS = 3000; + final static int RETRY_INTERVAL_MS = 3000; /** * maximum number of attempts */ - private final static int MAX_NUM_ATTEMPTS = 20; + final static int MAX_NUM_ATTEMPTS = 20; /** * read checkpoint from master diff --git a/kvmain/src/main/java/oracle/kv/impl/pubsub/DataEntry.java b/kvmain/src/main/java/oracle/kv/impl/pubsub/DataEntry.java index a7ed270f..9dbc88e6 100644 --- a/kvmain/src/main/java/oracle/kv/impl/pubsub/DataEntry.java +++ b/kvmain/src/main/java/oracle/kv/impl/pubsub/DataEntry.java @@ -13,6 +13,9 @@ package oracle.kv.impl.pubsub; +import static oracle.kv.impl.pubsub.DataEntry.Type.TXN_ABORT; +import static oracle.kv.impl.pubsub.DataEntry.Type.TXN_COMMIT; + import oracle.kv.table.TimeToLive; import com.sleepycat.je.dbi.DatabaseId; @@ -23,7 +26,7 @@ * Object to represent an operation from source kvstore via replication * stream. Each entry is reconstructed from a single message in replication * stream. Received entries will be queued in a FIFO queue to be processed - * at granularity of transaction. Currently two types of operations can be + * at granularity of transaction. Currently, two types of operations can be * constructed from replication stream: 1) a data operation representing a * write (e.g., put or delete) operation in kvstore; 2) a transactional * operation (e.g, commit or abort). 
All other type of messages shall be @@ -58,6 +61,25 @@ public class DataEntry { TimeToLive.DO_NOT_EXPIRE.getUnit()); /** expiration time in ms */ private final long expirationMs; + /** + * True if before image is enabled for the table in the entry, false + * otherwise + */ + private final boolean beforeImgEnabled; + /** + * Value byte array for before image, or null if before image is disabled + * or does not exist + */ + private final byte[] valBeforeImg; + /** + * Timestamp in ms of the before image, or 0 if before image is disabled or + * does not exist + */ + private final long tsBeforeImg; + /** + * Expiration time in ms of the before image, or 0 if before image is + * disabled or does not exist + */ + private final long expBeforeImg; /** * Builds a data entry @@ -70,10 +92,16 @@ public class DataEntry { * @param dbId database ID of the entry or null * @param lastUpdateMs last update time in ms * @param expirationMs expiration time + * @param beforeImgEnabled true if before image is enabled + * @param valBeforeImg value byte[] of before image + * @param tsBeforeImg timestamp of before image + * @param expBeforeImg expiration time of before image */ - DataEntry(Type type, long vlsn, long txnId, byte[] key, byte[] value, - DatabaseId dbId, long lastUpdateMs, long expirationMs) { - + private DataEntry(Type type, long vlsn, long txnId, byte[] key, + byte[] value, DatabaseId dbId, long lastUpdateMs, + long expirationMs, boolean beforeImgEnabled, + byte[] valBeforeImg, long tsBeforeImg, + long expBeforeImg) { this.type = type; this.vlsn = vlsn; this.txnId = txnId; @@ -82,6 +110,37 @@ public class DataEntry { this.dbId = dbId; this.lastUpdateMs = lastUpdateMs; this.expirationMs = expirationMs; + this.beforeImgEnabled = beforeImgEnabled; + this.valBeforeImg = valBeforeImg; + this.tsBeforeImg = tsBeforeImg; + this.expBeforeImg = expBeforeImg; + } + + /** + * Builds a put entry + */ + static DataEntry getPutEntry(long vlsn, long txnId, byte[] key, + byte[] value, DatabaseId dbId, long ts, + long expMs, boolean beforeImgEnabled, + byte[] valBeforeImg, long tsBeforeImg, + long expBeforeImg) { + return new DataEntry(Type.PUT, vlsn, txnId, key, value, dbId, ts, + expMs, beforeImgEnabled, valBeforeImg, + tsBeforeImg, expBeforeImg); + } + + /** + * Builds a delete entry + */ + static DataEntry getDelEntry(long vlsn, long txnId, byte[] key, + byte[] value, DatabaseId dbId, long ts, + boolean beforeImgEnabled, + byte[] valBeforeImg, long tsBeforeImg, + long expBeforeImg) { + return new DataEntry(Type.DELETE, vlsn, txnId, key, value, dbId, ts, + DO_NOT_EXPIRE_EXPIRATION_TIME_MS, + beforeImgEnabled, valBeforeImg, tsBeforeImg, + expBeforeImg); } /** @@ -95,25 +154,32 @@ public class DataEntry { * @param dbId database ID of the entry or null * @param lastUpdateMs last update time in ms */ - DataEntry(Type type, long vlsn, long txnId, byte[] key, byte[] value, - DatabaseId dbId, long lastUpdateMs) { + private DataEntry(Type type, long vlsn, long txnId, byte[] key, + byte[] value, DatabaseId dbId, long lastUpdateMs) { this(type, vlsn, txnId, key, value, dbId, lastUpdateMs, - DO_NOT_EXPIRE_EXPIRATION_TIME_MS); + DO_NOT_EXPIRE_EXPIRATION_TIME_MS, false, null, 0, 0); } /** - * Builds a data entry where update timestamp is not applicable + * Builds a data entry that represents a transaction commit * - * @param type type of entry - * @param vlsn vlsn of operation + * @param vlsn vlsn of entry * @param txnId txn id - * @param key key of the entry - * @param value value of the entry - * @param dbId database ID of the entry or null 
+ * @param ts timestamp of commit */ - DataEntry(Type type, long vlsn, long txnId, byte[] key, byte[] value, - DatabaseId dbId) { - this(type, vlsn, txnId, key, value, dbId, 0); + static DataEntry getCommitEntry(long vlsn, long txnId, long ts) { + return new DataEntry(TXN_COMMIT, vlsn, txnId, null, null, null, ts); + } + + /** + * Builds a data entry that represents a transaction abort + * + * @param vlsn vlsn of entry + * @param txnId txn id + * @param ts timestamp of abort + */ + static DataEntry getAbortEntry(long vlsn, long txnId, long ts) { + return new DataEntry(TXN_ABORT, vlsn, txnId, null, null, null, ts); } /** @@ -121,7 +187,7 @@ public class DataEntry { * * @return type of entry */ - Type getType() { + public Type getType() { return type; } @@ -130,7 +196,7 @@ Type getType() { * * @return VLSN of the entry */ - long getVLSN(){ + public long getVLSN() { return vlsn; } @@ -148,7 +214,7 @@ long getTxnID() { * * @return key as byte array, null if does not exist */ - byte[] getKey() { + public byte[] getKey() { return key; } @@ -158,7 +224,7 @@ byte[] getKey() { * @return value as byte array, null if it does not exist, e.g., in * delete operation. */ - byte[] getValue() { + public byte[] getValue() { return value; } @@ -188,6 +254,52 @@ long getExpirationMs() { return expirationMs; } + /** + * Returns if the before image is enabled + * @return true if the before image is enabled, false otherwise + */ + boolean isBeforeImgEnabled() { + return beforeImgEnabled; + } + + /** + * Returns value byte[] of the before image if available, or null if + * before image is not enabled + * @return value byte[] of the before image if available or null + */ + byte[] getValBeforeImg() { + if (!beforeImgEnabled) { + return null; + } + return valBeforeImg; + } + + /** + * Returns last modification timestamp of the before image if available, + * or 0 if before image is not enabled + * @return last modification timestamp of the before image if available + * or 0 if before image is not enabled. + */ + long getLastModTimeBeforeImg() { + if (!beforeImgEnabled) { + return 0; + } + return tsBeforeImg; + } + + /** + * Returns expiration timestamp of the before image if available, + * or 0 if before image is not enabled + * @return expiration timestamp of the before image if available + * or 0 if before image is not enabled. 
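These hunks replace the telescoping DataEntry constructors with private constructors plus named static factories (getPutEntry, getDelEntry, getCommitEntry, getAbortEntry), so entry kinds that can never carry a before image default those fields. A hypothetical sketch of that shape, with invented names only for illustration:

final class EntrySketch {
    enum Kind { PUT, DELETE, COMMIT }

    private final Kind kind;
    private final byte[] value;
    private final byte[] beforeImage; /* null unless before image present */

    /* Single private constructor carries every field. */
    private EntrySketch(Kind kind, byte[] value, byte[] beforeImage) {
        this.kind = kind;
        this.value = value;
        this.beforeImage = beforeImage;
    }

    /* Each named factory fills in only what its entry kind can have. */
    static EntrySketch put(byte[] value, byte[] beforeImage) {
        return new EntrySketch(Kind.PUT, value, beforeImage);
    }

    static EntrySketch commit() {
        /* commit records never carry a value or a before image */
        return new EntrySketch(Kind.COMMIT, null, null);
    }

    Kind kind() {
        return kind;
    }

    byte[] valueOrNull() {
        return value;
    }

    byte[] beforeImageOrNull() {
        return beforeImage; /* mirrors the guarded getters above */
    }
}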
+ */ + long getExpBeforeImg() { + if (!beforeImgEnabled) { + return 0; + } + return expBeforeImg; + } + @Override public String toString() { StringBuilder msg = new StringBuilder(); @@ -195,35 +307,37 @@ public String toString() { switch(type) { case TXN_COMMIT: msg.append("txn commit, ") - .append("vlsn: ").append(vlsn) - .append(", txn id: ").append(txnId); + .append("vlsn=").append(vlsn) + .append(", txn id=").append(txnId); break; case TXN_ABORT: msg.append("txn abort, ") - .append("vlsn: ").append(vlsn) - .append(", txn id: ").append(txnId); + .append("vlsn=").append(vlsn) + .append(", txn id=").append(txnId); break; case DELETE: msg.append("delete op, ") - .append("vlsn: ").append(vlsn) + .append("vlsn=").append(vlsn) .append(", key:").append(getKeyBytesString()) - .append(", txn id: ").append(txnId) - .append(", db id: ").append(dbId) - .append(", last update time: ").append(lastUpdateMs); + .append(", txn id=").append(txnId) + .append(", db id=").append(dbId) + .append(", last update time=").append(lastUpdateMs); + appendBeforeImage(msg); break; case PUT: msg.append("put op, ") - .append("vlsn: ").append(vlsn) + .append("vlsn=").append(vlsn) .append(", key:").append(getKeyBytesString()) - .append(", txn id: ").append(txnId) - .append(", db id: ").append(dbId) - .append(", last update time: ").append(lastUpdateMs) - .append(", expiration: ").append( + .append(", txn id=").append(txnId) + .append(", db id=").append(dbId) + .append(", last update time=").append(lastUpdateMs) + .append(", expiration=").append( (expirationMs != DO_NOT_EXPIRE_EXPIRATION_TIME_MS) ? expirationMs : "never"); + appendBeforeImage(msg); break; default: @@ -233,6 +347,18 @@ public String toString() { return msg.toString(); } + /** + * Appends before image info to the message if enabled + * @param msg message + */ + private void appendBeforeImage(StringBuilder msg) { + msg.append(", before image enabled=").append(beforeImgEnabled); + if (beforeImgEnabled) { + msg.append(", timestamp of before image=").append(tsBeforeImg); + msg.append(", expiration time of before image=").append(expBeforeImg); + } + } + /** * Returns the key bytes in string using JE helper method * @return key bytes in string @@ -244,7 +370,7 @@ private String getKeyBytesString() { /** * Type of messages supported in publisher */ - enum Type { + public enum Type { /* txn commit */ TXN_COMMIT, diff --git a/kvmain/src/main/java/oracle/kv/impl/pubsub/NoSQLStreamFeederFilter.java b/kvmain/src/main/java/oracle/kv/impl/pubsub/NoSQLStreamFeederFilter.java index a30dec66..8f7aa6db 100644 --- a/kvmain/src/main/java/oracle/kv/impl/pubsub/NoSQLStreamFeederFilter.java +++ b/kvmain/src/main/java/oracle/kv/impl/pubsub/NoSQLStreamFeederFilter.java @@ -15,6 +15,7 @@ import static com.sleepycat.je.log.LogEntryType.LOG_DEL_LN; import static com.sleepycat.je.log.LogEntryType.LOG_DEL_LN_TRANSACTIONAL; +import static com.sleepycat.je.log.LogEntryType.LOG_DEL_LN_TRANSACTIONAL_WITH_BEFORE_IMAGE; import static com.sleepycat.je.log.LogEntryType.LOG_INS_LN; import static com.sleepycat.je.log.LogEntryType.LOG_INS_LN_TRANSACTIONAL; import static com.sleepycat.je.log.LogEntryType.LOG_TRACE; @@ -22,6 +23,7 @@ import static com.sleepycat.je.log.LogEntryType.LOG_TXN_COMMIT; import static com.sleepycat.je.log.LogEntryType.LOG_UPD_LN; import static com.sleepycat.je.log.LogEntryType.LOG_UPD_LN_TRANSACTIONAL; +import static com.sleepycat.je.log.LogEntryType.LOG_UPD_LN_TRANSACTIONAL_WITH_BEFORE_IMAGE; import static com.sleepycat.je.utilint.VLSN.FIRST_VLSN; import static 
com.sleepycat.je.utilint.VLSN.INVALID_VLSN; import static oracle.kv.impl.util.ThreadUtils.threadId; @@ -87,7 +89,6 @@ import com.sleepycat.je.rep.stream.OutputWireRecord; import com.sleepycat.je.utilint.LoggerUtils; import com.sleepycat.je.utilint.VLSN; -import com.sleepycat.util.PackedInteger; import com.sleepycat.util.UtfOps; /** @@ -155,6 +156,12 @@ public class NoSQLStreamFeederFilter implements FeederFilter, Serializable { */ private final boolean localWritesOnly; + /** + * True if include before image in stream event if available, false + * otherwise + */ + private final boolean inclBeforeImage; + /** * If localWritesOnly is true, the region ID of the local region, used to * filter out entries with a different region id when the value is in @@ -279,7 +286,8 @@ public class NoSQLStreamFeederFilter implements FeederFilter, Serializable { private NoSQLStreamFeederFilter(Set tables, int nTotalParts, boolean localWritesOnly, - int localRegionId) { + int localRegionId, + boolean inclBeforeImage) { super(); if (tables == null) { @@ -307,6 +315,7 @@ private NoSQLStreamFeederFilter(Set tables, this.nTotalParts = nTotalParts; this.localWritesOnly = localWritesOnly; this.localRegionId = localRegionId; + this.inclBeforeImage = inclBeforeImage; /* * Will be initialized at server side. The methods refer to these null @@ -341,6 +350,32 @@ public void setStartVLSN(long vlsn) { startVLSN = vlsn; } + /** + * Sets the feeder filter id, the id can only be set once for a + * feeder filter instance + */ + @Override + public void setFeederFilterId(String id) { + if (filterId != null) { + throw new IllegalStateException( + "Stream feeder filter id has already been set to=" + filterId + + ", id=" + id); + } + filterId = id; + logger.fine(() -> lm("Set stream feeder filter id=" + filterId)); + } + + /** + * Returns true if subscription should include before image in entries, + * false otherwise. + * @return true if subscription should include before image in entries, + * false otherwise. + */ + @Override + public boolean includeBeforeImage() { + return inclBeforeImage; + } + /* convert the map with string table id key to a map with byte[] key */ private static Map> convertToBytesKey(Map> tableMatchKeys) { @@ -597,6 +632,7 @@ private static Map> getMatchKeys( } /** + * Unit test only * A convenience method to get feeder filter for on-prem MR table * @param tbs subscribed tables * @param nTotalParts total number of partitions in store @@ -607,7 +643,7 @@ public static NoSQLStreamFeederFilter getFilter(Set tbs, int nTotalParts, boolean localWritesOnly) { return getFilter(tbs, nTotalParts, localWritesOnly, - Region.LOCAL_REGION_ID); + Region.LOCAL_REGION_ID, false); } /** @@ -617,7 +653,7 @@ public static NoSQLStreamFeederFilter getFilter(Set tbs, *

    * It is more preferable to provide start vlsn than a set of owned * partitions because in order to provide the set of owned partitions, it - * need to query the server beforehand with given start vlsn, which is + * needs to query the server beforehand with given start vlsn, which is * unnecessarily since the filter can do that when it is installed at * feeder. * @@ -628,15 +664,17 @@ public static NoSQLStreamFeederFilter getFilter(Set tbs, * localWritesOnly is true to filter out entries * with another region ID when the value is in * MULTI_REGION_TABLE format + * @param inclBeforeImage true if include before image * * @return a feeder filter with given set of subscribed tables */ static NoSQLStreamFeederFilter getFilter(Set tbs, int nTotalParts, boolean localWritesOnly, - int localRegionId) { + int localRegionId, + boolean inclBeforeImage) { return new NoSQLStreamFeederFilter(tbs, nTotalParts, localWritesOnly, - localRegionId); + localRegionId, inclBeforeImage); } /** @@ -657,7 +695,6 @@ public synchronized OutputWireRecord execute(final OutputWireRecord record, /* once-time initialization on very first record */ if (!initialized) { hostRN = repImpl.getRepNode().getMasterName(); - filterId = hostRN; initScheduledStat(repImpl); /* only dump once */ LoggerUtils.info(logger, repImpl, @@ -674,6 +711,12 @@ public synchronized OutputWireRecord execute(final OutputWireRecord record, } openTxnIds = new HashSet<>(); initialized = true; + LoggerUtils.info(logger, repImpl, + lm("NoSQLStreamFilter initialized" + + ", filterId=" + filterId + + ", hostRN=" + hostRN + + ", # partitions=" + nTotalParts + + ", local writes only=" + localWritesOnly)); } /* entry from pgt db and need process, partGenTblDBId cannot be null */ @@ -745,7 +788,10 @@ public synchronized OutputWireRecord execute(final OutputWireRecord record, /* finally filter out all non-subscribed tables */ final OutputWireRecord ret = filter(record); LoggerUtils.finest(logger, repImpl, - () -> lm("vlsn of last passed=" + lastPassedVLSN + + () -> lm((ret == null ? "Block" : "Pass") + + " entry with vlsn=" + record.getVLSN() + + ", type=" + type + + ", vlsn of last passed=" + lastPassedVLSN + ", last processed=" + lastFilterVLSN)); return ret; } @@ -815,6 +861,8 @@ public synchronized FeederFilterChangeResult applyChange( ", type=" + req.getReqType() + ", result=[" + ret + "]" + ", table=" + req.getTableName() + + ", before image=" + inclBeforeImage + + ", durable entries=" + durableEntriesOnly() + ", #tables=" + tableIds.size() + ", idStrings=" + tableIds + ", ids=" + @@ -832,7 +880,8 @@ public synchronized FeederFilterChangeResult applyChange( */ NoSQLStreamFeederFilter updateFilter(Set tables) { return NoSQLStreamFeederFilter.getFilter( - tables, nTotalParts, localWritesOnly, localRegionId); + tables, nTotalParts, localWritesOnly, localRegionId, + inclBeforeImage); } /** @@ -1048,7 +1097,10 @@ public String toString() { } else { msg = "ids=" + Arrays.toString(tableIds.toArray()); } - return "[" + msg + ", owned partitions: " + + return "[" + filterId + "]" + + "[" + msg + ", before image=" + inclBeforeImage + + ", durable entries="+ durableEntriesOnly() + + ", owned partitions=" + (ownedParts == null ? 
"all" : ownedParts) + "]"; } @@ -1104,10 +1156,8 @@ private OutputWireRecord filter(final OutputWireRecord record) { /* block all non-local table writes if necessary */ final byte[] val = lnEntry.getData(); if (localWritesOnly && val != null && val.length > 0) { - final Value.Format format = Value.Format.fromFirstByte(val[0]); - if (Value.Format.isTableFormat(format) && - format == Value.Format.MULTI_REGION_TABLE && - PackedInteger.readInt(val, 1) != localRegionId) { + int regionId = Value.getRegionIdFromByteArray(val); + if (regionId != Region.NULL_REGION_ID && regionId != localRegionId) { return null; } } @@ -1204,7 +1254,9 @@ private static boolean isDataEntry(LogEntryType type) { LOG_DEL_LN.equals(type) || LOG_INS_LN_TRANSACTIONAL.equals(type) || LOG_UPD_LN_TRANSACTIONAL.equals(type) || - LOG_DEL_LN_TRANSACTIONAL.equals(type); + LOG_DEL_LN_TRANSACTIONAL.equals(type) || + LOG_UPD_LN_TRANSACTIONAL_WITH_BEFORE_IMAGE.equals(type) || + LOG_DEL_LN_TRANSACTIONAL_WITH_BEFORE_IMAGE.equals(type); } /* Compares two non-null byte[] from start inclusively to end exclusively */ @@ -1419,7 +1471,7 @@ private void dumpFilterStatistics(RepImpl repImpl) { TimeUnit.SECONDS); } - String buildFilterStatMsg() { + public String buildFilterStatMsg() { final StringBuilder sb = new StringBuilder("StreamFilter statistics:"); if (partGenTblDBId == null) { sb.append("Uninitialized"); @@ -1443,7 +1495,8 @@ String buildFilterStatMsg() { .append(getNumInParts()).append("\n"); sb.append("# partitions migrated in=") .append(getNumOutParts()).append("\n"); - sb.append("owned partitions=").append(getOwnedParts()); + sb.append("include before image=").append(inclBeforeImage).append("\n"); + sb.append("owned partitions=").append(getOwnedParts()).append("\n"); sb.append("max # open txns=").append(getMaxNumOpenTxn()); return sb.toString(); } @@ -1671,7 +1724,7 @@ private long getReplicableDBId(OutputWireRecord record) { } private String lm(String msg) { - return "[StreamFilter-" + filterId + "] " + msg; + return "[StreamFilter][" + filterId + "] " + msg; } /** diff --git a/kvmain/src/main/java/oracle/kv/impl/pubsub/NoSQLSubscriptionImpl.java b/kvmain/src/main/java/oracle/kv/impl/pubsub/NoSQLSubscriptionImpl.java index 33f117a3..7f76781b 100644 --- a/kvmain/src/main/java/oracle/kv/impl/pubsub/NoSQLSubscriptionImpl.java +++ b/kvmain/src/main/java/oracle/kv/impl/pubsub/NoSQLSubscriptionImpl.java @@ -575,7 +575,19 @@ public int getPendingRequests() { */ @Override public void subscribeTable(String tableName) { - startChangeWorker(StreamChangeReq.Type.ADD, tableName); + subscribeTable(tableName, false); + } + + /** + * Adds a subscribed table to the running subscription, and specify if to + * stream transaction. + * @param tableName name of the table + * @param streamTxn true if to stream transactions, false to stream write + * operations in {@link StreamOperation}. 
+ */ + @Override + public void subscribeTable(String tableName, boolean streamTxn) { + startChangeWorker(StreamChangeReq.Type.ADD, tableName, streamTxn); } /** @@ -585,7 +597,7 @@ public void subscribeTable(String tableName) { */ @Override public void unsubscribeTable(String tableName) { - startChangeWorker(StreamChangeReq.Type.REMOVE, tableName); + startChangeWorker(StreamChangeReq.Type.REMOVE, tableName, false); } /** @@ -1152,11 +1164,13 @@ private void subscriberOnNext(NoSQLSubscriber s, StreamOperation op) { /* Starts change worker thread */ private synchronized void startChangeWorker(StreamChangeReq.Type type, - String table) { + String table, + boolean streamTxn) { if (!isCanceled() && !parentPU.isClosed()) { /* check if too many requests before submit to executor */ if (!isTooManyRequests()) { - executor.submit(new StreamChangeWorkerThread(type, table)); + executor.submit( + new StreamChangeWorkerThread(type, table, streamTxn)); logger.info(lm("Submitted worker thread for request=" + type + " table=" + table + ", #pending requests=" + pendingRequests.incrementAndGet())); @@ -1213,13 +1227,16 @@ private class StreamChangeWorkerThread extends StoppableThread { private final StreamChangeReq.Type type; private final String tableName; - - StreamChangeWorkerThread(StreamChangeReq.Type type, String tableName) { + private final boolean streamTxn; + StreamChangeWorkerThread(StreamChangeReq.Type type, + String tableName, + boolean streamTxn) { super("StreamChangeWorkerThread" + "-" + UUID.randomUUID().toString().subSequence(0, 8) + "-" + subscriber.getSubscriptionConfig().getSubscriberId()); this.type = type; this.tableName = tableName; + this.streamTxn = streamTxn; } @Override @@ -1321,7 +1338,8 @@ public void run() { break; case OK: err = "Change successfully applied to filter" + - " (type=" + type + ", table=" + tableName + ")"; + " (type=" + type + ", table=" + tableName + + ", stream txn=" + streamTxn + ")"; logger.fine(() -> lm(err)); if (type.equals(StreamChangeReq.Type.ADD)) { processAdd(tableImpl); @@ -1370,7 +1388,7 @@ private void processAdd(TableImpl tableImpl) { checkCanceled(); /* add the table to the subscribed table list */ - parentPU.addTable(tableImpl); + parentPU.addTable(tableImpl, streamTxn); /* unset expiration time if stream is not empty */ parentPU.unsetExpireTimeMs(); diff --git a/kvmain/src/main/java/oracle/kv/impl/pubsub/OpenTransactionBuffer.java b/kvmain/src/main/java/oracle/kv/impl/pubsub/OpenTransactionBuffer.java index 047449bf..6cef6407 100644 --- a/kvmain/src/main/java/oracle/kv/impl/pubsub/OpenTransactionBuffer.java +++ b/kvmain/src/main/java/oracle/kv/impl/pubsub/OpenTransactionBuffer.java @@ -32,6 +32,7 @@ import java.util.logging.Logger; import oracle.kv.Key; +import oracle.kv.Value; import oracle.kv.impl.api.table.PrimaryKeyImpl; import oracle.kv.impl.api.table.RowImpl; import oracle.kv.impl.api.table.TableImpl; @@ -50,10 +51,12 @@ import oracle.kv.pubsub.StreamOperation.SequenceId; import oracle.kv.pubsub.SubscribedTableVersionException; import oracle.kv.pubsub.SubscriptionFailureException; +import oracle.kv.table.Row; +import oracle.kv.table.Table; +import oracle.kv.txn.TransactionIdImpl; import com.sleepycat.je.utilint.StoppableThread; import com.sleepycat.je.utilint.VLSN; -import com.sleepycat.util.PackedInteger; /** * Object maintaining a list of open transactions received from source. 
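Assuming the public subscription interface gains the same two-argument overload that NoSQLSubscriptionImpl implements above, usage on a running subscription might look like the following sketch; the table names are placeholders only.

import oracle.kv.pubsub.NoSQLSubscription;

class SubscribeTxnSketch {
    void addTables(NoSQLSubscription subscription) {
        /* stream individual puts/deletes, as before */
        subscription.subscribeTable("users");
        /* stream complete transactions for this table (and, per the
         * PublishingUnit changes, its child tables) */
        subscription.subscribeTable("orders", true);
    }
}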
When a @@ -184,6 +187,13 @@ public class OpenTransactionBuffer { /* deserializer to create stream operations from JE data entries */ private final StreamOperation.Deserializer deserializer; + /** + * True if to include before image in deserializing events, false + * otherwise. + */ + private final boolean inclBeforeImage; + /** true if include abort transactions, false otherwise */ + private final boolean includeAbortTxn; OpenTransactionBuffer(ReplicationStreamConsumer parent, RepGroupId repGroupId, @@ -200,7 +210,6 @@ public class OpenTransactionBuffer { this.logger = logger; this.deserializer = (deserializer == null) ? new DefaultDeserializer() : deserializer; - /* make a map of local copy of subscribed tables for quick lookup */ cachedTables = new ConcurrentHashMap<>(); if (tables == null) { @@ -242,6 +251,17 @@ public class OpenTransactionBuffer { /* to be initialized in startWorker() */ workerThread = null; + + final PublishingUnit pu = parent.getPu(); + if (pu == null) { + /* some unit test only */ + includeAbortTxn = false; + inclBeforeImage = false; + } else { + /* regular stream */ + includeAbortTxn = pu.includeAbortTransaction(); + inclBeforeImage = pu.includeBeforeImage(); + } } public RepGroupId getRepGroupId() { @@ -255,6 +275,14 @@ public boolean isWaitingForCkpt () { return partGenMarkProcessor.isWaitingForCkpt(); } + /** + * Unit test only + * Returns true if stream should include before image, false otherwise + */ + public boolean isInclBeforeImage() { + return inclBeforeImage; + } + /** * Shut down otb completely */ @@ -506,7 +534,8 @@ private synchronized void addEntry(DataEntry entry) { } /* Aborts a txn from openTxnBuffer */ - private synchronized void abort(final DataEntry entry) { + private synchronized void abort(final DataEntry entry) + throws InterruptedException { assert (DataEntry.Type.TXN_ABORT.equals(entry.getType())); @@ -517,7 +546,7 @@ private synchronized void abort(final DataEntry entry) { * feeder filler is unable to filter some commit/abort msg for * internal db or dup db. So it is possible we can see some * phantom commit/abort without an open txn in buffer. But we - * wont receive PUT/DEL for such internal db entries, so there + * will not receive PUT/DEL for such internal db entries, so there * is no open txn for such commit/abort in buffer. 
*/ logger.finest(() -> lm("Abort a non-existent txnid=" + txnid + "," + @@ -525,6 +554,12 @@ private synchronized void abort(final DataEntry entry) { return; } + if (includeAbortTxn) { + final long ts = entry.getLastUpdateMs(); + final long abortVLSN = entry.getVLSN(); + commitAbortHelper(false, txn, txnid, ts, abortVLSN); + } + /* remove txn from openTxnBuffer and update openTxnBuffer stats */ final long numOps = txn.size(); numAbortOps.addAndGet(numOps); @@ -535,6 +570,28 @@ private synchronized void abort(final DataEntry entry) { ", # of ops aborted=" + numOps)); } + /** + * Returns true if the stream operation is from a table that streams + * transactions, false otherwise + */ + private boolean isStreamTxnTable(StreamOperation op) { + final StreamOperation.Type type = op.getType(); + if (!type.equals(StreamOperation.Type.PUT) && + !type.equals(StreamOperation.Type.DELETE)) { + /* ignore all non-put non-delete operations */ + return false; + } + final PublishingUnit pu = parent.getPu(); + if (pu == null) { + /* some unit test only */ + return false; + } + final Table table = op.getTable(); + /* gets table id of top table to decide if stream txn */ + final long topTableId = ((TableImpl) table).getTopLevelTable().getId(); + return pu.getStreamTransaction(topTableId); + } + /* Commits an open txn from openTxnBuffer */ private synchronized void commit(final DataEntry entry) throws SubscriptionFailureException, InterruptedException { @@ -542,6 +599,7 @@ private synchronized void commit(final DataEntry entry) assert (DataEntry.Type.TXN_COMMIT.equals(entry.getType())); final long txnid = entry.getTxnID(); + final long vlsn = entry.getVLSN(); final List txn = openTxnBuffer.remove(txnid); if (txn == null) { /* @@ -551,25 +609,31 @@ private synchronized void commit(final DataEntry entry) * wont receive PUT/DEL for such internal db entries, so there * is no open txn for such commit/abort in buffer. 
*/ - logger.finest(() -> "Ignore a non-existent txn id=" + txnid); + logger.finest(() -> "Ignore a non-existent txn id=" + txnid + + ", vlsn=" + vlsn); return; } - commitHelper(txn); + commitAbortHelper(true, txn, txnid, entry.getLastUpdateMs(), vlsn); /* remove txn from openTxnBuffer and update openTxnBuffer stats */ final long numOps = txn.size(); numCommitOps.addAndGet(numOps); numCommitTxn.getAndIncrement(); - lastCommitVLSN = entry.getVLSN(); + lastCommitVLSN = vlsn; logger.finest(() -> lm("Committed txn=" + txnid + " with vlsn=" + lastCommitVLSN + ", # of ops committed=" + numOps)); } /* Commits a transaction and convert data entry to stream operations */ - private void commitHelper(List allOps) + private void commitAbortHelper(boolean commit, + List allOps, + long txnId, + long ts, + long commitVLSN) throws SubscriptionFailureException, InterruptedException { + final List txnOps = new ArrayList<>(); for (DataEntry entry : allOps) { /* sanity check just in case */ @@ -577,11 +641,11 @@ private void commitHelper(List allOps) if ((type != DataEntry.Type.PUT) && (type != DataEntry.Type.DELETE)) { throw new IllegalStateException( - "Type " + type + " cannot be streamed to client."); + "Type=" + type + " cannot be streamed to client."); } if (entry.getKey() == null) { - throw new IllegalStateException("key cannot be null when being " + - "deserialized."); + throw new IllegalStateException( + "key cannot be null when being deserialized."); } /* process a partition generation db entry */ @@ -590,15 +654,46 @@ private void commitHelper(List allOps) } /* regular data entry */ - final StreamOperation msg = buildStreamOp(entry.getKey(), - entry.getValue(), - type, - entry.getVLSN(), - entry.getLastUpdateMs(), - entry.getExpirationMs()); + final StreamOperation msg = + buildStreamOp(entry.getKey(), + entry.getValue(), + type, + entry.getVLSN(), + entry.getLastUpdateMs(), + entry.getExpirationMs(), + entry.isBeforeImgEnabled(), + entry.getValBeforeImg(), + entry.getLastModTimeBeforeImg(), + entry.getExpBeforeImg()); + if (msg == null) { + /* cannot deserialize the entry */ + continue; + } + if (!isStreamTxnTable(msg)) { + /* enqueue stream operation */ + enqueueMsg(msg); + logger.finest(() -> lm("Enqueue " + (commit ? + "commit" : "abort") + " op=" + msg)); + } else { + /* cache the ops in transaction */ + txnOps.add(msg); + logger.finest(() -> lm("Stream transaction for table id=" + + msg.getTableId())); + } + } + + if (!txnOps.isEmpty()) { + /* create txn stream operation */ + final int shardId = repGroupId.getGroupId(); + final TransactionIdImpl + id = new TransactionIdImpl(shardId, txnId, ts); + final StreamSequenceId sq = new StreamSequenceId(commitVLSN); + final StreamOperation txn = + new StreamTxnEvent(id, sq, commit, txnOps); /* enqueue stream operation */ - enqueueMsg(msg); - logger.finest(() -> lm("committed op=" + msg)); + enqueueMsg(txn); + logger.finest(() -> lm("Enqueue " + (commit ? 
+ "commit" : "abort") + " txn=" + txn)); } } @@ -669,7 +764,7 @@ private Map getCachedTables(byte[] key) { } if (table == null) { - /* already dropped, a short lived table */ + /* already dropped, a short-lived table */ droppedTables.add(rootTableId); return null; } @@ -686,13 +781,20 @@ private StreamOperation buildStreamOp(byte[] key, DataEntry.Type type, long vlsn, long lastUpdateMs, - long expirationMs) + long expirationMs, + boolean beforeImgEnabled, + byte[] valBeforeImg, + long tsBeforeImg, + long expBeforeImg) throws SubscriptionFailureException { /* check if the key belongs to a subscribed table */ final Map tbMap = getCachedTables(key); if (tbMap == null) { /* not a subscribed table or dropped */ + logger.finest(() -> lm("Key not found in subscribed tables" + + ", cached tables=" + cachedTables.keySet() + + ", dropped tables="+ droppedTables)); return null; } @@ -701,10 +803,14 @@ private StreamOperation buildStreamOp(byte[] key, for (TableImpl t : tbMap.values()) { final StreamOperation op = deserialize(t, key, value, type, vlsn, lastUpdateMs, - expirationMs); + expirationMs, beforeImgEnabled, valBeforeImg, + tsBeforeImg, expBeforeImg); if (op != null) { return op; } + logger.finest(() -> lm("[rg=" + repGroupId + ", vlsn=" + + vlsn + "] " + "key not from table=" + + t.getFullNamespaceName())); } } @@ -720,6 +826,8 @@ private StreamOperation buildStreamOp(byte[] key, // later /* cannot deserialize */ + logger.finest(() -> lm("Cannot deserialize key from tables=" + + tbMap.keySet())); return null; } @@ -730,10 +838,15 @@ private StreamOperation deserialize(TableImpl table, DataEntry.Type type, long vlsn, long lastUpdateMs, - long expirationMs) { + long expirationMs, + boolean beforeImgEnabled, + byte[] valBeforeImg, + long tsBeforeImg, + long expBeforeImg) { try { return createMsg(table, key, value, type, vlsn, lastUpdateMs, - expirationMs); + expirationMs, beforeImgEnabled, valBeforeImg, + tsBeforeImg, expBeforeImg); } catch (SubscribedTableVersionException stve) { final String rootTableId = getTableId(table.getTopLevelTable()); @@ -761,7 +874,8 @@ private StreamOperation deserialize(TableImpl table, " to ver=" + stve.getRequiredVersion())); /* we should not fail this time! 
*/ return createMsg(refresh, key, value, type, vlsn, - lastUpdateMs, expirationMs); + lastUpdateMs, expirationMs, beforeImgEnabled, + valBeforeImg, tsBeforeImg, expBeforeImg); } final String err = @@ -798,7 +912,11 @@ private StreamOperation createMsg(TableImpl table, DataEntry.Type type, long vlsn, long lastUpdateMs, - long expirationMs) + long expirationMs, + boolean beforeImgEnabled, + byte[] valBeforeImg, + long tsBeforeImg, + long expBeforeImg) throws SubscribedTableVersionException { final StreamSequenceId sequenceId = new StreamSequenceId(vlsn); @@ -817,12 +935,14 @@ private StreamOperation createMsg(TableImpl table, } return deserializer.getPutEvent( subscriberId, repGroupId, table, key, value, sequenceId, - lastUpdateMs, expirationMs); - + lastUpdateMs, expirationMs, inclBeforeImage, + beforeImgEnabled, valBeforeImg, tsBeforeImg, expBeforeImg); case DELETE: return deserializer.getDeleteEvent( subscriberId, repGroupId, table, key, value, sequenceId, - lastUpdateMs, expirationMs, !streamAllTables); + lastUpdateMs, expirationMs, !streamAllTables, + inclBeforeImage, beforeImgEnabled, valBeforeImg, + tsBeforeImg, expBeforeImg); default: /* should never reach here */ throw new AssertionError("Unrecognized type " + type); @@ -965,6 +1085,39 @@ private static String getRootTableIdString(byte[] key) { } } + /** + * Returns the size of open transaction buffer in bytes by iterating all + * data entries in the buffer + * @return size of open transaction buffer in bytes + */ + synchronized public long computeSize() { + long ret = 0; + final long ts = System.currentTimeMillis(); + for (List list : openTxnBuffer.values()) { + for (DataEntry de : list) { + ret += getDataEntrySize(de); + } + } + final long total = ret; + logger.fine(() -> lm("Estimated OTB size bytes=" + total + + ", computation elapsedMs=" + + (System.currentTimeMillis() - ts))); + return ret; + } + + private long getDataEntrySize(DataEntry entry) { + long ret = 40; /* metadata overhead, estimated */ + final byte[] key = entry.getKey(); + if (key != null) { + ret += key.length; + } + final byte[] val = entry.getValue(); + if (val != null) { + ret += val.length; + } + return ret; + } + public static class DefaultDeserializer implements StreamOperation.Deserializer { @@ -976,33 +1129,33 @@ public PutEvent getPutEvent(NoSQLSubscriberId subscriberId, byte[] value, SequenceId sequenceId, long lastModificationTime, - long expirationTime) { - RowImpl row; - try { - /* Deserialize complete row */ - row = table.createRowFromBytes( - key, value, table.isKeyOnly(), - false/* do not add missing col */); - - } catch (TableVersionException tve) { - /* need refresh table md */ - throw new SubscribedTableVersionException( - subscriberId, rgId, - table.getFullNamespaceName(), - tve.getRequiredVersion(), - table.getTableVersion()); - } - /* key was not associated with a table */ + long expirationTime, + boolean inclBeforeImage, + boolean beforeImgEnabled, + byte[] valBeforeImg, + long tsBeforeImg, + long expBeforeImg) { + + /* build current image */ + final RowImpl row = buildRow(subscriberId, rgId, table, key, value, + lastModificationTime, expirationTime); + /* build before image */ + final Row beforeImagRow = buildBeforeImageRow(subscriberId, + rgId, + table, + inclBeforeImage, + beforeImgEnabled, + key, + valBeforeImg, + tsBeforeImg, + expBeforeImg, + true); if (row == null) { return null; } - /* set last update time */ - row.setModificationTime(lastModificationTime); - /* set expiration time */ - row.setExpirationTime(expirationTime); - /* 
populate size info */ - row.setStorageSize(key.length + (value == null ? 0 : value.length)); - return new StreamPutEvent(row, sequenceId, rgId.getGroupId()); + return new StreamPutEvent(row, sequenceId, rgId.getGroupId(), + inclBeforeImage, beforeImgEnabled, + expBeforeImg, beforeImagRow); } @Override @@ -1014,7 +1167,12 @@ public DeleteEvent getDeleteEvent(NoSQLSubscriberId subscriberId, SequenceId sequenceId, long lastModificationTime, long expirationTime, - boolean exactTable) { + boolean exactTable, + boolean inclBeforeImage, + boolean beforeImgEnabled, + byte[] valBeforeImg, + long tsBeforeImg, + long expBeforeImg) { PrimaryKeyImpl delKey; try { /* a primary key */ @@ -1036,12 +1194,13 @@ public DeleteEvent getDeleteEvent(NoSQLSubscriberId subscriberId, delKey.setModificationTime(lastModificationTime); /* - * if a tombstone delete and tombstone is in MR format, set the - * region id to primary key. + * if a tombstone delete, set the region id and rowMetadata to + * primary key. */ if (value != null && value.length > 0) { - int regionId = PackedInteger.readInt(value, 1); - delKey.setRegionId(regionId); + Value val = Value.fromByteArray(value); + delKey.setRegionId(val.getRegionId()); + delKey.setRowMetadata(val.getRowMetadata()); } /* @@ -1056,7 +1215,91 @@ public DeleteEvent getDeleteEvent(NoSQLSubscriberId subscriberId, /* populate size info */ delKey.setStorageSize(key.length + (value == null ? 0 : value.length)); - return new StreamDelEvent(delKey, sequenceId, rgId.getGroupId()); + + /* build before image */ + final Row beforeImagRow = buildBeforeImageRow(subscriberId, + rgId, + table, + inclBeforeImage, + beforeImgEnabled, + key, + valBeforeImg, + tsBeforeImg, + expBeforeImg, + false); + return new StreamDelEvent(delKey, sequenceId, rgId.getGroupId(), + inclBeforeImage, beforeImgEnabled, + expBeforeImg, beforeImagRow); + } + + private RowImpl buildRow(NoSQLSubscriberId subscriberId, + RepGroupId rgId, TableImpl table, + byte[] key, byte[] val, long timestampMs, + long expTimeMs) { + + RowImpl row; + try { + /* Deserialize complete row */ + row = table.createRowFromBytes( + key, val, table.isKeyOnly(), + false/* do not add missing col */); + } catch (TableVersionException tve) { + /* need refresh table md */ + throw new SubscribedTableVersionException( + subscriberId, rgId, + table.getFullNamespaceName(), + tve.getRequiredVersion(), + table.getTableVersion()); + } + /* key was not associated with a table */ + if (row == null) { + return null; + } + /* set last update time */ + row.setModificationTime(timestampMs); + /* set expiration time */ + row.setExpirationTime(expTimeMs); + /* populate size info */ + row.setStorageSize(key.length + + (val == null ? 
0 : val.length)); + return row; + } + + /** + * Builds before image row, or null if + * 1) subscription not configured to include before image, or + * 2) subscribed table does not enable the before for the entry, or + * 3) before image does not exist for insert + * @return the before image row, or null + */ + private Row buildBeforeImageRow(NoSQLSubscriberId subscriberId, + RepGroupId rgId, + TableImpl table, + boolean inclBeforeImage, + boolean beforeImgEnabled, + byte[] key, + byte[] valBeforeImg, + long tsBeforeImg, + long expBeforeImg, + boolean putOp) { + + if (!inclBeforeImage) { + /* stream configured to exclude before image */ + return null; + } + if (!beforeImgEnabled) { + /* table disabled before image */ + return null; + } + + if (putOp && valBeforeImg == null) { + /* a put op but value is null, must be an insert op */ + return null; + } + + /* build before image row */ + return buildRow(subscriberId, rgId, table, key, + valBeforeImg, tsBeforeImg, expBeforeImg); } } @@ -1133,6 +1376,26 @@ public String toJsonString() { throw getUnsupportedException(); } + @Override + public boolean includeBeforeImage() { + throw getUnsupportedException(); + } + + @Override + public boolean isBeforeImageEnabled() { + throw getUnsupportedException(); + } + + @Override + public boolean isBeforeImageExpired() { + throw getUnsupportedException(); + } + + @Override + public Row getBeforeImage() { + throw getUnsupportedException(); + } + @Override public Type getType() { return Type.INTERNAL; diff --git a/kvmain/src/main/java/oracle/kv/impl/pubsub/PublishingUnit.java b/kvmain/src/main/java/oracle/kv/impl/pubsub/PublishingUnit.java index e996229f..1be2651a 100644 --- a/kvmain/src/main/java/oracle/kv/impl/pubsub/PublishingUnit.java +++ b/kvmain/src/main/java/oracle/kv/impl/pubsub/PublishingUnit.java @@ -15,6 +15,8 @@ import static com.sleepycat.je.utilint.VLSN.FIRST_VLSN; import static com.sleepycat.je.utilint.VLSN.NULL_VLSN; +import static oracle.kv.impl.pubsub.CheckpointTableManager.MAX_NUM_ATTEMPTS; +import static oracle.kv.impl.pubsub.CheckpointTableManager.RETRY_INTERVAL_MS; import static oracle.kv.impl.util.CommonLoggerUtils.exceptionString; import static oracle.kv.impl.util.ThreadUtils.threadId; @@ -58,6 +60,7 @@ import oracle.kv.impl.util.PollCondition; import oracle.kv.impl.util.RateLimitingLogger; import oracle.kv.impl.util.server.LoggerUtils; +import oracle.kv.impl.xregion.service.ServiceMDMan; import oracle.kv.pubsub.CheckpointFailureException; import oracle.kv.pubsub.NoSQLPublisher; import oracle.kv.pubsub.NoSQLStreamMode; @@ -72,6 +75,7 @@ import oracle.kv.pubsub.SubscriptionInsufficientLogException; import oracle.kv.pubsub.SubscriptionTableNotFoundException; import oracle.kv.stats.SubscriptionMetrics; +import oracle.kv.table.Table; import oracle.kv.table.TableAPI; import com.sleepycat.je.rep.InsufficientLogException; @@ -144,14 +148,22 @@ public class PublishingUnit { /* true if a new ckpt table is created and used */ private volatile boolean newCkptTable; - /* + /** * A map to hold impl of all subscribed tables, which will be used in * de-serialization of rows. The map is null if user is trying to * subscribe all tables in this case, we do not prepare any impl at the * time when subscription is created, since new table will be created any - * time during subscription. + * time during subscription. This structure shall be updated atomically + * with {@link #streamTxnTables} to avoid inconsistency. 
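The guard clauses in buildBeforeImageRow above reduce to the following sketch, with a generic decode function standing in for the table.createRowFromBytes call used for the current image: the before image is skipped when the subscription did not request it, when the table does not keep one, or when a put has no prior value (an insert).

import java.util.function.Function;
import oracle.kv.table.Row;

class BeforeImageSketch {
    static Row beforeImageOrNull(boolean streamWantsBeforeImage,
                                 boolean tableEnablesBeforeImage,
                                 boolean isPut,
                                 byte[] beforeImageValue,
                                 Function<byte[], Row> decode) {
        if (!streamWantsBeforeImage) {
            return null; /* subscription did not ask for before images */
        }
        if (!tableEnablesBeforeImage) {
            return null; /* table does not keep before images */
        }
        if (isPut && beforeImageValue == null) {
            return null; /* a put with no prior value is an insert */
        }
        return decode.apply(beforeImageValue);
    }
}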
 */ private volatile ConcurrentMap tables; + /** + * A thread-safe map of subscribed tables to stream transactions instead + * of write operations, indexed by table id. Modifying this structure + * must be synchronized with modifying {@link #tables} and they have to be + * modified atomically to avoid inconsistency. + */ + private final Map streamTxnTables; /*-- For test and internal use only. ---*/ @@ -231,6 +243,7 @@ public PublishingUnit(NoSQLPublisher parent, directory = null; executor = Executors.newSingleThreadScheduledExecutor( new PublishingUnitThreadFactory()); + streamTxnTables = new ConcurrentHashMap<>(); } /** @@ -277,10 +290,16 @@ public void subscribe(NoSQLSubscriber s) { /* determine where to start */ final StreamPosition initPos = getStartStreamPos(config); + /* sanity check of subscribed streaming txn tables */ + sanityCheckStreamTxnTables(config); logger.info(lm("Subscription id=" + si + " to stream from " + "shards=" + shards + " out of all=" + all + ", stream mode=" + config.getStreamMode() + - ", from position=" + initPos)); + ", from position=" + initPos + + ", stream txn tables=" + + config.getStreamTxnTables() + + ", include abort txn=" + + config.getStreamAbortTxn())); /* prepare the unit */ prepareUnit(config); @@ -683,7 +702,7 @@ public NoSQLPublisher.SecurityCred getSecurityCred() { * * @return the output queue, null if not initialized */ - BoundedOutputQueue getOutputQueue() { + public BoundedOutputQueue getOutputQueue() { return outputQueue; } @@ -847,6 +866,14 @@ public Map getConsumers() { return consumers; } + /** + * Returns true if before image should be included if available, false + * otherwise + */ + public boolean includeBeforeImage() { + return subscriber.getSubscriptionConfig().getIncludeBeforeImage(); + } + /** * Internal use in test only */ @@ -863,6 +890,14 @@ long getChangeTimeoutMs() { return config.getChangeTimeoutMs(); } + public boolean getStreamTransaction(long tableId) { + return streamTxnTables.containsKey(tableId); + } + + public boolean includeAbortTransaction() { + return config.getStreamAbortTxn(); + } + /** * Returns true if kvstore is a non-secure store * * @@ -995,7 +1030,7 @@ synchronized NoSQLSubscriptionImpl.StreamChangeStatus applyChange( * * @return a copy of subscribed tables, or null */ - Set getSubscribedTables() { + public Set getSubscribedTables() { if (tables == null) { return null; } @@ -1008,8 +1043,18 @@ Set getSubscribedTables() { * * @param table table to add */ - void addTable(TableImpl table) { - tables.put(table.getFullNamespaceName(), table); + synchronized void addTable(TableImpl table, boolean streamTxn) { + if (tables != null) { + tables.put(table.getFullNamespaceName(), table); + } + if (streamTxn) { + addStreamTxnTable(table.getFullNamespaceName()); + } + logger.info(lm("Added to PU, table=" + ServiceMDMan.getTrace(table) + + ", stream txn="+ streamTxn + + ", subscribed tables=" + tables.keySet() + + ", stream txn tables=" + + ServiceMDMan.getTrace(streamTxnTables.values()))); } /** @@ -1017,8 +1062,11 @@ void addTable(TableImpl table) { * * @param table table to remove */ - void removeTable(TableImpl table) { + synchronized void removeTable(TableImpl table) { + assert tables != null; tables.remove(table.getFullNamespaceName()); + /* may or may not be in the map of stream txn tables, just remove it */ + streamTxnTables.remove(table.getId()); } /** @@ -1189,6 +1237,9 @@ private void prepareUnit(NoSQLSubscriptionConfig conf) notFoundTbls.add(table); } else { tables.put(tableImpl.getFullNamespaceName(), tableImpl); + 
if (conf.getStreamTxn(table)) { + addStreamTxnTable(tableImpl.getFullNamespaceName()); + } } } @@ -1201,11 +1252,20 @@ private void prepareUnit(NoSQLSubscriptionConfig conf) throw new SubscriptionFailureException(si, err, stnfe); } - logger.fine(() -> lm("PU subscribed tables=" + tables.keySet() + - ", table names=" + tableNames)); + logger.info(lm("PU subscribed tables=" + + ServiceMDMan.getTrace( + tables.values().stream().map(t -> (Table) t) + .collect(Collectors.toSet())) + + ", configured tables names=" + tableNames + + ", stream txn tables=" + + ServiceMDMan.getTrace(streamTxnTables.values()))); } else { - /* user subscribes all tables */ - logger.fine(() -> lm("PU subscribed all tables")); + + /* subscribe all tables, add stream txn tables */ + assert tables == null; + config.getStreamTxnTables().forEach(this::addStreamTxnTable); + logger.info(lm("PU subscribed all tables, stream txn tables=" + + ServiceMDMan.getTrace(streamTxnTables.values()))); } /* @@ -1274,6 +1334,58 @@ private void prepareUnit(NoSQLSubscriptionConfig conf) logger.fine(() -> lm("PU preparation done.")); } + /** + * Adds a table configured to stream transactions + * @param table table name + */ + private void addStreamTxnTable(String table) { + final TableImpl tb = getTable(config.getSubscriberId(), table); + if (tb == null) { + logger.warning(lm("Stream txn table=" + table + " not found, " + + "ignore")); + return; + } + if (!tb.isTop()) { + /* cannot stream txn for a non-top table */ + return; + } + + /* add top table to stream txn table set */ + streamTxnTables.put(tb.getId(), tb); + + if (tables == null) { + /* + * stream already configured to subscribe all tables, + * no need to add child tables to subscribed tables set + */ + logger.info(lm("To stream transaction for table=" + table + + ", stream configured to subscribe all tables")); + return; + } + + /* walk through and add all child tables to subscribed tables */ + final Set allTbs = new HashSet<>(); + getAllChildTables(tb, allTbs); + allTbs.forEach(t -> tables.put(t.getFullNamespaceName(), + (TableImpl) t)); + logger.info(lm("To stream transaction for table=" + table + + ", adds all tables to subscribed tables=" + + ServiceMDMan.getTbNames(allTbs))); + } + + /** + * Gets all tables in the hierarchy, including the top level table. + * @param rootTable root table + * @param tables a returned set of all tables in the hierarchy + */ + public static void getAllChildTables(Table rootTable, Set
    tables) { + /* walk through hierarchy recursively */ + tables.add(rootTable); + for (Table child : rootTable.getChildTables().values()) { + getAllChildTables(child, tables); + } + } + /* Creates feeder filter from subscribed tables */ private NoSQLStreamFeederFilter getFilter(RepGroupId gid) { @@ -1286,19 +1398,21 @@ private NoSQLStreamFeederFilter getFilter(RepGroupId gid) { final NoSQLStreamFeederFilter filter; final boolean localWrites = config.isLocalWritesOnly(); final int localRegionId = config.getLocalRegionId(); + final boolean inclBeforeImage = config.getIncludeBeforeImage(); if (tables == null) { /* get a feeder filter passing all tables */ filter = NoSQLStreamFeederFilter.getFilter( - null, nParts, localWrites, localRegionId); + null, nParts, localWrites, localRegionId, inclBeforeImage); } else if (tables.isEmpty()) { /* get a feeder filter passing no table */ filter = NoSQLStreamFeederFilter.getFilter( - new HashSet<>(), nParts, localWrites, localRegionId); + new HashSet<>(), nParts, localWrites, localRegionId, + inclBeforeImage); } else { /* get a feeder filter passing selected tables */ filter = NoSQLStreamFeederFilter.getFilter( new HashSet<>(tables.values()), nParts, localWrites, - localRegionId); + localRegionId, inclBeforeImage); } filter.setRepGroupId(gid); logger.fine(() -> lm("Filter created with tables=" + @@ -1336,6 +1450,75 @@ NoSQLPublisher getParent() { return parent; } + /** + * Retrieves table from kvstore with retry. + * @param table table name + * @return table instance if available, or null + */ + private TableImpl getTableWithRetry(String table) { + int attempt = 0; + final NoSQLSubscriberId sid = config.getSubscriberId(); + while (!isClosed()) { + attempt++; + try { + final TableImpl ret = + (TableImpl) kvstore.getTableAPI().getTable(table); + if (ret != null) { + return ret; + } + if (attempt == MAX_NUM_ATTEMPTS) { + final String err = + "Table=" + table + " not found" + + ", max attempts=" + MAX_NUM_ATTEMPTS + + ", store=" + getStoreName(); + logger.warning(lm(err)); + break; + } + synchronized (this) { + wait(RETRY_INTERVAL_MS); + } + } catch (FaultException fe) { + final String err = "Cannot get table=" + table + + ", will retry, error=" + fe; + logger.finest(() -> lm(err)); + } catch (Exception exp) { + final String err = "Error in accessing table=" + table + + ", store=" + getStoreName(); + logger.warning(lm(err + ", cause=" + exp)); + throw new SubscriptionFailureException(sid, err); + } + } + return null; + } + + /** + * Sanity check of tables to stream transactions + * @param conf subscription configuration + */ + private void sanityCheckStreamTxnTables(NoSQLSubscriptionConfig conf) { + final Set txnTables = conf.getStreamTxnTables(); + if (txnTables.isEmpty()) { + return; + } + final NoSQLSubscriberId sid = conf.getSubscriberId(); + for (String table : txnTables) { + final TableImpl tb = getTableWithRetry(table); + if (tb == null) { + final String err = "Stream txn table=" + table + " not found"; + logger.warning(lm(err)); + throw new SubscriptionFailureException(sid, err); + } + if (!tb.isTop()) { + final String err = + "Stream txn table=" + table + " is not a top table" + + " at store=" + getStoreName(); + logger.warning(lm(err)); + throw new SubscriptionFailureException(sid, err); + } + } + logger.fine(() -> lm("Done checking stream txn tables=" + txnTables)); + } + private void verifyCkptTable(NoSQLSubscriptionConfig conf) { if (!isCkptEnabled()) { @@ -2033,7 +2216,7 @@ long getMinVLSNInQueue() { } } - long getCurrSizeBytes() { + public 
long getCurrSizeBytes() { return currSizeBytes; } @@ -2045,7 +2228,7 @@ Object[] getQueuedOps() { return boundedQueue.toArray(); } - long getMaxSizeBytes() { + public long getMaxSizeBytes() { return maxSizeBytes; } diff --git a/kvmain/src/main/java/oracle/kv/impl/pubsub/ReplicationStreamCbk.java b/kvmain/src/main/java/oracle/kv/impl/pubsub/ReplicationStreamCbk.java index d48fb577..fd848947 100644 --- a/kvmain/src/main/java/oracle/kv/impl/pubsub/ReplicationStreamCbk.java +++ b/kvmain/src/main/java/oracle/kv/impl/pubsub/ReplicationStreamCbk.java @@ -22,7 +22,7 @@ /** * Default callback to process each entry received from replication stream. */ -class ReplicationStreamCbk implements SubscriptionCallback { +public class ReplicationStreamCbk implements SubscriptionCallback { /* private logger */ private final Logger logger; @@ -42,10 +42,6 @@ class ReplicationStreamCbk implements SubscriptionCallback { shard = stat.getParent().getRepGroupId().getGroupId(); } - public ReplicationStreamConsumerStat getStat() { - return stat; - } - /** * Processes a put (insert or update) entry from stream * @@ -56,22 +52,20 @@ public ReplicationStreamConsumerStat getStat() { * @param dbId id of database the entry belongs to * @param ts timestamp of the last update * @param expMs expiration time in system time in ms + * @param beforeImgEnabled true if before image is enabled for the entry + * @param valBeforeImg value bytes of the before image if enabled + * @param tsBeforeImg timestamp in ms of the before image if enabled + * @param expBeforeImg expiration time in ms of the before image */ @Override public void processPut(long vlsn, byte[] key, byte[] value, long txnId, DatabaseId dbId, long ts, long expMs, - boolean beforeImgEnabled, - byte[] valBeforeImg, - long tsBeforeImg, - long expBeforeImg) { - processEntry(new DataEntry(DataEntry.Type.PUT, - vlsn, - txnId, - key, - value, - dbId, - ts, - expMs)); + boolean beforeImgEnabled, byte[] valBeforeImg, + long tsBeforeImg, long expBeforeImg) { + processEntry(DataEntry.getPutEntry(vlsn, txnId, key, value, dbId, + ts, expMs, beforeImgEnabled, + valBeforeImg, tsBeforeImg, + expBeforeImg)); stat.incrNumPuts(vlsn); } @@ -84,21 +78,19 @@ public void processPut(long vlsn, byte[] key, byte[] value, long txnId, * @param txnId id of txn the entry belongs to * @param dbId id of database the entry belongs to * @param ts timestamp of the last update + * @param beforeImgEnabled true if before image is enabled for the entry + * @param valBeforeImg value bytes of the before image if enabled + * @param tsBeforeImg timestamp in ms of the before image if enabled + * @param expBeforeImg expiration time in ms of the before image */ @Override public void processDel(long vlsn, byte[] key, byte[] val, long txnId, - DatabaseId dbId, - long ts, boolean beforeImgEnabled, - byte[] valBeforeImg, - long tsBeforeImg, + DatabaseId dbId, long ts, boolean beforeImgEnabled, + byte[] valBeforeImg, long tsBeforeImg, long expBeforeImg) { - processEntry(new DataEntry(DataEntry.Type.DELETE, - vlsn, - txnId, - key, - val, - dbId, - ts)); + processEntry(DataEntry.getDelEntry(vlsn, txnId, key, val, dbId, ts, + beforeImgEnabled, valBeforeImg, + tsBeforeImg, expBeforeImg)); stat.incrNumDels(vlsn); } @@ -107,15 +99,11 @@ public void processDel(long vlsn, byte[] key, byte[] val, long txnId, * * @param vlsn VLSN of commit entry * @param txnId id of txn to commit + * @param ts timestamp of commit */ @Override - public void processCommit(long vlsn, long txnId) { - processEntry(new 
DataEntry(DataEntry.Type.TXN_COMMIT, - vlsn, - txnId, - null, - null, - null)); + public void processCommit(long vlsn, long txnId, long ts) { + processEntry(DataEntry.getCommitEntry(vlsn, txnId, ts)); stat.incrNumCommits(vlsn); } @@ -124,15 +112,11 @@ public void processCommit(long vlsn, long txnId) { * * @param vlsn VLSN of abort entry * @param txnId id of txn to abort + * @param ts timestamp of abort */ @Override - public void processAbort(long vlsn, long txnId) { - processEntry(new DataEntry(DataEntry.Type.TXN_ABORT, - vlsn, - txnId, - null, - null, - null)); + public void processAbort(long vlsn, long txnId, long ts) { + processEntry(DataEntry.getAbortEntry(vlsn, txnId, ts)); stat.incrNumAborts(vlsn); } @@ -148,7 +132,7 @@ public void processException(final Exception exp) { /* * When receiving an exception msg from feeder, the JE client thread - * will shutdown the stream after calling this function. The + * will shut down the stream after calling this function. The * replication stream consumer which owns the JE client thread is * supposed to retry or terminate the subscription. */ @@ -163,7 +147,7 @@ private void processEntry(DataEntry dataEntry) { logger.finest(() -> lm("enqueued entry with " + "type=" + dataEntry.getType() + ", txn id=" + dataEntry.getTxnID() + - ", key=" + dataEntry.getTxnID())); + ", vlsn=" + dataEntry.getVLSN())); } catch (InterruptedException ie) { /* thread is shut down by others */ @@ -175,4 +159,12 @@ private void processEntry(DataEntry dataEntry) { private String lm(String msg) { return "[RSCBK][shard=" + shard + "] " + msg; } + + /** + * Unit test only + * @return replication stream consumer statistics object + */ + public ReplicationStreamConsumerStat getStat() { + return stat; + } } diff --git a/kvmain/src/main/java/oracle/kv/impl/pubsub/ReplicationStreamConsumer.java b/kvmain/src/main/java/oracle/kv/impl/pubsub/ReplicationStreamConsumer.java index f42eaefa..19c9dc98 100644 --- a/kvmain/src/main/java/oracle/kv/impl/pubsub/ReplicationStreamConsumer.java +++ b/kvmain/src/main/java/oracle/kv/impl/pubsub/ReplicationStreamConsumer.java @@ -516,7 +516,7 @@ void setAllPartClosed() { * * @return the statistics of the consumer */ - ReplicationStreamConsumerStat getRSCStat() { + public ReplicationStreamConsumerStat getRSCStat() { return stat; } @@ -529,6 +529,14 @@ public String toString() { stat.dumpStat(); } + /** + * Unit test only + * @return JE subscription config + */ + public SubscriptionConfig getSubscriptionConfig() { + return subscriptionConfig; + } + /* for test use only */ ReplicationStreamCbk getRepStrCbk() { return replicationStreamCbk; diff --git a/kvmain/src/main/java/oracle/kv/impl/pubsub/ReplicationStreamConsumerStat.java b/kvmain/src/main/java/oracle/kv/impl/pubsub/ReplicationStreamConsumerStat.java index e4f5fa2a..75f7ab23 100644 --- a/kvmain/src/main/java/oracle/kv/impl/pubsub/ReplicationStreamConsumerStat.java +++ b/kvmain/src/main/java/oracle/kv/impl/pubsub/ReplicationStreamConsumerStat.java @@ -25,7 +25,7 @@ /** * Object represents the statistics of the replication stream consumer */ -class ReplicationStreamConsumerStat { +public class ReplicationStreamConsumerStat { /** parent consumer */ private final ReplicationStreamConsumer parent; @@ -191,7 +191,7 @@ long getLastMsgTimeMs() { * * @return the last streamed VLSN or NULL_VLSN */ - long getLastStreamedVLSN() { + public long getLastStreamedVLSN() { return lastStreamVLSN; } diff --git a/kvmain/src/main/java/oracle/kv/impl/pubsub/StreamDelEvent.java 
b/kvmain/src/main/java/oracle/kv/impl/pubsub/StreamDelEvent.java index 61555aec..321edff5 100644 --- a/kvmain/src/main/java/oracle/kv/impl/pubsub/StreamDelEvent.java +++ b/kvmain/src/main/java/oracle/kv/impl/pubsub/StreamDelEvent.java @@ -18,6 +18,8 @@ import oracle.kv.impl.util.UserDataControl; import oracle.kv.pubsub.StreamOperation; import oracle.kv.table.PrimaryKey; +import oracle.kv.table.Row; +import oracle.kv.table.Table; /** * Object represents a delete operation in NoSQL Stream @@ -30,19 +32,49 @@ public class StreamDelEvent implements StreamOperation.DeleteEvent { private final int repGroupId; + /** + * True if the stream is configured to include before image + */ + private final boolean inclBeforeImage; + /** + * True if before image is enabled for this event + */ + private final boolean beforeImgEnabled; + /** + * Before image expiration time in ms + */ + private final long beforeImgExpMs; + /** + * The before image in Row if enabled, or null if before image is + * disabled or does not exist, e.g., for insert operation. + */ + private final Row beforeImg; + /** * Constructs a delete operation * * @param key primary key of the deleted row * @param sequenceId unique sequence id * @param repGroupId shard id of the deletion + * @param inclBeforeImage true if include before image in subscription + * @param beforeImgEnabled true if before image enabled + * @param beforeImgExpMs before image expiration time in ms + * @param beforeImg before image of the write operation */ protected StreamDelEvent(PrimaryKey key, SequenceId sequenceId, - int repGroupId) { + int repGroupId, + boolean inclBeforeImage, + boolean beforeImgEnabled, + long beforeImgExpMs, + Row beforeImg) { this.key = key; this.sequenceId = sequenceId; this.repGroupId = repGroupId; + this.inclBeforeImage = inclBeforeImage; + this.beforeImgEnabled = beforeImgEnabled; + this.beforeImgExpMs = beforeImgExpMs; + this.beforeImg = beforeImg; } /** @@ -104,7 +136,11 @@ public String toString() { return "Del OP [seq=" + ((StreamSequenceId) sequenceId).getSequence() + ", shard id=" + repGroupId + ", primary key=" + - UserDataControl.displayPrimaryKeyJson(key) + "]"; + UserDataControl.displayPrimaryKeyJson(key) + "]" + + ", before image enabled=" + beforeImgEnabled + + ", include before image=" + inclBeforeImage + + ", before=" + UserDataControl.displayRowJson(beforeImg) + + "]"; } /** @@ -127,6 +163,17 @@ public String getFullTableName() { return key.getTable().getFullName(); } + /** + * @hidden + * + * Returns the table instance associated with the operation + * @return table instance + */ + @Override + public Table getTable() { + return key.getTable(); + } + /** * Returns the table name of this operation. * @@ -196,6 +243,55 @@ public long getPrimaryKeySize() { */ @Override public String toJsonString() { - return key.toJsonString(true); + return "[type=" + getType() + "]" + + "[seq=" + sequenceId + "]" + + "[shard=" + repGroupId + "]" + + "[region id=" + getRegionId() + "]" + + "[table=" + getFullTableName() + "]" + + "[before image enabled=" + beforeImgEnabled + "]" + + "[before image incl.=" + inclBeforeImage + "]" + + key.toJsonString(false) + + (beforeImgEnabled && inclBeforeImage ? 
+ ", before image=" + getBeforeImage() : ""); + } + + @Override + public boolean includeBeforeImage() { + return inclBeforeImage; + } + + @Override + public boolean isBeforeImageEnabled() { + return beforeImgEnabled; + } + + @Override + public boolean isBeforeImageExpired() { + return StreamOperation.isBeforeImageExpired(beforeImgEnabled, + inclBeforeImage, + beforeImgExpMs, + beforeImg); + } + + @Override + public Row getBeforeImage() { + if (!inclBeforeImage) { + return null; + } + if (!beforeImgEnabled) { + return null; + } + if (isBeforeImageExpired()) { + return null; + } + return beforeImg; + } + + /** + * @hidden + * Returns before image expiration time + */ + public long getBeforeImgExpMs() { + return beforeImgExpMs; } } diff --git a/kvmain/src/main/java/oracle/kv/impl/pubsub/StreamPutEvent.java b/kvmain/src/main/java/oracle/kv/impl/pubsub/StreamPutEvent.java index 44abd4d6..2e82a6e8 100644 --- a/kvmain/src/main/java/oracle/kv/impl/pubsub/StreamPutEvent.java +++ b/kvmain/src/main/java/oracle/kv/impl/pubsub/StreamPutEvent.java @@ -18,6 +18,7 @@ import oracle.kv.impl.util.UserDataControl; import oracle.kv.pubsub.StreamOperation; import oracle.kv.table.Row; +import oracle.kv.table.Table; /** * Object represents a put operation in NoSQL Stream @@ -29,20 +30,49 @@ public class StreamPutEvent implements StreamOperation.PutEvent { private final int repGroupId; private final Row row; + /** + * True if the stream is configured to include before image + */ + private final boolean inclBeforeImage; + /** + * True if before image is enabled for this event + */ + private final boolean beforeImgEnabled; + /** + * Before image expiration time in ms + */ + private final long beforeImgExpMs; + /** + * The before image in Row if enabled, or null if before image is + * disabled or does not exist, e.g., for insert operation. + */ + private final Row beforeImg; /** - * Constructs a put operation + * Constructs a put operation with before image enabled * * @param row row of put * @param sequenceId unique sequence id * @param repGroupId shard id of the deletion + * @param inclBeforeImage true if include before image in subscription + * @param beforeImgEnabled true if before image enabled + * @param beforeImgExpMs before image expiration time + * @param beforeImg before image of the write operation */ protected StreamPutEvent(Row row, SequenceId sequenceId, - int repGroupId) { + int repGroupId, + boolean inclBeforeImage, + boolean beforeImgEnabled, + long beforeImgExpMs, + Row beforeImg) { this.row = row; this.sequenceId = sequenceId; this.repGroupId = repGroupId; + this.inclBeforeImage = inclBeforeImage; + this.beforeImgEnabled = beforeImgEnabled; + this.beforeImgExpMs = beforeImgExpMs; + this.beforeImg = beforeImg; } /** @@ -103,7 +133,11 @@ public DeleteEvent asDelete() { public String toString() { return "PUT OP [seq=" + ((StreamSequenceId)sequenceId).getSequence() + ", shard id=" + repGroupId + - ", row=" + UserDataControl.displayRowJson(row) + "]"; + ", row=" + UserDataControl.displayRowJson(row) + + ", before image enabled=" + beforeImgEnabled + + ", include before image=" + inclBeforeImage + + ", before=" + UserDataControl.displayRowJson(beforeImg) + + "]"; } /** @@ -136,6 +170,17 @@ public String getTableName() { return row.getTable().getName(); } + /** + * @hidden + * + * Returns the table instance associated with the operation + * @return table instance + */ + @Override + public Table getTable() { + return row.getTable(); + } + /** * Returns the region id of this operation. 
* @@ -198,6 +243,55 @@ public long getRowSize() { */ @Override public String toJsonString() { - return row.toJsonString(true); + return "[type=" + getType() + "]" + + "[seq=" + sequenceId + "]" + + "[shard=" + repGroupId + "]" + + "[region id=" + getRegionId() + "]" + + "[table=" + getFullTableName() + "]" + + "[before image enabled=" + beforeImgEnabled + "]" + + "[before image incl.=" + inclBeforeImage + "]" + + row.toJsonString(false) + + (beforeImgEnabled && inclBeforeImage ? + ", before image=" + getBeforeImage() : ""); + } + + @Override + public boolean includeBeforeImage() { + return inclBeforeImage; + } + + @Override + public boolean isBeforeImageEnabled() { + return beforeImgEnabled; + } + + @Override + public boolean isBeforeImageExpired() { + return StreamOperation.isBeforeImageExpired(beforeImgEnabled, + inclBeforeImage, + beforeImgExpMs, + beforeImg); + } + + @Override + public Row getBeforeImage() { + if (!inclBeforeImage) { + return null; + } + if (!beforeImgEnabled) { + return null; + } + if (isBeforeImageExpired()) { + return null; + } + return beforeImg; + } + + /** + * @hidden + * Returns before image expiration time + */ + public long getBeforeImgExpMs() { + return beforeImgExpMs; } } diff --git a/kvmain/src/main/java/oracle/kv/impl/pubsub/StreamTxnEvent.java b/kvmain/src/main/java/oracle/kv/impl/pubsub/StreamTxnEvent.java new file mode 100644 index 00000000..87805fca --- /dev/null +++ b/kvmain/src/main/java/oracle/kv/impl/pubsub/StreamTxnEvent.java @@ -0,0 +1,201 @@ +/*- + * Copyright (C) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle NoSQL + * Database made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/nosqldb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle NoSQL Database for a copy of the license and + * additional information. + */ + +package oracle.kv.impl.pubsub; + +import java.util.ArrayList; +import java.util.List; + +import oracle.kv.pubsub.StreamOperation; +import oracle.kv.table.Row; +import oracle.kv.table.Table; +import oracle.kv.txn.TransactionIdImpl; + +/** + * Object represents a transaction in NoSQL Stream + */ +public class StreamTxnEvent implements StreamOperation.TransactionEvent { + + private static final int MAX_NUM_OPS_IN_TXN = 1024 * 1024; + + private final SequenceId sequenceId; + private final TransactionIdImpl txnId; + private final TransactionType type; + private final List ops; + + StreamTxnEvent(TransactionIdImpl txnId, + SequenceId sequenceId, + boolean commit, + List list) { + this.txnId = txnId; + this.sequenceId = sequenceId; + this.ops = new ArrayList<>(); + this.type = commit ? 
TransactionType.COMMIT : TransactionType.ABORT ; + list.forEach(this::addOp); + } + + @Override + public SequenceId getSequenceId() { + return sequenceId; + } + + @Override + public int getRepGroupId() { + return txnId.getShardId(); + } + + @Override + public long getTableId() { + throw new IllegalArgumentException("Not supported in transaction"); + } + + @Override + public Table getTable() { + throw new IllegalArgumentException("Not supported in transaction"); + } + + @Override + public String getFullTableName() { + throw new IllegalArgumentException("Not supported in transaction"); + } + + @Override + public String getTableName() { + throw new IllegalArgumentException("Not supported in transaction"); + } + + @Override + public int getRegionId() { + throw new IllegalArgumentException("Not supported in transaction"); + } + + @Override + public long getLastModificationTime() { + throw new IllegalArgumentException( + "Last modification time not supported in transaction"); + } + + @Override + public long getExpirationTime() { + throw new IllegalArgumentException( + "Expiration time not supported in transaction"); + } + + @Override + public String toJsonString() { + final StringBuilder sb = new StringBuilder(); + sb.append(txnId) + .append("[seq=").append(sequenceId).append("]") + .append("[type=").append(type).append("]") + .append("[#ops=").append(ops.size()).append("]") + .append("\n"); + for (int idx = 0; idx < ops.size(); idx++) { + final StreamOperation op = ops.get(idx); + sb.append("\t") + .append("[op=").append(idx).append("]") + .append(op.toJsonString()) + .append("\n"); + } + return sb.toString(); + } + + @Override + public boolean includeBeforeImage() { + throw new IllegalStateException( + "Before image not supported in transaction event"); + } + + @Override + public boolean isBeforeImageEnabled() { + throw new IllegalStateException( + "Before image not supported in transaction event"); + } + + @Override + public boolean isBeforeImageExpired() { + throw new IllegalStateException( + "Before image not supported in transaction event"); + } + + @Override + public Row getBeforeImage() { + throw new IllegalStateException( + "Before image not supported in transaction event"); + } + + @Override + public String toString() { + return "Txn [seq=" + sequenceId + + ", type=" + type + + ", txn id=" + txnId + + ", #ops="+ ops.size() + "]"; + } + + @Override + public Type getType() { + return Type.TRANSACTION; + } + + @Override + public PutEvent asPut() { + throw new IllegalArgumentException("This operation is not a put"); + } + + @Override + public DeleteEvent asDelete() { + throw new IllegalArgumentException("This operation is not a delete"); + } + + @Override + public TransactionEvent asTransaction() { + return this; + } + + @Override + public TransactionIdImpl getTransactionId() { + return txnId; + } + + @Override + public TransactionType getTransactionType() { + return type; + } + + @Override + public long getNumOperations() { + return ops.size(); + } + + @Override + public List getOperations() { + return ops; + } + + private void addOp(StreamOperation op) { + if (op.getType().equals(Type.TRANSACTION)) { + throw new IllegalArgumentException( + "Nested transaction is not supported"); + } + if (!op.getType().equals(Type.PUT) && + !op.getType().equals(Type.DELETE)) { + throw new IllegalArgumentException( + "Unsupported operation in transaction"); + } + if (ops.size() == MAX_NUM_OPS_IN_TXN) { + throw new IllegalStateException( + "Number of operations in transaction cannot be greater" + + " than 
" + MAX_NUM_OPS_IN_TXN); + } + ops.add(op); + } +} diff --git a/kvmain/src/main/java/oracle/kv/impl/pubsub/SubscriptionStatImpl.java b/kvmain/src/main/java/oracle/kv/impl/pubsub/SubscriptionStatImpl.java index 74e99f5f..b1e6463e 100644 --- a/kvmain/src/main/java/oracle/kv/impl/pubsub/SubscriptionStatImpl.java +++ b/kvmain/src/main/java/oracle/kv/impl/pubsub/SubscriptionStatImpl.java @@ -207,13 +207,13 @@ synchronized void updateShardStat(RepGroupId shard, lastMsgTime.put(shard, stat.getLastMsgTimeMs()); currOpenTxns.put(shard, stat.getOpenTxns()); numSuccReconn.put(shard, stat.getNumSuccReconn()); + totalTokenRefreshed.set(stat.getNumTokenRefreshed()); /* accumulated stat */ totalCommitTxns.addAndGet(stat.getCommitTxns()); totalAbortTxns.addAndGet(stat.getAbortTxns()); totalCommitOps.addAndGet(stat.getCommitOps()); totalAbortOps.addAndGet(stat.getAbortOps()); - totalTokenRefreshed.addAndGet(stat.getNumTokenRefreshed()); } private static Map copy(Map stat) { diff --git a/kvmain/src/main/java/oracle/kv/impl/pubsub/security/StreamServerAuthHandler.java b/kvmain/src/main/java/oracle/kv/impl/pubsub/security/StreamServerAuthHandler.java index 881577b3..fe56aac0 100644 --- a/kvmain/src/main/java/oracle/kv/impl/pubsub/security/StreamServerAuthHandler.java +++ b/kvmain/src/main/java/oracle/kv/impl/pubsub/security/StreamServerAuthHandler.java @@ -33,6 +33,7 @@ import oracle.kv.impl.security.login.LoginToken; import oracle.kv.impl.util.RateLimitingLogger; +import com.sleepycat.je.rep.net.DataChannel; import com.sleepycat.je.rep.subscription.StreamAuthenticator; /** @@ -41,6 +42,12 @@ */ public class StreamServerAuthHandler implements StreamAuthenticator { + /** + * Null channel id for configured the authenticator passed down to JE. + * During handshake, each data channel will create a clone authenticator + * with its own channel id + */ + private static final String NULL_CHANNEL_ID = "NullChannelId"; /** * Rate limiting logger max objects */ @@ -76,14 +83,21 @@ public class StreamServerAuthHandler implements StreamAuthenticator { * rate limiting logger */ private final RateLimitingLogger rl; + /** + * Channel associated with the authenticator mainly used for logging, + * useful in presence of multiple streams + */ + private final String channelId; - private StreamServerAuthHandler(AccessChecker accessChecker, + private StreamServerAuthHandler(String channelId, + AccessChecker accessChecker, Logger logger) { if (accessChecker == null) { throw new IllegalArgumentException("Null access checker"); } + this.channelId = channelId; this.accessChecker = accessChecker; this.logger = logger; authCtx = null; @@ -92,6 +106,12 @@ private StreamServerAuthHandler(AccessChecker accessChecker, rl = new RateLimitingLogger<>(RL_INTV_MS, RL_MAX_OBJS, logger); } + @Override + public String toString() { + return "StreamServerAuthHandler of channelId=" + channelId + + ", tableIdStr=" + Arrays.toString(tableIdStr) ; + } + /** * Gets an instance of server side authentication handler * @@ -102,17 +122,27 @@ private StreamServerAuthHandler(AccessChecker accessChecker, * * @throws IllegalArgumentException if access checker is null */ - public static StreamServerAuthHandler getAuthHandler(AccessChecker ac, - Logger logger) - throws IllegalArgumentException { + public static StreamServerAuthHandler getAuthHandler( + AccessChecker ac, Logger logger) throws IllegalArgumentException { + return new StreamServerAuthHandler(NULL_CHANNEL_ID, ac, logger); + } + @Override + public StreamAuthenticator getInstance(DataChannel channel) { 
final StreamServerAuthHandler ret = - new StreamServerAuthHandler(ac, logger); - - logger.fine(() -> lm("Server authenticator created.")); + new StreamServerAuthHandler(channel.getChannelId(), + accessChecker, + logger); + logger.info(lm("Create a new instance of StreamServerAuthHandler, " + + "channel id=" + ret.getChannelId())); return ret; } + @Override + public String getChannelId() { + return channelId; + } + /** * Creates auth context from login token * @@ -265,17 +295,13 @@ public boolean checkAccess() { final SubscriptionOpsCtx opCtx = new SubscriptionOpsCtx(tableIdStr); try { - final ExecutionContext ec = - ExecutionContext.create(accessChecker, authCtx, opCtx); - /* * Calling ExecutionContext.create has checked if execution ctx * has all privileges, and raise exception if it fails. */ - logger.fine(() -> lm("Privilege check passed for=" + - opCtx.describe() + - ", requestor client host=" + - ec.requestorContext().getClientHost())); + ExecutionContext.create(accessChecker, authCtx, opCtx); + logger.finest(() -> lm("Privilege check passed for=" + + opCtx.describe())); return true; } catch (AuthenticationRequiredException | UnauthorizedException | SessionAccessException exp) { @@ -300,8 +326,8 @@ private static boolean isTokenExpired(AuthContext ac) { System.currentTimeMillis()); } - private static String lm(String msg) { - return "[StreamServerAuth] " + msg; + private String lm(String msg) { + return "[StreamServerAuth][ChannelId=" + channelId + "] " + msg; } /** @@ -316,10 +342,8 @@ static class SubscriptionOpsCtx implements OperationContext { @Override public String describe() { - return "Table subscription request of " + - ((ids == null || ids.length == 0) ? - " all tables" : - " tables IDs=" + Arrays.toString(ids)); + return "Subscription of" + ((ids == null || ids.length == 0) ? + " all user tables" : " tables IDs=" + Arrays.toString(ids)); } @Override diff --git a/kvmain/src/main/java/oracle/kv/impl/query/compiler/CodeGenerator.java b/kvmain/src/main/java/oracle/kv/impl/query/compiler/CodeGenerator.java index 76f0e741..c31247c4 100644 --- a/kvmain/src/main/java/oracle/kv/impl/query/compiler/CodeGenerator.java +++ b/kvmain/src/main/java/oracle/kv/impl/query/compiler/CodeGenerator.java @@ -98,6 +98,8 @@ public class CodeGenerator extends ExprVisitor { private boolean theForCloud; + private boolean theRemoveGlobalSort; + public CodeGenerator(QueryControlBlock qcb) { theQCB = qcb; qcb.setCodegen(this); @@ -270,15 +272,20 @@ private PlanIter[] generateItersForExprs(ArrayList exprs) { @Override boolean enter(ExprSort sort) { - /* If the sort is going to be removed, set the index scan direction - * before actually removing the sort */ - if (sort.matchesGroupBy()) { + /* Check whether the sort can be removed due to an underlying + * index-based group-by that produces its results in the desired + * order. */ + ExprBaseTable tableExpr = sort.getTableExpr(); + + if (tableExpr != null && sort.matchesGroupBy()) { Expr input = sort.getInput(); while (input != null) { if (input.getKind() == ExprKind.GROUP) { + /* The group-by is a global one, so the sort cannot + * be removed */ break; } @@ -290,13 +297,36 @@ boolean enter(ExprSort sort) { if (sfw.hasGroupBy() && input.getKind() == ExprKind.RECEIVE) { + /* The group-by is an index-based one. If the sorting + * is in descending order, we must reverse the direction + * of the index scan. 
*/ if (sort.isDescendingIndexScan()) { - ExprBaseTable tableExpr = sort.getTableExpr(); + + if (tableExpr.getNumDescendants() > 0) { + + /* Cannot reverse the direction of the index + * scan if the query performs LOJs with + * descendant tables via the primary index. + * If the LOJs are done via a secondary index + * index on the target table, all the sorting + * exprs must be from the target table. */ + if (tableExpr.getIndex() == null) { + break; + } + + ArrayList tables = sort.getTables(); + if (tables.size() != 1 || + tables.get(0) != tableExpr.getTargetTable()) { + break; + } + } + ExprReceive rcv = (ExprReceive)input; tableExpr.setDirection(Direction.REVERSE); rcv.reverseSortDirection(); } + theRemoveGlobalSort = true; return true; } } else { @@ -305,6 +335,7 @@ boolean enter(ExprSort sort) { } } + /* The sort will not be removed, so set theTopBlockingExpr to the sort */ if (theTopBlockingExpr == null) { theTopBlockingExpr = sort; } @@ -315,31 +346,8 @@ boolean enter(ExprSort sort) { @Override void exit(ExprSort sort) { - /* Remove the sort if it's not needed. See ExprSort javadoc for - * details */ - if (sort.matchesGroupBy()) { - - Expr input = sort.getInput(); - - while (input != null) { - - if (input.getKind() == ExprKind.GROUP) { - break; - } - - if (input.getKind() == ExprKind.SFW) { - - ExprSFW sfw = (ExprSFW)input; - input = sfw.getDomainExpr(0); - - if (sfw.hasGroupBy() && - input.getKind() == ExprKind.RECEIVE) { - return; - } - } else { - break; - } - } + if (theRemoveGlobalSort) { + return; } if (theBottomBlockingExpr == null) { diff --git a/kvmain/src/main/java/oracle/kv/impl/query/compiler/ExprSFW.java b/kvmain/src/main/java/oracle/kv/impl/query/compiler/ExprSFW.java index f243df84..ebaeff86 100644 --- a/kvmain/src/main/java/oracle/kv/impl/query/compiler/ExprSFW.java +++ b/kvmain/src/main/java/oracle/kv/impl/query/compiler/ExprSFW.java @@ -1842,7 +1842,7 @@ Expr rewriteGroupBy(boolean forDistinct) { * - outerSFW : the outer SFW * - outerFromVar : from FROM var of the outer SFW. */ - Expr rewriteSelectExprForGroupBy( + private Expr rewriteSelectExprForGroupBy( int fieldPos, Expr fieldExpr, Expr fieldSubExpr, @@ -2509,8 +2509,13 @@ sortPositions, getSortSpecs(), /* Check whether the sort exprs are a prefix of the group exprs or * the group exprs are a prefix of the sort exprs. If so, and we * later discover that the group-by is going to be an index-based - * one, the sort can be removed. */ - if (hasGroupBy()) { + * one, the sort can be removed. + * Note: do not apply this optimization if the query has inner joins. + * This is ok for now, because currently, grouping over inner joins + * is always done via a global ExprGroup (i.e., there is no sorting + * index). If this changes in the future, this optimization has to + * revisited for inner joins. */ + if (hasGroupBy() && !hasJoin()) { boolean desc = false; boolean nullsLast = false; int i = 0; diff --git a/kvmain/src/main/java/oracle/kv/impl/query/compiler/FuncCreationTime.java b/kvmain/src/main/java/oracle/kv/impl/query/compiler/FuncCreationTime.java new file mode 100644 index 00000000..78551e70 --- /dev/null +++ b/kvmain/src/main/java/oracle/kv/impl/query/compiler/FuncCreationTime.java @@ -0,0 +1,92 @@ +/*- + * Copyright (C) 2011, 2025 Oracle and/or its affiliates. All rights reserved. 
+ * + * This file was distributed by Oracle as part of a version of Oracle NoSQL + * Database made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/nosqldb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle NoSQL Database for a copy of the license and + * additional information. + */ + +package oracle.kv.impl.query.compiler; + +import oracle.kv.impl.api.table.FieldDefImpl; +import oracle.kv.impl.query.QueryException; +import oracle.kv.impl.query.runtime.PlanIter; +import oracle.kv.impl.query.runtime.FuncCreationTimeIter; +import oracle.kv.impl.query.types.ExprType; +import oracle.kv.impl.query.types.TypeManager; +import oracle.kv.table.Row; + +/** + * Function to return the row creation time as timestamp(3) - Timestamp with + * milliseconds precision. + * + * @see Row#getCreationTime() + * @since 25.3 + */ +public class FuncCreationTime extends Function { + + FuncCreationTime() { + super(FunctionLib.FuncCode.FN_CREATION_TIME, "creation_time", + TypeManager.ANY_RECORD_ONE(), + TypeManager.createType(FieldDefImpl.Constants.timestampDefs[3], + ExprType.Quantifier.ONE)); + } + + @Override + boolean isIndexable() { + return true; + } + + @Override + public boolean isRowProperty() { + return true; + } + + @Override + boolean mayReturnNULL(ExprFuncCall caller) { + return true; + } + + @Override + boolean mayReturnEmpty(ExprFuncCall caller) { + return false; + } + + @Override + Expr normalizeCall(ExprFuncCall funcCall) { + + Expr arg = funcCall.getArg(0); + + if (arg.getKind() == Expr.ExprKind.VAR && ((ExprVar)arg).getTable() != null) { + return funcCall; + } + + throw new QueryException( + "The argument to the creation_time function must be a row " + + "variable", funcCall.getLocation()); + } + + @Override + PlanIter codegen(CodeGenerator codegen, + ExprFuncCall caller, + PlanIter[] argIters) { + + int resultReg = codegen.allocateResultReg(caller); + + Expr arg = caller.getArg(0); + + if (arg.getKind() != Expr.ExprKind.VAR || ((ExprVar)arg).getTable() == null) { + throw new QueryException( + "The argument to the creation_time function must " + + "be a row variable", caller.getLocation()); + } + + return new FuncCreationTimeIter(caller, resultReg, argIters[0]); + } +} + diff --git a/kvmain/src/main/java/oracle/kv/impl/query/compiler/FuncCreationTimeMillis.java b/kvmain/src/main/java/oracle/kv/impl/query/compiler/FuncCreationTimeMillis.java new file mode 100644 index 00000000..b668709e --- /dev/null +++ b/kvmain/src/main/java/oracle/kv/impl/query/compiler/FuncCreationTimeMillis.java @@ -0,0 +1,87 @@ +/*- + * Copyright (C) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle NoSQL + * Database made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/nosqldb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle NoSQL Database for a copy of the license and + * additional information. + */ + +package oracle.kv.impl.query.compiler; + +import oracle.kv.impl.query.QueryException; +import oracle.kv.impl.query.runtime.PlanIter; +import oracle.kv.impl.query.runtime.FuncCreationTimeMillisIter; +import oracle.kv.impl.query.types.TypeManager; +import oracle.kv.table.Row; + +/** + * Function to return the row creation time in milliseconds since the epoch. 
+ * + * @see Row#getCreationTime() + * @since 25.3 + */ +public class FuncCreationTimeMillis extends Function { + + FuncCreationTimeMillis() { + super(FunctionLib.FuncCode.FN_CREATION_TIME_MILLIS, "creation_time_millis", + TypeManager.ANY_RECORD_ONE(), + TypeManager.LONG_ONE()); + } + + @Override + boolean isIndexable() { + return true; + } + + @Override + public boolean isRowProperty() { + return true; + } + + @Override + boolean mayReturnNULL(ExprFuncCall caller) { + return true; + } + + @Override + boolean mayReturnEmpty(ExprFuncCall caller) { + return false; + } + + @Override + Expr normalizeCall(ExprFuncCall funcCall) { + + Expr arg = funcCall.getArg(0); + + if (arg.getKind() == Expr.ExprKind.VAR && ((ExprVar)arg).getTable() != null) { + return funcCall; + } + + throw new QueryException( + "The argument to the creation_time_millis function must " + + "be a row variable", funcCall.getLocation()); + } + + @Override + PlanIter codegen(CodeGenerator codegen, + ExprFuncCall caller, + PlanIter[] argIters) { + + int resultReg = codegen.allocateResultReg(caller); + + Expr arg = caller.getArg(0); + + if (arg.getKind() != Expr.ExprKind.VAR || ((ExprVar)arg).getTable() == null) { + throw new QueryException( + "The argument to the creation_time_millis function must " + + "be a row variable", caller.getLocation()); + } + + return new FuncCreationTimeMillisIter(caller, resultReg, argIters[0]); + } +} diff --git a/kvmain/src/main/java/oracle/kv/impl/query/compiler/FuncExpirationTime.java b/kvmain/src/main/java/oracle/kv/impl/query/compiler/FuncExpirationTime.java index e67700b0..e3bc4f5e 100644 --- a/kvmain/src/main/java/oracle/kv/impl/query/compiler/FuncExpirationTime.java +++ b/kvmain/src/main/java/oracle/kv/impl/query/compiler/FuncExpirationTime.java @@ -23,7 +23,7 @@ import oracle.kv.impl.query.types.TypeManager; /** - * Function to return the current system time in millisecods since the epoch + * Function to return the current system time in milliseconds since the epoch */ public class FuncExpirationTime extends Function { diff --git a/kvmain/src/main/java/oracle/kv/impl/query/compiler/FuncExpirationTimeMillis.java b/kvmain/src/main/java/oracle/kv/impl/query/compiler/FuncExpirationTimeMillis.java index dc02f512..2cc6a182 100644 --- a/kvmain/src/main/java/oracle/kv/impl/query/compiler/FuncExpirationTimeMillis.java +++ b/kvmain/src/main/java/oracle/kv/impl/query/compiler/FuncExpirationTimeMillis.java @@ -21,7 +21,7 @@ import oracle.kv.impl.query.types.TypeManager; /** - * Function to return the current system time in millisecods since the epoch + * Function to return the current system time in milliseconds since the epoch */ public class FuncExpirationTimeMillis extends Function { diff --git a/kvmain/src/main/java/oracle/kv/impl/query/compiler/FuncRowMetadata.java b/kvmain/src/main/java/oracle/kv/impl/query/compiler/FuncRowMetadata.java new file mode 100644 index 00000000..08898635 --- /dev/null +++ b/kvmain/src/main/java/oracle/kv/impl/query/compiler/FuncRowMetadata.java @@ -0,0 +1,89 @@ +/*- + * Copyright (C) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle NoSQL + * Database made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/nosqldb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle NoSQL Database for a copy of the license and + * additional information. 
 + */ + +package oracle.kv.impl.query.compiler; + +import oracle.kv.impl.api.table.FieldDefImpl; +import oracle.kv.impl.query.QueryException; +import oracle.kv.impl.query.compiler.FunctionLib.FuncCode; +import oracle.kv.impl.query.runtime.FuncRowMetadataIter; +import oracle.kv.impl.query.runtime.PlanIter; +import oracle.kv.impl.query.types.ExprType; +import oracle.kv.impl.query.types.TypeManager; +import oracle.kv.table.Row; + +/** + * Function to return the metadata associated to a row. + * + * @see Row#getRowMetadata() + * @since 25.3 + */ +public class FuncRowMetadata extends Function { + + public static final String COL_NAME = "row_metadata()"; + + FuncRowMetadata() { + super(FuncCode.FN_ROW_METADATA, "row_metadata", + TypeManager.ANY_RECORD_ONE(), + TypeManager.createType(FieldDefImpl.Constants.jsonDef, + ExprType.Quantifier.ONE)); + } + + @Override + public boolean isRowProperty() { + return true; + } + + @Override + boolean mayReturnNULL(ExprFuncCall caller) { + return true; + } + + @Override + boolean mayReturnEmpty(ExprFuncCall caller) { + return false; + } + + @Override + Expr normalizeCall(ExprFuncCall funcCall) { + + Expr arg = funcCall.getArg(0); + + if (arg.getKind() == Expr.ExprKind.VAR && ((ExprVar)arg).getTable() != null) { + return funcCall; + } + + throw new QueryException( + "The argument to the row_metadata function must be a row " + + "variable", funcCall.getLocation()); + } + + @Override + PlanIter codegen(CodeGenerator codegen, + ExprFuncCall caller, + PlanIter[] argIters) { + + int resultReg = codegen.allocateResultReg(caller); + + Expr arg = caller.getArg(0); + + if (arg.getKind() != Expr.ExprKind.VAR || ((ExprVar)arg).getTable() == null) { + throw new QueryException( + "The argument to the row_metadata function must " + + "be a row variable", caller.getLocation()); + } + + return new FuncRowMetadataIter(caller, resultReg, argIters[0]); + } +} + diff --git a/kvmain/src/main/java/oracle/kv/impl/query/compiler/FunctionLib.java b/kvmain/src/main/java/oracle/kv/impl/query/compiler/FunctionLib.java index 8e4c5858..06f92024 100644 --- a/kvmain/src/main/java/oracle/kv/impl/query/compiler/FunctionLib.java +++ b/kvmain/src/main/java/oracle/kv/impl/query/compiler/FunctionLib.java @@ -197,7 +197,14 @@ public static enum FuncCode { FN_DAY_OF_WEEK(127), FN_DAY_OF_MONTH(128), FN_DAY_OF_YEAR(129), - FN_TIMESTAMP_BUCKET(130); + FN_TIMESTAMP_BUCKET(130), + + /* Other */ + FN_ROW_METADATA(131), + + FN_CREATION_TIME(132), + FN_CREATION_TIME_MILLIS(133); + + private static final FuncCode[] VALUES = values(); public static final int VALUES_COUNT = VALUES.length; @@ -438,6 +445,10 @@ public static FuncCode valueOf(int ordinal) { "day_of_year")); theFunctions.add(new FuncTimestampBucket(FuncCode.FN_TIMESTAMP_BUCKET, "timestamp_bucket")); + theFunctions.add(new FuncRowMetadata()); + + theFunctions.add(new FuncCreationTime()); + theFunctions.add(new FuncCreationTimeMillis()); for (Function func : theFunctions) { sctx.addFunction(func); diff --git a/kvmain/src/main/java/oracle/kv/impl/query/compiler/IndexAnalyzer.java b/kvmain/src/main/java/oracle/kv/impl/query/compiler/IndexAnalyzer.java index 496fe1af..c2530cf5 100644 --- a/kvmain/src/main/java/oracle/kv/impl/query/compiler/IndexAnalyzer.java +++ b/kvmain/src/main/java/oracle/kv/impl/query/compiler/IndexAnalyzer.java @@ -2274,7 +2274,19 @@ private void rewriteExpr(ExprVar idxVar, Expr expr) { trace("Replacing expr:\n" + etr.theExpr.display()); } - if (etr.theExpr.getNumParents() > 1) { + int numParents = etr.theExpr.getNumParents(); + + 
if (numParents == 0) { + /* This can happen if the expr was added twice to the rewrite + * map and has already been rewritten in an earlier iteration + * of this for loop */ + if (theTrace >= 3) { + trace("expr has no parents"); + } + continue; + } + + if (numParents > 1) { parent = expr.findSubExpr(etr.theExpr); if (parent == null) { throw new QueryStateException( @@ -5037,21 +5049,19 @@ private boolean checkIsCovering(ExprSFW sfw) { * is not sorting by prim key columns, we must mark the prim index as * not covering. */ - if (theTablePos == theTargetTablePos) { - int numSortExprs = sfw.getNumSortExprs(); + int numSortExprs = sfw.getNumSortExprs(); - for (int i = 0; i < numSortExprs; ++i) { - Expr expr = sfw.getSortExpr(i); - if (!isIndexOnlyExpr(expr, true, true)) { - isCovering = false; - if (theTrace >= 2) { - trace("Index is not covering: it does " + - "not cover the " + i + "-th ORDER BY expr"); - } + for (int i = 0; i < numSortExprs; ++i) { + Expr expr = sfw.getSortExpr(i); + if (!isIndexOnlyExpr(expr, false, true)) { + isCovering = false; + if (theTrace >= 2) { + trace("Index is not covering: it does " + + "not cover the " + i + "-th ORDER BY expr"); + } - if (!theIsUnnestingIndex) { - return false; - } + if (!theIsUnnestingIndex) { + return false; } } } @@ -5290,6 +5300,8 @@ private boolean isIndexOnlyExpr( case FN_PARTITION: case FN_SHARD: break; + case FN_CREATION_TIME: + case FN_CREATION_TIME_MILLIS: case FN_MOD_TIME: return false; case FN_VERSION: @@ -5313,6 +5325,8 @@ private boolean isIndexOnlyExpr( break; } return false; + case FN_ROW_METADATA: + return false; default: break; } @@ -5383,9 +5397,18 @@ private void checkIsSortingIndex() { boolean desc = false; boolean nullsLast = false; int i; + int e; + + if (!hasSort && !hasGroupBy) { + return; + } int numPkCols = theTable.getPrimaryKeySize(); int numShardKeys = theTable.getShardKeySize(); + int numTables = theTableExpr.getNumTables(); + int numAncestors = theTableExpr.getNumAncestors(); + int numDescendants = theTableExpr.getNumDescendants(); + int numExprs = (hasGroupBy ? theSFW.getNumGroupExprs() : theSFW.getNumSortExprs()); @@ -5415,36 +5438,90 @@ private void checkIsSortingIndex() { * are a prefix of the primary key columns. */ if (theIsPrimary) { - for (i = 0; i < numPkCols && i < numExprs; ++i) { + if (hasSort && desc && numDescendants > 0) { + /* In the current implementation, it is not possible to use + * an index to order by primary key columns in descending + * order when the NESTED TABLES clause contains descendants */ + return; + } + + for (i = 0, e = 0; i < numPkCols && e < numExprs; ++i) { Expr expr = (hasGroupBy ? - theSFW.getFieldExpr(i) : - theSFW.getSortExpr(i)); + theSFW.getFieldExpr(e) : + theSFW.getSortExpr(e)); - if (!ExprUtils. - isPrimKeyColumnRef(theTableExpr, theTable, i, expr)) { - break; + if (ExprUtils. + isPrimKeyColumnRef(theTableExpr, theTable, i, expr)) { + + if (hasGroupBy && i == numShardKeys - 1) { + theSFW.setGroupByExprCompleteShardKey(); + } + + ++e; + continue; } - if (hasGroupBy && i == numShardKeys - 1) { - theSFW.setGroupByExprCompleteShardKey(); + /* The currest expr does not match with the current pk column. + * Check whether we have an equality predicate on the current + * pk column. If so, skip the current pk column. 
*/ + ArrayList startstopPIs = theStartStopPreds.get(i); + if (startstopPIs != null && startstopPIs.size() == 1) { + PredInfo pi = startstopPIs.get(0); + if (pi.isEq()) { + continue; + } } + + break; } - if (i == numExprs) { + if (e == numExprs) { + theSFW.addSortingIndex(null); + theTableExpr.setDirection(direction); + return; + } - if (hasSort) { + /* If the pk columns of the target table are a prefix of the + * sort/group exprs, check if the remaining sort/group exprs + * are the pk columns of the descendant tables. Inheritted + * pk columns may or may not be among the sort/group exprs. */ + if (i == numPkCols && numDescendants > 0 && !desc) { + + int numAncestorPkCols = numPkCols; + + for (int t = numAncestors + 1; t < numTables; ++t) { + + TableImpl descendant = theTableExpr.getTable(t); + numPkCols = descendant.getPrimaryKeySize(); + + for (int pkPos = 0; + e < numExprs && pkPos < numPkCols; + ++e, ++pkPos) { + Expr expr = (hasGroupBy ? + theSFW.getFieldExpr(e) : + theSFW.getSortExpr(e)); + if (!ExprUtils.isPrimKeyColumnRef(theTableExpr, + descendant, + pkPos, + expr)) { + if (pkPos < numAncestorPkCols) { + --e; + continue; + } - if (desc && theTableExpr.getNumDescendants() > 0) { - /* In the current implementation, it is not possible to use - * an index to order by primary key columns in descending - * order when the NESTED TABLES clause contains descendants */ - return; + return; + } } - } - theSFW.addSortingIndex(null); - theTableExpr.setDirection(direction); + if (e == numExprs) { + theSFW.addSortingIndex(null); + theTableExpr.setDirection(direction); + break; + } + + numAncestorPkCols = numPkCols; + } } return; @@ -5461,14 +5538,13 @@ private void checkIsSortingIndex() { /* Check whether the sort exprs are a prefix of the index paths. */ List indexPaths = theIndex.getIndexFields(); - int k; - for (i = 0, k = 0; i < indexPaths.size() && k < numExprs; ++i) { + for (i = 0, e = 0; i < indexPaths.size() && e < numExprs; ++i) { IndexField ipath = indexPaths.get(i); Expr expr = (hasGroupBy ? - theSFW.getFieldExpr(k) : - theSFW.getSortExpr(k)); + theSFW.getFieldExpr(e) : + theSFW.getSortExpr(e)); IndexExpr epath = expr.getIndexExpr(); if (ipath.isGeometry() || epath == null) { @@ -5481,10 +5557,13 @@ private void checkIsSortingIndex() { break; } - ++k; + ++e; continue; } + /* The currest expr does not match with the current ipath. Check + * whether we have an equality predicate on the current ipath. If + * so, skip the current ipath. */ ArrayList startstopPIs = theStartStopPreds.get(i); if (startstopPIs != null && startstopPIs.size() == 1) { PredInfo pi = startstopPIs.get(0); @@ -5496,33 +5575,76 @@ private void checkIsSortingIndex() { break; } - if (numExprs > 0 && k == numExprs) { + if (numExprs > 0 && e == numExprs) { theSFW.addSortingIndex(theIndex); theTableExpr.setDirection(direction); return; } + if (i != indexPaths.size()) { + return; + } + /* Check if the remaining sort exprs are primary-key columns * (which exist in the index as well). */ - if (i == indexPaths.size()) { + int pkPos = 0; + for (; pkPos < numPkCols && e < numExprs; ++e, ++pkPos) { - for (int j = 0; - j < numPkCols && k < numExprs; - ++k, ++j) { + Expr expr = (hasGroupBy ? + theSFW.getFieldExpr(e) : + theSFW.getSortExpr(e)); - Expr expr = (hasGroupBy ? - theSFW.getFieldExpr(k) : - theSFW.getSortExpr(k)); + if (!ExprUtils. + isPrimKeyColumnRef(theTableExpr, theTable, pkPos, expr)) { + break; + } + } - if (!ExprUtils. 
- isPrimKeyColumnRef(theTableExpr, theTable, j, expr)) { - break; + if (e == numExprs) { + theSFW.addSortingIndex(theIndex); + theTableExpr.setDirection(direction); + return; + } + + if (theTrace >= 2) { + trace("checkIsSortingIndex: Checking for descendant pk cols. " + + "exprPos = " + e + " pkPos = " + pkPos); + } + + if (numDescendants > 0 && !desc) { + + int numAncestorPkCols = pkPos+1; + + for (int t = numAncestors + 1; t < numTables; ++t) { + + TableImpl descendant = theTableExpr.getTable(t); + numPkCols = descendant.getPrimaryKeySize(); + + for (pkPos = 0; + e < numExprs && pkPos < numPkCols; + ++e, ++pkPos) { + Expr expr = (hasGroupBy ? + theSFW.getFieldExpr(e) : + theSFW.getSortExpr(e)); + if (!ExprUtils.isPrimKeyColumnRef(theTableExpr, + descendant, + pkPos, + expr)) { + if (pkPos < numAncestorPkCols) { + --e; + continue; + } + return; + } } - } - if (k == numExprs) { - theSFW.addSortingIndex(theIndex); - theTableExpr.setDirection(direction); + if (e == numExprs) { + theSFW.addSortingIndex(theIndex); + theTableExpr.setDirection(direction); + return; + } + + numAncestorPkCols = numPkCols; } } } diff --git a/kvmain/src/main/java/oracle/kv/impl/query/compiler/IndexExpr.java b/kvmain/src/main/java/oracle/kv/impl/query/compiler/IndexExpr.java index ca32491e..2f6750b6 100644 --- a/kvmain/src/main/java/oracle/kv/impl/query/compiler/IndexExpr.java +++ b/kvmain/src/main/java/oracle/kv/impl/query/compiler/IndexExpr.java @@ -591,18 +591,22 @@ static IndexExpr create(Expr expr) { switch (expr.getKind()) { case FUNC_CALL: - if (expr != epath.theExpr || - !epath.theFunction.isIndexable()) { - return null; - } - ExprFuncCall fncall = (ExprFuncCall)expr; - for (int i = 1; i < fncall.getNumArgs(); ++i) { - Expr arg = fncall.getArg(i); - if (!ConstKind.isCompileConst(arg)) { + if (fncall.getFuncCode() == FuncCode.FN_ROW_METADATA) { + epath.add(FuncRowMetadata.COL_NAME, StepKind.REC_FIELD, expr); + } else { + if (expr != epath.theExpr || + !epath.theFunction.isIndexable()) { return null; } + + for (int i = 1; i < fncall.getNumArgs(); ++i) { + Expr arg = fncall.getArg(i); + if (!ConstKind.isCompileConst(arg)) { + return null; + } + } } expr = fncall.getArg(0); @@ -714,7 +718,8 @@ static IndexExpr create(Expr expr) { if (varExpr.getIndex() == null && !epath.theSteps.isEmpty()) { String colName = epath.theSteps.get(0).theName; - if (!epath.theTable.isJsonCollection()) { + if (!epath.theTable.isJsonCollection() && + !colName.equals(FuncRowMetadata.COL_NAME)) { StepKind skind = epath.theSteps.get(0).theKind; if (skind == StepKind.REC_FIELD) { epath.theColumnPos = @@ -1407,6 +1412,18 @@ private static IndexExpr createPathForUnnestVar(ExprVar initUnnestVar) { expr = expr.getInput(); break; } + case FUNC_CALL: + ExprFuncCall fncall = (ExprFuncCall)expr; + + if (fncall.getFuncCode() == FuncCode.FN_ROW_METADATA) { + epath.add(FuncRowMetadata.COL_NAME, StepKind.REC_FIELD, expr); + expr = fncall.getArg(0); + break; + } + + throw new QueryStateException( + "Unexpected expression in unnest path for variable " + + initUnnestVar.getName()); case VAR: { ExprVar var = (ExprVar)expr; @@ -1430,8 +1447,8 @@ private static IndexExpr createPathForUnnestVar(ExprVar initUnnestVar) { } default: { throw new QueryStateException( - "Unexpected expression in unnest path " + - epath.getPathName()); + "Unexpected expression in unnest path for variable " + + initUnnestVar.getName()); } } } diff --git a/kvmain/src/main/java/oracle/kv/impl/query/compiler/Translator.java 
b/kvmain/src/main/java/oracle/kv/impl/query/compiler/Translator.java index 3bf819e7..38c962e5 100644 --- a/kvmain/src/main/java/oracle/kv/impl/query/compiler/Translator.java +++ b/kvmain/src/main/java/oracle/kv/impl/query/compiler/Translator.java @@ -1552,6 +1552,15 @@ private ExprVar checkUnnestVar(ExprVar var, ExprVar invar) { } return null; } + case FUNC_CALL: + Function func = expr.getFunction(FuncCode.FN_ROW_METADATA); + if (func != null) { + ExprFuncCall fncall = (ExprFuncCall)expr; + return (ExprVar)fncall.getArg(0); + } + throw new QueryException( + "Invalid expression in UNNEST clause", + expr.getLocation()); default: throw new QueryException( "Invalid expression in UNNEST clause", @@ -5492,6 +5501,47 @@ public void enterCreate_table_statement( getLocation(ctx.table_name())); } + Frozen_defContext frozen_def = null; + + KVQLParser.Table_optionsContext options = ctx.table_options(); + if (options != null) { + List< KVQLParser.Ttl_defContext> ttls = options.ttl_def(); + if (ttls != null && ttls.size() > 1) { + throw new QueryException( + "Only one TTL definition is allowed in CREATE TABLE DDL", + getLocation(options)); + } + List< KVQLParser.Regions_defContext> regions = options.regions_def(); + if (regions != null && regions.size() > 1) { + throw new QueryException( + "Only one regions definition is allowed in CREATE TABLE DDL", + getLocation(options)); + } + List< KVQLParser.Frozen_defContext> frozens = options.frozen_def(); + if (frozens != null && frozens.size() > 1) { + throw new QueryException( + "Only one frozen definition is allowed in CREATE TABLE DDL", + getLocation(options)); + } + if (frozens != null && !frozens.isEmpty()) { + frozen_def = options.frozen_def().get(0); + } + List< KVQLParser.Json_collection_defContext> jcs = + options.json_collection_def(); + if (jcs != null && jcs.size() > 1) { + throw new QueryException( + "Only one json collection definition is allowed " + + "in CREATE TABLE DDL", getLocation(options)); + } + List ebis = + options.enable_before_image(); + if (ebis != null && ebis.size() > 1) { + throw new QueryException( + "Only one enable before image definition is allowed " + + "in CREATE TABLE DDL", getLocation(options)); + } + } + /* * Do callback for name including parent path before requiring metadata * to allow the caller to allow, or not, child tables without a metadata @@ -5507,7 +5557,6 @@ public void enterCreate_table_statement( theQCB.getPrepareCallback().ifNotExistsFound(); } - Frozen_defContext frozen_def = ctx.table_options().frozen_def(); if (frozen_def != null) { boolean force = (frozen_def.FORCE() != null); theQCB.getPrepareCallback().freezeFound(); @@ -5878,30 +5927,44 @@ public void enterTtl_def(KVQLParser.Ttl_defContext ctx) { } @Override - public void enterRegions_def(KVQLParser.Regions_defContext ctx) { - final String[] regionNames = - makeIdArray(ctx.region_names().id_list().id()); + public void enterEnable_before_image(KVQLParser.Enable_before_imageContext ctx) { - PrepareCallback pc = theQCB.getPrepareCallback(); - if (pc != null) { - for (String regionName : regionNames) { - pc.regionName(regionName); - } + if (ctx.before_image_ttl() == null) { + theTableBuilder.setBeforeImageTTL(TableImpl.DEFAULT_BEFORE_IMAGE_TTL); } + } - /* - * If the RegionMapper is available add regions so that the - * table can be validated - */ - if (theTableBuilder.getRegionMapper() != null) { - for (String regionName : regionNames) { - theTableBuilder.addRegion(regionName); + @Override + public void 
enterBefore_image_ttl(KVQLParser.Before_image_ttlContext ctx) { + KVQLParser.DurationContext duration = ctx.duration(); + Location loc = getLocation(ctx); + try { + int ttl = Integer.parseInt(duration.INT().getText()); + if (ttl <= 0) { + throw new QueryException( + "Before image TTL value must be greater than 0", loc); } + theTableBuilder.setBeforeImageTTL( + TimeToLive.createTimeToLive( + ttl, + convertToTimeUnit(duration.time_unit()))); + } catch (NumberFormatException nfex) { + String msg = "Invalid TTL value: " + + duration.INT().getText() + + " in " + duration.INT().getText() + + " " + duration.time_unit().getText(); + throw new QueryException(msg, loc); + } catch (IllegalArgumentException iae) { + String msg = "Invalid TTL Unit: " + + convertToTimeUnit(duration.time_unit()) + + " in " + duration.INT().getText() + + " " + duration.time_unit().getText(); + throw new QueryException(msg, loc); } } @Override - public void enterAdd_region_def(KVQLParser.Add_region_defContext ctx) { + public void enterRegions_def(KVQLParser.Regions_defContext ctx) { final String[] regionNames = makeIdArray(ctx.region_names().id_list().id()); @@ -5923,51 +5986,43 @@ public void enterAdd_region_def(KVQLParser.Add_region_defContext ctx) { } } - @Override - public void enterDrop_region_def(KVQLParser.Drop_region_defContext ctx) { - final String[] regionNames = - makeIdArray(ctx.region_names().id_list().id()); - - PrepareCallback pc = theQCB.getPrepareCallback(); - if (pc != null) { - for (String regionName : regionNames) { - pc.regionName(regionName); - } - } - - /* - * If the prepare doesn't need to be completed, there may not be - * metadata available to map the regions, so don't add them. This case - * is a syntax check only, so if any regions don't exist the operation - * will fail later. - */ - if ((pc == null) || pc.prepareNeeded()) { - for (String regionName : regionNames) { - theTableBuilder.dropRegion(regionName); - } - } - } - /* - * alter_table_statement : ALTER TABLE table_name alter_field_statement ; + * alter_table_statement : ALTER TABLE table_name alter_def ; + * + * alter_def : alter_field_statements | + * ttl_def | + * add_region_def | drop_region_def | + * freeze_def | unfreeze_def | + * enable_before_image | disable_before_image ; + * + * freeze_def: FREEZE SCHEMA FORCE?; + * + * unfreeze_def: UNFREEZE SCHEMA ; + * + * enable_before_image : ENABLE BEFORE IMAGE ttl_def ; * - * alter_field_statement : - * LP - * (add_field_statement | drop_field_statement | modify_field_statement) - * (COMMA - * (add_field_statement | drop_field_statement | modify_field_statement))* - * RP ; + * disable_before_image : DISABLE BEFORE IMAGE ; + * + * add_region_def : ADD REGIONS region_names ; + * + * drop_region_def : DROP REGIONS region_names ; + * + * alter_field_statements : + * LP + * (add_field_statement | drop_field_statement | modify_field_statement) + * (COMMA + * (add_field_statement | drop_field_statement | modify_field_statement))* + * RP ; * * add_field_statement : - * ADD schema_path type_def (default_def | identity_def | - * mr_counter_def)? comment? ; + * ADD schema_path type_def (default_def | identity_def | mr_counter_def)? + * comment? ; * * drop_field_statement : DROP schema_path ; * * modify_field_statement : - * MODIFY schema_path ((type_def default_def? comment?) | - * identity_def | - DROP IDENTITY); + * MODIFY schema_path ((type_def default_def? comment?) 
| identity_def | + * DROP IDENTITY); * * schema_path : init_schema_path_step (DOT schema_path_step)*; * @@ -5982,14 +6037,16 @@ public void enterAlter_table_statement( String namespace = computeNamespace(ctx.table_name()); String[] pathName = getNamePath(ctx.table_name().table_id_path()); - if (pathName != null && pathName.length > 0 && ( - pathName[0].startsWith(SYS_PREFIX) || + if (pathName != null && pathName.length > 0 && + (pathName[0].startsWith(SYS_PREFIX) || pathName[pathName.length - 1].startsWith(SYS_PREFIX))) { throw new QueryException("Can not ALTER system table: " + ctx.table_name().table_id_path().getText(), getLocation(ctx.table_name().table_id_path())); } + Alter_defContext adf = ctx.alter_def(); + if (theQCB.getPrepareCallback() != null) { theQCB.getPrepareCallback().queryOperation(QueryOperation.ALTER_TABLE); theQCB.getPrepareCallback().namespaceName(namespace); @@ -6001,9 +6058,9 @@ public void enterAlter_table_statement( * Check for freeze and unfreeze in the context and make * callbacks as needed */ - Alter_defContext adf = ctx.alter_def(); Freeze_defContext fdf = adf.freeze_def(); Unfreeze_defContext udf = adf.unfreeze_def(); + if (fdf != null || udf != null) { if (fdf != null) { boolean force = (fdf.FORCE() != null); @@ -6032,9 +6089,19 @@ public void enterAlter_table_statement( } theTableBuilder = - TableEvolver.createTableEvolver(currentTable, - theMetadataHelper == null ? null : - theMetadataHelper.getRegionMapper()); + TableEvolver.createTableEvolver(currentTable, + (theMetadataHelper == null ? + null : + theMetadataHelper.getRegionMapper())); + if (adf.enable_before_image() != null) { + ((TableEvolver)theTableBuilder).setEnableBeforeImage(); + } + if (adf.disable_before_image() != null) { + ((TableEvolver)theTableBuilder).setDisableBeforeImage(); + } + if (adf.ttl_def() != null) { + ((TableEvolver)theTableBuilder).setUpdateTableTTL(); + } } @Override @@ -6101,6 +6168,54 @@ public void enterFrozen_def(KVQLParser.Frozen_defContext ctx) { } } + @Override + public void enterAdd_region_def(KVQLParser.Add_region_defContext ctx) { + final String[] regionNames = + makeIdArray(ctx.region_names().id_list().id()); + + PrepareCallback pc = theQCB.getPrepareCallback(); + if (pc != null) { + for (String regionName : regionNames) { + pc.regionName(regionName); + } + } + + /* + * If the RegionMapper is available add regions so that the + * table can be validated + */ + if (theTableBuilder.getRegionMapper() != null) { + for (String regionName : regionNames) { + theTableBuilder.addRegion(regionName); + } + } + } + + @Override + public void enterDrop_region_def(KVQLParser.Drop_region_defContext ctx) { + final String[] regionNames = + makeIdArray(ctx.region_names().id_list().id()); + + PrepareCallback pc = theQCB.getPrepareCallback(); + if (pc != null) { + for (String regionName : regionNames) { + pc.regionName(regionName); + } + } + + /* + * If the prepare doesn't need to be completed, there may not be + * metadata available to map the regions, so don't add them. This case + * is a syntax check only, so if any regions don't exist the operation + * will fail later. + */ + if ((pc == null) || pc.prepareNeeded()) { + for (String regionName : regionNames) { + theTableBuilder.dropRegion(regionName); + } + } + } + /* * add_field_statement : * ADD schema_path type_def (default_def | identity_def )? comment? ; @@ -6489,9 +6604,11 @@ private String[] getIndexFieldNames( names[i] = ""; } - /* if there is no type expression, just get the text. 
- * Otherwise we need to split out the type_expr. */ - if (field.path_type() == null) { + if (path.row_metadata() != null) { + names[i] += "row_metadata()."; + } + + if (path.old_index_path() != null) { names[i] += path.getText(); } else { if (path.multikey_path_prefix() != null) { @@ -6512,10 +6629,13 @@ private String[] getIndexFieldNames( names[i] += ("@" + functionArgs); } + //System.out.println("YYYY-0 " + names[i]); + /* * Check for old-stype syntax and handle it */ - if (path.name_path() == null) { + if (path.old_index_path() == null) { + //System.out.println("YYYY-1 " + names[i]); ++i; continue; } @@ -6544,6 +6664,7 @@ private String[] getIndexFieldNames( names[i] = sb.toString(); } + //System.out.println("YYYY-2 " + names[i]); ++i; } diff --git a/kvmain/src/main/java/oracle/kv/impl/query/compiler/parser/KVQL.g4 b/kvmain/src/main/java/oracle/kv/impl/query/compiler/parser/KVQL.g4 index b05423e6..5b6c1329 100644 --- a/kvmain/src/main/java/oracle/kv/impl/query/compiler/parser/KVQL.g4 +++ b/kvmain/src/main/java/oracle/kv/impl/query/compiler/parser/KVQL.g4 @@ -515,7 +515,7 @@ set_local_region_statement : */ create_table_statement : CREATE TABLE (IF NOT EXISTS)? table_name comment? LP table_def RP - table_options ; + table_options? ; table_name : (namespace ':' )? table_id_path ; @@ -547,32 +547,29 @@ id_with_size : id storage_size? ; storage_size : LP INT RP ; -/* think about adding all ordering options for json collections */ -table_options : (ttl_def? regions_def? | regions_def? ttl_def? - | ttl_def? frozen_def? | frozen_def? ttl_def? - | regions_def? json_collection_def? ttl_def? - | regions_def? ttl_def? json_collection_def? - | json_collection_def? ttl_def? regions_def? - | ttl_def? json_collection_def? regions_def?) ; - -/* Default TTL */ +table_options : + (ttl_def | + regions_def | + frozen_def | + json_collection_def | + enable_before_image)+ ; ttl_def : USING TTL duration ; -/* Regions */ - region_names : id_list ; regions_def : IN REGIONS region_names ; -add_region_def : ADD REGIONS region_names ; - -drop_region_def : DROP REGIONS region_names ; - frozen_def : WITH SCHEMA FROZEN FORCE?; json_collection_def : AS JSON COLLECTION ; +enable_before_image : ENABLE BEFORE IMAGE before_image_ttl? ; + +before_image_ttl : USING TTL duration ; + +disable_before_image : DISABLE BEFORE IMAGE ; + /* * This is used for setting a field as identity field. */ @@ -603,12 +600,18 @@ uuid_def : alter_table_statement : ALTER TABLE table_name alter_def ; alter_def : alter_field_statements | ttl_def | - add_region_def | drop_region_def | freeze_def | unfreeze_def; + add_region_def | drop_region_def | + freeze_def | unfreeze_def | + enable_before_image | disable_before_image ; freeze_def: FREEZE SCHEMA FORCE?; unfreeze_def: UNFREEZE SCHEMA ; +add_region_def : ADD REGIONS region_names ; + +drop_region_def : DROP REGIONS region_names ; + /* * Table modification -- add, drop, modify fields in an existing table. * This definition allows multiple changes to be contained in a single @@ -672,11 +675,15 @@ index_function_args : (COMMA const_expr)+ ; * be checked for code that reproduces these constants. */ index_path : - name_path | - multikey_path_prefix multikey_path_suffix? | - ELEMENTOF LP name_path RP multikey_path_suffix? | - KEYOF LP name_path RP | - KEYS LP name_path RP ; + (row_metadata? (name_path | multikey_path_prefix multikey_path_suffix? )) | + old_index_path; + +old_index_path : + ELEMENTOF LP name_path RP multikey_path_suffix? 
| + KEYOF LP name_path RP | + KEYS LP name_path RP ; + +row_metadata : 'row_metadata().' ; multikey_path_prefix : field_name @@ -976,14 +983,15 @@ id_list : id (COMMA id)* ; id : (ACCOUNT | ADD | ADMIN | ALL | ALTER | ALWAYS| ANCESTORS | AND | ANY_T | ANYATOMIC_T | ANYJSONATOMIC_T | ANYRECORD_T | ARRAY_COLLECT | AS | ASC | - BETWEEN | BY | CACHE | CASE | CAST | COLLECTION | COMMENT | COUNT | + BEFORE | BETWEEN | BY | CACHE | CASE | CAST | COLLECTION | COMMENT | COUNT | CREATE | CYCLE | DAYS | DECLARE | DEFAULT | DELETE | DESC | DESCENDANTS | - DESCRIBE | DISTINCT | DROP | - ELEMENTOF | ELEMENTS | ELSE | END | ES_SHARDS | ES_REPLICAS | EXISTS | EXTRACT | + DESCRIBE | DISABLE | DISTINCT | DROP | + ELEMENTOF | ELEMENTS | ELSE | ENABLE | END | ES_SHARDS | ES_REPLICAS | + EXISTS | EXTRACT | FIELDS | FIRST | FREEZE | FROM | FROZEN | FULLTEXT | GENERATED | GRANT | GROUP | HOURS | - IDENTIFIED | IDENTITY | IF | INCREMENT | INDEX | INDEXES | INSERT | INTO | - IN | IS | JSON | KEY | KEYOF | KEYS | + IDENTIFIED | IDENTITY | IF | INCREMENT | IMAGE | INDEX | INDEXES | INSERT | + INTO | IN | IS | JSON | KEY | KEYOF | KEYS | LIFETIME | LAST | LIMIT | LOCAL | LOCK | MERGE | MINUTES | MODIFY | MR_COUNTER NAMESPACE | NAMESPACES | NESTED | NO | NOT | NULLS | OF | OFFSET | ON | OR | ORDER | OVERRIDE | @@ -1039,6 +1047,8 @@ ASC : [Aa][Ss][Cc]; ARRAY_COLLECT : 'array_collect' ; +BEFORE : [Bb][Ee][Ff][Oo][Rr][Ee] ; + BETWEEN : [Bb][Ee][Tt][Ww][Ee][Ee][Nn] ; BY : [Bb][Yy] ; @@ -1075,6 +1085,8 @@ DESCENDANTS : [Dd][Ee][Ss][Cc][Ee][Nn][Dd][Aa][Nn][Tt][Ss] ; DESCRIBE : [Dd][Ee][Ss][Cc][Rr][Ii][Bb][Ee] ; +DISABLE : [Dd][Ii][Ss][Aa][Bb][Ll][Ee] ; + DISTINCT : [Dd][Ii][Ss][Tt][Ii][Nn][Cc][Tt] ; DROP : [Dd][Rr][Oo][Pp] ; @@ -1085,6 +1097,8 @@ ELEMENTS : [Ee][Ll][Ee][Mm][Ee][Nn][Tt][Ss] ; ELSE : [Ee][Ll][Ss][Ee] ; +ENABLE : [Ee][Nn][Aa][Bb][Ll][Ee] ; + END : [Ee][Nn][Dd] ; ES_SHARDS : [Ee][Ss] UNDER [Ss][Hh][Aa][Rr][Dd][Ss] ; @@ -1127,6 +1141,8 @@ IDENTITY : [Ii][Dd][Ee][Nn][Tt][Ii][Tt][Yy] ; IF : [Ii][Ff] ; +IMAGE : [Ii][Mm][Aa][Gg][Ee] ; + IN : [Ii][Nn] ; INCREMENT : [Ii][Nn][Cc][Rr][Ee][Mm][Ee][Nn][Tt] ; diff --git a/kvmain/src/main/java/oracle/kv/impl/query/compiler/parser/KVQL.interp b/kvmain/src/main/java/oracle/kv/impl/query/compiler/parser/KVQL.interp index e7c4632d..2c290457 100644 --- a/kvmain/src/main/java/oracle/kv/impl/query/compiler/parser/KVQL.interp +++ b/kvmain/src/main/java/oracle/kv/impl/query/compiler/parser/KVQL.interp @@ -3,6 +3,7 @@ null '/*+' '*/' '@' +'row_metadata().' 
null null null @@ -23,6 +24,7 @@ null null null null +null 'count' null null @@ -115,6 +117,9 @@ null null null null +null +null +null 'seq_transform' null null @@ -218,6 +223,7 @@ null null null null +null VARNAME ACCOUNT ADD @@ -230,6 +236,7 @@ AND AS ASC ARRAY_COLLECT +BEFORE BETWEEN BY CACHE @@ -248,11 +255,13 @@ DELETE DESC DESCENDANTS DESCRIBE +DISABLE DISTINCT DROP ELEMENTOF ELEMENTS ELSE +ENABLE END ES_SHARDS ES_REPLICAS @@ -274,6 +283,7 @@ HOURS IDENTIFIED IDENTITY IF +IMAGE IN INCREMENT INDEX @@ -572,10 +582,11 @@ table_options ttl_def region_names regions_def -add_region_def -drop_region_def frozen_def json_collection_def +enable_before_image +before_image_ttl +disable_before_image identity_def sequence_options mr_counter_def @@ -584,6 +595,8 @@ alter_table_statement alter_def freeze_def unfreeze_def +add_region_def +drop_region_def alter_field_statements add_field_statement drop_field_statement @@ -599,6 +612,8 @@ index_field index_function index_function_args index_path +old_index_path +row_metadata multikey_path_prefix multikey_path_suffix path_type @@ -653,4 +668,4 @@ id atn: -[4, 1, 212, 2571, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7, 20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25, 2, 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 2, 30, 7, 30, 2, 31, 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 2, 34, 7, 34, 2, 35, 7, 35, 2, 36, 7, 36, 2, 37, 7, 37, 2, 38, 7, 38, 2, 39, 7, 39, 2, 40, 7, 40, 2, 41, 7, 41, 2, 42, 7, 42, 2, 43, 7, 43, 2, 44, 7, 44, 2, 45, 7, 45, 2, 46, 7, 46, 2, 47, 7, 47, 2, 48, 7, 48, 2, 49, 7, 49, 2, 50, 7, 50, 2, 51, 7, 51, 2, 52, 7, 52, 2, 53, 7, 53, 2, 54, 7, 54, 2, 55, 7, 55, 2, 56, 7, 56, 2, 57, 7, 57, 2, 58, 7, 58, 2, 59, 7, 59, 2, 60, 7, 60, 2, 61, 7, 61, 2, 62, 7, 62, 2, 63, 7, 63, 2, 64, 7, 64, 2, 65, 7, 65, 2, 66, 7, 66, 2, 67, 7, 67, 2, 68, 7, 68, 2, 69, 7, 69, 2, 70, 7, 70, 2, 71, 7, 71, 2, 72, 7, 72, 2, 73, 7, 73, 2, 74, 7, 74, 2, 75, 7, 75, 2, 76, 7, 76, 2, 77, 7, 77, 2, 78, 7, 78, 2, 79, 7, 79, 2, 80, 7, 80, 2, 81, 7, 81, 2, 82, 7, 82, 2, 83, 7, 83, 2, 84, 7, 84, 2, 85, 7, 85, 2, 86, 7, 86, 2, 87, 7, 87, 2, 88, 7, 88, 2, 89, 7, 89, 2, 90, 7, 90, 2, 91, 7, 91, 2, 92, 7, 92, 2, 93, 7, 93, 2, 94, 7, 94, 2, 95, 7, 95, 2, 96, 7, 96, 2, 97, 7, 97, 2, 98, 7, 98, 2, 99, 7, 99, 2, 100, 7, 100, 2, 101, 7, 101, 2, 102, 7, 102, 2, 103, 7, 103, 2, 104, 7, 104, 2, 105, 7, 105, 2, 106, 7, 106, 2, 107, 7, 107, 2, 108, 7, 108, 2, 109, 7, 109, 2, 110, 7, 110, 2, 111, 7, 111, 2, 112, 7, 112, 2, 113, 7, 113, 2, 114, 7, 114, 2, 115, 7, 115, 2, 116, 7, 116, 2, 117, 7, 117, 2, 118, 7, 118, 2, 119, 7, 119, 2, 120, 7, 120, 2, 121, 7, 121, 2, 122, 7, 122, 2, 123, 7, 123, 2, 124, 7, 124, 2, 125, 7, 125, 2, 126, 7, 126, 2, 127, 7, 127, 2, 128, 7, 128, 2, 129, 7, 129, 2, 130, 7, 130, 2, 131, 7, 131, 2, 132, 7, 132, 2, 133, 7, 133, 2, 134, 7, 134, 2, 135, 7, 135, 2, 136, 7, 136, 2, 137, 7, 137, 2, 138, 7, 138, 2, 139, 7, 139, 2, 140, 7, 140, 2, 141, 7, 141, 2, 142, 7, 142, 2, 143, 7, 143, 2, 144, 7, 144, 2, 145, 7, 145, 2, 146, 7, 146, 2, 147, 7, 147, 2, 148, 7, 148, 2, 149, 7, 149, 2, 150, 7, 150, 2, 151, 7, 151, 2, 152, 7, 152, 2, 153, 7, 153, 2, 154, 7, 154, 2, 155, 7, 155, 2, 156, 7, 156, 2, 157, 7, 157, 2, 158, 7, 158, 2, 159, 7, 159, 2, 160, 7, 160, 2, 161, 7, 161, 2, 162, 7, 162, 2, 163, 7, 163, 2, 164, 7, 164, 2, 165, 
7, 165, 2, 166, 7, 166, 2, 167, 7, 167, 2, 168, 7, 168, 2, 169, 7, 169, 2, 170, 7, 170, 2, 171, 7, 171, 2, 172, 7, 172, 2, 173, 7, 173, 2, 174, 7, 174, 2, 175, 7, 175, 2, 176, 7, 176, 2, 177, 7, 177, 2, 178, 7, 178, 2, 179, 7, 179, 2, 180, 7, 180, 2, 181, 7, 181, 2, 182, 7, 182, 2, 183, 7, 183, 2, 184, 7, 184, 2, 185, 7, 185, 2, 186, 7, 186, 2, 187, 7, 187, 2, 188, 7, 188, 2, 189, 7, 189, 2, 190, 7, 190, 2, 191, 7, 191, 2, 192, 7, 192, 2, 193, 7, 193, 2, 194, 7, 194, 2, 195, 7, 195, 2, 196, 7, 196, 2, 197, 7, 197, 2, 198, 7, 198, 2, 199, 7, 199, 2, 200, 7, 200, 2, 201, 7, 201, 2, 202, 7, 202, 2, 203, 7, 203, 2, 204, 7, 204, 2, 205, 7, 205, 2, 206, 7, 206, 2, 207, 7, 207, 2, 208, 7, 208, 2, 209, 7, 209, 2, 210, 7, 210, 2, 211, 7, 211, 2, 212, 7, 212, 2, 213, 7, 213, 2, 214, 7, 214, 2, 215, 7, 215, 2, 216, 7, 216, 2, 217, 7, 217, 2, 218, 7, 218, 2, 219, 7, 219, 2, 220, 7, 220, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 471, 8, 1, 1, 2, 3, 2, 474, 8, 2, 1, 2, 1, 2, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 5, 3, 484, 8, 3, 10, 3, 12, 3, 487, 9, 3, 1, 4, 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 1, 6, 1, 6, 1, 7, 1, 7, 1, 7, 3, 7, 500, 8, 7, 1, 7, 3, 7, 503, 8, 7, 1, 7, 3, 7, 506, 8, 7, 1, 7, 3, 7, 509, 8, 7, 1, 7, 3, 7, 512, 8, 7, 1, 8, 1, 8, 1, 8, 1, 8, 5, 8, 518, 8, 8, 10, 8, 12, 8, 521, 9, 8, 1, 8, 1, 8, 1, 8, 3, 8, 526, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 531, 8, 8, 5, 8, 533, 8, 8, 10, 8, 12, 8, 536, 9, 8, 1, 9, 1, 9, 1, 9, 3, 9, 541, 8, 9, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 3, 10, 552, 8, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 3, 10, 559, 8, 10, 1, 10, 1, 10, 1, 11, 1, 11, 1, 11, 5, 11, 566, 8, 11, 10, 11, 12, 11, 569, 9, 11, 1, 12, 1, 12, 1, 12, 5, 12, 574, 8, 12, 10, 12, 12, 12, 577, 9, 12, 1, 13, 1, 13, 1, 13, 5, 13, 582, 8, 13, 10, 13, 12, 13, 585, 9, 13, 1, 14, 1, 14, 1, 14, 1, 15, 1, 15, 1, 15, 3, 15, 593, 8, 15, 1, 16, 1, 16, 3, 16, 597, 8, 16, 1, 16, 3, 16, 600, 8, 16, 1, 17, 1, 17, 3, 17, 604, 8, 17, 1, 18, 1, 18, 1, 18, 1, 18, 3, 18, 610, 8, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 3, 18, 617, 8, 18, 1, 18, 1, 18, 5, 18, 621, 8, 18, 10, 18, 12, 18, 624, 9, 18, 1, 18, 1, 18, 1, 19, 1, 19, 1, 19, 1, 20, 1, 20, 1, 20, 1, 21, 3, 21, 635, 8, 21, 1, 21, 3, 21, 638, 8, 21, 1, 21, 1, 21, 1, 21, 1, 21, 1, 21, 1, 21, 1, 21, 5, 21, 647, 8, 21, 10, 21, 12, 21, 650, 9, 21, 3, 21, 652, 8, 21, 1, 22, 1, 22, 5, 22, 656, 8, 22, 10, 22, 12, 22, 659, 9, 22, 1, 22, 1, 22, 1, 23, 1, 23, 1, 23, 1, 23, 5, 23, 667, 8, 23, 10, 23, 12, 23, 670, 9, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 3, 23, 690, 8, 23, 1, 23, 3, 23, 693, 8, 23, 1, 24, 1, 24, 3, 24, 697, 8, 24, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 5, 25, 707, 8, 25, 10, 25, 12, 25, 710, 9, 25, 1, 26, 3, 26, 713, 8, 26, 1, 26, 1, 26, 3, 26, 717, 8, 26, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 5, 27, 724, 8, 27, 10, 27, 12, 27, 727, 9, 27, 1, 28, 1, 28, 1, 28, 1, 29, 1, 29, 1, 29, 1, 30, 1, 30, 1, 30, 1, 30, 1, 30, 1, 30, 5, 30, 741, 8, 30, 10, 30, 12, 30, 744, 9, 30, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 5, 31, 752, 8, 31, 10, 31, 12, 31, 755, 9, 31, 1, 32, 3, 32, 758, 8, 32, 1, 32, 1, 32, 1, 33, 1, 33, 1, 33, 3, 33, 765, 8, 33, 1, 33, 3, 33, 768, 8, 33, 1, 34, 1, 34, 1, 34, 1, 34, 1, 34, 3, 34, 775, 8, 34, 1, 35, 1, 35, 1, 35, 1, 35, 1, 35, 1, 35, 1, 36, 1, 36, 1, 36, 3, 36, 786, 8, 36, 1, 36, 1, 36, 3, 36, 790, 8, 
36, 1, 37, 1, 37, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 3, 38, 800, 8, 38, 1, 39, 1, 39, 1, 39, 3, 39, 805, 8, 39, 1, 40, 1, 40, 1, 40, 1, 40, 1, 40, 1, 40, 4, 40, 813, 8, 40, 11, 40, 12, 40, 814, 1, 40, 1, 40, 1, 41, 1, 41, 1, 41, 1, 41, 5, 41, 823, 8, 41, 10, 41, 12, 41, 826, 9, 41, 1, 41, 1, 41, 1, 42, 1, 42, 1, 42, 1, 42, 5, 42, 834, 8, 42, 10, 42, 12, 42, 837, 9, 42, 1, 42, 1, 42, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 4, 43, 847, 8, 43, 11, 43, 12, 43, 848, 1, 43, 1, 43, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 858, 8, 44, 10, 44, 12, 44, 861, 9, 44, 1, 44, 1, 44, 3, 44, 865, 8, 44, 1, 44, 1, 44, 1, 44, 1, 45, 1, 45, 1, 45, 1, 46, 1, 46, 1, 46, 3, 46, 876, 8, 46, 1, 46, 1, 46, 3, 46, 880, 8, 46, 1, 46, 1, 46, 3, 46, 884, 8, 46, 1, 46, 1, 46, 1, 46, 3, 46, 889, 8, 46, 1, 46, 5, 46, 892, 8, 46, 10, 46, 12, 46, 895, 9, 46, 1, 46, 1, 46, 1, 47, 1, 47, 1, 47, 5, 47, 902, 8, 47, 10, 47, 12, 47, 905, 9, 47, 1, 48, 1, 48, 1, 48, 5, 48, 910, 8, 48, 10, 48, 12, 48, 913, 9, 48, 1, 49, 1, 49, 1, 49, 5, 49, 918, 8, 49, 10, 49, 12, 49, 921, 9, 49, 1, 50, 1, 50, 1, 50, 3, 50, 926, 8, 50, 1, 51, 1, 51, 1, 51, 5, 51, 931, 8, 51, 10, 51, 12, 51, 934, 9, 51, 1, 52, 1, 52, 1, 52, 3, 52, 939, 8, 52, 1, 53, 1, 53, 1, 53, 1, 53, 1, 53, 3, 53, 946, 8, 53, 1, 54, 1, 54, 1, 54, 3, 54, 951, 8, 54, 1, 54, 1, 54, 1, 55, 1, 55, 3, 55, 957, 8, 55, 1, 56, 1, 56, 3, 56, 961, 8, 56, 1, 56, 1, 56, 3, 56, 965, 8, 56, 1, 56, 1, 56, 1, 57, 1, 57, 3, 57, 971, 8, 57, 1, 57, 1, 57, 1, 58, 1, 58, 1, 58, 1, 58, 1, 58, 1, 58, 1, 58, 1, 58, 1, 58, 1, 58, 1, 58, 1, 58, 1, 58, 1, 58, 3, 58, 989, 8, 58, 1, 59, 1, 59, 1, 59, 1, 59, 3, 59, 995, 8, 59, 3, 59, 997, 8, 59, 1, 60, 1, 60, 1, 60, 1, 60, 1, 60, 3, 60, 1004, 8, 60, 1, 61, 1, 61, 1, 62, 1, 62, 3, 62, 1010, 8, 62, 1, 62, 1, 62, 5, 62, 1014, 8, 62, 10, 62, 12, 62, 1017, 9, 62, 1, 62, 1, 62, 1, 63, 1, 63, 1, 63, 1, 63, 1, 63, 1, 63, 1, 63, 1, 63, 1, 63, 5, 63, 1030, 8, 63, 10, 63, 12, 63, 1033, 9, 63, 1, 63, 1, 63, 1, 63, 1, 63, 3, 63, 1039, 8, 63, 1, 64, 1, 64, 1, 64, 1, 64, 1, 64, 1, 64, 1, 64, 1, 65, 1, 65, 1, 66, 1, 66, 1, 66, 3, 66, 1053, 8, 66, 1, 66, 1, 66, 1, 66, 1, 67, 1, 67, 1, 67, 1, 67, 1, 67, 5, 67, 1063, 8, 67, 10, 67, 12, 67, 1066, 9, 67, 3, 67, 1068, 8, 67, 1, 67, 1, 67, 1, 68, 1, 68, 1, 68, 1, 68, 1, 68, 1, 69, 1, 69, 1, 69, 1, 69, 1, 69, 1, 69, 1, 70, 1, 70, 1, 70, 1, 70, 1, 70, 1, 70, 1, 70, 1, 70, 1, 70, 1, 70, 5, 70, 1093, 8, 70, 10, 70, 12, 70, 1096, 9, 70, 1, 70, 1, 70, 3, 70, 1100, 8, 70, 1, 70, 1, 70, 1, 71, 1, 71, 1, 71, 1, 71, 1, 71, 1, 71, 1, 71, 1, 72, 1, 72, 1, 72, 1, 72, 1, 73, 1, 73, 1, 73, 1, 73, 1, 73, 1, 73, 1, 73, 1, 74, 3, 74, 1123, 8, 74, 1, 74, 1, 74, 1, 74, 1, 74, 3, 74, 1129, 8, 74, 1, 74, 3, 74, 1132, 8, 74, 1, 74, 1, 74, 1, 74, 1, 74, 5, 74, 1138, 8, 74, 10, 74, 12, 74, 1141, 9, 74, 1, 74, 1, 74, 3, 74, 1145, 8, 74, 1, 74, 1, 74, 1, 74, 1, 74, 1, 74, 5, 74, 1152, 8, 74, 10, 74, 12, 74, 1155, 9, 74, 1, 74, 1, 74, 1, 74, 1, 74, 3, 74, 1161, 8, 74, 1, 74, 3, 74, 1164, 8, 74, 1, 75, 1, 75, 3, 75, 1168, 8, 75, 1, 76, 1, 76, 1, 76, 1, 77, 1, 77, 3, 77, 1175, 8, 77, 1, 78, 1, 78, 1, 78, 1, 78, 1, 78, 1, 78, 3, 78, 1183, 8, 78, 1, 79, 3, 79, 1186, 8, 79, 1, 79, 1, 79, 1, 79, 3, 79, 1191, 8, 79, 1, 79, 3, 79, 1194, 8, 79, 1, 79, 1, 79, 1, 79, 5, 79, 1199, 8, 79, 10, 79, 12, 79, 1202, 9, 79, 1, 79, 1, 79, 1, 79, 3, 79, 1207, 8, 79, 1, 80, 1, 80, 1, 80, 1, 81, 1, 81, 1, 81, 1, 81, 1, 81, 3, 81, 1217, 8, 81, 5, 81, 1219, 8, 81, 10, 81, 12, 81, 1222, 9, 81, 1, 81, 1, 81, 1, 81, 1, 81, 1, 81, 3, 81, 1229, 8, 81, 5, 81, 1231, 8, 81, 10, 81, 
12, 81, 1234, 9, 81, 1, 81, 1, 81, 1, 81, 1, 81, 1, 81, 3, 81, 1241, 8, 81, 5, 81, 1243, 8, 81, 10, 81, 12, 81, 1246, 9, 81, 1, 81, 1, 81, 1, 81, 1, 81, 1, 81, 3, 81, 1253, 8, 81, 5, 81, 1255, 8, 81, 10, 81, 12, 81, 1258, 9, 81, 1, 81, 1, 81, 1, 81, 1, 81, 1, 81, 1, 81, 3, 81, 1266, 8, 81, 5, 81, 1268, 8, 81, 10, 81, 12, 81, 1271, 9, 81, 1, 81, 1, 81, 1, 81, 1, 81, 1, 81, 5, 81, 1278, 8, 81, 10, 81, 12, 81, 1281, 9, 81, 3, 81, 1283, 8, 81, 1, 82, 1, 82, 1, 82, 1, 82, 1, 83, 3, 83, 1290, 8, 83, 1, 83, 1, 83, 3, 83, 1294, 8, 83, 1, 83, 3, 83, 1297, 8, 83, 1, 83, 3, 83, 1300, 8, 83, 1, 83, 1, 83, 1, 84, 3, 84, 1305, 8, 84, 1, 84, 1, 84, 3, 84, 1309, 8, 84, 1, 84, 1, 84, 1, 85, 1, 85, 1, 86, 1, 86, 1, 86, 1, 86, 1, 86, 1, 87, 1, 87, 1, 87, 1, 87, 3, 87, 1324, 8, 87, 1, 88, 1, 88, 1, 88, 1, 88, 1, 88, 1, 88, 3, 88, 1332, 8, 88, 1, 89, 1, 89, 1, 90, 1, 90, 1, 91, 3, 91, 1339, 8, 91, 1, 91, 1, 91, 1, 91, 1, 91, 3, 91, 1345, 8, 91, 1, 91, 3, 91, 1348, 8, 91, 1, 91, 1, 91, 3, 91, 1352, 8, 91, 1, 91, 3, 91, 1355, 8, 91, 1, 92, 1, 92, 1, 92, 1, 93, 1, 93, 3, 93, 1362, 8, 93, 1, 94, 1, 94, 1, 94, 1, 94, 1, 94, 1, 94, 1, 94, 1, 94, 1, 94, 1, 94, 1, 94, 1, 94, 1, 94, 1, 94, 1, 94, 3, 94, 1379, 8, 94, 1, 95, 1, 95, 1, 95, 1, 95, 1, 95, 5, 95, 1386, 8, 95, 10, 95, 12, 95, 1389, 9, 95, 1, 95, 1, 95, 1, 96, 1, 96, 1, 96, 3, 96, 1396, 8, 96, 1, 96, 3, 96, 1399, 8, 96, 1, 97, 1, 97, 3, 97, 1403, 8, 97, 1, 97, 1, 97, 3, 97, 1407, 8, 97, 3, 97, 1409, 8, 97, 1, 98, 1, 98, 1, 98, 1, 98, 1, 98, 1, 98, 3, 98, 1417, 8, 98, 1, 99, 1, 99, 1, 99, 1, 100, 1, 100, 1, 100, 1, 100, 1, 100, 1, 101, 1, 101, 1, 101, 1, 101, 1, 101, 1, 102, 1, 102, 1, 103, 1, 103, 1, 104, 1, 104, 1, 105, 1, 105, 1, 106, 1, 106, 1, 106, 1, 106, 1, 106, 1, 106, 1, 106, 1, 106, 1, 106, 1, 106, 3, 106, 1450, 8, 106, 1, 107, 1, 107, 1, 108, 1, 108, 1, 108, 1, 108, 3, 108, 1458, 8, 108, 1, 109, 1, 109, 1, 109, 1, 109, 3, 109, 1464, 8, 109, 1, 110, 1, 110, 1, 111, 1, 111, 1, 112, 1, 112, 1, 113, 1, 113, 1, 114, 1, 114, 1, 114, 5, 114, 1477, 8, 114, 10, 114, 12, 114, 1480, 9, 114, 1, 115, 1, 115, 1, 115, 5, 115, 1485, 8, 115, 10, 115, 12, 115, 1488, 9, 115, 1, 116, 3, 116, 1491, 8, 116, 1, 116, 1, 116, 1, 117, 1, 117, 1, 117, 5, 117, 1498, 8, 117, 10, 117, 12, 117, 1501, 9, 117, 1, 118, 1, 118, 3, 118, 1505, 8, 118, 1, 119, 1, 119, 1, 119, 1, 119, 1, 119, 3, 119, 1512, 8, 119, 1, 119, 1, 119, 1, 120, 1, 120, 1, 120, 1, 120, 3, 120, 1520, 8, 120, 1, 120, 1, 120, 3, 120, 1524, 8, 120, 1, 121, 1, 121, 1, 122, 1, 122, 1, 122, 1, 122, 1, 123, 1, 123, 1, 123, 1, 123, 1, 124, 1, 124, 1, 124, 1, 124, 1, 124, 1, 125, 1, 125, 1, 125, 1, 125, 1, 125, 3, 125, 1546, 8, 125, 1, 125, 1, 125, 3, 125, 1550, 8, 125, 1, 125, 1, 125, 1, 125, 1, 125, 1, 125, 1, 126, 1, 126, 1, 126, 3, 126, 1560, 8, 126, 1, 126, 1, 126, 1, 127, 1, 127, 1, 128, 1, 128, 1, 128, 3, 128, 1569, 8, 128, 1, 128, 1, 128, 1, 128, 1, 128, 3, 128, 1575, 8, 128, 5, 128, 1577, 8, 128, 10, 128, 12, 128, 1580, 9, 128, 1, 129, 1, 129, 1, 129, 1, 129, 1, 129, 1, 129, 1, 129, 3, 129, 1589, 8, 129, 1, 129, 3, 129, 1592, 8, 129, 1, 130, 1, 130, 1, 130, 1, 130, 5, 130, 1598, 8, 130, 10, 130, 12, 130, 1601, 9, 130, 1, 130, 1, 130, 1, 131, 1, 131, 1, 131, 1, 131, 1, 131, 1, 132, 1, 132, 1, 133, 1, 133, 3, 133, 1614, 8, 133, 1, 133, 1, 133, 1, 133, 3, 133, 1619, 8, 133, 5, 133, 1621, 8, 133, 10, 133, 12, 133, 1624, 9, 133, 1, 134, 1, 134, 1, 134, 1, 134, 1, 134, 3, 134, 1631, 8, 134, 3, 134, 1633, 8, 134, 1, 134, 3, 134, 1636, 8, 134, 1, 134, 1, 134, 1, 135, 1, 135, 1, 135, 1, 135, 1, 135, 1, 135, 1, 135, 1, 135, 
1, 135, 3, 135, 1649, 8, 135, 1, 136, 1, 136, 1, 136, 5, 136, 1654, 8, 136, 10, 136, 12, 136, 1657, 9, 136, 1, 137, 1, 137, 3, 137, 1661, 8, 137, 1, 138, 1, 138, 1, 138, 1, 138, 1, 139, 3, 139, 1668, 8, 139, 1, 139, 3, 139, 1671, 8, 139, 1, 139, 3, 139, 1674, 8, 139, 1, 139, 3, 139, 1677, 8, 139, 1, 139, 3, 139, 1680, 8, 139, 1, 139, 3, 139, 1683, 8, 139, 1, 139, 3, 139, 1686, 8, 139, 1, 139, 3, 139, 1689, 8, 139, 1, 139, 3, 139, 1692, 8, 139, 1, 139, 3, 139, 1695, 8, 139, 1, 139, 3, 139, 1698, 8, 139, 1, 139, 3, 139, 1701, 8, 139, 1, 139, 3, 139, 1704, 8, 139, 1, 139, 3, 139, 1707, 8, 139, 1, 139, 3, 139, 1710, 8, 139, 1, 139, 3, 139, 1713, 8, 139, 1, 139, 3, 139, 1716, 8, 139, 1, 139, 3, 139, 1719, 8, 139, 1, 139, 3, 139, 1722, 8, 139, 1, 139, 3, 139, 1725, 8, 139, 3, 139, 1727, 8, 139, 1, 140, 1, 140, 1, 140, 1, 140, 1, 141, 1, 141, 1, 142, 1, 142, 1, 142, 1, 142, 1, 143, 1, 143, 1, 143, 1, 143, 1, 144, 1, 144, 1, 144, 1, 144, 1, 145, 1, 145, 1, 145, 1, 145, 3, 145, 1751, 8, 145, 1, 146, 1, 146, 1, 146, 1, 146, 1, 147, 1, 147, 1, 147, 1, 147, 1, 147, 1, 147, 3, 147, 1763, 8, 147, 3, 147, 1765, 8, 147, 1, 147, 1, 147, 1, 147, 1, 147, 4, 147, 1771, 8, 147, 11, 147, 12, 147, 1772, 1, 147, 1, 147, 3, 147, 1777, 8, 147, 1, 148, 1, 148, 1, 148, 1, 148, 1, 148, 1, 148, 1, 148, 1, 148, 1, 148, 1, 148, 1, 148, 1, 148, 1, 148, 1, 148, 1, 148, 1, 148, 1, 148, 1, 148, 1, 148, 1, 148, 1, 148, 3, 148, 1800, 8, 148, 1, 149, 1, 149, 1, 149, 1, 150, 1, 150, 1, 150, 1, 150, 1, 150, 3, 150, 1810, 8, 150, 1, 151, 1, 151, 1, 151, 1, 151, 1, 151, 1, 152, 1, 152, 1, 152, 1, 152, 1, 152, 1, 152, 3, 152, 1823, 8, 152, 1, 153, 1, 153, 1, 153, 3, 153, 1828, 8, 153, 1, 154, 1, 154, 1, 154, 1, 155, 1, 155, 1, 155, 1, 155, 3, 155, 1837, 8, 155, 1, 155, 1, 155, 1, 155, 1, 155, 3, 155, 1843, 8, 155, 5, 155, 1845, 8, 155, 10, 155, 12, 155, 1848, 9, 155, 1, 155, 1, 155, 1, 156, 1, 156, 1, 156, 1, 156, 1, 156, 1, 156, 1, 156, 1, 156, 3, 156, 1860, 8, 156, 1, 156, 3, 156, 1863, 8, 156, 1, 157, 1, 157, 1, 157, 1, 158, 1, 158, 1, 158, 1, 158, 3, 158, 1872, 8, 158, 1, 158, 3, 158, 1875, 8, 158, 1, 158, 1, 158, 1, 158, 1, 158, 3, 158, 1881, 8, 158, 1, 159, 1, 159, 1, 159, 5, 159, 1886, 8, 159, 10, 159, 12, 159, 1889, 9, 159, 1, 160, 1, 160, 1, 160, 5, 160, 1894, 8, 160, 10, 160, 12, 160, 1897, 9, 160, 1, 161, 1, 161, 1, 161, 5, 161, 1902, 8, 161, 10, 161, 12, 161, 1905, 9, 161, 1, 161, 1, 161, 1, 161, 3, 161, 1910, 8, 161, 1, 162, 1, 162, 1, 162, 1, 162, 3, 162, 1916, 8, 162, 1, 162, 1, 162, 1, 163, 1, 163, 1, 163, 1, 163, 1, 163, 3, 163, 1925, 8, 163, 1, 163, 1, 163, 1, 163, 1, 163, 1, 163, 1, 163, 1, 163, 1, 163, 3, 163, 1935, 8, 163, 1, 163, 3, 163, 1938, 8, 163, 1, 163, 1, 163, 1, 163, 1, 163, 1, 163, 3, 163, 1945, 8, 163, 1, 163, 1, 163, 1, 163, 1, 163, 3, 163, 1951, 8, 163, 1, 163, 3, 163, 1954, 8, 163, 1, 164, 1, 164, 1, 165, 1, 165, 1, 165, 5, 165, 1961, 8, 165, 10, 165, 12, 165, 1964, 9, 165, 1, 166, 1, 166, 3, 166, 1968, 8, 166, 1, 166, 3, 166, 1971, 8, 166, 1, 167, 1, 167, 1, 167, 3, 167, 1976, 8, 167, 1, 167, 3, 167, 1979, 8, 167, 1, 167, 3, 167, 1982, 8, 167, 1, 167, 1, 167, 1, 168, 1, 168, 4, 168, 1988, 8, 168, 11, 168, 12, 168, 1989, 1, 169, 1, 169, 1, 169, 3, 169, 1995, 8, 169, 1, 169, 1, 169, 1, 169, 1, 169, 1, 169, 3, 169, 2002, 8, 169, 1, 169, 1, 169, 1, 169, 1, 169, 1, 169, 1, 169, 1, 169, 1, 169, 1, 169, 1, 169, 3, 169, 2014, 8, 169, 1, 170, 1, 170, 1, 170, 1, 170, 1, 170, 1, 170, 1, 170, 1, 170, 1, 170, 5, 170, 2025, 8, 170, 10, 170, 12, 170, 2028, 9, 170, 1, 170, 1, 170, 1, 170, 1, 170, 1, 170, 1, 170, 
1, 170, 1, 170, 1, 170, 1, 170, 3, 170, 2040, 8, 170, 1, 171, 1, 171, 1, 171, 1, 172, 1, 172, 1, 172, 1, 172, 1, 172, 1, 172, 1, 172, 1, 172, 1, 172, 1, 172, 3, 172, 2055, 8, 172, 1, 172, 3, 172, 2058, 8, 172, 1, 173, 1, 173, 1, 173, 1, 173, 1, 173, 1, 173, 3, 173, 2066, 8, 173, 1, 173, 1, 173, 1, 173, 1, 173, 1, 173, 3, 173, 2073, 8, 173, 1, 173, 3, 173, 2076, 8, 173, 1, 173, 3, 173, 2079, 8, 173, 1, 174, 1, 174, 1, 174, 1, 174, 1, 174, 1, 174, 1, 174, 1, 174, 3, 174, 2089, 8, 174, 1, 175, 1, 175, 1, 175, 5, 175, 2094, 8, 175, 10, 175, 12, 175, 2097, 9, 175, 1, 176, 1, 176, 3, 176, 2101, 8, 176, 1, 177, 1, 177, 5, 177, 2105, 8, 177, 10, 177, 12, 177, 2108, 9, 177, 1, 178, 1, 178, 1, 178, 1, 178, 1, 178, 1, 178, 3, 178, 2116, 8, 178, 1, 179, 1, 179, 1, 179, 1, 179, 3, 179, 2122, 8, 179, 1, 179, 1, 179, 1, 179, 1, 179, 3, 179, 2128, 8, 179, 1, 180, 1, 180, 1, 180, 3, 180, 2133, 8, 180, 1, 180, 1, 180, 1, 180, 1, 180, 1, 180, 1, 180, 1, 180, 1, 180, 1, 180, 1, 180, 3, 180, 2145, 8, 180, 1, 180, 1, 180, 1, 180, 1, 180, 1, 180, 3, 180, 2152, 8, 180, 1, 181, 1, 181, 1, 181, 5, 181, 2157, 8, 181, 10, 181, 12, 181, 2160, 9, 181, 1, 182, 1, 182, 1, 182, 3, 182, 2165, 8, 182, 1, 182, 1, 182, 1, 182, 1, 182, 1, 182, 1, 182, 1, 182, 1, 182, 1, 182, 1, 182, 1, 182, 1, 182, 1, 182, 1, 182, 3, 182, 2181, 8, 182, 1, 183, 1, 183, 1, 183, 1, 183, 3, 183, 2187, 8, 183, 1, 183, 3, 183, 2190, 8, 183, 1, 184, 1, 184, 1, 184, 1, 184, 1, 185, 1, 185, 1, 185, 1, 185, 3, 185, 2200, 8, 185, 1, 185, 3, 185, 2203, 8, 185, 1, 185, 3, 185, 2206, 8, 185, 1, 185, 3, 185, 2209, 8, 185, 1, 185, 3, 185, 2212, 8, 185, 1, 186, 1, 186, 1, 186, 1, 186, 3, 186, 2218, 8, 186, 1, 187, 1, 187, 1, 187, 1, 187, 1, 188, 1, 188, 1, 188, 1, 188, 3, 188, 2228, 8, 188, 1, 189, 1, 189, 1, 189, 1, 189, 3, 189, 2234, 8, 189, 1, 190, 1, 190, 3, 190, 2238, 8, 190, 1, 191, 1, 191, 1, 191, 1, 192, 1, 192, 1, 192, 3, 192, 2246, 8, 192, 1, 192, 3, 192, 2249, 8, 192, 1, 192, 1, 192, 1, 192, 3, 192, 2254, 8, 192, 1, 193, 1, 193, 1, 193, 1, 194, 1, 194, 1, 194, 1, 194, 1, 195, 1, 195, 3, 195, 2265, 8, 195, 1, 196, 1, 196, 1, 196, 1, 197, 1, 197, 1, 197, 1, 197, 1, 198, 1, 198, 1, 198, 1, 198, 1, 199, 1, 199, 1, 199, 1, 199, 1, 199, 3, 199, 2283, 8, 199, 1, 199, 1, 199, 1, 199, 1, 200, 1, 200, 1, 200, 1, 200, 1, 201, 1, 201, 1, 201, 1, 201, 1, 202, 1, 202, 1, 202, 1, 202, 1, 202, 3, 202, 2301, 8, 202, 1, 202, 1, 202, 1, 202, 1, 203, 1, 203, 1, 203, 1, 203, 3, 203, 2310, 8, 203, 1, 204, 1, 204, 1, 204, 5, 204, 2315, 8, 204, 10, 204, 12, 204, 2318, 9, 204, 1, 205, 1, 205, 3, 205, 2322, 8, 205, 1, 206, 1, 206, 3, 206, 2326, 8, 206, 1, 206, 1, 206, 1, 206, 3, 206, 2331, 8, 206, 5, 206, 2333, 8, 206, 10, 206, 12, 206, 2336, 9, 206, 1, 207, 1, 207, 1, 208, 1, 208, 1, 208, 1, 208, 1, 208, 1, 208, 1, 208, 3, 208, 2347, 8, 208, 1, 209, 1, 209, 1, 209, 1, 209, 5, 209, 2353, 8, 209, 10, 209, 12, 209, 2356, 9, 209, 1, 209, 1, 209, 1, 209, 1, 209, 3, 209, 2362, 8, 209, 1, 210, 1, 210, 1, 210, 1, 210, 5, 210, 2368, 8, 210, 10, 210, 12, 210, 2371, 9, 210, 1, 210, 1, 210, 1, 210, 1, 210, 3, 210, 2377, 8, 210, 1, 211, 1, 211, 1, 211, 1, 211, 1, 212, 1, 212, 1, 212, 1, 212, 1, 212, 1, 212, 1, 212, 3, 212, 2390, 8, 212, 1, 213, 1, 213, 1, 213, 1, 214, 1, 214, 1, 214, 1, 215, 1, 215, 1, 216, 3, 216, 2401, 8, 216, 1, 216, 1, 216, 1, 217, 3, 217, 2406, 8, 217, 1, 217, 1, 217, 1, 218, 1, 218, 1, 219, 1, 219, 1, 219, 5, 219, 2415, 8, 219, 10, 219, 12, 219, 2418, 9, 219, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 
220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 3, 220, 2565, 8, 220, 1, 220, 1, 220, 3, 220, 2569, 8, 220, 1, 220, 0, 2, 60, 62, 221, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94, 96, 98, 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, 126, 128, 130, 132, 134, 136, 138, 140, 142, 144, 146, 148, 150, 152, 154, 156, 158, 160, 162, 164, 166, 168, 170, 172, 174, 176, 178, 180, 182, 184, 186, 188, 190, 192, 194, 196, 198, 200, 202, 204, 206, 208, 210, 212, 214, 216, 218, 220, 222, 224, 226, 228, 230, 232, 234, 236, 238, 240, 242, 244, 246, 248, 250, 252, 254, 256, 258, 260, 262, 264, 266, 268, 270, 272, 274, 276, 278, 280, 282, 284, 286, 288, 290, 292, 294, 296, 298, 300, 302, 304, 306, 308, 310, 312, 314, 316, 318, 320, 322, 324, 326, 328, 330, 332, 334, 336, 338, 340, 342, 344, 346, 348, 350, 352, 354, 356, 358, 360, 362, 364, 366, 368, 370, 372, 374, 376, 378, 380, 382, 384, 386, 388, 390, 392, 394, 396, 398, 400, 402, 404, 406, 408, 410, 412, 414, 416, 418, 420, 422, 424, 426, 428, 430, 432, 434, 436, 438, 440, 0, 18, 2, 0, 14, 14, 31, 31, 2, 0, 45, 45, 72, 72, 1, 0, 180, 185, 1, 0, 192, 193, 2, 0, 176, 176, 194, 195, 2, 0, 71, 71, 134, 134, 2, 0, 4, 4, 178, 179, 2, 0, 64, 64, 130, 130, 2, 0, 27, 27, 56, 56, 3, 0, 176, 176, 179, 179, 192, 192, 1, 0, 154, 155, 3, 0, 150, 150, 152, 152, 157, 157, 2, 0, 154, 155, 157, 157, 2, 0, 31, 31, 33, 33, 2, 0, 77, 77, 128, 128, 4, 0, 27, 27, 56, 56, 80, 80, 114, 114, 1, 0, 200, 202, 1, 0, 203, 204, 2908, 0, 442, 1, 0, 0, 0, 2, 470, 1, 0, 0, 0, 4, 473, 1, 0, 0, 0, 6, 477, 1, 0, 0, 0, 8, 488, 1, 0, 0, 0, 10, 491, 1, 0, 0, 0, 12, 494, 1, 0, 0, 0, 14, 496, 1, 0, 0, 0, 16, 513, 1, 0, 0, 0, 18, 540, 1, 0, 0, 0, 20, 542, 1, 0, 0, 0, 22, 562, 1, 0, 0, 0, 24, 570, 1, 0, 0, 0, 26, 578, 1, 0, 0, 0, 28, 586, 1, 0, 0, 0, 30, 589, 1, 0, 0, 0, 32, 594, 1, 0, 0, 0, 34, 603, 1, 0, 0, 0, 36, 605, 1, 0, 0, 0, 38, 627, 1, 0, 0, 0, 40, 630, 1, 0, 0, 0, 42, 634, 1, 0, 0, 0, 44, 653, 1, 0, 0, 0, 46, 689, 1, 0, 0, 0, 48, 696, 1, 0, 0, 0, 50, 698, 1, 0, 0, 0, 52, 712, 1, 0, 0, 0, 54, 718, 1, 0, 0, 0, 56, 728, 1, 0, 0, 0, 58, 731, 1, 0, 0, 0, 60, 734, 1, 0, 0, 0, 62, 745, 1, 0, 0, 0, 64, 757, 1, 0, 0, 0, 66, 761, 1, 0, 0, 0, 68, 774, 1, 0, 0, 0, 70, 776, 1, 0, 0, 0, 72, 782, 1, 0, 0, 0, 74, 791, 1, 0, 0, 0, 76, 799, 1, 0, 0, 0, 78, 804, 1, 0, 0, 0, 80, 806, 1, 0, 0, 0, 82, 818, 1, 0, 0, 0, 84, 829, 1, 0, 0, 0, 86, 840, 1, 0, 0, 0, 88, 
864, 1, 0, 0, 0, 90, 869, 1, 0, 0, 0, 92, 872, 1, 0, 0, 0, 94, 898, 1, 0, 0, 0, 96, 906, 1, 0, 0, 0, 98, 914, 1, 0, 0, 0, 100, 925, 1, 0, 0, 0, 102, 927, 1, 0, 0, 0, 104, 935, 1, 0, 0, 0, 106, 945, 1, 0, 0, 0, 108, 947, 1, 0, 0, 0, 110, 956, 1, 0, 0, 0, 112, 958, 1, 0, 0, 0, 114, 968, 1, 0, 0, 0, 116, 988, 1, 0, 0, 0, 118, 990, 1, 0, 0, 0, 120, 1003, 1, 0, 0, 0, 122, 1005, 1, 0, 0, 0, 124, 1007, 1, 0, 0, 0, 126, 1038, 1, 0, 0, 0, 128, 1040, 1, 0, 0, 0, 130, 1047, 1, 0, 0, 0, 132, 1049, 1, 0, 0, 0, 134, 1057, 1, 0, 0, 0, 136, 1071, 1, 0, 0, 0, 138, 1076, 1, 0, 0, 0, 140, 1082, 1, 0, 0, 0, 142, 1103, 1, 0, 0, 0, 144, 1110, 1, 0, 0, 0, 146, 1114, 1, 0, 0, 0, 148, 1122, 1, 0, 0, 0, 150, 1167, 1, 0, 0, 0, 152, 1169, 1, 0, 0, 0, 154, 1174, 1, 0, 0, 0, 156, 1182, 1, 0, 0, 0, 158, 1185, 1, 0, 0, 0, 160, 1208, 1, 0, 0, 0, 162, 1282, 1, 0, 0, 0, 164, 1284, 1, 0, 0, 0, 166, 1289, 1, 0, 0, 0, 168, 1304, 1, 0, 0, 0, 170, 1312, 1, 0, 0, 0, 172, 1314, 1, 0, 0, 0, 174, 1323, 1, 0, 0, 0, 176, 1331, 1, 0, 0, 0, 178, 1333, 1, 0, 0, 0, 180, 1335, 1, 0, 0, 0, 182, 1338, 1, 0, 0, 0, 184, 1356, 1, 0, 0, 0, 186, 1359, 1, 0, 0, 0, 188, 1378, 1, 0, 0, 0, 190, 1380, 1, 0, 0, 0, 192, 1392, 1, 0, 0, 0, 194, 1408, 1, 0, 0, 0, 196, 1410, 1, 0, 0, 0, 198, 1418, 1, 0, 0, 0, 200, 1421, 1, 0, 0, 0, 202, 1426, 1, 0, 0, 0, 204, 1431, 1, 0, 0, 0, 206, 1433, 1, 0, 0, 0, 208, 1435, 1, 0, 0, 0, 210, 1437, 1, 0, 0, 0, 212, 1449, 1, 0, 0, 0, 214, 1451, 1, 0, 0, 0, 216, 1453, 1, 0, 0, 0, 218, 1459, 1, 0, 0, 0, 220, 1465, 1, 0, 0, 0, 222, 1467, 1, 0, 0, 0, 224, 1469, 1, 0, 0, 0, 226, 1471, 1, 0, 0, 0, 228, 1473, 1, 0, 0, 0, 230, 1481, 1, 0, 0, 0, 232, 1490, 1, 0, 0, 0, 234, 1494, 1, 0, 0, 0, 236, 1504, 1, 0, 0, 0, 238, 1506, 1, 0, 0, 0, 240, 1515, 1, 0, 0, 0, 242, 1525, 1, 0, 0, 0, 244, 1527, 1, 0, 0, 0, 246, 1531, 1, 0, 0, 0, 248, 1535, 1, 0, 0, 0, 250, 1540, 1, 0, 0, 0, 252, 1559, 1, 0, 0, 0, 254, 1563, 1, 0, 0, 0, 256, 1568, 1, 0, 0, 0, 258, 1581, 1, 0, 0, 0, 260, 1593, 1, 0, 0, 0, 262, 1604, 1, 0, 0, 0, 264, 1609, 1, 0, 0, 0, 266, 1613, 1, 0, 0, 0, 268, 1625, 1, 0, 0, 0, 270, 1648, 1, 0, 0, 0, 272, 1650, 1, 0, 0, 0, 274, 1658, 1, 0, 0, 0, 276, 1662, 1, 0, 0, 0, 278, 1726, 1, 0, 0, 0, 280, 1728, 1, 0, 0, 0, 282, 1732, 1, 0, 0, 0, 284, 1734, 1, 0, 0, 0, 286, 1738, 1, 0, 0, 0, 288, 1742, 1, 0, 0, 0, 290, 1746, 1, 0, 0, 0, 292, 1752, 1, 0, 0, 0, 294, 1756, 1, 0, 0, 0, 296, 1799, 1, 0, 0, 0, 298, 1801, 1, 0, 0, 0, 300, 1804, 1, 0, 0, 0, 302, 1811, 1, 0, 0, 0, 304, 1822, 1, 0, 0, 0, 306, 1824, 1, 0, 0, 0, 308, 1829, 1, 0, 0, 0, 310, 1832, 1, 0, 0, 0, 312, 1851, 1, 0, 0, 0, 314, 1864, 1, 0, 0, 0, 316, 1867, 1, 0, 0, 0, 318, 1882, 1, 0, 0, 0, 320, 1890, 1, 0, 0, 0, 322, 1909, 1, 0, 0, 0, 324, 1911, 1, 0, 0, 0, 326, 1919, 1, 0, 0, 0, 328, 1955, 1, 0, 0, 0, 330, 1957, 1, 0, 0, 0, 332, 1970, 1, 0, 0, 0, 334, 1972, 1, 0, 0, 0, 336, 1987, 1, 0, 0, 0, 338, 2013, 1, 0, 0, 0, 340, 2015, 1, 0, 0, 0, 342, 2041, 1, 0, 0, 0, 344, 2044, 1, 0, 0, 0, 346, 2059, 1, 0, 0, 0, 348, 2088, 1, 0, 0, 0, 350, 2090, 1, 0, 0, 0, 352, 2098, 1, 0, 0, 0, 354, 2102, 1, 0, 0, 0, 356, 2115, 1, 0, 0, 0, 358, 2117, 1, 0, 0, 0, 360, 2129, 1, 0, 0, 0, 362, 2153, 1, 0, 0, 0, 364, 2161, 1, 0, 0, 0, 366, 2182, 1, 0, 0, 0, 368, 2191, 1, 0, 0, 0, 370, 2195, 1, 0, 0, 0, 372, 2213, 1, 0, 0, 0, 374, 2219, 1, 0, 0, 0, 376, 2223, 1, 0, 0, 0, 378, 2229, 1, 0, 0, 0, 380, 2237, 1, 0, 0, 0, 382, 2239, 1, 0, 0, 0, 384, 2253, 1, 0, 0, 0, 386, 2255, 1, 0, 0, 0, 388, 2258, 1, 0, 0, 0, 390, 2262, 1, 0, 0, 0, 392, 2266, 1, 0, 0, 0, 394, 2269, 1, 0, 0, 0, 396, 2273, 1, 0, 0, 0, 398, 2277, 1, 
0, 0, 0, 400, 2287, 1, 0, 0, 0, 402, 2291, 1, 0, 0, 0, 404, 2295, 1, 0, 0, 0, 406, 2309, 1, 0, 0, 0, 408, 2311, 1, 0, 0, 0, 410, 2321, 1, 0, 0, 0, 412, 2325, 1, 0, 0, 0, 414, 2337, 1, 0, 0, 0, 416, 2346, 1, 0, 0, 0, 418, 2361, 1, 0, 0, 0, 420, 2376, 1, 0, 0, 0, 422, 2378, 1, 0, 0, 0, 424, 2389, 1, 0, 0, 0, 426, 2391, 1, 0, 0, 0, 428, 2394, 1, 0, 0, 0, 430, 2397, 1, 0, 0, 0, 432, 2400, 1, 0, 0, 0, 434, 2405, 1, 0, 0, 0, 436, 2409, 1, 0, 0, 0, 438, 2411, 1, 0, 0, 0, 440, 2568, 1, 0, 0, 0, 442, 443, 3, 2, 1, 0, 443, 444, 5, 0, 0, 1, 444, 1, 1, 0, 0, 0, 445, 471, 3, 4, 2, 0, 446, 471, 3, 148, 74, 0, 447, 471, 3, 158, 79, 0, 448, 471, 3, 182, 91, 0, 449, 471, 3, 250, 125, 0, 450, 471, 3, 326, 163, 0, 451, 471, 3, 366, 183, 0, 452, 471, 3, 368, 184, 0, 453, 471, 3, 238, 119, 0, 454, 471, 3, 244, 122, 0, 455, 471, 3, 358, 179, 0, 456, 471, 3, 240, 120, 0, 457, 471, 3, 246, 123, 0, 458, 471, 3, 346, 173, 0, 459, 471, 3, 374, 187, 0, 460, 471, 3, 372, 186, 0, 461, 471, 3, 302, 151, 0, 462, 471, 3, 370, 185, 0, 463, 471, 3, 324, 162, 0, 464, 471, 3, 376, 188, 0, 465, 471, 3, 378, 189, 0, 466, 471, 3, 360, 180, 0, 467, 471, 3, 248, 124, 0, 468, 471, 3, 364, 182, 0, 469, 471, 3, 10, 5, 0, 470, 445, 1, 0, 0, 0, 470, 446, 1, 0, 0, 0, 470, 447, 1, 0, 0, 0, 470, 448, 1, 0, 0, 0, 470, 449, 1, 0, 0, 0, 470, 450, 1, 0, 0, 0, 470, 451, 1, 0, 0, 0, 470, 452, 1, 0, 0, 0, 470, 453, 1, 0, 0, 0, 470, 454, 1, 0, 0, 0, 470, 455, 1, 0, 0, 0, 470, 456, 1, 0, 0, 0, 470, 457, 1, 0, 0, 0, 470, 458, 1, 0, 0, 0, 470, 459, 1, 0, 0, 0, 470, 460, 1, 0, 0, 0, 470, 461, 1, 0, 0, 0, 470, 462, 1, 0, 0, 0, 470, 463, 1, 0, 0, 0, 470, 464, 1, 0, 0, 0, 470, 465, 1, 0, 0, 0, 470, 466, 1, 0, 0, 0, 470, 467, 1, 0, 0, 0, 470, 468, 1, 0, 0, 0, 470, 469, 1, 0, 0, 0, 471, 3, 1, 0, 0, 0, 472, 474, 3, 6, 3, 0, 473, 472, 1, 0, 0, 0, 473, 474, 1, 0, 0, 0, 474, 475, 1, 0, 0, 0, 475, 476, 3, 14, 7, 0, 476, 5, 1, 0, 0, 0, 477, 478, 5, 28, 0, 0, 478, 479, 3, 8, 4, 0, 479, 485, 5, 167, 0, 0, 480, 481, 3, 8, 4, 0, 481, 482, 5, 167, 0, 0, 482, 484, 1, 0, 0, 0, 483, 480, 1, 0, 0, 0, 484, 487, 1, 0, 0, 0, 485, 483, 1, 0, 0, 0, 485, 486, 1, 0, 0, 0, 486, 7, 1, 0, 0, 0, 487, 485, 1, 0, 0, 0, 488, 489, 5, 4, 0, 0, 489, 490, 3, 188, 94, 0, 490, 9, 1, 0, 0, 0, 491, 492, 3, 6, 3, 0, 492, 493, 3, 134, 67, 0, 493, 11, 1, 0, 0, 0, 494, 495, 3, 60, 30, 0, 495, 13, 1, 0, 0, 0, 496, 497, 3, 40, 20, 0, 497, 499, 3, 16, 8, 0, 498, 500, 3, 38, 19, 0, 499, 498, 1, 0, 0, 0, 499, 500, 1, 0, 0, 0, 500, 502, 1, 0, 0, 0, 501, 503, 3, 54, 27, 0, 502, 501, 1, 0, 0, 0, 502, 503, 1, 0, 0, 0, 503, 505, 1, 0, 0, 0, 504, 506, 3, 50, 25, 0, 505, 504, 1, 0, 0, 0, 505, 506, 1, 0, 0, 0, 506, 508, 1, 0, 0, 0, 507, 509, 3, 56, 28, 0, 508, 507, 1, 0, 0, 0, 508, 509, 1, 0, 0, 0, 509, 511, 1, 0, 0, 0, 510, 512, 3, 58, 29, 0, 511, 510, 1, 0, 0, 0, 511, 512, 1, 0, 0, 0, 512, 15, 1, 0, 0, 0, 513, 514, 5, 50, 0, 0, 514, 519, 3, 18, 9, 0, 515, 516, 5, 168, 0, 0, 516, 518, 3, 18, 9, 0, 517, 515, 1, 0, 0, 0, 518, 521, 1, 0, 0, 0, 519, 517, 1, 0, 0, 0, 519, 520, 1, 0, 0, 0, 520, 534, 1, 0, 0, 0, 521, 519, 1, 0, 0, 0, 522, 530, 5, 168, 0, 0, 523, 525, 3, 12, 6, 0, 524, 526, 5, 13, 0, 0, 525, 524, 1, 0, 0, 0, 525, 526, 1, 0, 0, 0, 526, 527, 1, 0, 0, 0, 527, 528, 5, 4, 0, 0, 528, 531, 1, 0, 0, 0, 529, 531, 3, 36, 18, 0, 530, 523, 1, 0, 0, 0, 530, 529, 1, 0, 0, 0, 531, 533, 1, 0, 0, 0, 532, 522, 1, 0, 0, 0, 533, 536, 1, 0, 0, 0, 534, 532, 1, 0, 0, 0, 534, 535, 1, 0, 0, 0, 535, 17, 1, 0, 0, 0, 536, 534, 1, 0, 0, 0, 537, 541, 3, 30, 15, 0, 538, 541, 3, 20, 10, 0, 539, 541, 3, 26, 13, 0, 540, 537, 1, 0, 
0, 0, 540, 538, 1, 0, 0, 0, 540, 539, 1, 0, 0, 0, 541, 19, 1, 0, 0, 0, 542, 543, 5, 86, 0, 0, 543, 544, 5, 122, 0, 0, 544, 545, 5, 170, 0, 0, 545, 551, 3, 30, 15, 0, 546, 547, 5, 11, 0, 0, 547, 548, 5, 170, 0, 0, 548, 549, 3, 22, 11, 0, 549, 550, 5, 171, 0, 0, 550, 552, 1, 0, 0, 0, 551, 546, 1, 0, 0, 0, 551, 552, 1, 0, 0, 0, 552, 558, 1, 0, 0, 0, 553, 554, 5, 32, 0, 0, 554, 555, 5, 170, 0, 0, 555, 556, 3, 24, 12, 0, 556, 557, 5, 171, 0, 0, 557, 559, 1, 0, 0, 0, 558, 553, 1, 0, 0, 0, 558, 559, 1, 0, 0, 0, 559, 560, 1, 0, 0, 0, 560, 561, 5, 171, 0, 0, 561, 21, 1, 0, 0, 0, 562, 567, 3, 30, 15, 0, 563, 564, 5, 168, 0, 0, 564, 566, 3, 30, 15, 0, 565, 563, 1, 0, 0, 0, 566, 569, 1, 0, 0, 0, 567, 565, 1, 0, 0, 0, 567, 568, 1, 0, 0, 0, 568, 23, 1, 0, 0, 0, 569, 567, 1, 0, 0, 0, 570, 575, 3, 30, 15, 0, 571, 572, 5, 168, 0, 0, 572, 574, 3, 30, 15, 0, 573, 571, 1, 0, 0, 0, 574, 577, 1, 0, 0, 0, 575, 573, 1, 0, 0, 0, 575, 576, 1, 0, 0, 0, 576, 25, 1, 0, 0, 0, 577, 575, 1, 0, 0, 0, 578, 579, 3, 30, 15, 0, 579, 583, 3, 28, 14, 0, 580, 582, 3, 28, 14, 0, 581, 580, 1, 0, 0, 0, 582, 585, 1, 0, 0, 0, 583, 581, 1, 0, 0, 0, 583, 584, 1, 0, 0, 0, 584, 27, 1, 0, 0, 0, 585, 583, 1, 0, 0, 0, 586, 587, 5, 146, 0, 0, 587, 588, 3, 30, 15, 0, 588, 29, 1, 0, 0, 0, 589, 592, 3, 32, 16, 0, 590, 591, 5, 92, 0, 0, 591, 593, 3, 60, 30, 0, 592, 590, 1, 0, 0, 0, 592, 593, 1, 0, 0, 0, 593, 31, 1, 0, 0, 0, 594, 599, 3, 252, 126, 0, 595, 597, 5, 13, 0, 0, 596, 595, 1, 0, 0, 0, 596, 597, 1, 0, 0, 0, 597, 598, 1, 0, 0, 0, 598, 600, 3, 34, 17, 0, 599, 596, 1, 0, 0, 0, 599, 600, 1, 0, 0, 0, 600, 33, 1, 0, 0, 0, 601, 604, 5, 4, 0, 0, 602, 604, 3, 440, 220, 0, 603, 601, 1, 0, 0, 0, 603, 602, 1, 0, 0, 0, 604, 35, 1, 0, 0, 0, 605, 606, 5, 139, 0, 0, 606, 607, 5, 170, 0, 0, 607, 609, 3, 102, 51, 0, 608, 610, 5, 13, 0, 0, 609, 608, 1, 0, 0, 0, 609, 610, 1, 0, 0, 0, 610, 611, 1, 0, 0, 0, 611, 612, 5, 4, 0, 0, 612, 622, 1, 0, 0, 0, 613, 614, 5, 168, 0, 0, 614, 616, 3, 102, 51, 0, 615, 617, 5, 13, 0, 0, 616, 615, 1, 0, 0, 0, 616, 617, 1, 0, 0, 0, 617, 618, 1, 0, 0, 0, 618, 619, 5, 4, 0, 0, 619, 621, 1, 0, 0, 0, 620, 613, 1, 0, 0, 0, 621, 624, 1, 0, 0, 0, 622, 620, 1, 0, 0, 0, 622, 623, 1, 0, 0, 0, 623, 625, 1, 0, 0, 0, 624, 622, 1, 0, 0, 0, 625, 626, 5, 171, 0, 0, 626, 37, 1, 0, 0, 0, 627, 628, 5, 136, 0, 0, 628, 629, 3, 12, 6, 0, 629, 39, 1, 0, 0, 0, 630, 631, 5, 115, 0, 0, 631, 632, 3, 42, 21, 0, 632, 41, 1, 0, 0, 0, 633, 635, 3, 44, 22, 0, 634, 633, 1, 0, 0, 0, 634, 635, 1, 0, 0, 0, 635, 637, 1, 0, 0, 0, 636, 638, 5, 34, 0, 0, 637, 636, 1, 0, 0, 0, 637, 638, 1, 0, 0, 0, 638, 651, 1, 0, 0, 0, 639, 652, 5, 176, 0, 0, 640, 641, 3, 12, 6, 0, 641, 648, 3, 48, 24, 0, 642, 643, 5, 168, 0, 0, 643, 644, 3, 12, 6, 0, 644, 645, 3, 48, 24, 0, 645, 647, 1, 0, 0, 0, 646, 642, 1, 0, 0, 0, 647, 650, 1, 0, 0, 0, 648, 646, 1, 0, 0, 0, 648, 649, 1, 0, 0, 0, 649, 652, 1, 0, 0, 0, 650, 648, 1, 0, 0, 0, 651, 639, 1, 0, 0, 0, 651, 640, 1, 0, 0, 0, 652, 43, 1, 0, 0, 0, 653, 657, 5, 1, 0, 0, 654, 656, 3, 46, 23, 0, 655, 654, 1, 0, 0, 0, 656, 659, 1, 0, 0, 0, 657, 655, 1, 0, 0, 0, 657, 658, 1, 0, 0, 0, 658, 660, 1, 0, 0, 0, 659, 657, 1, 0, 0, 0, 660, 661, 5, 2, 0, 0, 661, 45, 1, 0, 0, 0, 662, 663, 5, 101, 0, 0, 663, 664, 5, 170, 0, 0, 664, 668, 3, 252, 126, 0, 665, 667, 3, 328, 164, 0, 666, 665, 1, 0, 0, 0, 667, 670, 1, 0, 0, 0, 668, 666, 1, 0, 0, 0, 668, 669, 1, 0, 0, 0, 669, 671, 1, 0, 0, 0, 670, 668, 1, 0, 0, 0, 671, 672, 5, 171, 0, 0, 672, 690, 1, 0, 0, 0, 673, 674, 5, 47, 0, 0, 674, 675, 5, 170, 0, 0, 675, 676, 3, 252, 126, 0, 676, 677, 3, 328, 164, 0, 677, 
678, 5, 171, 0, 0, 678, 690, 1, 0, 0, 0, 679, 680, 5, 102, 0, 0, 680, 681, 5, 170, 0, 0, 681, 682, 3, 252, 126, 0, 682, 683, 5, 171, 0, 0, 683, 690, 1, 0, 0, 0, 684, 685, 5, 48, 0, 0, 685, 686, 5, 170, 0, 0, 686, 687, 3, 252, 126, 0, 687, 688, 5, 171, 0, 0, 688, 690, 1, 0, 0, 0, 689, 662, 1, 0, 0, 0, 689, 673, 1, 0, 0, 0, 689, 679, 1, 0, 0, 0, 689, 684, 1, 0, 0, 0, 690, 692, 1, 0, 0, 0, 691, 693, 5, 204, 0, 0, 692, 691, 1, 0, 0, 0, 692, 693, 1, 0, 0, 0, 693, 47, 1, 0, 0, 0, 694, 695, 5, 13, 0, 0, 695, 697, 3, 440, 220, 0, 696, 694, 1, 0, 0, 0, 696, 697, 1, 0, 0, 0, 697, 49, 1, 0, 0, 0, 698, 699, 5, 95, 0, 0, 699, 700, 5, 17, 0, 0, 700, 701, 3, 12, 6, 0, 701, 708, 3, 52, 26, 0, 702, 703, 5, 168, 0, 0, 703, 704, 3, 12, 6, 0, 704, 705, 3, 52, 26, 0, 705, 707, 1, 0, 0, 0, 706, 702, 1, 0, 0, 0, 707, 710, 1, 0, 0, 0, 708, 706, 1, 0, 0, 0, 708, 709, 1, 0, 0, 0, 709, 51, 1, 0, 0, 0, 710, 708, 1, 0, 0, 0, 711, 713, 7, 0, 0, 0, 712, 711, 1, 0, 0, 0, 712, 713, 1, 0, 0, 0, 713, 716, 1, 0, 0, 0, 714, 715, 5, 89, 0, 0, 715, 717, 7, 1, 0, 0, 716, 714, 1, 0, 0, 0, 716, 717, 1, 0, 0, 0, 717, 53, 1, 0, 0, 0, 718, 719, 5, 55, 0, 0, 719, 720, 5, 17, 0, 0, 720, 725, 3, 12, 6, 0, 721, 722, 5, 168, 0, 0, 722, 724, 3, 12, 6, 0, 723, 721, 1, 0, 0, 0, 724, 727, 1, 0, 0, 0, 725, 723, 1, 0, 0, 0, 725, 726, 1, 0, 0, 0, 726, 55, 1, 0, 0, 0, 727, 725, 1, 0, 0, 0, 728, 729, 5, 75, 0, 0, 729, 730, 3, 96, 48, 0, 730, 57, 1, 0, 0, 0, 731, 732, 5, 90, 0, 0, 732, 733, 3, 96, 48, 0, 733, 59, 1, 0, 0, 0, 734, 735, 6, 30, -1, 0, 735, 736, 3, 62, 31, 0, 736, 742, 1, 0, 0, 0, 737, 738, 10, 1, 0, 0, 738, 739, 5, 94, 0, 0, 739, 741, 3, 62, 31, 0, 740, 737, 1, 0, 0, 0, 741, 744, 1, 0, 0, 0, 742, 740, 1, 0, 0, 0, 742, 743, 1, 0, 0, 0, 743, 61, 1, 0, 0, 0, 744, 742, 1, 0, 0, 0, 745, 746, 6, 31, -1, 0, 746, 747, 3, 64, 32, 0, 747, 753, 1, 0, 0, 0, 748, 749, 10, 1, 0, 0, 749, 750, 5, 12, 0, 0, 750, 752, 3, 64, 32, 0, 751, 748, 1, 0, 0, 0, 752, 755, 1, 0, 0, 0, 753, 751, 1, 0, 0, 0, 753, 754, 1, 0, 0, 0, 754, 63, 1, 0, 0, 0, 755, 753, 1, 0, 0, 0, 756, 758, 5, 88, 0, 0, 757, 756, 1, 0, 0, 0, 757, 758, 1, 0, 0, 0, 758, 759, 1, 0, 0, 0, 759, 760, 3, 66, 33, 0, 760, 65, 1, 0, 0, 0, 761, 767, 3, 68, 34, 0, 762, 764, 5, 66, 0, 0, 763, 765, 5, 88, 0, 0, 764, 763, 1, 0, 0, 0, 764, 765, 1, 0, 0, 0, 765, 766, 1, 0, 0, 0, 766, 768, 5, 197, 0, 0, 767, 762, 1, 0, 0, 0, 767, 768, 1, 0, 0, 0, 768, 67, 1, 0, 0, 0, 769, 775, 3, 70, 35, 0, 770, 775, 3, 72, 36, 0, 771, 775, 3, 78, 39, 0, 772, 775, 3, 90, 45, 0, 773, 775, 3, 92, 46, 0, 774, 769, 1, 0, 0, 0, 774, 770, 1, 0, 0, 0, 774, 771, 1, 0, 0, 0, 774, 772, 1, 0, 0, 0, 774, 773, 1, 0, 0, 0, 775, 69, 1, 0, 0, 0, 776, 777, 3, 94, 47, 0, 777, 778, 5, 16, 0, 0, 778, 779, 3, 94, 47, 0, 779, 780, 5, 12, 0, 0, 780, 781, 3, 94, 47, 0, 781, 71, 1, 0, 0, 0, 782, 789, 3, 94, 47, 0, 783, 786, 3, 74, 37, 0, 784, 786, 3, 76, 38, 0, 785, 783, 1, 0, 0, 0, 785, 784, 1, 0, 0, 0, 786, 787, 1, 0, 0, 0, 787, 788, 3, 94, 47, 0, 788, 790, 1, 0, 0, 0, 789, 785, 1, 0, 0, 0, 789, 790, 1, 0, 0, 0, 790, 73, 1, 0, 0, 0, 791, 792, 7, 2, 0, 0, 792, 75, 1, 0, 0, 0, 793, 800, 5, 190, 0, 0, 794, 800, 5, 191, 0, 0, 795, 800, 5, 188, 0, 0, 796, 800, 5, 189, 0, 0, 797, 800, 5, 186, 0, 0, 798, 800, 5, 187, 0, 0, 799, 793, 1, 0, 0, 0, 799, 794, 1, 0, 0, 0, 799, 795, 1, 0, 0, 0, 799, 796, 1, 0, 0, 0, 799, 797, 1, 0, 0, 0, 799, 798, 1, 0, 0, 0, 800, 77, 1, 0, 0, 0, 801, 805, 3, 80, 40, 0, 802, 805, 3, 86, 43, 0, 803, 805, 3, 88, 44, 0, 804, 801, 1, 0, 0, 0, 804, 802, 1, 0, 0, 0, 804, 803, 1, 0, 0, 0, 805, 79, 1, 0, 0, 0, 806, 807, 3, 82, 41, 0, 
807, 808, 5, 60, 0, 0, 808, 809, 5, 170, 0, 0, 809, 812, 3, 84, 42, 0, 810, 811, 5, 168, 0, 0, 811, 813, 3, 84, 42, 0, 812, 810, 1, 0, 0, 0, 813, 814, 1, 0, 0, 0, 814, 812, 1, 0, 0, 0, 814, 815, 1, 0, 0, 0, 815, 816, 1, 0, 0, 0, 816, 817, 5, 171, 0, 0, 817, 81, 1, 0, 0, 0, 818, 819, 5, 170, 0, 0, 819, 824, 3, 94, 47, 0, 820, 821, 5, 168, 0, 0, 821, 823, 3, 94, 47, 0, 822, 820, 1, 0, 0, 0, 823, 826, 1, 0, 0, 0, 824, 822, 1, 0, 0, 0, 824, 825, 1, 0, 0, 0, 825, 827, 1, 0, 0, 0, 826, 824, 1, 0, 0, 0, 827, 828, 5, 171, 0, 0, 828, 83, 1, 0, 0, 0, 829, 830, 5, 170, 0, 0, 830, 835, 3, 12, 6, 0, 831, 832, 5, 168, 0, 0, 832, 834, 3, 12, 6, 0, 833, 831, 1, 0, 0, 0, 834, 837, 1, 0, 0, 0, 835, 833, 1, 0, 0, 0, 835, 836, 1, 0, 0, 0, 836, 838, 1, 0, 0, 0, 837, 835, 1, 0, 0, 0, 838, 839, 5, 171, 0, 0, 839, 85, 1, 0, 0, 0, 840, 841, 3, 94, 47, 0, 841, 842, 5, 60, 0, 0, 842, 843, 5, 170, 0, 0, 843, 846, 3, 12, 6, 0, 844, 845, 5, 168, 0, 0, 845, 847, 3, 12, 6, 0, 846, 844, 1, 0, 0, 0, 847, 848, 1, 0, 0, 0, 848, 846, 1, 0, 0, 0, 848, 849, 1, 0, 0, 0, 849, 850, 1, 0, 0, 0, 850, 851, 5, 171, 0, 0, 851, 87, 1, 0, 0, 0, 852, 865, 3, 94, 47, 0, 853, 854, 5, 170, 0, 0, 854, 859, 3, 94, 47, 0, 855, 856, 5, 168, 0, 0, 856, 858, 3, 94, 47, 0, 857, 855, 1, 0, 0, 0, 858, 861, 1, 0, 0, 0, 859, 857, 1, 0, 0, 0, 859, 860, 1, 0, 0, 0, 860, 862, 1, 0, 0, 0, 861, 859, 1, 0, 0, 0, 862, 863, 5, 171, 0, 0, 863, 865, 1, 0, 0, 0, 864, 852, 1, 0, 0, 0, 864, 853, 1, 0, 0, 0, 865, 866, 1, 0, 0, 0, 866, 867, 5, 60, 0, 0, 867, 868, 3, 102, 51, 0, 868, 89, 1, 0, 0, 0, 869, 870, 5, 42, 0, 0, 870, 871, 3, 94, 47, 0, 871, 91, 1, 0, 0, 0, 872, 873, 3, 94, 47, 0, 873, 875, 5, 66, 0, 0, 874, 876, 5, 88, 0, 0, 875, 874, 1, 0, 0, 0, 875, 876, 1, 0, 0, 0, 876, 877, 1, 0, 0, 0, 877, 879, 5, 91, 0, 0, 878, 880, 5, 126, 0, 0, 879, 878, 1, 0, 0, 0, 879, 880, 1, 0, 0, 0, 880, 881, 1, 0, 0, 0, 881, 883, 5, 170, 0, 0, 882, 884, 5, 93, 0, 0, 883, 882, 1, 0, 0, 0, 883, 884, 1, 0, 0, 0, 884, 885, 1, 0, 0, 0, 885, 893, 3, 186, 93, 0, 886, 888, 5, 168, 0, 0, 887, 889, 5, 93, 0, 0, 888, 887, 1, 0, 0, 0, 888, 889, 1, 0, 0, 0, 889, 890, 1, 0, 0, 0, 890, 892, 3, 186, 93, 0, 891, 886, 1, 0, 0, 0, 892, 895, 1, 0, 0, 0, 893, 891, 1, 0, 0, 0, 893, 894, 1, 0, 0, 0, 894, 896, 1, 0, 0, 0, 895, 893, 1, 0, 0, 0, 896, 897, 5, 171, 0, 0, 897, 93, 1, 0, 0, 0, 898, 903, 3, 96, 48, 0, 899, 900, 5, 196, 0, 0, 900, 902, 3, 96, 48, 0, 901, 899, 1, 0, 0, 0, 902, 905, 1, 0, 0, 0, 903, 901, 1, 0, 0, 0, 903, 904, 1, 0, 0, 0, 904, 95, 1, 0, 0, 0, 905, 903, 1, 0, 0, 0, 906, 911, 3, 98, 49, 0, 907, 908, 7, 3, 0, 0, 908, 910, 3, 98, 49, 0, 909, 907, 1, 0, 0, 0, 910, 913, 1, 0, 0, 0, 911, 909, 1, 0, 0, 0, 911, 912, 1, 0, 0, 0, 912, 97, 1, 0, 0, 0, 913, 911, 1, 0, 0, 0, 914, 919, 3, 100, 50, 0, 915, 916, 7, 4, 0, 0, 916, 918, 3, 100, 50, 0, 917, 915, 1, 0, 0, 0, 918, 921, 1, 0, 0, 0, 919, 917, 1, 0, 0, 0, 919, 920, 1, 0, 0, 0, 920, 99, 1, 0, 0, 0, 921, 919, 1, 0, 0, 0, 922, 926, 3, 102, 51, 0, 923, 924, 7, 3, 0, 0, 924, 926, 3, 100, 50, 0, 925, 922, 1, 0, 0, 0, 925, 923, 1, 0, 0, 0, 926, 101, 1, 0, 0, 0, 927, 932, 3, 116, 58, 0, 928, 931, 3, 104, 52, 0, 929, 931, 3, 110, 55, 0, 930, 928, 1, 0, 0, 0, 930, 929, 1, 0, 0, 0, 931, 934, 1, 0, 0, 0, 932, 930, 1, 0, 0, 0, 932, 933, 1, 0, 0, 0, 933, 103, 1, 0, 0, 0, 934, 932, 1, 0, 0, 0, 935, 938, 5, 177, 0, 0, 936, 939, 3, 108, 54, 0, 937, 939, 3, 106, 53, 0, 938, 936, 1, 0, 0, 0, 938, 937, 1, 0, 0, 0, 939, 105, 1, 0, 0, 0, 940, 946, 3, 440, 220, 0, 941, 946, 3, 436, 218, 0, 942, 946, 3, 122, 61, 0, 943, 946, 3, 144, 72, 0, 944, 946, 3, 134, 67, 
0, 945, 940, 1, 0, 0, 0, 945, 941, 1, 0, 0, 0, 945, 942, 1, 0, 0, 0, 945, 943, 1, 0, 0, 0, 945, 944, 1, 0, 0, 0, 946, 107, 1, 0, 0, 0, 947, 948, 7, 5, 0, 0, 948, 950, 5, 170, 0, 0, 949, 951, 3, 12, 6, 0, 950, 949, 1, 0, 0, 0, 950, 951, 1, 0, 0, 0, 951, 952, 1, 0, 0, 0, 952, 953, 5, 171, 0, 0, 953, 109, 1, 0, 0, 0, 954, 957, 3, 114, 57, 0, 955, 957, 3, 112, 56, 0, 956, 954, 1, 0, 0, 0, 956, 955, 1, 0, 0, 0, 957, 111, 1, 0, 0, 0, 958, 960, 5, 172, 0, 0, 959, 961, 3, 12, 6, 0, 960, 959, 1, 0, 0, 0, 960, 961, 1, 0, 0, 0, 961, 962, 1, 0, 0, 0, 962, 964, 5, 169, 0, 0, 963, 965, 3, 12, 6, 0, 964, 963, 1, 0, 0, 0, 964, 965, 1, 0, 0, 0, 965, 966, 1, 0, 0, 0, 966, 967, 5, 173, 0, 0, 967, 113, 1, 0, 0, 0, 968, 970, 5, 172, 0, 0, 969, 971, 3, 12, 6, 0, 970, 969, 1, 0, 0, 0, 970, 971, 1, 0, 0, 0, 971, 972, 1, 0, 0, 0, 972, 973, 5, 173, 0, 0, 973, 115, 1, 0, 0, 0, 974, 989, 3, 120, 60, 0, 975, 989, 3, 118, 59, 0, 976, 989, 3, 122, 61, 0, 977, 989, 3, 124, 62, 0, 978, 989, 3, 126, 63, 0, 979, 989, 3, 128, 64, 0, 980, 989, 3, 132, 66, 0, 981, 989, 3, 134, 67, 0, 982, 989, 3, 136, 68, 0, 983, 989, 3, 138, 69, 0, 984, 989, 3, 140, 70, 0, 985, 989, 3, 142, 71, 0, 986, 989, 3, 144, 72, 0, 987, 989, 3, 146, 73, 0, 988, 974, 1, 0, 0, 0, 988, 975, 1, 0, 0, 0, 988, 976, 1, 0, 0, 0, 988, 977, 1, 0, 0, 0, 988, 978, 1, 0, 0, 0, 988, 979, 1, 0, 0, 0, 988, 980, 1, 0, 0, 0, 988, 981, 1, 0, 0, 0, 988, 982, 1, 0, 0, 0, 988, 983, 1, 0, 0, 0, 988, 984, 1, 0, 0, 0, 988, 985, 1, 0, 0, 0, 988, 986, 1, 0, 0, 0, 988, 987, 1, 0, 0, 0, 989, 117, 1, 0, 0, 0, 990, 996, 3, 440, 220, 0, 991, 994, 5, 177, 0, 0, 992, 995, 3, 440, 220, 0, 993, 995, 3, 436, 218, 0, 994, 992, 1, 0, 0, 0, 994, 993, 1, 0, 0, 0, 995, 997, 1, 0, 0, 0, 996, 991, 1, 0, 0, 0, 996, 997, 1, 0, 0, 0, 997, 119, 1, 0, 0, 0, 998, 1004, 3, 432, 216, 0, 999, 1004, 3, 436, 218, 0, 1000, 1004, 5, 199, 0, 0, 1001, 1004, 5, 198, 0, 0, 1002, 1004, 5, 197, 0, 0, 1003, 998, 1, 0, 0, 0, 1003, 999, 1, 0, 0, 0, 1003, 1000, 1, 0, 0, 0, 1003, 1001, 1, 0, 0, 0, 1003, 1002, 1, 0, 0, 0, 1004, 121, 1, 0, 0, 0, 1005, 1006, 7, 6, 0, 0, 1006, 123, 1, 0, 0, 0, 1007, 1009, 5, 172, 0, 0, 1008, 1010, 3, 12, 6, 0, 1009, 1008, 1, 0, 0, 0, 1009, 1010, 1, 0, 0, 0, 1010, 1015, 1, 0, 0, 0, 1011, 1012, 5, 168, 0, 0, 1012, 1014, 3, 12, 6, 0, 1013, 1011, 1, 0, 0, 0, 1014, 1017, 1, 0, 0, 0, 1015, 1013, 1, 0, 0, 0, 1015, 1016, 1, 0, 0, 0, 1016, 1018, 1, 0, 0, 0, 1017, 1015, 1, 0, 0, 0, 1018, 1019, 5, 173, 0, 0, 1019, 125, 1, 0, 0, 0, 1020, 1021, 5, 174, 0, 0, 1021, 1022, 3, 12, 6, 0, 1022, 1023, 5, 169, 0, 0, 1023, 1031, 3, 12, 6, 0, 1024, 1025, 5, 168, 0, 0, 1025, 1026, 3, 12, 6, 0, 1026, 1027, 5, 169, 0, 0, 1027, 1028, 3, 12, 6, 0, 1028, 1030, 1, 0, 0, 0, 1029, 1024, 1, 0, 0, 0, 1030, 1033, 1, 0, 0, 0, 1031, 1029, 1, 0, 0, 0, 1031, 1032, 1, 0, 0, 0, 1032, 1034, 1, 0, 0, 0, 1033, 1031, 1, 0, 0, 0, 1034, 1035, 5, 175, 0, 0, 1035, 1039, 1, 0, 0, 0, 1036, 1037, 5, 174, 0, 0, 1037, 1039, 5, 175, 0, 0, 1038, 1020, 1, 0, 0, 0, 1038, 1036, 1, 0, 0, 0, 1039, 127, 1, 0, 0, 0, 1040, 1041, 5, 116, 0, 0, 1041, 1042, 5, 170, 0, 0, 1042, 1043, 3, 130, 65, 0, 1043, 1044, 5, 168, 0, 0, 1044, 1045, 3, 12, 6, 0, 1045, 1046, 5, 171, 0, 0, 1046, 129, 1, 0, 0, 0, 1047, 1048, 3, 12, 6, 0, 1048, 131, 1, 0, 0, 0, 1049, 1050, 5, 15, 0, 0, 1050, 1052, 5, 170, 0, 0, 1051, 1053, 5, 34, 0, 0, 1052, 1051, 1, 0, 0, 0, 1052, 1053, 1, 0, 0, 0, 1053, 1054, 1, 0, 0, 0, 1054, 1055, 3, 12, 6, 0, 1055, 1056, 5, 171, 0, 0, 1056, 133, 1, 0, 0, 0, 1057, 1058, 3, 440, 220, 0, 1058, 1067, 5, 170, 0, 0, 1059, 1064, 3, 12, 6, 0, 1060, 1061, 5, 
168, 0, 0, 1061, 1063, 3, 12, 6, 0, 1062, 1060, 1, 0, 0, 0, 1063, 1066, 1, 0, 0, 0, 1064, 1062, 1, 0, 0, 0, 1064, 1065, 1, 0, 0, 0, 1065, 1068, 1, 0, 0, 0, 1066, 1064, 1, 0, 0, 0, 1067, 1059, 1, 0, 0, 0, 1067, 1068, 1, 0, 0, 0, 1068, 1069, 1, 0, 0, 0, 1069, 1070, 5, 171, 0, 0, 1070, 135, 1, 0, 0, 0, 1071, 1072, 5, 24, 0, 0, 1072, 1073, 5, 170, 0, 0, 1073, 1074, 5, 176, 0, 0, 1074, 1075, 5, 171, 0, 0, 1075, 137, 1, 0, 0, 0, 1076, 1077, 5, 24, 0, 0, 1077, 1078, 5, 170, 0, 0, 1078, 1079, 5, 34, 0, 0, 1079, 1080, 3, 12, 6, 0, 1080, 1081, 5, 171, 0, 0, 1081, 139, 1, 0, 0, 0, 1082, 1083, 5, 19, 0, 0, 1083, 1084, 5, 135, 0, 0, 1084, 1085, 3, 12, 6, 0, 1085, 1086, 5, 123, 0, 0, 1086, 1094, 3, 12, 6, 0, 1087, 1088, 5, 135, 0, 0, 1088, 1089, 3, 12, 6, 0, 1089, 1090, 5, 123, 0, 0, 1090, 1091, 3, 12, 6, 0, 1091, 1093, 1, 0, 0, 0, 1092, 1087, 1, 0, 0, 0, 1093, 1096, 1, 0, 0, 0, 1094, 1092, 1, 0, 0, 0, 1094, 1095, 1, 0, 0, 0, 1095, 1099, 1, 0, 0, 0, 1096, 1094, 1, 0, 0, 0, 1097, 1098, 5, 38, 0, 0, 1098, 1100, 3, 12, 6, 0, 1099, 1097, 1, 0, 0, 0, 1099, 1100, 1, 0, 0, 0, 1100, 1101, 1, 0, 0, 0, 1101, 1102, 5, 39, 0, 0, 1102, 141, 1, 0, 0, 0, 1103, 1104, 5, 21, 0, 0, 1104, 1105, 5, 170, 0, 0, 1105, 1106, 3, 12, 6, 0, 1106, 1107, 5, 13, 0, 0, 1107, 1108, 3, 186, 93, 0, 1108, 1109, 5, 171, 0, 0, 1109, 143, 1, 0, 0, 0, 1110, 1111, 5, 170, 0, 0, 1111, 1112, 3, 12, 6, 0, 1112, 1113, 5, 171, 0, 0, 1113, 145, 1, 0, 0, 0, 1114, 1115, 5, 43, 0, 0, 1115, 1116, 5, 170, 0, 0, 1116, 1117, 3, 440, 220, 0, 1117, 1118, 5, 50, 0, 0, 1118, 1119, 3, 12, 6, 0, 1119, 1120, 5, 171, 0, 0, 1120, 147, 1, 0, 0, 0, 1121, 1123, 3, 6, 3, 0, 1122, 1121, 1, 0, 0, 0, 1122, 1123, 1, 0, 0, 0, 1123, 1124, 1, 0, 0, 0, 1124, 1125, 7, 7, 0, 0, 1125, 1126, 5, 65, 0, 0, 1126, 1131, 3, 252, 126, 0, 1127, 1129, 5, 13, 0, 0, 1128, 1127, 1, 0, 0, 0, 1128, 1129, 1, 0, 0, 0, 1129, 1130, 1, 0, 0, 0, 1130, 1132, 3, 34, 17, 0, 1131, 1128, 1, 0, 0, 0, 1131, 1132, 1, 0, 0, 0, 1132, 1144, 1, 0, 0, 0, 1133, 1134, 5, 170, 0, 0, 1134, 1139, 3, 150, 75, 0, 1135, 1136, 5, 168, 0, 0, 1136, 1138, 3, 150, 75, 0, 1137, 1135, 1, 0, 0, 0, 1138, 1141, 1, 0, 0, 0, 1139, 1137, 1, 0, 0, 0, 1139, 1140, 1, 0, 0, 0, 1140, 1142, 1, 0, 0, 0, 1141, 1139, 1, 0, 0, 0, 1142, 1143, 5, 171, 0, 0, 1143, 1145, 1, 0, 0, 0, 1144, 1133, 1, 0, 0, 0, 1144, 1145, 1, 0, 0, 0, 1145, 1146, 1, 0, 0, 0, 1146, 1147, 5, 134, 0, 0, 1147, 1148, 5, 170, 0, 0, 1148, 1153, 3, 154, 77, 0, 1149, 1150, 5, 168, 0, 0, 1150, 1152, 3, 154, 77, 0, 1151, 1149, 1, 0, 0, 0, 1152, 1155, 1, 0, 0, 0, 1153, 1151, 1, 0, 0, 0, 1153, 1154, 1, 0, 0, 0, 1154, 1156, 1, 0, 0, 0, 1155, 1153, 1, 0, 0, 0, 1156, 1160, 5, 171, 0, 0, 1157, 1158, 5, 117, 0, 0, 1158, 1159, 5, 125, 0, 0, 1159, 1161, 3, 156, 78, 0, 1160, 1157, 1, 0, 0, 0, 1160, 1161, 1, 0, 0, 0, 1161, 1163, 1, 0, 0, 0, 1162, 1164, 3, 152, 76, 0, 1163, 1162, 1, 0, 0, 0, 1163, 1164, 1, 0, 0, 0, 1164, 149, 1, 0, 0, 0, 1165, 1168, 3, 440, 220, 0, 1166, 1168, 3, 436, 218, 0, 1167, 1165, 1, 0, 0, 0, 1167, 1166, 1, 0, 0, 0, 1168, 151, 1, 0, 0, 0, 1169, 1170, 5, 108, 0, 0, 1170, 1171, 3, 42, 21, 0, 1171, 153, 1, 0, 0, 0, 1172, 1175, 5, 29, 0, 0, 1173, 1175, 3, 12, 6, 0, 1174, 1172, 1, 0, 0, 0, 1174, 1173, 1, 0, 0, 0, 1175, 155, 1, 0, 0, 0, 1176, 1177, 3, 96, 48, 0, 1177, 1178, 7, 8, 0, 0, 1178, 1183, 1, 0, 0, 0, 1179, 1180, 5, 133, 0, 0, 1180, 1181, 5, 121, 0, 0, 1181, 1183, 5, 29, 0, 0, 1182, 1176, 1, 0, 0, 0, 1182, 1179, 1, 0, 0, 0, 1183, 157, 1, 0, 0, 0, 1184, 1186, 3, 6, 3, 0, 1185, 1184, 1, 0, 0, 0, 1185, 1186, 1, 0, 0, 0, 1186, 1187, 1, 0, 0, 0, 1187, 1188, 5, 129, 0, 
0, 1188, 1193, 3, 252, 126, 0, 1189, 1191, 5, 13, 0, 0, 1190, 1189, 1, 0, 0, 0, 1190, 1191, 1, 0, 0, 0, 1191, 1192, 1, 0, 0, 0, 1192, 1194, 3, 34, 17, 0, 1193, 1190, 1, 0, 0, 0, 1193, 1194, 1, 0, 0, 0, 1194, 1195, 1, 0, 0, 0, 1195, 1200, 3, 162, 81, 0, 1196, 1197, 5, 168, 0, 0, 1197, 1199, 3, 162, 81, 0, 1198, 1196, 1, 0, 0, 0, 1199, 1202, 1, 0, 0, 0, 1200, 1198, 1, 0, 0, 0, 1200, 1201, 1, 0, 0, 0, 1201, 1203, 1, 0, 0, 0, 1202, 1200, 1, 0, 0, 0, 1203, 1204, 5, 136, 0, 0, 1204, 1206, 3, 12, 6, 0, 1205, 1207, 3, 160, 80, 0, 1206, 1205, 1, 0, 0, 0, 1206, 1207, 1, 0, 0, 0, 1207, 159, 1, 0, 0, 0, 1208, 1209, 5, 108, 0, 0, 1209, 1210, 3, 42, 21, 0, 1210, 161, 1, 0, 0, 0, 1211, 1212, 5, 117, 0, 0, 1212, 1220, 3, 164, 82, 0, 1213, 1216, 5, 168, 0, 0, 1214, 1217, 3, 162, 81, 0, 1215, 1217, 3, 164, 82, 0, 1216, 1214, 1, 0, 0, 0, 1216, 1215, 1, 0, 0, 0, 1217, 1219, 1, 0, 0, 0, 1218, 1213, 1, 0, 0, 0, 1219, 1222, 1, 0, 0, 0, 1220, 1218, 1, 0, 0, 0, 1220, 1221, 1, 0, 0, 0, 1221, 1283, 1, 0, 0, 0, 1222, 1220, 1, 0, 0, 0, 1223, 1224, 5, 6, 0, 0, 1224, 1232, 3, 166, 83, 0, 1225, 1228, 5, 168, 0, 0, 1226, 1229, 3, 162, 81, 0, 1227, 1229, 3, 166, 83, 0, 1228, 1226, 1, 0, 0, 0, 1228, 1227, 1, 0, 0, 0, 1229, 1231, 1, 0, 0, 0, 1230, 1225, 1, 0, 0, 0, 1231, 1234, 1, 0, 0, 0, 1232, 1230, 1, 0, 0, 0, 1232, 1233, 1, 0, 0, 0, 1233, 1283, 1, 0, 0, 0, 1234, 1232, 1, 0, 0, 0, 1235, 1236, 5, 104, 0, 0, 1236, 1244, 3, 168, 84, 0, 1237, 1240, 5, 168, 0, 0, 1238, 1241, 3, 162, 81, 0, 1239, 1241, 3, 168, 84, 0, 1240, 1238, 1, 0, 0, 0, 1240, 1239, 1, 0, 0, 0, 1241, 1243, 1, 0, 0, 0, 1242, 1237, 1, 0, 0, 0, 1243, 1246, 1, 0, 0, 0, 1244, 1242, 1, 0, 0, 0, 1244, 1245, 1, 0, 0, 0, 1245, 1283, 1, 0, 0, 0, 1246, 1244, 1, 0, 0, 0, 1247, 1248, 5, 107, 0, 0, 1248, 1256, 3, 170, 85, 0, 1249, 1252, 5, 168, 0, 0, 1250, 1253, 3, 162, 81, 0, 1251, 1253, 3, 170, 85, 0, 1252, 1250, 1, 0, 0, 0, 1252, 1251, 1, 0, 0, 0, 1253, 1255, 1, 0, 0, 0, 1254, 1249, 1, 0, 0, 0, 1255, 1258, 1, 0, 0, 0, 1256, 1254, 1, 0, 0, 0, 1256, 1257, 1, 0, 0, 0, 1257, 1283, 1, 0, 0, 0, 1258, 1256, 1, 0, 0, 0, 1259, 1260, 5, 67, 0, 0, 1260, 1261, 5, 79, 0, 0, 1261, 1269, 3, 172, 86, 0, 1262, 1265, 5, 168, 0, 0, 1263, 1266, 3, 162, 81, 0, 1264, 1266, 3, 172, 86, 0, 1265, 1263, 1, 0, 0, 0, 1265, 1264, 1, 0, 0, 0, 1266, 1268, 1, 0, 0, 0, 1267, 1262, 1, 0, 0, 0, 1268, 1271, 1, 0, 0, 0, 1269, 1267, 1, 0, 0, 0, 1269, 1270, 1, 0, 0, 0, 1270, 1283, 1, 0, 0, 0, 1271, 1269, 1, 0, 0, 0, 1272, 1273, 5, 117, 0, 0, 1273, 1274, 5, 125, 0, 0, 1274, 1279, 3, 176, 88, 0, 1275, 1276, 5, 168, 0, 0, 1276, 1278, 3, 162, 81, 0, 1277, 1275, 1, 0, 0, 0, 1278, 1281, 1, 0, 0, 0, 1279, 1277, 1, 0, 0, 0, 1279, 1280, 1, 0, 0, 0, 1280, 1283, 1, 0, 0, 0, 1281, 1279, 1, 0, 0, 0, 1282, 1211, 1, 0, 0, 0, 1282, 1223, 1, 0, 0, 0, 1282, 1235, 1, 0, 0, 0, 1282, 1247, 1, 0, 0, 0, 1282, 1259, 1, 0, 0, 0, 1282, 1272, 1, 0, 0, 0, 1283, 163, 1, 0, 0, 0, 1284, 1285, 3, 178, 89, 0, 1285, 1286, 5, 184, 0, 0, 1286, 1287, 3, 12, 6, 0, 1287, 165, 1, 0, 0, 0, 1288, 1290, 5, 65, 0, 0, 1289, 1288, 1, 0, 0, 0, 1289, 1290, 1, 0, 0, 0, 1290, 1291, 1, 0, 0, 0, 1291, 1296, 3, 178, 89, 0, 1292, 1294, 5, 3, 0, 0, 1293, 1292, 1, 0, 0, 0, 1293, 1294, 1, 0, 0, 0, 1294, 1295, 1, 0, 0, 0, 1295, 1297, 3, 180, 90, 0, 1296, 1293, 1, 0, 0, 0, 1296, 1297, 1, 0, 0, 0, 1297, 1299, 1, 0, 0, 0, 1298, 1300, 5, 37, 0, 0, 1299, 1298, 1, 0, 0, 0, 1299, 1300, 1, 0, 0, 0, 1300, 1301, 1, 0, 0, 0, 1301, 1302, 3, 12, 6, 0, 1302, 167, 1, 0, 0, 0, 1303, 1305, 5, 65, 0, 0, 1304, 1303, 1, 0, 0, 0, 1304, 1305, 1, 0, 0, 0, 1305, 1306, 1, 0, 0, 0, 1306, 
1308, 3, 178, 89, 0, 1307, 1309, 5, 44, 0, 0, 1308, 1307, 1, 0, 0, 0, 1308, 1309, 1, 0, 0, 0, 1309, 1310, 1, 0, 0, 0, 1310, 1311, 3, 12, 6, 0, 1311, 169, 1, 0, 0, 0, 1312, 1313, 3, 178, 89, 0, 1313, 171, 1, 0, 0, 0, 1314, 1315, 3, 178, 89, 0, 1315, 1316, 5, 137, 0, 0, 1316, 1317, 5, 99, 0, 0, 1317, 1318, 3, 174, 87, 0, 1318, 173, 1, 0, 0, 0, 1319, 1324, 3, 126, 63, 0, 1320, 1324, 3, 124, 62, 0, 1321, 1324, 3, 120, 60, 0, 1322, 1324, 3, 122, 61, 0, 1323, 1319, 1, 0, 0, 0, 1323, 1320, 1, 0, 0, 0, 1323, 1321, 1, 0, 0, 0, 1323, 1322, 1, 0, 0, 0, 1324, 175, 1, 0, 0, 0, 1325, 1326, 3, 96, 48, 0, 1326, 1327, 7, 8, 0, 0, 1327, 1332, 1, 0, 0, 0, 1328, 1329, 5, 133, 0, 0, 1329, 1330, 5, 121, 0, 0, 1330, 1332, 5, 29, 0, 0, 1331, 1325, 1, 0, 0, 0, 1331, 1328, 1, 0, 0, 0, 1332, 177, 1, 0, 0, 0, 1333, 1334, 3, 102, 51, 0, 1334, 179, 1, 0, 0, 0, 1335, 1336, 3, 96, 48, 0, 1336, 181, 1, 0, 0, 0, 1337, 1339, 3, 6, 3, 0, 1338, 1337, 1, 0, 0, 0, 1338, 1339, 1, 0, 0, 0, 1339, 1340, 1, 0, 0, 0, 1340, 1341, 5, 30, 0, 0, 1341, 1342, 5, 50, 0, 0, 1342, 1347, 3, 252, 126, 0, 1343, 1345, 5, 13, 0, 0, 1344, 1343, 1, 0, 0, 0, 1344, 1345, 1, 0, 0, 0, 1345, 1346, 1, 0, 0, 0, 1346, 1348, 3, 34, 17, 0, 1347, 1344, 1, 0, 0, 0, 1347, 1348, 1, 0, 0, 0, 1348, 1351, 1, 0, 0, 0, 1349, 1350, 5, 136, 0, 0, 1350, 1352, 3, 12, 6, 0, 1351, 1349, 1, 0, 0, 0, 1351, 1352, 1, 0, 0, 0, 1352, 1354, 1, 0, 0, 0, 1353, 1355, 3, 184, 92, 0, 1354, 1353, 1, 0, 0, 0, 1354, 1355, 1, 0, 0, 0, 1355, 183, 1, 0, 0, 0, 1356, 1357, 5, 108, 0, 0, 1357, 1358, 3, 42, 21, 0, 1358, 185, 1, 0, 0, 0, 1359, 1361, 3, 188, 94, 0, 1360, 1362, 7, 9, 0, 0, 1361, 1360, 1, 0, 0, 0, 1361, 1362, 1, 0, 0, 0, 1362, 187, 1, 0, 0, 0, 1363, 1379, 3, 216, 108, 0, 1364, 1379, 3, 202, 101, 0, 1365, 1379, 3, 214, 107, 0, 1366, 1379, 3, 212, 106, 0, 1367, 1379, 3, 208, 104, 0, 1368, 1379, 3, 204, 102, 0, 1369, 1379, 3, 206, 103, 0, 1370, 1379, 3, 200, 100, 0, 1371, 1379, 3, 190, 95, 0, 1372, 1379, 3, 210, 105, 0, 1373, 1379, 3, 218, 109, 0, 1374, 1379, 3, 220, 110, 0, 1375, 1379, 3, 222, 111, 0, 1376, 1379, 3, 224, 112, 0, 1377, 1379, 3, 226, 113, 0, 1378, 1363, 1, 0, 0, 0, 1378, 1364, 1, 0, 0, 0, 1378, 1365, 1, 0, 0, 0, 1378, 1366, 1, 0, 0, 0, 1378, 1367, 1, 0, 0, 0, 1378, 1368, 1, 0, 0, 0, 1378, 1369, 1, 0, 0, 0, 1378, 1370, 1, 0, 0, 0, 1378, 1371, 1, 0, 0, 0, 1378, 1372, 1, 0, 0, 0, 1378, 1373, 1, 0, 0, 0, 1378, 1374, 1, 0, 0, 0, 1378, 1375, 1, 0, 0, 0, 1378, 1376, 1, 0, 0, 0, 1378, 1377, 1, 0, 0, 0, 1379, 189, 1, 0, 0, 0, 1380, 1381, 5, 159, 0, 0, 1381, 1382, 5, 170, 0, 0, 1382, 1387, 3, 192, 96, 0, 1383, 1384, 5, 168, 0, 0, 1384, 1386, 3, 192, 96, 0, 1385, 1383, 1, 0, 0, 0, 1386, 1389, 1, 0, 0, 0, 1387, 1385, 1, 0, 0, 0, 1387, 1388, 1, 0, 0, 0, 1388, 1390, 1, 0, 0, 0, 1389, 1387, 1, 0, 0, 0, 1390, 1391, 5, 171, 0, 0, 1391, 191, 1, 0, 0, 0, 1392, 1393, 3, 440, 220, 0, 1393, 1395, 3, 188, 94, 0, 1394, 1396, 3, 194, 97, 0, 1395, 1394, 1, 0, 0, 0, 1395, 1396, 1, 0, 0, 0, 1396, 1398, 1, 0, 0, 0, 1397, 1399, 3, 426, 213, 0, 1398, 1397, 1, 0, 0, 0, 1398, 1399, 1, 0, 0, 0, 1399, 193, 1, 0, 0, 0, 1400, 1402, 3, 196, 98, 0, 1401, 1403, 3, 198, 99, 0, 1402, 1401, 1, 0, 0, 0, 1402, 1403, 1, 0, 0, 0, 1403, 1409, 1, 0, 0, 0, 1404, 1406, 3, 198, 99, 0, 1405, 1407, 3, 196, 98, 0, 1406, 1405, 1, 0, 0, 0, 1406, 1407, 1, 0, 0, 0, 1407, 1409, 1, 0, 0, 0, 1408, 1400, 1, 0, 0, 0, 1408, 1404, 1, 0, 0, 0, 1409, 195, 1, 0, 0, 0, 1410, 1416, 5, 29, 0, 0, 1411, 1417, 3, 432, 216, 0, 1412, 1417, 3, 436, 218, 0, 1413, 1417, 5, 199, 0, 0, 1414, 1417, 5, 198, 0, 0, 1415, 1417, 3, 440, 220, 0, 1416, 1411, 
1, 0, 0, 0, 1416, 1412, 1, 0, 0, 0, 1416, 1413, 1, 0, 0, 0, 1416, 1414, 1, 0, 0, 0, 1416, 1415, 1, 0, 0, 0, 1417, 197, 1, 0, 0, 0, 1418, 1419, 5, 88, 0, 0, 1419, 1420, 5, 197, 0, 0, 1420, 199, 1, 0, 0, 0, 1421, 1422, 5, 156, 0, 0, 1422, 1423, 5, 170, 0, 0, 1423, 1424, 3, 188, 94, 0, 1424, 1425, 5, 171, 0, 0, 1425, 201, 1, 0, 0, 0, 1426, 1427, 5, 147, 0, 0, 1427, 1428, 5, 170, 0, 0, 1428, 1429, 3, 188, 94, 0, 1429, 1430, 5, 171, 0, 0, 1430, 203, 1, 0, 0, 0, 1431, 1432, 7, 10, 0, 0, 1432, 205, 1, 0, 0, 0, 1433, 1434, 5, 67, 0, 0, 1434, 207, 1, 0, 0, 0, 1435, 1436, 7, 11, 0, 0, 1436, 209, 1, 0, 0, 0, 1437, 1438, 5, 160, 0, 0, 1438, 211, 1, 0, 0, 0, 1439, 1440, 5, 151, 0, 0, 1440, 1441, 5, 170, 0, 0, 1441, 1442, 3, 438, 219, 0, 1442, 1443, 5, 171, 0, 0, 1443, 1450, 1, 0, 0, 0, 1444, 1445, 5, 151, 0, 0, 1445, 1446, 5, 170, 0, 0, 1446, 1447, 3, 438, 219, 0, 1447, 1448, 6, 106, -1, 0, 1448, 1450, 1, 0, 0, 0, 1449, 1439, 1, 0, 0, 0, 1449, 1444, 1, 0, 0, 0, 1450, 213, 1, 0, 0, 0, 1451, 1452, 5, 149, 0, 0, 1452, 215, 1, 0, 0, 0, 1453, 1457, 5, 148, 0, 0, 1454, 1455, 5, 170, 0, 0, 1455, 1456, 5, 200, 0, 0, 1456, 1458, 5, 171, 0, 0, 1457, 1454, 1, 0, 0, 0, 1457, 1458, 1, 0, 0, 0, 1458, 217, 1, 0, 0, 0, 1459, 1463, 5, 161, 0, 0, 1460, 1461, 5, 170, 0, 0, 1461, 1462, 5, 200, 0, 0, 1462, 1464, 5, 171, 0, 0, 1463, 1460, 1, 0, 0, 0, 1463, 1464, 1, 0, 0, 0, 1464, 219, 1, 0, 0, 0, 1465, 1466, 5, 162, 0, 0, 1466, 221, 1, 0, 0, 0, 1467, 1468, 5, 163, 0, 0, 1468, 223, 1, 0, 0, 0, 1469, 1470, 5, 164, 0, 0, 1470, 225, 1, 0, 0, 0, 1471, 1472, 5, 165, 0, 0, 1472, 227, 1, 0, 0, 0, 1473, 1478, 3, 440, 220, 0, 1474, 1475, 5, 177, 0, 0, 1475, 1477, 3, 440, 220, 0, 1476, 1474, 1, 0, 0, 0, 1477, 1480, 1, 0, 0, 0, 1478, 1476, 1, 0, 0, 0, 1478, 1479, 1, 0, 0, 0, 1479, 229, 1, 0, 0, 0, 1480, 1478, 1, 0, 0, 0, 1481, 1486, 3, 232, 116, 0, 1482, 1483, 5, 177, 0, 0, 1483, 1485, 3, 232, 116, 0, 1484, 1482, 1, 0, 0, 0, 1485, 1488, 1, 0, 0, 0, 1486, 1484, 1, 0, 0, 0, 1486, 1487, 1, 0, 0, 0, 1487, 231, 1, 0, 0, 0, 1488, 1486, 1, 0, 0, 0, 1489, 1491, 5, 205, 0, 0, 1490, 1489, 1, 0, 0, 0, 1490, 1491, 1, 0, 0, 0, 1491, 1492, 1, 0, 0, 0, 1492, 1493, 3, 440, 220, 0, 1493, 233, 1, 0, 0, 0, 1494, 1499, 3, 236, 118, 0, 1495, 1496, 5, 177, 0, 0, 1496, 1498, 3, 236, 118, 0, 1497, 1495, 1, 0, 0, 0, 1498, 1501, 1, 0, 0, 0, 1499, 1497, 1, 0, 0, 0, 1499, 1500, 1, 0, 0, 0, 1500, 235, 1, 0, 0, 0, 1501, 1499, 1, 0, 0, 0, 1502, 1505, 3, 440, 220, 0, 1503, 1505, 5, 203, 0, 0, 1504, 1502, 1, 0, 0, 0, 1504, 1503, 1, 0, 0, 0, 1505, 237, 1, 0, 0, 0, 1506, 1507, 5, 25, 0, 0, 1507, 1511, 5, 84, 0, 0, 1508, 1509, 5, 59, 0, 0, 1509, 1510, 5, 88, 0, 0, 1510, 1512, 5, 42, 0, 0, 1511, 1508, 1, 0, 0, 0, 1511, 1512, 1, 0, 0, 0, 1512, 1513, 1, 0, 0, 0, 1513, 1514, 3, 254, 127, 0, 1514, 239, 1, 0, 0, 0, 1515, 1516, 5, 35, 0, 0, 1516, 1519, 5, 84, 0, 0, 1517, 1518, 5, 59, 0, 0, 1518, 1520, 5, 42, 0, 0, 1519, 1517, 1, 0, 0, 0, 1519, 1520, 1, 0, 0, 0, 1520, 1521, 1, 0, 0, 0, 1521, 1523, 3, 254, 127, 0, 1522, 1524, 5, 20, 0, 0, 1523, 1522, 1, 0, 0, 0, 1523, 1524, 1, 0, 0, 0, 1524, 241, 1, 0, 0, 0, 1525, 1526, 3, 440, 220, 0, 1526, 243, 1, 0, 0, 0, 1527, 1528, 5, 25, 0, 0, 1528, 1529, 5, 105, 0, 0, 1529, 1530, 3, 242, 121, 0, 1530, 245, 1, 0, 0, 0, 1531, 1532, 5, 35, 0, 0, 1532, 1533, 5, 105, 0, 0, 1533, 1534, 3, 242, 121, 0, 1534, 247, 1, 0, 0, 0, 1535, 1536, 5, 117, 0, 0, 1536, 1537, 5, 76, 0, 0, 1537, 1538, 5, 105, 0, 0, 1538, 1539, 3, 242, 121, 0, 1539, 249, 1, 0, 0, 0, 1540, 1541, 5, 25, 0, 0, 1541, 1545, 5, 121, 0, 0, 1542, 1543, 5, 59, 0, 0, 1543, 1544, 5, 88, 0, 
0, 1544, 1546, 5, 42, 0, 0, 1545, 1542, 1, 0, 0, 0, 1545, 1546, 1, 0, 0, 0, 1546, 1547, 1, 0, 0, 0, 1547, 1549, 3, 252, 126, 0, 1548, 1550, 3, 426, 213, 0, 1549, 1548, 1, 0, 0, 0, 1549, 1550, 1, 0, 0, 0, 1550, 1551, 1, 0, 0, 0, 1551, 1552, 5, 170, 0, 0, 1552, 1553, 3, 256, 128, 0, 1553, 1554, 5, 171, 0, 0, 1554, 1555, 3, 278, 139, 0, 1555, 251, 1, 0, 0, 0, 1556, 1557, 3, 254, 127, 0, 1557, 1558, 5, 169, 0, 0, 1558, 1560, 1, 0, 0, 0, 1559, 1556, 1, 0, 0, 0, 1559, 1560, 1, 0, 0, 0, 1560, 1561, 1, 0, 0, 0, 1561, 1562, 3, 230, 115, 0, 1562, 253, 1, 0, 0, 0, 1563, 1564, 3, 228, 114, 0, 1564, 255, 1, 0, 0, 0, 1565, 1569, 3, 258, 129, 0, 1566, 1569, 3, 268, 134, 0, 1567, 1569, 3, 264, 132, 0, 1568, 1565, 1, 0, 0, 0, 1568, 1566, 1, 0, 0, 0, 1568, 1567, 1, 0, 0, 0, 1569, 1578, 1, 0, 0, 0, 1570, 1574, 5, 168, 0, 0, 1571, 1575, 3, 258, 129, 0, 1572, 1575, 3, 268, 134, 0, 1573, 1575, 3, 264, 132, 0, 1574, 1571, 1, 0, 0, 0, 1574, 1572, 1, 0, 0, 0, 1574, 1573, 1, 0, 0, 0, 1575, 1577, 1, 0, 0, 0, 1576, 1570, 1, 0, 0, 0, 1577, 1580, 1, 0, 0, 0, 1578, 1576, 1, 0, 0, 0, 1578, 1579, 1, 0, 0, 0, 1579, 257, 1, 0, 0, 0, 1580, 1578, 1, 0, 0, 0, 1581, 1582, 3, 440, 220, 0, 1582, 1588, 3, 188, 94, 0, 1583, 1589, 3, 194, 97, 0, 1584, 1589, 3, 294, 147, 0, 1585, 1589, 3, 300, 150, 0, 1586, 1589, 3, 298, 149, 0, 1587, 1589, 3, 260, 130, 0, 1588, 1583, 1, 0, 0, 0, 1588, 1584, 1, 0, 0, 0, 1588, 1585, 1, 0, 0, 0, 1588, 1586, 1, 0, 0, 0, 1588, 1587, 1, 0, 0, 0, 1588, 1589, 1, 0, 0, 0, 1589, 1591, 1, 0, 0, 0, 1590, 1592, 3, 426, 213, 0, 1591, 1590, 1, 0, 0, 0, 1591, 1592, 1, 0, 0, 0, 1592, 259, 1, 0, 0, 0, 1593, 1594, 5, 170, 0, 0, 1594, 1599, 3, 262, 131, 0, 1595, 1596, 5, 168, 0, 0, 1596, 1598, 3, 262, 131, 0, 1597, 1595, 1, 0, 0, 0, 1598, 1601, 1, 0, 0, 0, 1599, 1597, 1, 0, 0, 0, 1599, 1600, 1, 0, 0, 0, 1600, 1602, 1, 0, 0, 0, 1601, 1599, 1, 0, 0, 0, 1602, 1603, 5, 171, 0, 0, 1603, 261, 1, 0, 0, 0, 1604, 1605, 3, 266, 133, 0, 1605, 1606, 5, 13, 0, 0, 1606, 1607, 7, 12, 0, 0, 1607, 1608, 5, 83, 0, 0, 1608, 263, 1, 0, 0, 0, 1609, 1610, 3, 262, 131, 0, 1610, 265, 1, 0, 0, 0, 1611, 1614, 3, 440, 220, 0, 1612, 1614, 3, 436, 218, 0, 1613, 1611, 1, 0, 0, 0, 1613, 1612, 1, 0, 0, 0, 1614, 1622, 1, 0, 0, 0, 1615, 1618, 5, 177, 0, 0, 1616, 1619, 3, 440, 220, 0, 1617, 1619, 3, 436, 218, 0, 1618, 1616, 1, 0, 0, 0, 1618, 1617, 1, 0, 0, 0, 1619, 1621, 1, 0, 0, 0, 1620, 1615, 1, 0, 0, 0, 1621, 1624, 1, 0, 0, 0, 1622, 1620, 1, 0, 0, 0, 1622, 1623, 1, 0, 0, 0, 1623, 267, 1, 0, 0, 0, 1624, 1622, 1, 0, 0, 0, 1625, 1626, 5, 103, 0, 0, 1626, 1627, 5, 69, 0, 0, 1627, 1632, 5, 170, 0, 0, 1628, 1630, 3, 270, 135, 0, 1629, 1631, 5, 168, 0, 0, 1630, 1629, 1, 0, 0, 0, 1630, 1631, 1, 0, 0, 0, 1631, 1633, 1, 0, 0, 0, 1632, 1628, 1, 0, 0, 0, 1632, 1633, 1, 0, 0, 0, 1633, 1635, 1, 0, 0, 0, 1634, 1636, 3, 272, 136, 0, 1635, 1634, 1, 0, 0, 0, 1635, 1636, 1, 0, 0, 0, 1636, 1637, 1, 0, 0, 0, 1637, 1638, 5, 171, 0, 0, 1638, 269, 1, 0, 0, 0, 1639, 1640, 5, 118, 0, 0, 1640, 1641, 5, 170, 0, 0, 1641, 1642, 3, 272, 136, 0, 1642, 1643, 5, 171, 0, 0, 1643, 1649, 1, 0, 0, 0, 1644, 1645, 5, 170, 0, 0, 1645, 1646, 3, 272, 136, 0, 1646, 1647, 6, 135, -1, 0, 1647, 1649, 1, 0, 0, 0, 1648, 1639, 1, 0, 0, 0, 1648, 1644, 1, 0, 0, 0, 1649, 271, 1, 0, 0, 0, 1650, 1655, 3, 274, 137, 0, 1651, 1652, 5, 168, 0, 0, 1652, 1654, 3, 274, 137, 0, 1653, 1651, 1, 0, 0, 0, 1654, 1657, 1, 0, 0, 0, 1655, 1653, 1, 0, 0, 0, 1655, 1656, 1, 0, 0, 0, 1656, 273, 1, 0, 0, 0, 1657, 1655, 1, 0, 0, 0, 1658, 1660, 3, 440, 220, 0, 1659, 1661, 3, 276, 138, 0, 1660, 1659, 1, 0, 0, 0, 1660, 1661, 1, 
0, 0, 0, 1661, 275, 1, 0, 0, 0, 1662, 1663, 5, 170, 0, 0, 1663, 1664, 5, 200, 0, 0, 1664, 1665, 5, 171, 0, 0, 1665, 277, 1, 0, 0, 0, 1666, 1668, 3, 280, 140, 0, 1667, 1666, 1, 0, 0, 0, 1667, 1668, 1, 0, 0, 0, 1668, 1670, 1, 0, 0, 0, 1669, 1671, 3, 284, 142, 0, 1670, 1669, 1, 0, 0, 0, 1670, 1671, 1, 0, 0, 0, 1671, 1727, 1, 0, 0, 0, 1672, 1674, 3, 284, 142, 0, 1673, 1672, 1, 0, 0, 0, 1673, 1674, 1, 0, 0, 0, 1674, 1676, 1, 0, 0, 0, 1675, 1677, 3, 280, 140, 0, 1676, 1675, 1, 0, 0, 0, 1676, 1677, 1, 0, 0, 0, 1677, 1727, 1, 0, 0, 0, 1678, 1680, 3, 280, 140, 0, 1679, 1678, 1, 0, 0, 0, 1679, 1680, 1, 0, 0, 0, 1680, 1682, 1, 0, 0, 0, 1681, 1683, 3, 290, 145, 0, 1682, 1681, 1, 0, 0, 0, 1682, 1683, 1, 0, 0, 0, 1683, 1727, 1, 0, 0, 0, 1684, 1686, 3, 290, 145, 0, 1685, 1684, 1, 0, 0, 0, 1685, 1686, 1, 0, 0, 0, 1686, 1688, 1, 0, 0, 0, 1687, 1689, 3, 280, 140, 0, 1688, 1687, 1, 0, 0, 0, 1688, 1689, 1, 0, 0, 0, 1689, 1727, 1, 0, 0, 0, 1690, 1692, 3, 284, 142, 0, 1691, 1690, 1, 0, 0, 0, 1691, 1692, 1, 0, 0, 0, 1692, 1694, 1, 0, 0, 0, 1693, 1695, 3, 292, 146, 0, 1694, 1693, 1, 0, 0, 0, 1694, 1695, 1, 0, 0, 0, 1695, 1697, 1, 0, 0, 0, 1696, 1698, 3, 280, 140, 0, 1697, 1696, 1, 0, 0, 0, 1697, 1698, 1, 0, 0, 0, 1698, 1727, 1, 0, 0, 0, 1699, 1701, 3, 284, 142, 0, 1700, 1699, 1, 0, 0, 0, 1700, 1701, 1, 0, 0, 0, 1701, 1703, 1, 0, 0, 0, 1702, 1704, 3, 280, 140, 0, 1703, 1702, 1, 0, 0, 0, 1703, 1704, 1, 0, 0, 0, 1704, 1706, 1, 0, 0, 0, 1705, 1707, 3, 292, 146, 0, 1706, 1705, 1, 0, 0, 0, 1706, 1707, 1, 0, 0, 0, 1707, 1727, 1, 0, 0, 0, 1708, 1710, 3, 292, 146, 0, 1709, 1708, 1, 0, 0, 0, 1709, 1710, 1, 0, 0, 0, 1710, 1712, 1, 0, 0, 0, 1711, 1713, 3, 280, 140, 0, 1712, 1711, 1, 0, 0, 0, 1712, 1713, 1, 0, 0, 0, 1713, 1715, 1, 0, 0, 0, 1714, 1716, 3, 284, 142, 0, 1715, 1714, 1, 0, 0, 0, 1715, 1716, 1, 0, 0, 0, 1716, 1727, 1, 0, 0, 0, 1717, 1719, 3, 280, 140, 0, 1718, 1717, 1, 0, 0, 0, 1718, 1719, 1, 0, 0, 0, 1719, 1721, 1, 0, 0, 0, 1720, 1722, 3, 292, 146, 0, 1721, 1720, 1, 0, 0, 0, 1721, 1722, 1, 0, 0, 0, 1722, 1724, 1, 0, 0, 0, 1723, 1725, 3, 284, 142, 0, 1724, 1723, 1, 0, 0, 0, 1724, 1725, 1, 0, 0, 0, 1725, 1727, 1, 0, 0, 0, 1726, 1667, 1, 0, 0, 0, 1726, 1673, 1, 0, 0, 0, 1726, 1679, 1, 0, 0, 0, 1726, 1685, 1, 0, 0, 0, 1726, 1691, 1, 0, 0, 0, 1726, 1700, 1, 0, 0, 0, 1726, 1709, 1, 0, 0, 0, 1726, 1718, 1, 0, 0, 0, 1727, 279, 1, 0, 0, 0, 1728, 1729, 5, 133, 0, 0, 1729, 1730, 5, 125, 0, 0, 1730, 1731, 3, 428, 214, 0, 1731, 281, 1, 0, 0, 0, 1732, 1733, 3, 438, 219, 0, 1733, 283, 1, 0, 0, 0, 1734, 1735, 5, 60, 0, 0, 1735, 1736, 5, 106, 0, 0, 1736, 1737, 3, 282, 141, 0, 1737, 285, 1, 0, 0, 0, 1738, 1739, 5, 6, 0, 0, 1739, 1740, 5, 106, 0, 0, 1740, 1741, 3, 282, 141, 0, 1741, 287, 1, 0, 0, 0, 1742, 1743, 5, 35, 0, 0, 1743, 1744, 5, 106, 0, 0, 1744, 1745, 3, 282, 141, 0, 1745, 289, 1, 0, 0, 0, 1746, 1747, 5, 137, 0, 0, 1747, 1748, 5, 113, 0, 0, 1748, 1750, 5, 51, 0, 0, 1749, 1751, 5, 46, 0, 0, 1750, 1749, 1, 0, 0, 0, 1750, 1751, 1, 0, 0, 0, 1751, 291, 1, 0, 0, 0, 1752, 1753, 5, 13, 0, 0, 1753, 1754, 5, 67, 0, 0, 1754, 1755, 5, 22, 0, 0, 1755, 293, 1, 0, 0, 0, 1756, 1764, 5, 53, 0, 0, 1757, 1765, 5, 10, 0, 0, 1758, 1759, 5, 17, 0, 0, 1759, 1762, 5, 29, 0, 0, 1760, 1761, 5, 92, 0, 0, 1761, 1763, 5, 197, 0, 0, 1762, 1760, 1, 0, 0, 0, 1762, 1763, 1, 0, 0, 0, 1763, 1765, 1, 0, 0, 0, 1764, 1757, 1, 0, 0, 0, 1764, 1758, 1, 0, 0, 0, 1765, 1766, 1, 0, 0, 0, 1766, 1767, 5, 13, 0, 0, 1767, 1776, 5, 58, 0, 0, 1768, 1770, 5, 170, 0, 0, 1769, 1771, 3, 296, 148, 0, 1770, 1769, 1, 0, 0, 0, 1771, 1772, 1, 0, 0, 0, 1772, 1770, 1, 0, 0, 0, 1772, 
1773, 1, 0, 0, 0, 1773, 1774, 1, 0, 0, 0, 1774, 1775, 5, 171, 0, 0, 1775, 1777, 1, 0, 0, 0, 1776, 1768, 1, 0, 0, 0, 1776, 1777, 1, 0, 0, 0, 1777, 295, 1, 0, 0, 0, 1778, 1779, 5, 120, 0, 0, 1779, 1780, 5, 137, 0, 0, 1780, 1800, 3, 434, 217, 0, 1781, 1782, 5, 61, 0, 0, 1782, 1783, 5, 17, 0, 0, 1783, 1800, 3, 434, 217, 0, 1784, 1785, 5, 78, 0, 0, 1785, 1800, 3, 434, 217, 0, 1786, 1787, 5, 87, 0, 0, 1787, 1800, 5, 78, 0, 0, 1788, 1789, 5, 81, 0, 0, 1789, 1800, 3, 434, 217, 0, 1790, 1791, 5, 87, 0, 0, 1791, 1800, 5, 81, 0, 0, 1792, 1793, 5, 18, 0, 0, 1793, 1800, 5, 200, 0, 0, 1794, 1795, 5, 87, 0, 0, 1795, 1800, 5, 18, 0, 0, 1796, 1800, 5, 26, 0, 0, 1797, 1798, 5, 87, 0, 0, 1798, 1800, 5, 26, 0, 0, 1799, 1778, 1, 0, 0, 0, 1799, 1781, 1, 0, 0, 0, 1799, 1784, 1, 0, 0, 0, 1799, 1786, 1, 0, 0, 0, 1799, 1788, 1, 0, 0, 0, 1799, 1790, 1, 0, 0, 0, 1799, 1792, 1, 0, 0, 0, 1799, 1794, 1, 0, 0, 0, 1799, 1796, 1, 0, 0, 0, 1799, 1797, 1, 0, 0, 0, 1800, 297, 1, 0, 0, 0, 1801, 1802, 5, 13, 0, 0, 1802, 1803, 5, 83, 0, 0, 1803, 299, 1, 0, 0, 0, 1804, 1805, 5, 13, 0, 0, 1805, 1809, 5, 140, 0, 0, 1806, 1807, 5, 53, 0, 0, 1807, 1808, 5, 17, 0, 0, 1808, 1810, 5, 29, 0, 0, 1809, 1806, 1, 0, 0, 0, 1809, 1810, 1, 0, 0, 0, 1810, 301, 1, 0, 0, 0, 1811, 1812, 5, 9, 0, 0, 1812, 1813, 5, 121, 0, 0, 1813, 1814, 3, 252, 126, 0, 1814, 1815, 3, 304, 152, 0, 1815, 303, 1, 0, 0, 0, 1816, 1823, 3, 310, 155, 0, 1817, 1823, 3, 280, 140, 0, 1818, 1823, 3, 286, 143, 0, 1819, 1823, 3, 288, 144, 0, 1820, 1823, 3, 306, 153, 0, 1821, 1823, 3, 308, 154, 0, 1822, 1816, 1, 0, 0, 0, 1822, 1817, 1, 0, 0, 0, 1822, 1818, 1, 0, 0, 0, 1822, 1819, 1, 0, 0, 0, 1822, 1820, 1, 0, 0, 0, 1822, 1821, 1, 0, 0, 0, 1823, 305, 1, 0, 0, 0, 1824, 1825, 5, 49, 0, 0, 1825, 1827, 5, 113, 0, 0, 1826, 1828, 5, 46, 0, 0, 1827, 1826, 1, 0, 0, 0, 1827, 1828, 1, 0, 0, 0, 1828, 307, 1, 0, 0, 0, 1829, 1830, 5, 127, 0, 0, 1830, 1831, 5, 113, 0, 0, 1831, 309, 1, 0, 0, 0, 1832, 1836, 5, 170, 0, 0, 1833, 1837, 3, 312, 156, 0, 1834, 1837, 3, 314, 157, 0, 1835, 1837, 3, 316, 158, 0, 1836, 1833, 1, 0, 0, 0, 1836, 1834, 1, 0, 0, 0, 1836, 1835, 1, 0, 0, 0, 1837, 1846, 1, 0, 0, 0, 1838, 1842, 5, 168, 0, 0, 1839, 1843, 3, 312, 156, 0, 1840, 1843, 3, 314, 157, 0, 1841, 1843, 3, 316, 158, 0, 1842, 1839, 1, 0, 0, 0, 1842, 1840, 1, 0, 0, 0, 1842, 1841, 1, 0, 0, 0, 1843, 1845, 1, 0, 0, 0, 1844, 1838, 1, 0, 0, 0, 1845, 1848, 1, 0, 0, 0, 1846, 1844, 1, 0, 0, 0, 1846, 1847, 1, 0, 0, 0, 1847, 1849, 1, 0, 0, 0, 1848, 1846, 1, 0, 0, 0, 1849, 1850, 5, 171, 0, 0, 1850, 311, 1, 0, 0, 0, 1851, 1852, 5, 6, 0, 0, 1852, 1853, 3, 318, 159, 0, 1853, 1859, 3, 188, 94, 0, 1854, 1860, 3, 194, 97, 0, 1855, 1860, 3, 294, 147, 0, 1856, 1860, 3, 298, 149, 0, 1857, 1860, 3, 300, 150, 0, 1858, 1860, 3, 260, 130, 0, 1859, 1854, 1, 0, 0, 0, 1859, 1855, 1, 0, 0, 0, 1859, 1856, 1, 0, 0, 0, 1859, 1857, 1, 0, 0, 0, 1859, 1858, 1, 0, 0, 0, 1859, 1860, 1, 0, 0, 0, 1860, 1862, 1, 0, 0, 0, 1861, 1863, 3, 426, 213, 0, 1862, 1861, 1, 0, 0, 0, 1862, 1863, 1, 0, 0, 0, 1863, 313, 1, 0, 0, 0, 1864, 1865, 5, 35, 0, 0, 1865, 1866, 3, 318, 159, 0, 1866, 315, 1, 0, 0, 0, 1867, 1868, 5, 82, 0, 0, 1868, 1880, 3, 318, 159, 0, 1869, 1871, 3, 188, 94, 0, 1870, 1872, 3, 194, 97, 0, 1871, 1870, 1, 0, 0, 0, 1871, 1872, 1, 0, 0, 0, 1872, 1874, 1, 0, 0, 0, 1873, 1875, 3, 426, 213, 0, 1874, 1873, 1, 0, 0, 0, 1874, 1875, 1, 0, 0, 0, 1875, 1881, 1, 0, 0, 0, 1876, 1881, 3, 294, 147, 0, 1877, 1881, 3, 300, 150, 0, 1878, 1879, 5, 35, 0, 0, 1879, 1881, 5, 58, 0, 0, 1880, 1869, 1, 0, 0, 0, 1880, 1876, 1, 0, 0, 0, 1880, 1877, 1, 0, 0, 0, 1880, 
1878, 1, 0, 0, 0, 1881, 317, 1, 0, 0, 0, 1882, 1887, 3, 320, 160, 0, 1883, 1884, 5, 177, 0, 0, 1884, 1886, 3, 322, 161, 0, 1885, 1883, 1, 0, 0, 0, 1886, 1889, 1, 0, 0, 0, 1887, 1885, 1, 0, 0, 0, 1887, 1888, 1, 0, 0, 0, 1888, 319, 1, 0, 0, 0, 1889, 1887, 1, 0, 0, 0, 1890, 1895, 3, 440, 220, 0, 1891, 1892, 5, 172, 0, 0, 1892, 1894, 5, 173, 0, 0, 1893, 1891, 1, 0, 0, 0, 1894, 1897, 1, 0, 0, 0, 1895, 1893, 1, 0, 0, 0, 1895, 1896, 1, 0, 0, 0, 1896, 321, 1, 0, 0, 0, 1897, 1895, 1, 0, 0, 0, 1898, 1903, 3, 440, 220, 0, 1899, 1900, 5, 172, 0, 0, 1900, 1902, 5, 173, 0, 0, 1901, 1899, 1, 0, 0, 0, 1902, 1905, 1, 0, 0, 0, 1903, 1901, 1, 0, 0, 0, 1903, 1904, 1, 0, 0, 0, 1904, 1910, 1, 0, 0, 0, 1905, 1903, 1, 0, 0, 0, 1906, 1907, 5, 134, 0, 0, 1907, 1908, 5, 170, 0, 0, 1908, 1910, 5, 171, 0, 0, 1909, 1898, 1, 0, 0, 0, 1909, 1906, 1, 0, 0, 0, 1910, 323, 1, 0, 0, 0, 1911, 1912, 5, 35, 0, 0, 1912, 1915, 5, 121, 0, 0, 1913, 1914, 5, 59, 0, 0, 1914, 1916, 5, 42, 0, 0, 1915, 1913, 1, 0, 0, 0, 1915, 1916, 1, 0, 0, 0, 1916, 1917, 1, 0, 0, 0, 1917, 1918, 3, 252, 126, 0, 1918, 325, 1, 0, 0, 0, 1919, 1920, 5, 25, 0, 0, 1920, 1924, 5, 62, 0, 0, 1921, 1922, 5, 59, 0, 0, 1922, 1923, 5, 88, 0, 0, 1923, 1925, 5, 42, 0, 0, 1924, 1921, 1, 0, 0, 0, 1924, 1925, 1, 0, 0, 0, 1925, 1926, 1, 0, 0, 0, 1926, 1927, 3, 328, 164, 0, 1927, 1928, 5, 92, 0, 0, 1928, 1950, 3, 252, 126, 0, 1929, 1930, 5, 170, 0, 0, 1930, 1931, 3, 330, 165, 0, 1931, 1937, 5, 171, 0, 0, 1932, 1934, 5, 137, 0, 0, 1933, 1935, 5, 87, 0, 0, 1934, 1933, 1, 0, 0, 0, 1934, 1935, 1, 0, 0, 0, 1935, 1936, 1, 0, 0, 0, 1936, 1938, 5, 89, 0, 0, 1937, 1932, 1, 0, 0, 0, 1937, 1938, 1, 0, 0, 0, 1938, 1944, 1, 0, 0, 0, 1939, 1940, 5, 137, 0, 0, 1940, 1941, 5, 138, 0, 0, 1941, 1942, 5, 71, 0, 0, 1942, 1943, 5, 100, 0, 0, 1943, 1945, 5, 112, 0, 0, 1944, 1939, 1, 0, 0, 0, 1944, 1945, 1, 0, 0, 0, 1945, 1951, 1, 0, 0, 0, 1946, 1947, 5, 170, 0, 0, 1947, 1948, 3, 330, 165, 0, 1948, 1949, 6, 163, -1, 0, 1949, 1951, 1, 0, 0, 0, 1950, 1929, 1, 0, 0, 0, 1950, 1946, 1, 0, 0, 0, 1951, 1953, 1, 0, 0, 0, 1952, 1954, 3, 426, 213, 0, 1953, 1952, 1, 0, 0, 0, 1953, 1954, 1, 0, 0, 0, 1954, 327, 1, 0, 0, 0, 1955, 1956, 3, 440, 220, 0, 1956, 329, 1, 0, 0, 0, 1957, 1962, 3, 332, 166, 0, 1958, 1959, 5, 168, 0, 0, 1959, 1961, 3, 332, 166, 0, 1960, 1958, 1, 0, 0, 0, 1961, 1964, 1, 0, 0, 0, 1962, 1960, 1, 0, 0, 0, 1962, 1963, 1, 0, 0, 0, 1963, 331, 1, 0, 0, 0, 1964, 1962, 1, 0, 0, 0, 1965, 1967, 3, 338, 169, 0, 1966, 1968, 3, 344, 172, 0, 1967, 1966, 1, 0, 0, 0, 1967, 1968, 1, 0, 0, 0, 1968, 1971, 1, 0, 0, 0, 1969, 1971, 3, 334, 167, 0, 1970, 1965, 1, 0, 0, 0, 1970, 1969, 1, 0, 0, 0, 1971, 333, 1, 0, 0, 0, 1972, 1973, 3, 440, 220, 0, 1973, 1975, 5, 170, 0, 0, 1974, 1976, 3, 338, 169, 0, 1975, 1974, 1, 0, 0, 0, 1975, 1976, 1, 0, 0, 0, 1976, 1978, 1, 0, 0, 0, 1977, 1979, 3, 344, 172, 0, 1978, 1977, 1, 0, 0, 0, 1978, 1979, 1, 0, 0, 0, 1979, 1981, 1, 0, 0, 0, 1980, 1982, 3, 336, 168, 0, 1981, 1980, 1, 0, 0, 0, 1981, 1982, 1, 0, 0, 0, 1982, 1983, 1, 0, 0, 0, 1983, 1984, 5, 171, 0, 0, 1984, 335, 1, 0, 0, 0, 1985, 1986, 5, 168, 0, 0, 1986, 1988, 3, 120, 60, 0, 1987, 1985, 1, 0, 0, 0, 1988, 1989, 1, 0, 0, 0, 1989, 1987, 1, 0, 0, 0, 1989, 1990, 1, 0, 0, 0, 1990, 337, 1, 0, 0, 0, 1991, 2014, 3, 234, 117, 0, 1992, 1994, 3, 340, 170, 0, 1993, 1995, 3, 342, 171, 0, 1994, 1993, 1, 0, 0, 0, 1994, 1995, 1, 0, 0, 0, 1995, 2014, 1, 0, 0, 0, 1996, 1997, 5, 36, 0, 0, 1997, 1998, 5, 170, 0, 0, 1998, 1999, 3, 234, 117, 0, 1999, 2001, 5, 171, 0, 0, 2000, 2002, 3, 342, 171, 0, 2001, 2000, 1, 0, 0, 0, 2001, 2002, 1, 0, 0, 
0, 2002, 2014, 1, 0, 0, 0, 2003, 2004, 5, 70, 0, 0, 2004, 2005, 5, 170, 0, 0, 2005, 2006, 3, 234, 117, 0, 2006, 2007, 5, 171, 0, 0, 2007, 2014, 1, 0, 0, 0, 2008, 2009, 5, 71, 0, 0, 2009, 2010, 5, 170, 0, 0, 2010, 2011, 3, 234, 117, 0, 2011, 2012, 5, 171, 0, 0, 2012, 2014, 1, 0, 0, 0, 2013, 1991, 1, 0, 0, 0, 2013, 1992, 1, 0, 0, 0, 2013, 1996, 1, 0, 0, 0, 2013, 2003, 1, 0, 0, 0, 2013, 2008, 1, 0, 0, 0, 2014, 339, 1, 0, 0, 0, 2015, 2026, 3, 236, 118, 0, 2016, 2017, 5, 177, 0, 0, 2017, 2025, 3, 236, 118, 0, 2018, 2019, 5, 177, 0, 0, 2019, 2020, 5, 134, 0, 0, 2020, 2021, 5, 170, 0, 0, 2021, 2025, 5, 171, 0, 0, 2022, 2023, 5, 172, 0, 0, 2023, 2025, 5, 173, 0, 0, 2024, 2016, 1, 0, 0, 0, 2024, 2018, 1, 0, 0, 0, 2024, 2022, 1, 0, 0, 0, 2025, 2028, 1, 0, 0, 0, 2026, 2024, 1, 0, 0, 0, 2026, 2027, 1, 0, 0, 0, 2027, 2039, 1, 0, 0, 0, 2028, 2026, 1, 0, 0, 0, 2029, 2030, 5, 172, 0, 0, 2030, 2040, 5, 173, 0, 0, 2031, 2032, 5, 177, 0, 0, 2032, 2033, 5, 134, 0, 0, 2033, 2034, 5, 170, 0, 0, 2034, 2040, 5, 171, 0, 0, 2035, 2036, 5, 177, 0, 0, 2036, 2037, 5, 71, 0, 0, 2037, 2038, 5, 170, 0, 0, 2038, 2040, 5, 171, 0, 0, 2039, 2029, 1, 0, 0, 0, 2039, 2031, 1, 0, 0, 0, 2039, 2035, 1, 0, 0, 0, 2040, 341, 1, 0, 0, 0, 2041, 2042, 5, 177, 0, 0, 2042, 2043, 3, 234, 117, 0, 2043, 343, 1, 0, 0, 0, 2044, 2057, 5, 13, 0, 0, 2045, 2058, 5, 154, 0, 0, 2046, 2058, 5, 155, 0, 0, 2047, 2058, 5, 150, 0, 0, 2048, 2058, 5, 160, 0, 0, 2049, 2058, 5, 149, 0, 0, 2050, 2058, 5, 157, 0, 0, 2051, 2058, 5, 163, 0, 0, 2052, 2054, 5, 153, 0, 0, 2053, 2055, 3, 418, 209, 0, 2054, 2053, 1, 0, 0, 0, 2054, 2055, 1, 0, 0, 0, 2055, 2058, 1, 0, 0, 0, 2056, 2058, 5, 158, 0, 0, 2057, 2045, 1, 0, 0, 0, 2057, 2046, 1, 0, 0, 0, 2057, 2047, 1, 0, 0, 0, 2057, 2048, 1, 0, 0, 0, 2057, 2049, 1, 0, 0, 0, 2057, 2050, 1, 0, 0, 0, 2057, 2051, 1, 0, 0, 0, 2057, 2052, 1, 0, 0, 0, 2057, 2056, 1, 0, 0, 0, 2058, 345, 1, 0, 0, 0, 2059, 2060, 5, 25, 0, 0, 2060, 2061, 5, 52, 0, 0, 2061, 2065, 5, 62, 0, 0, 2062, 2063, 5, 59, 0, 0, 2063, 2064, 5, 88, 0, 0, 2064, 2066, 5, 42, 0, 0, 2065, 2062, 1, 0, 0, 0, 2065, 2066, 1, 0, 0, 0, 2066, 2067, 1, 0, 0, 0, 2067, 2068, 3, 328, 164, 0, 2068, 2069, 5, 92, 0, 0, 2069, 2070, 3, 252, 126, 0, 2070, 2072, 3, 348, 174, 0, 2071, 2073, 3, 354, 177, 0, 2072, 2071, 1, 0, 0, 0, 2072, 2073, 1, 0, 0, 0, 2073, 2075, 1, 0, 0, 0, 2074, 2076, 5, 97, 0, 0, 2075, 2074, 1, 0, 0, 0, 2075, 2076, 1, 0, 0, 0, 2076, 2078, 1, 0, 0, 0, 2077, 2079, 3, 426, 213, 0, 2078, 2077, 1, 0, 0, 0, 2078, 2079, 1, 0, 0, 0, 2079, 347, 1, 0, 0, 0, 2080, 2081, 5, 170, 0, 0, 2081, 2082, 3, 350, 175, 0, 2082, 2083, 5, 171, 0, 0, 2083, 2089, 1, 0, 0, 0, 2084, 2085, 5, 170, 0, 0, 2085, 2086, 3, 350, 175, 0, 2086, 2087, 6, 174, -1, 0, 2087, 2089, 1, 0, 0, 0, 2088, 2080, 1, 0, 0, 0, 2088, 2084, 1, 0, 0, 0, 2089, 349, 1, 0, 0, 0, 2090, 2095, 3, 352, 176, 0, 2091, 2092, 5, 168, 0, 0, 2092, 2094, 3, 352, 176, 0, 2093, 2091, 1, 0, 0, 0, 2094, 2097, 1, 0, 0, 0, 2095, 2093, 1, 0, 0, 0, 2095, 2096, 1, 0, 0, 0, 2096, 351, 1, 0, 0, 0, 2097, 2095, 1, 0, 0, 0, 2098, 2100, 3, 338, 169, 0, 2099, 2101, 3, 418, 209, 0, 2100, 2099, 1, 0, 0, 0, 2100, 2101, 1, 0, 0, 0, 2101, 353, 1, 0, 0, 0, 2102, 2106, 3, 356, 178, 0, 2103, 2105, 3, 356, 178, 0, 2104, 2103, 1, 0, 0, 0, 2105, 2108, 1, 0, 0, 0, 2106, 2104, 1, 0, 0, 0, 2106, 2107, 1, 0, 0, 0, 2107, 355, 1, 0, 0, 0, 2108, 2106, 1, 0, 0, 0, 2109, 2110, 5, 40, 0, 0, 2110, 2111, 5, 184, 0, 0, 2111, 2116, 5, 200, 0, 0, 2112, 2113, 5, 41, 0, 0, 2113, 2114, 5, 184, 0, 0, 2114, 2116, 5, 200, 0, 0, 2115, 2109, 1, 0, 0, 0, 2115, 2112, 1, 0, 0, 0, 2116, 
357, 1, 0, 0, 0, 2117, 2118, 5, 35, 0, 0, 2118, 2121, 5, 62, 0, 0, 2119, 2120, 5, 59, 0, 0, 2120, 2122, 5, 42, 0, 0, 2121, 2119, 1, 0, 0, 0, 2121, 2122, 1, 0, 0, 0, 2122, 2123, 1, 0, 0, 0, 2123, 2124, 3, 328, 164, 0, 2124, 2125, 5, 92, 0, 0, 2125, 2127, 3, 252, 126, 0, 2126, 2128, 5, 97, 0, 0, 2127, 2126, 1, 0, 0, 0, 2127, 2128, 1, 0, 0, 0, 2128, 359, 1, 0, 0, 0, 2129, 2132, 7, 13, 0, 0, 2130, 2131, 5, 13, 0, 0, 2131, 2133, 5, 67, 0, 0, 2132, 2130, 1, 0, 0, 0, 2132, 2133, 1, 0, 0, 0, 2133, 2151, 1, 0, 0, 0, 2134, 2135, 5, 121, 0, 0, 2135, 2144, 3, 252, 126, 0, 2136, 2137, 5, 170, 0, 0, 2137, 2138, 3, 362, 181, 0, 2138, 2139, 5, 171, 0, 0, 2139, 2145, 1, 0, 0, 0, 2140, 2141, 5, 170, 0, 0, 2141, 2142, 3, 362, 181, 0, 2142, 2143, 6, 180, -1, 0, 2143, 2145, 1, 0, 0, 0, 2144, 2136, 1, 0, 0, 0, 2144, 2140, 1, 0, 0, 0, 2144, 2145, 1, 0, 0, 0, 2145, 2152, 1, 0, 0, 0, 2146, 2147, 5, 62, 0, 0, 2147, 2148, 3, 328, 164, 0, 2148, 2149, 5, 92, 0, 0, 2149, 2150, 3, 252, 126, 0, 2150, 2152, 1, 0, 0, 0, 2151, 2134, 1, 0, 0, 0, 2151, 2146, 1, 0, 0, 0, 2152, 361, 1, 0, 0, 0, 2153, 2158, 3, 318, 159, 0, 2154, 2155, 5, 168, 0, 0, 2155, 2157, 3, 318, 159, 0, 2156, 2154, 1, 0, 0, 0, 2157, 2160, 1, 0, 0, 0, 2158, 2156, 1, 0, 0, 0, 2158, 2159, 1, 0, 0, 0, 2159, 363, 1, 0, 0, 0, 2160, 2158, 1, 0, 0, 0, 2161, 2164, 5, 119, 0, 0, 2162, 2163, 5, 13, 0, 0, 2163, 2165, 5, 67, 0, 0, 2164, 2162, 1, 0, 0, 0, 2164, 2165, 1, 0, 0, 0, 2165, 2180, 1, 0, 0, 0, 2166, 2181, 5, 122, 0, 0, 2167, 2181, 5, 132, 0, 0, 2168, 2181, 5, 111, 0, 0, 2169, 2170, 5, 131, 0, 0, 2170, 2181, 3, 380, 190, 0, 2171, 2172, 5, 110, 0, 0, 2172, 2181, 3, 440, 220, 0, 2173, 2174, 5, 63, 0, 0, 2174, 2175, 5, 92, 0, 0, 2175, 2181, 3, 252, 126, 0, 2176, 2177, 5, 121, 0, 0, 2177, 2181, 3, 252, 126, 0, 2178, 2181, 5, 85, 0, 0, 2179, 2181, 5, 106, 0, 0, 2180, 2166, 1, 0, 0, 0, 2180, 2167, 1, 0, 0, 0, 2180, 2168, 1, 0, 0, 0, 2180, 2169, 1, 0, 0, 0, 2180, 2171, 1, 0, 0, 0, 2180, 2173, 1, 0, 0, 0, 2180, 2176, 1, 0, 0, 0, 2180, 2178, 1, 0, 0, 0, 2180, 2179, 1, 0, 0, 0, 2181, 365, 1, 0, 0, 0, 2182, 2183, 5, 25, 0, 0, 2183, 2184, 5, 131, 0, 0, 2184, 2186, 3, 384, 192, 0, 2185, 2187, 3, 392, 196, 0, 2186, 2185, 1, 0, 0, 0, 2186, 2187, 1, 0, 0, 0, 2187, 2189, 1, 0, 0, 0, 2188, 2190, 5, 7, 0, 0, 2189, 2188, 1, 0, 0, 0, 2189, 2190, 1, 0, 0, 0, 2190, 367, 1, 0, 0, 0, 2191, 2192, 5, 25, 0, 0, 2192, 2193, 5, 110, 0, 0, 2193, 2194, 3, 440, 220, 0, 2194, 369, 1, 0, 0, 0, 2195, 2196, 5, 9, 0, 0, 2196, 2197, 5, 131, 0, 0, 2197, 2199, 3, 380, 190, 0, 2198, 2200, 3, 390, 195, 0, 2199, 2198, 1, 0, 0, 0, 2199, 2200, 1, 0, 0, 0, 2200, 2202, 1, 0, 0, 0, 2201, 2203, 5, 145, 0, 0, 2202, 2201, 1, 0, 0, 0, 2202, 2203, 1, 0, 0, 0, 2203, 2205, 1, 0, 0, 0, 2204, 2206, 5, 143, 0, 0, 2205, 2204, 1, 0, 0, 0, 2205, 2206, 1, 0, 0, 0, 2206, 2208, 1, 0, 0, 0, 2207, 2209, 3, 388, 194, 0, 2208, 2207, 1, 0, 0, 0, 2208, 2209, 1, 0, 0, 0, 2209, 2211, 1, 0, 0, 0, 2210, 2212, 3, 392, 196, 0, 2211, 2210, 1, 0, 0, 0, 2211, 2212, 1, 0, 0, 0, 2212, 371, 1, 0, 0, 0, 2213, 2214, 5, 35, 0, 0, 2214, 2215, 5, 131, 0, 0, 2215, 2217, 3, 380, 190, 0, 2216, 2218, 5, 20, 0, 0, 2217, 2216, 1, 0, 0, 0, 2217, 2218, 1, 0, 0, 0, 2218, 373, 1, 0, 0, 0, 2219, 2220, 5, 35, 0, 0, 2220, 2221, 5, 110, 0, 0, 2221, 2222, 3, 440, 220, 0, 2222, 375, 1, 0, 0, 0, 2223, 2227, 5, 54, 0, 0, 2224, 2228, 3, 394, 197, 0, 2225, 2228, 3, 396, 198, 0, 2226, 2228, 3, 398, 199, 0, 2227, 2224, 1, 0, 0, 0, 2227, 2225, 1, 0, 0, 0, 2227, 2226, 1, 0, 0, 0, 2228, 377, 1, 0, 0, 0, 2229, 2233, 5, 109, 0, 0, 2230, 2234, 3, 400, 200, 0, 2231, 2234, 3, 
402, 201, 0, 2232, 2234, 3, 404, 202, 0, 2233, 2230, 1, 0, 0, 0, 2233, 2231, 1, 0, 0, 0, 2233, 2232, 1, 0, 0, 0, 2234, 379, 1, 0, 0, 0, 2235, 2238, 3, 440, 220, 0, 2236, 2238, 3, 436, 218, 0, 2237, 2235, 1, 0, 0, 0, 2237, 2236, 1, 0, 0, 0, 2238, 381, 1, 0, 0, 0, 2239, 2240, 5, 57, 0, 0, 2240, 2241, 3, 386, 193, 0, 2241, 383, 1, 0, 0, 0, 2242, 2243, 3, 440, 220, 0, 2243, 2245, 3, 382, 191, 0, 2244, 2246, 5, 143, 0, 0, 2245, 2244, 1, 0, 0, 0, 2245, 2246, 1, 0, 0, 0, 2246, 2248, 1, 0, 0, 0, 2247, 2249, 3, 388, 194, 0, 2248, 2247, 1, 0, 0, 0, 2248, 2249, 1, 0, 0, 0, 2249, 2254, 1, 0, 0, 0, 2250, 2251, 3, 436, 218, 0, 2251, 2252, 5, 142, 0, 0, 2252, 2254, 1, 0, 0, 0, 2253, 2242, 1, 0, 0, 0, 2253, 2250, 1, 0, 0, 0, 2254, 385, 1, 0, 0, 0, 2255, 2256, 5, 17, 0, 0, 2256, 2257, 3, 436, 218, 0, 2257, 387, 1, 0, 0, 0, 2258, 2259, 5, 98, 0, 0, 2259, 2260, 5, 74, 0, 0, 2260, 2261, 3, 428, 214, 0, 2261, 389, 1, 0, 0, 0, 2262, 2264, 3, 382, 191, 0, 2263, 2265, 5, 144, 0, 0, 2264, 2263, 1, 0, 0, 0, 2264, 2265, 1, 0, 0, 0, 2265, 391, 1, 0, 0, 0, 2266, 2267, 5, 5, 0, 0, 2267, 2268, 7, 14, 0, 0, 2268, 393, 1, 0, 0, 0, 2269, 2270, 3, 438, 219, 0, 2270, 2271, 5, 124, 0, 0, 2271, 2272, 3, 406, 203, 0, 2272, 395, 1, 0, 0, 0, 2273, 2274, 3, 408, 204, 0, 2274, 2275, 5, 124, 0, 0, 2275, 2276, 3, 440, 220, 0, 2276, 397, 1, 0, 0, 0, 2277, 2278, 3, 412, 206, 0, 2278, 2282, 5, 92, 0, 0, 2279, 2283, 3, 414, 207, 0, 2280, 2281, 5, 84, 0, 0, 2281, 2283, 3, 254, 127, 0, 2282, 2279, 1, 0, 0, 0, 2282, 2280, 1, 0, 0, 0, 2283, 2284, 1, 0, 0, 0, 2284, 2285, 5, 124, 0, 0, 2285, 2286, 3, 440, 220, 0, 2286, 399, 1, 0, 0, 0, 2287, 2288, 3, 438, 219, 0, 2288, 2289, 5, 50, 0, 0, 2289, 2290, 3, 406, 203, 0, 2290, 401, 1, 0, 0, 0, 2291, 2292, 3, 408, 204, 0, 2292, 2293, 5, 50, 0, 0, 2293, 2294, 3, 440, 220, 0, 2294, 403, 1, 0, 0, 0, 2295, 2296, 3, 412, 206, 0, 2296, 2300, 5, 92, 0, 0, 2297, 2301, 3, 414, 207, 0, 2298, 2299, 5, 84, 0, 0, 2299, 2301, 3, 254, 127, 0, 2300, 2297, 1, 0, 0, 0, 2300, 2298, 1, 0, 0, 0, 2301, 2302, 1, 0, 0, 0, 2302, 2303, 5, 50, 0, 0, 2303, 2304, 3, 440, 220, 0, 2304, 405, 1, 0, 0, 0, 2305, 2306, 5, 131, 0, 0, 2306, 2310, 3, 380, 190, 0, 2307, 2308, 5, 110, 0, 0, 2308, 2310, 3, 440, 220, 0, 2309, 2305, 1, 0, 0, 0, 2309, 2307, 1, 0, 0, 0, 2310, 407, 1, 0, 0, 0, 2311, 2316, 3, 410, 205, 0, 2312, 2313, 5, 168, 0, 0, 2313, 2315, 3, 410, 205, 0, 2314, 2312, 1, 0, 0, 0, 2315, 2318, 1, 0, 0, 0, 2316, 2314, 1, 0, 0, 0, 2316, 2317, 1, 0, 0, 0, 2317, 409, 1, 0, 0, 0, 2318, 2316, 1, 0, 0, 0, 2319, 2322, 3, 440, 220, 0, 2320, 2322, 5, 141, 0, 0, 2321, 2319, 1, 0, 0, 0, 2321, 2320, 1, 0, 0, 0, 2322, 411, 1, 0, 0, 0, 2323, 2326, 3, 410, 205, 0, 2324, 2326, 5, 8, 0, 0, 2325, 2323, 1, 0, 0, 0, 2325, 2324, 1, 0, 0, 0, 2326, 2334, 1, 0, 0, 0, 2327, 2330, 5, 168, 0, 0, 2328, 2331, 3, 410, 205, 0, 2329, 2331, 5, 8, 0, 0, 2330, 2328, 1, 0, 0, 0, 2330, 2329, 1, 0, 0, 0, 2331, 2333, 1, 0, 0, 0, 2332, 2327, 1, 0, 0, 0, 2333, 2336, 1, 0, 0, 0, 2334, 2332, 1, 0, 0, 0, 2334, 2335, 1, 0, 0, 0, 2335, 413, 1, 0, 0, 0, 2336, 2334, 1, 0, 0, 0, 2337, 2338, 3, 252, 126, 0, 2338, 415, 1, 0, 0, 0, 2339, 2347, 3, 418, 209, 0, 2340, 2347, 3, 420, 210, 0, 2341, 2347, 3, 436, 218, 0, 2342, 2347, 3, 432, 216, 0, 2343, 2347, 5, 199, 0, 0, 2344, 2347, 5, 198, 0, 0, 2345, 2347, 5, 197, 0, 0, 2346, 2339, 1, 0, 0, 0, 2346, 2340, 1, 0, 0, 0, 2346, 2341, 1, 0, 0, 0, 2346, 2342, 1, 0, 0, 0, 2346, 2343, 1, 0, 0, 0, 2346, 2344, 1, 0, 0, 0, 2346, 2345, 1, 0, 0, 0, 2347, 417, 1, 0, 0, 0, 2348, 2349, 5, 174, 0, 0, 2349, 2354, 3, 422, 211, 0, 2350, 2351, 5, 168, 0, 
0, 2351, 2353, 3, 422, 211, 0, 2352, 2350, 1, 0, 0, 0, 2353, 2356, 1, 0, 0, 0, 2354, 2352, 1, 0, 0, 0, 2354, 2355, 1, 0, 0, 0, 2355, 2357, 1, 0, 0, 0, 2356, 2354, 1, 0, 0, 0, 2357, 2358, 5, 175, 0, 0, 2358, 2362, 1, 0, 0, 0, 2359, 2360, 5, 174, 0, 0, 2360, 2362, 5, 175, 0, 0, 2361, 2348, 1, 0, 0, 0, 2361, 2359, 1, 0, 0, 0, 2362, 419, 1, 0, 0, 0, 2363, 2364, 5, 172, 0, 0, 2364, 2369, 3, 424, 212, 0, 2365, 2366, 5, 168, 0, 0, 2366, 2368, 3, 424, 212, 0, 2367, 2365, 1, 0, 0, 0, 2368, 2371, 1, 0, 0, 0, 2369, 2367, 1, 0, 0, 0, 2369, 2370, 1, 0, 0, 0, 2370, 2372, 1, 0, 0, 0, 2371, 2369, 1, 0, 0, 0, 2372, 2373, 5, 173, 0, 0, 2373, 2377, 1, 0, 0, 0, 2374, 2375, 5, 172, 0, 0, 2375, 2377, 5, 173, 0, 0, 2376, 2363, 1, 0, 0, 0, 2376, 2374, 1, 0, 0, 0, 2377, 421, 1, 0, 0, 0, 2378, 2379, 5, 203, 0, 0, 2379, 2380, 5, 169, 0, 0, 2380, 2381, 3, 424, 212, 0, 2381, 423, 1, 0, 0, 0, 2382, 2390, 3, 418, 209, 0, 2383, 2390, 3, 420, 210, 0, 2384, 2390, 5, 203, 0, 0, 2385, 2390, 3, 432, 216, 0, 2386, 2390, 5, 199, 0, 0, 2387, 2390, 5, 198, 0, 0, 2388, 2390, 5, 197, 0, 0, 2389, 2382, 1, 0, 0, 0, 2389, 2383, 1, 0, 0, 0, 2389, 2384, 1, 0, 0, 0, 2389, 2385, 1, 0, 0, 0, 2389, 2386, 1, 0, 0, 0, 2389, 2387, 1, 0, 0, 0, 2389, 2388, 1, 0, 0, 0, 2390, 425, 1, 0, 0, 0, 2391, 2392, 5, 23, 0, 0, 2392, 2393, 3, 436, 218, 0, 2393, 427, 1, 0, 0, 0, 2394, 2395, 5, 200, 0, 0, 2395, 2396, 3, 430, 215, 0, 2396, 429, 1, 0, 0, 0, 2397, 2398, 7, 15, 0, 0, 2398, 431, 1, 0, 0, 0, 2399, 2401, 5, 193, 0, 0, 2400, 2399, 1, 0, 0, 0, 2400, 2401, 1, 0, 0, 0, 2401, 2402, 1, 0, 0, 0, 2402, 2403, 7, 16, 0, 0, 2403, 433, 1, 0, 0, 0, 2404, 2406, 7, 3, 0, 0, 2405, 2404, 1, 0, 0, 0, 2405, 2406, 1, 0, 0, 0, 2406, 2407, 1, 0, 0, 0, 2407, 2408, 5, 200, 0, 0, 2408, 435, 1, 0, 0, 0, 2409, 2410, 7, 17, 0, 0, 2410, 437, 1, 0, 0, 0, 2411, 2416, 3, 440, 220, 0, 2412, 2413, 5, 168, 0, 0, 2413, 2415, 3, 440, 220, 0, 2414, 2412, 1, 0, 0, 0, 2415, 2418, 1, 0, 0, 0, 2416, 2414, 1, 0, 0, 0, 2416, 2417, 1, 0, 0, 0, 2417, 439, 1, 0, 0, 0, 2418, 2416, 1, 0, 0, 0, 2419, 2565, 5, 5, 0, 0, 2420, 2565, 5, 6, 0, 0, 2421, 2565, 5, 7, 0, 0, 2422, 2565, 5, 8, 0, 0, 2423, 2565, 5, 9, 0, 0, 2424, 2565, 5, 10, 0, 0, 2425, 2565, 5, 11, 0, 0, 2426, 2565, 5, 12, 0, 0, 2427, 2565, 5, 162, 0, 0, 2428, 2565, 5, 163, 0, 0, 2429, 2565, 5, 164, 0, 0, 2430, 2565, 5, 165, 0, 0, 2431, 2565, 5, 15, 0, 0, 2432, 2565, 5, 13, 0, 0, 2433, 2565, 5, 14, 0, 0, 2434, 2565, 5, 16, 0, 0, 2435, 2565, 5, 17, 0, 0, 2436, 2565, 5, 18, 0, 0, 2437, 2565, 5, 19, 0, 0, 2438, 2565, 5, 21, 0, 0, 2439, 2565, 5, 22, 0, 0, 2440, 2565, 5, 23, 0, 0, 2441, 2565, 5, 24, 0, 0, 2442, 2565, 5, 25, 0, 0, 2443, 2565, 5, 26, 0, 0, 2444, 2565, 5, 27, 0, 0, 2445, 2565, 5, 28, 0, 0, 2446, 2565, 5, 29, 0, 0, 2447, 2565, 5, 30, 0, 0, 2448, 2565, 5, 31, 0, 0, 2449, 2565, 5, 32, 0, 0, 2450, 2565, 5, 33, 0, 0, 2451, 2565, 5, 34, 0, 0, 2452, 2565, 5, 35, 0, 0, 2453, 2565, 5, 36, 0, 0, 2454, 2565, 5, 37, 0, 0, 2455, 2565, 5, 38, 0, 0, 2456, 2565, 5, 39, 0, 0, 2457, 2565, 5, 40, 0, 0, 2458, 2565, 5, 41, 0, 0, 2459, 2565, 5, 42, 0, 0, 2460, 2565, 5, 43, 0, 0, 2461, 2565, 5, 44, 0, 0, 2462, 2565, 5, 45, 0, 0, 2463, 2565, 5, 49, 0, 0, 2464, 2565, 5, 50, 0, 0, 2465, 2565, 5, 51, 0, 0, 2466, 2565, 5, 52, 0, 0, 2467, 2565, 5, 53, 0, 0, 2468, 2565, 5, 54, 0, 0, 2469, 2565, 5, 55, 0, 0, 2470, 2565, 5, 56, 0, 0, 2471, 2565, 5, 57, 0, 0, 2472, 2565, 5, 58, 0, 0, 2473, 2565, 5, 59, 0, 0, 2474, 2565, 5, 61, 0, 0, 2475, 2565, 5, 62, 0, 0, 2476, 2565, 5, 63, 0, 0, 2477, 2565, 5, 64, 0, 0, 2478, 2565, 5, 65, 0, 0, 2479, 2565, 5, 60, 0, 0, 2480, 
2565, 5, 66, 0, 0, 2481, 2565, 5, 67, 0, 0, 2482, 2565, 5, 69, 0, 0, 2483, 2565, 5, 70, 0, 0, 2484, 2565, 5, 71, 0, 0, 2485, 2565, 5, 74, 0, 0, 2486, 2565, 5, 72, 0, 0, 2487, 2565, 5, 75, 0, 0, 2488, 2565, 5, 76, 0, 0, 2489, 2565, 5, 77, 0, 0, 2490, 2565, 5, 79, 0, 0, 2491, 2565, 5, 80, 0, 0, 2492, 2565, 5, 82, 0, 0, 2493, 2494, 5, 83, 0, 0, 2494, 2565, 5, 84, 0, 0, 2495, 2565, 5, 85, 0, 0, 2496, 2565, 5, 86, 0, 0, 2497, 2565, 5, 87, 0, 0, 2498, 2565, 5, 88, 0, 0, 2499, 2565, 5, 89, 0, 0, 2500, 2565, 5, 91, 0, 0, 2501, 2565, 5, 90, 0, 0, 2502, 2565, 5, 92, 0, 0, 2503, 2565, 5, 94, 0, 0, 2504, 2565, 5, 95, 0, 0, 2505, 2565, 5, 97, 0, 0, 2506, 2565, 5, 100, 0, 0, 2507, 2565, 5, 98, 0, 0, 2508, 2565, 5, 99, 0, 0, 2509, 2565, 5, 103, 0, 0, 2510, 2565, 5, 104, 0, 0, 2511, 2565, 5, 195, 0, 0, 2512, 2565, 5, 105, 0, 0, 2513, 2565, 5, 106, 0, 0, 2514, 2565, 5, 107, 0, 0, 2515, 2565, 5, 108, 0, 0, 2516, 2565, 5, 112, 0, 0, 2517, 2565, 5, 110, 0, 0, 2518, 2565, 5, 111, 0, 0, 2519, 2565, 5, 109, 0, 0, 2520, 2565, 5, 113, 0, 0, 2521, 2565, 5, 114, 0, 0, 2522, 2565, 5, 115, 0, 0, 2523, 2565, 5, 116, 0, 0, 2524, 2565, 5, 117, 0, 0, 2525, 2565, 5, 118, 0, 0, 2526, 2565, 5, 119, 0, 0, 2527, 2565, 5, 120, 0, 0, 2528, 2565, 5, 121, 0, 0, 2529, 2565, 5, 122, 0, 0, 2530, 2565, 5, 123, 0, 0, 2531, 2565, 5, 124, 0, 0, 2532, 2565, 5, 125, 0, 0, 2533, 2565, 5, 126, 0, 0, 2534, 2565, 5, 127, 0, 0, 2535, 2565, 5, 128, 0, 0, 2536, 2565, 5, 138, 0, 0, 2537, 2565, 5, 139, 0, 0, 2538, 2565, 5, 129, 0, 0, 2539, 2565, 5, 130, 0, 0, 2540, 2565, 5, 131, 0, 0, 2541, 2565, 5, 132, 0, 0, 2542, 2565, 5, 133, 0, 0, 2543, 2565, 5, 134, 0, 0, 2544, 2565, 5, 135, 0, 0, 2545, 2565, 5, 136, 0, 0, 2546, 2565, 5, 137, 0, 0, 2547, 2565, 5, 147, 0, 0, 2548, 2565, 5, 148, 0, 0, 2549, 2565, 5, 149, 0, 0, 2550, 2565, 5, 150, 0, 0, 2551, 2565, 5, 151, 0, 0, 2552, 2565, 5, 152, 0, 0, 2553, 2565, 5, 153, 0, 0, 2554, 2565, 5, 155, 0, 0, 2555, 2565, 5, 154, 0, 0, 2556, 2565, 5, 156, 0, 0, 2557, 2565, 5, 157, 0, 0, 2558, 2565, 5, 158, 0, 0, 2559, 2565, 5, 159, 0, 0, 2560, 2565, 5, 160, 0, 0, 2561, 2565, 5, 161, 0, 0, 2562, 2565, 5, 166, 0, 0, 2563, 2565, 5, 206, 0, 0, 2564, 2419, 1, 0, 0, 0, 2564, 2420, 1, 0, 0, 0, 2564, 2421, 1, 0, 0, 0, 2564, 2422, 1, 0, 0, 0, 2564, 2423, 1, 0, 0, 0, 2564, 2424, 1, 0, 0, 0, 2564, 2425, 1, 0, 0, 0, 2564, 2426, 1, 0, 0, 0, 2564, 2427, 1, 0, 0, 0, 2564, 2428, 1, 0, 0, 0, 2564, 2429, 1, 0, 0, 0, 2564, 2430, 1, 0, 0, 0, 2564, 2431, 1, 0, 0, 0, 2564, 2432, 1, 0, 0, 0, 2564, 2433, 1, 0, 0, 0, 2564, 2434, 1, 0, 0, 0, 2564, 2435, 1, 0, 0, 0, 2564, 2436, 1, 0, 0, 0, 2564, 2437, 1, 0, 0, 0, 2564, 2438, 1, 0, 0, 0, 2564, 2439, 1, 0, 0, 0, 2564, 2440, 1, 0, 0, 0, 2564, 2441, 1, 0, 0, 0, 2564, 2442, 1, 0, 0, 0, 2564, 2443, 1, 0, 0, 0, 2564, 2444, 1, 0, 0, 0, 2564, 2445, 1, 0, 0, 0, 2564, 2446, 1, 0, 0, 0, 2564, 2447, 1, 0, 0, 0, 2564, 2448, 1, 0, 0, 0, 2564, 2449, 1, 0, 0, 0, 2564, 2450, 1, 0, 0, 0, 2564, 2451, 1, 0, 0, 0, 2564, 2452, 1, 0, 0, 0, 2564, 2453, 1, 0, 0, 0, 2564, 2454, 1, 0, 0, 0, 2564, 2455, 1, 0, 0, 0, 2564, 2456, 1, 0, 0, 0, 2564, 2457, 1, 0, 0, 0, 2564, 2458, 1, 0, 0, 0, 2564, 2459, 1, 0, 0, 0, 2564, 2460, 1, 0, 0, 0, 2564, 2461, 1, 0, 0, 0, 2564, 2462, 1, 0, 0, 0, 2564, 2463, 1, 0, 0, 0, 2564, 2464, 1, 0, 0, 0, 2564, 2465, 1, 0, 0, 0, 2564, 2466, 1, 0, 0, 0, 2564, 2467, 1, 0, 0, 0, 2564, 2468, 1, 0, 0, 0, 2564, 2469, 1, 0, 0, 0, 2564, 2470, 1, 0, 0, 0, 2564, 2471, 1, 0, 0, 0, 2564, 2472, 1, 0, 0, 0, 2564, 2473, 1, 0, 0, 0, 2564, 2474, 1, 0, 0, 0, 2564, 2475, 1, 0, 0, 0, 2564, 2476, 1, 0, 0, 0, 2564, 2477, 
1, 0, 0, 0, 2564, 2478, 1, 0, 0, 0, 2564, 2479, 1, 0, 0, 0, 2564, 2480, 1, 0, 0, 0, 2564, 2481, 1, 0, 0, 0, 2564, 2482, 1, 0, 0, 0, 2564, 2483, 1, 0, 0, 0, 2564, 2484, 1, 0, 0, 0, 2564, 2485, 1, 0, 0, 0, 2564, 2486, 1, 0, 0, 0, 2564, 2487, 1, 0, 0, 0, 2564, 2488, 1, 0, 0, 0, 2564, 2489, 1, 0, 0, 0, 2564, 2490, 1, 0, 0, 0, 2564, 2491, 1, 0, 0, 0, 2564, 2492, 1, 0, 0, 0, 2564, 2493, 1, 0, 0, 0, 2564, 2495, 1, 0, 0, 0, 2564, 2496, 1, 0, 0, 0, 2564, 2497, 1, 0, 0, 0, 2564, 2498, 1, 0, 0, 0, 2564, 2499, 1, 0, 0, 0, 2564, 2500, 1, 0, 0, 0, 2564, 2501, 1, 0, 0, 0, 2564, 2502, 1, 0, 0, 0, 2564, 2503, 1, 0, 0, 0, 2564, 2504, 1, 0, 0, 0, 2564, 2505, 1, 0, 0, 0, 2564, 2506, 1, 0, 0, 0, 2564, 2507, 1, 0, 0, 0, 2564, 2508, 1, 0, 0, 0, 2564, 2509, 1, 0, 0, 0, 2564, 2510, 1, 0, 0, 0, 2564, 2511, 1, 0, 0, 0, 2564, 2512, 1, 0, 0, 0, 2564, 2513, 1, 0, 0, 0, 2564, 2514, 1, 0, 0, 0, 2564, 2515, 1, 0, 0, 0, 2564, 2516, 1, 0, 0, 0, 2564, 2517, 1, 0, 0, 0, 2564, 2518, 1, 0, 0, 0, 2564, 2519, 1, 0, 0, 0, 2564, 2520, 1, 0, 0, 0, 2564, 2521, 1, 0, 0, 0, 2564, 2522, 1, 0, 0, 0, 2564, 2523, 1, 0, 0, 0, 2564, 2524, 1, 0, 0, 0, 2564, 2525, 1, 0, 0, 0, 2564, 2526, 1, 0, 0, 0, 2564, 2527, 1, 0, 0, 0, 2564, 2528, 1, 0, 0, 0, 2564, 2529, 1, 0, 0, 0, 2564, 2530, 1, 0, 0, 0, 2564, 2531, 1, 0, 0, 0, 2564, 2532, 1, 0, 0, 0, 2564, 2533, 1, 0, 0, 0, 2564, 2534, 1, 0, 0, 0, 2564, 2535, 1, 0, 0, 0, 2564, 2536, 1, 0, 0, 0, 2564, 2537, 1, 0, 0, 0, 2564, 2538, 1, 0, 0, 0, 2564, 2539, 1, 0, 0, 0, 2564, 2540, 1, 0, 0, 0, 2564, 2541, 1, 0, 0, 0, 2564, 2542, 1, 0, 0, 0, 2564, 2543, 1, 0, 0, 0, 2564, 2544, 1, 0, 0, 0, 2564, 2545, 1, 0, 0, 0, 2564, 2546, 1, 0, 0, 0, 2564, 2547, 1, 0, 0, 0, 2564, 2548, 1, 0, 0, 0, 2564, 2549, 1, 0, 0, 0, 2564, 2550, 1, 0, 0, 0, 2564, 2551, 1, 0, 0, 0, 2564, 2552, 1, 0, 0, 0, 2564, 2553, 1, 0, 0, 0, 2564, 2554, 1, 0, 0, 0, 2564, 2555, 1, 0, 0, 0, 2564, 2556, 1, 0, 0, 0, 2564, 2557, 1, 0, 0, 0, 2564, 2558, 1, 0, 0, 0, 2564, 2559, 1, 0, 0, 0, 2564, 2560, 1, 0, 0, 0, 2564, 2561, 1, 0, 0, 0, 2564, 2562, 1, 0, 0, 0, 2564, 2563, 1, 0, 0, 0, 2565, 2569, 1, 0, 0, 0, 2566, 2567, 5, 207, 0, 0, 2567, 2569, 6, 220, -1, 0, 2568, 2564, 1, 0, 0, 0, 2568, 2566, 1, 0, 0, 0, 2569, 441, 1, 0, 0, 0, 279, 470, 473, 485, 499, 502, 505, 508, 511, 519, 525, 530, 534, 540, 551, 558, 567, 575, 583, 592, 596, 599, 603, 609, 616, 622, 634, 637, 648, 651, 657, 668, 689, 692, 696, 708, 712, 716, 725, 742, 753, 757, 764, 767, 774, 785, 789, 799, 804, 814, 824, 835, 848, 859, 864, 875, 879, 883, 888, 893, 903, 911, 919, 925, 930, 932, 938, 945, 950, 956, 960, 964, 970, 988, 994, 996, 1003, 1009, 1015, 1031, 1038, 1052, 1064, 1067, 1094, 1099, 1122, 1128, 1131, 1139, 1144, 1153, 1160, 1163, 1167, 1174, 1182, 1185, 1190, 1193, 1200, 1206, 1216, 1220, 1228, 1232, 1240, 1244, 1252, 1256, 1265, 1269, 1279, 1282, 1289, 1293, 1296, 1299, 1304, 1308, 1323, 1331, 1338, 1344, 1347, 1351, 1354, 1361, 1378, 1387, 1395, 1398, 1402, 1406, 1408, 1416, 1449, 1457, 1463, 1478, 1486, 1490, 1499, 1504, 1511, 1519, 1523, 1545, 1549, 1559, 1568, 1574, 1578, 1588, 1591, 1599, 1613, 1618, 1622, 1630, 1632, 1635, 1648, 1655, 1660, 1667, 1670, 1673, 1676, 1679, 1682, 1685, 1688, 1691, 1694, 1697, 1700, 1703, 1706, 1709, 1712, 1715, 1718, 1721, 1724, 1726, 1750, 1762, 1764, 1772, 1776, 1799, 1809, 1822, 1827, 1836, 1842, 1846, 1859, 1862, 1871, 1874, 1880, 1887, 1895, 1903, 1909, 1915, 1924, 1934, 1937, 1944, 1950, 1953, 1962, 1967, 1970, 1975, 1978, 1981, 1989, 1994, 2001, 2013, 2024, 2026, 2039, 2054, 2057, 2065, 2072, 2075, 2078, 2088, 2095, 2100, 2106, 2115, 
2121, 2127, 2132, 2144, 2151, 2158, 2164, 2180, 2186, 2189, 2199, 2202, 2205, 2208, 2211, 2217, 2227, 2233, 2237, 2245, 2248, 2253, 2264, 2282, 2300, 2309, 2316, 2321, 2325, 2330, 2334, 2346, 2354, 2361, 2369, 2376, 2389, 2400, 2405, 2416, 2564, 2568] \ No newline at end of file +[4, 1, 217, 2559, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7, 20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25, 2, 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 2, 30, 7, 30, 2, 31, 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 2, 34, 7, 34, 2, 35, 7, 35, 2, 36, 7, 36, 2, 37, 7, 37, 2, 38, 7, 38, 2, 39, 7, 39, 2, 40, 7, 40, 2, 41, 7, 41, 2, 42, 7, 42, 2, 43, 7, 43, 2, 44, 7, 44, 2, 45, 7, 45, 2, 46, 7, 46, 2, 47, 7, 47, 2, 48, 7, 48, 2, 49, 7, 49, 2, 50, 7, 50, 2, 51, 7, 51, 2, 52, 7, 52, 2, 53, 7, 53, 2, 54, 7, 54, 2, 55, 7, 55, 2, 56, 7, 56, 2, 57, 7, 57, 2, 58, 7, 58, 2, 59, 7, 59, 2, 60, 7, 60, 2, 61, 7, 61, 2, 62, 7, 62, 2, 63, 7, 63, 2, 64, 7, 64, 2, 65, 7, 65, 2, 66, 7, 66, 2, 67, 7, 67, 2, 68, 7, 68, 2, 69, 7, 69, 2, 70, 7, 70, 2, 71, 7, 71, 2, 72, 7, 72, 2, 73, 7, 73, 2, 74, 7, 74, 2, 75, 7, 75, 2, 76, 7, 76, 2, 77, 7, 77, 2, 78, 7, 78, 2, 79, 7, 79, 2, 80, 7, 80, 2, 81, 7, 81, 2, 82, 7, 82, 2, 83, 7, 83, 2, 84, 7, 84, 2, 85, 7, 85, 2, 86, 7, 86, 2, 87, 7, 87, 2, 88, 7, 88, 2, 89, 7, 89, 2, 90, 7, 90, 2, 91, 7, 91, 2, 92, 7, 92, 2, 93, 7, 93, 2, 94, 7, 94, 2, 95, 7, 95, 2, 96, 7, 96, 2, 97, 7, 97, 2, 98, 7, 98, 2, 99, 7, 99, 2, 100, 7, 100, 2, 101, 7, 101, 2, 102, 7, 102, 2, 103, 7, 103, 2, 104, 7, 104, 2, 105, 7, 105, 2, 106, 7, 106, 2, 107, 7, 107, 2, 108, 7, 108, 2, 109, 7, 109, 2, 110, 7, 110, 2, 111, 7, 111, 2, 112, 7, 112, 2, 113, 7, 113, 2, 114, 7, 114, 2, 115, 7, 115, 2, 116, 7, 116, 2, 117, 7, 117, 2, 118, 7, 118, 2, 119, 7, 119, 2, 120, 7, 120, 2, 121, 7, 121, 2, 122, 7, 122, 2, 123, 7, 123, 2, 124, 7, 124, 2, 125, 7, 125, 2, 126, 7, 126, 2, 127, 7, 127, 2, 128, 7, 128, 2, 129, 7, 129, 2, 130, 7, 130, 2, 131, 7, 131, 2, 132, 7, 132, 2, 133, 7, 133, 2, 134, 7, 134, 2, 135, 7, 135, 2, 136, 7, 136, 2, 137, 7, 137, 2, 138, 7, 138, 2, 139, 7, 139, 2, 140, 7, 140, 2, 141, 7, 141, 2, 142, 7, 142, 2, 143, 7, 143, 2, 144, 7, 144, 2, 145, 7, 145, 2, 146, 7, 146, 2, 147, 7, 147, 2, 148, 7, 148, 2, 149, 7, 149, 2, 150, 7, 150, 2, 151, 7, 151, 2, 152, 7, 152, 2, 153, 7, 153, 2, 154, 7, 154, 2, 155, 7, 155, 2, 156, 7, 156, 2, 157, 7, 157, 2, 158, 7, 158, 2, 159, 7, 159, 2, 160, 7, 160, 2, 161, 7, 161, 2, 162, 7, 162, 2, 163, 7, 163, 2, 164, 7, 164, 2, 165, 7, 165, 2, 166, 7, 166, 2, 167, 7, 167, 2, 168, 7, 168, 2, 169, 7, 169, 2, 170, 7, 170, 2, 171, 7, 171, 2, 172, 7, 172, 2, 173, 7, 173, 2, 174, 7, 174, 2, 175, 7, 175, 2, 176, 7, 176, 2, 177, 7, 177, 2, 178, 7, 178, 2, 179, 7, 179, 2, 180, 7, 180, 2, 181, 7, 181, 2, 182, 7, 182, 2, 183, 7, 183, 2, 184, 7, 184, 2, 185, 7, 185, 2, 186, 7, 186, 2, 187, 7, 187, 2, 188, 7, 188, 2, 189, 7, 189, 2, 190, 7, 190, 2, 191, 7, 191, 2, 192, 7, 192, 2, 193, 7, 193, 2, 194, 7, 194, 2, 195, 7, 195, 2, 196, 7, 196, 2, 197, 7, 197, 2, 198, 7, 198, 2, 199, 7, 199, 2, 200, 7, 200, 2, 201, 7, 201, 2, 202, 7, 202, 2, 203, 7, 203, 2, 204, 7, 204, 2, 205, 7, 205, 2, 206, 7, 206, 2, 207, 7, 207, 2, 208, 7, 208, 2, 209, 7, 209, 2, 210, 7, 210, 2, 211, 7, 211, 2, 212, 7, 212, 2, 213, 7, 213, 2, 214, 7, 214, 2, 215, 7, 215, 2, 216, 7, 216, 2, 
217, 7, 217, 2, 218, 7, 218, 2, 219, 7, 219, 2, 220, 7, 220, 2, 221, 7, 221, 2, 222, 7, 222, 2, 223, 7, 223, 2, 224, 7, 224, 2, 225, 7, 225, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 481, 8, 1, 1, 2, 3, 2, 484, 8, 2, 1, 2, 1, 2, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 5, 3, 494, 8, 3, 10, 3, 12, 3, 497, 9, 3, 1, 4, 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 1, 6, 1, 6, 1, 7, 1, 7, 1, 7, 3, 7, 510, 8, 7, 1, 7, 3, 7, 513, 8, 7, 1, 7, 3, 7, 516, 8, 7, 1, 7, 3, 7, 519, 8, 7, 1, 7, 3, 7, 522, 8, 7, 1, 8, 1, 8, 1, 8, 1, 8, 5, 8, 528, 8, 8, 10, 8, 12, 8, 531, 9, 8, 1, 8, 1, 8, 1, 8, 3, 8, 536, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 541, 8, 8, 5, 8, 543, 8, 8, 10, 8, 12, 8, 546, 9, 8, 1, 9, 1, 9, 1, 9, 3, 9, 551, 8, 9, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 3, 10, 562, 8, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 3, 10, 569, 8, 10, 1, 10, 1, 10, 1, 11, 1, 11, 1, 11, 5, 11, 576, 8, 11, 10, 11, 12, 11, 579, 9, 11, 1, 12, 1, 12, 1, 12, 5, 12, 584, 8, 12, 10, 12, 12, 12, 587, 9, 12, 1, 13, 1, 13, 1, 13, 5, 13, 592, 8, 13, 10, 13, 12, 13, 595, 9, 13, 1, 14, 1, 14, 1, 14, 1, 15, 1, 15, 1, 15, 3, 15, 603, 8, 15, 1, 16, 1, 16, 3, 16, 607, 8, 16, 1, 16, 3, 16, 610, 8, 16, 1, 17, 1, 17, 3, 17, 614, 8, 17, 1, 18, 1, 18, 1, 18, 1, 18, 3, 18, 620, 8, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 3, 18, 627, 8, 18, 1, 18, 1, 18, 5, 18, 631, 8, 18, 10, 18, 12, 18, 634, 9, 18, 1, 18, 1, 18, 1, 19, 1, 19, 1, 19, 1, 20, 1, 20, 1, 20, 1, 21, 3, 21, 645, 8, 21, 1, 21, 3, 21, 648, 8, 21, 1, 21, 1, 21, 1, 21, 1, 21, 1, 21, 1, 21, 1, 21, 5, 21, 657, 8, 21, 10, 21, 12, 21, 660, 9, 21, 3, 21, 662, 8, 21, 1, 22, 1, 22, 5, 22, 666, 8, 22, 10, 22, 12, 22, 669, 9, 22, 1, 22, 1, 22, 1, 23, 1, 23, 1, 23, 1, 23, 5, 23, 677, 8, 23, 10, 23, 12, 23, 680, 9, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 3, 23, 700, 8, 23, 1, 23, 3, 23, 703, 8, 23, 1, 24, 1, 24, 3, 24, 707, 8, 24, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 5, 25, 717, 8, 25, 10, 25, 12, 25, 720, 9, 25, 1, 26, 3, 26, 723, 8, 26, 1, 26, 1, 26, 3, 26, 727, 8, 26, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 5, 27, 734, 8, 27, 10, 27, 12, 27, 737, 9, 27, 1, 28, 1, 28, 1, 28, 1, 29, 1, 29, 1, 29, 1, 30, 1, 30, 1, 30, 1, 30, 1, 30, 1, 30, 5, 30, 751, 8, 30, 10, 30, 12, 30, 754, 9, 30, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 5, 31, 762, 8, 31, 10, 31, 12, 31, 765, 9, 31, 1, 32, 3, 32, 768, 8, 32, 1, 32, 1, 32, 1, 33, 1, 33, 1, 33, 3, 33, 775, 8, 33, 1, 33, 3, 33, 778, 8, 33, 1, 34, 1, 34, 1, 34, 1, 34, 1, 34, 3, 34, 785, 8, 34, 1, 35, 1, 35, 1, 35, 1, 35, 1, 35, 1, 35, 1, 36, 1, 36, 1, 36, 3, 36, 796, 8, 36, 1, 36, 1, 36, 3, 36, 800, 8, 36, 1, 37, 1, 37, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 3, 38, 810, 8, 38, 1, 39, 1, 39, 1, 39, 3, 39, 815, 8, 39, 1, 40, 1, 40, 1, 40, 1, 40, 1, 40, 1, 40, 4, 40, 823, 8, 40, 11, 40, 12, 40, 824, 1, 40, 1, 40, 1, 41, 1, 41, 1, 41, 1, 41, 5, 41, 833, 8, 41, 10, 41, 12, 41, 836, 9, 41, 1, 41, 1, 41, 1, 42, 1, 42, 1, 42, 1, 42, 5, 42, 844, 8, 42, 10, 42, 12, 42, 847, 9, 42, 1, 42, 1, 42, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 4, 43, 857, 8, 43, 11, 43, 12, 43, 858, 1, 43, 1, 43, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 868, 8, 44, 10, 44, 12, 44, 871, 9, 44, 1, 44, 1, 44, 3, 44, 875, 8, 44, 1, 44, 1, 44, 1, 44, 1, 45, 1, 45, 1, 45, 1, 46, 1, 46, 1, 46, 3, 46, 886, 8, 46, 1, 46, 1, 46, 3, 46, 890, 8, 46, 1, 46, 1, 46, 3, 46, 894, 
8, 46, 1, 46, 1, 46, 1, 46, 3, 46, 899, 8, 46, 1, 46, 5, 46, 902, 8, 46, 10, 46, 12, 46, 905, 9, 46, 1, 46, 1, 46, 1, 47, 1, 47, 1, 47, 5, 47, 912, 8, 47, 10, 47, 12, 47, 915, 9, 47, 1, 48, 1, 48, 1, 48, 5, 48, 920, 8, 48, 10, 48, 12, 48, 923, 9, 48, 1, 49, 1, 49, 1, 49, 5, 49, 928, 8, 49, 10, 49, 12, 49, 931, 9, 49, 1, 50, 1, 50, 1, 50, 3, 50, 936, 8, 50, 1, 51, 1, 51, 1, 51, 5, 51, 941, 8, 51, 10, 51, 12, 51, 944, 9, 51, 1, 52, 1, 52, 1, 52, 3, 52, 949, 8, 52, 1, 53, 1, 53, 1, 53, 1, 53, 1, 53, 3, 53, 956, 8, 53, 1, 54, 1, 54, 1, 54, 3, 54, 961, 8, 54, 1, 54, 1, 54, 1, 55, 1, 55, 3, 55, 967, 8, 55, 1, 56, 1, 56, 3, 56, 971, 8, 56, 1, 56, 1, 56, 3, 56, 975, 8, 56, 1, 56, 1, 56, 1, 57, 1, 57, 3, 57, 981, 8, 57, 1, 57, 1, 57, 1, 58, 1, 58, 1, 58, 1, 58, 1, 58, 1, 58, 1, 58, 1, 58, 1, 58, 1, 58, 1, 58, 1, 58, 1, 58, 1, 58, 3, 58, 999, 8, 58, 1, 59, 1, 59, 1, 59, 1, 59, 3, 59, 1005, 8, 59, 3, 59, 1007, 8, 59, 1, 60, 1, 60, 1, 60, 1, 60, 1, 60, 3, 60, 1014, 8, 60, 1, 61, 1, 61, 1, 62, 1, 62, 3, 62, 1020, 8, 62, 1, 62, 1, 62, 5, 62, 1024, 8, 62, 10, 62, 12, 62, 1027, 9, 62, 1, 62, 1, 62, 1, 63, 1, 63, 1, 63, 1, 63, 1, 63, 1, 63, 1, 63, 1, 63, 1, 63, 5, 63, 1040, 8, 63, 10, 63, 12, 63, 1043, 9, 63, 1, 63, 1, 63, 1, 63, 1, 63, 3, 63, 1049, 8, 63, 1, 64, 1, 64, 1, 64, 1, 64, 1, 64, 1, 64, 1, 64, 1, 65, 1, 65, 1, 66, 1, 66, 1, 66, 3, 66, 1063, 8, 66, 1, 66, 1, 66, 1, 66, 1, 67, 1, 67, 1, 67, 1, 67, 1, 67, 5, 67, 1073, 8, 67, 10, 67, 12, 67, 1076, 9, 67, 3, 67, 1078, 8, 67, 1, 67, 1, 67, 1, 68, 1, 68, 1, 68, 1, 68, 1, 68, 1, 69, 1, 69, 1, 69, 1, 69, 1, 69, 1, 69, 1, 70, 1, 70, 1, 70, 1, 70, 1, 70, 1, 70, 1, 70, 1, 70, 1, 70, 1, 70, 5, 70, 1103, 8, 70, 10, 70, 12, 70, 1106, 9, 70, 1, 70, 1, 70, 3, 70, 1110, 8, 70, 1, 70, 1, 70, 1, 71, 1, 71, 1, 71, 1, 71, 1, 71, 1, 71, 1, 71, 1, 72, 1, 72, 1, 72, 1, 72, 1, 73, 1, 73, 1, 73, 1, 73, 1, 73, 1, 73, 1, 73, 1, 74, 3, 74, 1133, 8, 74, 1, 74, 1, 74, 1, 74, 1, 74, 3, 74, 1139, 8, 74, 1, 74, 3, 74, 1142, 8, 74, 1, 74, 1, 74, 1, 74, 1, 74, 5, 74, 1148, 8, 74, 10, 74, 12, 74, 1151, 9, 74, 1, 74, 1, 74, 3, 74, 1155, 8, 74, 1, 74, 1, 74, 1, 74, 1, 74, 1, 74, 5, 74, 1162, 8, 74, 10, 74, 12, 74, 1165, 9, 74, 1, 74, 1, 74, 1, 74, 1, 74, 3, 74, 1171, 8, 74, 1, 74, 3, 74, 1174, 8, 74, 1, 75, 1, 75, 3, 75, 1178, 8, 75, 1, 76, 1, 76, 1, 76, 1, 77, 1, 77, 3, 77, 1185, 8, 77, 1, 78, 1, 78, 1, 78, 1, 78, 1, 78, 1, 78, 3, 78, 1193, 8, 78, 1, 79, 3, 79, 1196, 8, 79, 1, 79, 1, 79, 1, 79, 3, 79, 1201, 8, 79, 1, 79, 3, 79, 1204, 8, 79, 1, 79, 1, 79, 1, 79, 5, 79, 1209, 8, 79, 10, 79, 12, 79, 1212, 9, 79, 1, 79, 1, 79, 1, 79, 3, 79, 1217, 8, 79, 1, 80, 1, 80, 1, 80, 1, 81, 1, 81, 1, 81, 1, 81, 1, 81, 3, 81, 1227, 8, 81, 5, 81, 1229, 8, 81, 10, 81, 12, 81, 1232, 9, 81, 1, 81, 1, 81, 1, 81, 1, 81, 1, 81, 3, 81, 1239, 8, 81, 5, 81, 1241, 8, 81, 10, 81, 12, 81, 1244, 9, 81, 1, 81, 1, 81, 1, 81, 1, 81, 1, 81, 3, 81, 1251, 8, 81, 5, 81, 1253, 8, 81, 10, 81, 12, 81, 1256, 9, 81, 1, 81, 1, 81, 1, 81, 1, 81, 1, 81, 3, 81, 1263, 8, 81, 5, 81, 1265, 8, 81, 10, 81, 12, 81, 1268, 9, 81, 1, 81, 1, 81, 1, 81, 1, 81, 1, 81, 1, 81, 3, 81, 1276, 8, 81, 5, 81, 1278, 8, 81, 10, 81, 12, 81, 1281, 9, 81, 1, 81, 1, 81, 1, 81, 1, 81, 1, 81, 5, 81, 1288, 8, 81, 10, 81, 12, 81, 1291, 9, 81, 3, 81, 1293, 8, 81, 1, 82, 1, 82, 1, 82, 1, 82, 1, 83, 3, 83, 1300, 8, 83, 1, 83, 1, 83, 3, 83, 1304, 8, 83, 1, 83, 3, 83, 1307, 8, 83, 1, 83, 3, 83, 1310, 8, 83, 1, 83, 1, 83, 1, 84, 3, 84, 1315, 8, 84, 1, 84, 1, 84, 3, 84, 1319, 8, 84, 1, 84, 1, 84, 1, 85, 1, 85, 1, 86, 1, 86, 1, 86, 1, 86, 1, 86, 1, 87, 1, 87, 1, 
87, 1, 87, 3, 87, 1334, 8, 87, 1, 88, 1, 88, 1, 88, 1, 88, 1, 88, 1, 88, 3, 88, 1342, 8, 88, 1, 89, 1, 89, 1, 90, 1, 90, 1, 91, 3, 91, 1349, 8, 91, 1, 91, 1, 91, 1, 91, 1, 91, 3, 91, 1355, 8, 91, 1, 91, 3, 91, 1358, 8, 91, 1, 91, 1, 91, 3, 91, 1362, 8, 91, 1, 91, 3, 91, 1365, 8, 91, 1, 92, 1, 92, 1, 92, 1, 93, 1, 93, 3, 93, 1372, 8, 93, 1, 94, 1, 94, 1, 94, 1, 94, 1, 94, 1, 94, 1, 94, 1, 94, 1, 94, 1, 94, 1, 94, 1, 94, 1, 94, 1, 94, 1, 94, 3, 94, 1389, 8, 94, 1, 95, 1, 95, 1, 95, 1, 95, 1, 95, 5, 95, 1396, 8, 95, 10, 95, 12, 95, 1399, 9, 95, 1, 95, 1, 95, 1, 96, 1, 96, 1, 96, 3, 96, 1406, 8, 96, 1, 96, 3, 96, 1409, 8, 96, 1, 97, 1, 97, 3, 97, 1413, 8, 97, 1, 97, 1, 97, 3, 97, 1417, 8, 97, 3, 97, 1419, 8, 97, 1, 98, 1, 98, 1, 98, 1, 98, 1, 98, 1, 98, 3, 98, 1427, 8, 98, 1, 99, 1, 99, 1, 99, 1, 100, 1, 100, 1, 100, 1, 100, 1, 100, 1, 101, 1, 101, 1, 101, 1, 101, 1, 101, 1, 102, 1, 102, 1, 103, 1, 103, 1, 104, 1, 104, 1, 105, 1, 105, 1, 106, 1, 106, 1, 106, 1, 106, 1, 106, 1, 106, 1, 106, 1, 106, 1, 106, 1, 106, 3, 106, 1460, 8, 106, 1, 107, 1, 107, 1, 108, 1, 108, 1, 108, 1, 108, 3, 108, 1468, 8, 108, 1, 109, 1, 109, 1, 109, 1, 109, 3, 109, 1474, 8, 109, 1, 110, 1, 110, 1, 111, 1, 111, 1, 112, 1, 112, 1, 113, 1, 113, 1, 114, 1, 114, 1, 114, 5, 114, 1487, 8, 114, 10, 114, 12, 114, 1490, 9, 114, 1, 115, 1, 115, 1, 115, 5, 115, 1495, 8, 115, 10, 115, 12, 115, 1498, 9, 115, 1, 116, 3, 116, 1501, 8, 116, 1, 116, 1, 116, 1, 117, 1, 117, 1, 117, 5, 117, 1508, 8, 117, 10, 117, 12, 117, 1511, 9, 117, 1, 118, 1, 118, 3, 118, 1515, 8, 118, 1, 119, 1, 119, 1, 119, 1, 119, 1, 119, 3, 119, 1522, 8, 119, 1, 119, 1, 119, 1, 120, 1, 120, 1, 120, 1, 120, 3, 120, 1530, 8, 120, 1, 120, 1, 120, 3, 120, 1534, 8, 120, 1, 121, 1, 121, 1, 122, 1, 122, 1, 122, 1, 122, 1, 123, 1, 123, 1, 123, 1, 123, 1, 124, 1, 124, 1, 124, 1, 124, 1, 124, 1, 125, 1, 125, 1, 125, 1, 125, 1, 125, 3, 125, 1556, 8, 125, 1, 125, 1, 125, 3, 125, 1560, 8, 125, 1, 125, 1, 125, 1, 125, 1, 125, 3, 125, 1566, 8, 125, 1, 126, 1, 126, 1, 126, 3, 126, 1571, 8, 126, 1, 126, 1, 126, 1, 127, 1, 127, 1, 128, 1, 128, 1, 128, 3, 128, 1580, 8, 128, 1, 128, 1, 128, 1, 128, 1, 128, 3, 128, 1586, 8, 128, 5, 128, 1588, 8, 128, 10, 128, 12, 128, 1591, 9, 128, 1, 129, 1, 129, 1, 129, 1, 129, 1, 129, 1, 129, 1, 129, 3, 129, 1600, 8, 129, 1, 129, 3, 129, 1603, 8, 129, 1, 130, 1, 130, 1, 130, 1, 130, 5, 130, 1609, 8, 130, 10, 130, 12, 130, 1612, 9, 130, 1, 130, 1, 130, 1, 131, 1, 131, 1, 131, 1, 131, 1, 131, 1, 132, 1, 132, 1, 133, 1, 133, 3, 133, 1625, 8, 133, 1, 133, 1, 133, 1, 133, 3, 133, 1630, 8, 133, 5, 133, 1632, 8, 133, 10, 133, 12, 133, 1635, 9, 133, 1, 134, 1, 134, 1, 134, 1, 134, 1, 134, 3, 134, 1642, 8, 134, 3, 134, 1644, 8, 134, 1, 134, 3, 134, 1647, 8, 134, 1, 134, 1, 134, 1, 135, 1, 135, 1, 135, 1, 135, 1, 135, 1, 135, 1, 135, 1, 135, 1, 135, 3, 135, 1660, 8, 135, 1, 136, 1, 136, 1, 136, 5, 136, 1665, 8, 136, 10, 136, 12, 136, 1668, 9, 136, 1, 137, 1, 137, 3, 137, 1672, 8, 137, 1, 138, 1, 138, 1, 138, 1, 138, 1, 139, 1, 139, 1, 139, 1, 139, 1, 139, 4, 139, 1683, 8, 139, 11, 139, 12, 139, 1684, 1, 140, 1, 140, 1, 140, 1, 140, 1, 141, 1, 141, 1, 142, 1, 142, 1, 142, 1, 142, 1, 143, 1, 143, 1, 143, 1, 143, 3, 143, 1701, 8, 143, 1, 144, 1, 144, 1, 144, 1, 144, 1, 145, 1, 145, 1, 145, 1, 145, 3, 145, 1711, 8, 145, 1, 146, 1, 146, 1, 146, 1, 146, 1, 147, 1, 147, 1, 147, 1, 147, 1, 148, 1, 148, 1, 148, 1, 148, 1, 148, 1, 148, 3, 148, 1727, 8, 148, 3, 148, 1729, 8, 148, 1, 148, 1, 148, 1, 148, 1, 148, 4, 148, 1735, 8, 148, 11, 148, 12, 148, 1736, 1, 148, 
1, 148, 3, 148, 1741, 8, 148, 1, 149, 1, 149, 1, 149, 1, 149, 1, 149, 1, 149, 1, 149, 1, 149, 1, 149, 1, 149, 1, 149, 1, 149, 1, 149, 1, 149, 1, 149, 1, 149, 1, 149, 1, 149, 1, 149, 1, 149, 1, 149, 3, 149, 1764, 8, 149, 1, 150, 1, 150, 1, 150, 1, 151, 1, 151, 1, 151, 1, 151, 1, 151, 3, 151, 1774, 8, 151, 1, 152, 1, 152, 1, 152, 1, 152, 1, 152, 1, 153, 1, 153, 1, 153, 1, 153, 1, 153, 1, 153, 1, 153, 1, 153, 3, 153, 1789, 8, 153, 1, 154, 1, 154, 1, 154, 3, 154, 1794, 8, 154, 1, 155, 1, 155, 1, 155, 1, 156, 1, 156, 1, 156, 1, 156, 1, 157, 1, 157, 1, 157, 1, 157, 1, 158, 1, 158, 1, 158, 1, 158, 3, 158, 1811, 8, 158, 1, 158, 1, 158, 1, 158, 1, 158, 3, 158, 1817, 8, 158, 5, 158, 1819, 8, 158, 10, 158, 12, 158, 1822, 9, 158, 1, 158, 1, 158, 1, 159, 1, 159, 1, 159, 1, 159, 1, 159, 1, 159, 1, 159, 1, 159, 3, 159, 1834, 8, 159, 1, 159, 3, 159, 1837, 8, 159, 1, 160, 1, 160, 1, 160, 1, 161, 1, 161, 1, 161, 1, 161, 3, 161, 1846, 8, 161, 1, 161, 3, 161, 1849, 8, 161, 1, 161, 1, 161, 1, 161, 1, 161, 3, 161, 1855, 8, 161, 1, 162, 1, 162, 1, 162, 5, 162, 1860, 8, 162, 10, 162, 12, 162, 1863, 9, 162, 1, 163, 1, 163, 1, 163, 5, 163, 1868, 8, 163, 10, 163, 12, 163, 1871, 9, 163, 1, 164, 1, 164, 1, 164, 5, 164, 1876, 8, 164, 10, 164, 12, 164, 1879, 9, 164, 1, 164, 1, 164, 1, 164, 3, 164, 1884, 8, 164, 1, 165, 1, 165, 1, 165, 1, 165, 3, 165, 1890, 8, 165, 1, 165, 1, 165, 1, 166, 1, 166, 1, 166, 1, 166, 1, 166, 3, 166, 1899, 8, 166, 1, 166, 1, 166, 1, 166, 1, 166, 1, 166, 1, 166, 1, 166, 1, 166, 3, 166, 1909, 8, 166, 1, 166, 3, 166, 1912, 8, 166, 1, 166, 1, 166, 1, 166, 1, 166, 1, 166, 3, 166, 1919, 8, 166, 1, 166, 1, 166, 1, 166, 1, 166, 3, 166, 1925, 8, 166, 1, 166, 3, 166, 1928, 8, 166, 1, 167, 1, 167, 1, 168, 1, 168, 1, 168, 5, 168, 1935, 8, 168, 10, 168, 12, 168, 1938, 9, 168, 1, 169, 1, 169, 3, 169, 1942, 8, 169, 1, 169, 3, 169, 1945, 8, 169, 1, 170, 1, 170, 1, 170, 3, 170, 1950, 8, 170, 1, 170, 3, 170, 1953, 8, 170, 1, 170, 3, 170, 1956, 8, 170, 1, 170, 1, 170, 1, 171, 1, 171, 4, 171, 1962, 8, 171, 11, 171, 12, 171, 1963, 1, 172, 3, 172, 1967, 8, 172, 1, 172, 1, 172, 1, 172, 3, 172, 1972, 8, 172, 3, 172, 1974, 8, 172, 1, 172, 3, 172, 1977, 8, 172, 1, 173, 1, 173, 1, 173, 1, 173, 1, 173, 3, 173, 1984, 8, 173, 1, 173, 1, 173, 1, 173, 1, 173, 1, 173, 1, 173, 1, 173, 1, 173, 1, 173, 1, 173, 3, 173, 1996, 8, 173, 1, 174, 1, 174, 1, 175, 1, 175, 1, 175, 1, 175, 1, 175, 1, 175, 1, 175, 1, 175, 1, 175, 5, 175, 2009, 8, 175, 10, 175, 12, 175, 2012, 9, 175, 1, 175, 1, 175, 1, 175, 1, 175, 1, 175, 1, 175, 1, 175, 1, 175, 1, 175, 1, 175, 3, 175, 2024, 8, 175, 1, 176, 1, 176, 1, 176, 1, 177, 1, 177, 1, 177, 1, 177, 1, 177, 1, 177, 1, 177, 1, 177, 1, 177, 1, 177, 3, 177, 2039, 8, 177, 1, 177, 3, 177, 2042, 8, 177, 1, 178, 1, 178, 1, 178, 1, 178, 1, 178, 1, 178, 3, 178, 2050, 8, 178, 1, 178, 1, 178, 1, 178, 1, 178, 1, 178, 3, 178, 2057, 8, 178, 1, 178, 3, 178, 2060, 8, 178, 1, 178, 3, 178, 2063, 8, 178, 1, 179, 1, 179, 1, 179, 1, 179, 1, 179, 1, 179, 1, 179, 1, 179, 3, 179, 2073, 8, 179, 1, 180, 1, 180, 1, 180, 5, 180, 2078, 8, 180, 10, 180, 12, 180, 2081, 9, 180, 1, 181, 1, 181, 3, 181, 2085, 8, 181, 1, 182, 1, 182, 5, 182, 2089, 8, 182, 10, 182, 12, 182, 2092, 9, 182, 1, 183, 1, 183, 1, 183, 1, 183, 1, 183, 1, 183, 3, 183, 2100, 8, 183, 1, 184, 1, 184, 1, 184, 1, 184, 3, 184, 2106, 8, 184, 1, 184, 1, 184, 1, 184, 1, 184, 3, 184, 2112, 8, 184, 1, 185, 1, 185, 1, 185, 3, 185, 2117, 8, 185, 1, 185, 1, 185, 1, 185, 1, 185, 1, 185, 1, 185, 1, 185, 1, 185, 1, 185, 1, 185, 3, 185, 2129, 8, 185, 1, 185, 1, 185, 1, 185, 1, 
185, 1, 185, 3, 185, 2136, 8, 185, 1, 186, 1, 186, 1, 186, 5, 186, 2141, 8, 186, 10, 186, 12, 186, 2144, 9, 186, 1, 187, 1, 187, 1, 187, 3, 187, 2149, 8, 187, 1, 187, 1, 187, 1, 187, 1, 187, 1, 187, 1, 187, 1, 187, 1, 187, 1, 187, 1, 187, 1, 187, 1, 187, 1, 187, 1, 187, 3, 187, 2165, 8, 187, 1, 188, 1, 188, 1, 188, 1, 188, 3, 188, 2171, 8, 188, 1, 188, 3, 188, 2174, 8, 188, 1, 189, 1, 189, 1, 189, 1, 189, 1, 190, 1, 190, 1, 190, 1, 190, 3, 190, 2184, 8, 190, 1, 190, 3, 190, 2187, 8, 190, 1, 190, 3, 190, 2190, 8, 190, 1, 190, 3, 190, 2193, 8, 190, 1, 190, 3, 190, 2196, 8, 190, 1, 191, 1, 191, 1, 191, 1, 191, 3, 191, 2202, 8, 191, 1, 192, 1, 192, 1, 192, 1, 192, 1, 193, 1, 193, 1, 193, 1, 193, 3, 193, 2212, 8, 193, 1, 194, 1, 194, 1, 194, 1, 194, 3, 194, 2218, 8, 194, 1, 195, 1, 195, 3, 195, 2222, 8, 195, 1, 196, 1, 196, 1, 196, 1, 197, 1, 197, 1, 197, 3, 197, 2230, 8, 197, 1, 197, 3, 197, 2233, 8, 197, 1, 197, 1, 197, 1, 197, 3, 197, 2238, 8, 197, 1, 198, 1, 198, 1, 198, 1, 199, 1, 199, 1, 199, 1, 199, 1, 200, 1, 200, 3, 200, 2249, 8, 200, 1, 201, 1, 201, 1, 201, 1, 202, 1, 202, 1, 202, 1, 202, 1, 203, 1, 203, 1, 203, 1, 203, 1, 204, 1, 204, 1, 204, 1, 204, 1, 204, 3, 204, 2267, 8, 204, 1, 204, 1, 204, 1, 204, 1, 205, 1, 205, 1, 205, 1, 205, 1, 206, 1, 206, 1, 206, 1, 206, 1, 207, 1, 207, 1, 207, 1, 207, 1, 207, 3, 207, 2285, 8, 207, 1, 207, 1, 207, 1, 207, 1, 208, 1, 208, 1, 208, 1, 208, 3, 208, 2294, 8, 208, 1, 209, 1, 209, 1, 209, 5, 209, 2299, 8, 209, 10, 209, 12, 209, 2302, 9, 209, 1, 210, 1, 210, 3, 210, 2306, 8, 210, 1, 211, 1, 211, 3, 211, 2310, 8, 211, 1, 211, 1, 211, 1, 211, 3, 211, 2315, 8, 211, 5, 211, 2317, 8, 211, 10, 211, 12, 211, 2320, 9, 211, 1, 212, 1, 212, 1, 213, 1, 213, 1, 213, 1, 213, 1, 213, 1, 213, 1, 213, 3, 213, 2331, 8, 213, 1, 214, 1, 214, 1, 214, 1, 214, 5, 214, 2337, 8, 214, 10, 214, 12, 214, 2340, 9, 214, 1, 214, 1, 214, 1, 214, 1, 214, 3, 214, 2346, 8, 214, 1, 215, 1, 215, 1, 215, 1, 215, 5, 215, 2352, 8, 215, 10, 215, 12, 215, 2355, 9, 215, 1, 215, 1, 215, 1, 215, 1, 215, 3, 215, 2361, 8, 215, 1, 216, 1, 216, 1, 216, 1, 216, 1, 217, 1, 217, 1, 217, 1, 217, 1, 217, 1, 217, 1, 217, 3, 217, 2374, 8, 217, 1, 218, 1, 218, 1, 218, 1, 219, 1, 219, 1, 219, 1, 220, 1, 220, 1, 221, 3, 221, 2385, 8, 221, 1, 221, 1, 221, 1, 222, 3, 222, 2390, 8, 222, 1, 222, 1, 222, 1, 223, 1, 223, 1, 224, 1, 224, 1, 224, 5, 224, 2399, 8, 224, 10, 224, 12, 224, 2402, 9, 224, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 
225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 3, 225, 2553, 8, 225, 1, 225, 1, 225, 3, 225, 2557, 8, 225, 1, 225, 0, 2, 60, 62, 226, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94, 96, 98, 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, 126, 128, 130, 132, 134, 136, 138, 140, 142, 144, 146, 148, 150, 152, 154, 156, 158, 160, 162, 164, 166, 168, 170, 172, 174, 176, 178, 180, 182, 184, 186, 188, 190, 192, 194, 196, 198, 200, 202, 204, 206, 208, 210, 212, 214, 216, 218, 220, 222, 224, 226, 228, 230, 232, 234, 236, 238, 240, 242, 244, 246, 248, 250, 252, 254, 256, 258, 260, 262, 264, 266, 268, 270, 272, 274, 276, 278, 280, 282, 284, 286, 288, 290, 292, 294, 296, 298, 300, 302, 304, 306, 308, 310, 312, 314, 316, 318, 320, 322, 324, 326, 328, 330, 332, 334, 336, 338, 340, 342, 344, 346, 348, 350, 352, 354, 356, 358, 360, 362, 364, 366, 368, 370, 372, 374, 376, 378, 380, 382, 384, 386, 388, 390, 392, 394, 396, 398, 400, 402, 404, 406, 408, 410, 412, 414, 416, 418, 420, 422, 424, 426, 428, 430, 432, 434, 436, 438, 440, 442, 444, 446, 448, 450, 0, 18, 2, 0, 15, 15, 33, 33, 2, 0, 49, 49, 77, 77, 1, 0, 185, 190, 1, 0, 197, 198, 2, 0, 181, 181, 199, 200, 2, 0, 76, 76, 139, 139, 2, 0, 5, 5, 183, 184, 2, 0, 69, 69, 135, 135, 2, 0, 29, 29, 60, 60, 3, 0, 181, 181, 184, 184, 197, 197, 1, 0, 159, 160, 3, 0, 155, 155, 157, 157, 162, 162, 2, 0, 159, 160, 162, 162, 2, 0, 33, 33, 35, 35, 2, 0, 82, 82, 133, 133, 4, 0, 29, 29, 60, 60, 85, 85, 119, 119, 1, 0, 205, 207, 1, 0, 208, 209, 2878, 0, 452, 1, 0, 0, 0, 2, 480, 1, 0, 0, 0, 4, 483, 1, 0, 0, 0, 6, 487, 1, 0, 0, 0, 8, 498, 1, 0, 0, 0, 10, 501, 1, 0, 0, 0, 12, 504, 1, 0, 0, 0, 14, 506, 1, 0, 0, 0, 16, 523, 1, 0, 0, 0, 18, 550, 1, 0, 0, 0, 20, 552, 1, 0, 0, 0, 22, 572, 1, 0, 0, 0, 24, 580, 1, 0, 0, 0, 26, 588, 1, 0, 0, 0, 28, 596, 1, 0, 0, 0, 30, 599, 1, 0, 0, 0, 32, 604, 1, 0, 0, 0, 34, 613, 1, 0, 0, 0, 36, 615, 1, 0, 0, 0, 38, 637, 1, 0, 0, 0, 40, 640, 1, 0, 0, 0, 42, 644, 1, 0, 0, 0, 44, 663, 1, 0, 0, 0, 46, 699, 1, 0, 0, 0, 48, 706, 1, 0, 0, 0, 50, 708, 1, 0, 0, 0, 52, 722, 1, 0, 0, 0, 54, 728, 1, 0, 0, 0, 56, 738, 1, 0, 0, 0, 58, 741, 1, 0, 0, 0, 60, 744, 1, 0, 0, 0, 62, 755, 1, 0, 0, 0, 64, 767, 1, 0, 0, 0, 66, 771, 1, 0, 0, 0, 68, 784, 1, 0, 0, 0, 70, 786, 1, 0, 0, 0, 72, 792, 1, 0, 0, 0, 74, 801, 1, 0, 0, 0, 76, 809, 1, 0, 0, 0, 78, 814, 1, 0, 0, 0, 80, 816, 1, 0, 0, 0, 82, 828, 1, 0, 0, 0, 84, 839, 1, 0, 0, 0, 86, 850, 1, 0, 0, 0, 88, 874, 1, 0, 0, 0, 90, 879, 1, 0, 0, 0, 92, 882, 1, 0, 0, 0, 94, 908, 1, 0, 0, 0, 96, 916, 1, 0, 0, 0, 98, 924, 1, 0, 0, 0, 100, 935, 1, 0, 0, 0, 102, 937, 1, 0, 0, 0, 104, 945, 1, 0, 0, 0, 106, 955, 1, 0, 0, 0, 108, 957, 1, 0, 0, 0, 110, 966, 1, 0, 0, 0, 112, 968, 1, 0, 0, 0, 114, 978, 1, 0, 0, 0, 116, 998, 1, 0, 0, 0, 118, 1000, 1, 0, 0, 0, 120, 1013, 1, 0, 0, 0, 122, 1015, 1, 0, 0, 0, 124, 1017, 1, 0, 0, 0, 126, 1048, 1, 0, 0, 0, 128, 1050, 1, 0, 0, 0, 130, 1057, 1, 0, 0, 0, 132, 1059, 1, 0, 0, 0, 134, 1067, 1, 0, 0, 0, 136, 1081, 1, 0, 0, 0, 138, 1086, 1, 0, 0, 0, 140, 1092, 1, 0, 0, 0, 142, 1113, 1, 0, 0, 0, 144, 1120, 1, 0, 0, 0, 146, 1124, 1, 0, 0, 0, 148, 1132, 1, 0, 0, 0, 150, 1177, 1, 0, 0, 0, 152, 1179, 1, 0, 0, 0, 154, 1184, 1, 0, 0, 0, 156, 1192, 1, 0, 0, 0, 158, 1195, 1, 0, 0, 0, 160, 1218, 1, 0, 0, 0, 162, 1292, 1, 0, 0, 0, 164, 1294, 1, 0, 0, 0, 166, 1299, 1, 0, 0, 0, 168, 1314, 1, 0, 0, 0, 170, 1322, 1, 0, 0, 0, 172, 1324, 1, 0, 0, 0, 174, 
1333, 1, 0, 0, 0, 176, 1341, 1, 0, 0, 0, 178, 1343, 1, 0, 0, 0, 180, 1345, 1, 0, 0, 0, 182, 1348, 1, 0, 0, 0, 184, 1366, 1, 0, 0, 0, 186, 1369, 1, 0, 0, 0, 188, 1388, 1, 0, 0, 0, 190, 1390, 1, 0, 0, 0, 192, 1402, 1, 0, 0, 0, 194, 1418, 1, 0, 0, 0, 196, 1420, 1, 0, 0, 0, 198, 1428, 1, 0, 0, 0, 200, 1431, 1, 0, 0, 0, 202, 1436, 1, 0, 0, 0, 204, 1441, 1, 0, 0, 0, 206, 1443, 1, 0, 0, 0, 208, 1445, 1, 0, 0, 0, 210, 1447, 1, 0, 0, 0, 212, 1459, 1, 0, 0, 0, 214, 1461, 1, 0, 0, 0, 216, 1463, 1, 0, 0, 0, 218, 1469, 1, 0, 0, 0, 220, 1475, 1, 0, 0, 0, 222, 1477, 1, 0, 0, 0, 224, 1479, 1, 0, 0, 0, 226, 1481, 1, 0, 0, 0, 228, 1483, 1, 0, 0, 0, 230, 1491, 1, 0, 0, 0, 232, 1500, 1, 0, 0, 0, 234, 1504, 1, 0, 0, 0, 236, 1514, 1, 0, 0, 0, 238, 1516, 1, 0, 0, 0, 240, 1525, 1, 0, 0, 0, 242, 1535, 1, 0, 0, 0, 244, 1537, 1, 0, 0, 0, 246, 1541, 1, 0, 0, 0, 248, 1545, 1, 0, 0, 0, 250, 1550, 1, 0, 0, 0, 252, 1570, 1, 0, 0, 0, 254, 1574, 1, 0, 0, 0, 256, 1579, 1, 0, 0, 0, 258, 1592, 1, 0, 0, 0, 260, 1604, 1, 0, 0, 0, 262, 1615, 1, 0, 0, 0, 264, 1620, 1, 0, 0, 0, 266, 1624, 1, 0, 0, 0, 268, 1636, 1, 0, 0, 0, 270, 1659, 1, 0, 0, 0, 272, 1661, 1, 0, 0, 0, 274, 1669, 1, 0, 0, 0, 276, 1673, 1, 0, 0, 0, 278, 1682, 1, 0, 0, 0, 280, 1686, 1, 0, 0, 0, 282, 1690, 1, 0, 0, 0, 284, 1692, 1, 0, 0, 0, 286, 1696, 1, 0, 0, 0, 288, 1702, 1, 0, 0, 0, 290, 1706, 1, 0, 0, 0, 292, 1712, 1, 0, 0, 0, 294, 1716, 1, 0, 0, 0, 296, 1720, 1, 0, 0, 0, 298, 1763, 1, 0, 0, 0, 300, 1765, 1, 0, 0, 0, 302, 1768, 1, 0, 0, 0, 304, 1775, 1, 0, 0, 0, 306, 1788, 1, 0, 0, 0, 308, 1790, 1, 0, 0, 0, 310, 1795, 1, 0, 0, 0, 312, 1798, 1, 0, 0, 0, 314, 1802, 1, 0, 0, 0, 316, 1806, 1, 0, 0, 0, 318, 1825, 1, 0, 0, 0, 320, 1838, 1, 0, 0, 0, 322, 1841, 1, 0, 0, 0, 324, 1856, 1, 0, 0, 0, 326, 1864, 1, 0, 0, 0, 328, 1883, 1, 0, 0, 0, 330, 1885, 1, 0, 0, 0, 332, 1893, 1, 0, 0, 0, 334, 1929, 1, 0, 0, 0, 336, 1931, 1, 0, 0, 0, 338, 1944, 1, 0, 0, 0, 340, 1946, 1, 0, 0, 0, 342, 1961, 1, 0, 0, 0, 344, 1976, 1, 0, 0, 0, 346, 1995, 1, 0, 0, 0, 348, 1997, 1, 0, 0, 0, 350, 1999, 1, 0, 0, 0, 352, 2025, 1, 0, 0, 0, 354, 2028, 1, 0, 0, 0, 356, 2043, 1, 0, 0, 0, 358, 2072, 1, 0, 0, 0, 360, 2074, 1, 0, 0, 0, 362, 2082, 1, 0, 0, 0, 364, 2086, 1, 0, 0, 0, 366, 2099, 1, 0, 0, 0, 368, 2101, 1, 0, 0, 0, 370, 2113, 1, 0, 0, 0, 372, 2137, 1, 0, 0, 0, 374, 2145, 1, 0, 0, 0, 376, 2166, 1, 0, 0, 0, 378, 2175, 1, 0, 0, 0, 380, 2179, 1, 0, 0, 0, 382, 2197, 1, 0, 0, 0, 384, 2203, 1, 0, 0, 0, 386, 2207, 1, 0, 0, 0, 388, 2213, 1, 0, 0, 0, 390, 2221, 1, 0, 0, 0, 392, 2223, 1, 0, 0, 0, 394, 2237, 1, 0, 0, 0, 396, 2239, 1, 0, 0, 0, 398, 2242, 1, 0, 0, 0, 400, 2246, 1, 0, 0, 0, 402, 2250, 1, 0, 0, 0, 404, 2253, 1, 0, 0, 0, 406, 2257, 1, 0, 0, 0, 408, 2261, 1, 0, 0, 0, 410, 2271, 1, 0, 0, 0, 412, 2275, 1, 0, 0, 0, 414, 2279, 1, 0, 0, 0, 416, 2293, 1, 0, 0, 0, 418, 2295, 1, 0, 0, 0, 420, 2305, 1, 0, 0, 0, 422, 2309, 1, 0, 0, 0, 424, 2321, 1, 0, 0, 0, 426, 2330, 1, 0, 0, 0, 428, 2345, 1, 0, 0, 0, 430, 2360, 1, 0, 0, 0, 432, 2362, 1, 0, 0, 0, 434, 2373, 1, 0, 0, 0, 436, 2375, 1, 0, 0, 0, 438, 2378, 1, 0, 0, 0, 440, 2381, 1, 0, 0, 0, 442, 2384, 1, 0, 0, 0, 444, 2389, 1, 0, 0, 0, 446, 2393, 1, 0, 0, 0, 448, 2395, 1, 0, 0, 0, 450, 2556, 1, 0, 0, 0, 452, 453, 3, 2, 1, 0, 453, 454, 5, 0, 0, 1, 454, 1, 1, 0, 0, 0, 455, 481, 3, 4, 2, 0, 456, 481, 3, 148, 74, 0, 457, 481, 3, 158, 79, 0, 458, 481, 3, 182, 91, 0, 459, 481, 3, 250, 125, 0, 460, 481, 3, 332, 166, 0, 461, 481, 3, 376, 188, 0, 462, 481, 3, 378, 189, 0, 463, 481, 3, 238, 119, 0, 464, 481, 3, 244, 122, 0, 465, 481, 3, 368, 184, 0, 466, 481, 3, 240, 
120, 0, 467, 481, 3, 246, 123, 0, 468, 481, 3, 356, 178, 0, 469, 481, 3, 384, 192, 0, 470, 481, 3, 382, 191, 0, 471, 481, 3, 304, 152, 0, 472, 481, 3, 380, 190, 0, 473, 481, 3, 330, 165, 0, 474, 481, 3, 386, 193, 0, 475, 481, 3, 388, 194, 0, 476, 481, 3, 370, 185, 0, 477, 481, 3, 248, 124, 0, 478, 481, 3, 374, 187, 0, 479, 481, 3, 10, 5, 0, 480, 455, 1, 0, 0, 0, 480, 456, 1, 0, 0, 0, 480, 457, 1, 0, 0, 0, 480, 458, 1, 0, 0, 0, 480, 459, 1, 0, 0, 0, 480, 460, 1, 0, 0, 0, 480, 461, 1, 0, 0, 0, 480, 462, 1, 0, 0, 0, 480, 463, 1, 0, 0, 0, 480, 464, 1, 0, 0, 0, 480, 465, 1, 0, 0, 0, 480, 466, 1, 0, 0, 0, 480, 467, 1, 0, 0, 0, 480, 468, 1, 0, 0, 0, 480, 469, 1, 0, 0, 0, 480, 470, 1, 0, 0, 0, 480, 471, 1, 0, 0, 0, 480, 472, 1, 0, 0, 0, 480, 473, 1, 0, 0, 0, 480, 474, 1, 0, 0, 0, 480, 475, 1, 0, 0, 0, 480, 476, 1, 0, 0, 0, 480, 477, 1, 0, 0, 0, 480, 478, 1, 0, 0, 0, 480, 479, 1, 0, 0, 0, 481, 3, 1, 0, 0, 0, 482, 484, 3, 6, 3, 0, 483, 482, 1, 0, 0, 0, 483, 484, 1, 0, 0, 0, 484, 485, 1, 0, 0, 0, 485, 486, 3, 14, 7, 0, 486, 5, 1, 0, 0, 0, 487, 488, 5, 30, 0, 0, 488, 489, 3, 8, 4, 0, 489, 495, 5, 172, 0, 0, 490, 491, 3, 8, 4, 0, 491, 492, 5, 172, 0, 0, 492, 494, 1, 0, 0, 0, 493, 490, 1, 0, 0, 0, 494, 497, 1, 0, 0, 0, 495, 493, 1, 0, 0, 0, 495, 496, 1, 0, 0, 0, 496, 7, 1, 0, 0, 0, 497, 495, 1, 0, 0, 0, 498, 499, 5, 5, 0, 0, 499, 500, 3, 188, 94, 0, 500, 9, 1, 0, 0, 0, 501, 502, 3, 6, 3, 0, 502, 503, 3, 134, 67, 0, 503, 11, 1, 0, 0, 0, 504, 505, 3, 60, 30, 0, 505, 13, 1, 0, 0, 0, 506, 507, 3, 40, 20, 0, 507, 509, 3, 16, 8, 0, 508, 510, 3, 38, 19, 0, 509, 508, 1, 0, 0, 0, 509, 510, 1, 0, 0, 0, 510, 512, 1, 0, 0, 0, 511, 513, 3, 54, 27, 0, 512, 511, 1, 0, 0, 0, 512, 513, 1, 0, 0, 0, 513, 515, 1, 0, 0, 0, 514, 516, 3, 50, 25, 0, 515, 514, 1, 0, 0, 0, 515, 516, 1, 0, 0, 0, 516, 518, 1, 0, 0, 0, 517, 519, 3, 56, 28, 0, 518, 517, 1, 0, 0, 0, 518, 519, 1, 0, 0, 0, 519, 521, 1, 0, 0, 0, 520, 522, 3, 58, 29, 0, 521, 520, 1, 0, 0, 0, 521, 522, 1, 0, 0, 0, 522, 15, 1, 0, 0, 0, 523, 524, 5, 54, 0, 0, 524, 529, 3, 18, 9, 0, 525, 526, 5, 173, 0, 0, 526, 528, 3, 18, 9, 0, 527, 525, 1, 0, 0, 0, 528, 531, 1, 0, 0, 0, 529, 527, 1, 0, 0, 0, 529, 530, 1, 0, 0, 0, 530, 544, 1, 0, 0, 0, 531, 529, 1, 0, 0, 0, 532, 540, 5, 173, 0, 0, 533, 535, 3, 12, 6, 0, 534, 536, 5, 14, 0, 0, 535, 534, 1, 0, 0, 0, 535, 536, 1, 0, 0, 0, 536, 537, 1, 0, 0, 0, 537, 538, 5, 5, 0, 0, 538, 541, 1, 0, 0, 0, 539, 541, 3, 36, 18, 0, 540, 533, 1, 0, 0, 0, 540, 539, 1, 0, 0, 0, 541, 543, 1, 0, 0, 0, 542, 532, 1, 0, 0, 0, 543, 546, 1, 0, 0, 0, 544, 542, 1, 0, 0, 0, 544, 545, 1, 0, 0, 0, 545, 17, 1, 0, 0, 0, 546, 544, 1, 0, 0, 0, 547, 551, 3, 30, 15, 0, 548, 551, 3, 20, 10, 0, 549, 551, 3, 26, 13, 0, 550, 547, 1, 0, 0, 0, 550, 548, 1, 0, 0, 0, 550, 549, 1, 0, 0, 0, 551, 19, 1, 0, 0, 0, 552, 553, 5, 91, 0, 0, 553, 554, 5, 127, 0, 0, 554, 555, 5, 175, 0, 0, 555, 561, 3, 30, 15, 0, 556, 557, 5, 12, 0, 0, 557, 558, 5, 175, 0, 0, 558, 559, 3, 22, 11, 0, 559, 560, 5, 176, 0, 0, 560, 562, 1, 0, 0, 0, 561, 556, 1, 0, 0, 0, 561, 562, 1, 0, 0, 0, 562, 568, 1, 0, 0, 0, 563, 564, 5, 34, 0, 0, 564, 565, 5, 175, 0, 0, 565, 566, 3, 24, 12, 0, 566, 567, 5, 176, 0, 0, 567, 569, 1, 0, 0, 0, 568, 563, 1, 0, 0, 0, 568, 569, 1, 0, 0, 0, 569, 570, 1, 0, 0, 0, 570, 571, 5, 176, 0, 0, 571, 21, 1, 0, 0, 0, 572, 577, 3, 30, 15, 0, 573, 574, 5, 173, 0, 0, 574, 576, 3, 30, 15, 0, 575, 573, 1, 0, 0, 0, 576, 579, 1, 0, 0, 0, 577, 575, 1, 0, 0, 0, 577, 578, 1, 0, 0, 0, 578, 23, 1, 0, 0, 0, 579, 577, 1, 0, 0, 0, 580, 585, 3, 30, 15, 0, 581, 582, 5, 173, 0, 0, 582, 584, 3, 30, 15, 0, 
583, 581, 1, 0, 0, 0, 584, 587, 1, 0, 0, 0, 585, 583, 1, 0, 0, 0, 585, 586, 1, 0, 0, 0, 586, 25, 1, 0, 0, 0, 587, 585, 1, 0, 0, 0, 588, 589, 3, 30, 15, 0, 589, 593, 3, 28, 14, 0, 590, 592, 3, 28, 14, 0, 591, 590, 1, 0, 0, 0, 592, 595, 1, 0, 0, 0, 593, 591, 1, 0, 0, 0, 593, 594, 1, 0, 0, 0, 594, 27, 1, 0, 0, 0, 595, 593, 1, 0, 0, 0, 596, 597, 5, 151, 0, 0, 597, 598, 3, 30, 15, 0, 598, 29, 1, 0, 0, 0, 599, 602, 3, 32, 16, 0, 600, 601, 5, 97, 0, 0, 601, 603, 3, 60, 30, 0, 602, 600, 1, 0, 0, 0, 602, 603, 1, 0, 0, 0, 603, 31, 1, 0, 0, 0, 604, 609, 3, 252, 126, 0, 605, 607, 5, 14, 0, 0, 606, 605, 1, 0, 0, 0, 606, 607, 1, 0, 0, 0, 607, 608, 1, 0, 0, 0, 608, 610, 3, 34, 17, 0, 609, 606, 1, 0, 0, 0, 609, 610, 1, 0, 0, 0, 610, 33, 1, 0, 0, 0, 611, 614, 5, 5, 0, 0, 612, 614, 3, 450, 225, 0, 613, 611, 1, 0, 0, 0, 613, 612, 1, 0, 0, 0, 614, 35, 1, 0, 0, 0, 615, 616, 5, 144, 0, 0, 616, 617, 5, 175, 0, 0, 617, 619, 3, 102, 51, 0, 618, 620, 5, 14, 0, 0, 619, 618, 1, 0, 0, 0, 619, 620, 1, 0, 0, 0, 620, 621, 1, 0, 0, 0, 621, 622, 5, 5, 0, 0, 622, 632, 1, 0, 0, 0, 623, 624, 5, 173, 0, 0, 624, 626, 3, 102, 51, 0, 625, 627, 5, 14, 0, 0, 626, 625, 1, 0, 0, 0, 626, 627, 1, 0, 0, 0, 627, 628, 1, 0, 0, 0, 628, 629, 5, 5, 0, 0, 629, 631, 1, 0, 0, 0, 630, 623, 1, 0, 0, 0, 631, 634, 1, 0, 0, 0, 632, 630, 1, 0, 0, 0, 632, 633, 1, 0, 0, 0, 633, 635, 1, 0, 0, 0, 634, 632, 1, 0, 0, 0, 635, 636, 5, 176, 0, 0, 636, 37, 1, 0, 0, 0, 637, 638, 5, 141, 0, 0, 638, 639, 3, 12, 6, 0, 639, 39, 1, 0, 0, 0, 640, 641, 5, 120, 0, 0, 641, 642, 3, 42, 21, 0, 642, 41, 1, 0, 0, 0, 643, 645, 3, 44, 22, 0, 644, 643, 1, 0, 0, 0, 644, 645, 1, 0, 0, 0, 645, 647, 1, 0, 0, 0, 646, 648, 5, 37, 0, 0, 647, 646, 1, 0, 0, 0, 647, 648, 1, 0, 0, 0, 648, 661, 1, 0, 0, 0, 649, 662, 5, 181, 0, 0, 650, 651, 3, 12, 6, 0, 651, 658, 3, 48, 24, 0, 652, 653, 5, 173, 0, 0, 653, 654, 3, 12, 6, 0, 654, 655, 3, 48, 24, 0, 655, 657, 1, 0, 0, 0, 656, 652, 1, 0, 0, 0, 657, 660, 1, 0, 0, 0, 658, 656, 1, 0, 0, 0, 658, 659, 1, 0, 0, 0, 659, 662, 1, 0, 0, 0, 660, 658, 1, 0, 0, 0, 661, 649, 1, 0, 0, 0, 661, 650, 1, 0, 0, 0, 662, 43, 1, 0, 0, 0, 663, 667, 5, 1, 0, 0, 664, 666, 3, 46, 23, 0, 665, 664, 1, 0, 0, 0, 666, 669, 1, 0, 0, 0, 667, 665, 1, 0, 0, 0, 667, 668, 1, 0, 0, 0, 668, 670, 1, 0, 0, 0, 669, 667, 1, 0, 0, 0, 670, 671, 5, 2, 0, 0, 671, 45, 1, 0, 0, 0, 672, 673, 5, 106, 0, 0, 673, 674, 5, 175, 0, 0, 674, 678, 3, 252, 126, 0, 675, 677, 3, 334, 167, 0, 676, 675, 1, 0, 0, 0, 677, 680, 1, 0, 0, 0, 678, 676, 1, 0, 0, 0, 678, 679, 1, 0, 0, 0, 679, 681, 1, 0, 0, 0, 680, 678, 1, 0, 0, 0, 681, 682, 5, 176, 0, 0, 682, 700, 1, 0, 0, 0, 683, 684, 5, 51, 0, 0, 684, 685, 5, 175, 0, 0, 685, 686, 3, 252, 126, 0, 686, 687, 3, 334, 167, 0, 687, 688, 5, 176, 0, 0, 688, 700, 1, 0, 0, 0, 689, 690, 5, 107, 0, 0, 690, 691, 5, 175, 0, 0, 691, 692, 3, 252, 126, 0, 692, 693, 5, 176, 0, 0, 693, 700, 1, 0, 0, 0, 694, 695, 5, 52, 0, 0, 695, 696, 5, 175, 0, 0, 696, 697, 3, 252, 126, 0, 697, 698, 5, 176, 0, 0, 698, 700, 1, 0, 0, 0, 699, 672, 1, 0, 0, 0, 699, 683, 1, 0, 0, 0, 699, 689, 1, 0, 0, 0, 699, 694, 1, 0, 0, 0, 700, 702, 1, 0, 0, 0, 701, 703, 5, 209, 0, 0, 702, 701, 1, 0, 0, 0, 702, 703, 1, 0, 0, 0, 703, 47, 1, 0, 0, 0, 704, 705, 5, 14, 0, 0, 705, 707, 3, 450, 225, 0, 706, 704, 1, 0, 0, 0, 706, 707, 1, 0, 0, 0, 707, 49, 1, 0, 0, 0, 708, 709, 5, 100, 0, 0, 709, 710, 5, 19, 0, 0, 710, 711, 3, 12, 6, 0, 711, 718, 3, 52, 26, 0, 712, 713, 5, 173, 0, 0, 713, 714, 3, 12, 6, 0, 714, 715, 3, 52, 26, 0, 715, 717, 1, 0, 0, 0, 716, 712, 1, 0, 0, 0, 717, 720, 1, 0, 0, 0, 718, 716, 1, 0, 0, 0, 718, 
719, 1, 0, 0, 0, 719, 51, 1, 0, 0, 0, 720, 718, 1, 0, 0, 0, 721, 723, 7, 0, 0, 0, 722, 721, 1, 0, 0, 0, 722, 723, 1, 0, 0, 0, 723, 726, 1, 0, 0, 0, 724, 725, 5, 94, 0, 0, 725, 727, 7, 1, 0, 0, 726, 724, 1, 0, 0, 0, 726, 727, 1, 0, 0, 0, 727, 53, 1, 0, 0, 0, 728, 729, 5, 59, 0, 0, 729, 730, 5, 19, 0, 0, 730, 735, 3, 12, 6, 0, 731, 732, 5, 173, 0, 0, 732, 734, 3, 12, 6, 0, 733, 731, 1, 0, 0, 0, 734, 737, 1, 0, 0, 0, 735, 733, 1, 0, 0, 0, 735, 736, 1, 0, 0, 0, 736, 55, 1, 0, 0, 0, 737, 735, 1, 0, 0, 0, 738, 739, 5, 80, 0, 0, 739, 740, 3, 96, 48, 0, 740, 57, 1, 0, 0, 0, 741, 742, 5, 95, 0, 0, 742, 743, 3, 96, 48, 0, 743, 59, 1, 0, 0, 0, 744, 745, 6, 30, -1, 0, 745, 746, 3, 62, 31, 0, 746, 752, 1, 0, 0, 0, 747, 748, 10, 1, 0, 0, 748, 749, 5, 99, 0, 0, 749, 751, 3, 62, 31, 0, 750, 747, 1, 0, 0, 0, 751, 754, 1, 0, 0, 0, 752, 750, 1, 0, 0, 0, 752, 753, 1, 0, 0, 0, 753, 61, 1, 0, 0, 0, 754, 752, 1, 0, 0, 0, 755, 756, 6, 31, -1, 0, 756, 757, 3, 64, 32, 0, 757, 763, 1, 0, 0, 0, 758, 759, 10, 1, 0, 0, 759, 760, 5, 13, 0, 0, 760, 762, 3, 64, 32, 0, 761, 758, 1, 0, 0, 0, 762, 765, 1, 0, 0, 0, 763, 761, 1, 0, 0, 0, 763, 764, 1, 0, 0, 0, 764, 63, 1, 0, 0, 0, 765, 763, 1, 0, 0, 0, 766, 768, 5, 93, 0, 0, 767, 766, 1, 0, 0, 0, 767, 768, 1, 0, 0, 0, 768, 769, 1, 0, 0, 0, 769, 770, 3, 66, 33, 0, 770, 65, 1, 0, 0, 0, 771, 777, 3, 68, 34, 0, 772, 774, 5, 71, 0, 0, 773, 775, 5, 93, 0, 0, 774, 773, 1, 0, 0, 0, 774, 775, 1, 0, 0, 0, 775, 776, 1, 0, 0, 0, 776, 778, 5, 202, 0, 0, 777, 772, 1, 0, 0, 0, 777, 778, 1, 0, 0, 0, 778, 67, 1, 0, 0, 0, 779, 785, 3, 70, 35, 0, 780, 785, 3, 72, 36, 0, 781, 785, 3, 78, 39, 0, 782, 785, 3, 90, 45, 0, 783, 785, 3, 92, 46, 0, 784, 779, 1, 0, 0, 0, 784, 780, 1, 0, 0, 0, 784, 781, 1, 0, 0, 0, 784, 782, 1, 0, 0, 0, 784, 783, 1, 0, 0, 0, 785, 69, 1, 0, 0, 0, 786, 787, 3, 94, 47, 0, 787, 788, 5, 18, 0, 0, 788, 789, 3, 94, 47, 0, 789, 790, 5, 13, 0, 0, 790, 791, 3, 94, 47, 0, 791, 71, 1, 0, 0, 0, 792, 799, 3, 94, 47, 0, 793, 796, 3, 74, 37, 0, 794, 796, 3, 76, 38, 0, 795, 793, 1, 0, 0, 0, 795, 794, 1, 0, 0, 0, 796, 797, 1, 0, 0, 0, 797, 798, 3, 94, 47, 0, 798, 800, 1, 0, 0, 0, 799, 795, 1, 0, 0, 0, 799, 800, 1, 0, 0, 0, 800, 73, 1, 0, 0, 0, 801, 802, 7, 2, 0, 0, 802, 75, 1, 0, 0, 0, 803, 810, 5, 195, 0, 0, 804, 810, 5, 196, 0, 0, 805, 810, 5, 193, 0, 0, 806, 810, 5, 194, 0, 0, 807, 810, 5, 191, 0, 0, 808, 810, 5, 192, 0, 0, 809, 803, 1, 0, 0, 0, 809, 804, 1, 0, 0, 0, 809, 805, 1, 0, 0, 0, 809, 806, 1, 0, 0, 0, 809, 807, 1, 0, 0, 0, 809, 808, 1, 0, 0, 0, 810, 77, 1, 0, 0, 0, 811, 815, 3, 80, 40, 0, 812, 815, 3, 86, 43, 0, 813, 815, 3, 88, 44, 0, 814, 811, 1, 0, 0, 0, 814, 812, 1, 0, 0, 0, 814, 813, 1, 0, 0, 0, 815, 79, 1, 0, 0, 0, 816, 817, 3, 82, 41, 0, 817, 818, 5, 65, 0, 0, 818, 819, 5, 175, 0, 0, 819, 822, 3, 84, 42, 0, 820, 821, 5, 173, 0, 0, 821, 823, 3, 84, 42, 0, 822, 820, 1, 0, 0, 0, 823, 824, 1, 0, 0, 0, 824, 822, 1, 0, 0, 0, 824, 825, 1, 0, 0, 0, 825, 826, 1, 0, 0, 0, 826, 827, 5, 176, 0, 0, 827, 81, 1, 0, 0, 0, 828, 829, 5, 175, 0, 0, 829, 834, 3, 94, 47, 0, 830, 831, 5, 173, 0, 0, 831, 833, 3, 94, 47, 0, 832, 830, 1, 0, 0, 0, 833, 836, 1, 0, 0, 0, 834, 832, 1, 0, 0, 0, 834, 835, 1, 0, 0, 0, 835, 837, 1, 0, 0, 0, 836, 834, 1, 0, 0, 0, 837, 838, 5, 176, 0, 0, 838, 83, 1, 0, 0, 0, 839, 840, 5, 175, 0, 0, 840, 845, 3, 12, 6, 0, 841, 842, 5, 173, 0, 0, 842, 844, 3, 12, 6, 0, 843, 841, 1, 0, 0, 0, 844, 847, 1, 0, 0, 0, 845, 843, 1, 0, 0, 0, 845, 846, 1, 0, 0, 0, 846, 848, 1, 0, 0, 0, 847, 845, 1, 0, 0, 0, 848, 849, 5, 176, 0, 0, 849, 85, 1, 0, 0, 0, 850, 851, 3, 94, 47, 0, 851, 
852, 5, 65, 0, 0, 852, 853, 5, 175, 0, 0, 853, 856, 3, 12, 6, 0, 854, 855, 5, 173, 0, 0, 855, 857, 3, 12, 6, 0, 856, 854, 1, 0, 0, 0, 857, 858, 1, 0, 0, 0, 858, 856, 1, 0, 0, 0, 858, 859, 1, 0, 0, 0, 859, 860, 1, 0, 0, 0, 860, 861, 5, 176, 0, 0, 861, 87, 1, 0, 0, 0, 862, 875, 3, 94, 47, 0, 863, 864, 5, 175, 0, 0, 864, 869, 3, 94, 47, 0, 865, 866, 5, 173, 0, 0, 866, 868, 3, 94, 47, 0, 867, 865, 1, 0, 0, 0, 868, 871, 1, 0, 0, 0, 869, 867, 1, 0, 0, 0, 869, 870, 1, 0, 0, 0, 870, 872, 1, 0, 0, 0, 871, 869, 1, 0, 0, 0, 872, 873, 5, 176, 0, 0, 873, 875, 1, 0, 0, 0, 874, 862, 1, 0, 0, 0, 874, 863, 1, 0, 0, 0, 875, 876, 1, 0, 0, 0, 876, 877, 5, 65, 0, 0, 877, 878, 3, 102, 51, 0, 878, 89, 1, 0, 0, 0, 879, 880, 5, 46, 0, 0, 880, 881, 3, 94, 47, 0, 881, 91, 1, 0, 0, 0, 882, 883, 3, 94, 47, 0, 883, 885, 5, 71, 0, 0, 884, 886, 5, 93, 0, 0, 885, 884, 1, 0, 0, 0, 885, 886, 1, 0, 0, 0, 886, 887, 1, 0, 0, 0, 887, 889, 5, 96, 0, 0, 888, 890, 5, 131, 0, 0, 889, 888, 1, 0, 0, 0, 889, 890, 1, 0, 0, 0, 890, 891, 1, 0, 0, 0, 891, 893, 5, 175, 0, 0, 892, 894, 5, 98, 0, 0, 893, 892, 1, 0, 0, 0, 893, 894, 1, 0, 0, 0, 894, 895, 1, 0, 0, 0, 895, 903, 3, 186, 93, 0, 896, 898, 5, 173, 0, 0, 897, 899, 5, 98, 0, 0, 898, 897, 1, 0, 0, 0, 898, 899, 1, 0, 0, 0, 899, 900, 1, 0, 0, 0, 900, 902, 3, 186, 93, 0, 901, 896, 1, 0, 0, 0, 902, 905, 1, 0, 0, 0, 903, 901, 1, 0, 0, 0, 903, 904, 1, 0, 0, 0, 904, 906, 1, 0, 0, 0, 905, 903, 1, 0, 0, 0, 906, 907, 5, 176, 0, 0, 907, 93, 1, 0, 0, 0, 908, 913, 3, 96, 48, 0, 909, 910, 5, 201, 0, 0, 910, 912, 3, 96, 48, 0, 911, 909, 1, 0, 0, 0, 912, 915, 1, 0, 0, 0, 913, 911, 1, 0, 0, 0, 913, 914, 1, 0, 0, 0, 914, 95, 1, 0, 0, 0, 915, 913, 1, 0, 0, 0, 916, 921, 3, 98, 49, 0, 917, 918, 7, 3, 0, 0, 918, 920, 3, 98, 49, 0, 919, 917, 1, 0, 0, 0, 920, 923, 1, 0, 0, 0, 921, 919, 1, 0, 0, 0, 921, 922, 1, 0, 0, 0, 922, 97, 1, 0, 0, 0, 923, 921, 1, 0, 0, 0, 924, 929, 3, 100, 50, 0, 925, 926, 7, 4, 0, 0, 926, 928, 3, 100, 50, 0, 927, 925, 1, 0, 0, 0, 928, 931, 1, 0, 0, 0, 929, 927, 1, 0, 0, 0, 929, 930, 1, 0, 0, 0, 930, 99, 1, 0, 0, 0, 931, 929, 1, 0, 0, 0, 932, 936, 3, 102, 51, 0, 933, 934, 7, 3, 0, 0, 934, 936, 3, 100, 50, 0, 935, 932, 1, 0, 0, 0, 935, 933, 1, 0, 0, 0, 936, 101, 1, 0, 0, 0, 937, 942, 3, 116, 58, 0, 938, 941, 3, 104, 52, 0, 939, 941, 3, 110, 55, 0, 940, 938, 1, 0, 0, 0, 940, 939, 1, 0, 0, 0, 941, 944, 1, 0, 0, 0, 942, 940, 1, 0, 0, 0, 942, 943, 1, 0, 0, 0, 943, 103, 1, 0, 0, 0, 944, 942, 1, 0, 0, 0, 945, 948, 5, 182, 0, 0, 946, 949, 3, 108, 54, 0, 947, 949, 3, 106, 53, 0, 948, 946, 1, 0, 0, 0, 948, 947, 1, 0, 0, 0, 949, 105, 1, 0, 0, 0, 950, 956, 3, 450, 225, 0, 951, 956, 3, 446, 223, 0, 952, 956, 3, 122, 61, 0, 953, 956, 3, 144, 72, 0, 954, 956, 3, 134, 67, 0, 955, 950, 1, 0, 0, 0, 955, 951, 1, 0, 0, 0, 955, 952, 1, 0, 0, 0, 955, 953, 1, 0, 0, 0, 955, 954, 1, 0, 0, 0, 956, 107, 1, 0, 0, 0, 957, 958, 7, 5, 0, 0, 958, 960, 5, 175, 0, 0, 959, 961, 3, 12, 6, 0, 960, 959, 1, 0, 0, 0, 960, 961, 1, 0, 0, 0, 961, 962, 1, 0, 0, 0, 962, 963, 5, 176, 0, 0, 963, 109, 1, 0, 0, 0, 964, 967, 3, 114, 57, 0, 965, 967, 3, 112, 56, 0, 966, 964, 1, 0, 0, 0, 966, 965, 1, 0, 0, 0, 967, 111, 1, 0, 0, 0, 968, 970, 5, 177, 0, 0, 969, 971, 3, 12, 6, 0, 970, 969, 1, 0, 0, 0, 970, 971, 1, 0, 0, 0, 971, 972, 1, 0, 0, 0, 972, 974, 5, 174, 0, 0, 973, 975, 3, 12, 6, 0, 974, 973, 1, 0, 0, 0, 974, 975, 1, 0, 0, 0, 975, 976, 1, 0, 0, 0, 976, 977, 5, 178, 0, 0, 977, 113, 1, 0, 0, 0, 978, 980, 5, 177, 0, 0, 979, 981, 3, 12, 6, 0, 980, 979, 1, 0, 0, 0, 980, 981, 1, 0, 0, 0, 981, 982, 1, 0, 0, 0, 982, 983, 5, 178, 0, 0, 983, 
115, 1, 0, 0, 0, 984, 999, 3, 120, 60, 0, 985, 999, 3, 118, 59, 0, 986, 999, 3, 122, 61, 0, 987, 999, 3, 124, 62, 0, 988, 999, 3, 126, 63, 0, 989, 999, 3, 128, 64, 0, 990, 999, 3, 132, 66, 0, 991, 999, 3, 134, 67, 0, 992, 999, 3, 136, 68, 0, 993, 999, 3, 138, 69, 0, 994, 999, 3, 140, 70, 0, 995, 999, 3, 142, 71, 0, 996, 999, 3, 144, 72, 0, 997, 999, 3, 146, 73, 0, 998, 984, 1, 0, 0, 0, 998, 985, 1, 0, 0, 0, 998, 986, 1, 0, 0, 0, 998, 987, 1, 0, 0, 0, 998, 988, 1, 0, 0, 0, 998, 989, 1, 0, 0, 0, 998, 990, 1, 0, 0, 0, 998, 991, 1, 0, 0, 0, 998, 992, 1, 0, 0, 0, 998, 993, 1, 0, 0, 0, 998, 994, 1, 0, 0, 0, 998, 995, 1, 0, 0, 0, 998, 996, 1, 0, 0, 0, 998, 997, 1, 0, 0, 0, 999, 117, 1, 0, 0, 0, 1000, 1006, 3, 450, 225, 0, 1001, 1004, 5, 182, 0, 0, 1002, 1005, 3, 450, 225, 0, 1003, 1005, 3, 446, 223, 0, 1004, 1002, 1, 0, 0, 0, 1004, 1003, 1, 0, 0, 0, 1005, 1007, 1, 0, 0, 0, 1006, 1001, 1, 0, 0, 0, 1006, 1007, 1, 0, 0, 0, 1007, 119, 1, 0, 0, 0, 1008, 1014, 3, 442, 221, 0, 1009, 1014, 3, 446, 223, 0, 1010, 1014, 5, 204, 0, 0, 1011, 1014, 5, 203, 0, 0, 1012, 1014, 5, 202, 0, 0, 1013, 1008, 1, 0, 0, 0, 1013, 1009, 1, 0, 0, 0, 1013, 1010, 1, 0, 0, 0, 1013, 1011, 1, 0, 0, 0, 1013, 1012, 1, 0, 0, 0, 1014, 121, 1, 0, 0, 0, 1015, 1016, 7, 6, 0, 0, 1016, 123, 1, 0, 0, 0, 1017, 1019, 5, 177, 0, 0, 1018, 1020, 3, 12, 6, 0, 1019, 1018, 1, 0, 0, 0, 1019, 1020, 1, 0, 0, 0, 1020, 1025, 1, 0, 0, 0, 1021, 1022, 5, 173, 0, 0, 1022, 1024, 3, 12, 6, 0, 1023, 1021, 1, 0, 0, 0, 1024, 1027, 1, 0, 0, 0, 1025, 1023, 1, 0, 0, 0, 1025, 1026, 1, 0, 0, 0, 1026, 1028, 1, 0, 0, 0, 1027, 1025, 1, 0, 0, 0, 1028, 1029, 5, 178, 0, 0, 1029, 125, 1, 0, 0, 0, 1030, 1031, 5, 179, 0, 0, 1031, 1032, 3, 12, 6, 0, 1032, 1033, 5, 174, 0, 0, 1033, 1041, 3, 12, 6, 0, 1034, 1035, 5, 173, 0, 0, 1035, 1036, 3, 12, 6, 0, 1036, 1037, 5, 174, 0, 0, 1037, 1038, 3, 12, 6, 0, 1038, 1040, 1, 0, 0, 0, 1039, 1034, 1, 0, 0, 0, 1040, 1043, 1, 0, 0, 0, 1041, 1039, 1, 0, 0, 0, 1041, 1042, 1, 0, 0, 0, 1042, 1044, 1, 0, 0, 0, 1043, 1041, 1, 0, 0, 0, 1044, 1045, 5, 180, 0, 0, 1045, 1049, 1, 0, 0, 0, 1046, 1047, 5, 179, 0, 0, 1047, 1049, 5, 180, 0, 0, 1048, 1030, 1, 0, 0, 0, 1048, 1046, 1, 0, 0, 0, 1049, 127, 1, 0, 0, 0, 1050, 1051, 5, 121, 0, 0, 1051, 1052, 5, 175, 0, 0, 1052, 1053, 3, 130, 65, 0, 1053, 1054, 5, 173, 0, 0, 1054, 1055, 3, 12, 6, 0, 1055, 1056, 5, 176, 0, 0, 1056, 129, 1, 0, 0, 0, 1057, 1058, 3, 12, 6, 0, 1058, 131, 1, 0, 0, 0, 1059, 1060, 5, 16, 0, 0, 1060, 1062, 5, 175, 0, 0, 1061, 1063, 5, 37, 0, 0, 1062, 1061, 1, 0, 0, 0, 1062, 1063, 1, 0, 0, 0, 1063, 1064, 1, 0, 0, 0, 1064, 1065, 3, 12, 6, 0, 1065, 1066, 5, 176, 0, 0, 1066, 133, 1, 0, 0, 0, 1067, 1068, 3, 450, 225, 0, 1068, 1077, 5, 175, 0, 0, 1069, 1074, 3, 12, 6, 0, 1070, 1071, 5, 173, 0, 0, 1071, 1073, 3, 12, 6, 0, 1072, 1070, 1, 0, 0, 0, 1073, 1076, 1, 0, 0, 0, 1074, 1072, 1, 0, 0, 0, 1074, 1075, 1, 0, 0, 0, 1075, 1078, 1, 0, 0, 0, 1076, 1074, 1, 0, 0, 0, 1077, 1069, 1, 0, 0, 0, 1077, 1078, 1, 0, 0, 0, 1078, 1079, 1, 0, 0, 0, 1079, 1080, 5, 176, 0, 0, 1080, 135, 1, 0, 0, 0, 1081, 1082, 5, 26, 0, 0, 1082, 1083, 5, 175, 0, 0, 1083, 1084, 5, 181, 0, 0, 1084, 1085, 5, 176, 0, 0, 1085, 137, 1, 0, 0, 0, 1086, 1087, 5, 26, 0, 0, 1087, 1088, 5, 175, 0, 0, 1088, 1089, 5, 37, 0, 0, 1089, 1090, 3, 12, 6, 0, 1090, 1091, 5, 176, 0, 0, 1091, 139, 1, 0, 0, 0, 1092, 1093, 5, 21, 0, 0, 1093, 1094, 5, 140, 0, 0, 1094, 1095, 3, 12, 6, 0, 1095, 1096, 5, 128, 0, 0, 1096, 1104, 3, 12, 6, 0, 1097, 1098, 5, 140, 0, 0, 1098, 1099, 3, 12, 6, 0, 1099, 1100, 5, 128, 0, 0, 1100, 1101, 3, 12, 6, 0, 1101, 1103, 1, 0, 
0, 0, 1102, 1097, 1, 0, 0, 0, 1103, 1106, 1, 0, 0, 0, 1104, 1102, 1, 0, 0, 0, 1104, 1105, 1, 0, 0, 0, 1105, 1109, 1, 0, 0, 0, 1106, 1104, 1, 0, 0, 0, 1107, 1108, 5, 41, 0, 0, 1108, 1110, 3, 12, 6, 0, 1109, 1107, 1, 0, 0, 0, 1109, 1110, 1, 0, 0, 0, 1110, 1111, 1, 0, 0, 0, 1111, 1112, 5, 43, 0, 0, 1112, 141, 1, 0, 0, 0, 1113, 1114, 5, 23, 0, 0, 1114, 1115, 5, 175, 0, 0, 1115, 1116, 3, 12, 6, 0, 1116, 1117, 5, 14, 0, 0, 1117, 1118, 3, 186, 93, 0, 1118, 1119, 5, 176, 0, 0, 1119, 143, 1, 0, 0, 0, 1120, 1121, 5, 175, 0, 0, 1121, 1122, 3, 12, 6, 0, 1122, 1123, 5, 176, 0, 0, 1123, 145, 1, 0, 0, 0, 1124, 1125, 5, 47, 0, 0, 1125, 1126, 5, 175, 0, 0, 1126, 1127, 3, 450, 225, 0, 1127, 1128, 5, 54, 0, 0, 1128, 1129, 3, 12, 6, 0, 1129, 1130, 5, 176, 0, 0, 1130, 147, 1, 0, 0, 0, 1131, 1133, 3, 6, 3, 0, 1132, 1131, 1, 0, 0, 0, 1132, 1133, 1, 0, 0, 0, 1133, 1134, 1, 0, 0, 0, 1134, 1135, 7, 7, 0, 0, 1135, 1136, 5, 70, 0, 0, 1136, 1141, 3, 252, 126, 0, 1137, 1139, 5, 14, 0, 0, 1138, 1137, 1, 0, 0, 0, 1138, 1139, 1, 0, 0, 0, 1139, 1140, 1, 0, 0, 0, 1140, 1142, 3, 34, 17, 0, 1141, 1138, 1, 0, 0, 0, 1141, 1142, 1, 0, 0, 0, 1142, 1154, 1, 0, 0, 0, 1143, 1144, 5, 175, 0, 0, 1144, 1149, 3, 150, 75, 0, 1145, 1146, 5, 173, 0, 0, 1146, 1148, 3, 150, 75, 0, 1147, 1145, 1, 0, 0, 0, 1148, 1151, 1, 0, 0, 0, 1149, 1147, 1, 0, 0, 0, 1149, 1150, 1, 0, 0, 0, 1150, 1152, 1, 0, 0, 0, 1151, 1149, 1, 0, 0, 0, 1152, 1153, 5, 176, 0, 0, 1153, 1155, 1, 0, 0, 0, 1154, 1143, 1, 0, 0, 0, 1154, 1155, 1, 0, 0, 0, 1155, 1156, 1, 0, 0, 0, 1156, 1157, 5, 139, 0, 0, 1157, 1158, 5, 175, 0, 0, 1158, 1163, 3, 154, 77, 0, 1159, 1160, 5, 173, 0, 0, 1160, 1162, 3, 154, 77, 0, 1161, 1159, 1, 0, 0, 0, 1162, 1165, 1, 0, 0, 0, 1163, 1161, 1, 0, 0, 0, 1163, 1164, 1, 0, 0, 0, 1164, 1166, 1, 0, 0, 0, 1165, 1163, 1, 0, 0, 0, 1166, 1170, 5, 176, 0, 0, 1167, 1168, 5, 122, 0, 0, 1168, 1169, 5, 130, 0, 0, 1169, 1171, 3, 156, 78, 0, 1170, 1167, 1, 0, 0, 0, 1170, 1171, 1, 0, 0, 0, 1171, 1173, 1, 0, 0, 0, 1172, 1174, 3, 152, 76, 0, 1173, 1172, 1, 0, 0, 0, 1173, 1174, 1, 0, 0, 0, 1174, 149, 1, 0, 0, 0, 1175, 1178, 3, 450, 225, 0, 1176, 1178, 3, 446, 223, 0, 1177, 1175, 1, 0, 0, 0, 1177, 1176, 1, 0, 0, 0, 1178, 151, 1, 0, 0, 0, 1179, 1180, 5, 113, 0, 0, 1180, 1181, 3, 42, 21, 0, 1181, 153, 1, 0, 0, 0, 1182, 1185, 5, 31, 0, 0, 1183, 1185, 3, 12, 6, 0, 1184, 1182, 1, 0, 0, 0, 1184, 1183, 1, 0, 0, 0, 1185, 155, 1, 0, 0, 0, 1186, 1187, 3, 96, 48, 0, 1187, 1188, 7, 8, 0, 0, 1188, 1193, 1, 0, 0, 0, 1189, 1190, 5, 138, 0, 0, 1190, 1191, 5, 126, 0, 0, 1191, 1193, 5, 31, 0, 0, 1192, 1186, 1, 0, 0, 0, 1192, 1189, 1, 0, 0, 0, 1193, 157, 1, 0, 0, 0, 1194, 1196, 3, 6, 3, 0, 1195, 1194, 1, 0, 0, 0, 1195, 1196, 1, 0, 0, 0, 1196, 1197, 1, 0, 0, 0, 1197, 1198, 5, 134, 0, 0, 1198, 1203, 3, 252, 126, 0, 1199, 1201, 5, 14, 0, 0, 1200, 1199, 1, 0, 0, 0, 1200, 1201, 1, 0, 0, 0, 1201, 1202, 1, 0, 0, 0, 1202, 1204, 3, 34, 17, 0, 1203, 1200, 1, 0, 0, 0, 1203, 1204, 1, 0, 0, 0, 1204, 1205, 1, 0, 0, 0, 1205, 1210, 3, 162, 81, 0, 1206, 1207, 5, 173, 0, 0, 1207, 1209, 3, 162, 81, 0, 1208, 1206, 1, 0, 0, 0, 1209, 1212, 1, 0, 0, 0, 1210, 1208, 1, 0, 0, 0, 1210, 1211, 1, 0, 0, 0, 1211, 1213, 1, 0, 0, 0, 1212, 1210, 1, 0, 0, 0, 1213, 1214, 5, 141, 0, 0, 1214, 1216, 3, 12, 6, 0, 1215, 1217, 3, 160, 80, 0, 1216, 1215, 1, 0, 0, 0, 1216, 1217, 1, 0, 0, 0, 1217, 159, 1, 0, 0, 0, 1218, 1219, 5, 113, 0, 0, 1219, 1220, 3, 42, 21, 0, 1220, 161, 1, 0, 0, 0, 1221, 1222, 5, 122, 0, 0, 1222, 1230, 3, 164, 82, 0, 1223, 1226, 5, 173, 0, 0, 1224, 1227, 3, 162, 81, 0, 1225, 1227, 3, 164, 82, 0, 1226, 1224, 1, 0, 
0, 0, 1226, 1225, 1, 0, 0, 0, 1227, 1229, 1, 0, 0, 0, 1228, 1223, 1, 0, 0, 0, 1229, 1232, 1, 0, 0, 0, 1230, 1228, 1, 0, 0, 0, 1230, 1231, 1, 0, 0, 0, 1231, 1293, 1, 0, 0, 0, 1232, 1230, 1, 0, 0, 0, 1233, 1234, 5, 7, 0, 0, 1234, 1242, 3, 166, 83, 0, 1235, 1238, 5, 173, 0, 0, 1236, 1239, 3, 162, 81, 0, 1237, 1239, 3, 166, 83, 0, 1238, 1236, 1, 0, 0, 0, 1238, 1237, 1, 0, 0, 0, 1239, 1241, 1, 0, 0, 0, 1240, 1235, 1, 0, 0, 0, 1241, 1244, 1, 0, 0, 0, 1242, 1240, 1, 0, 0, 0, 1242, 1243, 1, 0, 0, 0, 1243, 1293, 1, 0, 0, 0, 1244, 1242, 1, 0, 0, 0, 1245, 1246, 5, 109, 0, 0, 1246, 1254, 3, 168, 84, 0, 1247, 1250, 5, 173, 0, 0, 1248, 1251, 3, 162, 81, 0, 1249, 1251, 3, 168, 84, 0, 1250, 1248, 1, 0, 0, 0, 1250, 1249, 1, 0, 0, 0, 1251, 1253, 1, 0, 0, 0, 1252, 1247, 1, 0, 0, 0, 1253, 1256, 1, 0, 0, 0, 1254, 1252, 1, 0, 0, 0, 1254, 1255, 1, 0, 0, 0, 1255, 1293, 1, 0, 0, 0, 1256, 1254, 1, 0, 0, 0, 1257, 1258, 5, 112, 0, 0, 1258, 1266, 3, 170, 85, 0, 1259, 1262, 5, 173, 0, 0, 1260, 1263, 3, 162, 81, 0, 1261, 1263, 3, 170, 85, 0, 1262, 1260, 1, 0, 0, 0, 1262, 1261, 1, 0, 0, 0, 1263, 1265, 1, 0, 0, 0, 1264, 1259, 1, 0, 0, 0, 1265, 1268, 1, 0, 0, 0, 1266, 1264, 1, 0, 0, 0, 1266, 1267, 1, 0, 0, 0, 1267, 1293, 1, 0, 0, 0, 1268, 1266, 1, 0, 0, 0, 1269, 1270, 5, 72, 0, 0, 1270, 1271, 5, 84, 0, 0, 1271, 1279, 3, 172, 86, 0, 1272, 1275, 5, 173, 0, 0, 1273, 1276, 3, 162, 81, 0, 1274, 1276, 3, 172, 86, 0, 1275, 1273, 1, 0, 0, 0, 1275, 1274, 1, 0, 0, 0, 1276, 1278, 1, 0, 0, 0, 1277, 1272, 1, 0, 0, 0, 1278, 1281, 1, 0, 0, 0, 1279, 1277, 1, 0, 0, 0, 1279, 1280, 1, 0, 0, 0, 1280, 1293, 1, 0, 0, 0, 1281, 1279, 1, 0, 0, 0, 1282, 1283, 5, 122, 0, 0, 1283, 1284, 5, 130, 0, 0, 1284, 1289, 3, 176, 88, 0, 1285, 1286, 5, 173, 0, 0, 1286, 1288, 3, 162, 81, 0, 1287, 1285, 1, 0, 0, 0, 1288, 1291, 1, 0, 0, 0, 1289, 1287, 1, 0, 0, 0, 1289, 1290, 1, 0, 0, 0, 1290, 1293, 1, 0, 0, 0, 1291, 1289, 1, 0, 0, 0, 1292, 1221, 1, 0, 0, 0, 1292, 1233, 1, 0, 0, 0, 1292, 1245, 1, 0, 0, 0, 1292, 1257, 1, 0, 0, 0, 1292, 1269, 1, 0, 0, 0, 1292, 1282, 1, 0, 0, 0, 1293, 163, 1, 0, 0, 0, 1294, 1295, 3, 178, 89, 0, 1295, 1296, 5, 189, 0, 0, 1296, 1297, 3, 12, 6, 0, 1297, 165, 1, 0, 0, 0, 1298, 1300, 5, 70, 0, 0, 1299, 1298, 1, 0, 0, 0, 1299, 1300, 1, 0, 0, 0, 1300, 1301, 1, 0, 0, 0, 1301, 1306, 3, 178, 89, 0, 1302, 1304, 5, 3, 0, 0, 1303, 1302, 1, 0, 0, 0, 1303, 1304, 1, 0, 0, 0, 1304, 1305, 1, 0, 0, 0, 1305, 1307, 3, 180, 90, 0, 1306, 1303, 1, 0, 0, 0, 1306, 1307, 1, 0, 0, 0, 1307, 1309, 1, 0, 0, 0, 1308, 1310, 5, 40, 0, 0, 1309, 1308, 1, 0, 0, 0, 1309, 1310, 1, 0, 0, 0, 1310, 1311, 1, 0, 0, 0, 1311, 1312, 3, 12, 6, 0, 1312, 167, 1, 0, 0, 0, 1313, 1315, 5, 70, 0, 0, 1314, 1313, 1, 0, 0, 0, 1314, 1315, 1, 0, 0, 0, 1315, 1316, 1, 0, 0, 0, 1316, 1318, 3, 178, 89, 0, 1317, 1319, 5, 48, 0, 0, 1318, 1317, 1, 0, 0, 0, 1318, 1319, 1, 0, 0, 0, 1319, 1320, 1, 0, 0, 0, 1320, 1321, 3, 12, 6, 0, 1321, 169, 1, 0, 0, 0, 1322, 1323, 3, 178, 89, 0, 1323, 171, 1, 0, 0, 0, 1324, 1325, 3, 178, 89, 0, 1325, 1326, 5, 142, 0, 0, 1326, 1327, 5, 104, 0, 0, 1327, 1328, 3, 174, 87, 0, 1328, 173, 1, 0, 0, 0, 1329, 1334, 3, 126, 63, 0, 1330, 1334, 3, 124, 62, 0, 1331, 1334, 3, 120, 60, 0, 1332, 1334, 3, 122, 61, 0, 1333, 1329, 1, 0, 0, 0, 1333, 1330, 1, 0, 0, 0, 1333, 1331, 1, 0, 0, 0, 1333, 1332, 1, 0, 0, 0, 1334, 175, 1, 0, 0, 0, 1335, 1336, 3, 96, 48, 0, 1336, 1337, 7, 8, 0, 0, 1337, 1342, 1, 0, 0, 0, 1338, 1339, 5, 138, 0, 0, 1339, 1340, 5, 126, 0, 0, 1340, 1342, 5, 31, 0, 0, 1341, 1335, 1, 0, 0, 0, 1341, 1338, 1, 0, 0, 0, 1342, 177, 1, 0, 0, 0, 1343, 1344, 3, 102, 51, 0, 1344, 
179, 1, 0, 0, 0, 1345, 1346, 3, 96, 48, 0, 1346, 181, 1, 0, 0, 0, 1347, 1349, 3, 6, 3, 0, 1348, 1347, 1, 0, 0, 0, 1348, 1349, 1, 0, 0, 0, 1349, 1350, 1, 0, 0, 0, 1350, 1351, 5, 32, 0, 0, 1351, 1352, 5, 54, 0, 0, 1352, 1357, 3, 252, 126, 0, 1353, 1355, 5, 14, 0, 0, 1354, 1353, 1, 0, 0, 0, 1354, 1355, 1, 0, 0, 0, 1355, 1356, 1, 0, 0, 0, 1356, 1358, 3, 34, 17, 0, 1357, 1354, 1, 0, 0, 0, 1357, 1358, 1, 0, 0, 0, 1358, 1361, 1, 0, 0, 0, 1359, 1360, 5, 141, 0, 0, 1360, 1362, 3, 12, 6, 0, 1361, 1359, 1, 0, 0, 0, 1361, 1362, 1, 0, 0, 0, 1362, 1364, 1, 0, 0, 0, 1363, 1365, 3, 184, 92, 0, 1364, 1363, 1, 0, 0, 0, 1364, 1365, 1, 0, 0, 0, 1365, 183, 1, 0, 0, 0, 1366, 1367, 5, 113, 0, 0, 1367, 1368, 3, 42, 21, 0, 1368, 185, 1, 0, 0, 0, 1369, 1371, 3, 188, 94, 0, 1370, 1372, 7, 9, 0, 0, 1371, 1370, 1, 0, 0, 0, 1371, 1372, 1, 0, 0, 0, 1372, 187, 1, 0, 0, 0, 1373, 1389, 3, 216, 108, 0, 1374, 1389, 3, 202, 101, 0, 1375, 1389, 3, 214, 107, 0, 1376, 1389, 3, 212, 106, 0, 1377, 1389, 3, 208, 104, 0, 1378, 1389, 3, 204, 102, 0, 1379, 1389, 3, 206, 103, 0, 1380, 1389, 3, 200, 100, 0, 1381, 1389, 3, 190, 95, 0, 1382, 1389, 3, 210, 105, 0, 1383, 1389, 3, 218, 109, 0, 1384, 1389, 3, 220, 110, 0, 1385, 1389, 3, 222, 111, 0, 1386, 1389, 3, 224, 112, 0, 1387, 1389, 3, 226, 113, 0, 1388, 1373, 1, 0, 0, 0, 1388, 1374, 1, 0, 0, 0, 1388, 1375, 1, 0, 0, 0, 1388, 1376, 1, 0, 0, 0, 1388, 1377, 1, 0, 0, 0, 1388, 1378, 1, 0, 0, 0, 1388, 1379, 1, 0, 0, 0, 1388, 1380, 1, 0, 0, 0, 1388, 1381, 1, 0, 0, 0, 1388, 1382, 1, 0, 0, 0, 1388, 1383, 1, 0, 0, 0, 1388, 1384, 1, 0, 0, 0, 1388, 1385, 1, 0, 0, 0, 1388, 1386, 1, 0, 0, 0, 1388, 1387, 1, 0, 0, 0, 1389, 189, 1, 0, 0, 0, 1390, 1391, 5, 164, 0, 0, 1391, 1392, 5, 175, 0, 0, 1392, 1397, 3, 192, 96, 0, 1393, 1394, 5, 173, 0, 0, 1394, 1396, 3, 192, 96, 0, 1395, 1393, 1, 0, 0, 0, 1396, 1399, 1, 0, 0, 0, 1397, 1395, 1, 0, 0, 0, 1397, 1398, 1, 0, 0, 0, 1398, 1400, 1, 0, 0, 0, 1399, 1397, 1, 0, 0, 0, 1400, 1401, 5, 176, 0, 0, 1401, 191, 1, 0, 0, 0, 1402, 1403, 3, 450, 225, 0, 1403, 1405, 3, 188, 94, 0, 1404, 1406, 3, 194, 97, 0, 1405, 1404, 1, 0, 0, 0, 1405, 1406, 1, 0, 0, 0, 1406, 1408, 1, 0, 0, 0, 1407, 1409, 3, 436, 218, 0, 1408, 1407, 1, 0, 0, 0, 1408, 1409, 1, 0, 0, 0, 1409, 193, 1, 0, 0, 0, 1410, 1412, 3, 196, 98, 0, 1411, 1413, 3, 198, 99, 0, 1412, 1411, 1, 0, 0, 0, 1412, 1413, 1, 0, 0, 0, 1413, 1419, 1, 0, 0, 0, 1414, 1416, 3, 198, 99, 0, 1415, 1417, 3, 196, 98, 0, 1416, 1415, 1, 0, 0, 0, 1416, 1417, 1, 0, 0, 0, 1417, 1419, 1, 0, 0, 0, 1418, 1410, 1, 0, 0, 0, 1418, 1414, 1, 0, 0, 0, 1419, 195, 1, 0, 0, 0, 1420, 1426, 5, 31, 0, 0, 1421, 1427, 3, 442, 221, 0, 1422, 1427, 3, 446, 223, 0, 1423, 1427, 5, 204, 0, 0, 1424, 1427, 5, 203, 0, 0, 1425, 1427, 3, 450, 225, 0, 1426, 1421, 1, 0, 0, 0, 1426, 1422, 1, 0, 0, 0, 1426, 1423, 1, 0, 0, 0, 1426, 1424, 1, 0, 0, 0, 1426, 1425, 1, 0, 0, 0, 1427, 197, 1, 0, 0, 0, 1428, 1429, 5, 93, 0, 0, 1429, 1430, 5, 202, 0, 0, 1430, 199, 1, 0, 0, 0, 1431, 1432, 5, 161, 0, 0, 1432, 1433, 5, 175, 0, 0, 1433, 1434, 3, 188, 94, 0, 1434, 1435, 5, 176, 0, 0, 1435, 201, 1, 0, 0, 0, 1436, 1437, 5, 152, 0, 0, 1437, 1438, 5, 175, 0, 0, 1438, 1439, 3, 188, 94, 0, 1439, 1440, 5, 176, 0, 0, 1440, 203, 1, 0, 0, 0, 1441, 1442, 7, 10, 0, 0, 1442, 205, 1, 0, 0, 0, 1443, 1444, 5, 72, 0, 0, 1444, 207, 1, 0, 0, 0, 1445, 1446, 7, 11, 0, 0, 1446, 209, 1, 0, 0, 0, 1447, 1448, 5, 165, 0, 0, 1448, 211, 1, 0, 0, 0, 1449, 1450, 5, 156, 0, 0, 1450, 1451, 5, 175, 0, 0, 1451, 1452, 3, 448, 224, 0, 1452, 1453, 5, 176, 0, 0, 1453, 1460, 1, 0, 0, 0, 1454, 1455, 5, 156, 0, 0, 1455, 1456, 5, 
175, 0, 0, 1456, 1457, 3, 448, 224, 0, 1457, 1458, 6, 106, -1, 0, 1458, 1460, 1, 0, 0, 0, 1459, 1449, 1, 0, 0, 0, 1459, 1454, 1, 0, 0, 0, 1460, 213, 1, 0, 0, 0, 1461, 1462, 5, 154, 0, 0, 1462, 215, 1, 0, 0, 0, 1463, 1467, 5, 153, 0, 0, 1464, 1465, 5, 175, 0, 0, 1465, 1466, 5, 205, 0, 0, 1466, 1468, 5, 176, 0, 0, 1467, 1464, 1, 0, 0, 0, 1467, 1468, 1, 0, 0, 0, 1468, 217, 1, 0, 0, 0, 1469, 1473, 5, 166, 0, 0, 1470, 1471, 5, 175, 0, 0, 1471, 1472, 5, 205, 0, 0, 1472, 1474, 5, 176, 0, 0, 1473, 1470, 1, 0, 0, 0, 1473, 1474, 1, 0, 0, 0, 1474, 219, 1, 0, 0, 0, 1475, 1476, 5, 167, 0, 0, 1476, 221, 1, 0, 0, 0, 1477, 1478, 5, 168, 0, 0, 1478, 223, 1, 0, 0, 0, 1479, 1480, 5, 169, 0, 0, 1480, 225, 1, 0, 0, 0, 1481, 1482, 5, 170, 0, 0, 1482, 227, 1, 0, 0, 0, 1483, 1488, 3, 450, 225, 0, 1484, 1485, 5, 182, 0, 0, 1485, 1487, 3, 450, 225, 0, 1486, 1484, 1, 0, 0, 0, 1487, 1490, 1, 0, 0, 0, 1488, 1486, 1, 0, 0, 0, 1488, 1489, 1, 0, 0, 0, 1489, 229, 1, 0, 0, 0, 1490, 1488, 1, 0, 0, 0, 1491, 1496, 3, 232, 116, 0, 1492, 1493, 5, 182, 0, 0, 1493, 1495, 3, 232, 116, 0, 1494, 1492, 1, 0, 0, 0, 1495, 1498, 1, 0, 0, 0, 1496, 1494, 1, 0, 0, 0, 1496, 1497, 1, 0, 0, 0, 1497, 231, 1, 0, 0, 0, 1498, 1496, 1, 0, 0, 0, 1499, 1501, 5, 210, 0, 0, 1500, 1499, 1, 0, 0, 0, 1500, 1501, 1, 0, 0, 0, 1501, 1502, 1, 0, 0, 0, 1502, 1503, 3, 450, 225, 0, 1503, 233, 1, 0, 0, 0, 1504, 1509, 3, 236, 118, 0, 1505, 1506, 5, 182, 0, 0, 1506, 1508, 3, 236, 118, 0, 1507, 1505, 1, 0, 0, 0, 1508, 1511, 1, 0, 0, 0, 1509, 1507, 1, 0, 0, 0, 1509, 1510, 1, 0, 0, 0, 1510, 235, 1, 0, 0, 0, 1511, 1509, 1, 0, 0, 0, 1512, 1515, 3, 450, 225, 0, 1513, 1515, 5, 208, 0, 0, 1514, 1512, 1, 0, 0, 0, 1514, 1513, 1, 0, 0, 0, 1515, 237, 1, 0, 0, 0, 1516, 1517, 5, 27, 0, 0, 1517, 1521, 5, 89, 0, 0, 1518, 1519, 5, 63, 0, 0, 1519, 1520, 5, 93, 0, 0, 1520, 1522, 5, 46, 0, 0, 1521, 1518, 1, 0, 0, 0, 1521, 1522, 1, 0, 0, 0, 1522, 1523, 1, 0, 0, 0, 1523, 1524, 3, 254, 127, 0, 1524, 239, 1, 0, 0, 0, 1525, 1526, 5, 38, 0, 0, 1526, 1529, 5, 89, 0, 0, 1527, 1528, 5, 63, 0, 0, 1528, 1530, 5, 46, 0, 0, 1529, 1527, 1, 0, 0, 0, 1529, 1530, 1, 0, 0, 0, 1530, 1531, 1, 0, 0, 0, 1531, 1533, 3, 254, 127, 0, 1532, 1534, 5, 22, 0, 0, 1533, 1532, 1, 0, 0, 0, 1533, 1534, 1, 0, 0, 0, 1534, 241, 1, 0, 0, 0, 1535, 1536, 3, 450, 225, 0, 1536, 243, 1, 0, 0, 0, 1537, 1538, 5, 27, 0, 0, 1538, 1539, 5, 110, 0, 0, 1539, 1540, 3, 242, 121, 0, 1540, 245, 1, 0, 0, 0, 1541, 1542, 5, 38, 0, 0, 1542, 1543, 5, 110, 0, 0, 1543, 1544, 3, 242, 121, 0, 1544, 247, 1, 0, 0, 0, 1545, 1546, 5, 122, 0, 0, 1546, 1547, 5, 81, 0, 0, 1547, 1548, 5, 110, 0, 0, 1548, 1549, 3, 242, 121, 0, 1549, 249, 1, 0, 0, 0, 1550, 1551, 5, 27, 0, 0, 1551, 1555, 5, 126, 0, 0, 1552, 1553, 5, 63, 0, 0, 1553, 1554, 5, 93, 0, 0, 1554, 1556, 5, 46, 0, 0, 1555, 1552, 1, 0, 0, 0, 1555, 1556, 1, 0, 0, 0, 1556, 1557, 1, 0, 0, 0, 1557, 1559, 3, 252, 126, 0, 1558, 1560, 3, 436, 218, 0, 1559, 1558, 1, 0, 0, 0, 1559, 1560, 1, 0, 0, 0, 1560, 1561, 1, 0, 0, 0, 1561, 1562, 5, 175, 0, 0, 1562, 1563, 3, 256, 128, 0, 1563, 1565, 5, 176, 0, 0, 1564, 1566, 3, 278, 139, 0, 1565, 1564, 1, 0, 0, 0, 1565, 1566, 1, 0, 0, 0, 1566, 251, 1, 0, 0, 0, 1567, 1568, 3, 254, 127, 0, 1568, 1569, 5, 174, 0, 0, 1569, 1571, 1, 0, 0, 0, 1570, 1567, 1, 0, 0, 0, 1570, 1571, 1, 0, 0, 0, 1571, 1572, 1, 0, 0, 0, 1572, 1573, 3, 230, 115, 0, 1573, 253, 1, 0, 0, 0, 1574, 1575, 3, 228, 114, 0, 1575, 255, 1, 0, 0, 0, 1576, 1580, 3, 258, 129, 0, 1577, 1580, 3, 268, 134, 0, 1578, 1580, 3, 264, 132, 0, 1579, 1576, 1, 0, 0, 0, 1579, 1577, 1, 0, 0, 0, 1579, 1578, 1, 0, 0, 0, 1580, 
1589, 1, 0, 0, 0, 1581, 1585, 5, 173, 0, 0, 1582, 1586, 3, 258, 129, 0, 1583, 1586, 3, 268, 134, 0, 1584, 1586, 3, 264, 132, 0, 1585, 1582, 1, 0, 0, 0, 1585, 1583, 1, 0, 0, 0, 1585, 1584, 1, 0, 0, 0, 1586, 1588, 1, 0, 0, 0, 1587, 1581, 1, 0, 0, 0, 1588, 1591, 1, 0, 0, 0, 1589, 1587, 1, 0, 0, 0, 1589, 1590, 1, 0, 0, 0, 1590, 257, 1, 0, 0, 0, 1591, 1589, 1, 0, 0, 0, 1592, 1593, 3, 450, 225, 0, 1593, 1599, 3, 188, 94, 0, 1594, 1600, 3, 194, 97, 0, 1595, 1600, 3, 296, 148, 0, 1596, 1600, 3, 302, 151, 0, 1597, 1600, 3, 300, 150, 0, 1598, 1600, 3, 260, 130, 0, 1599, 1594, 1, 0, 0, 0, 1599, 1595, 1, 0, 0, 0, 1599, 1596, 1, 0, 0, 0, 1599, 1597, 1, 0, 0, 0, 1599, 1598, 1, 0, 0, 0, 1599, 1600, 1, 0, 0, 0, 1600, 1602, 1, 0, 0, 0, 1601, 1603, 3, 436, 218, 0, 1602, 1601, 1, 0, 0, 0, 1602, 1603, 1, 0, 0, 0, 1603, 259, 1, 0, 0, 0, 1604, 1605, 5, 175, 0, 0, 1605, 1610, 3, 262, 131, 0, 1606, 1607, 5, 173, 0, 0, 1607, 1609, 3, 262, 131, 0, 1608, 1606, 1, 0, 0, 0, 1609, 1612, 1, 0, 0, 0, 1610, 1608, 1, 0, 0, 0, 1610, 1611, 1, 0, 0, 0, 1611, 1613, 1, 0, 0, 0, 1612, 1610, 1, 0, 0, 0, 1613, 1614, 5, 176, 0, 0, 1614, 261, 1, 0, 0, 0, 1615, 1616, 3, 266, 133, 0, 1616, 1617, 5, 14, 0, 0, 1617, 1618, 7, 12, 0, 0, 1618, 1619, 5, 88, 0, 0, 1619, 263, 1, 0, 0, 0, 1620, 1621, 3, 262, 131, 0, 1621, 265, 1, 0, 0, 0, 1622, 1625, 3, 450, 225, 0, 1623, 1625, 3, 446, 223, 0, 1624, 1622, 1, 0, 0, 0, 1624, 1623, 1, 0, 0, 0, 1625, 1633, 1, 0, 0, 0, 1626, 1629, 5, 182, 0, 0, 1627, 1630, 3, 450, 225, 0, 1628, 1630, 3, 446, 223, 0, 1629, 1627, 1, 0, 0, 0, 1629, 1628, 1, 0, 0, 0, 1630, 1632, 1, 0, 0, 0, 1631, 1626, 1, 0, 0, 0, 1632, 1635, 1, 0, 0, 0, 1633, 1631, 1, 0, 0, 0, 1633, 1634, 1, 0, 0, 0, 1634, 267, 1, 0, 0, 0, 1635, 1633, 1, 0, 0, 0, 1636, 1637, 5, 108, 0, 0, 1637, 1638, 5, 74, 0, 0, 1638, 1643, 5, 175, 0, 0, 1639, 1641, 3, 270, 135, 0, 1640, 1642, 5, 173, 0, 0, 1641, 1640, 1, 0, 0, 0, 1641, 1642, 1, 0, 0, 0, 1642, 1644, 1, 0, 0, 0, 1643, 1639, 1, 0, 0, 0, 1643, 1644, 1, 0, 0, 0, 1644, 1646, 1, 0, 0, 0, 1645, 1647, 3, 272, 136, 0, 1646, 1645, 1, 0, 0, 0, 1646, 1647, 1, 0, 0, 0, 1647, 1648, 1, 0, 0, 0, 1648, 1649, 5, 176, 0, 0, 1649, 269, 1, 0, 0, 0, 1650, 1651, 5, 123, 0, 0, 1651, 1652, 5, 175, 0, 0, 1652, 1653, 3, 272, 136, 0, 1653, 1654, 5, 176, 0, 0, 1654, 1660, 1, 0, 0, 0, 1655, 1656, 5, 175, 0, 0, 1656, 1657, 3, 272, 136, 0, 1657, 1658, 6, 135, -1, 0, 1658, 1660, 1, 0, 0, 0, 1659, 1650, 1, 0, 0, 0, 1659, 1655, 1, 0, 0, 0, 1660, 271, 1, 0, 0, 0, 1661, 1666, 3, 274, 137, 0, 1662, 1663, 5, 173, 0, 0, 1663, 1665, 3, 274, 137, 0, 1664, 1662, 1, 0, 0, 0, 1665, 1668, 1, 0, 0, 0, 1666, 1664, 1, 0, 0, 0, 1666, 1667, 1, 0, 0, 0, 1667, 273, 1, 0, 0, 0, 1668, 1666, 1, 0, 0, 0, 1669, 1671, 3, 450, 225, 0, 1670, 1672, 3, 276, 138, 0, 1671, 1670, 1, 0, 0, 0, 1671, 1672, 1, 0, 0, 0, 1672, 275, 1, 0, 0, 0, 1673, 1674, 5, 175, 0, 0, 1674, 1675, 5, 205, 0, 0, 1675, 1676, 5, 176, 0, 0, 1676, 277, 1, 0, 0, 0, 1677, 1683, 3, 280, 140, 0, 1678, 1683, 3, 284, 142, 0, 1679, 1683, 3, 286, 143, 0, 1680, 1683, 3, 288, 144, 0, 1681, 1683, 3, 290, 145, 0, 1682, 1677, 1, 0, 0, 0, 1682, 1678, 1, 0, 0, 0, 1682, 1679, 1, 0, 0, 0, 1682, 1680, 1, 0, 0, 0, 1682, 1681, 1, 0, 0, 0, 1683, 1684, 1, 0, 0, 0, 1684, 1682, 1, 0, 0, 0, 1684, 1685, 1, 0, 0, 0, 1685, 279, 1, 0, 0, 0, 1686, 1687, 5, 138, 0, 0, 1687, 1688, 5, 130, 0, 0, 1688, 1689, 3, 438, 219, 0, 1689, 281, 1, 0, 0, 0, 1690, 1691, 3, 448, 224, 0, 1691, 283, 1, 0, 0, 0, 1692, 1693, 5, 65, 0, 0, 1693, 1694, 5, 111, 0, 0, 1694, 1695, 3, 282, 141, 0, 1695, 285, 1, 0, 0, 0, 1696, 1697, 5, 142, 0, 0, 
1697, 1698, 5, 118, 0, 0, 1698, 1700, 5, 55, 0, 0, 1699, 1701, 5, 50, 0, 0, 1700, 1699, 1, 0, 0, 0, 1700, 1701, 1, 0, 0, 0, 1701, 287, 1, 0, 0, 0, 1702, 1703, 5, 14, 0, 0, 1703, 1704, 5, 72, 0, 0, 1704, 1705, 5, 24, 0, 0, 1705, 289, 1, 0, 0, 0, 1706, 1707, 5, 42, 0, 0, 1707, 1708, 5, 17, 0, 0, 1708, 1710, 5, 64, 0, 0, 1709, 1711, 3, 292, 146, 0, 1710, 1709, 1, 0, 0, 0, 1710, 1711, 1, 0, 0, 0, 1711, 291, 1, 0, 0, 0, 1712, 1713, 5, 138, 0, 0, 1713, 1714, 5, 130, 0, 0, 1714, 1715, 3, 438, 219, 0, 1715, 293, 1, 0, 0, 0, 1716, 1717, 5, 36, 0, 0, 1717, 1718, 5, 17, 0, 0, 1718, 1719, 5, 64, 0, 0, 1719, 295, 1, 0, 0, 0, 1720, 1728, 5, 57, 0, 0, 1721, 1729, 5, 11, 0, 0, 1722, 1723, 5, 19, 0, 0, 1723, 1726, 5, 31, 0, 0, 1724, 1725, 5, 97, 0, 0, 1725, 1727, 5, 202, 0, 0, 1726, 1724, 1, 0, 0, 0, 1726, 1727, 1, 0, 0, 0, 1727, 1729, 1, 0, 0, 0, 1728, 1721, 1, 0, 0, 0, 1728, 1722, 1, 0, 0, 0, 1729, 1730, 1, 0, 0, 0, 1730, 1731, 5, 14, 0, 0, 1731, 1740, 5, 62, 0, 0, 1732, 1734, 5, 175, 0, 0, 1733, 1735, 3, 298, 149, 0, 1734, 1733, 1, 0, 0, 0, 1735, 1736, 1, 0, 0, 0, 1736, 1734, 1, 0, 0, 0, 1736, 1737, 1, 0, 0, 0, 1737, 1738, 1, 0, 0, 0, 1738, 1739, 5, 176, 0, 0, 1739, 1741, 1, 0, 0, 0, 1740, 1732, 1, 0, 0, 0, 1740, 1741, 1, 0, 0, 0, 1741, 297, 1, 0, 0, 0, 1742, 1743, 5, 125, 0, 0, 1743, 1744, 5, 142, 0, 0, 1744, 1764, 3, 444, 222, 0, 1745, 1746, 5, 66, 0, 0, 1746, 1747, 5, 19, 0, 0, 1747, 1764, 3, 444, 222, 0, 1748, 1749, 5, 83, 0, 0, 1749, 1764, 3, 444, 222, 0, 1750, 1751, 5, 92, 0, 0, 1751, 1764, 5, 83, 0, 0, 1752, 1753, 5, 86, 0, 0, 1753, 1764, 3, 444, 222, 0, 1754, 1755, 5, 92, 0, 0, 1755, 1764, 5, 86, 0, 0, 1756, 1757, 5, 20, 0, 0, 1757, 1764, 5, 205, 0, 0, 1758, 1759, 5, 92, 0, 0, 1759, 1764, 5, 20, 0, 0, 1760, 1764, 5, 28, 0, 0, 1761, 1762, 5, 92, 0, 0, 1762, 1764, 5, 28, 0, 0, 1763, 1742, 1, 0, 0, 0, 1763, 1745, 1, 0, 0, 0, 1763, 1748, 1, 0, 0, 0, 1763, 1750, 1, 0, 0, 0, 1763, 1752, 1, 0, 0, 0, 1763, 1754, 1, 0, 0, 0, 1763, 1756, 1, 0, 0, 0, 1763, 1758, 1, 0, 0, 0, 1763, 1760, 1, 0, 0, 0, 1763, 1761, 1, 0, 0, 0, 1764, 299, 1, 0, 0, 0, 1765, 1766, 5, 14, 0, 0, 1766, 1767, 5, 88, 0, 0, 1767, 301, 1, 0, 0, 0, 1768, 1769, 5, 14, 0, 0, 1769, 1773, 5, 145, 0, 0, 1770, 1771, 5, 57, 0, 0, 1771, 1772, 5, 19, 0, 0, 1772, 1774, 5, 31, 0, 0, 1773, 1770, 1, 0, 0, 0, 1773, 1774, 1, 0, 0, 0, 1774, 303, 1, 0, 0, 0, 1775, 1776, 5, 10, 0, 0, 1776, 1777, 5, 126, 0, 0, 1777, 1778, 3, 252, 126, 0, 1778, 1779, 3, 306, 153, 0, 1779, 305, 1, 0, 0, 0, 1780, 1789, 3, 316, 158, 0, 1781, 1789, 3, 280, 140, 0, 1782, 1789, 3, 312, 156, 0, 1783, 1789, 3, 314, 157, 0, 1784, 1789, 3, 308, 154, 0, 1785, 1789, 3, 310, 155, 0, 1786, 1789, 3, 290, 145, 0, 1787, 1789, 3, 294, 147, 0, 1788, 1780, 1, 0, 0, 0, 1788, 1781, 1, 0, 0, 0, 1788, 1782, 1, 0, 0, 0, 1788, 1783, 1, 0, 0, 0, 1788, 1784, 1, 0, 0, 0, 1788, 1785, 1, 0, 0, 0, 1788, 1786, 1, 0, 0, 0, 1788, 1787, 1, 0, 0, 0, 1789, 307, 1, 0, 0, 0, 1790, 1791, 5, 53, 0, 0, 1791, 1793, 5, 118, 0, 0, 1792, 1794, 5, 50, 0, 0, 1793, 1792, 1, 0, 0, 0, 1793, 1794, 1, 0, 0, 0, 1794, 309, 1, 0, 0, 0, 1795, 1796, 5, 132, 0, 0, 1796, 1797, 5, 118, 0, 0, 1797, 311, 1, 0, 0, 0, 1798, 1799, 5, 7, 0, 0, 1799, 1800, 5, 111, 0, 0, 1800, 1801, 3, 282, 141, 0, 1801, 313, 1, 0, 0, 0, 1802, 1803, 5, 38, 0, 0, 1803, 1804, 5, 111, 0, 0, 1804, 1805, 3, 282, 141, 0, 1805, 315, 1, 0, 0, 0, 1806, 1810, 5, 175, 0, 0, 1807, 1811, 3, 318, 159, 0, 1808, 1811, 3, 320, 160, 0, 1809, 1811, 3, 322, 161, 0, 1810, 1807, 1, 0, 0, 0, 1810, 1808, 1, 0, 0, 0, 1810, 1809, 1, 0, 0, 0, 1811, 1820, 1, 0, 0, 0, 1812, 1816, 5, 173, 
0, 0, 1813, 1817, 3, 318, 159, 0, 1814, 1817, 3, 320, 160, 0, 1815, 1817, 3, 322, 161, 0, 1816, 1813, 1, 0, 0, 0, 1816, 1814, 1, 0, 0, 0, 1816, 1815, 1, 0, 0, 0, 1817, 1819, 1, 0, 0, 0, 1818, 1812, 1, 0, 0, 0, 1819, 1822, 1, 0, 0, 0, 1820, 1818, 1, 0, 0, 0, 1820, 1821, 1, 0, 0, 0, 1821, 1823, 1, 0, 0, 0, 1822, 1820, 1, 0, 0, 0, 1823, 1824, 5, 176, 0, 0, 1824, 317, 1, 0, 0, 0, 1825, 1826, 5, 7, 0, 0, 1826, 1827, 3, 324, 162, 0, 1827, 1833, 3, 188, 94, 0, 1828, 1834, 3, 194, 97, 0, 1829, 1834, 3, 296, 148, 0, 1830, 1834, 3, 300, 150, 0, 1831, 1834, 3, 302, 151, 0, 1832, 1834, 3, 260, 130, 0, 1833, 1828, 1, 0, 0, 0, 1833, 1829, 1, 0, 0, 0, 1833, 1830, 1, 0, 0, 0, 1833, 1831, 1, 0, 0, 0, 1833, 1832, 1, 0, 0, 0, 1833, 1834, 1, 0, 0, 0, 1834, 1836, 1, 0, 0, 0, 1835, 1837, 3, 436, 218, 0, 1836, 1835, 1, 0, 0, 0, 1836, 1837, 1, 0, 0, 0, 1837, 319, 1, 0, 0, 0, 1838, 1839, 5, 38, 0, 0, 1839, 1840, 3, 324, 162, 0, 1840, 321, 1, 0, 0, 0, 1841, 1842, 5, 87, 0, 0, 1842, 1854, 3, 324, 162, 0, 1843, 1845, 3, 188, 94, 0, 1844, 1846, 3, 194, 97, 0, 1845, 1844, 1, 0, 0, 0, 1845, 1846, 1, 0, 0, 0, 1846, 1848, 1, 0, 0, 0, 1847, 1849, 3, 436, 218, 0, 1848, 1847, 1, 0, 0, 0, 1848, 1849, 1, 0, 0, 0, 1849, 1855, 1, 0, 0, 0, 1850, 1855, 3, 296, 148, 0, 1851, 1855, 3, 302, 151, 0, 1852, 1853, 5, 38, 0, 0, 1853, 1855, 5, 62, 0, 0, 1854, 1843, 1, 0, 0, 0, 1854, 1850, 1, 0, 0, 0, 1854, 1851, 1, 0, 0, 0, 1854, 1852, 1, 0, 0, 0, 1855, 323, 1, 0, 0, 0, 1856, 1861, 3, 326, 163, 0, 1857, 1858, 5, 182, 0, 0, 1858, 1860, 3, 328, 164, 0, 1859, 1857, 1, 0, 0, 0, 1860, 1863, 1, 0, 0, 0, 1861, 1859, 1, 0, 0, 0, 1861, 1862, 1, 0, 0, 0, 1862, 325, 1, 0, 0, 0, 1863, 1861, 1, 0, 0, 0, 1864, 1869, 3, 450, 225, 0, 1865, 1866, 5, 177, 0, 0, 1866, 1868, 5, 178, 0, 0, 1867, 1865, 1, 0, 0, 0, 1868, 1871, 1, 0, 0, 0, 1869, 1867, 1, 0, 0, 0, 1869, 1870, 1, 0, 0, 0, 1870, 327, 1, 0, 0, 0, 1871, 1869, 1, 0, 0, 0, 1872, 1877, 3, 450, 225, 0, 1873, 1874, 5, 177, 0, 0, 1874, 1876, 5, 178, 0, 0, 1875, 1873, 1, 0, 0, 0, 1876, 1879, 1, 0, 0, 0, 1877, 1875, 1, 0, 0, 0, 1877, 1878, 1, 0, 0, 0, 1878, 1884, 1, 0, 0, 0, 1879, 1877, 1, 0, 0, 0, 1880, 1881, 5, 139, 0, 0, 1881, 1882, 5, 175, 0, 0, 1882, 1884, 5, 176, 0, 0, 1883, 1872, 1, 0, 0, 0, 1883, 1880, 1, 0, 0, 0, 1884, 329, 1, 0, 0, 0, 1885, 1886, 5, 38, 0, 0, 1886, 1889, 5, 126, 0, 0, 1887, 1888, 5, 63, 0, 0, 1888, 1890, 5, 46, 0, 0, 1889, 1887, 1, 0, 0, 0, 1889, 1890, 1, 0, 0, 0, 1890, 1891, 1, 0, 0, 0, 1891, 1892, 3, 252, 126, 0, 1892, 331, 1, 0, 0, 0, 1893, 1894, 5, 27, 0, 0, 1894, 1898, 5, 67, 0, 0, 1895, 1896, 5, 63, 0, 0, 1896, 1897, 5, 93, 0, 0, 1897, 1899, 5, 46, 0, 0, 1898, 1895, 1, 0, 0, 0, 1898, 1899, 1, 0, 0, 0, 1899, 1900, 1, 0, 0, 0, 1900, 1901, 3, 334, 167, 0, 1901, 1902, 5, 97, 0, 0, 1902, 1924, 3, 252, 126, 0, 1903, 1904, 5, 175, 0, 0, 1904, 1905, 3, 336, 168, 0, 1905, 1911, 5, 176, 0, 0, 1906, 1908, 5, 142, 0, 0, 1907, 1909, 5, 92, 0, 0, 1908, 1907, 1, 0, 0, 0, 1908, 1909, 1, 0, 0, 0, 1909, 1910, 1, 0, 0, 0, 1910, 1912, 5, 94, 0, 0, 1911, 1906, 1, 0, 0, 0, 1911, 1912, 1, 0, 0, 0, 1912, 1918, 1, 0, 0, 0, 1913, 1914, 5, 142, 0, 0, 1914, 1915, 5, 143, 0, 0, 1915, 1916, 5, 76, 0, 0, 1916, 1917, 5, 105, 0, 0, 1917, 1919, 5, 117, 0, 0, 1918, 1913, 1, 0, 0, 0, 1918, 1919, 1, 0, 0, 0, 1919, 1925, 1, 0, 0, 0, 1920, 1921, 5, 175, 0, 0, 1921, 1922, 3, 336, 168, 0, 1922, 1923, 6, 166, -1, 0, 1923, 1925, 1, 0, 0, 0, 1924, 1903, 1, 0, 0, 0, 1924, 1920, 1, 0, 0, 0, 1925, 1927, 1, 0, 0, 0, 1926, 1928, 3, 436, 218, 0, 1927, 1926, 1, 0, 0, 0, 1927, 1928, 1, 0, 0, 0, 1928, 333, 1, 0, 0, 0, 1929, 
1930, 3, 450, 225, 0, 1930, 335, 1, 0, 0, 0, 1931, 1936, 3, 338, 169, 0, 1932, 1933, 5, 173, 0, 0, 1933, 1935, 3, 338, 169, 0, 1934, 1932, 1, 0, 0, 0, 1935, 1938, 1, 0, 0, 0, 1936, 1934, 1, 0, 0, 0, 1936, 1937, 1, 0, 0, 0, 1937, 337, 1, 0, 0, 0, 1938, 1936, 1, 0, 0, 0, 1939, 1941, 3, 344, 172, 0, 1940, 1942, 3, 354, 177, 0, 1941, 1940, 1, 0, 0, 0, 1941, 1942, 1, 0, 0, 0, 1942, 1945, 1, 0, 0, 0, 1943, 1945, 3, 340, 170, 0, 1944, 1939, 1, 0, 0, 0, 1944, 1943, 1, 0, 0, 0, 1945, 339, 1, 0, 0, 0, 1946, 1947, 3, 450, 225, 0, 1947, 1949, 5, 175, 0, 0, 1948, 1950, 3, 344, 172, 0, 1949, 1948, 1, 0, 0, 0, 1949, 1950, 1, 0, 0, 0, 1950, 1952, 1, 0, 0, 0, 1951, 1953, 3, 354, 177, 0, 1952, 1951, 1, 0, 0, 0, 1952, 1953, 1, 0, 0, 0, 1953, 1955, 1, 0, 0, 0, 1954, 1956, 3, 342, 171, 0, 1955, 1954, 1, 0, 0, 0, 1955, 1956, 1, 0, 0, 0, 1956, 1957, 1, 0, 0, 0, 1957, 1958, 5, 176, 0, 0, 1958, 341, 1, 0, 0, 0, 1959, 1960, 5, 173, 0, 0, 1960, 1962, 3, 120, 60, 0, 1961, 1959, 1, 0, 0, 0, 1962, 1963, 1, 0, 0, 0, 1963, 1961, 1, 0, 0, 0, 1963, 1964, 1, 0, 0, 0, 1964, 343, 1, 0, 0, 0, 1965, 1967, 3, 348, 174, 0, 1966, 1965, 1, 0, 0, 0, 1966, 1967, 1, 0, 0, 0, 1967, 1973, 1, 0, 0, 0, 1968, 1974, 3, 234, 117, 0, 1969, 1971, 3, 350, 175, 0, 1970, 1972, 3, 352, 176, 0, 1971, 1970, 1, 0, 0, 0, 1971, 1972, 1, 0, 0, 0, 1972, 1974, 1, 0, 0, 0, 1973, 1968, 1, 0, 0, 0, 1973, 1969, 1, 0, 0, 0, 1974, 1977, 1, 0, 0, 0, 1975, 1977, 3, 346, 173, 0, 1976, 1966, 1, 0, 0, 0, 1976, 1975, 1, 0, 0, 0, 1977, 345, 1, 0, 0, 0, 1978, 1979, 5, 39, 0, 0, 1979, 1980, 5, 175, 0, 0, 1980, 1981, 3, 234, 117, 0, 1981, 1983, 5, 176, 0, 0, 1982, 1984, 3, 352, 176, 0, 1983, 1982, 1, 0, 0, 0, 1983, 1984, 1, 0, 0, 0, 1984, 1996, 1, 0, 0, 0, 1985, 1986, 5, 75, 0, 0, 1986, 1987, 5, 175, 0, 0, 1987, 1988, 3, 234, 117, 0, 1988, 1989, 5, 176, 0, 0, 1989, 1996, 1, 0, 0, 0, 1990, 1991, 5, 76, 0, 0, 1991, 1992, 5, 175, 0, 0, 1992, 1993, 3, 234, 117, 0, 1993, 1994, 5, 176, 0, 0, 1994, 1996, 1, 0, 0, 0, 1995, 1978, 1, 0, 0, 0, 1995, 1985, 1, 0, 0, 0, 1995, 1990, 1, 0, 0, 0, 1996, 347, 1, 0, 0, 0, 1997, 1998, 5, 4, 0, 0, 1998, 349, 1, 0, 0, 0, 1999, 2010, 3, 236, 118, 0, 2000, 2001, 5, 182, 0, 0, 2001, 2009, 3, 236, 118, 0, 2002, 2003, 5, 182, 0, 0, 2003, 2004, 5, 139, 0, 0, 2004, 2005, 5, 175, 0, 0, 2005, 2009, 5, 176, 0, 0, 2006, 2007, 5, 177, 0, 0, 2007, 2009, 5, 178, 0, 0, 2008, 2000, 1, 0, 0, 0, 2008, 2002, 1, 0, 0, 0, 2008, 2006, 1, 0, 0, 0, 2009, 2012, 1, 0, 0, 0, 2010, 2008, 1, 0, 0, 0, 2010, 2011, 1, 0, 0, 0, 2011, 2023, 1, 0, 0, 0, 2012, 2010, 1, 0, 0, 0, 2013, 2014, 5, 177, 0, 0, 2014, 2024, 5, 178, 0, 0, 2015, 2016, 5, 182, 0, 0, 2016, 2017, 5, 139, 0, 0, 2017, 2018, 5, 175, 0, 0, 2018, 2024, 5, 176, 0, 0, 2019, 2020, 5, 182, 0, 0, 2020, 2021, 5, 76, 0, 0, 2021, 2022, 5, 175, 0, 0, 2022, 2024, 5, 176, 0, 0, 2023, 2013, 1, 0, 0, 0, 2023, 2015, 1, 0, 0, 0, 2023, 2019, 1, 0, 0, 0, 2024, 351, 1, 0, 0, 0, 2025, 2026, 5, 182, 0, 0, 2026, 2027, 3, 234, 117, 0, 2027, 353, 1, 0, 0, 0, 2028, 2041, 5, 14, 0, 0, 2029, 2042, 5, 159, 0, 0, 2030, 2042, 5, 160, 0, 0, 2031, 2042, 5, 155, 0, 0, 2032, 2042, 5, 165, 0, 0, 2033, 2042, 5, 154, 0, 0, 2034, 2042, 5, 162, 0, 0, 2035, 2042, 5, 168, 0, 0, 2036, 2038, 5, 158, 0, 0, 2037, 2039, 3, 428, 214, 0, 2038, 2037, 1, 0, 0, 0, 2038, 2039, 1, 0, 0, 0, 2039, 2042, 1, 0, 0, 0, 2040, 2042, 5, 163, 0, 0, 2041, 2029, 1, 0, 0, 0, 2041, 2030, 1, 0, 0, 0, 2041, 2031, 1, 0, 0, 0, 2041, 2032, 1, 0, 0, 0, 2041, 2033, 1, 0, 0, 0, 2041, 2034, 1, 0, 0, 0, 2041, 2035, 1, 0, 0, 0, 2041, 2036, 1, 0, 0, 0, 2041, 2040, 1, 0, 0, 0, 2042, 355, 1, 
0, 0, 0, 2043, 2044, 5, 27, 0, 0, 2044, 2045, 5, 56, 0, 0, 2045, 2049, 5, 67, 0, 0, 2046, 2047, 5, 63, 0, 0, 2047, 2048, 5, 93, 0, 0, 2048, 2050, 5, 46, 0, 0, 2049, 2046, 1, 0, 0, 0, 2049, 2050, 1, 0, 0, 0, 2050, 2051, 1, 0, 0, 0, 2051, 2052, 3, 334, 167, 0, 2052, 2053, 5, 97, 0, 0, 2053, 2054, 3, 252, 126, 0, 2054, 2056, 3, 358, 179, 0, 2055, 2057, 3, 364, 182, 0, 2056, 2055, 1, 0, 0, 0, 2056, 2057, 1, 0, 0, 0, 2057, 2059, 1, 0, 0, 0, 2058, 2060, 5, 102, 0, 0, 2059, 2058, 1, 0, 0, 0, 2059, 2060, 1, 0, 0, 0, 2060, 2062, 1, 0, 0, 0, 2061, 2063, 3, 436, 218, 0, 2062, 2061, 1, 0, 0, 0, 2062, 2063, 1, 0, 0, 0, 2063, 357, 1, 0, 0, 0, 2064, 2065, 5, 175, 0, 0, 2065, 2066, 3, 360, 180, 0, 2066, 2067, 5, 176, 0, 0, 2067, 2073, 1, 0, 0, 0, 2068, 2069, 5, 175, 0, 0, 2069, 2070, 3, 360, 180, 0, 2070, 2071, 6, 179, -1, 0, 2071, 2073, 1, 0, 0, 0, 2072, 2064, 1, 0, 0, 0, 2072, 2068, 1, 0, 0, 0, 2073, 359, 1, 0, 0, 0, 2074, 2079, 3, 362, 181, 0, 2075, 2076, 5, 173, 0, 0, 2076, 2078, 3, 362, 181, 0, 2077, 2075, 1, 0, 0, 0, 2078, 2081, 1, 0, 0, 0, 2079, 2077, 1, 0, 0, 0, 2079, 2080, 1, 0, 0, 0, 2080, 361, 1, 0, 0, 0, 2081, 2079, 1, 0, 0, 0, 2082, 2084, 3, 344, 172, 0, 2083, 2085, 3, 428, 214, 0, 2084, 2083, 1, 0, 0, 0, 2084, 2085, 1, 0, 0, 0, 2085, 363, 1, 0, 0, 0, 2086, 2090, 3, 366, 183, 0, 2087, 2089, 3, 366, 183, 0, 2088, 2087, 1, 0, 0, 0, 2089, 2092, 1, 0, 0, 0, 2090, 2088, 1, 0, 0, 0, 2090, 2091, 1, 0, 0, 0, 2091, 365, 1, 0, 0, 0, 2092, 2090, 1, 0, 0, 0, 2093, 2094, 5, 44, 0, 0, 2094, 2095, 5, 189, 0, 0, 2095, 2100, 5, 205, 0, 0, 2096, 2097, 5, 45, 0, 0, 2097, 2098, 5, 189, 0, 0, 2098, 2100, 5, 205, 0, 0, 2099, 2093, 1, 0, 0, 0, 2099, 2096, 1, 0, 0, 0, 2100, 367, 1, 0, 0, 0, 2101, 2102, 5, 38, 0, 0, 2102, 2105, 5, 67, 0, 0, 2103, 2104, 5, 63, 0, 0, 2104, 2106, 5, 46, 0, 0, 2105, 2103, 1, 0, 0, 0, 2105, 2106, 1, 0, 0, 0, 2106, 2107, 1, 0, 0, 0, 2107, 2108, 3, 334, 167, 0, 2108, 2109, 5, 97, 0, 0, 2109, 2111, 3, 252, 126, 0, 2110, 2112, 5, 102, 0, 0, 2111, 2110, 1, 0, 0, 0, 2111, 2112, 1, 0, 0, 0, 2112, 369, 1, 0, 0, 0, 2113, 2116, 7, 13, 0, 0, 2114, 2115, 5, 14, 0, 0, 2115, 2117, 5, 72, 0, 0, 2116, 2114, 1, 0, 0, 0, 2116, 2117, 1, 0, 0, 0, 2117, 2135, 1, 0, 0, 0, 2118, 2119, 5, 126, 0, 0, 2119, 2128, 3, 252, 126, 0, 2120, 2121, 5, 175, 0, 0, 2121, 2122, 3, 372, 186, 0, 2122, 2123, 5, 176, 0, 0, 2123, 2129, 1, 0, 0, 0, 2124, 2125, 5, 175, 0, 0, 2125, 2126, 3, 372, 186, 0, 2126, 2127, 6, 185, -1, 0, 2127, 2129, 1, 0, 0, 0, 2128, 2120, 1, 0, 0, 0, 2128, 2124, 1, 0, 0, 0, 2128, 2129, 1, 0, 0, 0, 2129, 2136, 1, 0, 0, 0, 2130, 2131, 5, 67, 0, 0, 2131, 2132, 3, 334, 167, 0, 2132, 2133, 5, 97, 0, 0, 2133, 2134, 3, 252, 126, 0, 2134, 2136, 1, 0, 0, 0, 2135, 2118, 1, 0, 0, 0, 2135, 2130, 1, 0, 0, 0, 2136, 371, 1, 0, 0, 0, 2137, 2142, 3, 324, 162, 0, 2138, 2139, 5, 173, 0, 0, 2139, 2141, 3, 324, 162, 0, 2140, 2138, 1, 0, 0, 0, 2141, 2144, 1, 0, 0, 0, 2142, 2140, 1, 0, 0, 0, 2142, 2143, 1, 0, 0, 0, 2143, 373, 1, 0, 0, 0, 2144, 2142, 1, 0, 0, 0, 2145, 2148, 5, 124, 0, 0, 2146, 2147, 5, 14, 0, 0, 2147, 2149, 5, 72, 0, 0, 2148, 2146, 1, 0, 0, 0, 2148, 2149, 1, 0, 0, 0, 2149, 2164, 1, 0, 0, 0, 2150, 2165, 5, 127, 0, 0, 2151, 2165, 5, 137, 0, 0, 2152, 2165, 5, 116, 0, 0, 2153, 2154, 5, 136, 0, 0, 2154, 2165, 3, 390, 195, 0, 2155, 2156, 5, 115, 0, 0, 2156, 2165, 3, 450, 225, 0, 2157, 2158, 5, 68, 0, 0, 2158, 2159, 5, 97, 0, 0, 2159, 2165, 3, 252, 126, 0, 2160, 2161, 5, 126, 0, 0, 2161, 2165, 3, 252, 126, 0, 2162, 2165, 5, 90, 0, 0, 2163, 2165, 5, 111, 0, 0, 2164, 2150, 1, 0, 0, 0, 2164, 2151, 1, 0, 0, 0, 2164, 2152, 1, 
0, 0, 0, 2164, 2153, 1, 0, 0, 0, 2164, 2155, 1, 0, 0, 0, 2164, 2157, 1, 0, 0, 0, 2164, 2160, 1, 0, 0, 0, 2164, 2162, 1, 0, 0, 0, 2164, 2163, 1, 0, 0, 0, 2165, 375, 1, 0, 0, 0, 2166, 2167, 5, 27, 0, 0, 2167, 2168, 5, 136, 0, 0, 2168, 2170, 3, 394, 197, 0, 2169, 2171, 3, 402, 201, 0, 2170, 2169, 1, 0, 0, 0, 2170, 2171, 1, 0, 0, 0, 2171, 2173, 1, 0, 0, 0, 2172, 2174, 5, 8, 0, 0, 2173, 2172, 1, 0, 0, 0, 2173, 2174, 1, 0, 0, 0, 2174, 377, 1, 0, 0, 0, 2175, 2176, 5, 27, 0, 0, 2176, 2177, 5, 115, 0, 0, 2177, 2178, 3, 450, 225, 0, 2178, 379, 1, 0, 0, 0, 2179, 2180, 5, 10, 0, 0, 2180, 2181, 5, 136, 0, 0, 2181, 2183, 3, 390, 195, 0, 2182, 2184, 3, 400, 200, 0, 2183, 2182, 1, 0, 0, 0, 2183, 2184, 1, 0, 0, 0, 2184, 2186, 1, 0, 0, 0, 2185, 2187, 5, 150, 0, 0, 2186, 2185, 1, 0, 0, 0, 2186, 2187, 1, 0, 0, 0, 2187, 2189, 1, 0, 0, 0, 2188, 2190, 5, 148, 0, 0, 2189, 2188, 1, 0, 0, 0, 2189, 2190, 1, 0, 0, 0, 2190, 2192, 1, 0, 0, 0, 2191, 2193, 3, 398, 199, 0, 2192, 2191, 1, 0, 0, 0, 2192, 2193, 1, 0, 0, 0, 2193, 2195, 1, 0, 0, 0, 2194, 2196, 3, 402, 201, 0, 2195, 2194, 1, 0, 0, 0, 2195, 2196, 1, 0, 0, 0, 2196, 381, 1, 0, 0, 0, 2197, 2198, 5, 38, 0, 0, 2198, 2199, 5, 136, 0, 0, 2199, 2201, 3, 390, 195, 0, 2200, 2202, 5, 22, 0, 0, 2201, 2200, 1, 0, 0, 0, 2201, 2202, 1, 0, 0, 0, 2202, 383, 1, 0, 0, 0, 2203, 2204, 5, 38, 0, 0, 2204, 2205, 5, 115, 0, 0, 2205, 2206, 3, 450, 225, 0, 2206, 385, 1, 0, 0, 0, 2207, 2211, 5, 58, 0, 0, 2208, 2212, 3, 404, 202, 0, 2209, 2212, 3, 406, 203, 0, 2210, 2212, 3, 408, 204, 0, 2211, 2208, 1, 0, 0, 0, 2211, 2209, 1, 0, 0, 0, 2211, 2210, 1, 0, 0, 0, 2212, 387, 1, 0, 0, 0, 2213, 2217, 5, 114, 0, 0, 2214, 2218, 3, 410, 205, 0, 2215, 2218, 3, 412, 206, 0, 2216, 2218, 3, 414, 207, 0, 2217, 2214, 1, 0, 0, 0, 2217, 2215, 1, 0, 0, 0, 2217, 2216, 1, 0, 0, 0, 2218, 389, 1, 0, 0, 0, 2219, 2222, 3, 450, 225, 0, 2220, 2222, 3, 446, 223, 0, 2221, 2219, 1, 0, 0, 0, 2221, 2220, 1, 0, 0, 0, 2222, 391, 1, 0, 0, 0, 2223, 2224, 5, 61, 0, 0, 2224, 2225, 3, 396, 198, 0, 2225, 393, 1, 0, 0, 0, 2226, 2227, 3, 450, 225, 0, 2227, 2229, 3, 392, 196, 0, 2228, 2230, 5, 148, 0, 0, 2229, 2228, 1, 0, 0, 0, 2229, 2230, 1, 0, 0, 0, 2230, 2232, 1, 0, 0, 0, 2231, 2233, 3, 398, 199, 0, 2232, 2231, 1, 0, 0, 0, 2232, 2233, 1, 0, 0, 0, 2233, 2238, 1, 0, 0, 0, 2234, 2235, 3, 446, 223, 0, 2235, 2236, 5, 147, 0, 0, 2236, 2238, 1, 0, 0, 0, 2237, 2226, 1, 0, 0, 0, 2237, 2234, 1, 0, 0, 0, 2238, 395, 1, 0, 0, 0, 2239, 2240, 5, 19, 0, 0, 2240, 2241, 3, 446, 223, 0, 2241, 397, 1, 0, 0, 0, 2242, 2243, 5, 103, 0, 0, 2243, 2244, 5, 79, 0, 0, 2244, 2245, 3, 438, 219, 0, 2245, 399, 1, 0, 0, 0, 2246, 2248, 3, 392, 196, 0, 2247, 2249, 5, 149, 0, 0, 2248, 2247, 1, 0, 0, 0, 2248, 2249, 1, 0, 0, 0, 2249, 401, 1, 0, 0, 0, 2250, 2251, 5, 6, 0, 0, 2251, 2252, 7, 14, 0, 0, 2252, 403, 1, 0, 0, 0, 2253, 2254, 3, 448, 224, 0, 2254, 2255, 5, 129, 0, 0, 2255, 2256, 3, 416, 208, 0, 2256, 405, 1, 0, 0, 0, 2257, 2258, 3, 418, 209, 0, 2258, 2259, 5, 129, 0, 0, 2259, 2260, 3, 450, 225, 0, 2260, 407, 1, 0, 0, 0, 2261, 2262, 3, 422, 211, 0, 2262, 2266, 5, 97, 0, 0, 2263, 2267, 3, 424, 212, 0, 2264, 2265, 5, 89, 0, 0, 2265, 2267, 3, 254, 127, 0, 2266, 2263, 1, 0, 0, 0, 2266, 2264, 1, 0, 0, 0, 2267, 2268, 1, 0, 0, 0, 2268, 2269, 5, 129, 0, 0, 2269, 2270, 3, 450, 225, 0, 2270, 409, 1, 0, 0, 0, 2271, 2272, 3, 448, 224, 0, 2272, 2273, 5, 54, 0, 0, 2273, 2274, 3, 416, 208, 0, 2274, 411, 1, 0, 0, 0, 2275, 2276, 3, 418, 209, 0, 2276, 2277, 5, 54, 0, 0, 2277, 2278, 3, 450, 225, 0, 2278, 413, 1, 0, 0, 0, 2279, 2280, 3, 422, 211, 0, 2280, 2284, 5, 97, 0, 0, 2281, 
2285, 3, 424, 212, 0, 2282, 2283, 5, 89, 0, 0, 2283, 2285, 3, 254, 127, 0, 2284, 2281, 1, 0, 0, 0, 2284, 2282, 1, 0, 0, 0, 2285, 2286, 1, 0, 0, 0, 2286, 2287, 5, 54, 0, 0, 2287, 2288, 3, 450, 225, 0, 2288, 415, 1, 0, 0, 0, 2289, 2290, 5, 136, 0, 0, 2290, 2294, 3, 390, 195, 0, 2291, 2292, 5, 115, 0, 0, 2292, 2294, 3, 450, 225, 0, 2293, 2289, 1, 0, 0, 0, 2293, 2291, 1, 0, 0, 0, 2294, 417, 1, 0, 0, 0, 2295, 2300, 3, 420, 210, 0, 2296, 2297, 5, 173, 0, 0, 2297, 2299, 3, 420, 210, 0, 2298, 2296, 1, 0, 0, 0, 2299, 2302, 1, 0, 0, 0, 2300, 2298, 1, 0, 0, 0, 2300, 2301, 1, 0, 0, 0, 2301, 419, 1, 0, 0, 0, 2302, 2300, 1, 0, 0, 0, 2303, 2306, 3, 450, 225, 0, 2304, 2306, 5, 146, 0, 0, 2305, 2303, 1, 0, 0, 0, 2305, 2304, 1, 0, 0, 0, 2306, 421, 1, 0, 0, 0, 2307, 2310, 3, 420, 210, 0, 2308, 2310, 5, 9, 0, 0, 2309, 2307, 1, 0, 0, 0, 2309, 2308, 1, 0, 0, 0, 2310, 2318, 1, 0, 0, 0, 2311, 2314, 5, 173, 0, 0, 2312, 2315, 3, 420, 210, 0, 2313, 2315, 5, 9, 0, 0, 2314, 2312, 1, 0, 0, 0, 2314, 2313, 1, 0, 0, 0, 2315, 2317, 1, 0, 0, 0, 2316, 2311, 1, 0, 0, 0, 2317, 2320, 1, 0, 0, 0, 2318, 2316, 1, 0, 0, 0, 2318, 2319, 1, 0, 0, 0, 2319, 423, 1, 0, 0, 0, 2320, 2318, 1, 0, 0, 0, 2321, 2322, 3, 252, 126, 0, 2322, 425, 1, 0, 0, 0, 2323, 2331, 3, 428, 214, 0, 2324, 2331, 3, 430, 215, 0, 2325, 2331, 3, 446, 223, 0, 2326, 2331, 3, 442, 221, 0, 2327, 2331, 5, 204, 0, 0, 2328, 2331, 5, 203, 0, 0, 2329, 2331, 5, 202, 0, 0, 2330, 2323, 1, 0, 0, 0, 2330, 2324, 1, 0, 0, 0, 2330, 2325, 1, 0, 0, 0, 2330, 2326, 1, 0, 0, 0, 2330, 2327, 1, 0, 0, 0, 2330, 2328, 1, 0, 0, 0, 2330, 2329, 1, 0, 0, 0, 2331, 427, 1, 0, 0, 0, 2332, 2333, 5, 179, 0, 0, 2333, 2338, 3, 432, 216, 0, 2334, 2335, 5, 173, 0, 0, 2335, 2337, 3, 432, 216, 0, 2336, 2334, 1, 0, 0, 0, 2337, 2340, 1, 0, 0, 0, 2338, 2336, 1, 0, 0, 0, 2338, 2339, 1, 0, 0, 0, 2339, 2341, 1, 0, 0, 0, 2340, 2338, 1, 0, 0, 0, 2341, 2342, 5, 180, 0, 0, 2342, 2346, 1, 0, 0, 0, 2343, 2344, 5, 179, 0, 0, 2344, 2346, 5, 180, 0, 0, 2345, 2332, 1, 0, 0, 0, 2345, 2343, 1, 0, 0, 0, 2346, 429, 1, 0, 0, 0, 2347, 2348, 5, 177, 0, 0, 2348, 2353, 3, 434, 217, 0, 2349, 2350, 5, 173, 0, 0, 2350, 2352, 3, 434, 217, 0, 2351, 2349, 1, 0, 0, 0, 2352, 2355, 1, 0, 0, 0, 2353, 2351, 1, 0, 0, 0, 2353, 2354, 1, 0, 0, 0, 2354, 2356, 1, 0, 0, 0, 2355, 2353, 1, 0, 0, 0, 2356, 2357, 5, 178, 0, 0, 2357, 2361, 1, 0, 0, 0, 2358, 2359, 5, 177, 0, 0, 2359, 2361, 5, 178, 0, 0, 2360, 2347, 1, 0, 0, 0, 2360, 2358, 1, 0, 0, 0, 2361, 431, 1, 0, 0, 0, 2362, 2363, 5, 208, 0, 0, 2363, 2364, 5, 174, 0, 0, 2364, 2365, 3, 434, 217, 0, 2365, 433, 1, 0, 0, 0, 2366, 2374, 3, 428, 214, 0, 2367, 2374, 3, 430, 215, 0, 2368, 2374, 5, 208, 0, 0, 2369, 2374, 3, 442, 221, 0, 2370, 2374, 5, 204, 0, 0, 2371, 2374, 5, 203, 0, 0, 2372, 2374, 5, 202, 0, 0, 2373, 2366, 1, 0, 0, 0, 2373, 2367, 1, 0, 0, 0, 2373, 2368, 1, 0, 0, 0, 2373, 2369, 1, 0, 0, 0, 2373, 2370, 1, 0, 0, 0, 2373, 2371, 1, 0, 0, 0, 2373, 2372, 1, 0, 0, 0, 2374, 435, 1, 0, 0, 0, 2375, 2376, 5, 25, 0, 0, 2376, 2377, 3, 446, 223, 0, 2377, 437, 1, 0, 0, 0, 2378, 2379, 5, 205, 0, 0, 2379, 2380, 3, 440, 220, 0, 2380, 439, 1, 0, 0, 0, 2381, 2382, 7, 15, 0, 0, 2382, 441, 1, 0, 0, 0, 2383, 2385, 5, 198, 0, 0, 2384, 2383, 1, 0, 0, 0, 2384, 2385, 1, 0, 0, 0, 2385, 2386, 1, 0, 0, 0, 2386, 2387, 7, 16, 0, 0, 2387, 443, 1, 0, 0, 0, 2388, 2390, 7, 3, 0, 0, 2389, 2388, 1, 0, 0, 0, 2389, 2390, 1, 0, 0, 0, 2390, 2391, 1, 0, 0, 0, 2391, 2392, 5, 205, 0, 0, 2392, 445, 1, 0, 0, 0, 2393, 2394, 7, 17, 0, 0, 2394, 447, 1, 0, 0, 0, 2395, 2400, 3, 450, 225, 0, 2396, 2397, 5, 173, 0, 0, 2397, 2399, 3, 450, 225, 0, 
2398, 2396, 1, 0, 0, 0, 2399, 2402, 1, 0, 0, 0, 2400, 2398, 1, 0, 0, 0, 2400, 2401, 1, 0, 0, 0, 2401, 449, 1, 0, 0, 0, 2402, 2400, 1, 0, 0, 0, 2403, 2553, 5, 6, 0, 0, 2404, 2553, 5, 7, 0, 0, 2405, 2553, 5, 8, 0, 0, 2406, 2553, 5, 9, 0, 0, 2407, 2553, 5, 10, 0, 0, 2408, 2553, 5, 11, 0, 0, 2409, 2553, 5, 12, 0, 0, 2410, 2553, 5, 13, 0, 0, 2411, 2553, 5, 167, 0, 0, 2412, 2553, 5, 168, 0, 0, 2413, 2553, 5, 169, 0, 0, 2414, 2553, 5, 170, 0, 0, 2415, 2553, 5, 16, 0, 0, 2416, 2553, 5, 14, 0, 0, 2417, 2553, 5, 15, 0, 0, 2418, 2553, 5, 17, 0, 0, 2419, 2553, 5, 18, 0, 0, 2420, 2553, 5, 19, 0, 0, 2421, 2553, 5, 20, 0, 0, 2422, 2553, 5, 21, 0, 0, 2423, 2553, 5, 23, 0, 0, 2424, 2553, 5, 24, 0, 0, 2425, 2553, 5, 25, 0, 0, 2426, 2553, 5, 26, 0, 0, 2427, 2553, 5, 27, 0, 0, 2428, 2553, 5, 28, 0, 0, 2429, 2553, 5, 29, 0, 0, 2430, 2553, 5, 30, 0, 0, 2431, 2553, 5, 31, 0, 0, 2432, 2553, 5, 32, 0, 0, 2433, 2553, 5, 33, 0, 0, 2434, 2553, 5, 34, 0, 0, 2435, 2553, 5, 35, 0, 0, 2436, 2553, 5, 36, 0, 0, 2437, 2553, 5, 37, 0, 0, 2438, 2553, 5, 38, 0, 0, 2439, 2553, 5, 39, 0, 0, 2440, 2553, 5, 40, 0, 0, 2441, 2553, 5, 41, 0, 0, 2442, 2553, 5, 42, 0, 0, 2443, 2553, 5, 43, 0, 0, 2444, 2553, 5, 44, 0, 0, 2445, 2553, 5, 45, 0, 0, 2446, 2553, 5, 46, 0, 0, 2447, 2553, 5, 47, 0, 0, 2448, 2553, 5, 48, 0, 0, 2449, 2553, 5, 49, 0, 0, 2450, 2553, 5, 53, 0, 0, 2451, 2553, 5, 54, 0, 0, 2452, 2553, 5, 55, 0, 0, 2453, 2553, 5, 56, 0, 0, 2454, 2553, 5, 57, 0, 0, 2455, 2553, 5, 58, 0, 0, 2456, 2553, 5, 59, 0, 0, 2457, 2553, 5, 60, 0, 0, 2458, 2553, 5, 61, 0, 0, 2459, 2553, 5, 62, 0, 0, 2460, 2553, 5, 63, 0, 0, 2461, 2553, 5, 66, 0, 0, 2462, 2553, 5, 64, 0, 0, 2463, 2553, 5, 67, 0, 0, 2464, 2553, 5, 68, 0, 0, 2465, 2553, 5, 69, 0, 0, 2466, 2553, 5, 70, 0, 0, 2467, 2553, 5, 65, 0, 0, 2468, 2553, 5, 71, 0, 0, 2469, 2553, 5, 72, 0, 0, 2470, 2553, 5, 74, 0, 0, 2471, 2553, 5, 75, 0, 0, 2472, 2553, 5, 76, 0, 0, 2473, 2553, 5, 79, 0, 0, 2474, 2553, 5, 77, 0, 0, 2475, 2553, 5, 80, 0, 0, 2476, 2553, 5, 81, 0, 0, 2477, 2553, 5, 82, 0, 0, 2478, 2553, 5, 84, 0, 0, 2479, 2553, 5, 85, 0, 0, 2480, 2553, 5, 87, 0, 0, 2481, 2482, 5, 88, 0, 0, 2482, 2553, 5, 89, 0, 0, 2483, 2553, 5, 90, 0, 0, 2484, 2553, 5, 91, 0, 0, 2485, 2553, 5, 92, 0, 0, 2486, 2553, 5, 93, 0, 0, 2487, 2553, 5, 94, 0, 0, 2488, 2553, 5, 96, 0, 0, 2489, 2553, 5, 95, 0, 0, 2490, 2553, 5, 97, 0, 0, 2491, 2553, 5, 99, 0, 0, 2492, 2553, 5, 100, 0, 0, 2493, 2553, 5, 102, 0, 0, 2494, 2553, 5, 105, 0, 0, 2495, 2553, 5, 103, 0, 0, 2496, 2553, 5, 104, 0, 0, 2497, 2553, 5, 108, 0, 0, 2498, 2553, 5, 109, 0, 0, 2499, 2553, 5, 200, 0, 0, 2500, 2553, 5, 110, 0, 0, 2501, 2553, 5, 111, 0, 0, 2502, 2553, 5, 112, 0, 0, 2503, 2553, 5, 113, 0, 0, 2504, 2553, 5, 117, 0, 0, 2505, 2553, 5, 115, 0, 0, 2506, 2553, 5, 116, 0, 0, 2507, 2553, 5, 114, 0, 0, 2508, 2553, 5, 118, 0, 0, 2509, 2553, 5, 119, 0, 0, 2510, 2553, 5, 120, 0, 0, 2511, 2553, 5, 121, 0, 0, 2512, 2553, 5, 122, 0, 0, 2513, 2553, 5, 123, 0, 0, 2514, 2553, 5, 124, 0, 0, 2515, 2553, 5, 125, 0, 0, 2516, 2553, 5, 126, 0, 0, 2517, 2553, 5, 127, 0, 0, 2518, 2553, 5, 128, 0, 0, 2519, 2553, 5, 129, 0, 0, 2520, 2553, 5, 130, 0, 0, 2521, 2553, 5, 131, 0, 0, 2522, 2553, 5, 132, 0, 0, 2523, 2553, 5, 133, 0, 0, 2524, 2553, 5, 143, 0, 0, 2525, 2553, 5, 144, 0, 0, 2526, 2553, 5, 134, 0, 0, 2527, 2553, 5, 135, 0, 0, 2528, 2553, 5, 136, 0, 0, 2529, 2553, 5, 137, 0, 0, 2530, 2553, 5, 138, 0, 0, 2531, 2553, 5, 139, 0, 0, 2532, 2553, 5, 140, 0, 0, 2533, 2553, 5, 141, 0, 0, 2534, 2553, 5, 142, 0, 0, 2535, 2553, 5, 152, 0, 0, 2536, 2553, 5, 153, 0, 0, 2537, 2553, 5, 
154, 0, 0, 2538, 2553, 5, 155, 0, 0, 2539, 2553, 5, 156, 0, 0, 2540, 2553, 5, 157, 0, 0, 2541, 2553, 5, 158, 0, 0, 2542, 2553, 5, 160, 0, 0, 2543, 2553, 5, 159, 0, 0, 2544, 2553, 5, 161, 0, 0, 2545, 2553, 5, 162, 0, 0, 2546, 2553, 5, 163, 0, 0, 2547, 2553, 5, 164, 0, 0, 2548, 2553, 5, 165, 0, 0, 2549, 2553, 5, 166, 0, 0, 2550, 2553, 5, 171, 0, 0, 2551, 2553, 5, 211, 0, 0, 2552, 2403, 1, 0, 0, 0, 2552, 2404, 1, 0, 0, 0, 2552, 2405, 1, 0, 0, 0, 2552, 2406, 1, 0, 0, 0, 2552, 2407, 1, 0, 0, 0, 2552, 2408, 1, 0, 0, 0, 2552, 2409, 1, 0, 0, 0, 2552, 2410, 1, 0, 0, 0, 2552, 2411, 1, 0, 0, 0, 2552, 2412, 1, 0, 0, 0, 2552, 2413, 1, 0, 0, 0, 2552, 2414, 1, 0, 0, 0, 2552, 2415, 1, 0, 0, 0, 2552, 2416, 1, 0, 0, 0, 2552, 2417, 1, 0, 0, 0, 2552, 2418, 1, 0, 0, 0, 2552, 2419, 1, 0, 0, 0, 2552, 2420, 1, 0, 0, 0, 2552, 2421, 1, 0, 0, 0, 2552, 2422, 1, 0, 0, 0, 2552, 2423, 1, 0, 0, 0, 2552, 2424, 1, 0, 0, 0, 2552, 2425, 1, 0, 0, 0, 2552, 2426, 1, 0, 0, 0, 2552, 2427, 1, 0, 0, 0, 2552, 2428, 1, 0, 0, 0, 2552, 2429, 1, 0, 0, 0, 2552, 2430, 1, 0, 0, 0, 2552, 2431, 1, 0, 0, 0, 2552, 2432, 1, 0, 0, 0, 2552, 2433, 1, 0, 0, 0, 2552, 2434, 1, 0, 0, 0, 2552, 2435, 1, 0, 0, 0, 2552, 2436, 1, 0, 0, 0, 2552, 2437, 1, 0, 0, 0, 2552, 2438, 1, 0, 0, 0, 2552, 2439, 1, 0, 0, 0, 2552, 2440, 1, 0, 0, 0, 2552, 2441, 1, 0, 0, 0, 2552, 2442, 1, 0, 0, 0, 2552, 2443, 1, 0, 0, 0, 2552, 2444, 1, 0, 0, 0, 2552, 2445, 1, 0, 0, 0, 2552, 2446, 1, 0, 0, 0, 2552, 2447, 1, 0, 0, 0, 2552, 2448, 1, 0, 0, 0, 2552, 2449, 1, 0, 0, 0, 2552, 2450, 1, 0, 0, 0, 2552, 2451, 1, 0, 0, 0, 2552, 2452, 1, 0, 0, 0, 2552, 2453, 1, 0, 0, 0, 2552, 2454, 1, 0, 0, 0, 2552, 2455, 1, 0, 0, 0, 2552, 2456, 1, 0, 0, 0, 2552, 2457, 1, 0, 0, 0, 2552, 2458, 1, 0, 0, 0, 2552, 2459, 1, 0, 0, 0, 2552, 2460, 1, 0, 0, 0, 2552, 2461, 1, 0, 0, 0, 2552, 2462, 1, 0, 0, 0, 2552, 2463, 1, 0, 0, 0, 2552, 2464, 1, 0, 0, 0, 2552, 2465, 1, 0, 0, 0, 2552, 2466, 1, 0, 0, 0, 2552, 2467, 1, 0, 0, 0, 2552, 2468, 1, 0, 0, 0, 2552, 2469, 1, 0, 0, 0, 2552, 2470, 1, 0, 0, 0, 2552, 2471, 1, 0, 0, 0, 2552, 2472, 1, 0, 0, 0, 2552, 2473, 1, 0, 0, 0, 2552, 2474, 1, 0, 0, 0, 2552, 2475, 1, 0, 0, 0, 2552, 2476, 1, 0, 0, 0, 2552, 2477, 1, 0, 0, 0, 2552, 2478, 1, 0, 0, 0, 2552, 2479, 1, 0, 0, 0, 2552, 2480, 1, 0, 0, 0, 2552, 2481, 1, 0, 0, 0, 2552, 2483, 1, 0, 0, 0, 2552, 2484, 1, 0, 0, 0, 2552, 2485, 1, 0, 0, 0, 2552, 2486, 1, 0, 0, 0, 2552, 2487, 1, 0, 0, 0, 2552, 2488, 1, 0, 0, 0, 2552, 2489, 1, 0, 0, 0, 2552, 2490, 1, 0, 0, 0, 2552, 2491, 1, 0, 0, 0, 2552, 2492, 1, 0, 0, 0, 2552, 2493, 1, 0, 0, 0, 2552, 2494, 1, 0, 0, 0, 2552, 2495, 1, 0, 0, 0, 2552, 2496, 1, 0, 0, 0, 2552, 2497, 1, 0, 0, 0, 2552, 2498, 1, 0, 0, 0, 2552, 2499, 1, 0, 0, 0, 2552, 2500, 1, 0, 0, 0, 2552, 2501, 1, 0, 0, 0, 2552, 2502, 1, 0, 0, 0, 2552, 2503, 1, 0, 0, 0, 2552, 2504, 1, 0, 0, 0, 2552, 2505, 1, 0, 0, 0, 2552, 2506, 1, 0, 0, 0, 2552, 2507, 1, 0, 0, 0, 2552, 2508, 1, 0, 0, 0, 2552, 2509, 1, 0, 0, 0, 2552, 2510, 1, 0, 0, 0, 2552, 2511, 1, 0, 0, 0, 2552, 2512, 1, 0, 0, 0, 2552, 2513, 1, 0, 0, 0, 2552, 2514, 1, 0, 0, 0, 2552, 2515, 1, 0, 0, 0, 2552, 2516, 1, 0, 0, 0, 2552, 2517, 1, 0, 0, 0, 2552, 2518, 1, 0, 0, 0, 2552, 2519, 1, 0, 0, 0, 2552, 2520, 1, 0, 0, 0, 2552, 2521, 1, 0, 0, 0, 2552, 2522, 1, 0, 0, 0, 2552, 2523, 1, 0, 0, 0, 2552, 2524, 1, 0, 0, 0, 2552, 2525, 1, 0, 0, 0, 2552, 2526, 1, 0, 0, 0, 2552, 2527, 1, 0, 0, 0, 2552, 2528, 1, 0, 0, 0, 2552, 2529, 1, 0, 0, 0, 2552, 2530, 1, 0, 0, 0, 2552, 2531, 1, 0, 0, 0, 2552, 2532, 1, 0, 0, 0, 2552, 2533, 1, 0, 0, 0, 2552, 2534, 1, 0, 0, 0, 2552, 2535, 1, 0, 0, 0, 2552, 2536, 
1, 0, 0, 0, 2552, 2537, 1, 0, 0, 0, 2552, 2538, 1, 0, 0, 0, 2552, 2539, 1, 0, 0, 0, 2552, 2540, 1, 0, 0, 0, 2552, 2541, 1, 0, 0, 0, 2552, 2542, 1, 0, 0, 0, 2552, 2543, 1, 0, 0, 0, 2552, 2544, 1, 0, 0, 0, 2552, 2545, 1, 0, 0, 0, 2552, 2546, 1, 0, 0, 0, 2552, 2547, 1, 0, 0, 0, 2552, 2548, 1, 0, 0, 0, 2552, 2549, 1, 0, 0, 0, 2552, 2550, 1, 0, 0, 0, 2552, 2551, 1, 0, 0, 0, 2553, 2557, 1, 0, 0, 0, 2554, 2555, 5, 212, 0, 0, 2555, 2557, 6, 225, -1, 0, 2556, 2552, 1, 0, 0, 0, 2556, 2554, 1, 0, 0, 0, 2557, 451, 1, 0, 0, 0, 265, 480, 483, 495, 509, 512, 515, 518, 521, 529, 535, 540, 544, 550, 561, 568, 577, 585, 593, 602, 606, 609, 613, 619, 626, 632, 644, 647, 658, 661, 667, 678, 699, 702, 706, 718, 722, 726, 735, 752, 763, 767, 774, 777, 784, 795, 799, 809, 814, 824, 834, 845, 858, 869, 874, 885, 889, 893, 898, 903, 913, 921, 929, 935, 940, 942, 948, 955, 960, 966, 970, 974, 980, 998, 1004, 1006, 1013, 1019, 1025, 1041, 1048, 1062, 1074, 1077, 1104, 1109, 1132, 1138, 1141, 1149, 1154, 1163, 1170, 1173, 1177, 1184, 1192, 1195, 1200, 1203, 1210, 1216, 1226, 1230, 1238, 1242, 1250, 1254, 1262, 1266, 1275, 1279, 1289, 1292, 1299, 1303, 1306, 1309, 1314, 1318, 1333, 1341, 1348, 1354, 1357, 1361, 1364, 1371, 1388, 1397, 1405, 1408, 1412, 1416, 1418, 1426, 1459, 1467, 1473, 1488, 1496, 1500, 1509, 1514, 1521, 1529, 1533, 1555, 1559, 1565, 1570, 1579, 1585, 1589, 1599, 1602, 1610, 1624, 1629, 1633, 1641, 1643, 1646, 1659, 1666, 1671, 1682, 1684, 1700, 1710, 1726, 1728, 1736, 1740, 1763, 1773, 1788, 1793, 1810, 1816, 1820, 1833, 1836, 1845, 1848, 1854, 1861, 1869, 1877, 1883, 1889, 1898, 1908, 1911, 1918, 1924, 1927, 1936, 1941, 1944, 1949, 1952, 1955, 1963, 1966, 1971, 1973, 1976, 1983, 1995, 2008, 2010, 2023, 2038, 2041, 2049, 2056, 2059, 2062, 2072, 2079, 2084, 2090, 2099, 2105, 2111, 2116, 2128, 2135, 2142, 2148, 2164, 2170, 2173, 2183, 2186, 2189, 2192, 2195, 2201, 2211, 2217, 2221, 2229, 2232, 2237, 2248, 2266, 2284, 2293, 2300, 2305, 2309, 2314, 2318, 2330, 2338, 2345, 2353, 2360, 2373, 2384, 2389, 2400, 2552, 2556] \ No newline at end of file diff --git a/kvmain/src/main/java/oracle/kv/impl/query/compiler/parser/KVQL.tokens b/kvmain/src/main/java/oracle/kv/impl/query/compiler/parser/KVQL.tokens index b5b6580c..8d4e7ea5 100644 --- a/kvmain/src/main/java/oracle/kv/impl/query/compiler/parser/KVQL.tokens +++ b/kvmain/src/main/java/oracle/kv/impl/query/compiler/parser/KVQL.tokens @@ -1,241 +1,247 @@ T__0=1 T__1=2 T__2=3 -VARNAME=4 -ACCOUNT=5 -ADD=6 -ADMIN=7 -ALL=8 -ALTER=9 -ALWAYS=10 -ANCESTORS=11 -AND=12 -AS=13 -ASC=14 -ARRAY_COLLECT=15 -BETWEEN=16 -BY=17 -CACHE=18 -CASE=19 -CASCADE=20 -CAST=21 -COLLECTION=22 -COMMENT=23 -COUNT=24 -CREATE=25 -CYCLE=26 -DAYS=27 -DECLARE=28 -DEFAULT=29 -DELETE=30 -DESC=31 -DESCENDANTS=32 -DESCRIBE=33 -DISTINCT=34 -DROP=35 -ELEMENTOF=36 -ELEMENTS=37 -ELSE=38 -END=39 -ES_SHARDS=40 -ES_REPLICAS=41 -EXISTS=42 -EXTRACT=43 -FIELDS=44 -FIRST=45 -FORCE=46 -FORCE_INDEX=47 -FORCE_PRIMARY_INDEX=48 -FREEZE=49 -FROM=50 -FROZEN=51 -FULLTEXT=52 -GENERATED=53 -GRANT=54 -GROUP=55 -HOURS=56 -IDENTIFIED=57 -IDENTITY=58 -IF=59 -IN=60 -INCREMENT=61 -INDEX=62 -INDEXES=63 -INSERT=64 -INTO=65 -IS=66 -JSON=67 -JOIN=68 -KEY=69 -KEYOF=70 -KEYS=71 -LAST=72 -LEFT=73 -LIFETIME=74 -LIMIT=75 -LOCAL=76 -LOCK=77 -MAXVALUE=78 -MERGE=79 -MINUTES=80 -MINVALUE=81 -MODIFY=82 -MR_COUNTER=83 -NAMESPACE=84 -NAMESPACES=85 -NESTED=86 -NO=87 -NOT=88 -NULLS=89 -OFFSET=90 -OF=91 -ON=92 -ONLY=93 -OR=94 -ORDER=95 -OUTER=96 -OVERRIDE=97 -PASSWORD=98 -PATCH=99 -PER=100 -PREFER_INDEXES=101 -PREFER_PRIMARY_INDEX=102 
-PRIMARY=103 -PUT=104 -REGION=105 -REGIONS=106 -REMOVE=107 -RETURNING=108 -REVOKE=109 -ROLE=110 -ROLES=111 -ROW=112 -SCHEMA=113 -SECONDS=114 -SELECT=115 -SEQ_TRANSFORM=116 -SET=117 -SHARD=118 -SHOW=119 -START=120 -TABLE=121 -TABLES=122 -THEN=123 -TO=124 -TTL=125 -TYPE=126 -UNFREEZE=127 -UNLOCK=128 -UPDATE=129 -UPSERT=130 -USER=131 -USERS=132 -USING=133 -VALUES=134 -WHEN=135 -WHERE=136 -WITH=137 -UNIQUE=138 -UNNEST=139 -UUID=140 -ALL_PRIVILEGES=141 -IDENTIFIED_EXTERNALLY=142 -PASSWORD_EXPIRE=143 -RETAIN_CURRENT_PASSWORD=144 -CLEAR_RETAINED_PASSWORD=145 -LEFT_OUTER_JOIN=146 -ARRAY_T=147 -BINARY_T=148 -BOOLEAN_T=149 -DOUBLE_T=150 -ENUM_T=151 -FLOAT_T=152 -GEOMETRY_T=153 -INTEGER_T=154 -LONG_T=155 -MAP_T=156 -NUMBER_T=157 -POINT_T=158 -RECORD_T=159 -STRING_T=160 -TIMESTAMP_T=161 -ANY_T=162 -ANYATOMIC_T=163 -ANYJSONATOMIC_T=164 -ANYRECORD_T=165 -SCALAR_T=166 -SEMI=167 -COMMA=168 -COLON=169 -LP=170 -RP=171 -LBRACK=172 -RBRACK=173 -LBRACE=174 -RBRACE=175 -STAR=176 -DOT=177 -DOLLAR=178 -QUESTION_MARK=179 -LT=180 -LTE=181 -GT=182 -GTE=183 -EQ=184 -NEQ=185 -LT_ANY=186 -LTE_ANY=187 -GT_ANY=188 -GTE_ANY=189 -EQ_ANY=190 -NEQ_ANY=191 -PLUS=192 -MINUS=193 -IDIV=194 -RDIV=195 -CONCAT=196 -NULL=197 -FALSE=198 -TRUE=199 -INT=200 -FLOAT=201 -NUMBER=202 -DSTRING=203 -STRING=204 -SYSDOLAR=205 -ID=206 -BAD_ID=207 -WS=208 -C_COMMENT=209 -LINE_COMMENT=210 -LINE_COMMENT1=211 -UnrecognizedToken=212 +T__3=4 +VARNAME=5 +ACCOUNT=6 +ADD=7 +ADMIN=8 +ALL=9 +ALTER=10 +ALWAYS=11 +ANCESTORS=12 +AND=13 +AS=14 +ASC=15 +ARRAY_COLLECT=16 +BEFORE=17 +BETWEEN=18 +BY=19 +CACHE=20 +CASE=21 +CASCADE=22 +CAST=23 +COLLECTION=24 +COMMENT=25 +COUNT=26 +CREATE=27 +CYCLE=28 +DAYS=29 +DECLARE=30 +DEFAULT=31 +DELETE=32 +DESC=33 +DESCENDANTS=34 +DESCRIBE=35 +DISABLE=36 +DISTINCT=37 +DROP=38 +ELEMENTOF=39 +ELEMENTS=40 +ELSE=41 +ENABLE=42 +END=43 +ES_SHARDS=44 +ES_REPLICAS=45 +EXISTS=46 +EXTRACT=47 +FIELDS=48 +FIRST=49 +FORCE=50 +FORCE_INDEX=51 +FORCE_PRIMARY_INDEX=52 +FREEZE=53 +FROM=54 +FROZEN=55 +FULLTEXT=56 +GENERATED=57 +GRANT=58 +GROUP=59 +HOURS=60 +IDENTIFIED=61 +IDENTITY=62 +IF=63 +IMAGE=64 +IN=65 +INCREMENT=66 +INDEX=67 +INDEXES=68 +INSERT=69 +INTO=70 +IS=71 +JSON=72 +JOIN=73 +KEY=74 +KEYOF=75 +KEYS=76 +LAST=77 +LEFT=78 +LIFETIME=79 +LIMIT=80 +LOCAL=81 +LOCK=82 +MAXVALUE=83 +MERGE=84 +MINUTES=85 +MINVALUE=86 +MODIFY=87 +MR_COUNTER=88 +NAMESPACE=89 +NAMESPACES=90 +NESTED=91 +NO=92 +NOT=93 +NULLS=94 +OFFSET=95 +OF=96 +ON=97 +ONLY=98 +OR=99 +ORDER=100 +OUTER=101 +OVERRIDE=102 +PASSWORD=103 +PATCH=104 +PER=105 +PREFER_INDEXES=106 +PREFER_PRIMARY_INDEX=107 +PRIMARY=108 +PUT=109 +REGION=110 +REGIONS=111 +REMOVE=112 +RETURNING=113 +REVOKE=114 +ROLE=115 +ROLES=116 +ROW=117 +SCHEMA=118 +SECONDS=119 +SELECT=120 +SEQ_TRANSFORM=121 +SET=122 +SHARD=123 +SHOW=124 +START=125 +TABLE=126 +TABLES=127 +THEN=128 +TO=129 +TTL=130 +TYPE=131 +UNFREEZE=132 +UNLOCK=133 +UPDATE=134 +UPSERT=135 +USER=136 +USERS=137 +USING=138 +VALUES=139 +WHEN=140 +WHERE=141 +WITH=142 +UNIQUE=143 +UNNEST=144 +UUID=145 +ALL_PRIVILEGES=146 +IDENTIFIED_EXTERNALLY=147 +PASSWORD_EXPIRE=148 +RETAIN_CURRENT_PASSWORD=149 +CLEAR_RETAINED_PASSWORD=150 +LEFT_OUTER_JOIN=151 +ARRAY_T=152 +BINARY_T=153 +BOOLEAN_T=154 +DOUBLE_T=155 +ENUM_T=156 +FLOAT_T=157 +GEOMETRY_T=158 +INTEGER_T=159 +LONG_T=160 +MAP_T=161 +NUMBER_T=162 +POINT_T=163 +RECORD_T=164 +STRING_T=165 +TIMESTAMP_T=166 +ANY_T=167 +ANYATOMIC_T=168 +ANYJSONATOMIC_T=169 +ANYRECORD_T=170 +SCALAR_T=171 +SEMI=172 +COMMA=173 +COLON=174 +LP=175 +RP=176 +LBRACK=177 +RBRACK=178 +LBRACE=179 +RBRACE=180 +STAR=181 +DOT=182 +DOLLAR=183 
+QUESTION_MARK=184 +LT=185 +LTE=186 +GT=187 +GTE=188 +EQ=189 +NEQ=190 +LT_ANY=191 +LTE_ANY=192 +GT_ANY=193 +GTE_ANY=194 +EQ_ANY=195 +NEQ_ANY=196 +PLUS=197 +MINUS=198 +IDIV=199 +RDIV=200 +CONCAT=201 +NULL=202 +FALSE=203 +TRUE=204 +INT=205 +FLOAT=206 +NUMBER=207 +DSTRING=208 +STRING=209 +SYSDOLAR=210 +ID=211 +BAD_ID=212 +WS=213 +C_COMMENT=214 +LINE_COMMENT=215 +LINE_COMMENT1=216 +UnrecognizedToken=217 '/*+'=1 '*/'=2 '@'=3 -'array_collect'=15 -'count'=24 -'seq_transform'=116 -';'=167 -','=168 -':'=169 -'('=170 -')'=171 -'['=172 -']'=173 -'{'=174 -'}'=175 -'*'=176 -'.'=177 -'$'=178 -'?'=179 -'<'=180 -'<='=181 -'>'=182 -'>='=183 -'='=184 -'!='=185 -'+'=192 -'-'=193 -'/'=194 -'||'=196 +'row_metadata().'=4 +'array_collect'=16 +'count'=26 +'seq_transform'=121 +';'=172 +','=173 +':'=174 +'('=175 +')'=176 +'['=177 +']'=178 +'{'=179 +'}'=180 +'*'=181 +'.'=182 +'$'=183 +'?'=184 +'<'=185 +'<='=186 +'>'=187 +'>='=188 +'='=189 +'!='=190 +'+'=197 +'-'=198 +'/'=199 +'||'=201 diff --git a/kvmain/src/main/java/oracle/kv/impl/query/compiler/parser/KVQLBaseListener.java b/kvmain/src/main/java/oracle/kv/impl/query/compiler/parser/KVQLBaseListener.java index 1d0256fa..4d0a6c27 100644 --- a/kvmain/src/main/java/oracle/kv/impl/query/compiler/parser/KVQLBaseListener.java +++ b/kvmain/src/main/java/oracle/kv/impl/query/compiler/parser/KVQLBaseListener.java @@ -1900,49 +1900,61 @@ public class KVQLBaseListener implements KVQLListener { * *

<p>The default implementation does nothing.</p>
 	 */
-	@Override public void enterAdd_region_def(KVQLParser.Add_region_defContext ctx) { }
+	@Override public void enterFrozen_def(KVQLParser.Frozen_defContext ctx) { }
 	/**
 	 * {@inheritDoc}
 	 *
 	 * <p>The default implementation does nothing.</p>
 	 */
-	@Override public void exitAdd_region_def(KVQLParser.Add_region_defContext ctx) { }
+	@Override public void exitFrozen_def(KVQLParser.Frozen_defContext ctx) { }
 	/**
 	 * {@inheritDoc}
 	 *
 	 * <p>The default implementation does nothing.</p>
 	 */
-	@Override public void enterDrop_region_def(KVQLParser.Drop_region_defContext ctx) { }
+	@Override public void enterJson_collection_def(KVQLParser.Json_collection_defContext ctx) { }
 	/**
 	 * {@inheritDoc}
 	 *
 	 * <p>The default implementation does nothing.</p>
 	 */
-	@Override public void exitDrop_region_def(KVQLParser.Drop_region_defContext ctx) { }
+	@Override public void exitJson_collection_def(KVQLParser.Json_collection_defContext ctx) { }
 	/**
 	 * {@inheritDoc}
 	 *
 	 * <p>The default implementation does nothing.</p>
 	 */
-	@Override public void enterFrozen_def(KVQLParser.Frozen_defContext ctx) { }
+	@Override public void enterEnable_before_image(KVQLParser.Enable_before_imageContext ctx) { }
 	/**
 	 * {@inheritDoc}
 	 *
 	 * <p>The default implementation does nothing.</p>
 	 */
-	@Override public void exitFrozen_def(KVQLParser.Frozen_defContext ctx) { }
+	@Override public void exitEnable_before_image(KVQLParser.Enable_before_imageContext ctx) { }
 	/**
 	 * {@inheritDoc}
 	 *
 	 * <p>The default implementation does nothing.</p>
 	 */
-	@Override public void enterJson_collection_def(KVQLParser.Json_collection_defContext ctx) { }
+	@Override public void enterBefore_image_ttl(KVQLParser.Before_image_ttlContext ctx) { }
 	/**
 	 * {@inheritDoc}
 	 *
 	 * <p>The default implementation does nothing.</p>
 	 */
-	@Override public void exitJson_collection_def(KVQLParser.Json_collection_defContext ctx) { }
+	@Override public void exitBefore_image_ttl(KVQLParser.Before_image_ttlContext ctx) { }
+	/**
+	 * {@inheritDoc}
+	 *
+	 * <p>The default implementation does nothing.</p>
+	 */
+	@Override public void enterDisable_before_image(KVQLParser.Disable_before_imageContext ctx) { }
+	/**
+	 * {@inheritDoc}
+	 *
+	 * <p>The default implementation does nothing.</p>
+	 */
+	@Override public void exitDisable_before_image(KVQLParser.Disable_before_imageContext ctx) { }
 	/**
 	 * {@inheritDoc}
 	 *
@@ -2039,6 +2051,30 @@ public class KVQLBaseListener implements KVQLListener {
 	 * <p>The default implementation does nothing.</p>
 	 */
 	@Override public void exitUnfreeze_def(KVQLParser.Unfreeze_defContext ctx) { }
+	/**
+	 * {@inheritDoc}
+	 *
+	 * <p>The default implementation does nothing.</p>
+	 */
+	@Override public void enterAdd_region_def(KVQLParser.Add_region_defContext ctx) { }
+	/**
+	 * {@inheritDoc}
+	 *
+	 * <p>The default implementation does nothing.</p>
+	 */
+	@Override public void exitAdd_region_def(KVQLParser.Add_region_defContext ctx) { }
+	/**
+	 * {@inheritDoc}
+	 *
+	 * <p>The default implementation does nothing.</p>
+	 */
+	@Override public void enterDrop_region_def(KVQLParser.Drop_region_defContext ctx) { }
+	/**
+	 * {@inheritDoc}
+	 *
+	 * <p>The default implementation does nothing.</p>
+	 */
+	@Override public void exitDrop_region_def(KVQLParser.Drop_region_defContext ctx) { }
 	/**
 	 * {@inheritDoc}
 	 *
@@ -2219,6 +2255,30 @@ public class KVQLBaseListener implements KVQLListener {
 	 * <p>The default implementation does nothing.</p>
 	 */
 	@Override public void exitIndex_path(KVQLParser.Index_pathContext ctx) { }
+	/**
+	 * {@inheritDoc}
+	 *
+	 * <p>The default implementation does nothing.</p>
+	 */
+	@Override public void enterOld_index_path(KVQLParser.Old_index_pathContext ctx) { }
+	/**
+	 * {@inheritDoc}
+	 *
+	 * <p>The default implementation does nothing.</p>
+	 */
+	@Override public void exitOld_index_path(KVQLParser.Old_index_pathContext ctx) { }
+	/**
+	 * {@inheritDoc}
+	 *
+	 * <p>The default implementation does nothing.</p>
+	 */
+	@Override public void enterRow_metadata(KVQLParser.Row_metadataContext ctx) { }
+	/**
+	 * {@inheritDoc}
+	 *
+	 * <p>The default implementation does nothing.</p>
+	 */
+	@Override public void exitRow_metadata(KVQLParser.Row_metadataContext ctx) { }
 	/**
 	 * {@inheritDoc}
 	 *
@@ -2904,4 +2964,4 @@ public class KVQLBaseListener implements KVQLListener {
 	 * <p>The default implementation does nothing.</p>
    */ @Override public void visitErrorNode(ErrorNode node) { } -} +} \ No newline at end of file diff --git a/kvmain/src/main/java/oracle/kv/impl/query/compiler/parser/KVQLLexer.interp b/kvmain/src/main/java/oracle/kv/impl/query/compiler/parser/KVQLLexer.interp index 63ebda00..740b54a5 100644 --- a/kvmain/src/main/java/oracle/kv/impl/query/compiler/parser/KVQLLexer.interp +++ b/kvmain/src/main/java/oracle/kv/impl/query/compiler/parser/KVQLLexer.interp @@ -3,6 +3,7 @@ null '/*+' '*/' '@' +'row_metadata().' null null null @@ -23,6 +24,7 @@ null null null null +null 'count' null null @@ -115,6 +117,9 @@ null null null null +null +null +null 'seq_transform' null null @@ -218,6 +223,7 @@ null null null null +null VARNAME ACCOUNT ADD @@ -230,6 +236,7 @@ AND AS ASC ARRAY_COLLECT +BEFORE BETWEEN BY CACHE @@ -248,11 +255,13 @@ DELETE DESC DESCENDANTS DESCRIBE +DISABLE DISTINCT DROP ELEMENTOF ELEMENTS ELSE +ENABLE END ES_SHARDS ES_REPLICAS @@ -274,6 +283,7 @@ HOURS IDENTIFIED IDENTITY IF +IMAGE IN INCREMENT INDEX @@ -432,6 +442,7 @@ rule names: T__0 T__1 T__2 +T__3 VARNAME ACCOUNT ADD @@ -444,6 +455,7 @@ AND AS ASC ARRAY_COLLECT +BEFORE BETWEEN BY CACHE @@ -462,11 +474,13 @@ DELETE DESC DESCENDANTS DESCRIBE +DISABLE DISTINCT DROP ELEMENTOF ELEMENTS ELSE +ENABLE END ES_SHARDS ES_REPLICAS @@ -488,6 +502,7 @@ HOURS IDENTIFIED IDENTITY IF +IMAGE IN INCREMENT INDEX @@ -665,4 +680,4 @@ mode names: DEFAULT_MODE atn: -[4, 0, 212, 1922, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7, 20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25, 2, 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 2, 30, 7, 30, 2, 31, 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 2, 34, 7, 34, 2, 35, 7, 35, 2, 36, 7, 36, 2, 37, 7, 37, 2, 38, 7, 38, 2, 39, 7, 39, 2, 40, 7, 40, 2, 41, 7, 41, 2, 42, 7, 42, 2, 43, 7, 43, 2, 44, 7, 44, 2, 45, 7, 45, 2, 46, 7, 46, 2, 47, 7, 47, 2, 48, 7, 48, 2, 49, 7, 49, 2, 50, 7, 50, 2, 51, 7, 51, 2, 52, 7, 52, 2, 53, 7, 53, 2, 54, 7, 54, 2, 55, 7, 55, 2, 56, 7, 56, 2, 57, 7, 57, 2, 58, 7, 58, 2, 59, 7, 59, 2, 60, 7, 60, 2, 61, 7, 61, 2, 62, 7, 62, 2, 63, 7, 63, 2, 64, 7, 64, 2, 65, 7, 65, 2, 66, 7, 66, 2, 67, 7, 67, 2, 68, 7, 68, 2, 69, 7, 69, 2, 70, 7, 70, 2, 71, 7, 71, 2, 72, 7, 72, 2, 73, 7, 73, 2, 74, 7, 74, 2, 75, 7, 75, 2, 76, 7, 76, 2, 77, 7, 77, 2, 78, 7, 78, 2, 79, 7, 79, 2, 80, 7, 80, 2, 81, 7, 81, 2, 82, 7, 82, 2, 83, 7, 83, 2, 84, 7, 84, 2, 85, 7, 85, 2, 86, 7, 86, 2, 87, 7, 87, 2, 88, 7, 88, 2, 89, 7, 89, 2, 90, 7, 90, 2, 91, 7, 91, 2, 92, 7, 92, 2, 93, 7, 93, 2, 94, 7, 94, 2, 95, 7, 95, 2, 96, 7, 96, 2, 97, 7, 97, 2, 98, 7, 98, 2, 99, 7, 99, 2, 100, 7, 100, 2, 101, 7, 101, 2, 102, 7, 102, 2, 103, 7, 103, 2, 104, 7, 104, 2, 105, 7, 105, 2, 106, 7, 106, 2, 107, 7, 107, 2, 108, 7, 108, 2, 109, 7, 109, 2, 110, 7, 110, 2, 111, 7, 111, 2, 112, 7, 112, 2, 113, 7, 113, 2, 114, 7, 114, 2, 115, 7, 115, 2, 116, 7, 116, 2, 117, 7, 117, 2, 118, 7, 118, 2, 119, 7, 119, 2, 120, 7, 120, 2, 121, 7, 121, 2, 122, 7, 122, 2, 123, 7, 123, 2, 124, 7, 124, 2, 125, 7, 125, 2, 126, 7, 126, 2, 127, 7, 127, 2, 128, 7, 128, 2, 129, 7, 129, 2, 130, 7, 130, 2, 131, 7, 131, 2, 132, 7, 132, 2, 133, 7, 133, 2, 134, 7, 134, 2, 135, 7, 135, 2, 136, 7, 136, 2, 137, 7, 137, 2, 138, 7, 138, 2, 139, 7, 139, 2, 140, 7, 140, 2, 141, 7, 141, 2, 142, 7, 142, 2, 143, 7, 143, 2, 144, 7, 144, 2, 
145, 7, 145, 2, 146, 7, 146, 2, 147, 7, 147, 2, 148, 7, 148, 2, 149, 7, 149, 2, 150, 7, 150, 2, 151, 7, 151, 2, 152, 7, 152, 2, 153, 7, 153, 2, 154, 7, 154, 2, 155, 7, 155, 2, 156, 7, 156, 2, 157, 7, 157, 2, 158, 7, 158, 2, 159, 7, 159, 2, 160, 7, 160, 2, 161, 7, 161, 2, 162, 7, 162, 2, 163, 7, 163, 2, 164, 7, 164, 2, 165, 7, 165, 2, 166, 7, 166, 2, 167, 7, 167, 2, 168, 7, 168, 2, 169, 7, 169, 2, 170, 7, 170, 2, 171, 7, 171, 2, 172, 7, 172, 2, 173, 7, 173, 2, 174, 7, 174, 2, 175, 7, 175, 2, 176, 7, 176, 2, 177, 7, 177, 2, 178, 7, 178, 2, 179, 7, 179, 2, 180, 7, 180, 2, 181, 7, 181, 2, 182, 7, 182, 2, 183, 7, 183, 2, 184, 7, 184, 2, 185, 7, 185, 2, 186, 7, 186, 2, 187, 7, 187, 2, 188, 7, 188, 2, 189, 7, 189, 2, 190, 7, 190, 2, 191, 7, 191, 2, 192, 7, 192, 2, 193, 7, 193, 2, 194, 7, 194, 2, 195, 7, 195, 2, 196, 7, 196, 2, 197, 7, 197, 2, 198, 7, 198, 2, 199, 7, 199, 2, 200, 7, 200, 2, 201, 7, 201, 2, 202, 7, 202, 2, 203, 7, 203, 2, 204, 7, 204, 2, 205, 7, 205, 2, 206, 7, 206, 2, 207, 7, 207, 2, 208, 7, 208, 2, 209, 7, 209, 2, 210, 7, 210, 2, 211, 7, 211, 2, 212, 7, 212, 2, 213, 7, 213, 2, 214, 7, 214, 2, 215, 7, 215, 2, 216, 7, 216, 2, 217, 7, 217, 2, 218, 7, 218, 2, 219, 7, 219, 2, 220, 7, 220, 2, 221, 7, 221, 2, 222, 7, 222, 2, 223, 7, 223, 2, 224, 7, 224, 2, 225, 7, 225, 2, 226, 7, 226, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 5, 3, 470, 8, 3, 10, 3, 12, 3, 473, 9, 3, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 6, 1, 6, 1, 6, 1, 6, 1, 6, 1, 6, 1, 7, 1, 7, 1, 7, 1, 7, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 11, 1, 11, 1, 11, 1, 11, 1, 12, 1, 12, 1, 12, 1, 13, 1, 13, 1, 13, 1, 13, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 16, 1, 16, 1, 16, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 1, 20, 1, 20, 1, 20, 1, 20, 1, 20, 1, 21, 1, 21, 1, 21, 1, 21, 1, 21, 1, 21, 1, 21, 1, 21, 1, 21, 1, 21, 1, 21, 1, 22, 1, 22, 1, 22, 1, 22, 1, 22, 1, 22, 1, 22, 1, 22, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 24, 1, 24, 1, 24, 1, 24, 1, 24, 1, 24, 1, 24, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 3, 26, 623, 8, 26, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 28, 1, 28, 1, 28, 1, 28, 1, 28, 1, 28, 1, 28, 1, 28, 1, 29, 1, 29, 1, 29, 1, 29, 1, 29, 1, 29, 1, 29, 1, 30, 1, 30, 1, 30, 1, 30, 1, 30, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 33, 1, 33, 1, 33, 1, 33, 1, 33, 1, 33, 1, 33, 1, 33, 1, 33, 1, 34, 1, 34, 1, 34, 1, 34, 1, 34, 1, 35, 1, 35, 1, 35, 1, 35, 1, 35, 1, 35, 1, 35, 1, 35, 1, 35, 1, 35, 1, 36, 1, 36, 1, 36, 1, 36, 1, 36, 1, 36, 1, 36, 1, 36, 1, 36, 1, 37, 1, 37, 1, 37, 1, 37, 1, 37, 1, 38, 1, 38, 1, 38, 1, 38, 1, 39, 1, 39, 1, 39, 1, 39, 1, 39, 1, 39, 1, 39, 1, 39, 1, 39, 1, 39, 1, 40, 1, 40, 1, 40, 1, 40, 1, 40, 1, 40, 1, 40, 1, 40, 1, 40, 1, 40, 1, 40, 1, 40, 1, 41, 1, 41, 1, 41, 1, 41, 1, 41, 1, 41, 1, 41, 1, 42, 1, 42, 1, 42, 1, 42, 1, 42, 1, 42, 1, 42, 1, 42, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 45, 1, 45, 1, 45, 1, 45, 1, 45, 1, 45, 1, 46, 1, 46, 1, 46, 1, 46, 1, 47, 1, 47, 1, 47, 1, 47, 1, 47, 1, 47, 
1, 48, 1, 48, 1, 48, 1, 48, 1, 48, 1, 48, 1, 48, 1, 49, 1, 49, 1, 49, 1, 49, 1, 49, 1, 50, 1, 50, 1, 50, 1, 50, 1, 50, 1, 50, 1, 50, 1, 51, 1, 51, 1, 51, 1, 51, 1, 51, 1, 51, 1, 51, 1, 51, 1, 51, 1, 52, 1, 52, 1, 52, 1, 52, 1, 52, 1, 52, 1, 52, 1, 52, 1, 52, 1, 52, 1, 53, 1, 53, 1, 53, 1, 53, 1, 53, 1, 53, 1, 54, 1, 54, 1, 54, 1, 54, 1, 54, 1, 54, 1, 55, 1, 55, 1, 55, 1, 55, 1, 55, 1, 55, 3, 55, 838, 8, 55, 1, 56, 1, 56, 1, 56, 1, 56, 1, 56, 1, 56, 1, 56, 1, 56, 1, 56, 1, 56, 1, 56, 1, 57, 1, 57, 1, 57, 1, 57, 1, 57, 1, 57, 1, 57, 1, 57, 1, 57, 1, 58, 1, 58, 1, 58, 1, 59, 1, 59, 1, 59, 1, 60, 1, 60, 1, 60, 1, 60, 1, 60, 1, 60, 1, 60, 1, 60, 1, 60, 1, 60, 1, 61, 1, 61, 1, 61, 1, 61, 1, 61, 1, 61, 1, 62, 1, 62, 1, 62, 1, 62, 1, 62, 1, 62, 1, 62, 1, 62, 1, 63, 1, 63, 1, 63, 1, 63, 1, 63, 1, 63, 1, 63, 1, 64, 1, 64, 1, 64, 1, 64, 1, 64, 1, 65, 1, 65, 1, 65, 1, 66, 1, 66, 1, 66, 1, 66, 1, 66, 1, 67, 1, 67, 1, 67, 1, 67, 1, 67, 1, 68, 1, 68, 1, 68, 1, 68, 1, 69, 1, 69, 1, 69, 1, 69, 1, 69, 1, 69, 1, 70, 1, 70, 1, 70, 1, 70, 1, 70, 1, 71, 1, 71, 1, 71, 1, 71, 1, 71, 1, 72, 1, 72, 1, 72, 1, 72, 1, 72, 1, 73, 1, 73, 1, 73, 1, 73, 1, 73, 1, 73, 1, 73, 1, 73, 1, 73, 1, 74, 1, 74, 1, 74, 1, 74, 1, 74, 1, 74, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 76, 1, 76, 1, 76, 1, 76, 1, 76, 1, 77, 1, 77, 1, 77, 1, 77, 1, 77, 1, 77, 1, 77, 1, 77, 1, 77, 1, 78, 1, 78, 1, 78, 1, 78, 1, 78, 1, 78, 1, 79, 1, 79, 1, 79, 1, 79, 1, 79, 1, 79, 1, 79, 1, 79, 3, 79, 989, 8, 79, 1, 80, 1, 80, 1, 80, 1, 80, 1, 80, 1, 80, 1, 80, 1, 80, 1, 80, 1, 81, 1, 81, 1, 81, 1, 81, 1, 81, 1, 81, 1, 81, 1, 82, 1, 82, 1, 82, 1, 82, 1, 82, 1, 82, 1, 82, 1, 82, 1, 82, 1, 82, 1, 82, 1, 83, 1, 83, 1, 83, 1, 83, 1, 83, 1, 83, 1, 83, 1, 83, 1, 83, 1, 83, 1, 84, 1, 84, 1, 84, 1, 84, 1, 84, 1, 84, 1, 84, 1, 84, 1, 84, 1, 84, 1, 84, 1, 85, 1, 85, 1, 85, 1, 85, 1, 85, 1, 85, 1, 85, 1, 86, 1, 86, 1, 86, 1, 87, 1, 87, 1, 87, 1, 87, 1, 88, 1, 88, 1, 88, 1, 88, 1, 88, 1, 88, 1, 89, 1, 89, 1, 89, 1, 89, 1, 89, 1, 89, 1, 89, 1, 90, 1, 90, 1, 90, 1, 91, 1, 91, 1, 91, 1, 92, 1, 92, 1, 92, 1, 92, 1, 92, 1, 93, 1, 93, 1, 93, 1, 94, 1, 94, 1, 94, 1, 94, 1, 94, 1, 94, 1, 95, 1, 95, 1, 95, 1, 95, 1, 95, 1, 95, 1, 96, 1, 96, 1, 96, 1, 96, 1, 96, 1, 96, 1, 96, 1, 96, 1, 96, 1, 97, 1, 97, 1, 97, 1, 97, 1, 97, 1, 97, 1, 97, 1, 97, 1, 97, 1, 98, 1, 98, 1, 98, 1, 98, 1, 98, 1, 98, 1, 99, 1, 99, 1, 99, 1, 99, 1, 100, 1, 100, 1, 100, 1, 100, 1, 101, 1, 101, 1, 101, 1, 101, 1, 101, 1, 101, 1, 102, 1, 102, 1, 102, 1, 102, 1, 102, 1, 102, 1, 102, 1, 102, 1, 103, 1, 103, 1, 103, 1, 103, 1, 104, 1, 104, 1, 104, 1, 104, 1, 104, 1, 104, 1, 104, 1, 105, 1, 105, 1, 105, 1, 105, 1, 105, 1, 105, 1, 105, 1, 105, 1, 106, 1, 106, 1, 106, 1, 106, 1, 106, 1, 106, 1, 106, 1, 107, 1, 107, 1, 107, 1, 107, 1, 107, 1, 107, 1, 107, 1, 107, 1, 107, 1, 107, 1, 108, 1, 108, 1, 108, 1, 108, 1, 108, 1, 108, 1, 108, 1, 109, 1, 109, 1, 109, 1, 109, 1, 109, 1, 110, 1, 110, 1, 110, 1, 110, 1, 110, 1, 110, 1, 111, 1, 111, 1, 111, 1, 111, 1, 112, 1, 112, 1, 112, 1, 112, 1, 112, 1, 112, 1, 112, 1, 113, 1, 113, 1, 113, 1, 113, 1, 113, 1, 113, 1, 113, 1, 113, 3, 113, 1211, 8, 113, 1, 114, 1, 114, 1, 114, 1, 114, 1, 114, 1, 114, 1, 114, 1, 115, 1, 115, 1, 115, 1, 115, 1, 115, 1, 115, 1, 115, 1, 115, 1, 115, 1, 115, 1, 115, 1, 115, 1, 115, 1, 115, 1, 116, 1, 116, 1, 116, 1, 116, 1, 117, 1, 117, 1, 117, 1, 117, 1, 117, 1, 117, 1, 118, 1, 118, 1, 118, 1, 118, 1, 118, 1, 119, 1, 119, 1, 119, 1, 119, 1, 119, 1, 119, 1, 120, 1, 120, 1, 120, 1, 120, 1, 120, 1, 120, 1, 121, 1, 121, 1, 121, 1, 121, 1, 121, 1, 
121, 1, 121, 1, 122, 1, 122, 1, 122, 1, 122, 1, 122, 1, 123, 1, 123, 1, 123, 1, 124, 1, 124, 1, 124, 1, 124, 1, 125, 1, 125, 1, 125, 1, 125, 1, 125, 1, 126, 1, 126, 1, 126, 1, 126, 1, 126, 1, 126, 1, 126, 1, 126, 1, 126, 1, 127, 1, 127, 1, 127, 1, 127, 1, 127, 1, 127, 1, 127, 1, 128, 1, 128, 1, 128, 1, 128, 1, 128, 1, 128, 1, 128, 1, 129, 1, 129, 1, 129, 1, 129, 1, 129, 1, 129, 1, 129, 1, 130, 1, 130, 1, 130, 1, 130, 1, 130, 1, 131, 1, 131, 1, 131, 1, 131, 1, 131, 1, 131, 1, 132, 1, 132, 1, 132, 1, 132, 1, 132, 1, 132, 1, 133, 1, 133, 1, 133, 1, 133, 1, 133, 1, 133, 1, 133, 1, 134, 1, 134, 1, 134, 1, 134, 1, 134, 1, 135, 1, 135, 1, 135, 1, 135, 1, 135, 1, 135, 1, 136, 1, 136, 1, 136, 1, 136, 1, 136, 1, 137, 1, 137, 1, 137, 1, 137, 1, 137, 1, 137, 1, 137, 1, 138, 1, 138, 1, 138, 1, 138, 1, 138, 1, 138, 1, 138, 1, 139, 1, 139, 1, 139, 1, 139, 1, 139, 1, 140, 1, 140, 4, 140, 1376, 8, 140, 11, 140, 12, 140, 1377, 1, 140, 1, 140, 1, 141, 1, 141, 4, 141, 1384, 8, 141, 11, 141, 12, 141, 1385, 1, 141, 1, 141, 1, 142, 1, 142, 4, 142, 1392, 8, 142, 11, 142, 12, 142, 1393, 1, 142, 1, 142, 1, 143, 1, 143, 4, 143, 1400, 8, 143, 11, 143, 12, 143, 1401, 1, 143, 1, 143, 4, 143, 1406, 8, 143, 11, 143, 12, 143, 1407, 1, 143, 1, 143, 1, 144, 1, 144, 4, 144, 1414, 8, 144, 11, 144, 12, 144, 1415, 1, 144, 1, 144, 4, 144, 1420, 8, 144, 11, 144, 12, 144, 1421, 1, 144, 1, 144, 1, 145, 1, 145, 4, 145, 1428, 8, 145, 11, 145, 12, 145, 1429, 1, 145, 1, 145, 4, 145, 1434, 8, 145, 11, 145, 12, 145, 1435, 1, 145, 1, 145, 1, 146, 1, 146, 1, 146, 1, 146, 1, 146, 1, 146, 1, 147, 1, 147, 1, 147, 1, 147, 1, 147, 1, 147, 1, 147, 1, 148, 1, 148, 1, 148, 1, 148, 1, 148, 1, 148, 1, 148, 1, 148, 1, 149, 1, 149, 1, 149, 1, 149, 1, 149, 1, 149, 1, 149, 1, 150, 1, 150, 1, 150, 1, 150, 1, 150, 1, 151, 1, 151, 1, 151, 1, 151, 1, 151, 1, 151, 1, 152, 1, 152, 1, 152, 1, 152, 1, 152, 1, 152, 1, 152, 1, 152, 1, 152, 1, 153, 1, 153, 1, 153, 1, 153, 1, 153, 1, 153, 1, 153, 1, 153, 1, 154, 1, 154, 1, 154, 1, 154, 1, 154, 1, 155, 1, 155, 1, 155, 1, 155, 1, 156, 1, 156, 1, 156, 1, 156, 1, 156, 1, 156, 1, 156, 1, 157, 1, 157, 1, 157, 1, 157, 1, 157, 1, 157, 1, 158, 1, 158, 1, 158, 1, 158, 1, 158, 1, 158, 1, 158, 1, 159, 1, 159, 1, 159, 1, 159, 1, 159, 1, 159, 1, 159, 1, 160, 1, 160, 1, 160, 1, 160, 1, 160, 1, 160, 1, 160, 1, 160, 1, 160, 1, 160, 1, 161, 1, 161, 1, 161, 1, 161, 1, 162, 1, 162, 1, 162, 1, 162, 1, 162, 1, 162, 1, 162, 1, 162, 1, 162, 1, 162, 1, 163, 1, 163, 1, 163, 1, 163, 1, 163, 1, 163, 1, 163, 1, 163, 1, 163, 1, 163, 1, 163, 1, 163, 1, 163, 1, 163, 1, 164, 1, 164, 1, 164, 1, 164, 1, 164, 1, 164, 1, 164, 1, 164, 1, 164, 1, 164, 1, 165, 1, 165, 1, 165, 1, 165, 1, 165, 1, 165, 1, 165, 1, 166, 1, 166, 1, 167, 1, 167, 1, 168, 1, 168, 1, 169, 1, 169, 1, 170, 1, 170, 1, 171, 1, 171, 1, 172, 1, 172, 1, 173, 1, 173, 1, 174, 1, 174, 1, 175, 1, 175, 1, 176, 1, 176, 1, 177, 1, 177, 1, 178, 1, 178, 1, 179, 1, 179, 1, 180, 1, 180, 1, 180, 1, 181, 1, 181, 1, 182, 1, 182, 1, 182, 1, 183, 1, 183, 1, 184, 1, 184, 1, 184, 1, 185, 1, 185, 1, 185, 1, 185, 1, 185, 1, 186, 1, 186, 1, 186, 1, 186, 1, 186, 1, 186, 1, 186, 1, 187, 1, 187, 1, 187, 1, 187, 1, 187, 1, 188, 1, 188, 1, 188, 1, 188, 1, 188, 1, 188, 1, 188, 1, 189, 1, 189, 1, 189, 1, 189, 1, 189, 1, 190, 1, 190, 1, 190, 1, 190, 1, 190, 1, 190, 1, 190, 1, 191, 1, 191, 1, 192, 1, 192, 1, 193, 1, 193, 1, 194, 1, 194, 1, 194, 1, 194, 1, 195, 1, 195, 1, 195, 1, 196, 1, 196, 1, 196, 1, 196, 1, 196, 1, 197, 1, 197, 1, 197, 1, 197, 1, 197, 1, 197, 1, 198, 1, 198, 1, 198, 1, 198, 1, 198, 1, 199, 4, 199, 
1694, 8, 199, 11, 199, 12, 199, 1695, 1, 200, 5, 200, 1699, 8, 200, 10, 200, 12, 200, 1702, 9, 200, 1, 200, 1, 200, 4, 200, 1706, 8, 200, 11, 200, 12, 200, 1707, 1, 200, 1, 200, 3, 200, 1712, 8, 200, 1, 200, 4, 200, 1715, 8, 200, 11, 200, 12, 200, 1716, 3, 200, 1719, 8, 200, 1, 200, 4, 200, 1722, 8, 200, 11, 200, 12, 200, 1723, 1, 200, 1, 200, 3, 200, 1728, 8, 200, 1, 200, 4, 200, 1731, 8, 200, 11, 200, 12, 200, 1732, 3, 200, 1735, 8, 200, 1, 201, 1, 201, 3, 201, 1739, 8, 201, 1, 201, 1, 201, 1, 202, 1, 202, 1, 202, 5, 202, 1746, 8, 202, 10, 202, 12, 202, 1749, 9, 202, 1, 202, 1, 202, 1, 203, 1, 203, 1, 203, 5, 203, 1756, 8, 203, 10, 203, 12, 203, 1759, 9, 203, 1, 203, 1, 203, 1, 204, 1, 204, 1, 204, 1, 204, 1, 204, 1, 205, 1, 205, 1, 205, 1, 205, 5, 205, 1772, 8, 205, 10, 205, 12, 205, 1775, 9, 205, 1, 206, 1, 206, 3, 206, 1779, 8, 206, 1, 206, 1, 206, 1, 206, 5, 206, 1784, 8, 206, 10, 206, 12, 206, 1787, 9, 206, 1, 207, 4, 207, 1790, 8, 207, 11, 207, 12, 207, 1791, 1, 207, 1, 207, 1, 208, 1, 208, 1, 208, 1, 208, 1, 208, 5, 208, 1801, 8, 208, 10, 208, 12, 208, 1804, 9, 208, 1, 208, 1, 208, 1, 208, 1, 208, 1, 208, 1, 209, 1, 209, 1, 209, 1, 209, 5, 209, 1815, 8, 209, 10, 209, 12, 209, 1818, 9, 209, 1, 209, 1, 209, 1, 210, 1, 210, 5, 210, 1824, 8, 210, 10, 210, 12, 210, 1827, 9, 210, 1, 210, 1, 210, 1, 211, 1, 211, 1, 212, 1, 212, 1, 213, 1, 213, 1, 214, 1, 214, 1, 214, 3, 214, 1840, 8, 214, 1, 215, 1, 215, 1, 215, 3, 215, 1845, 8, 215, 1, 216, 1, 216, 1, 217, 1, 217, 1, 218, 1, 218, 1, 218, 1, 218, 1, 218, 1, 218, 1, 219, 1, 219, 1, 219, 1, 219, 1, 219, 1, 219, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 220, 1, 221, 1, 221, 1, 221, 1, 221, 1, 221, 1, 221, 1, 221, 1, 222, 1, 222, 1, 222, 1, 222, 1, 222, 1, 222, 1, 222, 1, 222, 1, 222, 1, 222, 1, 222, 1, 223, 1, 223, 1, 223, 1, 223, 1, 223, 1, 223, 1, 223, 1, 224, 1, 224, 1, 224, 1, 224, 1, 224, 1, 224, 1, 224, 1, 224, 1, 224, 1, 224, 1, 224, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 226, 1, 226, 1, 226, 1, 226, 1, 226, 1, 226, 1, 226, 1, 226, 1, 226, 3, 1747, 1757, 1802, 0, 227, 1, 1, 3, 2, 5, 3, 7, 4, 9, 5, 11, 6, 13, 7, 15, 8, 17, 9, 19, 10, 21, 11, 23, 12, 25, 13, 27, 14, 29, 15, 31, 16, 33, 17, 35, 18, 37, 19, 39, 20, 41, 21, 43, 22, 45, 23, 47, 24, 49, 25, 51, 26, 53, 27, 55, 28, 57, 29, 59, 30, 61, 31, 63, 32, 65, 33, 67, 34, 69, 35, 71, 36, 73, 37, 75, 38, 77, 39, 79, 40, 81, 41, 83, 42, 85, 43, 87, 44, 89, 45, 91, 46, 93, 47, 95, 48, 97, 49, 99, 50, 101, 51, 103, 52, 105, 53, 107, 54, 109, 55, 111, 56, 113, 57, 115, 58, 117, 59, 119, 60, 121, 61, 123, 62, 125, 63, 127, 64, 129, 65, 131, 66, 133, 67, 135, 68, 137, 69, 139, 70, 141, 71, 143, 72, 145, 73, 147, 74, 149, 75, 151, 76, 153, 77, 155, 78, 157, 79, 159, 80, 161, 81, 163, 82, 165, 83, 167, 84, 169, 85, 171, 86, 173, 87, 175, 88, 177, 89, 179, 90, 181, 91, 183, 92, 185, 93, 187, 94, 189, 95, 191, 96, 193, 97, 195, 98, 197, 99, 199, 100, 201, 101, 203, 102, 205, 103, 207, 104, 209, 105, 211, 106, 213, 107, 215, 108, 217, 109, 219, 110, 221, 111, 223, 112, 225, 113, 227, 114, 229, 115, 231, 116, 233, 117, 235, 118, 237, 119, 239, 120, 241, 121, 243, 122, 245, 123, 247, 124, 249, 125, 251, 126, 253, 127, 255, 128, 257, 129, 259, 130, 261, 131, 263, 132, 265, 133, 267, 134, 269, 135, 271, 136, 273, 137, 275, 138, 277, 139, 279, 140, 281, 141, 283, 142, 285, 143, 287, 144, 289, 145, 291, 146, 293, 147, 295, 148, 297, 149, 299, 150, 301, 151, 303, 152, 305, 153, 307, 154, 309, 155, 311, 156, 313, 157, 315, 158, 317, 159, 319, 160, 321, 161, 323, 162, 
325, 163, 327, 164, 329, 165, 331, 166, 333, 167, 335, 168, 337, 169, 339, 170, 341, 171, 343, 172, 345, 173, 347, 174, 349, 175, 351, 176, 353, 177, 355, 178, 357, 179, 359, 180, 361, 181, 363, 182, 365, 183, 367, 184, 369, 185, 371, 186, 373, 187, 375, 188, 377, 189, 379, 190, 381, 191, 383, 192, 385, 193, 387, 194, 389, 195, 391, 196, 393, 197, 395, 198, 397, 199, 399, 200, 401, 201, 403, 202, 405, 203, 407, 204, 409, 205, 411, 206, 413, 207, 415, 208, 417, 209, 419, 210, 421, 211, 423, 212, 425, 0, 427, 0, 429, 0, 431, 0, 433, 0, 435, 0, 437, 0, 439, 0, 441, 0, 443, 0, 445, 0, 447, 0, 449, 0, 451, 0, 453, 0, 1, 0, 37, 2, 0, 65, 65, 97, 97, 2, 0, 67, 67, 99, 99, 2, 0, 79, 79, 111, 111, 2, 0, 85, 85, 117, 117, 2, 0, 78, 78, 110, 110, 2, 0, 84, 84, 116, 116, 2, 0, 68, 68, 100, 100, 2, 0, 77, 77, 109, 109, 2, 0, 73, 73, 105, 105, 2, 0, 76, 76, 108, 108, 2, 0, 69, 69, 101, 101, 2, 0, 82, 82, 114, 114, 2, 0, 87, 87, 119, 119, 2, 0, 89, 89, 121, 121, 2, 0, 83, 83, 115, 115, 2, 0, 66, 66, 98, 98, 2, 0, 72, 72, 104, 104, 2, 0, 70, 70, 102, 102, 2, 0, 80, 80, 112, 112, 2, 0, 88, 88, 120, 120, 2, 0, 90, 90, 122, 122, 2, 0, 71, 71, 103, 103, 2, 0, 74, 74, 106, 106, 2, 0, 75, 75, 107, 107, 2, 0, 86, 86, 118, 118, 2, 0, 81, 81, 113, 113, 2, 0, 43, 43, 45, 45, 1, 0, 83, 83, 1, 0, 89, 89, 1, 0, 36, 36, 3, 0, 9, 10, 13, 13, 32, 32, 1, 0, 43, 43, 2, 0, 10, 10, 13, 13, 2, 0, 65, 90, 97, 122, 8, 0, 34, 34, 47, 47, 92, 92, 98, 98, 102, 102, 110, 110, 114, 114, 116, 116, 8, 0, 39, 39, 47, 47, 92, 92, 98, 98, 102, 102, 110, 110, 114, 114, 116, 116, 3, 0, 48, 57, 65, 70, 97, 102, 1950, 0, 1, 1, 0, 0, 0, 0, 3, 1, 0, 0, 0, 0, 5, 1, 0, 0, 0, 0, 7, 1, 0, 0, 0, 0, 9, 1, 0, 0, 0, 0, 11, 1, 0, 0, 0, 0, 13, 1, 0, 0, 0, 0, 15, 1, 0, 0, 0, 0, 17, 1, 0, 0, 0, 0, 19, 1, 0, 0, 0, 0, 21, 1, 0, 0, 0, 0, 23, 1, 0, 0, 0, 0, 25, 1, 0, 0, 0, 0, 27, 1, 0, 0, 0, 0, 29, 1, 0, 0, 0, 0, 31, 1, 0, 0, 0, 0, 33, 1, 0, 0, 0, 0, 35, 1, 0, 0, 0, 0, 37, 1, 0, 0, 0, 0, 39, 1, 0, 0, 0, 0, 41, 1, 0, 0, 0, 0, 43, 1, 0, 0, 0, 0, 45, 1, 0, 0, 0, 0, 47, 1, 0, 0, 0, 0, 49, 1, 0, 0, 0, 0, 51, 1, 0, 0, 0, 0, 53, 1, 0, 0, 0, 0, 55, 1, 0, 0, 0, 0, 57, 1, 0, 0, 0, 0, 59, 1, 0, 0, 0, 0, 61, 1, 0, 0, 0, 0, 63, 1, 0, 0, 0, 0, 65, 1, 0, 0, 0, 0, 67, 1, 0, 0, 0, 0, 69, 1, 0, 0, 0, 0, 71, 1, 0, 0, 0, 0, 73, 1, 0, 0, 0, 0, 75, 1, 0, 0, 0, 0, 77, 1, 0, 0, 0, 0, 79, 1, 0, 0, 0, 0, 81, 1, 0, 0, 0, 0, 83, 1, 0, 0, 0, 0, 85, 1, 0, 0, 0, 0, 87, 1, 0, 0, 0, 0, 89, 1, 0, 0, 0, 0, 91, 1, 0, 0, 0, 0, 93, 1, 0, 0, 0, 0, 95, 1, 0, 0, 0, 0, 97, 1, 0, 0, 0, 0, 99, 1, 0, 0, 0, 0, 101, 1, 0, 0, 0, 0, 103, 1, 0, 0, 0, 0, 105, 1, 0, 0, 0, 0, 107, 1, 0, 0, 0, 0, 109, 1, 0, 0, 0, 0, 111, 1, 0, 0, 0, 0, 113, 1, 0, 0, 0, 0, 115, 1, 0, 0, 0, 0, 117, 1, 0, 0, 0, 0, 119, 1, 0, 0, 0, 0, 121, 1, 0, 0, 0, 0, 123, 1, 0, 0, 0, 0, 125, 1, 0, 0, 0, 0, 127, 1, 0, 0, 0, 0, 129, 1, 0, 0, 0, 0, 131, 1, 0, 0, 0, 0, 133, 1, 0, 0, 0, 0, 135, 1, 0, 0, 0, 0, 137, 1, 0, 0, 0, 0, 139, 1, 0, 0, 0, 0, 141, 1, 0, 0, 0, 0, 143, 1, 0, 0, 0, 0, 145, 1, 0, 0, 0, 0, 147, 1, 0, 0, 0, 0, 149, 1, 0, 0, 0, 0, 151, 1, 0, 0, 0, 0, 153, 1, 0, 0, 0, 0, 155, 1, 0, 0, 0, 0, 157, 1, 0, 0, 0, 0, 159, 1, 0, 0, 0, 0, 161, 1, 0, 0, 0, 0, 163, 1, 0, 0, 0, 0, 165, 1, 0, 0, 0, 0, 167, 1, 0, 0, 0, 0, 169, 1, 0, 0, 0, 0, 171, 1, 0, 0, 0, 0, 173, 1, 0, 0, 0, 0, 175, 1, 0, 0, 0, 0, 177, 1, 0, 0, 0, 0, 179, 1, 0, 0, 0, 0, 181, 1, 0, 0, 0, 0, 183, 1, 0, 0, 0, 0, 185, 1, 0, 0, 0, 0, 187, 1, 0, 0, 0, 0, 189, 1, 0, 0, 0, 0, 191, 1, 0, 0, 0, 0, 193, 1, 0, 0, 0, 0, 195, 1, 0, 0, 0, 0, 197, 1, 0, 0, 0, 0, 199, 1, 0, 0, 0, 0, 201, 1, 0, 0, 
0, 0, 203, 1, 0, 0, 0, 0, 205, 1, 0, 0, 0, 0, 207, 1, 0, 0, 0, 0, 209, 1, 0, 0, 0, 0, 211, 1, 0, 0, 0, 0, 213, 1, 0, 0, 0, 0, 215, 1, 0, 0, 0, 0, 217, 1, 0, 0, 0, 0, 219, 1, 0, 0, 0, 0, 221, 1, 0, 0, 0, 0, 223, 1, 0, 0, 0, 0, 225, 1, 0, 0, 0, 0, 227, 1, 0, 0, 0, 0, 229, 1, 0, 0, 0, 0, 231, 1, 0, 0, 0, 0, 233, 1, 0, 0, 0, 0, 235, 1, 0, 0, 0, 0, 237, 1, 0, 0, 0, 0, 239, 1, 0, 0, 0, 0, 241, 1, 0, 0, 0, 0, 243, 1, 0, 0, 0, 0, 245, 1, 0, 0, 0, 0, 247, 1, 0, 0, 0, 0, 249, 1, 0, 0, 0, 0, 251, 1, 0, 0, 0, 0, 253, 1, 0, 0, 0, 0, 255, 1, 0, 0, 0, 0, 257, 1, 0, 0, 0, 0, 259, 1, 0, 0, 0, 0, 261, 1, 0, 0, 0, 0, 263, 1, 0, 0, 0, 0, 265, 1, 0, 0, 0, 0, 267, 1, 0, 0, 0, 0, 269, 1, 0, 0, 0, 0, 271, 1, 0, 0, 0, 0, 273, 1, 0, 0, 0, 0, 275, 1, 0, 0, 0, 0, 277, 1, 0, 0, 0, 0, 279, 1, 0, 0, 0, 0, 281, 1, 0, 0, 0, 0, 283, 1, 0, 0, 0, 0, 285, 1, 0, 0, 0, 0, 287, 1, 0, 0, 0, 0, 289, 1, 0, 0, 0, 0, 291, 1, 0, 0, 0, 0, 293, 1, 0, 0, 0, 0, 295, 1, 0, 0, 0, 0, 297, 1, 0, 0, 0, 0, 299, 1, 0, 0, 0, 0, 301, 1, 0, 0, 0, 0, 303, 1, 0, 0, 0, 0, 305, 1, 0, 0, 0, 0, 307, 1, 0, 0, 0, 0, 309, 1, 0, 0, 0, 0, 311, 1, 0, 0, 0, 0, 313, 1, 0, 0, 0, 0, 315, 1, 0, 0, 0, 0, 317, 1, 0, 0, 0, 0, 319, 1, 0, 0, 0, 0, 321, 1, 0, 0, 0, 0, 323, 1, 0, 0, 0, 0, 325, 1, 0, 0, 0, 0, 327, 1, 0, 0, 0, 0, 329, 1, 0, 0, 0, 0, 331, 1, 0, 0, 0, 0, 333, 1, 0, 0, 0, 0, 335, 1, 0, 0, 0, 0, 337, 1, 0, 0, 0, 0, 339, 1, 0, 0, 0, 0, 341, 1, 0, 0, 0, 0, 343, 1, 0, 0, 0, 0, 345, 1, 0, 0, 0, 0, 347, 1, 0, 0, 0, 0, 349, 1, 0, 0, 0, 0, 351, 1, 0, 0, 0, 0, 353, 1, 0, 0, 0, 0, 355, 1, 0, 0, 0, 0, 357, 1, 0, 0, 0, 0, 359, 1, 0, 0, 0, 0, 361, 1, 0, 0, 0, 0, 363, 1, 0, 0, 0, 0, 365, 1, 0, 0, 0, 0, 367, 1, 0, 0, 0, 0, 369, 1, 0, 0, 0, 0, 371, 1, 0, 0, 0, 0, 373, 1, 0, 0, 0, 0, 375, 1, 0, 0, 0, 0, 377, 1, 0, 0, 0, 0, 379, 1, 0, 0, 0, 0, 381, 1, 0, 0, 0, 0, 383, 1, 0, 0, 0, 0, 385, 1, 0, 0, 0, 0, 387, 1, 0, 0, 0, 0, 389, 1, 0, 0, 0, 0, 391, 1, 0, 0, 0, 0, 393, 1, 0, 0, 0, 0, 395, 1, 0, 0, 0, 0, 397, 1, 0, 0, 0, 0, 399, 1, 0, 0, 0, 0, 401, 1, 0, 0, 0, 0, 403, 1, 0, 0, 0, 0, 405, 1, 0, 0, 0, 0, 407, 1, 0, 0, 0, 0, 409, 1, 0, 0, 0, 0, 411, 1, 0, 0, 0, 0, 413, 1, 0, 0, 0, 0, 415, 1, 0, 0, 0, 0, 417, 1, 0, 0, 0, 0, 419, 1, 0, 0, 0, 0, 421, 1, 0, 0, 0, 0, 423, 1, 0, 0, 0, 1, 455, 1, 0, 0, 0, 3, 459, 1, 0, 0, 0, 5, 462, 1, 0, 0, 0, 7, 464, 1, 0, 0, 0, 9, 474, 1, 0, 0, 0, 11, 482, 1, 0, 0, 0, 13, 486, 1, 0, 0, 0, 15, 492, 1, 0, 0, 0, 17, 496, 1, 0, 0, 0, 19, 502, 1, 0, 0, 0, 21, 509, 1, 0, 0, 0, 23, 519, 1, 0, 0, 0, 25, 523, 1, 0, 0, 0, 27, 526, 1, 0, 0, 0, 29, 530, 1, 0, 0, 0, 31, 544, 1, 0, 0, 0, 33, 552, 1, 0, 0, 0, 35, 555, 1, 0, 0, 0, 37, 561, 1, 0, 0, 0, 39, 566, 1, 0, 0, 0, 41, 574, 1, 0, 0, 0, 43, 579, 1, 0, 0, 0, 45, 590, 1, 0, 0, 0, 47, 598, 1, 0, 0, 0, 49, 604, 1, 0, 0, 0, 51, 611, 1, 0, 0, 0, 53, 622, 1, 0, 0, 0, 55, 624, 1, 0, 0, 0, 57, 632, 1, 0, 0, 0, 59, 640, 1, 0, 0, 0, 61, 647, 1, 0, 0, 0, 63, 652, 1, 0, 0, 0, 65, 664, 1, 0, 0, 0, 67, 673, 1, 0, 0, 0, 69, 682, 1, 0, 0, 0, 71, 687, 1, 0, 0, 0, 73, 697, 1, 0, 0, 0, 75, 706, 1, 0, 0, 0, 77, 711, 1, 0, 0, 0, 79, 715, 1, 0, 0, 0, 81, 725, 1, 0, 0, 0, 83, 737, 1, 0, 0, 0, 85, 744, 1, 0, 0, 0, 87, 752, 1, 0, 0, 0, 89, 759, 1, 0, 0, 0, 91, 765, 1, 0, 0, 0, 93, 771, 1, 0, 0, 0, 95, 775, 1, 0, 0, 0, 97, 781, 1, 0, 0, 0, 99, 788, 1, 0, 0, 0, 101, 793, 1, 0, 0, 0, 103, 800, 1, 0, 0, 0, 105, 809, 1, 0, 0, 0, 107, 819, 1, 0, 0, 0, 109, 825, 1, 0, 0, 0, 111, 837, 1, 0, 0, 0, 113, 839, 1, 0, 0, 0, 115, 850, 1, 0, 0, 0, 117, 859, 1, 0, 0, 0, 119, 862, 1, 0, 0, 0, 121, 865, 1, 0, 0, 0, 123, 875, 1, 0, 0, 0, 125, 881, 1, 0, 0, 0, 
127, 889, 1, 0, 0, 0, 129, 896, 1, 0, 0, 0, 131, 901, 1, 0, 0, 0, 133, 904, 1, 0, 0, 0, 135, 909, 1, 0, 0, 0, 137, 914, 1, 0, 0, 0, 139, 918, 1, 0, 0, 0, 141, 924, 1, 0, 0, 0, 143, 929, 1, 0, 0, 0, 145, 934, 1, 0, 0, 0, 147, 939, 1, 0, 0, 0, 149, 948, 1, 0, 0, 0, 151, 954, 1, 0, 0, 0, 153, 960, 1, 0, 0, 0, 155, 965, 1, 0, 0, 0, 157, 974, 1, 0, 0, 0, 159, 988, 1, 0, 0, 0, 161, 990, 1, 0, 0, 0, 163, 999, 1, 0, 0, 0, 165, 1006, 1, 0, 0, 0, 167, 1017, 1, 0, 0, 0, 169, 1027, 1, 0, 0, 0, 171, 1038, 1, 0, 0, 0, 173, 1045, 1, 0, 0, 0, 175, 1048, 1, 0, 0, 0, 177, 1052, 1, 0, 0, 0, 179, 1058, 1, 0, 0, 0, 181, 1065, 1, 0, 0, 0, 183, 1068, 1, 0, 0, 0, 185, 1071, 1, 0, 0, 0, 187, 1076, 1, 0, 0, 0, 189, 1079, 1, 0, 0, 0, 191, 1085, 1, 0, 0, 0, 193, 1091, 1, 0, 0, 0, 195, 1100, 1, 0, 0, 0, 197, 1109, 1, 0, 0, 0, 199, 1115, 1, 0, 0, 0, 201, 1119, 1, 0, 0, 0, 203, 1123, 1, 0, 0, 0, 205, 1129, 1, 0, 0, 0, 207, 1137, 1, 0, 0, 0, 209, 1141, 1, 0, 0, 0, 211, 1148, 1, 0, 0, 0, 213, 1156, 1, 0, 0, 0, 215, 1163, 1, 0, 0, 0, 217, 1173, 1, 0, 0, 0, 219, 1180, 1, 0, 0, 0, 221, 1185, 1, 0, 0, 0, 223, 1191, 1, 0, 0, 0, 225, 1195, 1, 0, 0, 0, 227, 1210, 1, 0, 0, 0, 229, 1212, 1, 0, 0, 0, 231, 1219, 1, 0, 0, 0, 233, 1233, 1, 0, 0, 0, 235, 1237, 1, 0, 0, 0, 237, 1243, 1, 0, 0, 0, 239, 1248, 1, 0, 0, 0, 241, 1254, 1, 0, 0, 0, 243, 1260, 1, 0, 0, 0, 245, 1267, 1, 0, 0, 0, 247, 1272, 1, 0, 0, 0, 249, 1275, 1, 0, 0, 0, 251, 1279, 1, 0, 0, 0, 253, 1284, 1, 0, 0, 0, 255, 1293, 1, 0, 0, 0, 257, 1300, 1, 0, 0, 0, 259, 1307, 1, 0, 0, 0, 261, 1314, 1, 0, 0, 0, 263, 1319, 1, 0, 0, 0, 265, 1325, 1, 0, 0, 0, 267, 1331, 1, 0, 0, 0, 269, 1338, 1, 0, 0, 0, 271, 1343, 1, 0, 0, 0, 273, 1349, 1, 0, 0, 0, 275, 1354, 1, 0, 0, 0, 277, 1361, 1, 0, 0, 0, 279, 1368, 1, 0, 0, 0, 281, 1373, 1, 0, 0, 0, 283, 1381, 1, 0, 0, 0, 285, 1389, 1, 0, 0, 0, 287, 1397, 1, 0, 0, 0, 289, 1411, 1, 0, 0, 0, 291, 1425, 1, 0, 0, 0, 293, 1439, 1, 0, 0, 0, 295, 1445, 1, 0, 0, 0, 297, 1452, 1, 0, 0, 0, 299, 1460, 1, 0, 0, 0, 301, 1467, 1, 0, 0, 0, 303, 1472, 1, 0, 0, 0, 305, 1478, 1, 0, 0, 0, 307, 1487, 1, 0, 0, 0, 309, 1495, 1, 0, 0, 0, 311, 1500, 1, 0, 0, 0, 313, 1504, 1, 0, 0, 0, 315, 1511, 1, 0, 0, 0, 317, 1517, 1, 0, 0, 0, 319, 1524, 1, 0, 0, 0, 321, 1531, 1, 0, 0, 0, 323, 1541, 1, 0, 0, 0, 325, 1545, 1, 0, 0, 0, 327, 1555, 1, 0, 0, 0, 329, 1569, 1, 0, 0, 0, 331, 1579, 1, 0, 0, 0, 333, 1586, 1, 0, 0, 0, 335, 1588, 1, 0, 0, 0, 337, 1590, 1, 0, 0, 0, 339, 1592, 1, 0, 0, 0, 341, 1594, 1, 0, 0, 0, 343, 1596, 1, 0, 0, 0, 345, 1598, 1, 0, 0, 0, 347, 1600, 1, 0, 0, 0, 349, 1602, 1, 0, 0, 0, 351, 1604, 1, 0, 0, 0, 353, 1606, 1, 0, 0, 0, 355, 1608, 1, 0, 0, 0, 357, 1610, 1, 0, 0, 0, 359, 1612, 1, 0, 0, 0, 361, 1614, 1, 0, 0, 0, 363, 1617, 1, 0, 0, 0, 365, 1619, 1, 0, 0, 0, 367, 1622, 1, 0, 0, 0, 369, 1624, 1, 0, 0, 0, 371, 1627, 1, 0, 0, 0, 373, 1632, 1, 0, 0, 0, 375, 1639, 1, 0, 0, 0, 377, 1644, 1, 0, 0, 0, 379, 1651, 1, 0, 0, 0, 381, 1656, 1, 0, 0, 0, 383, 1663, 1, 0, 0, 0, 385, 1665, 1, 0, 0, 0, 387, 1667, 1, 0, 0, 0, 389, 1669, 1, 0, 0, 0, 391, 1673, 1, 0, 0, 0, 393, 1676, 1, 0, 0, 0, 395, 1681, 1, 0, 0, 0, 397, 1687, 1, 0, 0, 0, 399, 1693, 1, 0, 0, 0, 401, 1734, 1, 0, 0, 0, 403, 1738, 1, 0, 0, 0, 405, 1742, 1, 0, 0, 0, 407, 1752, 1, 0, 0, 0, 409, 1762, 1, 0, 0, 0, 411, 1767, 1, 0, 0, 0, 413, 1778, 1, 0, 0, 0, 415, 1789, 1, 0, 0, 0, 417, 1795, 1, 0, 0, 0, 419, 1810, 1, 0, 0, 0, 421, 1821, 1, 0, 0, 0, 423, 1830, 1, 0, 0, 0, 425, 1832, 1, 0, 0, 0, 427, 1834, 1, 0, 0, 0, 429, 1836, 1, 0, 0, 0, 431, 1841, 1, 0, 0, 0, 433, 1846, 1, 0, 0, 0, 435, 1848, 1, 0, 0, 0, 437, 
1850, 1, 0, 0, 0, 439, 1856, 1, 0, 0, 0, 441, 1862, 1, 0, 0, 0, 443, 1870, 1, 0, 0, 0, 445, 1877, 1, 0, 0, 0, 447, 1888, 1, 0, 0, 0, 449, 1895, 1, 0, 0, 0, 451, 1906, 1, 0, 0, 0, 453, 1913, 1, 0, 0, 0, 455, 456, 5, 47, 0, 0, 456, 457, 5, 42, 0, 0, 457, 458, 5, 43, 0, 0, 458, 2, 1, 0, 0, 0, 459, 460, 5, 42, 0, 0, 460, 461, 5, 47, 0, 0, 461, 4, 1, 0, 0, 0, 462, 463, 5, 64, 0, 0, 463, 6, 1, 0, 0, 0, 464, 465, 3, 355, 177, 0, 465, 471, 3, 425, 212, 0, 466, 470, 3, 425, 212, 0, 467, 470, 3, 427, 213, 0, 468, 470, 3, 435, 217, 0, 469, 466, 1, 0, 0, 0, 469, 467, 1, 0, 0, 0, 469, 468, 1, 0, 0, 0, 470, 473, 1, 0, 0, 0, 471, 469, 1, 0, 0, 0, 471, 472, 1, 0, 0, 0, 472, 8, 1, 0, 0, 0, 473, 471, 1, 0, 0, 0, 474, 475, 7, 0, 0, 0, 475, 476, 7, 1, 0, 0, 476, 477, 7, 1, 0, 0, 477, 478, 7, 2, 0, 0, 478, 479, 7, 3, 0, 0, 479, 480, 7, 4, 0, 0, 480, 481, 7, 5, 0, 0, 481, 10, 1, 0, 0, 0, 482, 483, 7, 0, 0, 0, 483, 484, 7, 6, 0, 0, 484, 485, 7, 6, 0, 0, 485, 12, 1, 0, 0, 0, 486, 487, 7, 0, 0, 0, 487, 488, 7, 6, 0, 0, 488, 489, 7, 7, 0, 0, 489, 490, 7, 8, 0, 0, 490, 491, 7, 4, 0, 0, 491, 14, 1, 0, 0, 0, 492, 493, 7, 0, 0, 0, 493, 494, 7, 9, 0, 0, 494, 495, 7, 9, 0, 0, 495, 16, 1, 0, 0, 0, 496, 497, 7, 0, 0, 0, 497, 498, 7, 9, 0, 0, 498, 499, 7, 5, 0, 0, 499, 500, 7, 10, 0, 0, 500, 501, 7, 11, 0, 0, 501, 18, 1, 0, 0, 0, 502, 503, 7, 0, 0, 0, 503, 504, 7, 9, 0, 0, 504, 505, 7, 12, 0, 0, 505, 506, 7, 0, 0, 0, 506, 507, 7, 13, 0, 0, 507, 508, 7, 14, 0, 0, 508, 20, 1, 0, 0, 0, 509, 510, 7, 0, 0, 0, 510, 511, 7, 4, 0, 0, 511, 512, 7, 1, 0, 0, 512, 513, 7, 10, 0, 0, 513, 514, 7, 14, 0, 0, 514, 515, 7, 5, 0, 0, 515, 516, 7, 2, 0, 0, 516, 517, 7, 11, 0, 0, 517, 518, 7, 14, 0, 0, 518, 22, 1, 0, 0, 0, 519, 520, 7, 0, 0, 0, 520, 521, 7, 4, 0, 0, 521, 522, 7, 6, 0, 0, 522, 24, 1, 0, 0, 0, 523, 524, 7, 0, 0, 0, 524, 525, 7, 14, 0, 0, 525, 26, 1, 0, 0, 0, 526, 527, 7, 0, 0, 0, 527, 528, 7, 14, 0, 0, 528, 529, 7, 1, 0, 0, 529, 28, 1, 0, 0, 0, 530, 531, 5, 97, 0, 0, 531, 532, 5, 114, 0, 0, 532, 533, 5, 114, 0, 0, 533, 534, 5, 97, 0, 0, 534, 535, 5, 121, 0, 0, 535, 536, 5, 95, 0, 0, 536, 537, 5, 99, 0, 0, 537, 538, 5, 111, 0, 0, 538, 539, 5, 108, 0, 0, 539, 540, 5, 108, 0, 0, 540, 541, 5, 101, 0, 0, 541, 542, 5, 99, 0, 0, 542, 543, 5, 116, 0, 0, 543, 30, 1, 0, 0, 0, 544, 545, 7, 15, 0, 0, 545, 546, 7, 10, 0, 0, 546, 547, 7, 5, 0, 0, 547, 548, 7, 12, 0, 0, 548, 549, 7, 10, 0, 0, 549, 550, 7, 10, 0, 0, 550, 551, 7, 4, 0, 0, 551, 32, 1, 0, 0, 0, 552, 553, 7, 15, 0, 0, 553, 554, 7, 13, 0, 0, 554, 34, 1, 0, 0, 0, 555, 556, 7, 1, 0, 0, 556, 557, 7, 0, 0, 0, 557, 558, 7, 1, 0, 0, 558, 559, 7, 16, 0, 0, 559, 560, 7, 10, 0, 0, 560, 36, 1, 0, 0, 0, 561, 562, 7, 1, 0, 0, 562, 563, 7, 0, 0, 0, 563, 564, 7, 14, 0, 0, 564, 565, 7, 10, 0, 0, 565, 38, 1, 0, 0, 0, 566, 567, 7, 1, 0, 0, 567, 568, 7, 0, 0, 0, 568, 569, 7, 14, 0, 0, 569, 570, 7, 1, 0, 0, 570, 571, 7, 0, 0, 0, 571, 572, 7, 6, 0, 0, 572, 573, 7, 10, 0, 0, 573, 40, 1, 0, 0, 0, 574, 575, 7, 1, 0, 0, 575, 576, 7, 0, 0, 0, 576, 577, 7, 14, 0, 0, 577, 578, 7, 5, 0, 0, 578, 42, 1, 0, 0, 0, 579, 580, 7, 1, 0, 0, 580, 581, 7, 2, 0, 0, 581, 582, 7, 9, 0, 0, 582, 583, 7, 9, 0, 0, 583, 584, 7, 10, 0, 0, 584, 585, 7, 1, 0, 0, 585, 586, 7, 5, 0, 0, 586, 587, 7, 8, 0, 0, 587, 588, 7, 2, 0, 0, 588, 589, 7, 4, 0, 0, 589, 44, 1, 0, 0, 0, 590, 591, 7, 1, 0, 0, 591, 592, 7, 2, 0, 0, 592, 593, 7, 7, 0, 0, 593, 594, 7, 7, 0, 0, 594, 595, 7, 10, 0, 0, 595, 596, 7, 4, 0, 0, 596, 597, 7, 5, 0, 0, 597, 46, 1, 0, 0, 0, 598, 599, 5, 99, 0, 0, 599, 600, 5, 111, 0, 0, 600, 601, 5, 117, 0, 0, 601, 602, 5, 110, 0, 
0, 602, 603, 5, 116, 0, 0, 603, 48, 1, 0, 0, 0, 604, 605, 7, 1, 0, 0, 605, 606, 7, 11, 0, 0, 606, 607, 7, 10, 0, 0, 607, 608, 7, 0, 0, 0, 608, 609, 7, 5, 0, 0, 609, 610, 7, 10, 0, 0, 610, 50, 1, 0, 0, 0, 611, 612, 7, 1, 0, 0, 612, 613, 7, 13, 0, 0, 613, 614, 7, 1, 0, 0, 614, 615, 7, 9, 0, 0, 615, 616, 7, 10, 0, 0, 616, 52, 1, 0, 0, 0, 617, 623, 7, 6, 0, 0, 618, 619, 7, 6, 0, 0, 619, 620, 7, 0, 0, 0, 620, 621, 7, 13, 0, 0, 621, 623, 7, 14, 0, 0, 622, 617, 1, 0, 0, 0, 622, 618, 1, 0, 0, 0, 623, 54, 1, 0, 0, 0, 624, 625, 7, 6, 0, 0, 625, 626, 7, 10, 0, 0, 626, 627, 7, 1, 0, 0, 627, 628, 7, 9, 0, 0, 628, 629, 7, 0, 0, 0, 629, 630, 7, 11, 0, 0, 630, 631, 7, 10, 0, 0, 631, 56, 1, 0, 0, 0, 632, 633, 7, 6, 0, 0, 633, 634, 7, 10, 0, 0, 634, 635, 7, 17, 0, 0, 635, 636, 7, 0, 0, 0, 636, 637, 7, 3, 0, 0, 637, 638, 7, 9, 0, 0, 638, 639, 7, 5, 0, 0, 639, 58, 1, 0, 0, 0, 640, 641, 7, 6, 0, 0, 641, 642, 7, 10, 0, 0, 642, 643, 7, 9, 0, 0, 643, 644, 7, 10, 0, 0, 644, 645, 7, 5, 0, 0, 645, 646, 7, 10, 0, 0, 646, 60, 1, 0, 0, 0, 647, 648, 7, 6, 0, 0, 648, 649, 7, 10, 0, 0, 649, 650, 7, 14, 0, 0, 650, 651, 7, 1, 0, 0, 651, 62, 1, 0, 0, 0, 652, 653, 7, 6, 0, 0, 653, 654, 7, 10, 0, 0, 654, 655, 7, 14, 0, 0, 655, 656, 7, 1, 0, 0, 656, 657, 7, 10, 0, 0, 657, 658, 7, 4, 0, 0, 658, 659, 7, 6, 0, 0, 659, 660, 7, 0, 0, 0, 660, 661, 7, 4, 0, 0, 661, 662, 7, 5, 0, 0, 662, 663, 7, 14, 0, 0, 663, 64, 1, 0, 0, 0, 664, 665, 7, 6, 0, 0, 665, 666, 7, 10, 0, 0, 666, 667, 7, 14, 0, 0, 667, 668, 7, 1, 0, 0, 668, 669, 7, 11, 0, 0, 669, 670, 7, 8, 0, 0, 670, 671, 7, 15, 0, 0, 671, 672, 7, 10, 0, 0, 672, 66, 1, 0, 0, 0, 673, 674, 7, 6, 0, 0, 674, 675, 7, 8, 0, 0, 675, 676, 7, 14, 0, 0, 676, 677, 7, 5, 0, 0, 677, 678, 7, 8, 0, 0, 678, 679, 7, 4, 0, 0, 679, 680, 7, 1, 0, 0, 680, 681, 7, 5, 0, 0, 681, 68, 1, 0, 0, 0, 682, 683, 7, 6, 0, 0, 683, 684, 7, 11, 0, 0, 684, 685, 7, 2, 0, 0, 685, 686, 7, 18, 0, 0, 686, 70, 1, 0, 0, 0, 687, 688, 7, 10, 0, 0, 688, 689, 7, 9, 0, 0, 689, 690, 7, 10, 0, 0, 690, 691, 7, 7, 0, 0, 691, 692, 7, 10, 0, 0, 692, 693, 7, 4, 0, 0, 693, 694, 7, 5, 0, 0, 694, 695, 7, 2, 0, 0, 695, 696, 7, 17, 0, 0, 696, 72, 1, 0, 0, 0, 697, 698, 7, 10, 0, 0, 698, 699, 7, 9, 0, 0, 699, 700, 7, 10, 0, 0, 700, 701, 7, 7, 0, 0, 701, 702, 7, 10, 0, 0, 702, 703, 7, 4, 0, 0, 703, 704, 7, 5, 0, 0, 704, 705, 7, 14, 0, 0, 705, 74, 1, 0, 0, 0, 706, 707, 7, 10, 0, 0, 707, 708, 7, 9, 0, 0, 708, 709, 7, 14, 0, 0, 709, 710, 7, 10, 0, 0, 710, 76, 1, 0, 0, 0, 711, 712, 7, 10, 0, 0, 712, 713, 7, 4, 0, 0, 713, 714, 7, 6, 0, 0, 714, 78, 1, 0, 0, 0, 715, 716, 7, 10, 0, 0, 716, 717, 7, 14, 0, 0, 717, 718, 3, 435, 217, 0, 718, 719, 7, 14, 0, 0, 719, 720, 7, 16, 0, 0, 720, 721, 7, 0, 0, 0, 721, 722, 7, 11, 0, 0, 722, 723, 7, 6, 0, 0, 723, 724, 7, 14, 0, 0, 724, 80, 1, 0, 0, 0, 725, 726, 7, 10, 0, 0, 726, 727, 7, 14, 0, 0, 727, 728, 3, 435, 217, 0, 728, 729, 7, 11, 0, 0, 729, 730, 7, 10, 0, 0, 730, 731, 7, 18, 0, 0, 731, 732, 7, 9, 0, 0, 732, 733, 7, 8, 0, 0, 733, 734, 7, 1, 0, 0, 734, 735, 7, 0, 0, 0, 735, 736, 7, 14, 0, 0, 736, 82, 1, 0, 0, 0, 737, 738, 7, 10, 0, 0, 738, 739, 7, 19, 0, 0, 739, 740, 7, 8, 0, 0, 740, 741, 7, 14, 0, 0, 741, 742, 7, 5, 0, 0, 742, 743, 7, 14, 0, 0, 743, 84, 1, 0, 0, 0, 744, 745, 7, 10, 0, 0, 745, 746, 7, 19, 0, 0, 746, 747, 7, 5, 0, 0, 747, 748, 7, 11, 0, 0, 748, 749, 7, 0, 0, 0, 749, 750, 7, 1, 0, 0, 750, 751, 7, 5, 0, 0, 751, 86, 1, 0, 0, 0, 752, 753, 7, 17, 0, 0, 753, 754, 7, 8, 0, 0, 754, 755, 7, 10, 0, 0, 755, 756, 7, 9, 0, 0, 756, 757, 7, 6, 0, 0, 757, 758, 7, 14, 0, 0, 758, 88, 1, 0, 0, 0, 759, 760, 7, 17, 0, 0, 
760, 761, 7, 8, 0, 0, 761, 762, 7, 11, 0, 0, 762, 763, 7, 14, 0, 0, 763, 764, 7, 5, 0, 0, 764, 90, 1, 0, 0, 0, 765, 766, 7, 17, 0, 0, 766, 767, 7, 2, 0, 0, 767, 768, 7, 11, 0, 0, 768, 769, 7, 1, 0, 0, 769, 770, 7, 10, 0, 0, 770, 92, 1, 0, 0, 0, 771, 772, 3, 91, 45, 0, 772, 773, 3, 435, 217, 0, 773, 774, 3, 123, 61, 0, 774, 94, 1, 0, 0, 0, 775, 776, 3, 91, 45, 0, 776, 777, 3, 435, 217, 0, 777, 778, 3, 205, 102, 0, 778, 779, 3, 435, 217, 0, 779, 780, 3, 123, 61, 0, 780, 96, 1, 0, 0, 0, 781, 782, 7, 17, 0, 0, 782, 783, 7, 11, 0, 0, 783, 784, 7, 10, 0, 0, 784, 785, 7, 10, 0, 0, 785, 786, 7, 20, 0, 0, 786, 787, 7, 10, 0, 0, 787, 98, 1, 0, 0, 0, 788, 789, 7, 17, 0, 0, 789, 790, 7, 11, 0, 0, 790, 791, 7, 2, 0, 0, 791, 792, 7, 7, 0, 0, 792, 100, 1, 0, 0, 0, 793, 794, 7, 17, 0, 0, 794, 795, 7, 11, 0, 0, 795, 796, 7, 2, 0, 0, 796, 797, 7, 20, 0, 0, 797, 798, 7, 10, 0, 0, 798, 799, 7, 4, 0, 0, 799, 102, 1, 0, 0, 0, 800, 801, 7, 17, 0, 0, 801, 802, 7, 3, 0, 0, 802, 803, 7, 9, 0, 0, 803, 804, 7, 9, 0, 0, 804, 805, 7, 5, 0, 0, 805, 806, 7, 10, 0, 0, 806, 807, 7, 19, 0, 0, 807, 808, 7, 5, 0, 0, 808, 104, 1, 0, 0, 0, 809, 810, 7, 21, 0, 0, 810, 811, 7, 10, 0, 0, 811, 812, 7, 4, 0, 0, 812, 813, 7, 10, 0, 0, 813, 814, 7, 11, 0, 0, 814, 815, 7, 0, 0, 0, 815, 816, 7, 5, 0, 0, 816, 817, 7, 10, 0, 0, 817, 818, 7, 6, 0, 0, 818, 106, 1, 0, 0, 0, 819, 820, 7, 21, 0, 0, 820, 821, 7, 11, 0, 0, 821, 822, 7, 0, 0, 0, 822, 823, 7, 4, 0, 0, 823, 824, 7, 5, 0, 0, 824, 108, 1, 0, 0, 0, 825, 826, 7, 21, 0, 0, 826, 827, 7, 11, 0, 0, 827, 828, 7, 2, 0, 0, 828, 829, 7, 3, 0, 0, 829, 830, 7, 18, 0, 0, 830, 110, 1, 0, 0, 0, 831, 838, 7, 16, 0, 0, 832, 833, 7, 16, 0, 0, 833, 834, 7, 2, 0, 0, 834, 835, 7, 3, 0, 0, 835, 836, 7, 11, 0, 0, 836, 838, 7, 14, 0, 0, 837, 831, 1, 0, 0, 0, 837, 832, 1, 0, 0, 0, 838, 112, 1, 0, 0, 0, 839, 840, 7, 8, 0, 0, 840, 841, 7, 6, 0, 0, 841, 842, 7, 10, 0, 0, 842, 843, 7, 4, 0, 0, 843, 844, 7, 5, 0, 0, 844, 845, 7, 8, 0, 0, 845, 846, 7, 17, 0, 0, 846, 847, 7, 8, 0, 0, 847, 848, 7, 10, 0, 0, 848, 849, 7, 6, 0, 0, 849, 114, 1, 0, 0, 0, 850, 851, 7, 8, 0, 0, 851, 852, 7, 6, 0, 0, 852, 853, 7, 10, 0, 0, 853, 854, 7, 4, 0, 0, 854, 855, 7, 5, 0, 0, 855, 856, 7, 8, 0, 0, 856, 857, 7, 5, 0, 0, 857, 858, 7, 13, 0, 0, 858, 116, 1, 0, 0, 0, 859, 860, 7, 8, 0, 0, 860, 861, 7, 17, 0, 0, 861, 118, 1, 0, 0, 0, 862, 863, 7, 8, 0, 0, 863, 864, 7, 4, 0, 0, 864, 120, 1, 0, 0, 0, 865, 866, 7, 8, 0, 0, 866, 867, 7, 4, 0, 0, 867, 868, 7, 1, 0, 0, 868, 869, 7, 11, 0, 0, 869, 870, 7, 10, 0, 0, 870, 871, 7, 7, 0, 0, 871, 872, 7, 10, 0, 0, 872, 873, 7, 4, 0, 0, 873, 874, 7, 5, 0, 0, 874, 122, 1, 0, 0, 0, 875, 876, 7, 8, 0, 0, 876, 877, 7, 4, 0, 0, 877, 878, 7, 6, 0, 0, 878, 879, 7, 10, 0, 0, 879, 880, 7, 19, 0, 0, 880, 124, 1, 0, 0, 0, 881, 882, 7, 8, 0, 0, 882, 883, 7, 4, 0, 0, 883, 884, 7, 6, 0, 0, 884, 885, 7, 10, 0, 0, 885, 886, 7, 19, 0, 0, 886, 887, 7, 10, 0, 0, 887, 888, 7, 14, 0, 0, 888, 126, 1, 0, 0, 0, 889, 890, 7, 8, 0, 0, 890, 891, 7, 4, 0, 0, 891, 892, 7, 14, 0, 0, 892, 893, 7, 10, 0, 0, 893, 894, 7, 11, 0, 0, 894, 895, 7, 5, 0, 0, 895, 128, 1, 0, 0, 0, 896, 897, 7, 8, 0, 0, 897, 898, 7, 4, 0, 0, 898, 899, 7, 5, 0, 0, 899, 900, 7, 2, 0, 0, 900, 130, 1, 0, 0, 0, 901, 902, 7, 8, 0, 0, 902, 903, 7, 14, 0, 0, 903, 132, 1, 0, 0, 0, 904, 905, 7, 22, 0, 0, 905, 906, 7, 14, 0, 0, 906, 907, 7, 2, 0, 0, 907, 908, 7, 4, 0, 0, 908, 134, 1, 0, 0, 0, 909, 910, 7, 22, 0, 0, 910, 911, 7, 2, 0, 0, 911, 912, 7, 8, 0, 0, 912, 913, 7, 4, 0, 0, 913, 136, 1, 0, 0, 0, 914, 915, 7, 23, 0, 0, 915, 916, 7, 10, 0, 0, 916, 917, 7, 13, 0, 
0, 917, 138, 1, 0, 0, 0, 918, 919, 7, 23, 0, 0, 919, 920, 7, 10, 0, 0, 920, 921, 7, 13, 0, 0, 921, 922, 7, 2, 0, 0, 922, 923, 7, 17, 0, 0, 923, 140, 1, 0, 0, 0, 924, 925, 7, 23, 0, 0, 925, 926, 7, 10, 0, 0, 926, 927, 7, 13, 0, 0, 927, 928, 7, 14, 0, 0, 928, 142, 1, 0, 0, 0, 929, 930, 7, 9, 0, 0, 930, 931, 7, 0, 0, 0, 931, 932, 7, 14, 0, 0, 932, 933, 7, 5, 0, 0, 933, 144, 1, 0, 0, 0, 934, 935, 7, 9, 0, 0, 935, 936, 7, 10, 0, 0, 936, 937, 7, 17, 0, 0, 937, 938, 7, 5, 0, 0, 938, 146, 1, 0, 0, 0, 939, 940, 7, 9, 0, 0, 940, 941, 7, 8, 0, 0, 941, 942, 7, 17, 0, 0, 942, 943, 7, 10, 0, 0, 943, 944, 7, 5, 0, 0, 944, 945, 7, 8, 0, 0, 945, 946, 7, 7, 0, 0, 946, 947, 7, 10, 0, 0, 947, 148, 1, 0, 0, 0, 948, 949, 7, 9, 0, 0, 949, 950, 7, 8, 0, 0, 950, 951, 7, 7, 0, 0, 951, 952, 7, 8, 0, 0, 952, 953, 7, 5, 0, 0, 953, 150, 1, 0, 0, 0, 954, 955, 7, 9, 0, 0, 955, 956, 7, 2, 0, 0, 956, 957, 7, 1, 0, 0, 957, 958, 7, 0, 0, 0, 958, 959, 7, 9, 0, 0, 959, 152, 1, 0, 0, 0, 960, 961, 7, 9, 0, 0, 961, 962, 7, 2, 0, 0, 962, 963, 7, 1, 0, 0, 963, 964, 7, 23, 0, 0, 964, 154, 1, 0, 0, 0, 965, 966, 7, 7, 0, 0, 966, 967, 7, 0, 0, 0, 967, 968, 7, 19, 0, 0, 968, 969, 7, 24, 0, 0, 969, 970, 7, 0, 0, 0, 970, 971, 7, 9, 0, 0, 971, 972, 7, 3, 0, 0, 972, 973, 7, 10, 0, 0, 973, 156, 1, 0, 0, 0, 974, 975, 7, 7, 0, 0, 975, 976, 7, 10, 0, 0, 976, 977, 7, 11, 0, 0, 977, 978, 7, 21, 0, 0, 978, 979, 7, 10, 0, 0, 979, 158, 1, 0, 0, 0, 980, 989, 7, 7, 0, 0, 981, 982, 7, 7, 0, 0, 982, 983, 7, 8, 0, 0, 983, 984, 7, 4, 0, 0, 984, 985, 7, 3, 0, 0, 985, 986, 7, 5, 0, 0, 986, 987, 7, 10, 0, 0, 987, 989, 7, 14, 0, 0, 988, 980, 1, 0, 0, 0, 988, 981, 1, 0, 0, 0, 989, 160, 1, 0, 0, 0, 990, 991, 7, 7, 0, 0, 991, 992, 7, 8, 0, 0, 992, 993, 7, 4, 0, 0, 993, 994, 7, 24, 0, 0, 994, 995, 7, 0, 0, 0, 995, 996, 7, 9, 0, 0, 996, 997, 7, 3, 0, 0, 997, 998, 7, 10, 0, 0, 998, 162, 1, 0, 0, 0, 999, 1000, 7, 7, 0, 0, 1000, 1001, 7, 2, 0, 0, 1001, 1002, 7, 6, 0, 0, 1002, 1003, 7, 8, 0, 0, 1003, 1004, 7, 17, 0, 0, 1004, 1005, 7, 13, 0, 0, 1005, 164, 1, 0, 0, 0, 1006, 1007, 7, 7, 0, 0, 1007, 1008, 7, 11, 0, 0, 1008, 1009, 3, 435, 217, 0, 1009, 1010, 7, 1, 0, 0, 1010, 1011, 7, 2, 0, 0, 1011, 1012, 7, 3, 0, 0, 1012, 1013, 7, 4, 0, 0, 1013, 1014, 7, 5, 0, 0, 1014, 1015, 7, 10, 0, 0, 1015, 1016, 7, 11, 0, 0, 1016, 166, 1, 0, 0, 0, 1017, 1018, 7, 4, 0, 0, 1018, 1019, 7, 0, 0, 0, 1019, 1020, 7, 7, 0, 0, 1020, 1021, 7, 10, 0, 0, 1021, 1022, 7, 14, 0, 0, 1022, 1023, 7, 18, 0, 0, 1023, 1024, 7, 0, 0, 0, 1024, 1025, 7, 1, 0, 0, 1025, 1026, 7, 10, 0, 0, 1026, 168, 1, 0, 0, 0, 1027, 1028, 7, 4, 0, 0, 1028, 1029, 7, 0, 0, 0, 1029, 1030, 7, 7, 0, 0, 1030, 1031, 7, 10, 0, 0, 1031, 1032, 7, 14, 0, 0, 1032, 1033, 7, 18, 0, 0, 1033, 1034, 7, 0, 0, 0, 1034, 1035, 7, 1, 0, 0, 1035, 1036, 7, 10, 0, 0, 1036, 1037, 7, 14, 0, 0, 1037, 170, 1, 0, 0, 0, 1038, 1039, 7, 4, 0, 0, 1039, 1040, 7, 10, 0, 0, 1040, 1041, 7, 14, 0, 0, 1041, 1042, 7, 5, 0, 0, 1042, 1043, 7, 10, 0, 0, 1043, 1044, 7, 6, 0, 0, 1044, 172, 1, 0, 0, 0, 1045, 1046, 7, 4, 0, 0, 1046, 1047, 7, 2, 0, 0, 1047, 174, 1, 0, 0, 0, 1048, 1049, 7, 4, 0, 0, 1049, 1050, 7, 2, 0, 0, 1050, 1051, 7, 5, 0, 0, 1051, 176, 1, 0, 0, 0, 1052, 1053, 7, 4, 0, 0, 1053, 1054, 7, 3, 0, 0, 1054, 1055, 7, 9, 0, 0, 1055, 1056, 7, 9, 0, 0, 1056, 1057, 7, 14, 0, 0, 1057, 178, 1, 0, 0, 0, 1058, 1059, 7, 2, 0, 0, 1059, 1060, 7, 17, 0, 0, 1060, 1061, 7, 17, 0, 0, 1061, 1062, 7, 14, 0, 0, 1062, 1063, 7, 10, 0, 0, 1063, 1064, 7, 5, 0, 0, 1064, 180, 1, 0, 0, 0, 1065, 1066, 7, 2, 0, 0, 1066, 1067, 7, 17, 0, 0, 1067, 182, 1, 0, 0, 0, 1068, 1069, 7, 2, 0, 0, 
1069, 1070, 7, 4, 0, 0, 1070, 184, 1, 0, 0, 0, 1071, 1072, 7, 2, 0, 0, 1072, 1073, 7, 4, 0, 0, 1073, 1074, 7, 9, 0, 0, 1074, 1075, 7, 13, 0, 0, 1075, 186, 1, 0, 0, 0, 1076, 1077, 7, 2, 0, 0, 1077, 1078, 7, 11, 0, 0, 1078, 188, 1, 0, 0, 0, 1079, 1080, 7, 2, 0, 0, 1080, 1081, 7, 11, 0, 0, 1081, 1082, 7, 6, 0, 0, 1082, 1083, 7, 10, 0, 0, 1083, 1084, 7, 11, 0, 0, 1084, 190, 1, 0, 0, 0, 1085, 1086, 7, 2, 0, 0, 1086, 1087, 7, 3, 0, 0, 1087, 1088, 7, 5, 0, 0, 1088, 1089, 7, 10, 0, 0, 1089, 1090, 7, 11, 0, 0, 1090, 192, 1, 0, 0, 0, 1091, 1092, 7, 2, 0, 0, 1092, 1093, 7, 24, 0, 0, 1093, 1094, 7, 10, 0, 0, 1094, 1095, 7, 11, 0, 0, 1095, 1096, 7, 11, 0, 0, 1096, 1097, 7, 8, 0, 0, 1097, 1098, 7, 6, 0, 0, 1098, 1099, 7, 10, 0, 0, 1099, 194, 1, 0, 0, 0, 1100, 1101, 7, 18, 0, 0, 1101, 1102, 7, 0, 0, 0, 1102, 1103, 7, 14, 0, 0, 1103, 1104, 7, 14, 0, 0, 1104, 1105, 7, 12, 0, 0, 1105, 1106, 7, 2, 0, 0, 1106, 1107, 7, 11, 0, 0, 1107, 1108, 7, 6, 0, 0, 1108, 196, 1, 0, 0, 0, 1109, 1110, 7, 18, 0, 0, 1110, 1111, 7, 0, 0, 0, 1111, 1112, 7, 5, 0, 0, 1112, 1113, 7, 1, 0, 0, 1113, 1114, 7, 16, 0, 0, 1114, 198, 1, 0, 0, 0, 1115, 1116, 7, 18, 0, 0, 1116, 1117, 7, 10, 0, 0, 1117, 1118, 7, 11, 0, 0, 1118, 200, 1, 0, 0, 0, 1119, 1120, 3, 447, 223, 0, 1120, 1121, 3, 435, 217, 0, 1121, 1122, 3, 125, 62, 0, 1122, 202, 1, 0, 0, 0, 1123, 1124, 3, 447, 223, 0, 1124, 1125, 3, 435, 217, 0, 1125, 1126, 3, 205, 102, 0, 1126, 1127, 3, 435, 217, 0, 1127, 1128, 3, 123, 61, 0, 1128, 204, 1, 0, 0, 0, 1129, 1130, 7, 18, 0, 0, 1130, 1131, 7, 11, 0, 0, 1131, 1132, 7, 8, 0, 0, 1132, 1133, 7, 7, 0, 0, 1133, 1134, 7, 0, 0, 0, 1134, 1135, 7, 11, 0, 0, 1135, 1136, 7, 13, 0, 0, 1136, 206, 1, 0, 0, 0, 1137, 1138, 7, 18, 0, 0, 1138, 1139, 7, 3, 0, 0, 1139, 1140, 7, 5, 0, 0, 1140, 208, 1, 0, 0, 0, 1141, 1142, 7, 11, 0, 0, 1142, 1143, 7, 10, 0, 0, 1143, 1144, 7, 21, 0, 0, 1144, 1145, 7, 8, 0, 0, 1145, 1146, 7, 2, 0, 0, 1146, 1147, 7, 4, 0, 0, 1147, 210, 1, 0, 0, 0, 1148, 1149, 7, 11, 0, 0, 1149, 1150, 7, 10, 0, 0, 1150, 1151, 7, 21, 0, 0, 1151, 1152, 7, 8, 0, 0, 1152, 1153, 7, 2, 0, 0, 1153, 1154, 7, 4, 0, 0, 1154, 1155, 7, 14, 0, 0, 1155, 212, 1, 0, 0, 0, 1156, 1157, 7, 11, 0, 0, 1157, 1158, 7, 10, 0, 0, 1158, 1159, 7, 7, 0, 0, 1159, 1160, 7, 2, 0, 0, 1160, 1161, 7, 24, 0, 0, 1161, 1162, 7, 10, 0, 0, 1162, 214, 1, 0, 0, 0, 1163, 1164, 7, 11, 0, 0, 1164, 1165, 7, 10, 0, 0, 1165, 1166, 7, 5, 0, 0, 1166, 1167, 7, 3, 0, 0, 1167, 1168, 7, 11, 0, 0, 1168, 1169, 7, 4, 0, 0, 1169, 1170, 7, 8, 0, 0, 1170, 1171, 7, 4, 0, 0, 1171, 1172, 7, 21, 0, 0, 1172, 216, 1, 0, 0, 0, 1173, 1174, 7, 11, 0, 0, 1174, 1175, 7, 10, 0, 0, 1175, 1176, 7, 24, 0, 0, 1176, 1177, 7, 2, 0, 0, 1177, 1178, 7, 23, 0, 0, 1178, 1179, 7, 10, 0, 0, 1179, 218, 1, 0, 0, 0, 1180, 1181, 7, 11, 0, 0, 1181, 1182, 7, 2, 0, 0, 1182, 1183, 7, 9, 0, 0, 1183, 1184, 7, 10, 0, 0, 1184, 220, 1, 0, 0, 0, 1185, 1186, 7, 11, 0, 0, 1186, 1187, 7, 2, 0, 0, 1187, 1188, 7, 9, 0, 0, 1188, 1189, 7, 10, 0, 0, 1189, 1190, 7, 14, 0, 0, 1190, 222, 1, 0, 0, 0, 1191, 1192, 7, 11, 0, 0, 1192, 1193, 7, 2, 0, 0, 1193, 1194, 7, 12, 0, 0, 1194, 224, 1, 0, 0, 0, 1195, 1196, 7, 14, 0, 0, 1196, 1197, 7, 1, 0, 0, 1197, 1198, 7, 16, 0, 0, 1198, 1199, 7, 10, 0, 0, 1199, 1200, 7, 7, 0, 0, 1200, 1201, 7, 0, 0, 0, 1201, 226, 1, 0, 0, 0, 1202, 1211, 7, 14, 0, 0, 1203, 1204, 7, 14, 0, 0, 1204, 1205, 7, 10, 0, 0, 1205, 1206, 7, 1, 0, 0, 1206, 1207, 7, 2, 0, 0, 1207, 1208, 7, 4, 0, 0, 1208, 1209, 7, 6, 0, 0, 1209, 1211, 7, 14, 0, 0, 1210, 1202, 1, 0, 0, 0, 1210, 1203, 1, 0, 0, 0, 1211, 228, 1, 0, 0, 0, 1212, 1213, 7, 14, 0, 0, 1213, 
1214, 7, 10, 0, 0, 1214, 1215, 7, 9, 0, 0, 1215, 1216, 7, 10, 0, 0, 1216, 1217, 7, 1, 0, 0, 1217, 1218, 7, 5, 0, 0, 1218, 230, 1, 0, 0, 0, 1219, 1220, 5, 115, 0, 0, 1220, 1221, 5, 101, 0, 0, 1221, 1222, 5, 113, 0, 0, 1222, 1223, 5, 95, 0, 0, 1223, 1224, 5, 116, 0, 0, 1224, 1225, 5, 114, 0, 0, 1225, 1226, 5, 97, 0, 0, 1226, 1227, 5, 110, 0, 0, 1227, 1228, 5, 115, 0, 0, 1228, 1229, 5, 102, 0, 0, 1229, 1230, 5, 111, 0, 0, 1230, 1231, 5, 114, 0, 0, 1231, 1232, 5, 109, 0, 0, 1232, 232, 1, 0, 0, 0, 1233, 1234, 7, 14, 0, 0, 1234, 1235, 7, 10, 0, 0, 1235, 1236, 7, 5, 0, 0, 1236, 234, 1, 0, 0, 0, 1237, 1238, 7, 14, 0, 0, 1238, 1239, 7, 16, 0, 0, 1239, 1240, 7, 0, 0, 0, 1240, 1241, 7, 11, 0, 0, 1241, 1242, 7, 6, 0, 0, 1242, 236, 1, 0, 0, 0, 1243, 1244, 7, 14, 0, 0, 1244, 1245, 7, 16, 0, 0, 1245, 1246, 7, 2, 0, 0, 1246, 1247, 7, 12, 0, 0, 1247, 238, 1, 0, 0, 0, 1248, 1249, 7, 14, 0, 0, 1249, 1250, 7, 5, 0, 0, 1250, 1251, 7, 0, 0, 0, 1251, 1252, 7, 11, 0, 0, 1252, 1253, 7, 5, 0, 0, 1253, 240, 1, 0, 0, 0, 1254, 1255, 7, 5, 0, 0, 1255, 1256, 7, 0, 0, 0, 1256, 1257, 7, 15, 0, 0, 1257, 1258, 7, 9, 0, 0, 1258, 1259, 7, 10, 0, 0, 1259, 242, 1, 0, 0, 0, 1260, 1261, 7, 5, 0, 0, 1261, 1262, 7, 0, 0, 0, 1262, 1263, 7, 15, 0, 0, 1263, 1264, 7, 9, 0, 0, 1264, 1265, 7, 10, 0, 0, 1265, 1266, 7, 14, 0, 0, 1266, 244, 1, 0, 0, 0, 1267, 1268, 7, 5, 0, 0, 1268, 1269, 7, 16, 0, 0, 1269, 1270, 7, 10, 0, 0, 1270, 1271, 7, 4, 0, 0, 1271, 246, 1, 0, 0, 0, 1272, 1273, 7, 5, 0, 0, 1273, 1274, 7, 2, 0, 0, 1274, 248, 1, 0, 0, 0, 1275, 1276, 7, 5, 0, 0, 1276, 1277, 7, 5, 0, 0, 1277, 1278, 7, 9, 0, 0, 1278, 250, 1, 0, 0, 0, 1279, 1280, 7, 5, 0, 0, 1280, 1281, 7, 13, 0, 0, 1281, 1282, 7, 18, 0, 0, 1282, 1283, 7, 10, 0, 0, 1283, 252, 1, 0, 0, 0, 1284, 1285, 7, 3, 0, 0, 1285, 1286, 7, 4, 0, 0, 1286, 1287, 7, 17, 0, 0, 1287, 1288, 7, 11, 0, 0, 1288, 1289, 7, 10, 0, 0, 1289, 1290, 7, 10, 0, 0, 1290, 1291, 7, 20, 0, 0, 1291, 1292, 7, 10, 0, 0, 1292, 254, 1, 0, 0, 0, 1293, 1294, 7, 3, 0, 0, 1294, 1295, 7, 4, 0, 0, 1295, 1296, 7, 9, 0, 0, 1296, 1297, 7, 2, 0, 0, 1297, 1298, 7, 1, 0, 0, 1298, 1299, 7, 23, 0, 0, 1299, 256, 1, 0, 0, 0, 1300, 1301, 7, 3, 0, 0, 1301, 1302, 7, 18, 0, 0, 1302, 1303, 7, 6, 0, 0, 1303, 1304, 7, 0, 0, 0, 1304, 1305, 7, 5, 0, 0, 1305, 1306, 7, 10, 0, 0, 1306, 258, 1, 0, 0, 0, 1307, 1308, 7, 3, 0, 0, 1308, 1309, 7, 18, 0, 0, 1309, 1310, 7, 14, 0, 0, 1310, 1311, 7, 10, 0, 0, 1311, 1312, 7, 11, 0, 0, 1312, 1313, 7, 5, 0, 0, 1313, 260, 1, 0, 0, 0, 1314, 1315, 7, 3, 0, 0, 1315, 1316, 7, 14, 0, 0, 1316, 1317, 7, 10, 0, 0, 1317, 1318, 7, 11, 0, 0, 1318, 262, 1, 0, 0, 0, 1319, 1320, 7, 3, 0, 0, 1320, 1321, 7, 14, 0, 0, 1321, 1322, 7, 10, 0, 0, 1322, 1323, 7, 11, 0, 0, 1323, 1324, 7, 14, 0, 0, 1324, 264, 1, 0, 0, 0, 1325, 1326, 7, 3, 0, 0, 1326, 1327, 7, 14, 0, 0, 1327, 1328, 7, 8, 0, 0, 1328, 1329, 7, 4, 0, 0, 1329, 1330, 7, 21, 0, 0, 1330, 266, 1, 0, 0, 0, 1331, 1332, 7, 24, 0, 0, 1332, 1333, 7, 0, 0, 0, 1333, 1334, 7, 9, 0, 0, 1334, 1335, 7, 3, 0, 0, 1335, 1336, 7, 10, 0, 0, 1336, 1337, 7, 14, 0, 0, 1337, 268, 1, 0, 0, 0, 1338, 1339, 7, 12, 0, 0, 1339, 1340, 7, 16, 0, 0, 1340, 1341, 7, 10, 0, 0, 1341, 1342, 7, 4, 0, 0, 1342, 270, 1, 0, 0, 0, 1343, 1344, 7, 12, 0, 0, 1344, 1345, 7, 16, 0, 0, 1345, 1346, 7, 10, 0, 0, 1346, 1347, 7, 11, 0, 0, 1347, 1348, 7, 10, 0, 0, 1348, 272, 1, 0, 0, 0, 1349, 1350, 7, 12, 0, 0, 1350, 1351, 7, 8, 0, 0, 1351, 1352, 7, 5, 0, 0, 1352, 1353, 7, 16, 0, 0, 1353, 274, 1, 0, 0, 0, 1354, 1355, 7, 3, 0, 0, 1355, 1356, 7, 4, 0, 0, 1356, 1357, 7, 8, 0, 0, 1357, 1358, 7, 25, 0, 0, 1358, 1359, 7, 3, 0, 
0, 1359, 1360, 7, 10, 0, 0, 1360, 276, 1, 0, 0, 0, 1361, 1362, 7, 3, 0, 0, 1362, 1363, 7, 4, 0, 0, 1363, 1364, 7, 4, 0, 0, 1364, 1365, 7, 10, 0, 0, 1365, 1366, 7, 14, 0, 0, 1366, 1367, 7, 5, 0, 0, 1367, 278, 1, 0, 0, 0, 1368, 1369, 7, 3, 0, 0, 1369, 1370, 7, 3, 0, 0, 1370, 1371, 7, 8, 0, 0, 1371, 1372, 7, 6, 0, 0, 1372, 280, 1, 0, 0, 0, 1373, 1375, 3, 15, 7, 0, 1374, 1376, 3, 415, 207, 0, 1375, 1374, 1, 0, 0, 0, 1376, 1377, 1, 0, 0, 0, 1377, 1375, 1, 0, 0, 0, 1377, 1378, 1, 0, 0, 0, 1378, 1379, 1, 0, 0, 0, 1379, 1380, 3, 449, 224, 0, 1380, 282, 1, 0, 0, 0, 1381, 1383, 3, 113, 56, 0, 1382, 1384, 3, 415, 207, 0, 1383, 1382, 1, 0, 0, 0, 1384, 1385, 1, 0, 0, 0, 1385, 1383, 1, 0, 0, 0, 1385, 1386, 1, 0, 0, 0, 1386, 1387, 1, 0, 0, 0, 1387, 1388, 3, 445, 222, 0, 1388, 284, 1, 0, 0, 0, 1389, 1391, 3, 195, 97, 0, 1390, 1392, 3, 415, 207, 0, 1391, 1390, 1, 0, 0, 0, 1392, 1393, 1, 0, 0, 0, 1393, 1391, 1, 0, 0, 0, 1393, 1394, 1, 0, 0, 0, 1394, 1395, 1, 0, 0, 0, 1395, 1396, 3, 443, 221, 0, 1396, 286, 1, 0, 0, 0, 1397, 1399, 3, 451, 225, 0, 1398, 1400, 3, 415, 207, 0, 1399, 1398, 1, 0, 0, 0, 1400, 1401, 1, 0, 0, 0, 1401, 1399, 1, 0, 0, 0, 1401, 1402, 1, 0, 0, 0, 1402, 1403, 1, 0, 0, 0, 1403, 1405, 3, 441, 220, 0, 1404, 1406, 3, 415, 207, 0, 1405, 1404, 1, 0, 0, 0, 1406, 1407, 1, 0, 0, 0, 1407, 1405, 1, 0, 0, 0, 1407, 1408, 1, 0, 0, 0, 1408, 1409, 1, 0, 0, 0, 1409, 1410, 3, 195, 97, 0, 1410, 288, 1, 0, 0, 0, 1411, 1413, 3, 439, 219, 0, 1412, 1414, 3, 415, 207, 0, 1413, 1412, 1, 0, 0, 0, 1414, 1415, 1, 0, 0, 0, 1415, 1413, 1, 0, 0, 0, 1415, 1416, 1, 0, 0, 0, 1416, 1417, 1, 0, 0, 0, 1417, 1419, 3, 453, 226, 0, 1418, 1420, 3, 415, 207, 0, 1419, 1418, 1, 0, 0, 0, 1420, 1421, 1, 0, 0, 0, 1421, 1419, 1, 0, 0, 0, 1421, 1422, 1, 0, 0, 0, 1422, 1423, 1, 0, 0, 0, 1423, 1424, 3, 195, 97, 0, 1424, 290, 1, 0, 0, 0, 1425, 1427, 3, 145, 72, 0, 1426, 1428, 3, 415, 207, 0, 1427, 1426, 1, 0, 0, 0, 1428, 1429, 1, 0, 0, 0, 1429, 1427, 1, 0, 0, 0, 1429, 1430, 1, 0, 0, 0, 1430, 1431, 1, 0, 0, 0, 1431, 1433, 3, 191, 95, 0, 1432, 1434, 3, 415, 207, 0, 1433, 1432, 1, 0, 0, 0, 1434, 1435, 1, 0, 0, 0, 1435, 1433, 1, 0, 0, 0, 1435, 1436, 1, 0, 0, 0, 1436, 1437, 1, 0, 0, 0, 1437, 1438, 3, 135, 67, 0, 1438, 292, 1, 0, 0, 0, 1439, 1440, 7, 0, 0, 0, 1440, 1441, 7, 11, 0, 0, 1441, 1442, 7, 11, 0, 0, 1442, 1443, 7, 0, 0, 0, 1443, 1444, 7, 13, 0, 0, 1444, 294, 1, 0, 0, 0, 1445, 1446, 7, 15, 0, 0, 1446, 1447, 7, 8, 0, 0, 1447, 1448, 7, 4, 0, 0, 1448, 1449, 7, 0, 0, 0, 1449, 1450, 7, 11, 0, 0, 1450, 1451, 7, 13, 0, 0, 1451, 296, 1, 0, 0, 0, 1452, 1453, 7, 15, 0, 0, 1453, 1454, 7, 2, 0, 0, 1454, 1455, 7, 2, 0, 0, 1455, 1456, 7, 9, 0, 0, 1456, 1457, 7, 10, 0, 0, 1457, 1458, 7, 0, 0, 0, 1458, 1459, 7, 4, 0, 0, 1459, 298, 1, 0, 0, 0, 1460, 1461, 7, 6, 0, 0, 1461, 1462, 7, 2, 0, 0, 1462, 1463, 7, 3, 0, 0, 1463, 1464, 7, 15, 0, 0, 1464, 1465, 7, 9, 0, 0, 1465, 1466, 7, 10, 0, 0, 1466, 300, 1, 0, 0, 0, 1467, 1468, 7, 10, 0, 0, 1468, 1469, 7, 4, 0, 0, 1469, 1470, 7, 3, 0, 0, 1470, 1471, 7, 7, 0, 0, 1471, 302, 1, 0, 0, 0, 1472, 1473, 7, 17, 0, 0, 1473, 1474, 7, 9, 0, 0, 1474, 1475, 7, 2, 0, 0, 1475, 1476, 7, 0, 0, 0, 1476, 1477, 7, 5, 0, 0, 1477, 304, 1, 0, 0, 0, 1478, 1479, 7, 21, 0, 0, 1479, 1480, 7, 10, 0, 0, 1480, 1481, 7, 2, 0, 0, 1481, 1482, 7, 7, 0, 0, 1482, 1483, 7, 10, 0, 0, 1483, 1484, 7, 5, 0, 0, 1484, 1485, 7, 11, 0, 0, 1485, 1486, 7, 13, 0, 0, 1486, 306, 1, 0, 0, 0, 1487, 1488, 7, 8, 0, 0, 1488, 1489, 7, 4, 0, 0, 1489, 1490, 7, 5, 0, 0, 1490, 1491, 7, 10, 0, 0, 1491, 1492, 7, 21, 0, 0, 1492, 1493, 7, 10, 0, 0, 1493, 1494, 7, 11, 0, 0, 
1494, 308, 1, 0, 0, 0, 1495, 1496, 7, 9, 0, 0, 1496, 1497, 7, 2, 0, 0, 1497, 1498, 7, 4, 0, 0, 1498, 1499, 7, 21, 0, 0, 1499, 310, 1, 0, 0, 0, 1500, 1501, 7, 7, 0, 0, 1501, 1502, 7, 0, 0, 0, 1502, 1503, 7, 18, 0, 0, 1503, 312, 1, 0, 0, 0, 1504, 1505, 7, 4, 0, 0, 1505, 1506, 7, 3, 0, 0, 1506, 1507, 7, 7, 0, 0, 1507, 1508, 7, 15, 0, 0, 1508, 1509, 7, 10, 0, 0, 1509, 1510, 7, 11, 0, 0, 1510, 314, 1, 0, 0, 0, 1511, 1512, 7, 18, 0, 0, 1512, 1513, 7, 2, 0, 0, 1513, 1514, 7, 8, 0, 0, 1514, 1515, 7, 4, 0, 0, 1515, 1516, 7, 5, 0, 0, 1516, 316, 1, 0, 0, 0, 1517, 1518, 7, 11, 0, 0, 1518, 1519, 7, 10, 0, 0, 1519, 1520, 7, 1, 0, 0, 1520, 1521, 7, 2, 0, 0, 1521, 1522, 7, 11, 0, 0, 1522, 1523, 7, 6, 0, 0, 1523, 318, 1, 0, 0, 0, 1524, 1525, 7, 14, 0, 0, 1525, 1526, 7, 5, 0, 0, 1526, 1527, 7, 11, 0, 0, 1527, 1528, 7, 8, 0, 0, 1528, 1529, 7, 4, 0, 0, 1529, 1530, 7, 21, 0, 0, 1530, 320, 1, 0, 0, 0, 1531, 1532, 7, 5, 0, 0, 1532, 1533, 7, 8, 0, 0, 1533, 1534, 7, 7, 0, 0, 1534, 1535, 7, 10, 0, 0, 1535, 1536, 7, 14, 0, 0, 1536, 1537, 7, 5, 0, 0, 1537, 1538, 7, 0, 0, 0, 1538, 1539, 7, 7, 0, 0, 1539, 1540, 7, 18, 0, 0, 1540, 322, 1, 0, 0, 0, 1541, 1542, 7, 0, 0, 0, 1542, 1543, 7, 4, 0, 0, 1543, 1544, 7, 13, 0, 0, 1544, 324, 1, 0, 0, 0, 1545, 1546, 7, 0, 0, 0, 1546, 1547, 7, 4, 0, 0, 1547, 1548, 7, 13, 0, 0, 1548, 1549, 7, 0, 0, 0, 1549, 1550, 7, 5, 0, 0, 1550, 1551, 7, 2, 0, 0, 1551, 1552, 7, 7, 0, 0, 1552, 1553, 7, 8, 0, 0, 1553, 1554, 7, 1, 0, 0, 1554, 326, 1, 0, 0, 0, 1555, 1556, 7, 0, 0, 0, 1556, 1557, 7, 4, 0, 0, 1557, 1558, 7, 13, 0, 0, 1558, 1559, 7, 22, 0, 0, 1559, 1560, 7, 14, 0, 0, 1560, 1561, 7, 2, 0, 0, 1561, 1562, 7, 4, 0, 0, 1562, 1563, 7, 0, 0, 0, 1563, 1564, 7, 5, 0, 0, 1564, 1565, 7, 2, 0, 0, 1565, 1566, 7, 7, 0, 0, 1566, 1567, 7, 8, 0, 0, 1567, 1568, 7, 1, 0, 0, 1568, 328, 1, 0, 0, 0, 1569, 1570, 7, 0, 0, 0, 1570, 1571, 7, 4, 0, 0, 1571, 1572, 7, 13, 0, 0, 1572, 1573, 7, 11, 0, 0, 1573, 1574, 7, 10, 0, 0, 1574, 1575, 7, 1, 0, 0, 1575, 1576, 7, 2, 0, 0, 1576, 1577, 7, 11, 0, 0, 1577, 1578, 7, 6, 0, 0, 1578, 330, 1, 0, 0, 0, 1579, 1580, 7, 14, 0, 0, 1580, 1581, 7, 1, 0, 0, 1581, 1582, 7, 0, 0, 0, 1582, 1583, 7, 9, 0, 0, 1583, 1584, 7, 0, 0, 0, 1584, 1585, 7, 11, 0, 0, 1585, 332, 1, 0, 0, 0, 1586, 1587, 5, 59, 0, 0, 1587, 334, 1, 0, 0, 0, 1588, 1589, 5, 44, 0, 0, 1589, 336, 1, 0, 0, 0, 1590, 1591, 5, 58, 0, 0, 1591, 338, 1, 0, 0, 0, 1592, 1593, 5, 40, 0, 0, 1593, 340, 1, 0, 0, 0, 1594, 1595, 5, 41, 0, 0, 1595, 342, 1, 0, 0, 0, 1596, 1597, 5, 91, 0, 0, 1597, 344, 1, 0, 0, 0, 1598, 1599, 5, 93, 0, 0, 1599, 346, 1, 0, 0, 0, 1600, 1601, 5, 123, 0, 0, 1601, 348, 1, 0, 0, 0, 1602, 1603, 5, 125, 0, 0, 1603, 350, 1, 0, 0, 0, 1604, 1605, 5, 42, 0, 0, 1605, 352, 1, 0, 0, 0, 1606, 1607, 5, 46, 0, 0, 1607, 354, 1, 0, 0, 0, 1608, 1609, 5, 36, 0, 0, 1609, 356, 1, 0, 0, 0, 1610, 1611, 5, 63, 0, 0, 1611, 358, 1, 0, 0, 0, 1612, 1613, 5, 60, 0, 0, 1613, 360, 1, 0, 0, 0, 1614, 1615, 5, 60, 0, 0, 1615, 1616, 5, 61, 0, 0, 1616, 362, 1, 0, 0, 0, 1617, 1618, 5, 62, 0, 0, 1618, 364, 1, 0, 0, 0, 1619, 1620, 5, 62, 0, 0, 1620, 1621, 5, 61, 0, 0, 1621, 366, 1, 0, 0, 0, 1622, 1623, 5, 61, 0, 0, 1623, 368, 1, 0, 0, 0, 1624, 1625, 5, 33, 0, 0, 1625, 1626, 5, 61, 0, 0, 1626, 370, 1, 0, 0, 0, 1627, 1628, 5, 60, 0, 0, 1628, 1629, 7, 0, 0, 0, 1629, 1630, 7, 4, 0, 0, 1630, 1631, 7, 13, 0, 0, 1631, 372, 1, 0, 0, 0, 1632, 1633, 5, 60, 0, 0, 1633, 1634, 5, 61, 0, 0, 1634, 1635, 1, 0, 0, 0, 1635, 1636, 7, 0, 0, 0, 1636, 1637, 7, 4, 0, 0, 1637, 1638, 7, 13, 0, 0, 1638, 374, 1, 0, 0, 0, 1639, 1640, 5, 62, 0, 0, 1640, 1641, 7, 0, 0, 0, 
1641, 1642, 7, 4, 0, 0, 1642, 1643, 7, 13, 0, 0, 1643, 376, 1, 0, 0, 0, 1644, 1645, 5, 62, 0, 0, 1645, 1646, 5, 61, 0, 0, 1646, 1647, 1, 0, 0, 0, 1647, 1648, 7, 0, 0, 0, 1648, 1649, 7, 4, 0, 0, 1649, 1650, 7, 13, 0, 0, 1650, 378, 1, 0, 0, 0, 1651, 1652, 5, 61, 0, 0, 1652, 1653, 7, 0, 0, 0, 1653, 1654, 7, 4, 0, 0, 1654, 1655, 7, 13, 0, 0, 1655, 380, 1, 0, 0, 0, 1656, 1657, 5, 33, 0, 0, 1657, 1658, 5, 61, 0, 0, 1658, 1659, 1, 0, 0, 0, 1659, 1660, 7, 0, 0, 0, 1660, 1661, 7, 4, 0, 0, 1661, 1662, 7, 13, 0, 0, 1662, 382, 1, 0, 0, 0, 1663, 1664, 5, 43, 0, 0, 1664, 384, 1, 0, 0, 0, 1665, 1666, 5, 45, 0, 0, 1666, 386, 1, 0, 0, 0, 1667, 1668, 5, 47, 0, 0, 1668, 388, 1, 0, 0, 0, 1669, 1670, 7, 6, 0, 0, 1670, 1671, 7, 8, 0, 0, 1671, 1672, 7, 24, 0, 0, 1672, 390, 1, 0, 0, 0, 1673, 1674, 5, 124, 0, 0, 1674, 1675, 5, 124, 0, 0, 1675, 392, 1, 0, 0, 0, 1676, 1677, 7, 4, 0, 0, 1677, 1678, 7, 3, 0, 0, 1678, 1679, 7, 9, 0, 0, 1679, 1680, 7, 9, 0, 0, 1680, 394, 1, 0, 0, 0, 1681, 1682, 7, 17, 0, 0, 1682, 1683, 7, 0, 0, 0, 1683, 1684, 7, 9, 0, 0, 1684, 1685, 7, 14, 0, 0, 1685, 1686, 7, 10, 0, 0, 1686, 396, 1, 0, 0, 0, 1687, 1688, 7, 5, 0, 0, 1688, 1689, 7, 11, 0, 0, 1689, 1690, 7, 3, 0, 0, 1690, 1691, 7, 10, 0, 0, 1691, 398, 1, 0, 0, 0, 1692, 1694, 3, 427, 213, 0, 1693, 1692, 1, 0, 0, 0, 1694, 1695, 1, 0, 0, 0, 1695, 1693, 1, 0, 0, 0, 1695, 1696, 1, 0, 0, 0, 1696, 400, 1, 0, 0, 0, 1697, 1699, 3, 427, 213, 0, 1698, 1697, 1, 0, 0, 0, 1699, 1702, 1, 0, 0, 0, 1700, 1698, 1, 0, 0, 0, 1700, 1701, 1, 0, 0, 0, 1701, 1703, 1, 0, 0, 0, 1702, 1700, 1, 0, 0, 0, 1703, 1705, 5, 46, 0, 0, 1704, 1706, 3, 427, 213, 0, 1705, 1704, 1, 0, 0, 0, 1706, 1707, 1, 0, 0, 0, 1707, 1705, 1, 0, 0, 0, 1707, 1708, 1, 0, 0, 0, 1708, 1718, 1, 0, 0, 0, 1709, 1711, 7, 10, 0, 0, 1710, 1712, 7, 26, 0, 0, 1711, 1710, 1, 0, 0, 0, 1711, 1712, 1, 0, 0, 0, 1712, 1714, 1, 0, 0, 0, 1713, 1715, 3, 427, 213, 0, 1714, 1713, 1, 0, 0, 0, 1715, 1716, 1, 0, 0, 0, 1716, 1714, 1, 0, 0, 0, 1716, 1717, 1, 0, 0, 0, 1717, 1719, 1, 0, 0, 0, 1718, 1709, 1, 0, 0, 0, 1718, 1719, 1, 0, 0, 0, 1719, 1735, 1, 0, 0, 0, 1720, 1722, 3, 427, 213, 0, 1721, 1720, 1, 0, 0, 0, 1722, 1723, 1, 0, 0, 0, 1723, 1721, 1, 0, 0, 0, 1723, 1724, 1, 0, 0, 0, 1724, 1725, 1, 0, 0, 0, 1725, 1727, 7, 10, 0, 0, 1726, 1728, 7, 26, 0, 0, 1727, 1726, 1, 0, 0, 0, 1727, 1728, 1, 0, 0, 0, 1728, 1730, 1, 0, 0, 0, 1729, 1731, 3, 427, 213, 0, 1730, 1729, 1, 0, 0, 0, 1731, 1732, 1, 0, 0, 0, 1732, 1730, 1, 0, 0, 0, 1732, 1733, 1, 0, 0, 0, 1733, 1735, 1, 0, 0, 0, 1734, 1700, 1, 0, 0, 0, 1734, 1721, 1, 0, 0, 0, 1735, 402, 1, 0, 0, 0, 1736, 1739, 3, 399, 199, 0, 1737, 1739, 3, 401, 200, 0, 1738, 1736, 1, 0, 0, 0, 1738, 1737, 1, 0, 0, 0, 1739, 1740, 1, 0, 0, 0, 1740, 1741, 7, 4, 0, 0, 1741, 404, 1, 0, 0, 0, 1742, 1747, 5, 34, 0, 0, 1743, 1746, 3, 429, 214, 0, 1744, 1746, 9, 0, 0, 0, 1745, 1743, 1, 0, 0, 0, 1745, 1744, 1, 0, 0, 0, 1746, 1749, 1, 0, 0, 0, 1747, 1748, 1, 0, 0, 0, 1747, 1745, 1, 0, 0, 0, 1748, 1750, 1, 0, 0, 0, 1749, 1747, 1, 0, 0, 0, 1750, 1751, 5, 34, 0, 0, 1751, 406, 1, 0, 0, 0, 1752, 1757, 5, 39, 0, 0, 1753, 1756, 3, 431, 215, 0, 1754, 1756, 9, 0, 0, 0, 1755, 1753, 1, 0, 0, 0, 1755, 1754, 1, 0, 0, 0, 1756, 1759, 1, 0, 0, 0, 1757, 1758, 1, 0, 0, 0, 1757, 1755, 1, 0, 0, 0, 1758, 1760, 1, 0, 0, 0, 1759, 1757, 1, 0, 0, 0, 1760, 1761, 5, 39, 0, 0, 1761, 408, 1, 0, 0, 0, 1762, 1763, 7, 27, 0, 0, 1763, 1764, 7, 28, 0, 0, 1764, 1765, 7, 27, 0, 0, 1765, 1766, 7, 29, 0, 0, 1766, 410, 1, 0, 0, 0, 1767, 1773, 3, 425, 212, 0, 1768, 1772, 3, 425, 212, 0, 1769, 1772, 3, 427, 213, 0, 1770, 1772, 3, 435, 217, 0, 
1771, 1768, 1, 0, 0, 0, 1771, 1769, 1, 0, 0, 0, 1771, 1770, 1, 0, 0, 0, 1772, 1775, 1, 0, 0, 0, 1773, 1771, 1, 0, 0, 0, 1773, 1774, 1, 0, 0, 0, 1774, 412, 1, 0, 0, 0, 1775, 1773, 1, 0, 0, 0, 1776, 1779, 3, 427, 213, 0, 1777, 1779, 3, 435, 217, 0, 1778, 1776, 1, 0, 0, 0, 1778, 1777, 1, 0, 0, 0, 1779, 1785, 1, 0, 0, 0, 1780, 1784, 3, 425, 212, 0, 1781, 1784, 3, 427, 213, 0, 1782, 1784, 3, 435, 217, 0, 1783, 1780, 1, 0, 0, 0, 1783, 1781, 1, 0, 0, 0, 1783, 1782, 1, 0, 0, 0, 1784, 1787, 1, 0, 0, 0, 1785, 1783, 1, 0, 0, 0, 1785, 1786, 1, 0, 0, 0, 1786, 414, 1, 0, 0, 0, 1787, 1785, 1, 0, 0, 0, 1788, 1790, 7, 30, 0, 0, 1789, 1788, 1, 0, 0, 0, 1790, 1791, 1, 0, 0, 0, 1791, 1789, 1, 0, 0, 0, 1791, 1792, 1, 0, 0, 0, 1792, 1793, 1, 0, 0, 0, 1793, 1794, 6, 207, 0, 0, 1794, 416, 1, 0, 0, 0, 1795, 1796, 5, 47, 0, 0, 1796, 1797, 5, 42, 0, 0, 1797, 1798, 1, 0, 0, 0, 1798, 1802, 8, 31, 0, 0, 1799, 1801, 9, 0, 0, 0, 1800, 1799, 1, 0, 0, 0, 1801, 1804, 1, 0, 0, 0, 1802, 1803, 1, 0, 0, 0, 1802, 1800, 1, 0, 0, 0, 1803, 1805, 1, 0, 0, 0, 1804, 1802, 1, 0, 0, 0, 1805, 1806, 5, 42, 0, 0, 1806, 1807, 5, 47, 0, 0, 1807, 1808, 1, 0, 0, 0, 1808, 1809, 6, 208, 0, 0, 1809, 418, 1, 0, 0, 0, 1810, 1811, 5, 47, 0, 0, 1811, 1812, 5, 47, 0, 0, 1812, 1816, 1, 0, 0, 0, 1813, 1815, 8, 32, 0, 0, 1814, 1813, 1, 0, 0, 0, 1815, 1818, 1, 0, 0, 0, 1816, 1814, 1, 0, 0, 0, 1816, 1817, 1, 0, 0, 0, 1817, 1819, 1, 0, 0, 0, 1818, 1816, 1, 0, 0, 0, 1819, 1820, 6, 209, 0, 0, 1820, 420, 1, 0, 0, 0, 1821, 1825, 5, 35, 0, 0, 1822, 1824, 8, 32, 0, 0, 1823, 1822, 1, 0, 0, 0, 1824, 1827, 1, 0, 0, 0, 1825, 1823, 1, 0, 0, 0, 1825, 1826, 1, 0, 0, 0, 1826, 1828, 1, 0, 0, 0, 1827, 1825, 1, 0, 0, 0, 1828, 1829, 6, 210, 0, 0, 1829, 422, 1, 0, 0, 0, 1830, 1831, 9, 0, 0, 0, 1831, 424, 1, 0, 0, 0, 1832, 1833, 7, 33, 0, 0, 1833, 426, 1, 0, 0, 0, 1834, 1835, 2, 48, 57, 0, 1835, 428, 1, 0, 0, 0, 1836, 1839, 5, 92, 0, 0, 1837, 1840, 7, 34, 0, 0, 1838, 1840, 3, 437, 218, 0, 1839, 1837, 1, 0, 0, 0, 1839, 1838, 1, 0, 0, 0, 1840, 430, 1, 0, 0, 0, 1841, 1844, 5, 92, 0, 0, 1842, 1845, 7, 35, 0, 0, 1843, 1845, 3, 437, 218, 0, 1844, 1842, 1, 0, 0, 0, 1844, 1843, 1, 0, 0, 0, 1845, 432, 1, 0, 0, 0, 1846, 1847, 7, 36, 0, 0, 1847, 434, 1, 0, 0, 0, 1848, 1849, 5, 95, 0, 0, 1849, 436, 1, 0, 0, 0, 1850, 1851, 5, 117, 0, 0, 1851, 1852, 3, 433, 216, 0, 1852, 1853, 3, 433, 216, 0, 1853, 1854, 3, 433, 216, 0, 1854, 1855, 3, 433, 216, 0, 1855, 438, 1, 0, 0, 0, 1856, 1857, 7, 1, 0, 0, 1857, 1858, 7, 9, 0, 0, 1858, 1859, 7, 10, 0, 0, 1859, 1860, 7, 0, 0, 0, 1860, 1861, 7, 11, 0, 0, 1861, 440, 1, 0, 0, 0, 1862, 1863, 7, 1, 0, 0, 1863, 1864, 7, 3, 0, 0, 1864, 1865, 7, 11, 0, 0, 1865, 1866, 7, 11, 0, 0, 1866, 1867, 7, 10, 0, 0, 1867, 1868, 7, 4, 0, 0, 1868, 1869, 7, 5, 0, 0, 1869, 442, 1, 0, 0, 0, 1870, 1871, 7, 10, 0, 0, 1871, 1872, 7, 19, 0, 0, 1872, 1873, 7, 18, 0, 0, 1873, 1874, 7, 8, 0, 0, 1874, 1875, 7, 11, 0, 0, 1875, 1876, 7, 10, 0, 0, 1876, 444, 1, 0, 0, 0, 1877, 1878, 7, 10, 0, 0, 1878, 1879, 7, 19, 0, 0, 1879, 1880, 7, 5, 0, 0, 1880, 1881, 7, 10, 0, 0, 1881, 1882, 7, 11, 0, 0, 1882, 1883, 7, 4, 0, 0, 1883, 1884, 7, 0, 0, 0, 1884, 1885, 7, 9, 0, 0, 1885, 1886, 7, 9, 0, 0, 1886, 1887, 7, 13, 0, 0, 1887, 446, 1, 0, 0, 0, 1888, 1889, 7, 18, 0, 0, 1889, 1890, 7, 11, 0, 0, 1890, 1891, 7, 10, 0, 0, 1891, 1892, 7, 17, 0, 0, 1892, 1893, 7, 10, 0, 0, 1893, 1894, 7, 11, 0, 0, 1894, 448, 1, 0, 0, 0, 1895, 1896, 7, 18, 0, 0, 1896, 1897, 7, 11, 0, 0, 1897, 1898, 7, 8, 0, 0, 1898, 1899, 7, 24, 0, 0, 1899, 1900, 7, 8, 0, 0, 1900, 1901, 7, 9, 0, 0, 1901, 1902, 7, 10, 0, 0, 1902, 1903, 7, 21, 
0, 0, 1903, 1904, 7, 10, 0, 0, 1904, 1905, 7, 14, 0, 0, 1905, 450, 1, 0, 0, 0, 1906, 1907, 7, 11, 0, 0, 1907, 1908, 7, 10, 0, 0, 1908, 1909, 7, 5, 0, 0, 1909, 1910, 7, 0, 0, 0, 1910, 1911, 7, 8, 0, 0, 1911, 1912, 7, 4, 0, 0, 1912, 452, 1, 0, 0, 0, 1913, 1914, 7, 11, 0, 0, 1914, 1915, 7, 10, 0, 0, 1915, 1916, 7, 5, 0, 0, 1916, 1917, 7, 0, 0, 0, 1917, 1918, 7, 8, 0, 0, 1918, 1919, 7, 4, 0, 0, 1919, 1920, 7, 10, 0, 0, 1920, 1921, 7, 6, 0, 0, 1921, 454, 1, 0, 0, 0, 42, 0, 469, 471, 622, 837, 988, 1210, 1377, 1385, 1393, 1401, 1407, 1415, 1421, 1429, 1435, 1695, 1700, 1707, 1711, 1716, 1718, 1723, 1727, 1732, 1734, 1738, 1745, 1747, 1755, 1757, 1771, 1773, 1778, 1783, 1785, 1791, 1802, 1816, 1825, 1839, 1844, 1, 6, 0, 0] \ No newline at end of file +[4, 0, 217, 1976, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7, 20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25, 2, 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 2, 30, 7, 30, 2, 31, 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 2, 34, 7, 34, 2, 35, 7, 35, 2, 36, 7, 36, 2, 37, 7, 37, 2, 38, 7, 38, 2, 39, 7, 39, 2, 40, 7, 40, 2, 41, 7, 41, 2, 42, 7, 42, 2, 43, 7, 43, 2, 44, 7, 44, 2, 45, 7, 45, 2, 46, 7, 46, 2, 47, 7, 47, 2, 48, 7, 48, 2, 49, 7, 49, 2, 50, 7, 50, 2, 51, 7, 51, 2, 52, 7, 52, 2, 53, 7, 53, 2, 54, 7, 54, 2, 55, 7, 55, 2, 56, 7, 56, 2, 57, 7, 57, 2, 58, 7, 58, 2, 59, 7, 59, 2, 60, 7, 60, 2, 61, 7, 61, 2, 62, 7, 62, 2, 63, 7, 63, 2, 64, 7, 64, 2, 65, 7, 65, 2, 66, 7, 66, 2, 67, 7, 67, 2, 68, 7, 68, 2, 69, 7, 69, 2, 70, 7, 70, 2, 71, 7, 71, 2, 72, 7, 72, 2, 73, 7, 73, 2, 74, 7, 74, 2, 75, 7, 75, 2, 76, 7, 76, 2, 77, 7, 77, 2, 78, 7, 78, 2, 79, 7, 79, 2, 80, 7, 80, 2, 81, 7, 81, 2, 82, 7, 82, 2, 83, 7, 83, 2, 84, 7, 84, 2, 85, 7, 85, 2, 86, 7, 86, 2, 87, 7, 87, 2, 88, 7, 88, 2, 89, 7, 89, 2, 90, 7, 90, 2, 91, 7, 91, 2, 92, 7, 92, 2, 93, 7, 93, 2, 94, 7, 94, 2, 95, 7, 95, 2, 96, 7, 96, 2, 97, 7, 97, 2, 98, 7, 98, 2, 99, 7, 99, 2, 100, 7, 100, 2, 101, 7, 101, 2, 102, 7, 102, 2, 103, 7, 103, 2, 104, 7, 104, 2, 105, 7, 105, 2, 106, 7, 106, 2, 107, 7, 107, 2, 108, 7, 108, 2, 109, 7, 109, 2, 110, 7, 110, 2, 111, 7, 111, 2, 112, 7, 112, 2, 113, 7, 113, 2, 114, 7, 114, 2, 115, 7, 115, 2, 116, 7, 116, 2, 117, 7, 117, 2, 118, 7, 118, 2, 119, 7, 119, 2, 120, 7, 120, 2, 121, 7, 121, 2, 122, 7, 122, 2, 123, 7, 123, 2, 124, 7, 124, 2, 125, 7, 125, 2, 126, 7, 126, 2, 127, 7, 127, 2, 128, 7, 128, 2, 129, 7, 129, 2, 130, 7, 130, 2, 131, 7, 131, 2, 132, 7, 132, 2, 133, 7, 133, 2, 134, 7, 134, 2, 135, 7, 135, 2, 136, 7, 136, 2, 137, 7, 137, 2, 138, 7, 138, 2, 139, 7, 139, 2, 140, 7, 140, 2, 141, 7, 141, 2, 142, 7, 142, 2, 143, 7, 143, 2, 144, 7, 144, 2, 145, 7, 145, 2, 146, 7, 146, 2, 147, 7, 147, 2, 148, 7, 148, 2, 149, 7, 149, 2, 150, 7, 150, 2, 151, 7, 151, 2, 152, 7, 152, 2, 153, 7, 153, 2, 154, 7, 154, 2, 155, 7, 155, 2, 156, 7, 156, 2, 157, 7, 157, 2, 158, 7, 158, 2, 159, 7, 159, 2, 160, 7, 160, 2, 161, 7, 161, 2, 162, 7, 162, 2, 163, 7, 163, 2, 164, 7, 164, 2, 165, 7, 165, 2, 166, 7, 166, 2, 167, 7, 167, 2, 168, 7, 168, 2, 169, 7, 169, 2, 170, 7, 170, 2, 171, 7, 171, 2, 172, 7, 172, 2, 173, 7, 173, 2, 174, 7, 174, 2, 175, 7, 175, 2, 176, 7, 176, 2, 177, 7, 177, 2, 178, 7, 178, 2, 179, 7, 179, 2, 180, 7, 180, 2, 181, 7, 181, 2, 182, 7, 182, 2, 183, 7, 183, 2, 184, 7, 184, 2, 185, 7, 185, 2, 186, 7, 186, 2, 
187, 7, 187, 2, 188, 7, 188, 2, 189, 7, 189, 2, 190, 7, 190, 2, 191, 7, 191, 2, 192, 7, 192, 2, 193, 7, 193, 2, 194, 7, 194, 2, 195, 7, 195, 2, 196, 7, 196, 2, 197, 7, 197, 2, 198, 7, 198, 2, 199, 7, 199, 2, 200, 7, 200, 2, 201, 7, 201, 2, 202, 7, 202, 2, 203, 7, 203, 2, 204, 7, 204, 2, 205, 7, 205, 2, 206, 7, 206, 2, 207, 7, 207, 2, 208, 7, 208, 2, 209, 7, 209, 2, 210, 7, 210, 2, 211, 7, 211, 2, 212, 7, 212, 2, 213, 7, 213, 2, 214, 7, 214, 2, 215, 7, 215, 2, 216, 7, 216, 2, 217, 7, 217, 2, 218, 7, 218, 2, 219, 7, 219, 2, 220, 7, 220, 2, 221, 7, 221, 2, 222, 7, 222, 2, 223, 7, 223, 2, 224, 7, 224, 2, 225, 7, 225, 2, 226, 7, 226, 2, 227, 7, 227, 2, 228, 7, 228, 2, 229, 7, 229, 2, 230, 7, 230, 2, 231, 7, 231, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 5, 4, 496, 8, 4, 10, 4, 12, 4, 499, 9, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 6, 1, 6, 1, 6, 1, 6, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 8, 1, 8, 1, 8, 1, 8, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 12, 1, 12, 1, 12, 1, 12, 1, 13, 1, 13, 1, 13, 1, 14, 1, 14, 1, 14, 1, 14, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 1, 18, 1, 18, 1, 18, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 1, 20, 1, 20, 1, 20, 1, 20, 1, 20, 1, 21, 1, 21, 1, 21, 1, 21, 1, 21, 1, 21, 1, 21, 1, 21, 1, 22, 1, 22, 1, 22, 1, 22, 1, 22, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 24, 1, 24, 1, 24, 1, 24, 1, 24, 1, 24, 1, 24, 1, 24, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 28, 1, 28, 1, 28, 1, 28, 1, 28, 3, 28, 656, 8, 28, 1, 29, 1, 29, 1, 29, 1, 29, 1, 29, 1, 29, 1, 29, 1, 29, 1, 30, 1, 30, 1, 30, 1, 30, 1, 30, 1, 30, 1, 30, 1, 30, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 33, 1, 33, 1, 33, 1, 33, 1, 33, 1, 33, 1, 33, 1, 33, 1, 33, 1, 33, 1, 33, 1, 33, 1, 34, 1, 34, 1, 34, 1, 34, 1, 34, 1, 34, 1, 34, 1, 34, 1, 34, 1, 35, 1, 35, 1, 35, 1, 35, 1, 35, 1, 35, 1, 35, 1, 35, 1, 36, 1, 36, 1, 36, 1, 36, 1, 36, 1, 36, 1, 36, 1, 36, 1, 36, 1, 37, 1, 37, 1, 37, 1, 37, 1, 37, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 39, 1, 39, 1, 39, 1, 39, 1, 39, 1, 39, 1, 39, 1, 39, 1, 39, 1, 40, 1, 40, 1, 40, 1, 40, 1, 40, 1, 41, 1, 41, 1, 41, 1, 41, 1, 41, 1, 41, 1, 41, 1, 42, 1, 42, 1, 42, 1, 42, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 45, 1, 45, 1, 45, 1, 45, 1, 45, 1, 45, 1, 45, 1, 46, 1, 46, 1, 46, 1, 46, 1, 46, 1, 46, 1, 46, 1, 46, 1, 47, 1, 47, 1, 47, 1, 47, 1, 47, 1, 47, 1, 47, 1, 48, 1, 48, 1, 48, 1, 48, 1, 48, 1, 48, 1, 49, 1, 49, 1, 49, 1, 49, 1, 49, 1, 49, 1, 50, 1, 50, 1, 50, 1, 50, 1, 51, 1, 51, 1, 51, 1, 51, 1, 51, 1, 51, 1, 52, 1, 52, 1, 52, 1, 52, 1, 52, 1, 52, 1, 52, 1, 53, 1, 53, 1, 53, 1, 53, 1, 53, 1, 54, 1, 54, 1, 54, 1, 54, 1, 54, 1, 54, 1, 54, 1, 55, 1, 55, 1, 55, 1, 55, 1, 55, 1, 55, 1, 55, 1, 55, 1, 55, 1, 56, 1, 56, 1, 56, 1, 56, 1, 56, 1, 56, 1, 56, 1, 56, 1, 56, 1, 56, 1, 57, 1, 57, 1, 57, 1, 57, 1, 57, 1, 57, 1, 58, 1, 58, 1, 58, 1, 58, 
1, 58, 1, 58, 1, 59, 1, 59, 1, 59, 1, 59, 1, 59, 1, 59, 3, 59, 886, 8, 59, 1, 60, 1, 60, 1, 60, 1, 60, 1, 60, 1, 60, 1, 60, 1, 60, 1, 60, 1, 60, 1, 60, 1, 61, 1, 61, 1, 61, 1, 61, 1, 61, 1, 61, 1, 61, 1, 61, 1, 61, 1, 62, 1, 62, 1, 62, 1, 63, 1, 63, 1, 63, 1, 63, 1, 63, 1, 63, 1, 64, 1, 64, 1, 64, 1, 65, 1, 65, 1, 65, 1, 65, 1, 65, 1, 65, 1, 65, 1, 65, 1, 65, 1, 65, 1, 66, 1, 66, 1, 66, 1, 66, 1, 66, 1, 66, 1, 67, 1, 67, 1, 67, 1, 67, 1, 67, 1, 67, 1, 67, 1, 67, 1, 68, 1, 68, 1, 68, 1, 68, 1, 68, 1, 68, 1, 68, 1, 69, 1, 69, 1, 69, 1, 69, 1, 69, 1, 70, 1, 70, 1, 70, 1, 71, 1, 71, 1, 71, 1, 71, 1, 71, 1, 72, 1, 72, 1, 72, 1, 72, 1, 72, 1, 73, 1, 73, 1, 73, 1, 73, 1, 74, 1, 74, 1, 74, 1, 74, 1, 74, 1, 74, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 76, 1, 76, 1, 76, 1, 76, 1, 76, 1, 77, 1, 77, 1, 77, 1, 77, 1, 77, 1, 78, 1, 78, 1, 78, 1, 78, 1, 78, 1, 78, 1, 78, 1, 78, 1, 78, 1, 79, 1, 79, 1, 79, 1, 79, 1, 79, 1, 79, 1, 80, 1, 80, 1, 80, 1, 80, 1, 80, 1, 80, 1, 81, 1, 81, 1, 81, 1, 81, 1, 81, 1, 82, 1, 82, 1, 82, 1, 82, 1, 82, 1, 82, 1, 82, 1, 82, 1, 82, 1, 83, 1, 83, 1, 83, 1, 83, 1, 83, 1, 83, 1, 84, 1, 84, 1, 84, 1, 84, 1, 84, 1, 84, 1, 84, 1, 84, 3, 84, 1043, 8, 84, 1, 85, 1, 85, 1, 85, 1, 85, 1, 85, 1, 85, 1, 85, 1, 85, 1, 85, 1, 86, 1, 86, 1, 86, 1, 86, 1, 86, 1, 86, 1, 86, 1, 87, 1, 87, 1, 87, 1, 87, 1, 87, 1, 87, 1, 87, 1, 87, 1, 87, 1, 87, 1, 87, 1, 88, 1, 88, 1, 88, 1, 88, 1, 88, 1, 88, 1, 88, 1, 88, 1, 88, 1, 88, 1, 89, 1, 89, 1, 89, 1, 89, 1, 89, 1, 89, 1, 89, 1, 89, 1, 89, 1, 89, 1, 89, 1, 90, 1, 90, 1, 90, 1, 90, 1, 90, 1, 90, 1, 90, 1, 91, 1, 91, 1, 91, 1, 92, 1, 92, 1, 92, 1, 92, 1, 93, 1, 93, 1, 93, 1, 93, 1, 93, 1, 93, 1, 94, 1, 94, 1, 94, 1, 94, 1, 94, 1, 94, 1, 94, 1, 95, 1, 95, 1, 95, 1, 96, 1, 96, 1, 96, 1, 97, 1, 97, 1, 97, 1, 97, 1, 97, 1, 98, 1, 98, 1, 98, 1, 99, 1, 99, 1, 99, 1, 99, 1, 99, 1, 99, 1, 100, 1, 100, 1, 100, 1, 100, 1, 100, 1, 100, 1, 101, 1, 101, 1, 101, 1, 101, 1, 101, 1, 101, 1, 101, 1, 101, 1, 101, 1, 102, 1, 102, 1, 102, 1, 102, 1, 102, 1, 102, 1, 102, 1, 102, 1, 102, 1, 103, 1, 103, 1, 103, 1, 103, 1, 103, 1, 103, 1, 104, 1, 104, 1, 104, 1, 104, 1, 105, 1, 105, 1, 105, 1, 105, 1, 106, 1, 106, 1, 106, 1, 106, 1, 106, 1, 106, 1, 107, 1, 107, 1, 107, 1, 107, 1, 107, 1, 107, 1, 107, 1, 107, 1, 108, 1, 108, 1, 108, 1, 108, 1, 109, 1, 109, 1, 109, 1, 109, 1, 109, 1, 109, 1, 109, 1, 110, 1, 110, 1, 110, 1, 110, 1, 110, 1, 110, 1, 110, 1, 110, 1, 111, 1, 111, 1, 111, 1, 111, 1, 111, 1, 111, 1, 111, 1, 112, 1, 112, 1, 112, 1, 112, 1, 112, 1, 112, 1, 112, 1, 112, 1, 112, 1, 112, 1, 113, 1, 113, 1, 113, 1, 113, 1, 113, 1, 113, 1, 113, 1, 114, 1, 114, 1, 114, 1, 114, 1, 114, 1, 115, 1, 115, 1, 115, 1, 115, 1, 115, 1, 115, 1, 116, 1, 116, 1, 116, 1, 116, 1, 117, 1, 117, 1, 117, 1, 117, 1, 117, 1, 117, 1, 117, 1, 118, 1, 118, 1, 118, 1, 118, 1, 118, 1, 118, 1, 118, 1, 118, 3, 118, 1265, 8, 118, 1, 119, 1, 119, 1, 119, 1, 119, 1, 119, 1, 119, 1, 119, 1, 120, 1, 120, 1, 120, 1, 120, 1, 120, 1, 120, 1, 120, 1, 120, 1, 120, 1, 120, 1, 120, 1, 120, 1, 120, 1, 120, 1, 121, 1, 121, 1, 121, 1, 121, 1, 122, 1, 122, 1, 122, 1, 122, 1, 122, 1, 122, 1, 123, 1, 123, 1, 123, 1, 123, 1, 123, 1, 124, 1, 124, 1, 124, 1, 124, 1, 124, 1, 124, 1, 125, 1, 125, 1, 125, 1, 125, 1, 125, 1, 125, 1, 126, 1, 126, 1, 126, 1, 126, 1, 126, 1, 126, 1, 126, 1, 127, 1, 127, 1, 127, 1, 127, 1, 127, 1, 128, 1, 128, 1, 128, 1, 129, 1, 129, 1, 129, 1, 129, 1, 130, 1, 130, 1, 130, 1, 130, 1, 130, 1, 131, 1, 131, 1, 131, 1, 131, 1, 131, 1, 131, 1, 131, 1, 131, 1, 131, 1, 132, 1, 132, 1, 132, 1, 132, 1, 132, 
1, 132, 1, 132, 1, 133, 1, 133, 1, 133, 1, 133, 1, 133, 1, 133, 1, 133, 1, 134, 1, 134, 1, 134, 1, 134, 1, 134, 1, 134, 1, 134, 1, 135, 1, 135, 1, 135, 1, 135, 1, 135, 1, 136, 1, 136, 1, 136, 1, 136, 1, 136, 1, 136, 1, 137, 1, 137, 1, 137, 1, 137, 1, 137, 1, 137, 1, 138, 1, 138, 1, 138, 1, 138, 1, 138, 1, 138, 1, 138, 1, 139, 1, 139, 1, 139, 1, 139, 1, 139, 1, 140, 1, 140, 1, 140, 1, 140, 1, 140, 1, 140, 1, 141, 1, 141, 1, 141, 1, 141, 1, 141, 1, 142, 1, 142, 1, 142, 1, 142, 1, 142, 1, 142, 1, 142, 1, 143, 1, 143, 1, 143, 1, 143, 1, 143, 1, 143, 1, 143, 1, 144, 1, 144, 1, 144, 1, 144, 1, 144, 1, 145, 1, 145, 4, 145, 1430, 8, 145, 11, 145, 12, 145, 1431, 1, 145, 1, 145, 1, 146, 1, 146, 4, 146, 1438, 8, 146, 11, 146, 12, 146, 1439, 1, 146, 1, 146, 1, 147, 1, 147, 4, 147, 1446, 8, 147, 11, 147, 12, 147, 1447, 1, 147, 1, 147, 1, 148, 1, 148, 4, 148, 1454, 8, 148, 11, 148, 12, 148, 1455, 1, 148, 1, 148, 4, 148, 1460, 8, 148, 11, 148, 12, 148, 1461, 1, 148, 1, 148, 1, 149, 1, 149, 4, 149, 1468, 8, 149, 11, 149, 12, 149, 1469, 1, 149, 1, 149, 4, 149, 1474, 8, 149, 11, 149, 12, 149, 1475, 1, 149, 1, 149, 1, 150, 1, 150, 4, 150, 1482, 8, 150, 11, 150, 12, 150, 1483, 1, 150, 1, 150, 4, 150, 1488, 8, 150, 11, 150, 12, 150, 1489, 1, 150, 1, 150, 1, 151, 1, 151, 1, 151, 1, 151, 1, 151, 1, 151, 1, 152, 1, 152, 1, 152, 1, 152, 1, 152, 1, 152, 1, 152, 1, 153, 1, 153, 1, 153, 1, 153, 1, 153, 1, 153, 1, 153, 1, 153, 1, 154, 1, 154, 1, 154, 1, 154, 1, 154, 1, 154, 1, 154, 1, 155, 1, 155, 1, 155, 1, 155, 1, 155, 1, 156, 1, 156, 1, 156, 1, 156, 1, 156, 1, 156, 1, 157, 1, 157, 1, 157, 1, 157, 1, 157, 1, 157, 1, 157, 1, 157, 1, 157, 1, 158, 1, 158, 1, 158, 1, 158, 1, 158, 1, 158, 1, 158, 1, 158, 1, 159, 1, 159, 1, 159, 1, 159, 1, 159, 1, 160, 1, 160, 1, 160, 1, 160, 1, 161, 1, 161, 1, 161, 1, 161, 1, 161, 1, 161, 1, 161, 1, 162, 1, 162, 1, 162, 1, 162, 1, 162, 1, 162, 1, 163, 1, 163, 1, 163, 1, 163, 1, 163, 1, 163, 1, 163, 1, 164, 1, 164, 1, 164, 1, 164, 1, 164, 1, 164, 1, 164, 1, 165, 1, 165, 1, 165, 1, 165, 1, 165, 1, 165, 1, 165, 1, 165, 1, 165, 1, 165, 1, 166, 1, 166, 1, 166, 1, 166, 1, 167, 1, 167, 1, 167, 1, 167, 1, 167, 1, 167, 1, 167, 1, 167, 1, 167, 1, 167, 1, 168, 1, 168, 1, 168, 1, 168, 1, 168, 1, 168, 1, 168, 1, 168, 1, 168, 1, 168, 1, 168, 1, 168, 1, 168, 1, 168, 1, 169, 1, 169, 1, 169, 1, 169, 1, 169, 1, 169, 1, 169, 1, 169, 1, 169, 1, 169, 1, 170, 1, 170, 1, 170, 1, 170, 1, 170, 1, 170, 1, 170, 1, 171, 1, 171, 1, 172, 1, 172, 1, 173, 1, 173, 1, 174, 1, 174, 1, 175, 1, 175, 1, 176, 1, 176, 1, 177, 1, 177, 1, 178, 1, 178, 1, 179, 1, 179, 1, 180, 1, 180, 1, 181, 1, 181, 1, 182, 1, 182, 1, 183, 1, 183, 1, 184, 1, 184, 1, 185, 1, 185, 1, 185, 1, 186, 1, 186, 1, 187, 1, 187, 1, 187, 1, 188, 1, 188, 1, 189, 1, 189, 1, 189, 1, 190, 1, 190, 1, 190, 1, 190, 1, 190, 1, 191, 1, 191, 1, 191, 1, 191, 1, 191, 1, 191, 1, 191, 1, 192, 1, 192, 1, 192, 1, 192, 1, 192, 1, 193, 1, 193, 1, 193, 1, 193, 1, 193, 1, 193, 1, 193, 1, 194, 1, 194, 1, 194, 1, 194, 1, 194, 1, 195, 1, 195, 1, 195, 1, 195, 1, 195, 1, 195, 1, 195, 1, 196, 1, 196, 1, 197, 1, 197, 1, 198, 1, 198, 1, 199, 1, 199, 1, 199, 1, 199, 1, 200, 1, 200, 1, 200, 1, 201, 1, 201, 1, 201, 1, 201, 1, 201, 1, 202, 1, 202, 1, 202, 1, 202, 1, 202, 1, 202, 1, 203, 1, 203, 1, 203, 1, 203, 1, 203, 1, 204, 4, 204, 1748, 8, 204, 11, 204, 12, 204, 1749, 1, 205, 5, 205, 1753, 8, 205, 10, 205, 12, 205, 1756, 9, 205, 1, 205, 1, 205, 4, 205, 1760, 8, 205, 11, 205, 12, 205, 1761, 1, 205, 1, 205, 3, 205, 1766, 8, 205, 1, 205, 4, 205, 1769, 8, 205, 11, 205, 12, 205, 1770, 3, 
205, 1773, 8, 205, 1, 205, 4, 205, 1776, 8, 205, 11, 205, 12, 205, 1777, 1, 205, 1, 205, 3, 205, 1782, 8, 205, 1, 205, 4, 205, 1785, 8, 205, 11, 205, 12, 205, 1786, 3, 205, 1789, 8, 205, 1, 206, 1, 206, 3, 206, 1793, 8, 206, 1, 206, 1, 206, 1, 207, 1, 207, 1, 207, 5, 207, 1800, 8, 207, 10, 207, 12, 207, 1803, 9, 207, 1, 207, 1, 207, 1, 208, 1, 208, 1, 208, 5, 208, 1810, 8, 208, 10, 208, 12, 208, 1813, 9, 208, 1, 208, 1, 208, 1, 209, 1, 209, 1, 209, 1, 209, 1, 209, 1, 210, 1, 210, 1, 210, 1, 210, 5, 210, 1826, 8, 210, 10, 210, 12, 210, 1829, 9, 210, 1, 211, 1, 211, 3, 211, 1833, 8, 211, 1, 211, 1, 211, 1, 211, 5, 211, 1838, 8, 211, 10, 211, 12, 211, 1841, 9, 211, 1, 212, 4, 212, 1844, 8, 212, 11, 212, 12, 212, 1845, 1, 212, 1, 212, 1, 213, 1, 213, 1, 213, 1, 213, 1, 213, 5, 213, 1855, 8, 213, 10, 213, 12, 213, 1858, 9, 213, 1, 213, 1, 213, 1, 213, 1, 213, 1, 213, 1, 214, 1, 214, 1, 214, 1, 214, 5, 214, 1869, 8, 214, 10, 214, 12, 214, 1872, 9, 214, 1, 214, 1, 214, 1, 215, 1, 215, 5, 215, 1878, 8, 215, 10, 215, 12, 215, 1881, 9, 215, 1, 215, 1, 215, 1, 216, 1, 216, 1, 217, 1, 217, 1, 218, 1, 218, 1, 219, 1, 219, 1, 219, 3, 219, 1894, 8, 219, 1, 220, 1, 220, 1, 220, 3, 220, 1899, 8, 220, 1, 221, 1, 221, 1, 222, 1, 222, 1, 223, 1, 223, 1, 223, 1, 223, 1, 223, 1, 223, 1, 224, 1, 224, 1, 224, 1, 224, 1, 224, 1, 224, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 225, 1, 226, 1, 226, 1, 226, 1, 226, 1, 226, 1, 226, 1, 226, 1, 227, 1, 227, 1, 227, 1, 227, 1, 227, 1, 227, 1, 227, 1, 227, 1, 227, 1, 227, 1, 227, 1, 228, 1, 228, 1, 228, 1, 228, 1, 228, 1, 228, 1, 228, 1, 229, 1, 229, 1, 229, 1, 229, 1, 229, 1, 229, 1, 229, 1, 229, 1, 229, 1, 229, 1, 229, 1, 230, 1, 230, 1, 230, 1, 230, 1, 230, 1, 230, 1, 230, 1, 231, 1, 231, 1, 231, 1, 231, 1, 231, 1, 231, 1, 231, 1, 231, 1, 231, 3, 1801, 1811, 1856, 0, 232, 1, 1, 3, 2, 5, 3, 7, 4, 9, 5, 11, 6, 13, 7, 15, 8, 17, 9, 19, 10, 21, 11, 23, 12, 25, 13, 27, 14, 29, 15, 31, 16, 33, 17, 35, 18, 37, 19, 39, 20, 41, 21, 43, 22, 45, 23, 47, 24, 49, 25, 51, 26, 53, 27, 55, 28, 57, 29, 59, 30, 61, 31, 63, 32, 65, 33, 67, 34, 69, 35, 71, 36, 73, 37, 75, 38, 77, 39, 79, 40, 81, 41, 83, 42, 85, 43, 87, 44, 89, 45, 91, 46, 93, 47, 95, 48, 97, 49, 99, 50, 101, 51, 103, 52, 105, 53, 107, 54, 109, 55, 111, 56, 113, 57, 115, 58, 117, 59, 119, 60, 121, 61, 123, 62, 125, 63, 127, 64, 129, 65, 131, 66, 133, 67, 135, 68, 137, 69, 139, 70, 141, 71, 143, 72, 145, 73, 147, 74, 149, 75, 151, 76, 153, 77, 155, 78, 157, 79, 159, 80, 161, 81, 163, 82, 165, 83, 167, 84, 169, 85, 171, 86, 173, 87, 175, 88, 177, 89, 179, 90, 181, 91, 183, 92, 185, 93, 187, 94, 189, 95, 191, 96, 193, 97, 195, 98, 197, 99, 199, 100, 201, 101, 203, 102, 205, 103, 207, 104, 209, 105, 211, 106, 213, 107, 215, 108, 217, 109, 219, 110, 221, 111, 223, 112, 225, 113, 227, 114, 229, 115, 231, 116, 233, 117, 235, 118, 237, 119, 239, 120, 241, 121, 243, 122, 245, 123, 247, 124, 249, 125, 251, 126, 253, 127, 255, 128, 257, 129, 259, 130, 261, 131, 263, 132, 265, 133, 267, 134, 269, 135, 271, 136, 273, 137, 275, 138, 277, 139, 279, 140, 281, 141, 283, 142, 285, 143, 287, 144, 289, 145, 291, 146, 293, 147, 295, 148, 297, 149, 299, 150, 301, 151, 303, 152, 305, 153, 307, 154, 309, 155, 311, 156, 313, 157, 315, 158, 317, 159, 319, 160, 321, 161, 323, 162, 325, 163, 327, 164, 329, 165, 331, 166, 333, 167, 335, 168, 337, 169, 339, 170, 341, 171, 343, 172, 345, 173, 347, 174, 349, 175, 351, 176, 353, 177, 355, 178, 357, 179, 359, 180, 361, 181, 363, 182, 365, 183, 367, 184, 369, 185, 371, 186, 373, 187, 375, 
188, 377, 189, 379, 190, 381, 191, 383, 192, 385, 193, 387, 194, 389, 195, 391, 196, 393, 197, 395, 198, 397, 199, 399, 200, 401, 201, 403, 202, 405, 203, 407, 204, 409, 205, 411, 206, 413, 207, 415, 208, 417, 209, 419, 210, 421, 211, 423, 212, 425, 213, 427, 214, 429, 215, 431, 216, 433, 217, 435, 0, 437, 0, 439, 0, 441, 0, 443, 0, 445, 0, 447, 0, 449, 0, 451, 0, 453, 0, 455, 0, 457, 0, 459, 0, 461, 0, 463, 0, 1, 0, 37, 2, 0, 65, 65, 97, 97, 2, 0, 67, 67, 99, 99, 2, 0, 79, 79, 111, 111, 2, 0, 85, 85, 117, 117, 2, 0, 78, 78, 110, 110, 2, 0, 84, 84, 116, 116, 2, 0, 68, 68, 100, 100, 2, 0, 77, 77, 109, 109, 2, 0, 73, 73, 105, 105, 2, 0, 76, 76, 108, 108, 2, 0, 69, 69, 101, 101, 2, 0, 82, 82, 114, 114, 2, 0, 87, 87, 119, 119, 2, 0, 89, 89, 121, 121, 2, 0, 83, 83, 115, 115, 2, 0, 66, 66, 98, 98, 2, 0, 70, 70, 102, 102, 2, 0, 72, 72, 104, 104, 2, 0, 80, 80, 112, 112, 2, 0, 88, 88, 120, 120, 2, 0, 90, 90, 122, 122, 2, 0, 71, 71, 103, 103, 2, 0, 74, 74, 106, 106, 2, 0, 75, 75, 107, 107, 2, 0, 86, 86, 118, 118, 2, 0, 81, 81, 113, 113, 2, 0, 43, 43, 45, 45, 1, 0, 83, 83, 1, 0, 89, 89, 1, 0, 36, 36, 3, 0, 9, 10, 13, 13, 32, 32, 1, 0, 43, 43, 2, 0, 10, 10, 13, 13, 2, 0, 65, 90, 97, 122, 8, 0, 34, 34, 47, 47, 92, 92, 98, 98, 102, 102, 110, 110, 114, 114, 116, 116, 8, 0, 39, 39, 47, 47, 92, 92, 98, 98, 102, 102, 110, 110, 114, 114, 116, 116, 3, 0, 48, 57, 65, 70, 97, 102, 2004, 0, 1, 1, 0, 0, 0, 0, 3, 1, 0, 0, 0, 0, 5, 1, 0, 0, 0, 0, 7, 1, 0, 0, 0, 0, 9, 1, 0, 0, 0, 0, 11, 1, 0, 0, 0, 0, 13, 1, 0, 0, 0, 0, 15, 1, 0, 0, 0, 0, 17, 1, 0, 0, 0, 0, 19, 1, 0, 0, 0, 0, 21, 1, 0, 0, 0, 0, 23, 1, 0, 0, 0, 0, 25, 1, 0, 0, 0, 0, 27, 1, 0, 0, 0, 0, 29, 1, 0, 0, 0, 0, 31, 1, 0, 0, 0, 0, 33, 1, 0, 0, 0, 0, 35, 1, 0, 0, 0, 0, 37, 1, 0, 0, 0, 0, 39, 1, 0, 0, 0, 0, 41, 1, 0, 0, 0, 0, 43, 1, 0, 0, 0, 0, 45, 1, 0, 0, 0, 0, 47, 1, 0, 0, 0, 0, 49, 1, 0, 0, 0, 0, 51, 1, 0, 0, 0, 0, 53, 1, 0, 0, 0, 0, 55, 1, 0, 0, 0, 0, 57, 1, 0, 0, 0, 0, 59, 1, 0, 0, 0, 0, 61, 1, 0, 0, 0, 0, 63, 1, 0, 0, 0, 0, 65, 1, 0, 0, 0, 0, 67, 1, 0, 0, 0, 0, 69, 1, 0, 0, 0, 0, 71, 1, 0, 0, 0, 0, 73, 1, 0, 0, 0, 0, 75, 1, 0, 0, 0, 0, 77, 1, 0, 0, 0, 0, 79, 1, 0, 0, 0, 0, 81, 1, 0, 0, 0, 0, 83, 1, 0, 0, 0, 0, 85, 1, 0, 0, 0, 0, 87, 1, 0, 0, 0, 0, 89, 1, 0, 0, 0, 0, 91, 1, 0, 0, 0, 0, 93, 1, 0, 0, 0, 0, 95, 1, 0, 0, 0, 0, 97, 1, 0, 0, 0, 0, 99, 1, 0, 0, 0, 0, 101, 1, 0, 0, 0, 0, 103, 1, 0, 0, 0, 0, 105, 1, 0, 0, 0, 0, 107, 1, 0, 0, 0, 0, 109, 1, 0, 0, 0, 0, 111, 1, 0, 0, 0, 0, 113, 1, 0, 0, 0, 0, 115, 1, 0, 0, 0, 0, 117, 1, 0, 0, 0, 0, 119, 1, 0, 0, 0, 0, 121, 1, 0, 0, 0, 0, 123, 1, 0, 0, 0, 0, 125, 1, 0, 0, 0, 0, 127, 1, 0, 0, 0, 0, 129, 1, 0, 0, 0, 0, 131, 1, 0, 0, 0, 0, 133, 1, 0, 0, 0, 0, 135, 1, 0, 0, 0, 0, 137, 1, 0, 0, 0, 0, 139, 1, 0, 0, 0, 0, 141, 1, 0, 0, 0, 0, 143, 1, 0, 0, 0, 0, 145, 1, 0, 0, 0, 0, 147, 1, 0, 0, 0, 0, 149, 1, 0, 0, 0, 0, 151, 1, 0, 0, 0, 0, 153, 1, 0, 0, 0, 0, 155, 1, 0, 0, 0, 0, 157, 1, 0, 0, 0, 0, 159, 1, 0, 0, 0, 0, 161, 1, 0, 0, 0, 0, 163, 1, 0, 0, 0, 0, 165, 1, 0, 0, 0, 0, 167, 1, 0, 0, 0, 0, 169, 1, 0, 0, 0, 0, 171, 1, 0, 0, 0, 0, 173, 1, 0, 0, 0, 0, 175, 1, 0, 0, 0, 0, 177, 1, 0, 0, 0, 0, 179, 1, 0, 0, 0, 0, 181, 1, 0, 0, 0, 0, 183, 1, 0, 0, 0, 0, 185, 1, 0, 0, 0, 0, 187, 1, 0, 0, 0, 0, 189, 1, 0, 0, 0, 0, 191, 1, 0, 0, 0, 0, 193, 1, 0, 0, 0, 0, 195, 1, 0, 0, 0, 0, 197, 1, 0, 0, 0, 0, 199, 1, 0, 0, 0, 0, 201, 1, 0, 0, 0, 0, 203, 1, 0, 0, 0, 0, 205, 1, 0, 0, 0, 0, 207, 1, 0, 0, 0, 0, 209, 1, 0, 0, 0, 0, 211, 1, 0, 0, 0, 0, 213, 1, 0, 0, 0, 0, 215, 1, 0, 0, 0, 0, 217, 1, 0, 0, 0, 0, 219, 1, 0, 0, 0, 0, 221, 1, 0, 0, 0, 
0, 223, 1, 0, 0, 0, 0, 225, 1, 0, 0, 0, 0, 227, 1, 0, 0, 0, 0, 229, 1, 0, 0, 0, 0, 231, 1, 0, 0, 0, 0, 233, 1, 0, 0, 0, 0, 235, 1, 0, 0, 0, 0, 237, 1, 0, 0, 0, 0, 239, 1, 0, 0, 0, 0, 241, 1, 0, 0, 0, 0, 243, 1, 0, 0, 0, 0, 245, 1, 0, 0, 0, 0, 247, 1, 0, 0, 0, 0, 249, 1, 0, 0, 0, 0, 251, 1, 0, 0, 0, 0, 253, 1, 0, 0, 0, 0, 255, 1, 0, 0, 0, 0, 257, 1, 0, 0, 0, 0, 259, 1, 0, 0, 0, 0, 261, 1, 0, 0, 0, 0, 263, 1, 0, 0, 0, 0, 265, 1, 0, 0, 0, 0, 267, 1, 0, 0, 0, 0, 269, 1, 0, 0, 0, 0, 271, 1, 0, 0, 0, 0, 273, 1, 0, 0, 0, 0, 275, 1, 0, 0, 0, 0, 277, 1, 0, 0, 0, 0, 279, 1, 0, 0, 0, 0, 281, 1, 0, 0, 0, 0, 283, 1, 0, 0, 0, 0, 285, 1, 0, 0, 0, 0, 287, 1, 0, 0, 0, 0, 289, 1, 0, 0, 0, 0, 291, 1, 0, 0, 0, 0, 293, 1, 0, 0, 0, 0, 295, 1, 0, 0, 0, 0, 297, 1, 0, 0, 0, 0, 299, 1, 0, 0, 0, 0, 301, 1, 0, 0, 0, 0, 303, 1, 0, 0, 0, 0, 305, 1, 0, 0, 0, 0, 307, 1, 0, 0, 0, 0, 309, 1, 0, 0, 0, 0, 311, 1, 0, 0, 0, 0, 313, 1, 0, 0, 0, 0, 315, 1, 0, 0, 0, 0, 317, 1, 0, 0, 0, 0, 319, 1, 0, 0, 0, 0, 321, 1, 0, 0, 0, 0, 323, 1, 0, 0, 0, 0, 325, 1, 0, 0, 0, 0, 327, 1, 0, 0, 0, 0, 329, 1, 0, 0, 0, 0, 331, 1, 0, 0, 0, 0, 333, 1, 0, 0, 0, 0, 335, 1, 0, 0, 0, 0, 337, 1, 0, 0, 0, 0, 339, 1, 0, 0, 0, 0, 341, 1, 0, 0, 0, 0, 343, 1, 0, 0, 0, 0, 345, 1, 0, 0, 0, 0, 347, 1, 0, 0, 0, 0, 349, 1, 0, 0, 0, 0, 351, 1, 0, 0, 0, 0, 353, 1, 0, 0, 0, 0, 355, 1, 0, 0, 0, 0, 357, 1, 0, 0, 0, 0, 359, 1, 0, 0, 0, 0, 361, 1, 0, 0, 0, 0, 363, 1, 0, 0, 0, 0, 365, 1, 0, 0, 0, 0, 367, 1, 0, 0, 0, 0, 369, 1, 0, 0, 0, 0, 371, 1, 0, 0, 0, 0, 373, 1, 0, 0, 0, 0, 375, 1, 0, 0, 0, 0, 377, 1, 0, 0, 0, 0, 379, 1, 0, 0, 0, 0, 381, 1, 0, 0, 0, 0, 383, 1, 0, 0, 0, 0, 385, 1, 0, 0, 0, 0, 387, 1, 0, 0, 0, 0, 389, 1, 0, 0, 0, 0, 391, 1, 0, 0, 0, 0, 393, 1, 0, 0, 0, 0, 395, 1, 0, 0, 0, 0, 397, 1, 0, 0, 0, 0, 399, 1, 0, 0, 0, 0, 401, 1, 0, 0, 0, 0, 403, 1, 0, 0, 0, 0, 405, 1, 0, 0, 0, 0, 407, 1, 0, 0, 0, 0, 409, 1, 0, 0, 0, 0, 411, 1, 0, 0, 0, 0, 413, 1, 0, 0, 0, 0, 415, 1, 0, 0, 0, 0, 417, 1, 0, 0, 0, 0, 419, 1, 0, 0, 0, 0, 421, 1, 0, 0, 0, 0, 423, 1, 0, 0, 0, 0, 425, 1, 0, 0, 0, 0, 427, 1, 0, 0, 0, 0, 429, 1, 0, 0, 0, 0, 431, 1, 0, 0, 0, 0, 433, 1, 0, 0, 0, 1, 465, 1, 0, 0, 0, 3, 469, 1, 0, 0, 0, 5, 472, 1, 0, 0, 0, 7, 474, 1, 0, 0, 0, 9, 490, 1, 0, 0, 0, 11, 500, 1, 0, 0, 0, 13, 508, 1, 0, 0, 0, 15, 512, 1, 0, 0, 0, 17, 518, 1, 0, 0, 0, 19, 522, 1, 0, 0, 0, 21, 528, 1, 0, 0, 0, 23, 535, 1, 0, 0, 0, 25, 545, 1, 0, 0, 0, 27, 549, 1, 0, 0, 0, 29, 552, 1, 0, 0, 0, 31, 556, 1, 0, 0, 0, 33, 570, 1, 0, 0, 0, 35, 577, 1, 0, 0, 0, 37, 585, 1, 0, 0, 0, 39, 588, 1, 0, 0, 0, 41, 594, 1, 0, 0, 0, 43, 599, 1, 0, 0, 0, 45, 607, 1, 0, 0, 0, 47, 612, 1, 0, 0, 0, 49, 623, 1, 0, 0, 0, 51, 631, 1, 0, 0, 0, 53, 637, 1, 0, 0, 0, 55, 644, 1, 0, 0, 0, 57, 655, 1, 0, 0, 0, 59, 657, 1, 0, 0, 0, 61, 665, 1, 0, 0, 0, 63, 673, 1, 0, 0, 0, 65, 680, 1, 0, 0, 0, 67, 685, 1, 0, 0, 0, 69, 697, 1, 0, 0, 0, 71, 706, 1, 0, 0, 0, 73, 714, 1, 0, 0, 0, 75, 723, 1, 0, 0, 0, 77, 728, 1, 0, 0, 0, 79, 738, 1, 0, 0, 0, 81, 747, 1, 0, 0, 0, 83, 752, 1, 0, 0, 0, 85, 759, 1, 0, 0, 0, 87, 763, 1, 0, 0, 0, 89, 773, 1, 0, 0, 0, 91, 785, 1, 0, 0, 0, 93, 792, 1, 0, 0, 0, 95, 800, 1, 0, 0, 0, 97, 807, 1, 0, 0, 0, 99, 813, 1, 0, 0, 0, 101, 819, 1, 0, 0, 0, 103, 823, 1, 0, 0, 0, 105, 829, 1, 0, 0, 0, 107, 836, 1, 0, 0, 0, 109, 841, 1, 0, 0, 0, 111, 848, 1, 0, 0, 0, 113, 857, 1, 0, 0, 0, 115, 867, 1, 0, 0, 0, 117, 873, 1, 0, 0, 0, 119, 885, 1, 0, 0, 0, 121, 887, 1, 0, 0, 0, 123, 898, 1, 0, 0, 0, 125, 907, 1, 0, 0, 0, 127, 910, 1, 0, 0, 0, 129, 916, 1, 0, 0, 0, 131, 919, 1, 0, 0, 0, 133, 929, 1, 0, 0, 0, 135, 935, 1, 0, 
0, 0, 137, 943, 1, 0, 0, 0, 139, 950, 1, 0, 0, 0, 141, 955, 1, 0, 0, 0, 143, 958, 1, 0, 0, 0, 145, 963, 1, 0, 0, 0, 147, 968, 1, 0, 0, 0, 149, 972, 1, 0, 0, 0, 151, 978, 1, 0, 0, 0, 153, 983, 1, 0, 0, 0, 155, 988, 1, 0, 0, 0, 157, 993, 1, 0, 0, 0, 159, 1002, 1, 0, 0, 0, 161, 1008, 1, 0, 0, 0, 163, 1014, 1, 0, 0, 0, 165, 1019, 1, 0, 0, 0, 167, 1028, 1, 0, 0, 0, 169, 1042, 1, 0, 0, 0, 171, 1044, 1, 0, 0, 0, 173, 1053, 1, 0, 0, 0, 175, 1060, 1, 0, 0, 0, 177, 1071, 1, 0, 0, 0, 179, 1081, 1, 0, 0, 0, 181, 1092, 1, 0, 0, 0, 183, 1099, 1, 0, 0, 0, 185, 1102, 1, 0, 0, 0, 187, 1106, 1, 0, 0, 0, 189, 1112, 1, 0, 0, 0, 191, 1119, 1, 0, 0, 0, 193, 1122, 1, 0, 0, 0, 195, 1125, 1, 0, 0, 0, 197, 1130, 1, 0, 0, 0, 199, 1133, 1, 0, 0, 0, 201, 1139, 1, 0, 0, 0, 203, 1145, 1, 0, 0, 0, 205, 1154, 1, 0, 0, 0, 207, 1163, 1, 0, 0, 0, 209, 1169, 1, 0, 0, 0, 211, 1173, 1, 0, 0, 0, 213, 1177, 1, 0, 0, 0, 215, 1183, 1, 0, 0, 0, 217, 1191, 1, 0, 0, 0, 219, 1195, 1, 0, 0, 0, 221, 1202, 1, 0, 0, 0, 223, 1210, 1, 0, 0, 0, 225, 1217, 1, 0, 0, 0, 227, 1227, 1, 0, 0, 0, 229, 1234, 1, 0, 0, 0, 231, 1239, 1, 0, 0, 0, 233, 1245, 1, 0, 0, 0, 235, 1249, 1, 0, 0, 0, 237, 1264, 1, 0, 0, 0, 239, 1266, 1, 0, 0, 0, 241, 1273, 1, 0, 0, 0, 243, 1287, 1, 0, 0, 0, 245, 1291, 1, 0, 0, 0, 247, 1297, 1, 0, 0, 0, 249, 1302, 1, 0, 0, 0, 251, 1308, 1, 0, 0, 0, 253, 1314, 1, 0, 0, 0, 255, 1321, 1, 0, 0, 0, 257, 1326, 1, 0, 0, 0, 259, 1329, 1, 0, 0, 0, 261, 1333, 1, 0, 0, 0, 263, 1338, 1, 0, 0, 0, 265, 1347, 1, 0, 0, 0, 267, 1354, 1, 0, 0, 0, 269, 1361, 1, 0, 0, 0, 271, 1368, 1, 0, 0, 0, 273, 1373, 1, 0, 0, 0, 275, 1379, 1, 0, 0, 0, 277, 1385, 1, 0, 0, 0, 279, 1392, 1, 0, 0, 0, 281, 1397, 1, 0, 0, 0, 283, 1403, 1, 0, 0, 0, 285, 1408, 1, 0, 0, 0, 287, 1415, 1, 0, 0, 0, 289, 1422, 1, 0, 0, 0, 291, 1427, 1, 0, 0, 0, 293, 1435, 1, 0, 0, 0, 295, 1443, 1, 0, 0, 0, 297, 1451, 1, 0, 0, 0, 299, 1465, 1, 0, 0, 0, 301, 1479, 1, 0, 0, 0, 303, 1493, 1, 0, 0, 0, 305, 1499, 1, 0, 0, 0, 307, 1506, 1, 0, 0, 0, 309, 1514, 1, 0, 0, 0, 311, 1521, 1, 0, 0, 0, 313, 1526, 1, 0, 0, 0, 315, 1532, 1, 0, 0, 0, 317, 1541, 1, 0, 0, 0, 319, 1549, 1, 0, 0, 0, 321, 1554, 1, 0, 0, 0, 323, 1558, 1, 0, 0, 0, 325, 1565, 1, 0, 0, 0, 327, 1571, 1, 0, 0, 0, 329, 1578, 1, 0, 0, 0, 331, 1585, 1, 0, 0, 0, 333, 1595, 1, 0, 0, 0, 335, 1599, 1, 0, 0, 0, 337, 1609, 1, 0, 0, 0, 339, 1623, 1, 0, 0, 0, 341, 1633, 1, 0, 0, 0, 343, 1640, 1, 0, 0, 0, 345, 1642, 1, 0, 0, 0, 347, 1644, 1, 0, 0, 0, 349, 1646, 1, 0, 0, 0, 351, 1648, 1, 0, 0, 0, 353, 1650, 1, 0, 0, 0, 355, 1652, 1, 0, 0, 0, 357, 1654, 1, 0, 0, 0, 359, 1656, 1, 0, 0, 0, 361, 1658, 1, 0, 0, 0, 363, 1660, 1, 0, 0, 0, 365, 1662, 1, 0, 0, 0, 367, 1664, 1, 0, 0, 0, 369, 1666, 1, 0, 0, 0, 371, 1668, 1, 0, 0, 0, 373, 1671, 1, 0, 0, 0, 375, 1673, 1, 0, 0, 0, 377, 1676, 1, 0, 0, 0, 379, 1678, 1, 0, 0, 0, 381, 1681, 1, 0, 0, 0, 383, 1686, 1, 0, 0, 0, 385, 1693, 1, 0, 0, 0, 387, 1698, 1, 0, 0, 0, 389, 1705, 1, 0, 0, 0, 391, 1710, 1, 0, 0, 0, 393, 1717, 1, 0, 0, 0, 395, 1719, 1, 0, 0, 0, 397, 1721, 1, 0, 0, 0, 399, 1723, 1, 0, 0, 0, 401, 1727, 1, 0, 0, 0, 403, 1730, 1, 0, 0, 0, 405, 1735, 1, 0, 0, 0, 407, 1741, 1, 0, 0, 0, 409, 1747, 1, 0, 0, 0, 411, 1788, 1, 0, 0, 0, 413, 1792, 1, 0, 0, 0, 415, 1796, 1, 0, 0, 0, 417, 1806, 1, 0, 0, 0, 419, 1816, 1, 0, 0, 0, 421, 1821, 1, 0, 0, 0, 423, 1832, 1, 0, 0, 0, 425, 1843, 1, 0, 0, 0, 427, 1849, 1, 0, 0, 0, 429, 1864, 1, 0, 0, 0, 431, 1875, 1, 0, 0, 0, 433, 1884, 1, 0, 0, 0, 435, 1886, 1, 0, 0, 0, 437, 1888, 1, 0, 0, 0, 439, 1890, 1, 0, 0, 0, 441, 1895, 1, 0, 0, 0, 443, 1900, 1, 0, 0, 0, 445, 1902, 1, 0, 
0, 0, 447, 1904, 1, 0, 0, 0, 449, 1910, 1, 0, 0, 0, 451, 1916, 1, 0, 0, 0, 453, 1924, 1, 0, 0, 0, 455, 1931, 1, 0, 0, 0, 457, 1942, 1, 0, 0, 0, 459, 1949, 1, 0, 0, 0, 461, 1960, 1, 0, 0, 0, 463, 1967, 1, 0, 0, 0, 465, 466, 5, 47, 0, 0, 466, 467, 5, 42, 0, 0, 467, 468, 5, 43, 0, 0, 468, 2, 1, 0, 0, 0, 469, 470, 5, 42, 0, 0, 470, 471, 5, 47, 0, 0, 471, 4, 1, 0, 0, 0, 472, 473, 5, 64, 0, 0, 473, 6, 1, 0, 0, 0, 474, 475, 5, 114, 0, 0, 475, 476, 5, 111, 0, 0, 476, 477, 5, 119, 0, 0, 477, 478, 5, 95, 0, 0, 478, 479, 5, 109, 0, 0, 479, 480, 5, 101, 0, 0, 480, 481, 5, 116, 0, 0, 481, 482, 5, 97, 0, 0, 482, 483, 5, 100, 0, 0, 483, 484, 5, 97, 0, 0, 484, 485, 5, 116, 0, 0, 485, 486, 5, 97, 0, 0, 486, 487, 5, 40, 0, 0, 487, 488, 5, 41, 0, 0, 488, 489, 5, 46, 0, 0, 489, 8, 1, 0, 0, 0, 490, 491, 3, 365, 182, 0, 491, 497, 3, 435, 217, 0, 492, 496, 3, 435, 217, 0, 493, 496, 3, 437, 218, 0, 494, 496, 3, 445, 222, 0, 495, 492, 1, 0, 0, 0, 495, 493, 1, 0, 0, 0, 495, 494, 1, 0, 0, 0, 496, 499, 1, 0, 0, 0, 497, 495, 1, 0, 0, 0, 497, 498, 1, 0, 0, 0, 498, 10, 1, 0, 0, 0, 499, 497, 1, 0, 0, 0, 500, 501, 7, 0, 0, 0, 501, 502, 7, 1, 0, 0, 502, 503, 7, 1, 0, 0, 503, 504, 7, 2, 0, 0, 504, 505, 7, 3, 0, 0, 505, 506, 7, 4, 0, 0, 506, 507, 7, 5, 0, 0, 507, 12, 1, 0, 0, 0, 508, 509, 7, 0, 0, 0, 509, 510, 7, 6, 0, 0, 510, 511, 7, 6, 0, 0, 511, 14, 1, 0, 0, 0, 512, 513, 7, 0, 0, 0, 513, 514, 7, 6, 0, 0, 514, 515, 7, 7, 0, 0, 515, 516, 7, 8, 0, 0, 516, 517, 7, 4, 0, 0, 517, 16, 1, 0, 0, 0, 518, 519, 7, 0, 0, 0, 519, 520, 7, 9, 0, 0, 520, 521, 7, 9, 0, 0, 521, 18, 1, 0, 0, 0, 522, 523, 7, 0, 0, 0, 523, 524, 7, 9, 0, 0, 524, 525, 7, 5, 0, 0, 525, 526, 7, 10, 0, 0, 526, 527, 7, 11, 0, 0, 527, 20, 1, 0, 0, 0, 528, 529, 7, 0, 0, 0, 529, 530, 7, 9, 0, 0, 530, 531, 7, 12, 0, 0, 531, 532, 7, 0, 0, 0, 532, 533, 7, 13, 0, 0, 533, 534, 7, 14, 0, 0, 534, 22, 1, 0, 0, 0, 535, 536, 7, 0, 0, 0, 536, 537, 7, 4, 0, 0, 537, 538, 7, 1, 0, 0, 538, 539, 7, 10, 0, 0, 539, 540, 7, 14, 0, 0, 540, 541, 7, 5, 0, 0, 541, 542, 7, 2, 0, 0, 542, 543, 7, 11, 0, 0, 543, 544, 7, 14, 0, 0, 544, 24, 1, 0, 0, 0, 545, 546, 7, 0, 0, 0, 546, 547, 7, 4, 0, 0, 547, 548, 7, 6, 0, 0, 548, 26, 1, 0, 0, 0, 549, 550, 7, 0, 0, 0, 550, 551, 7, 14, 0, 0, 551, 28, 1, 0, 0, 0, 552, 553, 7, 0, 0, 0, 553, 554, 7, 14, 0, 0, 554, 555, 7, 1, 0, 0, 555, 30, 1, 0, 0, 0, 556, 557, 5, 97, 0, 0, 557, 558, 5, 114, 0, 0, 558, 559, 5, 114, 0, 0, 559, 560, 5, 97, 0, 0, 560, 561, 5, 121, 0, 0, 561, 562, 5, 95, 0, 0, 562, 563, 5, 99, 0, 0, 563, 564, 5, 111, 0, 0, 564, 565, 5, 108, 0, 0, 565, 566, 5, 108, 0, 0, 566, 567, 5, 101, 0, 0, 567, 568, 5, 99, 0, 0, 568, 569, 5, 116, 0, 0, 569, 32, 1, 0, 0, 0, 570, 571, 7, 15, 0, 0, 571, 572, 7, 10, 0, 0, 572, 573, 7, 16, 0, 0, 573, 574, 7, 2, 0, 0, 574, 575, 7, 11, 0, 0, 575, 576, 7, 10, 0, 0, 576, 34, 1, 0, 0, 0, 577, 578, 7, 15, 0, 0, 578, 579, 7, 10, 0, 0, 579, 580, 7, 5, 0, 0, 580, 581, 7, 12, 0, 0, 581, 582, 7, 10, 0, 0, 582, 583, 7, 10, 0, 0, 583, 584, 7, 4, 0, 0, 584, 36, 1, 0, 0, 0, 585, 586, 7, 15, 0, 0, 586, 587, 7, 13, 0, 0, 587, 38, 1, 0, 0, 0, 588, 589, 7, 1, 0, 0, 589, 590, 7, 0, 0, 0, 590, 591, 7, 1, 0, 0, 591, 592, 7, 17, 0, 0, 592, 593, 7, 10, 0, 0, 593, 40, 1, 0, 0, 0, 594, 595, 7, 1, 0, 0, 595, 596, 7, 0, 0, 0, 596, 597, 7, 14, 0, 0, 597, 598, 7, 10, 0, 0, 598, 42, 1, 0, 0, 0, 599, 600, 7, 1, 0, 0, 600, 601, 7, 0, 0, 0, 601, 602, 7, 14, 0, 0, 602, 603, 7, 1, 0, 0, 603, 604, 7, 0, 0, 0, 604, 605, 7, 6, 0, 0, 605, 606, 7, 10, 0, 0, 606, 44, 1, 0, 0, 0, 607, 608, 7, 1, 0, 0, 608, 609, 7, 0, 0, 0, 609, 610, 7, 14, 0, 0, 610, 611, 
7, 5, 0, 0, 611, 46, 1, 0, 0, 0, 612, 613, 7, 1, 0, 0, 613, 614, 7, 2, 0, 0, 614, 615, 7, 9, 0, 0, 615, 616, 7, 9, 0, 0, 616, 617, 7, 10, 0, 0, 617, 618, 7, 1, 0, 0, 618, 619, 7, 5, 0, 0, 619, 620, 7, 8, 0, 0, 620, 621, 7, 2, 0, 0, 621, 622, 7, 4, 0, 0, 622, 48, 1, 0, 0, 0, 623, 624, 7, 1, 0, 0, 624, 625, 7, 2, 0, 0, 625, 626, 7, 7, 0, 0, 626, 627, 7, 7, 0, 0, 627, 628, 7, 10, 0, 0, 628, 629, 7, 4, 0, 0, 629, 630, 7, 5, 0, 0, 630, 50, 1, 0, 0, 0, 631, 632, 5, 99, 0, 0, 632, 633, 5, 111, 0, 0, 633, 634, 5, 117, 0, 0, 634, 635, 5, 110, 0, 0, 635, 636, 5, 116, 0, 0, 636, 52, 1, 0, 0, 0, 637, 638, 7, 1, 0, 0, 638, 639, 7, 11, 0, 0, 639, 640, 7, 10, 0, 0, 640, 641, 7, 0, 0, 0, 641, 642, 7, 5, 0, 0, 642, 643, 7, 10, 0, 0, 643, 54, 1, 0, 0, 0, 644, 645, 7, 1, 0, 0, 645, 646, 7, 13, 0, 0, 646, 647, 7, 1, 0, 0, 647, 648, 7, 9, 0, 0, 648, 649, 7, 10, 0, 0, 649, 56, 1, 0, 0, 0, 650, 656, 7, 6, 0, 0, 651, 652, 7, 6, 0, 0, 652, 653, 7, 0, 0, 0, 653, 654, 7, 13, 0, 0, 654, 656, 7, 14, 0, 0, 655, 650, 1, 0, 0, 0, 655, 651, 1, 0, 0, 0, 656, 58, 1, 0, 0, 0, 657, 658, 7, 6, 0, 0, 658, 659, 7, 10, 0, 0, 659, 660, 7, 1, 0, 0, 660, 661, 7, 9, 0, 0, 661, 662, 7, 0, 0, 0, 662, 663, 7, 11, 0, 0, 663, 664, 7, 10, 0, 0, 664, 60, 1, 0, 0, 0, 665, 666, 7, 6, 0, 0, 666, 667, 7, 10, 0, 0, 667, 668, 7, 16, 0, 0, 668, 669, 7, 0, 0, 0, 669, 670, 7, 3, 0, 0, 670, 671, 7, 9, 0, 0, 671, 672, 7, 5, 0, 0, 672, 62, 1, 0, 0, 0, 673, 674, 7, 6, 0, 0, 674, 675, 7, 10, 0, 0, 675, 676, 7, 9, 0, 0, 676, 677, 7, 10, 0, 0, 677, 678, 7, 5, 0, 0, 678, 679, 7, 10, 0, 0, 679, 64, 1, 0, 0, 0, 680, 681, 7, 6, 0, 0, 681, 682, 7, 10, 0, 0, 682, 683, 7, 14, 0, 0, 683, 684, 7, 1, 0, 0, 684, 66, 1, 0, 0, 0, 685, 686, 7, 6, 0, 0, 686, 687, 7, 10, 0, 0, 687, 688, 7, 14, 0, 0, 688, 689, 7, 1, 0, 0, 689, 690, 7, 10, 0, 0, 690, 691, 7, 4, 0, 0, 691, 692, 7, 6, 0, 0, 692, 693, 7, 0, 0, 0, 693, 694, 7, 4, 0, 0, 694, 695, 7, 5, 0, 0, 695, 696, 7, 14, 0, 0, 696, 68, 1, 0, 0, 0, 697, 698, 7, 6, 0, 0, 698, 699, 7, 10, 0, 0, 699, 700, 7, 14, 0, 0, 700, 701, 7, 1, 0, 0, 701, 702, 7, 11, 0, 0, 702, 703, 7, 8, 0, 0, 703, 704, 7, 15, 0, 0, 704, 705, 7, 10, 0, 0, 705, 70, 1, 0, 0, 0, 706, 707, 7, 6, 0, 0, 707, 708, 7, 8, 0, 0, 708, 709, 7, 14, 0, 0, 709, 710, 7, 0, 0, 0, 710, 711, 7, 15, 0, 0, 711, 712, 7, 9, 0, 0, 712, 713, 7, 10, 0, 0, 713, 72, 1, 0, 0, 0, 714, 715, 7, 6, 0, 0, 715, 716, 7, 8, 0, 0, 716, 717, 7, 14, 0, 0, 717, 718, 7, 5, 0, 0, 718, 719, 7, 8, 0, 0, 719, 720, 7, 4, 0, 0, 720, 721, 7, 1, 0, 0, 721, 722, 7, 5, 0, 0, 722, 74, 1, 0, 0, 0, 723, 724, 7, 6, 0, 0, 724, 725, 7, 11, 0, 0, 725, 726, 7, 2, 0, 0, 726, 727, 7, 18, 0, 0, 727, 76, 1, 0, 0, 0, 728, 729, 7, 10, 0, 0, 729, 730, 7, 9, 0, 0, 730, 731, 7, 10, 0, 0, 731, 732, 7, 7, 0, 0, 732, 733, 7, 10, 0, 0, 733, 734, 7, 4, 0, 0, 734, 735, 7, 5, 0, 0, 735, 736, 7, 2, 0, 0, 736, 737, 7, 16, 0, 0, 737, 78, 1, 0, 0, 0, 738, 739, 7, 10, 0, 0, 739, 740, 7, 9, 0, 0, 740, 741, 7, 10, 0, 0, 741, 742, 7, 7, 0, 0, 742, 743, 7, 10, 0, 0, 743, 744, 7, 4, 0, 0, 744, 745, 7, 5, 0, 0, 745, 746, 7, 14, 0, 0, 746, 80, 1, 0, 0, 0, 747, 748, 7, 10, 0, 0, 748, 749, 7, 9, 0, 0, 749, 750, 7, 14, 0, 0, 750, 751, 7, 10, 0, 0, 751, 82, 1, 0, 0, 0, 752, 753, 7, 10, 0, 0, 753, 754, 7, 4, 0, 0, 754, 755, 7, 0, 0, 0, 755, 756, 7, 15, 0, 0, 756, 757, 7, 9, 0, 0, 757, 758, 7, 10, 0, 0, 758, 84, 1, 0, 0, 0, 759, 760, 7, 10, 0, 0, 760, 761, 7, 4, 0, 0, 761, 762, 7, 6, 0, 0, 762, 86, 1, 0, 0, 0, 763, 764, 7, 10, 0, 0, 764, 765, 7, 14, 0, 0, 765, 766, 3, 445, 222, 0, 766, 767, 7, 14, 0, 0, 767, 768, 7, 17, 0, 0, 768, 769, 7, 0, 0, 0, 
769, 770, 7, 11, 0, 0, 770, 771, 7, 6, 0, 0, 771, 772, 7, 14, 0, 0, 772, 88, 1, 0, 0, 0, 773, 774, 7, 10, 0, 0, 774, 775, 7, 14, 0, 0, 775, 776, 3, 445, 222, 0, 776, 777, 7, 11, 0, 0, 777, 778, 7, 10, 0, 0, 778, 779, 7, 18, 0, 0, 779, 780, 7, 9, 0, 0, 780, 781, 7, 8, 0, 0, 781, 782, 7, 1, 0, 0, 782, 783, 7, 0, 0, 0, 783, 784, 7, 14, 0, 0, 784, 90, 1, 0, 0, 0, 785, 786, 7, 10, 0, 0, 786, 787, 7, 19, 0, 0, 787, 788, 7, 8, 0, 0, 788, 789, 7, 14, 0, 0, 789, 790, 7, 5, 0, 0, 790, 791, 7, 14, 0, 0, 791, 92, 1, 0, 0, 0, 792, 793, 7, 10, 0, 0, 793, 794, 7, 19, 0, 0, 794, 795, 7, 5, 0, 0, 795, 796, 7, 11, 0, 0, 796, 797, 7, 0, 0, 0, 797, 798, 7, 1, 0, 0, 798, 799, 7, 5, 0, 0, 799, 94, 1, 0, 0, 0, 800, 801, 7, 16, 0, 0, 801, 802, 7, 8, 0, 0, 802, 803, 7, 10, 0, 0, 803, 804, 7, 9, 0, 0, 804, 805, 7, 6, 0, 0, 805, 806, 7, 14, 0, 0, 806, 96, 1, 0, 0, 0, 807, 808, 7, 16, 0, 0, 808, 809, 7, 8, 0, 0, 809, 810, 7, 11, 0, 0, 810, 811, 7, 14, 0, 0, 811, 812, 7, 5, 0, 0, 812, 98, 1, 0, 0, 0, 813, 814, 7, 16, 0, 0, 814, 815, 7, 2, 0, 0, 815, 816, 7, 11, 0, 0, 816, 817, 7, 1, 0, 0, 817, 818, 7, 10, 0, 0, 818, 100, 1, 0, 0, 0, 819, 820, 3, 99, 49, 0, 820, 821, 3, 445, 222, 0, 821, 822, 3, 133, 66, 0, 822, 102, 1, 0, 0, 0, 823, 824, 3, 99, 49, 0, 824, 825, 3, 445, 222, 0, 825, 826, 3, 215, 107, 0, 826, 827, 3, 445, 222, 0, 827, 828, 3, 133, 66, 0, 828, 104, 1, 0, 0, 0, 829, 830, 7, 16, 0, 0, 830, 831, 7, 11, 0, 0, 831, 832, 7, 10, 0, 0, 832, 833, 7, 10, 0, 0, 833, 834, 7, 20, 0, 0, 834, 835, 7, 10, 0, 0, 835, 106, 1, 0, 0, 0, 836, 837, 7, 16, 0, 0, 837, 838, 7, 11, 0, 0, 838, 839, 7, 2, 0, 0, 839, 840, 7, 7, 0, 0, 840, 108, 1, 0, 0, 0, 841, 842, 7, 16, 0, 0, 842, 843, 7, 11, 0, 0, 843, 844, 7, 2, 0, 0, 844, 845, 7, 20, 0, 0, 845, 846, 7, 10, 0, 0, 846, 847, 7, 4, 0, 0, 847, 110, 1, 0, 0, 0, 848, 849, 7, 16, 0, 0, 849, 850, 7, 3, 0, 0, 850, 851, 7, 9, 0, 0, 851, 852, 7, 9, 0, 0, 852, 853, 7, 5, 0, 0, 853, 854, 7, 10, 0, 0, 854, 855, 7, 19, 0, 0, 855, 856, 7, 5, 0, 0, 856, 112, 1, 0, 0, 0, 857, 858, 7, 21, 0, 0, 858, 859, 7, 10, 0, 0, 859, 860, 7, 4, 0, 0, 860, 861, 7, 10, 0, 0, 861, 862, 7, 11, 0, 0, 862, 863, 7, 0, 0, 0, 863, 864, 7, 5, 0, 0, 864, 865, 7, 10, 0, 0, 865, 866, 7, 6, 0, 0, 866, 114, 1, 0, 0, 0, 867, 868, 7, 21, 0, 0, 868, 869, 7, 11, 0, 0, 869, 870, 7, 0, 0, 0, 870, 871, 7, 4, 0, 0, 871, 872, 7, 5, 0, 0, 872, 116, 1, 0, 0, 0, 873, 874, 7, 21, 0, 0, 874, 875, 7, 11, 0, 0, 875, 876, 7, 2, 0, 0, 876, 877, 7, 3, 0, 0, 877, 878, 7, 18, 0, 0, 878, 118, 1, 0, 0, 0, 879, 886, 7, 17, 0, 0, 880, 881, 7, 17, 0, 0, 881, 882, 7, 2, 0, 0, 882, 883, 7, 3, 0, 0, 883, 884, 7, 11, 0, 0, 884, 886, 7, 14, 0, 0, 885, 879, 1, 0, 0, 0, 885, 880, 1, 0, 0, 0, 886, 120, 1, 0, 0, 0, 887, 888, 7, 8, 0, 0, 888, 889, 7, 6, 0, 0, 889, 890, 7, 10, 0, 0, 890, 891, 7, 4, 0, 0, 891, 892, 7, 5, 0, 0, 892, 893, 7, 8, 0, 0, 893, 894, 7, 16, 0, 0, 894, 895, 7, 8, 0, 0, 895, 896, 7, 10, 0, 0, 896, 897, 7, 6, 0, 0, 897, 122, 1, 0, 0, 0, 898, 899, 7, 8, 0, 0, 899, 900, 7, 6, 0, 0, 900, 901, 7, 10, 0, 0, 901, 902, 7, 4, 0, 0, 902, 903, 7, 5, 0, 0, 903, 904, 7, 8, 0, 0, 904, 905, 7, 5, 0, 0, 905, 906, 7, 13, 0, 0, 906, 124, 1, 0, 0, 0, 907, 908, 7, 8, 0, 0, 908, 909, 7, 16, 0, 0, 909, 126, 1, 0, 0, 0, 910, 911, 7, 8, 0, 0, 911, 912, 7, 7, 0, 0, 912, 913, 7, 0, 0, 0, 913, 914, 7, 21, 0, 0, 914, 915, 7, 10, 0, 0, 915, 128, 1, 0, 0, 0, 916, 917, 7, 8, 0, 0, 917, 918, 7, 4, 0, 0, 918, 130, 1, 0, 0, 0, 919, 920, 7, 8, 0, 0, 920, 921, 7, 4, 0, 0, 921, 922, 7, 1, 0, 0, 922, 923, 7, 11, 0, 0, 923, 924, 7, 10, 0, 0, 924, 925, 7, 7, 0, 0, 925, 926, 7, 
10, 0, 0, 926, 927, 7, 4, 0, 0, 927, 928, 7, 5, 0, 0, 928, 132, 1, 0, 0, 0, 929, 930, 7, 8, 0, 0, 930, 931, 7, 4, 0, 0, 931, 932, 7, 6, 0, 0, 932, 933, 7, 10, 0, 0, 933, 934, 7, 19, 0, 0, 934, 134, 1, 0, 0, 0, 935, 936, 7, 8, 0, 0, 936, 937, 7, 4, 0, 0, 937, 938, 7, 6, 0, 0, 938, 939, 7, 10, 0, 0, 939, 940, 7, 19, 0, 0, 940, 941, 7, 10, 0, 0, 941, 942, 7, 14, 0, 0, 942, 136, 1, 0, 0, 0, 943, 944, 7, 8, 0, 0, 944, 945, 7, 4, 0, 0, 945, 946, 7, 14, 0, 0, 946, 947, 7, 10, 0, 0, 947, 948, 7, 11, 0, 0, 948, 949, 7, 5, 0, 0, 949, 138, 1, 0, 0, 0, 950, 951, 7, 8, 0, 0, 951, 952, 7, 4, 0, 0, 952, 953, 7, 5, 0, 0, 953, 954, 7, 2, 0, 0, 954, 140, 1, 0, 0, 0, 955, 956, 7, 8, 0, 0, 956, 957, 7, 14, 0, 0, 957, 142, 1, 0, 0, 0, 958, 959, 7, 22, 0, 0, 959, 960, 7, 14, 0, 0, 960, 961, 7, 2, 0, 0, 961, 962, 7, 4, 0, 0, 962, 144, 1, 0, 0, 0, 963, 964, 7, 22, 0, 0, 964, 965, 7, 2, 0, 0, 965, 966, 7, 8, 0, 0, 966, 967, 7, 4, 0, 0, 967, 146, 1, 0, 0, 0, 968, 969, 7, 23, 0, 0, 969, 970, 7, 10, 0, 0, 970, 971, 7, 13, 0, 0, 971, 148, 1, 0, 0, 0, 972, 973, 7, 23, 0, 0, 973, 974, 7, 10, 0, 0, 974, 975, 7, 13, 0, 0, 975, 976, 7, 2, 0, 0, 976, 977, 7, 16, 0, 0, 977, 150, 1, 0, 0, 0, 978, 979, 7, 23, 0, 0, 979, 980, 7, 10, 0, 0, 980, 981, 7, 13, 0, 0, 981, 982, 7, 14, 0, 0, 982, 152, 1, 0, 0, 0, 983, 984, 7, 9, 0, 0, 984, 985, 7, 0, 0, 0, 985, 986, 7, 14, 0, 0, 986, 987, 7, 5, 0, 0, 987, 154, 1, 0, 0, 0, 988, 989, 7, 9, 0, 0, 989, 990, 7, 10, 0, 0, 990, 991, 7, 16, 0, 0, 991, 992, 7, 5, 0, 0, 992, 156, 1, 0, 0, 0, 993, 994, 7, 9, 0, 0, 994, 995, 7, 8, 0, 0, 995, 996, 7, 16, 0, 0, 996, 997, 7, 10, 0, 0, 997, 998, 7, 5, 0, 0, 998, 999, 7, 8, 0, 0, 999, 1000, 7, 7, 0, 0, 1000, 1001, 7, 10, 0, 0, 1001, 158, 1, 0, 0, 0, 1002, 1003, 7, 9, 0, 0, 1003, 1004, 7, 8, 0, 0, 1004, 1005, 7, 7, 0, 0, 1005, 1006, 7, 8, 0, 0, 1006, 1007, 7, 5, 0, 0, 1007, 160, 1, 0, 0, 0, 1008, 1009, 7, 9, 0, 0, 1009, 1010, 7, 2, 0, 0, 1010, 1011, 7, 1, 0, 0, 1011, 1012, 7, 0, 0, 0, 1012, 1013, 7, 9, 0, 0, 1013, 162, 1, 0, 0, 0, 1014, 1015, 7, 9, 0, 0, 1015, 1016, 7, 2, 0, 0, 1016, 1017, 7, 1, 0, 0, 1017, 1018, 7, 23, 0, 0, 1018, 164, 1, 0, 0, 0, 1019, 1020, 7, 7, 0, 0, 1020, 1021, 7, 0, 0, 0, 1021, 1022, 7, 19, 0, 0, 1022, 1023, 7, 24, 0, 0, 1023, 1024, 7, 0, 0, 0, 1024, 1025, 7, 9, 0, 0, 1025, 1026, 7, 3, 0, 0, 1026, 1027, 7, 10, 0, 0, 1027, 166, 1, 0, 0, 0, 1028, 1029, 7, 7, 0, 0, 1029, 1030, 7, 10, 0, 0, 1030, 1031, 7, 11, 0, 0, 1031, 1032, 7, 21, 0, 0, 1032, 1033, 7, 10, 0, 0, 1033, 168, 1, 0, 0, 0, 1034, 1043, 7, 7, 0, 0, 1035, 1036, 7, 7, 0, 0, 1036, 1037, 7, 8, 0, 0, 1037, 1038, 7, 4, 0, 0, 1038, 1039, 7, 3, 0, 0, 1039, 1040, 7, 5, 0, 0, 1040, 1041, 7, 10, 0, 0, 1041, 1043, 7, 14, 0, 0, 1042, 1034, 1, 0, 0, 0, 1042, 1035, 1, 0, 0, 0, 1043, 170, 1, 0, 0, 0, 1044, 1045, 7, 7, 0, 0, 1045, 1046, 7, 8, 0, 0, 1046, 1047, 7, 4, 0, 0, 1047, 1048, 7, 24, 0, 0, 1048, 1049, 7, 0, 0, 0, 1049, 1050, 7, 9, 0, 0, 1050, 1051, 7, 3, 0, 0, 1051, 1052, 7, 10, 0, 0, 1052, 172, 1, 0, 0, 0, 1053, 1054, 7, 7, 0, 0, 1054, 1055, 7, 2, 0, 0, 1055, 1056, 7, 6, 0, 0, 1056, 1057, 7, 8, 0, 0, 1057, 1058, 7, 16, 0, 0, 1058, 1059, 7, 13, 0, 0, 1059, 174, 1, 0, 0, 0, 1060, 1061, 7, 7, 0, 0, 1061, 1062, 7, 11, 0, 0, 1062, 1063, 3, 445, 222, 0, 1063, 1064, 7, 1, 0, 0, 1064, 1065, 7, 2, 0, 0, 1065, 1066, 7, 3, 0, 0, 1066, 1067, 7, 4, 0, 0, 1067, 1068, 7, 5, 0, 0, 1068, 1069, 7, 10, 0, 0, 1069, 1070, 7, 11, 0, 0, 1070, 176, 1, 0, 0, 0, 1071, 1072, 7, 4, 0, 0, 1072, 1073, 7, 0, 0, 0, 1073, 1074, 7, 7, 0, 0, 1074, 1075, 7, 10, 0, 0, 1075, 1076, 7, 14, 0, 0, 1076, 1077, 7, 18, 0, 0, 
1077, 1078, 7, 0, 0, 0, 1078, 1079, 7, 1, 0, 0, 1079, 1080, 7, 10, 0, 0, 1080, 178, 1, 0, 0, 0, 1081, 1082, 7, 4, 0, 0, 1082, 1083, 7, 0, 0, 0, 1083, 1084, 7, 7, 0, 0, 1084, 1085, 7, 10, 0, 0, 1085, 1086, 7, 14, 0, 0, 1086, 1087, 7, 18, 0, 0, 1087, 1088, 7, 0, 0, 0, 1088, 1089, 7, 1, 0, 0, 1089, 1090, 7, 10, 0, 0, 1090, 1091, 7, 14, 0, 0, 1091, 180, 1, 0, 0, 0, 1092, 1093, 7, 4, 0, 0, 1093, 1094, 7, 10, 0, 0, 1094, 1095, 7, 14, 0, 0, 1095, 1096, 7, 5, 0, 0, 1096, 1097, 7, 10, 0, 0, 1097, 1098, 7, 6, 0, 0, 1098, 182, 1, 0, 0, 0, 1099, 1100, 7, 4, 0, 0, 1100, 1101, 7, 2, 0, 0, 1101, 184, 1, 0, 0, 0, 1102, 1103, 7, 4, 0, 0, 1103, 1104, 7, 2, 0, 0, 1104, 1105, 7, 5, 0, 0, 1105, 186, 1, 0, 0, 0, 1106, 1107, 7, 4, 0, 0, 1107, 1108, 7, 3, 0, 0, 1108, 1109, 7, 9, 0, 0, 1109, 1110, 7, 9, 0, 0, 1110, 1111, 7, 14, 0, 0, 1111, 188, 1, 0, 0, 0, 1112, 1113, 7, 2, 0, 0, 1113, 1114, 7, 16, 0, 0, 1114, 1115, 7, 16, 0, 0, 1115, 1116, 7, 14, 0, 0, 1116, 1117, 7, 10, 0, 0, 1117, 1118, 7, 5, 0, 0, 1118, 190, 1, 0, 0, 0, 1119, 1120, 7, 2, 0, 0, 1120, 1121, 7, 16, 0, 0, 1121, 192, 1, 0, 0, 0, 1122, 1123, 7, 2, 0, 0, 1123, 1124, 7, 4, 0, 0, 1124, 194, 1, 0, 0, 0, 1125, 1126, 7, 2, 0, 0, 1126, 1127, 7, 4, 0, 0, 1127, 1128, 7, 9, 0, 0, 1128, 1129, 7, 13, 0, 0, 1129, 196, 1, 0, 0, 0, 1130, 1131, 7, 2, 0, 0, 1131, 1132, 7, 11, 0, 0, 1132, 198, 1, 0, 0, 0, 1133, 1134, 7, 2, 0, 0, 1134, 1135, 7, 11, 0, 0, 1135, 1136, 7, 6, 0, 0, 1136, 1137, 7, 10, 0, 0, 1137, 1138, 7, 11, 0, 0, 1138, 200, 1, 0, 0, 0, 1139, 1140, 7, 2, 0, 0, 1140, 1141, 7, 3, 0, 0, 1141, 1142, 7, 5, 0, 0, 1142, 1143, 7, 10, 0, 0, 1143, 1144, 7, 11, 0, 0, 1144, 202, 1, 0, 0, 0, 1145, 1146, 7, 2, 0, 0, 1146, 1147, 7, 24, 0, 0, 1147, 1148, 7, 10, 0, 0, 1148, 1149, 7, 11, 0, 0, 1149, 1150, 7, 11, 0, 0, 1150, 1151, 7, 8, 0, 0, 1151, 1152, 7, 6, 0, 0, 1152, 1153, 7, 10, 0, 0, 1153, 204, 1, 0, 0, 0, 1154, 1155, 7, 18, 0, 0, 1155, 1156, 7, 0, 0, 0, 1156, 1157, 7, 14, 0, 0, 1157, 1158, 7, 14, 0, 0, 1158, 1159, 7, 12, 0, 0, 1159, 1160, 7, 2, 0, 0, 1160, 1161, 7, 11, 0, 0, 1161, 1162, 7, 6, 0, 0, 1162, 206, 1, 0, 0, 0, 1163, 1164, 7, 18, 0, 0, 1164, 1165, 7, 0, 0, 0, 1165, 1166, 7, 5, 0, 0, 1166, 1167, 7, 1, 0, 0, 1167, 1168, 7, 17, 0, 0, 1168, 208, 1, 0, 0, 0, 1169, 1170, 7, 18, 0, 0, 1170, 1171, 7, 10, 0, 0, 1171, 1172, 7, 11, 0, 0, 1172, 210, 1, 0, 0, 0, 1173, 1174, 3, 457, 228, 0, 1174, 1175, 3, 445, 222, 0, 1175, 1176, 3, 135, 67, 0, 1176, 212, 1, 0, 0, 0, 1177, 1178, 3, 457, 228, 0, 1178, 1179, 3, 445, 222, 0, 1179, 1180, 3, 215, 107, 0, 1180, 1181, 3, 445, 222, 0, 1181, 1182, 3, 133, 66, 0, 1182, 214, 1, 0, 0, 0, 1183, 1184, 7, 18, 0, 0, 1184, 1185, 7, 11, 0, 0, 1185, 1186, 7, 8, 0, 0, 1186, 1187, 7, 7, 0, 0, 1187, 1188, 7, 0, 0, 0, 1188, 1189, 7, 11, 0, 0, 1189, 1190, 7, 13, 0, 0, 1190, 216, 1, 0, 0, 0, 1191, 1192, 7, 18, 0, 0, 1192, 1193, 7, 3, 0, 0, 1193, 1194, 7, 5, 0, 0, 1194, 218, 1, 0, 0, 0, 1195, 1196, 7, 11, 0, 0, 1196, 1197, 7, 10, 0, 0, 1197, 1198, 7, 21, 0, 0, 1198, 1199, 7, 8, 0, 0, 1199, 1200, 7, 2, 0, 0, 1200, 1201, 7, 4, 0, 0, 1201, 220, 1, 0, 0, 0, 1202, 1203, 7, 11, 0, 0, 1203, 1204, 7, 10, 0, 0, 1204, 1205, 7, 21, 0, 0, 1205, 1206, 7, 8, 0, 0, 1206, 1207, 7, 2, 0, 0, 1207, 1208, 7, 4, 0, 0, 1208, 1209, 7, 14, 0, 0, 1209, 222, 1, 0, 0, 0, 1210, 1211, 7, 11, 0, 0, 1211, 1212, 7, 10, 0, 0, 1212, 1213, 7, 7, 0, 0, 1213, 1214, 7, 2, 0, 0, 1214, 1215, 7, 24, 0, 0, 1215, 1216, 7, 10, 0, 0, 1216, 224, 1, 0, 0, 0, 1217, 1218, 7, 11, 0, 0, 1218, 1219, 7, 10, 0, 0, 1219, 1220, 7, 5, 0, 0, 1220, 1221, 7, 3, 0, 0, 1221, 1222, 7, 11, 0, 0, 1222, 1223, 
7, 4, 0, 0, 1223, 1224, 7, 8, 0, 0, 1224, 1225, 7, 4, 0, 0, 1225, 1226, 7, 21, 0, 0, 1226, 226, 1, 0, 0, 0, 1227, 1228, 7, 11, 0, 0, 1228, 1229, 7, 10, 0, 0, 1229, 1230, 7, 24, 0, 0, 1230, 1231, 7, 2, 0, 0, 1231, 1232, 7, 23, 0, 0, 1232, 1233, 7, 10, 0, 0, 1233, 228, 1, 0, 0, 0, 1234, 1235, 7, 11, 0, 0, 1235, 1236, 7, 2, 0, 0, 1236, 1237, 7, 9, 0, 0, 1237, 1238, 7, 10, 0, 0, 1238, 230, 1, 0, 0, 0, 1239, 1240, 7, 11, 0, 0, 1240, 1241, 7, 2, 0, 0, 1241, 1242, 7, 9, 0, 0, 1242, 1243, 7, 10, 0, 0, 1243, 1244, 7, 14, 0, 0, 1244, 232, 1, 0, 0, 0, 1245, 1246, 7, 11, 0, 0, 1246, 1247, 7, 2, 0, 0, 1247, 1248, 7, 12, 0, 0, 1248, 234, 1, 0, 0, 0, 1249, 1250, 7, 14, 0, 0, 1250, 1251, 7, 1, 0, 0, 1251, 1252, 7, 17, 0, 0, 1252, 1253, 7, 10, 0, 0, 1253, 1254, 7, 7, 0, 0, 1254, 1255, 7, 0, 0, 0, 1255, 236, 1, 0, 0, 0, 1256, 1265, 7, 14, 0, 0, 1257, 1258, 7, 14, 0, 0, 1258, 1259, 7, 10, 0, 0, 1259, 1260, 7, 1, 0, 0, 1260, 1261, 7, 2, 0, 0, 1261, 1262, 7, 4, 0, 0, 1262, 1263, 7, 6, 0, 0, 1263, 1265, 7, 14, 0, 0, 1264, 1256, 1, 0, 0, 0, 1264, 1257, 1, 0, 0, 0, 1265, 238, 1, 0, 0, 0, 1266, 1267, 7, 14, 0, 0, 1267, 1268, 7, 10, 0, 0, 1268, 1269, 7, 9, 0, 0, 1269, 1270, 7, 10, 0, 0, 1270, 1271, 7, 1, 0, 0, 1271, 1272, 7, 5, 0, 0, 1272, 240, 1, 0, 0, 0, 1273, 1274, 5, 115, 0, 0, 1274, 1275, 5, 101, 0, 0, 1275, 1276, 5, 113, 0, 0, 1276, 1277, 5, 95, 0, 0, 1277, 1278, 5, 116, 0, 0, 1278, 1279, 5, 114, 0, 0, 1279, 1280, 5, 97, 0, 0, 1280, 1281, 5, 110, 0, 0, 1281, 1282, 5, 115, 0, 0, 1282, 1283, 5, 102, 0, 0, 1283, 1284, 5, 111, 0, 0, 1284, 1285, 5, 114, 0, 0, 1285, 1286, 5, 109, 0, 0, 1286, 242, 1, 0, 0, 0, 1287, 1288, 7, 14, 0, 0, 1288, 1289, 7, 10, 0, 0, 1289, 1290, 7, 5, 0, 0, 1290, 244, 1, 0, 0, 0, 1291, 1292, 7, 14, 0, 0, 1292, 1293, 7, 17, 0, 0, 1293, 1294, 7, 0, 0, 0, 1294, 1295, 7, 11, 0, 0, 1295, 1296, 7, 6, 0, 0, 1296, 246, 1, 0, 0, 0, 1297, 1298, 7, 14, 0, 0, 1298, 1299, 7, 17, 0, 0, 1299, 1300, 7, 2, 0, 0, 1300, 1301, 7, 12, 0, 0, 1301, 248, 1, 0, 0, 0, 1302, 1303, 7, 14, 0, 0, 1303, 1304, 7, 5, 0, 0, 1304, 1305, 7, 0, 0, 0, 1305, 1306, 7, 11, 0, 0, 1306, 1307, 7, 5, 0, 0, 1307, 250, 1, 0, 0, 0, 1308, 1309, 7, 5, 0, 0, 1309, 1310, 7, 0, 0, 0, 1310, 1311, 7, 15, 0, 0, 1311, 1312, 7, 9, 0, 0, 1312, 1313, 7, 10, 0, 0, 1313, 252, 1, 0, 0, 0, 1314, 1315, 7, 5, 0, 0, 1315, 1316, 7, 0, 0, 0, 1316, 1317, 7, 15, 0, 0, 1317, 1318, 7, 9, 0, 0, 1318, 1319, 7, 10, 0, 0, 1319, 1320, 7, 14, 0, 0, 1320, 254, 1, 0, 0, 0, 1321, 1322, 7, 5, 0, 0, 1322, 1323, 7, 17, 0, 0, 1323, 1324, 7, 10, 0, 0, 1324, 1325, 7, 4, 0, 0, 1325, 256, 1, 0, 0, 0, 1326, 1327, 7, 5, 0, 0, 1327, 1328, 7, 2, 0, 0, 1328, 258, 1, 0, 0, 0, 1329, 1330, 7, 5, 0, 0, 1330, 1331, 7, 5, 0, 0, 1331, 1332, 7, 9, 0, 0, 1332, 260, 1, 0, 0, 0, 1333, 1334, 7, 5, 0, 0, 1334, 1335, 7, 13, 0, 0, 1335, 1336, 7, 18, 0, 0, 1336, 1337, 7, 10, 0, 0, 1337, 262, 1, 0, 0, 0, 1338, 1339, 7, 3, 0, 0, 1339, 1340, 7, 4, 0, 0, 1340, 1341, 7, 16, 0, 0, 1341, 1342, 7, 11, 0, 0, 1342, 1343, 7, 10, 0, 0, 1343, 1344, 7, 10, 0, 0, 1344, 1345, 7, 20, 0, 0, 1345, 1346, 7, 10, 0, 0, 1346, 264, 1, 0, 0, 0, 1347, 1348, 7, 3, 0, 0, 1348, 1349, 7, 4, 0, 0, 1349, 1350, 7, 9, 0, 0, 1350, 1351, 7, 2, 0, 0, 1351, 1352, 7, 1, 0, 0, 1352, 1353, 7, 23, 0, 0, 1353, 266, 1, 0, 0, 0, 1354, 1355, 7, 3, 0, 0, 1355, 1356, 7, 18, 0, 0, 1356, 1357, 7, 6, 0, 0, 1357, 1358, 7, 0, 0, 0, 1358, 1359, 7, 5, 0, 0, 1359, 1360, 7, 10, 0, 0, 1360, 268, 1, 0, 0, 0, 1361, 1362, 7, 3, 0, 0, 1362, 1363, 7, 18, 0, 0, 1363, 1364, 7, 14, 0, 0, 1364, 1365, 7, 10, 0, 0, 1365, 1366, 7, 11, 0, 0, 1366, 1367, 7, 5, 0, 0, 
1367, 270, 1, 0, 0, 0, 1368, 1369, 7, 3, 0, 0, 1369, 1370, 7, 14, 0, 0, 1370, 1371, 7, 10, 0, 0, 1371, 1372, 7, 11, 0, 0, 1372, 272, 1, 0, 0, 0, 1373, 1374, 7, 3, 0, 0, 1374, 1375, 7, 14, 0, 0, 1375, 1376, 7, 10, 0, 0, 1376, 1377, 7, 11, 0, 0, 1377, 1378, 7, 14, 0, 0, 1378, 274, 1, 0, 0, 0, 1379, 1380, 7, 3, 0, 0, 1380, 1381, 7, 14, 0, 0, 1381, 1382, 7, 8, 0, 0, 1382, 1383, 7, 4, 0, 0, 1383, 1384, 7, 21, 0, 0, 1384, 276, 1, 0, 0, 0, 1385, 1386, 7, 24, 0, 0, 1386, 1387, 7, 0, 0, 0, 1387, 1388, 7, 9, 0, 0, 1388, 1389, 7, 3, 0, 0, 1389, 1390, 7, 10, 0, 0, 1390, 1391, 7, 14, 0, 0, 1391, 278, 1, 0, 0, 0, 1392, 1393, 7, 12, 0, 0, 1393, 1394, 7, 17, 0, 0, 1394, 1395, 7, 10, 0, 0, 1395, 1396, 7, 4, 0, 0, 1396, 280, 1, 0, 0, 0, 1397, 1398, 7, 12, 0, 0, 1398, 1399, 7, 17, 0, 0, 1399, 1400, 7, 10, 0, 0, 1400, 1401, 7, 11, 0, 0, 1401, 1402, 7, 10, 0, 0, 1402, 282, 1, 0, 0, 0, 1403, 1404, 7, 12, 0, 0, 1404, 1405, 7, 8, 0, 0, 1405, 1406, 7, 5, 0, 0, 1406, 1407, 7, 17, 0, 0, 1407, 284, 1, 0, 0, 0, 1408, 1409, 7, 3, 0, 0, 1409, 1410, 7, 4, 0, 0, 1410, 1411, 7, 8, 0, 0, 1411, 1412, 7, 25, 0, 0, 1412, 1413, 7, 3, 0, 0, 1413, 1414, 7, 10, 0, 0, 1414, 286, 1, 0, 0, 0, 1415, 1416, 7, 3, 0, 0, 1416, 1417, 7, 4, 0, 0, 1417, 1418, 7, 4, 0, 0, 1418, 1419, 7, 10, 0, 0, 1419, 1420, 7, 14, 0, 0, 1420, 1421, 7, 5, 0, 0, 1421, 288, 1, 0, 0, 0, 1422, 1423, 7, 3, 0, 0, 1423, 1424, 7, 3, 0, 0, 1424, 1425, 7, 8, 0, 0, 1425, 1426, 7, 6, 0, 0, 1426, 290, 1, 0, 0, 0, 1427, 1429, 3, 17, 8, 0, 1428, 1430, 3, 425, 212, 0, 1429, 1428, 1, 0, 0, 0, 1430, 1431, 1, 0, 0, 0, 1431, 1429, 1, 0, 0, 0, 1431, 1432, 1, 0, 0, 0, 1432, 1433, 1, 0, 0, 0, 1433, 1434, 3, 459, 229, 0, 1434, 292, 1, 0, 0, 0, 1435, 1437, 3, 121, 60, 0, 1436, 1438, 3, 425, 212, 0, 1437, 1436, 1, 0, 0, 0, 1438, 1439, 1, 0, 0, 0, 1439, 1437, 1, 0, 0, 0, 1439, 1440, 1, 0, 0, 0, 1440, 1441, 1, 0, 0, 0, 1441, 1442, 3, 455, 227, 0, 1442, 294, 1, 0, 0, 0, 1443, 1445, 3, 205, 102, 0, 1444, 1446, 3, 425, 212, 0, 1445, 1444, 1, 0, 0, 0, 1446, 1447, 1, 0, 0, 0, 1447, 1445, 1, 0, 0, 0, 1447, 1448, 1, 0, 0, 0, 1448, 1449, 1, 0, 0, 0, 1449, 1450, 3, 453, 226, 0, 1450, 296, 1, 0, 0, 0, 1451, 1453, 3, 461, 230, 0, 1452, 1454, 3, 425, 212, 0, 1453, 1452, 1, 0, 0, 0, 1454, 1455, 1, 0, 0, 0, 1455, 1453, 1, 0, 0, 0, 1455, 1456, 1, 0, 0, 0, 1456, 1457, 1, 0, 0, 0, 1457, 1459, 3, 451, 225, 0, 1458, 1460, 3, 425, 212, 0, 1459, 1458, 1, 0, 0, 0, 1460, 1461, 1, 0, 0, 0, 1461, 1459, 1, 0, 0, 0, 1461, 1462, 1, 0, 0, 0, 1462, 1463, 1, 0, 0, 0, 1463, 1464, 3, 205, 102, 0, 1464, 298, 1, 0, 0, 0, 1465, 1467, 3, 449, 224, 0, 1466, 1468, 3, 425, 212, 0, 1467, 1466, 1, 0, 0, 0, 1468, 1469, 1, 0, 0, 0, 1469, 1467, 1, 0, 0, 0, 1469, 1470, 1, 0, 0, 0, 1470, 1471, 1, 0, 0, 0, 1471, 1473, 3, 463, 231, 0, 1472, 1474, 3, 425, 212, 0, 1473, 1472, 1, 0, 0, 0, 1474, 1475, 1, 0, 0, 0, 1475, 1473, 1, 0, 0, 0, 1475, 1476, 1, 0, 0, 0, 1476, 1477, 1, 0, 0, 0, 1477, 1478, 3, 205, 102, 0, 1478, 300, 1, 0, 0, 0, 1479, 1481, 3, 155, 77, 0, 1480, 1482, 3, 425, 212, 0, 1481, 1480, 1, 0, 0, 0, 1482, 1483, 1, 0, 0, 0, 1483, 1481, 1, 0, 0, 0, 1483, 1484, 1, 0, 0, 0, 1484, 1485, 1, 0, 0, 0, 1485, 1487, 3, 201, 100, 0, 1486, 1488, 3, 425, 212, 0, 1487, 1486, 1, 0, 0, 0, 1488, 1489, 1, 0, 0, 0, 1489, 1487, 1, 0, 0, 0, 1489, 1490, 1, 0, 0, 0, 1490, 1491, 1, 0, 0, 0, 1491, 1492, 3, 145, 72, 0, 1492, 302, 1, 0, 0, 0, 1493, 1494, 7, 0, 0, 0, 1494, 1495, 7, 11, 0, 0, 1495, 1496, 7, 11, 0, 0, 1496, 1497, 7, 0, 0, 0, 1497, 1498, 7, 13, 0, 0, 1498, 304, 1, 0, 0, 0, 1499, 1500, 7, 15, 0, 0, 1500, 1501, 7, 8, 0, 0, 1501, 1502, 7, 4, 0, 
0, 1502, 1503, 7, 0, 0, 0, 1503, 1504, 7, 11, 0, 0, 1504, 1505, 7, 13, 0, 0, 1505, 306, 1, 0, 0, 0, 1506, 1507, 7, 15, 0, 0, 1507, 1508, 7, 2, 0, 0, 1508, 1509, 7, 2, 0, 0, 1509, 1510, 7, 9, 0, 0, 1510, 1511, 7, 10, 0, 0, 1511, 1512, 7, 0, 0, 0, 1512, 1513, 7, 4, 0, 0, 1513, 308, 1, 0, 0, 0, 1514, 1515, 7, 6, 0, 0, 1515, 1516, 7, 2, 0, 0, 1516, 1517, 7, 3, 0, 0, 1517, 1518, 7, 15, 0, 0, 1518, 1519, 7, 9, 0, 0, 1519, 1520, 7, 10, 0, 0, 1520, 310, 1, 0, 0, 0, 1521, 1522, 7, 10, 0, 0, 1522, 1523, 7, 4, 0, 0, 1523, 1524, 7, 3, 0, 0, 1524, 1525, 7, 7, 0, 0, 1525, 312, 1, 0, 0, 0, 1526, 1527, 7, 16, 0, 0, 1527, 1528, 7, 9, 0, 0, 1528, 1529, 7, 2, 0, 0, 1529, 1530, 7, 0, 0, 0, 1530, 1531, 7, 5, 0, 0, 1531, 314, 1, 0, 0, 0, 1532, 1533, 7, 21, 0, 0, 1533, 1534, 7, 10, 0, 0, 1534, 1535, 7, 2, 0, 0, 1535, 1536, 7, 7, 0, 0, 1536, 1537, 7, 10, 0, 0, 1537, 1538, 7, 5, 0, 0, 1538, 1539, 7, 11, 0, 0, 1539, 1540, 7, 13, 0, 0, 1540, 316, 1, 0, 0, 0, 1541, 1542, 7, 8, 0, 0, 1542, 1543, 7, 4, 0, 0, 1543, 1544, 7, 5, 0, 0, 1544, 1545, 7, 10, 0, 0, 1545, 1546, 7, 21, 0, 0, 1546, 1547, 7, 10, 0, 0, 1547, 1548, 7, 11, 0, 0, 1548, 318, 1, 0, 0, 0, 1549, 1550, 7, 9, 0, 0, 1550, 1551, 7, 2, 0, 0, 1551, 1552, 7, 4, 0, 0, 1552, 1553, 7, 21, 0, 0, 1553, 320, 1, 0, 0, 0, 1554, 1555, 7, 7, 0, 0, 1555, 1556, 7, 0, 0, 0, 1556, 1557, 7, 18, 0, 0, 1557, 322, 1, 0, 0, 0, 1558, 1559, 7, 4, 0, 0, 1559, 1560, 7, 3, 0, 0, 1560, 1561, 7, 7, 0, 0, 1561, 1562, 7, 15, 0, 0, 1562, 1563, 7, 10, 0, 0, 1563, 1564, 7, 11, 0, 0, 1564, 324, 1, 0, 0, 0, 1565, 1566, 7, 18, 0, 0, 1566, 1567, 7, 2, 0, 0, 1567, 1568, 7, 8, 0, 0, 1568, 1569, 7, 4, 0, 0, 1569, 1570, 7, 5, 0, 0, 1570, 326, 1, 0, 0, 0, 1571, 1572, 7, 11, 0, 0, 1572, 1573, 7, 10, 0, 0, 1573, 1574, 7, 1, 0, 0, 1574, 1575, 7, 2, 0, 0, 1575, 1576, 7, 11, 0, 0, 1576, 1577, 7, 6, 0, 0, 1577, 328, 1, 0, 0, 0, 1578, 1579, 7, 14, 0, 0, 1579, 1580, 7, 5, 0, 0, 1580, 1581, 7, 11, 0, 0, 1581, 1582, 7, 8, 0, 0, 1582, 1583, 7, 4, 0, 0, 1583, 1584, 7, 21, 0, 0, 1584, 330, 1, 0, 0, 0, 1585, 1586, 7, 5, 0, 0, 1586, 1587, 7, 8, 0, 0, 1587, 1588, 7, 7, 0, 0, 1588, 1589, 7, 10, 0, 0, 1589, 1590, 7, 14, 0, 0, 1590, 1591, 7, 5, 0, 0, 1591, 1592, 7, 0, 0, 0, 1592, 1593, 7, 7, 0, 0, 1593, 1594, 7, 18, 0, 0, 1594, 332, 1, 0, 0, 0, 1595, 1596, 7, 0, 0, 0, 1596, 1597, 7, 4, 0, 0, 1597, 1598, 7, 13, 0, 0, 1598, 334, 1, 0, 0, 0, 1599, 1600, 7, 0, 0, 0, 1600, 1601, 7, 4, 0, 0, 1601, 1602, 7, 13, 0, 0, 1602, 1603, 7, 0, 0, 0, 1603, 1604, 7, 5, 0, 0, 1604, 1605, 7, 2, 0, 0, 1605, 1606, 7, 7, 0, 0, 1606, 1607, 7, 8, 0, 0, 1607, 1608, 7, 1, 0, 0, 1608, 336, 1, 0, 0, 0, 1609, 1610, 7, 0, 0, 0, 1610, 1611, 7, 4, 0, 0, 1611, 1612, 7, 13, 0, 0, 1612, 1613, 7, 22, 0, 0, 1613, 1614, 7, 14, 0, 0, 1614, 1615, 7, 2, 0, 0, 1615, 1616, 7, 4, 0, 0, 1616, 1617, 7, 0, 0, 0, 1617, 1618, 7, 5, 0, 0, 1618, 1619, 7, 2, 0, 0, 1619, 1620, 7, 7, 0, 0, 1620, 1621, 7, 8, 0, 0, 1621, 1622, 7, 1, 0, 0, 1622, 338, 1, 0, 0, 0, 1623, 1624, 7, 0, 0, 0, 1624, 1625, 7, 4, 0, 0, 1625, 1626, 7, 13, 0, 0, 1626, 1627, 7, 11, 0, 0, 1627, 1628, 7, 10, 0, 0, 1628, 1629, 7, 1, 0, 0, 1629, 1630, 7, 2, 0, 0, 1630, 1631, 7, 11, 0, 0, 1631, 1632, 7, 6, 0, 0, 1632, 340, 1, 0, 0, 0, 1633, 1634, 7, 14, 0, 0, 1634, 1635, 7, 1, 0, 0, 1635, 1636, 7, 0, 0, 0, 1636, 1637, 7, 9, 0, 0, 1637, 1638, 7, 0, 0, 0, 1638, 1639, 7, 11, 0, 0, 1639, 342, 1, 0, 0, 0, 1640, 1641, 5, 59, 0, 0, 1641, 344, 1, 0, 0, 0, 1642, 1643, 5, 44, 0, 0, 1643, 346, 1, 0, 0, 0, 1644, 1645, 5, 58, 0, 0, 1645, 348, 1, 0, 0, 0, 1646, 1647, 5, 40, 0, 0, 1647, 350, 1, 0, 0, 0, 1648, 1649, 5, 41, 0, 
0, 1649, 352, 1, 0, 0, 0, 1650, 1651, 5, 91, 0, 0, 1651, 354, 1, 0, 0, 0, 1652, 1653, 5, 93, 0, 0, 1653, 356, 1, 0, 0, 0, 1654, 1655, 5, 123, 0, 0, 1655, 358, 1, 0, 0, 0, 1656, 1657, 5, 125, 0, 0, 1657, 360, 1, 0, 0, 0, 1658, 1659, 5, 42, 0, 0, 1659, 362, 1, 0, 0, 0, 1660, 1661, 5, 46, 0, 0, 1661, 364, 1, 0, 0, 0, 1662, 1663, 5, 36, 0, 0, 1663, 366, 1, 0, 0, 0, 1664, 1665, 5, 63, 0, 0, 1665, 368, 1, 0, 0, 0, 1666, 1667, 5, 60, 0, 0, 1667, 370, 1, 0, 0, 0, 1668, 1669, 5, 60, 0, 0, 1669, 1670, 5, 61, 0, 0, 1670, 372, 1, 0, 0, 0, 1671, 1672, 5, 62, 0, 0, 1672, 374, 1, 0, 0, 0, 1673, 1674, 5, 62, 0, 0, 1674, 1675, 5, 61, 0, 0, 1675, 376, 1, 0, 0, 0, 1676, 1677, 5, 61, 0, 0, 1677, 378, 1, 0, 0, 0, 1678, 1679, 5, 33, 0, 0, 1679, 1680, 5, 61, 0, 0, 1680, 380, 1, 0, 0, 0, 1681, 1682, 5, 60, 0, 0, 1682, 1683, 7, 0, 0, 0, 1683, 1684, 7, 4, 0, 0, 1684, 1685, 7, 13, 0, 0, 1685, 382, 1, 0, 0, 0, 1686, 1687, 5, 60, 0, 0, 1687, 1688, 5, 61, 0, 0, 1688, 1689, 1, 0, 0, 0, 1689, 1690, 7, 0, 0, 0, 1690, 1691, 7, 4, 0, 0, 1691, 1692, 7, 13, 0, 0, 1692, 384, 1, 0, 0, 0, 1693, 1694, 5, 62, 0, 0, 1694, 1695, 7, 0, 0, 0, 1695, 1696, 7, 4, 0, 0, 1696, 1697, 7, 13, 0, 0, 1697, 386, 1, 0, 0, 0, 1698, 1699, 5, 62, 0, 0, 1699, 1700, 5, 61, 0, 0, 1700, 1701, 1, 0, 0, 0, 1701, 1702, 7, 0, 0, 0, 1702, 1703, 7, 4, 0, 0, 1703, 1704, 7, 13, 0, 0, 1704, 388, 1, 0, 0, 0, 1705, 1706, 5, 61, 0, 0, 1706, 1707, 7, 0, 0, 0, 1707, 1708, 7, 4, 0, 0, 1708, 1709, 7, 13, 0, 0, 1709, 390, 1, 0, 0, 0, 1710, 1711, 5, 33, 0, 0, 1711, 1712, 5, 61, 0, 0, 1712, 1713, 1, 0, 0, 0, 1713, 1714, 7, 0, 0, 0, 1714, 1715, 7, 4, 0, 0, 1715, 1716, 7, 13, 0, 0, 1716, 392, 1, 0, 0, 0, 1717, 1718, 5, 43, 0, 0, 1718, 394, 1, 0, 0, 0, 1719, 1720, 5, 45, 0, 0, 1720, 396, 1, 0, 0, 0, 1721, 1722, 5, 47, 0, 0, 1722, 398, 1, 0, 0, 0, 1723, 1724, 7, 6, 0, 0, 1724, 1725, 7, 8, 0, 0, 1725, 1726, 7, 24, 0, 0, 1726, 400, 1, 0, 0, 0, 1727, 1728, 5, 124, 0, 0, 1728, 1729, 5, 124, 0, 0, 1729, 402, 1, 0, 0, 0, 1730, 1731, 7, 4, 0, 0, 1731, 1732, 7, 3, 0, 0, 1732, 1733, 7, 9, 0, 0, 1733, 1734, 7, 9, 0, 0, 1734, 404, 1, 0, 0, 0, 1735, 1736, 7, 16, 0, 0, 1736, 1737, 7, 0, 0, 0, 1737, 1738, 7, 9, 0, 0, 1738, 1739, 7, 14, 0, 0, 1739, 1740, 7, 10, 0, 0, 1740, 406, 1, 0, 0, 0, 1741, 1742, 7, 5, 0, 0, 1742, 1743, 7, 11, 0, 0, 1743, 1744, 7, 3, 0, 0, 1744, 1745, 7, 10, 0, 0, 1745, 408, 1, 0, 0, 0, 1746, 1748, 3, 437, 218, 0, 1747, 1746, 1, 0, 0, 0, 1748, 1749, 1, 0, 0, 0, 1749, 1747, 1, 0, 0, 0, 1749, 1750, 1, 0, 0, 0, 1750, 410, 1, 0, 0, 0, 1751, 1753, 3, 437, 218, 0, 1752, 1751, 1, 0, 0, 0, 1753, 1756, 1, 0, 0, 0, 1754, 1752, 1, 0, 0, 0, 1754, 1755, 1, 0, 0, 0, 1755, 1757, 1, 0, 0, 0, 1756, 1754, 1, 0, 0, 0, 1757, 1759, 5, 46, 0, 0, 1758, 1760, 3, 437, 218, 0, 1759, 1758, 1, 0, 0, 0, 1760, 1761, 1, 0, 0, 0, 1761, 1759, 1, 0, 0, 0, 1761, 1762, 1, 0, 0, 0, 1762, 1772, 1, 0, 0, 0, 1763, 1765, 7, 10, 0, 0, 1764, 1766, 7, 26, 0, 0, 1765, 1764, 1, 0, 0, 0, 1765, 1766, 1, 0, 0, 0, 1766, 1768, 1, 0, 0, 0, 1767, 1769, 3, 437, 218, 0, 1768, 1767, 1, 0, 0, 0, 1769, 1770, 1, 0, 0, 0, 1770, 1768, 1, 0, 0, 0, 1770, 1771, 1, 0, 0, 0, 1771, 1773, 1, 0, 0, 0, 1772, 1763, 1, 0, 0, 0, 1772, 1773, 1, 0, 0, 0, 1773, 1789, 1, 0, 0, 0, 1774, 1776, 3, 437, 218, 0, 1775, 1774, 1, 0, 0, 0, 1776, 1777, 1, 0, 0, 0, 1777, 1775, 1, 0, 0, 0, 1777, 1778, 1, 0, 0, 0, 1778, 1779, 1, 0, 0, 0, 1779, 1781, 7, 10, 0, 0, 1780, 1782, 7, 26, 0, 0, 1781, 1780, 1, 0, 0, 0, 1781, 1782, 1, 0, 0, 0, 1782, 1784, 1, 0, 0, 0, 1783, 1785, 3, 437, 218, 0, 1784, 1783, 1, 0, 0, 0, 1785, 1786, 1, 0, 0, 0, 1786, 1784, 1, 0, 0, 0, 
1786, 1787, 1, 0, 0, 0, 1787, 1789, 1, 0, 0, 0, 1788, 1754, 1, 0, 0, 0, 1788, 1775, 1, 0, 0, 0, 1789, 412, 1, 0, 0, 0, 1790, 1793, 3, 409, 204, 0, 1791, 1793, 3, 411, 205, 0, 1792, 1790, 1, 0, 0, 0, 1792, 1791, 1, 0, 0, 0, 1793, 1794, 1, 0, 0, 0, 1794, 1795, 7, 4, 0, 0, 1795, 414, 1, 0, 0, 0, 1796, 1801, 5, 34, 0, 0, 1797, 1800, 3, 439, 219, 0, 1798, 1800, 9, 0, 0, 0, 1799, 1797, 1, 0, 0, 0, 1799, 1798, 1, 0, 0, 0, 1800, 1803, 1, 0, 0, 0, 1801, 1802, 1, 0, 0, 0, 1801, 1799, 1, 0, 0, 0, 1802, 1804, 1, 0, 0, 0, 1803, 1801, 1, 0, 0, 0, 1804, 1805, 5, 34, 0, 0, 1805, 416, 1, 0, 0, 0, 1806, 1811, 5, 39, 0, 0, 1807, 1810, 3, 441, 220, 0, 1808, 1810, 9, 0, 0, 0, 1809, 1807, 1, 0, 0, 0, 1809, 1808, 1, 0, 0, 0, 1810, 1813, 1, 0, 0, 0, 1811, 1812, 1, 0, 0, 0, 1811, 1809, 1, 0, 0, 0, 1812, 1814, 1, 0, 0, 0, 1813, 1811, 1, 0, 0, 0, 1814, 1815, 5, 39, 0, 0, 1815, 418, 1, 0, 0, 0, 1816, 1817, 7, 27, 0, 0, 1817, 1818, 7, 28, 0, 0, 1818, 1819, 7, 27, 0, 0, 1819, 1820, 7, 29, 0, 0, 1820, 420, 1, 0, 0, 0, 1821, 1827, 3, 435, 217, 0, 1822, 1826, 3, 435, 217, 0, 1823, 1826, 3, 437, 218, 0, 1824, 1826, 3, 445, 222, 0, 1825, 1822, 1, 0, 0, 0, 1825, 1823, 1, 0, 0, 0, 1825, 1824, 1, 0, 0, 0, 1826, 1829, 1, 0, 0, 0, 1827, 1825, 1, 0, 0, 0, 1827, 1828, 1, 0, 0, 0, 1828, 422, 1, 0, 0, 0, 1829, 1827, 1, 0, 0, 0, 1830, 1833, 3, 437, 218, 0, 1831, 1833, 3, 445, 222, 0, 1832, 1830, 1, 0, 0, 0, 1832, 1831, 1, 0, 0, 0, 1833, 1839, 1, 0, 0, 0, 1834, 1838, 3, 435, 217, 0, 1835, 1838, 3, 437, 218, 0, 1836, 1838, 3, 445, 222, 0, 1837, 1834, 1, 0, 0, 0, 1837, 1835, 1, 0, 0, 0, 1837, 1836, 1, 0, 0, 0, 1838, 1841, 1, 0, 0, 0, 1839, 1837, 1, 0, 0, 0, 1839, 1840, 1, 0, 0, 0, 1840, 424, 1, 0, 0, 0, 1841, 1839, 1, 0, 0, 0, 1842, 1844, 7, 30, 0, 0, 1843, 1842, 1, 0, 0, 0, 1844, 1845, 1, 0, 0, 0, 1845, 1843, 1, 0, 0, 0, 1845, 1846, 1, 0, 0, 0, 1846, 1847, 1, 0, 0, 0, 1847, 1848, 6, 212, 0, 0, 1848, 426, 1, 0, 0, 0, 1849, 1850, 5, 47, 0, 0, 1850, 1851, 5, 42, 0, 0, 1851, 1852, 1, 0, 0, 0, 1852, 1856, 8, 31, 0, 0, 1853, 1855, 9, 0, 0, 0, 1854, 1853, 1, 0, 0, 0, 1855, 1858, 1, 0, 0, 0, 1856, 1857, 1, 0, 0, 0, 1856, 1854, 1, 0, 0, 0, 1857, 1859, 1, 0, 0, 0, 1858, 1856, 1, 0, 0, 0, 1859, 1860, 5, 42, 0, 0, 1860, 1861, 5, 47, 0, 0, 1861, 1862, 1, 0, 0, 0, 1862, 1863, 6, 213, 0, 0, 1863, 428, 1, 0, 0, 0, 1864, 1865, 5, 47, 0, 0, 1865, 1866, 5, 47, 0, 0, 1866, 1870, 1, 0, 0, 0, 1867, 1869, 8, 32, 0, 0, 1868, 1867, 1, 0, 0, 0, 1869, 1872, 1, 0, 0, 0, 1870, 1868, 1, 0, 0, 0, 1870, 1871, 1, 0, 0, 0, 1871, 1873, 1, 0, 0, 0, 1872, 1870, 1, 0, 0, 0, 1873, 1874, 6, 214, 0, 0, 1874, 430, 1, 0, 0, 0, 1875, 1879, 5, 35, 0, 0, 1876, 1878, 8, 32, 0, 0, 1877, 1876, 1, 0, 0, 0, 1878, 1881, 1, 0, 0, 0, 1879, 1877, 1, 0, 0, 0, 1879, 1880, 1, 0, 0, 0, 1880, 1882, 1, 0, 0, 0, 1881, 1879, 1, 0, 0, 0, 1882, 1883, 6, 215, 0, 0, 1883, 432, 1, 0, 0, 0, 1884, 1885, 9, 0, 0, 0, 1885, 434, 1, 0, 0, 0, 1886, 1887, 7, 33, 0, 0, 1887, 436, 1, 0, 0, 0, 1888, 1889, 2, 48, 57, 0, 1889, 438, 1, 0, 0, 0, 1890, 1893, 5, 92, 0, 0, 1891, 1894, 7, 34, 0, 0, 1892, 1894, 3, 447, 223, 0, 1893, 1891, 1, 0, 0, 0, 1893, 1892, 1, 0, 0, 0, 1894, 440, 1, 0, 0, 0, 1895, 1898, 5, 92, 0, 0, 1896, 1899, 7, 35, 0, 0, 1897, 1899, 3, 447, 223, 0, 1898, 1896, 1, 0, 0, 0, 1898, 1897, 1, 0, 0, 0, 1899, 442, 1, 0, 0, 0, 1900, 1901, 7, 36, 0, 0, 1901, 444, 1, 0, 0, 0, 1902, 1903, 5, 95, 0, 0, 1903, 446, 1, 0, 0, 0, 1904, 1905, 5, 117, 0, 0, 1905, 1906, 3, 443, 221, 0, 1906, 1907, 3, 443, 221, 0, 1907, 1908, 3, 443, 221, 0, 1908, 1909, 3, 443, 221, 0, 1909, 448, 1, 0, 0, 0, 1910, 1911, 7, 1, 0, 0, 
1911, 1912, 7, 9, 0, 0, 1912, 1913, 7, 10, 0, 0, 1913, 1914, 7, 0, 0, 0, 1914, 1915, 7, 11, 0, 0, 1915, 450, 1, 0, 0, 0, 1916, 1917, 7, 1, 0, 0, 1917, 1918, 7, 3, 0, 0, 1918, 1919, 7, 11, 0, 0, 1919, 1920, 7, 11, 0, 0, 1920, 1921, 7, 10, 0, 0, 1921, 1922, 7, 4, 0, 0, 1922, 1923, 7, 5, 0, 0, 1923, 452, 1, 0, 0, 0, 1924, 1925, 7, 10, 0, 0, 1925, 1926, 7, 19, 0, 0, 1926, 1927, 7, 18, 0, 0, 1927, 1928, 7, 8, 0, 0, 1928, 1929, 7, 11, 0, 0, 1929, 1930, 7, 10, 0, 0, 1930, 454, 1, 0, 0, 0, 1931, 1932, 7, 10, 0, 0, 1932, 1933, 7, 19, 0, 0, 1933, 1934, 7, 5, 0, 0, 1934, 1935, 7, 10, 0, 0, 1935, 1936, 7, 11, 0, 0, 1936, 1937, 7, 4, 0, 0, 1937, 1938, 7, 0, 0, 0, 1938, 1939, 7, 9, 0, 0, 1939, 1940, 7, 9, 0, 0, 1940, 1941, 7, 13, 0, 0, 1941, 456, 1, 0, 0, 0, 1942, 1943, 7, 18, 0, 0, 1943, 1944, 7, 11, 0, 0, 1944, 1945, 7, 10, 0, 0, 1945, 1946, 7, 16, 0, 0, 1946, 1947, 7, 10, 0, 0, 1947, 1948, 7, 11, 0, 0, 1948, 458, 1, 0, 0, 0, 1949, 1950, 7, 18, 0, 0, 1950, 1951, 7, 11, 0, 0, 1951, 1952, 7, 8, 0, 0, 1952, 1953, 7, 24, 0, 0, 1953, 1954, 7, 8, 0, 0, 1954, 1955, 7, 9, 0, 0, 1955, 1956, 7, 10, 0, 0, 1956, 1957, 7, 21, 0, 0, 1957, 1958, 7, 10, 0, 0, 1958, 1959, 7, 14, 0, 0, 1959, 460, 1, 0, 0, 0, 1960, 1961, 7, 11, 0, 0, 1961, 1962, 7, 10, 0, 0, 1962, 1963, 7, 5, 0, 0, 1963, 1964, 7, 0, 0, 0, 1964, 1965, 7, 8, 0, 0, 1965, 1966, 7, 4, 0, 0, 1966, 462, 1, 0, 0, 0, 1967, 1968, 7, 11, 0, 0, 1968, 1969, 7, 10, 0, 0, 1969, 1970, 7, 5, 0, 0, 1970, 1971, 7, 0, 0, 0, 1971, 1972, 7, 8, 0, 0, 1972, 1973, 7, 4, 0, 0, 1973, 1974, 7, 10, 0, 0, 1974, 1975, 7, 6, 0, 0, 1975, 464, 1, 0, 0, 0, 42, 0, 495, 497, 655, 885, 1042, 1264, 1431, 1439, 1447, 1455, 1461, 1469, 1475, 1483, 1489, 1749, 1754, 1761, 1765, 1770, 1772, 1777, 1781, 1786, 1788, 1792, 1799, 1801, 1809, 1811, 1825, 1827, 1832, 1837, 1839, 1845, 1856, 1870, 1879, 1893, 1898, 1, 6, 0, 0] \ No newline at end of file diff --git a/kvmain/src/main/java/oracle/kv/impl/query/compiler/parser/KVQLLexer.java b/kvmain/src/main/java/oracle/kv/impl/query/compiler/parser/KVQLLexer.java index a5f483ef..52d22e27 100644 --- a/kvmain/src/main/java/oracle/kv/impl/query/compiler/parser/KVQLLexer.java +++ b/kvmain/src/main/java/oracle/kv/impl/query/compiler/parser/KVQLLexer.java @@ -17,40 +17,41 @@ public class KVQLLexer extends Lexer { protected static final PredictionContextCache _sharedContextCache = new PredictionContextCache(); public static final int - T__0=1, T__1=2, T__2=3, VARNAME=4, ACCOUNT=5, ADD=6, ADMIN=7, ALL=8, ALTER=9, - ALWAYS=10, ANCESTORS=11, AND=12, AS=13, ASC=14, ARRAY_COLLECT=15, BETWEEN=16, - BY=17, CACHE=18, CASE=19, CASCADE=20, CAST=21, COLLECTION=22, COMMENT=23, - COUNT=24, CREATE=25, CYCLE=26, DAYS=27, DECLARE=28, DEFAULT=29, DELETE=30, - DESC=31, DESCENDANTS=32, DESCRIBE=33, DISTINCT=34, DROP=35, ELEMENTOF=36, - ELEMENTS=37, ELSE=38, END=39, ES_SHARDS=40, ES_REPLICAS=41, EXISTS=42, - EXTRACT=43, FIELDS=44, FIRST=45, FORCE=46, FORCE_INDEX=47, FORCE_PRIMARY_INDEX=48, - FREEZE=49, FROM=50, FROZEN=51, FULLTEXT=52, GENERATED=53, GRANT=54, GROUP=55, - HOURS=56, IDENTIFIED=57, IDENTITY=58, IF=59, IN=60, INCREMENT=61, INDEX=62, - INDEXES=63, INSERT=64, INTO=65, IS=66, JSON=67, JOIN=68, KEY=69, KEYOF=70, - KEYS=71, LAST=72, LEFT=73, LIFETIME=74, LIMIT=75, LOCAL=76, LOCK=77, MAXVALUE=78, - MERGE=79, MINUTES=80, MINVALUE=81, MODIFY=82, MR_COUNTER=83, NAMESPACE=84, - NAMESPACES=85, NESTED=86, NO=87, NOT=88, NULLS=89, OFFSET=90, OF=91, ON=92, - ONLY=93, OR=94, ORDER=95, OUTER=96, OVERRIDE=97, PASSWORD=98, PATCH=99, - PER=100, PREFER_INDEXES=101, 
PREFER_PRIMARY_INDEX=102, PRIMARY=103, PUT=104, - REGION=105, REGIONS=106, REMOVE=107, RETURNING=108, REVOKE=109, ROLE=110, - ROLES=111, ROW=112, SCHEMA=113, SECONDS=114, SELECT=115, SEQ_TRANSFORM=116, - SET=117, SHARD=118, SHOW=119, START=120, TABLE=121, TABLES=122, THEN=123, - TO=124, TTL=125, TYPE=126, UNFREEZE=127, UNLOCK=128, UPDATE=129, UPSERT=130, - USER=131, USERS=132, USING=133, VALUES=134, WHEN=135, WHERE=136, WITH=137, - UNIQUE=138, UNNEST=139, UUID=140, ALL_PRIVILEGES=141, IDENTIFIED_EXTERNALLY=142, - PASSWORD_EXPIRE=143, RETAIN_CURRENT_PASSWORD=144, CLEAR_RETAINED_PASSWORD=145, - LEFT_OUTER_JOIN=146, ARRAY_T=147, BINARY_T=148, BOOLEAN_T=149, DOUBLE_T=150, - ENUM_T=151, FLOAT_T=152, GEOMETRY_T=153, INTEGER_T=154, LONG_T=155, MAP_T=156, - NUMBER_T=157, POINT_T=158, RECORD_T=159, STRING_T=160, TIMESTAMP_T=161, - ANY_T=162, ANYATOMIC_T=163, ANYJSONATOMIC_T=164, ANYRECORD_T=165, SCALAR_T=166, - SEMI=167, COMMA=168, COLON=169, LP=170, RP=171, LBRACK=172, RBRACK=173, - LBRACE=174, RBRACE=175, STAR=176, DOT=177, DOLLAR=178, QUESTION_MARK=179, - LT=180, LTE=181, GT=182, GTE=183, EQ=184, NEQ=185, LT_ANY=186, LTE_ANY=187, - GT_ANY=188, GTE_ANY=189, EQ_ANY=190, NEQ_ANY=191, PLUS=192, MINUS=193, - IDIV=194, RDIV=195, CONCAT=196, NULL=197, FALSE=198, TRUE=199, INT=200, - FLOAT=201, NUMBER=202, DSTRING=203, STRING=204, SYSDOLAR=205, ID=206, - BAD_ID=207, WS=208, C_COMMENT=209, LINE_COMMENT=210, LINE_COMMENT1=211, - UnrecognizedToken=212; + T__0=1, T__1=2, T__2=3, T__3=4, VARNAME=5, ACCOUNT=6, ADD=7, ADMIN=8, + ALL=9, ALTER=10, ALWAYS=11, ANCESTORS=12, AND=13, AS=14, ASC=15, ARRAY_COLLECT=16, + BEFORE=17, BETWEEN=18, BY=19, CACHE=20, CASE=21, CASCADE=22, CAST=23, + COLLECTION=24, COMMENT=25, COUNT=26, CREATE=27, CYCLE=28, DAYS=29, DECLARE=30, + DEFAULT=31, DELETE=32, DESC=33, DESCENDANTS=34, DESCRIBE=35, DISABLE=36, + DISTINCT=37, DROP=38, ELEMENTOF=39, ELEMENTS=40, ELSE=41, ENABLE=42, END=43, + ES_SHARDS=44, ES_REPLICAS=45, EXISTS=46, EXTRACT=47, FIELDS=48, FIRST=49, + FORCE=50, FORCE_INDEX=51, FORCE_PRIMARY_INDEX=52, FREEZE=53, FROM=54, + FROZEN=55, FULLTEXT=56, GENERATED=57, GRANT=58, GROUP=59, HOURS=60, IDENTIFIED=61, + IDENTITY=62, IF=63, IMAGE=64, IN=65, INCREMENT=66, INDEX=67, INDEXES=68, + INSERT=69, INTO=70, IS=71, JSON=72, JOIN=73, KEY=74, KEYOF=75, KEYS=76, + LAST=77, LEFT=78, LIFETIME=79, LIMIT=80, LOCAL=81, LOCK=82, MAXVALUE=83, + MERGE=84, MINUTES=85, MINVALUE=86, MODIFY=87, MR_COUNTER=88, NAMESPACE=89, + NAMESPACES=90, NESTED=91, NO=92, NOT=93, NULLS=94, OFFSET=95, OF=96, ON=97, + ONLY=98, OR=99, ORDER=100, OUTER=101, OVERRIDE=102, PASSWORD=103, PATCH=104, + PER=105, PREFER_INDEXES=106, PREFER_PRIMARY_INDEX=107, PRIMARY=108, PUT=109, + REGION=110, REGIONS=111, REMOVE=112, RETURNING=113, REVOKE=114, ROLE=115, + ROLES=116, ROW=117, SCHEMA=118, SECONDS=119, SELECT=120, SEQ_TRANSFORM=121, + SET=122, SHARD=123, SHOW=124, START=125, TABLE=126, TABLES=127, THEN=128, + TO=129, TTL=130, TYPE=131, UNFREEZE=132, UNLOCK=133, UPDATE=134, UPSERT=135, + USER=136, USERS=137, USING=138, VALUES=139, WHEN=140, WHERE=141, WITH=142, + UNIQUE=143, UNNEST=144, UUID=145, ALL_PRIVILEGES=146, IDENTIFIED_EXTERNALLY=147, + PASSWORD_EXPIRE=148, RETAIN_CURRENT_PASSWORD=149, CLEAR_RETAINED_PASSWORD=150, + LEFT_OUTER_JOIN=151, ARRAY_T=152, BINARY_T=153, BOOLEAN_T=154, DOUBLE_T=155, + ENUM_T=156, FLOAT_T=157, GEOMETRY_T=158, INTEGER_T=159, LONG_T=160, MAP_T=161, + NUMBER_T=162, POINT_T=163, RECORD_T=164, STRING_T=165, TIMESTAMP_T=166, + ANY_T=167, ANYATOMIC_T=168, ANYJSONATOMIC_T=169, ANYRECORD_T=170, 
SCALAR_T=171, + SEMI=172, COMMA=173, COLON=174, LP=175, RP=176, LBRACK=177, RBRACK=178, + LBRACE=179, RBRACE=180, STAR=181, DOT=182, DOLLAR=183, QUESTION_MARK=184, + LT=185, LTE=186, GT=187, GTE=188, EQ=189, NEQ=190, LT_ANY=191, LTE_ANY=192, + GT_ANY=193, GTE_ANY=194, EQ_ANY=195, NEQ_ANY=196, PLUS=197, MINUS=198, + IDIV=199, RDIV=200, CONCAT=201, NULL=202, FALSE=203, TRUE=204, INT=205, + FLOAT=206, NUMBER=207, DSTRING=208, STRING=209, SYSDOLAR=210, ID=211, + BAD_ID=212, WS=213, C_COMMENT=214, LINE_COMMENT=215, LINE_COMMENT1=216, + UnrecognizedToken=217; public static String[] channelNames = { "DEFAULT_TOKEN_CHANNEL", "HIDDEN" }; @@ -61,97 +62,99 @@ public class KVQLLexer extends Lexer { private static String[] makeRuleNames() { return new String[] { - "T__0", "T__1", "T__2", "VARNAME", "ACCOUNT", "ADD", "ADMIN", "ALL", - "ALTER", "ALWAYS", "ANCESTORS", "AND", "AS", "ASC", "ARRAY_COLLECT", - "BETWEEN", "BY", "CACHE", "CASE", "CASCADE", "CAST", "COLLECTION", "COMMENT", - "COUNT", "CREATE", "CYCLE", "DAYS", "DECLARE", "DEFAULT", "DELETE", "DESC", - "DESCENDANTS", "DESCRIBE", "DISTINCT", "DROP", "ELEMENTOF", "ELEMENTS", - "ELSE", "END", "ES_SHARDS", "ES_REPLICAS", "EXISTS", "EXTRACT", "FIELDS", - "FIRST", "FORCE", "FORCE_INDEX", "FORCE_PRIMARY_INDEX", "FREEZE", "FROM", - "FROZEN", "FULLTEXT", "GENERATED", "GRANT", "GROUP", "HOURS", "IDENTIFIED", - "IDENTITY", "IF", "IN", "INCREMENT", "INDEX", "INDEXES", "INSERT", "INTO", - "IS", "JSON", "JOIN", "KEY", "KEYOF", "KEYS", "LAST", "LEFT", "LIFETIME", - "LIMIT", "LOCAL", "LOCK", "MAXVALUE", "MERGE", "MINUTES", "MINVALUE", - "MODIFY", "MR_COUNTER", "NAMESPACE", "NAMESPACES", "NESTED", "NO", "NOT", - "NULLS", "OFFSET", "OF", "ON", "ONLY", "OR", "ORDER", "OUTER", "OVERRIDE", - "PASSWORD", "PATCH", "PER", "PREFER_INDEXES", "PREFER_PRIMARY_INDEX", - "PRIMARY", "PUT", "REGION", "REGIONS", "REMOVE", "RETURNING", "REVOKE", - "ROLE", "ROLES", "ROW", "SCHEMA", "SECONDS", "SELECT", "SEQ_TRANSFORM", - "SET", "SHARD", "SHOW", "START", "TABLE", "TABLES", "THEN", "TO", "TTL", - "TYPE", "UNFREEZE", "UNLOCK", "UPDATE", "UPSERT", "USER", "USERS", "USING", - "VALUES", "WHEN", "WHERE", "WITH", "UNIQUE", "UNNEST", "UUID", "ALL_PRIVILEGES", - "IDENTIFIED_EXTERNALLY", "PASSWORD_EXPIRE", "RETAIN_CURRENT_PASSWORD", - "CLEAR_RETAINED_PASSWORD", "LEFT_OUTER_JOIN", "ARRAY_T", "BINARY_T", - "BOOLEAN_T", "DOUBLE_T", "ENUM_T", "FLOAT_T", "GEOMETRY_T", "INTEGER_T", - "LONG_T", "MAP_T", "NUMBER_T", "POINT_T", "RECORD_T", "STRING_T", "TIMESTAMP_T", - "ANY_T", "ANYATOMIC_T", "ANYJSONATOMIC_T", "ANYRECORD_T", "SCALAR_T", - "SEMI", "COMMA", "COLON", "LP", "RP", "LBRACK", "RBRACK", "LBRACE", "RBRACE", - "STAR", "DOT", "DOLLAR", "QUESTION_MARK", "LT", "LTE", "GT", "GTE", "EQ", - "NEQ", "LT_ANY", "LTE_ANY", "GT_ANY", "GTE_ANY", "EQ_ANY", "NEQ_ANY", - "PLUS", "MINUS", "IDIV", "RDIV", "CONCAT", "NULL", "FALSE", "TRUE", "INT", - "FLOAT", "NUMBER", "DSTRING", "STRING", "SYSDOLAR", "ID", "BAD_ID", "WS", - "C_COMMENT", "LINE_COMMENT", "LINE_COMMENT1", "UnrecognizedToken", "ALPHA", - "DIGIT", "DSTR_ESC", "ESC", "HEX", "UNDER", "UNICODE", "CLEAR", "CURRENT", - "EXPIRE", "EXTERNALLY", "PREFER", "PRIVILEGES", "RETAIN", "RETAINED" + "T__0", "T__1", "T__2", "T__3", "VARNAME", "ACCOUNT", "ADD", "ADMIN", + "ALL", "ALTER", "ALWAYS", "ANCESTORS", "AND", "AS", "ASC", "ARRAY_COLLECT", + "BEFORE", "BETWEEN", "BY", "CACHE", "CASE", "CASCADE", "CAST", "COLLECTION", + "COMMENT", "COUNT", "CREATE", "CYCLE", "DAYS", "DECLARE", "DEFAULT", + "DELETE", "DESC", "DESCENDANTS", "DESCRIBE", "DISABLE", "DISTINCT", 
"DROP", + "ELEMENTOF", "ELEMENTS", "ELSE", "ENABLE", "END", "ES_SHARDS", "ES_REPLICAS", + "EXISTS", "EXTRACT", "FIELDS", "FIRST", "FORCE", "FORCE_INDEX", "FORCE_PRIMARY_INDEX", + "FREEZE", "FROM", "FROZEN", "FULLTEXT", "GENERATED", "GRANT", "GROUP", + "HOURS", "IDENTIFIED", "IDENTITY", "IF", "IMAGE", "IN", "INCREMENT", + "INDEX", "INDEXES", "INSERT", "INTO", "IS", "JSON", "JOIN", "KEY", "KEYOF", + "KEYS", "LAST", "LEFT", "LIFETIME", "LIMIT", "LOCAL", "LOCK", "MAXVALUE", + "MERGE", "MINUTES", "MINVALUE", "MODIFY", "MR_COUNTER", "NAMESPACE", + "NAMESPACES", "NESTED", "NO", "NOT", "NULLS", "OFFSET", "OF", "ON", "ONLY", + "OR", "ORDER", "OUTER", "OVERRIDE", "PASSWORD", "PATCH", "PER", "PREFER_INDEXES", + "PREFER_PRIMARY_INDEX", "PRIMARY", "PUT", "REGION", "REGIONS", "REMOVE", + "RETURNING", "REVOKE", "ROLE", "ROLES", "ROW", "SCHEMA", "SECONDS", "SELECT", + "SEQ_TRANSFORM", "SET", "SHARD", "SHOW", "START", "TABLE", "TABLES", + "THEN", "TO", "TTL", "TYPE", "UNFREEZE", "UNLOCK", "UPDATE", "UPSERT", + "USER", "USERS", "USING", "VALUES", "WHEN", "WHERE", "WITH", "UNIQUE", + "UNNEST", "UUID", "ALL_PRIVILEGES", "IDENTIFIED_EXTERNALLY", "PASSWORD_EXPIRE", + "RETAIN_CURRENT_PASSWORD", "CLEAR_RETAINED_PASSWORD", "LEFT_OUTER_JOIN", + "ARRAY_T", "BINARY_T", "BOOLEAN_T", "DOUBLE_T", "ENUM_T", "FLOAT_T", + "GEOMETRY_T", "INTEGER_T", "LONG_T", "MAP_T", "NUMBER_T", "POINT_T", + "RECORD_T", "STRING_T", "TIMESTAMP_T", "ANY_T", "ANYATOMIC_T", "ANYJSONATOMIC_T", + "ANYRECORD_T", "SCALAR_T", "SEMI", "COMMA", "COLON", "LP", "RP", "LBRACK", + "RBRACK", "LBRACE", "RBRACE", "STAR", "DOT", "DOLLAR", "QUESTION_MARK", + "LT", "LTE", "GT", "GTE", "EQ", "NEQ", "LT_ANY", "LTE_ANY", "GT_ANY", + "GTE_ANY", "EQ_ANY", "NEQ_ANY", "PLUS", "MINUS", "IDIV", "RDIV", "CONCAT", + "NULL", "FALSE", "TRUE", "INT", "FLOAT", "NUMBER", "DSTRING", "STRING", + "SYSDOLAR", "ID", "BAD_ID", "WS", "C_COMMENT", "LINE_COMMENT", "LINE_COMMENT1", + "UnrecognizedToken", "ALPHA", "DIGIT", "DSTR_ESC", "ESC", "HEX", "UNDER", + "UNICODE", "CLEAR", "CURRENT", "EXPIRE", "EXTERNALLY", "PREFER", "PRIVILEGES", + "RETAIN", "RETAINED" }; } public static final String[] ruleNames = makeRuleNames(); private static String[] makeLiteralNames() { return new String[] { - null, "'/*+'", "'*/'", "'@'", null, null, null, null, null, null, null, - null, null, null, null, "'array_collect'", null, null, null, null, null, - null, null, null, "'count'", null, null, null, null, null, null, null, + null, "'/*+'", "'*/'", "'@'", "'row_metadata().'", null, null, null, + null, null, null, null, null, null, null, null, "'array_collect'", null, + null, null, null, null, null, null, null, null, "'count'", null, null, + null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, + null, null, null, null, null, null, null, null, "'seq_transform'", null, null, null, null, null, null, null, null, null, null, null, null, null, - "'seq_transform'", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, - null, null, null, null, null, "';'", "','", "':'", "'('", "')'", "'['", - "']'", "'{'", "'}'", "'*'", "'.'", "'$'", "'?'", "'<'", "'<='", "'>'", - "'>='", "'='", "'!='", null, null, null, null, null, null, "'+'", "'-'", - "'/'", null, "'||'" + null, "';'", "','", "':'", "'('", "')'", "'['", "']'", "'{'", "'}'", + "'*'", "'.'", "'$'", "'?'", "'<'", "'<='", "'>'", "'>='", "'='", "'!='", + null, null, null, null, null, null, "'+'", "'-'", "'/'", null, "'||'" }; } private static final String[] _LITERAL_NAMES = makeLiteralNames(); private static String[] makeSymbolicNames() { return new String[] { - null, null, null, null, "VARNAME", "ACCOUNT", "ADD", "ADMIN", "ALL", + null, null, null, null, null, "VARNAME", "ACCOUNT", "ADD", "ADMIN", "ALL", "ALTER", "ALWAYS", "ANCESTORS", "AND", "AS", "ASC", "ARRAY_COLLECT", - "BETWEEN", "BY", "CACHE", "CASE", "CASCADE", "CAST", "COLLECTION", "COMMENT", - "COUNT", "CREATE", "CYCLE", "DAYS", "DECLARE", "DEFAULT", "DELETE", "DESC", - "DESCENDANTS", "DESCRIBE", "DISTINCT", "DROP", "ELEMENTOF", "ELEMENTS", - "ELSE", "END", "ES_SHARDS", "ES_REPLICAS", "EXISTS", "EXTRACT", "FIELDS", - "FIRST", "FORCE", "FORCE_INDEX", "FORCE_PRIMARY_INDEX", "FREEZE", "FROM", - "FROZEN", "FULLTEXT", "GENERATED", "GRANT", "GROUP", "HOURS", "IDENTIFIED", - "IDENTITY", "IF", "IN", "INCREMENT", "INDEX", "INDEXES", "INSERT", "INTO", - "IS", "JSON", "JOIN", "KEY", "KEYOF", "KEYS", "LAST", "LEFT", "LIFETIME", - "LIMIT", "LOCAL", "LOCK", "MAXVALUE", "MERGE", "MINUTES", "MINVALUE", - "MODIFY", "MR_COUNTER", "NAMESPACE", "NAMESPACES", "NESTED", "NO", "NOT", - "NULLS", "OFFSET", "OF", "ON", "ONLY", "OR", "ORDER", "OUTER", "OVERRIDE", - "PASSWORD", "PATCH", "PER", "PREFER_INDEXES", "PREFER_PRIMARY_INDEX", - "PRIMARY", "PUT", "REGION", "REGIONS", "REMOVE", "RETURNING", "REVOKE", - "ROLE", "ROLES", "ROW", "SCHEMA", "SECONDS", "SELECT", "SEQ_TRANSFORM", - "SET", "SHARD", "SHOW", "START", "TABLE", "TABLES", "THEN", "TO", "TTL", - "TYPE", "UNFREEZE", "UNLOCK", "UPDATE", "UPSERT", "USER", "USERS", "USING", - "VALUES", "WHEN", "WHERE", "WITH", "UNIQUE", "UNNEST", "UUID", "ALL_PRIVILEGES", - "IDENTIFIED_EXTERNALLY", "PASSWORD_EXPIRE", "RETAIN_CURRENT_PASSWORD", - "CLEAR_RETAINED_PASSWORD", "LEFT_OUTER_JOIN", "ARRAY_T", "BINARY_T", - "BOOLEAN_T", "DOUBLE_T", "ENUM_T", "FLOAT_T", "GEOMETRY_T", "INTEGER_T", - "LONG_T", "MAP_T", "NUMBER_T", "POINT_T", "RECORD_T", "STRING_T", "TIMESTAMP_T", - "ANY_T", "ANYATOMIC_T", "ANYJSONATOMIC_T", "ANYRECORD_T", "SCALAR_T", - "SEMI", "COMMA", "COLON", "LP", "RP", "LBRACK", "RBRACK", "LBRACE", "RBRACE", - "STAR", "DOT", "DOLLAR", "QUESTION_MARK", "LT", "LTE", "GT", "GTE", "EQ", - "NEQ", "LT_ANY", "LTE_ANY", "GT_ANY", "GTE_ANY", "EQ_ANY", "NEQ_ANY", - "PLUS", "MINUS", "IDIV", "RDIV", "CONCAT", "NULL", "FALSE", "TRUE", "INT", - "FLOAT", "NUMBER", "DSTRING", "STRING", "SYSDOLAR", "ID", "BAD_ID", "WS", - "C_COMMENT", "LINE_COMMENT", "LINE_COMMENT1", "UnrecognizedToken" + "BEFORE", "BETWEEN", "BY", "CACHE", "CASE", "CASCADE", "CAST", "COLLECTION", + "COMMENT", "COUNT", "CREATE", "CYCLE", "DAYS", "DECLARE", "DEFAULT", + "DELETE", "DESC", "DESCENDANTS", "DESCRIBE", "DISABLE", "DISTINCT", "DROP", + "ELEMENTOF", "ELEMENTS", "ELSE", "ENABLE", "END", "ES_SHARDS", "ES_REPLICAS", + "EXISTS", "EXTRACT", "FIELDS", "FIRST", "FORCE", "FORCE_INDEX", "FORCE_PRIMARY_INDEX", + "FREEZE", "FROM", "FROZEN", "FULLTEXT", "GENERATED", "GRANT", "GROUP", + "HOURS", "IDENTIFIED", "IDENTITY", "IF", "IMAGE", "IN", "INCREMENT", + "INDEX", 
"INDEXES", "INSERT", "INTO", "IS", "JSON", "JOIN", "KEY", "KEYOF", + "KEYS", "LAST", "LEFT", "LIFETIME", "LIMIT", "LOCAL", "LOCK", "MAXVALUE", + "MERGE", "MINUTES", "MINVALUE", "MODIFY", "MR_COUNTER", "NAMESPACE", + "NAMESPACES", "NESTED", "NO", "NOT", "NULLS", "OFFSET", "OF", "ON", "ONLY", + "OR", "ORDER", "OUTER", "OVERRIDE", "PASSWORD", "PATCH", "PER", "PREFER_INDEXES", + "PREFER_PRIMARY_INDEX", "PRIMARY", "PUT", "REGION", "REGIONS", "REMOVE", + "RETURNING", "REVOKE", "ROLE", "ROLES", "ROW", "SCHEMA", "SECONDS", "SELECT", + "SEQ_TRANSFORM", "SET", "SHARD", "SHOW", "START", "TABLE", "TABLES", + "THEN", "TO", "TTL", "TYPE", "UNFREEZE", "UNLOCK", "UPDATE", "UPSERT", + "USER", "USERS", "USING", "VALUES", "WHEN", "WHERE", "WITH", "UNIQUE", + "UNNEST", "UUID", "ALL_PRIVILEGES", "IDENTIFIED_EXTERNALLY", "PASSWORD_EXPIRE", + "RETAIN_CURRENT_PASSWORD", "CLEAR_RETAINED_PASSWORD", "LEFT_OUTER_JOIN", + "ARRAY_T", "BINARY_T", "BOOLEAN_T", "DOUBLE_T", "ENUM_T", "FLOAT_T", + "GEOMETRY_T", "INTEGER_T", "LONG_T", "MAP_T", "NUMBER_T", "POINT_T", + "RECORD_T", "STRING_T", "TIMESTAMP_T", "ANY_T", "ANYATOMIC_T", "ANYJSONATOMIC_T", + "ANYRECORD_T", "SCALAR_T", "SEMI", "COMMA", "COLON", "LP", "RP", "LBRACK", + "RBRACK", "LBRACE", "RBRACE", "STAR", "DOT", "DOLLAR", "QUESTION_MARK", + "LT", "LTE", "GT", "GTE", "EQ", "NEQ", "LT_ANY", "LTE_ANY", "GT_ANY", + "GTE_ANY", "EQ_ANY", "NEQ_ANY", "PLUS", "MINUS", "IDIV", "RDIV", "CONCAT", + "NULL", "FALSE", "TRUE", "INT", "FLOAT", "NUMBER", "DSTRING", "STRING", + "SYSDOLAR", "ID", "BAD_ID", "WS", "C_COMMENT", "LINE_COMMENT", "LINE_COMMENT1", + "UnrecognizedToken" }; } private static final String[] _SYMBOLIC_NAMES = makeSymbolicNames(); @@ -213,7 +216,7 @@ public KVQLLexer(CharStream input) { public ATN getATN() { return _ATN; } public static final String _serializedATN = - "\u0004\u0000\u00d4\u0782\u0006\uffff\uffff\u0002\u0000\u0007\u0000\u0002"+ + "\u0004\u0000\u00d9\u07b8\u0006\uffff\uffff\u0002\u0000\u0007\u0000\u0002"+ "\u0001\u0007\u0001\u0002\u0002\u0007\u0002\u0002\u0003\u0007\u0003\u0002"+ "\u0004\u0007\u0004\u0002\u0005\u0007\u0005\u0002\u0006\u0007\u0006\u0002"+ "\u0007\u0007\u0007\u0002\b\u0007\b\u0002\t\u0007\t\u0002\n\u0007\n\u0002"+ @@ -276,246 +279,255 @@ public KVQLLexer(CharStream input) { "\u0002\u00da\u0007\u00da\u0002\u00db\u0007\u00db\u0002\u00dc\u0007\u00dc"+ "\u0002\u00dd\u0007\u00dd\u0002\u00de\u0007\u00de\u0002\u00df\u0007\u00df"+ "\u0002\u00e0\u0007\u00e0\u0002\u00e1\u0007\u00e1\u0002\u00e2\u0007\u00e2"+ - "\u0001\u0000\u0001\u0000\u0001\u0000\u0001\u0000\u0001\u0001\u0001\u0001"+ - "\u0001\u0001\u0001\u0002\u0001\u0002\u0001\u0003\u0001\u0003\u0001\u0003"+ - "\u0001\u0003\u0001\u0003\u0005\u0003\u01d6\b\u0003\n\u0003\f\u0003\u01d9"+ - "\t\u0003\u0001\u0004\u0001\u0004\u0001\u0004\u0001\u0004\u0001\u0004\u0001"+ - "\u0004\u0001\u0004\u0001\u0004\u0001\u0005\u0001\u0005\u0001\u0005\u0001"+ - "\u0005\u0001\u0006\u0001\u0006\u0001\u0006\u0001\u0006\u0001\u0006\u0001"+ - "\u0006\u0001\u0007\u0001\u0007\u0001\u0007\u0001\u0007\u0001\b\u0001\b"+ - "\u0001\b\u0001\b\u0001\b\u0001\b\u0001\t\u0001\t\u0001\t\u0001\t\u0001"+ - "\t\u0001\t\u0001\t\u0001\n\u0001\n\u0001\n\u0001\n\u0001\n\u0001\n\u0001"+ + "\u0002\u00e3\u0007\u00e3\u0002\u00e4\u0007\u00e4\u0002\u00e5\u0007\u00e5"+ + "\u0002\u00e6\u0007\u00e6\u0002\u00e7\u0007\u00e7\u0001\u0000\u0001\u0000"+ + "\u0001\u0000\u0001\u0000\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0002"+ + "\u0001\u0002\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0003"+ + 
"\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0003"+ + "\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0004"+ + "\u0001\u0004\u0001\u0004\u0001\u0004\u0001\u0004\u0005\u0004\u01f0\b\u0004"+ + "\n\u0004\f\u0004\u01f3\t\u0004\u0001\u0005\u0001\u0005\u0001\u0005\u0001"+ + "\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0001\u0006\u0001"+ + "\u0006\u0001\u0006\u0001\u0006\u0001\u0007\u0001\u0007\u0001\u0007\u0001"+ + "\u0007\u0001\u0007\u0001\u0007\u0001\b\u0001\b\u0001\b\u0001\b\u0001\t"+ + "\u0001\t\u0001\t\u0001\t\u0001\t\u0001\t\u0001\n\u0001\n\u0001\n\u0001"+ "\n\u0001\n\u0001\n\u0001\n\u0001\u000b\u0001\u000b\u0001\u000b\u0001\u000b"+ - "\u0001\f\u0001\f\u0001\f\u0001\r\u0001\r\u0001\r\u0001\r\u0001\u000e\u0001"+ - "\u000e\u0001\u000e\u0001\u000e\u0001\u000e\u0001\u000e\u0001\u000e\u0001"+ - "\u000e\u0001\u000e\u0001\u000e\u0001\u000e\u0001\u000e\u0001\u000e\u0001"+ - "\u000e\u0001\u000f\u0001\u000f\u0001\u000f\u0001\u000f\u0001\u000f\u0001"+ - "\u000f\u0001\u000f\u0001\u000f\u0001\u0010\u0001\u0010\u0001\u0010\u0001"+ + "\u0001\u000b\u0001\u000b\u0001\u000b\u0001\u000b\u0001\u000b\u0001\u000b"+ + "\u0001\f\u0001\f\u0001\f\u0001\f\u0001\r\u0001\r\u0001\r\u0001\u000e\u0001"+ + "\u000e\u0001\u000e\u0001\u000e\u0001\u000f\u0001\u000f\u0001\u000f\u0001"+ + "\u000f\u0001\u000f\u0001\u000f\u0001\u000f\u0001\u000f\u0001\u000f\u0001"+ + "\u000f\u0001\u000f\u0001\u000f\u0001\u000f\u0001\u000f\u0001\u0010\u0001"+ + "\u0010\u0001\u0010\u0001\u0010\u0001\u0010\u0001\u0010\u0001\u0010\u0001"+ "\u0011\u0001\u0011\u0001\u0011\u0001\u0011\u0001\u0011\u0001\u0011\u0001"+ - "\u0012\u0001\u0012\u0001\u0012\u0001\u0012\u0001\u0012\u0001\u0013\u0001"+ - "\u0013\u0001\u0013\u0001\u0013\u0001\u0013\u0001\u0013\u0001\u0013\u0001"+ - "\u0013\u0001\u0014\u0001\u0014\u0001\u0014\u0001\u0014\u0001\u0014\u0001"+ + "\u0011\u0001\u0011\u0001\u0012\u0001\u0012\u0001\u0012\u0001\u0013\u0001"+ + "\u0013\u0001\u0013\u0001\u0013\u0001\u0013\u0001\u0013\u0001\u0014\u0001"+ + "\u0014\u0001\u0014\u0001\u0014\u0001\u0014\u0001\u0015\u0001\u0015\u0001"+ "\u0015\u0001\u0015\u0001\u0015\u0001\u0015\u0001\u0015\u0001\u0015\u0001"+ - "\u0015\u0001\u0015\u0001\u0015\u0001\u0015\u0001\u0015\u0001\u0016\u0001"+ - "\u0016\u0001\u0016\u0001\u0016\u0001\u0016\u0001\u0016\u0001\u0016\u0001"+ - "\u0016\u0001\u0017\u0001\u0017\u0001\u0017\u0001\u0017\u0001\u0017\u0001"+ - "\u0017\u0001\u0018\u0001\u0018\u0001\u0018\u0001\u0018\u0001\u0018\u0001"+ - "\u0018\u0001\u0018\u0001\u0019\u0001\u0019\u0001\u0019\u0001\u0019\u0001"+ - "\u0019\u0001\u0019\u0001\u001a\u0001\u001a\u0001\u001a\u0001\u001a\u0001"+ - "\u001a\u0003\u001a\u026f\b\u001a\u0001\u001b\u0001\u001b\u0001\u001b\u0001"+ - "\u001b\u0001\u001b\u0001\u001b\u0001\u001b\u0001\u001b\u0001\u001c\u0001"+ - "\u001c\u0001\u001c\u0001\u001c\u0001\u001c\u0001\u001c\u0001\u001c\u0001"+ - "\u001c\u0001\u001d\u0001\u001d\u0001\u001d\u0001\u001d\u0001\u001d\u0001"+ - "\u001d\u0001\u001d\u0001\u001e\u0001\u001e\u0001\u001e\u0001\u001e\u0001"+ - "\u001e\u0001\u001f\u0001\u001f\u0001\u001f\u0001\u001f\u0001\u001f\u0001"+ + "\u0016\u0001\u0016\u0001\u0016\u0001\u0016\u0001\u0016\u0001\u0017\u0001"+ + "\u0017\u0001\u0017\u0001\u0017\u0001\u0017\u0001\u0017\u0001\u0017\u0001"+ + "\u0017\u0001\u0017\u0001\u0017\u0001\u0017\u0001\u0018\u0001\u0018\u0001"+ + "\u0018\u0001\u0018\u0001\u0018\u0001\u0018\u0001\u0018\u0001\u0018\u0001"+ + "\u0019\u0001\u0019\u0001\u0019\u0001\u0019\u0001\u0019\u0001\u0019\u0001"+ + 
"\u001a\u0001\u001a\u0001\u001a\u0001\u001a\u0001\u001a\u0001\u001a\u0001"+ + "\u001a\u0001\u001b\u0001\u001b\u0001\u001b\u0001\u001b\u0001\u001b\u0001"+ + "\u001b\u0001\u001c\u0001\u001c\u0001\u001c\u0001\u001c\u0001\u001c\u0003"+ + "\u001c\u0290\b\u001c\u0001\u001d\u0001\u001d\u0001\u001d\u0001\u001d\u0001"+ + "\u001d\u0001\u001d\u0001\u001d\u0001\u001d\u0001\u001e\u0001\u001e\u0001"+ + "\u001e\u0001\u001e\u0001\u001e\u0001\u001e\u0001\u001e\u0001\u001e\u0001"+ "\u001f\u0001\u001f\u0001\u001f\u0001\u001f\u0001\u001f\u0001\u001f\u0001"+ - "\u001f\u0001 \u0001 \u0001 \u0001 \u0001 \u0001 \u0001 \u0001 \u0001 "+ - "\u0001!\u0001!\u0001!\u0001!\u0001!\u0001!\u0001!\u0001!\u0001!\u0001"+ - "\"\u0001\"\u0001\"\u0001\"\u0001\"\u0001#\u0001#\u0001#\u0001#\u0001#"+ - "\u0001#\u0001#\u0001#\u0001#\u0001#\u0001$\u0001$\u0001$\u0001$\u0001"+ - "$\u0001$\u0001$\u0001$\u0001$\u0001%\u0001%\u0001%\u0001%\u0001%\u0001"+ - "&\u0001&\u0001&\u0001&\u0001\'\u0001\'\u0001\'\u0001\'\u0001\'\u0001\'"+ - "\u0001\'\u0001\'\u0001\'\u0001\'\u0001(\u0001(\u0001(\u0001(\u0001(\u0001"+ - "(\u0001(\u0001(\u0001(\u0001(\u0001(\u0001(\u0001)\u0001)\u0001)\u0001"+ - ")\u0001)\u0001)\u0001)\u0001*\u0001*\u0001*\u0001*\u0001*\u0001*\u0001"+ - "*\u0001*\u0001+\u0001+\u0001+\u0001+\u0001+\u0001+\u0001+\u0001,\u0001"+ - ",\u0001,\u0001,\u0001,\u0001,\u0001-\u0001-\u0001-\u0001-\u0001-\u0001"+ - "-\u0001.\u0001.\u0001.\u0001.\u0001/\u0001/\u0001/\u0001/\u0001/\u0001"+ - "/\u00010\u00010\u00010\u00010\u00010\u00010\u00010\u00011\u00011\u0001"+ - "1\u00011\u00011\u00012\u00012\u00012\u00012\u00012\u00012\u00012\u0001"+ - "3\u00013\u00013\u00013\u00013\u00013\u00013\u00013\u00013\u00014\u0001"+ - "4\u00014\u00014\u00014\u00014\u00014\u00014\u00014\u00014\u00015\u0001"+ + "\u001f\u0001 \u0001 \u0001 \u0001 \u0001 \u0001!\u0001!\u0001!\u0001!"+ + "\u0001!\u0001!\u0001!\u0001!\u0001!\u0001!\u0001!\u0001!\u0001\"\u0001"+ + "\"\u0001\"\u0001\"\u0001\"\u0001\"\u0001\"\u0001\"\u0001\"\u0001#\u0001"+ + "#\u0001#\u0001#\u0001#\u0001#\u0001#\u0001#\u0001$\u0001$\u0001$\u0001"+ + "$\u0001$\u0001$\u0001$\u0001$\u0001$\u0001%\u0001%\u0001%\u0001%\u0001"+ + "%\u0001&\u0001&\u0001&\u0001&\u0001&\u0001&\u0001&\u0001&\u0001&\u0001"+ + "&\u0001\'\u0001\'\u0001\'\u0001\'\u0001\'\u0001\'\u0001\'\u0001\'\u0001"+ + "\'\u0001(\u0001(\u0001(\u0001(\u0001(\u0001)\u0001)\u0001)\u0001)\u0001"+ + ")\u0001)\u0001)\u0001*\u0001*\u0001*\u0001*\u0001+\u0001+\u0001+\u0001"+ + "+\u0001+\u0001+\u0001+\u0001+\u0001+\u0001+\u0001,\u0001,\u0001,\u0001"+ + ",\u0001,\u0001,\u0001,\u0001,\u0001,\u0001,\u0001,\u0001,\u0001-\u0001"+ + "-\u0001-\u0001-\u0001-\u0001-\u0001-\u0001.\u0001.\u0001.\u0001.\u0001"+ + ".\u0001.\u0001.\u0001.\u0001/\u0001/\u0001/\u0001/\u0001/\u0001/\u0001"+ + "/\u00010\u00010\u00010\u00010\u00010\u00010\u00011\u00011\u00011\u0001"+ + "1\u00011\u00011\u00012\u00012\u00012\u00012\u00013\u00013\u00013\u0001"+ + "3\u00013\u00013\u00014\u00014\u00014\u00014\u00014\u00014\u00014\u0001"+ "5\u00015\u00015\u00015\u00015\u00016\u00016\u00016\u00016\u00016\u0001"+ - "6\u00017\u00017\u00017\u00017\u00017\u00017\u00037\u0346\b7\u00018\u0001"+ - "8\u00018\u00018\u00018\u00018\u00018\u00018\u00018\u00018\u00018\u0001"+ - "9\u00019\u00019\u00019\u00019\u00019\u00019\u00019\u00019\u0001:\u0001"+ - ":\u0001:\u0001;\u0001;\u0001;\u0001<\u0001<\u0001<\u0001<\u0001<\u0001"+ - "<\u0001<\u0001<\u0001<\u0001<\u0001=\u0001=\u0001=\u0001=\u0001=\u0001"+ - "=\u0001>\u0001>\u0001>\u0001>\u0001>\u0001>\u0001>\u0001>\u0001?\u0001"+ - 
"?\u0001?\u0001?\u0001?\u0001?\u0001?\u0001@\u0001@\u0001@\u0001@\u0001"+ - "@\u0001A\u0001A\u0001A\u0001B\u0001B\u0001B\u0001B\u0001B\u0001C\u0001"+ - "C\u0001C\u0001C\u0001C\u0001D\u0001D\u0001D\u0001D\u0001E\u0001E\u0001"+ - "E\u0001E\u0001E\u0001E\u0001F\u0001F\u0001F\u0001F\u0001F\u0001G\u0001"+ - "G\u0001G\u0001G\u0001G\u0001H\u0001H\u0001H\u0001H\u0001H\u0001I\u0001"+ - "I\u0001I\u0001I\u0001I\u0001I\u0001I\u0001I\u0001I\u0001J\u0001J\u0001"+ - "J\u0001J\u0001J\u0001J\u0001K\u0001K\u0001K\u0001K\u0001K\u0001K\u0001"+ - "L\u0001L\u0001L\u0001L\u0001L\u0001M\u0001M\u0001M\u0001M\u0001M\u0001"+ - "M\u0001M\u0001M\u0001M\u0001N\u0001N\u0001N\u0001N\u0001N\u0001N\u0001"+ - "O\u0001O\u0001O\u0001O\u0001O\u0001O\u0001O\u0001O\u0003O\u03dd\bO\u0001"+ - "P\u0001P\u0001P\u0001P\u0001P\u0001P\u0001P\u0001P\u0001P\u0001Q\u0001"+ - "Q\u0001Q\u0001Q\u0001Q\u0001Q\u0001Q\u0001R\u0001R\u0001R\u0001R\u0001"+ - "R\u0001R\u0001R\u0001R\u0001R\u0001R\u0001R\u0001S\u0001S\u0001S\u0001"+ - "S\u0001S\u0001S\u0001S\u0001S\u0001S\u0001S\u0001T\u0001T\u0001T\u0001"+ - "T\u0001T\u0001T\u0001T\u0001T\u0001T\u0001T\u0001T\u0001U\u0001U\u0001"+ - "U\u0001U\u0001U\u0001U\u0001U\u0001V\u0001V\u0001V\u0001W\u0001W\u0001"+ - "W\u0001W\u0001X\u0001X\u0001X\u0001X\u0001X\u0001X\u0001Y\u0001Y\u0001"+ - "Y\u0001Y\u0001Y\u0001Y\u0001Y\u0001Z\u0001Z\u0001Z\u0001[\u0001[\u0001"+ - "[\u0001\\\u0001\\\u0001\\\u0001\\\u0001\\\u0001]\u0001]\u0001]\u0001^"+ - "\u0001^\u0001^\u0001^\u0001^\u0001^\u0001_\u0001_\u0001_\u0001_\u0001"+ - "_\u0001_\u0001`\u0001`\u0001`\u0001`\u0001`\u0001`\u0001`\u0001`\u0001"+ - "`\u0001a\u0001a\u0001a\u0001a\u0001a\u0001a\u0001a\u0001a\u0001a\u0001"+ - "b\u0001b\u0001b\u0001b\u0001b\u0001b\u0001c\u0001c\u0001c\u0001c\u0001"+ - "d\u0001d\u0001d\u0001d\u0001e\u0001e\u0001e\u0001e\u0001e\u0001e\u0001"+ - "f\u0001f\u0001f\u0001f\u0001f\u0001f\u0001f\u0001f\u0001g\u0001g\u0001"+ - "g\u0001g\u0001h\u0001h\u0001h\u0001h\u0001h\u0001h\u0001h\u0001i\u0001"+ - "i\u0001i\u0001i\u0001i\u0001i\u0001i\u0001i\u0001j\u0001j\u0001j\u0001"+ - "j\u0001j\u0001j\u0001j\u0001k\u0001k\u0001k\u0001k\u0001k\u0001k\u0001"+ - "k\u0001k\u0001k\u0001k\u0001l\u0001l\u0001l\u0001l\u0001l\u0001l\u0001"+ - "l\u0001m\u0001m\u0001m\u0001m\u0001m\u0001n\u0001n\u0001n\u0001n\u0001"+ - "n\u0001n\u0001o\u0001o\u0001o\u0001o\u0001p\u0001p\u0001p\u0001p\u0001"+ - "p\u0001p\u0001p\u0001q\u0001q\u0001q\u0001q\u0001q\u0001q\u0001q\u0001"+ - "q\u0003q\u04bb\bq\u0001r\u0001r\u0001r\u0001r\u0001r\u0001r\u0001r\u0001"+ - "s\u0001s\u0001s\u0001s\u0001s\u0001s\u0001s\u0001s\u0001s\u0001s\u0001"+ - "s\u0001s\u0001s\u0001s\u0001t\u0001t\u0001t\u0001t\u0001u\u0001u\u0001"+ - "u\u0001u\u0001u\u0001u\u0001v\u0001v\u0001v\u0001v\u0001v\u0001w\u0001"+ - "w\u0001w\u0001w\u0001w\u0001w\u0001x\u0001x\u0001x\u0001x\u0001x\u0001"+ - "x\u0001y\u0001y\u0001y\u0001y\u0001y\u0001y\u0001y\u0001z\u0001z\u0001"+ - "z\u0001z\u0001z\u0001{\u0001{\u0001{\u0001|\u0001|\u0001|\u0001|\u0001"+ - "}\u0001}\u0001}\u0001}\u0001}\u0001~\u0001~\u0001~\u0001~\u0001~\u0001"+ - "~\u0001~\u0001~\u0001~\u0001\u007f\u0001\u007f\u0001\u007f\u0001\u007f"+ - "\u0001\u007f\u0001\u007f\u0001\u007f\u0001\u0080\u0001\u0080\u0001\u0080"+ - "\u0001\u0080\u0001\u0080\u0001\u0080\u0001\u0080\u0001\u0081\u0001\u0081"+ - "\u0001\u0081\u0001\u0081\u0001\u0081\u0001\u0081\u0001\u0081\u0001\u0082"+ - "\u0001\u0082\u0001\u0082\u0001\u0082\u0001\u0082\u0001\u0083\u0001\u0083"+ - "\u0001\u0083\u0001\u0083\u0001\u0083\u0001\u0083\u0001\u0084\u0001\u0084"+ - 
"\u0001\u0084\u0001\u0084\u0001\u0084\u0001\u0084\u0001\u0085\u0001\u0085"+ - "\u0001\u0085\u0001\u0085\u0001\u0085\u0001\u0085\u0001\u0085\u0001\u0086"+ - "\u0001\u0086\u0001\u0086\u0001\u0086\u0001\u0086\u0001\u0087\u0001\u0087"+ - "\u0001\u0087\u0001\u0087\u0001\u0087\u0001\u0087\u0001\u0088\u0001\u0088"+ - "\u0001\u0088\u0001\u0088\u0001\u0088\u0001\u0089\u0001\u0089\u0001\u0089"+ - "\u0001\u0089\u0001\u0089\u0001\u0089\u0001\u0089\u0001\u008a\u0001\u008a"+ - "\u0001\u008a\u0001\u008a\u0001\u008a\u0001\u008a\u0001\u008a\u0001\u008b"+ - "\u0001\u008b\u0001\u008b\u0001\u008b\u0001\u008b\u0001\u008c\u0001\u008c"+ - "\u0004\u008c\u0560\b\u008c\u000b\u008c\f\u008c\u0561\u0001\u008c\u0001"+ - "\u008c\u0001\u008d\u0001\u008d\u0004\u008d\u0568\b\u008d\u000b\u008d\f"+ - "\u008d\u0569\u0001\u008d\u0001\u008d\u0001\u008e\u0001\u008e\u0004\u008e"+ - "\u0570\b\u008e\u000b\u008e\f\u008e\u0571\u0001\u008e\u0001\u008e\u0001"+ - "\u008f\u0001\u008f\u0004\u008f\u0578\b\u008f\u000b\u008f\f\u008f\u0579"+ - "\u0001\u008f\u0001\u008f\u0004\u008f\u057e\b\u008f\u000b\u008f\f\u008f"+ - "\u057f\u0001\u008f\u0001\u008f\u0001\u0090\u0001\u0090\u0004\u0090\u0586"+ - "\b\u0090\u000b\u0090\f\u0090\u0587\u0001\u0090\u0001\u0090\u0004\u0090"+ - "\u058c\b\u0090\u000b\u0090\f\u0090\u058d\u0001\u0090\u0001\u0090\u0001"+ - "\u0091\u0001\u0091\u0004\u0091\u0594\b\u0091\u000b\u0091\f\u0091\u0595"+ - "\u0001\u0091\u0001\u0091\u0004\u0091\u059a\b\u0091\u000b\u0091\f\u0091"+ - "\u059b\u0001\u0091\u0001\u0091\u0001\u0092\u0001\u0092\u0001\u0092\u0001"+ - "\u0092\u0001\u0092\u0001\u0092\u0001\u0093\u0001\u0093\u0001\u0093\u0001"+ - "\u0093\u0001\u0093\u0001\u0093\u0001\u0093\u0001\u0094\u0001\u0094\u0001"+ - "\u0094\u0001\u0094\u0001\u0094\u0001\u0094\u0001\u0094\u0001\u0094\u0001"+ - "\u0095\u0001\u0095\u0001\u0095\u0001\u0095\u0001\u0095\u0001\u0095\u0001"+ - "\u0095\u0001\u0096\u0001\u0096\u0001\u0096\u0001\u0096\u0001\u0096\u0001"+ - "\u0097\u0001\u0097\u0001\u0097\u0001\u0097\u0001\u0097\u0001\u0097\u0001"+ - "\u0098\u0001\u0098\u0001\u0098\u0001\u0098\u0001\u0098\u0001\u0098\u0001"+ - "\u0098\u0001\u0098\u0001\u0098\u0001\u0099\u0001\u0099\u0001\u0099\u0001"+ - "\u0099\u0001\u0099\u0001\u0099\u0001\u0099\u0001\u0099\u0001\u009a\u0001"+ - "\u009a\u0001\u009a\u0001\u009a\u0001\u009a\u0001\u009b\u0001\u009b\u0001"+ - "\u009b\u0001\u009b\u0001\u009c\u0001\u009c\u0001\u009c\u0001\u009c\u0001"+ - "\u009c\u0001\u009c\u0001\u009c\u0001\u009d\u0001\u009d\u0001\u009d\u0001"+ - "\u009d\u0001\u009d\u0001\u009d\u0001\u009e\u0001\u009e\u0001\u009e\u0001"+ - "\u009e\u0001\u009e\u0001\u009e\u0001\u009e\u0001\u009f\u0001\u009f\u0001"+ - "\u009f\u0001\u009f\u0001\u009f\u0001\u009f\u0001\u009f\u0001\u00a0\u0001"+ - "\u00a0\u0001\u00a0\u0001\u00a0\u0001\u00a0\u0001\u00a0\u0001\u00a0\u0001"+ - "\u00a0\u0001\u00a0\u0001\u00a0\u0001\u00a1\u0001\u00a1\u0001\u00a1\u0001"+ - "\u00a1\u0001\u00a2\u0001\u00a2\u0001\u00a2\u0001\u00a2\u0001\u00a2\u0001"+ - "\u00a2\u0001\u00a2\u0001\u00a2\u0001\u00a2\u0001\u00a2\u0001\u00a3\u0001"+ - "\u00a3\u0001\u00a3\u0001\u00a3\u0001\u00a3\u0001\u00a3\u0001\u00a3\u0001"+ - "\u00a3\u0001\u00a3\u0001\u00a3\u0001\u00a3\u0001\u00a3\u0001\u00a3\u0001"+ - "\u00a3\u0001\u00a4\u0001\u00a4\u0001\u00a4\u0001\u00a4\u0001\u00a4\u0001"+ - "\u00a4\u0001\u00a4\u0001\u00a4\u0001\u00a4\u0001\u00a4\u0001\u00a5\u0001"+ - "\u00a5\u0001\u00a5\u0001\u00a5\u0001\u00a5\u0001\u00a5\u0001\u00a5\u0001"+ - "\u00a6\u0001\u00a6\u0001\u00a7\u0001\u00a7\u0001\u00a8\u0001\u00a8\u0001"+ - 
"\u00a9\u0001\u00a9\u0001\u00aa\u0001\u00aa\u0001\u00ab\u0001\u00ab\u0001"+ - "\u00ac\u0001\u00ac\u0001\u00ad\u0001\u00ad\u0001\u00ae\u0001\u00ae\u0001"+ - "\u00af\u0001\u00af\u0001\u00b0\u0001\u00b0\u0001\u00b1\u0001\u00b1\u0001"+ - "\u00b2\u0001\u00b2\u0001\u00b3\u0001\u00b3\u0001\u00b4\u0001\u00b4\u0001"+ - "\u00b4\u0001\u00b5\u0001\u00b5\u0001\u00b6\u0001\u00b6\u0001\u00b6\u0001"+ - "\u00b7\u0001\u00b7\u0001\u00b8\u0001\u00b8\u0001\u00b8\u0001\u00b9\u0001"+ - "\u00b9\u0001\u00b9\u0001\u00b9\u0001\u00b9\u0001\u00ba\u0001\u00ba\u0001"+ - "\u00ba\u0001\u00ba\u0001\u00ba\u0001\u00ba\u0001\u00ba\u0001\u00bb\u0001"+ - "\u00bb\u0001\u00bb\u0001\u00bb\u0001\u00bb\u0001\u00bc\u0001\u00bc\u0001"+ - "\u00bc\u0001\u00bc\u0001\u00bc\u0001\u00bc\u0001\u00bc\u0001\u00bd\u0001"+ - "\u00bd\u0001\u00bd\u0001\u00bd\u0001\u00bd\u0001\u00be\u0001\u00be\u0001"+ - "\u00be\u0001\u00be\u0001\u00be\u0001\u00be\u0001\u00be\u0001\u00bf\u0001"+ - "\u00bf\u0001\u00c0\u0001\u00c0\u0001\u00c1\u0001\u00c1\u0001\u00c2\u0001"+ - "\u00c2\u0001\u00c2\u0001\u00c2\u0001\u00c3\u0001\u00c3\u0001\u00c3\u0001"+ - "\u00c4\u0001\u00c4\u0001\u00c4\u0001\u00c4\u0001\u00c4\u0001\u00c5\u0001"+ - "\u00c5\u0001\u00c5\u0001\u00c5\u0001\u00c5\u0001\u00c5\u0001\u00c6\u0001"+ - "\u00c6\u0001\u00c6\u0001\u00c6\u0001\u00c6\u0001\u00c7\u0004\u00c7\u069e"+ - "\b\u00c7\u000b\u00c7\f\u00c7\u069f\u0001\u00c8\u0005\u00c8\u06a3\b\u00c8"+ - "\n\u00c8\f\u00c8\u06a6\t\u00c8\u0001\u00c8\u0001\u00c8\u0004\u00c8\u06aa"+ - "\b\u00c8\u000b\u00c8\f\u00c8\u06ab\u0001\u00c8\u0001\u00c8\u0003\u00c8"+ - "\u06b0\b\u00c8\u0001\u00c8\u0004\u00c8\u06b3\b\u00c8\u000b\u00c8\f\u00c8"+ - "\u06b4\u0003\u00c8\u06b7\b\u00c8\u0001\u00c8\u0004\u00c8\u06ba\b\u00c8"+ - "\u000b\u00c8\f\u00c8\u06bb\u0001\u00c8\u0001\u00c8\u0003\u00c8\u06c0\b"+ - "\u00c8\u0001\u00c8\u0004\u00c8\u06c3\b\u00c8\u000b\u00c8\f\u00c8\u06c4"+ - "\u0003\u00c8\u06c7\b\u00c8\u0001\u00c9\u0001\u00c9\u0003\u00c9\u06cb\b"+ - "\u00c9\u0001\u00c9\u0001\u00c9\u0001\u00ca\u0001\u00ca\u0001\u00ca\u0005"+ - "\u00ca\u06d2\b\u00ca\n\u00ca\f\u00ca\u06d5\t\u00ca\u0001\u00ca\u0001\u00ca"+ - "\u0001\u00cb\u0001\u00cb\u0001\u00cb\u0005\u00cb\u06dc\b\u00cb\n\u00cb"+ - "\f\u00cb\u06df\t\u00cb\u0001\u00cb\u0001\u00cb\u0001\u00cc\u0001\u00cc"+ - "\u0001\u00cc\u0001\u00cc\u0001\u00cc\u0001\u00cd\u0001\u00cd\u0001\u00cd"+ - "\u0001\u00cd\u0005\u00cd\u06ec\b\u00cd\n\u00cd\f\u00cd\u06ef\t\u00cd\u0001"+ - "\u00ce\u0001\u00ce\u0003\u00ce\u06f3\b\u00ce\u0001\u00ce\u0001\u00ce\u0001"+ - "\u00ce\u0005\u00ce\u06f8\b\u00ce\n\u00ce\f\u00ce\u06fb\t\u00ce\u0001\u00cf"+ - "\u0004\u00cf\u06fe\b\u00cf\u000b\u00cf\f\u00cf\u06ff\u0001\u00cf\u0001"+ - "\u00cf\u0001\u00d0\u0001\u00d0\u0001\u00d0\u0001\u00d0\u0001\u00d0\u0005"+ - "\u00d0\u0709\b\u00d0\n\u00d0\f\u00d0\u070c\t\u00d0\u0001\u00d0\u0001\u00d0"+ - "\u0001\u00d0\u0001\u00d0\u0001\u00d0\u0001\u00d1\u0001\u00d1\u0001\u00d1"+ - "\u0001\u00d1\u0005\u00d1\u0717\b\u00d1\n\u00d1\f\u00d1\u071a\t\u00d1\u0001"+ - "\u00d1\u0001\u00d1\u0001\u00d2\u0001\u00d2\u0005\u00d2\u0720\b\u00d2\n"+ - "\u00d2\f\u00d2\u0723\t\u00d2\u0001\u00d2\u0001\u00d2\u0001\u00d3\u0001"+ - "\u00d3\u0001\u00d4\u0001\u00d4\u0001\u00d5\u0001\u00d5\u0001\u00d6\u0001"+ - "\u00d6\u0001\u00d6\u0003\u00d6\u0730\b\u00d6\u0001\u00d7\u0001\u00d7\u0001"+ - "\u00d7\u0003\u00d7\u0735\b\u00d7\u0001\u00d8\u0001\u00d8\u0001\u00d9\u0001"+ - "\u00d9\u0001\u00da\u0001\u00da\u0001\u00da\u0001\u00da\u0001\u00da\u0001"+ - "\u00da\u0001\u00db\u0001\u00db\u0001\u00db\u0001\u00db\u0001\u00db\u0001"+ - 
"\u00db\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001"+ - "\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dd\u0001\u00dd\u0001\u00dd\u0001"+ - "\u00dd\u0001\u00dd\u0001\u00dd\u0001\u00dd\u0001\u00de\u0001\u00de\u0001"+ - "\u00de\u0001\u00de\u0001\u00de\u0001\u00de\u0001\u00de\u0001\u00de\u0001"+ - "\u00de\u0001\u00de\u0001\u00de\u0001\u00df\u0001\u00df\u0001\u00df\u0001"+ - "\u00df\u0001\u00df\u0001\u00df\u0001\u00df\u0001\u00e0\u0001\u00e0\u0001"+ - "\u00e0\u0001\u00e0\u0001\u00e0\u0001\u00e0\u0001\u00e0\u0001\u00e0\u0001"+ - "\u00e0\u0001\u00e0\u0001\u00e0\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001"+ + "6\u00016\u00017\u00017\u00017\u00017\u00017\u00017\u00017\u00017\u0001"+ + "7\u00018\u00018\u00018\u00018\u00018\u00018\u00018\u00018\u00018\u0001"+ + "8\u00019\u00019\u00019\u00019\u00019\u00019\u0001:\u0001:\u0001:\u0001"+ + ":\u0001:\u0001:\u0001;\u0001;\u0001;\u0001;\u0001;\u0001;\u0003;\u0376"+ + "\b;\u0001<\u0001<\u0001<\u0001<\u0001<\u0001<\u0001<\u0001<\u0001<\u0001"+ + "<\u0001<\u0001=\u0001=\u0001=\u0001=\u0001=\u0001=\u0001=\u0001=\u0001"+ + "=\u0001>\u0001>\u0001>\u0001?\u0001?\u0001?\u0001?\u0001?\u0001?\u0001"+ + "@\u0001@\u0001@\u0001A\u0001A\u0001A\u0001A\u0001A\u0001A\u0001A\u0001"+ + "A\u0001A\u0001A\u0001B\u0001B\u0001B\u0001B\u0001B\u0001B\u0001C\u0001"+ + "C\u0001C\u0001C\u0001C\u0001C\u0001C\u0001C\u0001D\u0001D\u0001D\u0001"+ + "D\u0001D\u0001D\u0001D\u0001E\u0001E\u0001E\u0001E\u0001E\u0001F\u0001"+ + "F\u0001F\u0001G\u0001G\u0001G\u0001G\u0001G\u0001H\u0001H\u0001H\u0001"+ + "H\u0001H\u0001I\u0001I\u0001I\u0001I\u0001J\u0001J\u0001J\u0001J\u0001"+ + "J\u0001J\u0001K\u0001K\u0001K\u0001K\u0001K\u0001L\u0001L\u0001L\u0001"+ + "L\u0001L\u0001M\u0001M\u0001M\u0001M\u0001M\u0001N\u0001N\u0001N\u0001"+ + "N\u0001N\u0001N\u0001N\u0001N\u0001N\u0001O\u0001O\u0001O\u0001O\u0001"+ + "O\u0001O\u0001P\u0001P\u0001P\u0001P\u0001P\u0001P\u0001Q\u0001Q\u0001"+ + "Q\u0001Q\u0001Q\u0001R\u0001R\u0001R\u0001R\u0001R\u0001R\u0001R\u0001"+ + "R\u0001R\u0001S\u0001S\u0001S\u0001S\u0001S\u0001S\u0001T\u0001T\u0001"+ + "T\u0001T\u0001T\u0001T\u0001T\u0001T\u0003T\u0413\bT\u0001U\u0001U\u0001"+ + "U\u0001U\u0001U\u0001U\u0001U\u0001U\u0001U\u0001V\u0001V\u0001V\u0001"+ + "V\u0001V\u0001V\u0001V\u0001W\u0001W\u0001W\u0001W\u0001W\u0001W\u0001"+ + "W\u0001W\u0001W\u0001W\u0001W\u0001X\u0001X\u0001X\u0001X\u0001X\u0001"+ + "X\u0001X\u0001X\u0001X\u0001X\u0001Y\u0001Y\u0001Y\u0001Y\u0001Y\u0001"+ + "Y\u0001Y\u0001Y\u0001Y\u0001Y\u0001Y\u0001Z\u0001Z\u0001Z\u0001Z\u0001"+ + "Z\u0001Z\u0001Z\u0001[\u0001[\u0001[\u0001\\\u0001\\\u0001\\\u0001\\\u0001"+ + "]\u0001]\u0001]\u0001]\u0001]\u0001]\u0001^\u0001^\u0001^\u0001^\u0001"+ + "^\u0001^\u0001^\u0001_\u0001_\u0001_\u0001`\u0001`\u0001`\u0001a\u0001"+ + "a\u0001a\u0001a\u0001a\u0001b\u0001b\u0001b\u0001c\u0001c\u0001c\u0001"+ + "c\u0001c\u0001c\u0001d\u0001d\u0001d\u0001d\u0001d\u0001d\u0001e\u0001"+ + "e\u0001e\u0001e\u0001e\u0001e\u0001e\u0001e\u0001e\u0001f\u0001f\u0001"+ + "f\u0001f\u0001f\u0001f\u0001f\u0001f\u0001f\u0001g\u0001g\u0001g\u0001"+ + "g\u0001g\u0001g\u0001h\u0001h\u0001h\u0001h\u0001i\u0001i\u0001i\u0001"+ + "i\u0001j\u0001j\u0001j\u0001j\u0001j\u0001j\u0001k\u0001k\u0001k\u0001"+ + "k\u0001k\u0001k\u0001k\u0001k\u0001l\u0001l\u0001l\u0001l\u0001m\u0001"+ + "m\u0001m\u0001m\u0001m\u0001m\u0001m\u0001n\u0001n\u0001n\u0001n\u0001"+ + "n\u0001n\u0001n\u0001n\u0001o\u0001o\u0001o\u0001o\u0001o\u0001o\u0001"+ + "o\u0001p\u0001p\u0001p\u0001p\u0001p\u0001p\u0001p\u0001p\u0001p\u0001"+ + 
"p\u0001q\u0001q\u0001q\u0001q\u0001q\u0001q\u0001q\u0001r\u0001r\u0001"+ + "r\u0001r\u0001r\u0001s\u0001s\u0001s\u0001s\u0001s\u0001s\u0001t\u0001"+ + "t\u0001t\u0001t\u0001u\u0001u\u0001u\u0001u\u0001u\u0001u\u0001u\u0001"+ + "v\u0001v\u0001v\u0001v\u0001v\u0001v\u0001v\u0001v\u0003v\u04f1\bv\u0001"+ + "w\u0001w\u0001w\u0001w\u0001w\u0001w\u0001w\u0001x\u0001x\u0001x\u0001"+ + "x\u0001x\u0001x\u0001x\u0001x\u0001x\u0001x\u0001x\u0001x\u0001x\u0001"+ + "x\u0001y\u0001y\u0001y\u0001y\u0001z\u0001z\u0001z\u0001z\u0001z\u0001"+ + "z\u0001{\u0001{\u0001{\u0001{\u0001{\u0001|\u0001|\u0001|\u0001|\u0001"+ + "|\u0001|\u0001}\u0001}\u0001}\u0001}\u0001}\u0001}\u0001~\u0001~\u0001"+ + "~\u0001~\u0001~\u0001~\u0001~\u0001\u007f\u0001\u007f\u0001\u007f\u0001"+ + "\u007f\u0001\u007f\u0001\u0080\u0001\u0080\u0001\u0080\u0001\u0081\u0001"+ + "\u0081\u0001\u0081\u0001\u0081\u0001\u0082\u0001\u0082\u0001\u0082\u0001"+ + "\u0082\u0001\u0082\u0001\u0083\u0001\u0083\u0001\u0083\u0001\u0083\u0001"+ + "\u0083\u0001\u0083\u0001\u0083\u0001\u0083\u0001\u0083\u0001\u0084\u0001"+ + "\u0084\u0001\u0084\u0001\u0084\u0001\u0084\u0001\u0084\u0001\u0084\u0001"+ + "\u0085\u0001\u0085\u0001\u0085\u0001\u0085\u0001\u0085\u0001\u0085\u0001"+ + "\u0085\u0001\u0086\u0001\u0086\u0001\u0086\u0001\u0086\u0001\u0086\u0001"+ + "\u0086\u0001\u0086\u0001\u0087\u0001\u0087\u0001\u0087\u0001\u0087\u0001"+ + "\u0087\u0001\u0088\u0001\u0088\u0001\u0088\u0001\u0088\u0001\u0088\u0001"+ + "\u0088\u0001\u0089\u0001\u0089\u0001\u0089\u0001\u0089\u0001\u0089\u0001"+ + "\u0089\u0001\u008a\u0001\u008a\u0001\u008a\u0001\u008a\u0001\u008a\u0001"+ + "\u008a\u0001\u008a\u0001\u008b\u0001\u008b\u0001\u008b\u0001\u008b\u0001"+ + "\u008b\u0001\u008c\u0001\u008c\u0001\u008c\u0001\u008c\u0001\u008c\u0001"+ + "\u008c\u0001\u008d\u0001\u008d\u0001\u008d\u0001\u008d\u0001\u008d\u0001"+ + "\u008e\u0001\u008e\u0001\u008e\u0001\u008e\u0001\u008e\u0001\u008e\u0001"+ + "\u008e\u0001\u008f\u0001\u008f\u0001\u008f\u0001\u008f\u0001\u008f\u0001"+ + "\u008f\u0001\u008f\u0001\u0090\u0001\u0090\u0001\u0090\u0001\u0090\u0001"+ + "\u0090\u0001\u0091\u0001\u0091\u0004\u0091\u0596\b\u0091\u000b\u0091\f"+ + "\u0091\u0597\u0001\u0091\u0001\u0091\u0001\u0092\u0001\u0092\u0004\u0092"+ + "\u059e\b\u0092\u000b\u0092\f\u0092\u059f\u0001\u0092\u0001\u0092\u0001"+ + "\u0093\u0001\u0093\u0004\u0093\u05a6\b\u0093\u000b\u0093\f\u0093\u05a7"+ + "\u0001\u0093\u0001\u0093\u0001\u0094\u0001\u0094\u0004\u0094\u05ae\b\u0094"+ + "\u000b\u0094\f\u0094\u05af\u0001\u0094\u0001\u0094\u0004\u0094\u05b4\b"+ + "\u0094\u000b\u0094\f\u0094\u05b5\u0001\u0094\u0001\u0094\u0001\u0095\u0001"+ + "\u0095\u0004\u0095\u05bc\b\u0095\u000b\u0095\f\u0095\u05bd\u0001\u0095"+ + "\u0001\u0095\u0004\u0095\u05c2\b\u0095\u000b\u0095\f\u0095\u05c3\u0001"+ + "\u0095\u0001\u0095\u0001\u0096\u0001\u0096\u0004\u0096\u05ca\b\u0096\u000b"+ + "\u0096\f\u0096\u05cb\u0001\u0096\u0001\u0096\u0004\u0096\u05d0\b\u0096"+ + "\u000b\u0096\f\u0096\u05d1\u0001\u0096\u0001\u0096\u0001\u0097\u0001\u0097"+ + "\u0001\u0097\u0001\u0097\u0001\u0097\u0001\u0097\u0001\u0098\u0001\u0098"+ + "\u0001\u0098\u0001\u0098\u0001\u0098\u0001\u0098\u0001\u0098\u0001\u0099"+ + "\u0001\u0099\u0001\u0099\u0001\u0099\u0001\u0099\u0001\u0099\u0001\u0099"+ + "\u0001\u0099\u0001\u009a\u0001\u009a\u0001\u009a\u0001\u009a\u0001\u009a"+ + "\u0001\u009a\u0001\u009a\u0001\u009b\u0001\u009b\u0001\u009b\u0001\u009b"+ + "\u0001\u009b\u0001\u009c\u0001\u009c\u0001\u009c\u0001\u009c\u0001\u009c"+ + 
"\u0001\u009c\u0001\u009d\u0001\u009d\u0001\u009d\u0001\u009d\u0001\u009d"+ + "\u0001\u009d\u0001\u009d\u0001\u009d\u0001\u009d\u0001\u009e\u0001\u009e"+ + "\u0001\u009e\u0001\u009e\u0001\u009e\u0001\u009e\u0001\u009e\u0001\u009e"+ + "\u0001\u009f\u0001\u009f\u0001\u009f\u0001\u009f\u0001\u009f\u0001\u00a0"+ + "\u0001\u00a0\u0001\u00a0\u0001\u00a0\u0001\u00a1\u0001\u00a1\u0001\u00a1"+ + "\u0001\u00a1\u0001\u00a1\u0001\u00a1\u0001\u00a1\u0001\u00a2\u0001\u00a2"+ + "\u0001\u00a2\u0001\u00a2\u0001\u00a2\u0001\u00a2\u0001\u00a3\u0001\u00a3"+ + "\u0001\u00a3\u0001\u00a3\u0001\u00a3\u0001\u00a3\u0001\u00a3\u0001\u00a4"+ + "\u0001\u00a4\u0001\u00a4\u0001\u00a4\u0001\u00a4\u0001\u00a4\u0001\u00a4"+ + "\u0001\u00a5\u0001\u00a5\u0001\u00a5\u0001\u00a5\u0001\u00a5\u0001\u00a5"+ + "\u0001\u00a5\u0001\u00a5\u0001\u00a5\u0001\u00a5\u0001\u00a6\u0001\u00a6"+ + "\u0001\u00a6\u0001\u00a6\u0001\u00a7\u0001\u00a7\u0001\u00a7\u0001\u00a7"+ + "\u0001\u00a7\u0001\u00a7\u0001\u00a7\u0001\u00a7\u0001\u00a7\u0001\u00a7"+ + "\u0001\u00a8\u0001\u00a8\u0001\u00a8\u0001\u00a8\u0001\u00a8\u0001\u00a8"+ + "\u0001\u00a8\u0001\u00a8\u0001\u00a8\u0001\u00a8\u0001\u00a8\u0001\u00a8"+ + "\u0001\u00a8\u0001\u00a8\u0001\u00a9\u0001\u00a9\u0001\u00a9\u0001\u00a9"+ + "\u0001\u00a9\u0001\u00a9\u0001\u00a9\u0001\u00a9\u0001\u00a9\u0001\u00a9"+ + "\u0001\u00aa\u0001\u00aa\u0001\u00aa\u0001\u00aa\u0001\u00aa\u0001\u00aa"+ + "\u0001\u00aa\u0001\u00ab\u0001\u00ab\u0001\u00ac\u0001\u00ac\u0001\u00ad"+ + "\u0001\u00ad\u0001\u00ae\u0001\u00ae\u0001\u00af\u0001\u00af\u0001\u00b0"+ + "\u0001\u00b0\u0001\u00b1\u0001\u00b1\u0001\u00b2\u0001\u00b2\u0001\u00b3"+ + "\u0001\u00b3\u0001\u00b4\u0001\u00b4\u0001\u00b5\u0001\u00b5\u0001\u00b6"+ + "\u0001\u00b6\u0001\u00b7\u0001\u00b7\u0001\u00b8\u0001\u00b8\u0001\u00b9"+ + "\u0001\u00b9\u0001\u00b9\u0001\u00ba\u0001\u00ba\u0001\u00bb\u0001\u00bb"+ + "\u0001\u00bb\u0001\u00bc\u0001\u00bc\u0001\u00bd\u0001\u00bd\u0001\u00bd"+ + "\u0001\u00be\u0001\u00be\u0001\u00be\u0001\u00be\u0001\u00be\u0001\u00bf"+ + "\u0001\u00bf\u0001\u00bf\u0001\u00bf\u0001\u00bf\u0001\u00bf\u0001\u00bf"+ + "\u0001\u00c0\u0001\u00c0\u0001\u00c0\u0001\u00c0\u0001\u00c0\u0001\u00c1"+ + "\u0001\u00c1\u0001\u00c1\u0001\u00c1\u0001\u00c1\u0001\u00c1\u0001\u00c1"+ + "\u0001\u00c2\u0001\u00c2\u0001\u00c2\u0001\u00c2\u0001\u00c2\u0001\u00c3"+ + "\u0001\u00c3\u0001\u00c3\u0001\u00c3\u0001\u00c3\u0001\u00c3\u0001\u00c3"+ + "\u0001\u00c4\u0001\u00c4\u0001\u00c5\u0001\u00c5\u0001\u00c6\u0001\u00c6"+ + "\u0001\u00c7\u0001\u00c7\u0001\u00c7\u0001\u00c7\u0001\u00c8\u0001\u00c8"+ + "\u0001\u00c8\u0001\u00c9\u0001\u00c9\u0001\u00c9\u0001\u00c9\u0001\u00c9"+ + "\u0001\u00ca\u0001\u00ca\u0001\u00ca\u0001\u00ca\u0001\u00ca\u0001\u00ca"+ + "\u0001\u00cb\u0001\u00cb\u0001\u00cb\u0001\u00cb\u0001\u00cb\u0001\u00cc"+ + "\u0004\u00cc\u06d4\b\u00cc\u000b\u00cc\f\u00cc\u06d5\u0001\u00cd\u0005"+ + "\u00cd\u06d9\b\u00cd\n\u00cd\f\u00cd\u06dc\t\u00cd\u0001\u00cd\u0001\u00cd"+ + "\u0004\u00cd\u06e0\b\u00cd\u000b\u00cd\f\u00cd\u06e1\u0001\u00cd\u0001"+ + "\u00cd\u0003\u00cd\u06e6\b\u00cd\u0001\u00cd\u0004\u00cd\u06e9\b\u00cd"+ + "\u000b\u00cd\f\u00cd\u06ea\u0003\u00cd\u06ed\b\u00cd\u0001\u00cd\u0004"+ + "\u00cd\u06f0\b\u00cd\u000b\u00cd\f\u00cd\u06f1\u0001\u00cd\u0001\u00cd"+ + "\u0003\u00cd\u06f6\b\u00cd\u0001\u00cd\u0004\u00cd\u06f9\b\u00cd\u000b"+ + "\u00cd\f\u00cd\u06fa\u0003\u00cd\u06fd\b\u00cd\u0001\u00ce\u0001\u00ce"+ + "\u0003\u00ce\u0701\b\u00ce\u0001\u00ce\u0001\u00ce\u0001\u00cf\u0001\u00cf"+ + 
"\u0001\u00cf\u0005\u00cf\u0708\b\u00cf\n\u00cf\f\u00cf\u070b\t\u00cf\u0001"+ + "\u00cf\u0001\u00cf\u0001\u00d0\u0001\u00d0\u0001\u00d0\u0005\u00d0\u0712"+ + "\b\u00d0\n\u00d0\f\u00d0\u0715\t\u00d0\u0001\u00d0\u0001\u00d0\u0001\u00d1"+ + "\u0001\u00d1\u0001\u00d1\u0001\u00d1\u0001\u00d1\u0001\u00d2\u0001\u00d2"+ + "\u0001\u00d2\u0001\u00d2\u0005\u00d2\u0722\b\u00d2\n\u00d2\f\u00d2\u0725"+ + "\t\u00d2\u0001\u00d3\u0001\u00d3\u0003\u00d3\u0729\b\u00d3\u0001\u00d3"+ + "\u0001\u00d3\u0001\u00d3\u0005\u00d3\u072e\b\u00d3\n\u00d3\f\u00d3\u0731"+ + "\t\u00d3\u0001\u00d4\u0004\u00d4\u0734\b\u00d4\u000b\u00d4\f\u00d4\u0735"+ + "\u0001\u00d4\u0001\u00d4\u0001\u00d5\u0001\u00d5\u0001\u00d5\u0001\u00d5"+ + "\u0001\u00d5\u0005\u00d5\u073f\b\u00d5\n\u00d5\f\u00d5\u0742\t\u00d5\u0001"+ + "\u00d5\u0001\u00d5\u0001\u00d5\u0001\u00d5\u0001\u00d5\u0001\u00d6\u0001"+ + "\u00d6\u0001\u00d6\u0001\u00d6\u0005\u00d6\u074d\b\u00d6\n\u00d6\f\u00d6"+ + "\u0750\t\u00d6\u0001\u00d6\u0001\u00d6\u0001\u00d7\u0001\u00d7\u0005\u00d7"+ + "\u0756\b\u00d7\n\u00d7\f\u00d7\u0759\t\u00d7\u0001\u00d7\u0001\u00d7\u0001"+ + "\u00d8\u0001\u00d8\u0001\u00d9\u0001\u00d9\u0001\u00da\u0001\u00da\u0001"+ + "\u00db\u0001\u00db\u0001\u00db\u0003\u00db\u0766\b\u00db\u0001\u00dc\u0001"+ + "\u00dc\u0001\u00dc\u0003\u00dc\u076b\b\u00dc\u0001\u00dd\u0001\u00dd\u0001"+ + "\u00de\u0001\u00de\u0001\u00df\u0001\u00df\u0001\u00df\u0001\u00df\u0001"+ + "\u00df\u0001\u00df\u0001\u00e0\u0001\u00e0\u0001\u00e0\u0001\u00e0\u0001"+ + "\u00e0\u0001\u00e0\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001"+ "\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e2\u0001\u00e2\u0001"+ - "\u00e2\u0001\u00e2\u0001\u00e2\u0001\u00e2\u0001\u00e2\u0001\u00e2\u0001"+ - "\u00e2\u0003\u06d3\u06dd\u070a\u0000\u00e3\u0001\u0001\u0003\u0002\u0005"+ - "\u0003\u0007\u0004\t\u0005\u000b\u0006\r\u0007\u000f\b\u0011\t\u0013\n"+ - "\u0015\u000b\u0017\f\u0019\r\u001b\u000e\u001d\u000f\u001f\u0010!\u0011"+ - "#\u0012%\u0013\'\u0014)\u0015+\u0016-\u0017/\u00181\u00193\u001a5\u001b"+ - "7\u001c9\u001d;\u001e=\u001f? 
A!C\"E#G$I%K&M\'O(Q)S*U+W,Y-[.]/_0a1c2e"+ - "3g4i5k6m7o8q9s:u;w}?\u007f@\u0081A\u0083B\u0085C\u0087D\u0089E\u008b"+ - "F\u008dG\u008fH\u0091I\u0093J\u0095K\u0097L\u0099M\u009bN\u009dO\u009f"+ - "P\u00a1Q\u00a3R\u00a5S\u00a7T\u00a9U\u00abV\u00adW\u00afX\u00b1Y\u00b3"+ - "Z\u00b5[\u00b7\\\u00b9]\u00bb^\u00bd_\u00bf`\u00c1a\u00c3b\u00c5c\u00c7"+ - "d\u00c9e\u00cbf\u00cdg\u00cfh\u00d1i\u00d3j\u00d5k\u00d7l\u00d9m\u00db"+ - "n\u00ddo\u00dfp\u00e1q\u00e3r\u00e5s\u00e7t\u00e9u\u00ebv\u00edw\u00ef"+ - "x\u00f1y\u00f3z\u00f5{\u00f7|\u00f9}\u00fb~\u00fd\u007f\u00ff\u0080\u0101"+ - "\u0081\u0103\u0082\u0105\u0083\u0107\u0084\u0109\u0085\u010b\u0086\u010d"+ - "\u0087\u010f\u0088\u0111\u0089\u0113\u008a\u0115\u008b\u0117\u008c\u0119"+ - "\u008d\u011b\u008e\u011d\u008f\u011f\u0090\u0121\u0091\u0123\u0092\u0125"+ - "\u0093\u0127\u0094\u0129\u0095\u012b\u0096\u012d\u0097\u012f\u0098\u0131"+ - "\u0099\u0133\u009a\u0135\u009b\u0137\u009c\u0139\u009d\u013b\u009e\u013d"+ - "\u009f\u013f\u00a0\u0141\u00a1\u0143\u00a2\u0145\u00a3\u0147\u00a4\u0149"+ - "\u00a5\u014b\u00a6\u014d\u00a7\u014f\u00a8\u0151\u00a9\u0153\u00aa\u0155"+ - "\u00ab\u0157\u00ac\u0159\u00ad\u015b\u00ae\u015d\u00af\u015f\u00b0\u0161"+ - "\u00b1\u0163\u00b2\u0165\u00b3\u0167\u00b4\u0169\u00b5\u016b\u00b6\u016d"+ - "\u00b7\u016f\u00b8\u0171\u00b9\u0173\u00ba\u0175\u00bb\u0177\u00bc\u0179"+ - "\u00bd\u017b\u00be\u017d\u00bf\u017f\u00c0\u0181\u00c1\u0183\u00c2\u0185"+ - "\u00c3\u0187\u00c4\u0189\u00c5\u018b\u00c6\u018d\u00c7\u018f\u00c8\u0191"+ - "\u00c9\u0193\u00ca\u0195\u00cb\u0197\u00cc\u0199\u00cd\u019b\u00ce\u019d"+ - "\u00cf\u019f\u00d0\u01a1\u00d1\u01a3\u00d2\u01a5\u00d3\u01a7\u00d4\u01a9"+ - "\u0000\u01ab\u0000\u01ad\u0000\u01af\u0000\u01b1\u0000\u01b3\u0000\u01b5"+ - "\u0000\u01b7\u0000\u01b9\u0000\u01bb\u0000\u01bd\u0000\u01bf\u0000\u01c1"+ - "\u0000\u01c3\u0000\u01c5\u0000\u0001\u0000%\u0002\u0000AAaa\u0002\u0000"+ - "CCcc\u0002\u0000OOoo\u0002\u0000UUuu\u0002\u0000NNnn\u0002\u0000TTtt\u0002"+ + "\u00e2\u0001\u00e2\u0001\u00e2\u0001\u00e2\u0001\u00e2\u0001\u00e3\u0001"+ + "\u00e3\u0001\u00e3\u0001\u00e3\u0001\u00e3\u0001\u00e3\u0001\u00e3\u0001"+ + "\u00e3\u0001\u00e3\u0001\u00e3\u0001\u00e3\u0001\u00e4\u0001\u00e4\u0001"+ + "\u00e4\u0001\u00e4\u0001\u00e4\u0001\u00e4\u0001\u00e4\u0001\u00e5\u0001"+ + "\u00e5\u0001\u00e5\u0001\u00e5\u0001\u00e5\u0001\u00e5\u0001\u00e5\u0001"+ + "\u00e5\u0001\u00e5\u0001\u00e5\u0001\u00e5\u0001\u00e6\u0001\u00e6\u0001"+ + "\u00e6\u0001\u00e6\u0001\u00e6\u0001\u00e6\u0001\u00e6\u0001\u00e7\u0001"+ + "\u00e7\u0001\u00e7\u0001\u00e7\u0001\u00e7\u0001\u00e7\u0001\u00e7\u0001"+ + "\u00e7\u0001\u00e7\u0003\u0709\u0713\u0740\u0000\u00e8\u0001\u0001\u0003"+ + "\u0002\u0005\u0003\u0007\u0004\t\u0005\u000b\u0006\r\u0007\u000f\b\u0011"+ + "\t\u0013\n\u0015\u000b\u0017\f\u0019\r\u001b\u000e\u001d\u000f\u001f\u0010"+ + "!\u0011#\u0012%\u0013\'\u0014)\u0015+\u0016-\u0017/\u00181\u00193\u001a"+ + "5\u001b7\u001c9\u001d;\u001e=\u001f? 
A!C\"E#G$I%K&M\'O(Q)S*U+W,Y-[.]/"+ + "_0a1c2e3g4i5k6m7o8q9s:u;w}?\u007f@\u0081A\u0083B\u0085C\u0087D\u0089"+ + "E\u008bF\u008dG\u008fH\u0091I\u0093J\u0095K\u0097L\u0099M\u009bN\u009d"+ + "O\u009fP\u00a1Q\u00a3R\u00a5S\u00a7T\u00a9U\u00abV\u00adW\u00afX\u00b1"+ + "Y\u00b3Z\u00b5[\u00b7\\\u00b9]\u00bb^\u00bd_\u00bf`\u00c1a\u00c3b\u00c5"+ + "c\u00c7d\u00c9e\u00cbf\u00cdg\u00cfh\u00d1i\u00d3j\u00d5k\u00d7l\u00d9"+ + "m\u00dbn\u00ddo\u00dfp\u00e1q\u00e3r\u00e5s\u00e7t\u00e9u\u00ebv\u00ed"+ + "w\u00efx\u00f1y\u00f3z\u00f5{\u00f7|\u00f9}\u00fb~\u00fd\u007f\u00ff\u0080"+ + "\u0101\u0081\u0103\u0082\u0105\u0083\u0107\u0084\u0109\u0085\u010b\u0086"+ + "\u010d\u0087\u010f\u0088\u0111\u0089\u0113\u008a\u0115\u008b\u0117\u008c"+ + "\u0119\u008d\u011b\u008e\u011d\u008f\u011f\u0090\u0121\u0091\u0123\u0092"+ + "\u0125\u0093\u0127\u0094\u0129\u0095\u012b\u0096\u012d\u0097\u012f\u0098"+ + "\u0131\u0099\u0133\u009a\u0135\u009b\u0137\u009c\u0139\u009d\u013b\u009e"+ + "\u013d\u009f\u013f\u00a0\u0141\u00a1\u0143\u00a2\u0145\u00a3\u0147\u00a4"+ + "\u0149\u00a5\u014b\u00a6\u014d\u00a7\u014f\u00a8\u0151\u00a9\u0153\u00aa"+ + "\u0155\u00ab\u0157\u00ac\u0159\u00ad\u015b\u00ae\u015d\u00af\u015f\u00b0"+ + "\u0161\u00b1\u0163\u00b2\u0165\u00b3\u0167\u00b4\u0169\u00b5\u016b\u00b6"+ + "\u016d\u00b7\u016f\u00b8\u0171\u00b9\u0173\u00ba\u0175\u00bb\u0177\u00bc"+ + "\u0179\u00bd\u017b\u00be\u017d\u00bf\u017f\u00c0\u0181\u00c1\u0183\u00c2"+ + "\u0185\u00c3\u0187\u00c4\u0189\u00c5\u018b\u00c6\u018d\u00c7\u018f\u00c8"+ + "\u0191\u00c9\u0193\u00ca\u0195\u00cb\u0197\u00cc\u0199\u00cd\u019b\u00ce"+ + "\u019d\u00cf\u019f\u00d0\u01a1\u00d1\u01a3\u00d2\u01a5\u00d3\u01a7\u00d4"+ + "\u01a9\u00d5\u01ab\u00d6\u01ad\u00d7\u01af\u00d8\u01b1\u00d9\u01b3\u0000"+ + "\u01b5\u0000\u01b7\u0000\u01b9\u0000\u01bb\u0000\u01bd\u0000\u01bf\u0000"+ + "\u01c1\u0000\u01c3\u0000\u01c5\u0000\u01c7\u0000\u01c9\u0000\u01cb\u0000"+ + "\u01cd\u0000\u01cf\u0000\u0001\u0000%\u0002\u0000AAaa\u0002\u0000CCcc"+ + "\u0002\u0000OOoo\u0002\u0000UUuu\u0002\u0000NNnn\u0002\u0000TTtt\u0002"+ "\u0000DDdd\u0002\u0000MMmm\u0002\u0000IIii\u0002\u0000LLll\u0002\u0000"+ "EEee\u0002\u0000RRrr\u0002\u0000WWww\u0002\u0000YYyy\u0002\u0000SSss\u0002"+ - "\u0000BBbb\u0002\u0000HHhh\u0002\u0000FFff\u0002\u0000PPpp\u0002\u0000"+ + "\u0000BBbb\u0002\u0000FFff\u0002\u0000HHhh\u0002\u0000PPpp\u0002\u0000"+ "XXxx\u0002\u0000ZZzz\u0002\u0000GGgg\u0002\u0000JJjj\u0002\u0000KKkk\u0002"+ "\u0000VVvv\u0002\u0000QQqq\u0002\u0000++--\u0001\u0000SS\u0001\u0000Y"+ "Y\u0001\u0000$$\u0003\u0000\t\n\r\r \u0001\u0000++\u0002\u0000\n\n\r"+ "\r\u0002\u0000AZaz\b\u0000\"\"//\\\\bbffnnrrtt\b\u0000\'\'//\\\\bbffn"+ - "nrrtt\u0003\u000009AFaf\u079e\u0000\u0001\u0001\u0000\u0000\u0000\u0000"+ + "nrrtt\u0003\u000009AFaf\u07d4\u0000\u0001\u0001\u0000\u0000\u0000\u0000"+ "\u0003\u0001\u0000\u0000\u0000\u0000\u0005\u0001\u0000\u0000\u0000\u0000"+ "\u0007\u0001\u0000\u0000\u0000\u0000\t\u0001\u0000\u0000\u0000\u0000\u000b"+ "\u0001\u0000\u0000\u0000\u0000\r\u0001\u0000\u0000\u0000\u0000\u000f\u0001"+ @@ -617,844 +629,869 @@ public KVQLLexer(CharStream input) { "\u0199\u0001\u0000\u0000\u0000\u0000\u019b\u0001\u0000\u0000\u0000\u0000"+ "\u019d\u0001\u0000\u0000\u0000\u0000\u019f\u0001\u0000\u0000\u0000\u0000"+ "\u01a1\u0001\u0000\u0000\u0000\u0000\u01a3\u0001\u0000\u0000\u0000\u0000"+ - "\u01a5\u0001\u0000\u0000\u0000\u0000\u01a7\u0001\u0000\u0000\u0000\u0001"+ - "\u01c7\u0001\u0000\u0000\u0000\u0003\u01cb\u0001\u0000\u0000\u0000\u0005"+ - 
"\u01ce\u0001\u0000\u0000\u0000\u0007\u01d0\u0001\u0000\u0000\u0000\t\u01da"+ - "\u0001\u0000\u0000\u0000\u000b\u01e2\u0001\u0000\u0000\u0000\r\u01e6\u0001"+ - "\u0000\u0000\u0000\u000f\u01ec\u0001\u0000\u0000\u0000\u0011\u01f0\u0001"+ - "\u0000\u0000\u0000\u0013\u01f6\u0001\u0000\u0000\u0000\u0015\u01fd\u0001"+ - "\u0000\u0000\u0000\u0017\u0207\u0001\u0000\u0000\u0000\u0019\u020b\u0001"+ - "\u0000\u0000\u0000\u001b\u020e\u0001\u0000\u0000\u0000\u001d\u0212\u0001"+ - "\u0000\u0000\u0000\u001f\u0220\u0001\u0000\u0000\u0000!\u0228\u0001\u0000"+ - "\u0000\u0000#\u022b\u0001\u0000\u0000\u0000%\u0231\u0001\u0000\u0000\u0000"+ - "\'\u0236\u0001\u0000\u0000\u0000)\u023e\u0001\u0000\u0000\u0000+\u0243"+ - "\u0001\u0000\u0000\u0000-\u024e\u0001\u0000\u0000\u0000/\u0256\u0001\u0000"+ - "\u0000\u00001\u025c\u0001\u0000\u0000\u00003\u0263\u0001\u0000\u0000\u0000"+ - "5\u026e\u0001\u0000\u0000\u00007\u0270\u0001\u0000\u0000\u00009\u0278"+ - "\u0001\u0000\u0000\u0000;\u0280\u0001\u0000\u0000\u0000=\u0287\u0001\u0000"+ - "\u0000\u0000?\u028c\u0001\u0000\u0000\u0000A\u0298\u0001\u0000\u0000\u0000"+ - "C\u02a1\u0001\u0000\u0000\u0000E\u02aa\u0001\u0000\u0000\u0000G\u02af"+ - "\u0001\u0000\u0000\u0000I\u02b9\u0001\u0000\u0000\u0000K\u02c2\u0001\u0000"+ - "\u0000\u0000M\u02c7\u0001\u0000\u0000\u0000O\u02cb\u0001\u0000\u0000\u0000"+ - "Q\u02d5\u0001\u0000\u0000\u0000S\u02e1\u0001\u0000\u0000\u0000U\u02e8"+ - "\u0001\u0000\u0000\u0000W\u02f0\u0001\u0000\u0000\u0000Y\u02f7\u0001\u0000"+ - "\u0000\u0000[\u02fd\u0001\u0000\u0000\u0000]\u0303\u0001\u0000\u0000\u0000"+ - "_\u0307\u0001\u0000\u0000\u0000a\u030d\u0001\u0000\u0000\u0000c\u0314"+ - "\u0001\u0000\u0000\u0000e\u0319\u0001\u0000\u0000\u0000g\u0320\u0001\u0000"+ - "\u0000\u0000i\u0329\u0001\u0000\u0000\u0000k\u0333\u0001\u0000\u0000\u0000"+ - "m\u0339\u0001\u0000\u0000\u0000o\u0345\u0001\u0000\u0000\u0000q\u0347"+ - "\u0001\u0000\u0000\u0000s\u0352\u0001\u0000\u0000\u0000u\u035b\u0001\u0000"+ - "\u0000\u0000w\u035e\u0001\u0000\u0000\u0000y\u0361\u0001\u0000\u0000\u0000"+ - "{\u036b\u0001\u0000\u0000\u0000}\u0371\u0001\u0000\u0000\u0000\u007f\u0379"+ - "\u0001\u0000\u0000\u0000\u0081\u0380\u0001\u0000\u0000\u0000\u0083\u0385"+ - "\u0001\u0000\u0000\u0000\u0085\u0388\u0001\u0000\u0000\u0000\u0087\u038d"+ - "\u0001\u0000\u0000\u0000\u0089\u0392\u0001\u0000\u0000\u0000\u008b\u0396"+ - "\u0001\u0000\u0000\u0000\u008d\u039c\u0001\u0000\u0000\u0000\u008f\u03a1"+ - "\u0001\u0000\u0000\u0000\u0091\u03a6\u0001\u0000\u0000\u0000\u0093\u03ab"+ - "\u0001\u0000\u0000\u0000\u0095\u03b4\u0001\u0000\u0000\u0000\u0097\u03ba"+ - "\u0001\u0000\u0000\u0000\u0099\u03c0\u0001\u0000\u0000\u0000\u009b\u03c5"+ - "\u0001\u0000\u0000\u0000\u009d\u03ce\u0001\u0000\u0000\u0000\u009f\u03dc"+ - "\u0001\u0000\u0000\u0000\u00a1\u03de\u0001\u0000\u0000\u0000\u00a3\u03e7"+ - "\u0001\u0000\u0000\u0000\u00a5\u03ee\u0001\u0000\u0000\u0000\u00a7\u03f9"+ - "\u0001\u0000\u0000\u0000\u00a9\u0403\u0001\u0000\u0000\u0000\u00ab\u040e"+ - "\u0001\u0000\u0000\u0000\u00ad\u0415\u0001\u0000\u0000\u0000\u00af\u0418"+ - "\u0001\u0000\u0000\u0000\u00b1\u041c\u0001\u0000\u0000\u0000\u00b3\u0422"+ - "\u0001\u0000\u0000\u0000\u00b5\u0429\u0001\u0000\u0000\u0000\u00b7\u042c"+ - "\u0001\u0000\u0000\u0000\u00b9\u042f\u0001\u0000\u0000\u0000\u00bb\u0434"+ - "\u0001\u0000\u0000\u0000\u00bd\u0437\u0001\u0000\u0000\u0000\u00bf\u043d"+ - "\u0001\u0000\u0000\u0000\u00c1\u0443\u0001\u0000\u0000\u0000\u00c3\u044c"+ - "\u0001\u0000\u0000\u0000\u00c5\u0455\u0001\u0000\u0000\u0000\u00c7\u045b"+ - 
"\u0001\u0000\u0000\u0000\u00c9\u045f\u0001\u0000\u0000\u0000\u00cb\u0463"+ - "\u0001\u0000\u0000\u0000\u00cd\u0469\u0001\u0000\u0000\u0000\u00cf\u0471"+ - "\u0001\u0000\u0000\u0000\u00d1\u0475\u0001\u0000\u0000\u0000\u00d3\u047c"+ - "\u0001\u0000\u0000\u0000\u00d5\u0484\u0001\u0000\u0000\u0000\u00d7\u048b"+ - "\u0001\u0000\u0000\u0000\u00d9\u0495\u0001\u0000\u0000\u0000\u00db\u049c"+ - "\u0001\u0000\u0000\u0000\u00dd\u04a1\u0001\u0000\u0000\u0000\u00df\u04a7"+ - "\u0001\u0000\u0000\u0000\u00e1\u04ab\u0001\u0000\u0000\u0000\u00e3\u04ba"+ - "\u0001\u0000\u0000\u0000\u00e5\u04bc\u0001\u0000\u0000\u0000\u00e7\u04c3"+ - "\u0001\u0000\u0000\u0000\u00e9\u04d1\u0001\u0000\u0000\u0000\u00eb\u04d5"+ - "\u0001\u0000\u0000\u0000\u00ed\u04db\u0001\u0000\u0000\u0000\u00ef\u04e0"+ - "\u0001\u0000\u0000\u0000\u00f1\u04e6\u0001\u0000\u0000\u0000\u00f3\u04ec"+ - "\u0001\u0000\u0000\u0000\u00f5\u04f3\u0001\u0000\u0000\u0000\u00f7\u04f8"+ - "\u0001\u0000\u0000\u0000\u00f9\u04fb\u0001\u0000\u0000\u0000\u00fb\u04ff"+ - "\u0001\u0000\u0000\u0000\u00fd\u0504\u0001\u0000\u0000\u0000\u00ff\u050d"+ - "\u0001\u0000\u0000\u0000\u0101\u0514\u0001\u0000\u0000\u0000\u0103\u051b"+ - "\u0001\u0000\u0000\u0000\u0105\u0522\u0001\u0000\u0000\u0000\u0107\u0527"+ - "\u0001\u0000\u0000\u0000\u0109\u052d\u0001\u0000\u0000\u0000\u010b\u0533"+ - "\u0001\u0000\u0000\u0000\u010d\u053a\u0001\u0000\u0000\u0000\u010f\u053f"+ - "\u0001\u0000\u0000\u0000\u0111\u0545\u0001\u0000\u0000\u0000\u0113\u054a"+ - "\u0001\u0000\u0000\u0000\u0115\u0551\u0001\u0000\u0000\u0000\u0117\u0558"+ - "\u0001\u0000\u0000\u0000\u0119\u055d\u0001\u0000\u0000\u0000\u011b\u0565"+ - "\u0001\u0000\u0000\u0000\u011d\u056d\u0001\u0000\u0000\u0000\u011f\u0575"+ - "\u0001\u0000\u0000\u0000\u0121\u0583\u0001\u0000\u0000\u0000\u0123\u0591"+ - "\u0001\u0000\u0000\u0000\u0125\u059f\u0001\u0000\u0000\u0000\u0127\u05a5"+ - "\u0001\u0000\u0000\u0000\u0129\u05ac\u0001\u0000\u0000\u0000\u012b\u05b4"+ - "\u0001\u0000\u0000\u0000\u012d\u05bb\u0001\u0000\u0000\u0000\u012f\u05c0"+ - "\u0001\u0000\u0000\u0000\u0131\u05c6\u0001\u0000\u0000\u0000\u0133\u05cf"+ - "\u0001\u0000\u0000\u0000\u0135\u05d7\u0001\u0000\u0000\u0000\u0137\u05dc"+ - "\u0001\u0000\u0000\u0000\u0139\u05e0\u0001\u0000\u0000\u0000\u013b\u05e7"+ - "\u0001\u0000\u0000\u0000\u013d\u05ed\u0001\u0000\u0000\u0000\u013f\u05f4"+ - "\u0001\u0000\u0000\u0000\u0141\u05fb\u0001\u0000\u0000\u0000\u0143\u0605"+ - "\u0001\u0000\u0000\u0000\u0145\u0609\u0001\u0000\u0000\u0000\u0147\u0613"+ - "\u0001\u0000\u0000\u0000\u0149\u0621\u0001\u0000\u0000\u0000\u014b\u062b"+ - "\u0001\u0000\u0000\u0000\u014d\u0632\u0001\u0000\u0000\u0000\u014f\u0634"+ - "\u0001\u0000\u0000\u0000\u0151\u0636\u0001\u0000\u0000\u0000\u0153\u0638"+ - "\u0001\u0000\u0000\u0000\u0155\u063a\u0001\u0000\u0000\u0000\u0157\u063c"+ - "\u0001\u0000\u0000\u0000\u0159\u063e\u0001\u0000\u0000\u0000\u015b\u0640"+ - "\u0001\u0000\u0000\u0000\u015d\u0642\u0001\u0000\u0000\u0000\u015f\u0644"+ - "\u0001\u0000\u0000\u0000\u0161\u0646\u0001\u0000\u0000\u0000\u0163\u0648"+ - "\u0001\u0000\u0000\u0000\u0165\u064a\u0001\u0000\u0000\u0000\u0167\u064c"+ - "\u0001\u0000\u0000\u0000\u0169\u064e\u0001\u0000\u0000\u0000\u016b\u0651"+ - "\u0001\u0000\u0000\u0000\u016d\u0653\u0001\u0000\u0000\u0000\u016f\u0656"+ - "\u0001\u0000\u0000\u0000\u0171\u0658\u0001\u0000\u0000\u0000\u0173\u065b"+ - "\u0001\u0000\u0000\u0000\u0175\u0660\u0001\u0000\u0000\u0000\u0177\u0667"+ - "\u0001\u0000\u0000\u0000\u0179\u066c\u0001\u0000\u0000\u0000\u017b\u0673"+ - 
"\u0001\u0000\u0000\u0000\u017d\u0678\u0001\u0000\u0000\u0000\u017f\u067f"+ - "\u0001\u0000\u0000\u0000\u0181\u0681\u0001\u0000\u0000\u0000\u0183\u0683"+ - "\u0001\u0000\u0000\u0000\u0185\u0685\u0001\u0000\u0000\u0000\u0187\u0689"+ - "\u0001\u0000\u0000\u0000\u0189\u068c\u0001\u0000\u0000\u0000\u018b\u0691"+ - "\u0001\u0000\u0000\u0000\u018d\u0697\u0001\u0000\u0000\u0000\u018f\u069d"+ - "\u0001\u0000\u0000\u0000\u0191\u06c6\u0001\u0000\u0000\u0000\u0193\u06ca"+ - "\u0001\u0000\u0000\u0000\u0195\u06ce\u0001\u0000\u0000\u0000\u0197\u06d8"+ - "\u0001\u0000\u0000\u0000\u0199\u06e2\u0001\u0000\u0000\u0000\u019b\u06e7"+ - "\u0001\u0000\u0000\u0000\u019d\u06f2\u0001\u0000\u0000\u0000\u019f\u06fd"+ - "\u0001\u0000\u0000\u0000\u01a1\u0703\u0001\u0000\u0000\u0000\u01a3\u0712"+ - "\u0001\u0000\u0000\u0000\u01a5\u071d\u0001\u0000\u0000\u0000\u01a7\u0726"+ - "\u0001\u0000\u0000\u0000\u01a9\u0728\u0001\u0000\u0000\u0000\u01ab\u072a"+ - "\u0001\u0000\u0000\u0000\u01ad\u072c\u0001\u0000\u0000\u0000\u01af\u0731"+ - "\u0001\u0000\u0000\u0000\u01b1\u0736\u0001\u0000\u0000\u0000\u01b3\u0738"+ - "\u0001\u0000\u0000\u0000\u01b5\u073a\u0001\u0000\u0000\u0000\u01b7\u0740"+ - "\u0001\u0000\u0000\u0000\u01b9\u0746\u0001\u0000\u0000\u0000\u01bb\u074e"+ - "\u0001\u0000\u0000\u0000\u01bd\u0755\u0001\u0000\u0000\u0000\u01bf\u0760"+ - "\u0001\u0000\u0000\u0000\u01c1\u0767\u0001\u0000\u0000\u0000\u01c3\u0772"+ - "\u0001\u0000\u0000\u0000\u01c5\u0779\u0001\u0000\u0000\u0000\u01c7\u01c8"+ - "\u0005/\u0000\u0000\u01c8\u01c9\u0005*\u0000\u0000\u01c9\u01ca\u0005+"+ - "\u0000\u0000\u01ca\u0002\u0001\u0000\u0000\u0000\u01cb\u01cc\u0005*\u0000"+ - "\u0000\u01cc\u01cd\u0005/\u0000\u0000\u01cd\u0004\u0001\u0000\u0000\u0000"+ - "\u01ce\u01cf\u0005@\u0000\u0000\u01cf\u0006\u0001\u0000\u0000\u0000\u01d0"+ - "\u01d1\u0003\u0163\u00b1\u0000\u01d1\u01d7\u0003\u01a9\u00d4\u0000\u01d2"+ - "\u01d6\u0003\u01a9\u00d4\u0000\u01d3\u01d6\u0003\u01ab\u00d5\u0000\u01d4"+ - "\u01d6\u0003\u01b3\u00d9\u0000\u01d5\u01d2\u0001\u0000\u0000\u0000\u01d5"+ - "\u01d3\u0001\u0000\u0000\u0000\u01d5\u01d4\u0001\u0000\u0000\u0000\u01d6"+ - "\u01d9\u0001\u0000\u0000\u0000\u01d7\u01d5\u0001\u0000\u0000\u0000\u01d7"+ - "\u01d8\u0001\u0000\u0000\u0000\u01d8\b\u0001\u0000\u0000\u0000\u01d9\u01d7"+ - "\u0001\u0000\u0000\u0000\u01da\u01db\u0007\u0000\u0000\u0000\u01db\u01dc"+ - "\u0007\u0001\u0000\u0000\u01dc\u01dd\u0007\u0001\u0000\u0000\u01dd\u01de"+ - "\u0007\u0002\u0000\u0000\u01de\u01df\u0007\u0003\u0000\u0000\u01df\u01e0"+ - "\u0007\u0004\u0000\u0000\u01e0\u01e1\u0007\u0005\u0000\u0000\u01e1\n\u0001"+ - "\u0000\u0000\u0000\u01e2\u01e3\u0007\u0000\u0000\u0000\u01e3\u01e4\u0007"+ - "\u0006\u0000\u0000\u01e4\u01e5\u0007\u0006\u0000\u0000\u01e5\f\u0001\u0000"+ - "\u0000\u0000\u01e6\u01e7\u0007\u0000\u0000\u0000\u01e7\u01e8\u0007\u0006"+ - "\u0000\u0000\u01e8\u01e9\u0007\u0007\u0000\u0000\u01e9\u01ea\u0007\b\u0000"+ - "\u0000\u01ea\u01eb\u0007\u0004\u0000\u0000\u01eb\u000e\u0001\u0000\u0000"+ - "\u0000\u01ec\u01ed\u0007\u0000\u0000\u0000\u01ed\u01ee\u0007\t\u0000\u0000"+ - "\u01ee\u01ef\u0007\t\u0000\u0000\u01ef\u0010\u0001\u0000\u0000\u0000\u01f0"+ - "\u01f1\u0007\u0000\u0000\u0000\u01f1\u01f2\u0007\t\u0000\u0000\u01f2\u01f3"+ - "\u0007\u0005\u0000\u0000\u01f3\u01f4\u0007\n\u0000\u0000\u01f4\u01f5\u0007"+ - "\u000b\u0000\u0000\u01f5\u0012\u0001\u0000\u0000\u0000\u01f6\u01f7\u0007"+ - "\u0000\u0000\u0000\u01f7\u01f8\u0007\t\u0000\u0000\u01f8\u01f9\u0007\f"+ - "\u0000\u0000\u01f9\u01fa\u0007\u0000\u0000\u0000\u01fa\u01fb\u0007\r\u0000"+ - 
"\u0000\u01fb\u01fc\u0007\u000e\u0000\u0000\u01fc\u0014\u0001\u0000\u0000"+ - "\u0000\u01fd\u01fe\u0007\u0000\u0000\u0000\u01fe\u01ff\u0007\u0004\u0000"+ - "\u0000\u01ff\u0200\u0007\u0001\u0000\u0000\u0200\u0201\u0007\n\u0000\u0000"+ - "\u0201\u0202\u0007\u000e\u0000\u0000\u0202\u0203\u0007\u0005\u0000\u0000"+ - "\u0203\u0204\u0007\u0002\u0000\u0000\u0204\u0205\u0007\u000b\u0000\u0000"+ - "\u0205\u0206\u0007\u000e\u0000\u0000\u0206\u0016\u0001\u0000\u0000\u0000"+ - "\u0207\u0208\u0007\u0000\u0000\u0000\u0208\u0209\u0007\u0004\u0000\u0000"+ - "\u0209\u020a\u0007\u0006\u0000\u0000\u020a\u0018\u0001\u0000\u0000\u0000"+ - "\u020b\u020c\u0007\u0000\u0000\u0000\u020c\u020d\u0007\u000e\u0000\u0000"+ - "\u020d\u001a\u0001\u0000\u0000\u0000\u020e\u020f\u0007\u0000\u0000\u0000"+ - "\u020f\u0210\u0007\u000e\u0000\u0000\u0210\u0211\u0007\u0001\u0000\u0000"+ - "\u0211\u001c\u0001\u0000\u0000\u0000\u0212\u0213\u0005a\u0000\u0000\u0213"+ - "\u0214\u0005r\u0000\u0000\u0214\u0215\u0005r\u0000\u0000\u0215\u0216\u0005"+ - "a\u0000\u0000\u0216\u0217\u0005y\u0000\u0000\u0217\u0218\u0005_\u0000"+ - "\u0000\u0218\u0219\u0005c\u0000\u0000\u0219\u021a\u0005o\u0000\u0000\u021a"+ - "\u021b\u0005l\u0000\u0000\u021b\u021c\u0005l\u0000\u0000\u021c\u021d\u0005"+ - "e\u0000\u0000\u021d\u021e\u0005c\u0000\u0000\u021e\u021f\u0005t\u0000"+ - "\u0000\u021f\u001e\u0001\u0000\u0000\u0000\u0220\u0221\u0007\u000f\u0000"+ - "\u0000\u0221\u0222\u0007\n\u0000\u0000\u0222\u0223\u0007\u0005\u0000\u0000"+ - "\u0223\u0224\u0007\f\u0000\u0000\u0224\u0225\u0007\n\u0000\u0000\u0225"+ - "\u0226\u0007\n\u0000\u0000\u0226\u0227\u0007\u0004\u0000\u0000\u0227 "+ - "\u0001\u0000\u0000\u0000\u0228\u0229\u0007\u000f\u0000\u0000\u0229\u022a"+ - "\u0007\r\u0000\u0000\u022a\"\u0001\u0000\u0000\u0000\u022b\u022c\u0007"+ - "\u0001\u0000\u0000\u022c\u022d\u0007\u0000\u0000\u0000\u022d\u022e\u0007"+ - "\u0001\u0000\u0000\u022e\u022f\u0007\u0010\u0000\u0000\u022f\u0230\u0007"+ - "\n\u0000\u0000\u0230$\u0001\u0000\u0000\u0000\u0231\u0232\u0007\u0001"+ - "\u0000\u0000\u0232\u0233\u0007\u0000\u0000\u0000\u0233\u0234\u0007\u000e"+ - "\u0000\u0000\u0234\u0235\u0007\n\u0000\u0000\u0235&\u0001\u0000\u0000"+ - "\u0000\u0236\u0237\u0007\u0001\u0000\u0000\u0237\u0238\u0007\u0000\u0000"+ - "\u0000\u0238\u0239\u0007\u000e\u0000\u0000\u0239\u023a\u0007\u0001\u0000"+ - "\u0000\u023a\u023b\u0007\u0000\u0000\u0000\u023b\u023c\u0007\u0006\u0000"+ - "\u0000\u023c\u023d\u0007\n\u0000\u0000\u023d(\u0001\u0000\u0000\u0000"+ - "\u023e\u023f\u0007\u0001\u0000\u0000\u023f\u0240\u0007\u0000\u0000\u0000"+ - "\u0240\u0241\u0007\u000e\u0000\u0000\u0241\u0242\u0007\u0005\u0000\u0000"+ - "\u0242*\u0001\u0000\u0000\u0000\u0243\u0244\u0007\u0001\u0000\u0000\u0244"+ - "\u0245\u0007\u0002\u0000\u0000\u0245\u0246\u0007\t\u0000\u0000\u0246\u0247"+ - "\u0007\t\u0000\u0000\u0247\u0248\u0007\n\u0000\u0000\u0248\u0249\u0007"+ - "\u0001\u0000\u0000\u0249\u024a\u0007\u0005\u0000\u0000\u024a\u024b\u0007"+ - "\b\u0000\u0000\u024b\u024c\u0007\u0002\u0000\u0000\u024c\u024d\u0007\u0004"+ - "\u0000\u0000\u024d,\u0001\u0000\u0000\u0000\u024e\u024f\u0007\u0001\u0000"+ - "\u0000\u024f\u0250\u0007\u0002\u0000\u0000\u0250\u0251\u0007\u0007\u0000"+ - "\u0000\u0251\u0252\u0007\u0007\u0000\u0000\u0252\u0253\u0007\n\u0000\u0000"+ - "\u0253\u0254\u0007\u0004\u0000\u0000\u0254\u0255\u0007\u0005\u0000\u0000"+ - "\u0255.\u0001\u0000\u0000\u0000\u0256\u0257\u0005c\u0000\u0000\u0257\u0258"+ - "\u0005o\u0000\u0000\u0258\u0259\u0005u\u0000\u0000\u0259\u025a\u0005n"+ - 
"\u0000\u0000\u025a\u025b\u0005t\u0000\u0000\u025b0\u0001\u0000\u0000\u0000"+ - "\u025c\u025d\u0007\u0001\u0000\u0000\u025d\u025e\u0007\u000b\u0000\u0000"+ - "\u025e\u025f\u0007\n\u0000\u0000\u025f\u0260\u0007\u0000\u0000\u0000\u0260"+ - "\u0261\u0007\u0005\u0000\u0000\u0261\u0262\u0007\n\u0000\u0000\u02622"+ - "\u0001\u0000\u0000\u0000\u0263\u0264\u0007\u0001\u0000\u0000\u0264\u0265"+ - "\u0007\r\u0000\u0000\u0265\u0266\u0007\u0001\u0000\u0000\u0266\u0267\u0007"+ - "\t\u0000\u0000\u0267\u0268\u0007\n\u0000\u0000\u02684\u0001\u0000\u0000"+ - "\u0000\u0269\u026f\u0007\u0006\u0000\u0000\u026a\u026b\u0007\u0006\u0000"+ - "\u0000\u026b\u026c\u0007\u0000\u0000\u0000\u026c\u026d\u0007\r\u0000\u0000"+ - "\u026d\u026f\u0007\u000e\u0000\u0000\u026e\u0269\u0001\u0000\u0000\u0000"+ - "\u026e\u026a\u0001\u0000\u0000\u0000\u026f6\u0001\u0000\u0000\u0000\u0270"+ - "\u0271\u0007\u0006\u0000\u0000\u0271\u0272\u0007\n\u0000\u0000\u0272\u0273"+ - "\u0007\u0001\u0000\u0000\u0273\u0274\u0007\t\u0000\u0000\u0274\u0275\u0007"+ - "\u0000\u0000\u0000\u0275\u0276\u0007\u000b\u0000\u0000\u0276\u0277\u0007"+ - "\n\u0000\u0000\u02778\u0001\u0000\u0000\u0000\u0278\u0279\u0007\u0006"+ - "\u0000\u0000\u0279\u027a\u0007\n\u0000\u0000\u027a\u027b\u0007\u0011\u0000"+ - "\u0000\u027b\u027c\u0007\u0000\u0000\u0000\u027c\u027d\u0007\u0003\u0000"+ - "\u0000\u027d\u027e\u0007\t\u0000\u0000\u027e\u027f\u0007\u0005\u0000\u0000"+ - "\u027f:\u0001\u0000\u0000\u0000\u0280\u0281\u0007\u0006\u0000\u0000\u0281"+ - "\u0282\u0007\n\u0000\u0000\u0282\u0283\u0007\t\u0000\u0000\u0283\u0284"+ - "\u0007\n\u0000\u0000\u0284\u0285\u0007\u0005\u0000\u0000\u0285\u0286\u0007"+ - "\n\u0000\u0000\u0286<\u0001\u0000\u0000\u0000\u0287\u0288\u0007\u0006"+ - "\u0000\u0000\u0288\u0289\u0007\n\u0000\u0000\u0289\u028a\u0007\u000e\u0000"+ - "\u0000\u028a\u028b\u0007\u0001\u0000\u0000\u028b>\u0001\u0000\u0000\u0000"+ - "\u028c\u028d\u0007\u0006\u0000\u0000\u028d\u028e\u0007\n\u0000\u0000\u028e"+ - "\u028f\u0007\u000e\u0000\u0000\u028f\u0290\u0007\u0001\u0000\u0000\u0290"+ - "\u0291\u0007\n\u0000\u0000\u0291\u0292\u0007\u0004\u0000\u0000\u0292\u0293"+ - "\u0007\u0006\u0000\u0000\u0293\u0294\u0007\u0000\u0000\u0000\u0294\u0295"+ - "\u0007\u0004\u0000\u0000\u0295\u0296\u0007\u0005\u0000\u0000\u0296\u0297"+ - "\u0007\u000e\u0000\u0000\u0297@\u0001\u0000\u0000\u0000\u0298\u0299\u0007"+ - "\u0006\u0000\u0000\u0299\u029a\u0007\n\u0000\u0000\u029a\u029b\u0007\u000e"+ - "\u0000\u0000\u029b\u029c\u0007\u0001\u0000\u0000\u029c\u029d\u0007\u000b"+ - "\u0000\u0000\u029d\u029e\u0007\b\u0000\u0000\u029e\u029f\u0007\u000f\u0000"+ - "\u0000\u029f\u02a0\u0007\n\u0000\u0000\u02a0B\u0001\u0000\u0000\u0000"+ - "\u02a1\u02a2\u0007\u0006\u0000\u0000\u02a2\u02a3\u0007\b\u0000\u0000\u02a3"+ - "\u02a4\u0007\u000e\u0000\u0000\u02a4\u02a5\u0007\u0005\u0000\u0000\u02a5"+ - "\u02a6\u0007\b\u0000\u0000\u02a6\u02a7\u0007\u0004\u0000\u0000\u02a7\u02a8"+ - "\u0007\u0001\u0000\u0000\u02a8\u02a9\u0007\u0005\u0000\u0000\u02a9D\u0001"+ - "\u0000\u0000\u0000\u02aa\u02ab\u0007\u0006\u0000\u0000\u02ab\u02ac\u0007"+ - "\u000b\u0000\u0000\u02ac\u02ad\u0007\u0002\u0000\u0000\u02ad\u02ae\u0007"+ - "\u0012\u0000\u0000\u02aeF\u0001\u0000\u0000\u0000\u02af\u02b0\u0007\n"+ - "\u0000\u0000\u02b0\u02b1\u0007\t\u0000\u0000\u02b1\u02b2\u0007\n\u0000"+ - "\u0000\u02b2\u02b3\u0007\u0007\u0000\u0000\u02b3\u02b4\u0007\n\u0000\u0000"+ - "\u02b4\u02b5\u0007\u0004\u0000\u0000\u02b5\u02b6\u0007\u0005\u0000\u0000"+ - "\u02b6\u02b7\u0007\u0002\u0000\u0000\u02b7\u02b8\u0007\u0011\u0000\u0000"+ - 
"\u02b8H\u0001\u0000\u0000\u0000\u02b9\u02ba\u0007\n\u0000\u0000\u02ba"+ - "\u02bb\u0007\t\u0000\u0000\u02bb\u02bc\u0007\n\u0000\u0000\u02bc\u02bd"+ - "\u0007\u0007\u0000\u0000\u02bd\u02be\u0007\n\u0000\u0000\u02be\u02bf\u0007"+ - "\u0004\u0000\u0000\u02bf\u02c0\u0007\u0005\u0000\u0000\u02c0\u02c1\u0007"+ - "\u000e\u0000\u0000\u02c1J\u0001\u0000\u0000\u0000\u02c2\u02c3\u0007\n"+ - "\u0000\u0000\u02c3\u02c4\u0007\t\u0000\u0000\u02c4\u02c5\u0007\u000e\u0000"+ - "\u0000\u02c5\u02c6\u0007\n\u0000\u0000\u02c6L\u0001\u0000\u0000\u0000"+ - "\u02c7\u02c8\u0007\n\u0000\u0000\u02c8\u02c9\u0007\u0004\u0000\u0000\u02c9"+ - "\u02ca\u0007\u0006\u0000\u0000\u02caN\u0001\u0000\u0000\u0000\u02cb\u02cc"+ - "\u0007\n\u0000\u0000\u02cc\u02cd\u0007\u000e\u0000\u0000\u02cd\u02ce\u0003"+ - "\u01b3\u00d9\u0000\u02ce\u02cf\u0007\u000e\u0000\u0000\u02cf\u02d0\u0007"+ - "\u0010\u0000\u0000\u02d0\u02d1\u0007\u0000\u0000\u0000\u02d1\u02d2\u0007"+ - "\u000b\u0000\u0000\u02d2\u02d3\u0007\u0006\u0000\u0000\u02d3\u02d4\u0007"+ - "\u000e\u0000\u0000\u02d4P\u0001\u0000\u0000\u0000\u02d5\u02d6\u0007\n"+ - "\u0000\u0000\u02d6\u02d7\u0007\u000e\u0000\u0000\u02d7\u02d8\u0003\u01b3"+ - "\u00d9\u0000\u02d8\u02d9\u0007\u000b\u0000\u0000\u02d9\u02da\u0007\n\u0000"+ - "\u0000\u02da\u02db\u0007\u0012\u0000\u0000\u02db\u02dc\u0007\t\u0000\u0000"+ - "\u02dc\u02dd\u0007\b\u0000\u0000\u02dd\u02de\u0007\u0001\u0000\u0000\u02de"+ - "\u02df\u0007\u0000\u0000\u0000\u02df\u02e0\u0007\u000e\u0000\u0000\u02e0"+ - "R\u0001\u0000\u0000\u0000\u02e1\u02e2\u0007\n\u0000\u0000\u02e2\u02e3"+ - "\u0007\u0013\u0000\u0000\u02e3\u02e4\u0007\b\u0000\u0000\u02e4\u02e5\u0007"+ - "\u000e\u0000\u0000\u02e5\u02e6\u0007\u0005\u0000\u0000\u02e6\u02e7\u0007"+ - "\u000e\u0000\u0000\u02e7T\u0001\u0000\u0000\u0000\u02e8\u02e9\u0007\n"+ - "\u0000\u0000\u02e9\u02ea\u0007\u0013\u0000\u0000\u02ea\u02eb\u0007\u0005"+ - "\u0000\u0000\u02eb\u02ec\u0007\u000b\u0000\u0000\u02ec\u02ed\u0007\u0000"+ - "\u0000\u0000\u02ed\u02ee\u0007\u0001\u0000\u0000\u02ee\u02ef\u0007\u0005"+ - "\u0000\u0000\u02efV\u0001\u0000\u0000\u0000\u02f0\u02f1\u0007\u0011\u0000"+ - "\u0000\u02f1\u02f2\u0007\b\u0000\u0000\u02f2\u02f3\u0007\n\u0000\u0000"+ - "\u02f3\u02f4\u0007\t\u0000\u0000\u02f4\u02f5\u0007\u0006\u0000\u0000\u02f5"+ - "\u02f6\u0007\u000e\u0000\u0000\u02f6X\u0001\u0000\u0000\u0000\u02f7\u02f8"+ - "\u0007\u0011\u0000\u0000\u02f8\u02f9\u0007\b\u0000\u0000\u02f9\u02fa\u0007"+ - "\u000b\u0000\u0000\u02fa\u02fb\u0007\u000e\u0000\u0000\u02fb\u02fc\u0007"+ - "\u0005\u0000\u0000\u02fcZ\u0001\u0000\u0000\u0000\u02fd\u02fe\u0007\u0011"+ - "\u0000\u0000\u02fe\u02ff\u0007\u0002\u0000\u0000\u02ff\u0300\u0007\u000b"+ - "\u0000\u0000\u0300\u0301\u0007\u0001\u0000\u0000\u0301\u0302\u0007\n\u0000"+ - "\u0000\u0302\\\u0001\u0000\u0000\u0000\u0303\u0304\u0003[-\u0000\u0304"+ - "\u0305\u0003\u01b3\u00d9\u0000\u0305\u0306\u0003{=\u0000\u0306^\u0001"+ - "\u0000\u0000\u0000\u0307\u0308\u0003[-\u0000\u0308\u0309\u0003\u01b3\u00d9"+ - "\u0000\u0309\u030a\u0003\u00cdf\u0000\u030a\u030b\u0003\u01b3\u00d9\u0000"+ - "\u030b\u030c\u0003{=\u0000\u030c`\u0001\u0000\u0000\u0000\u030d\u030e"+ - "\u0007\u0011\u0000\u0000\u030e\u030f\u0007\u000b\u0000\u0000\u030f\u0310"+ - "\u0007\n\u0000\u0000\u0310\u0311\u0007\n\u0000\u0000\u0311\u0312\u0007"+ - "\u0014\u0000\u0000\u0312\u0313\u0007\n\u0000\u0000\u0313b\u0001\u0000"+ - "\u0000\u0000\u0314\u0315\u0007\u0011\u0000\u0000\u0315\u0316\u0007\u000b"+ - "\u0000\u0000\u0316\u0317\u0007\u0002\u0000\u0000\u0317\u0318\u0007\u0007"+ - 
"\u0000\u0000\u0318d\u0001\u0000\u0000\u0000\u0319\u031a\u0007\u0011\u0000"+ - "\u0000\u031a\u031b\u0007\u000b\u0000\u0000\u031b\u031c\u0007\u0002\u0000"+ - "\u0000\u031c\u031d\u0007\u0014\u0000\u0000\u031d\u031e\u0007\n\u0000\u0000"+ - "\u031e\u031f\u0007\u0004\u0000\u0000\u031ff\u0001\u0000\u0000\u0000\u0320"+ - "\u0321\u0007\u0011\u0000\u0000\u0321\u0322\u0007\u0003\u0000\u0000\u0322"+ - "\u0323\u0007\t\u0000\u0000\u0323\u0324\u0007\t\u0000\u0000\u0324\u0325"+ - "\u0007\u0005\u0000\u0000\u0325\u0326\u0007\n\u0000\u0000\u0326\u0327\u0007"+ - "\u0013\u0000\u0000\u0327\u0328\u0007\u0005\u0000\u0000\u0328h\u0001\u0000"+ - "\u0000\u0000\u0329\u032a\u0007\u0015\u0000\u0000\u032a\u032b\u0007\n\u0000"+ - "\u0000\u032b\u032c\u0007\u0004\u0000\u0000\u032c\u032d\u0007\n\u0000\u0000"+ - "\u032d\u032e\u0007\u000b\u0000\u0000\u032e\u032f\u0007\u0000\u0000\u0000"+ - "\u032f\u0330\u0007\u0005\u0000\u0000\u0330\u0331\u0007\n\u0000\u0000\u0331"+ - "\u0332\u0007\u0006\u0000\u0000\u0332j\u0001\u0000\u0000\u0000\u0333\u0334"+ - "\u0007\u0015\u0000\u0000\u0334\u0335\u0007\u000b\u0000\u0000\u0335\u0336"+ - "\u0007\u0000\u0000\u0000\u0336\u0337\u0007\u0004\u0000\u0000\u0337\u0338"+ - "\u0007\u0005\u0000\u0000\u0338l\u0001\u0000\u0000\u0000\u0339\u033a\u0007"+ - "\u0015\u0000\u0000\u033a\u033b\u0007\u000b\u0000\u0000\u033b\u033c\u0007"+ - "\u0002\u0000\u0000\u033c\u033d\u0007\u0003\u0000\u0000\u033d\u033e\u0007"+ - "\u0012\u0000\u0000\u033en\u0001\u0000\u0000\u0000\u033f\u0346\u0007\u0010"+ - "\u0000\u0000\u0340\u0341\u0007\u0010\u0000\u0000\u0341\u0342\u0007\u0002"+ - "\u0000\u0000\u0342\u0343\u0007\u0003\u0000\u0000\u0343\u0344\u0007\u000b"+ - "\u0000\u0000\u0344\u0346\u0007\u000e\u0000\u0000\u0345\u033f\u0001\u0000"+ - "\u0000\u0000\u0345\u0340\u0001\u0000\u0000\u0000\u0346p\u0001\u0000\u0000"+ - "\u0000\u0347\u0348\u0007\b\u0000\u0000\u0348\u0349\u0007\u0006\u0000\u0000"+ - "\u0349\u034a\u0007\n\u0000\u0000\u034a\u034b\u0007\u0004\u0000\u0000\u034b"+ - "\u034c\u0007\u0005\u0000\u0000\u034c\u034d\u0007\b\u0000\u0000\u034d\u034e"+ - "\u0007\u0011\u0000\u0000\u034e\u034f\u0007\b\u0000\u0000\u034f\u0350\u0007"+ - "\n\u0000\u0000\u0350\u0351\u0007\u0006\u0000\u0000\u0351r\u0001\u0000"+ - "\u0000\u0000\u0352\u0353\u0007\b\u0000\u0000\u0353\u0354\u0007\u0006\u0000"+ - "\u0000\u0354\u0355\u0007\n\u0000\u0000\u0355\u0356\u0007\u0004\u0000\u0000"+ - "\u0356\u0357\u0007\u0005\u0000\u0000\u0357\u0358\u0007\b\u0000\u0000\u0358"+ - "\u0359\u0007\u0005\u0000\u0000\u0359\u035a\u0007\r\u0000\u0000\u035at"+ - "\u0001\u0000\u0000\u0000\u035b\u035c\u0007\b\u0000\u0000\u035c\u035d\u0007"+ - "\u0011\u0000\u0000\u035dv\u0001\u0000\u0000\u0000\u035e\u035f\u0007\b"+ - "\u0000\u0000\u035f\u0360\u0007\u0004\u0000\u0000\u0360x\u0001\u0000\u0000"+ - "\u0000\u0361\u0362\u0007\b\u0000\u0000\u0362\u0363\u0007\u0004\u0000\u0000"+ - "\u0363\u0364\u0007\u0001\u0000\u0000\u0364\u0365\u0007\u000b\u0000\u0000"+ - "\u0365\u0366\u0007\n\u0000\u0000\u0366\u0367\u0007\u0007\u0000\u0000\u0367"+ - "\u0368\u0007\n\u0000\u0000\u0368\u0369\u0007\u0004\u0000\u0000\u0369\u036a"+ - "\u0007\u0005\u0000\u0000\u036az\u0001\u0000\u0000\u0000\u036b\u036c\u0007"+ - "\b\u0000\u0000\u036c\u036d\u0007\u0004\u0000\u0000\u036d\u036e\u0007\u0006"+ - "\u0000\u0000\u036e\u036f\u0007\n\u0000\u0000\u036f\u0370\u0007\u0013\u0000"+ - "\u0000\u0370|\u0001\u0000\u0000\u0000\u0371\u0372\u0007\b\u0000\u0000"+ - "\u0372\u0373\u0007\u0004\u0000\u0000\u0373\u0374\u0007\u0006\u0000\u0000"+ - "\u0374\u0375\u0007\n\u0000\u0000\u0375\u0376\u0007\u0013\u0000\u0000\u0376"+ - 
"\u0377\u0007\n\u0000\u0000\u0377\u0378\u0007\u000e\u0000\u0000\u0378~"+ - "\u0001\u0000\u0000\u0000\u0379\u037a\u0007\b\u0000\u0000\u037a\u037b\u0007"+ - "\u0004\u0000\u0000\u037b\u037c\u0007\u000e\u0000\u0000\u037c\u037d\u0007"+ - "\n\u0000\u0000\u037d\u037e\u0007\u000b\u0000\u0000\u037e\u037f\u0007\u0005"+ - "\u0000\u0000\u037f\u0080\u0001\u0000\u0000\u0000\u0380\u0381\u0007\b\u0000"+ - "\u0000\u0381\u0382\u0007\u0004\u0000\u0000\u0382\u0383\u0007\u0005\u0000"+ - "\u0000\u0383\u0384\u0007\u0002\u0000\u0000\u0384\u0082\u0001\u0000\u0000"+ - "\u0000\u0385\u0386\u0007\b\u0000\u0000\u0386\u0387\u0007\u000e\u0000\u0000"+ - "\u0387\u0084\u0001\u0000\u0000\u0000\u0388\u0389\u0007\u0016\u0000\u0000"+ - "\u0389\u038a\u0007\u000e\u0000\u0000\u038a\u038b\u0007\u0002\u0000\u0000"+ - "\u038b\u038c\u0007\u0004\u0000\u0000\u038c\u0086\u0001\u0000\u0000\u0000"+ - "\u038d\u038e\u0007\u0016\u0000\u0000\u038e\u038f\u0007\u0002\u0000\u0000"+ - "\u038f\u0390\u0007\b\u0000\u0000\u0390\u0391\u0007\u0004\u0000\u0000\u0391"+ - "\u0088\u0001\u0000\u0000\u0000\u0392\u0393\u0007\u0017\u0000\u0000\u0393"+ - "\u0394\u0007\n\u0000\u0000\u0394\u0395\u0007\r\u0000\u0000\u0395\u008a"+ - "\u0001\u0000\u0000\u0000\u0396\u0397\u0007\u0017\u0000\u0000\u0397\u0398"+ - "\u0007\n\u0000\u0000\u0398\u0399\u0007\r\u0000\u0000\u0399\u039a\u0007"+ - "\u0002\u0000\u0000\u039a\u039b\u0007\u0011\u0000\u0000\u039b\u008c\u0001"+ - "\u0000\u0000\u0000\u039c\u039d\u0007\u0017\u0000\u0000\u039d\u039e\u0007"+ - "\n\u0000\u0000\u039e\u039f\u0007\r\u0000\u0000\u039f\u03a0\u0007\u000e"+ - "\u0000\u0000\u03a0\u008e\u0001\u0000\u0000\u0000\u03a1\u03a2\u0007\t\u0000"+ - "\u0000\u03a2\u03a3\u0007\u0000\u0000\u0000\u03a3\u03a4\u0007\u000e\u0000"+ - "\u0000\u03a4\u03a5\u0007\u0005\u0000\u0000\u03a5\u0090\u0001\u0000\u0000"+ - "\u0000\u03a6\u03a7\u0007\t\u0000\u0000\u03a7\u03a8\u0007\n\u0000\u0000"+ - "\u03a8\u03a9\u0007\u0011\u0000\u0000\u03a9\u03aa\u0007\u0005\u0000\u0000"+ - "\u03aa\u0092\u0001\u0000\u0000\u0000\u03ab\u03ac\u0007\t\u0000\u0000\u03ac"+ - "\u03ad\u0007\b\u0000\u0000\u03ad\u03ae\u0007\u0011\u0000\u0000\u03ae\u03af"+ - "\u0007\n\u0000\u0000\u03af\u03b0\u0007\u0005\u0000\u0000\u03b0\u03b1\u0007"+ - "\b\u0000\u0000\u03b1\u03b2\u0007\u0007\u0000\u0000\u03b2\u03b3\u0007\n"+ - "\u0000\u0000\u03b3\u0094\u0001\u0000\u0000\u0000\u03b4\u03b5\u0007\t\u0000"+ - "\u0000\u03b5\u03b6\u0007\b\u0000\u0000\u03b6\u03b7\u0007\u0007\u0000\u0000"+ - "\u03b7\u03b8\u0007\b\u0000\u0000\u03b8\u03b9\u0007\u0005\u0000\u0000\u03b9"+ - "\u0096\u0001\u0000\u0000\u0000\u03ba\u03bb\u0007\t\u0000\u0000\u03bb\u03bc"+ - "\u0007\u0002\u0000\u0000\u03bc\u03bd\u0007\u0001\u0000\u0000\u03bd\u03be"+ - "\u0007\u0000\u0000\u0000\u03be\u03bf\u0007\t\u0000\u0000\u03bf\u0098\u0001"+ - "\u0000\u0000\u0000\u03c0\u03c1\u0007\t\u0000\u0000\u03c1\u03c2\u0007\u0002"+ - "\u0000\u0000\u03c2\u03c3\u0007\u0001\u0000\u0000\u03c3\u03c4\u0007\u0017"+ - "\u0000\u0000\u03c4\u009a\u0001\u0000\u0000\u0000\u03c5\u03c6\u0007\u0007"+ - "\u0000\u0000\u03c6\u03c7\u0007\u0000\u0000\u0000\u03c7\u03c8\u0007\u0013"+ - "\u0000\u0000\u03c8\u03c9\u0007\u0018\u0000\u0000\u03c9\u03ca\u0007\u0000"+ - "\u0000\u0000\u03ca\u03cb\u0007\t\u0000\u0000\u03cb\u03cc\u0007\u0003\u0000"+ - "\u0000\u03cc\u03cd\u0007\n\u0000\u0000\u03cd\u009c\u0001\u0000\u0000\u0000"+ - "\u03ce\u03cf\u0007\u0007\u0000\u0000\u03cf\u03d0\u0007\n\u0000\u0000\u03d0"+ - "\u03d1\u0007\u000b\u0000\u0000\u03d1\u03d2\u0007\u0015\u0000\u0000\u03d2"+ - "\u03d3\u0007\n\u0000\u0000\u03d3\u009e\u0001\u0000\u0000\u0000\u03d4\u03dd"+ - 
"\u0007\u0007\u0000\u0000\u03d5\u03d6\u0007\u0007\u0000\u0000\u03d6\u03d7"+ - "\u0007\b\u0000\u0000\u03d7\u03d8\u0007\u0004\u0000\u0000\u03d8\u03d9\u0007"+ - "\u0003\u0000\u0000\u03d9\u03da\u0007\u0005\u0000\u0000\u03da\u03db\u0007"+ - "\n\u0000\u0000\u03db\u03dd\u0007\u000e\u0000\u0000\u03dc\u03d4\u0001\u0000"+ - "\u0000\u0000\u03dc\u03d5\u0001\u0000\u0000\u0000\u03dd\u00a0\u0001\u0000"+ - "\u0000\u0000\u03de\u03df\u0007\u0007\u0000\u0000\u03df\u03e0\u0007\b\u0000"+ - "\u0000\u03e0\u03e1\u0007\u0004\u0000\u0000\u03e1\u03e2\u0007\u0018\u0000"+ - "\u0000\u03e2\u03e3\u0007\u0000\u0000\u0000\u03e3\u03e4\u0007\t\u0000\u0000"+ - "\u03e4\u03e5\u0007\u0003\u0000\u0000\u03e5\u03e6\u0007\n\u0000\u0000\u03e6"+ - "\u00a2\u0001\u0000\u0000\u0000\u03e7\u03e8\u0007\u0007\u0000\u0000\u03e8"+ - "\u03e9\u0007\u0002\u0000\u0000\u03e9\u03ea\u0007\u0006\u0000\u0000\u03ea"+ - "\u03eb\u0007\b\u0000\u0000\u03eb\u03ec\u0007\u0011\u0000\u0000\u03ec\u03ed"+ - "\u0007\r\u0000\u0000\u03ed\u00a4\u0001\u0000\u0000\u0000\u03ee\u03ef\u0007"+ - "\u0007\u0000\u0000\u03ef\u03f0\u0007\u000b\u0000\u0000\u03f0\u03f1\u0003"+ - "\u01b3\u00d9\u0000\u03f1\u03f2\u0007\u0001\u0000\u0000\u03f2\u03f3\u0007"+ - "\u0002\u0000\u0000\u03f3\u03f4\u0007\u0003\u0000\u0000\u03f4\u03f5\u0007"+ - "\u0004\u0000\u0000\u03f5\u03f6\u0007\u0005\u0000\u0000\u03f6\u03f7\u0007"+ - "\n\u0000\u0000\u03f7\u03f8\u0007\u000b\u0000\u0000\u03f8\u00a6\u0001\u0000"+ - "\u0000\u0000\u03f9\u03fa\u0007\u0004\u0000\u0000\u03fa\u03fb\u0007\u0000"+ - "\u0000\u0000\u03fb\u03fc\u0007\u0007\u0000\u0000\u03fc\u03fd\u0007\n\u0000"+ - "\u0000\u03fd\u03fe\u0007\u000e\u0000\u0000\u03fe\u03ff\u0007\u0012\u0000"+ - "\u0000\u03ff\u0400\u0007\u0000\u0000\u0000\u0400\u0401\u0007\u0001\u0000"+ - "\u0000\u0401\u0402\u0007\n\u0000\u0000\u0402\u00a8\u0001\u0000\u0000\u0000"+ - "\u0403\u0404\u0007\u0004\u0000\u0000\u0404\u0405\u0007\u0000\u0000\u0000"+ - "\u0405\u0406\u0007\u0007\u0000\u0000\u0406\u0407\u0007\n\u0000\u0000\u0407"+ - "\u0408\u0007\u000e\u0000\u0000\u0408\u0409\u0007\u0012\u0000\u0000\u0409"+ - "\u040a\u0007\u0000\u0000\u0000\u040a\u040b\u0007\u0001\u0000\u0000\u040b"+ - "\u040c\u0007\n\u0000\u0000\u040c\u040d\u0007\u000e\u0000\u0000\u040d\u00aa"+ - "\u0001\u0000\u0000\u0000\u040e\u040f\u0007\u0004\u0000\u0000\u040f\u0410"+ - "\u0007\n\u0000\u0000\u0410\u0411\u0007\u000e\u0000\u0000\u0411\u0412\u0007"+ - "\u0005\u0000\u0000\u0412\u0413\u0007\n\u0000\u0000\u0413\u0414\u0007\u0006"+ - "\u0000\u0000\u0414\u00ac\u0001\u0000\u0000\u0000\u0415\u0416\u0007\u0004"+ - "\u0000\u0000\u0416\u0417\u0007\u0002\u0000\u0000\u0417\u00ae\u0001\u0000"+ - "\u0000\u0000\u0418\u0419\u0007\u0004\u0000\u0000\u0419\u041a\u0007\u0002"+ - "\u0000\u0000\u041a\u041b\u0007\u0005\u0000\u0000\u041b\u00b0\u0001\u0000"+ - "\u0000\u0000\u041c\u041d\u0007\u0004\u0000\u0000\u041d\u041e\u0007\u0003"+ - "\u0000\u0000\u041e\u041f\u0007\t\u0000\u0000\u041f\u0420\u0007\t\u0000"+ - "\u0000\u0420\u0421\u0007\u000e\u0000\u0000\u0421\u00b2\u0001\u0000\u0000"+ - "\u0000\u0422\u0423\u0007\u0002\u0000\u0000\u0423\u0424\u0007\u0011\u0000"+ - "\u0000\u0424\u0425\u0007\u0011\u0000\u0000\u0425\u0426\u0007\u000e\u0000"+ - "\u0000\u0426\u0427\u0007\n\u0000\u0000\u0427\u0428\u0007\u0005\u0000\u0000"+ - "\u0428\u00b4\u0001\u0000\u0000\u0000\u0429\u042a\u0007\u0002\u0000\u0000"+ - "\u042a\u042b\u0007\u0011\u0000\u0000\u042b\u00b6\u0001\u0000\u0000\u0000"+ - "\u042c\u042d\u0007\u0002\u0000\u0000\u042d\u042e\u0007\u0004\u0000\u0000"+ - "\u042e\u00b8\u0001\u0000\u0000\u0000\u042f\u0430\u0007\u0002\u0000\u0000"+ - 
"\u0430\u0431\u0007\u0004\u0000\u0000\u0431\u0432\u0007\t\u0000\u0000\u0432"+ - "\u0433\u0007\r\u0000\u0000\u0433\u00ba\u0001\u0000\u0000\u0000\u0434\u0435"+ - "\u0007\u0002\u0000\u0000\u0435\u0436\u0007\u000b\u0000\u0000\u0436\u00bc"+ - "\u0001\u0000\u0000\u0000\u0437\u0438\u0007\u0002\u0000\u0000\u0438\u0439"+ - "\u0007\u000b\u0000\u0000\u0439\u043a\u0007\u0006\u0000\u0000\u043a\u043b"+ - "\u0007\n\u0000\u0000\u043b\u043c\u0007\u000b\u0000\u0000\u043c\u00be\u0001"+ - "\u0000\u0000\u0000\u043d\u043e\u0007\u0002\u0000\u0000\u043e\u043f\u0007"+ - "\u0003\u0000\u0000\u043f\u0440\u0007\u0005\u0000\u0000\u0440\u0441\u0007"+ - "\n\u0000\u0000\u0441\u0442\u0007\u000b\u0000\u0000\u0442\u00c0\u0001\u0000"+ - "\u0000\u0000\u0443\u0444\u0007\u0002\u0000\u0000\u0444\u0445\u0007\u0018"+ - "\u0000\u0000\u0445\u0446\u0007\n\u0000\u0000\u0446\u0447\u0007\u000b\u0000"+ - "\u0000\u0447\u0448\u0007\u000b\u0000\u0000\u0448\u0449\u0007\b\u0000\u0000"+ - "\u0449\u044a\u0007\u0006\u0000\u0000\u044a\u044b\u0007\n\u0000\u0000\u044b"+ - "\u00c2\u0001\u0000\u0000\u0000\u044c\u044d\u0007\u0012\u0000\u0000\u044d"+ - "\u044e\u0007\u0000\u0000\u0000\u044e\u044f\u0007\u000e\u0000\u0000\u044f"+ - "\u0450\u0007\u000e\u0000\u0000\u0450\u0451\u0007\f\u0000\u0000\u0451\u0452"+ - "\u0007\u0002\u0000\u0000\u0452\u0453\u0007\u000b\u0000\u0000\u0453\u0454"+ - "\u0007\u0006\u0000\u0000\u0454\u00c4\u0001\u0000\u0000\u0000\u0455\u0456"+ - "\u0007\u0012\u0000\u0000\u0456\u0457\u0007\u0000\u0000\u0000\u0457\u0458"+ - "\u0007\u0005\u0000\u0000\u0458\u0459\u0007\u0001\u0000\u0000\u0459\u045a"+ - "\u0007\u0010\u0000\u0000\u045a\u00c6\u0001\u0000\u0000\u0000\u045b\u045c"+ - "\u0007\u0012\u0000\u0000\u045c\u045d\u0007\n\u0000\u0000\u045d\u045e\u0007"+ - "\u000b\u0000\u0000\u045e\u00c8\u0001\u0000\u0000\u0000\u045f\u0460\u0003"+ - "\u01bf\u00df\u0000\u0460\u0461\u0003\u01b3\u00d9\u0000\u0461\u0462\u0003"+ - "}>\u0000\u0462\u00ca\u0001\u0000\u0000\u0000\u0463\u0464\u0003\u01bf\u00df"+ - "\u0000\u0464\u0465\u0003\u01b3\u00d9\u0000\u0465\u0466\u0003\u00cdf\u0000"+ - "\u0466\u0467\u0003\u01b3\u00d9\u0000\u0467\u0468\u0003{=\u0000\u0468\u00cc"+ - "\u0001\u0000\u0000\u0000\u0469\u046a\u0007\u0012\u0000\u0000\u046a\u046b"+ - "\u0007\u000b\u0000\u0000\u046b\u046c\u0007\b\u0000\u0000\u046c\u046d\u0007"+ - "\u0007\u0000\u0000\u046d\u046e\u0007\u0000\u0000\u0000\u046e\u046f\u0007"+ - "\u000b\u0000\u0000\u046f\u0470\u0007\r\u0000\u0000\u0470\u00ce\u0001\u0000"+ - "\u0000\u0000\u0471\u0472\u0007\u0012\u0000\u0000\u0472\u0473\u0007\u0003"+ - "\u0000\u0000\u0473\u0474\u0007\u0005\u0000\u0000\u0474\u00d0\u0001\u0000"+ - "\u0000\u0000\u0475\u0476\u0007\u000b\u0000\u0000\u0476\u0477\u0007\n\u0000"+ - "\u0000\u0477\u0478\u0007\u0015\u0000\u0000\u0478\u0479\u0007\b\u0000\u0000"+ - "\u0479\u047a\u0007\u0002\u0000\u0000\u047a\u047b\u0007\u0004\u0000\u0000"+ - "\u047b\u00d2\u0001\u0000\u0000\u0000\u047c\u047d\u0007\u000b\u0000\u0000"+ - "\u047d\u047e\u0007\n\u0000\u0000\u047e\u047f\u0007\u0015\u0000\u0000\u047f"+ - "\u0480\u0007\b\u0000\u0000\u0480\u0481\u0007\u0002\u0000\u0000\u0481\u0482"+ - "\u0007\u0004\u0000\u0000\u0482\u0483\u0007\u000e\u0000\u0000\u0483\u00d4"+ - "\u0001\u0000\u0000\u0000\u0484\u0485\u0007\u000b\u0000\u0000\u0485\u0486"+ - "\u0007\n\u0000\u0000\u0486\u0487\u0007\u0007\u0000\u0000\u0487\u0488\u0007"+ - "\u0002\u0000\u0000\u0488\u0489\u0007\u0018\u0000\u0000\u0489\u048a\u0007"+ - "\n\u0000\u0000\u048a\u00d6\u0001\u0000\u0000\u0000\u048b\u048c\u0007\u000b"+ - "\u0000\u0000\u048c\u048d\u0007\n\u0000\u0000\u048d\u048e\u0007\u0005\u0000"+ - 
"\u0000\u048e\u048f\u0007\u0003\u0000\u0000\u048f\u0490\u0007\u000b\u0000"+ - "\u0000\u0490\u0491\u0007\u0004\u0000\u0000\u0491\u0492\u0007\b\u0000\u0000"+ - "\u0492\u0493\u0007\u0004\u0000\u0000\u0493\u0494\u0007\u0015\u0000\u0000"+ - "\u0494\u00d8\u0001\u0000\u0000\u0000\u0495\u0496\u0007\u000b\u0000\u0000"+ - "\u0496\u0497\u0007\n\u0000\u0000\u0497\u0498\u0007\u0018\u0000\u0000\u0498"+ - "\u0499\u0007\u0002\u0000\u0000\u0499\u049a\u0007\u0017\u0000\u0000\u049a"+ - "\u049b\u0007\n\u0000\u0000\u049b\u00da\u0001\u0000\u0000\u0000\u049c\u049d"+ - "\u0007\u000b\u0000\u0000\u049d\u049e\u0007\u0002\u0000\u0000\u049e\u049f"+ - "\u0007\t\u0000\u0000\u049f\u04a0\u0007\n\u0000\u0000\u04a0\u00dc\u0001"+ - "\u0000\u0000\u0000\u04a1\u04a2\u0007\u000b\u0000\u0000\u04a2\u04a3\u0007"+ - "\u0002\u0000\u0000\u04a3\u04a4\u0007\t\u0000\u0000\u04a4\u04a5\u0007\n"+ - "\u0000\u0000\u04a5\u04a6\u0007\u000e\u0000\u0000\u04a6\u00de\u0001\u0000"+ - "\u0000\u0000\u04a7\u04a8\u0007\u000b\u0000\u0000\u04a8\u04a9\u0007\u0002"+ - "\u0000\u0000\u04a9\u04aa\u0007\f\u0000\u0000\u04aa\u00e0\u0001\u0000\u0000"+ - "\u0000\u04ab\u04ac\u0007\u000e\u0000\u0000\u04ac\u04ad\u0007\u0001\u0000"+ - "\u0000\u04ad\u04ae\u0007\u0010\u0000\u0000\u04ae\u04af\u0007\n\u0000\u0000"+ - "\u04af\u04b0\u0007\u0007\u0000\u0000\u04b0\u04b1\u0007\u0000\u0000\u0000"+ - "\u04b1\u00e2\u0001\u0000\u0000\u0000\u04b2\u04bb\u0007\u000e\u0000\u0000"+ - "\u04b3\u04b4\u0007\u000e\u0000\u0000\u04b4\u04b5\u0007\n\u0000\u0000\u04b5"+ - "\u04b6\u0007\u0001\u0000\u0000\u04b6\u04b7\u0007\u0002\u0000\u0000\u04b7"+ - "\u04b8\u0007\u0004\u0000\u0000\u04b8\u04b9\u0007\u0006\u0000\u0000\u04b9"+ - "\u04bb\u0007\u000e\u0000\u0000\u04ba\u04b2\u0001\u0000\u0000\u0000\u04ba"+ - "\u04b3\u0001\u0000\u0000\u0000\u04bb\u00e4\u0001\u0000\u0000\u0000\u04bc"+ - "\u04bd\u0007\u000e\u0000\u0000\u04bd\u04be\u0007\n\u0000\u0000\u04be\u04bf"+ - "\u0007\t\u0000\u0000\u04bf\u04c0\u0007\n\u0000\u0000\u04c0\u04c1\u0007"+ - "\u0001\u0000\u0000\u04c1\u04c2\u0007\u0005\u0000\u0000\u04c2\u00e6\u0001"+ - "\u0000\u0000\u0000\u04c3\u04c4\u0005s\u0000\u0000\u04c4\u04c5\u0005e\u0000"+ - "\u0000\u04c5\u04c6\u0005q\u0000\u0000\u04c6\u04c7\u0005_\u0000\u0000\u04c7"+ - "\u04c8\u0005t\u0000\u0000\u04c8\u04c9\u0005r\u0000\u0000\u04c9\u04ca\u0005"+ - "a\u0000\u0000\u04ca\u04cb\u0005n\u0000\u0000\u04cb\u04cc\u0005s\u0000"+ - "\u0000\u04cc\u04cd\u0005f\u0000\u0000\u04cd\u04ce\u0005o\u0000\u0000\u04ce"+ - "\u04cf\u0005r\u0000\u0000\u04cf\u04d0\u0005m\u0000\u0000\u04d0\u00e8\u0001"+ - "\u0000\u0000\u0000\u04d1\u04d2\u0007\u000e\u0000\u0000\u04d2\u04d3\u0007"+ - "\n\u0000\u0000\u04d3\u04d4\u0007\u0005\u0000\u0000\u04d4\u00ea\u0001\u0000"+ - "\u0000\u0000\u04d5\u04d6\u0007\u000e\u0000\u0000\u04d6\u04d7\u0007\u0010"+ - "\u0000\u0000\u04d7\u04d8\u0007\u0000\u0000\u0000\u04d8\u04d9\u0007\u000b"+ - "\u0000\u0000\u04d9\u04da\u0007\u0006\u0000\u0000\u04da\u00ec\u0001\u0000"+ - "\u0000\u0000\u04db\u04dc\u0007\u000e\u0000\u0000\u04dc\u04dd\u0007\u0010"+ - "\u0000\u0000\u04dd\u04de\u0007\u0002\u0000\u0000\u04de\u04df\u0007\f\u0000"+ - "\u0000\u04df\u00ee\u0001\u0000\u0000\u0000\u04e0\u04e1\u0007\u000e\u0000"+ - "\u0000\u04e1\u04e2\u0007\u0005\u0000\u0000\u04e2\u04e3\u0007\u0000\u0000"+ - "\u0000\u04e3\u04e4\u0007\u000b\u0000\u0000\u04e4\u04e5\u0007\u0005\u0000"+ - "\u0000\u04e5\u00f0\u0001\u0000\u0000\u0000\u04e6\u04e7\u0007\u0005\u0000"+ - "\u0000\u04e7\u04e8\u0007\u0000\u0000\u0000\u04e8\u04e9\u0007\u000f\u0000"+ - "\u0000\u04e9\u04ea\u0007\t\u0000\u0000\u04ea\u04eb\u0007\n\u0000\u0000"+ - 
"\u04eb\u00f2\u0001\u0000\u0000\u0000\u04ec\u04ed\u0007\u0005\u0000\u0000"+ - "\u04ed\u04ee\u0007\u0000\u0000\u0000\u04ee\u04ef\u0007\u000f\u0000\u0000"+ - "\u04ef\u04f0\u0007\t\u0000\u0000\u04f0\u04f1\u0007\n\u0000\u0000\u04f1"+ - "\u04f2\u0007\u000e\u0000\u0000\u04f2\u00f4\u0001\u0000\u0000\u0000\u04f3"+ - "\u04f4\u0007\u0005\u0000\u0000\u04f4\u04f5\u0007\u0010\u0000\u0000\u04f5"+ - "\u04f6\u0007\n\u0000\u0000\u04f6\u04f7\u0007\u0004\u0000\u0000\u04f7\u00f6"+ - "\u0001\u0000\u0000\u0000\u04f8\u04f9\u0007\u0005\u0000\u0000\u04f9\u04fa"+ - "\u0007\u0002\u0000\u0000\u04fa\u00f8\u0001\u0000\u0000\u0000\u04fb\u04fc"+ - "\u0007\u0005\u0000\u0000\u04fc\u04fd\u0007\u0005\u0000\u0000\u04fd\u04fe"+ - "\u0007\t\u0000\u0000\u04fe\u00fa\u0001\u0000\u0000\u0000\u04ff\u0500\u0007"+ - "\u0005\u0000\u0000\u0500\u0501\u0007\r\u0000\u0000\u0501\u0502\u0007\u0012"+ - "\u0000\u0000\u0502\u0503\u0007\n\u0000\u0000\u0503\u00fc\u0001\u0000\u0000"+ - "\u0000\u0504\u0505\u0007\u0003\u0000\u0000\u0505\u0506\u0007\u0004\u0000"+ - "\u0000\u0506\u0507\u0007\u0011\u0000\u0000\u0507\u0508\u0007\u000b\u0000"+ - "\u0000\u0508\u0509\u0007\n\u0000\u0000\u0509\u050a\u0007\n\u0000\u0000"+ - "\u050a\u050b\u0007\u0014\u0000\u0000\u050b\u050c\u0007\n\u0000\u0000\u050c"+ - "\u00fe\u0001\u0000\u0000\u0000\u050d\u050e\u0007\u0003\u0000\u0000\u050e"+ - "\u050f\u0007\u0004\u0000\u0000\u050f\u0510\u0007\t\u0000\u0000\u0510\u0511"+ - "\u0007\u0002\u0000\u0000\u0511\u0512\u0007\u0001\u0000\u0000\u0512\u0513"+ - "\u0007\u0017\u0000\u0000\u0513\u0100\u0001\u0000\u0000\u0000\u0514\u0515"+ - "\u0007\u0003\u0000\u0000\u0515\u0516\u0007\u0012\u0000\u0000\u0516\u0517"+ - "\u0007\u0006\u0000\u0000\u0517\u0518\u0007\u0000\u0000\u0000\u0518\u0519"+ - "\u0007\u0005\u0000\u0000\u0519\u051a\u0007\n\u0000\u0000\u051a\u0102\u0001"+ - "\u0000\u0000\u0000\u051b\u051c\u0007\u0003\u0000\u0000\u051c\u051d\u0007"+ - "\u0012\u0000\u0000\u051d\u051e\u0007\u000e\u0000\u0000\u051e\u051f\u0007"+ - "\n\u0000\u0000\u051f\u0520\u0007\u000b\u0000\u0000\u0520\u0521\u0007\u0005"+ - "\u0000\u0000\u0521\u0104\u0001\u0000\u0000\u0000\u0522\u0523\u0007\u0003"+ - "\u0000\u0000\u0523\u0524\u0007\u000e\u0000\u0000\u0524\u0525\u0007\n\u0000"+ - "\u0000\u0525\u0526\u0007\u000b\u0000\u0000\u0526\u0106\u0001\u0000\u0000"+ - "\u0000\u0527\u0528\u0007\u0003\u0000\u0000\u0528\u0529\u0007\u000e\u0000"+ - "\u0000\u0529\u052a\u0007\n\u0000\u0000\u052a\u052b\u0007\u000b\u0000\u0000"+ - "\u052b\u052c\u0007\u000e\u0000\u0000\u052c\u0108\u0001\u0000\u0000\u0000"+ - "\u052d\u052e\u0007\u0003\u0000\u0000\u052e\u052f\u0007\u000e\u0000\u0000"+ - "\u052f\u0530\u0007\b\u0000\u0000\u0530\u0531\u0007\u0004\u0000\u0000\u0531"+ - "\u0532\u0007\u0015\u0000\u0000\u0532\u010a\u0001\u0000\u0000\u0000\u0533"+ - "\u0534\u0007\u0018\u0000\u0000\u0534\u0535\u0007\u0000\u0000\u0000\u0535"+ - "\u0536\u0007\t\u0000\u0000\u0536\u0537\u0007\u0003\u0000\u0000\u0537\u0538"+ - "\u0007\n\u0000\u0000\u0538\u0539\u0007\u000e\u0000\u0000\u0539\u010c\u0001"+ - "\u0000\u0000\u0000\u053a\u053b\u0007\f\u0000\u0000\u053b\u053c\u0007\u0010"+ - "\u0000\u0000\u053c\u053d\u0007\n\u0000\u0000\u053d\u053e\u0007\u0004\u0000"+ - "\u0000\u053e\u010e\u0001\u0000\u0000\u0000\u053f\u0540\u0007\f\u0000\u0000"+ - "\u0540\u0541\u0007\u0010\u0000\u0000\u0541\u0542\u0007\n\u0000\u0000\u0542"+ - "\u0543\u0007\u000b\u0000\u0000\u0543\u0544\u0007\n\u0000\u0000\u0544\u0110"+ - "\u0001\u0000\u0000\u0000\u0545\u0546\u0007\f\u0000\u0000\u0546\u0547\u0007"+ - "\b\u0000\u0000\u0547\u0548\u0007\u0005\u0000\u0000\u0548\u0549\u0007\u0010"+ - 
"\u0000\u0000\u0549\u0112\u0001\u0000\u0000\u0000\u054a\u054b\u0007\u0003"+ - "\u0000\u0000\u054b\u054c\u0007\u0004\u0000\u0000\u054c\u054d\u0007\b\u0000"+ - "\u0000\u054d\u054e\u0007\u0019\u0000\u0000\u054e\u054f\u0007\u0003\u0000"+ - "\u0000\u054f\u0550\u0007\n\u0000\u0000\u0550\u0114\u0001\u0000\u0000\u0000"+ - "\u0551\u0552\u0007\u0003\u0000\u0000\u0552\u0553\u0007\u0004\u0000\u0000"+ - "\u0553\u0554\u0007\u0004\u0000\u0000\u0554\u0555\u0007\n\u0000\u0000\u0555"+ - "\u0556\u0007\u000e\u0000\u0000\u0556\u0557\u0007\u0005\u0000\u0000\u0557"+ - "\u0116\u0001\u0000\u0000\u0000\u0558\u0559\u0007\u0003\u0000\u0000\u0559"+ - "\u055a\u0007\u0003\u0000\u0000\u055a\u055b\u0007\b\u0000\u0000\u055b\u055c"+ - "\u0007\u0006\u0000\u0000\u055c\u0118\u0001\u0000\u0000\u0000\u055d\u055f"+ - "\u0003\u000f\u0007\u0000\u055e\u0560\u0003\u019f\u00cf\u0000\u055f\u055e"+ - "\u0001\u0000\u0000\u0000\u0560\u0561\u0001\u0000\u0000\u0000\u0561\u055f"+ - "\u0001\u0000\u0000\u0000\u0561\u0562\u0001\u0000\u0000\u0000\u0562\u0563"+ - "\u0001\u0000\u0000\u0000\u0563\u0564\u0003\u01c1\u00e0\u0000\u0564\u011a"+ - "\u0001\u0000\u0000\u0000\u0565\u0567\u0003q8\u0000\u0566\u0568\u0003\u019f"+ - "\u00cf\u0000\u0567\u0566\u0001\u0000\u0000\u0000\u0568\u0569\u0001\u0000"+ - "\u0000\u0000\u0569\u0567\u0001\u0000\u0000\u0000\u0569\u056a\u0001\u0000"+ - "\u0000\u0000\u056a\u056b\u0001\u0000\u0000\u0000\u056b\u056c\u0003\u01bd"+ - "\u00de\u0000\u056c\u011c\u0001\u0000\u0000\u0000\u056d\u056f\u0003\u00c3"+ - "a\u0000\u056e\u0570\u0003\u019f\u00cf\u0000\u056f\u056e\u0001\u0000\u0000"+ - "\u0000\u0570\u0571\u0001\u0000\u0000\u0000\u0571\u056f\u0001\u0000\u0000"+ - "\u0000\u0571\u0572\u0001\u0000\u0000\u0000\u0572\u0573\u0001\u0000\u0000"+ - "\u0000\u0573\u0574\u0003\u01bb\u00dd\u0000\u0574\u011e\u0001\u0000\u0000"+ - "\u0000\u0575\u0577\u0003\u01c3\u00e1\u0000\u0576\u0578\u0003\u019f\u00cf"+ - "\u0000\u0577\u0576\u0001\u0000\u0000\u0000\u0578\u0579\u0001\u0000\u0000"+ - "\u0000\u0579\u0577\u0001\u0000\u0000\u0000\u0579\u057a\u0001\u0000\u0000"+ - "\u0000\u057a\u057b\u0001\u0000\u0000\u0000\u057b\u057d\u0003\u01b9\u00dc"+ - "\u0000\u057c\u057e\u0003\u019f\u00cf\u0000\u057d\u057c\u0001\u0000\u0000"+ - "\u0000\u057e\u057f\u0001\u0000\u0000\u0000\u057f\u057d\u0001\u0000\u0000"+ - "\u0000\u057f\u0580\u0001\u0000\u0000\u0000\u0580\u0581\u0001\u0000\u0000"+ - "\u0000\u0581\u0582\u0003\u00c3a\u0000\u0582\u0120\u0001\u0000\u0000\u0000"+ - "\u0583\u0585\u0003\u01b7\u00db\u0000\u0584\u0586\u0003\u019f\u00cf\u0000"+ - "\u0585\u0584\u0001\u0000\u0000\u0000\u0586\u0587\u0001\u0000\u0000\u0000"+ - "\u0587\u0585\u0001\u0000\u0000\u0000\u0587\u0588\u0001\u0000\u0000\u0000"+ - "\u0588\u0589\u0001\u0000\u0000\u0000\u0589\u058b\u0003\u01c5\u00e2\u0000"+ - "\u058a\u058c\u0003\u019f\u00cf\u0000\u058b\u058a\u0001\u0000\u0000\u0000"+ - "\u058c\u058d\u0001\u0000\u0000\u0000\u058d\u058b\u0001\u0000\u0000\u0000"+ - "\u058d\u058e\u0001\u0000\u0000\u0000\u058e\u058f\u0001\u0000\u0000\u0000"+ - "\u058f\u0590\u0003\u00c3a\u0000\u0590\u0122\u0001\u0000\u0000\u0000\u0591"+ - "\u0593\u0003\u0091H\u0000\u0592\u0594\u0003\u019f\u00cf\u0000\u0593\u0592"+ - "\u0001\u0000\u0000\u0000\u0594\u0595\u0001\u0000\u0000\u0000\u0595\u0593"+ - "\u0001\u0000\u0000\u0000\u0595\u0596\u0001\u0000\u0000\u0000\u0596\u0597"+ - "\u0001\u0000\u0000\u0000\u0597\u0599\u0003\u00bf_\u0000\u0598\u059a\u0003"+ - "\u019f\u00cf\u0000\u0599\u0598\u0001\u0000\u0000\u0000\u059a\u059b\u0001"+ - "\u0000\u0000\u0000\u059b\u0599\u0001\u0000\u0000\u0000\u059b\u059c\u0001"+ - 
"\u0000\u0000\u0000\u059c\u059d\u0001\u0000\u0000\u0000\u059d\u059e\u0003"+ - "\u0087C\u0000\u059e\u0124\u0001\u0000\u0000\u0000\u059f\u05a0\u0007\u0000"+ - "\u0000\u0000\u05a0\u05a1\u0007\u000b\u0000\u0000\u05a1\u05a2\u0007\u000b"+ - "\u0000\u0000\u05a2\u05a3\u0007\u0000\u0000\u0000\u05a3\u05a4\u0007\r\u0000"+ - "\u0000\u05a4\u0126\u0001\u0000\u0000\u0000\u05a5\u05a6\u0007\u000f\u0000"+ - "\u0000\u05a6\u05a7\u0007\b\u0000\u0000\u05a7\u05a8\u0007\u0004\u0000\u0000"+ - "\u05a8\u05a9\u0007\u0000\u0000\u0000\u05a9\u05aa\u0007\u000b\u0000\u0000"+ - "\u05aa\u05ab\u0007\r\u0000\u0000\u05ab\u0128\u0001\u0000\u0000\u0000\u05ac"+ - "\u05ad\u0007\u000f\u0000\u0000\u05ad\u05ae\u0007\u0002\u0000\u0000\u05ae"+ - "\u05af\u0007\u0002\u0000\u0000\u05af\u05b0\u0007\t\u0000\u0000\u05b0\u05b1"+ - "\u0007\n\u0000\u0000\u05b1\u05b2\u0007\u0000\u0000\u0000\u05b2\u05b3\u0007"+ - "\u0004\u0000\u0000\u05b3\u012a\u0001\u0000\u0000\u0000\u05b4\u05b5\u0007"+ - "\u0006\u0000\u0000\u05b5\u05b6\u0007\u0002\u0000\u0000\u05b6\u05b7\u0007"+ - "\u0003\u0000\u0000\u05b7\u05b8\u0007\u000f\u0000\u0000\u05b8\u05b9\u0007"+ - "\t\u0000\u0000\u05b9\u05ba\u0007\n\u0000\u0000\u05ba\u012c\u0001\u0000"+ - "\u0000\u0000\u05bb\u05bc\u0007\n\u0000\u0000\u05bc\u05bd\u0007\u0004\u0000"+ - "\u0000\u05bd\u05be\u0007\u0003\u0000\u0000\u05be\u05bf\u0007\u0007\u0000"+ - "\u0000\u05bf\u012e\u0001\u0000\u0000\u0000\u05c0\u05c1\u0007\u0011\u0000"+ - "\u0000\u05c1\u05c2\u0007\t\u0000\u0000\u05c2\u05c3\u0007\u0002\u0000\u0000"+ - "\u05c3\u05c4\u0007\u0000\u0000\u0000\u05c4\u05c5\u0007\u0005\u0000\u0000"+ - "\u05c5\u0130\u0001\u0000\u0000\u0000\u05c6\u05c7\u0007\u0015\u0000\u0000"+ - "\u05c7\u05c8\u0007\n\u0000\u0000\u05c8\u05c9\u0007\u0002\u0000\u0000\u05c9"+ - "\u05ca\u0007\u0007\u0000\u0000\u05ca\u05cb\u0007\n\u0000\u0000\u05cb\u05cc"+ - "\u0007\u0005\u0000\u0000\u05cc\u05cd\u0007\u000b\u0000\u0000\u05cd\u05ce"+ - "\u0007\r\u0000\u0000\u05ce\u0132\u0001\u0000\u0000\u0000\u05cf\u05d0\u0007"+ - "\b\u0000\u0000\u05d0\u05d1\u0007\u0004\u0000\u0000\u05d1\u05d2\u0007\u0005"+ - "\u0000\u0000\u05d2\u05d3\u0007\n\u0000\u0000\u05d3\u05d4\u0007\u0015\u0000"+ - "\u0000\u05d4\u05d5\u0007\n\u0000\u0000\u05d5\u05d6\u0007\u000b\u0000\u0000"+ - "\u05d6\u0134\u0001\u0000\u0000\u0000\u05d7\u05d8\u0007\t\u0000\u0000\u05d8"+ - "\u05d9\u0007\u0002\u0000\u0000\u05d9\u05da\u0007\u0004\u0000\u0000\u05da"+ - "\u05db\u0007\u0015\u0000\u0000\u05db\u0136\u0001\u0000\u0000\u0000\u05dc"+ - "\u05dd\u0007\u0007\u0000\u0000\u05dd\u05de\u0007\u0000\u0000\u0000\u05de"+ - "\u05df\u0007\u0012\u0000\u0000\u05df\u0138\u0001\u0000\u0000\u0000\u05e0"+ - "\u05e1\u0007\u0004\u0000\u0000\u05e1\u05e2\u0007\u0003\u0000\u0000\u05e2"+ - "\u05e3\u0007\u0007\u0000\u0000\u05e3\u05e4\u0007\u000f\u0000\u0000\u05e4"+ - "\u05e5\u0007\n\u0000\u0000\u05e5\u05e6\u0007\u000b\u0000\u0000\u05e6\u013a"+ - "\u0001\u0000\u0000\u0000\u05e7\u05e8\u0007\u0012\u0000\u0000\u05e8\u05e9"+ - "\u0007\u0002\u0000\u0000\u05e9\u05ea\u0007\b\u0000\u0000\u05ea\u05eb\u0007"+ - "\u0004\u0000\u0000\u05eb\u05ec\u0007\u0005\u0000\u0000\u05ec\u013c\u0001"+ - "\u0000\u0000\u0000\u05ed\u05ee\u0007\u000b\u0000\u0000\u05ee\u05ef\u0007"+ - "\n\u0000\u0000\u05ef\u05f0\u0007\u0001\u0000\u0000\u05f0\u05f1\u0007\u0002"+ - "\u0000\u0000\u05f1\u05f2\u0007\u000b\u0000\u0000\u05f2\u05f3\u0007\u0006"+ - "\u0000\u0000\u05f3\u013e\u0001\u0000\u0000\u0000\u05f4\u05f5\u0007\u000e"+ - "\u0000\u0000\u05f5\u05f6\u0007\u0005\u0000\u0000\u05f6\u05f7\u0007\u000b"+ - "\u0000\u0000\u05f7\u05f8\u0007\b\u0000\u0000\u05f8\u05f9\u0007\u0004\u0000"+ - 
"\u0000\u05f9\u05fa\u0007\u0015\u0000\u0000\u05fa\u0140\u0001\u0000\u0000"+ - "\u0000\u05fb\u05fc\u0007\u0005\u0000\u0000\u05fc\u05fd\u0007\b\u0000\u0000"+ - "\u05fd\u05fe\u0007\u0007\u0000\u0000\u05fe\u05ff\u0007\n\u0000\u0000\u05ff"+ - "\u0600\u0007\u000e\u0000\u0000\u0600\u0601\u0007\u0005\u0000\u0000\u0601"+ - "\u0602\u0007\u0000\u0000\u0000\u0602\u0603\u0007\u0007\u0000\u0000\u0603"+ - "\u0604\u0007\u0012\u0000\u0000\u0604\u0142\u0001\u0000\u0000\u0000\u0605"+ - "\u0606\u0007\u0000\u0000\u0000\u0606\u0607\u0007\u0004\u0000\u0000\u0607"+ - "\u0608\u0007\r\u0000\u0000\u0608\u0144\u0001\u0000\u0000\u0000\u0609\u060a"+ - "\u0007\u0000\u0000\u0000\u060a\u060b\u0007\u0004\u0000\u0000\u060b\u060c"+ - "\u0007\r\u0000\u0000\u060c\u060d\u0007\u0000\u0000\u0000\u060d\u060e\u0007"+ - "\u0005\u0000\u0000\u060e\u060f\u0007\u0002\u0000\u0000\u060f\u0610\u0007"+ - "\u0007\u0000\u0000\u0610\u0611\u0007\b\u0000\u0000\u0611\u0612\u0007\u0001"+ - "\u0000\u0000\u0612\u0146\u0001\u0000\u0000\u0000\u0613\u0614\u0007\u0000"+ - "\u0000\u0000\u0614\u0615\u0007\u0004\u0000\u0000\u0615\u0616\u0007\r\u0000"+ - "\u0000\u0616\u0617\u0007\u0016\u0000\u0000\u0617\u0618\u0007\u000e\u0000"+ - "\u0000\u0618\u0619\u0007\u0002\u0000\u0000\u0619\u061a\u0007\u0004\u0000"+ - "\u0000\u061a\u061b\u0007\u0000\u0000\u0000\u061b\u061c\u0007\u0005\u0000"+ - "\u0000\u061c\u061d\u0007\u0002\u0000\u0000\u061d\u061e\u0007\u0007\u0000"+ - "\u0000\u061e\u061f\u0007\b\u0000\u0000\u061f\u0620\u0007\u0001\u0000\u0000"+ - "\u0620\u0148\u0001\u0000\u0000\u0000\u0621\u0622\u0007\u0000\u0000\u0000"+ - "\u0622\u0623\u0007\u0004\u0000\u0000\u0623\u0624\u0007\r\u0000\u0000\u0624"+ - "\u0625\u0007\u000b\u0000\u0000\u0625\u0626\u0007\n\u0000\u0000\u0626\u0627"+ - "\u0007\u0001\u0000\u0000\u0627\u0628\u0007\u0002\u0000\u0000\u0628\u0629"+ - "\u0007\u000b\u0000\u0000\u0629\u062a\u0007\u0006\u0000\u0000\u062a\u014a"+ - "\u0001\u0000\u0000\u0000\u062b\u062c\u0007\u000e\u0000\u0000\u062c\u062d"+ - "\u0007\u0001\u0000\u0000\u062d\u062e\u0007\u0000\u0000\u0000\u062e\u062f"+ - "\u0007\t\u0000\u0000\u062f\u0630\u0007\u0000\u0000\u0000\u0630\u0631\u0007"+ - "\u000b\u0000\u0000\u0631\u014c\u0001\u0000\u0000\u0000\u0632\u0633\u0005"+ - ";\u0000\u0000\u0633\u014e\u0001\u0000\u0000\u0000\u0634\u0635\u0005,\u0000"+ - "\u0000\u0635\u0150\u0001\u0000\u0000\u0000\u0636\u0637\u0005:\u0000\u0000"+ - "\u0637\u0152\u0001\u0000\u0000\u0000\u0638\u0639\u0005(\u0000\u0000\u0639"+ - "\u0154\u0001\u0000\u0000\u0000\u063a\u063b\u0005)\u0000\u0000\u063b\u0156"+ - "\u0001\u0000\u0000\u0000\u063c\u063d\u0005[\u0000\u0000\u063d\u0158\u0001"+ - "\u0000\u0000\u0000\u063e\u063f\u0005]\u0000\u0000\u063f\u015a\u0001\u0000"+ - "\u0000\u0000\u0640\u0641\u0005{\u0000\u0000\u0641\u015c\u0001\u0000\u0000"+ - "\u0000\u0642\u0643\u0005}\u0000\u0000\u0643\u015e\u0001\u0000\u0000\u0000"+ - "\u0644\u0645\u0005*\u0000\u0000\u0645\u0160\u0001\u0000\u0000\u0000\u0646"+ - "\u0647\u0005.\u0000\u0000\u0647\u0162\u0001\u0000\u0000\u0000\u0648\u0649"+ - "\u0005$\u0000\u0000\u0649\u0164\u0001\u0000\u0000\u0000\u064a\u064b\u0005"+ - "?\u0000\u0000\u064b\u0166\u0001\u0000\u0000\u0000\u064c\u064d\u0005<\u0000"+ - "\u0000\u064d\u0168\u0001\u0000\u0000\u0000\u064e\u064f\u0005<\u0000\u0000"+ - "\u064f\u0650\u0005=\u0000\u0000\u0650\u016a\u0001\u0000\u0000\u0000\u0651"+ - "\u0652\u0005>\u0000\u0000\u0652\u016c\u0001\u0000\u0000\u0000\u0653\u0654"+ - "\u0005>\u0000\u0000\u0654\u0655\u0005=\u0000\u0000\u0655\u016e\u0001\u0000"+ - "\u0000\u0000\u0656\u0657\u0005=\u0000\u0000\u0657\u0170\u0001\u0000\u0000"+ - 
"\u0000\u0658\u0659\u0005!\u0000\u0000\u0659\u065a\u0005=\u0000\u0000\u065a"+ - "\u0172\u0001\u0000\u0000\u0000\u065b\u065c\u0005<\u0000\u0000\u065c\u065d"+ - "\u0007\u0000\u0000\u0000\u065d\u065e\u0007\u0004\u0000\u0000\u065e\u065f"+ - "\u0007\r\u0000\u0000\u065f\u0174\u0001\u0000\u0000\u0000\u0660\u0661\u0005"+ - "<\u0000\u0000\u0661\u0662\u0005=\u0000\u0000\u0662\u0663\u0001\u0000\u0000"+ - "\u0000\u0663\u0664\u0007\u0000\u0000\u0000\u0664\u0665\u0007\u0004\u0000"+ - "\u0000\u0665\u0666\u0007\r\u0000\u0000\u0666\u0176\u0001\u0000\u0000\u0000"+ - "\u0667\u0668\u0005>\u0000\u0000\u0668\u0669\u0007\u0000\u0000\u0000\u0669"+ - "\u066a\u0007\u0004\u0000\u0000\u066a\u066b\u0007\r\u0000\u0000\u066b\u0178"+ - "\u0001\u0000\u0000\u0000\u066c\u066d\u0005>\u0000\u0000\u066d\u066e\u0005"+ - "=\u0000\u0000\u066e\u066f\u0001\u0000\u0000\u0000\u066f\u0670\u0007\u0000"+ - "\u0000\u0000\u0670\u0671\u0007\u0004\u0000\u0000\u0671\u0672\u0007\r\u0000"+ - "\u0000\u0672\u017a\u0001\u0000\u0000\u0000\u0673\u0674\u0005=\u0000\u0000"+ - "\u0674\u0675\u0007\u0000\u0000\u0000\u0675\u0676\u0007\u0004\u0000\u0000"+ - "\u0676\u0677\u0007\r\u0000\u0000\u0677\u017c\u0001\u0000\u0000\u0000\u0678"+ - "\u0679\u0005!\u0000\u0000\u0679\u067a\u0005=\u0000\u0000\u067a\u067b\u0001"+ - "\u0000\u0000\u0000\u067b\u067c\u0007\u0000\u0000\u0000\u067c\u067d\u0007"+ - "\u0004\u0000\u0000\u067d\u067e\u0007\r\u0000\u0000\u067e\u017e\u0001\u0000"+ - "\u0000\u0000\u067f\u0680\u0005+\u0000\u0000\u0680\u0180\u0001\u0000\u0000"+ - "\u0000\u0681\u0682\u0005-\u0000\u0000\u0682\u0182\u0001\u0000\u0000\u0000"+ - "\u0683\u0684\u0005/\u0000\u0000\u0684\u0184\u0001\u0000\u0000\u0000\u0685"+ - "\u0686\u0007\u0006\u0000\u0000\u0686\u0687\u0007\b\u0000\u0000\u0687\u0688"+ - "\u0007\u0018\u0000\u0000\u0688\u0186\u0001\u0000\u0000\u0000\u0689\u068a"+ - "\u0005|\u0000\u0000\u068a\u068b\u0005|\u0000\u0000\u068b\u0188\u0001\u0000"+ - "\u0000\u0000\u068c\u068d\u0007\u0004\u0000\u0000\u068d\u068e\u0007\u0003"+ - "\u0000\u0000\u068e\u068f\u0007\t\u0000\u0000\u068f\u0690\u0007\t\u0000"+ - "\u0000\u0690\u018a\u0001\u0000\u0000\u0000\u0691\u0692\u0007\u0011\u0000"+ - "\u0000\u0692\u0693\u0007\u0000\u0000\u0000\u0693\u0694\u0007\t\u0000\u0000"+ - "\u0694\u0695\u0007\u000e\u0000\u0000\u0695\u0696\u0007\n\u0000\u0000\u0696"+ - "\u018c\u0001\u0000\u0000\u0000\u0697\u0698\u0007\u0005\u0000\u0000\u0698"+ - "\u0699\u0007\u000b\u0000\u0000\u0699\u069a\u0007\u0003\u0000\u0000\u069a"+ - "\u069b\u0007\n\u0000\u0000\u069b\u018e\u0001\u0000\u0000\u0000\u069c\u069e"+ - "\u0003\u01ab\u00d5\u0000\u069d\u069c\u0001\u0000\u0000\u0000\u069e\u069f"+ - "\u0001\u0000\u0000\u0000\u069f\u069d\u0001\u0000\u0000\u0000\u069f\u06a0"+ - "\u0001\u0000\u0000\u0000\u06a0\u0190\u0001\u0000\u0000\u0000\u06a1\u06a3"+ - "\u0003\u01ab\u00d5\u0000\u06a2\u06a1\u0001\u0000\u0000\u0000\u06a3\u06a6"+ - "\u0001\u0000\u0000\u0000\u06a4\u06a2\u0001\u0000\u0000\u0000\u06a4\u06a5"+ - "\u0001\u0000\u0000\u0000\u06a5\u06a7\u0001\u0000\u0000\u0000\u06a6\u06a4"+ - "\u0001\u0000\u0000\u0000\u06a7\u06a9\u0005.\u0000\u0000\u06a8\u06aa\u0003"+ - "\u01ab\u00d5\u0000\u06a9\u06a8\u0001\u0000\u0000\u0000\u06aa\u06ab\u0001"+ - "\u0000\u0000\u0000\u06ab\u06a9\u0001\u0000\u0000\u0000\u06ab\u06ac\u0001"+ - "\u0000\u0000\u0000\u06ac\u06b6\u0001\u0000\u0000\u0000\u06ad\u06af\u0007"+ - "\n\u0000\u0000\u06ae\u06b0\u0007\u001a\u0000\u0000\u06af\u06ae\u0001\u0000"+ - "\u0000\u0000\u06af\u06b0\u0001\u0000\u0000\u0000\u06b0\u06b2\u0001\u0000"+ - "\u0000\u0000\u06b1\u06b3\u0003\u01ab\u00d5\u0000\u06b2\u06b1\u0001\u0000"+ - 
"\u0000\u0000\u06b3\u06b4\u0001\u0000\u0000\u0000\u06b4\u06b2\u0001\u0000"+ - "\u0000\u0000\u06b4\u06b5\u0001\u0000\u0000\u0000\u06b5\u06b7\u0001\u0000"+ - "\u0000\u0000\u06b6\u06ad\u0001\u0000\u0000\u0000\u06b6\u06b7\u0001\u0000"+ - "\u0000\u0000\u06b7\u06c7\u0001\u0000\u0000\u0000\u06b8\u06ba\u0003\u01ab"+ - "\u00d5\u0000\u06b9\u06b8\u0001\u0000\u0000\u0000\u06ba\u06bb\u0001\u0000"+ - "\u0000\u0000\u06bb\u06b9\u0001\u0000\u0000\u0000\u06bb\u06bc\u0001\u0000"+ - "\u0000\u0000\u06bc\u06bd\u0001\u0000\u0000\u0000\u06bd\u06bf\u0007\n\u0000"+ - "\u0000\u06be\u06c0\u0007\u001a\u0000\u0000\u06bf\u06be\u0001\u0000\u0000"+ - "\u0000\u06bf\u06c0\u0001\u0000\u0000\u0000\u06c0\u06c2\u0001\u0000\u0000"+ - "\u0000\u06c1\u06c3\u0003\u01ab\u00d5\u0000\u06c2\u06c1\u0001\u0000\u0000"+ - "\u0000\u06c3\u06c4\u0001\u0000\u0000\u0000\u06c4\u06c2\u0001\u0000\u0000"+ - "\u0000\u06c4\u06c5\u0001\u0000\u0000\u0000\u06c5\u06c7\u0001\u0000\u0000"+ - "\u0000\u06c6\u06a4\u0001\u0000\u0000\u0000\u06c6\u06b9\u0001\u0000\u0000"+ - "\u0000\u06c7\u0192\u0001\u0000\u0000\u0000\u06c8\u06cb\u0003\u018f\u00c7"+ - "\u0000\u06c9\u06cb\u0003\u0191\u00c8\u0000\u06ca\u06c8\u0001\u0000\u0000"+ - "\u0000\u06ca\u06c9\u0001\u0000\u0000\u0000\u06cb\u06cc\u0001\u0000\u0000"+ - "\u0000\u06cc\u06cd\u0007\u0004\u0000\u0000\u06cd\u0194\u0001\u0000\u0000"+ - "\u0000\u06ce\u06d3\u0005\"\u0000\u0000\u06cf\u06d2\u0003\u01ad\u00d6\u0000"+ - "\u06d0\u06d2\t\u0000\u0000\u0000\u06d1\u06cf\u0001\u0000\u0000\u0000\u06d1"+ - "\u06d0\u0001\u0000\u0000\u0000\u06d2\u06d5\u0001\u0000\u0000\u0000\u06d3"+ - "\u06d4\u0001\u0000\u0000\u0000\u06d3\u06d1\u0001\u0000\u0000\u0000\u06d4"+ - "\u06d6\u0001\u0000\u0000\u0000\u06d5\u06d3\u0001\u0000\u0000\u0000\u06d6"+ - "\u06d7\u0005\"\u0000\u0000\u06d7\u0196\u0001\u0000\u0000\u0000\u06d8\u06dd"+ - "\u0005\'\u0000\u0000\u06d9\u06dc\u0003\u01af\u00d7\u0000\u06da\u06dc\t"+ - "\u0000\u0000\u0000\u06db\u06d9\u0001\u0000\u0000\u0000\u06db\u06da\u0001"+ - "\u0000\u0000\u0000\u06dc\u06df\u0001\u0000\u0000\u0000\u06dd\u06de\u0001"+ - "\u0000\u0000\u0000\u06dd\u06db\u0001\u0000\u0000\u0000\u06de\u06e0\u0001"+ - "\u0000\u0000\u0000\u06df\u06dd\u0001\u0000\u0000\u0000\u06e0\u06e1\u0005"+ - "\'\u0000\u0000\u06e1\u0198\u0001\u0000\u0000\u0000\u06e2\u06e3\u0007\u001b"+ - "\u0000\u0000\u06e3\u06e4\u0007\u001c\u0000\u0000\u06e4\u06e5\u0007\u001b"+ - "\u0000\u0000\u06e5\u06e6\u0007\u001d\u0000\u0000\u06e6\u019a\u0001\u0000"+ - "\u0000\u0000\u06e7\u06ed\u0003\u01a9\u00d4\u0000\u06e8\u06ec\u0003\u01a9"+ - "\u00d4\u0000\u06e9\u06ec\u0003\u01ab\u00d5\u0000\u06ea\u06ec\u0003\u01b3"+ - "\u00d9\u0000\u06eb\u06e8\u0001\u0000\u0000\u0000\u06eb\u06e9\u0001\u0000"+ - "\u0000\u0000\u06eb\u06ea\u0001\u0000\u0000\u0000\u06ec\u06ef\u0001\u0000"+ - "\u0000\u0000\u06ed\u06eb\u0001\u0000\u0000\u0000\u06ed\u06ee\u0001\u0000"+ - "\u0000\u0000\u06ee\u019c\u0001\u0000\u0000\u0000\u06ef\u06ed\u0001\u0000"+ - "\u0000\u0000\u06f0\u06f3\u0003\u01ab\u00d5\u0000\u06f1\u06f3\u0003\u01b3"+ - "\u00d9\u0000\u06f2\u06f0\u0001\u0000\u0000\u0000\u06f2\u06f1\u0001\u0000"+ - "\u0000\u0000\u06f3\u06f9\u0001\u0000\u0000\u0000\u06f4\u06f8\u0003\u01a9"+ - "\u00d4\u0000\u06f5\u06f8\u0003\u01ab\u00d5\u0000\u06f6\u06f8\u0003\u01b3"+ - "\u00d9\u0000\u06f7\u06f4\u0001\u0000\u0000\u0000\u06f7\u06f5\u0001\u0000"+ - "\u0000\u0000\u06f7\u06f6\u0001\u0000\u0000\u0000\u06f8\u06fb\u0001\u0000"+ - "\u0000\u0000\u06f9\u06f7\u0001\u0000\u0000\u0000\u06f9\u06fa\u0001\u0000"+ - "\u0000\u0000\u06fa\u019e\u0001\u0000\u0000\u0000\u06fb\u06f9\u0001\u0000"+ - 
"\u0000\u0000\u06fc\u06fe\u0007\u001e\u0000\u0000\u06fd\u06fc\u0001\u0000"+ - "\u0000\u0000\u06fe\u06ff\u0001\u0000\u0000\u0000\u06ff\u06fd\u0001\u0000"+ - "\u0000\u0000\u06ff\u0700\u0001\u0000\u0000\u0000\u0700\u0701\u0001\u0000"+ - "\u0000\u0000\u0701\u0702\u0006\u00cf\u0000\u0000\u0702\u01a0\u0001\u0000"+ - "\u0000\u0000\u0703\u0704\u0005/\u0000\u0000\u0704\u0705\u0005*\u0000\u0000"+ - "\u0705\u0706\u0001\u0000\u0000\u0000\u0706\u070a\b\u001f\u0000\u0000\u0707"+ - "\u0709\t\u0000\u0000\u0000\u0708\u0707\u0001\u0000\u0000\u0000\u0709\u070c"+ - "\u0001\u0000\u0000\u0000\u070a\u070b\u0001\u0000\u0000\u0000\u070a\u0708"+ - "\u0001\u0000\u0000\u0000\u070b\u070d\u0001\u0000\u0000\u0000\u070c\u070a"+ - "\u0001\u0000\u0000\u0000\u070d\u070e\u0005*\u0000\u0000\u070e\u070f\u0005"+ - "/\u0000\u0000\u070f\u0710\u0001\u0000\u0000\u0000\u0710\u0711\u0006\u00d0"+ - "\u0000\u0000\u0711\u01a2\u0001\u0000\u0000\u0000\u0712\u0713\u0005/\u0000"+ - "\u0000\u0713\u0714\u0005/\u0000\u0000\u0714\u0718\u0001\u0000\u0000\u0000"+ - "\u0715\u0717\b \u0000\u0000\u0716\u0715\u0001\u0000\u0000\u0000\u0717"+ - "\u071a\u0001\u0000\u0000\u0000\u0718\u0716\u0001\u0000\u0000\u0000\u0718"+ - "\u0719\u0001\u0000\u0000\u0000\u0719\u071b\u0001\u0000\u0000\u0000\u071a"+ - "\u0718\u0001\u0000\u0000\u0000\u071b\u071c\u0006\u00d1\u0000\u0000\u071c"+ - "\u01a4\u0001\u0000\u0000\u0000\u071d\u0721\u0005#\u0000\u0000\u071e\u0720"+ - "\b \u0000\u0000\u071f\u071e\u0001\u0000\u0000\u0000\u0720\u0723\u0001"+ - "\u0000\u0000\u0000\u0721\u071f\u0001\u0000\u0000\u0000\u0721\u0722\u0001"+ - "\u0000\u0000\u0000\u0722\u0724\u0001\u0000\u0000\u0000\u0723\u0721\u0001"+ - "\u0000\u0000\u0000\u0724\u0725\u0006\u00d2\u0000\u0000\u0725\u01a6\u0001"+ - "\u0000\u0000\u0000\u0726\u0727\t\u0000\u0000\u0000\u0727\u01a8\u0001\u0000"+ - "\u0000\u0000\u0728\u0729\u0007!\u0000\u0000\u0729\u01aa\u0001\u0000\u0000"+ - "\u0000\u072a\u072b\u000209\u0000\u072b\u01ac\u0001\u0000\u0000\u0000\u072c"+ - "\u072f\u0005\\\u0000\u0000\u072d\u0730\u0007\"\u0000\u0000\u072e\u0730"+ - "\u0003\u01b5\u00da\u0000\u072f\u072d\u0001\u0000\u0000\u0000\u072f\u072e"+ - "\u0001\u0000\u0000\u0000\u0730\u01ae\u0001\u0000\u0000\u0000\u0731\u0734"+ - "\u0005\\\u0000\u0000\u0732\u0735\u0007#\u0000\u0000\u0733\u0735\u0003"+ - "\u01b5\u00da\u0000\u0734\u0732\u0001\u0000\u0000\u0000\u0734\u0733\u0001"+ - "\u0000\u0000\u0000\u0735\u01b0\u0001\u0000\u0000\u0000\u0736\u0737\u0007"+ - "$\u0000\u0000\u0737\u01b2\u0001\u0000\u0000\u0000\u0738\u0739\u0005_\u0000"+ - "\u0000\u0739\u01b4\u0001\u0000\u0000\u0000\u073a\u073b\u0005u\u0000\u0000"+ - "\u073b\u073c\u0003\u01b1\u00d8\u0000\u073c\u073d\u0003\u01b1\u00d8\u0000"+ - "\u073d\u073e\u0003\u01b1\u00d8\u0000\u073e\u073f\u0003\u01b1\u00d8\u0000"+ - "\u073f\u01b6\u0001\u0000\u0000\u0000\u0740\u0741\u0007\u0001\u0000\u0000"+ - "\u0741\u0742\u0007\t\u0000\u0000\u0742\u0743\u0007\n\u0000\u0000\u0743"+ - "\u0744\u0007\u0000\u0000\u0000\u0744\u0745\u0007\u000b\u0000\u0000\u0745"+ - "\u01b8\u0001\u0000\u0000\u0000\u0746\u0747\u0007\u0001\u0000\u0000\u0747"+ - "\u0748\u0007\u0003\u0000\u0000\u0748\u0749\u0007\u000b\u0000\u0000\u0749"+ - "\u074a\u0007\u000b\u0000\u0000\u074a\u074b\u0007\n\u0000\u0000\u074b\u074c"+ - "\u0007\u0004\u0000\u0000\u074c\u074d\u0007\u0005\u0000\u0000\u074d\u01ba"+ - "\u0001\u0000\u0000\u0000\u074e\u074f\u0007\n\u0000\u0000\u074f\u0750\u0007"+ - "\u0013\u0000\u0000\u0750\u0751\u0007\u0012\u0000\u0000\u0751\u0752\u0007"+ - "\b\u0000\u0000\u0752\u0753\u0007\u000b\u0000\u0000\u0753\u0754\u0007\n"+ - 
"\u0000\u0000\u0754\u01bc\u0001\u0000\u0000\u0000\u0755\u0756\u0007\n\u0000"+ - "\u0000\u0756\u0757\u0007\u0013\u0000\u0000\u0757\u0758\u0007\u0005\u0000"+ - "\u0000\u0758\u0759\u0007\n\u0000\u0000\u0759\u075a\u0007\u000b\u0000\u0000"+ - "\u075a\u075b\u0007\u0004\u0000\u0000\u075b\u075c\u0007\u0000\u0000\u0000"+ - "\u075c\u075d\u0007\t\u0000\u0000\u075d\u075e\u0007\t\u0000\u0000\u075e"+ - "\u075f\u0007\r\u0000\u0000\u075f\u01be\u0001\u0000\u0000\u0000\u0760\u0761"+ - "\u0007\u0012\u0000\u0000\u0761\u0762\u0007\u000b\u0000\u0000\u0762\u0763"+ - "\u0007\n\u0000\u0000\u0763\u0764\u0007\u0011\u0000\u0000\u0764\u0765\u0007"+ - "\n\u0000\u0000\u0765\u0766\u0007\u000b\u0000\u0000\u0766\u01c0\u0001\u0000"+ - "\u0000\u0000\u0767\u0768\u0007\u0012\u0000\u0000\u0768\u0769\u0007\u000b"+ - "\u0000\u0000\u0769\u076a\u0007\b\u0000\u0000\u076a\u076b\u0007\u0018\u0000"+ - "\u0000\u076b\u076c\u0007\b\u0000\u0000\u076c\u076d\u0007\t\u0000\u0000"+ - "\u076d\u076e\u0007\n\u0000\u0000\u076e\u076f\u0007\u0015\u0000\u0000\u076f"+ - "\u0770\u0007\n\u0000\u0000\u0770\u0771\u0007\u000e\u0000\u0000\u0771\u01c2"+ - "\u0001\u0000\u0000\u0000\u0772\u0773\u0007\u000b\u0000\u0000\u0773\u0774"+ - "\u0007\n\u0000\u0000\u0774\u0775\u0007\u0005\u0000\u0000\u0775\u0776\u0007"+ - "\u0000\u0000\u0000\u0776\u0777\u0007\b\u0000\u0000\u0777\u0778\u0007\u0004"+ - "\u0000\u0000\u0778\u01c4\u0001\u0000\u0000\u0000\u0779\u077a\u0007\u000b"+ - "\u0000\u0000\u077a\u077b\u0007\n\u0000\u0000\u077b\u077c\u0007\u0005\u0000"+ - "\u0000\u077c\u077d\u0007\u0000\u0000\u0000\u077d\u077e\u0007\b\u0000\u0000"+ - "\u077e\u077f\u0007\u0004\u0000\u0000\u077f\u0780\u0007\n\u0000\u0000\u0780"+ - "\u0781\u0007\u0006\u0000\u0000\u0781\u01c6\u0001\u0000\u0000\u0000*\u0000"+ - "\u01d5\u01d7\u026e\u0345\u03dc\u04ba\u0561\u0569\u0571\u0579\u057f\u0587"+ - "\u058d\u0595\u059b\u069f\u06a4\u06ab\u06af\u06b4\u06b6\u06bb\u06bf\u06c4"+ - "\u06c6\u06ca\u06d1\u06d3\u06db\u06dd\u06eb\u06ed\u06f2\u06f7\u06f9\u06ff"+ - "\u070a\u0718\u0721\u072f\u0734\u0001\u0006\u0000\u0000"; + "\u01a5\u0001\u0000\u0000\u0000\u0000\u01a7\u0001\u0000\u0000\u0000\u0000"+ + "\u01a9\u0001\u0000\u0000\u0000\u0000\u01ab\u0001\u0000\u0000\u0000\u0000"+ + "\u01ad\u0001\u0000\u0000\u0000\u0000\u01af\u0001\u0000\u0000\u0000\u0000"+ + "\u01b1\u0001\u0000\u0000\u0000\u0001\u01d1\u0001\u0000\u0000\u0000\u0003"+ + "\u01d5\u0001\u0000\u0000\u0000\u0005\u01d8\u0001\u0000\u0000\u0000\u0007"+ + "\u01da\u0001\u0000\u0000\u0000\t\u01ea\u0001\u0000\u0000\u0000\u000b\u01f4"+ + "\u0001\u0000\u0000\u0000\r\u01fc\u0001\u0000\u0000\u0000\u000f\u0200\u0001"+ + "\u0000\u0000\u0000\u0011\u0206\u0001\u0000\u0000\u0000\u0013\u020a\u0001"+ + "\u0000\u0000\u0000\u0015\u0210\u0001\u0000\u0000\u0000\u0017\u0217\u0001"+ + "\u0000\u0000\u0000\u0019\u0221\u0001\u0000\u0000\u0000\u001b\u0225\u0001"+ + "\u0000\u0000\u0000\u001d\u0228\u0001\u0000\u0000\u0000\u001f\u022c\u0001"+ + "\u0000\u0000\u0000!\u023a\u0001\u0000\u0000\u0000#\u0241\u0001\u0000\u0000"+ + "\u0000%\u0249\u0001\u0000\u0000\u0000\'\u024c\u0001\u0000\u0000\u0000"+ + ")\u0252\u0001\u0000\u0000\u0000+\u0257\u0001\u0000\u0000\u0000-\u025f"+ + "\u0001\u0000\u0000\u0000/\u0264\u0001\u0000\u0000\u00001\u026f\u0001\u0000"+ + "\u0000\u00003\u0277\u0001\u0000\u0000\u00005\u027d\u0001\u0000\u0000\u0000"+ + "7\u0284\u0001\u0000\u0000\u00009\u028f\u0001\u0000\u0000\u0000;\u0291"+ + "\u0001\u0000\u0000\u0000=\u0299\u0001\u0000\u0000\u0000?\u02a1\u0001\u0000"+ + "\u0000\u0000A\u02a8\u0001\u0000\u0000\u0000C\u02ad\u0001\u0000\u0000\u0000"+ + 
"E\u02b9\u0001\u0000\u0000\u0000G\u02c2\u0001\u0000\u0000\u0000I\u02ca"+ + "\u0001\u0000\u0000\u0000K\u02d3\u0001\u0000\u0000\u0000M\u02d8\u0001\u0000"+ + "\u0000\u0000O\u02e2\u0001\u0000\u0000\u0000Q\u02eb\u0001\u0000\u0000\u0000"+ + "S\u02f0\u0001\u0000\u0000\u0000U\u02f7\u0001\u0000\u0000\u0000W\u02fb"+ + "\u0001\u0000\u0000\u0000Y\u0305\u0001\u0000\u0000\u0000[\u0311\u0001\u0000"+ + "\u0000\u0000]\u0318\u0001\u0000\u0000\u0000_\u0320\u0001\u0000\u0000\u0000"+ + "a\u0327\u0001\u0000\u0000\u0000c\u032d\u0001\u0000\u0000\u0000e\u0333"+ + "\u0001\u0000\u0000\u0000g\u0337\u0001\u0000\u0000\u0000i\u033d\u0001\u0000"+ + "\u0000\u0000k\u0344\u0001\u0000\u0000\u0000m\u0349\u0001\u0000\u0000\u0000"+ + "o\u0350\u0001\u0000\u0000\u0000q\u0359\u0001\u0000\u0000\u0000s\u0363"+ + "\u0001\u0000\u0000\u0000u\u0369\u0001\u0000\u0000\u0000w\u0375\u0001\u0000"+ + "\u0000\u0000y\u0377\u0001\u0000\u0000\u0000{\u0382\u0001\u0000\u0000\u0000"+ + "}\u038b\u0001\u0000\u0000\u0000\u007f\u038e\u0001\u0000\u0000\u0000\u0081"+ + "\u0394\u0001\u0000\u0000\u0000\u0083\u0397\u0001\u0000\u0000\u0000\u0085"+ + "\u03a1\u0001\u0000\u0000\u0000\u0087\u03a7\u0001\u0000\u0000\u0000\u0089"+ + "\u03af\u0001\u0000\u0000\u0000\u008b\u03b6\u0001\u0000\u0000\u0000\u008d"+ + "\u03bb\u0001\u0000\u0000\u0000\u008f\u03be\u0001\u0000\u0000\u0000\u0091"+ + "\u03c3\u0001\u0000\u0000\u0000\u0093\u03c8\u0001\u0000\u0000\u0000\u0095"+ + "\u03cc\u0001\u0000\u0000\u0000\u0097\u03d2\u0001\u0000\u0000\u0000\u0099"+ + "\u03d7\u0001\u0000\u0000\u0000\u009b\u03dc\u0001\u0000\u0000\u0000\u009d"+ + "\u03e1\u0001\u0000\u0000\u0000\u009f\u03ea\u0001\u0000\u0000\u0000\u00a1"+ + "\u03f0\u0001\u0000\u0000\u0000\u00a3\u03f6\u0001\u0000\u0000\u0000\u00a5"+ + "\u03fb\u0001\u0000\u0000\u0000\u00a7\u0404\u0001\u0000\u0000\u0000\u00a9"+ + "\u0412\u0001\u0000\u0000\u0000\u00ab\u0414\u0001\u0000\u0000\u0000\u00ad"+ + "\u041d\u0001\u0000\u0000\u0000\u00af\u0424\u0001\u0000\u0000\u0000\u00b1"+ + "\u042f\u0001\u0000\u0000\u0000\u00b3\u0439\u0001\u0000\u0000\u0000\u00b5"+ + "\u0444\u0001\u0000\u0000\u0000\u00b7\u044b\u0001\u0000\u0000\u0000\u00b9"+ + "\u044e\u0001\u0000\u0000\u0000\u00bb\u0452\u0001\u0000\u0000\u0000\u00bd"+ + "\u0458\u0001\u0000\u0000\u0000\u00bf\u045f\u0001\u0000\u0000\u0000\u00c1"+ + "\u0462\u0001\u0000\u0000\u0000\u00c3\u0465\u0001\u0000\u0000\u0000\u00c5"+ + "\u046a\u0001\u0000\u0000\u0000\u00c7\u046d\u0001\u0000\u0000\u0000\u00c9"+ + "\u0473\u0001\u0000\u0000\u0000\u00cb\u0479\u0001\u0000\u0000\u0000\u00cd"+ + "\u0482\u0001\u0000\u0000\u0000\u00cf\u048b\u0001\u0000\u0000\u0000\u00d1"+ + "\u0491\u0001\u0000\u0000\u0000\u00d3\u0495\u0001\u0000\u0000\u0000\u00d5"+ + "\u0499\u0001\u0000\u0000\u0000\u00d7\u049f\u0001\u0000\u0000\u0000\u00d9"+ + "\u04a7\u0001\u0000\u0000\u0000\u00db\u04ab\u0001\u0000\u0000\u0000\u00dd"+ + "\u04b2\u0001\u0000\u0000\u0000\u00df\u04ba\u0001\u0000\u0000\u0000\u00e1"+ + "\u04c1\u0001\u0000\u0000\u0000\u00e3\u04cb\u0001\u0000\u0000\u0000\u00e5"+ + "\u04d2\u0001\u0000\u0000\u0000\u00e7\u04d7\u0001\u0000\u0000\u0000\u00e9"+ + "\u04dd\u0001\u0000\u0000\u0000\u00eb\u04e1\u0001\u0000\u0000\u0000\u00ed"+ + "\u04f0\u0001\u0000\u0000\u0000\u00ef\u04f2\u0001\u0000\u0000\u0000\u00f1"+ + "\u04f9\u0001\u0000\u0000\u0000\u00f3\u0507\u0001\u0000\u0000\u0000\u00f5"+ + "\u050b\u0001\u0000\u0000\u0000\u00f7\u0511\u0001\u0000\u0000\u0000\u00f9"+ + "\u0516\u0001\u0000\u0000\u0000\u00fb\u051c\u0001\u0000\u0000\u0000\u00fd"+ + "\u0522\u0001\u0000\u0000\u0000\u00ff\u0529\u0001\u0000\u0000\u0000\u0101"+ + 
"\u052e\u0001\u0000\u0000\u0000\u0103\u0531\u0001\u0000\u0000\u0000\u0105"+ + "\u0535\u0001\u0000\u0000\u0000\u0107\u053a\u0001\u0000\u0000\u0000\u0109"+ + "\u0543\u0001\u0000\u0000\u0000\u010b\u054a\u0001\u0000\u0000\u0000\u010d"+ + "\u0551\u0001\u0000\u0000\u0000\u010f\u0558\u0001\u0000\u0000\u0000\u0111"+ + "\u055d\u0001\u0000\u0000\u0000\u0113\u0563\u0001\u0000\u0000\u0000\u0115"+ + "\u0569\u0001\u0000\u0000\u0000\u0117\u0570\u0001\u0000\u0000\u0000\u0119"+ + "\u0575\u0001\u0000\u0000\u0000\u011b\u057b\u0001\u0000\u0000\u0000\u011d"+ + "\u0580\u0001\u0000\u0000\u0000\u011f\u0587\u0001\u0000\u0000\u0000\u0121"+ + "\u058e\u0001\u0000\u0000\u0000\u0123\u0593\u0001\u0000\u0000\u0000\u0125"+ + "\u059b\u0001\u0000\u0000\u0000\u0127\u05a3\u0001\u0000\u0000\u0000\u0129"+ + "\u05ab\u0001\u0000\u0000\u0000\u012b\u05b9\u0001\u0000\u0000\u0000\u012d"+ + "\u05c7\u0001\u0000\u0000\u0000\u012f\u05d5\u0001\u0000\u0000\u0000\u0131"+ + "\u05db\u0001\u0000\u0000\u0000\u0133\u05e2\u0001\u0000\u0000\u0000\u0135"+ + "\u05ea\u0001\u0000\u0000\u0000\u0137\u05f1\u0001\u0000\u0000\u0000\u0139"+ + "\u05f6\u0001\u0000\u0000\u0000\u013b\u05fc\u0001\u0000\u0000\u0000\u013d"+ + "\u0605\u0001\u0000\u0000\u0000\u013f\u060d\u0001\u0000\u0000\u0000\u0141"+ + "\u0612\u0001\u0000\u0000\u0000\u0143\u0616\u0001\u0000\u0000\u0000\u0145"+ + "\u061d\u0001\u0000\u0000\u0000\u0147\u0623\u0001\u0000\u0000\u0000\u0149"+ + "\u062a\u0001\u0000\u0000\u0000\u014b\u0631\u0001\u0000\u0000\u0000\u014d"+ + "\u063b\u0001\u0000\u0000\u0000\u014f\u063f\u0001\u0000\u0000\u0000\u0151"+ + "\u0649\u0001\u0000\u0000\u0000\u0153\u0657\u0001\u0000\u0000\u0000\u0155"+ + "\u0661\u0001\u0000\u0000\u0000\u0157\u0668\u0001\u0000\u0000\u0000\u0159"+ + "\u066a\u0001\u0000\u0000\u0000\u015b\u066c\u0001\u0000\u0000\u0000\u015d"+ + "\u066e\u0001\u0000\u0000\u0000\u015f\u0670\u0001\u0000\u0000\u0000\u0161"+ + "\u0672\u0001\u0000\u0000\u0000\u0163\u0674\u0001\u0000\u0000\u0000\u0165"+ + "\u0676\u0001\u0000\u0000\u0000\u0167\u0678\u0001\u0000\u0000\u0000\u0169"+ + "\u067a\u0001\u0000\u0000\u0000\u016b\u067c\u0001\u0000\u0000\u0000\u016d"+ + "\u067e\u0001\u0000\u0000\u0000\u016f\u0680\u0001\u0000\u0000\u0000\u0171"+ + "\u0682\u0001\u0000\u0000\u0000\u0173\u0684\u0001\u0000\u0000\u0000\u0175"+ + "\u0687\u0001\u0000\u0000\u0000\u0177\u0689\u0001\u0000\u0000\u0000\u0179"+ + "\u068c\u0001\u0000\u0000\u0000\u017b\u068e\u0001\u0000\u0000\u0000\u017d"+ + "\u0691\u0001\u0000\u0000\u0000\u017f\u0696\u0001\u0000\u0000\u0000\u0181"+ + "\u069d\u0001\u0000\u0000\u0000\u0183\u06a2\u0001\u0000\u0000\u0000\u0185"+ + "\u06a9\u0001\u0000\u0000\u0000\u0187\u06ae\u0001\u0000\u0000\u0000\u0189"+ + "\u06b5\u0001\u0000\u0000\u0000\u018b\u06b7\u0001\u0000\u0000\u0000\u018d"+ + "\u06b9\u0001\u0000\u0000\u0000\u018f\u06bb\u0001\u0000\u0000\u0000\u0191"+ + "\u06bf\u0001\u0000\u0000\u0000\u0193\u06c2\u0001\u0000\u0000\u0000\u0195"+ + "\u06c7\u0001\u0000\u0000\u0000\u0197\u06cd\u0001\u0000\u0000\u0000\u0199"+ + "\u06d3\u0001\u0000\u0000\u0000\u019b\u06fc\u0001\u0000\u0000\u0000\u019d"+ + "\u0700\u0001\u0000\u0000\u0000\u019f\u0704\u0001\u0000\u0000\u0000\u01a1"+ + "\u070e\u0001\u0000\u0000\u0000\u01a3\u0718\u0001\u0000\u0000\u0000\u01a5"+ + "\u071d\u0001\u0000\u0000\u0000\u01a7\u0728\u0001\u0000\u0000\u0000\u01a9"+ + "\u0733\u0001\u0000\u0000\u0000\u01ab\u0739\u0001\u0000\u0000\u0000\u01ad"+ + "\u0748\u0001\u0000\u0000\u0000\u01af\u0753\u0001\u0000\u0000\u0000\u01b1"+ + "\u075c\u0001\u0000\u0000\u0000\u01b3\u075e\u0001\u0000\u0000\u0000\u01b5"+ + 
"\u0760\u0001\u0000\u0000\u0000\u01b7\u0762\u0001\u0000\u0000\u0000\u01b9"+ + "\u0767\u0001\u0000\u0000\u0000\u01bb\u076c\u0001\u0000\u0000\u0000\u01bd"+ + "\u076e\u0001\u0000\u0000\u0000\u01bf\u0770\u0001\u0000\u0000\u0000\u01c1"+ + "\u0776\u0001\u0000\u0000\u0000\u01c3\u077c\u0001\u0000\u0000\u0000\u01c5"+ + "\u0784\u0001\u0000\u0000\u0000\u01c7\u078b\u0001\u0000\u0000\u0000\u01c9"+ + "\u0796\u0001\u0000\u0000\u0000\u01cb\u079d\u0001\u0000\u0000\u0000\u01cd"+ + "\u07a8\u0001\u0000\u0000\u0000\u01cf\u07af\u0001\u0000\u0000\u0000\u01d1"+ + "\u01d2\u0005/\u0000\u0000\u01d2\u01d3\u0005*\u0000\u0000\u01d3\u01d4\u0005"+ + "+\u0000\u0000\u01d4\u0002\u0001\u0000\u0000\u0000\u01d5\u01d6\u0005*\u0000"+ + "\u0000\u01d6\u01d7\u0005/\u0000\u0000\u01d7\u0004\u0001\u0000\u0000\u0000"+ + "\u01d8\u01d9\u0005@\u0000\u0000\u01d9\u0006\u0001\u0000\u0000\u0000\u01da"+ + "\u01db\u0005r\u0000\u0000\u01db\u01dc\u0005o\u0000\u0000\u01dc\u01dd\u0005"+ + "w\u0000\u0000\u01dd\u01de\u0005_\u0000\u0000\u01de\u01df\u0005m\u0000"+ + "\u0000\u01df\u01e0\u0005e\u0000\u0000\u01e0\u01e1\u0005t\u0000\u0000\u01e1"+ + "\u01e2\u0005a\u0000\u0000\u01e2\u01e3\u0005d\u0000\u0000\u01e3\u01e4\u0005"+ + "a\u0000\u0000\u01e4\u01e5\u0005t\u0000\u0000\u01e5\u01e6\u0005a\u0000"+ + "\u0000\u01e6\u01e7\u0005(\u0000\u0000\u01e7\u01e8\u0005)\u0000\u0000\u01e8"+ + "\u01e9\u0005.\u0000\u0000\u01e9\b\u0001\u0000\u0000\u0000\u01ea\u01eb"+ + "\u0003\u016d\u00b6\u0000\u01eb\u01f1\u0003\u01b3\u00d9\u0000\u01ec\u01f0"+ + "\u0003\u01b3\u00d9\u0000\u01ed\u01f0\u0003\u01b5\u00da\u0000\u01ee\u01f0"+ + "\u0003\u01bd\u00de\u0000\u01ef\u01ec\u0001\u0000\u0000\u0000\u01ef\u01ed"+ + "\u0001\u0000\u0000\u0000\u01ef\u01ee\u0001\u0000\u0000\u0000\u01f0\u01f3"+ + "\u0001\u0000\u0000\u0000\u01f1\u01ef\u0001\u0000\u0000\u0000\u01f1\u01f2"+ + "\u0001\u0000\u0000\u0000\u01f2\n\u0001\u0000\u0000\u0000\u01f3\u01f1\u0001"+ + "\u0000\u0000\u0000\u01f4\u01f5\u0007\u0000\u0000\u0000\u01f5\u01f6\u0007"+ + "\u0001\u0000\u0000\u01f6\u01f7\u0007\u0001\u0000\u0000\u01f7\u01f8\u0007"+ + "\u0002\u0000\u0000\u01f8\u01f9\u0007\u0003\u0000\u0000\u01f9\u01fa\u0007"+ + "\u0004\u0000\u0000\u01fa\u01fb\u0007\u0005\u0000\u0000\u01fb\f\u0001\u0000"+ + "\u0000\u0000\u01fc\u01fd\u0007\u0000\u0000\u0000\u01fd\u01fe\u0007\u0006"+ + "\u0000\u0000\u01fe\u01ff\u0007\u0006\u0000\u0000\u01ff\u000e\u0001\u0000"+ + "\u0000\u0000\u0200\u0201\u0007\u0000\u0000\u0000\u0201\u0202\u0007\u0006"+ + "\u0000\u0000\u0202\u0203\u0007\u0007\u0000\u0000\u0203\u0204\u0007\b\u0000"+ + "\u0000\u0204\u0205\u0007\u0004\u0000\u0000\u0205\u0010\u0001\u0000\u0000"+ + "\u0000\u0206\u0207\u0007\u0000\u0000\u0000\u0207\u0208\u0007\t\u0000\u0000"+ + "\u0208\u0209\u0007\t\u0000\u0000\u0209\u0012\u0001\u0000\u0000\u0000\u020a"+ + "\u020b\u0007\u0000\u0000\u0000\u020b\u020c\u0007\t\u0000\u0000\u020c\u020d"+ + "\u0007\u0005\u0000\u0000\u020d\u020e\u0007\n\u0000\u0000\u020e\u020f\u0007"+ + "\u000b\u0000\u0000\u020f\u0014\u0001\u0000\u0000\u0000\u0210\u0211\u0007"+ + "\u0000\u0000\u0000\u0211\u0212\u0007\t\u0000\u0000\u0212\u0213\u0007\f"+ + "\u0000\u0000\u0213\u0214\u0007\u0000\u0000\u0000\u0214\u0215\u0007\r\u0000"+ + "\u0000\u0215\u0216\u0007\u000e\u0000\u0000\u0216\u0016\u0001\u0000\u0000"+ + "\u0000\u0217\u0218\u0007\u0000\u0000\u0000\u0218\u0219\u0007\u0004\u0000"+ + "\u0000\u0219\u021a\u0007\u0001\u0000\u0000\u021a\u021b\u0007\n\u0000\u0000"+ + "\u021b\u021c\u0007\u000e\u0000\u0000\u021c\u021d\u0007\u0005\u0000\u0000"+ + "\u021d\u021e\u0007\u0002\u0000\u0000\u021e\u021f\u0007\u000b\u0000\u0000"+ + 
"\u021f\u0220\u0007\u000e\u0000\u0000\u0220\u0018\u0001\u0000\u0000\u0000"+ + "\u0221\u0222\u0007\u0000\u0000\u0000\u0222\u0223\u0007\u0004\u0000\u0000"+ + "\u0223\u0224\u0007\u0006\u0000\u0000\u0224\u001a\u0001\u0000\u0000\u0000"+ + "\u0225\u0226\u0007\u0000\u0000\u0000\u0226\u0227\u0007\u000e\u0000\u0000"+ + "\u0227\u001c\u0001\u0000\u0000\u0000\u0228\u0229\u0007\u0000\u0000\u0000"+ + "\u0229\u022a\u0007\u000e\u0000\u0000\u022a\u022b\u0007\u0001\u0000\u0000"+ + "\u022b\u001e\u0001\u0000\u0000\u0000\u022c\u022d\u0005a\u0000\u0000\u022d"+ + "\u022e\u0005r\u0000\u0000\u022e\u022f\u0005r\u0000\u0000\u022f\u0230\u0005"+ + "a\u0000\u0000\u0230\u0231\u0005y\u0000\u0000\u0231\u0232\u0005_\u0000"+ + "\u0000\u0232\u0233\u0005c\u0000\u0000\u0233\u0234\u0005o\u0000\u0000\u0234"+ + "\u0235\u0005l\u0000\u0000\u0235\u0236\u0005l\u0000\u0000\u0236\u0237\u0005"+ + "e\u0000\u0000\u0237\u0238\u0005c\u0000\u0000\u0238\u0239\u0005t\u0000"+ + "\u0000\u0239 \u0001\u0000\u0000\u0000\u023a\u023b\u0007\u000f\u0000\u0000"+ + "\u023b\u023c\u0007\n\u0000\u0000\u023c\u023d\u0007\u0010\u0000\u0000\u023d"+ + "\u023e\u0007\u0002\u0000\u0000\u023e\u023f\u0007\u000b\u0000\u0000\u023f"+ + "\u0240\u0007\n\u0000\u0000\u0240\"\u0001\u0000\u0000\u0000\u0241\u0242"+ + "\u0007\u000f\u0000\u0000\u0242\u0243\u0007\n\u0000\u0000\u0243\u0244\u0007"+ + "\u0005\u0000\u0000\u0244\u0245\u0007\f\u0000\u0000\u0245\u0246\u0007\n"+ + "\u0000\u0000\u0246\u0247\u0007\n\u0000\u0000\u0247\u0248\u0007\u0004\u0000"+ + "\u0000\u0248$\u0001\u0000\u0000\u0000\u0249\u024a\u0007\u000f\u0000\u0000"+ + "\u024a\u024b\u0007\r\u0000\u0000\u024b&\u0001\u0000\u0000\u0000\u024c"+ + "\u024d\u0007\u0001\u0000\u0000\u024d\u024e\u0007\u0000\u0000\u0000\u024e"+ + "\u024f\u0007\u0001\u0000\u0000\u024f\u0250\u0007\u0011\u0000\u0000\u0250"+ + "\u0251\u0007\n\u0000\u0000\u0251(\u0001\u0000\u0000\u0000\u0252\u0253"+ + "\u0007\u0001\u0000\u0000\u0253\u0254\u0007\u0000\u0000\u0000\u0254\u0255"+ + "\u0007\u000e\u0000\u0000\u0255\u0256\u0007\n\u0000\u0000\u0256*\u0001"+ + "\u0000\u0000\u0000\u0257\u0258\u0007\u0001\u0000\u0000\u0258\u0259\u0007"+ + "\u0000\u0000\u0000\u0259\u025a\u0007\u000e\u0000\u0000\u025a\u025b\u0007"+ + "\u0001\u0000\u0000\u025b\u025c\u0007\u0000\u0000\u0000\u025c\u025d\u0007"+ + "\u0006\u0000\u0000\u025d\u025e\u0007\n\u0000\u0000\u025e,\u0001\u0000"+ + "\u0000\u0000\u025f\u0260\u0007\u0001\u0000\u0000\u0260\u0261\u0007\u0000"+ + "\u0000\u0000\u0261\u0262\u0007\u000e\u0000\u0000\u0262\u0263\u0007\u0005"+ + "\u0000\u0000\u0263.\u0001\u0000\u0000\u0000\u0264\u0265\u0007\u0001\u0000"+ + "\u0000\u0265\u0266\u0007\u0002\u0000\u0000\u0266\u0267\u0007\t\u0000\u0000"+ + "\u0267\u0268\u0007\t\u0000\u0000\u0268\u0269\u0007\n\u0000\u0000\u0269"+ + "\u026a\u0007\u0001\u0000\u0000\u026a\u026b\u0007\u0005\u0000\u0000\u026b"+ + "\u026c\u0007\b\u0000\u0000\u026c\u026d\u0007\u0002\u0000\u0000\u026d\u026e"+ + "\u0007\u0004\u0000\u0000\u026e0\u0001\u0000\u0000\u0000\u026f\u0270\u0007"+ + "\u0001\u0000\u0000\u0270\u0271\u0007\u0002\u0000\u0000\u0271\u0272\u0007"+ + "\u0007\u0000\u0000\u0272\u0273\u0007\u0007\u0000\u0000\u0273\u0274\u0007"+ + "\n\u0000\u0000\u0274\u0275\u0007\u0004\u0000\u0000\u0275\u0276\u0007\u0005"+ + "\u0000\u0000\u02762\u0001\u0000\u0000\u0000\u0277\u0278\u0005c\u0000\u0000"+ + "\u0278\u0279\u0005o\u0000\u0000\u0279\u027a\u0005u\u0000\u0000\u027a\u027b"+ + "\u0005n\u0000\u0000\u027b\u027c\u0005t\u0000\u0000\u027c4\u0001\u0000"+ + "\u0000\u0000\u027d\u027e\u0007\u0001\u0000\u0000\u027e\u027f\u0007\u000b"+ + 
"\u0000\u0000\u027f\u0280\u0007\n\u0000\u0000\u0280\u0281\u0007\u0000\u0000"+ + "\u0000\u0281\u0282\u0007\u0005\u0000\u0000\u0282\u0283\u0007\n\u0000\u0000"+ + "\u02836\u0001\u0000\u0000\u0000\u0284\u0285\u0007\u0001\u0000\u0000\u0285"+ + "\u0286\u0007\r\u0000\u0000\u0286\u0287\u0007\u0001\u0000\u0000\u0287\u0288"+ + "\u0007\t\u0000\u0000\u0288\u0289\u0007\n\u0000\u0000\u02898\u0001\u0000"+ + "\u0000\u0000\u028a\u0290\u0007\u0006\u0000\u0000\u028b\u028c\u0007\u0006"+ + "\u0000\u0000\u028c\u028d\u0007\u0000\u0000\u0000\u028d\u028e\u0007\r\u0000"+ + "\u0000\u028e\u0290\u0007\u000e\u0000\u0000\u028f\u028a\u0001\u0000\u0000"+ + "\u0000\u028f\u028b\u0001\u0000\u0000\u0000\u0290:\u0001\u0000\u0000\u0000"+ + "\u0291\u0292\u0007\u0006\u0000\u0000\u0292\u0293\u0007\n\u0000\u0000\u0293"+ + "\u0294\u0007\u0001\u0000\u0000\u0294\u0295\u0007\t\u0000\u0000\u0295\u0296"+ + "\u0007\u0000\u0000\u0000\u0296\u0297\u0007\u000b\u0000\u0000\u0297\u0298"+ + "\u0007\n\u0000\u0000\u0298<\u0001\u0000\u0000\u0000\u0299\u029a\u0007"+ + "\u0006\u0000\u0000\u029a\u029b\u0007\n\u0000\u0000\u029b\u029c\u0007\u0010"+ + "\u0000\u0000\u029c\u029d\u0007\u0000\u0000\u0000\u029d\u029e\u0007\u0003"+ + "\u0000\u0000\u029e\u029f\u0007\t\u0000\u0000\u029f\u02a0\u0007\u0005\u0000"+ + "\u0000\u02a0>\u0001\u0000\u0000\u0000\u02a1\u02a2\u0007\u0006\u0000\u0000"+ + "\u02a2\u02a3\u0007\n\u0000\u0000\u02a3\u02a4\u0007\t\u0000\u0000\u02a4"+ + "\u02a5\u0007\n\u0000\u0000\u02a5\u02a6\u0007\u0005\u0000\u0000\u02a6\u02a7"+ + "\u0007\n\u0000\u0000\u02a7@\u0001\u0000\u0000\u0000\u02a8\u02a9\u0007"+ + "\u0006\u0000\u0000\u02a9\u02aa\u0007\n\u0000\u0000\u02aa\u02ab\u0007\u000e"+ + "\u0000\u0000\u02ab\u02ac\u0007\u0001\u0000\u0000\u02acB\u0001\u0000\u0000"+ + "\u0000\u02ad\u02ae\u0007\u0006\u0000\u0000\u02ae\u02af\u0007\n\u0000\u0000"+ + "\u02af\u02b0\u0007\u000e\u0000\u0000\u02b0\u02b1\u0007\u0001\u0000\u0000"+ + "\u02b1\u02b2\u0007\n\u0000\u0000\u02b2\u02b3\u0007\u0004\u0000\u0000\u02b3"+ + "\u02b4\u0007\u0006\u0000\u0000\u02b4\u02b5\u0007\u0000\u0000\u0000\u02b5"+ + "\u02b6\u0007\u0004\u0000\u0000\u02b6\u02b7\u0007\u0005\u0000\u0000\u02b7"+ + "\u02b8\u0007\u000e\u0000\u0000\u02b8D\u0001\u0000\u0000\u0000\u02b9\u02ba"+ + "\u0007\u0006\u0000\u0000\u02ba\u02bb\u0007\n\u0000\u0000\u02bb\u02bc\u0007"+ + "\u000e\u0000\u0000\u02bc\u02bd\u0007\u0001\u0000\u0000\u02bd\u02be\u0007"+ + "\u000b\u0000\u0000\u02be\u02bf\u0007\b\u0000\u0000\u02bf\u02c0\u0007\u000f"+ + "\u0000\u0000\u02c0\u02c1\u0007\n\u0000\u0000\u02c1F\u0001\u0000\u0000"+ + "\u0000\u02c2\u02c3\u0007\u0006\u0000\u0000\u02c3\u02c4\u0007\b\u0000\u0000"+ + "\u02c4\u02c5\u0007\u000e\u0000\u0000\u02c5\u02c6\u0007\u0000\u0000\u0000"+ + "\u02c6\u02c7\u0007\u000f\u0000\u0000\u02c7\u02c8\u0007\t\u0000\u0000\u02c8"+ + "\u02c9\u0007\n\u0000\u0000\u02c9H\u0001\u0000\u0000\u0000\u02ca\u02cb"+ + "\u0007\u0006\u0000\u0000\u02cb\u02cc\u0007\b\u0000\u0000\u02cc\u02cd\u0007"+ + "\u000e\u0000\u0000\u02cd\u02ce\u0007\u0005\u0000\u0000\u02ce\u02cf\u0007"+ + "\b\u0000\u0000\u02cf\u02d0\u0007\u0004\u0000\u0000\u02d0\u02d1\u0007\u0001"+ + "\u0000\u0000\u02d1\u02d2\u0007\u0005\u0000\u0000\u02d2J\u0001\u0000\u0000"+ + "\u0000\u02d3\u02d4\u0007\u0006\u0000\u0000\u02d4\u02d5\u0007\u000b\u0000"+ + "\u0000\u02d5\u02d6\u0007\u0002\u0000\u0000\u02d6\u02d7\u0007\u0012\u0000"+ + "\u0000\u02d7L\u0001\u0000\u0000\u0000\u02d8\u02d9\u0007\n\u0000\u0000"+ + "\u02d9\u02da\u0007\t\u0000\u0000\u02da\u02db\u0007\n\u0000\u0000\u02db"+ + "\u02dc\u0007\u0007\u0000\u0000\u02dc\u02dd\u0007\n\u0000\u0000\u02dd\u02de"+ + 
"\u0007\u0004\u0000\u0000\u02de\u02df\u0007\u0005\u0000\u0000\u02df\u02e0"+ + "\u0007\u0002\u0000\u0000\u02e0\u02e1\u0007\u0010\u0000\u0000\u02e1N\u0001"+ + "\u0000\u0000\u0000\u02e2\u02e3\u0007\n\u0000\u0000\u02e3\u02e4\u0007\t"+ + "\u0000\u0000\u02e4\u02e5\u0007\n\u0000\u0000\u02e5\u02e6\u0007\u0007\u0000"+ + "\u0000\u02e6\u02e7\u0007\n\u0000\u0000\u02e7\u02e8\u0007\u0004\u0000\u0000"+ + "\u02e8\u02e9\u0007\u0005\u0000\u0000\u02e9\u02ea\u0007\u000e\u0000\u0000"+ + "\u02eaP\u0001\u0000\u0000\u0000\u02eb\u02ec\u0007\n\u0000\u0000\u02ec"+ + "\u02ed\u0007\t\u0000\u0000\u02ed\u02ee\u0007\u000e\u0000\u0000\u02ee\u02ef"+ + "\u0007\n\u0000\u0000\u02efR\u0001\u0000\u0000\u0000\u02f0\u02f1\u0007"+ + "\n\u0000\u0000\u02f1\u02f2\u0007\u0004\u0000\u0000\u02f2\u02f3\u0007\u0000"+ + "\u0000\u0000\u02f3\u02f4\u0007\u000f\u0000\u0000\u02f4\u02f5\u0007\t\u0000"+ + "\u0000\u02f5\u02f6\u0007\n\u0000\u0000\u02f6T\u0001\u0000\u0000\u0000"+ + "\u02f7\u02f8\u0007\n\u0000\u0000\u02f8\u02f9\u0007\u0004\u0000\u0000\u02f9"+ + "\u02fa\u0007\u0006\u0000\u0000\u02faV\u0001\u0000\u0000\u0000\u02fb\u02fc"+ + "\u0007\n\u0000\u0000\u02fc\u02fd\u0007\u000e\u0000\u0000\u02fd\u02fe\u0003"+ + "\u01bd\u00de\u0000\u02fe\u02ff\u0007\u000e\u0000\u0000\u02ff\u0300\u0007"+ + "\u0011\u0000\u0000\u0300\u0301\u0007\u0000\u0000\u0000\u0301\u0302\u0007"+ + "\u000b\u0000\u0000\u0302\u0303\u0007\u0006\u0000\u0000\u0303\u0304\u0007"+ + "\u000e\u0000\u0000\u0304X\u0001\u0000\u0000\u0000\u0305\u0306\u0007\n"+ + "\u0000\u0000\u0306\u0307\u0007\u000e\u0000\u0000\u0307\u0308\u0003\u01bd"+ + "\u00de\u0000\u0308\u0309\u0007\u000b\u0000\u0000\u0309\u030a\u0007\n\u0000"+ + "\u0000\u030a\u030b\u0007\u0012\u0000\u0000\u030b\u030c\u0007\t\u0000\u0000"+ + "\u030c\u030d\u0007\b\u0000\u0000\u030d\u030e\u0007\u0001\u0000\u0000\u030e"+ + "\u030f\u0007\u0000\u0000\u0000\u030f\u0310\u0007\u000e\u0000\u0000\u0310"+ + "Z\u0001\u0000\u0000\u0000\u0311\u0312\u0007\n\u0000\u0000\u0312\u0313"+ + "\u0007\u0013\u0000\u0000\u0313\u0314\u0007\b\u0000\u0000\u0314\u0315\u0007"+ + "\u000e\u0000\u0000\u0315\u0316\u0007\u0005\u0000\u0000\u0316\u0317\u0007"+ + "\u000e\u0000\u0000\u0317\\\u0001\u0000\u0000\u0000\u0318\u0319\u0007\n"+ + "\u0000\u0000\u0319\u031a\u0007\u0013\u0000\u0000\u031a\u031b\u0007\u0005"+ + "\u0000\u0000\u031b\u031c\u0007\u000b\u0000\u0000\u031c\u031d\u0007\u0000"+ + "\u0000\u0000\u031d\u031e\u0007\u0001\u0000\u0000\u031e\u031f\u0007\u0005"+ + "\u0000\u0000\u031f^\u0001\u0000\u0000\u0000\u0320\u0321\u0007\u0010\u0000"+ + "\u0000\u0321\u0322\u0007\b\u0000\u0000\u0322\u0323\u0007\n\u0000\u0000"+ + "\u0323\u0324\u0007\t\u0000\u0000\u0324\u0325\u0007\u0006\u0000\u0000\u0325"+ + "\u0326\u0007\u000e\u0000\u0000\u0326`\u0001\u0000\u0000\u0000\u0327\u0328"+ + "\u0007\u0010\u0000\u0000\u0328\u0329\u0007\b\u0000\u0000\u0329\u032a\u0007"+ + "\u000b\u0000\u0000\u032a\u032b\u0007\u000e\u0000\u0000\u032b\u032c\u0007"+ + "\u0005\u0000\u0000\u032cb\u0001\u0000\u0000\u0000\u032d\u032e\u0007\u0010"+ + "\u0000\u0000\u032e\u032f\u0007\u0002\u0000\u0000\u032f\u0330\u0007\u000b"+ + "\u0000\u0000\u0330\u0331\u0007\u0001\u0000\u0000\u0331\u0332\u0007\n\u0000"+ + "\u0000\u0332d\u0001\u0000\u0000\u0000\u0333\u0334\u0003c1\u0000\u0334"+ + "\u0335\u0003\u01bd\u00de\u0000\u0335\u0336\u0003\u0085B\u0000\u0336f\u0001"+ + "\u0000\u0000\u0000\u0337\u0338\u0003c1\u0000\u0338\u0339\u0003\u01bd\u00de"+ + "\u0000\u0339\u033a\u0003\u00d7k\u0000\u033a\u033b\u0003\u01bd\u00de\u0000"+ + "\u033b\u033c\u0003\u0085B\u0000\u033ch\u0001\u0000\u0000\u0000\u033d\u033e"+ + 
"\u0007\u0010\u0000\u0000\u033e\u033f\u0007\u000b\u0000\u0000\u033f\u0340"+ + "\u0007\n\u0000\u0000\u0340\u0341\u0007\n\u0000\u0000\u0341\u0342\u0007"+ + "\u0014\u0000\u0000\u0342\u0343\u0007\n\u0000\u0000\u0343j\u0001\u0000"+ + "\u0000\u0000\u0344\u0345\u0007\u0010\u0000\u0000\u0345\u0346\u0007\u000b"+ + "\u0000\u0000\u0346\u0347\u0007\u0002\u0000\u0000\u0347\u0348\u0007\u0007"+ + "\u0000\u0000\u0348l\u0001\u0000\u0000\u0000\u0349\u034a\u0007\u0010\u0000"+ + "\u0000\u034a\u034b\u0007\u000b\u0000\u0000\u034b\u034c\u0007\u0002\u0000"+ + "\u0000\u034c\u034d\u0007\u0014\u0000\u0000\u034d\u034e\u0007\n\u0000\u0000"+ + "\u034e\u034f\u0007\u0004\u0000\u0000\u034fn\u0001\u0000\u0000\u0000\u0350"+ + "\u0351\u0007\u0010\u0000\u0000\u0351\u0352\u0007\u0003\u0000\u0000\u0352"+ + "\u0353\u0007\t\u0000\u0000\u0353\u0354\u0007\t\u0000\u0000\u0354\u0355"+ + "\u0007\u0005\u0000\u0000\u0355\u0356\u0007\n\u0000\u0000\u0356\u0357\u0007"+ + "\u0013\u0000\u0000\u0357\u0358\u0007\u0005\u0000\u0000\u0358p\u0001\u0000"+ + "\u0000\u0000\u0359\u035a\u0007\u0015\u0000\u0000\u035a\u035b\u0007\n\u0000"+ + "\u0000\u035b\u035c\u0007\u0004\u0000\u0000\u035c\u035d\u0007\n\u0000\u0000"+ + "\u035d\u035e\u0007\u000b\u0000\u0000\u035e\u035f\u0007\u0000\u0000\u0000"+ + "\u035f\u0360\u0007\u0005\u0000\u0000\u0360\u0361\u0007\n\u0000\u0000\u0361"+ + "\u0362\u0007\u0006\u0000\u0000\u0362r\u0001\u0000\u0000\u0000\u0363\u0364"+ + "\u0007\u0015\u0000\u0000\u0364\u0365\u0007\u000b\u0000\u0000\u0365\u0366"+ + "\u0007\u0000\u0000\u0000\u0366\u0367\u0007\u0004\u0000\u0000\u0367\u0368"+ + "\u0007\u0005\u0000\u0000\u0368t\u0001\u0000\u0000\u0000\u0369\u036a\u0007"+ + "\u0015\u0000\u0000\u036a\u036b\u0007\u000b\u0000\u0000\u036b\u036c\u0007"+ + "\u0002\u0000\u0000\u036c\u036d\u0007\u0003\u0000\u0000\u036d\u036e\u0007"+ + "\u0012\u0000\u0000\u036ev\u0001\u0000\u0000\u0000\u036f\u0376\u0007\u0011"+ + "\u0000\u0000\u0370\u0371\u0007\u0011\u0000\u0000\u0371\u0372\u0007\u0002"+ + "\u0000\u0000\u0372\u0373\u0007\u0003\u0000\u0000\u0373\u0374\u0007\u000b"+ + "\u0000\u0000\u0374\u0376\u0007\u000e\u0000\u0000\u0375\u036f\u0001\u0000"+ + "\u0000\u0000\u0375\u0370\u0001\u0000\u0000\u0000\u0376x\u0001\u0000\u0000"+ + "\u0000\u0377\u0378\u0007\b\u0000\u0000\u0378\u0379\u0007\u0006\u0000\u0000"+ + "\u0379\u037a\u0007\n\u0000\u0000\u037a\u037b\u0007\u0004\u0000\u0000\u037b"+ + "\u037c\u0007\u0005\u0000\u0000\u037c\u037d\u0007\b\u0000\u0000\u037d\u037e"+ + "\u0007\u0010\u0000\u0000\u037e\u037f\u0007\b\u0000\u0000\u037f\u0380\u0007"+ + "\n\u0000\u0000\u0380\u0381\u0007\u0006\u0000\u0000\u0381z\u0001\u0000"+ + "\u0000\u0000\u0382\u0383\u0007\b\u0000\u0000\u0383\u0384\u0007\u0006\u0000"+ + "\u0000\u0384\u0385\u0007\n\u0000\u0000\u0385\u0386\u0007\u0004\u0000\u0000"+ + "\u0386\u0387\u0007\u0005\u0000\u0000\u0387\u0388\u0007\b\u0000\u0000\u0388"+ + "\u0389\u0007\u0005\u0000\u0000\u0389\u038a\u0007\r\u0000\u0000\u038a|"+ + "\u0001\u0000\u0000\u0000\u038b\u038c\u0007\b\u0000\u0000\u038c\u038d\u0007"+ + "\u0010\u0000\u0000\u038d~\u0001\u0000\u0000\u0000\u038e\u038f\u0007\b"+ + "\u0000\u0000\u038f\u0390\u0007\u0007\u0000\u0000\u0390\u0391\u0007\u0000"+ + "\u0000\u0000\u0391\u0392\u0007\u0015\u0000\u0000\u0392\u0393\u0007\n\u0000"+ + "\u0000\u0393\u0080\u0001\u0000\u0000\u0000\u0394\u0395\u0007\b\u0000\u0000"+ + "\u0395\u0396\u0007\u0004\u0000\u0000\u0396\u0082\u0001\u0000\u0000\u0000"+ + "\u0397\u0398\u0007\b\u0000\u0000\u0398\u0399\u0007\u0004\u0000\u0000\u0399"+ + "\u039a\u0007\u0001\u0000\u0000\u039a\u039b\u0007\u000b\u0000\u0000\u039b"+ + 
"\u039c\u0007\n\u0000\u0000\u039c\u039d\u0007\u0007\u0000\u0000\u039d\u039e"+ + "\u0007\n\u0000\u0000\u039e\u039f\u0007\u0004\u0000\u0000\u039f\u03a0\u0007"+ + "\u0005\u0000\u0000\u03a0\u0084\u0001\u0000\u0000\u0000\u03a1\u03a2\u0007"+ + "\b\u0000\u0000\u03a2\u03a3\u0007\u0004\u0000\u0000\u03a3\u03a4\u0007\u0006"+ + "\u0000\u0000\u03a4\u03a5\u0007\n\u0000\u0000\u03a5\u03a6\u0007\u0013\u0000"+ + "\u0000\u03a6\u0086\u0001\u0000\u0000\u0000\u03a7\u03a8\u0007\b\u0000\u0000"+ + "\u03a8\u03a9\u0007\u0004\u0000\u0000\u03a9\u03aa\u0007\u0006\u0000\u0000"+ + "\u03aa\u03ab\u0007\n\u0000\u0000\u03ab\u03ac\u0007\u0013\u0000\u0000\u03ac"+ + "\u03ad\u0007\n\u0000\u0000\u03ad\u03ae\u0007\u000e\u0000\u0000\u03ae\u0088"+ + "\u0001\u0000\u0000\u0000\u03af\u03b0\u0007\b\u0000\u0000\u03b0\u03b1\u0007"+ + "\u0004\u0000\u0000\u03b1\u03b2\u0007\u000e\u0000\u0000\u03b2\u03b3\u0007"+ + "\n\u0000\u0000\u03b3\u03b4\u0007\u000b\u0000\u0000\u03b4\u03b5\u0007\u0005"+ + "\u0000\u0000\u03b5\u008a\u0001\u0000\u0000\u0000\u03b6\u03b7\u0007\b\u0000"+ + "\u0000\u03b7\u03b8\u0007\u0004\u0000\u0000\u03b8\u03b9\u0007\u0005\u0000"+ + "\u0000\u03b9\u03ba\u0007\u0002\u0000\u0000\u03ba\u008c\u0001\u0000\u0000"+ + "\u0000\u03bb\u03bc\u0007\b\u0000\u0000\u03bc\u03bd\u0007\u000e\u0000\u0000"+ + "\u03bd\u008e\u0001\u0000\u0000\u0000\u03be\u03bf\u0007\u0016\u0000\u0000"+ + "\u03bf\u03c0\u0007\u000e\u0000\u0000\u03c0\u03c1\u0007\u0002\u0000\u0000"+ + "\u03c1\u03c2\u0007\u0004\u0000\u0000\u03c2\u0090\u0001\u0000\u0000\u0000"+ + "\u03c3\u03c4\u0007\u0016\u0000\u0000\u03c4\u03c5\u0007\u0002\u0000\u0000"+ + "\u03c5\u03c6\u0007\b\u0000\u0000\u03c6\u03c7\u0007\u0004\u0000\u0000\u03c7"+ + "\u0092\u0001\u0000\u0000\u0000\u03c8\u03c9\u0007\u0017\u0000\u0000\u03c9"+ + "\u03ca\u0007\n\u0000\u0000\u03ca\u03cb\u0007\r\u0000\u0000\u03cb\u0094"+ + "\u0001\u0000\u0000\u0000\u03cc\u03cd\u0007\u0017\u0000\u0000\u03cd\u03ce"+ + "\u0007\n\u0000\u0000\u03ce\u03cf\u0007\r\u0000\u0000\u03cf\u03d0\u0007"+ + "\u0002\u0000\u0000\u03d0\u03d1\u0007\u0010\u0000\u0000\u03d1\u0096\u0001"+ + "\u0000\u0000\u0000\u03d2\u03d3\u0007\u0017\u0000\u0000\u03d3\u03d4\u0007"+ + "\n\u0000\u0000\u03d4\u03d5\u0007\r\u0000\u0000\u03d5\u03d6\u0007\u000e"+ + "\u0000\u0000\u03d6\u0098\u0001\u0000\u0000\u0000\u03d7\u03d8\u0007\t\u0000"+ + "\u0000\u03d8\u03d9\u0007\u0000\u0000\u0000\u03d9\u03da\u0007\u000e\u0000"+ + "\u0000\u03da\u03db\u0007\u0005\u0000\u0000\u03db\u009a\u0001\u0000\u0000"+ + "\u0000\u03dc\u03dd\u0007\t\u0000\u0000\u03dd\u03de\u0007\n\u0000\u0000"+ + "\u03de\u03df\u0007\u0010\u0000\u0000\u03df\u03e0\u0007\u0005\u0000\u0000"+ + "\u03e0\u009c\u0001\u0000\u0000\u0000\u03e1\u03e2\u0007\t\u0000\u0000\u03e2"+ + "\u03e3\u0007\b\u0000\u0000\u03e3\u03e4\u0007\u0010\u0000\u0000\u03e4\u03e5"+ + "\u0007\n\u0000\u0000\u03e5\u03e6\u0007\u0005\u0000\u0000\u03e6\u03e7\u0007"+ + "\b\u0000\u0000\u03e7\u03e8\u0007\u0007\u0000\u0000\u03e8\u03e9\u0007\n"+ + "\u0000\u0000\u03e9\u009e\u0001\u0000\u0000\u0000\u03ea\u03eb\u0007\t\u0000"+ + "\u0000\u03eb\u03ec\u0007\b\u0000\u0000\u03ec\u03ed\u0007\u0007\u0000\u0000"+ + "\u03ed\u03ee\u0007\b\u0000\u0000\u03ee\u03ef\u0007\u0005\u0000\u0000\u03ef"+ + "\u00a0\u0001\u0000\u0000\u0000\u03f0\u03f1\u0007\t\u0000\u0000\u03f1\u03f2"+ + "\u0007\u0002\u0000\u0000\u03f2\u03f3\u0007\u0001\u0000\u0000\u03f3\u03f4"+ + "\u0007\u0000\u0000\u0000\u03f4\u03f5\u0007\t\u0000\u0000\u03f5\u00a2\u0001"+ + "\u0000\u0000\u0000\u03f6\u03f7\u0007\t\u0000\u0000\u03f7\u03f8\u0007\u0002"+ + "\u0000\u0000\u03f8\u03f9\u0007\u0001\u0000\u0000\u03f9\u03fa\u0007\u0017"+ + 
"\u0000\u0000\u03fa\u00a4\u0001\u0000\u0000\u0000\u03fb\u03fc\u0007\u0007"+ + "\u0000\u0000\u03fc\u03fd\u0007\u0000\u0000\u0000\u03fd\u03fe\u0007\u0013"+ + "\u0000\u0000\u03fe\u03ff\u0007\u0018\u0000\u0000\u03ff\u0400\u0007\u0000"+ + "\u0000\u0000\u0400\u0401\u0007\t\u0000\u0000\u0401\u0402\u0007\u0003\u0000"+ + "\u0000\u0402\u0403\u0007\n\u0000\u0000\u0403\u00a6\u0001\u0000\u0000\u0000"+ + "\u0404\u0405\u0007\u0007\u0000\u0000\u0405\u0406\u0007\n\u0000\u0000\u0406"+ + "\u0407\u0007\u000b\u0000\u0000\u0407\u0408\u0007\u0015\u0000\u0000\u0408"+ + "\u0409\u0007\n\u0000\u0000\u0409\u00a8\u0001\u0000\u0000\u0000\u040a\u0413"+ + "\u0007\u0007\u0000\u0000\u040b\u040c\u0007\u0007\u0000\u0000\u040c\u040d"+ + "\u0007\b\u0000\u0000\u040d\u040e\u0007\u0004\u0000\u0000\u040e\u040f\u0007"+ + "\u0003\u0000\u0000\u040f\u0410\u0007\u0005\u0000\u0000\u0410\u0411\u0007"+ + "\n\u0000\u0000\u0411\u0413\u0007\u000e\u0000\u0000\u0412\u040a\u0001\u0000"+ + "\u0000\u0000\u0412\u040b\u0001\u0000\u0000\u0000\u0413\u00aa\u0001\u0000"+ + "\u0000\u0000\u0414\u0415\u0007\u0007\u0000\u0000\u0415\u0416\u0007\b\u0000"+ + "\u0000\u0416\u0417\u0007\u0004\u0000\u0000\u0417\u0418\u0007\u0018\u0000"+ + "\u0000\u0418\u0419\u0007\u0000\u0000\u0000\u0419\u041a\u0007\t\u0000\u0000"+ + "\u041a\u041b\u0007\u0003\u0000\u0000\u041b\u041c\u0007\n\u0000\u0000\u041c"+ + "\u00ac\u0001\u0000\u0000\u0000\u041d\u041e\u0007\u0007\u0000\u0000\u041e"+ + "\u041f\u0007\u0002\u0000\u0000\u041f\u0420\u0007\u0006\u0000\u0000\u0420"+ + "\u0421\u0007\b\u0000\u0000\u0421\u0422\u0007\u0010\u0000\u0000\u0422\u0423"+ + "\u0007\r\u0000\u0000\u0423\u00ae\u0001\u0000\u0000\u0000\u0424\u0425\u0007"+ + "\u0007\u0000\u0000\u0425\u0426\u0007\u000b\u0000\u0000\u0426\u0427\u0003"+ + "\u01bd\u00de\u0000\u0427\u0428\u0007\u0001\u0000\u0000\u0428\u0429\u0007"+ + "\u0002\u0000\u0000\u0429\u042a\u0007\u0003\u0000\u0000\u042a\u042b\u0007"+ + "\u0004\u0000\u0000\u042b\u042c\u0007\u0005\u0000\u0000\u042c\u042d\u0007"+ + "\n\u0000\u0000\u042d\u042e\u0007\u000b\u0000\u0000\u042e\u00b0\u0001\u0000"+ + "\u0000\u0000\u042f\u0430\u0007\u0004\u0000\u0000\u0430\u0431\u0007\u0000"+ + "\u0000\u0000\u0431\u0432\u0007\u0007\u0000\u0000\u0432\u0433\u0007\n\u0000"+ + "\u0000\u0433\u0434\u0007\u000e\u0000\u0000\u0434\u0435\u0007\u0012\u0000"+ + "\u0000\u0435\u0436\u0007\u0000\u0000\u0000\u0436\u0437\u0007\u0001\u0000"+ + "\u0000\u0437\u0438\u0007\n\u0000\u0000\u0438\u00b2\u0001\u0000\u0000\u0000"+ + "\u0439\u043a\u0007\u0004\u0000\u0000\u043a\u043b\u0007\u0000\u0000\u0000"+ + "\u043b\u043c\u0007\u0007\u0000\u0000\u043c\u043d\u0007\n\u0000\u0000\u043d"+ + "\u043e\u0007\u000e\u0000\u0000\u043e\u043f\u0007\u0012\u0000\u0000\u043f"+ + "\u0440\u0007\u0000\u0000\u0000\u0440\u0441\u0007\u0001\u0000\u0000\u0441"+ + "\u0442\u0007\n\u0000\u0000\u0442\u0443\u0007\u000e\u0000\u0000\u0443\u00b4"+ + "\u0001\u0000\u0000\u0000\u0444\u0445\u0007\u0004\u0000\u0000\u0445\u0446"+ + "\u0007\n\u0000\u0000\u0446\u0447\u0007\u000e\u0000\u0000\u0447\u0448\u0007"+ + "\u0005\u0000\u0000\u0448\u0449\u0007\n\u0000\u0000\u0449\u044a\u0007\u0006"+ + "\u0000\u0000\u044a\u00b6\u0001\u0000\u0000\u0000\u044b\u044c\u0007\u0004"+ + "\u0000\u0000\u044c\u044d\u0007\u0002\u0000\u0000\u044d\u00b8\u0001\u0000"+ + "\u0000\u0000\u044e\u044f\u0007\u0004\u0000\u0000\u044f\u0450\u0007\u0002"+ + "\u0000\u0000\u0450\u0451\u0007\u0005\u0000\u0000\u0451\u00ba\u0001\u0000"+ + "\u0000\u0000\u0452\u0453\u0007\u0004\u0000\u0000\u0453\u0454\u0007\u0003"+ + "\u0000\u0000\u0454\u0455\u0007\t\u0000\u0000\u0455\u0456\u0007\t\u0000"+ + 
"\u0000\u0456\u0457\u0007\u000e\u0000\u0000\u0457\u00bc\u0001\u0000\u0000"+ + "\u0000\u0458\u0459\u0007\u0002\u0000\u0000\u0459\u045a\u0007\u0010\u0000"+ + "\u0000\u045a\u045b\u0007\u0010\u0000\u0000\u045b\u045c\u0007\u000e\u0000"+ + "\u0000\u045c\u045d\u0007\n\u0000\u0000\u045d\u045e\u0007\u0005\u0000\u0000"+ + "\u045e\u00be\u0001\u0000\u0000\u0000\u045f\u0460\u0007\u0002\u0000\u0000"+ + "\u0460\u0461\u0007\u0010\u0000\u0000\u0461\u00c0\u0001\u0000\u0000\u0000"+ + "\u0462\u0463\u0007\u0002\u0000\u0000\u0463\u0464\u0007\u0004\u0000\u0000"+ + "\u0464\u00c2\u0001\u0000\u0000\u0000\u0465\u0466\u0007\u0002\u0000\u0000"+ + "\u0466\u0467\u0007\u0004\u0000\u0000\u0467\u0468\u0007\t\u0000\u0000\u0468"+ + "\u0469\u0007\r\u0000\u0000\u0469\u00c4\u0001\u0000\u0000\u0000\u046a\u046b"+ + "\u0007\u0002\u0000\u0000\u046b\u046c\u0007\u000b\u0000\u0000\u046c\u00c6"+ + "\u0001\u0000\u0000\u0000\u046d\u046e\u0007\u0002\u0000\u0000\u046e\u046f"+ + "\u0007\u000b\u0000\u0000\u046f\u0470\u0007\u0006\u0000\u0000\u0470\u0471"+ + "\u0007\n\u0000\u0000\u0471\u0472\u0007\u000b\u0000\u0000\u0472\u00c8\u0001"+ + "\u0000\u0000\u0000\u0473\u0474\u0007\u0002\u0000\u0000\u0474\u0475\u0007"+ + "\u0003\u0000\u0000\u0475\u0476\u0007\u0005\u0000\u0000\u0476\u0477\u0007"+ + "\n\u0000\u0000\u0477\u0478\u0007\u000b\u0000\u0000\u0478\u00ca\u0001\u0000"+ + "\u0000\u0000\u0479\u047a\u0007\u0002\u0000\u0000\u047a\u047b\u0007\u0018"+ + "\u0000\u0000\u047b\u047c\u0007\n\u0000\u0000\u047c\u047d\u0007\u000b\u0000"+ + "\u0000\u047d\u047e\u0007\u000b\u0000\u0000\u047e\u047f\u0007\b\u0000\u0000"+ + "\u047f\u0480\u0007\u0006\u0000\u0000\u0480\u0481\u0007\n\u0000\u0000\u0481"+ + "\u00cc\u0001\u0000\u0000\u0000\u0482\u0483\u0007\u0012\u0000\u0000\u0483"+ + "\u0484\u0007\u0000\u0000\u0000\u0484\u0485\u0007\u000e\u0000\u0000\u0485"+ + "\u0486\u0007\u000e\u0000\u0000\u0486\u0487\u0007\f\u0000\u0000\u0487\u0488"+ + "\u0007\u0002\u0000\u0000\u0488\u0489\u0007\u000b\u0000\u0000\u0489\u048a"+ + "\u0007\u0006\u0000\u0000\u048a\u00ce\u0001\u0000\u0000\u0000\u048b\u048c"+ + "\u0007\u0012\u0000\u0000\u048c\u048d\u0007\u0000\u0000\u0000\u048d\u048e"+ + "\u0007\u0005\u0000\u0000\u048e\u048f\u0007\u0001\u0000\u0000\u048f\u0490"+ + "\u0007\u0011\u0000\u0000\u0490\u00d0\u0001\u0000\u0000\u0000\u0491\u0492"+ + "\u0007\u0012\u0000\u0000\u0492\u0493\u0007\n\u0000\u0000\u0493\u0494\u0007"+ + "\u000b\u0000\u0000\u0494\u00d2\u0001\u0000\u0000\u0000\u0495\u0496\u0003"+ + "\u01c9\u00e4\u0000\u0496\u0497\u0003\u01bd\u00de\u0000\u0497\u0498\u0003"+ + "\u0087C\u0000\u0498\u00d4\u0001\u0000\u0000\u0000\u0499\u049a\u0003\u01c9"+ + "\u00e4\u0000\u049a\u049b\u0003\u01bd\u00de\u0000\u049b\u049c\u0003\u00d7"+ + "k\u0000\u049c\u049d\u0003\u01bd\u00de\u0000\u049d\u049e\u0003\u0085B\u0000"+ + "\u049e\u00d6\u0001\u0000\u0000\u0000\u049f\u04a0\u0007\u0012\u0000\u0000"+ + "\u04a0\u04a1\u0007\u000b\u0000\u0000\u04a1\u04a2\u0007\b\u0000\u0000\u04a2"+ + "\u04a3\u0007\u0007\u0000\u0000\u04a3\u04a4\u0007\u0000\u0000\u0000\u04a4"+ + "\u04a5\u0007\u000b\u0000\u0000\u04a5\u04a6\u0007\r\u0000\u0000\u04a6\u00d8"+ + "\u0001\u0000\u0000\u0000\u04a7\u04a8\u0007\u0012\u0000\u0000\u04a8\u04a9"+ + "\u0007\u0003\u0000\u0000\u04a9\u04aa\u0007\u0005\u0000\u0000\u04aa\u00da"+ + "\u0001\u0000\u0000\u0000\u04ab\u04ac\u0007\u000b\u0000\u0000\u04ac\u04ad"+ + "\u0007\n\u0000\u0000\u04ad\u04ae\u0007\u0015\u0000\u0000\u04ae\u04af\u0007"+ + "\b\u0000\u0000\u04af\u04b0\u0007\u0002\u0000\u0000\u04b0\u04b1\u0007\u0004"+ + "\u0000\u0000\u04b1\u00dc\u0001\u0000\u0000\u0000\u04b2\u04b3\u0007\u000b"+ + 
"\u0000\u0000\u04b3\u04b4\u0007\n\u0000\u0000\u04b4\u04b5\u0007\u0015\u0000"+ + "\u0000\u04b5\u04b6\u0007\b\u0000\u0000\u04b6\u04b7\u0007\u0002\u0000\u0000"+ + "\u04b7\u04b8\u0007\u0004\u0000\u0000\u04b8\u04b9\u0007\u000e\u0000\u0000"+ + "\u04b9\u00de\u0001\u0000\u0000\u0000\u04ba\u04bb\u0007\u000b\u0000\u0000"+ + "\u04bb\u04bc\u0007\n\u0000\u0000\u04bc\u04bd\u0007\u0007\u0000\u0000\u04bd"+ + "\u04be\u0007\u0002\u0000\u0000\u04be\u04bf\u0007\u0018\u0000\u0000\u04bf"+ + "\u04c0\u0007\n\u0000\u0000\u04c0\u00e0\u0001\u0000\u0000\u0000\u04c1\u04c2"+ + "\u0007\u000b\u0000\u0000\u04c2\u04c3\u0007\n\u0000\u0000\u04c3\u04c4\u0007"+ + "\u0005\u0000\u0000\u04c4\u04c5\u0007\u0003\u0000\u0000\u04c5\u04c6\u0007"+ + "\u000b\u0000\u0000\u04c6\u04c7\u0007\u0004\u0000\u0000\u04c7\u04c8\u0007"+ + "\b\u0000\u0000\u04c8\u04c9\u0007\u0004\u0000\u0000\u04c9\u04ca\u0007\u0015"+ + "\u0000\u0000\u04ca\u00e2\u0001\u0000\u0000\u0000\u04cb\u04cc\u0007\u000b"+ + "\u0000\u0000\u04cc\u04cd\u0007\n\u0000\u0000\u04cd\u04ce\u0007\u0018\u0000"+ + "\u0000\u04ce\u04cf\u0007\u0002\u0000\u0000\u04cf\u04d0\u0007\u0017\u0000"+ + "\u0000\u04d0\u04d1\u0007\n\u0000\u0000\u04d1\u00e4\u0001\u0000\u0000\u0000"+ + "\u04d2\u04d3\u0007\u000b\u0000\u0000\u04d3\u04d4\u0007\u0002\u0000\u0000"+ + "\u04d4\u04d5\u0007\t\u0000\u0000\u04d5\u04d6\u0007\n\u0000\u0000\u04d6"+ + "\u00e6\u0001\u0000\u0000\u0000\u04d7\u04d8\u0007\u000b\u0000\u0000\u04d8"+ + "\u04d9\u0007\u0002\u0000\u0000\u04d9\u04da\u0007\t\u0000\u0000\u04da\u04db"+ + "\u0007\n\u0000\u0000\u04db\u04dc\u0007\u000e\u0000\u0000\u04dc\u00e8\u0001"+ + "\u0000\u0000\u0000\u04dd\u04de\u0007\u000b\u0000\u0000\u04de\u04df\u0007"+ + "\u0002\u0000\u0000\u04df\u04e0\u0007\f\u0000\u0000\u04e0\u00ea\u0001\u0000"+ + "\u0000\u0000\u04e1\u04e2\u0007\u000e\u0000\u0000\u04e2\u04e3\u0007\u0001"+ + "\u0000\u0000\u04e3\u04e4\u0007\u0011\u0000\u0000\u04e4\u04e5\u0007\n\u0000"+ + "\u0000\u04e5\u04e6\u0007\u0007\u0000\u0000\u04e6\u04e7\u0007\u0000\u0000"+ + "\u0000\u04e7\u00ec\u0001\u0000\u0000\u0000\u04e8\u04f1\u0007\u000e\u0000"+ + "\u0000\u04e9\u04ea\u0007\u000e\u0000\u0000\u04ea\u04eb\u0007\n\u0000\u0000"+ + "\u04eb\u04ec\u0007\u0001\u0000\u0000\u04ec\u04ed\u0007\u0002\u0000\u0000"+ + "\u04ed\u04ee\u0007\u0004\u0000\u0000\u04ee\u04ef\u0007\u0006\u0000\u0000"+ + "\u04ef\u04f1\u0007\u000e\u0000\u0000\u04f0\u04e8\u0001\u0000\u0000\u0000"+ + "\u04f0\u04e9\u0001\u0000\u0000\u0000\u04f1\u00ee\u0001\u0000\u0000\u0000"+ + "\u04f2\u04f3\u0007\u000e\u0000\u0000\u04f3\u04f4\u0007\n\u0000\u0000\u04f4"+ + "\u04f5\u0007\t\u0000\u0000\u04f5\u04f6\u0007\n\u0000\u0000\u04f6\u04f7"+ + "\u0007\u0001\u0000\u0000\u04f7\u04f8\u0007\u0005\u0000\u0000\u04f8\u00f0"+ + "\u0001\u0000\u0000\u0000\u04f9\u04fa\u0005s\u0000\u0000\u04fa\u04fb\u0005"+ + "e\u0000\u0000\u04fb\u04fc\u0005q\u0000\u0000\u04fc\u04fd\u0005_\u0000"+ + "\u0000\u04fd\u04fe\u0005t\u0000\u0000\u04fe\u04ff\u0005r\u0000\u0000\u04ff"+ + "\u0500\u0005a\u0000\u0000\u0500\u0501\u0005n\u0000\u0000\u0501\u0502\u0005"+ + "s\u0000\u0000\u0502\u0503\u0005f\u0000\u0000\u0503\u0504\u0005o\u0000"+ + "\u0000\u0504\u0505\u0005r\u0000\u0000\u0505\u0506\u0005m\u0000\u0000\u0506"+ + "\u00f2\u0001\u0000\u0000\u0000\u0507\u0508\u0007\u000e\u0000\u0000\u0508"+ + "\u0509\u0007\n\u0000\u0000\u0509\u050a\u0007\u0005\u0000\u0000\u050a\u00f4"+ + "\u0001\u0000\u0000\u0000\u050b\u050c\u0007\u000e\u0000\u0000\u050c\u050d"+ + "\u0007\u0011\u0000\u0000\u050d\u050e\u0007\u0000\u0000\u0000\u050e\u050f"+ + "\u0007\u000b\u0000\u0000\u050f\u0510\u0007\u0006\u0000\u0000\u0510\u00f6"+ + 
"\u0001\u0000\u0000\u0000\u0511\u0512\u0007\u000e\u0000\u0000\u0512\u0513"+ + "\u0007\u0011\u0000\u0000\u0513\u0514\u0007\u0002\u0000\u0000\u0514\u0515"+ + "\u0007\f\u0000\u0000\u0515\u00f8\u0001\u0000\u0000\u0000\u0516\u0517\u0007"+ + "\u000e\u0000\u0000\u0517\u0518\u0007\u0005\u0000\u0000\u0518\u0519\u0007"+ + "\u0000\u0000\u0000\u0519\u051a\u0007\u000b\u0000\u0000\u051a\u051b\u0007"+ + "\u0005\u0000\u0000\u051b\u00fa\u0001\u0000\u0000\u0000\u051c\u051d\u0007"+ + "\u0005\u0000\u0000\u051d\u051e\u0007\u0000\u0000\u0000\u051e\u051f\u0007"+ + "\u000f\u0000\u0000\u051f\u0520\u0007\t\u0000\u0000\u0520\u0521\u0007\n"+ + "\u0000\u0000\u0521\u00fc\u0001\u0000\u0000\u0000\u0522\u0523\u0007\u0005"+ + "\u0000\u0000\u0523\u0524\u0007\u0000\u0000\u0000\u0524\u0525\u0007\u000f"+ + "\u0000\u0000\u0525\u0526\u0007\t\u0000\u0000\u0526\u0527\u0007\n\u0000"+ + "\u0000\u0527\u0528\u0007\u000e\u0000\u0000\u0528\u00fe\u0001\u0000\u0000"+ + "\u0000\u0529\u052a\u0007\u0005\u0000\u0000\u052a\u052b\u0007\u0011\u0000"+ + "\u0000\u052b\u052c\u0007\n\u0000\u0000\u052c\u052d\u0007\u0004\u0000\u0000"+ + "\u052d\u0100\u0001\u0000\u0000\u0000\u052e\u052f\u0007\u0005\u0000\u0000"+ + "\u052f\u0530\u0007\u0002\u0000\u0000\u0530\u0102\u0001\u0000\u0000\u0000"+ + "\u0531\u0532\u0007\u0005\u0000\u0000\u0532\u0533\u0007\u0005\u0000\u0000"+ + "\u0533\u0534\u0007\t\u0000\u0000\u0534\u0104\u0001\u0000\u0000\u0000\u0535"+ + "\u0536\u0007\u0005\u0000\u0000\u0536\u0537\u0007\r\u0000\u0000\u0537\u0538"+ + "\u0007\u0012\u0000\u0000\u0538\u0539\u0007\n\u0000\u0000\u0539\u0106\u0001"+ + "\u0000\u0000\u0000\u053a\u053b\u0007\u0003\u0000\u0000\u053b\u053c\u0007"+ + "\u0004\u0000\u0000\u053c\u053d\u0007\u0010\u0000\u0000\u053d\u053e\u0007"+ + "\u000b\u0000\u0000\u053e\u053f\u0007\n\u0000\u0000\u053f\u0540\u0007\n"+ + "\u0000\u0000\u0540\u0541\u0007\u0014\u0000\u0000\u0541\u0542\u0007\n\u0000"+ + "\u0000\u0542\u0108\u0001\u0000\u0000\u0000\u0543\u0544\u0007\u0003\u0000"+ + "\u0000\u0544\u0545\u0007\u0004\u0000\u0000\u0545\u0546\u0007\t\u0000\u0000"+ + "\u0546\u0547\u0007\u0002\u0000\u0000\u0547\u0548\u0007\u0001\u0000\u0000"+ + "\u0548\u0549\u0007\u0017\u0000\u0000\u0549\u010a\u0001\u0000\u0000\u0000"+ + "\u054a\u054b\u0007\u0003\u0000\u0000\u054b\u054c\u0007\u0012\u0000\u0000"+ + "\u054c\u054d\u0007\u0006\u0000\u0000\u054d\u054e\u0007\u0000\u0000\u0000"+ + "\u054e\u054f\u0007\u0005\u0000\u0000\u054f\u0550\u0007\n\u0000\u0000\u0550"+ + "\u010c\u0001\u0000\u0000\u0000\u0551\u0552\u0007\u0003\u0000\u0000\u0552"+ + "\u0553\u0007\u0012\u0000\u0000\u0553\u0554\u0007\u000e\u0000\u0000\u0554"+ + "\u0555\u0007\n\u0000\u0000\u0555\u0556\u0007\u000b\u0000\u0000\u0556\u0557"+ + "\u0007\u0005\u0000\u0000\u0557\u010e\u0001\u0000\u0000\u0000\u0558\u0559"+ + "\u0007\u0003\u0000\u0000\u0559\u055a\u0007\u000e\u0000\u0000\u055a\u055b"+ + "\u0007\n\u0000\u0000\u055b\u055c\u0007\u000b\u0000\u0000\u055c\u0110\u0001"+ + "\u0000\u0000\u0000\u055d\u055e\u0007\u0003\u0000\u0000\u055e\u055f\u0007"+ + "\u000e\u0000\u0000\u055f\u0560\u0007\n\u0000\u0000\u0560\u0561\u0007\u000b"+ + "\u0000\u0000\u0561\u0562\u0007\u000e\u0000\u0000\u0562\u0112\u0001\u0000"+ + "\u0000\u0000\u0563\u0564\u0007\u0003\u0000\u0000\u0564\u0565\u0007\u000e"+ + "\u0000\u0000\u0565\u0566\u0007\b\u0000\u0000\u0566\u0567\u0007\u0004\u0000"+ + "\u0000\u0567\u0568\u0007\u0015\u0000\u0000\u0568\u0114\u0001\u0000\u0000"+ + "\u0000\u0569\u056a\u0007\u0018\u0000\u0000\u056a\u056b\u0007\u0000\u0000"+ + "\u0000\u056b\u056c\u0007\t\u0000\u0000\u056c\u056d\u0007\u0003\u0000\u0000"+ + 
"\u056d\u056e\u0007\n\u0000\u0000\u056e\u056f\u0007\u000e\u0000\u0000\u056f"+ + "\u0116\u0001\u0000\u0000\u0000\u0570\u0571\u0007\f\u0000\u0000\u0571\u0572"+ + "\u0007\u0011\u0000\u0000\u0572\u0573\u0007\n\u0000\u0000\u0573\u0574\u0007"+ + "\u0004\u0000\u0000\u0574\u0118\u0001\u0000\u0000\u0000\u0575\u0576\u0007"+ + "\f\u0000\u0000\u0576\u0577\u0007\u0011\u0000\u0000\u0577\u0578\u0007\n"+ + "\u0000\u0000\u0578\u0579\u0007\u000b\u0000\u0000\u0579\u057a\u0007\n\u0000"+ + "\u0000\u057a\u011a\u0001\u0000\u0000\u0000\u057b\u057c\u0007\f\u0000\u0000"+ + "\u057c\u057d\u0007\b\u0000\u0000\u057d\u057e\u0007\u0005\u0000\u0000\u057e"+ + "\u057f\u0007\u0011\u0000\u0000\u057f\u011c\u0001\u0000\u0000\u0000\u0580"+ + "\u0581\u0007\u0003\u0000\u0000\u0581\u0582\u0007\u0004\u0000\u0000\u0582"+ + "\u0583\u0007\b\u0000\u0000\u0583\u0584\u0007\u0019\u0000\u0000\u0584\u0585"+ + "\u0007\u0003\u0000\u0000\u0585\u0586\u0007\n\u0000\u0000\u0586\u011e\u0001"+ + "\u0000\u0000\u0000\u0587\u0588\u0007\u0003\u0000\u0000\u0588\u0589\u0007"+ + "\u0004\u0000\u0000\u0589\u058a\u0007\u0004\u0000\u0000\u058a\u058b\u0007"+ + "\n\u0000\u0000\u058b\u058c\u0007\u000e\u0000\u0000\u058c\u058d\u0007\u0005"+ + "\u0000\u0000\u058d\u0120\u0001\u0000\u0000\u0000\u058e\u058f\u0007\u0003"+ + "\u0000\u0000\u058f\u0590\u0007\u0003\u0000\u0000\u0590\u0591\u0007\b\u0000"+ + "\u0000\u0591\u0592\u0007\u0006\u0000\u0000\u0592\u0122\u0001\u0000\u0000"+ + "\u0000\u0593\u0595\u0003\u0011\b\u0000\u0594\u0596\u0003\u01a9\u00d4\u0000"+ + "\u0595\u0594\u0001\u0000\u0000\u0000\u0596\u0597\u0001\u0000\u0000\u0000"+ + "\u0597\u0595\u0001\u0000\u0000\u0000\u0597\u0598\u0001\u0000\u0000\u0000"+ + "\u0598\u0599\u0001\u0000\u0000\u0000\u0599\u059a\u0003\u01cb\u00e5\u0000"+ + "\u059a\u0124\u0001\u0000\u0000\u0000\u059b\u059d\u0003y<\u0000\u059c\u059e"+ + "\u0003\u01a9\u00d4\u0000\u059d\u059c\u0001\u0000\u0000\u0000\u059e\u059f"+ + "\u0001\u0000\u0000\u0000\u059f\u059d\u0001\u0000\u0000\u0000\u059f\u05a0"+ + "\u0001\u0000\u0000\u0000\u05a0\u05a1\u0001\u0000\u0000\u0000\u05a1\u05a2"+ + "\u0003\u01c7\u00e3\u0000\u05a2\u0126\u0001\u0000\u0000\u0000\u05a3\u05a5"+ + "\u0003\u00cdf\u0000\u05a4\u05a6\u0003\u01a9\u00d4\u0000\u05a5\u05a4\u0001"+ + "\u0000\u0000\u0000\u05a6\u05a7\u0001\u0000\u0000\u0000\u05a7\u05a5\u0001"+ + "\u0000\u0000\u0000\u05a7\u05a8\u0001\u0000\u0000\u0000\u05a8\u05a9\u0001"+ + "\u0000\u0000\u0000\u05a9\u05aa\u0003\u01c5\u00e2\u0000\u05aa\u0128\u0001"+ + "\u0000\u0000\u0000\u05ab\u05ad\u0003\u01cd\u00e6\u0000\u05ac\u05ae\u0003"+ + "\u01a9\u00d4\u0000\u05ad\u05ac\u0001\u0000\u0000\u0000\u05ae\u05af\u0001"+ + "\u0000\u0000\u0000\u05af\u05ad\u0001\u0000\u0000\u0000\u05af\u05b0\u0001"+ + "\u0000\u0000\u0000\u05b0\u05b1\u0001\u0000\u0000\u0000\u05b1\u05b3\u0003"+ + "\u01c3\u00e1\u0000\u05b2\u05b4\u0003\u01a9\u00d4\u0000\u05b3\u05b2\u0001"+ + "\u0000\u0000\u0000\u05b4\u05b5\u0001\u0000\u0000\u0000\u05b5\u05b3\u0001"+ + "\u0000\u0000\u0000\u05b5\u05b6\u0001\u0000\u0000\u0000\u05b6\u05b7\u0001"+ + "\u0000\u0000\u0000\u05b7\u05b8\u0003\u00cdf\u0000\u05b8\u012a\u0001\u0000"+ + "\u0000\u0000\u05b9\u05bb\u0003\u01c1\u00e0\u0000\u05ba\u05bc\u0003\u01a9"+ + "\u00d4\u0000\u05bb\u05ba\u0001\u0000\u0000\u0000\u05bc\u05bd\u0001\u0000"+ + "\u0000\u0000\u05bd\u05bb\u0001\u0000\u0000\u0000\u05bd\u05be\u0001\u0000"+ + "\u0000\u0000\u05be\u05bf\u0001\u0000\u0000\u0000\u05bf\u05c1\u0003\u01cf"+ + "\u00e7\u0000\u05c0\u05c2\u0003\u01a9\u00d4\u0000\u05c1\u05c0\u0001\u0000"+ + "\u0000\u0000\u05c2\u05c3\u0001\u0000\u0000\u0000\u05c3\u05c1\u0001\u0000"+ + 
"\u0000\u0000\u05c3\u05c4\u0001\u0000\u0000\u0000\u05c4\u05c5\u0001\u0000"+ + "\u0000\u0000\u05c5\u05c6\u0003\u00cdf\u0000\u05c6\u012c\u0001\u0000\u0000"+ + "\u0000\u05c7\u05c9\u0003\u009bM\u0000\u05c8\u05ca\u0003\u01a9\u00d4\u0000"+ + "\u05c9\u05c8\u0001\u0000\u0000\u0000\u05ca\u05cb\u0001\u0000\u0000\u0000"+ + "\u05cb\u05c9\u0001\u0000\u0000\u0000\u05cb\u05cc\u0001\u0000\u0000\u0000"+ + "\u05cc\u05cd\u0001\u0000\u0000\u0000\u05cd\u05cf\u0003\u00c9d\u0000\u05ce"+ + "\u05d0\u0003\u01a9\u00d4\u0000\u05cf\u05ce\u0001\u0000\u0000\u0000\u05d0"+ + "\u05d1\u0001\u0000\u0000\u0000\u05d1\u05cf\u0001\u0000\u0000\u0000\u05d1"+ + "\u05d2\u0001\u0000\u0000\u0000\u05d2\u05d3\u0001\u0000\u0000\u0000\u05d3"+ + "\u05d4\u0003\u0091H\u0000\u05d4\u012e\u0001\u0000\u0000\u0000\u05d5\u05d6"+ + "\u0007\u0000\u0000\u0000\u05d6\u05d7\u0007\u000b\u0000\u0000\u05d7\u05d8"+ + "\u0007\u000b\u0000\u0000\u05d8\u05d9\u0007\u0000\u0000\u0000\u05d9\u05da"+ + "\u0007\r\u0000\u0000\u05da\u0130\u0001\u0000\u0000\u0000\u05db\u05dc\u0007"+ + "\u000f\u0000\u0000\u05dc\u05dd\u0007\b\u0000\u0000\u05dd\u05de\u0007\u0004"+ + "\u0000\u0000\u05de\u05df\u0007\u0000\u0000\u0000\u05df\u05e0\u0007\u000b"+ + "\u0000\u0000\u05e0\u05e1\u0007\r\u0000\u0000\u05e1\u0132\u0001\u0000\u0000"+ + "\u0000\u05e2\u05e3\u0007\u000f\u0000\u0000\u05e3\u05e4\u0007\u0002\u0000"+ + "\u0000\u05e4\u05e5\u0007\u0002\u0000\u0000\u05e5\u05e6\u0007\t\u0000\u0000"+ + "\u05e6\u05e7\u0007\n\u0000\u0000\u05e7\u05e8\u0007\u0000\u0000\u0000\u05e8"+ + "\u05e9\u0007\u0004\u0000\u0000\u05e9\u0134\u0001\u0000\u0000\u0000\u05ea"+ + "\u05eb\u0007\u0006\u0000\u0000\u05eb\u05ec\u0007\u0002\u0000\u0000\u05ec"+ + "\u05ed\u0007\u0003\u0000\u0000\u05ed\u05ee\u0007\u000f\u0000\u0000\u05ee"+ + "\u05ef\u0007\t\u0000\u0000\u05ef\u05f0\u0007\n\u0000\u0000\u05f0\u0136"+ + "\u0001\u0000\u0000\u0000\u05f1\u05f2\u0007\n\u0000\u0000\u05f2\u05f3\u0007"+ + "\u0004\u0000\u0000\u05f3\u05f4\u0007\u0003\u0000\u0000\u05f4\u05f5\u0007"+ + "\u0007\u0000\u0000\u05f5\u0138\u0001\u0000\u0000\u0000\u05f6\u05f7\u0007"+ + "\u0010\u0000\u0000\u05f7\u05f8\u0007\t\u0000\u0000\u05f8\u05f9\u0007\u0002"+ + "\u0000\u0000\u05f9\u05fa\u0007\u0000\u0000\u0000\u05fa\u05fb\u0007\u0005"+ + "\u0000\u0000\u05fb\u013a\u0001\u0000\u0000\u0000\u05fc\u05fd\u0007\u0015"+ + "\u0000\u0000\u05fd\u05fe\u0007\n\u0000\u0000\u05fe\u05ff\u0007\u0002\u0000"+ + "\u0000\u05ff\u0600\u0007\u0007\u0000\u0000\u0600\u0601\u0007\n\u0000\u0000"+ + "\u0601\u0602\u0007\u0005\u0000\u0000\u0602\u0603\u0007\u000b\u0000\u0000"+ + "\u0603\u0604\u0007\r\u0000\u0000\u0604\u013c\u0001\u0000\u0000\u0000\u0605"+ + "\u0606\u0007\b\u0000\u0000\u0606\u0607\u0007\u0004\u0000\u0000\u0607\u0608"+ + "\u0007\u0005\u0000\u0000\u0608\u0609\u0007\n\u0000\u0000\u0609\u060a\u0007"+ + "\u0015\u0000\u0000\u060a\u060b\u0007\n\u0000\u0000\u060b\u060c\u0007\u000b"+ + "\u0000\u0000\u060c\u013e\u0001\u0000\u0000\u0000\u060d\u060e\u0007\t\u0000"+ + "\u0000\u060e\u060f\u0007\u0002\u0000\u0000\u060f\u0610\u0007\u0004\u0000"+ + "\u0000\u0610\u0611\u0007\u0015\u0000\u0000\u0611\u0140\u0001\u0000\u0000"+ + "\u0000\u0612\u0613\u0007\u0007\u0000\u0000\u0613\u0614\u0007\u0000\u0000"+ + "\u0000\u0614\u0615\u0007\u0012\u0000\u0000\u0615\u0142\u0001\u0000\u0000"+ + "\u0000\u0616\u0617\u0007\u0004\u0000\u0000\u0617\u0618\u0007\u0003\u0000"+ + "\u0000\u0618\u0619\u0007\u0007\u0000\u0000\u0619\u061a\u0007\u000f\u0000"+ + "\u0000\u061a\u061b\u0007\n\u0000\u0000\u061b\u061c\u0007\u000b\u0000\u0000"+ + "\u061c\u0144\u0001\u0000\u0000\u0000\u061d\u061e\u0007\u0012\u0000\u0000"+ + 
"\u061e\u061f\u0007\u0002\u0000\u0000\u061f\u0620\u0007\b\u0000\u0000\u0620"+ + "\u0621\u0007\u0004\u0000\u0000\u0621\u0622\u0007\u0005\u0000\u0000\u0622"+ + "\u0146\u0001\u0000\u0000\u0000\u0623\u0624\u0007\u000b\u0000\u0000\u0624"+ + "\u0625\u0007\n\u0000\u0000\u0625\u0626\u0007\u0001\u0000\u0000\u0626\u0627"+ + "\u0007\u0002\u0000\u0000\u0627\u0628\u0007\u000b\u0000\u0000\u0628\u0629"+ + "\u0007\u0006\u0000\u0000\u0629\u0148\u0001\u0000\u0000\u0000\u062a\u062b"+ + "\u0007\u000e\u0000\u0000\u062b\u062c\u0007\u0005\u0000\u0000\u062c\u062d"+ + "\u0007\u000b\u0000\u0000\u062d\u062e\u0007\b\u0000\u0000\u062e\u062f\u0007"+ + "\u0004\u0000\u0000\u062f\u0630\u0007\u0015\u0000\u0000\u0630\u014a\u0001"+ + "\u0000\u0000\u0000\u0631\u0632\u0007\u0005\u0000\u0000\u0632\u0633\u0007"+ + "\b\u0000\u0000\u0633\u0634\u0007\u0007\u0000\u0000\u0634\u0635\u0007\n"+ + "\u0000\u0000\u0635\u0636\u0007\u000e\u0000\u0000\u0636\u0637\u0007\u0005"+ + "\u0000\u0000\u0637\u0638\u0007\u0000\u0000\u0000\u0638\u0639\u0007\u0007"+ + "\u0000\u0000\u0639\u063a\u0007\u0012\u0000\u0000\u063a\u014c\u0001\u0000"+ + "\u0000\u0000\u063b\u063c\u0007\u0000\u0000\u0000\u063c\u063d\u0007\u0004"+ + "\u0000\u0000\u063d\u063e\u0007\r\u0000\u0000\u063e\u014e\u0001\u0000\u0000"+ + "\u0000\u063f\u0640\u0007\u0000\u0000\u0000\u0640\u0641\u0007\u0004\u0000"+ + "\u0000\u0641\u0642\u0007\r\u0000\u0000\u0642\u0643\u0007\u0000\u0000\u0000"+ + "\u0643\u0644\u0007\u0005\u0000\u0000\u0644\u0645\u0007\u0002\u0000\u0000"+ + "\u0645\u0646\u0007\u0007\u0000\u0000\u0646\u0647\u0007\b\u0000\u0000\u0647"+ + "\u0648\u0007\u0001\u0000\u0000\u0648\u0150\u0001\u0000\u0000\u0000\u0649"+ + "\u064a\u0007\u0000\u0000\u0000\u064a\u064b\u0007\u0004\u0000\u0000\u064b"+ + "\u064c\u0007\r\u0000\u0000\u064c\u064d\u0007\u0016\u0000\u0000\u064d\u064e"+ + "\u0007\u000e\u0000\u0000\u064e\u064f\u0007\u0002\u0000\u0000\u064f\u0650"+ + "\u0007\u0004\u0000\u0000\u0650\u0651\u0007\u0000\u0000\u0000\u0651\u0652"+ + "\u0007\u0005\u0000\u0000\u0652\u0653\u0007\u0002\u0000\u0000\u0653\u0654"+ + "\u0007\u0007\u0000\u0000\u0654\u0655\u0007\b\u0000\u0000\u0655\u0656\u0007"+ + "\u0001\u0000\u0000\u0656\u0152\u0001\u0000\u0000\u0000\u0657\u0658\u0007"+ + "\u0000\u0000\u0000\u0658\u0659\u0007\u0004\u0000\u0000\u0659\u065a\u0007"+ + "\r\u0000\u0000\u065a\u065b\u0007\u000b\u0000\u0000\u065b\u065c\u0007\n"+ + "\u0000\u0000\u065c\u065d\u0007\u0001\u0000\u0000\u065d\u065e\u0007\u0002"+ + "\u0000\u0000\u065e\u065f\u0007\u000b\u0000\u0000\u065f\u0660\u0007\u0006"+ + "\u0000\u0000\u0660\u0154\u0001\u0000\u0000\u0000\u0661\u0662\u0007\u000e"+ + "\u0000\u0000\u0662\u0663\u0007\u0001\u0000\u0000\u0663\u0664\u0007\u0000"+ + "\u0000\u0000\u0664\u0665\u0007\t\u0000\u0000\u0665\u0666\u0007\u0000\u0000"+ + "\u0000\u0666\u0667\u0007\u000b\u0000\u0000\u0667\u0156\u0001\u0000\u0000"+ + "\u0000\u0668\u0669\u0005;\u0000\u0000\u0669\u0158\u0001\u0000\u0000\u0000"+ + "\u066a\u066b\u0005,\u0000\u0000\u066b\u015a\u0001\u0000\u0000\u0000\u066c"+ + "\u066d\u0005:\u0000\u0000\u066d\u015c\u0001\u0000\u0000\u0000\u066e\u066f"+ + "\u0005(\u0000\u0000\u066f\u015e\u0001\u0000\u0000\u0000\u0670\u0671\u0005"+ + ")\u0000\u0000\u0671\u0160\u0001\u0000\u0000\u0000\u0672\u0673\u0005[\u0000"+ + "\u0000\u0673\u0162\u0001\u0000\u0000\u0000\u0674\u0675\u0005]\u0000\u0000"+ + "\u0675\u0164\u0001\u0000\u0000\u0000\u0676\u0677\u0005{\u0000\u0000\u0677"+ + "\u0166\u0001\u0000\u0000\u0000\u0678\u0679\u0005}\u0000\u0000\u0679\u0168"+ + "\u0001\u0000\u0000\u0000\u067a\u067b\u0005*\u0000\u0000\u067b\u016a\u0001"+ + 
"\u0000\u0000\u0000\u067c\u067d\u0005.\u0000\u0000\u067d\u016c\u0001\u0000"+ + "\u0000\u0000\u067e\u067f\u0005$\u0000\u0000\u067f\u016e\u0001\u0000\u0000"+ + "\u0000\u0680\u0681\u0005?\u0000\u0000\u0681\u0170\u0001\u0000\u0000\u0000"+ + "\u0682\u0683\u0005<\u0000\u0000\u0683\u0172\u0001\u0000\u0000\u0000\u0684"+ + "\u0685\u0005<\u0000\u0000\u0685\u0686\u0005=\u0000\u0000\u0686\u0174\u0001"+ + "\u0000\u0000\u0000\u0687\u0688\u0005>\u0000\u0000\u0688\u0176\u0001\u0000"+ + "\u0000\u0000\u0689\u068a\u0005>\u0000\u0000\u068a\u068b\u0005=\u0000\u0000"+ + "\u068b\u0178\u0001\u0000\u0000\u0000\u068c\u068d\u0005=\u0000\u0000\u068d"+ + "\u017a\u0001\u0000\u0000\u0000\u068e\u068f\u0005!\u0000\u0000\u068f\u0690"+ + "\u0005=\u0000\u0000\u0690\u017c\u0001\u0000\u0000\u0000\u0691\u0692\u0005"+ + "<\u0000\u0000\u0692\u0693\u0007\u0000\u0000\u0000\u0693\u0694\u0007\u0004"+ + "\u0000\u0000\u0694\u0695\u0007\r\u0000\u0000\u0695\u017e\u0001\u0000\u0000"+ + "\u0000\u0696\u0697\u0005<\u0000\u0000\u0697\u0698\u0005=\u0000\u0000\u0698"+ + "\u0699\u0001\u0000\u0000\u0000\u0699\u069a\u0007\u0000\u0000\u0000\u069a"+ + "\u069b\u0007\u0004\u0000\u0000\u069b\u069c\u0007\r\u0000\u0000\u069c\u0180"+ + "\u0001\u0000\u0000\u0000\u069d\u069e\u0005>\u0000\u0000\u069e\u069f\u0007"+ + "\u0000\u0000\u0000\u069f\u06a0\u0007\u0004\u0000\u0000\u06a0\u06a1\u0007"+ + "\r\u0000\u0000\u06a1\u0182\u0001\u0000\u0000\u0000\u06a2\u06a3\u0005>"+ + "\u0000\u0000\u06a3\u06a4\u0005=\u0000\u0000\u06a4\u06a5\u0001\u0000\u0000"+ + "\u0000\u06a5\u06a6\u0007\u0000\u0000\u0000\u06a6\u06a7\u0007\u0004\u0000"+ + "\u0000\u06a7\u06a8\u0007\r\u0000\u0000\u06a8\u0184\u0001\u0000\u0000\u0000"+ + "\u06a9\u06aa\u0005=\u0000\u0000\u06aa\u06ab\u0007\u0000\u0000\u0000\u06ab"+ + "\u06ac\u0007\u0004\u0000\u0000\u06ac\u06ad\u0007\r\u0000\u0000\u06ad\u0186"+ + "\u0001\u0000\u0000\u0000\u06ae\u06af\u0005!\u0000\u0000\u06af\u06b0\u0005"+ + "=\u0000\u0000\u06b0\u06b1\u0001\u0000\u0000\u0000\u06b1\u06b2\u0007\u0000"+ + "\u0000\u0000\u06b2\u06b3\u0007\u0004\u0000\u0000\u06b3\u06b4\u0007\r\u0000"+ + "\u0000\u06b4\u0188\u0001\u0000\u0000\u0000\u06b5\u06b6\u0005+\u0000\u0000"+ + "\u06b6\u018a\u0001\u0000\u0000\u0000\u06b7\u06b8\u0005-\u0000\u0000\u06b8"+ + "\u018c\u0001\u0000\u0000\u0000\u06b9\u06ba\u0005/\u0000\u0000\u06ba\u018e"+ + "\u0001\u0000\u0000\u0000\u06bb\u06bc\u0007\u0006\u0000\u0000\u06bc\u06bd"+ + "\u0007\b\u0000\u0000\u06bd\u06be\u0007\u0018\u0000\u0000\u06be\u0190\u0001"+ + "\u0000\u0000\u0000\u06bf\u06c0\u0005|\u0000\u0000\u06c0\u06c1\u0005|\u0000"+ + "\u0000\u06c1\u0192\u0001\u0000\u0000\u0000\u06c2\u06c3\u0007\u0004\u0000"+ + "\u0000\u06c3\u06c4\u0007\u0003\u0000\u0000\u06c4\u06c5\u0007\t\u0000\u0000"+ + "\u06c5\u06c6\u0007\t\u0000\u0000\u06c6\u0194\u0001\u0000\u0000\u0000\u06c7"+ + "\u06c8\u0007\u0010\u0000\u0000\u06c8\u06c9\u0007\u0000\u0000\u0000\u06c9"+ + "\u06ca\u0007\t\u0000\u0000\u06ca\u06cb\u0007\u000e\u0000\u0000\u06cb\u06cc"+ + "\u0007\n\u0000\u0000\u06cc\u0196\u0001\u0000\u0000\u0000\u06cd\u06ce\u0007"+ + "\u0005\u0000\u0000\u06ce\u06cf\u0007\u000b\u0000\u0000\u06cf\u06d0\u0007"+ + "\u0003\u0000\u0000\u06d0\u06d1\u0007\n\u0000\u0000\u06d1\u0198\u0001\u0000"+ + "\u0000\u0000\u06d2\u06d4\u0003\u01b5\u00da\u0000\u06d3\u06d2\u0001\u0000"+ + "\u0000\u0000\u06d4\u06d5\u0001\u0000\u0000\u0000\u06d5\u06d3\u0001\u0000"+ + "\u0000\u0000\u06d5\u06d6\u0001\u0000\u0000\u0000\u06d6\u019a\u0001\u0000"+ + "\u0000\u0000\u06d7\u06d9\u0003\u01b5\u00da\u0000\u06d8\u06d7\u0001\u0000"+ + "\u0000\u0000\u06d9\u06dc\u0001\u0000\u0000\u0000\u06da\u06d8\u0001\u0000"+ + 
"\u0000\u0000\u06da\u06db\u0001\u0000\u0000\u0000\u06db\u06dd\u0001\u0000"+ + "\u0000\u0000\u06dc\u06da\u0001\u0000\u0000\u0000\u06dd\u06df\u0005.\u0000"+ + "\u0000\u06de\u06e0\u0003\u01b5\u00da\u0000\u06df\u06de\u0001\u0000\u0000"+ + "\u0000\u06e0\u06e1\u0001\u0000\u0000\u0000\u06e1\u06df\u0001\u0000\u0000"+ + "\u0000\u06e1\u06e2\u0001\u0000\u0000\u0000\u06e2\u06ec\u0001\u0000\u0000"+ + "\u0000\u06e3\u06e5\u0007\n\u0000\u0000\u06e4\u06e6\u0007\u001a\u0000\u0000"+ + "\u06e5\u06e4\u0001\u0000\u0000\u0000\u06e5\u06e6\u0001\u0000\u0000\u0000"+ + "\u06e6\u06e8\u0001\u0000\u0000\u0000\u06e7\u06e9\u0003\u01b5\u00da\u0000"+ + "\u06e8\u06e7\u0001\u0000\u0000\u0000\u06e9\u06ea\u0001\u0000\u0000\u0000"+ + "\u06ea\u06e8\u0001\u0000\u0000\u0000\u06ea\u06eb\u0001\u0000\u0000\u0000"+ + "\u06eb\u06ed\u0001\u0000\u0000\u0000\u06ec\u06e3\u0001\u0000\u0000\u0000"+ + "\u06ec\u06ed\u0001\u0000\u0000\u0000\u06ed\u06fd\u0001\u0000\u0000\u0000"+ + "\u06ee\u06f0\u0003\u01b5\u00da\u0000\u06ef\u06ee\u0001\u0000\u0000\u0000"+ + "\u06f0\u06f1\u0001\u0000\u0000\u0000\u06f1\u06ef\u0001\u0000\u0000\u0000"+ + "\u06f1\u06f2\u0001\u0000\u0000\u0000\u06f2\u06f3\u0001\u0000\u0000\u0000"+ + "\u06f3\u06f5\u0007\n\u0000\u0000\u06f4\u06f6\u0007\u001a\u0000\u0000\u06f5"+ + "\u06f4\u0001\u0000\u0000\u0000\u06f5\u06f6\u0001\u0000\u0000\u0000\u06f6"+ + "\u06f8\u0001\u0000\u0000\u0000\u06f7\u06f9\u0003\u01b5\u00da\u0000\u06f8"+ + "\u06f7\u0001\u0000\u0000\u0000\u06f9\u06fa\u0001\u0000\u0000\u0000\u06fa"+ + "\u06f8\u0001\u0000\u0000\u0000\u06fa\u06fb\u0001\u0000\u0000\u0000\u06fb"+ + "\u06fd\u0001\u0000\u0000\u0000\u06fc\u06da\u0001\u0000\u0000\u0000\u06fc"+ + "\u06ef\u0001\u0000\u0000\u0000\u06fd\u019c\u0001\u0000\u0000\u0000\u06fe"+ + "\u0701\u0003\u0199\u00cc\u0000\u06ff\u0701\u0003\u019b\u00cd\u0000\u0700"+ + "\u06fe\u0001\u0000\u0000\u0000\u0700\u06ff\u0001\u0000\u0000\u0000\u0701"+ + "\u0702\u0001\u0000\u0000\u0000\u0702\u0703\u0007\u0004\u0000\u0000\u0703"+ + "\u019e\u0001\u0000\u0000\u0000\u0704\u0709\u0005\"\u0000\u0000\u0705\u0708"+ + "\u0003\u01b7\u00db\u0000\u0706\u0708\t\u0000\u0000\u0000\u0707\u0705\u0001"+ + "\u0000\u0000\u0000\u0707\u0706\u0001\u0000\u0000\u0000\u0708\u070b\u0001"+ + "\u0000\u0000\u0000\u0709\u070a\u0001\u0000\u0000\u0000\u0709\u0707\u0001"+ + "\u0000\u0000\u0000\u070a\u070c\u0001\u0000\u0000\u0000\u070b\u0709\u0001"+ + "\u0000\u0000\u0000\u070c\u070d\u0005\"\u0000\u0000\u070d\u01a0\u0001\u0000"+ + "\u0000\u0000\u070e\u0713\u0005\'\u0000\u0000\u070f\u0712\u0003\u01b9\u00dc"+ + "\u0000\u0710\u0712\t\u0000\u0000\u0000\u0711\u070f\u0001\u0000\u0000\u0000"+ + "\u0711\u0710\u0001\u0000\u0000\u0000\u0712\u0715\u0001\u0000\u0000\u0000"+ + "\u0713\u0714\u0001\u0000\u0000\u0000\u0713\u0711\u0001\u0000\u0000\u0000"+ + "\u0714\u0716\u0001\u0000\u0000\u0000\u0715\u0713\u0001\u0000\u0000\u0000"+ + "\u0716\u0717\u0005\'\u0000\u0000\u0717\u01a2\u0001\u0000\u0000\u0000\u0718"+ + "\u0719\u0007\u001b\u0000\u0000\u0719\u071a\u0007\u001c\u0000\u0000\u071a"+ + "\u071b\u0007\u001b\u0000\u0000\u071b\u071c\u0007\u001d\u0000\u0000\u071c"+ + "\u01a4\u0001\u0000\u0000\u0000\u071d\u0723\u0003\u01b3\u00d9\u0000\u071e"+ + "\u0722\u0003\u01b3\u00d9\u0000\u071f\u0722\u0003\u01b5\u00da\u0000\u0720"+ + "\u0722\u0003\u01bd\u00de\u0000\u0721\u071e\u0001\u0000\u0000\u0000\u0721"+ + "\u071f\u0001\u0000\u0000\u0000\u0721\u0720\u0001\u0000\u0000\u0000\u0722"+ + "\u0725\u0001\u0000\u0000\u0000\u0723\u0721\u0001\u0000\u0000\u0000\u0723"+ + "\u0724\u0001\u0000\u0000\u0000\u0724\u01a6\u0001\u0000\u0000\u0000\u0725"+ + 
"\u0723\u0001\u0000\u0000\u0000\u0726\u0729\u0003\u01b5\u00da\u0000\u0727"+ + "\u0729\u0003\u01bd\u00de\u0000\u0728\u0726\u0001\u0000\u0000\u0000\u0728"+ + "\u0727\u0001\u0000\u0000\u0000\u0729\u072f\u0001\u0000\u0000\u0000\u072a"+ + "\u072e\u0003\u01b3\u00d9\u0000\u072b\u072e\u0003\u01b5\u00da\u0000\u072c"+ + "\u072e\u0003\u01bd\u00de\u0000\u072d\u072a\u0001\u0000\u0000\u0000\u072d"+ + "\u072b\u0001\u0000\u0000\u0000\u072d\u072c\u0001\u0000\u0000\u0000\u072e"+ + "\u0731\u0001\u0000\u0000\u0000\u072f\u072d\u0001\u0000\u0000\u0000\u072f"+ + "\u0730\u0001\u0000\u0000\u0000\u0730\u01a8\u0001\u0000\u0000\u0000\u0731"+ + "\u072f\u0001\u0000\u0000\u0000\u0732\u0734\u0007\u001e\u0000\u0000\u0733"+ + "\u0732\u0001\u0000\u0000\u0000\u0734\u0735\u0001\u0000\u0000\u0000\u0735"+ + "\u0733\u0001\u0000\u0000\u0000\u0735\u0736\u0001\u0000\u0000\u0000\u0736"+ + "\u0737\u0001\u0000\u0000\u0000\u0737\u0738\u0006\u00d4\u0000\u0000\u0738"+ + "\u01aa\u0001\u0000\u0000\u0000\u0739\u073a\u0005/\u0000\u0000\u073a\u073b"+ + "\u0005*\u0000\u0000\u073b\u073c\u0001\u0000\u0000\u0000\u073c\u0740\b"+ + "\u001f\u0000\u0000\u073d\u073f\t\u0000\u0000\u0000\u073e\u073d\u0001\u0000"+ + "\u0000\u0000\u073f\u0742\u0001\u0000\u0000\u0000\u0740\u0741\u0001\u0000"+ + "\u0000\u0000\u0740\u073e\u0001\u0000\u0000\u0000\u0741\u0743\u0001\u0000"+ + "\u0000\u0000\u0742\u0740\u0001\u0000\u0000\u0000\u0743\u0744\u0005*\u0000"+ + "\u0000\u0744\u0745\u0005/\u0000\u0000\u0745\u0746\u0001\u0000\u0000\u0000"+ + "\u0746\u0747\u0006\u00d5\u0000\u0000\u0747\u01ac\u0001\u0000\u0000\u0000"+ + "\u0748\u0749\u0005/\u0000\u0000\u0749\u074a\u0005/\u0000\u0000\u074a\u074e"+ + "\u0001\u0000\u0000\u0000\u074b\u074d\b \u0000\u0000\u074c\u074b\u0001"+ + "\u0000\u0000\u0000\u074d\u0750\u0001\u0000\u0000\u0000\u074e\u074c\u0001"+ + "\u0000\u0000\u0000\u074e\u074f\u0001\u0000\u0000\u0000\u074f\u0751\u0001"+ + "\u0000\u0000\u0000\u0750\u074e\u0001\u0000\u0000\u0000\u0751\u0752\u0006"+ + "\u00d6\u0000\u0000\u0752\u01ae\u0001\u0000\u0000\u0000\u0753\u0757\u0005"+ + "#\u0000\u0000\u0754\u0756\b \u0000\u0000\u0755\u0754\u0001\u0000\u0000"+ + "\u0000\u0756\u0759\u0001\u0000\u0000\u0000\u0757\u0755\u0001\u0000\u0000"+ + "\u0000\u0757\u0758\u0001\u0000\u0000\u0000\u0758\u075a\u0001\u0000\u0000"+ + "\u0000\u0759\u0757\u0001\u0000\u0000\u0000\u075a\u075b\u0006\u00d7\u0000"+ + "\u0000\u075b\u01b0\u0001\u0000\u0000\u0000\u075c\u075d\t\u0000\u0000\u0000"+ + "\u075d\u01b2\u0001\u0000\u0000\u0000\u075e\u075f\u0007!\u0000\u0000\u075f"+ + "\u01b4\u0001\u0000\u0000\u0000\u0760\u0761\u000209\u0000\u0761\u01b6\u0001"+ + "\u0000\u0000\u0000\u0762\u0765\u0005\\\u0000\u0000\u0763\u0766\u0007\""+ + "\u0000\u0000\u0764\u0766\u0003\u01bf\u00df\u0000\u0765\u0763\u0001\u0000"+ + "\u0000\u0000\u0765\u0764\u0001\u0000\u0000\u0000\u0766\u01b8\u0001\u0000"+ + "\u0000\u0000\u0767\u076a\u0005\\\u0000\u0000\u0768\u076b\u0007#\u0000"+ + "\u0000\u0769\u076b\u0003\u01bf\u00df\u0000\u076a\u0768\u0001\u0000\u0000"+ + "\u0000\u076a\u0769\u0001\u0000\u0000\u0000\u076b\u01ba\u0001\u0000\u0000"+ + "\u0000\u076c\u076d\u0007$\u0000\u0000\u076d\u01bc\u0001\u0000\u0000\u0000"+ + "\u076e\u076f\u0005_\u0000\u0000\u076f\u01be\u0001\u0000\u0000\u0000\u0770"+ + "\u0771\u0005u\u0000\u0000\u0771\u0772\u0003\u01bb\u00dd\u0000\u0772\u0773"+ + "\u0003\u01bb\u00dd\u0000\u0773\u0774\u0003\u01bb\u00dd\u0000\u0774\u0775"+ + "\u0003\u01bb\u00dd\u0000\u0775\u01c0\u0001\u0000\u0000\u0000\u0776\u0777"+ + "\u0007\u0001\u0000\u0000\u0777\u0778\u0007\t\u0000\u0000\u0778\u0779\u0007"+ + 
"\n\u0000\u0000\u0779\u077a\u0007\u0000\u0000\u0000\u077a\u077b\u0007\u000b"+ + "\u0000\u0000\u077b\u01c2\u0001\u0000\u0000\u0000\u077c\u077d\u0007\u0001"+ + "\u0000\u0000\u077d\u077e\u0007\u0003\u0000\u0000\u077e\u077f\u0007\u000b"+ + "\u0000\u0000\u077f\u0780\u0007\u000b\u0000\u0000\u0780\u0781\u0007\n\u0000"+ + "\u0000\u0781\u0782\u0007\u0004\u0000\u0000\u0782\u0783\u0007\u0005\u0000"+ + "\u0000\u0783\u01c4\u0001\u0000\u0000\u0000\u0784\u0785\u0007\n\u0000\u0000"+ + "\u0785\u0786\u0007\u0013\u0000\u0000\u0786\u0787\u0007\u0012\u0000\u0000"+ + "\u0787\u0788\u0007\b\u0000\u0000\u0788\u0789\u0007\u000b\u0000\u0000\u0789"+ + "\u078a\u0007\n\u0000\u0000\u078a\u01c6\u0001\u0000\u0000\u0000\u078b\u078c"+ + "\u0007\n\u0000\u0000\u078c\u078d\u0007\u0013\u0000\u0000\u078d\u078e\u0007"+ + "\u0005\u0000\u0000\u078e\u078f\u0007\n\u0000\u0000\u078f\u0790\u0007\u000b"+ + "\u0000\u0000\u0790\u0791\u0007\u0004\u0000\u0000\u0791\u0792\u0007\u0000"+ + "\u0000\u0000\u0792\u0793\u0007\t\u0000\u0000\u0793\u0794\u0007\t\u0000"+ + "\u0000\u0794\u0795\u0007\r\u0000\u0000\u0795\u01c8\u0001\u0000\u0000\u0000"+ + "\u0796\u0797\u0007\u0012\u0000\u0000\u0797\u0798\u0007\u000b\u0000\u0000"+ + "\u0798\u0799\u0007\n\u0000\u0000\u0799\u079a\u0007\u0010\u0000\u0000\u079a"+ + "\u079b\u0007\n\u0000\u0000\u079b\u079c\u0007\u000b\u0000\u0000\u079c\u01ca"+ + "\u0001\u0000\u0000\u0000\u079d\u079e\u0007\u0012\u0000\u0000\u079e\u079f"+ + "\u0007\u000b\u0000\u0000\u079f\u07a0\u0007\b\u0000\u0000\u07a0\u07a1\u0007"+ + "\u0018\u0000\u0000\u07a1\u07a2\u0007\b\u0000\u0000\u07a2\u07a3\u0007\t"+ + "\u0000\u0000\u07a3\u07a4\u0007\n\u0000\u0000\u07a4\u07a5\u0007\u0015\u0000"+ + "\u0000\u07a5\u07a6\u0007\n\u0000\u0000\u07a6\u07a7\u0007\u000e\u0000\u0000"+ + "\u07a7\u01cc\u0001\u0000\u0000\u0000\u07a8\u07a9\u0007\u000b\u0000\u0000"+ + "\u07a9\u07aa\u0007\n\u0000\u0000\u07aa\u07ab\u0007\u0005\u0000\u0000\u07ab"+ + "\u07ac\u0007\u0000\u0000\u0000\u07ac\u07ad\u0007\b\u0000\u0000\u07ad\u07ae"+ + "\u0007\u0004\u0000\u0000\u07ae\u01ce\u0001\u0000\u0000\u0000\u07af\u07b0"+ + "\u0007\u000b\u0000\u0000\u07b0\u07b1\u0007\n\u0000\u0000\u07b1\u07b2\u0007"+ + "\u0005\u0000\u0000\u07b2\u07b3\u0007\u0000\u0000\u0000\u07b3\u07b4\u0007"+ + "\b\u0000\u0000\u07b4\u07b5\u0007\u0004\u0000\u0000\u07b5\u07b6\u0007\n"+ + "\u0000\u0000\u07b6\u07b7\u0007\u0006\u0000\u0000\u07b7\u01d0\u0001\u0000"+ + "\u0000\u0000*\u0000\u01ef\u01f1\u028f\u0375\u0412\u04f0\u0597\u059f\u05a7"+ + "\u05af\u05b5\u05bd\u05c3\u05cb\u05d1\u06d5\u06da\u06e1\u06e5\u06ea\u06ec"+ + "\u06f1\u06f5\u06fa\u06fc\u0700\u0707\u0709\u0711\u0713\u0721\u0723\u0728"+ + "\u072d\u072f\u0735\u0740\u074e\u0757\u0765\u076a\u0001\u0006\u0000\u0000"; public static final ATN _ATN = new ATNDeserializer().deserialize(_serializedATN.toCharArray()); static { diff --git a/kvmain/src/main/java/oracle/kv/impl/query/compiler/parser/KVQLLexer.tokens b/kvmain/src/main/java/oracle/kv/impl/query/compiler/parser/KVQLLexer.tokens index b5b6580c..8d4e7ea5 100644 --- a/kvmain/src/main/java/oracle/kv/impl/query/compiler/parser/KVQLLexer.tokens +++ b/kvmain/src/main/java/oracle/kv/impl/query/compiler/parser/KVQLLexer.tokens @@ -1,241 +1,247 @@ T__0=1 T__1=2 T__2=3 -VARNAME=4 -ACCOUNT=5 -ADD=6 -ADMIN=7 -ALL=8 -ALTER=9 -ALWAYS=10 -ANCESTORS=11 -AND=12 -AS=13 -ASC=14 -ARRAY_COLLECT=15 -BETWEEN=16 -BY=17 -CACHE=18 -CASE=19 -CASCADE=20 -CAST=21 -COLLECTION=22 -COMMENT=23 -COUNT=24 -CREATE=25 -CYCLE=26 -DAYS=27 -DECLARE=28 -DEFAULT=29 -DELETE=30 -DESC=31 -DESCENDANTS=32 -DESCRIBE=33 -DISTINCT=34 -DROP=35 -ELEMENTOF=36 -ELEMENTS=37 
-ELSE=38 -END=39 -ES_SHARDS=40 -ES_REPLICAS=41 -EXISTS=42 -EXTRACT=43 -FIELDS=44 -FIRST=45 -FORCE=46 -FORCE_INDEX=47 -FORCE_PRIMARY_INDEX=48 -FREEZE=49 -FROM=50 -FROZEN=51 -FULLTEXT=52 -GENERATED=53 -GRANT=54 -GROUP=55 -HOURS=56 -IDENTIFIED=57 -IDENTITY=58 -IF=59 -IN=60 -INCREMENT=61 -INDEX=62 -INDEXES=63 -INSERT=64 -INTO=65 -IS=66 -JSON=67 -JOIN=68 -KEY=69 -KEYOF=70 -KEYS=71 -LAST=72 -LEFT=73 -LIFETIME=74 -LIMIT=75 -LOCAL=76 -LOCK=77 -MAXVALUE=78 -MERGE=79 -MINUTES=80 -MINVALUE=81 -MODIFY=82 -MR_COUNTER=83 -NAMESPACE=84 -NAMESPACES=85 -NESTED=86 -NO=87 -NOT=88 -NULLS=89 -OFFSET=90 -OF=91 -ON=92 -ONLY=93 -OR=94 -ORDER=95 -OUTER=96 -OVERRIDE=97 -PASSWORD=98 -PATCH=99 -PER=100 -PREFER_INDEXES=101 -PREFER_PRIMARY_INDEX=102 -PRIMARY=103 -PUT=104 -REGION=105 -REGIONS=106 -REMOVE=107 -RETURNING=108 -REVOKE=109 -ROLE=110 -ROLES=111 -ROW=112 -SCHEMA=113 -SECONDS=114 -SELECT=115 -SEQ_TRANSFORM=116 -SET=117 -SHARD=118 -SHOW=119 -START=120 -TABLE=121 -TABLES=122 -THEN=123 -TO=124 -TTL=125 -TYPE=126 -UNFREEZE=127 -UNLOCK=128 -UPDATE=129 -UPSERT=130 -USER=131 -USERS=132 -USING=133 -VALUES=134 -WHEN=135 -WHERE=136 -WITH=137 -UNIQUE=138 -UNNEST=139 -UUID=140 -ALL_PRIVILEGES=141 -IDENTIFIED_EXTERNALLY=142 -PASSWORD_EXPIRE=143 -RETAIN_CURRENT_PASSWORD=144 -CLEAR_RETAINED_PASSWORD=145 -LEFT_OUTER_JOIN=146 -ARRAY_T=147 -BINARY_T=148 -BOOLEAN_T=149 -DOUBLE_T=150 -ENUM_T=151 -FLOAT_T=152 -GEOMETRY_T=153 -INTEGER_T=154 -LONG_T=155 -MAP_T=156 -NUMBER_T=157 -POINT_T=158 -RECORD_T=159 -STRING_T=160 -TIMESTAMP_T=161 -ANY_T=162 -ANYATOMIC_T=163 -ANYJSONATOMIC_T=164 -ANYRECORD_T=165 -SCALAR_T=166 -SEMI=167 -COMMA=168 -COLON=169 -LP=170 -RP=171 -LBRACK=172 -RBRACK=173 -LBRACE=174 -RBRACE=175 -STAR=176 -DOT=177 -DOLLAR=178 -QUESTION_MARK=179 -LT=180 -LTE=181 -GT=182 -GTE=183 -EQ=184 -NEQ=185 -LT_ANY=186 -LTE_ANY=187 -GT_ANY=188 -GTE_ANY=189 -EQ_ANY=190 -NEQ_ANY=191 -PLUS=192 -MINUS=193 -IDIV=194 -RDIV=195 -CONCAT=196 -NULL=197 -FALSE=198 -TRUE=199 -INT=200 -FLOAT=201 -NUMBER=202 -DSTRING=203 -STRING=204 -SYSDOLAR=205 -ID=206 -BAD_ID=207 -WS=208 -C_COMMENT=209 -LINE_COMMENT=210 -LINE_COMMENT1=211 -UnrecognizedToken=212 +T__3=4 +VARNAME=5 +ACCOUNT=6 +ADD=7 +ADMIN=8 +ALL=9 +ALTER=10 +ALWAYS=11 +ANCESTORS=12 +AND=13 +AS=14 +ASC=15 +ARRAY_COLLECT=16 +BEFORE=17 +BETWEEN=18 +BY=19 +CACHE=20 +CASE=21 +CASCADE=22 +CAST=23 +COLLECTION=24 +COMMENT=25 +COUNT=26 +CREATE=27 +CYCLE=28 +DAYS=29 +DECLARE=30 +DEFAULT=31 +DELETE=32 +DESC=33 +DESCENDANTS=34 +DESCRIBE=35 +DISABLE=36 +DISTINCT=37 +DROP=38 +ELEMENTOF=39 +ELEMENTS=40 +ELSE=41 +ENABLE=42 +END=43 +ES_SHARDS=44 +ES_REPLICAS=45 +EXISTS=46 +EXTRACT=47 +FIELDS=48 +FIRST=49 +FORCE=50 +FORCE_INDEX=51 +FORCE_PRIMARY_INDEX=52 +FREEZE=53 +FROM=54 +FROZEN=55 +FULLTEXT=56 +GENERATED=57 +GRANT=58 +GROUP=59 +HOURS=60 +IDENTIFIED=61 +IDENTITY=62 +IF=63 +IMAGE=64 +IN=65 +INCREMENT=66 +INDEX=67 +INDEXES=68 +INSERT=69 +INTO=70 +IS=71 +JSON=72 +JOIN=73 +KEY=74 +KEYOF=75 +KEYS=76 +LAST=77 +LEFT=78 +LIFETIME=79 +LIMIT=80 +LOCAL=81 +LOCK=82 +MAXVALUE=83 +MERGE=84 +MINUTES=85 +MINVALUE=86 +MODIFY=87 +MR_COUNTER=88 +NAMESPACE=89 +NAMESPACES=90 +NESTED=91 +NO=92 +NOT=93 +NULLS=94 +OFFSET=95 +OF=96 +ON=97 +ONLY=98 +OR=99 +ORDER=100 +OUTER=101 +OVERRIDE=102 +PASSWORD=103 +PATCH=104 +PER=105 +PREFER_INDEXES=106 +PREFER_PRIMARY_INDEX=107 +PRIMARY=108 +PUT=109 +REGION=110 +REGIONS=111 +REMOVE=112 +RETURNING=113 +REVOKE=114 +ROLE=115 +ROLES=116 +ROW=117 +SCHEMA=118 +SECONDS=119 +SELECT=120 +SEQ_TRANSFORM=121 +SET=122 +SHARD=123 +SHOW=124 +START=125 +TABLE=126 +TABLES=127 +THEN=128 +TO=129 +TTL=130 +TYPE=131 
+UNFREEZE=132 +UNLOCK=133 +UPDATE=134 +UPSERT=135 +USER=136 +USERS=137 +USING=138 +VALUES=139 +WHEN=140 +WHERE=141 +WITH=142 +UNIQUE=143 +UNNEST=144 +UUID=145 +ALL_PRIVILEGES=146 +IDENTIFIED_EXTERNALLY=147 +PASSWORD_EXPIRE=148 +RETAIN_CURRENT_PASSWORD=149 +CLEAR_RETAINED_PASSWORD=150 +LEFT_OUTER_JOIN=151 +ARRAY_T=152 +BINARY_T=153 +BOOLEAN_T=154 +DOUBLE_T=155 +ENUM_T=156 +FLOAT_T=157 +GEOMETRY_T=158 +INTEGER_T=159 +LONG_T=160 +MAP_T=161 +NUMBER_T=162 +POINT_T=163 +RECORD_T=164 +STRING_T=165 +TIMESTAMP_T=166 +ANY_T=167 +ANYATOMIC_T=168 +ANYJSONATOMIC_T=169 +ANYRECORD_T=170 +SCALAR_T=171 +SEMI=172 +COMMA=173 +COLON=174 +LP=175 +RP=176 +LBRACK=177 +RBRACK=178 +LBRACE=179 +RBRACE=180 +STAR=181 +DOT=182 +DOLLAR=183 +QUESTION_MARK=184 +LT=185 +LTE=186 +GT=187 +GTE=188 +EQ=189 +NEQ=190 +LT_ANY=191 +LTE_ANY=192 +GT_ANY=193 +GTE_ANY=194 +EQ_ANY=195 +NEQ_ANY=196 +PLUS=197 +MINUS=198 +IDIV=199 +RDIV=200 +CONCAT=201 +NULL=202 +FALSE=203 +TRUE=204 +INT=205 +FLOAT=206 +NUMBER=207 +DSTRING=208 +STRING=209 +SYSDOLAR=210 +ID=211 +BAD_ID=212 +WS=213 +C_COMMENT=214 +LINE_COMMENT=215 +LINE_COMMENT1=216 +UnrecognizedToken=217 '/*+'=1 '*/'=2 '@'=3 -'array_collect'=15 -'count'=24 -'seq_transform'=116 -';'=167 -','=168 -':'=169 -'('=170 -')'=171 -'['=172 -']'=173 -'{'=174 -'}'=175 -'*'=176 -'.'=177 -'$'=178 -'?'=179 -'<'=180 -'<='=181 -'>'=182 -'>='=183 -'='=184 -'!='=185 -'+'=192 -'-'=193 -'/'=194 -'||'=196 +'row_metadata().'=4 +'array_collect'=16 +'count'=26 +'seq_transform'=121 +';'=172 +','=173 +':'=174 +'('=175 +')'=176 +'['=177 +']'=178 +'{'=179 +'}'=180 +'*'=181 +'.'=182 +'$'=183 +'?'=184 +'<'=185 +'<='=186 +'>'=187 +'>='=188 +'='=189 +'!='=190 +'+'=197 +'-'=198 +'/'=199 +'||'=201 diff --git a/kvmain/src/main/java/oracle/kv/impl/query/compiler/parser/KVQLListener.java b/kvmain/src/main/java/oracle/kv/impl/query/compiler/parser/KVQLListener.java index 0bb52cf2..7b6138ae 100644 --- a/kvmain/src/main/java/oracle/kv/impl/query/compiler/parser/KVQLListener.java +++ b/kvmain/src/main/java/oracle/kv/impl/query/compiler/parser/KVQLListener.java @@ -1608,45 +1608,55 @@ public interface KVQLListener extends ParseTreeListener { */ void exitRegions_def(KVQLParser.Regions_defContext ctx); /** - * Enter a parse tree produced by {@link KVQLParser#add_region_def}. + * Enter a parse tree produced by {@link KVQLParser#frozen_def}. * @param ctx the parse tree */ - void enterAdd_region_def(KVQLParser.Add_region_defContext ctx); + void enterFrozen_def(KVQLParser.Frozen_defContext ctx); /** - * Exit a parse tree produced by {@link KVQLParser#add_region_def}. + * Exit a parse tree produced by {@link KVQLParser#frozen_def}. * @param ctx the parse tree */ - void exitAdd_region_def(KVQLParser.Add_region_defContext ctx); + void exitFrozen_def(KVQLParser.Frozen_defContext ctx); /** - * Enter a parse tree produced by {@link KVQLParser#drop_region_def}. + * Enter a parse tree produced by {@link KVQLParser#json_collection_def}. * @param ctx the parse tree */ - void enterDrop_region_def(KVQLParser.Drop_region_defContext ctx); + void enterJson_collection_def(KVQLParser.Json_collection_defContext ctx); /** - * Exit a parse tree produced by {@link KVQLParser#drop_region_def}. + * Exit a parse tree produced by {@link KVQLParser#json_collection_def}. * @param ctx the parse tree */ - void exitDrop_region_def(KVQLParser.Drop_region_defContext ctx); + void exitJson_collection_def(KVQLParser.Json_collection_defContext ctx); /** - * Enter a parse tree produced by {@link KVQLParser#frozen_def}. 
+ * Enter a parse tree produced by {@link KVQLParser#enable_before_image}. * @param ctx the parse tree */ - void enterFrozen_def(KVQLParser.Frozen_defContext ctx); + void enterEnable_before_image(KVQLParser.Enable_before_imageContext ctx); /** - * Exit a parse tree produced by {@link KVQLParser#frozen_def}. + * Exit a parse tree produced by {@link KVQLParser#enable_before_image}. * @param ctx the parse tree */ - void exitFrozen_def(KVQLParser.Frozen_defContext ctx); + void exitEnable_before_image(KVQLParser.Enable_before_imageContext ctx); /** - * Enter a parse tree produced by {@link KVQLParser#json_collection_def}. + * Enter a parse tree produced by {@link KVQLParser#before_image_ttl}. * @param ctx the parse tree */ - void enterJson_collection_def(KVQLParser.Json_collection_defContext ctx); + void enterBefore_image_ttl(KVQLParser.Before_image_ttlContext ctx); /** - * Exit a parse tree produced by {@link KVQLParser#json_collection_def}. + * Exit a parse tree produced by {@link KVQLParser#before_image_ttl}. * @param ctx the parse tree */ - void exitJson_collection_def(KVQLParser.Json_collection_defContext ctx); + void exitBefore_image_ttl(KVQLParser.Before_image_ttlContext ctx); + /** + * Enter a parse tree produced by {@link KVQLParser#disable_before_image}. + * @param ctx the parse tree + */ + void enterDisable_before_image(KVQLParser.Disable_before_imageContext ctx); + /** + * Exit a parse tree produced by {@link KVQLParser#disable_before_image}. + * @param ctx the parse tree + */ + void exitDisable_before_image(KVQLParser.Disable_before_imageContext ctx); /** * Enter a parse tree produced by {@link KVQLParser#identity_def}. * @param ctx the parse tree @@ -1727,6 +1737,26 @@ public interface KVQLListener extends ParseTreeListener { * @param ctx the parse tree */ void exitUnfreeze_def(KVQLParser.Unfreeze_defContext ctx); + /** + * Enter a parse tree produced by {@link KVQLParser#add_region_def}. + * @param ctx the parse tree + */ + void enterAdd_region_def(KVQLParser.Add_region_defContext ctx); + /** + * Exit a parse tree produced by {@link KVQLParser#add_region_def}. + * @param ctx the parse tree + */ + void exitAdd_region_def(KVQLParser.Add_region_defContext ctx); + /** + * Enter a parse tree produced by {@link KVQLParser#drop_region_def}. + * @param ctx the parse tree + */ + void enterDrop_region_def(KVQLParser.Drop_region_defContext ctx); + /** + * Exit a parse tree produced by {@link KVQLParser#drop_region_def}. + * @param ctx the parse tree + */ + void exitDrop_region_def(KVQLParser.Drop_region_defContext ctx); /** * Enter a parse tree produced by {@link KVQLParser#alter_field_statements}. * @param ctx the parse tree @@ -1877,6 +1907,26 @@ public interface KVQLListener extends ParseTreeListener { * @param ctx the parse tree */ void exitIndex_path(KVQLParser.Index_pathContext ctx); + /** + * Enter a parse tree produced by {@link KVQLParser#old_index_path}. + * @param ctx the parse tree + */ + void enterOld_index_path(KVQLParser.Old_index_pathContext ctx); + /** + * Exit a parse tree produced by {@link KVQLParser#old_index_path}. + * @param ctx the parse tree + */ + void exitOld_index_path(KVQLParser.Old_index_pathContext ctx); + /** + * Enter a parse tree produced by {@link KVQLParser#row_metadata}. + * @param ctx the parse tree + */ + void enterRow_metadata(KVQLParser.Row_metadataContext ctx); + /** + * Exit a parse tree produced by {@link KVQLParser#row_metadata}. 
+ * @param ctx the parse tree + */ + void exitRow_metadata(KVQLParser.Row_metadataContext ctx); /** * Enter a parse tree produced by {@link KVQLParser#multikey_path_prefix}. * @param ctx the parse tree diff --git a/kvmain/src/main/java/oracle/kv/impl/query/compiler/parser/KVQLParser.java b/kvmain/src/main/java/oracle/kv/impl/query/compiler/parser/KVQLParser.java index 9e5f9e4d..3b66528d 100644 --- a/kvmain/src/main/java/oracle/kv/impl/query/compiler/parser/KVQLParser.java +++ b/kvmain/src/main/java/oracle/kv/impl/query/compiler/parser/KVQLParser.java @@ -17,40 +17,41 @@ public class KVQLParser extends Parser { protected static final PredictionContextCache _sharedContextCache = new PredictionContextCache(); public static final int - T__0=1, T__1=2, T__2=3, VARNAME=4, ACCOUNT=5, ADD=6, ADMIN=7, ALL=8, ALTER=9, - ALWAYS=10, ANCESTORS=11, AND=12, AS=13, ASC=14, ARRAY_COLLECT=15, BETWEEN=16, - BY=17, CACHE=18, CASE=19, CASCADE=20, CAST=21, COLLECTION=22, COMMENT=23, - COUNT=24, CREATE=25, CYCLE=26, DAYS=27, DECLARE=28, DEFAULT=29, DELETE=30, - DESC=31, DESCENDANTS=32, DESCRIBE=33, DISTINCT=34, DROP=35, ELEMENTOF=36, - ELEMENTS=37, ELSE=38, END=39, ES_SHARDS=40, ES_REPLICAS=41, EXISTS=42, - EXTRACT=43, FIELDS=44, FIRST=45, FORCE=46, FORCE_INDEX=47, FORCE_PRIMARY_INDEX=48, - FREEZE=49, FROM=50, FROZEN=51, FULLTEXT=52, GENERATED=53, GRANT=54, GROUP=55, - HOURS=56, IDENTIFIED=57, IDENTITY=58, IF=59, IN=60, INCREMENT=61, INDEX=62, - INDEXES=63, INSERT=64, INTO=65, IS=66, JSON=67, JOIN=68, KEY=69, KEYOF=70, - KEYS=71, LAST=72, LEFT=73, LIFETIME=74, LIMIT=75, LOCAL=76, LOCK=77, MAXVALUE=78, - MERGE=79, MINUTES=80, MINVALUE=81, MODIFY=82, MR_COUNTER=83, NAMESPACE=84, - NAMESPACES=85, NESTED=86, NO=87, NOT=88, NULLS=89, OFFSET=90, OF=91, ON=92, - ONLY=93, OR=94, ORDER=95, OUTER=96, OVERRIDE=97, PASSWORD=98, PATCH=99, - PER=100, PREFER_INDEXES=101, PREFER_PRIMARY_INDEX=102, PRIMARY=103, PUT=104, - REGION=105, REGIONS=106, REMOVE=107, RETURNING=108, REVOKE=109, ROLE=110, - ROLES=111, ROW=112, SCHEMA=113, SECONDS=114, SELECT=115, SEQ_TRANSFORM=116, - SET=117, SHARD=118, SHOW=119, START=120, TABLE=121, TABLES=122, THEN=123, - TO=124, TTL=125, TYPE=126, UNFREEZE=127, UNLOCK=128, UPDATE=129, UPSERT=130, - USER=131, USERS=132, USING=133, VALUES=134, WHEN=135, WHERE=136, WITH=137, - UNIQUE=138, UNNEST=139, UUID=140, ALL_PRIVILEGES=141, IDENTIFIED_EXTERNALLY=142, - PASSWORD_EXPIRE=143, RETAIN_CURRENT_PASSWORD=144, CLEAR_RETAINED_PASSWORD=145, - LEFT_OUTER_JOIN=146, ARRAY_T=147, BINARY_T=148, BOOLEAN_T=149, DOUBLE_T=150, - ENUM_T=151, FLOAT_T=152, GEOMETRY_T=153, INTEGER_T=154, LONG_T=155, MAP_T=156, - NUMBER_T=157, POINT_T=158, RECORD_T=159, STRING_T=160, TIMESTAMP_T=161, - ANY_T=162, ANYATOMIC_T=163, ANYJSONATOMIC_T=164, ANYRECORD_T=165, SCALAR_T=166, - SEMI=167, COMMA=168, COLON=169, LP=170, RP=171, LBRACK=172, RBRACK=173, - LBRACE=174, RBRACE=175, STAR=176, DOT=177, DOLLAR=178, QUESTION_MARK=179, - LT=180, LTE=181, GT=182, GTE=183, EQ=184, NEQ=185, LT_ANY=186, LTE_ANY=187, - GT_ANY=188, GTE_ANY=189, EQ_ANY=190, NEQ_ANY=191, PLUS=192, MINUS=193, - IDIV=194, RDIV=195, CONCAT=196, NULL=197, FALSE=198, TRUE=199, INT=200, - FLOAT=201, NUMBER=202, DSTRING=203, STRING=204, SYSDOLAR=205, ID=206, - BAD_ID=207, WS=208, C_COMMENT=209, LINE_COMMENT=210, LINE_COMMENT1=211, - UnrecognizedToken=212; + T__0=1, T__1=2, T__2=3, T__3=4, VARNAME=5, ACCOUNT=6, ADD=7, ADMIN=8, + ALL=9, ALTER=10, ALWAYS=11, ANCESTORS=12, AND=13, AS=14, ASC=15, ARRAY_COLLECT=16, + BEFORE=17, BETWEEN=18, BY=19, CACHE=20, CASE=21, CASCADE=22, CAST=23, 
+ COLLECTION=24, COMMENT=25, COUNT=26, CREATE=27, CYCLE=28, DAYS=29, DECLARE=30, + DEFAULT=31, DELETE=32, DESC=33, DESCENDANTS=34, DESCRIBE=35, DISABLE=36, + DISTINCT=37, DROP=38, ELEMENTOF=39, ELEMENTS=40, ELSE=41, ENABLE=42, END=43, + ES_SHARDS=44, ES_REPLICAS=45, EXISTS=46, EXTRACT=47, FIELDS=48, FIRST=49, + FORCE=50, FORCE_INDEX=51, FORCE_PRIMARY_INDEX=52, FREEZE=53, FROM=54, + FROZEN=55, FULLTEXT=56, GENERATED=57, GRANT=58, GROUP=59, HOURS=60, IDENTIFIED=61, + IDENTITY=62, IF=63, IMAGE=64, IN=65, INCREMENT=66, INDEX=67, INDEXES=68, + INSERT=69, INTO=70, IS=71, JSON=72, JOIN=73, KEY=74, KEYOF=75, KEYS=76, + LAST=77, LEFT=78, LIFETIME=79, LIMIT=80, LOCAL=81, LOCK=82, MAXVALUE=83, + MERGE=84, MINUTES=85, MINVALUE=86, MODIFY=87, MR_COUNTER=88, NAMESPACE=89, + NAMESPACES=90, NESTED=91, NO=92, NOT=93, NULLS=94, OFFSET=95, OF=96, ON=97, + ONLY=98, OR=99, ORDER=100, OUTER=101, OVERRIDE=102, PASSWORD=103, PATCH=104, + PER=105, PREFER_INDEXES=106, PREFER_PRIMARY_INDEX=107, PRIMARY=108, PUT=109, + REGION=110, REGIONS=111, REMOVE=112, RETURNING=113, REVOKE=114, ROLE=115, + ROLES=116, ROW=117, SCHEMA=118, SECONDS=119, SELECT=120, SEQ_TRANSFORM=121, + SET=122, SHARD=123, SHOW=124, START=125, TABLE=126, TABLES=127, THEN=128, + TO=129, TTL=130, TYPE=131, UNFREEZE=132, UNLOCK=133, UPDATE=134, UPSERT=135, + USER=136, USERS=137, USING=138, VALUES=139, WHEN=140, WHERE=141, WITH=142, + UNIQUE=143, UNNEST=144, UUID=145, ALL_PRIVILEGES=146, IDENTIFIED_EXTERNALLY=147, + PASSWORD_EXPIRE=148, RETAIN_CURRENT_PASSWORD=149, CLEAR_RETAINED_PASSWORD=150, + LEFT_OUTER_JOIN=151, ARRAY_T=152, BINARY_T=153, BOOLEAN_T=154, DOUBLE_T=155, + ENUM_T=156, FLOAT_T=157, GEOMETRY_T=158, INTEGER_T=159, LONG_T=160, MAP_T=161, + NUMBER_T=162, POINT_T=163, RECORD_T=164, STRING_T=165, TIMESTAMP_T=166, + ANY_T=167, ANYATOMIC_T=168, ANYJSONATOMIC_T=169, ANYRECORD_T=170, SCALAR_T=171, + SEMI=172, COMMA=173, COLON=174, LP=175, RP=176, LBRACK=177, RBRACK=178, + LBRACE=179, RBRACE=180, STAR=181, DOT=182, DOLLAR=183, QUESTION_MARK=184, + LT=185, LTE=186, GT=187, GTE=188, EQ=189, NEQ=190, LT_ANY=191, LTE_ANY=192, + GT_ANY=193, GTE_ANY=194, EQ_ANY=195, NEQ_ANY=196, PLUS=197, MINUS=198, + IDIV=199, RDIV=200, CONCAT=201, NULL=202, FALSE=203, TRUE=204, INT=205, + FLOAT=206, NUMBER=207, DSTRING=208, STRING=209, SYSDOLAR=210, ID=211, + BAD_ID=212, WS=213, C_COMMENT=214, LINE_COMMENT=215, LINE_COMMENT1=216, + UnrecognizedToken=217; public static final int RULE_parse = 0, RULE_statement = 1, RULE_query = 2, RULE_prolog = 3, RULE_var_decl = 4, RULE_index_function_path = 5, RULE_expr = 6, RULE_sfw_expr = 7, RULE_from_clause = 8, @@ -95,31 +96,33 @@ public class KVQLParser extends Parser { RULE_json_mrcounter_path = 133, RULE_key_def = 134, RULE_shard_key_def = 135, RULE_id_list_with_size = 136, RULE_id_with_size = 137, RULE_storage_size = 138, RULE_table_options = 139, RULE_ttl_def = 140, RULE_region_names = 141, - RULE_regions_def = 142, RULE_add_region_def = 143, RULE_drop_region_def = 144, - RULE_frozen_def = 145, RULE_json_collection_def = 146, RULE_identity_def = 147, - RULE_sequence_options = 148, RULE_mr_counter_def = 149, RULE_uuid_def = 150, - RULE_alter_table_statement = 151, RULE_alter_def = 152, RULE_freeze_def = 153, - RULE_unfreeze_def = 154, RULE_alter_field_statements = 155, RULE_add_field_statement = 156, - RULE_drop_field_statement = 157, RULE_modify_field_statement = 158, RULE_schema_path = 159, - RULE_init_schema_path_step = 160, RULE_schema_path_step = 161, RULE_drop_table_statement = 162, - RULE_create_index_statement = 163, 
RULE_index_name = 164, RULE_index_field_list = 165, - RULE_index_field = 166, RULE_index_function = 167, RULE_index_function_args = 168, - RULE_index_path = 169, RULE_multikey_path_prefix = 170, RULE_multikey_path_suffix = 171, - RULE_path_type = 172, RULE_create_text_index_statement = 173, RULE_fts_field_list = 174, - RULE_fts_path_list = 175, RULE_fts_path = 176, RULE_es_properties = 177, - RULE_es_property_assignment = 178, RULE_drop_index_statement = 179, RULE_describe_statement = 180, - RULE_schema_path_list = 181, RULE_show_statement = 182, RULE_create_user_statement = 183, - RULE_create_role_statement = 184, RULE_alter_user_statement = 185, RULE_drop_user_statement = 186, - RULE_drop_role_statement = 187, RULE_grant_statement = 188, RULE_revoke_statement = 189, - RULE_identifier_or_string = 190, RULE_identified_clause = 191, RULE_create_user_identified_clause = 192, - RULE_by_password = 193, RULE_password_lifetime = 194, RULE_reset_password_clause = 195, - RULE_account_lock = 196, RULE_grant_roles = 197, RULE_grant_system_privileges = 198, - RULE_grant_object_privileges = 199, RULE_revoke_roles = 200, RULE_revoke_system_privileges = 201, - RULE_revoke_object_privileges = 202, RULE_principal = 203, RULE_sys_priv_list = 204, - RULE_priv_item = 205, RULE_obj_priv_list = 206, RULE_object = 207, RULE_json_text = 208, - RULE_jsobject = 209, RULE_jsarray = 210, RULE_jspair = 211, RULE_jsvalue = 212, - RULE_comment = 213, RULE_duration = 214, RULE_time_unit = 215, RULE_number = 216, - RULE_signed_int = 217, RULE_string = 218, RULE_id_list = 219, RULE_id = 220; + RULE_regions_def = 142, RULE_frozen_def = 143, RULE_json_collection_def = 144, + RULE_enable_before_image = 145, RULE_before_image_ttl = 146, RULE_disable_before_image = 147, + RULE_identity_def = 148, RULE_sequence_options = 149, RULE_mr_counter_def = 150, + RULE_uuid_def = 151, RULE_alter_table_statement = 152, RULE_alter_def = 153, + RULE_freeze_def = 154, RULE_unfreeze_def = 155, RULE_add_region_def = 156, + RULE_drop_region_def = 157, RULE_alter_field_statements = 158, RULE_add_field_statement = 159, + RULE_drop_field_statement = 160, RULE_modify_field_statement = 161, RULE_schema_path = 162, + RULE_init_schema_path_step = 163, RULE_schema_path_step = 164, RULE_drop_table_statement = 165, + RULE_create_index_statement = 166, RULE_index_name = 167, RULE_index_field_list = 168, + RULE_index_field = 169, RULE_index_function = 170, RULE_index_function_args = 171, + RULE_index_path = 172, RULE_old_index_path = 173, RULE_row_metadata = 174, + RULE_multikey_path_prefix = 175, RULE_multikey_path_suffix = 176, RULE_path_type = 177, + RULE_create_text_index_statement = 178, RULE_fts_field_list = 179, RULE_fts_path_list = 180, + RULE_fts_path = 181, RULE_es_properties = 182, RULE_es_property_assignment = 183, + RULE_drop_index_statement = 184, RULE_describe_statement = 185, RULE_schema_path_list = 186, + RULE_show_statement = 187, RULE_create_user_statement = 188, RULE_create_role_statement = 189, + RULE_alter_user_statement = 190, RULE_drop_user_statement = 191, RULE_drop_role_statement = 192, + RULE_grant_statement = 193, RULE_revoke_statement = 194, RULE_identifier_or_string = 195, + RULE_identified_clause = 196, RULE_create_user_identified_clause = 197, + RULE_by_password = 198, RULE_password_lifetime = 199, RULE_reset_password_clause = 200, + RULE_account_lock = 201, RULE_grant_roles = 202, RULE_grant_system_privileges = 203, + RULE_grant_object_privileges = 204, RULE_revoke_roles = 205, RULE_revoke_system_privileges = 206, + 
RULE_revoke_object_privileges = 207, RULE_principal = 208, RULE_sys_priv_list = 209, + RULE_priv_item = 210, RULE_obj_priv_list = 211, RULE_object = 212, RULE_json_text = 213, + RULE_jsobject = 214, RULE_jsarray = 215, RULE_jspair = 216, RULE_jsvalue = 217, + RULE_comment = 218, RULE_duration = 219, RULE_time_unit = 220, RULE_number = 221, + RULE_signed_int = 222, RULE_string = 223, RULE_id_list = 224, RULE_id = 225; private static String[] makeRuleNames() { return new String[] { "parse", "statement", "query", "prolog", "var_decl", "index_function_path", @@ -152,34 +155,36 @@ private static String[] makeRuleNames() { "json_mrcounter_fields", "json_mrcounter_def", "json_collection_mrcounter_def", "json_mrcounter_path", "key_def", "shard_key_def", "id_list_with_size", "id_with_size", "storage_size", "table_options", "ttl_def", "region_names", - "regions_def", "add_region_def", "drop_region_def", "frozen_def", "json_collection_def", - "identity_def", "sequence_options", "mr_counter_def", "uuid_def", "alter_table_statement", - "alter_def", "freeze_def", "unfreeze_def", "alter_field_statements", + "regions_def", "frozen_def", "json_collection_def", "enable_before_image", + "before_image_ttl", "disable_before_image", "identity_def", "sequence_options", + "mr_counter_def", "uuid_def", "alter_table_statement", "alter_def", "freeze_def", + "unfreeze_def", "add_region_def", "drop_region_def", "alter_field_statements", "add_field_statement", "drop_field_statement", "modify_field_statement", "schema_path", "init_schema_path_step", "schema_path_step", "drop_table_statement", "create_index_statement", "index_name", "index_field_list", "index_field", - "index_function", "index_function_args", "index_path", "multikey_path_prefix", - "multikey_path_suffix", "path_type", "create_text_index_statement", "fts_field_list", - "fts_path_list", "fts_path", "es_properties", "es_property_assignment", - "drop_index_statement", "describe_statement", "schema_path_list", "show_statement", - "create_user_statement", "create_role_statement", "alter_user_statement", - "drop_user_statement", "drop_role_statement", "grant_statement", "revoke_statement", - "identifier_or_string", "identified_clause", "create_user_identified_clause", - "by_password", "password_lifetime", "reset_password_clause", "account_lock", - "grant_roles", "grant_system_privileges", "grant_object_privileges", - "revoke_roles", "revoke_system_privileges", "revoke_object_privileges", - "principal", "sys_priv_list", "priv_item", "obj_priv_list", "object", - "json_text", "jsobject", "jsarray", "jspair", "jsvalue", "comment", "duration", - "time_unit", "number", "signed_int", "string", "id_list", "id" + "index_function", "index_function_args", "index_path", "old_index_path", + "row_metadata", "multikey_path_prefix", "multikey_path_suffix", "path_type", + "create_text_index_statement", "fts_field_list", "fts_path_list", "fts_path", + "es_properties", "es_property_assignment", "drop_index_statement", "describe_statement", + "schema_path_list", "show_statement", "create_user_statement", "create_role_statement", + "alter_user_statement", "drop_user_statement", "drop_role_statement", + "grant_statement", "revoke_statement", "identifier_or_string", "identified_clause", + "create_user_identified_clause", "by_password", "password_lifetime", + "reset_password_clause", "account_lock", "grant_roles", "grant_system_privileges", + "grant_object_privileges", "revoke_roles", "revoke_system_privileges", + "revoke_object_privileges", "principal", "sys_priv_list", "priv_item", 
+ "obj_priv_list", "object", "json_text", "jsobject", "jsarray", "jspair", + "jsvalue", "comment", "duration", "time_unit", "number", "signed_int", + "string", "id_list", "id" }; } public static final String[] ruleNames = makeRuleNames(); private static String[] makeLiteralNames() { return new String[] { - null, "'/*+'", "'*/'", "'@'", null, null, null, null, null, null, null, - null, null, null, null, "'array_collect'", null, null, null, null, null, - null, null, null, "'count'", null, null, null, null, null, null, null, + null, "'/*+'", "'*/'", "'@'", "'row_metadata().'", null, null, null, + null, null, null, null, null, null, null, null, "'array_collect'", null, + null, null, null, null, null, null, null, null, "'count'", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, @@ -187,49 +192,50 @@ private static String[] makeLiteralNames() { null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, - "'seq_transform'", null, null, null, null, null, null, null, null, null, + null, null, null, null, null, null, null, null, "'seq_transform'", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, - null, null, null, null, null, "';'", "','", "':'", "'('", "')'", "'['", - "']'", "'{'", "'}'", "'*'", "'.'", "'$'", "'?'", "'<'", "'<='", "'>'", - "'>='", "'='", "'!='", null, null, null, null, null, null, "'+'", "'-'", - "'/'", null, "'||'" + null, null, null, null, null, null, null, null, null, null, null, null, + null, "';'", "','", "':'", "'('", "')'", "'['", "']'", "'{'", "'}'", + "'*'", "'.'", "'$'", "'?'", "'<'", "'<='", "'>'", "'>='", "'='", "'!='", + null, null, null, null, null, null, "'+'", "'-'", "'/'", null, "'||'" }; } private static final String[] _LITERAL_NAMES = makeLiteralNames(); private static String[] makeSymbolicNames() { return new String[] { - null, null, null, null, "VARNAME", "ACCOUNT", "ADD", "ADMIN", "ALL", + null, null, null, null, null, "VARNAME", "ACCOUNT", "ADD", "ADMIN", "ALL", "ALTER", "ALWAYS", "ANCESTORS", "AND", "AS", "ASC", "ARRAY_COLLECT", - "BETWEEN", "BY", "CACHE", "CASE", "CASCADE", "CAST", "COLLECTION", "COMMENT", - "COUNT", "CREATE", "CYCLE", "DAYS", "DECLARE", "DEFAULT", "DELETE", "DESC", - "DESCENDANTS", "DESCRIBE", "DISTINCT", "DROP", "ELEMENTOF", "ELEMENTS", - "ELSE", "END", "ES_SHARDS", "ES_REPLICAS", "EXISTS", "EXTRACT", "FIELDS", - "FIRST", "FORCE", "FORCE_INDEX", "FORCE_PRIMARY_INDEX", "FREEZE", "FROM", - "FROZEN", "FULLTEXT", "GENERATED", "GRANT", "GROUP", "HOURS", "IDENTIFIED", - "IDENTITY", "IF", "IN", "INCREMENT", "INDEX", "INDEXES", "INSERT", "INTO", - "IS", "JSON", "JOIN", "KEY", "KEYOF", "KEYS", "LAST", "LEFT", "LIFETIME", - "LIMIT", "LOCAL", "LOCK", "MAXVALUE", "MERGE", "MINUTES", "MINVALUE", - "MODIFY", "MR_COUNTER", "NAMESPACE", "NAMESPACES", "NESTED", "NO", "NOT", - "NULLS", "OFFSET", "OF", "ON", "ONLY", "OR", "ORDER", "OUTER", "OVERRIDE", - "PASSWORD", "PATCH", "PER", "PREFER_INDEXES", "PREFER_PRIMARY_INDEX", - "PRIMARY", "PUT", "REGION", "REGIONS", "REMOVE", "RETURNING", "REVOKE", - "ROLE", "ROLES", "ROW", "SCHEMA", "SECONDS", "SELECT", 
"SEQ_TRANSFORM", - "SET", "SHARD", "SHOW", "START", "TABLE", "TABLES", "THEN", "TO", "TTL", - "TYPE", "UNFREEZE", "UNLOCK", "UPDATE", "UPSERT", "USER", "USERS", "USING", - "VALUES", "WHEN", "WHERE", "WITH", "UNIQUE", "UNNEST", "UUID", "ALL_PRIVILEGES", - "IDENTIFIED_EXTERNALLY", "PASSWORD_EXPIRE", "RETAIN_CURRENT_PASSWORD", - "CLEAR_RETAINED_PASSWORD", "LEFT_OUTER_JOIN", "ARRAY_T", "BINARY_T", - "BOOLEAN_T", "DOUBLE_T", "ENUM_T", "FLOAT_T", "GEOMETRY_T", "INTEGER_T", - "LONG_T", "MAP_T", "NUMBER_T", "POINT_T", "RECORD_T", "STRING_T", "TIMESTAMP_T", - "ANY_T", "ANYATOMIC_T", "ANYJSONATOMIC_T", "ANYRECORD_T", "SCALAR_T", - "SEMI", "COMMA", "COLON", "LP", "RP", "LBRACK", "RBRACK", "LBRACE", "RBRACE", - "STAR", "DOT", "DOLLAR", "QUESTION_MARK", "LT", "LTE", "GT", "GTE", "EQ", - "NEQ", "LT_ANY", "LTE_ANY", "GT_ANY", "GTE_ANY", "EQ_ANY", "NEQ_ANY", - "PLUS", "MINUS", "IDIV", "RDIV", "CONCAT", "NULL", "FALSE", "TRUE", "INT", - "FLOAT", "NUMBER", "DSTRING", "STRING", "SYSDOLAR", "ID", "BAD_ID", "WS", - "C_COMMENT", "LINE_COMMENT", "LINE_COMMENT1", "UnrecognizedToken" + "BEFORE", "BETWEEN", "BY", "CACHE", "CASE", "CASCADE", "CAST", "COLLECTION", + "COMMENT", "COUNT", "CREATE", "CYCLE", "DAYS", "DECLARE", "DEFAULT", + "DELETE", "DESC", "DESCENDANTS", "DESCRIBE", "DISABLE", "DISTINCT", "DROP", + "ELEMENTOF", "ELEMENTS", "ELSE", "ENABLE", "END", "ES_SHARDS", "ES_REPLICAS", + "EXISTS", "EXTRACT", "FIELDS", "FIRST", "FORCE", "FORCE_INDEX", "FORCE_PRIMARY_INDEX", + "FREEZE", "FROM", "FROZEN", "FULLTEXT", "GENERATED", "GRANT", "GROUP", + "HOURS", "IDENTIFIED", "IDENTITY", "IF", "IMAGE", "IN", "INCREMENT", + "INDEX", "INDEXES", "INSERT", "INTO", "IS", "JSON", "JOIN", "KEY", "KEYOF", + "KEYS", "LAST", "LEFT", "LIFETIME", "LIMIT", "LOCAL", "LOCK", "MAXVALUE", + "MERGE", "MINUTES", "MINVALUE", "MODIFY", "MR_COUNTER", "NAMESPACE", + "NAMESPACES", "NESTED", "NO", "NOT", "NULLS", "OFFSET", "OF", "ON", "ONLY", + "OR", "ORDER", "OUTER", "OVERRIDE", "PASSWORD", "PATCH", "PER", "PREFER_INDEXES", + "PREFER_PRIMARY_INDEX", "PRIMARY", "PUT", "REGION", "REGIONS", "REMOVE", + "RETURNING", "REVOKE", "ROLE", "ROLES", "ROW", "SCHEMA", "SECONDS", "SELECT", + "SEQ_TRANSFORM", "SET", "SHARD", "SHOW", "START", "TABLE", "TABLES", + "THEN", "TO", "TTL", "TYPE", "UNFREEZE", "UNLOCK", "UPDATE", "UPSERT", + "USER", "USERS", "USING", "VALUES", "WHEN", "WHERE", "WITH", "UNIQUE", + "UNNEST", "UUID", "ALL_PRIVILEGES", "IDENTIFIED_EXTERNALLY", "PASSWORD_EXPIRE", + "RETAIN_CURRENT_PASSWORD", "CLEAR_RETAINED_PASSWORD", "LEFT_OUTER_JOIN", + "ARRAY_T", "BINARY_T", "BOOLEAN_T", "DOUBLE_T", "ENUM_T", "FLOAT_T", + "GEOMETRY_T", "INTEGER_T", "LONG_T", "MAP_T", "NUMBER_T", "POINT_T", + "RECORD_T", "STRING_T", "TIMESTAMP_T", "ANY_T", "ANYATOMIC_T", "ANYJSONATOMIC_T", + "ANYRECORD_T", "SCALAR_T", "SEMI", "COMMA", "COLON", "LP", "RP", "LBRACK", + "RBRACK", "LBRACE", "RBRACE", "STAR", "DOT", "DOLLAR", "QUESTION_MARK", + "LT", "LTE", "GT", "GTE", "EQ", "NEQ", "LT_ANY", "LTE_ANY", "GT_ANY", + "GTE_ANY", "EQ_ANY", "NEQ_ANY", "PLUS", "MINUS", "IDIV", "RDIV", "CONCAT", + "NULL", "FALSE", "TRUE", "INT", "FLOAT", "NUMBER", "DSTRING", "STRING", + "SYSDOLAR", "ID", "BAD_ID", "WS", "C_COMMENT", "LINE_COMMENT", "LINE_COMMENT1", + "UnrecognizedToken" }; } private static final String[] _SYMBOLIC_NAMES = makeSymbolicNames(); @@ -309,9 +315,9 @@ public final ParseContext parse() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(442); + setState(452); statement(); - setState(443); + setState(453); match(EOF); } } @@ -423,156 +429,156 @@ 
public final StatementContext statement() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(470); + setState(480); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,0,_ctx) ) { case 1: { - setState(445); + setState(455); query(); } break; case 2: { - setState(446); + setState(456); insert_statement(); } break; case 3: { - setState(447); + setState(457); update_statement(); } break; case 4: { - setState(448); + setState(458); delete_statement(); } break; case 5: { - setState(449); + setState(459); create_table_statement(); } break; case 6: { - setState(450); + setState(460); create_index_statement(); } break; case 7: { - setState(451); + setState(461); create_user_statement(); } break; case 8: { - setState(452); + setState(462); create_role_statement(); } break; case 9: { - setState(453); + setState(463); create_namespace_statement(); } break; case 10: { - setState(454); + setState(464); create_region_statement(); } break; case 11: { - setState(455); + setState(465); drop_index_statement(); } break; case 12: { - setState(456); + setState(466); drop_namespace_statement(); } break; case 13: { - setState(457); + setState(467); drop_region_statement(); } break; case 14: { - setState(458); + setState(468); create_text_index_statement(); } break; case 15: { - setState(459); + setState(469); drop_role_statement(); } break; case 16: { - setState(460); + setState(470); drop_user_statement(); } break; case 17: { - setState(461); + setState(471); alter_table_statement(); } break; case 18: { - setState(462); + setState(472); alter_user_statement(); } break; case 19: { - setState(463); + setState(473); drop_table_statement(); } break; case 20: { - setState(464); + setState(474); grant_statement(); } break; case 21: { - setState(465); + setState(475); revoke_statement(); } break; case 22: { - setState(466); + setState(476); describe_statement(); } break; case 23: { - setState(467); + setState(477); set_local_region_statement(); } break; case 24: { - setState(468); + setState(478); show_statement(); } break; case 25: { - setState(469); + setState(479); index_function_path(); } break; @@ -619,17 +625,17 @@ public final QueryContext query() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(473); + setState(483); _errHandler.sync(this); _la = _input.LA(1); if (_la==DECLARE) { { - setState(472); + setState(482); prolog(); } } - setState(475); + setState(485); sfw_expr(); } } @@ -678,25 +684,25 @@ public final PrologContext prolog() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(477); + setState(487); match(DECLARE); - setState(478); + setState(488); var_decl(); - setState(479); + setState(489); match(SEMI); - setState(485); + setState(495); _errHandler.sync(this); _la = _input.LA(1); while (_la==VARNAME) { { { - setState(480); + setState(490); var_decl(); - setState(481); + setState(491); match(SEMI); } } - setState(487); + setState(497); _errHandler.sync(this); _la = _input.LA(1); } @@ -739,9 +745,9 @@ public final Var_declContext var_decl() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(488); + setState(498); match(VARNAME); - setState(489); + setState(499); type_def(); } } @@ -784,9 +790,9 @@ public final Index_function_pathContext index_function_path() throws Recognition try { enterOuterAlt(_localctx, 1); { - setState(491); + setState(501); prolog(); - setState(492); + setState(502); func_call(); } } @@ -826,7 +832,7 @@ public final ExprContext expr() throws 
RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(494); + setState(504); or_expr(0); } } @@ -885,56 +891,56 @@ public final Sfw_exprContext sfw_expr() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(496); + setState(506); select_clause(); - setState(497); + setState(507); from_clause(); - setState(499); + setState(509); _errHandler.sync(this); _la = _input.LA(1); if (_la==WHERE) { { - setState(498); + setState(508); where_clause(); } } - setState(502); + setState(512); _errHandler.sync(this); _la = _input.LA(1); if (_la==GROUP) { { - setState(501); + setState(511); groupby_clause(); } } - setState(505); + setState(515); _errHandler.sync(this); _la = _input.LA(1); if (_la==ORDER) { { - setState(504); + setState(514); orderby_clause(); } } - setState(508); + setState(518); _errHandler.sync(this); _la = _input.LA(1); if (_la==LIMIT) { { - setState(507); + setState(517); limit_clause(); } } - setState(511); + setState(521); _errHandler.sync(this); _la = _input.LA(1); if (_la==OFFSET) { { - setState(510); + setState(520); offset_clause(); } } @@ -1007,56 +1013,56 @@ public final From_clauseContext from_clause() throws RecognitionException { int _alt; enterOuterAlt(_localctx, 1); { - setState(513); + setState(523); match(FROM); - setState(514); + setState(524); table_spec(); - setState(519); + setState(529); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,8,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(515); + setState(525); match(COMMA); - setState(516); + setState(526); table_spec(); } } } - setState(521); + setState(531); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,8,_ctx); } - setState(534); + setState(544); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(522); + setState(532); match(COMMA); - setState(530); + setState(540); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,10,_ctx) ) { case 1: { { - setState(523); + setState(533); expr(); { - setState(525); + setState(535); _errHandler.sync(this); _la = _input.LA(1); if (_la==AS) { { - setState(524); + setState(534); match(AS); } } - setState(527); + setState(537); match(VARNAME); } } @@ -1064,14 +1070,14 @@ public final From_clauseContext from_clause() throws RecognitionException { break; case 2: { - setState(529); + setState(539); unnest_clause(); } break; } } } - setState(536); + setState(546); _errHandler.sync(this); _la = _input.LA(1); } @@ -1117,27 +1123,27 @@ public final Table_specContext table_spec() throws RecognitionException { Table_specContext _localctx = new Table_specContext(_ctx, getState()); enterRule(_localctx, 18, RULE_table_spec); try { - setState(540); + setState(550); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,12,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(537); + setState(547); from_table(); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(538); + setState(548); nested_tables(); } break; case 3: enterOuterAlt(_localctx, 3); { - setState(539); + setState(549); left_outer_join_tables(); } break; @@ -1198,47 +1204,47 @@ public final Nested_tablesContext nested_tables() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(542); + setState(552); match(NESTED); - setState(543); + setState(553); match(TABLES); - setState(544); + setState(554); match(LP); - setState(545); + setState(555); from_table(); - setState(551); + 
setState(561); _errHandler.sync(this); _la = _input.LA(1); if (_la==ANCESTORS) { { - setState(546); + setState(556); match(ANCESTORS); - setState(547); + setState(557); match(LP); - setState(548); + setState(558); ancestor_tables(); - setState(549); + setState(559); match(RP); } } - setState(558); + setState(568); _errHandler.sync(this); _la = _input.LA(1); if (_la==DESCENDANTS) { { - setState(553); + setState(563); match(DESCENDANTS); - setState(554); + setState(564); match(LP); - setState(555); + setState(565); descendant_tables(); - setState(556); + setState(566); match(RP); } } - setState(560); + setState(570); match(RP); } } @@ -1286,21 +1292,21 @@ public final Ancestor_tablesContext ancestor_tables() throws RecognitionExceptio try { enterOuterAlt(_localctx, 1); { - setState(562); + setState(572); from_table(); - setState(567); + setState(577); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(563); + setState(573); match(COMMA); - setState(564); + setState(574); from_table(); } } - setState(569); + setState(579); _errHandler.sync(this); _la = _input.LA(1); } @@ -1350,21 +1356,21 @@ public final Descendant_tablesContext descendant_tables() throws RecognitionExce try { enterOuterAlt(_localctx, 1); { - setState(570); + setState(580); from_table(); - setState(575); + setState(585); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(571); + setState(581); match(COMMA); - setState(572); + setState(582); from_table(); } } - setState(577); + setState(587); _errHandler.sync(this); _la = _input.LA(1); } @@ -1413,21 +1419,21 @@ public final Left_outer_join_tablesContext left_outer_join_tables() throws Recog try { enterOuterAlt(_localctx, 1); { - setState(578); + setState(588); from_table(); - setState(579); + setState(589); left_outer_join_table(); - setState(583); + setState(593); _errHandler.sync(this); _la = _input.LA(1); while (_la==LEFT_OUTER_JOIN) { { { - setState(580); + setState(590); left_outer_join_table(); } } - setState(585); + setState(595); _errHandler.sync(this); _la = _input.LA(1); } @@ -1470,9 +1476,9 @@ public final Left_outer_join_tableContext left_outer_join_table() throws Recogni try { enterOuterAlt(_localctx, 1); { - setState(586); + setState(596); match(LEFT_OUTER_JOIN); - setState(587); + setState(597); from_table(); } } @@ -1517,16 +1523,16 @@ public final From_tableContext from_table() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(589); + setState(599); aliased_table_name(); - setState(592); + setState(602); _errHandler.sync(this); _la = _input.LA(1); if (_la==ON) { { - setState(590); + setState(600); match(ON); - setState(591); + setState(601); or_expr(0); } } @@ -1574,25 +1580,25 @@ public final Aliased_table_nameContext aliased_table_name() throws RecognitionEx enterOuterAlt(_localctx, 1); { { - setState(594); + setState(604); table_name(); } - setState(599); + setState(609); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,20,_ctx) ) { case 1: { - setState(596); + setState(606); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,19,_ctx) ) { case 1: { - setState(595); + setState(605); match(AS); } break; } - setState(598); + setState(608); tab_alias(); } break; @@ -1634,13 +1640,13 @@ public final Tab_aliasContext tab_alias() throws RecognitionException { Tab_aliasContext _localctx = new Tab_aliasContext(_ctx, getState()); enterRule(_localctx, 34, RULE_tab_alias); try { - setState(603); + setState(613); _errHandler.sync(this); 
switch (_input.LA(1)) { case VARNAME: enterOuterAlt(_localctx, 1); { - setState(601); + setState(611); match(VARNAME); } break; @@ -1655,6 +1661,7 @@ public final Tab_aliasContext tab_alias() throws RecognitionException { case AS: case ASC: case ARRAY_COLLECT: + case BEFORE: case BETWEEN: case BY: case CACHE: @@ -1672,11 +1679,13 @@ public final Tab_aliasContext tab_alias() throws RecognitionException { case DESC: case DESCENDANTS: case DESCRIBE: + case DISABLE: case DISTINCT: case DROP: case ELEMENTOF: case ELEMENTS: case ELSE: + case ENABLE: case END: case ES_SHARDS: case ES_REPLICAS: @@ -1695,6 +1704,7 @@ public final Tab_aliasContext tab_alias() throws RecognitionException { case IDENTIFIED: case IDENTITY: case IF: + case IMAGE: case IN: case INCREMENT: case INDEX: @@ -1791,7 +1801,7 @@ public final Tab_aliasContext tab_alias() throws RecognitionException { case BAD_ID: enterOuterAlt(_localctx, 2); { - setState(602); + setState(612); id(); } break; @@ -1854,59 +1864,59 @@ public final Unnest_clauseContext unnest_clause() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(605); + setState(615); match(UNNEST); - setState(606); + setState(616); match(LP); - setState(607); + setState(617); path_expr(); { - setState(609); + setState(619); _errHandler.sync(this); _la = _input.LA(1); if (_la==AS) { { - setState(608); + setState(618); match(AS); } } - setState(611); + setState(621); match(VARNAME); } - setState(622); + setState(632); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(613); + setState(623); match(COMMA); { - setState(614); + setState(624); path_expr(); { - setState(616); + setState(626); _errHandler.sync(this); _la = _input.LA(1); if (_la==AS) { { - setState(615); + setState(625); match(AS); } } - setState(618); + setState(628); match(VARNAME); } } } } - setState(624); + setState(634); _errHandler.sync(this); _la = _input.LA(1); } - setState(625); + setState(635); match(RP); } } @@ -1947,9 +1957,9 @@ public final Where_clauseContext where_clause() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(627); + setState(637); match(WHERE); - setState(628); + setState(638); expr(); } } @@ -1990,9 +2000,9 @@ public final Select_clauseContext select_clause() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(630); + setState(640); match(SELECT); - setState(631); + setState(641); select_list(); } } @@ -2051,32 +2061,32 @@ public final Select_listContext select_list() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(634); + setState(644); _errHandler.sync(this); _la = _input.LA(1); if (_la==T__0) { { - setState(633); + setState(643); hints(); } } - setState(637); + setState(647); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,26,_ctx) ) { case 1: { - setState(636); + setState(646); match(DISTINCT); } break; } - setState(651); + setState(661); _errHandler.sync(this); switch (_input.LA(1)) { case STAR: { - setState(639); + setState(649); match(STAR); } break; @@ -2092,6 +2102,7 @@ public final Select_listContext select_list() throws RecognitionException { case AS: case ASC: case ARRAY_COLLECT: + case BEFORE: case BETWEEN: case BY: case CACHE: @@ -2109,11 +2120,13 @@ public final Select_listContext select_list() throws RecognitionException { case DESC: case DESCENDANTS: case DESCRIBE: + case DISABLE: case DISTINCT: case DROP: case ELEMENTOF: case ELEMENTS: case ELSE: + case ENABLE: case END: case ES_SHARDS: case ES_REPLICAS: @@ 
-2132,6 +2145,7 @@ public final Select_listContext select_list() throws RecognitionException { case IDENTIFIED: case IDENTITY: case IF: + case IMAGE: case IN: case INCREMENT: case INDEX: @@ -2243,25 +2257,25 @@ public final Select_listContext select_list() throws RecognitionException { case BAD_ID: { { - setState(640); + setState(650); expr(); - setState(641); + setState(651); col_alias(); - setState(648); + setState(658); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(642); + setState(652); match(COMMA); - setState(643); + setState(653); expr(); - setState(644); + setState(654); col_alias(); } } - setState(650); + setState(660); _errHandler.sync(this); _la = _input.LA(1); } @@ -2313,23 +2327,23 @@ public final HintsContext hints() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(653); + setState(663); match(T__0); - setState(657); + setState(667); _errHandler.sync(this); _la = _input.LA(1); - while (((((_la - 47)) & ~0x3f) == 0 && ((1L << (_la - 47)) & 54043195528445955L) != 0)) { + while (((((_la - 51)) & ~0x3f) == 0 && ((1L << (_la - 51)) & 108086391056891907L) != 0)) { { { - setState(654); + setState(664); hint(); } } - setState(659); + setState(669); _errHandler.sync(this); _la = _input.LA(1); } - setState(660); + setState(670); match(T__1); } } @@ -2383,33 +2397,33 @@ public final HintContext hint() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(689); + setState(699); _errHandler.sync(this); switch (_input.LA(1)) { case PREFER_INDEXES: { { - setState(662); + setState(672); match(PREFER_INDEXES); - setState(663); + setState(673); match(LP); - setState(664); + setState(674); table_name(); - setState(668); + setState(678); _errHandler.sync(this); _la = _input.LA(1); - while ((((_la) & ~0x3f) == 0 && ((1L << _la) & -492581210292256L) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & -417149895185L) != 0) || ((((_la - 128)) & ~0x3f) == 0 && ((1L << (_la - 128)) & 549755293695L) != 0) || ((((_la - 195)) & ~0x3f) == 0 && ((1L << (_la - 195)) & 6145L) != 0)) { + while ((((_la) & ~0x3f) == 0 && ((1L << _la) & -7881299352092736L) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & -13348796645889L) != 0) || ((((_la - 128)) & ~0x3f) == 0 && ((1L << (_la - 128)) & 17592169398271L) != 0) || ((((_la - 200)) & ~0x3f) == 0 && ((1L << (_la - 200)) & 6145L) != 0)) { { { - setState(665); + setState(675); index_name(); } } - setState(670); + setState(680); _errHandler.sync(this); _la = _input.LA(1); } - setState(671); + setState(681); match(RP); } } @@ -2417,15 +2431,15 @@ public final HintContext hint() throws RecognitionException { case FORCE_INDEX: { { - setState(673); + setState(683); match(FORCE_INDEX); - setState(674); + setState(684); match(LP); - setState(675); + setState(685); table_name(); - setState(676); + setState(686); index_name(); - setState(677); + setState(687); match(RP); } } @@ -2433,13 +2447,13 @@ public final HintContext hint() throws RecognitionException { case PREFER_PRIMARY_INDEX: { { - setState(679); + setState(689); match(PREFER_PRIMARY_INDEX); - setState(680); + setState(690); match(LP); - setState(681); + setState(691); table_name(); - setState(682); + setState(692); match(RP); } } @@ -2447,13 +2461,13 @@ public final HintContext hint() throws RecognitionException { case FORCE_PRIMARY_INDEX: { { - setState(684); + setState(694); match(FORCE_PRIMARY_INDEX); - setState(685); + setState(695); match(LP); - setState(686); + setState(696); table_name(); - 
setState(687); + setState(697); match(RP); } } @@ -2461,12 +2475,12 @@ public final HintContext hint() throws RecognitionException { default: throw new NoViableAltException(this); } - setState(692); + setState(702); _errHandler.sync(this); _la = _input.LA(1); if (_la==STRING) { { - setState(691); + setState(701); match(STRING); } } @@ -2511,14 +2525,14 @@ public final Col_aliasContext col_alias() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(696); + setState(706); _errHandler.sync(this); _la = _input.LA(1); if (_la==AS) { { - setState(694); + setState(704); match(AS); - setState(695); + setState(705); id(); } } @@ -2577,29 +2591,29 @@ public final Orderby_clauseContext orderby_clause() throws RecognitionException try { enterOuterAlt(_localctx, 1); { - setState(698); + setState(708); match(ORDER); - setState(699); + setState(709); match(BY); - setState(700); + setState(710); expr(); - setState(701); + setState(711); sort_spec(); - setState(708); + setState(718); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(702); + setState(712); match(COMMA); - setState(703); + setState(713); expr(); - setState(704); + setState(714); sort_spec(); } } - setState(710); + setState(720); _errHandler.sync(this); _la = _input.LA(1); } @@ -2644,12 +2658,12 @@ public final Sort_specContext sort_spec() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(712); + setState(722); _errHandler.sync(this); _la = _input.LA(1); if (_la==ASC || _la==DESC) { { - setState(711); + setState(721); _la = _input.LA(1); if ( !(_la==ASC || _la==DESC) ) { _errHandler.recoverInline(this); @@ -2662,14 +2676,14 @@ public final Sort_specContext sort_spec() throws RecognitionException { } } - setState(716); + setState(726); _errHandler.sync(this); _la = _input.LA(1); if (_la==NULLS) { { - setState(714); + setState(724); match(NULLS); - setState(715); + setState(725); _la = _input.LA(1); if ( !(_la==FIRST || _la==LAST) ) { _errHandler.recoverInline(this); @@ -2730,25 +2744,25 @@ public final Groupby_clauseContext groupby_clause() throws RecognitionException try { enterOuterAlt(_localctx, 1); { - setState(718); + setState(728); match(GROUP); - setState(719); + setState(729); match(BY); - setState(720); + setState(730); expr(); - setState(725); + setState(735); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(721); + setState(731); match(COMMA); - setState(722); + setState(732); expr(); } } - setState(727); + setState(737); _errHandler.sync(this); _la = _input.LA(1); } @@ -2791,9 +2805,9 @@ public final Limit_clauseContext limit_clause() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(728); + setState(738); match(LIMIT); - setState(729); + setState(739); add_expr(); } } @@ -2834,9 +2848,9 @@ public final Offset_clauseContext offset_clause() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(731); + setState(741); match(OFFSET); - setState(732); + setState(742); add_expr(); } } @@ -2890,11 +2904,11 @@ private Or_exprContext or_expr(int _p) throws RecognitionException { enterOuterAlt(_localctx, 1); { { - setState(735); + setState(745); and_expr(0); } _ctx.stop = _input.LT(-1); - setState(742); + setState(752); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,38,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { @@ -2905,16 +2919,16 @@ private Or_exprContext or_expr(int _p) throws RecognitionException { { _localctx 
= new Or_exprContext(_parentctx, _parentState); pushNewRecursionContext(_localctx, _startState, RULE_or_expr); - setState(737); + setState(747); if (!(precpred(_ctx, 1))) throw new FailedPredicateException(this, "precpred(_ctx, 1)"); - setState(738); + setState(748); match(OR); - setState(739); + setState(749); and_expr(0); } } } - setState(744); + setState(754); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,38,_ctx); } @@ -2970,11 +2984,11 @@ private And_exprContext and_expr(int _p) throws RecognitionException { enterOuterAlt(_localctx, 1); { { - setState(746); + setState(756); not_expr(); } _ctx.stop = _input.LT(-1); - setState(753); + setState(763); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,39,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { @@ -2985,16 +2999,16 @@ private And_exprContext and_expr(int _p) throws RecognitionException { { _localctx = new And_exprContext(_parentctx, _parentState); pushNewRecursionContext(_localctx, _startState, RULE_and_expr); - setState(748); + setState(758); if (!(precpred(_ctx, 1))) throw new FailedPredicateException(this, "precpred(_ctx, 1)"); - setState(749); + setState(759); match(AND); - setState(750); + setState(760); not_expr(); } } } - setState(755); + setState(765); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,39,_ctx); } @@ -3037,17 +3051,17 @@ public final Not_exprContext not_expr() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(757); + setState(767); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,40,_ctx) ) { case 1: { - setState(756); + setState(766); match(NOT); } break; } - setState(759); + setState(769); is_null_expr(); } } @@ -3091,26 +3105,26 @@ public final Is_null_exprContext is_null_expr() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(761); + setState(771); cond_expr(); - setState(767); + setState(777); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,42,_ctx) ) { case 1: { - setState(762); + setState(772); match(IS); - setState(764); + setState(774); _errHandler.sync(this); _la = _input.LA(1); if (_la==NOT) { { - setState(763); + setState(773); match(NOT); } } - setState(766); + setState(776); match(NULL); } break; @@ -3163,41 +3177,41 @@ public final Cond_exprContext cond_expr() throws RecognitionException { Cond_exprContext _localctx = new Cond_exprContext(_ctx, getState()); enterRule(_localctx, 68, RULE_cond_expr); try { - setState(774); + setState(784); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,43,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(769); + setState(779); between_expr(); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(770); + setState(780); comp_expr(); } break; case 3: enterOuterAlt(_localctx, 3); { - setState(771); + setState(781); in_expr(); } break; case 4: enterOuterAlt(_localctx, 4); { - setState(772); + setState(782); exists_expr(); } break; case 5: enterOuterAlt(_localctx, 5); { - setState(773); + setState(783); is_of_type_expr(); } break; @@ -3244,15 +3258,15 @@ public final Between_exprContext between_expr() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(776); + setState(786); concatenate_expr(); - setState(777); + setState(787); match(BETWEEN); - setState(778); + setState(788); concatenate_expr(); - setState(779); + setState(789); match(AND); - setState(780); + setState(790); concatenate_expr(); } } 
@@ -3302,14 +3316,14 @@ public final Comp_exprContext comp_expr() throws RecognitionException { enterOuterAlt(_localctx, 1); { { - setState(782); + setState(792); concatenate_expr(); - setState(789); + setState(799); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,45,_ctx) ) { case 1: { - setState(785); + setState(795); _errHandler.sync(this); switch (_input.LA(1)) { case LT: @@ -3319,7 +3333,7 @@ public final Comp_exprContext comp_expr() throws RecognitionException { case EQ: case NEQ: { - setState(783); + setState(793); comp_op(); } break; @@ -3330,14 +3344,14 @@ public final Comp_exprContext comp_expr() throws RecognitionException { case EQ_ANY: case NEQ_ANY: { - setState(784); + setState(794); any_op(); } break; default: throw new NoViableAltException(this); } - setState(787); + setState(797); concatenate_expr(); } break; @@ -3385,9 +3399,9 @@ public final Comp_opContext comp_op() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(791); + setState(801); _la = _input.LA(1); - if ( !(((((_la - 180)) & ~0x3f) == 0 && ((1L << (_la - 180)) & 63L) != 0)) ) { + if ( !(((((_la - 185)) & ~0x3f) == 0 && ((1L << (_la - 185)) & 63L) != 0)) ) { _errHandler.recoverInline(this); } else { @@ -3434,14 +3448,14 @@ public final Any_opContext any_op() throws RecognitionException { Any_opContext _localctx = new Any_opContext(_ctx, getState()); enterRule(_localctx, 76, RULE_any_op); try { - setState(799); + setState(809); _errHandler.sync(this); switch (_input.LA(1)) { case EQ_ANY: enterOuterAlt(_localctx, 1); { { - setState(793); + setState(803); match(EQ_ANY); } } @@ -3450,7 +3464,7 @@ public final Any_opContext any_op() throws RecognitionException { enterOuterAlt(_localctx, 2); { { - setState(794); + setState(804); match(NEQ_ANY); } } @@ -3459,7 +3473,7 @@ public final Any_opContext any_op() throws RecognitionException { enterOuterAlt(_localctx, 3); { { - setState(795); + setState(805); match(GT_ANY); } } @@ -3468,7 +3482,7 @@ public final Any_opContext any_op() throws RecognitionException { enterOuterAlt(_localctx, 4); { { - setState(796); + setState(806); match(GTE_ANY); } } @@ -3477,7 +3491,7 @@ public final Any_opContext any_op() throws RecognitionException { enterOuterAlt(_localctx, 5); { { - setState(797); + setState(807); match(LT_ANY); } } @@ -3486,7 +3500,7 @@ public final Any_opContext any_op() throws RecognitionException { enterOuterAlt(_localctx, 6); { { - setState(798); + setState(808); match(LTE_ANY); } } @@ -3535,27 +3549,27 @@ public final In_exprContext in_expr() throws RecognitionException { In_exprContext _localctx = new In_exprContext(_ctx, getState()); enterRule(_localctx, 78, RULE_in_expr); try { - setState(804); + setState(814); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,47,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(801); + setState(811); in1_expr(); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(802); + setState(812); in2_expr(); } break; case 3: enterOuterAlt(_localctx, 3); { - setState(803); + setState(813); in3_expr(); } break; @@ -3611,31 +3625,31 @@ public final In1_exprContext in1_expr() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(806); + setState(816); in1_left_op(); - setState(807); + setState(817); match(IN); - setState(808); + setState(818); match(LP); - setState(809); + setState(819); in1_expr_list(); - setState(812); + setState(822); _errHandler.sync(this); _la = _input.LA(1); do { { { - setState(810); + setState(820); 
match(COMMA); - setState(811); + setState(821); in1_expr_list(); } } - setState(814); + setState(824); _errHandler.sync(this); _la = _input.LA(1); } while ( _la==COMMA ); - setState(816); + setState(826); match(RP); } } @@ -3685,27 +3699,27 @@ public final In1_left_opContext in1_left_op() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(818); + setState(828); match(LP); - setState(819); + setState(829); concatenate_expr(); - setState(824); + setState(834); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(820); + setState(830); match(COMMA); - setState(821); + setState(831); concatenate_expr(); } } - setState(826); + setState(836); _errHandler.sync(this); _la = _input.LA(1); } - setState(827); + setState(837); match(RP); } } @@ -3755,27 +3769,27 @@ public final In1_expr_listContext in1_expr_list() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(829); + setState(839); match(LP); - setState(830); + setState(840); expr(); - setState(835); + setState(845); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(831); + setState(841); match(COMMA); - setState(832); + setState(842); expr(); } } - setState(837); + setState(847); _errHandler.sync(this); _la = _input.LA(1); } - setState(838); + setState(848); match(RP); } } @@ -3829,31 +3843,31 @@ public final In2_exprContext in2_expr() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(840); + setState(850); concatenate_expr(); - setState(841); + setState(851); match(IN); - setState(842); + setState(852); match(LP); - setState(843); + setState(853); expr(); - setState(846); + setState(856); _errHandler.sync(this); _la = _input.LA(1); do { { { - setState(844); + setState(854); match(COMMA); - setState(845); + setState(855); expr(); } } - setState(848); + setState(858); _errHandler.sync(this); _la = _input.LA(1); } while ( _la==COMMA ); - setState(850); + setState(860); match(RP); } } @@ -3907,47 +3921,47 @@ public final In3_exprContext in3_expr() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(864); + setState(874); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,53,_ctx) ) { case 1: { - setState(852); + setState(862); concatenate_expr(); } break; case 2: { { - setState(853); + setState(863); match(LP); - setState(854); + setState(864); concatenate_expr(); - setState(859); + setState(869); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(855); + setState(865); match(COMMA); - setState(856); + setState(866); concatenate_expr(); } } - setState(861); + setState(871); _errHandler.sync(this); _la = _input.LA(1); } - setState(862); + setState(872); match(RP); } } break; } - setState(866); + setState(876); match(IN); - setState(867); + setState(877); path_expr(); } } @@ -3988,9 +4002,9 @@ public final Exists_exprContext exists_expr() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(869); + setState(879); match(EXISTS); - setState(870); + setState(880); concatenate_expr(); } } @@ -4051,73 +4065,73 @@ public final Is_of_type_exprContext is_of_type_expr() throws RecognitionExceptio try { enterOuterAlt(_localctx, 1); { - setState(872); + setState(882); concatenate_expr(); - setState(873); + setState(883); match(IS); - setState(875); + setState(885); _errHandler.sync(this); _la = _input.LA(1); if (_la==NOT) { { - setState(874); + setState(884); match(NOT); } } - setState(877); + setState(887); match(OF); - 
setState(879); + setState(889); _errHandler.sync(this); _la = _input.LA(1); if (_la==TYPE) { { - setState(878); + setState(888); match(TYPE); } } - setState(881); + setState(891); match(LP); - setState(883); + setState(893); _errHandler.sync(this); _la = _input.LA(1); if (_la==ONLY) { { - setState(882); + setState(892); match(ONLY); } } - setState(885); + setState(895); quantified_type_def(); - setState(893); + setState(903); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(886); + setState(896); match(COMMA); - setState(888); + setState(898); _errHandler.sync(this); _la = _input.LA(1); if (_la==ONLY) { { - setState(887); + setState(897); match(ONLY); } } - setState(890); + setState(900); quantified_type_def(); } } - setState(895); + setState(905); _errHandler.sync(this); _la = _input.LA(1); } - setState(896); + setState(906); match(RP); } } @@ -4165,23 +4179,23 @@ public final Concatenate_exprContext concatenate_expr() throws RecognitionExcept int _alt; enterOuterAlt(_localctx, 1); { - setState(898); + setState(908); add_expr(); - setState(903); + setState(913); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,59,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(899); + setState(909); match(CONCAT); - setState(900); + setState(910); add_expr(); } } } - setState(905); + setState(915); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,59,_ctx); } @@ -4236,16 +4250,16 @@ public final Add_exprContext add_expr() throws RecognitionException { int _alt; enterOuterAlt(_localctx, 1); { - setState(906); + setState(916); multiply_expr(); - setState(911); + setState(921); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,60,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(907); + setState(917); _la = _input.LA(1); if ( !(_la==PLUS || _la==MINUS) ) { _errHandler.recoverInline(this); @@ -4255,12 +4269,12 @@ public final Add_exprContext add_expr() throws RecognitionException { _errHandler.reportMatch(this); consume(); } - setState(908); + setState(918); multiply_expr(); } } } - setState(913); + setState(923); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,60,_ctx); } @@ -4319,18 +4333,18 @@ public final Multiply_exprContext multiply_expr() throws RecognitionException { int _alt; enterOuterAlt(_localctx, 1); { - setState(914); + setState(924); unary_expr(); - setState(919); + setState(929); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,61,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(915); + setState(925); _la = _input.LA(1); - if ( !(((((_la - 176)) & ~0x3f) == 0 && ((1L << (_la - 176)) & 786433L) != 0)) ) { + if ( !(((((_la - 181)) & ~0x3f) == 0 && ((1L << (_la - 181)) & 786433L) != 0)) ) { _errHandler.recoverInline(this); } else { @@ -4338,12 +4352,12 @@ public final Multiply_exprContext multiply_expr() throws RecognitionException { _errHandler.reportMatch(this); consume(); } - setState(916); + setState(926); unary_expr(); } } } - setState(921); + setState(931); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,61,_ctx); } @@ -4389,20 +4403,20 @@ public final Unary_exprContext unary_expr() throws RecognitionException { enterRule(_localctx, 100, RULE_unary_expr); int _la; try { - setState(925); + setState(935); _errHandler.sync(this); switch 
( getInterpreter().adaptivePredict(_input,62,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(922); + setState(932); path_expr(); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(923); + setState(933); _la = _input.LA(1); if ( !(_la==PLUS || _la==MINUS) ) { _errHandler.recoverInline(this); @@ -4412,7 +4426,7 @@ public final Unary_exprContext unary_expr() throws RecognitionException { _errHandler.reportMatch(this); consume(); } - setState(924); + setState(934); unary_expr(); } break; @@ -4467,26 +4481,26 @@ public final Path_exprContext path_expr() throws RecognitionException { int _alt; enterOuterAlt(_localctx, 1); { - setState(927); + setState(937); primary_expr(); - setState(932); + setState(942); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,64,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { - setState(930); + setState(940); _errHandler.sync(this); switch (_input.LA(1)) { case DOT: { - setState(928); + setState(938); map_step(); } break; case LBRACK: { - setState(929); + setState(939); array_step(); } break; @@ -4495,7 +4509,7 @@ public final Path_exprContext path_expr() throws RecognitionException { } } } - setState(934); + setState(944); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,64,_ctx); } @@ -4541,20 +4555,20 @@ public final Map_stepContext map_step() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(935); + setState(945); match(DOT); - setState(938); + setState(948); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,65,_ctx) ) { case 1: { - setState(936); + setState(946); map_filter_step(); } break; case 2: { - setState(937); + setState(947); map_field_step(); } break; @@ -4609,36 +4623,36 @@ public final Map_field_stepContext map_field_step() throws RecognitionException try { enterOuterAlt(_localctx, 1); { - setState(945); + setState(955); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,66,_ctx) ) { case 1: { - setState(940); + setState(950); id(); } break; case 2: { - setState(941); + setState(951); string(); } break; case 3: { - setState(942); + setState(952); var_ref(); } break; case 4: { - setState(943); + setState(953); parenthesized_expr(); } break; case 5: { - setState(944); + setState(954); func_call(); } break; @@ -4686,7 +4700,7 @@ public final Map_filter_stepContext map_filter_step() throws RecognitionExceptio try { enterOuterAlt(_localctx, 1); { - setState(947); + setState(957); _la = _input.LA(1); if ( !(_la==KEYS || _la==VALUES) ) { _errHandler.recoverInline(this); @@ -4696,19 +4710,19 @@ public final Map_filter_stepContext map_filter_step() throws RecognitionExceptio _errHandler.reportMatch(this); consume(); } - setState(948); + setState(958); match(LP); - setState(950); + setState(960); _errHandler.sync(this); _la = _input.LA(1); - if ((((_la) & ~0x3f) == 0 && ((1L << _la) & -492581210292240L) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & -417149895185L) != 0) || ((((_la - 128)) & ~0x3f) == 0 && ((1L << (_la - 128)) & 3470608452554751L) != 0) || ((((_la - 192)) & ~0x3f) == 0 && ((1L << (_la - 192)) & 57323L) != 0)) { + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & -7881299352092704L) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & -13348796645889L) != 0) || ((((_la - 128)) & ~0x3f) == 0 && ((1L << (_la - 128)) & 111059470481752063L) != 0) || ((((_la - 197)) & ~0x3f) == 0 && ((1L << (_la - 197)) & 57323L) != 0)) { { - setState(949); + 
setState(959); expr(); } } - setState(952); + setState(962); match(RP); } } @@ -4749,20 +4763,20 @@ public final Array_stepContext array_step() throws RecognitionException { Array_stepContext _localctx = new Array_stepContext(_ctx, getState()); enterRule(_localctx, 110, RULE_array_step); try { - setState(956); + setState(966); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,68,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(954); + setState(964); array_filter_step(); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(955); + setState(965); array_slice_step(); } break; @@ -4811,31 +4825,31 @@ public final Array_slice_stepContext array_slice_step() throws RecognitionExcept try { enterOuterAlt(_localctx, 1); { - setState(958); + setState(968); match(LBRACK); - setState(960); + setState(970); _errHandler.sync(this); _la = _input.LA(1); - if ((((_la) & ~0x3f) == 0 && ((1L << _la) & -492581210292240L) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & -417149895185L) != 0) || ((((_la - 128)) & ~0x3f) == 0 && ((1L << (_la - 128)) & 3470608452554751L) != 0) || ((((_la - 192)) & ~0x3f) == 0 && ((1L << (_la - 192)) & 57323L) != 0)) { + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & -7881299352092704L) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & -13348796645889L) != 0) || ((((_la - 128)) & ~0x3f) == 0 && ((1L << (_la - 128)) & 111059470481752063L) != 0) || ((((_la - 197)) & ~0x3f) == 0 && ((1L << (_la - 197)) & 57323L) != 0)) { { - setState(959); + setState(969); expr(); } } - setState(962); + setState(972); match(COLON); - setState(964); + setState(974); _errHandler.sync(this); _la = _input.LA(1); - if ((((_la) & ~0x3f) == 0 && ((1L << _la) & -492581210292240L) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & -417149895185L) != 0) || ((((_la - 128)) & ~0x3f) == 0 && ((1L << (_la - 128)) & 3470608452554751L) != 0) || ((((_la - 192)) & ~0x3f) == 0 && ((1L << (_la - 192)) & 57323L) != 0)) { + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & -7881299352092704L) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & -13348796645889L) != 0) || ((((_la - 128)) & ~0x3f) == 0 && ((1L << (_la - 128)) & 111059470481752063L) != 0) || ((((_la - 197)) & ~0x3f) == 0 && ((1L << (_la - 197)) & 57323L) != 0)) { { - setState(963); + setState(973); expr(); } } - setState(966); + setState(976); match(RBRACK); } } @@ -4878,19 +4892,19 @@ public final Array_filter_stepContext array_filter_step() throws RecognitionExce try { enterOuterAlt(_localctx, 1); { - setState(968); + setState(978); match(LBRACK); - setState(970); + setState(980); _errHandler.sync(this); _la = _input.LA(1); - if ((((_la) & ~0x3f) == 0 && ((1L << _la) & -492581210292240L) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & -417149895185L) != 0) || ((((_la - 128)) & ~0x3f) == 0 && ((1L << (_la - 128)) & 3470608452554751L) != 0) || ((((_la - 192)) & ~0x3f) == 0 && ((1L << (_la - 192)) & 57323L) != 0)) { + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & -7881299352092704L) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & -13348796645889L) != 0) || ((((_la - 128)) & ~0x3f) == 0 && ((1L << (_la - 128)) & 111059470481752063L) != 0) || ((((_la - 197)) & ~0x3f) == 0 && ((1L << (_la - 197)) & 57323L) != 0)) { { - setState(969); + setState(979); expr(); } } - setState(972); + setState(982); match(RBRACK); } } @@ -4967,104 +4981,104 @@ public final Primary_exprContext primary_expr() throws RecognitionException { Primary_exprContext _localctx = new 
Primary_exprContext(_ctx, getState()); enterRule(_localctx, 116, RULE_primary_expr); try { - setState(988); + setState(998); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,72,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(974); + setState(984); const_expr(); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(975); + setState(985); column_ref(); } break; case 3: enterOuterAlt(_localctx, 3); { - setState(976); + setState(986); var_ref(); } break; case 4: enterOuterAlt(_localctx, 4); { - setState(977); + setState(987); array_constructor(); } break; case 5: enterOuterAlt(_localctx, 5); { - setState(978); + setState(988); map_constructor(); } break; case 6: enterOuterAlt(_localctx, 6); { - setState(979); + setState(989); transform_expr(); } break; case 7: enterOuterAlt(_localctx, 7); { - setState(980); + setState(990); collect(); } break; case 8: enterOuterAlt(_localctx, 8); { - setState(981); + setState(991); func_call(); } break; case 9: enterOuterAlt(_localctx, 9); { - setState(982); + setState(992); count_star(); } break; case 10: enterOuterAlt(_localctx, 10); { - setState(983); + setState(993); count_distinct(); } break; case 11: enterOuterAlt(_localctx, 11); { - setState(984); + setState(994); case_expr(); } break; case 12: enterOuterAlt(_localctx, 12); { - setState(985); + setState(995); cast_expr(); } break; case 13: enterOuterAlt(_localctx, 13); { - setState(986); + setState(996); parenthesized_expr(); } break; case 14: enterOuterAlt(_localctx, 14); { - setState(987); + setState(997); extract_expr(); } break; @@ -5113,16 +5127,16 @@ public final Column_refContext column_ref() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(990); + setState(1000); id(); - setState(996); + setState(1006); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,74,_ctx) ) { case 1: { - setState(991); + setState(1001); match(DOT); - setState(994); + setState(1004); _errHandler.sync(this); switch (_input.LA(1)) { case ACCOUNT: @@ -5136,6 +5150,7 @@ public final Column_refContext column_ref() throws RecognitionException { case AS: case ASC: case ARRAY_COLLECT: + case BEFORE: case BETWEEN: case BY: case CACHE: @@ -5153,11 +5168,13 @@ public final Column_refContext column_ref() throws RecognitionException { case DESC: case DESCENDANTS: case DESCRIBE: + case DISABLE: case DISTINCT: case DROP: case ELEMENTOF: case ELEMENTS: case ELSE: + case ENABLE: case END: case ES_SHARDS: case ES_REPLICAS: @@ -5176,6 +5193,7 @@ public final Column_refContext column_ref() throws RecognitionException { case IDENTIFIED: case IDENTITY: case IF: + case IMAGE: case IN: case INCREMENT: case INDEX: @@ -5271,14 +5289,14 @@ public final Column_refContext column_ref() throws RecognitionException { case ID: case BAD_ID: { - setState(992); + setState(1002); id(); } break; case DSTRING: case STRING: { - setState(993); + setState(1003); string(); } break; @@ -5330,7 +5348,7 @@ public final Const_exprContext const_expr() throws RecognitionException { Const_exprContext _localctx = new Const_exprContext(_ctx, getState()); enterRule(_localctx, 120, RULE_const_expr); try { - setState(1003); + setState(1013); _errHandler.sync(this); switch (_input.LA(1)) { case MINUS: @@ -5339,7 +5357,7 @@ public final Const_exprContext const_expr() throws RecognitionException { case NUMBER: enterOuterAlt(_localctx, 1); { - setState(998); + setState(1008); number(); } break; @@ -5347,28 +5365,28 @@ public final Const_exprContext const_expr() throws RecognitionException { 
case STRING: enterOuterAlt(_localctx, 2); { - setState(999); + setState(1009); string(); } break; case TRUE: enterOuterAlt(_localctx, 3); { - setState(1000); + setState(1010); match(TRUE); } break; case FALSE: enterOuterAlt(_localctx, 4); { - setState(1001); + setState(1011); match(FALSE); } break; case NULL: enterOuterAlt(_localctx, 5); { - setState(1002); + setState(1012); match(NULL); } break; @@ -5413,7 +5431,7 @@ public final Var_refContext var_ref() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(1005); + setState(1015); _la = _input.LA(1); if ( !(_la==VARNAME || _la==DOLLAR || _la==QUESTION_MARK) ) { _errHandler.recoverInline(this); @@ -5471,35 +5489,35 @@ public final Array_constructorContext array_constructor() throws RecognitionExce try { enterOuterAlt(_localctx, 1); { - setState(1007); + setState(1017); match(LBRACK); - setState(1009); + setState(1019); _errHandler.sync(this); _la = _input.LA(1); - if ((((_la) & ~0x3f) == 0 && ((1L << _la) & -492581210292240L) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & -417149895185L) != 0) || ((((_la - 128)) & ~0x3f) == 0 && ((1L << (_la - 128)) & 3470608452554751L) != 0) || ((((_la - 192)) & ~0x3f) == 0 && ((1L << (_la - 192)) & 57323L) != 0)) { + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & -7881299352092704L) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & -13348796645889L) != 0) || ((((_la - 128)) & ~0x3f) == 0 && ((1L << (_la - 128)) & 111059470481752063L) != 0) || ((((_la - 197)) & ~0x3f) == 0 && ((1L << (_la - 197)) & 57323L) != 0)) { { - setState(1008); + setState(1018); expr(); } } - setState(1015); + setState(1025); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(1011); + setState(1021); match(COMMA); - setState(1012); + setState(1022); expr(); } } - setState(1017); + setState(1027); _errHandler.sync(this); _la = _input.LA(1); } - setState(1018); + setState(1028); match(RBRACK); } } @@ -5551,42 +5569,42 @@ public final Map_constructorContext map_constructor() throws RecognitionExceptio enterRule(_localctx, 126, RULE_map_constructor); int _la; try { - setState(1038); + setState(1048); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,79,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { { - setState(1020); + setState(1030); match(LBRACE); - setState(1021); + setState(1031); expr(); - setState(1022); + setState(1032); match(COLON); - setState(1023); + setState(1033); expr(); - setState(1031); + setState(1041); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(1024); + setState(1034); match(COMMA); - setState(1025); + setState(1035); expr(); - setState(1026); + setState(1036); match(COLON); - setState(1027); + setState(1037); expr(); } } - setState(1033); + setState(1043); _errHandler.sync(this); _la = _input.LA(1); } - setState(1034); + setState(1044); match(RBRACE); } } @@ -5595,9 +5613,9 @@ public final Map_constructorContext map_constructor() throws RecognitionExceptio enterOuterAlt(_localctx, 2); { { - setState(1036); + setState(1046); match(LBRACE); - setState(1037); + setState(1047); match(RBRACE); } } @@ -5647,17 +5665,17 @@ public final Transform_exprContext transform_expr() throws RecognitionException try { enterOuterAlt(_localctx, 1); { - setState(1040); + setState(1050); match(SEQ_TRANSFORM); - setState(1041); + setState(1051); match(LP); - setState(1042); + setState(1052); transform_input_expr(); - setState(1043); + setState(1053); match(COMMA); - setState(1044); + setState(1054); 
expr(); - setState(1045); + setState(1055); match(RP); } } @@ -5697,7 +5715,7 @@ public final Transform_input_exprContext transform_input_expr() throws Recogniti try { enterOuterAlt(_localctx, 1); { - setState(1047); + setState(1057); expr(); } } @@ -5741,23 +5759,23 @@ public final CollectContext collect() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(1049); + setState(1059); match(ARRAY_COLLECT); - setState(1050); + setState(1060); match(LP); - setState(1052); + setState(1062); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,80,_ctx) ) { case 1: { - setState(1051); + setState(1061); match(DISTINCT); } break; } - setState(1054); + setState(1064); expr(); - setState(1055); + setState(1065); match(RP); } } @@ -5810,37 +5828,37 @@ public final Func_callContext func_call() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(1057); + setState(1067); id(); - setState(1058); + setState(1068); match(LP); - setState(1067); + setState(1077); _errHandler.sync(this); _la = _input.LA(1); - if ((((_la) & ~0x3f) == 0 && ((1L << _la) & -492581210292240L) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & -417149895185L) != 0) || ((((_la - 128)) & ~0x3f) == 0 && ((1L << (_la - 128)) & 3470608452554751L) != 0) || ((((_la - 192)) & ~0x3f) == 0 && ((1L << (_la - 192)) & 57323L) != 0)) { + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & -7881299352092704L) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & -13348796645889L) != 0) || ((((_la - 128)) & ~0x3f) == 0 && ((1L << (_la - 128)) & 111059470481752063L) != 0) || ((((_la - 197)) & ~0x3f) == 0 && ((1L << (_la - 197)) & 57323L) != 0)) { { - setState(1059); + setState(1069); expr(); - setState(1064); + setState(1074); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(1060); + setState(1070); match(COMMA); - setState(1061); + setState(1071); expr(); } } - setState(1066); + setState(1076); _errHandler.sync(this); _la = _input.LA(1); } } } - setState(1069); + setState(1079); match(RP); } } @@ -5881,13 +5899,13 @@ public final Count_starContext count_star() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(1071); + setState(1081); match(COUNT); - setState(1072); + setState(1082); match(LP); - setState(1073); + setState(1083); match(STAR); - setState(1074); + setState(1084); match(RP); } } @@ -5931,15 +5949,15 @@ public final Count_distinctContext count_distinct() throws RecognitionException try { enterOuterAlt(_localctx, 1); { - setState(1076); + setState(1086); match(COUNT); - setState(1077); + setState(1087); match(LP); - setState(1078); + setState(1088); match(DISTINCT); - setState(1079); + setState(1089); expr(); - setState(1080); + setState(1090); match(RP); } } @@ -5994,49 +6012,49 @@ public final Case_exprContext case_expr() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(1082); + setState(1092); match(CASE); - setState(1083); + setState(1093); match(WHEN); - setState(1084); + setState(1094); expr(); - setState(1085); + setState(1095); match(THEN); - setState(1086); + setState(1096); expr(); - setState(1094); + setState(1104); _errHandler.sync(this); _la = _input.LA(1); while (_la==WHEN) { { { - setState(1087); + setState(1097); match(WHEN); - setState(1088); + setState(1098); expr(); - setState(1089); + setState(1099); match(THEN); - setState(1090); + setState(1100); expr(); } } - setState(1096); + setState(1106); _errHandler.sync(this); _la = _input.LA(1); } - 
setState(1099); + setState(1109); _errHandler.sync(this); _la = _input.LA(1); if (_la==ELSE) { { - setState(1097); + setState(1107); match(ELSE); - setState(1098); + setState(1108); expr(); } } - setState(1101); + setState(1111); match(END); } } @@ -6083,17 +6101,17 @@ public final Cast_exprContext cast_expr() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(1103); + setState(1113); match(CAST); - setState(1104); + setState(1114); match(LP); - setState(1105); + setState(1115); expr(); - setState(1106); + setState(1116); match(AS); - setState(1107); + setState(1117); quantified_type_def(); - setState(1108); + setState(1118); match(RP); } } @@ -6135,11 +6153,11 @@ public final Parenthesized_exprContext parenthesized_expr() throws RecognitionEx try { enterOuterAlt(_localctx, 1); { - setState(1110); + setState(1120); match(LP); - setState(1111); + setState(1121); expr(); - setState(1112); + setState(1122); match(RP); } } @@ -6186,17 +6204,17 @@ public final Extract_exprContext extract_expr() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(1114); + setState(1124); match(EXTRACT); - setState(1115); + setState(1125); match(LP); - setState(1116); + setState(1126); id(); - setState(1117); + setState(1127); match(FROM); - setState(1118); + setState(1128); expr(); - setState(1119); + setState(1129); match(RP); } } @@ -6280,17 +6298,17 @@ public final Insert_statementContext insert_statement() throws RecognitionExcept try { enterOuterAlt(_localctx, 1); { - setState(1122); + setState(1132); _errHandler.sync(this); _la = _input.LA(1); if (_la==DECLARE) { { - setState(1121); + setState(1131); prolog(); } } - setState(1124); + setState(1134); _la = _input.LA(1); if ( !(_la==INSERT || _la==UPSERT) ) { _errHandler.recoverInline(this); @@ -6300,104 +6318,104 @@ public final Insert_statementContext insert_statement() throws RecognitionExcept _errHandler.reportMatch(this); consume(); } - setState(1125); + setState(1135); match(INTO); - setState(1126); + setState(1136); table_name(); - setState(1131); + setState(1141); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,87,_ctx) ) { case 1: { - setState(1128); + setState(1138); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,86,_ctx) ) { case 1: { - setState(1127); + setState(1137); match(AS); } break; } - setState(1130); + setState(1140); tab_alias(); } break; } - setState(1144); + setState(1154); _errHandler.sync(this); _la = _input.LA(1); if (_la==LP) { { - setState(1133); + setState(1143); match(LP); - setState(1134); + setState(1144); insert_label(); - setState(1139); + setState(1149); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(1135); + setState(1145); match(COMMA); - setState(1136); + setState(1146); insert_label(); } } - setState(1141); + setState(1151); _errHandler.sync(this); _la = _input.LA(1); } - setState(1142); + setState(1152); match(RP); } } - setState(1146); + setState(1156); match(VALUES); - setState(1147); + setState(1157); match(LP); - setState(1148); + setState(1158); insert_clause(); - setState(1153); + setState(1163); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(1149); + setState(1159); match(COMMA); - setState(1150); + setState(1160); insert_clause(); } } - setState(1155); + setState(1165); _errHandler.sync(this); _la = _input.LA(1); } - setState(1156); + setState(1166); match(RP); - setState(1160); + setState(1170); _errHandler.sync(this); _la = _input.LA(1); if 
(_la==SET) { { - setState(1157); + setState(1167); match(SET); - setState(1158); + setState(1168); match(TTL); - setState(1159); + setState(1169); insert_ttl_clause(); } } - setState(1163); + setState(1173); _errHandler.sync(this); _la = _input.LA(1); if (_la==RETURNING) { { - setState(1162); + setState(1172); insert_returning_clause(); } } @@ -6441,7 +6459,7 @@ public final Insert_labelContext insert_label() throws RecognitionException { Insert_labelContext _localctx = new Insert_labelContext(_ctx, getState()); enterRule(_localctx, 150, RULE_insert_label); try { - setState(1167); + setState(1177); _errHandler.sync(this); switch (_input.LA(1)) { case ACCOUNT: @@ -6455,6 +6473,7 @@ public final Insert_labelContext insert_label() throws RecognitionException { case AS: case ASC: case ARRAY_COLLECT: + case BEFORE: case BETWEEN: case BY: case CACHE: @@ -6472,11 +6491,13 @@ public final Insert_labelContext insert_label() throws RecognitionException { case DESC: case DESCENDANTS: case DESCRIBE: + case DISABLE: case DISTINCT: case DROP: case ELEMENTOF: case ELEMENTS: case ELSE: + case ENABLE: case END: case ES_SHARDS: case ES_REPLICAS: @@ -6495,6 +6516,7 @@ public final Insert_labelContext insert_label() throws RecognitionException { case IDENTIFIED: case IDENTITY: case IF: + case IMAGE: case IN: case INCREMENT: case INDEX: @@ -6591,7 +6613,7 @@ public final Insert_labelContext insert_label() throws RecognitionException { case BAD_ID: enterOuterAlt(_localctx, 1); { - setState(1165); + setState(1175); id(); } break; @@ -6599,7 +6621,7 @@ public final Insert_labelContext insert_label() throws RecognitionException { case STRING: enterOuterAlt(_localctx, 2); { - setState(1166); + setState(1176); string(); } break; @@ -6644,9 +6666,9 @@ public final Insert_returning_clauseContext insert_returning_clause() throws Rec try { enterOuterAlt(_localctx, 1); { - setState(1169); + setState(1179); match(RETURNING); - setState(1170); + setState(1180); select_list(); } } @@ -6685,20 +6707,20 @@ public final Insert_clauseContext insert_clause() throws RecognitionException { Insert_clauseContext _localctx = new Insert_clauseContext(_ctx, getState()); enterRule(_localctx, 154, RULE_insert_clause); try { - setState(1174); + setState(1184); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,94,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(1172); + setState(1182); match(DEFAULT); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(1173); + setState(1183); expr(); } break; @@ -6744,16 +6766,16 @@ public final Insert_ttl_clauseContext insert_ttl_clause() throws RecognitionExce enterRule(_localctx, 156, RULE_insert_ttl_clause); int _la; try { - setState(1182); + setState(1192); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,95,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { { - setState(1176); + setState(1186); add_expr(); - setState(1177); + setState(1187); _la = _input.LA(1); if ( !(_la==DAYS || _la==HOURS) ) { _errHandler.recoverInline(this); @@ -6770,11 +6792,11 @@ public final Insert_ttl_clauseContext insert_ttl_clause() throws RecognitionExce enterOuterAlt(_localctx, 2); { { - setState(1179); + setState(1189); match(USING); - setState(1180); + setState(1190); match(TABLE); - setState(1181); + setState(1191); match(DEFAULT); } } @@ -6843,68 +6865,68 @@ public final Update_statementContext update_statement() throws RecognitionExcept try { enterOuterAlt(_localctx, 1); { - setState(1185); + setState(1195); _errHandler.sync(this); _la = _input.LA(1); if 
(_la==DECLARE) { { - setState(1184); + setState(1194); prolog(); } } - setState(1187); + setState(1197); match(UPDATE); - setState(1188); + setState(1198); table_name(); - setState(1193); + setState(1203); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,98,_ctx) ) { case 1: { - setState(1190); + setState(1200); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,97,_ctx) ) { case 1: { - setState(1189); + setState(1199); match(AS); } break; } - setState(1192); + setState(1202); tab_alias(); } break; } - setState(1195); + setState(1205); update_clause(); - setState(1200); + setState(1210); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(1196); + setState(1206); match(COMMA); - setState(1197); + setState(1207); update_clause(); } } - setState(1202); + setState(1212); _errHandler.sync(this); _la = _input.LA(1); } - setState(1203); + setState(1213); match(WHERE); - setState(1204); + setState(1214); expr(); - setState(1206); + setState(1216); _errHandler.sync(this); _la = _input.LA(1); if (_la==RETURNING) { { - setState(1205); + setState(1215); update_returning_clause(); } } @@ -6948,9 +6970,9 @@ public final Update_returning_clauseContext update_returning_clause() throws Rec try { enterOuterAlt(_localctx, 1); { - setState(1208); + setState(1218); match(RETURNING); - setState(1209); + setState(1219); select_list(); } } @@ -7036,38 +7058,38 @@ public final Update_clauseContext update_clause() throws RecognitionException { enterRule(_localctx, 162, RULE_update_clause); try { int _alt; - setState(1282); + setState(1292); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,112,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { { - setState(1211); + setState(1221); match(SET); - setState(1212); + setState(1222); set_clause(); - setState(1220); + setState(1230); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,102,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(1213); + setState(1223); match(COMMA); - setState(1216); + setState(1226); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,101,_ctx) ) { case 1: { - setState(1214); + setState(1224); update_clause(); } break; case 2: { - setState(1215); + setState(1225); set_clause(); } break; @@ -7075,7 +7097,7 @@ public final Update_clauseContext update_clause() throws RecognitionException { } } } - setState(1222); + setState(1232); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,102,_ctx); } @@ -7086,31 +7108,31 @@ public final Update_clauseContext update_clause() throws RecognitionException { enterOuterAlt(_localctx, 2); { { - setState(1223); + setState(1233); match(ADD); - setState(1224); + setState(1234); add_clause(); - setState(1232); + setState(1242); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,104,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(1225); + setState(1235); match(COMMA); - setState(1228); + setState(1238); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,103,_ctx) ) { case 1: { - setState(1226); + setState(1236); update_clause(); } break; case 2: { - setState(1227); + setState(1237); add_clause(); } break; @@ -7118,7 +7140,7 @@ public final Update_clauseContext update_clause() throws RecognitionException { } } } - setState(1234); + setState(1244); _errHandler.sync(this); _alt = 
getInterpreter().adaptivePredict(_input,104,_ctx); } @@ -7129,31 +7151,31 @@ public final Update_clauseContext update_clause() throws RecognitionException { enterOuterAlt(_localctx, 3); { { - setState(1235); + setState(1245); match(PUT); - setState(1236); + setState(1246); put_clause(); - setState(1244); + setState(1254); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,106,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(1237); + setState(1247); match(COMMA); - setState(1240); + setState(1250); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,105,_ctx) ) { case 1: { - setState(1238); + setState(1248); update_clause(); } break; case 2: { - setState(1239); + setState(1249); put_clause(); } break; @@ -7161,7 +7183,7 @@ public final Update_clauseContext update_clause() throws RecognitionException { } } } - setState(1246); + setState(1256); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,106,_ctx); } @@ -7172,31 +7194,31 @@ public final Update_clauseContext update_clause() throws RecognitionException { enterOuterAlt(_localctx, 4); { { - setState(1247); + setState(1257); match(REMOVE); - setState(1248); + setState(1258); remove_clause(); - setState(1256); + setState(1266); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,108,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(1249); + setState(1259); match(COMMA); - setState(1252); + setState(1262); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,107,_ctx) ) { case 1: { - setState(1250); + setState(1260); update_clause(); } break; case 2: { - setState(1251); + setState(1261); remove_clause(); } break; @@ -7204,7 +7226,7 @@ public final Update_clauseContext update_clause() throws RecognitionException { } } } - setState(1258); + setState(1268); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,108,_ctx); } @@ -7215,33 +7237,33 @@ public final Update_clauseContext update_clause() throws RecognitionException { enterOuterAlt(_localctx, 5); { { - setState(1259); + setState(1269); match(JSON); - setState(1260); + setState(1270); match(MERGE); - setState(1261); + setState(1271); json_merge_patch_clause(); - setState(1269); + setState(1279); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,110,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(1262); + setState(1272); match(COMMA); - setState(1265); + setState(1275); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,109,_ctx) ) { case 1: { - setState(1263); + setState(1273); update_clause(); } break; case 2: { - setState(1264); + setState(1274); json_merge_patch_clause(); } break; @@ -7249,7 +7271,7 @@ public final Update_clauseContext update_clause() throws RecognitionException { } } } - setState(1271); + setState(1281); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,110,_ctx); } @@ -7260,27 +7282,27 @@ public final Update_clauseContext update_clause() throws RecognitionException { enterOuterAlt(_localctx, 6); { { - setState(1272); + setState(1282); match(SET); - setState(1273); + setState(1283); match(TTL); - setState(1274); + setState(1284); ttl_clause(); - setState(1279); + setState(1289); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,111,_ctx); while ( _alt!=2 && 
_alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(1275); + setState(1285); match(COMMA); - setState(1276); + setState(1286); update_clause(); } } } - setState(1281); + setState(1291); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,111,_ctx); } @@ -7329,11 +7351,11 @@ public final Set_clauseContext set_clause() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(1284); + setState(1294); target_expr(); - setState(1285); + setState(1295); match(EQ); - setState(1286); + setState(1296); expr(); } } @@ -7382,49 +7404,49 @@ public final Add_clauseContext add_clause() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(1289); + setState(1299); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,113,_ctx) ) { case 1: { - setState(1288); + setState(1298); match(INTO); } break; } - setState(1291); + setState(1301); target_expr(); - setState(1296); + setState(1306); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,115,_ctx) ) { case 1: { - setState(1293); + setState(1303); _errHandler.sync(this); _la = _input.LA(1); if (_la==T__2) { { - setState(1292); + setState(1302); match(T__2); } } - setState(1295); + setState(1305); pos_expr(); } break; } - setState(1299); + setState(1309); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,116,_ctx) ) { case 1: { - setState(1298); + setState(1308); match(ELEMENTS); } break; } - setState(1301); + setState(1311); expr(); } } @@ -7469,29 +7491,29 @@ public final Put_clauseContext put_clause() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(1304); + setState(1314); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,117,_ctx) ) { case 1: { - setState(1303); + setState(1313); match(INTO); } break; } - setState(1306); + setState(1316); target_expr(); - setState(1308); + setState(1318); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,118,_ctx) ) { case 1: { - setState(1307); + setState(1317); match(FIELDS); } break; } - setState(1310); + setState(1320); expr(); } } @@ -7531,7 +7553,7 @@ public final Remove_clauseContext remove_clause() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(1312); + setState(1322); target_expr(); } } @@ -7576,13 +7598,13 @@ public final Json_merge_patch_clauseContext json_merge_patch_clause() throws Rec try { enterOuterAlt(_localctx, 1); { - setState(1314); + setState(1324); target_expr(); - setState(1315); + setState(1325); match(WITH); - setState(1316); + setState(1326); match(PATCH); - setState(1317); + setState(1327); json_patch_expr(); } } @@ -7629,20 +7651,20 @@ public final Json_patch_exprContext json_patch_expr() throws RecognitionExceptio Json_patch_exprContext _localctx = new Json_patch_exprContext(_ctx, getState()); enterRule(_localctx, 174, RULE_json_patch_expr); try { - setState(1323); + setState(1333); _errHandler.sync(this); switch (_input.LA(1)) { case LBRACE: enterOuterAlt(_localctx, 1); { - setState(1319); + setState(1329); map_constructor(); } break; case LBRACK: enterOuterAlt(_localctx, 2); { - setState(1320); + setState(1330); array_constructor(); } break; @@ -7657,7 +7679,7 @@ public final Json_patch_exprContext json_patch_expr() throws RecognitionExceptio case STRING: enterOuterAlt(_localctx, 3); { - setState(1321); + setState(1331); const_expr(); } break; @@ -7666,7 +7688,7 @@ public final Json_patch_exprContext json_patch_expr() 
throws RecognitionExceptio case QUESTION_MARK: enterOuterAlt(_localctx, 4); { - setState(1322); + setState(1332); var_ref(); } break; @@ -7714,16 +7736,16 @@ public final Ttl_clauseContext ttl_clause() throws RecognitionException { enterRule(_localctx, 176, RULE_ttl_clause); int _la; try { - setState(1331); + setState(1341); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,120,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { { - setState(1325); + setState(1335); add_expr(); - setState(1326); + setState(1336); _la = _input.LA(1); if ( !(_la==DAYS || _la==HOURS) ) { _errHandler.recoverInline(this); @@ -7740,11 +7762,11 @@ public final Ttl_clauseContext ttl_clause() throws RecognitionException { enterOuterAlt(_localctx, 2); { { - setState(1328); + setState(1338); match(USING); - setState(1329); + setState(1339); match(TABLE); - setState(1330); + setState(1340); match(DEFAULT); } } @@ -7787,7 +7809,7 @@ public final Target_exprContext target_expr() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(1333); + setState(1343); path_expr(); } } @@ -7827,7 +7849,7 @@ public final Pos_exprContext pos_expr() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(1335); + setState(1345); add_expr(); } } @@ -7884,60 +7906,60 @@ public final Delete_statementContext delete_statement() throws RecognitionExcept try { enterOuterAlt(_localctx, 1); { - setState(1338); + setState(1348); _errHandler.sync(this); _la = _input.LA(1); if (_la==DECLARE) { { - setState(1337); + setState(1347); prolog(); } } - setState(1340); + setState(1350); match(DELETE); - setState(1341); + setState(1351); match(FROM); - setState(1342); + setState(1352); table_name(); - setState(1347); + setState(1357); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,123,_ctx) ) { case 1: { - setState(1344); + setState(1354); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,122,_ctx) ) { case 1: { - setState(1343); + setState(1353); match(AS); } break; } - setState(1346); + setState(1356); tab_alias(); } break; } - setState(1351); + setState(1361); _errHandler.sync(this); _la = _input.LA(1); if (_la==WHERE) { { - setState(1349); + setState(1359); match(WHERE); - setState(1350); + setState(1360); expr(); } } - setState(1354); + setState(1364); _errHandler.sync(this); _la = _input.LA(1); if (_la==RETURNING) { { - setState(1353); + setState(1363); delete_returning_clause(); } } @@ -7981,9 +8003,9 @@ public final Delete_returning_clauseContext delete_returning_clause() throws Rec try { enterOuterAlt(_localctx, 1); { - setState(1356); + setState(1366); match(RETURNING); - setState(1357); + setState(1367); select_list(); } } @@ -8027,16 +8049,16 @@ public final Quantified_type_defContext quantified_type_def() throws Recognition try { enterOuterAlt(_localctx, 1); { - setState(1359); + setState(1369); type_def(); - setState(1361); + setState(1371); _errHandler.sync(this); _la = _input.LA(1); - if (((((_la - 176)) & ~0x3f) == 0 && ((1L << (_la - 176)) & 65545L) != 0)) { + if (((((_la - 181)) & ~0x3f) == 0 && ((1L << (_la - 181)) & 65545L) != 0)) { { - setState(1360); + setState(1370); _la = _input.LA(1); - if ( !(((((_la - 176)) & ~0x3f) == 0 && ((1L << (_la - 176)) & 65545L) != 0)) ) { + if ( !(((((_la - 181)) & ~0x3f) == 0 && ((1L << (_la - 181)) & 65545L) != 0)) ) { _errHandler.recoverInline(this); } else { @@ -8302,14 +8324,14 @@ public final Type_defContext type_def() throws RecognitionException { Type_defContext _localctx = new 
Type_defContext(_ctx, getState()); enterRule(_localctx, 188, RULE_type_def); try { - setState(1378); + setState(1388); _errHandler.sync(this); switch (_input.LA(1)) { case BINARY_T: _localctx = new BinaryContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(1363); + setState(1373); binary_def(); } break; @@ -8317,7 +8339,7 @@ public final Type_defContext type_def() throws RecognitionException { _localctx = new ArrayContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(1364); + setState(1374); array_def(); } break; @@ -8325,7 +8347,7 @@ public final Type_defContext type_def() throws RecognitionException { _localctx = new BooleanContext(_localctx); enterOuterAlt(_localctx, 3); { - setState(1365); + setState(1375); boolean_def(); } break; @@ -8333,7 +8355,7 @@ public final Type_defContext type_def() throws RecognitionException { _localctx = new EnumContext(_localctx); enterOuterAlt(_localctx, 4); { - setState(1366); + setState(1376); enum_def(); } break; @@ -8343,7 +8365,7 @@ public final Type_defContext type_def() throws RecognitionException { _localctx = new FloatContext(_localctx); enterOuterAlt(_localctx, 5); { - setState(1367); + setState(1377); float_def(); } break; @@ -8352,7 +8374,7 @@ public final Type_defContext type_def() throws RecognitionException { _localctx = new IntContext(_localctx); enterOuterAlt(_localctx, 6); { - setState(1368); + setState(1378); integer_def(); } break; @@ -8360,7 +8382,7 @@ public final Type_defContext type_def() throws RecognitionException { _localctx = new JSONContext(_localctx); enterOuterAlt(_localctx, 7); { - setState(1369); + setState(1379); json_def(); } break; @@ -8368,7 +8390,7 @@ public final Type_defContext type_def() throws RecognitionException { _localctx = new MapContext(_localctx); enterOuterAlt(_localctx, 8); { - setState(1370); + setState(1380); map_def(); } break; @@ -8376,7 +8398,7 @@ public final Type_defContext type_def() throws RecognitionException { _localctx = new RecordContext(_localctx); enterOuterAlt(_localctx, 9); { - setState(1371); + setState(1381); record_def(); } break; @@ -8384,7 +8406,7 @@ public final Type_defContext type_def() throws RecognitionException { _localctx = new StringTContext(_localctx); enterOuterAlt(_localctx, 10); { - setState(1372); + setState(1382); string_def(); } break; @@ -8392,7 +8414,7 @@ public final Type_defContext type_def() throws RecognitionException { _localctx = new TimestampContext(_localctx); enterOuterAlt(_localctx, 11); { - setState(1373); + setState(1383); timestamp_def(); } break; @@ -8400,7 +8422,7 @@ public final Type_defContext type_def() throws RecognitionException { _localctx = new AnyContext(_localctx); enterOuterAlt(_localctx, 12); { - setState(1374); + setState(1384); any_def(); } break; @@ -8408,7 +8430,7 @@ public final Type_defContext type_def() throws RecognitionException { _localctx = new AnyAtomicContext(_localctx); enterOuterAlt(_localctx, 13); { - setState(1375); + setState(1385); anyAtomic_def(); } break; @@ -8416,7 +8438,7 @@ public final Type_defContext type_def() throws RecognitionException { _localctx = new AnyJsonAtomicContext(_localctx); enterOuterAlt(_localctx, 14); { - setState(1376); + setState(1386); anyJsonAtomic_def(); } break; @@ -8424,7 +8446,7 @@ public final Type_defContext type_def() throws RecognitionException { _localctx = new AnyRecordContext(_localctx); enterOuterAlt(_localctx, 15); { - setState(1377); + setState(1387); anyRecord_def(); } break; @@ -8479,29 +8501,29 @@ public final Record_defContext record_def() throws 
RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(1380); + setState(1390); match(RECORD_T); - setState(1381); + setState(1391); match(LP); - setState(1382); + setState(1392); field_def(); - setState(1387); + setState(1397); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(1383); + setState(1393); match(COMMA); - setState(1384); + setState(1394); field_def(); } } - setState(1389); + setState(1399); _errHandler.sync(this); _la = _input.LA(1); } - setState(1390); + setState(1400); match(RP); } } @@ -8551,26 +8573,26 @@ public final Field_defContext field_def() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(1392); + setState(1402); id(); - setState(1393); + setState(1403); type_def(); - setState(1395); + setState(1405); _errHandler.sync(this); _la = _input.LA(1); if (_la==DEFAULT || _la==NOT) { { - setState(1394); + setState(1404); default_def(); } } - setState(1398); + setState(1408); _errHandler.sync(this); _la = _input.LA(1); if (_la==COMMENT) { { - setState(1397); + setState(1407); comment(); } } @@ -8615,21 +8637,21 @@ public final Default_defContext default_def() throws RecognitionException { enterRule(_localctx, 194, RULE_default_def); int _la; try { - setState(1408); + setState(1418); _errHandler.sync(this); switch (_input.LA(1)) { case DEFAULT: enterOuterAlt(_localctx, 1); { { - setState(1400); + setState(1410); default_value(); - setState(1402); + setState(1412); _errHandler.sync(this); _la = _input.LA(1); if (_la==NOT) { { - setState(1401); + setState(1411); not_null(); } } @@ -8641,14 +8663,14 @@ public final Default_defContext default_def() throws RecognitionException { enterOuterAlt(_localctx, 2); { { - setState(1404); + setState(1414); not_null(); - setState(1406); + setState(1416); _errHandler.sync(this); _la = _input.LA(1); if (_la==DEFAULT) { { - setState(1405); + setState(1415); default_value(); } } @@ -8705,9 +8727,9 @@ public final Default_valueContext default_value() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(1410); + setState(1420); match(DEFAULT); - setState(1416); + setState(1426); _errHandler.sync(this); switch (_input.LA(1)) { case MINUS: @@ -8715,26 +8737,26 @@ public final Default_valueContext default_value() throws RecognitionException { case FLOAT: case NUMBER: { - setState(1411); + setState(1421); number(); } break; case DSTRING: case STRING: { - setState(1412); + setState(1422); string(); } break; case TRUE: { - setState(1413); + setState(1423); match(TRUE); } break; case FALSE: { - setState(1414); + setState(1424); match(FALSE); } break; @@ -8749,6 +8771,7 @@ public final Default_valueContext default_value() throws RecognitionException { case AS: case ASC: case ARRAY_COLLECT: + case BEFORE: case BETWEEN: case BY: case CACHE: @@ -8766,11 +8789,13 @@ public final Default_valueContext default_value() throws RecognitionException { case DESC: case DESCENDANTS: case DESCRIBE: + case DISABLE: case DISTINCT: case DROP: case ELEMENTOF: case ELEMENTS: case ELSE: + case ENABLE: case END: case ES_SHARDS: case ES_REPLICAS: @@ -8789,6 +8814,7 @@ public final Default_valueContext default_value() throws RecognitionException { case IDENTIFIED: case IDENTITY: case IF: + case IMAGE: case IN: case INCREMENT: case INDEX: @@ -8884,7 +8910,7 @@ public final Default_valueContext default_value() throws RecognitionException { case ID: case BAD_ID: { - setState(1415); + setState(1425); id(); } break; @@ -8928,9 +8954,9 @@ public final Not_nullContext not_null() throws 
RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(1418); + setState(1428); match(NOT); - setState(1419); + setState(1429); match(NULL); } } @@ -8973,13 +8999,13 @@ public final Map_defContext map_def() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(1421); + setState(1431); match(MAP_T); - setState(1422); + setState(1432); match(LP); - setState(1423); + setState(1433); type_def(); - setState(1424); + setState(1434); match(RP); } } @@ -9022,13 +9048,13 @@ public final Array_defContext array_def() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(1426); + setState(1436); match(ARRAY_T); - setState(1427); + setState(1437); match(LP); - setState(1428); + setState(1438); type_def(); - setState(1429); + setState(1439); match(RP); } } @@ -9068,7 +9094,7 @@ public final Integer_defContext integer_def() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(1431); + setState(1441); _la = _input.LA(1); if ( !(_la==INTEGER_T || _la==LONG_T) ) { _errHandler.recoverInline(this); @@ -9114,7 +9140,7 @@ public final Json_defContext json_def() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(1433); + setState(1443); match(JSON); } } @@ -9155,9 +9181,9 @@ public final Float_defContext float_def() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(1435); + setState(1445); _la = _input.LA(1); - if ( !(((((_la - 150)) & ~0x3f) == 0 && ((1L << (_la - 150)) & 133L) != 0)) ) { + if ( !(((((_la - 155)) & ~0x3f) == 0 && ((1L << (_la - 155)) & 133L) != 0)) ) { _errHandler.recoverInline(this); } else { @@ -9201,7 +9227,7 @@ public final String_defContext string_def() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(1437); + setState(1447); match(STRING_T); } } @@ -9242,20 +9268,20 @@ public final Enum_defContext enum_def() throws RecognitionException { Enum_defContext _localctx = new Enum_defContext(_ctx, getState()); enterRule(_localctx, 212, RULE_enum_def); try { - setState(1449); + setState(1459); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,135,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { { - setState(1439); + setState(1449); match(ENUM_T); - setState(1440); + setState(1450); match(LP); - setState(1441); + setState(1451); id_list(); - setState(1442); + setState(1452); match(RP); } } @@ -9264,11 +9290,11 @@ public final Enum_defContext enum_def() throws RecognitionException { enterOuterAlt(_localctx, 2); { { - setState(1444); + setState(1454); match(ENUM_T); - setState(1445); + setState(1455); match(LP); - setState(1446); + setState(1456); id_list(); notifyErrorListeners("Missing closing ')'"); } @@ -9310,7 +9336,7 @@ public final Boolean_defContext boolean_def() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(1451); + setState(1461); match(BOOLEAN_T); } } @@ -9351,18 +9377,18 @@ public final Binary_defContext binary_def() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(1453); + setState(1463); match(BINARY_T); - setState(1457); + setState(1467); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,136,_ctx) ) { case 1: { - setState(1454); + setState(1464); match(LP); - setState(1455); + setState(1465); match(INT); - setState(1456); + setState(1466); match(RP); } break; @@ -9406,18 +9432,18 @@ public final Timestamp_defContext timestamp_def() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - 
setState(1459); + setState(1469); match(TIMESTAMP_T); - setState(1463); + setState(1473); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,137,_ctx) ) { case 1: { - setState(1460); + setState(1470); match(LP); - setState(1461); + setState(1471); match(INT); - setState(1462); + setState(1472); match(RP); } break; @@ -9458,7 +9484,7 @@ public final Any_defContext any_def() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(1465); + setState(1475); match(ANY_T); } } @@ -9496,7 +9522,7 @@ public final AnyAtomic_defContext anyAtomic_def() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(1467); + setState(1477); match(ANYATOMIC_T); } } @@ -9534,7 +9560,7 @@ public final AnyJsonAtomic_defContext anyJsonAtomic_def() throws RecognitionExce try { enterOuterAlt(_localctx, 1); { - setState(1469); + setState(1479); match(ANYJSONATOMIC_T); } } @@ -9572,7 +9598,7 @@ public final AnyRecord_defContext anyRecord_def() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(1471); + setState(1481); match(ANYRECORD_T); } } @@ -9620,21 +9646,21 @@ public final Id_pathContext id_path() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(1473); + setState(1483); id(); - setState(1478); + setState(1488); _errHandler.sync(this); _la = _input.LA(1); while (_la==DOT) { { { - setState(1474); + setState(1484); match(DOT); - setState(1475); + setState(1485); id(); } } - setState(1480); + setState(1490); _errHandler.sync(this); _la = _input.LA(1); } @@ -9684,21 +9710,21 @@ public final Table_id_pathContext table_id_path() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(1481); + setState(1491); table_id(); - setState(1486); + setState(1496); _errHandler.sync(this); _la = _input.LA(1); while (_la==DOT) { { { - setState(1482); + setState(1492); match(DOT); - setState(1483); + setState(1493); table_id(); } } - setState(1488); + setState(1498); _errHandler.sync(this); _la = _input.LA(1); } @@ -9742,17 +9768,17 @@ public final Table_idContext table_id() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(1490); + setState(1500); _errHandler.sync(this); _la = _input.LA(1); if (_la==SYSDOLAR) { { - setState(1489); + setState(1499); match(SYSDOLAR); } } - setState(1492); + setState(1502); id(); } } @@ -9800,21 +9826,21 @@ public final Name_pathContext name_path() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(1494); + setState(1504); field_name(); - setState(1499); + setState(1509); _errHandler.sync(this); _la = _input.LA(1); while (_la==DOT) { { { - setState(1495); + setState(1505); match(DOT); - setState(1496); + setState(1506); field_name(); } } - setState(1501); + setState(1511); _errHandler.sync(this); _la = _input.LA(1); } @@ -9855,7 +9881,7 @@ public final Field_nameContext field_name() throws RecognitionException { Field_nameContext _localctx = new Field_nameContext(_ctx, getState()); enterRule(_localctx, 236, RULE_field_name); try { - setState(1504); + setState(1514); _errHandler.sync(this); switch (_input.LA(1)) { case ACCOUNT: @@ -9869,6 +9895,7 @@ public final Field_nameContext field_name() throws RecognitionException { case AS: case ASC: case ARRAY_COLLECT: + case BEFORE: case BETWEEN: case BY: case CACHE: @@ -9886,11 +9913,13 @@ public final Field_nameContext field_name() throws RecognitionException { case DESC: case DESCENDANTS: case DESCRIBE: + case DISABLE: case DISTINCT: case DROP: case ELEMENTOF: 
case ELEMENTS: case ELSE: + case ENABLE: case END: case ES_SHARDS: case ES_REPLICAS: @@ -9909,6 +9938,7 @@ public final Field_nameContext field_name() throws RecognitionException { case IDENTIFIED: case IDENTITY: case IF: + case IMAGE: case IN: case INCREMENT: case INDEX: @@ -10005,14 +10035,14 @@ public final Field_nameContext field_name() throws RecognitionException { case BAD_ID: enterOuterAlt(_localctx, 1); { - setState(1502); + setState(1512); id(); } break; case DSTRING: enterOuterAlt(_localctx, 2); { - setState(1503); + setState(1513); match(DSTRING); } break; @@ -10061,25 +10091,25 @@ public final Create_namespace_statementContext create_namespace_statement() thro try { enterOuterAlt(_localctx, 1); { - setState(1506); + setState(1516); match(CREATE); - setState(1507); + setState(1517); match(NAMESPACE); - setState(1511); + setState(1521); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,143,_ctx) ) { case 1: { - setState(1508); + setState(1518); match(IF); - setState(1509); + setState(1519); match(NOT); - setState(1510); + setState(1520); match(EXISTS); } break; } - setState(1513); + setState(1523); namespace(); } } @@ -10125,30 +10155,30 @@ public final Drop_namespace_statementContext drop_namespace_statement() throws R try { enterOuterAlt(_localctx, 1); { - setState(1515); + setState(1525); match(DROP); - setState(1516); + setState(1526); match(NAMESPACE); - setState(1519); + setState(1529); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,144,_ctx) ) { case 1: { - setState(1517); + setState(1527); match(IF); - setState(1518); + setState(1528); match(EXISTS); } break; } - setState(1521); + setState(1531); namespace(); - setState(1523); + setState(1533); _errHandler.sync(this); _la = _input.LA(1); if (_la==CASCADE) { { - setState(1522); + setState(1532); match(CASCADE); } } @@ -10191,7 +10221,7 @@ public final Region_nameContext region_name() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(1525); + setState(1535); id(); } } @@ -10233,11 +10263,11 @@ public final Create_region_statementContext create_region_statement() throws Rec try { enterOuterAlt(_localctx, 1); { - setState(1527); + setState(1537); match(CREATE); - setState(1528); + setState(1538); match(REGION); - setState(1529); + setState(1539); region_name(); } } @@ -10279,11 +10309,11 @@ public final Drop_region_statementContext drop_region_statement() throws Recogni try { enterOuterAlt(_localctx, 1); { - setState(1531); + setState(1541); match(DROP); - setState(1532); + setState(1542); match(REGION); - setState(1533); + setState(1543); region_name(); } } @@ -10326,13 +10356,13 @@ public final Set_local_region_statementContext set_local_region_statement() thro try { enterOuterAlt(_localctx, 1); { - setState(1535); + setState(1545); match(SET); - setState(1536); + setState(1546); match(LOCAL); - setState(1537); + setState(1547); match(REGION); - setState(1538); + setState(1548); region_name(); } } @@ -10359,15 +10389,15 @@ public Table_defContext table_def() { return getRuleContext(Table_defContext.class,0); } public TerminalNode RP() { return getToken(KVQLParser.RP, 0); } - public Table_optionsContext table_options() { - return getRuleContext(Table_optionsContext.class,0); - } public TerminalNode IF() { return getToken(KVQLParser.IF, 0); } public TerminalNode NOT() { return getToken(KVQLParser.NOT, 0); } public TerminalNode EXISTS() { return getToken(KVQLParser.EXISTS, 0); } public CommentContext comment() { return 
getRuleContext(CommentContext.class,0); } + public Table_optionsContext table_options() { + return getRuleContext(Table_optionsContext.class,0); + } public Create_table_statementContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } @@ -10389,44 +10419,52 @@ public final Create_table_statementContext create_table_statement() throws Recog try { enterOuterAlt(_localctx, 1); { - setState(1540); + setState(1550); match(CREATE); - setState(1541); + setState(1551); match(TABLE); - setState(1545); + setState(1555); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,146,_ctx) ) { case 1: { - setState(1542); + setState(1552); match(IF); - setState(1543); + setState(1553); match(NOT); - setState(1544); + setState(1554); match(EXISTS); } break; } - setState(1547); + setState(1557); table_name(); - setState(1549); + setState(1559); _errHandler.sync(this); _la = _input.LA(1); if (_la==COMMENT) { { - setState(1548); + setState(1558); comment(); } } - setState(1551); + setState(1561); match(LP); - setState(1552); + setState(1562); table_def(); - setState(1553); + setState(1563); match(RP); - setState(1554); - table_options(); + setState(1565); + _errHandler.sync(this); + _la = _input.LA(1); + if (((((_la - 14)) & ~0x3f) == 0 && ((1L << (_la - 14)) & 2251800082120705L) != 0) || _la==USING || _la==WITH) { + { + setState(1564); + table_options(); + } + } + } } catch (RecognitionException re) { @@ -10469,19 +10507,19 @@ public final Table_nameContext table_name() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(1559); + setState(1570); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,148,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,149,_ctx) ) { case 1: { - setState(1556); + setState(1567); namespace(); - setState(1557); + setState(1568); match(COLON); } break; } - setState(1561); + setState(1572); table_id_path(); } } @@ -10521,7 +10559,7 @@ public final NamespaceContext namespace() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(1563); + setState(1574); id_path(); } } @@ -10581,61 +10619,61 @@ public final Table_defContext table_def() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(1568); + setState(1579); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,149,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,150,_ctx) ) { case 1: { - setState(1565); + setState(1576); column_def(); } break; case 2: { - setState(1566); + setState(1577); key_def(); } break; case 3: { - setState(1567); + setState(1578); json_collection_mrcounter_def(); } break; } - setState(1578); + setState(1589); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(1570); + setState(1581); match(COMMA); - setState(1574); + setState(1585); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,150,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,151,_ctx) ) { case 1: { - setState(1571); + setState(1582); column_def(); } break; case 2: { - setState(1572); + setState(1583); key_def(); } break; case 3: { - setState(1573); + setState(1584); json_collection_mrcounter_def(); } break; } } } - setState(1580); + setState(1591); _errHandler.sync(this); _la = _input.LA(1); } @@ -10699,50 +10737,50 @@ public final Column_defContext column_def() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(1581); + setState(1592); id(); - setState(1582); + 
setState(1593); type_def(); - setState(1588); + setState(1599); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,152,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,153,_ctx) ) { case 1: { - setState(1583); + setState(1594); default_def(); } break; case 2: { - setState(1584); + setState(1595); identity_def(); } break; case 3: { - setState(1585); + setState(1596); uuid_def(); } break; case 4: { - setState(1586); + setState(1597); mr_counter_def(); } break; case 5: { - setState(1587); + setState(1598); json_mrcounter_fields(); } break; } - setState(1591); + setState(1602); _errHandler.sync(this); _la = _input.LA(1); if (_la==COMMENT) { { - setState(1590); + setState(1601); comment(); } } @@ -10795,27 +10833,27 @@ public final Json_mrcounter_fieldsContext json_mrcounter_fields() throws Recogni try { enterOuterAlt(_localctx, 1); { - setState(1593); + setState(1604); match(LP); - setState(1594); + setState(1605); json_mrcounter_def(); - setState(1599); + setState(1610); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(1595); + setState(1606); match(COMMA); - setState(1596); + setState(1607); json_mrcounter_def(); } } - setState(1601); + setState(1612); _errHandler.sync(this); _la = _input.LA(1); } - setState(1602); + setState(1613); match(RP); } } @@ -10861,13 +10899,13 @@ public final Json_mrcounter_defContext json_mrcounter_def() throws RecognitionEx try { enterOuterAlt(_localctx, 1); { - setState(1604); + setState(1615); json_mrcounter_path(); - setState(1605); + setState(1616); match(AS); - setState(1606); + setState(1617); _la = _input.LA(1); - if ( !(((((_la - 154)) & ~0x3f) == 0 && ((1L << (_la - 154)) & 11L) != 0)) ) { + if ( !(((((_la - 159)) & ~0x3f) == 0 && ((1L << (_la - 159)) & 11L) != 0)) ) { _errHandler.recoverInline(this); } else { @@ -10875,7 +10913,7 @@ public final Json_mrcounter_defContext json_mrcounter_def() throws RecognitionEx _errHandler.reportMatch(this); consume(); } - setState(1607); + setState(1618); match(MR_COUNTER); } } @@ -10915,7 +10953,7 @@ public final Json_collection_mrcounter_defContext json_collection_mrcounter_def( try { enterOuterAlt(_localctx, 1); { - setState(1609); + setState(1620); json_mrcounter_def(); } } @@ -10969,7 +11007,7 @@ public final Json_mrcounter_pathContext json_mrcounter_path() throws Recognition try { enterOuterAlt(_localctx, 1); { - setState(1613); + setState(1624); _errHandler.sync(this); switch (_input.LA(1)) { case ACCOUNT: @@ -10983,6 +11021,7 @@ public final Json_mrcounter_pathContext json_mrcounter_path() throws Recognition case AS: case ASC: case ARRAY_COLLECT: + case BEFORE: case BETWEEN: case BY: case CACHE: @@ -11000,11 +11039,13 @@ public final Json_mrcounter_pathContext json_mrcounter_path() throws Recognition case DESC: case DESCENDANTS: case DESCRIBE: + case DISABLE: case DISTINCT: case DROP: case ELEMENTOF: case ELEMENTS: case ELSE: + case ENABLE: case END: case ES_SHARDS: case ES_REPLICAS: @@ -11023,6 +11064,7 @@ public final Json_mrcounter_pathContext json_mrcounter_path() throws Recognition case IDENTIFIED: case IDENTITY: case IF: + case IMAGE: case IN: case INCREMENT: case INDEX: @@ -11118,29 +11160,29 @@ public final Json_mrcounter_pathContext json_mrcounter_path() throws Recognition case ID: case BAD_ID: { - setState(1611); + setState(1622); id(); } break; case DSTRING: case STRING: { - setState(1612); + setState(1623); string(); } break; default: throw new NoViableAltException(this); } - setState(1622); + setState(1633); _errHandler.sync(this); 
_la = _input.LA(1); while (_la==DOT) { { { - setState(1615); + setState(1626); match(DOT); - setState(1618); + setState(1629); _errHandler.sync(this); switch (_input.LA(1)) { case ACCOUNT: @@ -11154,6 +11196,7 @@ public final Json_mrcounter_pathContext json_mrcounter_path() throws Recognition case AS: case ASC: case ARRAY_COLLECT: + case BEFORE: case BETWEEN: case BY: case CACHE: @@ -11171,11 +11214,13 @@ public final Json_mrcounter_pathContext json_mrcounter_path() throws Recognition case DESC: case DESCENDANTS: case DESCRIBE: + case DISABLE: case DISTINCT: case DROP: case ELEMENTOF: case ELEMENTS: case ELSE: + case ENABLE: case END: case ES_SHARDS: case ES_REPLICAS: @@ -11194,6 +11239,7 @@ public final Json_mrcounter_pathContext json_mrcounter_path() throws Recognition case IDENTIFIED: case IDENTITY: case IF: + case IMAGE: case IN: case INCREMENT: case INDEX: @@ -11289,14 +11335,14 @@ public final Json_mrcounter_pathContext json_mrcounter_path() throws Recognition case ID: case BAD_ID: { - setState(1616); + setState(1627); id(); } break; case DSTRING: case STRING: { - setState(1617); + setState(1628); string(); } break; @@ -11305,7 +11351,7 @@ public final Json_mrcounter_pathContext json_mrcounter_path() throws Recognition } } } - setState(1624); + setState(1635); _errHandler.sync(this); _la = _input.LA(1); } @@ -11356,25 +11402,25 @@ public final Key_defContext key_def() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(1625); + setState(1636); match(PRIMARY); - setState(1626); + setState(1637); match(KEY); - setState(1627); + setState(1638); match(LP); - setState(1632); + setState(1643); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,159,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,160,_ctx) ) { case 1: { - setState(1628); + setState(1639); shard_key_def(); - setState(1630); + setState(1641); _errHandler.sync(this); _la = _input.LA(1); if (_la==COMMA) { { - setState(1629); + setState(1640); match(COMMA); } } @@ -11382,17 +11428,17 @@ public final Key_defContext key_def() throws RecognitionException { } break; } - setState(1635); + setState(1646); _errHandler.sync(this); _la = _input.LA(1); - if ((((_la) & ~0x3f) == 0 && ((1L << _la) & -492581210292256L) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & -417149895185L) != 0) || ((((_la - 128)) & ~0x3f) == 0 && ((1L << (_la - 128)) & 549755293695L) != 0) || ((((_la - 195)) & ~0x3f) == 0 && ((1L << (_la - 195)) & 6145L) != 0)) { + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & -7881299352092736L) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & -13348796645889L) != 0) || ((((_la - 128)) & ~0x3f) == 0 && ((1L << (_la - 128)) & 17592169398271L) != 0) || ((((_la - 200)) & ~0x3f) == 0 && ((1L << (_la - 200)) & 6145L) != 0)) { { - setState(1634); + setState(1645); id_list_with_size(); } } - setState(1637); + setState(1648); match(RP); } } @@ -11433,20 +11479,20 @@ public final Shard_key_defContext shard_key_def() throws RecognitionException { Shard_key_defContext _localctx = new Shard_key_defContext(_ctx, getState()); enterRule(_localctx, 270, RULE_shard_key_def); try { - setState(1648); + setState(1659); _errHandler.sync(this); switch (_input.LA(1)) { case SHARD: enterOuterAlt(_localctx, 1); { { - setState(1639); + setState(1650); match(SHARD); - setState(1640); + setState(1651); match(LP); - setState(1641); + setState(1652); id_list_with_size(); - setState(1642); + setState(1653); match(RP); } } @@ -11455,9 +11501,9 @@ public final 
Shard_key_defContext shard_key_def() throws RecognitionException { enterOuterAlt(_localctx, 2); { { - setState(1644); + setState(1655); match(LP); - setState(1645); + setState(1656); id_list_with_size(); notifyErrorListeners("Missing closing ')'"); } @@ -11511,25 +11557,25 @@ public final Id_list_with_sizeContext id_list_with_size() throws RecognitionExce int _alt; enterOuterAlt(_localctx, 1); { - setState(1650); + setState(1661); id_with_size(); - setState(1655); + setState(1666); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,162,_ctx); + _alt = getInterpreter().adaptivePredict(_input,163,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(1651); + setState(1662); match(COMMA); - setState(1652); + setState(1663); id_with_size(); } } } - setState(1657); + setState(1668); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,162,_ctx); + _alt = getInterpreter().adaptivePredict(_input,163,_ctx); } } } @@ -11573,14 +11619,14 @@ public final Id_with_sizeContext id_with_size() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(1658); + setState(1669); id(); - setState(1660); + setState(1671); _errHandler.sync(this); _la = _input.LA(1); if (_la==LP) { { - setState(1659); + setState(1670); storage_size(); } } @@ -11623,11 +11669,11 @@ public final Storage_sizeContext storage_size() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(1662); + setState(1673); match(LP); - setState(1663); + setState(1674); match(INT); - setState(1664); + setState(1675); match(RP); } } @@ -11644,17 +11690,35 @@ public final Storage_sizeContext storage_size() throws RecognitionException { @SuppressWarnings("CheckReturnValue") public static class Table_optionsContext extends ParserRuleContext { - public Ttl_defContext ttl_def() { - return getRuleContext(Ttl_defContext.class,0); + public List ttl_def() { + return getRuleContexts(Ttl_defContext.class); + } + public Ttl_defContext ttl_def(int i) { + return getRuleContext(Ttl_defContext.class,i); } - public Regions_defContext regions_def() { - return getRuleContext(Regions_defContext.class,0); + public List regions_def() { + return getRuleContexts(Regions_defContext.class); } - public Frozen_defContext frozen_def() { - return getRuleContext(Frozen_defContext.class,0); + public Regions_defContext regions_def(int i) { + return getRuleContext(Regions_defContext.class,i); } - public Json_collection_defContext json_collection_def() { - return getRuleContext(Json_collection_defContext.class,0); + public List frozen_def() { + return getRuleContexts(Frozen_defContext.class); + } + public Frozen_defContext frozen_def(int i) { + return getRuleContext(Frozen_defContext.class,i); + } + public List json_collection_def() { + return getRuleContexts(Json_collection_defContext.class); + } + public Json_collection_defContext json_collection_def(int i) { + return getRuleContext(Json_collection_defContext.class,i); + } + public List enable_before_image() { + return getRuleContexts(Enable_before_imageContext.class); + } + public Enable_before_imageContext enable_before_image(int i) { + return getRuleContext(Enable_before_imageContext.class,i); } public Table_optionsContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); @@ -11677,242 +11741,52 @@ public final Table_optionsContext table_options() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(1726); + setState(1682); 
_errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,184,_ctx) ) { - case 1: - { - setState(1667); - _errHandler.sync(this); - _la = _input.LA(1); - if (_la==USING) { - { - setState(1666); - ttl_def(); - } - } - - setState(1670); - _errHandler.sync(this); - _la = _input.LA(1); - if (_la==IN) { - { - setState(1669); - regions_def(); - } - } - - } - break; - case 2: - { - setState(1673); - _errHandler.sync(this); - _la = _input.LA(1); - if (_la==IN) { - { - setState(1672); - regions_def(); - } - } - - setState(1676); - _errHandler.sync(this); - _la = _input.LA(1); - if (_la==USING) { - { - setState(1675); - ttl_def(); - } - } - - } - break; - case 3: + _la = _input.LA(1); + do { { - setState(1679); - _errHandler.sync(this); - _la = _input.LA(1); - if (_la==USING) { - { - setState(1678); - ttl_def(); - } - } - setState(1682); _errHandler.sync(this); - _la = _input.LA(1); - if (_la==WITH) { - { - setState(1681); - frozen_def(); - } - } - - } - break; - case 4: - { - setState(1685); - _errHandler.sync(this); - _la = _input.LA(1); - if (_la==WITH) { - { - setState(1684); - frozen_def(); - } - } - - setState(1688); - _errHandler.sync(this); - _la = _input.LA(1); - if (_la==USING) { - { - setState(1687); - ttl_def(); - } - } - - } - break; - case 5: - { - setState(1691); - _errHandler.sync(this); - _la = _input.LA(1); - if (_la==IN) { - { - setState(1690); - regions_def(); - } - } - - setState(1694); - _errHandler.sync(this); - _la = _input.LA(1); - if (_la==AS) { - { - setState(1693); - json_collection_def(); - } - } - - setState(1697); - _errHandler.sync(this); - _la = _input.LA(1); - if (_la==USING) { + switch (_input.LA(1)) { + case USING: { - setState(1696); + setState(1677); ttl_def(); } - } - - } - break; - case 6: - { - setState(1700); - _errHandler.sync(this); - _la = _input.LA(1); - if (_la==IN) { + break; + case IN: { - setState(1699); + setState(1678); regions_def(); } - } - - setState(1703); - _errHandler.sync(this); - _la = _input.LA(1); - if (_la==USING) { - { - setState(1702); - ttl_def(); - } - } - - setState(1706); - _errHandler.sync(this); - _la = _input.LA(1); - if (_la==AS) { + break; + case WITH: { - setState(1705); - json_collection_def(); + setState(1679); + frozen_def(); } - } - - } - break; - case 7: - { - setState(1709); - _errHandler.sync(this); - _la = _input.LA(1); - if (_la==AS) { + break; + case AS: { - setState(1708); + setState(1680); json_collection_def(); } - } - - setState(1712); - _errHandler.sync(this); - _la = _input.LA(1); - if (_la==USING) { - { - setState(1711); - ttl_def(); - } - } - - setState(1715); - _errHandler.sync(this); - _la = _input.LA(1); - if (_la==IN) { - { - setState(1714); - regions_def(); - } - } - - } - break; - case 8: - { - setState(1718); - _errHandler.sync(this); - _la = _input.LA(1); - if (_la==USING) { + break; + case ENABLE: { - setState(1717); - ttl_def(); + setState(1681); + enable_before_image(); } + break; + default: + throw new NoViableAltException(this); } - - setState(1721); - _errHandler.sync(this); - _la = _input.LA(1); - if (_la==AS) { - { - setState(1720); - json_collection_def(); - } } - - setState(1724); + setState(1684); _errHandler.sync(this); _la = _input.LA(1); - if (_la==IN) { - { - setState(1723); - regions_def(); - } - } - - } - break; - } + } while ( ((((_la - 14)) & ~0x3f) == 0 && ((1L << (_la - 14)) & 2251800082120705L) != 0) || _la==USING || _la==WITH ); } } catch (RecognitionException re) { @@ -11953,11 +11827,11 @@ public final Ttl_defContext ttl_def() throws RecognitionException { 
try { enterOuterAlt(_localctx, 1); { - setState(1728); + setState(1686); match(USING); - setState(1729); + setState(1687); match(TTL); - setState(1730); + setState(1688); duration(); } } @@ -11997,7 +11871,7 @@ public final Region_namesContext region_names() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(1732); + setState(1690); id_list(); } } @@ -12039,11 +11913,11 @@ public final Regions_defContext regions_def() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(1734); + setState(1692); match(IN); - setState(1735); + setState(1693); match(REGIONS); - setState(1736); + setState(1694); region_names(); } } @@ -12059,38 +11933,48 @@ public final Regions_defContext regions_def() throws RecognitionException { } @SuppressWarnings("CheckReturnValue") - public static class Add_region_defContext extends ParserRuleContext { - public TerminalNode ADD() { return getToken(KVQLParser.ADD, 0); } - public TerminalNode REGIONS() { return getToken(KVQLParser.REGIONS, 0); } - public Region_namesContext region_names() { - return getRuleContext(Region_namesContext.class,0); - } - public Add_region_defContext(ParserRuleContext parent, int invokingState) { + public static class Frozen_defContext extends ParserRuleContext { + public TerminalNode WITH() { return getToken(KVQLParser.WITH, 0); } + public TerminalNode SCHEMA() { return getToken(KVQLParser.SCHEMA, 0); } + public TerminalNode FROZEN() { return getToken(KVQLParser.FROZEN, 0); } + public TerminalNode FORCE() { return getToken(KVQLParser.FORCE, 0); } + public Frozen_defContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } - @Override public int getRuleIndex() { return RULE_add_region_def; } + @Override public int getRuleIndex() { return RULE_frozen_def; } @Override public void enterRule(ParseTreeListener listener) { - if ( listener instanceof KVQLListener ) ((KVQLListener)listener).enterAdd_region_def(this); + if ( listener instanceof KVQLListener ) ((KVQLListener)listener).enterFrozen_def(this); } @Override public void exitRule(ParseTreeListener listener) { - if ( listener instanceof KVQLListener ) ((KVQLListener)listener).exitAdd_region_def(this); + if ( listener instanceof KVQLListener ) ((KVQLListener)listener).exitFrozen_def(this); } } - public final Add_region_defContext add_region_def() throws RecognitionException { - Add_region_defContext _localctx = new Add_region_defContext(_ctx, getState()); - enterRule(_localctx, 286, RULE_add_region_def); + public final Frozen_defContext frozen_def() throws RecognitionException { + Frozen_defContext _localctx = new Frozen_defContext(_ctx, getState()); + enterRule(_localctx, 286, RULE_frozen_def); + int _la; try { enterOuterAlt(_localctx, 1); { - setState(1738); - match(ADD); - setState(1739); - match(REGIONS); - setState(1740); - region_names(); + setState(1696); + match(WITH); + setState(1697); + match(SCHEMA); + setState(1698); + match(FROZEN); + setState(1700); + _errHandler.sync(this); + _la = _input.LA(1); + if (_la==FORCE) { + { + setState(1699); + match(FORCE); + } + } + } } catch (RecognitionException re) { @@ -12105,38 +11989,36 @@ public final Add_region_defContext add_region_def() throws RecognitionException } @SuppressWarnings("CheckReturnValue") - public static class Drop_region_defContext extends ParserRuleContext { - public TerminalNode DROP() { return getToken(KVQLParser.DROP, 0); } - public TerminalNode REGIONS() { return getToken(KVQLParser.REGIONS, 0); } - public Region_namesContext region_names() { 
- return getRuleContext(Region_namesContext.class,0); - } - public Drop_region_defContext(ParserRuleContext parent, int invokingState) { + public static class Json_collection_defContext extends ParserRuleContext { + public TerminalNode AS() { return getToken(KVQLParser.AS, 0); } + public TerminalNode JSON() { return getToken(KVQLParser.JSON, 0); } + public TerminalNode COLLECTION() { return getToken(KVQLParser.COLLECTION, 0); } + public Json_collection_defContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } - @Override public int getRuleIndex() { return RULE_drop_region_def; } + @Override public int getRuleIndex() { return RULE_json_collection_def; } @Override public void enterRule(ParseTreeListener listener) { - if ( listener instanceof KVQLListener ) ((KVQLListener)listener).enterDrop_region_def(this); + if ( listener instanceof KVQLListener ) ((KVQLListener)listener).enterJson_collection_def(this); } @Override public void exitRule(ParseTreeListener listener) { - if ( listener instanceof KVQLListener ) ((KVQLListener)listener).exitDrop_region_def(this); + if ( listener instanceof KVQLListener ) ((KVQLListener)listener).exitJson_collection_def(this); } } - public final Drop_region_defContext drop_region_def() throws RecognitionException { - Drop_region_defContext _localctx = new Drop_region_defContext(_ctx, getState()); - enterRule(_localctx, 288, RULE_drop_region_def); + public final Json_collection_defContext json_collection_def() throws RecognitionException { + Json_collection_defContext _localctx = new Json_collection_defContext(_ctx, getState()); + enterRule(_localctx, 288, RULE_json_collection_def); try { enterOuterAlt(_localctx, 1); { - setState(1742); - match(DROP); - setState(1743); - match(REGIONS); - setState(1744); - region_names(); + setState(1702); + match(AS); + setState(1703); + match(JSON); + setState(1704); + match(COLLECTION); } } catch (RecognitionException re) { @@ -12151,48 +12033,95 @@ public final Drop_region_defContext drop_region_def() throws RecognitionExceptio } @SuppressWarnings("CheckReturnValue") - public static class Frozen_defContext extends ParserRuleContext { - public TerminalNode WITH() { return getToken(KVQLParser.WITH, 0); } - public TerminalNode SCHEMA() { return getToken(KVQLParser.SCHEMA, 0); } - public TerminalNode FROZEN() { return getToken(KVQLParser.FROZEN, 0); } - public TerminalNode FORCE() { return getToken(KVQLParser.FORCE, 0); } - public Frozen_defContext(ParserRuleContext parent, int invokingState) { + public static class Enable_before_imageContext extends ParserRuleContext { + public TerminalNode ENABLE() { return getToken(KVQLParser.ENABLE, 0); } + public TerminalNode BEFORE() { return getToken(KVQLParser.BEFORE, 0); } + public TerminalNode IMAGE() { return getToken(KVQLParser.IMAGE, 0); } + public Before_image_ttlContext before_image_ttl() { + return getRuleContext(Before_image_ttlContext.class,0); + } + public Enable_before_imageContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } - @Override public int getRuleIndex() { return RULE_frozen_def; } + @Override public int getRuleIndex() { return RULE_enable_before_image; } @Override public void enterRule(ParseTreeListener listener) { - if ( listener instanceof KVQLListener ) ((KVQLListener)listener).enterFrozen_def(this); + if ( listener instanceof KVQLListener ) ((KVQLListener)listener).enterEnable_before_image(this); } @Override public void exitRule(ParseTreeListener listener) { - if ( listener instanceof KVQLListener ) 
((KVQLListener)listener).exitFrozen_def(this); + if ( listener instanceof KVQLListener ) ((KVQLListener)listener).exitEnable_before_image(this); } } - public final Frozen_defContext frozen_def() throws RecognitionException { - Frozen_defContext _localctx = new Frozen_defContext(_ctx, getState()); - enterRule(_localctx, 290, RULE_frozen_def); - int _la; + public final Enable_before_imageContext enable_before_image() throws RecognitionException { + Enable_before_imageContext _localctx = new Enable_before_imageContext(_ctx, getState()); + enterRule(_localctx, 290, RULE_enable_before_image); try { enterOuterAlt(_localctx, 1); { - setState(1746); - match(WITH); - setState(1747); - match(SCHEMA); - setState(1748); - match(FROZEN); - setState(1750); + setState(1706); + match(ENABLE); + setState(1707); + match(BEFORE); + setState(1708); + match(IMAGE); + setState(1710); _errHandler.sync(this); - _la = _input.LA(1); - if (_la==FORCE) { + switch ( getInterpreter().adaptivePredict(_input,168,_ctx) ) { + case 1: { - setState(1749); - match(FORCE); + setState(1709); + before_image_ttl(); } + break; + } } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + @SuppressWarnings("CheckReturnValue") + public static class Before_image_ttlContext extends ParserRuleContext { + public TerminalNode USING() { return getToken(KVQLParser.USING, 0); } + public TerminalNode TTL() { return getToken(KVQLParser.TTL, 0); } + public DurationContext duration() { + return getRuleContext(DurationContext.class,0); + } + public Before_image_ttlContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_before_image_ttl; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof KVQLListener ) ((KVQLListener)listener).enterBefore_image_ttl(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof KVQLListener ) ((KVQLListener)listener).exitBefore_image_ttl(this); + } + } + + public final Before_image_ttlContext before_image_ttl() throws RecognitionException { + Before_image_ttlContext _localctx = new Before_image_ttlContext(_ctx, getState()); + enterRule(_localctx, 292, RULE_before_image_ttl); + try { + enterOuterAlt(_localctx, 1); + { + setState(1712); + match(USING); + setState(1713); + match(TTL); + setState(1714); + duration(); } } catch (RecognitionException re) { @@ -12207,36 +12136,36 @@ public final Frozen_defContext frozen_def() throws RecognitionException { } @SuppressWarnings("CheckReturnValue") - public static class Json_collection_defContext extends ParserRuleContext { - public TerminalNode AS() { return getToken(KVQLParser.AS, 0); } - public TerminalNode JSON() { return getToken(KVQLParser.JSON, 0); } - public TerminalNode COLLECTION() { return getToken(KVQLParser.COLLECTION, 0); } - public Json_collection_defContext(ParserRuleContext parent, int invokingState) { + public static class Disable_before_imageContext extends ParserRuleContext { + public TerminalNode DISABLE() { return getToken(KVQLParser.DISABLE, 0); } + public TerminalNode BEFORE() { return getToken(KVQLParser.BEFORE, 0); } + public TerminalNode IMAGE() { return getToken(KVQLParser.IMAGE, 0); } + public Disable_before_imageContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } - @Override public int getRuleIndex() { 
return RULE_json_collection_def; } + @Override public int getRuleIndex() { return RULE_disable_before_image; } @Override public void enterRule(ParseTreeListener listener) { - if ( listener instanceof KVQLListener ) ((KVQLListener)listener).enterJson_collection_def(this); + if ( listener instanceof KVQLListener ) ((KVQLListener)listener).enterDisable_before_image(this); } @Override public void exitRule(ParseTreeListener listener) { - if ( listener instanceof KVQLListener ) ((KVQLListener)listener).exitJson_collection_def(this); + if ( listener instanceof KVQLListener ) ((KVQLListener)listener).exitDisable_before_image(this); } } - public final Json_collection_defContext json_collection_def() throws RecognitionException { - Json_collection_defContext _localctx = new Json_collection_defContext(_ctx, getState()); - enterRule(_localctx, 292, RULE_json_collection_def); + public final Disable_before_imageContext disable_before_image() throws RecognitionException { + Disable_before_imageContext _localctx = new Disable_before_imageContext(_ctx, getState()); + enterRule(_localctx, 294, RULE_disable_before_image); try { enterOuterAlt(_localctx, 1); { - setState(1752); - match(AS); - setState(1753); - match(JSON); - setState(1754); - match(COLLECTION); + setState(1716); + match(DISABLE); + setState(1717); + match(BEFORE); + setState(1718); + match(IMAGE); } } catch (RecognitionException re) { @@ -12284,37 +12213,37 @@ public void exitRule(ParseTreeListener listener) { public final Identity_defContext identity_def() throws RecognitionException { Identity_defContext _localctx = new Identity_defContext(_ctx, getState()); - enterRule(_localctx, 294, RULE_identity_def); + enterRule(_localctx, 296, RULE_identity_def); int _la; try { enterOuterAlt(_localctx, 1); { - setState(1756); + setState(1720); match(GENERATED); - setState(1764); + setState(1728); _errHandler.sync(this); switch (_input.LA(1)) { case ALWAYS: { - setState(1757); + setState(1721); match(ALWAYS); } break; case BY: { { - setState(1758); + setState(1722); match(BY); - setState(1759); + setState(1723); match(DEFAULT); - setState(1762); + setState(1726); _errHandler.sync(this); _la = _input.LA(1); if (_la==ON) { { - setState(1760); + setState(1724); match(ON); - setState(1761); + setState(1725); match(NULL); } } @@ -12325,32 +12254,32 @@ public final Identity_defContext identity_def() throws RecognitionException { default: throw new NoViableAltException(this); } - setState(1766); + setState(1730); match(AS); - setState(1767); + setState(1731); match(IDENTITY); - setState(1776); + setState(1740); _errHandler.sync(this); _la = _input.LA(1); if (_la==LP) { { - setState(1768); + setState(1732); match(LP); - setState(1770); + setState(1734); _errHandler.sync(this); _la = _input.LA(1); do { { { - setState(1769); + setState(1733); sequence_options(); } } - setState(1772); + setState(1736); _errHandler.sync(this); _la = _input.LA(1); - } while ( (((_la) & ~0x3f) == 0 && ((1L << _la) & 2305843009281064960L) != 0) || ((((_la - 78)) & ~0x3f) == 0 && ((1L << (_la - 78)) & 4398046511625L) != 0) ); - setState(1774); + } while ( _la==CACHE || _la==CYCLE || ((((_la - 66)) & ~0x3f) == 0 && ((1L << (_la - 66)) & 576460752371712001L) != 0) ); + setState(1738); match(RP); } } @@ -12399,20 +12328,20 @@ public void exitRule(ParseTreeListener listener) { public final Sequence_optionsContext sequence_options() throws RecognitionException { Sequence_optionsContext _localctx = new Sequence_optionsContext(_ctx, getState()); - enterRule(_localctx, 296, 
RULE_sequence_options); + enterRule(_localctx, 298, RULE_sequence_options); try { - setState(1799); + setState(1763); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,190,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,173,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { { - setState(1778); + setState(1742); match(START); - setState(1779); + setState(1743); match(WITH); - setState(1780); + setState(1744); signed_int(); } } @@ -12421,11 +12350,11 @@ public final Sequence_optionsContext sequence_options() throws RecognitionExcept enterOuterAlt(_localctx, 2); { { - setState(1781); + setState(1745); match(INCREMENT); - setState(1782); + setState(1746); match(BY); - setState(1783); + setState(1747); signed_int(); } } @@ -12434,9 +12363,9 @@ public final Sequence_optionsContext sequence_options() throws RecognitionExcept enterOuterAlt(_localctx, 3); { { - setState(1784); + setState(1748); match(MAXVALUE); - setState(1785); + setState(1749); signed_int(); } } @@ -12445,9 +12374,9 @@ public final Sequence_optionsContext sequence_options() throws RecognitionExcept enterOuterAlt(_localctx, 4); { { - setState(1786); + setState(1750); match(NO); - setState(1787); + setState(1751); match(MAXVALUE); } } @@ -12456,9 +12385,9 @@ public final Sequence_optionsContext sequence_options() throws RecognitionExcept enterOuterAlt(_localctx, 5); { { - setState(1788); + setState(1752); match(MINVALUE); - setState(1789); + setState(1753); signed_int(); } } @@ -12467,9 +12396,9 @@ public final Sequence_optionsContext sequence_options() throws RecognitionExcept enterOuterAlt(_localctx, 6); { { - setState(1790); + setState(1754); match(NO); - setState(1791); + setState(1755); match(MINVALUE); } } @@ -12478,9 +12407,9 @@ public final Sequence_optionsContext sequence_options() throws RecognitionExcept enterOuterAlt(_localctx, 7); { { - setState(1792); + setState(1756); match(CACHE); - setState(1793); + setState(1757); match(INT); } } @@ -12489,9 +12418,9 @@ public final Sequence_optionsContext sequence_options() throws RecognitionExcept enterOuterAlt(_localctx, 8); { { - setState(1794); + setState(1758); match(NO); - setState(1795); + setState(1759); match(CACHE); } } @@ -12499,7 +12428,7 @@ public final Sequence_optionsContext sequence_options() throws RecognitionExcept case 9: enterOuterAlt(_localctx, 9); { - setState(1796); + setState(1760); match(CYCLE); } break; @@ -12507,9 +12436,9 @@ public final Sequence_optionsContext sequence_options() throws RecognitionExcept enterOuterAlt(_localctx, 10); { { - setState(1797); + setState(1761); match(NO); - setState(1798); + setState(1762); match(CYCLE); } } @@ -12547,13 +12476,13 @@ public void exitRule(ParseTreeListener listener) { public final Mr_counter_defContext mr_counter_def() throws RecognitionException { Mr_counter_defContext _localctx = new Mr_counter_defContext(_ctx, getState()); - enterRule(_localctx, 298, RULE_mr_counter_def); + enterRule(_localctx, 300, RULE_mr_counter_def); try { enterOuterAlt(_localctx, 1); { - setState(1801); + setState(1765); match(AS); - setState(1802); + setState(1766); match(MR_COUNTER); } } @@ -12591,25 +12520,25 @@ public void exitRule(ParseTreeListener listener) { public final Uuid_defContext uuid_def() throws RecognitionException { Uuid_defContext _localctx = new Uuid_defContext(_ctx, getState()); - enterRule(_localctx, 300, RULE_uuid_def); + enterRule(_localctx, 302, RULE_uuid_def); int _la; try { enterOuterAlt(_localctx, 1); { - setState(1804); + setState(1768); match(AS); - setState(1805); + 
setState(1769); match(UUID); - setState(1809); + setState(1773); _errHandler.sync(this); _la = _input.LA(1); if (_la==GENERATED) { { - setState(1806); + setState(1770); match(GENERATED); - setState(1807); + setState(1771); match(BY); - setState(1808); + setState(1772); match(DEFAULT); } } @@ -12653,17 +12582,17 @@ public void exitRule(ParseTreeListener listener) { public final Alter_table_statementContext alter_table_statement() throws RecognitionException { Alter_table_statementContext _localctx = new Alter_table_statementContext(_ctx, getState()); - enterRule(_localctx, 302, RULE_alter_table_statement); + enterRule(_localctx, 304, RULE_alter_table_statement); try { enterOuterAlt(_localctx, 1); { - setState(1811); + setState(1775); match(ALTER); - setState(1812); + setState(1776); match(TABLE); - setState(1813); + setState(1777); table_name(); - setState(1814); + setState(1778); alter_def(); } } @@ -12698,6 +12627,12 @@ public Freeze_defContext freeze_def() { public Unfreeze_defContext unfreeze_def() { return getRuleContext(Unfreeze_defContext.class,0); } + public Enable_before_imageContext enable_before_image() { + return getRuleContext(Enable_before_imageContext.class,0); + } + public Disable_before_imageContext disable_before_image() { + return getRuleContext(Disable_before_imageContext.class,0); + } public Alter_defContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } @@ -12714,51 +12649,65 @@ public void exitRule(ParseTreeListener listener) { public final Alter_defContext alter_def() throws RecognitionException { Alter_defContext _localctx = new Alter_defContext(_ctx, getState()); - enterRule(_localctx, 304, RULE_alter_def); + enterRule(_localctx, 306, RULE_alter_def); try { - setState(1822); + setState(1788); _errHandler.sync(this); switch (_input.LA(1)) { case LP: enterOuterAlt(_localctx, 1); { - setState(1816); + setState(1780); alter_field_statements(); } break; case USING: enterOuterAlt(_localctx, 2); { - setState(1817); + setState(1781); ttl_def(); } break; case ADD: enterOuterAlt(_localctx, 3); { - setState(1818); + setState(1782); add_region_def(); } break; case DROP: enterOuterAlt(_localctx, 4); { - setState(1819); + setState(1783); drop_region_def(); } break; case FREEZE: enterOuterAlt(_localctx, 5); { - setState(1820); - freeze_def(); + setState(1784); + freeze_def(); + } + break; + case UNFREEZE: + enterOuterAlt(_localctx, 6); + { + setState(1785); + unfreeze_def(); + } + break; + case ENABLE: + enterOuterAlt(_localctx, 7); + { + setState(1786); + enable_before_image(); } break; - case UNFREEZE: - enterOuterAlt(_localctx, 6); + case DISABLE: + enterOuterAlt(_localctx, 8); { - setState(1821); - unfreeze_def(); + setState(1787); + disable_before_image(); } break; default: @@ -12797,21 +12746,21 @@ public void exitRule(ParseTreeListener listener) { public final Freeze_defContext freeze_def() throws RecognitionException { Freeze_defContext _localctx = new Freeze_defContext(_ctx, getState()); - enterRule(_localctx, 306, RULE_freeze_def); + enterRule(_localctx, 308, RULE_freeze_def); int _la; try { enterOuterAlt(_localctx, 1); { - setState(1824); + setState(1790); match(FREEZE); - setState(1825); + setState(1791); match(SCHEMA); - setState(1827); + setState(1793); _errHandler.sync(this); _la = _input.LA(1); if (_la==FORCE) { { - setState(1826); + setState(1792); match(FORCE); } } @@ -12849,13 +12798,13 @@ public void exitRule(ParseTreeListener listener) { public final Unfreeze_defContext unfreeze_def() throws RecognitionException { 
Unfreeze_defContext _localctx = new Unfreeze_defContext(_ctx, getState()); - enterRule(_localctx, 308, RULE_unfreeze_def); + enterRule(_localctx, 310, RULE_unfreeze_def); try { enterOuterAlt(_localctx, 1); { - setState(1829); + setState(1795); match(UNFREEZE); - setState(1830); + setState(1796); match(SCHEMA); } } @@ -12870,6 +12819,98 @@ public final Unfreeze_defContext unfreeze_def() throws RecognitionException { return _localctx; } + @SuppressWarnings("CheckReturnValue") + public static class Add_region_defContext extends ParserRuleContext { + public TerminalNode ADD() { return getToken(KVQLParser.ADD, 0); } + public TerminalNode REGIONS() { return getToken(KVQLParser.REGIONS, 0); } + public Region_namesContext region_names() { + return getRuleContext(Region_namesContext.class,0); + } + public Add_region_defContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_add_region_def; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof KVQLListener ) ((KVQLListener)listener).enterAdd_region_def(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof KVQLListener ) ((KVQLListener)listener).exitAdd_region_def(this); + } + } + + public final Add_region_defContext add_region_def() throws RecognitionException { + Add_region_defContext _localctx = new Add_region_defContext(_ctx, getState()); + enterRule(_localctx, 312, RULE_add_region_def); + try { + enterOuterAlt(_localctx, 1); + { + setState(1798); + match(ADD); + setState(1799); + match(REGIONS); + setState(1800); + region_names(); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + @SuppressWarnings("CheckReturnValue") + public static class Drop_region_defContext extends ParserRuleContext { + public TerminalNode DROP() { return getToken(KVQLParser.DROP, 0); } + public TerminalNode REGIONS() { return getToken(KVQLParser.REGIONS, 0); } + public Region_namesContext region_names() { + return getRuleContext(Region_namesContext.class,0); + } + public Drop_region_defContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_drop_region_def; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof KVQLListener ) ((KVQLListener)listener).enterDrop_region_def(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof KVQLListener ) ((KVQLListener)listener).exitDrop_region_def(this); + } + } + + public final Drop_region_defContext drop_region_def() throws RecognitionException { + Drop_region_defContext _localctx = new Drop_region_defContext(_ctx, getState()); + enterRule(_localctx, 314, RULE_drop_region_def); + try { + enterOuterAlt(_localctx, 1); + { + setState(1802); + match(DROP); + setState(1803); + match(REGIONS); + setState(1804); + region_names(); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + @SuppressWarnings("CheckReturnValue") public static class Alter_field_statementsContext extends ParserRuleContext { public TerminalNode LP() { return getToken(KVQLParser.LP, 0); } @@ -12912,63 +12953,63 @@ public void 
exitRule(ParseTreeListener listener) { public final Alter_field_statementsContext alter_field_statements() throws RecognitionException { Alter_field_statementsContext _localctx = new Alter_field_statementsContext(_ctx, getState()); - enterRule(_localctx, 310, RULE_alter_field_statements); + enterRule(_localctx, 316, RULE_alter_field_statements); int _la; try { enterOuterAlt(_localctx, 1); { - setState(1832); + setState(1806); match(LP); - setState(1836); + setState(1810); _errHandler.sync(this); switch (_input.LA(1)) { case ADD: { - setState(1833); + setState(1807); add_field_statement(); } break; case DROP: { - setState(1834); + setState(1808); drop_field_statement(); } break; case MODIFY: { - setState(1835); + setState(1809); modify_field_statement(); } break; default: throw new NoViableAltException(this); } - setState(1846); + setState(1820); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(1838); + setState(1812); match(COMMA); - setState(1842); + setState(1816); _errHandler.sync(this); switch (_input.LA(1)) { case ADD: { - setState(1839); + setState(1813); add_field_statement(); } break; case DROP: { - setState(1840); + setState(1814); drop_field_statement(); } break; case MODIFY: { - setState(1841); + setState(1815); modify_field_statement(); } break; @@ -12977,11 +13018,11 @@ public final Alter_field_statementsContext alter_field_statements() throws Recog } } } - setState(1848); + setState(1822); _errHandler.sync(this); _la = _input.LA(1); } - setState(1849); + setState(1823); match(RP); } } @@ -13039,57 +13080,57 @@ public void exitRule(ParseTreeListener listener) { public final Add_field_statementContext add_field_statement() throws RecognitionException { Add_field_statementContext _localctx = new Add_field_statementContext(_ctx, getState()); - enterRule(_localctx, 312, RULE_add_field_statement); + enterRule(_localctx, 318, RULE_add_field_statement); int _la; try { enterOuterAlt(_localctx, 1); { - setState(1851); + setState(1825); match(ADD); - setState(1852); + setState(1826); schema_path(); - setState(1853); + setState(1827); type_def(); - setState(1859); + setState(1833); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,197,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,180,_ctx) ) { case 1: { - setState(1854); + setState(1828); default_def(); } break; case 2: { - setState(1855); + setState(1829); identity_def(); } break; case 3: { - setState(1856); + setState(1830); mr_counter_def(); } break; case 4: { - setState(1857); + setState(1831); uuid_def(); } break; case 5: { - setState(1858); + setState(1832); json_mrcounter_fields(); } break; } - setState(1862); + setState(1836); _errHandler.sync(this); _la = _input.LA(1); if (_la==COMMENT) { { - setState(1861); + setState(1835); comment(); } } @@ -13129,13 +13170,13 @@ public void exitRule(ParseTreeListener listener) { public final Drop_field_statementContext drop_field_statement() throws RecognitionException { Drop_field_statementContext _localctx = new Drop_field_statementContext(_ctx, getState()); - enterRule(_localctx, 314, RULE_drop_field_statement); + enterRule(_localctx, 320, RULE_drop_field_statement); try { enterOuterAlt(_localctx, 1); { - setState(1864); + setState(1838); match(DROP); - setState(1865); + setState(1839); schema_path(); } } @@ -13189,16 +13230,16 @@ public void exitRule(ParseTreeListener listener) { public final Modify_field_statementContext modify_field_statement() throws RecognitionException { Modify_field_statementContext _localctx = 
new Modify_field_statementContext(_ctx, getState()); - enterRule(_localctx, 316, RULE_modify_field_statement); + enterRule(_localctx, 322, RULE_modify_field_statement); int _la; try { enterOuterAlt(_localctx, 1); { - setState(1867); + setState(1841); match(MODIFY); - setState(1868); + setState(1842); schema_path(); - setState(1880); + setState(1854); _errHandler.sync(this); switch (_input.LA(1)) { case JSON: @@ -13221,24 +13262,24 @@ public final Modify_field_statementContext modify_field_statement() throws Recog case ANYRECORD_T: { { - setState(1869); + setState(1843); type_def(); - setState(1871); + setState(1845); _errHandler.sync(this); _la = _input.LA(1); if (_la==DEFAULT || _la==NOT) { { - setState(1870); + setState(1844); default_def(); } } - setState(1874); + setState(1848); _errHandler.sync(this); _la = _input.LA(1); if (_la==COMMENT) { { - setState(1873); + setState(1847); comment(); } } @@ -13248,21 +13289,21 @@ public final Modify_field_statementContext modify_field_statement() throws Recog break; case GENERATED: { - setState(1876); + setState(1850); identity_def(); } break; case AS: { - setState(1877); + setState(1851); uuid_def(); } break; case DROP: { - setState(1878); + setState(1852); match(DROP); - setState(1879); + setState(1853); match(IDENTITY); } break; @@ -13313,26 +13354,26 @@ public void exitRule(ParseTreeListener listener) { public final Schema_pathContext schema_path() throws RecognitionException { Schema_pathContext _localctx = new Schema_pathContext(_ctx, getState()); - enterRule(_localctx, 318, RULE_schema_path); + enterRule(_localctx, 324, RULE_schema_path); int _la; try { enterOuterAlt(_localctx, 1); { - setState(1882); + setState(1856); init_schema_path_step(); - setState(1887); + setState(1861); _errHandler.sync(this); _la = _input.LA(1); while (_la==DOT) { { { - setState(1883); + setState(1857); match(DOT); - setState(1884); + setState(1858); schema_path_step(); } } - setState(1889); + setState(1863); _errHandler.sync(this); _la = _input.LA(1); } @@ -13378,26 +13419,26 @@ public void exitRule(ParseTreeListener listener) { public final Init_schema_path_stepContext init_schema_path_step() throws RecognitionException { Init_schema_path_stepContext _localctx = new Init_schema_path_stepContext(_ctx, getState()); - enterRule(_localctx, 320, RULE_init_schema_path_step); + enterRule(_localctx, 326, RULE_init_schema_path_step); int _la; try { enterOuterAlt(_localctx, 1); { - setState(1890); + setState(1864); id(); - setState(1895); + setState(1869); _errHandler.sync(this); _la = _input.LA(1); while (_la==LBRACK) { { { - setState(1891); + setState(1865); match(LBRACK); - setState(1892); + setState(1866); match(RBRACK); } } - setState(1897); + setState(1871); _errHandler.sync(this); _la = _input.LA(1); } @@ -13446,30 +13487,30 @@ public void exitRule(ParseTreeListener listener) { public final Schema_path_stepContext schema_path_step() throws RecognitionException { Schema_path_stepContext _localctx = new Schema_path_stepContext(_ctx, getState()); - enterRule(_localctx, 322, RULE_schema_path_step); + enterRule(_localctx, 328, RULE_schema_path_step); int _la; try { - setState(1909); + setState(1883); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,205,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,188,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(1898); + setState(1872); id(); - setState(1903); + setState(1877); _errHandler.sync(this); _la = _input.LA(1); while (_la==LBRACK) { { { - setState(1899); + 
setState(1873); match(LBRACK); - setState(1900); + setState(1874); match(RBRACK); } } - setState(1905); + setState(1879); _errHandler.sync(this); _la = _input.LA(1); } @@ -13478,11 +13519,11 @@ public final Schema_path_stepContext schema_path_step() throws RecognitionExcept case 2: enterOuterAlt(_localctx, 2); { - setState(1906); + setState(1880); match(VALUES); - setState(1907); + setState(1881); match(LP); - setState(1908); + setState(1882); match(RP); } break; @@ -13524,27 +13565,27 @@ public void exitRule(ParseTreeListener listener) { public final Drop_table_statementContext drop_table_statement() throws RecognitionException { Drop_table_statementContext _localctx = new Drop_table_statementContext(_ctx, getState()); - enterRule(_localctx, 324, RULE_drop_table_statement); + enterRule(_localctx, 330, RULE_drop_table_statement); try { enterOuterAlt(_localctx, 1); { - setState(1911); + setState(1885); match(DROP); - setState(1912); + setState(1886); match(TABLE); - setState(1915); + setState(1889); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,206,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,189,_ctx) ) { case 1: { - setState(1913); + setState(1887); match(IF); - setState(1914); + setState(1888); match(EXISTS); } break; } - setState(1917); + setState(1891); table_name(); } } @@ -13607,83 +13648,83 @@ public void exitRule(ParseTreeListener listener) { public final Create_index_statementContext create_index_statement() throws RecognitionException { Create_index_statementContext _localctx = new Create_index_statementContext(_ctx, getState()); - enterRule(_localctx, 326, RULE_create_index_statement); + enterRule(_localctx, 332, RULE_create_index_statement); int _la; try { enterOuterAlt(_localctx, 1); { - setState(1919); + setState(1893); match(CREATE); - setState(1920); + setState(1894); match(INDEX); - setState(1924); + setState(1898); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,207,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,190,_ctx) ) { case 1: { - setState(1921); + setState(1895); match(IF); - setState(1922); + setState(1896); match(NOT); - setState(1923); + setState(1897); match(EXISTS); } break; } - setState(1926); + setState(1900); index_name(); - setState(1927); + setState(1901); match(ON); - setState(1928); + setState(1902); table_name(); - setState(1950); + setState(1924); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,211,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,194,_ctx) ) { case 1: { { - setState(1929); + setState(1903); match(LP); - setState(1930); + setState(1904); index_field_list(); - setState(1931); + setState(1905); match(RP); - setState(1937); + setState(1911); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,209,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,192,_ctx) ) { case 1: { - setState(1932); + setState(1906); match(WITH); - setState(1934); + setState(1908); _errHandler.sync(this); _la = _input.LA(1); if (_la==NO) { { - setState(1933); + setState(1907); match(NO); } } - setState(1936); + setState(1910); match(NULLS); } break; } - setState(1944); + setState(1918); _errHandler.sync(this); _la = _input.LA(1); if (_la==WITH) { { - setState(1939); + setState(1913); match(WITH); - setState(1940); + setState(1914); match(UNIQUE); - setState(1941); + setState(1915); match(KEYS); - setState(1942); + setState(1916); match(PER); - setState(1943); + setState(1917); match(ROW); } } @@ -13694,21 
+13735,21 @@ public final Create_index_statementContext create_index_statement() throws Recog case 2: { { - setState(1946); + setState(1920); match(LP); - setState(1947); + setState(1921); index_field_list(); notifyErrorListeners("Missing closing ')'"); } } break; } - setState(1953); + setState(1927); _errHandler.sync(this); _la = _input.LA(1); if (_la==COMMENT) { { - setState(1952); + setState(1926); comment(); } } @@ -13747,11 +13788,11 @@ public void exitRule(ParseTreeListener listener) { public final Index_nameContext index_name() throws RecognitionException { Index_nameContext _localctx = new Index_nameContext(_ctx, getState()); - enterRule(_localctx, 328, RULE_index_name); + enterRule(_localctx, 334, RULE_index_name); try { enterOuterAlt(_localctx, 1); { - setState(1955); + setState(1929); id(); } } @@ -13794,26 +13835,26 @@ public void exitRule(ParseTreeListener listener) { public final Index_field_listContext index_field_list() throws RecognitionException { Index_field_listContext _localctx = new Index_field_listContext(_ctx, getState()); - enterRule(_localctx, 330, RULE_index_field_list); + enterRule(_localctx, 336, RULE_index_field_list); int _la; try { enterOuterAlt(_localctx, 1); { - setState(1957); + setState(1931); index_field(); - setState(1962); + setState(1936); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(1958); + setState(1932); match(COMMA); - setState(1959); + setState(1933); index_field(); } } - setState(1964); + setState(1938); _errHandler.sync(this); _la = _input.LA(1); } @@ -13857,24 +13898,24 @@ public void exitRule(ParseTreeListener listener) { public final Index_fieldContext index_field() throws RecognitionException { Index_fieldContext _localctx = new Index_fieldContext(_ctx, getState()); - enterRule(_localctx, 332, RULE_index_field); + enterRule(_localctx, 338, RULE_index_field); int _la; try { - setState(1970); + setState(1944); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,215,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,198,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { { - setState(1965); + setState(1939); index_path(); - setState(1967); + setState(1941); _errHandler.sync(this); _la = _input.LA(1); if (_la==AS) { { - setState(1966); + setState(1940); path_type(); } } @@ -13885,7 +13926,7 @@ public final Index_fieldContext index_field() throws RecognitionException { case 2: enterOuterAlt(_localctx, 2); { - setState(1969); + setState(1943); index_function(); } break; @@ -13934,46 +13975,46 @@ public void exitRule(ParseTreeListener listener) { public final Index_functionContext index_function() throws RecognitionException { Index_functionContext _localctx = new Index_functionContext(_ctx, getState()); - enterRule(_localctx, 334, RULE_index_function); + enterRule(_localctx, 340, RULE_index_function); int _la; try { enterOuterAlt(_localctx, 1); { - setState(1972); + setState(1946); id(); - setState(1973); + setState(1947); match(LP); - setState(1975); + setState(1949); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,216,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,199,_ctx) ) { case 1: { - setState(1974); + setState(1948); index_path(); } break; } - setState(1978); + setState(1952); _errHandler.sync(this); _la = _input.LA(1); if (_la==AS) { { - setState(1977); + setState(1951); path_type(); } } - setState(1981); + setState(1955); _errHandler.sync(this); _la = _input.LA(1); if (_la==COMMA) { { - setState(1980); + setState(1954); 
index_function_args(); } } - setState(1983); + setState(1957); match(RP); } } @@ -14016,24 +14057,24 @@ public void exitRule(ParseTreeListener listener) { public final Index_function_argsContext index_function_args() throws RecognitionException { Index_function_argsContext _localctx = new Index_function_argsContext(_ctx, getState()); - enterRule(_localctx, 336, RULE_index_function_args); + enterRule(_localctx, 342, RULE_index_function_args); int _la; try { enterOuterAlt(_localctx, 1); { - setState(1987); + setState(1961); _errHandler.sync(this); _la = _input.LA(1); do { { { - setState(1985); + setState(1959); match(COMMA); - setState(1986); + setState(1960); const_expr(); } } - setState(1989); + setState(1963); _errHandler.sync(this); _la = _input.LA(1); } while ( _la==COMMA ); @@ -14058,14 +14099,15 @@ public Name_pathContext name_path() { public Multikey_path_prefixContext multikey_path_prefix() { return getRuleContext(Multikey_path_prefixContext.class,0); } + public Row_metadataContext row_metadata() { + return getRuleContext(Row_metadataContext.class,0); + } public Multikey_path_suffixContext multikey_path_suffix() { return getRuleContext(Multikey_path_suffixContext.class,0); } - public TerminalNode ELEMENTOF() { return getToken(KVQLParser.ELEMENTOF, 0); } - public TerminalNode LP() { return getToken(KVQLParser.LP, 0); } - public TerminalNode RP() { return getToken(KVQLParser.RP, 0); } - public TerminalNode KEYOF() { return getToken(KVQLParser.KEYOF, 0); } - public TerminalNode KEYS() { return getToken(KVQLParser.KEYS, 0); } + public Old_index_pathContext old_index_path() { + return getRuleContext(Old_index_pathContext.class,0); + } public Index_pathContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } @@ -14082,85 +14124,198 @@ public void exitRule(ParseTreeListener listener) { public final Index_pathContext index_path() throws RecognitionException { Index_pathContext _localctx = new Index_pathContext(_ctx, getState()); - enterRule(_localctx, 338, RULE_index_path); + enterRule(_localctx, 344, RULE_index_path); int _la; try { - setState(2013); + setState(1976); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,222,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,206,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(1991); - name_path(); - } - break; - case 2: - enterOuterAlt(_localctx, 2); { - setState(1992); - multikey_path_prefix(); - setState(1994); + setState(1966); _errHandler.sync(this); _la = _input.LA(1); - if (_la==DOT) { + if (_la==T__3) { { - setState(1993); - multikey_path_suffix(); + setState(1965); + row_metadata(); } } + setState(1973); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,205,_ctx) ) { + case 1: + { + setState(1968); + name_path(); + } + break; + case 2: + { + setState(1969); + multikey_path_prefix(); + setState(1971); + _errHandler.sync(this); + _la = _input.LA(1); + if (_la==DOT) { + { + setState(1970); + multikey_path_suffix(); + } + } + + } + break; + } + } } break; - case 3: - enterOuterAlt(_localctx, 3); + case 2: + enterOuterAlt(_localctx, 2); + { + setState(1975); + old_index_path(); + } + break; + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + @SuppressWarnings("CheckReturnValue") + public static class Old_index_pathContext extends ParserRuleContext { + public TerminalNode ELEMENTOF() { 
return getToken(KVQLParser.ELEMENTOF, 0); } + public TerminalNode LP() { return getToken(KVQLParser.LP, 0); } + public Name_pathContext name_path() { + return getRuleContext(Name_pathContext.class,0); + } + public TerminalNode RP() { return getToken(KVQLParser.RP, 0); } + public Multikey_path_suffixContext multikey_path_suffix() { + return getRuleContext(Multikey_path_suffixContext.class,0); + } + public TerminalNode KEYOF() { return getToken(KVQLParser.KEYOF, 0); } + public TerminalNode KEYS() { return getToken(KVQLParser.KEYS, 0); } + public Old_index_pathContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_old_index_path; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof KVQLListener ) ((KVQLListener)listener).enterOld_index_path(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof KVQLListener ) ((KVQLListener)listener).exitOld_index_path(this); + } + } + + public final Old_index_pathContext old_index_path() throws RecognitionException { + Old_index_pathContext _localctx = new Old_index_pathContext(_ctx, getState()); + enterRule(_localctx, 346, RULE_old_index_path); + int _la; + try { + setState(1995); + _errHandler.sync(this); + switch (_input.LA(1)) { + case ELEMENTOF: + enterOuterAlt(_localctx, 1); { - setState(1996); + setState(1978); match(ELEMENTOF); - setState(1997); + setState(1979); match(LP); - setState(1998); + setState(1980); name_path(); - setState(1999); + setState(1981); match(RP); - setState(2001); + setState(1983); _errHandler.sync(this); _la = _input.LA(1); if (_la==DOT) { { - setState(2000); + setState(1982); multikey_path_suffix(); } } } break; - case 4: - enterOuterAlt(_localctx, 4); + case KEYOF: + enterOuterAlt(_localctx, 2); { - setState(2003); + setState(1985); match(KEYOF); - setState(2004); + setState(1986); match(LP); - setState(2005); + setState(1987); name_path(); - setState(2006); + setState(1988); match(RP); } break; - case 5: - enterOuterAlt(_localctx, 5); + case KEYS: + enterOuterAlt(_localctx, 3); { - setState(2008); + setState(1990); match(KEYS); - setState(2009); + setState(1991); match(LP); - setState(2010); + setState(1992); name_path(); - setState(2011); + setState(1993); match(RP); } break; + default: + throw new NoViableAltException(this); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + @SuppressWarnings("CheckReturnValue") + public static class Row_metadataContext extends ParserRuleContext { + public Row_metadataContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_row_metadata; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof KVQLListener ) ((KVQLListener)listener).enterRow_metadata(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof KVQLListener ) ((KVQLListener)listener).exitRow_metadata(this); + } + } + + public final Row_metadataContext row_metadata() throws RecognitionException { + Row_metadataContext _localctx = new Row_metadataContext(_ctx, getState()); + enterRule(_localctx, 348, RULE_row_metadata); + try { + enterOuterAlt(_localctx, 1); + { + setState(1997); + match(T__3); } } catch (RecognitionException re) { @@ 
-14223,28 +14378,28 @@ public void exitRule(ParseTreeListener listener) { public final Multikey_path_prefixContext multikey_path_prefix() throws RecognitionException { Multikey_path_prefixContext _localctx = new Multikey_path_prefixContext(_ctx, getState()); - enterRule(_localctx, 340, RULE_multikey_path_prefix); + enterRule(_localctx, 350, RULE_multikey_path_prefix); try { int _alt; enterOuterAlt(_localctx, 1); { - setState(2015); + setState(1999); field_name(); - setState(2026); + setState(2010); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,224,_ctx); + _alt = getInterpreter().adaptivePredict(_input,210,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { - setState(2024); + setState(2008); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,223,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,209,_ctx) ) { case 1: { { - setState(2016); + setState(2000); match(DOT); - setState(2017); + setState(2001); field_name(); } } @@ -14252,13 +14407,13 @@ public final Multikey_path_prefixContext multikey_path_prefix() throws Recogniti case 2: { { - setState(2018); + setState(2002); match(DOT); - setState(2019); + setState(2003); match(VALUES); - setState(2020); + setState(2004); match(LP); - setState(2021); + setState(2005); match(RP); } } @@ -14266,9 +14421,9 @@ public final Multikey_path_prefixContext multikey_path_prefix() throws Recogniti case 3: { { - setState(2022); + setState(2006); match(LBRACK); - setState(2023); + setState(2007); match(RBRACK); } } @@ -14276,19 +14431,19 @@ public final Multikey_path_prefixContext multikey_path_prefix() throws Recogniti } } } - setState(2028); + setState(2012); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,224,_ctx); + _alt = getInterpreter().adaptivePredict(_input,210,_ctx); } - setState(2039); + setState(2023); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,225,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,211,_ctx) ) { case 1: { { - setState(2029); + setState(2013); match(LBRACK); - setState(2030); + setState(2014); match(RBRACK); } } @@ -14296,13 +14451,13 @@ public final Multikey_path_prefixContext multikey_path_prefix() throws Recogniti case 2: { { - setState(2031); + setState(2015); match(DOT); - setState(2032); + setState(2016); match(VALUES); - setState(2033); + setState(2017); match(LP); - setState(2034); + setState(2018); match(RP); } } @@ -14310,13 +14465,13 @@ public final Multikey_path_prefixContext multikey_path_prefix() throws Recogniti case 3: { { - setState(2035); + setState(2019); match(DOT); - setState(2036); + setState(2020); match(KEYS); - setState(2037); + setState(2021); match(LP); - setState(2038); + setState(2022); match(RP); } } @@ -14357,14 +14512,14 @@ public void exitRule(ParseTreeListener listener) { public final Multikey_path_suffixContext multikey_path_suffix() throws RecognitionException { Multikey_path_suffixContext _localctx = new Multikey_path_suffixContext(_ctx, getState()); - enterRule(_localctx, 342, RULE_multikey_path_suffix); + enterRule(_localctx, 352, RULE_multikey_path_suffix); try { enterOuterAlt(_localctx, 1); { { - setState(2041); + setState(2025); match(DOT); - setState(2042); + setState(2026); name_path(); } } @@ -14411,69 +14566,69 @@ public void exitRule(ParseTreeListener listener) { public final Path_typeContext path_type() throws RecognitionException { Path_typeContext _localctx = new Path_typeContext(_ctx, getState()); - 
enterRule(_localctx, 344, RULE_path_type); + enterRule(_localctx, 354, RULE_path_type); int _la; try { enterOuterAlt(_localctx, 1); { - setState(2044); + setState(2028); match(AS); - setState(2057); + setState(2041); _errHandler.sync(this); switch (_input.LA(1)) { case INTEGER_T: { - setState(2045); + setState(2029); match(INTEGER_T); } break; case LONG_T: { - setState(2046); + setState(2030); match(LONG_T); } break; case DOUBLE_T: { - setState(2047); + setState(2031); match(DOUBLE_T); } break; case STRING_T: { - setState(2048); + setState(2032); match(STRING_T); } break; case BOOLEAN_T: { - setState(2049); + setState(2033); match(BOOLEAN_T); } break; case NUMBER_T: { - setState(2050); + setState(2034); match(NUMBER_T); } break; case ANYATOMIC_T: { - setState(2051); + setState(2035); match(ANYATOMIC_T); } break; case GEOMETRY_T: { { - setState(2052); + setState(2036); match(GEOMETRY_T); - setState(2054); + setState(2038); _errHandler.sync(this); _la = _input.LA(1); if (_la==LBRACE) { { - setState(2053); + setState(2037); jsobject(); } } @@ -14483,7 +14638,7 @@ public final Path_typeContext path_type() throws RecognitionException { break; case POINT_T: { - setState(2056); + setState(2040); match(POINT_T); } break; @@ -14544,65 +14699,65 @@ public void exitRule(ParseTreeListener listener) { public final Create_text_index_statementContext create_text_index_statement() throws RecognitionException { Create_text_index_statementContext _localctx = new Create_text_index_statementContext(_ctx, getState()); - enterRule(_localctx, 346, RULE_create_text_index_statement); + enterRule(_localctx, 356, RULE_create_text_index_statement); int _la; try { enterOuterAlt(_localctx, 1); { - setState(2059); + setState(2043); match(CREATE); - setState(2060); + setState(2044); match(FULLTEXT); - setState(2061); + setState(2045); match(INDEX); - setState(2065); + setState(2049); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,228,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,214,_ctx) ) { case 1: { - setState(2062); + setState(2046); match(IF); - setState(2063); + setState(2047); match(NOT); - setState(2064); + setState(2048); match(EXISTS); } break; } - setState(2067); + setState(2051); index_name(); - setState(2068); + setState(2052); match(ON); - setState(2069); + setState(2053); table_name(); - setState(2070); + setState(2054); fts_field_list(); - setState(2072); + setState(2056); _errHandler.sync(this); _la = _input.LA(1); if (_la==ES_SHARDS || _la==ES_REPLICAS) { { - setState(2071); + setState(2055); es_properties(); } } - setState(2075); + setState(2059); _errHandler.sync(this); _la = _input.LA(1); if (_la==OVERRIDE) { { - setState(2074); + setState(2058); match(OVERRIDE); } } - setState(2078); + setState(2062); _errHandler.sync(this); _la = _input.LA(1); if (_la==COMMENT) { { - setState(2077); + setState(2061); comment(); } } @@ -14643,28 +14798,28 @@ public void exitRule(ParseTreeListener listener) { public final Fts_field_listContext fts_field_list() throws RecognitionException { Fts_field_listContext _localctx = new Fts_field_listContext(_ctx, getState()); - enterRule(_localctx, 348, RULE_fts_field_list); + enterRule(_localctx, 358, RULE_fts_field_list); try { - setState(2088); + setState(2072); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,232,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,218,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(2080); + setState(2064); match(LP); - setState(2081); + 
setState(2065); fts_path_list(); - setState(2082); + setState(2066); match(RP); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(2084); + setState(2068); match(LP); - setState(2085); + setState(2069); fts_path_list(); notifyErrorListeners("Missing closing ')'"); } @@ -14710,26 +14865,26 @@ public void exitRule(ParseTreeListener listener) { public final Fts_path_listContext fts_path_list() throws RecognitionException { Fts_path_listContext _localctx = new Fts_path_listContext(_ctx, getState()); - enterRule(_localctx, 350, RULE_fts_path_list); + enterRule(_localctx, 360, RULE_fts_path_list); int _la; try { enterOuterAlt(_localctx, 1); { - setState(2090); + setState(2074); fts_path(); - setState(2095); + setState(2079); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(2091); + setState(2075); match(COMMA); - setState(2092); + setState(2076); fts_path(); } } - setState(2097); + setState(2081); _errHandler.sync(this); _la = _input.LA(1); } @@ -14770,19 +14925,19 @@ public void exitRule(ParseTreeListener listener) { public final Fts_pathContext fts_path() throws RecognitionException { Fts_pathContext _localctx = new Fts_pathContext(_ctx, getState()); - enterRule(_localctx, 352, RULE_fts_path); + enterRule(_localctx, 362, RULE_fts_path); int _la; try { enterOuterAlt(_localctx, 1); { - setState(2098); + setState(2082); index_path(); - setState(2100); + setState(2084); _errHandler.sync(this); _la = _input.LA(1); if (_la==LBRACE) { { - setState(2099); + setState(2083); jsobject(); } } @@ -14824,24 +14979,24 @@ public void exitRule(ParseTreeListener listener) { public final Es_propertiesContext es_properties() throws RecognitionException { Es_propertiesContext _localctx = new Es_propertiesContext(_ctx, getState()); - enterRule(_localctx, 354, RULE_es_properties); + enterRule(_localctx, 364, RULE_es_properties); int _la; try { enterOuterAlt(_localctx, 1); { - setState(2102); + setState(2086); es_property_assignment(); - setState(2106); + setState(2090); _errHandler.sync(this); _la = _input.LA(1); while (_la==ES_SHARDS || _la==ES_REPLICAS) { { { - setState(2103); + setState(2087); es_property_assignment(); } } - setState(2108); + setState(2092); _errHandler.sync(this); _la = _input.LA(1); } @@ -14880,30 +15035,30 @@ public void exitRule(ParseTreeListener listener) { public final Es_property_assignmentContext es_property_assignment() throws RecognitionException { Es_property_assignmentContext _localctx = new Es_property_assignmentContext(_ctx, getState()); - enterRule(_localctx, 356, RULE_es_property_assignment); + enterRule(_localctx, 366, RULE_es_property_assignment); try { - setState(2115); + setState(2099); _errHandler.sync(this); switch (_input.LA(1)) { case ES_SHARDS: enterOuterAlt(_localctx, 1); { - setState(2109); + setState(2093); match(ES_SHARDS); - setState(2110); + setState(2094); match(EQ); - setState(2111); + setState(2095); match(INT); } break; case ES_REPLICAS: enterOuterAlt(_localctx, 2); { - setState(2112); + setState(2096); match(ES_REPLICAS); - setState(2113); + setState(2097); match(EQ); - setState(2114); + setState(2098); match(INT); } break; @@ -14952,39 +15107,39 @@ public void exitRule(ParseTreeListener listener) { public final Drop_index_statementContext drop_index_statement() throws RecognitionException { Drop_index_statementContext _localctx = new Drop_index_statementContext(_ctx, getState()); - enterRule(_localctx, 358, RULE_drop_index_statement); + enterRule(_localctx, 368, RULE_drop_index_statement); int _la; try { 
enterOuterAlt(_localctx, 1); { - setState(2117); + setState(2101); match(DROP); - setState(2118); + setState(2102); match(INDEX); - setState(2121); + setState(2105); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,237,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,223,_ctx) ) { case 1: { - setState(2119); + setState(2103); match(IF); - setState(2120); + setState(2104); match(EXISTS); } break; } - setState(2123); + setState(2107); index_name(); - setState(2124); + setState(2108); match(ON); - setState(2125); + setState(2109); table_name(); - setState(2127); + setState(2111); _errHandler.sync(this); _la = _input.LA(1); if (_la==OVERRIDE) { { - setState(2126); + setState(2110); match(OVERRIDE); } } @@ -15038,12 +15193,12 @@ public void exitRule(ParseTreeListener listener) { public final Describe_statementContext describe_statement() throws RecognitionException { Describe_statementContext _localctx = new Describe_statementContext(_ctx, getState()); - enterRule(_localctx, 360, RULE_describe_statement); + enterRule(_localctx, 370, RULE_describe_statement); int _la; try { enterOuterAlt(_localctx, 1); { - setState(2129); + setState(2113); _la = _input.LA(1); if ( !(_la==DESC || _la==DESCRIBE) ) { _errHandler.recoverInline(this); @@ -15053,40 +15208,40 @@ public final Describe_statementContext describe_statement() throws RecognitionEx _errHandler.reportMatch(this); consume(); } - setState(2132); + setState(2116); _errHandler.sync(this); _la = _input.LA(1); if (_la==AS) { { - setState(2130); + setState(2114); match(AS); - setState(2131); + setState(2115); match(JSON); } } - setState(2151); + setState(2135); _errHandler.sync(this); switch (_input.LA(1)) { case TABLE: { - setState(2134); + setState(2118); match(TABLE); { - setState(2135); + setState(2119); table_name(); } - setState(2144); + setState(2128); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,240,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,226,_ctx) ) { case 1: { { - setState(2136); + setState(2120); match(LP); - setState(2137); + setState(2121); schema_path_list(); - setState(2138); + setState(2122); match(RP); } } @@ -15094,9 +15249,9 @@ public final Describe_statementContext describe_statement() throws RecognitionEx case 2: { { - setState(2140); + setState(2124); match(LP); - setState(2141); + setState(2125); schema_path_list(); notifyErrorListeners("Missing closing ')'") ; @@ -15108,13 +15263,13 @@ public final Describe_statementContext describe_statement() throws RecognitionEx break; case INDEX: { - setState(2146); + setState(2130); match(INDEX); - setState(2147); + setState(2131); index_name(); - setState(2148); + setState(2132); match(ON); - setState(2149); + setState(2133); table_name(); } break; @@ -15162,26 +15317,26 @@ public void exitRule(ParseTreeListener listener) { public final Schema_path_listContext schema_path_list() throws RecognitionException { Schema_path_listContext _localctx = new Schema_path_listContext(_ctx, getState()); - enterRule(_localctx, 362, RULE_schema_path_list); + enterRule(_localctx, 372, RULE_schema_path_list); int _la; try { enterOuterAlt(_localctx, 1); { - setState(2153); + setState(2137); schema_path(); - setState(2158); + setState(2142); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(2154); + setState(2138); match(COMMA); - setState(2155); + setState(2139); schema_path(); } } - setState(2160); + setState(2144); _errHandler.sync(this); _la = _input.LA(1); } @@ -15238,89 +15393,89 @@ 
public void exitRule(ParseTreeListener listener) { public final Show_statementContext show_statement() throws RecognitionException { Show_statementContext _localctx = new Show_statementContext(_ctx, getState()); - enterRule(_localctx, 364, RULE_show_statement); + enterRule(_localctx, 374, RULE_show_statement); int _la; try { enterOuterAlt(_localctx, 1); { - setState(2161); + setState(2145); match(SHOW); - setState(2164); + setState(2148); _errHandler.sync(this); _la = _input.LA(1); if (_la==AS) { { - setState(2162); + setState(2146); match(AS); - setState(2163); + setState(2147); match(JSON); } } - setState(2180); + setState(2164); _errHandler.sync(this); switch (_input.LA(1)) { case TABLES: { - setState(2166); + setState(2150); match(TABLES); } break; case USERS: { - setState(2167); + setState(2151); match(USERS); } break; case ROLES: { - setState(2168); + setState(2152); match(ROLES); } break; case USER: { - setState(2169); + setState(2153); match(USER); - setState(2170); + setState(2154); identifier_or_string(); } break; case ROLE: { - setState(2171); + setState(2155); match(ROLE); - setState(2172); + setState(2156); id(); } break; case INDEXES: { - setState(2173); + setState(2157); match(INDEXES); - setState(2174); + setState(2158); match(ON); - setState(2175); + setState(2159); table_name(); } break; case TABLE: { - setState(2176); + setState(2160); match(TABLE); - setState(2177); + setState(2161); table_name(); } break; case NAMESPACES: { - setState(2178); + setState(2162); match(NAMESPACES); } break; case REGIONS: { - setState(2179); + setState(2163); match(REGIONS); } break; @@ -15367,33 +15522,33 @@ public void exitRule(ParseTreeListener listener) { public final Create_user_statementContext create_user_statement() throws RecognitionException { Create_user_statementContext _localctx = new Create_user_statementContext(_ctx, getState()); - enterRule(_localctx, 366, RULE_create_user_statement); + enterRule(_localctx, 376, RULE_create_user_statement); int _la; try { enterOuterAlt(_localctx, 1); { - setState(2182); + setState(2166); match(CREATE); - setState(2183); + setState(2167); match(USER); - setState(2184); + setState(2168); create_user_identified_clause(); - setState(2186); + setState(2170); _errHandler.sync(this); _la = _input.LA(1); if (_la==ACCOUNT) { { - setState(2185); + setState(2169); account_lock(); } } - setState(2189); + setState(2173); _errHandler.sync(this); _la = _input.LA(1); if (_la==ADMIN) { { - setState(2188); + setState(2172); match(ADMIN); } } @@ -15434,15 +15589,15 @@ public void exitRule(ParseTreeListener listener) { public final Create_role_statementContext create_role_statement() throws RecognitionException { Create_role_statementContext _localctx = new Create_role_statementContext(_ctx, getState()); - enterRule(_localctx, 368, RULE_create_role_statement); + enterRule(_localctx, 378, RULE_create_role_statement); try { enterOuterAlt(_localctx, 1); { - setState(2191); + setState(2175); match(CREATE); - setState(2192); + setState(2176); match(ROLE); - setState(2193); + setState(2177); id(); } } @@ -15491,63 +15646,63 @@ public void exitRule(ParseTreeListener listener) { public final Alter_user_statementContext alter_user_statement() throws RecognitionException { Alter_user_statementContext _localctx = new Alter_user_statementContext(_ctx, getState()); - enterRule(_localctx, 370, RULE_alter_user_statement); + enterRule(_localctx, 380, RULE_alter_user_statement); int _la; try { enterOuterAlt(_localctx, 1); { - setState(2195); + setState(2179); match(ALTER); - 
setState(2196); + setState(2180); match(USER); - setState(2197); + setState(2181); identifier_or_string(); - setState(2199); + setState(2183); _errHandler.sync(this); _la = _input.LA(1); if (_la==IDENTIFIED) { { - setState(2198); + setState(2182); reset_password_clause(); } } - setState(2202); + setState(2186); _errHandler.sync(this); _la = _input.LA(1); if (_la==CLEAR_RETAINED_PASSWORD) { { - setState(2201); + setState(2185); match(CLEAR_RETAINED_PASSWORD); } } - setState(2205); + setState(2189); _errHandler.sync(this); _la = _input.LA(1); if (_la==PASSWORD_EXPIRE) { { - setState(2204); + setState(2188); match(PASSWORD_EXPIRE); } } - setState(2208); + setState(2192); _errHandler.sync(this); _la = _input.LA(1); if (_la==PASSWORD) { { - setState(2207); + setState(2191); password_lifetime(); } } - setState(2211); + setState(2195); _errHandler.sync(this); _la = _input.LA(1); if (_la==ACCOUNT) { { - setState(2210); + setState(2194); account_lock(); } } @@ -15589,23 +15744,23 @@ public void exitRule(ParseTreeListener listener) { public final Drop_user_statementContext drop_user_statement() throws RecognitionException { Drop_user_statementContext _localctx = new Drop_user_statementContext(_ctx, getState()); - enterRule(_localctx, 372, RULE_drop_user_statement); + enterRule(_localctx, 382, RULE_drop_user_statement); int _la; try { enterOuterAlt(_localctx, 1); { - setState(2213); + setState(2197); match(DROP); - setState(2214); + setState(2198); match(USER); - setState(2215); + setState(2199); identifier_or_string(); - setState(2217); + setState(2201); _errHandler.sync(this); _la = _input.LA(1); if (_la==CASCADE) { { - setState(2216); + setState(2200); match(CASCADE); } } @@ -15646,15 +15801,15 @@ public void exitRule(ParseTreeListener listener) { public final Drop_role_statementContext drop_role_statement() throws RecognitionException { Drop_role_statementContext _localctx = new Drop_role_statementContext(_ctx, getState()); - enterRule(_localctx, 374, RULE_drop_role_statement); + enterRule(_localctx, 384, RULE_drop_role_statement); try { enterOuterAlt(_localctx, 1); { - setState(2219); + setState(2203); match(DROP); - setState(2220); + setState(2204); match(ROLE); - setState(2221); + setState(2205); id(); } } @@ -15697,30 +15852,30 @@ public void exitRule(ParseTreeListener listener) { public final Grant_statementContext grant_statement() throws RecognitionException { Grant_statementContext _localctx = new Grant_statementContext(_ctx, getState()); - enterRule(_localctx, 376, RULE_grant_statement); + enterRule(_localctx, 386, RULE_grant_statement); try { enterOuterAlt(_localctx, 1); { - setState(2223); + setState(2207); match(GRANT); - setState(2227); + setState(2211); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,253,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,239,_ctx) ) { case 1: { - setState(2224); + setState(2208); grant_roles(); } break; case 2: { - setState(2225); + setState(2209); grant_system_privileges(); } break; case 3: { - setState(2226); + setState(2210); grant_object_privileges(); } break; @@ -15766,30 +15921,30 @@ public void exitRule(ParseTreeListener listener) { public final Revoke_statementContext revoke_statement() throws RecognitionException { Revoke_statementContext _localctx = new Revoke_statementContext(_ctx, getState()); - enterRule(_localctx, 378, RULE_revoke_statement); + enterRule(_localctx, 388, RULE_revoke_statement); try { enterOuterAlt(_localctx, 1); { - setState(2229); + setState(2213); match(REVOKE); - setState(2233); + 
setState(2217); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,254,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,240,_ctx) ) { case 1: { - setState(2230); + setState(2214); revoke_roles(); } break; case 2: { - setState(2231); + setState(2215); revoke_system_privileges(); } break; case 3: { - setState(2232); + setState(2216); revoke_object_privileges(); } break; @@ -15831,11 +15986,11 @@ public void exitRule(ParseTreeListener listener) { public final Identifier_or_stringContext identifier_or_string() throws RecognitionException { Identifier_or_stringContext _localctx = new Identifier_or_stringContext(_ctx, getState()); - enterRule(_localctx, 380, RULE_identifier_or_string); + enterRule(_localctx, 390, RULE_identifier_or_string); try { enterOuterAlt(_localctx, 1); { - setState(2237); + setState(2221); _errHandler.sync(this); switch (_input.LA(1)) { case ACCOUNT: @@ -15849,6 +16004,7 @@ public final Identifier_or_stringContext identifier_or_string() throws Recogniti case AS: case ASC: case ARRAY_COLLECT: + case BEFORE: case BETWEEN: case BY: case CACHE: @@ -15866,11 +16022,13 @@ public final Identifier_or_stringContext identifier_or_string() throws Recogniti case DESC: case DESCENDANTS: case DESCRIBE: + case DISABLE: case DISTINCT: case DROP: case ELEMENTOF: case ELEMENTS: case ELSE: + case ENABLE: case END: case ES_SHARDS: case ES_REPLICAS: @@ -15889,6 +16047,7 @@ public final Identifier_or_stringContext identifier_or_string() throws Recogniti case IDENTIFIED: case IDENTITY: case IF: + case IMAGE: case IN: case INCREMENT: case INDEX: @@ -15984,14 +16143,14 @@ public final Identifier_or_stringContext identifier_or_string() throws Recogniti case ID: case BAD_ID: { - setState(2235); + setState(2219); id(); } break; case DSTRING: case STRING: { - setState(2236); + setState(2220); string(); } break; @@ -16033,13 +16192,13 @@ public void exitRule(ParseTreeListener listener) { public final Identified_clauseContext identified_clause() throws RecognitionException { Identified_clauseContext _localctx = new Identified_clauseContext(_ctx, getState()); - enterRule(_localctx, 382, RULE_identified_clause); + enterRule(_localctx, 392, RULE_identified_clause); try { enterOuterAlt(_localctx, 1); { - setState(2239); + setState(2223); match(IDENTIFIED); - setState(2240); + setState(2224); by_password(); } } @@ -16086,10 +16245,10 @@ public void exitRule(ParseTreeListener listener) { public final Create_user_identified_clauseContext create_user_identified_clause() throws RecognitionException { Create_user_identified_clauseContext _localctx = new Create_user_identified_clauseContext(_ctx, getState()); - enterRule(_localctx, 384, RULE_create_user_identified_clause); + enterRule(_localctx, 394, RULE_create_user_identified_clause); int _la; try { - setState(2253); + setState(2237); _errHandler.sync(this); switch (_input.LA(1)) { case ACCOUNT: @@ -16103,6 +16262,7 @@ public final Create_user_identified_clauseContext create_user_identified_clause( case AS: case ASC: case ARRAY_COLLECT: + case BEFORE: case BETWEEN: case BY: case CACHE: @@ -16120,11 +16280,13 @@ public final Create_user_identified_clauseContext create_user_identified_clause( case DESC: case DESCENDANTS: case DESCRIBE: + case DISABLE: case DISTINCT: case DROP: case ELEMENTOF: case ELEMENTS: case ELSE: + case ENABLE: case END: case ES_SHARDS: case ES_REPLICAS: @@ -16143,6 +16305,7 @@ public final Create_user_identified_clauseContext create_user_identified_clause( case IDENTIFIED: case IDENTITY: case IF: + case 
IMAGE: case IN: case INCREMENT: case INDEX: @@ -16239,26 +16402,26 @@ public final Create_user_identified_clauseContext create_user_identified_clause( case BAD_ID: enterOuterAlt(_localctx, 1); { - setState(2242); + setState(2226); id(); - setState(2243); + setState(2227); identified_clause(); - setState(2245); + setState(2229); _errHandler.sync(this); _la = _input.LA(1); if (_la==PASSWORD_EXPIRE) { { - setState(2244); + setState(2228); match(PASSWORD_EXPIRE); } } - setState(2248); + setState(2232); _errHandler.sync(this); _la = _input.LA(1); if (_la==PASSWORD) { { - setState(2247); + setState(2231); password_lifetime(); } } @@ -16269,9 +16432,9 @@ public final Create_user_identified_clauseContext create_user_identified_clause( case STRING: enterOuterAlt(_localctx, 2); { - setState(2250); + setState(2234); string(); - setState(2251); + setState(2235); match(IDENTIFIED_EXTERNALLY); } break; @@ -16312,13 +16475,13 @@ public void exitRule(ParseTreeListener listener) { public final By_passwordContext by_password() throws RecognitionException { By_passwordContext _localctx = new By_passwordContext(_ctx, getState()); - enterRule(_localctx, 386, RULE_by_password); + enterRule(_localctx, 396, RULE_by_password); try { enterOuterAlt(_localctx, 1); { - setState(2255); + setState(2239); match(BY); - setState(2256); + setState(2240); string(); } } @@ -16356,15 +16519,15 @@ public void exitRule(ParseTreeListener listener) { public final Password_lifetimeContext password_lifetime() throws RecognitionException { Password_lifetimeContext _localctx = new Password_lifetimeContext(_ctx, getState()); - enterRule(_localctx, 388, RULE_password_lifetime); + enterRule(_localctx, 398, RULE_password_lifetime); try { enterOuterAlt(_localctx, 1); { - setState(2258); + setState(2242); match(PASSWORD); - setState(2259); + setState(2243); match(LIFETIME); - setState(2260); + setState(2244); duration(); } } @@ -16401,19 +16564,19 @@ public void exitRule(ParseTreeListener listener) { public final Reset_password_clauseContext reset_password_clause() throws RecognitionException { Reset_password_clauseContext _localctx = new Reset_password_clauseContext(_ctx, getState()); - enterRule(_localctx, 390, RULE_reset_password_clause); + enterRule(_localctx, 400, RULE_reset_password_clause); int _la; try { enterOuterAlt(_localctx, 1); { - setState(2262); + setState(2246); identified_clause(); - setState(2264); + setState(2248); _errHandler.sync(this); _la = _input.LA(1); if (_la==RETAIN_CURRENT_PASSWORD) { { - setState(2263); + setState(2247); match(RETAIN_CURRENT_PASSWORD); } } @@ -16452,14 +16615,14 @@ public void exitRule(ParseTreeListener listener) { public final Account_lockContext account_lock() throws RecognitionException { Account_lockContext _localctx = new Account_lockContext(_ctx, getState()); - enterRule(_localctx, 392, RULE_account_lock); + enterRule(_localctx, 402, RULE_account_lock); int _la; try { enterOuterAlt(_localctx, 1); { - setState(2266); + setState(2250); match(ACCOUNT); - setState(2267); + setState(2251); _la = _input.LA(1); if ( !(_la==LOCK || _la==UNLOCK) ) { _errHandler.recoverInline(this); @@ -16507,15 +16670,15 @@ public void exitRule(ParseTreeListener listener) { public final Grant_rolesContext grant_roles() throws RecognitionException { Grant_rolesContext _localctx = new Grant_rolesContext(_ctx, getState()); - enterRule(_localctx, 394, RULE_grant_roles); + enterRule(_localctx, 404, RULE_grant_roles); try { enterOuterAlt(_localctx, 1); { - setState(2269); + setState(2253); id_list(); - setState(2270); + 
setState(2254); match(TO); - setState(2271); + setState(2255); principal(); } } @@ -16555,15 +16718,15 @@ public void exitRule(ParseTreeListener listener) { public final Grant_system_privilegesContext grant_system_privileges() throws RecognitionException { Grant_system_privilegesContext _localctx = new Grant_system_privilegesContext(_ctx, getState()); - enterRule(_localctx, 396, RULE_grant_system_privileges); + enterRule(_localctx, 406, RULE_grant_system_privileges); try { enterOuterAlt(_localctx, 1); { - setState(2273); + setState(2257); sys_priv_list(); - setState(2274); + setState(2258); match(TO); - setState(2275); + setState(2259); id(); } } @@ -16611,15 +16774,15 @@ public void exitRule(ParseTreeListener listener) { public final Grant_object_privilegesContext grant_object_privileges() throws RecognitionException { Grant_object_privilegesContext _localctx = new Grant_object_privilegesContext(_ctx, getState()); - enterRule(_localctx, 398, RULE_grant_object_privileges); + enterRule(_localctx, 408, RULE_grant_object_privileges); try { enterOuterAlt(_localctx, 1); { - setState(2277); + setState(2261); obj_priv_list(); - setState(2278); + setState(2262); match(ON); - setState(2282); + setState(2266); _errHandler.sync(this); switch (_input.LA(1)) { case ACCOUNT: @@ -16633,6 +16796,7 @@ public final Grant_object_privilegesContext grant_object_privileges() throws Rec case AS: case ASC: case ARRAY_COLLECT: + case BEFORE: case BETWEEN: case BY: case CACHE: @@ -16650,11 +16814,13 @@ public final Grant_object_privilegesContext grant_object_privileges() throws Rec case DESC: case DESCENDANTS: case DESCRIBE: + case DISABLE: case DISTINCT: case DROP: case ELEMENTOF: case ELEMENTS: case ELSE: + case ENABLE: case END: case ES_SHARDS: case ES_REPLICAS: @@ -16673,6 +16839,7 @@ public final Grant_object_privilegesContext grant_object_privileges() throws Rec case IDENTIFIED: case IDENTITY: case IF: + case IMAGE: case IN: case INCREMENT: case INDEX: @@ -16769,24 +16936,24 @@ public final Grant_object_privilegesContext grant_object_privileges() throws Rec case ID: case BAD_ID: { - setState(2279); + setState(2263); object(); } break; case NAMESPACE: { - setState(2280); + setState(2264); match(NAMESPACE); - setState(2281); + setState(2265); namespace(); } break; default: throw new NoViableAltException(this); } - setState(2284); + setState(2268); match(TO); - setState(2285); + setState(2269); id(); } } @@ -16826,15 +16993,15 @@ public void exitRule(ParseTreeListener listener) { public final Revoke_rolesContext revoke_roles() throws RecognitionException { Revoke_rolesContext _localctx = new Revoke_rolesContext(_ctx, getState()); - enterRule(_localctx, 400, RULE_revoke_roles); + enterRule(_localctx, 410, RULE_revoke_roles); try { enterOuterAlt(_localctx, 1); { - setState(2287); + setState(2271); id_list(); - setState(2288); + setState(2272); match(FROM); - setState(2289); + setState(2273); principal(); } } @@ -16874,15 +17041,15 @@ public void exitRule(ParseTreeListener listener) { public final Revoke_system_privilegesContext revoke_system_privileges() throws RecognitionException { Revoke_system_privilegesContext _localctx = new Revoke_system_privilegesContext(_ctx, getState()); - enterRule(_localctx, 402, RULE_revoke_system_privileges); + enterRule(_localctx, 412, RULE_revoke_system_privileges); try { enterOuterAlt(_localctx, 1); { - setState(2291); + setState(2275); sys_priv_list(); - setState(2292); + setState(2276); match(FROM); - setState(2293); + setState(2277); id(); } } @@ -16930,15 +17097,15 @@ public 
void exitRule(ParseTreeListener listener) { public final Revoke_object_privilegesContext revoke_object_privileges() throws RecognitionException { Revoke_object_privilegesContext _localctx = new Revoke_object_privilegesContext(_ctx, getState()); - enterRule(_localctx, 404, RULE_revoke_object_privileges); + enterRule(_localctx, 414, RULE_revoke_object_privileges); try { enterOuterAlt(_localctx, 1); { - setState(2295); + setState(2279); obj_priv_list(); - setState(2296); + setState(2280); match(ON); - setState(2300); + setState(2284); _errHandler.sync(this); switch (_input.LA(1)) { case ACCOUNT: @@ -16952,6 +17119,7 @@ public final Revoke_object_privilegesContext revoke_object_privileges() throws R case AS: case ASC: case ARRAY_COLLECT: + case BEFORE: case BETWEEN: case BY: case CACHE: @@ -16969,11 +17137,13 @@ public final Revoke_object_privilegesContext revoke_object_privileges() throws R case DESC: case DESCENDANTS: case DESCRIBE: + case DISABLE: case DISTINCT: case DROP: case ELEMENTOF: case ELEMENTS: case ELSE: + case ENABLE: case END: case ES_SHARDS: case ES_REPLICAS: @@ -16992,6 +17162,7 @@ public final Revoke_object_privilegesContext revoke_object_privileges() throws R case IDENTIFIED: case IDENTITY: case IF: + case IMAGE: case IN: case INCREMENT: case INDEX: @@ -17088,24 +17259,24 @@ public final Revoke_object_privilegesContext revoke_object_privileges() throws R case ID: case BAD_ID: { - setState(2297); + setState(2281); object(); } break; case NAMESPACE: { - setState(2298); + setState(2282); match(NAMESPACE); - setState(2299); + setState(2283); namespace(); } break; default: throw new NoViableAltException(this); } - setState(2302); + setState(2286); match(FROM); - setState(2303); + setState(2287); id(); } } @@ -17146,26 +17317,26 @@ public void exitRule(ParseTreeListener listener) { public final PrincipalContext principal() throws RecognitionException { PrincipalContext _localctx = new PrincipalContext(_ctx, getState()); - enterRule(_localctx, 406, RULE_principal); + enterRule(_localctx, 416, RULE_principal); try { enterOuterAlt(_localctx, 1); { - setState(2309); + setState(2293); _errHandler.sync(this); switch (_input.LA(1)) { case USER: { - setState(2305); + setState(2289); match(USER); - setState(2306); + setState(2290); identifier_or_string(); } break; case ROLE: { - setState(2307); + setState(2291); match(ROLE); - setState(2308); + setState(2292); id(); } break; @@ -17213,26 +17384,26 @@ public void exitRule(ParseTreeListener listener) { public final Sys_priv_listContext sys_priv_list() throws RecognitionException { Sys_priv_listContext _localctx = new Sys_priv_listContext(_ctx, getState()); - enterRule(_localctx, 408, RULE_sys_priv_list); + enterRule(_localctx, 418, RULE_sys_priv_list); int _la; try { enterOuterAlt(_localctx, 1); { - setState(2311); + setState(2295); priv_item(); - setState(2316); + setState(2300); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(2312); + setState(2296); match(COMMA); - setState(2313); + setState(2297); priv_item(); } } - setState(2318); + setState(2302); _errHandler.sync(this); _la = _input.LA(1); } @@ -17271,11 +17442,11 @@ public void exitRule(ParseTreeListener listener) { public final Priv_itemContext priv_item() throws RecognitionException { Priv_itemContext _localctx = new Priv_itemContext(_ctx, getState()); - enterRule(_localctx, 410, RULE_priv_item); + enterRule(_localctx, 420, RULE_priv_item); try { enterOuterAlt(_localctx, 1); { - setState(2321); + setState(2305); _errHandler.sync(this); switch 
(_input.LA(1)) { case ACCOUNT: @@ -17289,6 +17460,7 @@ public final Priv_itemContext priv_item() throws RecognitionException { case AS: case ASC: case ARRAY_COLLECT: + case BEFORE: case BETWEEN: case BY: case CACHE: @@ -17306,11 +17478,13 @@ public final Priv_itemContext priv_item() throws RecognitionException { case DESC: case DESCENDANTS: case DESCRIBE: + case DISABLE: case DISTINCT: case DROP: case ELEMENTOF: case ELEMENTS: case ELSE: + case ENABLE: case END: case ES_SHARDS: case ES_REPLICAS: @@ -17329,6 +17503,7 @@ public final Priv_itemContext priv_item() throws RecognitionException { case IDENTIFIED: case IDENTITY: case IF: + case IMAGE: case IN: case INCREMENT: case INDEX: @@ -17424,13 +17599,13 @@ public final Priv_itemContext priv_item() throws RecognitionException { case ID: case BAD_ID: { - setState(2319); + setState(2303); id(); } break; case ALL_PRIVILEGES: { - setState(2320); + setState(2304); match(ALL_PRIVILEGES); } break; @@ -17482,54 +17657,54 @@ public void exitRule(ParseTreeListener listener) { public final Obj_priv_listContext obj_priv_list() throws RecognitionException { Obj_priv_listContext _localctx = new Obj_priv_listContext(_ctx, getState()); - enterRule(_localctx, 412, RULE_obj_priv_list); + enterRule(_localctx, 422, RULE_obj_priv_list); int _la; try { enterOuterAlt(_localctx, 1); { - setState(2325); + setState(2309); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,265,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,251,_ctx) ) { case 1: { - setState(2323); + setState(2307); priv_item(); } break; case 2: { - setState(2324); + setState(2308); match(ALL); } break; } - setState(2334); + setState(2318); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(2327); + setState(2311); match(COMMA); - setState(2330); + setState(2314); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,266,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,252,_ctx) ) { case 1: { - setState(2328); + setState(2312); priv_item(); } break; case 2: { - setState(2329); + setState(2313); match(ALL); } break; } } } - setState(2336); + setState(2320); _errHandler.sync(this); _la = _input.LA(1); } @@ -17567,11 +17742,11 @@ public void exitRule(ParseTreeListener listener) { public final ObjectContext object() throws RecognitionException { ObjectContext _localctx = new ObjectContext(_ctx, getState()); - enterRule(_localctx, 414, RULE_object); + enterRule(_localctx, 424, RULE_object); try { enterOuterAlt(_localctx, 1); { - setState(2337); + setState(2321); table_name(); } } @@ -17619,22 +17794,22 @@ public void exitRule(ParseTreeListener listener) { public final Json_textContext json_text() throws RecognitionException { Json_textContext _localctx = new Json_textContext(_ctx, getState()); - enterRule(_localctx, 416, RULE_json_text); + enterRule(_localctx, 426, RULE_json_text); try { - setState(2346); + setState(2330); _errHandler.sync(this); switch (_input.LA(1)) { case LBRACE: enterOuterAlt(_localctx, 1); { - setState(2339); + setState(2323); jsobject(); } break; case LBRACK: enterOuterAlt(_localctx, 2); { - setState(2340); + setState(2324); jsarray(); } break; @@ -17642,7 +17817,7 @@ public final Json_textContext json_text() throws RecognitionException { case STRING: enterOuterAlt(_localctx, 3); { - setState(2341); + setState(2325); string(); } break; @@ -17652,28 +17827,28 @@ public final Json_textContext json_text() throws RecognitionException { case NUMBER: enterOuterAlt(_localctx, 4); { - 
setState(2342); + setState(2326); number(); } break; case TRUE: enterOuterAlt(_localctx, 5); { - setState(2343); + setState(2327); match(TRUE); } break; case FALSE: enterOuterAlt(_localctx, 6); { - setState(2344); + setState(2328); match(FALSE); } break; case NULL: enterOuterAlt(_localctx, 7); { - setState(2345); + setState(2329); match(NULL); } break; @@ -17745,37 +17920,37 @@ public void exitRule(ParseTreeListener listener) { public final JsobjectContext jsobject() throws RecognitionException { JsobjectContext _localctx = new JsobjectContext(_ctx, getState()); - enterRule(_localctx, 418, RULE_jsobject); + enterRule(_localctx, 428, RULE_jsobject); int _la; try { - setState(2361); + setState(2345); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,270,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,256,_ctx) ) { case 1: _localctx = new JsonObjectContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(2348); + setState(2332); match(LBRACE); - setState(2349); + setState(2333); jspair(); - setState(2354); + setState(2338); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(2350); + setState(2334); match(COMMA); - setState(2351); + setState(2335); jspair(); } } - setState(2356); + setState(2340); _errHandler.sync(this); _la = _input.LA(1); } - setState(2357); + setState(2341); match(RBRACE); } break; @@ -17783,9 +17958,9 @@ public final JsobjectContext jsobject() throws RecognitionException { _localctx = new EmptyJsonObjectContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(2359); + setState(2343); match(LBRACE); - setState(2360); + setState(2344); match(RBRACE); } break; @@ -17855,37 +18030,37 @@ public void exitRule(ParseTreeListener listener) { public final JsarrayContext jsarray() throws RecognitionException { JsarrayContext _localctx = new JsarrayContext(_ctx, getState()); - enterRule(_localctx, 420, RULE_jsarray); + enterRule(_localctx, 430, RULE_jsarray); int _la; try { - setState(2376); + setState(2360); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,272,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,258,_ctx) ) { case 1: _localctx = new ArrayOfJsonValuesContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(2363); + setState(2347); match(LBRACK); - setState(2364); + setState(2348); jsvalue(); - setState(2369); + setState(2353); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(2365); + setState(2349); match(COMMA); - setState(2366); + setState(2350); jsvalue(); } } - setState(2371); + setState(2355); _errHandler.sync(this); _la = _input.LA(1); } - setState(2372); + setState(2356); match(RBRACK); } break; @@ -17893,9 +18068,9 @@ public final JsarrayContext jsarray() throws RecognitionException { _localctx = new EmptyJsonArrayContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(2374); + setState(2358); match(LBRACK); - setState(2375); + setState(2359); match(RBRACK); } break; @@ -17944,16 +18119,16 @@ public void exitRule(ParseTreeListener listener) { public final JspairContext jspair() throws RecognitionException { JspairContext _localctx = new JspairContext(_ctx, getState()); - enterRule(_localctx, 422, RULE_jspair); + enterRule(_localctx, 432, RULE_jspair); try { _localctx = new JsonPairContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(2378); + setState(2362); match(DSTRING); - setState(2379); + setState(2363); match(COLON); - setState(2380); + setState(2364); jsvalue(); } } @@ -18032,16 +18207,16 @@ 
public void exitRule(ParseTreeListener listener) { public final JsvalueContext jsvalue() throws RecognitionException { JsvalueContext _localctx = new JsvalueContext(_ctx, getState()); - enterRule(_localctx, 424, RULE_jsvalue); + enterRule(_localctx, 434, RULE_jsvalue); try { - setState(2389); + setState(2373); _errHandler.sync(this); switch (_input.LA(1)) { case LBRACE: _localctx = new JsonObjectValueContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(2382); + setState(2366); jsobject(); } break; @@ -18049,7 +18224,7 @@ public final JsvalueContext jsvalue() throws RecognitionException { _localctx = new JsonArrayValueContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(2383); + setState(2367); jsarray(); } break; @@ -18057,7 +18232,7 @@ public final JsvalueContext jsvalue() throws RecognitionException { _localctx = new JsonAtomContext(_localctx); enterOuterAlt(_localctx, 3); { - setState(2384); + setState(2368); match(DSTRING); } break; @@ -18068,7 +18243,7 @@ public final JsvalueContext jsvalue() throws RecognitionException { _localctx = new JsonAtomContext(_localctx); enterOuterAlt(_localctx, 4); { - setState(2385); + setState(2369); number(); } break; @@ -18076,7 +18251,7 @@ public final JsvalueContext jsvalue() throws RecognitionException { _localctx = new JsonAtomContext(_localctx); enterOuterAlt(_localctx, 5); { - setState(2386); + setState(2370); match(TRUE); } break; @@ -18084,7 +18259,7 @@ public final JsvalueContext jsvalue() throws RecognitionException { _localctx = new JsonAtomContext(_localctx); enterOuterAlt(_localctx, 6); { - setState(2387); + setState(2371); match(FALSE); } break; @@ -18092,7 +18267,7 @@ public final JsvalueContext jsvalue() throws RecognitionException { _localctx = new JsonAtomContext(_localctx); enterOuterAlt(_localctx, 7); { - setState(2388); + setState(2372); match(NULL); } break; @@ -18133,13 +18308,13 @@ public void exitRule(ParseTreeListener listener) { public final CommentContext comment() throws RecognitionException { CommentContext _localctx = new CommentContext(_ctx, getState()); - enterRule(_localctx, 426, RULE_comment); + enterRule(_localctx, 436, RULE_comment); try { enterOuterAlt(_localctx, 1); { - setState(2391); + setState(2375); match(COMMENT); - setState(2392); + setState(2376); string(); } } @@ -18176,13 +18351,13 @@ public void exitRule(ParseTreeListener listener) { public final DurationContext duration() throws RecognitionException { DurationContext _localctx = new DurationContext(_ctx, getState()); - enterRule(_localctx, 428, RULE_duration); + enterRule(_localctx, 438, RULE_duration); try { enterOuterAlt(_localctx, 1); { - setState(2394); + setState(2378); match(INT); - setState(2395); + setState(2379); time_unit(); } } @@ -18219,12 +18394,12 @@ public void exitRule(ParseTreeListener listener) { public final Time_unitContext time_unit() throws RecognitionException { Time_unitContext _localctx = new Time_unitContext(_ctx, getState()); - enterRule(_localctx, 430, RULE_time_unit); + enterRule(_localctx, 440, RULE_time_unit); int _la; try { enterOuterAlt(_localctx, 1); { - setState(2397); + setState(2381); _la = _input.LA(1); if ( !(_la==DAYS || _la==HOURS || _la==MINUTES || _la==SECONDS) ) { _errHandler.recoverInline(this); @@ -18269,24 +18444,24 @@ public void exitRule(ParseTreeListener listener) { public final NumberContext number() throws RecognitionException { NumberContext _localctx = new NumberContext(_ctx, getState()); - enterRule(_localctx, 432, RULE_number); + enterRule(_localctx, 442, RULE_number); int _la; 
try { enterOuterAlt(_localctx, 1); { - setState(2400); + setState(2384); _errHandler.sync(this); _la = _input.LA(1); if (_la==MINUS) { { - setState(2399); + setState(2383); match(MINUS); } } - setState(2402); + setState(2386); _la = _input.LA(1); - if ( !(((((_la - 200)) & ~0x3f) == 0 && ((1L << (_la - 200)) & 7L) != 0)) ) { + if ( !(((((_la - 205)) & ~0x3f) == 0 && ((1L << (_la - 205)) & 7L) != 0)) ) { _errHandler.recoverInline(this); } else { @@ -18328,17 +18503,17 @@ public void exitRule(ParseTreeListener listener) { public final Signed_intContext signed_int() throws RecognitionException { Signed_intContext _localctx = new Signed_intContext(_ctx, getState()); - enterRule(_localctx, 434, RULE_signed_int); + enterRule(_localctx, 444, RULE_signed_int); int _la; try { enterOuterAlt(_localctx, 1); { - setState(2405); + setState(2389); _errHandler.sync(this); _la = _input.LA(1); if (_la==PLUS || _la==MINUS) { { - setState(2404); + setState(2388); _la = _input.LA(1); if ( !(_la==PLUS || _la==MINUS) ) { _errHandler.recoverInline(this); @@ -18351,7 +18526,7 @@ public final Signed_intContext signed_int() throws RecognitionException { } } - setState(2407); + setState(2391); match(INT); } } @@ -18386,12 +18561,12 @@ public void exitRule(ParseTreeListener listener) { public final StringContext string() throws RecognitionException { StringContext _localctx = new StringContext(_ctx, getState()); - enterRule(_localctx, 436, RULE_string); + enterRule(_localctx, 446, RULE_string); int _la; try { enterOuterAlt(_localctx, 1); { - setState(2409); + setState(2393); _la = _input.LA(1); if ( !(_la==DSTRING || _la==STRING) ) { _errHandler.recoverInline(this); @@ -18442,30 +18617,30 @@ public void exitRule(ParseTreeListener listener) { public final Id_listContext id_list() throws RecognitionException { Id_listContext _localctx = new Id_listContext(_ctx, getState()); - enterRule(_localctx, 438, RULE_id_list); + enterRule(_localctx, 448, RULE_id_list); try { int _alt; enterOuterAlt(_localctx, 1); { - setState(2411); + setState(2395); id(); - setState(2416); + setState(2400); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,276,_ctx); + _alt = getInterpreter().adaptivePredict(_input,262,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(2412); + setState(2396); match(COMMA); - setState(2413); + setState(2397); id(); } } } - setState(2418); + setState(2402); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,276,_ctx); + _alt = getInterpreter().adaptivePredict(_input,262,_ctx); } } } @@ -18497,6 +18672,7 @@ public static class IdContext extends ParserRuleContext { public TerminalNode ARRAY_COLLECT() { return getToken(KVQLParser.ARRAY_COLLECT, 0); } public TerminalNode AS() { return getToken(KVQLParser.AS, 0); } public TerminalNode ASC() { return getToken(KVQLParser.ASC, 0); } + public TerminalNode BEFORE() { return getToken(KVQLParser.BEFORE, 0); } public TerminalNode BETWEEN() { return getToken(KVQLParser.BETWEEN, 0); } public TerminalNode BY() { return getToken(KVQLParser.BY, 0); } public TerminalNode CACHE() { return getToken(KVQLParser.CACHE, 0); } @@ -18514,11 +18690,13 @@ public static class IdContext extends ParserRuleContext { public TerminalNode DESC() { return getToken(KVQLParser.DESC, 0); } public TerminalNode DESCENDANTS() { return getToken(KVQLParser.DESCENDANTS, 0); } public TerminalNode DESCRIBE() { return getToken(KVQLParser.DESCRIBE, 0); } + public TerminalNode DISABLE() { return 
getToken(KVQLParser.DISABLE, 0); } public TerminalNode DISTINCT() { return getToken(KVQLParser.DISTINCT, 0); } public TerminalNode DROP() { return getToken(KVQLParser.DROP, 0); } public TerminalNode ELEMENTOF() { return getToken(KVQLParser.ELEMENTOF, 0); } public TerminalNode ELEMENTS() { return getToken(KVQLParser.ELEMENTS, 0); } public TerminalNode ELSE() { return getToken(KVQLParser.ELSE, 0); } + public TerminalNode ENABLE() { return getToken(KVQLParser.ENABLE, 0); } public TerminalNode END() { return getToken(KVQLParser.END, 0); } public TerminalNode ES_SHARDS() { return getToken(KVQLParser.ES_SHARDS, 0); } public TerminalNode ES_REPLICAS() { return getToken(KVQLParser.ES_REPLICAS, 0); } @@ -18538,6 +18716,7 @@ public static class IdContext extends ParserRuleContext { public TerminalNode IDENTITY() { return getToken(KVQLParser.IDENTITY, 0); } public TerminalNode IF() { return getToken(KVQLParser.IF, 0); } public TerminalNode INCREMENT() { return getToken(KVQLParser.INCREMENT, 0); } + public TerminalNode IMAGE() { return getToken(KVQLParser.IMAGE, 0); } public TerminalNode INDEX() { return getToken(KVQLParser.INDEX, 0); } public TerminalNode INDEXES() { return getToken(KVQLParser.INDEXES, 0); } public TerminalNode INSERT() { return getToken(KVQLParser.INSERT, 0); } @@ -18644,9 +18823,9 @@ public void exitRule(ParseTreeListener listener) { public final IdContext id() throws RecognitionException { IdContext _localctx = new IdContext(_ctx, getState()); - enterRule(_localctx, 440, RULE_id); + enterRule(_localctx, 450, RULE_id); try { - setState(2568); + setState(2556); _errHandler.sync(this); switch (_input.LA(1)) { case ACCOUNT: @@ -18660,6 +18839,7 @@ public final IdContext id() throws RecognitionException { case AS: case ASC: case ARRAY_COLLECT: + case BEFORE: case BETWEEN: case BY: case CACHE: @@ -18677,11 +18857,13 @@ public final IdContext id() throws RecognitionException { case DESC: case DESCENDANTS: case DESCRIBE: + case DISABLE: case DISTINCT: case DROP: case ELEMENTOF: case ELEMENTS: case ELSE: + case ENABLE: case END: case ES_SHARDS: case ES_REPLICAS: @@ -18700,6 +18882,7 @@ public final IdContext id() throws RecognitionException { case IDENTIFIED: case IDENTITY: case IF: + case IMAGE: case IN: case INCREMENT: case INDEX: @@ -18795,872 +18978,896 @@ public final IdContext id() throws RecognitionException { case ID: enterOuterAlt(_localctx, 1); { - setState(2564); + setState(2552); _errHandler.sync(this); switch (_input.LA(1)) { case ACCOUNT: { - setState(2419); + setState(2403); match(ACCOUNT); } break; case ADD: { - setState(2420); + setState(2404); match(ADD); } break; case ADMIN: { - setState(2421); + setState(2405); match(ADMIN); } break; case ALL: { - setState(2422); + setState(2406); match(ALL); } break; case ALTER: { - setState(2423); + setState(2407); match(ALTER); } break; case ALWAYS: { - setState(2424); + setState(2408); match(ALWAYS); } break; case ANCESTORS: { - setState(2425); + setState(2409); match(ANCESTORS); } break; case AND: { - setState(2426); + setState(2410); match(AND); } break; case ANY_T: { - setState(2427); + setState(2411); match(ANY_T); } break; case ANYATOMIC_T: { - setState(2428); + setState(2412); match(ANYATOMIC_T); } break; case ANYJSONATOMIC_T: { - setState(2429); + setState(2413); match(ANYJSONATOMIC_T); } break; case ANYRECORD_T: { - setState(2430); + setState(2414); match(ANYRECORD_T); } break; case ARRAY_COLLECT: { - setState(2431); + setState(2415); match(ARRAY_COLLECT); } break; case AS: { - setState(2432); + setState(2416); match(AS); } 
break; case ASC: { - setState(2433); + setState(2417); match(ASC); } break; + case BEFORE: + { + setState(2418); + match(BEFORE); + } + break; case BETWEEN: { - setState(2434); + setState(2419); match(BETWEEN); } break; case BY: { - setState(2435); + setState(2420); match(BY); } break; case CACHE: { - setState(2436); + setState(2421); match(CACHE); } break; case CASE: { - setState(2437); + setState(2422); match(CASE); } break; case CAST: { - setState(2438); + setState(2423); match(CAST); } break; case COLLECTION: { - setState(2439); + setState(2424); match(COLLECTION); } break; case COMMENT: { - setState(2440); + setState(2425); match(COMMENT); } break; case COUNT: { - setState(2441); + setState(2426); match(COUNT); } break; case CREATE: { - setState(2442); + setState(2427); match(CREATE); } break; case CYCLE: { - setState(2443); + setState(2428); match(CYCLE); } break; case DAYS: { - setState(2444); + setState(2429); match(DAYS); } break; case DECLARE: { - setState(2445); + setState(2430); match(DECLARE); } break; case DEFAULT: { - setState(2446); + setState(2431); match(DEFAULT); } break; case DELETE: { - setState(2447); + setState(2432); match(DELETE); } break; case DESC: { - setState(2448); + setState(2433); match(DESC); } break; case DESCENDANTS: { - setState(2449); + setState(2434); match(DESCENDANTS); } break; case DESCRIBE: { - setState(2450); + setState(2435); match(DESCRIBE); } break; + case DISABLE: + { + setState(2436); + match(DISABLE); + } + break; case DISTINCT: { - setState(2451); + setState(2437); match(DISTINCT); } break; case DROP: { - setState(2452); + setState(2438); match(DROP); } break; case ELEMENTOF: { - setState(2453); + setState(2439); match(ELEMENTOF); } break; case ELEMENTS: { - setState(2454); + setState(2440); match(ELEMENTS); } break; case ELSE: { - setState(2455); + setState(2441); match(ELSE); } break; + case ENABLE: + { + setState(2442); + match(ENABLE); + } + break; case END: { - setState(2456); + setState(2443); match(END); } break; case ES_SHARDS: { - setState(2457); + setState(2444); match(ES_SHARDS); } break; case ES_REPLICAS: { - setState(2458); + setState(2445); match(ES_REPLICAS); } break; case EXISTS: { - setState(2459); + setState(2446); match(EXISTS); } break; case EXTRACT: { - setState(2460); + setState(2447); match(EXTRACT); } break; case FIELDS: { - setState(2461); + setState(2448); match(FIELDS); } break; case FIRST: { - setState(2462); + setState(2449); match(FIRST); } break; case FREEZE: { - setState(2463); + setState(2450); match(FREEZE); } break; case FROM: { - setState(2464); + setState(2451); match(FROM); } break; case FROZEN: { - setState(2465); + setState(2452); match(FROZEN); } break; case FULLTEXT: { - setState(2466); + setState(2453); match(FULLTEXT); } break; case GENERATED: { - setState(2467); + setState(2454); match(GENERATED); } break; case GRANT: { - setState(2468); + setState(2455); match(GRANT); } break; case GROUP: { - setState(2469); + setState(2456); match(GROUP); } break; case HOURS: { - setState(2470); + setState(2457); match(HOURS); } break; case IDENTIFIED: { - setState(2471); + setState(2458); match(IDENTIFIED); } break; case IDENTITY: { - setState(2472); + setState(2459); match(IDENTITY); } break; case IF: { - setState(2473); + setState(2460); match(IF); } break; case INCREMENT: { - setState(2474); + setState(2461); match(INCREMENT); } break; + case IMAGE: + { + setState(2462); + match(IMAGE); + } + break; case INDEX: { - setState(2475); + setState(2463); match(INDEX); } break; case INDEXES: { - setState(2476); + 
setState(2464); match(INDEXES); } break; case INSERT: { - setState(2477); + setState(2465); match(INSERT); } break; case INTO: { - setState(2478); + setState(2466); match(INTO); } break; case IN: { - setState(2479); + setState(2467); match(IN); } break; case IS: { - setState(2480); + setState(2468); match(IS); } break; case JSON: { - setState(2481); + setState(2469); match(JSON); } break; case KEY: { - setState(2482); + setState(2470); match(KEY); } break; case KEYOF: { - setState(2483); + setState(2471); match(KEYOF); } break; case KEYS: { - setState(2484); + setState(2472); match(KEYS); } break; case LIFETIME: { - setState(2485); + setState(2473); match(LIFETIME); } break; case LAST: { - setState(2486); + setState(2474); match(LAST); } break; case LIMIT: { - setState(2487); + setState(2475); match(LIMIT); } break; case LOCAL: { - setState(2488); + setState(2476); match(LOCAL); } break; case LOCK: { - setState(2489); + setState(2477); match(LOCK); } break; case MERGE: { - setState(2490); + setState(2478); match(MERGE); } break; case MINUTES: { - setState(2491); + setState(2479); match(MINUTES); } break; case MODIFY: { - setState(2492); + setState(2480); match(MODIFY); } break; case MR_COUNTER: { - setState(2493); + setState(2481); match(MR_COUNTER); - setState(2494); + setState(2482); match(NAMESPACE); } break; case NAMESPACES: { - setState(2495); + setState(2483); match(NAMESPACES); } break; case NESTED: { - setState(2496); + setState(2484); match(NESTED); } break; case NO: { - setState(2497); + setState(2485); match(NO); } break; case NOT: { - setState(2498); + setState(2486); match(NOT); } break; case NULLS: { - setState(2499); + setState(2487); match(NULLS); } break; case OF: { - setState(2500); + setState(2488); match(OF); } break; case OFFSET: { - setState(2501); + setState(2489); match(OFFSET); } break; case ON: { - setState(2502); + setState(2490); match(ON); } break; case OR: { - setState(2503); + setState(2491); match(OR); } break; case ORDER: { - setState(2504); + setState(2492); match(ORDER); } break; case OVERRIDE: { - setState(2505); + setState(2493); match(OVERRIDE); } break; case PER: { - setState(2506); + setState(2494); match(PER); } break; case PASSWORD: { - setState(2507); + setState(2495); match(PASSWORD); } break; case PATCH: { - setState(2508); + setState(2496); match(PATCH); } break; case PRIMARY: { - setState(2509); + setState(2497); match(PRIMARY); } break; case PUT: { - setState(2510); + setState(2498); match(PUT); } break; case RDIV: { - setState(2511); + setState(2499); match(RDIV); } break; case REGION: { - setState(2512); + setState(2500); match(REGION); } break; case REGIONS: { - setState(2513); + setState(2501); match(REGIONS); } break; case REMOVE: { - setState(2514); + setState(2502); match(REMOVE); } break; case RETURNING: { - setState(2515); + setState(2503); match(RETURNING); } break; case ROW: { - setState(2516); + setState(2504); match(ROW); } break; case ROLE: { - setState(2517); + setState(2505); match(ROLE); } break; case ROLES: { - setState(2518); + setState(2506); match(ROLES); } break; case REVOKE: { - setState(2519); + setState(2507); match(REVOKE); } break; case SCHEMA: { - setState(2520); + setState(2508); match(SCHEMA); } break; case SECONDS: { - setState(2521); + setState(2509); match(SECONDS); } break; case SELECT: { - setState(2522); + setState(2510); match(SELECT); } break; case SEQ_TRANSFORM: { - setState(2523); + setState(2511); match(SEQ_TRANSFORM); } break; case SET: { - setState(2524); + setState(2512); match(SET); } break; case 
SHARD: { - setState(2525); + setState(2513); match(SHARD); } break; case SHOW: { - setState(2526); + setState(2514); match(SHOW); } break; case START: { - setState(2527); + setState(2515); match(START); } break; case TABLE: { - setState(2528); + setState(2516); match(TABLE); } break; case TABLES: { - setState(2529); + setState(2517); match(TABLES); } break; case THEN: { - setState(2530); + setState(2518); match(THEN); } break; case TO: { - setState(2531); + setState(2519); match(TO); } break; case TTL: { - setState(2532); + setState(2520); match(TTL); } break; case TYPE: { - setState(2533); + setState(2521); match(TYPE); } break; case UNFREEZE: { - setState(2534); + setState(2522); match(UNFREEZE); } break; case UNLOCK: { - setState(2535); + setState(2523); match(UNLOCK); } break; case UNIQUE: { - setState(2536); + setState(2524); match(UNIQUE); } break; case UNNEST: { - setState(2537); + setState(2525); match(UNNEST); } break; case UPDATE: { - setState(2538); + setState(2526); match(UPDATE); } break; case UPSERT: { - setState(2539); + setState(2527); match(UPSERT); } break; case USER: { - setState(2540); + setState(2528); match(USER); } break; case USERS: { - setState(2541); + setState(2529); match(USERS); } break; case USING: { - setState(2542); + setState(2530); match(USING); } break; case VALUES: { - setState(2543); + setState(2531); match(VALUES); } break; case WHEN: { - setState(2544); + setState(2532); match(WHEN); } break; case WHERE: { - setState(2545); + setState(2533); match(WHERE); } break; case WITH: { - setState(2546); + setState(2534); match(WITH); } break; case ARRAY_T: { - setState(2547); + setState(2535); match(ARRAY_T); } break; case BINARY_T: { - setState(2548); + setState(2536); match(BINARY_T); } break; case BOOLEAN_T: { - setState(2549); + setState(2537); match(BOOLEAN_T); } break; case DOUBLE_T: { - setState(2550); + setState(2538); match(DOUBLE_T); } break; case ENUM_T: { - setState(2551); + setState(2539); match(ENUM_T); } break; case FLOAT_T: { - setState(2552); + setState(2540); match(FLOAT_T); } break; case GEOMETRY_T: { - setState(2553); + setState(2541); match(GEOMETRY_T); } break; case LONG_T: { - setState(2554); + setState(2542); match(LONG_T); } break; case INTEGER_T: { - setState(2555); + setState(2543); match(INTEGER_T); } break; case MAP_T: { - setState(2556); + setState(2544); match(MAP_T); } break; case NUMBER_T: { - setState(2557); + setState(2545); match(NUMBER_T); } break; case POINT_T: { - setState(2558); + setState(2546); match(POINT_T); } break; case RECORD_T: { - setState(2559); + setState(2547); match(RECORD_T); } break; case STRING_T: { - setState(2560); + setState(2548); match(STRING_T); } break; case TIMESTAMP_T: { - setState(2561); + setState(2549); match(TIMESTAMP_T); } break; case SCALAR_T: { - setState(2562); + setState(2550); match(SCALAR_T); } break; case ID: { - setState(2563); + setState(2551); match(ID); } break; @@ -19672,7 +19879,7 @@ public final IdContext id() throws RecognitionException { case BAD_ID: enterOuterAlt(_localctx, 2); { - setState(2566); + setState(2554); match(BAD_ID); notifyErrorListeners("Identifiers must start with a letter: " + _input.getText(_localctx.start, _input.LT(-1))); @@ -19719,7 +19926,7 @@ private boolean and_expr_sempred(And_exprContext _localctx, int predIndex) { } private static final String _serializedATNSegment0 = - "\u0004\u0001\u00d4\u0a0b\u0002\u0000\u0007\u0000\u0002\u0001\u0007\u0001"+ + "\u0004\u0001\u00d9\u09ff\u0002\u0000\u0007\u0000\u0002\u0001\u0007\u0001"+ 
"\u0002\u0002\u0007\u0002\u0002\u0003\u0007\u0003\u0002\u0004\u0007\u0004"+ "\u0002\u0005\u0007\u0005\u0002\u0006\u0007\u0006\u0002\u0007\u0007\u0007"+ "\u0002\b\u0007\b\u0002\t\u0007\t\u0002\n\u0007\n\u0002\u000b\u0007\u000b"+ @@ -19779,1753 +19986,1735 @@ private boolean and_expr_sempred(And_exprContext _localctx, int predIndex) { "\u00d1\u0002\u00d2\u0007\u00d2\u0002\u00d3\u0007\u00d3\u0002\u00d4\u0007"+ "\u00d4\u0002\u00d5\u0007\u00d5\u0002\u00d6\u0007\u00d6\u0002\u00d7\u0007"+ "\u00d7\u0002\u00d8\u0007\u00d8\u0002\u00d9\u0007\u00d9\u0002\u00da\u0007"+ - "\u00da\u0002\u00db\u0007\u00db\u0002\u00dc\u0007\u00dc\u0001\u0000\u0001"+ - "\u0000\u0001\u0000\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001"+ + "\u00da\u0002\u00db\u0007\u00db\u0002\u00dc\u0007\u00dc\u0002\u00dd\u0007"+ + "\u00dd\u0002\u00de\u0007\u00de\u0002\u00df\u0007\u00df\u0002\u00e0\u0007"+ + "\u00e0\u0002\u00e1\u0007\u00e1\u0001\u0000\u0001\u0000\u0001\u0000\u0001"+ + "\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001"+ "\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001"+ "\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001"+ "\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001"+ - "\u0001\u0001\u0001\u0001\u0001\u0003\u0001\u01d7\b\u0001\u0001\u0002\u0003"+ - "\u0002\u01da\b\u0002\u0001\u0002\u0001\u0002\u0001\u0003\u0001\u0003\u0001"+ - "\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0005\u0003\u01e4\b\u0003\n"+ - "\u0003\f\u0003\u01e7\t\u0003\u0001\u0004\u0001\u0004\u0001\u0004\u0001"+ - "\u0005\u0001\u0005\u0001\u0005\u0001\u0006\u0001\u0006\u0001\u0007\u0001"+ - "\u0007\u0001\u0007\u0003\u0007\u01f4\b\u0007\u0001\u0007\u0003\u0007\u01f7"+ - "\b\u0007\u0001\u0007\u0003\u0007\u01fa\b\u0007\u0001\u0007\u0003\u0007"+ - "\u01fd\b\u0007\u0001\u0007\u0003\u0007\u0200\b\u0007\u0001\b\u0001\b\u0001"+ - "\b\u0001\b\u0005\b\u0206\b\b\n\b\f\b\u0209\t\b\u0001\b\u0001\b\u0001\b"+ - "\u0003\b\u020e\b\b\u0001\b\u0001\b\u0001\b\u0003\b\u0213\b\b\u0005\b\u0215"+ - "\b\b\n\b\f\b\u0218\t\b\u0001\t\u0001\t\u0001\t\u0003\t\u021d\b\t\u0001"+ - "\n\u0001\n\u0001\n\u0001\n\u0001\n\u0001\n\u0001\n\u0001\n\u0001\n\u0003"+ - "\n\u0228\b\n\u0001\n\u0001\n\u0001\n\u0001\n\u0001\n\u0003\n\u022f\b\n"+ - "\u0001\n\u0001\n\u0001\u000b\u0001\u000b\u0001\u000b\u0005\u000b\u0236"+ - "\b\u000b\n\u000b\f\u000b\u0239\t\u000b\u0001\f\u0001\f\u0001\f\u0005\f"+ - "\u023e\b\f\n\f\f\f\u0241\t\f\u0001\r\u0001\r\u0001\r\u0005\r\u0246\b\r"+ - "\n\r\f\r\u0249\t\r\u0001\u000e\u0001\u000e\u0001\u000e\u0001\u000f\u0001"+ - "\u000f\u0001\u000f\u0003\u000f\u0251\b\u000f\u0001\u0010\u0001\u0010\u0003"+ - "\u0010\u0255\b\u0010\u0001\u0010\u0003\u0010\u0258\b\u0010\u0001\u0011"+ - "\u0001\u0011\u0003\u0011\u025c\b\u0011\u0001\u0012\u0001\u0012\u0001\u0012"+ - "\u0001\u0012\u0003\u0012\u0262\b\u0012\u0001\u0012\u0001\u0012\u0001\u0012"+ - "\u0001\u0012\u0001\u0012\u0003\u0012\u0269\b\u0012\u0001\u0012\u0001\u0012"+ - "\u0005\u0012\u026d\b\u0012\n\u0012\f\u0012\u0270\t\u0012\u0001\u0012\u0001"+ - "\u0012\u0001\u0013\u0001\u0013\u0001\u0013\u0001\u0014\u0001\u0014\u0001"+ - "\u0014\u0001\u0015\u0003\u0015\u027b\b\u0015\u0001\u0015\u0003\u0015\u027e"+ - "\b\u0015\u0001\u0015\u0001\u0015\u0001\u0015\u0001\u0015\u0001\u0015\u0001"+ - "\u0015\u0001\u0015\u0005\u0015\u0287\b\u0015\n\u0015\f\u0015\u028a\t\u0015"+ - "\u0003\u0015\u028c\b\u0015\u0001\u0016\u0001\u0016\u0005\u0016\u0290\b"+ - "\u0016\n\u0016\f\u0016\u0293\t\u0016\u0001\u0016\u0001\u0016\u0001\u0017"+ - 
"\u0001\u0017\u0001\u0017\u0001\u0017\u0005\u0017\u029b\b\u0017\n\u0017"+ - "\f\u0017\u029e\t\u0017\u0001\u0017\u0001\u0017\u0001\u0017\u0001\u0017"+ - "\u0001\u0017\u0001\u0017\u0001\u0017\u0001\u0017\u0001\u0017\u0001\u0017"+ - "\u0001\u0017\u0001\u0017\u0001\u0017\u0001\u0017\u0001\u0017\u0001\u0017"+ - "\u0001\u0017\u0001\u0017\u0003\u0017\u02b2\b\u0017\u0001\u0017\u0003\u0017"+ - "\u02b5\b\u0017\u0001\u0018\u0001\u0018\u0003\u0018\u02b9\b\u0018\u0001"+ - "\u0019\u0001\u0019\u0001\u0019\u0001\u0019\u0001\u0019\u0001\u0019\u0001"+ - "\u0019\u0001\u0019\u0005\u0019\u02c3\b\u0019\n\u0019\f\u0019\u02c6\t\u0019"+ - "\u0001\u001a\u0003\u001a\u02c9\b\u001a\u0001\u001a\u0001\u001a\u0003\u001a"+ - "\u02cd\b\u001a\u0001\u001b\u0001\u001b\u0001\u001b\u0001\u001b\u0001\u001b"+ - "\u0005\u001b\u02d4\b\u001b\n\u001b\f\u001b\u02d7\t\u001b\u0001\u001c\u0001"+ - "\u001c\u0001\u001c\u0001\u001d\u0001\u001d\u0001\u001d\u0001\u001e\u0001"+ - "\u001e\u0001\u001e\u0001\u001e\u0001\u001e\u0001\u001e\u0005\u001e\u02e5"+ - "\b\u001e\n\u001e\f\u001e\u02e8\t\u001e\u0001\u001f\u0001\u001f\u0001\u001f"+ - "\u0001\u001f\u0001\u001f\u0001\u001f\u0005\u001f\u02f0\b\u001f\n\u001f"+ - "\f\u001f\u02f3\t\u001f\u0001 \u0003 \u02f6\b \u0001 \u0001 \u0001!\u0001"+ - "!\u0001!\u0003!\u02fd\b!\u0001!\u0003!\u0300\b!\u0001\"\u0001\"\u0001"+ - "\"\u0001\"\u0001\"\u0003\"\u0307\b\"\u0001#\u0001#\u0001#\u0001#\u0001"+ - "#\u0001#\u0001$\u0001$\u0001$\u0003$\u0312\b$\u0001$\u0001$\u0003$\u0316"+ - "\b$\u0001%\u0001%\u0001&\u0001&\u0001&\u0001&\u0001&\u0001&\u0003&\u0320"+ - "\b&\u0001\'\u0001\'\u0001\'\u0003\'\u0325\b\'\u0001(\u0001(\u0001(\u0001"+ - "(\u0001(\u0001(\u0004(\u032d\b(\u000b(\f(\u032e\u0001(\u0001(\u0001)\u0001"+ - ")\u0001)\u0001)\u0005)\u0337\b)\n)\f)\u033a\t)\u0001)\u0001)\u0001*\u0001"+ - "*\u0001*\u0001*\u0005*\u0342\b*\n*\f*\u0345\t*\u0001*\u0001*\u0001+\u0001"+ - "+\u0001+\u0001+\u0001+\u0001+\u0004+\u034f\b+\u000b+\f+\u0350\u0001+\u0001"+ - "+\u0001,\u0001,\u0001,\u0001,\u0001,\u0005,\u035a\b,\n,\f,\u035d\t,\u0001"+ - ",\u0001,\u0003,\u0361\b,\u0001,\u0001,\u0001,\u0001-\u0001-\u0001-\u0001"+ - ".\u0001.\u0001.\u0003.\u036c\b.\u0001.\u0001.\u0003.\u0370\b.\u0001.\u0001"+ - ".\u0003.\u0374\b.\u0001.\u0001.\u0001.\u0003.\u0379\b.\u0001.\u0005.\u037c"+ - "\b.\n.\f.\u037f\t.\u0001.\u0001.\u0001/\u0001/\u0001/\u0005/\u0386\b/"+ - "\n/\f/\u0389\t/\u00010\u00010\u00010\u00050\u038e\b0\n0\f0\u0391\t0\u0001"+ - "1\u00011\u00011\u00051\u0396\b1\n1\f1\u0399\t1\u00012\u00012\u00012\u0003"+ - "2\u039e\b2\u00013\u00013\u00013\u00053\u03a3\b3\n3\f3\u03a6\t3\u00014"+ - "\u00014\u00014\u00034\u03ab\b4\u00015\u00015\u00015\u00015\u00015\u0003"+ - "5\u03b2\b5\u00016\u00016\u00016\u00036\u03b7\b6\u00016\u00016\u00017\u0001"+ - "7\u00037\u03bd\b7\u00018\u00018\u00038\u03c1\b8\u00018\u00018\u00038\u03c5"+ - "\b8\u00018\u00018\u00019\u00019\u00039\u03cb\b9\u00019\u00019\u0001:\u0001"+ + "\u0001\u0003\u0001\u01e1\b\u0001\u0001\u0002\u0003\u0002\u01e4\b\u0002"+ + "\u0001\u0002\u0001\u0002\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0003"+ + "\u0001\u0003\u0001\u0003\u0005\u0003\u01ee\b\u0003\n\u0003\f\u0003\u01f1"+ + "\t\u0003\u0001\u0004\u0001\u0004\u0001\u0004\u0001\u0005\u0001\u0005\u0001"+ + "\u0005\u0001\u0006\u0001\u0006\u0001\u0007\u0001\u0007\u0001\u0007\u0003"+ + "\u0007\u01fe\b\u0007\u0001\u0007\u0003\u0007\u0201\b\u0007\u0001\u0007"+ + "\u0003\u0007\u0204\b\u0007\u0001\u0007\u0003\u0007\u0207\b\u0007\u0001"+ + "\u0007\u0003\u0007\u020a\b\u0007\u0001\b\u0001\b\u0001\b\u0001\b\u0005"+ + 
"\b\u0210\b\b\n\b\f\b\u0213\t\b\u0001\b\u0001\b\u0001\b\u0003\b\u0218\b"+ + "\b\u0001\b\u0001\b\u0001\b\u0003\b\u021d\b\b\u0005\b\u021f\b\b\n\b\f\b"+ + "\u0222\t\b\u0001\t\u0001\t\u0001\t\u0003\t\u0227\b\t\u0001\n\u0001\n\u0001"+ + "\n\u0001\n\u0001\n\u0001\n\u0001\n\u0001\n\u0001\n\u0003\n\u0232\b\n\u0001"+ + "\n\u0001\n\u0001\n\u0001\n\u0001\n\u0003\n\u0239\b\n\u0001\n\u0001\n\u0001"+ + "\u000b\u0001\u000b\u0001\u000b\u0005\u000b\u0240\b\u000b\n\u000b\f\u000b"+ + "\u0243\t\u000b\u0001\f\u0001\f\u0001\f\u0005\f\u0248\b\f\n\f\f\f\u024b"+ + "\t\f\u0001\r\u0001\r\u0001\r\u0005\r\u0250\b\r\n\r\f\r\u0253\t\r\u0001"+ + "\u000e\u0001\u000e\u0001\u000e\u0001\u000f\u0001\u000f\u0001\u000f\u0003"+ + "\u000f\u025b\b\u000f\u0001\u0010\u0001\u0010\u0003\u0010\u025f\b\u0010"+ + "\u0001\u0010\u0003\u0010\u0262\b\u0010\u0001\u0011\u0001\u0011\u0003\u0011"+ + "\u0266\b\u0011\u0001\u0012\u0001\u0012\u0001\u0012\u0001\u0012\u0003\u0012"+ + "\u026c\b\u0012\u0001\u0012\u0001\u0012\u0001\u0012\u0001\u0012\u0001\u0012"+ + "\u0003\u0012\u0273\b\u0012\u0001\u0012\u0001\u0012\u0005\u0012\u0277\b"+ + "\u0012\n\u0012\f\u0012\u027a\t\u0012\u0001\u0012\u0001\u0012\u0001\u0013"+ + "\u0001\u0013\u0001\u0013\u0001\u0014\u0001\u0014\u0001\u0014\u0001\u0015"+ + "\u0003\u0015\u0285\b\u0015\u0001\u0015\u0003\u0015\u0288\b\u0015\u0001"+ + "\u0015\u0001\u0015\u0001\u0015\u0001\u0015\u0001\u0015\u0001\u0015\u0001"+ + "\u0015\u0005\u0015\u0291\b\u0015\n\u0015\f\u0015\u0294\t\u0015\u0003\u0015"+ + "\u0296\b\u0015\u0001\u0016\u0001\u0016\u0005\u0016\u029a\b\u0016\n\u0016"+ + "\f\u0016\u029d\t\u0016\u0001\u0016\u0001\u0016\u0001\u0017\u0001\u0017"+ + "\u0001\u0017\u0001\u0017\u0005\u0017\u02a5\b\u0017\n\u0017\f\u0017\u02a8"+ + "\t\u0017\u0001\u0017\u0001\u0017\u0001\u0017\u0001\u0017\u0001\u0017\u0001"+ + "\u0017\u0001\u0017\u0001\u0017\u0001\u0017\u0001\u0017\u0001\u0017\u0001"+ + "\u0017\u0001\u0017\u0001\u0017\u0001\u0017\u0001\u0017\u0001\u0017\u0001"+ + "\u0017\u0003\u0017\u02bc\b\u0017\u0001\u0017\u0003\u0017\u02bf\b\u0017"+ + "\u0001\u0018\u0001\u0018\u0003\u0018\u02c3\b\u0018\u0001\u0019\u0001\u0019"+ + "\u0001\u0019\u0001\u0019\u0001\u0019\u0001\u0019\u0001\u0019\u0001\u0019"+ + "\u0005\u0019\u02cd\b\u0019\n\u0019\f\u0019\u02d0\t\u0019\u0001\u001a\u0003"+ + "\u001a\u02d3\b\u001a\u0001\u001a\u0001\u001a\u0003\u001a\u02d7\b\u001a"+ + "\u0001\u001b\u0001\u001b\u0001\u001b\u0001\u001b\u0001\u001b\u0005\u001b"+ + "\u02de\b\u001b\n\u001b\f\u001b\u02e1\t\u001b\u0001\u001c\u0001\u001c\u0001"+ + "\u001c\u0001\u001d\u0001\u001d\u0001\u001d\u0001\u001e\u0001\u001e\u0001"+ + "\u001e\u0001\u001e\u0001\u001e\u0001\u001e\u0005\u001e\u02ef\b\u001e\n"+ + "\u001e\f\u001e\u02f2\t\u001e\u0001\u001f\u0001\u001f\u0001\u001f\u0001"+ + "\u001f\u0001\u001f\u0001\u001f\u0005\u001f\u02fa\b\u001f\n\u001f\f\u001f"+ + "\u02fd\t\u001f\u0001 \u0003 \u0300\b \u0001 \u0001 \u0001!\u0001!\u0001"+ + "!\u0003!\u0307\b!\u0001!\u0003!\u030a\b!\u0001\"\u0001\"\u0001\"\u0001"+ + "\"\u0001\"\u0003\"\u0311\b\"\u0001#\u0001#\u0001#\u0001#\u0001#\u0001"+ + "#\u0001$\u0001$\u0001$\u0003$\u031c\b$\u0001$\u0001$\u0003$\u0320\b$\u0001"+ + "%\u0001%\u0001&\u0001&\u0001&\u0001&\u0001&\u0001&\u0003&\u032a\b&\u0001"+ + "\'\u0001\'\u0001\'\u0003\'\u032f\b\'\u0001(\u0001(\u0001(\u0001(\u0001"+ + "(\u0001(\u0004(\u0337\b(\u000b(\f(\u0338\u0001(\u0001(\u0001)\u0001)\u0001"+ + ")\u0001)\u0005)\u0341\b)\n)\f)\u0344\t)\u0001)\u0001)\u0001*\u0001*\u0001"+ + "*\u0001*\u0005*\u034c\b*\n*\f*\u034f\t*\u0001*\u0001*\u0001+\u0001+\u0001"+ + 
"+\u0001+\u0001+\u0001+\u0004+\u0359\b+\u000b+\f+\u035a\u0001+\u0001+\u0001"+ + ",\u0001,\u0001,\u0001,\u0001,\u0005,\u0364\b,\n,\f,\u0367\t,\u0001,\u0001"+ + ",\u0003,\u036b\b,\u0001,\u0001,\u0001,\u0001-\u0001-\u0001-\u0001.\u0001"+ + ".\u0001.\u0003.\u0376\b.\u0001.\u0001.\u0003.\u037a\b.\u0001.\u0001.\u0003"+ + ".\u037e\b.\u0001.\u0001.\u0001.\u0003.\u0383\b.\u0001.\u0005.\u0386\b"+ + ".\n.\f.\u0389\t.\u0001.\u0001.\u0001/\u0001/\u0001/\u0005/\u0390\b/\n"+ + "/\f/\u0393\t/\u00010\u00010\u00010\u00050\u0398\b0\n0\f0\u039b\t0\u0001"+ + "1\u00011\u00011\u00051\u03a0\b1\n1\f1\u03a3\t1\u00012\u00012\u00012\u0003"+ + "2\u03a8\b2\u00013\u00013\u00013\u00053\u03ad\b3\n3\f3\u03b0\t3\u00014"+ + "\u00014\u00014\u00034\u03b5\b4\u00015\u00015\u00015\u00015\u00015\u0003"+ + "5\u03bc\b5\u00016\u00016\u00016\u00036\u03c1\b6\u00016\u00016\u00017\u0001"+ + "7\u00037\u03c7\b7\u00018\u00018\u00038\u03cb\b8\u00018\u00018\u00038\u03cf"+ + "\b8\u00018\u00018\u00019\u00019\u00039\u03d5\b9\u00019\u00019\u0001:\u0001"+ ":\u0001:\u0001:\u0001:\u0001:\u0001:\u0001:\u0001:\u0001:\u0001:\u0001"+ - ":\u0001:\u0001:\u0003:\u03dd\b:\u0001;\u0001;\u0001;\u0001;\u0003;\u03e3"+ - "\b;\u0003;\u03e5\b;\u0001<\u0001<\u0001<\u0001<\u0001<\u0003<\u03ec\b"+ - "<\u0001=\u0001=\u0001>\u0001>\u0003>\u03f2\b>\u0001>\u0001>\u0005>\u03f6"+ - "\b>\n>\f>\u03f9\t>\u0001>\u0001>\u0001?\u0001?\u0001?\u0001?\u0001?\u0001"+ - "?\u0001?\u0001?\u0001?\u0005?\u0406\b?\n?\f?\u0409\t?\u0001?\u0001?\u0001"+ - "?\u0001?\u0003?\u040f\b?\u0001@\u0001@\u0001@\u0001@\u0001@\u0001@\u0001"+ - "@\u0001A\u0001A\u0001B\u0001B\u0001B\u0003B\u041d\bB\u0001B\u0001B\u0001"+ - "B\u0001C\u0001C\u0001C\u0001C\u0001C\u0005C\u0427\bC\nC\fC\u042a\tC\u0003"+ - "C\u042c\bC\u0001C\u0001C\u0001D\u0001D\u0001D\u0001D\u0001D\u0001E\u0001"+ + ":\u0001:\u0001:\u0003:\u03e7\b:\u0001;\u0001;\u0001;\u0001;\u0003;\u03ed"+ + "\b;\u0003;\u03ef\b;\u0001<\u0001<\u0001<\u0001<\u0001<\u0003<\u03f6\b"+ + "<\u0001=\u0001=\u0001>\u0001>\u0003>\u03fc\b>\u0001>\u0001>\u0005>\u0400"+ + "\b>\n>\f>\u0403\t>\u0001>\u0001>\u0001?\u0001?\u0001?\u0001?\u0001?\u0001"+ + "?\u0001?\u0001?\u0001?\u0005?\u0410\b?\n?\f?\u0413\t?\u0001?\u0001?\u0001"+ + "?\u0001?\u0003?\u0419\b?\u0001@\u0001@\u0001@\u0001@\u0001@\u0001@\u0001"+ + "@\u0001A\u0001A\u0001B\u0001B\u0001B\u0003B\u0427\bB\u0001B\u0001B\u0001"+ + "B\u0001C\u0001C\u0001C\u0001C\u0001C\u0005C\u0431\bC\nC\fC\u0434\tC\u0003"+ + "C\u0436\bC\u0001C\u0001C\u0001D\u0001D\u0001D\u0001D\u0001D\u0001E\u0001"+ "E\u0001E\u0001E\u0001E\u0001E\u0001F\u0001F\u0001F\u0001F\u0001F\u0001"+ - "F\u0001F\u0001F\u0001F\u0001F\u0005F\u0445\bF\nF\fF\u0448\tF\u0001F\u0001"+ - "F\u0003F\u044c\bF\u0001F\u0001F\u0001G\u0001G\u0001G\u0001G\u0001G\u0001"+ + "F\u0001F\u0001F\u0001F\u0001F\u0005F\u044f\bF\nF\fF\u0452\tF\u0001F\u0001"+ + "F\u0003F\u0456\bF\u0001F\u0001F\u0001G\u0001G\u0001G\u0001G\u0001G\u0001"+ "G\u0001G\u0001H\u0001H\u0001H\u0001H\u0001I\u0001I\u0001I\u0001I\u0001"+ - "I\u0001I\u0001I\u0001J\u0003J\u0463\bJ\u0001J\u0001J\u0001J\u0001J\u0003"+ - "J\u0469\bJ\u0001J\u0003J\u046c\bJ\u0001J\u0001J\u0001J\u0001J\u0005J\u0472"+ - "\bJ\nJ\fJ\u0475\tJ\u0001J\u0001J\u0003J\u0479\bJ\u0001J\u0001J\u0001J"+ - "\u0001J\u0001J\u0005J\u0480\bJ\nJ\fJ\u0483\tJ\u0001J\u0001J\u0001J\u0001"+ - "J\u0003J\u0489\bJ\u0001J\u0003J\u048c\bJ\u0001K\u0001K\u0003K\u0490\b"+ - "K\u0001L\u0001L\u0001L\u0001M\u0001M\u0003M\u0497\bM\u0001N\u0001N\u0001"+ - "N\u0001N\u0001N\u0001N\u0003N\u049f\bN\u0001O\u0003O\u04a2\bO\u0001O\u0001"+ - 
"O\u0001O\u0003O\u04a7\bO\u0001O\u0003O\u04aa\bO\u0001O\u0001O\u0001O\u0005"+ - "O\u04af\bO\nO\fO\u04b2\tO\u0001O\u0001O\u0001O\u0003O\u04b7\bO\u0001P"+ - "\u0001P\u0001P\u0001Q\u0001Q\u0001Q\u0001Q\u0001Q\u0003Q\u04c1\bQ\u0005"+ - "Q\u04c3\bQ\nQ\fQ\u04c6\tQ\u0001Q\u0001Q\u0001Q\u0001Q\u0001Q\u0003Q\u04cd"+ - "\bQ\u0005Q\u04cf\bQ\nQ\fQ\u04d2\tQ\u0001Q\u0001Q\u0001Q\u0001Q\u0001Q"+ - "\u0003Q\u04d9\bQ\u0005Q\u04db\bQ\nQ\fQ\u04de\tQ\u0001Q\u0001Q\u0001Q\u0001"+ - "Q\u0001Q\u0003Q\u04e5\bQ\u0005Q\u04e7\bQ\nQ\fQ\u04ea\tQ\u0001Q\u0001Q"+ - "\u0001Q\u0001Q\u0001Q\u0001Q\u0003Q\u04f2\bQ\u0005Q\u04f4\bQ\nQ\fQ\u04f7"+ - "\tQ\u0001Q\u0001Q\u0001Q\u0001Q\u0001Q\u0005Q\u04fe\bQ\nQ\fQ\u0501\tQ"+ - "\u0003Q\u0503\bQ\u0001R\u0001R\u0001R\u0001R\u0001S\u0003S\u050a\bS\u0001"+ - "S\u0001S\u0003S\u050e\bS\u0001S\u0003S\u0511\bS\u0001S\u0003S\u0514\b"+ - "S\u0001S\u0001S\u0001T\u0003T\u0519\bT\u0001T\u0001T\u0003T\u051d\bT\u0001"+ + "I\u0001I\u0001I\u0001J\u0003J\u046d\bJ\u0001J\u0001J\u0001J\u0001J\u0003"+ + "J\u0473\bJ\u0001J\u0003J\u0476\bJ\u0001J\u0001J\u0001J\u0001J\u0005J\u047c"+ + "\bJ\nJ\fJ\u047f\tJ\u0001J\u0001J\u0003J\u0483\bJ\u0001J\u0001J\u0001J"+ + "\u0001J\u0001J\u0005J\u048a\bJ\nJ\fJ\u048d\tJ\u0001J\u0001J\u0001J\u0001"+ + "J\u0003J\u0493\bJ\u0001J\u0003J\u0496\bJ\u0001K\u0001K\u0003K\u049a\b"+ + "K\u0001L\u0001L\u0001L\u0001M\u0001M\u0003M\u04a1\bM\u0001N\u0001N\u0001"+ + "N\u0001N\u0001N\u0001N\u0003N\u04a9\bN\u0001O\u0003O\u04ac\bO\u0001O\u0001"+ + "O\u0001O\u0003O\u04b1\bO\u0001O\u0003O\u04b4\bO\u0001O\u0001O\u0001O\u0005"+ + "O\u04b9\bO\nO\fO\u04bc\tO\u0001O\u0001O\u0001O\u0003O\u04c1\bO\u0001P"+ + "\u0001P\u0001P\u0001Q\u0001Q\u0001Q\u0001Q\u0001Q\u0003Q\u04cb\bQ\u0005"+ + "Q\u04cd\bQ\nQ\fQ\u04d0\tQ\u0001Q\u0001Q\u0001Q\u0001Q\u0001Q\u0003Q\u04d7"+ + "\bQ\u0005Q\u04d9\bQ\nQ\fQ\u04dc\tQ\u0001Q\u0001Q\u0001Q\u0001Q\u0001Q"+ + "\u0003Q\u04e3\bQ\u0005Q\u04e5\bQ\nQ\fQ\u04e8\tQ\u0001Q\u0001Q\u0001Q\u0001"+ + "Q\u0001Q\u0003Q\u04ef\bQ\u0005Q\u04f1\bQ\nQ\fQ\u04f4\tQ\u0001Q\u0001Q"+ + "\u0001Q\u0001Q\u0001Q\u0001Q\u0003Q\u04fc\bQ\u0005Q\u04fe\bQ\nQ\fQ\u0501"+ + "\tQ\u0001Q\u0001Q\u0001Q\u0001Q\u0001Q\u0005Q\u0508\bQ\nQ\fQ\u050b\tQ"+ + "\u0003Q\u050d\bQ\u0001R\u0001R\u0001R\u0001R\u0001S\u0003S\u0514\bS\u0001"+ + "S\u0001S\u0003S\u0518\bS\u0001S\u0003S\u051b\bS\u0001S\u0003S\u051e\b"+ + "S\u0001S\u0001S\u0001T\u0003T\u0523\bT\u0001T\u0001T\u0003T\u0527\bT\u0001"+ "T\u0001T\u0001U\u0001U\u0001V\u0001V\u0001V\u0001V\u0001V\u0001W\u0001"+ - "W\u0001W\u0001W\u0003W\u052c\bW\u0001X\u0001X\u0001X\u0001X\u0001X\u0001"+ - "X\u0003X\u0534\bX\u0001Y\u0001Y\u0001Z\u0001Z\u0001[\u0003[\u053b\b[\u0001"+ - "[\u0001[\u0001[\u0001[\u0003[\u0541\b[\u0001[\u0003[\u0544\b[\u0001[\u0001"+ - "[\u0003[\u0548\b[\u0001[\u0003[\u054b\b[\u0001\\\u0001\\\u0001\\\u0001"+ - "]\u0001]\u0003]\u0552\b]\u0001^\u0001^\u0001^\u0001^\u0001^\u0001^\u0001"+ - "^\u0001^\u0001^\u0001^\u0001^\u0001^\u0001^\u0001^\u0001^\u0003^\u0563"+ - "\b^\u0001_\u0001_\u0001_\u0001_\u0001_\u0005_\u056a\b_\n_\f_\u056d\t_"+ - "\u0001_\u0001_\u0001`\u0001`\u0001`\u0003`\u0574\b`\u0001`\u0003`\u0577"+ - "\b`\u0001a\u0001a\u0003a\u057b\ba\u0001a\u0001a\u0003a\u057f\ba\u0003"+ - "a\u0581\ba\u0001b\u0001b\u0001b\u0001b\u0001b\u0001b\u0003b\u0589\bb\u0001"+ + "W\u0001W\u0001W\u0003W\u0536\bW\u0001X\u0001X\u0001X\u0001X\u0001X\u0001"+ + "X\u0003X\u053e\bX\u0001Y\u0001Y\u0001Z\u0001Z\u0001[\u0003[\u0545\b[\u0001"+ + "[\u0001[\u0001[\u0001[\u0003[\u054b\b[\u0001[\u0003[\u054e\b[\u0001[\u0001"+ + 
"[\u0003[\u0552\b[\u0001[\u0003[\u0555\b[\u0001\\\u0001\\\u0001\\\u0001"+ + "]\u0001]\u0003]\u055c\b]\u0001^\u0001^\u0001^\u0001^\u0001^\u0001^\u0001"+ + "^\u0001^\u0001^\u0001^\u0001^\u0001^\u0001^\u0001^\u0001^\u0003^\u056d"+ + "\b^\u0001_\u0001_\u0001_\u0001_\u0001_\u0005_\u0574\b_\n_\f_\u0577\t_"+ + "\u0001_\u0001_\u0001`\u0001`\u0001`\u0003`\u057e\b`\u0001`\u0003`\u0581"+ + "\b`\u0001a\u0001a\u0003a\u0585\ba\u0001a\u0001a\u0003a\u0589\ba\u0003"+ + "a\u058b\ba\u0001b\u0001b\u0001b\u0001b\u0001b\u0001b\u0003b\u0593\bb\u0001"+ "c\u0001c\u0001c\u0001d\u0001d\u0001d\u0001d\u0001d\u0001e\u0001e\u0001"+ "e\u0001e\u0001e\u0001f\u0001f\u0001g\u0001g\u0001h\u0001h\u0001i\u0001"+ "i\u0001j\u0001j\u0001j\u0001j\u0001j\u0001j\u0001j\u0001j\u0001j\u0001"+ - "j\u0003j\u05aa\bj\u0001k\u0001k\u0001l\u0001l\u0001l\u0001l\u0003l\u05b2"+ - "\bl\u0001m\u0001m\u0001m\u0001m\u0003m\u05b8\bm\u0001n\u0001n\u0001o\u0001"+ - "o\u0001p\u0001p\u0001q\u0001q\u0001r\u0001r\u0001r\u0005r\u05c5\br\nr"+ - "\fr\u05c8\tr\u0001s\u0001s\u0001s\u0005s\u05cd\bs\ns\fs\u05d0\ts\u0001"+ - "t\u0003t\u05d3\bt\u0001t\u0001t\u0001u\u0001u\u0001u\u0005u\u05da\bu\n"+ - "u\fu\u05dd\tu\u0001v\u0001v\u0003v\u05e1\bv\u0001w\u0001w\u0001w\u0001"+ - "w\u0001w\u0003w\u05e8\bw\u0001w\u0001w\u0001x\u0001x\u0001x\u0001x\u0003"+ - "x\u05f0\bx\u0001x\u0001x\u0003x\u05f4\bx\u0001y\u0001y\u0001z\u0001z\u0001"+ + "j\u0003j\u05b4\bj\u0001k\u0001k\u0001l\u0001l\u0001l\u0001l\u0003l\u05bc"+ + "\bl\u0001m\u0001m\u0001m\u0001m\u0003m\u05c2\bm\u0001n\u0001n\u0001o\u0001"+ + "o\u0001p\u0001p\u0001q\u0001q\u0001r\u0001r\u0001r\u0005r\u05cf\br\nr"+ + "\fr\u05d2\tr\u0001s\u0001s\u0001s\u0005s\u05d7\bs\ns\fs\u05da\ts\u0001"+ + "t\u0003t\u05dd\bt\u0001t\u0001t\u0001u\u0001u\u0001u\u0005u\u05e4\bu\n"+ + "u\fu\u05e7\tu\u0001v\u0001v\u0003v\u05eb\bv\u0001w\u0001w\u0001w\u0001"+ + "w\u0001w\u0003w\u05f2\bw\u0001w\u0001w\u0001x\u0001x\u0001x\u0001x\u0003"+ + "x\u05fa\bx\u0001x\u0001x\u0003x\u05fe\bx\u0001y\u0001y\u0001z\u0001z\u0001"+ "z\u0001z\u0001{\u0001{\u0001{\u0001{\u0001|\u0001|\u0001|\u0001|\u0001"+ - "|\u0001}\u0001}\u0001}\u0001}\u0001}\u0003}\u060a\b}\u0001}\u0001}\u0003"+ - "}\u060e\b}\u0001}\u0001}\u0001}\u0001}\u0001}\u0001~\u0001~\u0001~\u0003"+ - "~\u0618\b~\u0001~\u0001~\u0001\u007f\u0001\u007f\u0001\u0080\u0001\u0080"+ - "\u0001\u0080\u0003\u0080\u0621\b\u0080\u0001\u0080\u0001\u0080\u0001\u0080"+ - "\u0001\u0080\u0003\u0080\u0627\b\u0080\u0005\u0080\u0629\b\u0080\n\u0080"+ - "\f\u0080\u062c\t\u0080\u0001\u0081\u0001\u0081\u0001\u0081\u0001\u0081"+ - "\u0001\u0081\u0001\u0081\u0001\u0081\u0003\u0081\u0635\b\u0081\u0001\u0081"+ - "\u0003\u0081\u0638\b\u0081\u0001\u0082\u0001\u0082\u0001\u0082\u0001\u0082"+ - "\u0005\u0082\u063e\b\u0082\n\u0082\f\u0082\u0641\t\u0082\u0001\u0082\u0001"+ - "\u0082\u0001\u0083\u0001\u0083\u0001\u0083\u0001\u0083\u0001\u0083\u0001"+ - "\u0084\u0001\u0084\u0001\u0085\u0001\u0085\u0003\u0085\u064e\b\u0085\u0001"+ - "\u0085\u0001\u0085\u0001\u0085\u0003\u0085\u0653\b\u0085\u0005\u0085\u0655"+ - "\b\u0085\n\u0085\f\u0085\u0658\t\u0085\u0001\u0086\u0001\u0086\u0001\u0086"+ - "\u0001\u0086\u0001\u0086\u0003\u0086\u065f\b\u0086\u0003\u0086\u0661\b"+ - "\u0086\u0001\u0086\u0003\u0086\u0664\b\u0086\u0001\u0086\u0001\u0086\u0001"+ - "\u0087\u0001\u0087\u0001\u0087\u0001\u0087\u0001\u0087\u0001\u0087\u0001"+ - "\u0087\u0001\u0087\u0001\u0087\u0003\u0087\u0671\b\u0087\u0001\u0088\u0001"+ - "\u0088\u0001\u0088\u0005\u0088\u0676\b\u0088\n\u0088\f\u0088\u0679\t\u0088"+ - 
"\u0001\u0089\u0001\u0089\u0003\u0089\u067d\b\u0089\u0001\u008a\u0001\u008a"+ - "\u0001\u008a\u0001\u008a\u0001\u008b\u0003\u008b\u0684\b\u008b\u0001\u008b"+ - "\u0003\u008b\u0687\b\u008b\u0001\u008b\u0003\u008b\u068a\b\u008b\u0001"+ - "\u008b\u0003\u008b\u068d\b\u008b\u0001\u008b\u0003\u008b\u0690\b\u008b"+ - "\u0001\u008b\u0003\u008b\u0693\b\u008b\u0001\u008b\u0003\u008b\u0696\b"+ - "\u008b\u0001\u008b\u0003\u008b\u0699\b\u008b\u0001\u008b\u0003\u008b\u069c"+ - "\b\u008b\u0001\u008b\u0003\u008b\u069f\b\u008b\u0001\u008b\u0003\u008b"+ - "\u06a2\b\u008b\u0001\u008b\u0003\u008b\u06a5\b\u008b\u0001\u008b\u0003"+ - "\u008b\u06a8\b\u008b\u0001\u008b\u0003\u008b\u06ab\b\u008b\u0001\u008b"+ - "\u0003\u008b\u06ae\b\u008b\u0001\u008b\u0003\u008b\u06b1\b\u008b\u0001"+ - "\u008b\u0003\u008b\u06b4\b\u008b\u0001\u008b\u0003\u008b\u06b7\b\u008b"+ - "\u0001\u008b\u0003\u008b\u06ba\b\u008b\u0001\u008b\u0003\u008b\u06bd\b"+ - "\u008b\u0003\u008b\u06bf\b\u008b\u0001\u008c\u0001\u008c\u0001\u008c\u0001"+ - "\u008c\u0001\u008d\u0001\u008d\u0001\u008e\u0001\u008e\u0001\u008e\u0001"+ - "\u008e\u0001\u008f\u0001\u008f\u0001\u008f\u0001\u008f\u0001\u0090\u0001"+ + "|\u0001}\u0001}\u0001}\u0001}\u0001}\u0003}\u0614\b}\u0001}\u0001}\u0003"+ + "}\u0618\b}\u0001}\u0001}\u0001}\u0001}\u0003}\u061e\b}\u0001~\u0001~\u0001"+ + "~\u0003~\u0623\b~\u0001~\u0001~\u0001\u007f\u0001\u007f\u0001\u0080\u0001"+ + "\u0080\u0001\u0080\u0003\u0080\u062c\b\u0080\u0001\u0080\u0001\u0080\u0001"+ + "\u0080\u0001\u0080\u0003\u0080\u0632\b\u0080\u0005\u0080\u0634\b\u0080"+ + "\n\u0080\f\u0080\u0637\t\u0080\u0001\u0081\u0001\u0081\u0001\u0081\u0001"+ + "\u0081\u0001\u0081\u0001\u0081\u0001\u0081\u0003\u0081\u0640\b\u0081\u0001"+ + "\u0081\u0003\u0081\u0643\b\u0081\u0001\u0082\u0001\u0082\u0001\u0082\u0001"+ + "\u0082\u0005\u0082\u0649\b\u0082\n\u0082\f\u0082\u064c\t\u0082\u0001\u0082"+ + "\u0001\u0082\u0001\u0083\u0001\u0083\u0001\u0083\u0001\u0083\u0001\u0083"+ + "\u0001\u0084\u0001\u0084\u0001\u0085\u0001\u0085\u0003\u0085\u0659\b\u0085"+ + "\u0001\u0085\u0001\u0085\u0001\u0085\u0003\u0085\u065e\b\u0085\u0005\u0085"+ + "\u0660\b\u0085\n\u0085\f\u0085\u0663\t\u0085\u0001\u0086\u0001\u0086\u0001"+ + "\u0086\u0001\u0086\u0001\u0086\u0003\u0086\u066a\b\u0086\u0003\u0086\u066c"+ + "\b\u0086\u0001\u0086\u0003\u0086\u066f\b\u0086\u0001\u0086\u0001\u0086"+ + "\u0001\u0087\u0001\u0087\u0001\u0087\u0001\u0087\u0001\u0087\u0001\u0087"+ + "\u0001\u0087\u0001\u0087\u0001\u0087\u0003\u0087\u067c\b\u0087\u0001\u0088"+ + "\u0001\u0088\u0001\u0088\u0005\u0088\u0681\b\u0088\n\u0088\f\u0088\u0684"+ + "\t\u0088\u0001\u0089\u0001\u0089\u0003\u0089\u0688\b\u0089\u0001\u008a"+ + "\u0001\u008a\u0001\u008a\u0001\u008a\u0001\u008b\u0001\u008b\u0001\u008b"+ + "\u0001\u008b\u0001\u008b\u0004\u008b\u0693\b\u008b\u000b\u008b\f\u008b"+ + "\u0694\u0001\u008c\u0001\u008c\u0001\u008c\u0001\u008c\u0001\u008d\u0001"+ + "\u008d\u0001\u008e\u0001\u008e\u0001\u008e\u0001\u008e\u0001\u008f\u0001"+ + "\u008f\u0001\u008f\u0001\u008f\u0003\u008f\u06a5\b\u008f\u0001\u0090\u0001"+ "\u0090\u0001\u0090\u0001\u0090\u0001\u0091\u0001\u0091\u0001\u0091\u0001"+ - "\u0091\u0003\u0091\u06d7\b\u0091\u0001\u0092\u0001\u0092\u0001\u0092\u0001"+ - "\u0092\u0001\u0093\u0001\u0093\u0001\u0093\u0001\u0093\u0001\u0093\u0001"+ - "\u0093\u0003\u0093\u06e3\b\u0093\u0003\u0093\u06e5\b\u0093\u0001\u0093"+ - "\u0001\u0093\u0001\u0093\u0001\u0093\u0004\u0093\u06eb\b\u0093\u000b\u0093"+ - "\f\u0093\u06ec\u0001\u0093\u0001\u0093\u0003\u0093\u06f1\b\u0093\u0001"+ - 
"\u0094\u0001\u0094\u0001\u0094\u0001\u0094\u0001\u0094\u0001\u0094\u0001"+ - "\u0094\u0001\u0094\u0001\u0094\u0001\u0094\u0001\u0094\u0001\u0094\u0001"+ - "\u0094\u0001\u0094\u0001\u0094\u0001\u0094\u0001\u0094\u0001\u0094\u0001"+ - "\u0094\u0001\u0094\u0001\u0094\u0003\u0094\u0708\b\u0094\u0001\u0095\u0001"+ - "\u0095\u0001\u0095\u0001\u0096\u0001\u0096\u0001\u0096\u0001\u0096\u0001"+ - "\u0096\u0003\u0096\u0712\b\u0096\u0001\u0097\u0001\u0097\u0001\u0097\u0001"+ - "\u0097\u0001\u0097\u0001\u0098\u0001\u0098\u0001\u0098\u0001\u0098\u0001"+ - "\u0098\u0001\u0098\u0003\u0098\u071f\b\u0098\u0001\u0099\u0001\u0099\u0001"+ - "\u0099\u0003\u0099\u0724\b\u0099\u0001\u009a\u0001\u009a\u0001\u009a\u0001"+ - "\u009b\u0001\u009b\u0001\u009b\u0001\u009b\u0003\u009b\u072d\b\u009b\u0001"+ - "\u009b\u0001\u009b\u0001\u009b\u0001\u009b\u0003\u009b\u0733\b\u009b\u0005"+ - "\u009b\u0735\b\u009b\n\u009b\f\u009b\u0738\t\u009b\u0001\u009b\u0001\u009b"+ - "\u0001\u009c\u0001\u009c\u0001\u009c\u0001\u009c\u0001\u009c\u0001\u009c"+ - "\u0001\u009c\u0001\u009c\u0003\u009c\u0744\b\u009c\u0001\u009c\u0003\u009c"+ - "\u0747\b\u009c\u0001\u009d\u0001\u009d\u0001\u009d\u0001\u009e\u0001\u009e"+ - "\u0001\u009e\u0001\u009e\u0003\u009e\u0750\b\u009e\u0001\u009e\u0003\u009e"+ - "\u0753\b\u009e\u0001\u009e\u0001\u009e\u0001\u009e\u0001\u009e\u0003\u009e"+ - "\u0759\b\u009e\u0001\u009f\u0001\u009f\u0001\u009f\u0005\u009f\u075e\b"+ - "\u009f\n\u009f\f\u009f\u0761\t\u009f\u0001\u00a0\u0001\u00a0\u0001\u00a0"+ - "\u0005\u00a0\u0766\b\u00a0\n\u00a0\f\u00a0\u0769\t\u00a0\u0001\u00a1\u0001"+ - "\u00a1\u0001\u00a1\u0005\u00a1\u076e\b\u00a1\n\u00a1\f\u00a1\u0771\t\u00a1"+ - "\u0001\u00a1\u0001\u00a1\u0001\u00a1\u0003\u00a1\u0776\b\u00a1\u0001\u00a2"+ - "\u0001\u00a2\u0001\u00a2\u0001\u00a2\u0003\u00a2\u077c\b\u00a2\u0001\u00a2"+ - "\u0001\u00a2\u0001\u00a3\u0001\u00a3\u0001\u00a3\u0001\u00a3\u0001\u00a3"+ - "\u0003\u00a3\u0785\b\u00a3\u0001\u00a3\u0001\u00a3\u0001\u00a3\u0001\u00a3"+ - "\u0001\u00a3\u0001\u00a3\u0001\u00a3\u0001\u00a3\u0003\u00a3\u078f\b\u00a3"+ - "\u0001\u00a3\u0003\u00a3\u0792\b\u00a3\u0001\u00a3\u0001\u00a3\u0001\u00a3"+ - "\u0001\u00a3\u0001\u00a3\u0003\u00a3\u0799\b\u00a3\u0001\u00a3\u0001\u00a3"+ - "\u0001\u00a3\u0001\u00a3\u0003\u00a3\u079f\b\u00a3\u0001\u00a3\u0003\u00a3"+ - "\u07a2\b\u00a3\u0001\u00a4\u0001\u00a4\u0001\u00a5\u0001\u00a5\u0001\u00a5"+ - "\u0005\u00a5\u07a9\b\u00a5\n\u00a5\f\u00a5\u07ac\t\u00a5\u0001\u00a6\u0001"+ - "\u00a6\u0003\u00a6\u07b0\b\u00a6\u0001\u00a6\u0003\u00a6\u07b3\b\u00a6"+ - "\u0001\u00a7\u0001\u00a7\u0001\u00a7\u0003\u00a7\u07b8\b\u00a7\u0001\u00a7"+ - "\u0003\u00a7\u07bb\b\u00a7\u0001\u00a7\u0003\u00a7\u07be\b\u00a7\u0001"+ - "\u00a7\u0001\u00a7\u0001\u00a8\u0001\u00a8\u0004\u00a8\u07c4\b\u00a8\u000b"+ - "\u00a8\f\u00a8\u07c5\u0001\u00a9\u0001\u00a9\u0001\u00a9\u0003\u00a9\u07cb"+ - "\b\u00a9\u0001\u00a9\u0001\u00a9\u0001\u00a9\u0001\u00a9\u0001\u00a9\u0003"+ - "\u00a9\u07d2\b\u00a9\u0001\u00a9\u0001\u00a9\u0001\u00a9\u0001\u00a9\u0001"+ - "\u00a9\u0001\u00a9\u0001\u00a9\u0001\u00a9\u0001\u00a9\u0001\u00a9\u0003"+ - "\u00a9\u07de\b\u00a9\u0001\u00aa\u0001\u00aa\u0001\u00aa\u0001\u00aa\u0001"+ - "\u00aa\u0001\u00aa\u0001\u00aa\u0001\u00aa\u0001\u00aa\u0005\u00aa\u07e9"+ - "\b\u00aa\n\u00aa\f\u00aa\u07ec\t\u00aa\u0001\u00aa\u0001\u00aa\u0001\u00aa"+ - "\u0001\u00aa\u0001\u00aa\u0001\u00aa\u0001\u00aa\u0001\u00aa\u0001\u00aa"+ - "\u0001\u00aa\u0003\u00aa\u07f8\b\u00aa\u0001\u00ab\u0001\u00ab\u0001\u00ab"+ - 
"\u0001\u00ac\u0001\u00ac\u0001\u00ac\u0001\u00ac\u0001\u00ac\u0001\u00ac"+ - "\u0001\u00ac\u0001\u00ac\u0001\u00ac\u0001\u00ac\u0003\u00ac\u0807\b\u00ac"+ - "\u0001\u00ac\u0003\u00ac\u080a\b\u00ac\u0001\u00ad\u0001\u00ad\u0001\u00ad"+ - "\u0001\u00ad\u0001\u00ad\u0001\u00ad\u0003\u00ad\u0812\b\u00ad\u0001\u00ad"+ - "\u0001\u00ad\u0001\u00ad\u0001\u00ad\u0001\u00ad\u0003\u00ad\u0819\b\u00ad"+ - "\u0001\u00ad\u0003\u00ad\u081c\b\u00ad\u0001\u00ad\u0003\u00ad\u081f\b"+ - "\u00ad\u0001\u00ae\u0001\u00ae\u0001\u00ae\u0001\u00ae\u0001\u00ae\u0001"+ - "\u00ae\u0001\u00ae\u0001\u00ae\u0003\u00ae\u0829\b\u00ae\u0001\u00af\u0001"+ - "\u00af\u0001\u00af\u0005\u00af\u082e\b\u00af\n\u00af\f\u00af\u0831\t\u00af"+ - "\u0001\u00b0\u0001\u00b0\u0003\u00b0\u0835\b\u00b0\u0001\u00b1\u0001\u00b1"+ - "\u0005\u00b1\u0839\b\u00b1\n\u00b1\f\u00b1\u083c\t\u00b1\u0001\u00b2\u0001"+ - "\u00b2\u0001\u00b2\u0001\u00b2\u0001\u00b2\u0001\u00b2\u0003\u00b2\u0844"+ - "\b\u00b2\u0001\u00b3\u0001\u00b3\u0001\u00b3\u0001\u00b3\u0003\u00b3\u084a"+ - "\b\u00b3\u0001\u00b3\u0001\u00b3\u0001\u00b3\u0001\u00b3\u0003\u00b3\u0850"+ - "\b\u00b3\u0001\u00b4\u0001\u00b4\u0001\u00b4\u0003\u00b4\u0855\b\u00b4"+ - "\u0001\u00b4\u0001\u00b4\u0001\u00b4\u0001\u00b4\u0001\u00b4\u0001\u00b4"+ - "\u0001\u00b4\u0001\u00b4\u0001\u00b4\u0001\u00b4\u0003\u00b4\u0861\b\u00b4"+ - "\u0001\u00b4\u0001\u00b4\u0001\u00b4\u0001\u00b4\u0001\u00b4\u0003\u00b4"+ - "\u0868\b\u00b4\u0001\u00b5\u0001\u00b5\u0001\u00b5\u0005\u00b5\u086d\b"+ - "\u00b5\n\u00b5\f\u00b5\u0870\t\u00b5\u0001\u00b6\u0001\u00b6\u0001\u00b6"+ - "\u0003\u00b6\u0875\b\u00b6\u0001\u00b6\u0001\u00b6\u0001\u00b6\u0001\u00b6"+ - "\u0001\u00b6\u0001\u00b6\u0001\u00b6\u0001\u00b6\u0001\u00b6\u0001\u00b6"+ - "\u0001\u00b6\u0001\u00b6\u0001\u00b6\u0001\u00b6\u0003\u00b6\u0885\b\u00b6"+ - "\u0001\u00b7\u0001\u00b7\u0001\u00b7\u0001\u00b7\u0003\u00b7\u088b\b\u00b7"+ - "\u0001\u00b7\u0003\u00b7\u088e\b\u00b7\u0001\u00b8\u0001\u00b8\u0001\u00b8"+ - "\u0001\u00b8\u0001\u00b9\u0001\u00b9\u0001\u00b9\u0001\u00b9\u0003\u00b9"+ - "\u0898\b\u00b9\u0001\u00b9\u0003\u00b9\u089b\b\u00b9\u0001\u00b9\u0003"+ - "\u00b9\u089e\b\u00b9\u0001\u00b9\u0003\u00b9\u08a1\b\u00b9\u0001\u00b9"+ - "\u0003\u00b9\u08a4\b\u00b9\u0001\u00ba\u0001\u00ba\u0001\u00ba\u0001\u00ba"+ - "\u0003\u00ba\u08aa\b\u00ba\u0001\u00bb\u0001\u00bb\u0001\u00bb\u0001\u00bb"+ - "\u0001\u00bc\u0001\u00bc\u0001\u00bc\u0001\u00bc\u0003\u00bc\u08b4\b\u00bc"+ - "\u0001\u00bd\u0001\u00bd\u0001\u00bd\u0001\u00bd\u0003\u00bd\u08ba\b\u00bd"+ - "\u0001\u00be\u0001\u00be\u0003\u00be\u08be\b\u00be\u0001\u00bf\u0001\u00bf"+ - "\u0001\u00bf\u0001\u00c0\u0001\u00c0\u0001\u00c0\u0003\u00c0\u08c6\b\u00c0"+ - "\u0001\u00c0\u0003\u00c0\u08c9\b\u00c0\u0001\u00c0\u0001\u00c0\u0001\u00c0"+ - "\u0003\u00c0\u08ce\b\u00c0\u0001\u00c1\u0001\u00c1\u0001\u00c1\u0001\u00c2"+ - "\u0001\u00c2\u0001\u00c2\u0001\u00c2\u0001\u00c3\u0001\u00c3\u0003\u00c3"+ - "\u08d9\b\u00c3\u0001\u00c4\u0001\u00c4\u0001\u00c4\u0001\u00c5\u0001\u00c5"+ - "\u0001\u00c5\u0001\u00c5\u0001\u00c6\u0001\u00c6\u0001\u00c6\u0001\u00c6"+ - "\u0001\u00c7\u0001\u00c7\u0001\u00c7\u0001\u00c7\u0001\u00c7\u0003\u00c7"+ - "\u08eb\b\u00c7\u0001\u00c7\u0001\u00c7\u0001\u00c7\u0001\u00c8\u0001\u00c8"+ - "\u0001\u00c8\u0001\u00c8\u0001\u00c9\u0001\u00c9\u0001\u00c9\u0001\u00c9"+ - "\u0001\u00ca\u0001\u00ca\u0001\u00ca\u0001\u00ca\u0001\u00ca\u0003\u00ca"+ - "\u08fd\b\u00ca\u0001\u00ca\u0001\u00ca\u0001\u00ca\u0001\u00cb\u0001\u00cb"+ - "\u0001\u00cb\u0001\u00cb\u0003\u00cb\u0906\b\u00cb\u0001\u00cc\u0001\u00cc"+ - 
"\u0001\u00cc\u0005\u00cc\u090b\b\u00cc\n\u00cc\f\u00cc\u090e\t\u00cc\u0001"+ - "\u00cd\u0001\u00cd\u0003\u00cd\u0912\b\u00cd\u0001\u00ce\u0001\u00ce\u0003"+ - "\u00ce\u0916\b\u00ce\u0001\u00ce\u0001\u00ce\u0001\u00ce\u0003\u00ce\u091b"+ - "\b\u00ce\u0005\u00ce\u091d\b\u00ce\n\u00ce\f\u00ce\u0920\t\u00ce\u0001"+ - "\u00cf\u0001\u00cf\u0001\u00d0\u0001\u00d0\u0001\u00d0\u0001\u00d0\u0001"+ - "\u00d0\u0001\u00d0\u0001\u00d0\u0003\u00d0\u092b\b\u00d0\u0001\u00d1\u0001"+ - "\u00d1\u0001\u00d1\u0001\u00d1\u0005\u00d1\u0931\b\u00d1\n\u00d1\f\u00d1"+ - "\u0934\t\u00d1\u0001\u00d1\u0001\u00d1\u0001\u00d1\u0001\u00d1\u0003\u00d1"+ - "\u093a\b\u00d1\u0001\u00d2\u0001\u00d2\u0001\u00d2\u0001\u00d2\u0005\u00d2"+ - "\u0940\b\u00d2\n\u00d2\f\u00d2\u0943\t\u00d2\u0001\u00d2\u0001\u00d2\u0001"+ - "\u00d2\u0001\u00d2\u0003\u00d2\u0949\b\u00d2\u0001\u00d3\u0001\u00d3\u0001"+ - "\u00d3\u0001\u00d3\u0001\u00d4\u0001\u00d4\u0001\u00d4\u0001\u00d4\u0001"+ - "\u00d4\u0001\u00d4\u0001\u00d4\u0003\u00d4\u0956\b\u00d4\u0001\u00d5\u0001"+ - "\u00d5\u0001\u00d5\u0001\u00d6\u0001\u00d6\u0001\u00d6\u0001\u00d7\u0001"+ - "\u00d7\u0001\u00d8\u0003\u00d8\u0961\b\u00d8\u0001\u00d8\u0001\u00d8\u0001"+ - "\u00d9\u0003\u00d9\u0966\b\u00d9\u0001\u00d9\u0001\u00d9\u0001\u00da\u0001"+ - "\u00da\u0001\u00db\u0001\u00db\u0001\u00db\u0005\u00db\u096f\b\u00db\n"+ - "\u00db\f\u00db\u0972\t\u00db\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001"+ - "\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001"+ - "\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001"+ - "\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001"+ - "\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001"+ - "\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001"+ - "\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001"+ - "\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001"+ - "\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001"+ - "\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001"+ - "\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001"+ - "\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001"+ - "\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001"+ - "\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001"+ - "\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001"+ - "\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001"+ - "\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001"+ - "\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001"+ - "\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001"+ - "\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001"+ - "\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001"+ - "\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001"+ - "\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001"+ - "\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0001"+ - "\u00dc\u0001\u00dc\u0001\u00dc\u0001\u00dc\u0003\u00dc\u0a05\b\u00dc\u0001"+ - "\u00dc\u0001\u00dc\u0003\u00dc\u0a09\b\u00dc\u0001\u00dc\u0000\u0002<"+ - ">\u00dd\u0000\u0002\u0004\u0006\b\n\f\u000e\u0010\u0012\u0014\u0016\u0018"+ - "\u001a\u001c\u001e \"$&(*,.02468:<>@BDFHJLNPRTVXZ\\^`bdfhjlnprtvxz|~\u0080"+ - 
"\u0082\u0084\u0086\u0088\u008a\u008c\u008e\u0090\u0092\u0094\u0096\u0098"+ - "\u009a\u009c\u009e\u00a0\u00a2\u00a4\u00a6\u00a8\u00aa\u00ac\u00ae\u00b0"+ - "\u00b2\u00b4\u00b6\u00b8\u00ba\u00bc\u00be\u00c0\u00c2\u00c4\u00c6\u00c8"+ - "\u00ca\u00cc\u00ce\u00d0\u00d2\u00d4\u00d6\u00d8\u00da\u00dc\u00de\u00e0"+ - "\u00e2\u00e4\u00e6\u00e8\u00ea\u00ec\u00ee\u00f0\u00f2\u00f4\u00f6\u00f8"+ - "\u00fa\u00fc\u00fe\u0100\u0102\u0104\u0106\u0108\u010a\u010c\u010e\u0110"+ - "\u0112\u0114\u0116\u0118\u011a\u011c\u011e\u0120\u0122\u0124\u0126\u0128"+ - "\u012a\u012c\u012e\u0130\u0132\u0134\u0136\u0138\u013a\u013c\u013e\u0140"+ - "\u0142\u0144\u0146\u0148\u014a\u014c\u014e\u0150\u0152\u0154\u0156\u0158"+ - "\u015a\u015c\u015e\u0160\u0162\u0164\u0166\u0168\u016a\u016c\u016e\u0170"+ - "\u0172\u0174\u0176\u0178\u017a\u017c\u017e\u0180\u0182\u0184\u0186\u0188"+ - "\u018a\u018c\u018e\u0190\u0192\u0194\u0196\u0198\u019a\u019c\u019e\u01a0"+ - "\u01a2\u01a4\u01a6\u01a8\u01aa\u01ac\u01ae\u01b0\u01b2\u01b4\u01b6\u01b8"+ - "\u0000\u0012\u0002\u0000\u000e\u000e\u001f\u001f\u0002\u0000--HH\u0001"+ - "\u0000\u00b4\u00b9\u0001\u0000\u00c0\u00c1\u0002\u0000\u00b0\u00b0\u00c2"+ - "\u00c3\u0002\u0000GG\u0086\u0086\u0002\u0000\u0004\u0004\u00b2\u00b3\u0002"+ - "\u0000@@\u0082\u0082\u0002\u0000\u001b\u001b88\u0003\u0000\u00b0\u00b0"+ - "\u00b3\u00b3\u00c0\u00c0\u0001\u0000\u009a\u009b\u0003\u0000\u0096\u0096"+ - "\u0098\u0098\u009d\u009d\u0002\u0000\u009a\u009b\u009d\u009d\u0002\u0000"+ - "\u001f\u001f!!\u0002\u0000MM\u0080\u0080\u0004\u0000\u001b\u001b88PPr"+ - "r\u0001\u0000\u00c8\u00ca\u0001\u0000\u00cb\u00cc\u0b5c\u0000\u01ba\u0001"+ - "\u0000\u0000\u0000\u0002\u01d6\u0001\u0000\u0000\u0000\u0004\u01d9\u0001"+ - "\u0000\u0000\u0000\u0006\u01dd\u0001\u0000\u0000\u0000\b\u01e8\u0001\u0000"+ - "\u0000\u0000\n\u01eb\u0001\u0000\u0000\u0000\f\u01ee\u0001\u0000\u0000"+ - "\u0000\u000e\u01f0\u0001\u0000\u0000\u0000\u0010\u0201\u0001\u0000\u0000"+ - "\u0000\u0012\u021c\u0001\u0000\u0000\u0000\u0014\u021e\u0001\u0000\u0000"+ - "\u0000\u0016\u0232\u0001\u0000\u0000\u0000\u0018\u023a\u0001\u0000\u0000"+ - "\u0000\u001a\u0242\u0001\u0000\u0000\u0000\u001c\u024a\u0001\u0000\u0000"+ - "\u0000\u001e\u024d\u0001\u0000\u0000\u0000 \u0252\u0001\u0000\u0000\u0000"+ - "\"\u025b\u0001\u0000\u0000\u0000$\u025d\u0001\u0000\u0000\u0000&\u0273"+ - "\u0001\u0000\u0000\u0000(\u0276\u0001\u0000\u0000\u0000*\u027a\u0001\u0000"+ - "\u0000\u0000,\u028d\u0001\u0000\u0000\u0000.\u02b1\u0001\u0000\u0000\u0000"+ - "0\u02b8\u0001\u0000\u0000\u00002\u02ba\u0001\u0000\u0000\u00004\u02c8"+ - "\u0001\u0000\u0000\u00006\u02ce\u0001\u0000\u0000\u00008\u02d8\u0001\u0000"+ - "\u0000\u0000:\u02db\u0001\u0000\u0000\u0000<\u02de\u0001\u0000\u0000\u0000"+ - ">\u02e9\u0001\u0000\u0000\u0000@\u02f5\u0001\u0000\u0000\u0000B\u02f9"+ - "\u0001\u0000\u0000\u0000D\u0306\u0001\u0000\u0000\u0000F\u0308\u0001\u0000"+ - "\u0000\u0000H\u030e\u0001\u0000\u0000\u0000J\u0317\u0001\u0000\u0000\u0000"+ - "L\u031f\u0001\u0000\u0000\u0000N\u0324\u0001\u0000\u0000\u0000P\u0326"+ - "\u0001\u0000\u0000\u0000R\u0332\u0001\u0000\u0000\u0000T\u033d\u0001\u0000"+ - "\u0000\u0000V\u0348\u0001\u0000\u0000\u0000X\u0360\u0001\u0000\u0000\u0000"+ - "Z\u0365\u0001\u0000\u0000\u0000\\\u0368\u0001\u0000\u0000\u0000^\u0382"+ - "\u0001\u0000\u0000\u0000`\u038a\u0001\u0000\u0000\u0000b\u0392\u0001\u0000"+ - "\u0000\u0000d\u039d\u0001\u0000\u0000\u0000f\u039f\u0001\u0000\u0000\u0000"+ - "h\u03a7\u0001\u0000\u0000\u0000j\u03b1\u0001\u0000\u0000\u0000l\u03b3"+ - 
"\u0001\u0000\u0000\u0000n\u03bc\u0001\u0000\u0000\u0000p\u03be\u0001\u0000"+ - "\u0000\u0000r\u03c8\u0001\u0000\u0000\u0000t\u03dc\u0001\u0000\u0000\u0000"+ - "v\u03de\u0001\u0000\u0000\u0000x\u03eb\u0001\u0000\u0000\u0000z\u03ed"+ - "\u0001\u0000\u0000\u0000|\u03ef\u0001\u0000\u0000\u0000~\u040e\u0001\u0000"+ - "\u0000\u0000\u0080\u0410\u0001\u0000\u0000\u0000\u0082\u0417\u0001\u0000"+ - "\u0000\u0000\u0084\u0419\u0001\u0000\u0000\u0000\u0086\u0421\u0001\u0000"+ - "\u0000\u0000\u0088\u042f\u0001\u0000\u0000\u0000\u008a\u0434\u0001\u0000"+ - "\u0000\u0000\u008c\u043a\u0001\u0000\u0000\u0000\u008e\u044f\u0001\u0000"+ - "\u0000\u0000\u0090\u0456\u0001\u0000\u0000\u0000\u0092\u045a\u0001\u0000"+ - "\u0000\u0000\u0094\u0462\u0001\u0000\u0000\u0000\u0096\u048f\u0001\u0000"+ - "\u0000\u0000\u0098\u0491\u0001\u0000\u0000\u0000\u009a\u0496\u0001\u0000"+ - "\u0000\u0000\u009c\u049e\u0001\u0000\u0000\u0000\u009e\u04a1\u0001\u0000"+ - "\u0000\u0000\u00a0\u04b8\u0001\u0000\u0000\u0000\u00a2\u0502\u0001\u0000"+ - "\u0000\u0000\u00a4\u0504\u0001\u0000\u0000\u0000\u00a6\u0509\u0001\u0000"+ - "\u0000\u0000\u00a8\u0518\u0001\u0000\u0000\u0000\u00aa\u0520\u0001\u0000"+ - "\u0000\u0000\u00ac\u0522\u0001\u0000\u0000\u0000\u00ae\u052b\u0001\u0000"+ - "\u0000\u0000\u00b0\u0533\u0001\u0000\u0000\u0000\u00b2\u0535\u0001\u0000"+ - "\u0000\u0000\u00b4\u0537\u0001\u0000\u0000\u0000\u00b6\u053a\u0001\u0000"+ - "\u0000\u0000\u00b8\u054c\u0001\u0000\u0000\u0000\u00ba\u054f\u0001\u0000"+ - "\u0000\u0000\u00bc\u0562\u0001\u0000\u0000\u0000\u00be\u0564\u0001\u0000"+ - "\u0000\u0000\u00c0\u0570\u0001\u0000\u0000\u0000\u00c2\u0580\u0001\u0000"+ - "\u0000\u0000\u00c4\u0582\u0001\u0000\u0000\u0000\u00c6\u058a\u0001\u0000"+ - "\u0000\u0000\u00c8\u058d\u0001\u0000\u0000\u0000\u00ca\u0592\u0001\u0000"+ - "\u0000\u0000\u00cc\u0597\u0001\u0000\u0000\u0000\u00ce\u0599\u0001\u0000"+ - "\u0000\u0000\u00d0\u059b\u0001\u0000\u0000\u0000\u00d2\u059d\u0001\u0000"+ - "\u0000\u0000\u00d4\u05a9\u0001\u0000\u0000\u0000\u00d6\u05ab\u0001\u0000"+ - "\u0000\u0000\u00d8\u05ad\u0001\u0000\u0000\u0000\u00da\u05b3\u0001\u0000"+ - "\u0000\u0000\u00dc\u05b9\u0001\u0000\u0000\u0000\u00de\u05bb\u0001\u0000"+ - "\u0000\u0000\u00e0\u05bd\u0001\u0000\u0000\u0000\u00e2\u05bf\u0001\u0000"+ - "\u0000\u0000\u00e4\u05c1\u0001\u0000\u0000\u0000\u00e6\u05c9\u0001\u0000"+ - "\u0000\u0000\u00e8\u05d2\u0001\u0000\u0000\u0000\u00ea\u05d6\u0001\u0000"+ - "\u0000\u0000\u00ec\u05e0\u0001\u0000\u0000\u0000\u00ee\u05e2\u0001\u0000"+ - "\u0000\u0000\u00f0\u05eb\u0001\u0000\u0000\u0000\u00f2\u05f5\u0001\u0000"+ - "\u0000\u0000\u00f4\u05f7\u0001\u0000\u0000\u0000\u00f6\u05fb\u0001\u0000"+ - "\u0000\u0000\u00f8\u05ff\u0001\u0000\u0000\u0000\u00fa\u0604\u0001\u0000"+ - "\u0000\u0000\u00fc\u0617\u0001\u0000\u0000\u0000\u00fe\u061b\u0001\u0000"+ - "\u0000\u0000\u0100\u0620\u0001\u0000\u0000\u0000\u0102\u062d\u0001\u0000"+ - "\u0000\u0000\u0104\u0639\u0001\u0000\u0000\u0000\u0106\u0644\u0001\u0000"+ - "\u0000\u0000\u0108\u0649\u0001\u0000\u0000\u0000\u010a\u064d\u0001\u0000"+ - "\u0000\u0000\u010c\u0659\u0001\u0000\u0000\u0000\u010e\u0670\u0001\u0000"+ - "\u0000\u0000\u0110\u0672\u0001\u0000\u0000\u0000\u0112\u067a\u0001\u0000"+ - "\u0000\u0000\u0114\u067e\u0001\u0000\u0000\u0000\u0116\u06be\u0001\u0000"+ - "\u0000\u0000\u0118\u06c0\u0001\u0000\u0000\u0000\u011a\u06c4\u0001\u0000"+ - "\u0000\u0000\u011c\u06c6\u0001\u0000\u0000\u0000\u011e\u06ca\u0001\u0000"+ - "\u0000\u0000\u0120\u06ce\u0001\u0000\u0000\u0000\u0122\u06d2\u0001\u0000"+ - 
"\u0000\u0000\u0124\u06d8\u0001\u0000\u0000\u0000\u0126\u06dc\u0001\u0000"+ - "\u0000\u0000\u0128\u0707\u0001\u0000\u0000\u0000\u012a\u0709\u0001\u0000"+ - "\u0000\u0000\u012c\u070c\u0001\u0000\u0000\u0000\u012e\u0713\u0001\u0000"+ - "\u0000\u0000\u0130\u071e\u0001\u0000\u0000\u0000\u0132\u0720\u0001\u0000"+ - "\u0000\u0000\u0134\u0725\u0001\u0000\u0000\u0000\u0136\u0728\u0001\u0000"+ - "\u0000\u0000\u0138\u073b\u0001\u0000\u0000\u0000\u013a\u0748\u0001\u0000"+ - "\u0000\u0000\u013c\u074b\u0001\u0000\u0000\u0000\u013e\u075a\u0001\u0000"+ - "\u0000\u0000\u0140\u0762\u0001\u0000\u0000\u0000\u0142\u0775\u0001\u0000"+ - "\u0000\u0000\u0144\u0777\u0001\u0000\u0000\u0000\u0146\u077f\u0001\u0000"+ - "\u0000\u0000\u0148\u07a3\u0001\u0000\u0000\u0000\u014a\u07a5\u0001\u0000"+ - "\u0000\u0000\u014c\u07b2\u0001\u0000\u0000\u0000\u014e\u07b4\u0001\u0000"+ - "\u0000\u0000\u0150\u07c3\u0001\u0000\u0000\u0000\u0152\u07dd\u0001\u0000"+ - "\u0000\u0000\u0154\u07df\u0001\u0000\u0000\u0000\u0156\u07f9\u0001\u0000"+ - "\u0000\u0000\u0158\u07fc\u0001\u0000\u0000\u0000\u015a\u080b\u0001\u0000"+ - "\u0000\u0000\u015c\u0828\u0001\u0000\u0000\u0000\u015e\u082a\u0001\u0000"+ - "\u0000\u0000\u0160\u0832\u0001\u0000\u0000\u0000\u0162\u0836\u0001\u0000"+ - "\u0000\u0000\u0164\u0843\u0001\u0000\u0000\u0000\u0166\u0845\u0001\u0000"+ - "\u0000\u0000\u0168\u0851\u0001\u0000\u0000\u0000\u016a\u0869\u0001\u0000"+ - "\u0000\u0000\u016c\u0871\u0001\u0000\u0000\u0000\u016e\u0886\u0001\u0000"+ - "\u0000\u0000\u0170\u088f\u0001\u0000\u0000\u0000\u0172\u0893\u0001\u0000"+ - "\u0000\u0000\u0174\u08a5\u0001\u0000\u0000\u0000\u0176\u08ab\u0001\u0000"+ - "\u0000\u0000\u0178\u08af\u0001\u0000\u0000\u0000\u017a\u08b5\u0001\u0000"+ - "\u0000\u0000\u017c\u08bd\u0001\u0000\u0000\u0000\u017e\u08bf\u0001\u0000"+ - "\u0000\u0000\u0180\u08cd\u0001\u0000\u0000\u0000\u0182\u08cf\u0001\u0000"+ - "\u0000\u0000\u0184\u08d2\u0001\u0000\u0000\u0000\u0186\u08d6\u0001\u0000"+ - "\u0000\u0000\u0188\u08da\u0001\u0000\u0000\u0000\u018a\u08dd\u0001\u0000"+ - "\u0000\u0000\u018c\u08e1\u0001\u0000\u0000\u0000\u018e\u08e5\u0001\u0000"+ - "\u0000\u0000\u0190\u08ef\u0001\u0000\u0000\u0000\u0192\u08f3\u0001\u0000"+ - "\u0000\u0000\u0194\u08f7\u0001\u0000\u0000\u0000\u0196\u0905\u0001\u0000"+ - "\u0000\u0000\u0198\u0907\u0001\u0000\u0000\u0000\u019a\u0911\u0001\u0000"+ - "\u0000\u0000\u019c\u0915\u0001\u0000\u0000\u0000\u019e\u0921\u0001\u0000"+ - "\u0000\u0000\u01a0\u092a\u0001\u0000\u0000\u0000\u01a2\u0939\u0001\u0000"+ - "\u0000\u0000\u01a4\u0948\u0001\u0000\u0000\u0000\u01a6\u094a\u0001\u0000"+ - "\u0000\u0000\u01a8\u0955\u0001\u0000\u0000\u0000\u01aa\u0957\u0001\u0000"+ - "\u0000\u0000\u01ac\u095a\u0001\u0000\u0000\u0000\u01ae\u095d\u0001\u0000"+ - "\u0000\u0000\u01b0\u0960\u0001\u0000\u0000\u0000\u01b2\u0965\u0001\u0000"+ - "\u0000\u0000\u01b4\u0969\u0001\u0000\u0000\u0000\u01b6\u096b\u0001\u0000"+ - "\u0000\u0000\u01b8\u0a08\u0001\u0000\u0000\u0000\u01ba\u01bb\u0003\u0002"+ - "\u0001\u0000\u01bb\u01bc\u0005\u0000\u0000\u0001\u01bc\u0001\u0001\u0000"+ - "\u0000\u0000\u01bd\u01d7\u0003\u0004\u0002\u0000\u01be\u01d7\u0003\u0094"+ - "J\u0000\u01bf\u01d7\u0003\u009eO\u0000\u01c0\u01d7\u0003\u00b6[\u0000"+ - "\u01c1\u01d7\u0003\u00fa}\u0000\u01c2\u01d7\u0003\u0146\u00a3\u0000\u01c3"+ - "\u01d7\u0003\u016e\u00b7\u0000\u01c4\u01d7\u0003\u0170\u00b8\u0000\u01c5"+ - "\u01d7\u0003\u00eew\u0000\u01c6\u01d7\u0003\u00f4z\u0000\u01c7\u01d7\u0003"+ - "\u0166\u00b3\u0000\u01c8\u01d7\u0003\u00f0x\u0000\u01c9\u01d7\u0003\u00f6"+ - 
"{\u0000\u01ca\u01d7\u0003\u015a\u00ad\u0000\u01cb\u01d7\u0003\u0176\u00bb"+ - "\u0000\u01cc\u01d7\u0003\u0174\u00ba\u0000\u01cd\u01d7\u0003\u012e\u0097"+ - "\u0000\u01ce\u01d7\u0003\u0172\u00b9\u0000\u01cf\u01d7\u0003\u0144\u00a2"+ - "\u0000\u01d0\u01d7\u0003\u0178\u00bc\u0000\u01d1\u01d7\u0003\u017a\u00bd"+ - "\u0000\u01d2\u01d7\u0003\u0168\u00b4\u0000\u01d3\u01d7\u0003\u00f8|\u0000"+ - "\u01d4\u01d7\u0003\u016c\u00b6\u0000\u01d5\u01d7\u0003\n\u0005\u0000\u01d6"+ - "\u01bd\u0001\u0000\u0000\u0000\u01d6\u01be\u0001\u0000\u0000\u0000\u01d6"+ - "\u01bf\u0001\u0000\u0000\u0000\u01d6\u01c0\u0001\u0000\u0000\u0000\u01d6"+ - "\u01c1\u0001\u0000\u0000\u0000\u01d6\u01c2\u0001\u0000\u0000\u0000\u01d6"+ - "\u01c3\u0001\u0000\u0000\u0000\u01d6\u01c4\u0001\u0000\u0000\u0000\u01d6"+ - "\u01c5\u0001\u0000\u0000\u0000\u01d6\u01c6\u0001\u0000\u0000\u0000\u01d6"+ - "\u01c7\u0001\u0000\u0000\u0000\u01d6\u01c8\u0001\u0000\u0000\u0000\u01d6"+ - "\u01c9\u0001\u0000\u0000\u0000\u01d6\u01ca\u0001\u0000\u0000\u0000\u01d6"+ - "\u01cb\u0001\u0000\u0000\u0000\u01d6\u01cc\u0001\u0000\u0000\u0000\u01d6"+ - "\u01cd\u0001\u0000\u0000\u0000\u01d6\u01ce\u0001\u0000\u0000\u0000\u01d6"+ - "\u01cf\u0001\u0000\u0000\u0000\u01d6\u01d0\u0001\u0000\u0000\u0000\u01d6"+ - "\u01d1\u0001\u0000\u0000\u0000\u01d6\u01d2\u0001\u0000\u0000\u0000\u01d6"+ - "\u01d3\u0001\u0000\u0000\u0000\u01d6\u01d4\u0001\u0000\u0000\u0000\u01d6"+ - "\u01d5\u0001\u0000\u0000\u0000\u01d7\u0003\u0001\u0000\u0000\u0000\u01d8"+ - "\u01da\u0003\u0006\u0003\u0000\u01d9\u01d8\u0001\u0000\u0000\u0000\u01d9"+ - "\u01da\u0001\u0000\u0000\u0000\u01da\u01db\u0001\u0000\u0000\u0000\u01db"+ - "\u01dc\u0003\u000e\u0007\u0000\u01dc\u0005\u0001\u0000\u0000\u0000\u01dd"+ - "\u01de\u0005\u001c\u0000\u0000\u01de\u01df\u0003\b\u0004\u0000\u01df\u01e5"+ - "\u0005\u00a7\u0000\u0000\u01e0\u01e1\u0003\b\u0004\u0000\u01e1\u01e2\u0005"+ - "\u00a7\u0000\u0000\u01e2\u01e4\u0001\u0000\u0000\u0000\u01e3\u01e0\u0001"+ - "\u0000\u0000\u0000\u01e4\u01e7\u0001\u0000\u0000\u0000\u01e5\u01e3\u0001"+ - "\u0000\u0000\u0000\u01e5\u01e6\u0001\u0000\u0000\u0000\u01e6\u0007\u0001"+ - "\u0000\u0000\u0000\u01e7\u01e5\u0001\u0000\u0000\u0000\u01e8\u01e9\u0005"+ - "\u0004\u0000\u0000\u01e9\u01ea\u0003\u00bc^\u0000\u01ea\t\u0001\u0000"+ - "\u0000\u0000\u01eb\u01ec\u0003\u0006\u0003\u0000\u01ec\u01ed\u0003\u0086"+ - "C\u0000\u01ed\u000b\u0001\u0000\u0000\u0000\u01ee\u01ef\u0003<\u001e\u0000"+ - "\u01ef\r\u0001\u0000\u0000\u0000\u01f0\u01f1\u0003(\u0014\u0000\u01f1"+ - "\u01f3\u0003\u0010\b\u0000\u01f2\u01f4\u0003&\u0013\u0000\u01f3\u01f2"+ - "\u0001\u0000\u0000\u0000\u01f3\u01f4\u0001\u0000\u0000\u0000\u01f4\u01f6"+ - "\u0001\u0000\u0000\u0000\u01f5\u01f7\u00036\u001b\u0000\u01f6\u01f5\u0001"+ - "\u0000\u0000\u0000\u01f6\u01f7\u0001\u0000\u0000\u0000\u01f7\u01f9\u0001"+ - "\u0000\u0000\u0000\u01f8\u01fa\u00032\u0019\u0000\u01f9\u01f8\u0001\u0000"+ - "\u0000\u0000\u01f9\u01fa\u0001\u0000\u0000\u0000\u01fa\u01fc\u0001\u0000"+ - "\u0000\u0000\u01fb\u01fd\u00038\u001c\u0000\u01fc\u01fb\u0001\u0000\u0000"+ - "\u0000\u01fc\u01fd\u0001\u0000\u0000\u0000\u01fd\u01ff\u0001\u0000\u0000"+ - "\u0000\u01fe\u0200\u0003:\u001d\u0000\u01ff\u01fe\u0001\u0000\u0000\u0000"+ - "\u01ff\u0200\u0001\u0000\u0000\u0000\u0200\u000f\u0001\u0000\u0000\u0000"+ - "\u0201\u0202\u00052\u0000\u0000\u0202\u0207\u0003\u0012\t\u0000\u0203"+ - "\u0204\u0005\u00a8\u0000\u0000\u0204\u0206\u0003\u0012\t\u0000\u0205\u0203"+ - "\u0001\u0000\u0000\u0000\u0206\u0209\u0001\u0000\u0000\u0000\u0207\u0205"+ - 
"\u0001\u0000\u0000\u0000\u0207\u0208\u0001\u0000\u0000\u0000\u0208\u0216"+ - "\u0001\u0000\u0000\u0000\u0209\u0207\u0001\u0000\u0000\u0000\u020a\u0212"+ - "\u0005\u00a8\u0000\u0000\u020b\u020d\u0003\f\u0006\u0000\u020c\u020e\u0005"+ - "\r\u0000\u0000\u020d\u020c\u0001\u0000\u0000\u0000\u020d\u020e\u0001\u0000"+ - "\u0000\u0000\u020e\u020f\u0001\u0000\u0000\u0000\u020f\u0210\u0005\u0004"+ - "\u0000\u0000\u0210\u0213\u0001\u0000\u0000\u0000\u0211\u0213\u0003$\u0012"+ - "\u0000\u0212\u020b\u0001\u0000\u0000\u0000\u0212\u0211\u0001\u0000\u0000"+ - "\u0000\u0213\u0215\u0001\u0000\u0000\u0000\u0214\u020a\u0001\u0000\u0000"+ - "\u0000\u0215\u0218\u0001\u0000\u0000\u0000\u0216\u0214\u0001\u0000\u0000"+ - "\u0000\u0216\u0217\u0001\u0000\u0000\u0000\u0217\u0011\u0001\u0000\u0000"+ - "\u0000\u0218\u0216\u0001\u0000\u0000\u0000\u0219\u021d\u0003\u001e\u000f"+ - "\u0000\u021a\u021d\u0003\u0014\n\u0000\u021b\u021d\u0003\u001a\r\u0000"+ - "\u021c\u0219\u0001\u0000\u0000\u0000\u021c\u021a\u0001\u0000\u0000\u0000"+ - "\u021c\u021b\u0001\u0000\u0000\u0000\u021d\u0013\u0001\u0000\u0000\u0000"+ - "\u021e\u021f\u0005V\u0000\u0000\u021f\u0220\u0005z\u0000\u0000\u0220\u0221"+ - "\u0005\u00aa\u0000\u0000\u0221\u0227\u0003\u001e\u000f\u0000\u0222\u0223"+ - "\u0005\u000b\u0000\u0000\u0223\u0224\u0005\u00aa\u0000\u0000\u0224\u0225"+ - "\u0003\u0016\u000b\u0000\u0225\u0226\u0005\u00ab\u0000\u0000\u0226\u0228"+ - "\u0001\u0000\u0000\u0000\u0227\u0222\u0001\u0000\u0000\u0000\u0227\u0228"+ - "\u0001\u0000\u0000\u0000\u0228\u022e\u0001\u0000\u0000\u0000\u0229\u022a"+ - "\u0005 \u0000\u0000\u022a\u022b\u0005\u00aa\u0000\u0000\u022b\u022c\u0003"+ - "\u0018\f\u0000\u022c\u022d\u0005\u00ab\u0000\u0000\u022d\u022f\u0001\u0000"+ - "\u0000\u0000\u022e\u0229\u0001\u0000\u0000\u0000\u022e\u022f\u0001\u0000"+ - "\u0000\u0000\u022f\u0230\u0001\u0000\u0000\u0000\u0230\u0231\u0005\u00ab"+ - "\u0000\u0000\u0231\u0015\u0001\u0000\u0000\u0000\u0232\u0237\u0003\u001e"+ - "\u000f\u0000\u0233\u0234\u0005\u00a8\u0000\u0000\u0234\u0236\u0003\u001e"+ - "\u000f\u0000\u0235\u0233\u0001\u0000\u0000\u0000\u0236\u0239\u0001\u0000"+ - "\u0000\u0000\u0237\u0235\u0001\u0000\u0000\u0000\u0237\u0238\u0001\u0000"+ - "\u0000\u0000\u0238\u0017\u0001\u0000\u0000\u0000\u0239\u0237\u0001\u0000"+ - "\u0000\u0000\u023a\u023f\u0003\u001e\u000f\u0000\u023b\u023c\u0005\u00a8"+ - "\u0000\u0000\u023c\u023e\u0003\u001e\u000f\u0000\u023d\u023b\u0001\u0000"+ - "\u0000\u0000\u023e\u0241\u0001\u0000\u0000\u0000\u023f\u023d\u0001\u0000"+ - "\u0000\u0000\u023f\u0240\u0001\u0000\u0000\u0000\u0240\u0019\u0001\u0000"+ - "\u0000\u0000\u0241\u023f\u0001\u0000\u0000\u0000\u0242\u0243\u0003\u001e"+ - "\u000f\u0000\u0243\u0247\u0003\u001c\u000e\u0000\u0244\u0246\u0003\u001c"+ - "\u000e\u0000\u0245\u0244\u0001\u0000\u0000\u0000\u0246\u0249\u0001\u0000"+ - "\u0000\u0000\u0247\u0245\u0001\u0000\u0000\u0000\u0247\u0248\u0001\u0000"+ - "\u0000\u0000\u0248\u001b\u0001\u0000\u0000\u0000\u0249\u0247\u0001\u0000"+ - "\u0000\u0000\u024a\u024b\u0005\u0092\u0000\u0000\u024b\u024c\u0003\u001e"+ - "\u000f\u0000\u024c\u001d\u0001\u0000\u0000\u0000\u024d\u0250\u0003 \u0010"+ - "\u0000\u024e\u024f\u0005\\\u0000\u0000\u024f\u0251\u0003<\u001e\u0000"+ - "\u0250\u024e\u0001\u0000\u0000\u0000\u0250\u0251\u0001\u0000\u0000\u0000"+ - "\u0251\u001f\u0001\u0000\u0000\u0000\u0252\u0257\u0003\u00fc~\u0000\u0253"+ - "\u0255\u0005\r\u0000\u0000\u0254\u0253\u0001\u0000\u0000\u0000\u0254\u0255"+ - "\u0001\u0000\u0000\u0000\u0255\u0256\u0001\u0000\u0000\u0000\u0256\u0258"+ - 
"\u0003\"\u0011\u0000\u0257\u0254\u0001\u0000\u0000\u0000\u0257\u0258\u0001"+ - "\u0000\u0000\u0000\u0258!\u0001\u0000\u0000\u0000\u0259\u025c\u0005\u0004"+ - "\u0000\u0000\u025a\u025c\u0003\u01b8\u00dc\u0000\u025b\u0259\u0001\u0000"+ - "\u0000\u0000\u025b\u025a\u0001\u0000\u0000\u0000\u025c#\u0001\u0000\u0000"+ - "\u0000\u025d\u025e\u0005\u008b\u0000\u0000\u025e\u025f\u0005\u00aa\u0000"+ - "\u0000\u025f\u0261\u0003f3\u0000\u0260\u0262\u0005\r\u0000\u0000\u0261"+ - "\u0260\u0001\u0000\u0000\u0000\u0261\u0262\u0001\u0000\u0000\u0000\u0262"+ - "\u0263\u0001\u0000\u0000\u0000\u0263\u0264\u0005\u0004\u0000\u0000\u0264"+ - "\u026e\u0001\u0000\u0000\u0000\u0265\u0266\u0005\u00a8\u0000\u0000\u0266"+ - "\u0268\u0003f3\u0000\u0267\u0269\u0005\r\u0000\u0000\u0268\u0267\u0001"+ - "\u0000\u0000\u0000\u0268\u0269\u0001\u0000\u0000\u0000\u0269\u026a\u0001"+ - "\u0000\u0000\u0000\u026a\u026b\u0005\u0004\u0000\u0000\u026b\u026d\u0001"+ - "\u0000\u0000\u0000\u026c\u0265\u0001\u0000\u0000\u0000\u026d\u0270\u0001"+ - "\u0000\u0000\u0000\u026e\u026c\u0001\u0000\u0000\u0000\u026e\u026f\u0001"+ - "\u0000\u0000\u0000\u026f\u0271\u0001\u0000\u0000\u0000\u0270\u026e\u0001"+ - "\u0000\u0000\u0000\u0271\u0272\u0005\u00ab\u0000\u0000\u0272%\u0001\u0000"+ - "\u0000\u0000\u0273\u0274\u0005\u0088\u0000\u0000\u0274\u0275\u0003\f\u0006"+ - "\u0000\u0275\'\u0001\u0000\u0000\u0000\u0276\u0277\u0005s\u0000\u0000"+ - "\u0277\u0278\u0003*\u0015\u0000\u0278)\u0001\u0000\u0000\u0000\u0279\u027b"+ - "\u0003,\u0016\u0000\u027a\u0279\u0001\u0000\u0000\u0000\u027a\u027b\u0001"+ - "\u0000\u0000\u0000\u027b\u027d\u0001\u0000\u0000\u0000\u027c\u027e\u0005"+ - "\"\u0000\u0000\u027d\u027c\u0001\u0000\u0000\u0000\u027d\u027e\u0001\u0000"+ - "\u0000\u0000\u027e\u028b\u0001\u0000\u0000\u0000\u027f\u028c\u0005\u00b0"+ - "\u0000\u0000\u0280\u0281\u0003\f\u0006\u0000\u0281\u0288\u00030\u0018"+ - "\u0000\u0282\u0283\u0005\u00a8\u0000\u0000\u0283\u0284\u0003\f\u0006\u0000"+ - "\u0284\u0285\u00030\u0018\u0000\u0285\u0287\u0001\u0000\u0000\u0000\u0286"+ - "\u0282\u0001\u0000\u0000\u0000\u0287\u028a\u0001\u0000\u0000\u0000\u0288"+ - "\u0286\u0001\u0000\u0000\u0000\u0288\u0289\u0001\u0000\u0000\u0000\u0289"+ - "\u028c\u0001\u0000\u0000\u0000\u028a\u0288\u0001\u0000\u0000\u0000\u028b"+ - "\u027f\u0001\u0000\u0000\u0000\u028b\u0280\u0001\u0000\u0000\u0000\u028c"+ - "+\u0001\u0000\u0000\u0000\u028d\u0291\u0005\u0001\u0000\u0000\u028e\u0290"+ - "\u0003.\u0017\u0000\u028f\u028e\u0001\u0000\u0000\u0000\u0290\u0293\u0001"+ - "\u0000\u0000\u0000\u0291\u028f\u0001\u0000\u0000\u0000\u0291\u0292\u0001"+ - "\u0000\u0000\u0000\u0292\u0294\u0001\u0000\u0000\u0000\u0293\u0291\u0001"+ - "\u0000\u0000\u0000\u0294\u0295\u0005\u0002\u0000\u0000\u0295-\u0001\u0000"+ - "\u0000\u0000\u0296\u0297\u0005e\u0000\u0000\u0297\u0298\u0005\u00aa\u0000"+ - "\u0000\u0298\u029c\u0003\u00fc~\u0000\u0299\u029b\u0003\u0148\u00a4\u0000"+ - "\u029a\u0299\u0001\u0000\u0000\u0000\u029b\u029e\u0001\u0000\u0000\u0000"+ - "\u029c\u029a\u0001\u0000\u0000\u0000\u029c\u029d\u0001\u0000\u0000\u0000"+ - "\u029d\u029f\u0001\u0000\u0000\u0000\u029e\u029c\u0001\u0000\u0000\u0000"+ - "\u029f\u02a0\u0005\u00ab\u0000\u0000\u02a0\u02b2\u0001\u0000\u0000\u0000"+ - "\u02a1\u02a2\u0005/\u0000\u0000\u02a2\u02a3\u0005\u00aa\u0000\u0000\u02a3"+ - "\u02a4\u0003\u00fc~\u0000\u02a4\u02a5\u0003\u0148\u00a4\u0000\u02a5\u02a6"+ - "\u0005\u00ab\u0000\u0000\u02a6\u02b2\u0001\u0000\u0000\u0000\u02a7\u02a8"+ - "\u0005f\u0000\u0000\u02a8\u02a9\u0005\u00aa\u0000\u0000\u02a9\u02aa\u0003"+ - 
"\u00fc~\u0000\u02aa\u02ab\u0005\u00ab\u0000\u0000\u02ab\u02b2\u0001\u0000"+ - "\u0000\u0000\u02ac\u02ad\u00050\u0000\u0000\u02ad\u02ae\u0005\u00aa\u0000"+ - "\u0000\u02ae\u02af\u0003\u00fc~\u0000\u02af\u02b0\u0005\u00ab\u0000\u0000"+ - "\u02b0\u02b2\u0001\u0000\u0000\u0000\u02b1\u0296\u0001\u0000\u0000\u0000"+ - "\u02b1\u02a1\u0001\u0000\u0000\u0000\u02b1\u02a7\u0001\u0000\u0000\u0000"+ - "\u02b1\u02ac\u0001\u0000\u0000\u0000\u02b2\u02b4\u0001\u0000\u0000\u0000"+ - "\u02b3\u02b5\u0005\u00cc\u0000\u0000\u02b4\u02b3\u0001\u0000\u0000\u0000"+ - "\u02b4\u02b5\u0001\u0000\u0000\u0000\u02b5/\u0001\u0000\u0000\u0000\u02b6"+ - "\u02b7\u0005\r\u0000\u0000\u02b7\u02b9\u0003\u01b8\u00dc\u0000\u02b8\u02b6"+ - "\u0001\u0000\u0000\u0000\u02b8\u02b9\u0001\u0000\u0000\u0000\u02b91\u0001"+ - "\u0000\u0000\u0000\u02ba\u02bb\u0005_\u0000\u0000\u02bb\u02bc\u0005\u0011"+ - "\u0000\u0000\u02bc\u02bd\u0003\f\u0006\u0000\u02bd\u02c4\u00034\u001a"+ - "\u0000\u02be\u02bf\u0005\u00a8\u0000\u0000\u02bf\u02c0\u0003\f\u0006\u0000"+ - "\u02c0\u02c1\u00034\u001a\u0000\u02c1\u02c3\u0001\u0000\u0000\u0000\u02c2"+ - "\u02be\u0001\u0000\u0000\u0000\u02c3\u02c6\u0001\u0000\u0000\u0000\u02c4"+ - "\u02c2\u0001\u0000\u0000\u0000\u02c4\u02c5\u0001\u0000\u0000\u0000\u02c5"+ - "3\u0001\u0000\u0000\u0000\u02c6\u02c4\u0001\u0000\u0000\u0000\u02c7\u02c9"+ - "\u0007\u0000\u0000\u0000\u02c8\u02c7\u0001\u0000\u0000\u0000\u02c8\u02c9"+ - "\u0001\u0000\u0000\u0000\u02c9\u02cc\u0001\u0000\u0000\u0000\u02ca\u02cb"+ - "\u0005Y\u0000\u0000\u02cb\u02cd\u0007\u0001\u0000\u0000\u02cc\u02ca\u0001"+ - "\u0000\u0000\u0000\u02cc\u02cd\u0001\u0000\u0000\u0000\u02cd5\u0001\u0000"+ - "\u0000\u0000\u02ce\u02cf\u00057\u0000\u0000\u02cf\u02d0\u0005\u0011\u0000"+ - "\u0000\u02d0\u02d5\u0003\f\u0006\u0000\u02d1\u02d2\u0005\u00a8\u0000\u0000"+ - "\u02d2\u02d4\u0003\f\u0006\u0000\u02d3\u02d1\u0001\u0000\u0000\u0000\u02d4"+ - "\u02d7\u0001\u0000\u0000\u0000\u02d5\u02d3\u0001\u0000\u0000\u0000\u02d5"+ - "\u02d6\u0001\u0000\u0000\u0000\u02d67\u0001\u0000\u0000\u0000\u02d7\u02d5"+ - "\u0001\u0000\u0000\u0000\u02d8\u02d9\u0005K\u0000\u0000\u02d9\u02da\u0003"+ - "`0\u0000\u02da9\u0001\u0000\u0000\u0000\u02db\u02dc\u0005Z\u0000\u0000"+ - "\u02dc\u02dd\u0003`0\u0000\u02dd;\u0001\u0000\u0000\u0000\u02de\u02df"+ - "\u0006\u001e\uffff\uffff\u0000\u02df\u02e0\u0003>\u001f\u0000\u02e0\u02e6"+ - "\u0001\u0000\u0000\u0000\u02e1\u02e2\n\u0001\u0000\u0000\u02e2\u02e3\u0005"+ - "^\u0000\u0000\u02e3\u02e5\u0003>\u001f\u0000\u02e4\u02e1\u0001\u0000\u0000"+ - "\u0000\u02e5\u02e8\u0001\u0000\u0000\u0000\u02e6\u02e4\u0001\u0000\u0000"+ - "\u0000\u02e6\u02e7\u0001\u0000\u0000\u0000\u02e7=\u0001\u0000\u0000\u0000"+ - "\u02e8\u02e6\u0001\u0000\u0000\u0000\u02e9\u02ea\u0006\u001f\uffff\uffff"+ - "\u0000\u02ea\u02eb\u0003@ \u0000\u02eb\u02f1\u0001\u0000\u0000\u0000\u02ec"+ - "\u02ed\n\u0001\u0000\u0000\u02ed\u02ee\u0005\f\u0000\u0000\u02ee\u02f0"+ - "\u0003@ \u0000\u02ef\u02ec\u0001\u0000\u0000\u0000\u02f0\u02f3\u0001\u0000"+ - "\u0000\u0000\u02f1\u02ef\u0001\u0000\u0000\u0000\u02f1\u02f2\u0001\u0000"+ - "\u0000\u0000\u02f2?\u0001\u0000\u0000\u0000\u02f3\u02f1\u0001\u0000\u0000"+ - "\u0000\u02f4\u02f6\u0005X\u0000\u0000\u02f5\u02f4\u0001\u0000\u0000\u0000"+ - "\u02f5\u02f6\u0001\u0000\u0000\u0000\u02f6\u02f7\u0001\u0000\u0000\u0000"+ - "\u02f7\u02f8\u0003B!\u0000\u02f8A\u0001\u0000\u0000\u0000\u02f9\u02ff"+ - "\u0003D\"\u0000\u02fa\u02fc\u0005B\u0000\u0000\u02fb\u02fd\u0005X\u0000"+ - "\u0000\u02fc\u02fb\u0001\u0000\u0000\u0000\u02fc\u02fd\u0001\u0000\u0000"+ - 
"\u0000\u02fd\u02fe\u0001\u0000\u0000\u0000\u02fe\u0300\u0005\u00c5\u0000"+ - "\u0000\u02ff\u02fa\u0001\u0000\u0000\u0000\u02ff\u0300\u0001\u0000\u0000"+ - "\u0000\u0300C\u0001\u0000\u0000\u0000\u0301\u0307\u0003F#\u0000\u0302"+ - "\u0307\u0003H$\u0000\u0303\u0307\u0003N\'\u0000\u0304\u0307\u0003Z-\u0000"+ - "\u0305\u0307\u0003\\.\u0000\u0306\u0301\u0001\u0000\u0000\u0000\u0306"+ - "\u0302\u0001\u0000\u0000\u0000\u0306\u0303\u0001\u0000\u0000\u0000\u0306"+ - "\u0304\u0001\u0000\u0000\u0000\u0306\u0305\u0001\u0000\u0000\u0000\u0307"+ - "E\u0001\u0000\u0000\u0000\u0308\u0309\u0003^/\u0000\u0309\u030a\u0005"+ - "\u0010\u0000\u0000\u030a\u030b\u0003^/\u0000\u030b\u030c\u0005\f\u0000"+ - "\u0000\u030c\u030d\u0003^/\u0000\u030dG\u0001\u0000\u0000\u0000\u030e"+ - "\u0315\u0003^/\u0000\u030f\u0312\u0003J%\u0000\u0310\u0312\u0003L&\u0000"+ - "\u0311\u030f\u0001\u0000\u0000\u0000\u0311\u0310\u0001\u0000\u0000\u0000"+ - "\u0312\u0313\u0001\u0000\u0000\u0000\u0313\u0314\u0003^/\u0000\u0314\u0316"+ - "\u0001\u0000\u0000\u0000\u0315\u0311\u0001\u0000\u0000\u0000\u0315\u0316"+ - "\u0001\u0000\u0000\u0000\u0316I\u0001\u0000\u0000\u0000\u0317\u0318\u0007"+ - "\u0002\u0000\u0000\u0318K\u0001\u0000\u0000\u0000\u0319\u0320\u0005\u00be"+ - "\u0000\u0000\u031a\u0320\u0005\u00bf\u0000\u0000\u031b\u0320\u0005\u00bc"+ - "\u0000\u0000\u031c\u0320\u0005\u00bd\u0000\u0000\u031d\u0320\u0005\u00ba"+ - "\u0000\u0000\u031e\u0320\u0005\u00bb\u0000\u0000\u031f\u0319\u0001\u0000"+ - "\u0000\u0000\u031f\u031a\u0001\u0000\u0000\u0000\u031f\u031b\u0001\u0000"+ - "\u0000\u0000\u031f\u031c\u0001\u0000\u0000\u0000\u031f\u031d\u0001\u0000"+ - "\u0000\u0000\u031f\u031e\u0001\u0000\u0000\u0000\u0320M\u0001\u0000\u0000"+ - "\u0000\u0321\u0325\u0003P(\u0000\u0322\u0325\u0003V+\u0000\u0323\u0325"+ - "\u0003X,\u0000\u0324\u0321\u0001\u0000\u0000\u0000\u0324\u0322\u0001\u0000"+ - "\u0000\u0000\u0324\u0323\u0001\u0000\u0000\u0000\u0325O\u0001\u0000\u0000"+ - "\u0000\u0326\u0327\u0003R)\u0000\u0327\u0328\u0005<\u0000\u0000\u0328"+ - "\u0329\u0005\u00aa\u0000\u0000\u0329\u032c\u0003T*\u0000\u032a\u032b\u0005"+ - "\u00a8\u0000\u0000\u032b\u032d\u0003T*\u0000\u032c\u032a\u0001\u0000\u0000"+ - "\u0000\u032d\u032e\u0001\u0000\u0000\u0000\u032e\u032c\u0001\u0000\u0000"+ - "\u0000\u032e\u032f\u0001\u0000\u0000\u0000\u032f\u0330\u0001\u0000\u0000"+ - "\u0000\u0330\u0331\u0005\u00ab\u0000\u0000\u0331Q\u0001\u0000\u0000\u0000"+ - "\u0332\u0333\u0005\u00aa\u0000\u0000\u0333\u0338\u0003^/\u0000\u0334\u0335"+ - "\u0005\u00a8\u0000\u0000\u0335\u0337\u0003^/\u0000\u0336\u0334\u0001\u0000"+ - "\u0000\u0000\u0337\u033a\u0001\u0000\u0000\u0000\u0338\u0336\u0001\u0000"+ - "\u0000\u0000\u0338\u0339\u0001\u0000\u0000\u0000\u0339\u033b\u0001\u0000"+ - "\u0000\u0000\u033a\u0338\u0001\u0000\u0000\u0000\u033b\u033c\u0005\u00ab"+ - "\u0000\u0000\u033cS\u0001\u0000\u0000\u0000\u033d\u033e\u0005\u00aa\u0000"+ - "\u0000\u033e\u0343\u0003\f\u0006\u0000\u033f\u0340\u0005\u00a8\u0000\u0000"+ - "\u0340\u0342\u0003\f\u0006\u0000\u0341\u033f\u0001\u0000\u0000\u0000\u0342"+ - "\u0345\u0001\u0000\u0000\u0000\u0343\u0341\u0001\u0000\u0000\u0000\u0343"+ - "\u0344\u0001\u0000\u0000\u0000\u0344\u0346\u0001\u0000\u0000\u0000\u0345"+ - "\u0343\u0001\u0000\u0000\u0000\u0346\u0347\u0005\u00ab\u0000\u0000\u0347"+ - "U\u0001\u0000\u0000\u0000\u0348\u0349\u0003^/\u0000\u0349\u034a\u0005"+ - "<\u0000\u0000\u034a\u034b\u0005\u00aa\u0000\u0000\u034b\u034e\u0003\f"+ - "\u0006\u0000\u034c\u034d\u0005\u00a8\u0000\u0000\u034d\u034f\u0003\f\u0006"+ - 
"\u0000\u034e\u034c\u0001\u0000\u0000\u0000\u034f\u0350\u0001\u0000\u0000"+ - "\u0000\u0350\u034e\u0001\u0000\u0000\u0000\u0350\u0351\u0001\u0000\u0000"+ - "\u0000\u0351\u0352\u0001\u0000\u0000\u0000\u0352\u0353\u0005\u00ab\u0000"+ - "\u0000\u0353W\u0001\u0000\u0000\u0000\u0354\u0361\u0003^/\u0000\u0355"+ - "\u0356\u0005\u00aa\u0000\u0000\u0356\u035b\u0003^/\u0000\u0357\u0358\u0005"+ - "\u00a8\u0000\u0000\u0358\u035a\u0003^/\u0000\u0359\u0357\u0001\u0000\u0000"+ - "\u0000\u035a\u035d\u0001\u0000\u0000\u0000\u035b\u0359\u0001\u0000\u0000"+ - "\u0000\u035b\u035c\u0001\u0000\u0000\u0000\u035c\u035e\u0001\u0000\u0000"+ - "\u0000\u035d\u035b\u0001\u0000\u0000\u0000\u035e\u035f\u0005\u00ab\u0000"+ - "\u0000\u035f\u0361\u0001\u0000\u0000\u0000\u0360\u0354\u0001\u0000\u0000"+ - "\u0000\u0360\u0355\u0001\u0000\u0000\u0000\u0361\u0362\u0001\u0000\u0000"+ - "\u0000\u0362\u0363\u0005<\u0000\u0000\u0363\u0364\u0003f3\u0000\u0364"+ - "Y\u0001\u0000\u0000\u0000\u0365\u0366\u0005*\u0000\u0000\u0366\u0367\u0003"+ - "^/\u0000\u0367[\u0001\u0000\u0000\u0000\u0368\u0369\u0003^/\u0000\u0369"+ - "\u036b\u0005B\u0000\u0000\u036a\u036c\u0005X\u0000\u0000\u036b\u036a\u0001"+ - "\u0000\u0000\u0000\u036b\u036c\u0001\u0000\u0000\u0000\u036c\u036d\u0001"+ - "\u0000\u0000\u0000\u036d\u036f\u0005[\u0000\u0000\u036e\u0370\u0005~\u0000"+ - "\u0000\u036f\u036e\u0001\u0000\u0000\u0000\u036f\u0370\u0001\u0000\u0000"+ - "\u0000\u0370\u0371\u0001\u0000\u0000\u0000\u0371\u0373\u0005\u00aa\u0000"+ - "\u0000\u0372\u0374\u0005]\u0000\u0000\u0373\u0372\u0001\u0000\u0000\u0000"+ - "\u0373\u0374\u0001\u0000\u0000\u0000\u0374\u0375\u0001\u0000\u0000\u0000"+ - "\u0375\u037d\u0003\u00ba]\u0000\u0376\u0378\u0005\u00a8\u0000\u0000\u0377"+ - "\u0379\u0005]\u0000\u0000\u0378\u0377\u0001\u0000\u0000\u0000\u0378\u0379"+ - "\u0001\u0000\u0000\u0000\u0379\u037a\u0001\u0000\u0000\u0000\u037a\u037c"+ - "\u0003\u00ba]\u0000\u037b\u0376\u0001\u0000\u0000\u0000\u037c\u037f\u0001"+ - "\u0000\u0000\u0000\u037d\u037b\u0001\u0000\u0000\u0000\u037d\u037e\u0001"+ - "\u0000\u0000\u0000\u037e\u0380\u0001\u0000\u0000\u0000\u037f\u037d\u0001"+ - "\u0000\u0000\u0000\u0380\u0381\u0005\u00ab\u0000\u0000\u0381]\u0001\u0000"+ - "\u0000\u0000\u0382\u0387\u0003`0\u0000\u0383\u0384\u0005\u00c4\u0000\u0000"+ - "\u0384\u0386\u0003`0\u0000\u0385\u0383\u0001\u0000\u0000\u0000\u0386\u0389"+ + "\u0091\u0003\u0091\u06af\b\u0091\u0001\u0092\u0001\u0092\u0001\u0092\u0001"+ + "\u0092\u0001\u0093\u0001\u0093\u0001\u0093\u0001\u0093\u0001\u0094\u0001"+ + "\u0094\u0001\u0094\u0001\u0094\u0001\u0094\u0001\u0094\u0003\u0094\u06bf"+ + "\b\u0094\u0003\u0094\u06c1\b\u0094\u0001\u0094\u0001\u0094\u0001\u0094"+ + "\u0001\u0094\u0004\u0094\u06c7\b\u0094\u000b\u0094\f\u0094\u06c8\u0001"+ + "\u0094\u0001\u0094\u0003\u0094\u06cd\b\u0094\u0001\u0095\u0001\u0095\u0001"+ + "\u0095\u0001\u0095\u0001\u0095\u0001\u0095\u0001\u0095\u0001\u0095\u0001"+ + "\u0095\u0001\u0095\u0001\u0095\u0001\u0095\u0001\u0095\u0001\u0095\u0001"+ + "\u0095\u0001\u0095\u0001\u0095\u0001\u0095\u0001\u0095\u0001\u0095\u0001"+ + "\u0095\u0003\u0095\u06e4\b\u0095\u0001\u0096\u0001\u0096\u0001\u0096\u0001"+ + "\u0097\u0001\u0097\u0001\u0097\u0001\u0097\u0001\u0097\u0003\u0097\u06ee"+ + "\b\u0097\u0001\u0098\u0001\u0098\u0001\u0098\u0001\u0098\u0001\u0098\u0001"+ + "\u0099\u0001\u0099\u0001\u0099\u0001\u0099\u0001\u0099\u0001\u0099\u0001"+ + "\u0099\u0001\u0099\u0003\u0099\u06fd\b\u0099\u0001\u009a\u0001\u009a\u0001"+ + "\u009a\u0003\u009a\u0702\b\u009a\u0001\u009b\u0001\u009b\u0001\u009b\u0001"+ + 
"\u009c\u0001\u009c\u0001\u009c\u0001\u009c\u0001\u009d\u0001\u009d\u0001"+ + "\u009d\u0001\u009d\u0001\u009e\u0001\u009e\u0001\u009e\u0001\u009e\u0003"+ + "\u009e\u0713\b\u009e\u0001\u009e\u0001\u009e\u0001\u009e\u0001\u009e\u0003"+ + "\u009e\u0719\b\u009e\u0005\u009e\u071b\b\u009e\n\u009e\f\u009e\u071e\t"+ + "\u009e\u0001\u009e\u0001\u009e\u0001\u009f\u0001\u009f\u0001\u009f\u0001"+ + "\u009f\u0001\u009f\u0001\u009f\u0001\u009f\u0001\u009f\u0003\u009f\u072a"+ + "\b\u009f\u0001\u009f\u0003\u009f\u072d\b\u009f\u0001\u00a0\u0001\u00a0"+ + "\u0001\u00a0\u0001\u00a1\u0001\u00a1\u0001\u00a1\u0001\u00a1\u0003\u00a1"+ + "\u0736\b\u00a1\u0001\u00a1\u0003\u00a1\u0739\b\u00a1\u0001\u00a1\u0001"+ + "\u00a1\u0001\u00a1\u0001\u00a1\u0003\u00a1\u073f\b\u00a1\u0001\u00a2\u0001"+ + "\u00a2\u0001\u00a2\u0005\u00a2\u0744\b\u00a2\n\u00a2\f\u00a2\u0747\t\u00a2"+ + "\u0001\u00a3\u0001\u00a3\u0001\u00a3\u0005\u00a3\u074c\b\u00a3\n\u00a3"+ + "\f\u00a3\u074f\t\u00a3\u0001\u00a4\u0001\u00a4\u0001\u00a4\u0005\u00a4"+ + "\u0754\b\u00a4\n\u00a4\f\u00a4\u0757\t\u00a4\u0001\u00a4\u0001\u00a4\u0001"+ + "\u00a4\u0003\u00a4\u075c\b\u00a4\u0001\u00a5\u0001\u00a5\u0001\u00a5\u0001"+ + "\u00a5\u0003\u00a5\u0762\b\u00a5\u0001\u00a5\u0001\u00a5\u0001\u00a6\u0001"+ + "\u00a6\u0001\u00a6\u0001\u00a6\u0001\u00a6\u0003\u00a6\u076b\b\u00a6\u0001"+ + "\u00a6\u0001\u00a6\u0001\u00a6\u0001\u00a6\u0001\u00a6\u0001\u00a6\u0001"+ + "\u00a6\u0001\u00a6\u0003\u00a6\u0775\b\u00a6\u0001\u00a6\u0003\u00a6\u0778"+ + "\b\u00a6\u0001\u00a6\u0001\u00a6\u0001\u00a6\u0001\u00a6\u0001\u00a6\u0003"+ + "\u00a6\u077f\b\u00a6\u0001\u00a6\u0001\u00a6\u0001\u00a6\u0001\u00a6\u0003"+ + "\u00a6\u0785\b\u00a6\u0001\u00a6\u0003\u00a6\u0788\b\u00a6\u0001\u00a7"+ + "\u0001\u00a7\u0001\u00a8\u0001\u00a8\u0001\u00a8\u0005\u00a8\u078f\b\u00a8"+ + "\n\u00a8\f\u00a8\u0792\t\u00a8\u0001\u00a9\u0001\u00a9\u0003\u00a9\u0796"+ + "\b\u00a9\u0001\u00a9\u0003\u00a9\u0799\b\u00a9\u0001\u00aa\u0001\u00aa"+ + "\u0001\u00aa\u0003\u00aa\u079e\b\u00aa\u0001\u00aa\u0003\u00aa\u07a1\b"+ + "\u00aa\u0001\u00aa\u0003\u00aa\u07a4\b\u00aa\u0001\u00aa\u0001\u00aa\u0001"+ + "\u00ab\u0001\u00ab\u0004\u00ab\u07aa\b\u00ab\u000b\u00ab\f\u00ab\u07ab"+ + "\u0001\u00ac\u0003\u00ac\u07af\b\u00ac\u0001\u00ac\u0001\u00ac\u0001\u00ac"+ + "\u0003\u00ac\u07b4\b\u00ac\u0003\u00ac\u07b6\b\u00ac\u0001\u00ac\u0003"+ + "\u00ac\u07b9\b\u00ac\u0001\u00ad\u0001\u00ad\u0001\u00ad\u0001\u00ad\u0001"+ + "\u00ad\u0003\u00ad\u07c0\b\u00ad\u0001\u00ad\u0001\u00ad\u0001\u00ad\u0001"+ + "\u00ad\u0001\u00ad\u0001\u00ad\u0001\u00ad\u0001\u00ad\u0001\u00ad\u0001"+ + "\u00ad\u0003\u00ad\u07cc\b\u00ad\u0001\u00ae\u0001\u00ae\u0001\u00af\u0001"+ + "\u00af\u0001\u00af\u0001\u00af\u0001\u00af\u0001\u00af\u0001\u00af\u0001"+ + "\u00af\u0001\u00af\u0005\u00af\u07d9\b\u00af\n\u00af\f\u00af\u07dc\t\u00af"+ + "\u0001\u00af\u0001\u00af\u0001\u00af\u0001\u00af\u0001\u00af\u0001\u00af"+ + "\u0001\u00af\u0001\u00af\u0001\u00af\u0001\u00af\u0003\u00af\u07e8\b\u00af"+ + "\u0001\u00b0\u0001\u00b0\u0001\u00b0\u0001\u00b1\u0001\u00b1\u0001\u00b1"+ + "\u0001\u00b1\u0001\u00b1\u0001\u00b1\u0001\u00b1\u0001\u00b1\u0001\u00b1"+ + "\u0001\u00b1\u0003\u00b1\u07f7\b\u00b1\u0001\u00b1\u0003\u00b1\u07fa\b"+ + "\u00b1\u0001\u00b2\u0001\u00b2\u0001\u00b2\u0001\u00b2\u0001\u00b2\u0001"+ + "\u00b2\u0003\u00b2\u0802\b\u00b2\u0001\u00b2\u0001\u00b2\u0001\u00b2\u0001"+ + "\u00b2\u0001\u00b2\u0003\u00b2\u0809\b\u00b2\u0001\u00b2\u0003\u00b2\u080c"+ + "\b\u00b2\u0001\u00b2\u0003\u00b2\u080f\b\u00b2\u0001\u00b3\u0001\u00b3"+ + 
"\u0001\u00b3\u0001\u00b3\u0001\u00b3\u0001\u00b3\u0001\u00b3\u0001\u00b3"+ + "\u0003\u00b3\u0819\b\u00b3\u0001\u00b4\u0001\u00b4\u0001\u00b4\u0005\u00b4"+ + "\u081e\b\u00b4\n\u00b4\f\u00b4\u0821\t\u00b4\u0001\u00b5\u0001\u00b5\u0003"+ + "\u00b5\u0825\b\u00b5\u0001\u00b6\u0001\u00b6\u0005\u00b6\u0829\b\u00b6"+ + "\n\u00b6\f\u00b6\u082c\t\u00b6\u0001\u00b7\u0001\u00b7\u0001\u00b7\u0001"+ + "\u00b7\u0001\u00b7\u0001\u00b7\u0003\u00b7\u0834\b\u00b7\u0001\u00b8\u0001"+ + "\u00b8\u0001\u00b8\u0001\u00b8\u0003\u00b8\u083a\b\u00b8\u0001\u00b8\u0001"+ + "\u00b8\u0001\u00b8\u0001\u00b8\u0003\u00b8\u0840\b\u00b8\u0001\u00b9\u0001"+ + "\u00b9\u0001\u00b9\u0003\u00b9\u0845\b\u00b9\u0001\u00b9\u0001\u00b9\u0001"+ + "\u00b9\u0001\u00b9\u0001\u00b9\u0001\u00b9\u0001\u00b9\u0001\u00b9\u0001"+ + "\u00b9\u0001\u00b9\u0003\u00b9\u0851\b\u00b9\u0001\u00b9\u0001\u00b9\u0001"+ + "\u00b9\u0001\u00b9\u0001\u00b9\u0003\u00b9\u0858\b\u00b9\u0001\u00ba\u0001"+ + "\u00ba\u0001\u00ba\u0005\u00ba\u085d\b\u00ba\n\u00ba\f\u00ba\u0860\t\u00ba"+ + "\u0001\u00bb\u0001\u00bb\u0001\u00bb\u0003\u00bb\u0865\b\u00bb\u0001\u00bb"+ + "\u0001\u00bb\u0001\u00bb\u0001\u00bb\u0001\u00bb\u0001\u00bb\u0001\u00bb"+ + "\u0001\u00bb\u0001\u00bb\u0001\u00bb\u0001\u00bb\u0001\u00bb\u0001\u00bb"+ + "\u0001\u00bb\u0003\u00bb\u0875\b\u00bb\u0001\u00bc\u0001\u00bc\u0001\u00bc"+ + "\u0001\u00bc\u0003\u00bc\u087b\b\u00bc\u0001\u00bc\u0003\u00bc\u087e\b"+ + "\u00bc\u0001\u00bd\u0001\u00bd\u0001\u00bd\u0001\u00bd\u0001\u00be\u0001"+ + "\u00be\u0001\u00be\u0001\u00be\u0003\u00be\u0888\b\u00be\u0001\u00be\u0003"+ + "\u00be\u088b\b\u00be\u0001\u00be\u0003\u00be\u088e\b\u00be\u0001\u00be"+ + "\u0003\u00be\u0891\b\u00be\u0001\u00be\u0003\u00be\u0894\b\u00be\u0001"+ + "\u00bf\u0001\u00bf\u0001\u00bf\u0001\u00bf\u0003\u00bf\u089a\b\u00bf\u0001"+ + "\u00c0\u0001\u00c0\u0001\u00c0\u0001\u00c0\u0001\u00c1\u0001\u00c1\u0001"+ + "\u00c1\u0001\u00c1\u0003\u00c1\u08a4\b\u00c1\u0001\u00c2\u0001\u00c2\u0001"+ + "\u00c2\u0001\u00c2\u0003\u00c2\u08aa\b\u00c2\u0001\u00c3\u0001\u00c3\u0003"+ + "\u00c3\u08ae\b\u00c3\u0001\u00c4\u0001\u00c4\u0001\u00c4\u0001\u00c5\u0001"+ + "\u00c5\u0001\u00c5\u0003\u00c5\u08b6\b\u00c5\u0001\u00c5\u0003\u00c5\u08b9"+ + "\b\u00c5\u0001\u00c5\u0001\u00c5\u0001\u00c5\u0003\u00c5\u08be\b\u00c5"+ + "\u0001\u00c6\u0001\u00c6\u0001\u00c6\u0001\u00c7\u0001\u00c7\u0001\u00c7"+ + "\u0001\u00c7\u0001\u00c8\u0001\u00c8\u0003\u00c8\u08c9\b\u00c8\u0001\u00c9"+ + "\u0001\u00c9\u0001\u00c9\u0001\u00ca\u0001\u00ca\u0001\u00ca\u0001\u00ca"+ + "\u0001\u00cb\u0001\u00cb\u0001\u00cb\u0001\u00cb\u0001\u00cc\u0001\u00cc"+ + "\u0001\u00cc\u0001\u00cc\u0001\u00cc\u0003\u00cc\u08db\b\u00cc\u0001\u00cc"+ + "\u0001\u00cc\u0001\u00cc\u0001\u00cd\u0001\u00cd\u0001\u00cd\u0001\u00cd"+ + "\u0001\u00ce\u0001\u00ce\u0001\u00ce\u0001\u00ce\u0001\u00cf\u0001\u00cf"+ + "\u0001\u00cf\u0001\u00cf\u0001\u00cf\u0003\u00cf\u08ed\b\u00cf\u0001\u00cf"+ + "\u0001\u00cf\u0001\u00cf\u0001\u00d0\u0001\u00d0\u0001\u00d0\u0001\u00d0"+ + "\u0003\u00d0\u08f6\b\u00d0\u0001\u00d1\u0001\u00d1\u0001\u00d1\u0005\u00d1"+ + "\u08fb\b\u00d1\n\u00d1\f\u00d1\u08fe\t\u00d1\u0001\u00d2\u0001\u00d2\u0003"+ + "\u00d2\u0902\b\u00d2\u0001\u00d3\u0001\u00d3\u0003\u00d3\u0906\b\u00d3"+ + "\u0001\u00d3\u0001\u00d3\u0001\u00d3\u0003\u00d3\u090b\b\u00d3\u0005\u00d3"+ + "\u090d\b\u00d3\n\u00d3\f\u00d3\u0910\t\u00d3\u0001\u00d4\u0001\u00d4\u0001"+ + "\u00d5\u0001\u00d5\u0001\u00d5\u0001\u00d5\u0001\u00d5\u0001\u00d5\u0001"+ + "\u00d5\u0003\u00d5\u091b\b\u00d5\u0001\u00d6\u0001\u00d6\u0001\u00d6\u0001"+ + 
"\u00d6\u0005\u00d6\u0921\b\u00d6\n\u00d6\f\u00d6\u0924\t\u00d6\u0001\u00d6"+ + "\u0001\u00d6\u0001\u00d6\u0001\u00d6\u0003\u00d6\u092a\b\u00d6\u0001\u00d7"+ + "\u0001\u00d7\u0001\u00d7\u0001\u00d7\u0005\u00d7\u0930\b\u00d7\n\u00d7"+ + "\f\u00d7\u0933\t\u00d7\u0001\u00d7\u0001\u00d7\u0001\u00d7\u0001\u00d7"+ + "\u0003\u00d7\u0939\b\u00d7\u0001\u00d8\u0001\u00d8\u0001\u00d8\u0001\u00d8"+ + "\u0001\u00d9\u0001\u00d9\u0001\u00d9\u0001\u00d9\u0001\u00d9\u0001\u00d9"+ + "\u0001\u00d9\u0003\u00d9\u0946\b\u00d9\u0001\u00da\u0001\u00da\u0001\u00da"+ + "\u0001\u00db\u0001\u00db\u0001\u00db\u0001\u00dc\u0001\u00dc\u0001\u00dd"+ + "\u0003\u00dd\u0951\b\u00dd\u0001\u00dd\u0001\u00dd\u0001\u00de\u0003\u00de"+ + "\u0956\b\u00de\u0001\u00de\u0001\u00de\u0001\u00df\u0001\u00df\u0001\u00e0"+ + "\u0001\u00e0\u0001\u00e0\u0005\u00e0\u095f\b\u00e0\n\u00e0\f\u00e0\u0962"+ + "\t\u00e0\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001"+ + "\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001"+ + "\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001"+ + "\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001"+ + "\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001"+ + "\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001"+ + "\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001"+ + "\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001"+ + "\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001"+ + "\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001"+ + "\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001"+ + "\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001"+ + "\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001"+ + "\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001"+ + "\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001"+ + "\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001"+ + "\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001"+ + "\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001"+ + "\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001"+ + "\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001"+ + "\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001"+ + "\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001"+ + "\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001"+ + "\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001"+ + "\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0001\u00e1\u0003"+ + "\u00e1\u09f9\b\u00e1\u0001\u00e1\u0001\u00e1\u0003\u00e1\u09fd\b\u00e1"+ + "\u0001\u00e1\u0000\u0002<>\u00e2\u0000\u0002\u0004\u0006\b\n\f\u000e\u0010"+ + "\u0012\u0014\u0016\u0018\u001a\u001c\u001e \"$&(*,.02468:<>@BDFHJLNPR"+ + "TVXZ\\^`bdfhjlnprtvxz|~\u0080\u0082\u0084\u0086\u0088\u008a\u008c\u008e"+ + "\u0090\u0092\u0094\u0096\u0098\u009a\u009c\u009e\u00a0\u00a2\u00a4\u00a6"+ + "\u00a8\u00aa\u00ac\u00ae\u00b0\u00b2\u00b4\u00b6\u00b8\u00ba\u00bc\u00be"+ + "\u00c0\u00c2\u00c4\u00c6\u00c8\u00ca\u00cc\u00ce\u00d0\u00d2\u00d4\u00d6"+ + "\u00d8\u00da\u00dc\u00de\u00e0\u00e2\u00e4\u00e6\u00e8\u00ea\u00ec\u00ee"+ + "\u00f0\u00f2\u00f4\u00f6\u00f8\u00fa\u00fc\u00fe\u0100\u0102\u0104\u0106"+ + 
"\u0108\u010a\u010c\u010e\u0110\u0112\u0114\u0116\u0118\u011a\u011c\u011e"+ + "\u0120\u0122\u0124\u0126\u0128\u012a\u012c\u012e\u0130\u0132\u0134\u0136"+ + "\u0138\u013a\u013c\u013e\u0140\u0142\u0144\u0146\u0148\u014a\u014c\u014e"+ + "\u0150\u0152\u0154\u0156\u0158\u015a\u015c\u015e\u0160\u0162\u0164\u0166"+ + "\u0168\u016a\u016c\u016e\u0170\u0172\u0174\u0176\u0178\u017a\u017c\u017e"+ + "\u0180\u0182\u0184\u0186\u0188\u018a\u018c\u018e\u0190\u0192\u0194\u0196"+ + "\u0198\u019a\u019c\u019e\u01a0\u01a2\u01a4\u01a6\u01a8\u01aa\u01ac\u01ae"+ + "\u01b0\u01b2\u01b4\u01b6\u01b8\u01ba\u01bc\u01be\u01c0\u01c2\u0000\u0012"+ + "\u0002\u0000\u000f\u000f!!\u0002\u000011MM\u0001\u0000\u00b9\u00be\u0001"+ + "\u0000\u00c5\u00c6\u0002\u0000\u00b5\u00b5\u00c7\u00c8\u0002\u0000LL\u008b"+ + "\u008b\u0002\u0000\u0005\u0005\u00b7\u00b8\u0002\u0000EE\u0087\u0087\u0002"+ + "\u0000\u001d\u001d<<\u0003\u0000\u00b5\u00b5\u00b8\u00b8\u00c5\u00c5\u0001"+ + "\u0000\u009f\u00a0\u0003\u0000\u009b\u009b\u009d\u009d\u00a2\u00a2\u0002"+ + "\u0000\u009f\u00a0\u00a2\u00a2\u0002\u0000!!##\u0002\u0000RR\u0085\u0085"+ + "\u0004\u0000\u001d\u001d<\u02f3\u0001\u0000\u0000\u0000@\u02ff"+ + "\u0001\u0000\u0000\u0000B\u0303\u0001\u0000\u0000\u0000D\u0310\u0001\u0000"+ + "\u0000\u0000F\u0312\u0001\u0000\u0000\u0000H\u0318\u0001\u0000\u0000\u0000"+ + "J\u0321\u0001\u0000\u0000\u0000L\u0329\u0001\u0000\u0000\u0000N\u032e"+ + "\u0001\u0000\u0000\u0000P\u0330\u0001\u0000\u0000\u0000R\u033c\u0001\u0000"+ + "\u0000\u0000T\u0347\u0001\u0000\u0000\u0000V\u0352\u0001\u0000\u0000\u0000"+ + "X\u036a\u0001\u0000\u0000\u0000Z\u036f\u0001\u0000\u0000\u0000\\\u0372"+ + "\u0001\u0000\u0000\u0000^\u038c\u0001\u0000\u0000\u0000`\u0394\u0001\u0000"+ + "\u0000\u0000b\u039c\u0001\u0000\u0000\u0000d\u03a7\u0001\u0000\u0000\u0000"+ + "f\u03a9\u0001\u0000\u0000\u0000h\u03b1\u0001\u0000\u0000\u0000j\u03bb"+ + "\u0001\u0000\u0000\u0000l\u03bd\u0001\u0000\u0000\u0000n\u03c6\u0001\u0000"+ + "\u0000\u0000p\u03c8\u0001\u0000\u0000\u0000r\u03d2\u0001\u0000\u0000\u0000"+ + "t\u03e6\u0001\u0000\u0000\u0000v\u03e8\u0001\u0000\u0000\u0000x\u03f5"+ + "\u0001\u0000\u0000\u0000z\u03f7\u0001\u0000\u0000\u0000|\u03f9\u0001\u0000"+ + "\u0000\u0000~\u0418\u0001\u0000\u0000\u0000\u0080\u041a\u0001\u0000\u0000"+ + "\u0000\u0082\u0421\u0001\u0000\u0000\u0000\u0084\u0423\u0001\u0000\u0000"+ + "\u0000\u0086\u042b\u0001\u0000\u0000\u0000\u0088\u0439\u0001\u0000\u0000"+ + "\u0000\u008a\u043e\u0001\u0000\u0000\u0000\u008c\u0444\u0001\u0000\u0000"+ + "\u0000\u008e\u0459\u0001\u0000\u0000\u0000\u0090\u0460\u0001\u0000\u0000"+ + "\u0000\u0092\u0464\u0001\u0000\u0000\u0000\u0094\u046c\u0001\u0000\u0000"+ + "\u0000\u0096\u0499\u0001\u0000\u0000\u0000\u0098\u049b\u0001\u0000\u0000"+ + "\u0000\u009a\u04a0\u0001\u0000\u0000\u0000\u009c\u04a8\u0001\u0000\u0000"+ + "\u0000\u009e\u04ab\u0001\u0000\u0000\u0000\u00a0\u04c2\u0001\u0000\u0000"+ + "\u0000\u00a2\u050c\u0001\u0000\u0000\u0000\u00a4\u050e\u0001\u0000\u0000"+ + "\u0000\u00a6\u0513\u0001\u0000\u0000\u0000\u00a8\u0522\u0001\u0000\u0000"+ + "\u0000\u00aa\u052a\u0001\u0000\u0000\u0000\u00ac\u052c\u0001\u0000\u0000"+ + "\u0000\u00ae\u0535\u0001\u0000\u0000\u0000\u00b0\u053d\u0001\u0000\u0000"+ + "\u0000\u00b2\u053f\u0001\u0000\u0000\u0000\u00b4\u0541\u0001\u0000\u0000"+ + "\u0000\u00b6\u0544\u0001\u0000\u0000\u0000\u00b8\u0556\u0001\u0000\u0000"+ + "\u0000\u00ba\u0559\u0001\u0000\u0000\u0000\u00bc\u056c\u0001\u0000\u0000"+ + "\u0000\u00be\u056e\u0001\u0000\u0000\u0000\u00c0\u057a\u0001\u0000\u0000"+ + 
"\u0000\u00c2\u058a\u0001\u0000\u0000\u0000\u00c4\u058c\u0001\u0000\u0000"+ + "\u0000\u00c6\u0594\u0001\u0000\u0000\u0000\u00c8\u0597\u0001\u0000\u0000"+ + "\u0000\u00ca\u059c\u0001\u0000\u0000\u0000\u00cc\u05a1\u0001\u0000\u0000"+ + "\u0000\u00ce\u05a3\u0001\u0000\u0000\u0000\u00d0\u05a5\u0001\u0000\u0000"+ + "\u0000\u00d2\u05a7\u0001\u0000\u0000\u0000\u00d4\u05b3\u0001\u0000\u0000"+ + "\u0000\u00d6\u05b5\u0001\u0000\u0000\u0000\u00d8\u05b7\u0001\u0000\u0000"+ + "\u0000\u00da\u05bd\u0001\u0000\u0000\u0000\u00dc\u05c3\u0001\u0000\u0000"+ + "\u0000\u00de\u05c5\u0001\u0000\u0000\u0000\u00e0\u05c7\u0001\u0000\u0000"+ + "\u0000\u00e2\u05c9\u0001\u0000\u0000\u0000\u00e4\u05cb\u0001\u0000\u0000"+ + "\u0000\u00e6\u05d3\u0001\u0000\u0000\u0000\u00e8\u05dc\u0001\u0000\u0000"+ + "\u0000\u00ea\u05e0\u0001\u0000\u0000\u0000\u00ec\u05ea\u0001\u0000\u0000"+ + "\u0000\u00ee\u05ec\u0001\u0000\u0000\u0000\u00f0\u05f5\u0001\u0000\u0000"+ + "\u0000\u00f2\u05ff\u0001\u0000\u0000\u0000\u00f4\u0601\u0001\u0000\u0000"+ + "\u0000\u00f6\u0605\u0001\u0000\u0000\u0000\u00f8\u0609\u0001\u0000\u0000"+ + "\u0000\u00fa\u060e\u0001\u0000\u0000\u0000\u00fc\u0622\u0001\u0000\u0000"+ + "\u0000\u00fe\u0626\u0001\u0000\u0000\u0000\u0100\u062b\u0001\u0000\u0000"+ + "\u0000\u0102\u0638\u0001\u0000\u0000\u0000\u0104\u0644\u0001\u0000\u0000"+ + "\u0000\u0106\u064f\u0001\u0000\u0000\u0000\u0108\u0654\u0001\u0000\u0000"+ + "\u0000\u010a\u0658\u0001\u0000\u0000\u0000\u010c\u0664\u0001\u0000\u0000"+ + "\u0000\u010e\u067b\u0001\u0000\u0000\u0000\u0110\u067d\u0001\u0000\u0000"+ + "\u0000\u0112\u0685\u0001\u0000\u0000\u0000\u0114\u0689\u0001\u0000\u0000"+ + "\u0000\u0116\u0692\u0001\u0000\u0000\u0000\u0118\u0696\u0001\u0000\u0000"+ + "\u0000\u011a\u069a\u0001\u0000\u0000\u0000\u011c\u069c\u0001\u0000\u0000"+ + "\u0000\u011e\u06a0\u0001\u0000\u0000\u0000\u0120\u06a6\u0001\u0000\u0000"+ + "\u0000\u0122\u06aa\u0001\u0000\u0000\u0000\u0124\u06b0\u0001\u0000\u0000"+ + "\u0000\u0126\u06b4\u0001\u0000\u0000\u0000\u0128\u06b8\u0001\u0000\u0000"+ + "\u0000\u012a\u06e3\u0001\u0000\u0000\u0000\u012c\u06e5\u0001\u0000\u0000"+ + "\u0000\u012e\u06e8\u0001\u0000\u0000\u0000\u0130\u06ef\u0001\u0000\u0000"+ + "\u0000\u0132\u06fc\u0001\u0000\u0000\u0000\u0134\u06fe\u0001\u0000\u0000"+ + "\u0000\u0136\u0703\u0001\u0000\u0000\u0000\u0138\u0706\u0001\u0000\u0000"+ + "\u0000\u013a\u070a\u0001\u0000\u0000\u0000\u013c\u070e\u0001\u0000\u0000"+ + "\u0000\u013e\u0721\u0001\u0000\u0000\u0000\u0140\u072e\u0001\u0000\u0000"+ + "\u0000\u0142\u0731\u0001\u0000\u0000\u0000\u0144\u0740\u0001\u0000\u0000"+ + "\u0000\u0146\u0748\u0001\u0000\u0000\u0000\u0148\u075b\u0001\u0000\u0000"+ + "\u0000\u014a\u075d\u0001\u0000\u0000\u0000\u014c\u0765\u0001\u0000\u0000"+ + "\u0000\u014e\u0789\u0001\u0000\u0000\u0000\u0150\u078b\u0001\u0000\u0000"+ + "\u0000\u0152\u0798\u0001\u0000\u0000\u0000\u0154\u079a\u0001\u0000\u0000"+ + "\u0000\u0156\u07a9\u0001\u0000\u0000\u0000\u0158\u07b8\u0001\u0000\u0000"+ + "\u0000\u015a\u07cb\u0001\u0000\u0000\u0000\u015c\u07cd\u0001\u0000\u0000"+ + "\u0000\u015e\u07cf\u0001\u0000\u0000\u0000\u0160\u07e9\u0001\u0000\u0000"+ + "\u0000\u0162\u07ec\u0001\u0000\u0000\u0000\u0164\u07fb\u0001\u0000\u0000"+ + "\u0000\u0166\u0818\u0001\u0000\u0000\u0000\u0168\u081a\u0001\u0000\u0000"+ + "\u0000\u016a\u0822\u0001\u0000\u0000\u0000\u016c\u0826\u0001\u0000\u0000"+ + "\u0000\u016e\u0833\u0001\u0000\u0000\u0000\u0170\u0835\u0001\u0000\u0000"+ + "\u0000\u0172\u0841\u0001\u0000\u0000\u0000\u0174\u0859\u0001\u0000\u0000"+ + 
"\u0000\u0176\u0861\u0001\u0000\u0000\u0000\u0178\u0876\u0001\u0000\u0000"+ + "\u0000\u017a\u087f\u0001\u0000\u0000\u0000\u017c\u0883\u0001\u0000\u0000"+ + "\u0000\u017e\u0895\u0001\u0000\u0000\u0000\u0180\u089b\u0001\u0000\u0000"+ + "\u0000\u0182\u089f\u0001\u0000\u0000\u0000\u0184\u08a5\u0001\u0000\u0000"+ + "\u0000\u0186\u08ad\u0001\u0000\u0000\u0000\u0188\u08af\u0001\u0000\u0000"+ + "\u0000\u018a\u08bd\u0001\u0000\u0000\u0000\u018c\u08bf\u0001\u0000\u0000"+ + "\u0000\u018e\u08c2\u0001\u0000\u0000\u0000\u0190\u08c6\u0001\u0000\u0000"+ + "\u0000\u0192\u08ca\u0001\u0000\u0000\u0000\u0194\u08cd\u0001\u0000\u0000"+ + "\u0000\u0196\u08d1\u0001\u0000\u0000\u0000\u0198\u08d5\u0001\u0000\u0000"+ + "\u0000\u019a\u08df\u0001\u0000\u0000\u0000\u019c\u08e3\u0001\u0000\u0000"+ + "\u0000\u019e\u08e7\u0001\u0000\u0000\u0000\u01a0\u08f5\u0001\u0000\u0000"+ + "\u0000\u01a2\u08f7\u0001\u0000\u0000\u0000\u01a4\u0901\u0001\u0000\u0000"+ + "\u0000\u01a6\u0905\u0001\u0000\u0000\u0000\u01a8\u0911\u0001\u0000\u0000"+ + "\u0000\u01aa\u091a\u0001\u0000\u0000\u0000\u01ac\u0929\u0001\u0000\u0000"+ + "\u0000\u01ae\u0938\u0001\u0000\u0000\u0000\u01b0\u093a\u0001\u0000\u0000"+ + "\u0000\u01b2\u0945\u0001\u0000\u0000\u0000\u01b4\u0947\u0001\u0000\u0000"+ + "\u0000\u01b6\u094a\u0001\u0000\u0000\u0000\u01b8\u094d\u0001\u0000\u0000"+ + "\u0000\u01ba\u0950\u0001\u0000\u0000\u0000\u01bc\u0955\u0001\u0000\u0000"+ + "\u0000\u01be\u0959\u0001\u0000\u0000\u0000\u01c0\u095b\u0001\u0000\u0000"+ + "\u0000\u01c2\u09fc\u0001\u0000\u0000\u0000\u01c4\u01c5\u0003\u0002\u0001"+ + "\u0000\u01c5\u01c6\u0005\u0000\u0000\u0001\u01c6\u0001\u0001\u0000\u0000"+ + "\u0000\u01c7\u01e1\u0003\u0004\u0002\u0000\u01c8\u01e1\u0003\u0094J\u0000"+ + "\u01c9\u01e1\u0003\u009eO\u0000\u01ca\u01e1\u0003\u00b6[\u0000\u01cb\u01e1"+ + "\u0003\u00fa}\u0000\u01cc\u01e1\u0003\u014c\u00a6\u0000\u01cd\u01e1\u0003"+ + "\u0178\u00bc\u0000\u01ce\u01e1\u0003\u017a\u00bd\u0000\u01cf\u01e1\u0003"+ + "\u00eew\u0000\u01d0\u01e1\u0003\u00f4z\u0000\u01d1\u01e1\u0003\u0170\u00b8"+ + "\u0000\u01d2\u01e1\u0003\u00f0x\u0000\u01d3\u01e1\u0003\u00f6{\u0000\u01d4"+ + "\u01e1\u0003\u0164\u00b2\u0000\u01d5\u01e1\u0003\u0180\u00c0\u0000\u01d6"+ + "\u01e1\u0003\u017e\u00bf\u0000\u01d7\u01e1\u0003\u0130\u0098\u0000\u01d8"+ + "\u01e1\u0003\u017c\u00be\u0000\u01d9\u01e1\u0003\u014a\u00a5\u0000\u01da"+ + "\u01e1\u0003\u0182\u00c1\u0000\u01db\u01e1\u0003\u0184\u00c2\u0000\u01dc"+ + "\u01e1\u0003\u0172\u00b9\u0000\u01dd\u01e1\u0003\u00f8|\u0000\u01de\u01e1"+ + "\u0003\u0176\u00bb\u0000\u01df\u01e1\u0003\n\u0005\u0000\u01e0\u01c7\u0001"+ + "\u0000\u0000\u0000\u01e0\u01c8\u0001\u0000\u0000\u0000\u01e0\u01c9\u0001"+ + "\u0000\u0000\u0000\u01e0\u01ca\u0001\u0000\u0000\u0000\u01e0\u01cb\u0001"+ + "\u0000\u0000\u0000\u01e0\u01cc\u0001\u0000\u0000\u0000\u01e0\u01cd\u0001"+ + "\u0000\u0000\u0000\u01e0\u01ce\u0001\u0000\u0000\u0000\u01e0\u01cf\u0001"+ + "\u0000\u0000\u0000\u01e0\u01d0\u0001\u0000\u0000\u0000\u01e0\u01d1\u0001"+ + "\u0000\u0000\u0000\u01e0\u01d2\u0001\u0000\u0000\u0000\u01e0\u01d3\u0001"+ + "\u0000\u0000\u0000\u01e0\u01d4\u0001\u0000\u0000\u0000\u01e0\u01d5\u0001"+ + "\u0000\u0000\u0000\u01e0\u01d6\u0001\u0000\u0000\u0000\u01e0\u01d7\u0001"+ + "\u0000\u0000\u0000\u01e0\u01d8\u0001\u0000\u0000\u0000\u01e0\u01d9\u0001"+ + "\u0000\u0000\u0000\u01e0\u01da\u0001\u0000\u0000\u0000\u01e0\u01db\u0001"+ + "\u0000\u0000\u0000\u01e0\u01dc\u0001\u0000\u0000\u0000\u01e0\u01dd\u0001"+ + "\u0000\u0000\u0000\u01e0\u01de\u0001\u0000\u0000\u0000\u01e0\u01df\u0001"+ + 
"\u0000\u0000\u0000\u01e1\u0003\u0001\u0000\u0000\u0000\u01e2\u01e4\u0003"+ + "\u0006\u0003\u0000\u01e3\u01e2\u0001\u0000\u0000\u0000\u01e3\u01e4\u0001"+ + "\u0000\u0000\u0000\u01e4\u01e5\u0001\u0000\u0000\u0000\u01e5\u01e6\u0003"+ + "\u000e\u0007\u0000\u01e6\u0005\u0001\u0000\u0000\u0000\u01e7\u01e8\u0005"+ + "\u001e\u0000\u0000\u01e8\u01e9\u0003\b\u0004\u0000\u01e9\u01ef\u0005\u00ac"+ + "\u0000\u0000\u01ea\u01eb\u0003\b\u0004\u0000\u01eb\u01ec\u0005\u00ac\u0000"+ + "\u0000\u01ec\u01ee\u0001\u0000\u0000\u0000\u01ed\u01ea\u0001\u0000\u0000"+ + "\u0000\u01ee\u01f1\u0001\u0000\u0000\u0000\u01ef\u01ed\u0001\u0000\u0000"+ + "\u0000\u01ef\u01f0\u0001\u0000\u0000\u0000\u01f0\u0007\u0001\u0000\u0000"+ + "\u0000\u01f1\u01ef\u0001\u0000\u0000\u0000\u01f2\u01f3\u0005\u0005\u0000"+ + "\u0000\u01f3\u01f4\u0003\u00bc^\u0000\u01f4\t\u0001\u0000\u0000\u0000"+ + "\u01f5\u01f6\u0003\u0006\u0003\u0000\u01f6\u01f7\u0003\u0086C\u0000\u01f7"+ + "\u000b\u0001\u0000\u0000\u0000\u01f8\u01f9\u0003<\u001e\u0000\u01f9\r"+ + "\u0001\u0000\u0000\u0000\u01fa\u01fb\u0003(\u0014\u0000\u01fb\u01fd\u0003"+ + "\u0010\b\u0000\u01fc\u01fe\u0003&\u0013\u0000\u01fd\u01fc\u0001\u0000"+ + "\u0000\u0000\u01fd\u01fe\u0001\u0000\u0000\u0000\u01fe\u0200\u0001\u0000"+ + "\u0000\u0000\u01ff\u0201\u00036\u001b\u0000\u0200\u01ff\u0001\u0000\u0000"+ + "\u0000\u0200\u0201\u0001\u0000\u0000\u0000\u0201\u0203\u0001\u0000\u0000"+ + "\u0000\u0202\u0204\u00032\u0019\u0000\u0203\u0202\u0001\u0000\u0000\u0000"+ + "\u0203\u0204\u0001\u0000\u0000\u0000\u0204\u0206\u0001\u0000\u0000\u0000"+ + "\u0205\u0207\u00038\u001c\u0000\u0206\u0205\u0001\u0000\u0000\u0000\u0206"+ + "\u0207\u0001\u0000\u0000\u0000\u0207\u0209\u0001\u0000\u0000\u0000\u0208"+ + "\u020a\u0003:\u001d\u0000\u0209\u0208\u0001\u0000\u0000\u0000\u0209\u020a"+ + "\u0001\u0000\u0000\u0000\u020a\u000f\u0001\u0000\u0000\u0000\u020b\u020c"+ + "\u00056\u0000\u0000\u020c\u0211\u0003\u0012\t\u0000\u020d\u020e\u0005"+ + "\u00ad\u0000\u0000\u020e\u0210\u0003\u0012\t\u0000\u020f\u020d\u0001\u0000"+ + "\u0000\u0000\u0210\u0213\u0001\u0000\u0000\u0000\u0211\u020f\u0001\u0000"+ + "\u0000\u0000\u0211\u0212\u0001\u0000\u0000\u0000\u0212\u0220\u0001\u0000"+ + "\u0000\u0000\u0213\u0211\u0001\u0000\u0000\u0000\u0214\u021c\u0005\u00ad"+ + "\u0000\u0000\u0215\u0217\u0003\f\u0006\u0000\u0216\u0218\u0005\u000e\u0000"+ + "\u0000\u0217\u0216\u0001\u0000\u0000\u0000\u0217\u0218\u0001\u0000\u0000"+ + "\u0000\u0218\u0219\u0001\u0000\u0000\u0000\u0219\u021a\u0005\u0005\u0000"+ + "\u0000\u021a\u021d\u0001\u0000\u0000\u0000\u021b\u021d\u0003$\u0012\u0000"+ + "\u021c\u0215\u0001\u0000\u0000\u0000\u021c\u021b\u0001\u0000\u0000\u0000"+ + "\u021d\u021f\u0001\u0000\u0000\u0000\u021e\u0214\u0001\u0000\u0000\u0000"+ + "\u021f\u0222\u0001\u0000\u0000\u0000\u0220\u021e\u0001\u0000\u0000\u0000"+ + "\u0220\u0221\u0001\u0000\u0000\u0000\u0221\u0011\u0001\u0000\u0000\u0000"+ + "\u0222\u0220\u0001\u0000\u0000\u0000\u0223\u0227\u0003\u001e\u000f\u0000"+ + "\u0224\u0227\u0003\u0014\n\u0000\u0225\u0227\u0003\u001a\r\u0000\u0226"+ + "\u0223\u0001\u0000\u0000\u0000\u0226\u0224\u0001\u0000\u0000\u0000\u0226"+ + "\u0225\u0001\u0000\u0000\u0000\u0227\u0013\u0001\u0000\u0000\u0000\u0228"+ + "\u0229\u0005[\u0000\u0000\u0229\u022a\u0005\u007f\u0000\u0000\u022a\u022b"+ + "\u0005\u00af\u0000\u0000\u022b\u0231\u0003\u001e\u000f\u0000\u022c\u022d"+ + "\u0005\f\u0000\u0000\u022d\u022e\u0005\u00af\u0000\u0000\u022e\u022f\u0003"+ + "\u0016\u000b\u0000\u022f\u0230\u0005\u00b0\u0000\u0000\u0230\u0232\u0001"+ + 
"\u0000\u0000\u0000\u0231\u022c\u0001\u0000\u0000\u0000\u0231\u0232\u0001"+ + "\u0000\u0000\u0000\u0232\u0238\u0001\u0000\u0000\u0000\u0233\u0234\u0005"+ + "\"\u0000\u0000\u0234\u0235\u0005\u00af\u0000\u0000\u0235\u0236\u0003\u0018"+ + "\f\u0000\u0236\u0237\u0005\u00b0\u0000\u0000\u0237\u0239\u0001\u0000\u0000"+ + "\u0000\u0238\u0233\u0001\u0000\u0000\u0000\u0238\u0239\u0001\u0000\u0000"+ + "\u0000\u0239\u023a\u0001\u0000\u0000\u0000\u023a\u023b\u0005\u00b0\u0000"+ + "\u0000\u023b\u0015\u0001\u0000\u0000\u0000\u023c\u0241\u0003\u001e\u000f"+ + "\u0000\u023d\u023e\u0005\u00ad\u0000\u0000\u023e\u0240\u0003\u001e\u000f"+ + "\u0000\u023f\u023d\u0001\u0000\u0000\u0000\u0240\u0243\u0001\u0000\u0000"+ + "\u0000\u0241\u023f\u0001\u0000\u0000\u0000\u0241\u0242\u0001\u0000\u0000"+ + "\u0000\u0242\u0017\u0001\u0000\u0000\u0000\u0243\u0241\u0001\u0000\u0000"+ + "\u0000\u0244\u0249\u0003\u001e\u000f\u0000\u0245\u0246\u0005\u00ad\u0000"+ + "\u0000\u0246\u0248\u0003\u001e\u000f\u0000\u0247\u0245\u0001\u0000\u0000"+ + "\u0000\u0248\u024b\u0001\u0000\u0000\u0000\u0249\u0247\u0001\u0000\u0000"+ + "\u0000\u0249\u024a\u0001\u0000\u0000\u0000\u024a\u0019\u0001\u0000\u0000"+ + "\u0000\u024b\u0249\u0001\u0000\u0000\u0000\u024c\u024d\u0003\u001e\u000f"+ + "\u0000\u024d\u0251\u0003\u001c\u000e\u0000\u024e\u0250\u0003\u001c\u000e"+ + "\u0000\u024f\u024e\u0001\u0000\u0000\u0000\u0250\u0253\u0001\u0000\u0000"+ + "\u0000\u0251\u024f\u0001\u0000\u0000\u0000\u0251\u0252\u0001\u0000\u0000"+ + "\u0000\u0252\u001b\u0001\u0000\u0000\u0000\u0253\u0251\u0001\u0000\u0000"+ + "\u0000\u0254\u0255\u0005\u0097\u0000\u0000\u0255\u0256\u0003\u001e\u000f"+ + "\u0000\u0256\u001d\u0001\u0000\u0000\u0000\u0257\u025a\u0003 \u0010\u0000"+ + "\u0258\u0259\u0005a\u0000\u0000\u0259\u025b\u0003<\u001e\u0000\u025a\u0258"+ + "\u0001\u0000\u0000\u0000\u025a\u025b\u0001\u0000\u0000\u0000\u025b\u001f"+ + "\u0001\u0000\u0000\u0000\u025c\u0261\u0003\u00fc~\u0000\u025d\u025f\u0005"+ + "\u000e\u0000\u0000\u025e\u025d\u0001\u0000\u0000\u0000\u025e\u025f\u0001"+ + "\u0000\u0000\u0000\u025f\u0260\u0001\u0000\u0000\u0000\u0260\u0262\u0003"+ + "\"\u0011\u0000\u0261\u025e\u0001\u0000\u0000\u0000\u0261\u0262\u0001\u0000"+ + "\u0000\u0000\u0262!\u0001\u0000\u0000\u0000\u0263\u0266\u0005\u0005\u0000"+ + "\u0000\u0264\u0266\u0003\u01c2\u00e1\u0000\u0265\u0263\u0001\u0000\u0000"+ + "\u0000\u0265\u0264\u0001\u0000\u0000\u0000\u0266#\u0001\u0000\u0000\u0000"+ + "\u0267\u0268\u0005\u0090\u0000\u0000\u0268\u0269\u0005\u00af\u0000\u0000"+ + "\u0269\u026b\u0003f3\u0000\u026a\u026c\u0005\u000e\u0000\u0000\u026b\u026a"+ + "\u0001\u0000\u0000\u0000\u026b\u026c\u0001\u0000\u0000\u0000\u026c\u026d"+ + "\u0001\u0000\u0000\u0000\u026d\u026e\u0005\u0005\u0000\u0000\u026e\u0278"+ + "\u0001\u0000\u0000\u0000\u026f\u0270\u0005\u00ad\u0000\u0000\u0270\u0272"+ + "\u0003f3\u0000\u0271\u0273\u0005\u000e\u0000\u0000\u0272\u0271\u0001\u0000"+ + "\u0000\u0000\u0272\u0273\u0001\u0000\u0000\u0000\u0273\u0274\u0001\u0000"+ + "\u0000\u0000\u0274\u0275\u0005\u0005\u0000\u0000\u0275\u0277\u0001\u0000"+ + "\u0000\u0000\u0276\u026f\u0001\u0000\u0000\u0000\u0277\u027a\u0001\u0000"+ + "\u0000\u0000\u0278\u0276\u0001\u0000\u0000\u0000\u0278\u0279\u0001\u0000"+ + "\u0000\u0000\u0279\u027b\u0001\u0000\u0000\u0000\u027a\u0278\u0001\u0000"+ + "\u0000\u0000\u027b\u027c\u0005\u00b0\u0000\u0000\u027c%\u0001\u0000\u0000"+ + "\u0000\u027d\u027e\u0005\u008d\u0000\u0000\u027e\u027f\u0003\f\u0006\u0000"+ + "\u027f\'\u0001\u0000\u0000\u0000\u0280\u0281\u0005x\u0000\u0000\u0281"+ + 
"\u0282\u0003*\u0015\u0000\u0282)\u0001\u0000\u0000\u0000\u0283\u0285\u0003"+ + ",\u0016\u0000\u0284\u0283\u0001\u0000\u0000\u0000\u0284\u0285\u0001\u0000"+ + "\u0000\u0000\u0285\u0287\u0001\u0000\u0000\u0000\u0286\u0288\u0005%\u0000"+ + "\u0000\u0287\u0286\u0001\u0000\u0000\u0000\u0287\u0288\u0001\u0000\u0000"+ + "\u0000\u0288\u0295\u0001\u0000\u0000\u0000\u0289\u0296\u0005\u00b5\u0000"+ + "\u0000\u028a\u028b\u0003\f\u0006\u0000\u028b\u0292\u00030\u0018\u0000"+ + "\u028c\u028d\u0005\u00ad\u0000\u0000\u028d\u028e\u0003\f\u0006\u0000\u028e"+ + "\u028f\u00030\u0018\u0000\u028f\u0291\u0001\u0000\u0000\u0000\u0290\u028c"+ + "\u0001\u0000\u0000\u0000\u0291\u0294\u0001\u0000\u0000\u0000\u0292\u0290"+ + "\u0001\u0000\u0000\u0000\u0292\u0293\u0001\u0000\u0000\u0000\u0293\u0296"+ + "\u0001\u0000\u0000\u0000\u0294\u0292\u0001\u0000\u0000\u0000\u0295\u0289"+ + "\u0001\u0000\u0000\u0000\u0295\u028a\u0001\u0000\u0000\u0000\u0296+\u0001"+ + "\u0000\u0000\u0000\u0297\u029b\u0005\u0001\u0000\u0000\u0298\u029a\u0003"+ + ".\u0017\u0000\u0299\u0298\u0001\u0000\u0000\u0000\u029a\u029d\u0001\u0000"+ + "\u0000\u0000\u029b\u0299\u0001\u0000\u0000\u0000\u029b\u029c\u0001\u0000"+ + "\u0000\u0000\u029c\u029e\u0001\u0000\u0000\u0000\u029d\u029b\u0001\u0000"+ + "\u0000\u0000\u029e\u029f\u0005\u0002\u0000\u0000\u029f-\u0001\u0000\u0000"+ + "\u0000\u02a0\u02a1\u0005j\u0000\u0000\u02a1\u02a2\u0005\u00af\u0000\u0000"+ + "\u02a2\u02a6\u0003\u00fc~\u0000\u02a3\u02a5\u0003\u014e\u00a7\u0000\u02a4"+ + "\u02a3\u0001\u0000\u0000\u0000\u02a5\u02a8\u0001\u0000\u0000\u0000\u02a6"+ + "\u02a4\u0001\u0000\u0000\u0000\u02a6\u02a7\u0001\u0000\u0000\u0000\u02a7"+ + "\u02a9\u0001\u0000\u0000\u0000\u02a8\u02a6\u0001\u0000\u0000\u0000\u02a9"+ + "\u02aa\u0005\u00b0\u0000\u0000\u02aa\u02bc\u0001\u0000\u0000\u0000\u02ab"+ + "\u02ac\u00053\u0000\u0000\u02ac\u02ad\u0005\u00af\u0000\u0000\u02ad\u02ae"+ + "\u0003\u00fc~\u0000\u02ae\u02af\u0003\u014e\u00a7\u0000\u02af\u02b0\u0005"+ + "\u00b0\u0000\u0000\u02b0\u02bc\u0001\u0000\u0000\u0000\u02b1\u02b2\u0005"+ + "k\u0000\u0000\u02b2\u02b3\u0005\u00af\u0000\u0000\u02b3\u02b4\u0003\u00fc"+ + "~\u0000\u02b4\u02b5\u0005\u00b0\u0000\u0000\u02b5\u02bc\u0001\u0000\u0000"+ + "\u0000\u02b6\u02b7\u00054\u0000\u0000\u02b7\u02b8\u0005\u00af\u0000\u0000"+ + "\u02b8\u02b9\u0003\u00fc~\u0000\u02b9\u02ba\u0005\u00b0\u0000\u0000\u02ba"+ + "\u02bc\u0001\u0000\u0000\u0000\u02bb\u02a0\u0001\u0000\u0000\u0000\u02bb"+ + "\u02ab\u0001\u0000\u0000\u0000\u02bb\u02b1\u0001\u0000\u0000\u0000\u02bb"+ + "\u02b6\u0001\u0000\u0000\u0000\u02bc\u02be\u0001\u0000\u0000\u0000\u02bd"+ + "\u02bf\u0005\u00d1\u0000\u0000\u02be\u02bd\u0001\u0000\u0000\u0000\u02be"+ + "\u02bf\u0001\u0000\u0000\u0000\u02bf/\u0001\u0000\u0000\u0000\u02c0\u02c1"+ + "\u0005\u000e\u0000\u0000\u02c1\u02c3\u0003\u01c2\u00e1\u0000\u02c2\u02c0"+ + "\u0001\u0000\u0000\u0000\u02c2\u02c3\u0001\u0000\u0000\u0000\u02c31\u0001"+ + "\u0000\u0000\u0000\u02c4\u02c5\u0005d\u0000\u0000\u02c5\u02c6\u0005\u0013"+ + "\u0000\u0000\u02c6\u02c7\u0003\f\u0006\u0000\u02c7\u02ce\u00034\u001a"+ + "\u0000\u02c8\u02c9\u0005\u00ad\u0000\u0000\u02c9\u02ca\u0003\f\u0006\u0000"+ + "\u02ca\u02cb\u00034\u001a\u0000\u02cb\u02cd\u0001\u0000\u0000\u0000\u02cc"+ + "\u02c8\u0001\u0000\u0000\u0000\u02cd\u02d0\u0001\u0000\u0000\u0000\u02ce"+ + "\u02cc\u0001\u0000\u0000\u0000\u02ce\u02cf\u0001\u0000\u0000\u0000\u02cf"+ + "3\u0001\u0000\u0000\u0000\u02d0\u02ce\u0001\u0000\u0000\u0000\u02d1\u02d3"+ + "\u0007\u0000\u0000\u0000\u02d2\u02d1\u0001\u0000\u0000\u0000\u02d2\u02d3"+ + 
"\u0001\u0000\u0000\u0000\u02d3\u02d6\u0001\u0000\u0000\u0000\u02d4\u02d5"+ + "\u0005^\u0000\u0000\u02d5\u02d7\u0007\u0001\u0000\u0000\u02d6\u02d4\u0001"+ + "\u0000\u0000\u0000\u02d6\u02d7\u0001\u0000\u0000\u0000\u02d75\u0001\u0000"+ + "\u0000\u0000\u02d8\u02d9\u0005;\u0000\u0000\u02d9\u02da\u0005\u0013\u0000"+ + "\u0000\u02da\u02df\u0003\f\u0006\u0000\u02db\u02dc\u0005\u00ad\u0000\u0000"+ + "\u02dc\u02de\u0003\f\u0006\u0000\u02dd\u02db\u0001\u0000\u0000\u0000\u02de"+ + "\u02e1\u0001\u0000\u0000\u0000\u02df\u02dd\u0001\u0000\u0000\u0000\u02df"+ + "\u02e0\u0001\u0000\u0000\u0000\u02e07\u0001\u0000\u0000\u0000\u02e1\u02df"+ + "\u0001\u0000\u0000\u0000\u02e2\u02e3\u0005P\u0000\u0000\u02e3\u02e4\u0003"+ + "`0\u0000\u02e49\u0001\u0000\u0000\u0000\u02e5\u02e6\u0005_\u0000\u0000"+ + "\u02e6\u02e7\u0003`0\u0000\u02e7;\u0001\u0000\u0000\u0000\u02e8\u02e9"+ + "\u0006\u001e\uffff\uffff\u0000\u02e9\u02ea\u0003>\u001f\u0000\u02ea\u02f0"+ + "\u0001\u0000\u0000\u0000\u02eb\u02ec\n\u0001\u0000\u0000\u02ec\u02ed\u0005"+ + "c\u0000\u0000\u02ed\u02ef\u0003>\u001f\u0000\u02ee\u02eb\u0001\u0000\u0000"+ + "\u0000\u02ef\u02f2\u0001\u0000\u0000\u0000\u02f0\u02ee\u0001\u0000\u0000"+ + "\u0000\u02f0\u02f1\u0001\u0000\u0000\u0000\u02f1=\u0001\u0000\u0000\u0000"+ + "\u02f2\u02f0\u0001\u0000\u0000\u0000\u02f3\u02f4\u0006\u001f\uffff\uffff"+ + "\u0000\u02f4\u02f5\u0003@ \u0000\u02f5\u02fb\u0001\u0000\u0000\u0000\u02f6"+ + "\u02f7\n\u0001\u0000\u0000\u02f7\u02f8\u0005\r\u0000\u0000\u02f8\u02fa"+ + "\u0003@ \u0000\u02f9\u02f6\u0001\u0000\u0000\u0000\u02fa\u02fd\u0001\u0000"+ + "\u0000\u0000\u02fb\u02f9\u0001\u0000\u0000\u0000\u02fb\u02fc\u0001\u0000"+ + "\u0000\u0000\u02fc?\u0001\u0000\u0000\u0000\u02fd\u02fb\u0001\u0000\u0000"+ + "\u0000\u02fe\u0300\u0005]\u0000\u0000\u02ff\u02fe\u0001\u0000\u0000\u0000"+ + "\u02ff\u0300\u0001\u0000\u0000\u0000\u0300\u0301\u0001\u0000\u0000\u0000"+ + "\u0301\u0302\u0003B!\u0000\u0302A\u0001\u0000\u0000\u0000\u0303\u0309"+ + "\u0003D\"\u0000\u0304\u0306\u0005G\u0000\u0000\u0305\u0307\u0005]\u0000"+ + "\u0000\u0306\u0305\u0001\u0000\u0000\u0000\u0306\u0307\u0001\u0000\u0000"+ + "\u0000\u0307\u0308\u0001\u0000\u0000\u0000\u0308\u030a\u0005\u00ca\u0000"+ + "\u0000\u0309\u0304\u0001\u0000\u0000\u0000\u0309\u030a\u0001\u0000\u0000"+ + "\u0000\u030aC\u0001\u0000\u0000\u0000\u030b\u0311\u0003F#\u0000\u030c"+ + "\u0311\u0003H$\u0000\u030d\u0311\u0003N\'\u0000\u030e\u0311\u0003Z-\u0000"+ + "\u030f\u0311\u0003\\.\u0000\u0310\u030b\u0001\u0000\u0000\u0000\u0310"+ + "\u030c\u0001\u0000\u0000\u0000\u0310\u030d\u0001\u0000\u0000\u0000\u0310"+ + "\u030e\u0001\u0000\u0000\u0000\u0310\u030f\u0001\u0000\u0000\u0000\u0311"+ + "E\u0001\u0000\u0000\u0000\u0312\u0313\u0003^/\u0000\u0313\u0314\u0005"+ + "\u0012\u0000\u0000\u0314\u0315\u0003^/\u0000\u0315\u0316\u0005\r\u0000"+ + "\u0000\u0316\u0317\u0003^/\u0000\u0317G\u0001\u0000\u0000\u0000\u0318"+ + "\u031f\u0003^/\u0000\u0319\u031c\u0003J%\u0000\u031a\u031c\u0003L&\u0000"+ + "\u031b\u0319\u0001\u0000\u0000\u0000\u031b\u031a\u0001\u0000\u0000\u0000"+ + "\u031c\u031d\u0001\u0000\u0000\u0000\u031d\u031e\u0003^/\u0000\u031e\u0320"+ + "\u0001\u0000\u0000\u0000\u031f\u031b\u0001\u0000\u0000\u0000\u031f\u0320"+ + "\u0001\u0000\u0000\u0000\u0320I\u0001\u0000\u0000\u0000\u0321\u0322\u0007"+ + "\u0002\u0000\u0000\u0322K\u0001\u0000\u0000\u0000\u0323\u032a\u0005\u00c3"+ + "\u0000\u0000\u0324\u032a\u0005\u00c4\u0000\u0000\u0325\u032a\u0005\u00c1"+ + "\u0000\u0000\u0326\u032a\u0005\u00c2\u0000\u0000\u0327\u032a\u0005\u00bf"+ + 
"\u0000\u0000\u0328\u032a\u0005\u00c0\u0000\u0000\u0329\u0323\u0001\u0000"+ + "\u0000\u0000\u0329\u0324\u0001\u0000\u0000\u0000\u0329\u0325\u0001\u0000"+ + "\u0000\u0000\u0329\u0326\u0001\u0000\u0000\u0000\u0329\u0327\u0001\u0000"+ + "\u0000\u0000\u0329\u0328\u0001\u0000\u0000\u0000\u032aM\u0001\u0000\u0000"+ + "\u0000\u032b\u032f\u0003P(\u0000\u032c\u032f\u0003V+\u0000\u032d\u032f"+ + "\u0003X,\u0000\u032e\u032b\u0001\u0000\u0000\u0000\u032e\u032c\u0001\u0000"+ + "\u0000\u0000\u032e\u032d\u0001\u0000\u0000\u0000\u032fO\u0001\u0000\u0000"+ + "\u0000\u0330\u0331\u0003R)\u0000\u0331\u0332\u0005A\u0000\u0000\u0332"+ + "\u0333\u0005\u00af\u0000\u0000\u0333\u0336\u0003T*\u0000\u0334\u0335\u0005"+ + "\u00ad\u0000\u0000\u0335\u0337\u0003T*\u0000\u0336\u0334\u0001\u0000\u0000"+ + "\u0000\u0337\u0338\u0001\u0000\u0000\u0000\u0338\u0336\u0001\u0000\u0000"+ + "\u0000\u0338\u0339\u0001\u0000\u0000\u0000\u0339\u033a\u0001\u0000\u0000"+ + "\u0000\u033a\u033b\u0005\u00b0\u0000\u0000\u033bQ\u0001\u0000\u0000\u0000"+ + "\u033c\u033d\u0005\u00af\u0000\u0000\u033d\u0342\u0003^/\u0000\u033e\u033f"+ + "\u0005\u00ad\u0000\u0000\u033f\u0341\u0003^/\u0000\u0340\u033e\u0001\u0000"+ + "\u0000\u0000\u0341\u0344\u0001\u0000\u0000\u0000\u0342\u0340\u0001\u0000"+ + "\u0000\u0000\u0342\u0343\u0001\u0000\u0000\u0000\u0343\u0345\u0001\u0000"+ + "\u0000\u0000\u0344\u0342\u0001\u0000\u0000\u0000\u0345\u0346\u0005\u00b0"+ + "\u0000\u0000\u0346S\u0001\u0000\u0000\u0000\u0347\u0348\u0005\u00af\u0000"+ + "\u0000\u0348\u034d\u0003\f\u0006\u0000\u0349\u034a\u0005\u00ad\u0000\u0000"+ + "\u034a\u034c\u0003\f\u0006\u0000\u034b\u0349\u0001\u0000\u0000\u0000\u034c"+ + "\u034f\u0001\u0000\u0000\u0000\u034d\u034b\u0001\u0000\u0000\u0000\u034d"+ + "\u034e\u0001\u0000\u0000\u0000\u034e\u0350\u0001\u0000\u0000\u0000\u034f"+ + "\u034d\u0001\u0000\u0000\u0000\u0350\u0351\u0005\u00b0\u0000\u0000\u0351"+ + "U\u0001\u0000\u0000\u0000\u0352\u0353\u0003^/\u0000\u0353\u0354\u0005"+ + "A\u0000\u0000\u0354\u0355\u0005\u00af\u0000\u0000\u0355\u0358\u0003\f"+ + "\u0006\u0000\u0356\u0357\u0005\u00ad\u0000\u0000\u0357\u0359\u0003\f\u0006"+ + "\u0000\u0358\u0356\u0001\u0000\u0000\u0000\u0359\u035a\u0001\u0000\u0000"+ + "\u0000\u035a\u0358\u0001\u0000\u0000\u0000\u035a\u035b\u0001\u0000\u0000"+ + "\u0000\u035b\u035c\u0001\u0000\u0000\u0000\u035c\u035d\u0005\u00b0\u0000"+ + "\u0000\u035dW\u0001\u0000\u0000\u0000\u035e\u036b\u0003^/\u0000\u035f"+ + "\u0360\u0005\u00af\u0000\u0000\u0360\u0365\u0003^/\u0000\u0361\u0362\u0005"+ + "\u00ad\u0000\u0000\u0362\u0364\u0003^/\u0000\u0363\u0361\u0001\u0000\u0000"+ + "\u0000\u0364\u0367\u0001\u0000\u0000\u0000\u0365\u0363\u0001\u0000\u0000"+ + "\u0000\u0365\u0366\u0001\u0000\u0000\u0000\u0366\u0368\u0001\u0000\u0000"+ + "\u0000\u0367\u0365\u0001\u0000\u0000\u0000\u0368\u0369\u0005\u00b0\u0000"+ + "\u0000\u0369\u036b\u0001\u0000\u0000\u0000\u036a\u035e\u0001\u0000\u0000"+ + "\u0000\u036a\u035f\u0001\u0000\u0000\u0000\u036b\u036c\u0001\u0000\u0000"+ + "\u0000\u036c\u036d\u0005A\u0000\u0000\u036d\u036e\u0003f3\u0000\u036e"+ + "Y\u0001\u0000\u0000\u0000\u036f\u0370\u0005.\u0000\u0000\u0370\u0371\u0003"+ + "^/\u0000\u0371[\u0001\u0000\u0000\u0000\u0372\u0373\u0003^/\u0000\u0373"+ + "\u0375\u0005G\u0000\u0000\u0374\u0376\u0005]\u0000\u0000\u0375\u0374\u0001"+ + "\u0000\u0000\u0000\u0375\u0376\u0001\u0000\u0000\u0000\u0376\u0377\u0001"+ + "\u0000\u0000\u0000\u0377\u0379\u0005`\u0000\u0000\u0378\u037a\u0005\u0083"+ + "\u0000\u0000\u0379\u0378\u0001\u0000\u0000\u0000\u0379\u037a\u0001\u0000"+ + 
"\u0000\u0000\u037a\u037b\u0001\u0000\u0000\u0000\u037b\u037d\u0005\u00af"+ + "\u0000\u0000\u037c\u037e\u0005b\u0000\u0000\u037d\u037c\u0001\u0000\u0000"+ + "\u0000\u037d\u037e\u0001\u0000\u0000\u0000\u037e\u037f\u0001\u0000\u0000"+ + "\u0000\u037f\u0387\u0003\u00ba]\u0000\u0380\u0382\u0005\u00ad\u0000\u0000"+ + "\u0381\u0383\u0005b\u0000\u0000\u0382\u0381\u0001\u0000\u0000\u0000\u0382"+ + "\u0383\u0001\u0000\u0000\u0000\u0383\u0384\u0001\u0000\u0000\u0000\u0384"+ + "\u0386\u0003\u00ba]\u0000\u0385\u0380\u0001\u0000\u0000\u0000\u0386\u0389"+ "\u0001\u0000\u0000\u0000\u0387\u0385\u0001\u0000\u0000\u0000\u0387\u0388"+ - "\u0001\u0000\u0000\u0000\u0388_\u0001\u0000\u0000\u0000\u0389\u0387\u0001"+ - "\u0000\u0000\u0000\u038a\u038f\u0003b1\u0000\u038b\u038c\u0007\u0003\u0000"+ - "\u0000\u038c\u038e\u0003b1\u0000\u038d\u038b\u0001\u0000\u0000\u0000\u038e"+ - "\u0391\u0001\u0000\u0000\u0000\u038f\u038d\u0001\u0000\u0000\u0000\u038f"+ - "\u0390\u0001\u0000\u0000\u0000\u0390a\u0001\u0000\u0000\u0000\u0391\u038f"+ - "\u0001\u0000\u0000\u0000\u0392\u0397\u0003d2\u0000\u0393\u0394\u0007\u0004"+ - "\u0000\u0000\u0394\u0396\u0003d2\u0000\u0395\u0393\u0001\u0000\u0000\u0000"+ - "\u0396\u0399\u0001\u0000\u0000\u0000\u0397\u0395\u0001\u0000\u0000\u0000"+ - "\u0397\u0398\u0001\u0000\u0000\u0000\u0398c\u0001\u0000\u0000\u0000\u0399"+ - "\u0397\u0001\u0000\u0000\u0000\u039a\u039e\u0003f3\u0000\u039b\u039c\u0007"+ - "\u0003\u0000\u0000\u039c\u039e\u0003d2\u0000\u039d\u039a\u0001\u0000\u0000"+ - "\u0000\u039d\u039b\u0001\u0000\u0000\u0000\u039ee\u0001\u0000\u0000\u0000"+ - "\u039f\u03a4\u0003t:\u0000\u03a0\u03a3\u0003h4\u0000\u03a1\u03a3\u0003"+ - "n7\u0000\u03a2\u03a0\u0001\u0000\u0000\u0000\u03a2\u03a1\u0001\u0000\u0000"+ - "\u0000\u03a3\u03a6\u0001\u0000\u0000\u0000\u03a4\u03a2\u0001\u0000\u0000"+ - "\u0000\u03a4\u03a5\u0001\u0000\u0000\u0000\u03a5g\u0001\u0000\u0000\u0000"+ - "\u03a6\u03a4\u0001\u0000\u0000\u0000\u03a7\u03aa\u0005\u00b1\u0000\u0000"+ - "\u03a8\u03ab\u0003l6\u0000\u03a9\u03ab\u0003j5\u0000\u03aa\u03a8\u0001"+ - "\u0000\u0000\u0000\u03aa\u03a9\u0001\u0000\u0000\u0000\u03abi\u0001\u0000"+ - "\u0000\u0000\u03ac\u03b2\u0003\u01b8\u00dc\u0000\u03ad\u03b2\u0003\u01b4"+ - "\u00da\u0000\u03ae\u03b2\u0003z=\u0000\u03af\u03b2\u0003\u0090H\u0000"+ - "\u03b0\u03b2\u0003\u0086C\u0000\u03b1\u03ac\u0001\u0000\u0000\u0000\u03b1"+ - "\u03ad\u0001\u0000\u0000\u0000\u03b1\u03ae\u0001\u0000\u0000\u0000\u03b1"+ - "\u03af\u0001\u0000\u0000\u0000\u03b1\u03b0\u0001\u0000\u0000\u0000\u03b2"+ - "k\u0001\u0000\u0000\u0000\u03b3\u03b4\u0007\u0005\u0000\u0000\u03b4\u03b6"+ - "\u0005\u00aa\u0000\u0000\u03b5\u03b7\u0003\f\u0006\u0000\u03b6\u03b5\u0001"+ - "\u0000\u0000\u0000\u03b6\u03b7\u0001\u0000\u0000\u0000\u03b7\u03b8\u0001"+ - "\u0000\u0000\u0000\u03b8\u03b9\u0005\u00ab\u0000\u0000\u03b9m\u0001\u0000"+ - "\u0000\u0000\u03ba\u03bd\u0003r9\u0000\u03bb\u03bd\u0003p8\u0000\u03bc"+ - "\u03ba\u0001\u0000\u0000\u0000\u03bc\u03bb\u0001\u0000\u0000\u0000\u03bd"+ - "o\u0001\u0000\u0000\u0000\u03be\u03c0\u0005\u00ac\u0000\u0000\u03bf\u03c1"+ - "\u0003\f\u0006\u0000\u03c0\u03bf\u0001\u0000\u0000\u0000\u03c0\u03c1\u0001"+ - "\u0000\u0000\u0000\u03c1\u03c2\u0001\u0000\u0000\u0000\u03c2\u03c4\u0005"+ - "\u00a9\u0000\u0000\u03c3\u03c5\u0003\f\u0006\u0000\u03c4\u03c3\u0001\u0000"+ - "\u0000\u0000\u03c4\u03c5\u0001\u0000\u0000\u0000\u03c5\u03c6\u0001\u0000"+ - "\u0000\u0000\u03c6\u03c7\u0005\u00ad\u0000\u0000\u03c7q\u0001\u0000\u0000"+ - "\u0000\u03c8\u03ca\u0005\u00ac\u0000\u0000\u03c9\u03cb\u0003\f\u0006\u0000"+ - 
"\u03ca\u03c9\u0001\u0000\u0000\u0000\u03ca\u03cb\u0001\u0000\u0000\u0000"+ - "\u03cb\u03cc\u0001\u0000\u0000\u0000\u03cc\u03cd\u0005\u00ad\u0000\u0000"+ - "\u03cds\u0001\u0000\u0000\u0000\u03ce\u03dd\u0003x<\u0000\u03cf\u03dd"+ - "\u0003v;\u0000\u03d0\u03dd\u0003z=\u0000\u03d1\u03dd\u0003|>\u0000\u03d2"+ - "\u03dd\u0003~?\u0000\u03d3\u03dd\u0003\u0080@\u0000\u03d4\u03dd\u0003"+ - "\u0084B\u0000\u03d5\u03dd\u0003\u0086C\u0000\u03d6\u03dd\u0003\u0088D"+ - "\u0000\u03d7\u03dd\u0003\u008aE\u0000\u03d8\u03dd\u0003\u008cF\u0000\u03d9"+ - "\u03dd\u0003\u008eG\u0000\u03da\u03dd\u0003\u0090H\u0000\u03db\u03dd\u0003"+ - "\u0092I\u0000\u03dc\u03ce\u0001\u0000\u0000\u0000\u03dc\u03cf\u0001\u0000"+ - "\u0000\u0000\u03dc\u03d0\u0001\u0000\u0000\u0000\u03dc\u03d1\u0001\u0000"+ - "\u0000\u0000\u03dc\u03d2\u0001\u0000\u0000\u0000\u03dc\u03d3\u0001\u0000"+ - "\u0000\u0000\u03dc\u03d4\u0001\u0000\u0000\u0000\u03dc\u03d5\u0001\u0000"+ - "\u0000\u0000\u03dc\u03d6\u0001\u0000\u0000\u0000\u03dc\u03d7\u0001\u0000"+ - "\u0000\u0000\u03dc\u03d8\u0001\u0000\u0000\u0000\u03dc\u03d9\u0001\u0000"+ - "\u0000\u0000\u03dc\u03da\u0001\u0000\u0000\u0000\u03dc\u03db\u0001\u0000"+ - "\u0000\u0000\u03ddu\u0001\u0000\u0000\u0000\u03de\u03e4\u0003\u01b8\u00dc"+ - "\u0000\u03df\u03e2\u0005\u00b1\u0000\u0000\u03e0\u03e3\u0003\u01b8\u00dc"+ - "\u0000\u03e1\u03e3\u0003\u01b4\u00da\u0000\u03e2\u03e0\u0001\u0000\u0000"+ - "\u0000\u03e2\u03e1\u0001\u0000\u0000\u0000\u03e3\u03e5\u0001\u0000\u0000"+ - "\u0000\u03e4\u03df\u0001\u0000\u0000\u0000\u03e4\u03e5\u0001\u0000\u0000"+ - "\u0000\u03e5w\u0001\u0000\u0000\u0000\u03e6\u03ec\u0003\u01b0\u00d8\u0000"+ - "\u03e7\u03ec\u0003\u01b4\u00da\u0000\u03e8\u03ec\u0005\u00c7\u0000\u0000"+ - "\u03e9\u03ec\u0005\u00c6\u0000\u0000\u03ea\u03ec\u0005\u00c5\u0000\u0000"+ - "\u03eb\u03e6\u0001\u0000\u0000\u0000\u03eb\u03e7\u0001\u0000\u0000\u0000"+ - "\u03eb\u03e8\u0001\u0000\u0000\u0000\u03eb\u03e9\u0001\u0000\u0000\u0000"+ - "\u03eb\u03ea\u0001\u0000\u0000\u0000\u03ecy\u0001\u0000\u0000\u0000\u03ed"+ - "\u03ee\u0007\u0006\u0000\u0000\u03ee{\u0001\u0000\u0000\u0000\u03ef\u03f1"+ - "\u0005\u00ac\u0000\u0000\u03f0\u03f2\u0003\f\u0006\u0000\u03f1\u03f0\u0001"+ - "\u0000\u0000\u0000\u03f1\u03f2\u0001\u0000\u0000\u0000\u03f2\u03f7\u0001"+ - "\u0000\u0000\u0000\u03f3\u03f4\u0005\u00a8\u0000\u0000\u03f4\u03f6\u0003"+ - "\f\u0006\u0000\u03f5\u03f3\u0001\u0000\u0000\u0000\u03f6\u03f9\u0001\u0000"+ - "\u0000\u0000\u03f7\u03f5\u0001\u0000\u0000\u0000\u03f7\u03f8\u0001\u0000"+ - "\u0000\u0000\u03f8\u03fa\u0001\u0000\u0000\u0000\u03f9\u03f7\u0001\u0000"+ - "\u0000\u0000\u03fa\u03fb\u0005\u00ad\u0000\u0000\u03fb}\u0001\u0000\u0000"+ - "\u0000\u03fc\u03fd\u0005\u00ae\u0000\u0000\u03fd\u03fe\u0003\f\u0006\u0000"+ - "\u03fe\u03ff\u0005\u00a9\u0000\u0000\u03ff\u0407\u0003\f\u0006\u0000\u0400"+ - "\u0401\u0005\u00a8\u0000\u0000\u0401\u0402\u0003\f\u0006\u0000\u0402\u0403"+ - "\u0005\u00a9\u0000\u0000\u0403\u0404\u0003\f\u0006\u0000\u0404\u0406\u0001"+ - "\u0000\u0000\u0000\u0405\u0400\u0001\u0000\u0000\u0000\u0406\u0409\u0001"+ - "\u0000\u0000\u0000\u0407\u0405\u0001\u0000\u0000\u0000\u0407\u0408\u0001"+ - "\u0000\u0000\u0000\u0408\u040a\u0001\u0000\u0000\u0000\u0409\u0407\u0001"+ - "\u0000\u0000\u0000\u040a\u040b\u0005\u00af\u0000\u0000\u040b\u040f\u0001"+ - "\u0000\u0000\u0000\u040c\u040d\u0005\u00ae\u0000\u0000\u040d\u040f\u0005"+ - "\u00af\u0000\u0000\u040e\u03fc\u0001\u0000\u0000\u0000\u040e\u040c\u0001"+ - "\u0000\u0000\u0000\u040f\u007f\u0001\u0000\u0000\u0000\u0410\u0411\u0005"+ - 
"t\u0000\u0000\u0411\u0412\u0005\u00aa\u0000\u0000\u0412\u0413\u0003\u0082"+ - "A\u0000\u0413\u0414\u0005\u00a8\u0000\u0000\u0414\u0415\u0003\f\u0006"+ - "\u0000\u0415\u0416\u0005\u00ab\u0000\u0000\u0416\u0081\u0001\u0000\u0000"+ - "\u0000\u0417\u0418\u0003\f\u0006\u0000\u0418\u0083\u0001\u0000\u0000\u0000"+ - "\u0419\u041a\u0005\u000f\u0000\u0000\u041a\u041c\u0005\u00aa\u0000\u0000"+ - "\u041b\u041d\u0005\"\u0000\u0000\u041c\u041b\u0001\u0000\u0000\u0000\u041c"+ - "\u041d\u0001\u0000\u0000\u0000\u041d\u041e\u0001\u0000\u0000\u0000\u041e"+ - "\u041f\u0003\f\u0006\u0000\u041f\u0420\u0005\u00ab\u0000\u0000\u0420\u0085"+ - "\u0001\u0000\u0000\u0000\u0421\u0422\u0003\u01b8\u00dc\u0000\u0422\u042b"+ - "\u0005\u00aa\u0000\u0000\u0423\u0428\u0003\f\u0006\u0000\u0424\u0425\u0005"+ - "\u00a8\u0000\u0000\u0425\u0427\u0003\f\u0006\u0000\u0426\u0424\u0001\u0000"+ - "\u0000\u0000\u0427\u042a\u0001\u0000\u0000\u0000\u0428\u0426\u0001\u0000"+ - "\u0000\u0000\u0428\u0429\u0001\u0000\u0000\u0000\u0429\u042c\u0001\u0000"+ - "\u0000\u0000\u042a\u0428\u0001\u0000\u0000\u0000\u042b\u0423\u0001\u0000"+ - "\u0000\u0000\u042b\u042c\u0001\u0000\u0000\u0000\u042c\u042d\u0001\u0000"+ - "\u0000\u0000\u042d\u042e\u0005\u00ab\u0000\u0000\u042e\u0087\u0001\u0000"+ - "\u0000\u0000\u042f\u0430\u0005\u0018\u0000\u0000\u0430\u0431\u0005\u00aa"+ - "\u0000\u0000\u0431\u0432\u0005\u00b0\u0000\u0000\u0432\u0433\u0005\u00ab"+ - "\u0000\u0000\u0433\u0089\u0001\u0000\u0000\u0000\u0434\u0435\u0005\u0018"+ - "\u0000\u0000\u0435\u0436\u0005\u00aa\u0000\u0000\u0436\u0437\u0005\"\u0000"+ - "\u0000\u0437\u0438\u0003\f\u0006\u0000\u0438\u0439\u0005\u00ab\u0000\u0000"+ - "\u0439\u008b\u0001\u0000\u0000\u0000\u043a\u043b\u0005\u0013\u0000\u0000"+ - "\u043b\u043c\u0005\u0087\u0000\u0000\u043c\u043d\u0003\f\u0006\u0000\u043d"+ - "\u043e\u0005{\u0000\u0000\u043e\u0446\u0003\f\u0006\u0000\u043f\u0440"+ - "\u0005\u0087\u0000\u0000\u0440\u0441\u0003\f\u0006\u0000\u0441\u0442\u0005"+ - "{\u0000\u0000\u0442\u0443\u0003\f\u0006\u0000\u0443\u0445\u0001\u0000"+ - "\u0000\u0000\u0444\u043f\u0001\u0000\u0000\u0000\u0445\u0448\u0001\u0000"+ - "\u0000\u0000\u0446\u0444\u0001\u0000\u0000\u0000\u0446\u0447\u0001\u0000"+ - "\u0000\u0000\u0447\u044b\u0001\u0000\u0000\u0000\u0448\u0446\u0001\u0000"+ - "\u0000\u0000\u0449\u044a\u0005&\u0000\u0000\u044a\u044c\u0003\f\u0006"+ - "\u0000\u044b\u0449\u0001\u0000\u0000\u0000\u044b\u044c\u0001\u0000\u0000"+ - "\u0000\u044c\u044d\u0001\u0000\u0000\u0000\u044d\u044e\u0005\'\u0000\u0000"+ - "\u044e\u008d\u0001\u0000\u0000\u0000\u044f\u0450\u0005\u0015\u0000\u0000"+ - "\u0450\u0451\u0005\u00aa\u0000\u0000\u0451\u0452\u0003\f\u0006\u0000\u0452"+ - "\u0453\u0005\r\u0000\u0000\u0453\u0454\u0003\u00ba]\u0000\u0454\u0455"+ - "\u0005\u00ab\u0000\u0000\u0455\u008f\u0001\u0000\u0000\u0000\u0456\u0457"+ - "\u0005\u00aa\u0000\u0000\u0457\u0458\u0003\f\u0006\u0000\u0458\u0459\u0005"+ - "\u00ab\u0000\u0000\u0459\u0091\u0001\u0000\u0000\u0000\u045a\u045b\u0005"+ - "+\u0000\u0000\u045b\u045c\u0005\u00aa\u0000\u0000\u045c\u045d\u0003\u01b8"+ - "\u00dc\u0000\u045d\u045e\u00052\u0000\u0000\u045e\u045f\u0003\f\u0006"+ - "\u0000\u045f\u0460\u0005\u00ab\u0000\u0000\u0460\u0093\u0001\u0000\u0000"+ - "\u0000\u0461\u0463\u0003\u0006\u0003\u0000\u0462\u0461\u0001\u0000\u0000"+ - "\u0000\u0462\u0463\u0001\u0000\u0000\u0000\u0463\u0464\u0001\u0000\u0000"+ - "\u0000\u0464\u0465\u0007\u0007\u0000\u0000\u0465\u0466\u0005A\u0000\u0000"+ - "\u0466\u046b\u0003\u00fc~\u0000\u0467\u0469\u0005\r\u0000\u0000\u0468"+ - 
"\u0467\u0001\u0000\u0000\u0000\u0468\u0469\u0001\u0000\u0000\u0000\u0469"+ - "\u046a\u0001\u0000\u0000\u0000\u046a\u046c\u0003\"\u0011\u0000\u046b\u0468"+ - "\u0001\u0000\u0000\u0000\u046b\u046c\u0001\u0000\u0000\u0000\u046c\u0478"+ - "\u0001\u0000\u0000\u0000\u046d\u046e\u0005\u00aa\u0000\u0000\u046e\u0473"+ - "\u0003\u0096K\u0000\u046f\u0470\u0005\u00a8\u0000\u0000\u0470\u0472\u0003"+ - "\u0096K\u0000\u0471\u046f\u0001\u0000\u0000\u0000\u0472\u0475\u0001\u0000"+ - "\u0000\u0000\u0473\u0471\u0001\u0000\u0000\u0000\u0473\u0474\u0001\u0000"+ - "\u0000\u0000\u0474\u0476\u0001\u0000\u0000\u0000\u0475\u0473\u0001\u0000"+ - "\u0000\u0000\u0476\u0477\u0005\u00ab\u0000\u0000\u0477\u0479\u0001\u0000"+ - "\u0000\u0000\u0478\u046d\u0001\u0000\u0000\u0000\u0478\u0479\u0001\u0000"+ - "\u0000\u0000\u0479\u047a\u0001\u0000\u0000\u0000\u047a\u047b\u0005\u0086"+ - "\u0000\u0000\u047b\u047c\u0005\u00aa\u0000\u0000\u047c\u0481\u0003\u009a"+ - "M\u0000\u047d\u047e\u0005\u00a8\u0000\u0000\u047e\u0480\u0003\u009aM\u0000"+ - "\u047f\u047d\u0001\u0000\u0000\u0000\u0480\u0483\u0001\u0000\u0000\u0000"+ - "\u0481\u047f\u0001\u0000\u0000\u0000\u0481\u0482\u0001\u0000\u0000\u0000"+ - "\u0482\u0484\u0001\u0000\u0000\u0000\u0483\u0481\u0001\u0000\u0000\u0000"+ - "\u0484\u0488\u0005\u00ab\u0000\u0000\u0485\u0486\u0005u\u0000\u0000\u0486"+ - "\u0487\u0005}\u0000\u0000\u0487\u0489\u0003\u009cN\u0000\u0488\u0485\u0001"+ - "\u0000\u0000\u0000\u0488\u0489\u0001\u0000\u0000\u0000\u0489\u048b\u0001"+ - "\u0000\u0000\u0000\u048a\u048c\u0003\u0098L\u0000\u048b\u048a\u0001\u0000"+ - "\u0000\u0000\u048b\u048c\u0001\u0000\u0000\u0000\u048c\u0095\u0001\u0000"+ - "\u0000\u0000\u048d\u0490\u0003\u01b8\u00dc\u0000\u048e\u0490\u0003\u01b4"+ - "\u00da\u0000\u048f\u048d\u0001\u0000\u0000\u0000\u048f\u048e\u0001\u0000"+ - "\u0000\u0000\u0490\u0097\u0001\u0000\u0000\u0000\u0491\u0492\u0005l\u0000"+ - "\u0000\u0492\u0493\u0003*\u0015\u0000\u0493\u0099\u0001\u0000\u0000\u0000"+ - "\u0494\u0497\u0005\u001d\u0000\u0000\u0495\u0497\u0003\f\u0006\u0000\u0496"+ - "\u0494\u0001\u0000\u0000\u0000\u0496\u0495\u0001\u0000\u0000\u0000\u0497"+ - "\u009b\u0001\u0000\u0000\u0000\u0498\u0499\u0003`0\u0000\u0499\u049a\u0007"+ - "\b\u0000\u0000\u049a\u049f\u0001\u0000\u0000\u0000\u049b\u049c\u0005\u0085"+ - "\u0000\u0000\u049c\u049d\u0005y\u0000\u0000\u049d\u049f\u0005\u001d\u0000"+ - "\u0000\u049e\u0498\u0001\u0000\u0000\u0000\u049e\u049b\u0001\u0000\u0000"+ - "\u0000\u049f\u009d\u0001\u0000\u0000\u0000\u04a0\u04a2\u0003\u0006\u0003"+ - "\u0000\u04a1\u04a0\u0001\u0000\u0000\u0000\u04a1\u04a2\u0001\u0000\u0000"+ - "\u0000\u04a2\u04a3\u0001\u0000\u0000\u0000\u04a3\u04a4\u0005\u0081\u0000"+ - "\u0000\u04a4\u04a9\u0003\u00fc~\u0000\u04a5\u04a7\u0005\r\u0000\u0000"+ - "\u04a6\u04a5\u0001\u0000\u0000\u0000\u04a6\u04a7\u0001\u0000\u0000\u0000"+ - "\u04a7\u04a8\u0001\u0000\u0000\u0000\u04a8\u04aa\u0003\"\u0011\u0000\u04a9"+ - "\u04a6\u0001\u0000\u0000\u0000\u04a9\u04aa\u0001\u0000\u0000\u0000\u04aa"+ - "\u04ab\u0001\u0000\u0000\u0000\u04ab\u04b0\u0003\u00a2Q\u0000\u04ac\u04ad"+ - "\u0005\u00a8\u0000\u0000\u04ad\u04af\u0003\u00a2Q\u0000\u04ae\u04ac\u0001"+ - "\u0000\u0000\u0000\u04af\u04b2\u0001\u0000\u0000\u0000\u04b0\u04ae\u0001"+ - "\u0000\u0000\u0000\u04b0\u04b1\u0001\u0000\u0000\u0000\u04b1\u04b3\u0001"+ - "\u0000\u0000\u0000\u04b2\u04b0\u0001\u0000\u0000\u0000\u04b3\u04b4\u0005"+ - "\u0088\u0000\u0000\u04b4\u04b6\u0003\f\u0006\u0000\u04b5\u04b7\u0003\u00a0"+ - "P\u0000\u04b6\u04b5\u0001\u0000\u0000\u0000\u04b6\u04b7\u0001\u0000\u0000"+ - 
"\u0000\u04b7\u009f\u0001\u0000\u0000\u0000\u04b8\u04b9\u0005l\u0000\u0000"+ - "\u04b9\u04ba\u0003*\u0015\u0000\u04ba\u00a1\u0001\u0000\u0000\u0000\u04bb"+ - "\u04bc\u0005u\u0000\u0000\u04bc\u04c4\u0003\u00a4R\u0000\u04bd\u04c0\u0005"+ - "\u00a8\u0000\u0000\u04be\u04c1\u0003\u00a2Q\u0000\u04bf\u04c1\u0003\u00a4"+ - "R\u0000\u04c0\u04be\u0001\u0000\u0000\u0000\u04c0\u04bf\u0001\u0000\u0000"+ - "\u0000\u04c1\u04c3\u0001\u0000\u0000\u0000\u04c2\u04bd\u0001\u0000\u0000"+ - "\u0000\u04c3\u04c6\u0001\u0000\u0000\u0000\u04c4\u04c2\u0001\u0000\u0000"+ - "\u0000\u04c4\u04c5\u0001\u0000\u0000\u0000\u04c5\u0503\u0001\u0000\u0000"+ - "\u0000\u04c6\u04c4\u0001\u0000\u0000\u0000\u04c7\u04c8\u0005\u0006\u0000"+ - "\u0000\u04c8\u04d0\u0003\u00a6S\u0000\u04c9\u04cc\u0005\u00a8\u0000\u0000"+ - "\u04ca\u04cd\u0003\u00a2Q\u0000\u04cb\u04cd\u0003\u00a6S\u0000\u04cc\u04ca"+ - "\u0001\u0000\u0000\u0000\u04cc\u04cb\u0001\u0000\u0000\u0000\u04cd\u04cf"+ - "\u0001\u0000\u0000\u0000\u04ce\u04c9\u0001\u0000\u0000\u0000\u04cf\u04d2"+ - "\u0001\u0000\u0000\u0000\u04d0\u04ce\u0001\u0000\u0000\u0000\u04d0\u04d1"+ - "\u0001\u0000\u0000\u0000\u04d1\u0503\u0001\u0000\u0000\u0000\u04d2\u04d0"+ - "\u0001\u0000\u0000\u0000\u04d3\u04d4\u0005h\u0000\u0000\u04d4\u04dc\u0003"+ - "\u00a8T\u0000\u04d5\u04d8\u0005\u00a8\u0000\u0000\u04d6\u04d9\u0003\u00a2"+ - "Q\u0000\u04d7\u04d9\u0003\u00a8T\u0000\u04d8\u04d6\u0001\u0000\u0000\u0000"+ - "\u04d8\u04d7\u0001\u0000\u0000\u0000\u04d9\u04db\u0001\u0000\u0000\u0000"+ - "\u04da\u04d5\u0001\u0000\u0000\u0000\u04db\u04de\u0001\u0000\u0000\u0000"+ - "\u04dc\u04da\u0001\u0000\u0000\u0000\u04dc\u04dd\u0001\u0000\u0000\u0000"+ - "\u04dd\u0503\u0001\u0000\u0000\u0000\u04de\u04dc\u0001\u0000\u0000\u0000"+ - "\u04df\u04e0\u0005k\u0000\u0000\u04e0\u04e8\u0003\u00aaU\u0000\u04e1\u04e4"+ - "\u0005\u00a8\u0000\u0000\u04e2\u04e5\u0003\u00a2Q\u0000\u04e3\u04e5\u0003"+ - "\u00aaU\u0000\u04e4\u04e2\u0001\u0000\u0000\u0000\u04e4\u04e3\u0001\u0000"+ - "\u0000\u0000\u04e5\u04e7\u0001\u0000\u0000\u0000\u04e6\u04e1\u0001\u0000"+ - "\u0000\u0000\u04e7\u04ea\u0001\u0000\u0000\u0000\u04e8\u04e6\u0001\u0000"+ - "\u0000\u0000\u04e8\u04e9\u0001\u0000\u0000\u0000\u04e9\u0503\u0001\u0000"+ - "\u0000\u0000\u04ea\u04e8\u0001\u0000\u0000\u0000\u04eb\u04ec\u0005C\u0000"+ - "\u0000\u04ec\u04ed\u0005O\u0000\u0000\u04ed\u04f5\u0003\u00acV\u0000\u04ee"+ - "\u04f1\u0005\u00a8\u0000\u0000\u04ef\u04f2\u0003\u00a2Q\u0000\u04f0\u04f2"+ - "\u0003\u00acV\u0000\u04f1\u04ef\u0001\u0000\u0000\u0000\u04f1\u04f0\u0001"+ - "\u0000\u0000\u0000\u04f2\u04f4\u0001\u0000\u0000\u0000\u04f3\u04ee\u0001"+ - "\u0000\u0000\u0000\u04f4\u04f7\u0001\u0000\u0000\u0000\u04f5\u04f3\u0001"+ - "\u0000\u0000\u0000\u04f5\u04f6\u0001\u0000\u0000\u0000\u04f6\u0503\u0001"+ - "\u0000\u0000\u0000\u04f7\u04f5\u0001\u0000\u0000\u0000\u04f8\u04f9\u0005"+ - "u\u0000\u0000\u04f9\u04fa\u0005}\u0000\u0000\u04fa\u04ff\u0003\u00b0X"+ - "\u0000\u04fb\u04fc\u0005\u00a8\u0000\u0000\u04fc\u04fe\u0003\u00a2Q\u0000"+ - "\u04fd\u04fb\u0001\u0000\u0000\u0000\u04fe\u0501\u0001\u0000\u0000\u0000"+ - "\u04ff\u04fd\u0001\u0000\u0000\u0000\u04ff\u0500\u0001\u0000\u0000\u0000"+ - "\u0500\u0503\u0001\u0000\u0000\u0000\u0501\u04ff\u0001\u0000\u0000\u0000"+ - "\u0502\u04bb\u0001\u0000\u0000\u0000\u0502\u04c7\u0001\u0000\u0000\u0000"+ - "\u0502\u04d3\u0001\u0000\u0000\u0000\u0502\u04df\u0001\u0000\u0000\u0000"+ - "\u0502\u04eb\u0001\u0000\u0000\u0000\u0502\u04f8\u0001\u0000\u0000\u0000"+ - "\u0503\u00a3\u0001\u0000\u0000\u0000\u0504\u0505\u0003\u00b2Y\u0000\u0505"+ - 
"\u0506\u0005\u00b8\u0000\u0000\u0506\u0507\u0003\f\u0006\u0000\u0507\u00a5"+ - "\u0001\u0000\u0000\u0000\u0508\u050a\u0005A\u0000\u0000\u0509\u0508\u0001"+ - "\u0000\u0000\u0000\u0509\u050a\u0001\u0000\u0000\u0000\u050a\u050b\u0001"+ - "\u0000\u0000\u0000\u050b\u0510\u0003\u00b2Y\u0000\u050c\u050e\u0005\u0003"+ - "\u0000\u0000\u050d\u050c\u0001\u0000\u0000\u0000\u050d\u050e\u0001\u0000"+ - "\u0000\u0000\u050e\u050f\u0001\u0000\u0000\u0000\u050f\u0511\u0003\u00b4"+ - "Z\u0000\u0510\u050d\u0001\u0000\u0000\u0000\u0510\u0511\u0001\u0000\u0000"+ - "\u0000\u0511\u0513\u0001\u0000\u0000\u0000\u0512\u0514\u0005%\u0000\u0000"+ + "\u0001\u0000\u0000\u0000\u0388\u038a\u0001\u0000\u0000\u0000\u0389\u0387"+ + "\u0001\u0000\u0000\u0000\u038a\u038b\u0005\u00b0\u0000\u0000\u038b]\u0001"+ + "\u0000\u0000\u0000\u038c\u0391\u0003`0\u0000\u038d\u038e\u0005\u00c9\u0000"+ + "\u0000\u038e\u0390\u0003`0\u0000\u038f\u038d\u0001\u0000\u0000\u0000\u0390"+ + "\u0393\u0001\u0000\u0000\u0000\u0391\u038f\u0001\u0000\u0000\u0000\u0391"+ + "\u0392\u0001\u0000\u0000\u0000\u0392_\u0001\u0000\u0000\u0000\u0393\u0391"+ + "\u0001\u0000\u0000\u0000\u0394\u0399\u0003b1\u0000\u0395\u0396\u0007\u0003"+ + "\u0000\u0000\u0396\u0398\u0003b1\u0000\u0397\u0395\u0001\u0000\u0000\u0000"+ + "\u0398\u039b\u0001\u0000\u0000\u0000\u0399\u0397\u0001\u0000\u0000\u0000"+ + "\u0399\u039a\u0001\u0000\u0000\u0000\u039aa\u0001\u0000\u0000\u0000\u039b"+ + "\u0399\u0001\u0000\u0000\u0000\u039c\u03a1\u0003d2\u0000\u039d\u039e\u0007"+ + "\u0004\u0000\u0000\u039e\u03a0\u0003d2\u0000\u039f\u039d\u0001\u0000\u0000"+ + "\u0000\u03a0\u03a3\u0001\u0000\u0000\u0000\u03a1\u039f\u0001\u0000\u0000"+ + "\u0000\u03a1\u03a2\u0001\u0000\u0000\u0000\u03a2c\u0001\u0000\u0000\u0000"+ + "\u03a3\u03a1\u0001\u0000\u0000\u0000\u03a4\u03a8\u0003f3\u0000\u03a5\u03a6"+ + "\u0007\u0003\u0000\u0000\u03a6\u03a8\u0003d2\u0000\u03a7\u03a4\u0001\u0000"+ + "\u0000\u0000\u03a7\u03a5\u0001\u0000\u0000\u0000\u03a8e\u0001\u0000\u0000"+ + "\u0000\u03a9\u03ae\u0003t:\u0000\u03aa\u03ad\u0003h4\u0000\u03ab\u03ad"+ + "\u0003n7\u0000\u03ac\u03aa\u0001\u0000\u0000\u0000\u03ac\u03ab\u0001\u0000"+ + "\u0000\u0000\u03ad\u03b0\u0001\u0000\u0000\u0000\u03ae\u03ac\u0001\u0000"+ + "\u0000\u0000\u03ae\u03af\u0001\u0000\u0000\u0000\u03afg\u0001\u0000\u0000"+ + "\u0000\u03b0\u03ae\u0001\u0000\u0000\u0000\u03b1\u03b4\u0005\u00b6\u0000"+ + "\u0000\u03b2\u03b5\u0003l6\u0000\u03b3\u03b5\u0003j5\u0000\u03b4\u03b2"+ + "\u0001\u0000\u0000\u0000\u03b4\u03b3\u0001\u0000\u0000\u0000\u03b5i\u0001"+ + "\u0000\u0000\u0000\u03b6\u03bc\u0003\u01c2\u00e1\u0000\u03b7\u03bc\u0003"+ + "\u01be\u00df\u0000\u03b8\u03bc\u0003z=\u0000\u03b9\u03bc\u0003\u0090H"+ + "\u0000\u03ba\u03bc\u0003\u0086C\u0000\u03bb\u03b6\u0001\u0000\u0000\u0000"+ + "\u03bb\u03b7\u0001\u0000\u0000\u0000\u03bb\u03b8\u0001\u0000\u0000\u0000"+ + "\u03bb\u03b9\u0001\u0000\u0000\u0000\u03bb\u03ba\u0001\u0000\u0000\u0000"+ + "\u03bck\u0001\u0000\u0000\u0000\u03bd\u03be\u0007\u0005\u0000\u0000\u03be"+ + "\u03c0\u0005\u00af\u0000\u0000\u03bf\u03c1\u0003\f\u0006\u0000\u03c0\u03bf"+ + "\u0001\u0000\u0000\u0000\u03c0\u03c1\u0001\u0000\u0000\u0000\u03c1\u03c2"+ + "\u0001\u0000\u0000\u0000\u03c2\u03c3\u0005\u00b0\u0000\u0000\u03c3m\u0001"+ + "\u0000\u0000\u0000\u03c4\u03c7\u0003r9\u0000\u03c5\u03c7\u0003p8\u0000"+ + "\u03c6\u03c4\u0001\u0000\u0000\u0000\u03c6\u03c5\u0001\u0000\u0000\u0000"+ + "\u03c7o\u0001\u0000\u0000\u0000\u03c8\u03ca\u0005\u00b1\u0000\u0000\u03c9"+ + "\u03cb\u0003\f\u0006\u0000\u03ca\u03c9\u0001\u0000\u0000\u0000\u03ca\u03cb"+ + 
"\u0001\u0000\u0000\u0000\u03cb\u03cc\u0001\u0000\u0000\u0000\u03cc\u03ce"+ + "\u0005\u00ae\u0000\u0000\u03cd\u03cf\u0003\f\u0006\u0000\u03ce\u03cd\u0001"+ + "\u0000\u0000\u0000\u03ce\u03cf\u0001\u0000\u0000\u0000\u03cf\u03d0\u0001"+ + "\u0000\u0000\u0000\u03d0\u03d1\u0005\u00b2\u0000\u0000\u03d1q\u0001\u0000"+ + "\u0000\u0000\u03d2\u03d4\u0005\u00b1\u0000\u0000\u03d3\u03d5\u0003\f\u0006"+ + "\u0000\u03d4\u03d3\u0001\u0000\u0000\u0000\u03d4\u03d5\u0001\u0000\u0000"+ + "\u0000\u03d5\u03d6\u0001\u0000\u0000\u0000\u03d6\u03d7\u0005\u00b2\u0000"+ + "\u0000\u03d7s\u0001\u0000\u0000\u0000\u03d8\u03e7\u0003x<\u0000\u03d9"+ + "\u03e7\u0003v;\u0000\u03da\u03e7\u0003z=\u0000\u03db\u03e7\u0003|>\u0000"+ + "\u03dc\u03e7\u0003~?\u0000\u03dd\u03e7\u0003\u0080@\u0000\u03de\u03e7"+ + "\u0003\u0084B\u0000\u03df\u03e7\u0003\u0086C\u0000\u03e0\u03e7\u0003\u0088"+ + "D\u0000\u03e1\u03e7\u0003\u008aE\u0000\u03e2\u03e7\u0003\u008cF\u0000"+ + "\u03e3\u03e7\u0003\u008eG\u0000\u03e4\u03e7\u0003\u0090H\u0000\u03e5\u03e7"+ + "\u0003\u0092I\u0000\u03e6\u03d8\u0001\u0000\u0000\u0000\u03e6\u03d9\u0001"+ + "\u0000\u0000\u0000\u03e6\u03da\u0001\u0000\u0000\u0000\u03e6\u03db\u0001"+ + "\u0000\u0000\u0000\u03e6\u03dc\u0001\u0000\u0000\u0000\u03e6\u03dd\u0001"+ + "\u0000\u0000\u0000\u03e6\u03de\u0001\u0000\u0000\u0000\u03e6\u03df\u0001"+ + "\u0000\u0000\u0000\u03e6\u03e0\u0001\u0000\u0000\u0000\u03e6\u03e1\u0001"+ + "\u0000\u0000\u0000\u03e6\u03e2\u0001\u0000\u0000\u0000\u03e6\u03e3\u0001"+ + "\u0000\u0000\u0000\u03e6\u03e4\u0001\u0000\u0000\u0000\u03e6\u03e5\u0001"+ + "\u0000\u0000\u0000\u03e7u\u0001\u0000\u0000\u0000\u03e8\u03ee\u0003\u01c2"+ + "\u00e1\u0000\u03e9\u03ec\u0005\u00b6\u0000\u0000\u03ea\u03ed\u0003\u01c2"+ + "\u00e1\u0000\u03eb\u03ed\u0003\u01be\u00df\u0000\u03ec\u03ea\u0001\u0000"+ + "\u0000\u0000\u03ec\u03eb\u0001\u0000\u0000\u0000\u03ed\u03ef\u0001\u0000"+ + "\u0000\u0000\u03ee\u03e9\u0001\u0000\u0000\u0000\u03ee\u03ef\u0001\u0000"+ + "\u0000\u0000\u03efw\u0001\u0000\u0000\u0000\u03f0\u03f6\u0003\u01ba\u00dd"+ + "\u0000\u03f1\u03f6\u0003\u01be\u00df\u0000\u03f2\u03f6\u0005\u00cc\u0000"+ + "\u0000\u03f3\u03f6\u0005\u00cb\u0000\u0000\u03f4\u03f6\u0005\u00ca\u0000"+ + "\u0000\u03f5\u03f0\u0001\u0000\u0000\u0000\u03f5\u03f1\u0001\u0000\u0000"+ + "\u0000\u03f5\u03f2\u0001\u0000\u0000\u0000\u03f5\u03f3\u0001\u0000\u0000"+ + "\u0000\u03f5\u03f4\u0001\u0000\u0000\u0000\u03f6y\u0001\u0000\u0000\u0000"+ + "\u03f7\u03f8\u0007\u0006\u0000\u0000\u03f8{\u0001\u0000\u0000\u0000\u03f9"+ + "\u03fb\u0005\u00b1\u0000\u0000\u03fa\u03fc\u0003\f\u0006\u0000\u03fb\u03fa"+ + "\u0001\u0000\u0000\u0000\u03fb\u03fc\u0001\u0000\u0000\u0000\u03fc\u0401"+ + "\u0001\u0000\u0000\u0000\u03fd\u03fe\u0005\u00ad\u0000\u0000\u03fe\u0400"+ + "\u0003\f\u0006\u0000\u03ff\u03fd\u0001\u0000\u0000\u0000\u0400\u0403\u0001"+ + "\u0000\u0000\u0000\u0401\u03ff\u0001\u0000\u0000\u0000\u0401\u0402\u0001"+ + "\u0000\u0000\u0000\u0402\u0404\u0001\u0000\u0000\u0000\u0403\u0401\u0001"+ + "\u0000\u0000\u0000\u0404\u0405\u0005\u00b2\u0000\u0000\u0405}\u0001\u0000"+ + "\u0000\u0000\u0406\u0407\u0005\u00b3\u0000\u0000\u0407\u0408\u0003\f\u0006"+ + "\u0000\u0408\u0409\u0005\u00ae\u0000\u0000\u0409\u0411\u0003\f\u0006\u0000"+ + "\u040a\u040b\u0005\u00ad\u0000\u0000\u040b\u040c\u0003\f\u0006\u0000\u040c"+ + "\u040d\u0005\u00ae\u0000\u0000\u040d\u040e\u0003\f\u0006\u0000\u040e\u0410"+ + "\u0001\u0000\u0000\u0000\u040f\u040a\u0001\u0000\u0000\u0000\u0410\u0413"+ + "\u0001\u0000\u0000\u0000\u0411\u040f\u0001\u0000\u0000\u0000\u0411\u0412"+ + 
"\u0001\u0000\u0000\u0000\u0412\u0414\u0001\u0000\u0000\u0000\u0413\u0411"+ + "\u0001\u0000\u0000\u0000\u0414\u0415\u0005\u00b4\u0000\u0000\u0415\u0419"+ + "\u0001\u0000\u0000\u0000\u0416\u0417\u0005\u00b3\u0000\u0000\u0417\u0419"+ + "\u0005\u00b4\u0000\u0000\u0418\u0406\u0001\u0000\u0000\u0000\u0418\u0416"+ + "\u0001\u0000\u0000\u0000\u0419\u007f\u0001\u0000\u0000\u0000\u041a\u041b"+ + "\u0005y\u0000\u0000\u041b\u041c\u0005\u00af\u0000\u0000\u041c\u041d\u0003"+ + "\u0082A\u0000\u041d\u041e\u0005\u00ad\u0000\u0000\u041e\u041f\u0003\f"+ + "\u0006\u0000\u041f\u0420\u0005\u00b0\u0000\u0000\u0420\u0081\u0001\u0000"+ + "\u0000\u0000\u0421\u0422\u0003\f\u0006\u0000\u0422\u0083\u0001\u0000\u0000"+ + "\u0000\u0423\u0424\u0005\u0010\u0000\u0000\u0424\u0426\u0005\u00af\u0000"+ + "\u0000\u0425\u0427\u0005%\u0000\u0000\u0426\u0425\u0001\u0000\u0000\u0000"+ + "\u0426\u0427\u0001\u0000\u0000\u0000\u0427\u0428\u0001\u0000\u0000\u0000"+ + "\u0428\u0429\u0003\f\u0006\u0000\u0429\u042a\u0005\u00b0\u0000\u0000\u042a"+ + "\u0085\u0001\u0000\u0000\u0000\u042b\u042c\u0003\u01c2\u00e1\u0000\u042c"+ + "\u0435\u0005\u00af\u0000\u0000\u042d\u0432\u0003\f\u0006\u0000\u042e\u042f"+ + "\u0005\u00ad\u0000\u0000\u042f\u0431\u0003\f\u0006\u0000\u0430\u042e\u0001"+ + "\u0000\u0000\u0000\u0431\u0434\u0001\u0000\u0000\u0000\u0432\u0430\u0001"+ + "\u0000\u0000\u0000\u0432\u0433\u0001\u0000\u0000\u0000\u0433\u0436\u0001"+ + "\u0000\u0000\u0000\u0434\u0432\u0001\u0000\u0000\u0000\u0435\u042d\u0001"+ + "\u0000\u0000\u0000\u0435\u0436\u0001\u0000\u0000\u0000\u0436\u0437\u0001"+ + "\u0000\u0000\u0000\u0437\u0438\u0005\u00b0\u0000\u0000\u0438\u0087\u0001"+ + "\u0000\u0000\u0000\u0439\u043a\u0005\u001a\u0000\u0000\u043a\u043b\u0005"+ + "\u00af\u0000\u0000\u043b\u043c\u0005\u00b5\u0000\u0000\u043c\u043d\u0005"+ + "\u00b0\u0000\u0000\u043d\u0089\u0001\u0000\u0000\u0000\u043e\u043f\u0005"+ + "\u001a\u0000\u0000\u043f\u0440\u0005\u00af\u0000\u0000\u0440\u0441\u0005"+ + "%\u0000\u0000\u0441\u0442\u0003\f\u0006\u0000\u0442\u0443\u0005\u00b0"+ + "\u0000\u0000\u0443\u008b\u0001\u0000\u0000\u0000\u0444\u0445\u0005\u0015"+ + "\u0000\u0000\u0445\u0446\u0005\u008c\u0000\u0000\u0446\u0447\u0003\f\u0006"+ + "\u0000\u0447\u0448\u0005\u0080\u0000\u0000\u0448\u0450\u0003\f\u0006\u0000"+ + "\u0449\u044a\u0005\u008c\u0000\u0000\u044a\u044b\u0003\f\u0006\u0000\u044b"+ + "\u044c\u0005\u0080\u0000\u0000\u044c\u044d\u0003\f\u0006\u0000\u044d\u044f"+ + "\u0001\u0000\u0000\u0000\u044e\u0449\u0001\u0000\u0000\u0000\u044f\u0452"+ + "\u0001\u0000\u0000\u0000\u0450\u044e\u0001\u0000\u0000\u0000\u0450\u0451"+ + "\u0001\u0000\u0000\u0000\u0451\u0455\u0001\u0000\u0000\u0000\u0452\u0450"+ + "\u0001\u0000\u0000\u0000\u0453\u0454\u0005)\u0000\u0000\u0454\u0456\u0003"+ + "\f\u0006\u0000\u0455\u0453\u0001\u0000\u0000\u0000\u0455\u0456\u0001\u0000"+ + "\u0000\u0000\u0456\u0457\u0001\u0000\u0000\u0000\u0457\u0458\u0005+\u0000"+ + "\u0000\u0458\u008d\u0001\u0000\u0000\u0000\u0459\u045a\u0005\u0017\u0000"+ + "\u0000\u045a\u045b\u0005\u00af\u0000\u0000\u045b\u045c\u0003\f\u0006\u0000"+ + "\u045c\u045d\u0005\u000e\u0000\u0000\u045d\u045e\u0003\u00ba]\u0000\u045e"+ + "\u045f\u0005\u00b0\u0000\u0000\u045f\u008f\u0001\u0000\u0000\u0000\u0460"+ + "\u0461\u0005\u00af\u0000\u0000\u0461\u0462\u0003\f\u0006\u0000\u0462\u0463"+ + "\u0005\u00b0\u0000\u0000\u0463\u0091\u0001\u0000\u0000\u0000\u0464\u0465"+ + "\u0005/\u0000\u0000\u0465\u0466\u0005\u00af\u0000\u0000\u0466\u0467\u0003"+ + "\u01c2\u00e1\u0000\u0467\u0468\u00056\u0000\u0000\u0468\u0469\u0003\f"+ + 
"\u0006\u0000\u0469\u046a\u0005\u00b0\u0000\u0000\u046a\u0093\u0001\u0000"+ + "\u0000\u0000\u046b\u046d\u0003\u0006\u0003\u0000\u046c\u046b\u0001\u0000"+ + "\u0000\u0000\u046c\u046d\u0001\u0000\u0000\u0000\u046d\u046e\u0001\u0000"+ + "\u0000\u0000\u046e\u046f\u0007\u0007\u0000\u0000\u046f\u0470\u0005F\u0000"+ + "\u0000\u0470\u0475\u0003\u00fc~\u0000\u0471\u0473\u0005\u000e\u0000\u0000"+ + "\u0472\u0471\u0001\u0000\u0000\u0000\u0472\u0473\u0001\u0000\u0000\u0000"+ + "\u0473\u0474\u0001\u0000\u0000\u0000\u0474\u0476\u0003\"\u0011\u0000\u0475"+ + "\u0472\u0001\u0000\u0000\u0000\u0475\u0476\u0001\u0000\u0000\u0000\u0476"+ + "\u0482\u0001\u0000\u0000\u0000\u0477\u0478\u0005\u00af\u0000\u0000\u0478"+ + "\u047d\u0003\u0096K\u0000\u0479\u047a\u0005\u00ad\u0000\u0000\u047a\u047c"+ + "\u0003\u0096K\u0000\u047b\u0479\u0001\u0000\u0000\u0000\u047c\u047f\u0001"+ + "\u0000\u0000\u0000\u047d\u047b\u0001\u0000\u0000\u0000\u047d\u047e\u0001"+ + "\u0000\u0000\u0000\u047e\u0480\u0001\u0000\u0000\u0000\u047f\u047d\u0001"+ + "\u0000\u0000\u0000\u0480\u0481\u0005\u00b0\u0000\u0000\u0481\u0483\u0001"+ + "\u0000\u0000\u0000\u0482\u0477\u0001\u0000\u0000\u0000\u0482\u0483\u0001"+ + "\u0000\u0000\u0000\u0483\u0484\u0001\u0000\u0000\u0000\u0484\u0485\u0005"+ + "\u008b\u0000\u0000\u0485\u0486\u0005\u00af\u0000\u0000\u0486\u048b\u0003"+ + "\u009aM\u0000\u0487\u0488\u0005\u00ad\u0000\u0000\u0488\u048a\u0003\u009a"+ + "M\u0000\u0489\u0487\u0001\u0000\u0000\u0000\u048a\u048d\u0001\u0000\u0000"+ + "\u0000\u048b\u0489\u0001\u0000\u0000\u0000\u048b\u048c\u0001\u0000\u0000"+ + "\u0000\u048c\u048e\u0001\u0000\u0000\u0000\u048d\u048b\u0001\u0000\u0000"+ + "\u0000\u048e\u0492\u0005\u00b0\u0000\u0000\u048f\u0490\u0005z\u0000\u0000"+ + "\u0490\u0491\u0005\u0082\u0000\u0000\u0491\u0493\u0003\u009cN\u0000\u0492"+ + "\u048f\u0001\u0000\u0000\u0000\u0492\u0493\u0001\u0000\u0000\u0000\u0493"+ + "\u0495\u0001\u0000\u0000\u0000\u0494\u0496\u0003\u0098L\u0000\u0495\u0494"+ + "\u0001\u0000\u0000\u0000\u0495\u0496\u0001\u0000\u0000\u0000\u0496\u0095"+ + "\u0001\u0000\u0000\u0000\u0497\u049a\u0003\u01c2\u00e1\u0000\u0498\u049a"+ + "\u0003\u01be\u00df\u0000\u0499\u0497\u0001\u0000\u0000\u0000\u0499\u0498"+ + "\u0001\u0000\u0000\u0000\u049a\u0097\u0001\u0000\u0000\u0000\u049b\u049c"+ + "\u0005q\u0000\u0000\u049c\u049d\u0003*\u0015\u0000\u049d\u0099\u0001\u0000"+ + "\u0000\u0000\u049e\u04a1\u0005\u001f\u0000\u0000\u049f\u04a1\u0003\f\u0006"+ + "\u0000\u04a0\u049e\u0001\u0000\u0000\u0000\u04a0\u049f\u0001\u0000\u0000"+ + "\u0000\u04a1\u009b\u0001\u0000\u0000\u0000\u04a2\u04a3\u0003`0\u0000\u04a3"+ + "\u04a4\u0007\b\u0000\u0000\u04a4\u04a9\u0001\u0000\u0000\u0000\u04a5\u04a6"+ + "\u0005\u008a\u0000\u0000\u04a6\u04a7\u0005~\u0000\u0000\u04a7\u04a9\u0005"+ + "\u001f\u0000\u0000\u04a8\u04a2\u0001\u0000\u0000\u0000\u04a8\u04a5\u0001"+ + "\u0000\u0000\u0000\u04a9\u009d\u0001\u0000\u0000\u0000\u04aa\u04ac\u0003"+ + "\u0006\u0003\u0000\u04ab\u04aa\u0001\u0000\u0000\u0000\u04ab\u04ac\u0001"+ + "\u0000\u0000\u0000\u04ac\u04ad\u0001\u0000\u0000\u0000\u04ad\u04ae\u0005"+ + "\u0086\u0000\u0000\u04ae\u04b3\u0003\u00fc~\u0000\u04af\u04b1\u0005\u000e"+ + "\u0000\u0000\u04b0\u04af\u0001\u0000\u0000\u0000\u04b0\u04b1\u0001\u0000"+ + "\u0000\u0000\u04b1\u04b2\u0001\u0000\u0000\u0000\u04b2\u04b4\u0003\"\u0011"+ + "\u0000\u04b3\u04b0\u0001\u0000\u0000\u0000\u04b3\u04b4\u0001\u0000\u0000"+ + "\u0000\u04b4\u04b5\u0001\u0000\u0000\u0000\u04b5\u04ba\u0003\u00a2Q\u0000"+ + "\u04b6\u04b7\u0005\u00ad\u0000\u0000\u04b7\u04b9\u0003\u00a2Q\u0000\u04b8"+ + 
"\u04b6\u0001\u0000\u0000\u0000\u04b9\u04bc\u0001\u0000\u0000\u0000\u04ba"+ + "\u04b8\u0001\u0000\u0000\u0000\u04ba\u04bb\u0001\u0000\u0000\u0000\u04bb"+ + "\u04bd\u0001\u0000\u0000\u0000\u04bc\u04ba\u0001\u0000\u0000\u0000\u04bd"+ + "\u04be\u0005\u008d\u0000\u0000\u04be\u04c0\u0003\f\u0006\u0000\u04bf\u04c1"+ + "\u0003\u00a0P\u0000\u04c0\u04bf\u0001\u0000\u0000\u0000\u04c0\u04c1\u0001"+ + "\u0000\u0000\u0000\u04c1\u009f\u0001\u0000\u0000\u0000\u04c2\u04c3\u0005"+ + "q\u0000\u0000\u04c3\u04c4\u0003*\u0015\u0000\u04c4\u00a1\u0001\u0000\u0000"+ + "\u0000\u04c5\u04c6\u0005z\u0000\u0000\u04c6\u04ce\u0003\u00a4R\u0000\u04c7"+ + "\u04ca\u0005\u00ad\u0000\u0000\u04c8\u04cb\u0003\u00a2Q\u0000\u04c9\u04cb"+ + "\u0003\u00a4R\u0000\u04ca\u04c8\u0001\u0000\u0000\u0000\u04ca\u04c9\u0001"+ + "\u0000\u0000\u0000\u04cb\u04cd\u0001\u0000\u0000\u0000\u04cc\u04c7\u0001"+ + "\u0000\u0000\u0000\u04cd\u04d0\u0001\u0000\u0000\u0000\u04ce\u04cc\u0001"+ + "\u0000\u0000\u0000\u04ce\u04cf\u0001\u0000\u0000\u0000\u04cf\u050d\u0001"+ + "\u0000\u0000\u0000\u04d0\u04ce\u0001\u0000\u0000\u0000\u04d1\u04d2\u0005"+ + "\u0007\u0000\u0000\u04d2\u04da\u0003\u00a6S\u0000\u04d3\u04d6\u0005\u00ad"+ + "\u0000\u0000\u04d4\u04d7\u0003\u00a2Q\u0000\u04d5\u04d7\u0003\u00a6S\u0000"+ + "\u04d6\u04d4\u0001\u0000\u0000\u0000\u04d6\u04d5\u0001\u0000\u0000\u0000"+ + "\u04d7\u04d9\u0001\u0000\u0000\u0000\u04d8\u04d3\u0001\u0000\u0000\u0000"+ + "\u04d9\u04dc\u0001\u0000\u0000\u0000\u04da\u04d8\u0001\u0000\u0000\u0000"+ + "\u04da\u04db\u0001\u0000\u0000\u0000\u04db\u050d\u0001\u0000\u0000\u0000"+ + "\u04dc\u04da\u0001\u0000\u0000\u0000\u04dd\u04de\u0005m\u0000\u0000\u04de"+ + "\u04e6\u0003\u00a8T\u0000\u04df\u04e2\u0005\u00ad\u0000\u0000\u04e0\u04e3"+ + "\u0003\u00a2Q\u0000\u04e1\u04e3\u0003\u00a8T\u0000\u04e2\u04e0\u0001\u0000"+ + "\u0000\u0000\u04e2\u04e1\u0001\u0000\u0000\u0000\u04e3\u04e5\u0001\u0000"+ + "\u0000\u0000\u04e4\u04df\u0001\u0000\u0000\u0000\u04e5\u04e8\u0001\u0000"+ + "\u0000\u0000\u04e6\u04e4\u0001\u0000\u0000\u0000\u04e6\u04e7\u0001\u0000"+ + "\u0000\u0000\u04e7\u050d\u0001\u0000\u0000\u0000\u04e8\u04e6\u0001\u0000"+ + "\u0000\u0000\u04e9\u04ea\u0005p\u0000\u0000\u04ea\u04f2\u0003\u00aaU\u0000"+ + "\u04eb\u04ee\u0005\u00ad\u0000\u0000\u04ec\u04ef\u0003\u00a2Q\u0000\u04ed"+ + "\u04ef\u0003\u00aaU\u0000\u04ee\u04ec\u0001\u0000\u0000\u0000\u04ee\u04ed"+ + "\u0001\u0000\u0000\u0000\u04ef\u04f1\u0001\u0000\u0000\u0000\u04f0\u04eb"+ + "\u0001\u0000\u0000\u0000\u04f1\u04f4\u0001\u0000\u0000\u0000\u04f2\u04f0"+ + "\u0001\u0000\u0000\u0000\u04f2\u04f3\u0001\u0000\u0000\u0000\u04f3\u050d"+ + "\u0001\u0000\u0000\u0000\u04f4\u04f2\u0001\u0000\u0000\u0000\u04f5\u04f6"+ + "\u0005H\u0000\u0000\u04f6\u04f7\u0005T\u0000\u0000\u04f7\u04ff\u0003\u00ac"+ + "V\u0000\u04f8\u04fb\u0005\u00ad\u0000\u0000\u04f9\u04fc\u0003\u00a2Q\u0000"+ + "\u04fa\u04fc\u0003\u00acV\u0000\u04fb\u04f9\u0001\u0000\u0000\u0000\u04fb"+ + "\u04fa\u0001\u0000\u0000\u0000\u04fc\u04fe\u0001\u0000\u0000\u0000\u04fd"+ + "\u04f8\u0001\u0000\u0000\u0000\u04fe\u0501\u0001\u0000\u0000\u0000\u04ff"+ + "\u04fd\u0001\u0000\u0000\u0000\u04ff\u0500\u0001\u0000\u0000\u0000\u0500"+ + "\u050d\u0001\u0000\u0000\u0000\u0501\u04ff\u0001\u0000\u0000\u0000\u0502"+ + "\u0503\u0005z\u0000\u0000\u0503\u0504\u0005\u0082\u0000\u0000\u0504\u0509"+ + "\u0003\u00b0X\u0000\u0505\u0506\u0005\u00ad\u0000\u0000\u0506\u0508\u0003"+ + "\u00a2Q\u0000\u0507\u0505\u0001\u0000\u0000\u0000\u0508\u050b\u0001\u0000"+ + "\u0000\u0000\u0509\u0507\u0001\u0000\u0000\u0000\u0509\u050a\u0001\u0000"+ + 
"\u0000\u0000\u050a\u050d\u0001\u0000\u0000\u0000\u050b\u0509\u0001\u0000"+ + "\u0000\u0000\u050c\u04c5\u0001\u0000\u0000\u0000\u050c\u04d1\u0001\u0000"+ + "\u0000\u0000\u050c\u04dd\u0001\u0000\u0000\u0000\u050c\u04e9\u0001\u0000"+ + "\u0000\u0000\u050c\u04f5\u0001\u0000\u0000\u0000\u050c\u0502\u0001\u0000"+ + "\u0000\u0000\u050d\u00a3\u0001\u0000\u0000\u0000\u050e\u050f\u0003\u00b2"+ + "Y\u0000\u050f\u0510\u0005\u00bd\u0000\u0000\u0510\u0511\u0003\f\u0006"+ + "\u0000\u0511\u00a5\u0001\u0000\u0000\u0000\u0512\u0514\u0005F\u0000\u0000"+ "\u0513\u0512\u0001\u0000\u0000\u0000\u0513\u0514\u0001\u0000\u0000\u0000"+ - "\u0514\u0515\u0001\u0000\u0000\u0000\u0515\u0516\u0003\f\u0006\u0000\u0516"+ - "\u00a7\u0001\u0000\u0000\u0000\u0517\u0519\u0005A\u0000\u0000\u0518\u0517"+ - "\u0001\u0000\u0000\u0000\u0518\u0519\u0001\u0000\u0000\u0000\u0519\u051a"+ - "\u0001\u0000\u0000\u0000\u051a\u051c\u0003\u00b2Y\u0000\u051b\u051d\u0005"+ - ",\u0000\u0000\u051c\u051b\u0001\u0000\u0000\u0000\u051c\u051d\u0001\u0000"+ - "\u0000\u0000\u051d\u051e\u0001\u0000\u0000\u0000\u051e\u051f\u0003\f\u0006"+ - "\u0000\u051f\u00a9\u0001\u0000\u0000\u0000\u0520\u0521\u0003\u00b2Y\u0000"+ - "\u0521\u00ab\u0001\u0000\u0000\u0000\u0522\u0523\u0003\u00b2Y\u0000\u0523"+ - "\u0524\u0005\u0089\u0000\u0000\u0524\u0525\u0005c\u0000\u0000\u0525\u0526"+ - "\u0003\u00aeW\u0000\u0526\u00ad\u0001\u0000\u0000\u0000\u0527\u052c\u0003"+ - "~?\u0000\u0528\u052c\u0003|>\u0000\u0529\u052c\u0003x<\u0000\u052a\u052c"+ - "\u0003z=\u0000\u052b\u0527\u0001\u0000\u0000\u0000\u052b\u0528\u0001\u0000"+ - "\u0000\u0000\u052b\u0529\u0001\u0000\u0000\u0000\u052b\u052a\u0001\u0000"+ - "\u0000\u0000\u052c\u00af\u0001\u0000\u0000\u0000\u052d\u052e\u0003`0\u0000"+ - "\u052e\u052f\u0007\b\u0000\u0000\u052f\u0534\u0001\u0000\u0000\u0000\u0530"+ - "\u0531\u0005\u0085\u0000\u0000\u0531\u0532\u0005y\u0000\u0000\u0532\u0534"+ - "\u0005\u001d\u0000\u0000\u0533\u052d\u0001\u0000\u0000\u0000\u0533\u0530"+ - "\u0001\u0000\u0000\u0000\u0534\u00b1\u0001\u0000\u0000\u0000\u0535\u0536"+ - "\u0003f3\u0000\u0536\u00b3\u0001\u0000\u0000\u0000\u0537\u0538\u0003`"+ - "0\u0000\u0538\u00b5\u0001\u0000\u0000\u0000\u0539\u053b\u0003\u0006\u0003"+ - "\u0000\u053a\u0539\u0001\u0000\u0000\u0000\u053a\u053b\u0001\u0000\u0000"+ - "\u0000\u053b\u053c\u0001\u0000\u0000\u0000\u053c\u053d\u0005\u001e\u0000"+ - "\u0000\u053d\u053e\u00052\u0000\u0000\u053e\u0543\u0003\u00fc~\u0000\u053f"+ - "\u0541\u0005\r\u0000\u0000\u0540\u053f\u0001\u0000\u0000\u0000\u0540\u0541"+ - "\u0001\u0000\u0000\u0000\u0541\u0542\u0001\u0000\u0000\u0000\u0542\u0544"+ - "\u0003\"\u0011\u0000\u0543\u0540\u0001\u0000\u0000\u0000\u0543\u0544\u0001"+ - "\u0000\u0000\u0000\u0544\u0547\u0001\u0000\u0000\u0000\u0545\u0546\u0005"+ - "\u0088\u0000\u0000\u0546\u0548\u0003\f\u0006\u0000\u0547\u0545\u0001\u0000"+ - "\u0000\u0000\u0547\u0548\u0001\u0000\u0000\u0000\u0548\u054a\u0001\u0000"+ - "\u0000\u0000\u0549\u054b\u0003\u00b8\\\u0000\u054a\u0549\u0001\u0000\u0000"+ - "\u0000\u054a\u054b\u0001\u0000\u0000\u0000\u054b\u00b7\u0001\u0000\u0000"+ - "\u0000\u054c\u054d\u0005l\u0000\u0000\u054d\u054e\u0003*\u0015\u0000\u054e"+ - "\u00b9\u0001\u0000\u0000\u0000\u054f\u0551\u0003\u00bc^\u0000\u0550\u0552"+ - "\u0007\t\u0000\u0000\u0551\u0550\u0001\u0000\u0000\u0000\u0551\u0552\u0001"+ - "\u0000\u0000\u0000\u0552\u00bb\u0001\u0000\u0000\u0000\u0553\u0563\u0003"+ - "\u00d8l\u0000\u0554\u0563\u0003\u00cae\u0000\u0555\u0563\u0003\u00d6k"+ - "\u0000\u0556\u0563\u0003\u00d4j\u0000\u0557\u0563\u0003\u00d0h\u0000\u0558"+ - 
"\u0563\u0003\u00ccf\u0000\u0559\u0563\u0003\u00ceg\u0000\u055a\u0563\u0003"+ - "\u00c8d\u0000\u055b\u0563\u0003\u00be_\u0000\u055c\u0563\u0003\u00d2i"+ - "\u0000\u055d\u0563\u0003\u00dam\u0000\u055e\u0563\u0003\u00dcn\u0000\u055f"+ - "\u0563\u0003\u00deo\u0000\u0560\u0563\u0003\u00e0p\u0000\u0561\u0563\u0003"+ - "\u00e2q\u0000\u0562\u0553\u0001\u0000\u0000\u0000\u0562\u0554\u0001\u0000"+ - "\u0000\u0000\u0562\u0555\u0001\u0000\u0000\u0000\u0562\u0556\u0001\u0000"+ - "\u0000\u0000\u0562\u0557\u0001\u0000\u0000\u0000\u0562\u0558\u0001\u0000"+ - "\u0000\u0000\u0562\u0559\u0001\u0000\u0000\u0000\u0562\u055a\u0001\u0000"+ - "\u0000\u0000\u0562\u055b\u0001\u0000\u0000\u0000\u0562\u055c\u0001\u0000"+ - "\u0000\u0000\u0562\u055d\u0001\u0000\u0000\u0000\u0562\u055e\u0001\u0000"+ - "\u0000\u0000\u0562\u055f\u0001\u0000\u0000\u0000\u0562\u0560\u0001\u0000"+ - "\u0000\u0000\u0562\u0561\u0001\u0000\u0000\u0000\u0563\u00bd\u0001\u0000"+ - "\u0000\u0000\u0564\u0565\u0005\u009f\u0000\u0000\u0565\u0566\u0005\u00aa"+ - "\u0000\u0000\u0566\u056b\u0003\u00c0`\u0000\u0567\u0568\u0005\u00a8\u0000"+ - "\u0000\u0568\u056a\u0003\u00c0`\u0000\u0569\u0567\u0001\u0000\u0000\u0000"+ - "\u056a\u056d\u0001\u0000\u0000\u0000\u056b\u0569\u0001\u0000\u0000\u0000"+ - "\u056b\u056c\u0001\u0000\u0000\u0000\u056c\u056e\u0001\u0000\u0000\u0000"+ - "\u056d\u056b\u0001\u0000\u0000\u0000\u056e\u056f\u0005\u00ab\u0000\u0000"+ - "\u056f\u00bf\u0001\u0000\u0000\u0000\u0570\u0571\u0003\u01b8\u00dc\u0000"+ - "\u0571\u0573\u0003\u00bc^\u0000\u0572\u0574\u0003\u00c2a\u0000\u0573\u0572"+ - "\u0001\u0000\u0000\u0000\u0573\u0574\u0001\u0000\u0000\u0000\u0574\u0576"+ - "\u0001\u0000\u0000\u0000\u0575\u0577\u0003\u01aa\u00d5\u0000\u0576\u0575"+ - "\u0001\u0000\u0000\u0000\u0576\u0577\u0001\u0000\u0000\u0000\u0577\u00c1"+ - "\u0001\u0000\u0000\u0000\u0578\u057a\u0003\u00c4b\u0000\u0579\u057b\u0003"+ - "\u00c6c\u0000\u057a\u0579\u0001\u0000\u0000\u0000\u057a\u057b\u0001\u0000"+ - "\u0000\u0000\u057b\u0581\u0001\u0000\u0000\u0000\u057c\u057e\u0003\u00c6"+ - "c\u0000\u057d\u057f\u0003\u00c4b\u0000\u057e\u057d\u0001\u0000\u0000\u0000"+ - "\u057e\u057f\u0001\u0000\u0000\u0000\u057f\u0581\u0001\u0000\u0000\u0000"+ - "\u0580\u0578\u0001\u0000\u0000\u0000\u0580\u057c\u0001\u0000\u0000\u0000"+ - "\u0581\u00c3\u0001\u0000\u0000\u0000\u0582\u0588\u0005\u001d\u0000\u0000"+ - "\u0583\u0589\u0003\u01b0\u00d8\u0000\u0584\u0589\u0003\u01b4\u00da\u0000"+ - "\u0585\u0589\u0005\u00c7\u0000\u0000\u0586\u0589\u0005\u00c6\u0000\u0000"+ - "\u0587\u0589\u0003\u01b8\u00dc\u0000\u0588\u0583\u0001\u0000\u0000\u0000"+ - "\u0588\u0584\u0001\u0000\u0000\u0000\u0588\u0585\u0001\u0000\u0000\u0000"+ - "\u0588\u0586\u0001\u0000\u0000\u0000\u0588\u0587\u0001\u0000\u0000\u0000"+ - "\u0589\u00c5\u0001\u0000\u0000\u0000\u058a\u058b\u0005X\u0000\u0000\u058b"+ - "\u058c\u0005\u00c5\u0000\u0000\u058c\u00c7\u0001\u0000\u0000\u0000\u058d"+ - "\u058e\u0005\u009c\u0000\u0000\u058e\u058f\u0005\u00aa\u0000\u0000\u058f"+ - "\u0590\u0003\u00bc^\u0000\u0590\u0591\u0005\u00ab\u0000\u0000\u0591\u00c9"+ - "\u0001\u0000\u0000\u0000\u0592\u0593\u0005\u0093\u0000\u0000\u0593\u0594"+ - "\u0005\u00aa\u0000\u0000\u0594\u0595\u0003\u00bc^\u0000\u0595\u0596\u0005"+ - "\u00ab\u0000\u0000\u0596\u00cb\u0001\u0000\u0000\u0000\u0597\u0598\u0007"+ - "\n\u0000\u0000\u0598\u00cd\u0001\u0000\u0000\u0000\u0599\u059a\u0005C"+ - "\u0000\u0000\u059a\u00cf\u0001\u0000\u0000\u0000\u059b\u059c\u0007\u000b"+ - "\u0000\u0000\u059c\u00d1\u0001\u0000\u0000\u0000\u059d\u059e\u0005\u00a0"+ - 
"\u0000\u0000\u059e\u00d3\u0001\u0000\u0000\u0000\u059f\u05a0\u0005\u0097"+ - "\u0000\u0000\u05a0\u05a1\u0005\u00aa\u0000\u0000\u05a1\u05a2\u0003\u01b6"+ - "\u00db\u0000\u05a2\u05a3\u0005\u00ab\u0000\u0000\u05a3\u05aa\u0001\u0000"+ - "\u0000\u0000\u05a4\u05a5\u0005\u0097\u0000\u0000\u05a5\u05a6\u0005\u00aa"+ - "\u0000\u0000\u05a6\u05a7\u0003\u01b6\u00db\u0000\u05a7\u05a8\u0006j\uffff"+ - "\uffff\u0000\u05a8\u05aa\u0001\u0000\u0000\u0000\u05a9\u059f\u0001\u0000"+ - "\u0000\u0000\u05a9\u05a4\u0001\u0000\u0000\u0000\u05aa\u00d5\u0001\u0000"+ - "\u0000\u0000\u05ab\u05ac\u0005\u0095\u0000\u0000\u05ac\u00d7\u0001\u0000"+ - "\u0000\u0000\u05ad\u05b1\u0005\u0094\u0000\u0000\u05ae\u05af\u0005\u00aa"+ - "\u0000\u0000\u05af\u05b0\u0005\u00c8\u0000\u0000\u05b0\u05b2\u0005\u00ab"+ - "\u0000\u0000\u05b1\u05ae\u0001\u0000\u0000\u0000\u05b1\u05b2\u0001\u0000"+ - "\u0000\u0000\u05b2\u00d9\u0001\u0000\u0000\u0000\u05b3\u05b7\u0005\u00a1"+ - "\u0000\u0000\u05b4\u05b5\u0005\u00aa\u0000\u0000\u05b5\u05b6\u0005\u00c8"+ - "\u0000\u0000\u05b6\u05b8\u0005\u00ab\u0000\u0000\u05b7\u05b4\u0001\u0000"+ - "\u0000\u0000\u05b7\u05b8\u0001\u0000\u0000\u0000\u05b8\u00db\u0001\u0000"+ - "\u0000\u0000\u05b9\u05ba\u0005\u00a2\u0000\u0000\u05ba\u00dd\u0001\u0000"+ - "\u0000\u0000\u05bb\u05bc\u0005\u00a3\u0000\u0000\u05bc\u00df\u0001\u0000"+ - "\u0000\u0000\u05bd\u05be\u0005\u00a4\u0000\u0000\u05be\u00e1\u0001\u0000"+ - "\u0000\u0000\u05bf\u05c0\u0005\u00a5\u0000\u0000\u05c0\u00e3\u0001\u0000"+ - "\u0000\u0000\u05c1\u05c6\u0003\u01b8\u00dc\u0000\u05c2\u05c3\u0005\u00b1"+ - "\u0000\u0000\u05c3\u05c5\u0003\u01b8\u00dc\u0000\u05c4\u05c2\u0001\u0000"+ - "\u0000\u0000\u05c5\u05c8\u0001\u0000\u0000\u0000\u05c6\u05c4\u0001\u0000"+ - "\u0000\u0000\u05c6\u05c7\u0001\u0000\u0000\u0000\u05c7\u00e5\u0001\u0000"+ - "\u0000\u0000\u05c8\u05c6\u0001\u0000\u0000\u0000\u05c9\u05ce\u0003\u00e8"+ - "t\u0000\u05ca\u05cb\u0005\u00b1\u0000\u0000\u05cb\u05cd\u0003\u00e8t\u0000"+ - "\u05cc\u05ca\u0001\u0000\u0000\u0000\u05cd\u05d0\u0001\u0000\u0000\u0000"+ - "\u05ce\u05cc\u0001\u0000\u0000\u0000\u05ce\u05cf\u0001\u0000\u0000\u0000"+ - "\u05cf\u00e7\u0001\u0000\u0000\u0000\u05d0\u05ce\u0001\u0000\u0000\u0000"+ - "\u05d1\u05d3\u0005\u00cd\u0000\u0000\u05d2\u05d1\u0001\u0000\u0000\u0000"+ - "\u05d2\u05d3\u0001\u0000\u0000\u0000\u05d3\u05d4\u0001\u0000\u0000\u0000"+ - "\u05d4\u05d5\u0003\u01b8\u00dc\u0000\u05d5\u00e9\u0001\u0000\u0000\u0000"+ - "\u05d6\u05db\u0003\u00ecv\u0000\u05d7\u05d8\u0005\u00b1\u0000\u0000\u05d8"+ - "\u05da\u0003\u00ecv\u0000\u05d9\u05d7\u0001\u0000\u0000\u0000\u05da\u05dd"+ - "\u0001\u0000\u0000\u0000\u05db\u05d9\u0001\u0000\u0000\u0000\u05db\u05dc"+ - "\u0001\u0000\u0000\u0000\u05dc\u00eb\u0001\u0000\u0000\u0000\u05dd\u05db"+ - "\u0001\u0000\u0000\u0000\u05de\u05e1\u0003\u01b8\u00dc\u0000\u05df\u05e1"+ - "\u0005\u00cb\u0000\u0000\u05e0\u05de\u0001\u0000\u0000\u0000\u05e0\u05df"+ - "\u0001\u0000\u0000\u0000\u05e1\u00ed\u0001\u0000\u0000\u0000\u05e2\u05e3"+ - "\u0005\u0019\u0000\u0000\u05e3\u05e7\u0005T\u0000\u0000\u05e4\u05e5\u0005"+ - ";\u0000\u0000\u05e5\u05e6\u0005X\u0000\u0000\u05e6\u05e8\u0005*\u0000"+ - "\u0000\u05e7\u05e4\u0001\u0000\u0000\u0000\u05e7\u05e8\u0001\u0000\u0000"+ - "\u0000\u05e8\u05e9\u0001\u0000\u0000\u0000\u05e9\u05ea\u0003\u00fe\u007f"+ - "\u0000\u05ea\u00ef\u0001\u0000\u0000\u0000\u05eb\u05ec\u0005#\u0000\u0000"+ - "\u05ec\u05ef\u0005T\u0000\u0000\u05ed\u05ee\u0005;\u0000\u0000\u05ee\u05f0"+ - "\u0005*\u0000\u0000\u05ef\u05ed\u0001\u0000\u0000\u0000\u05ef\u05f0\u0001"+ - 
"\u0000\u0000\u0000\u05f0\u05f1\u0001\u0000\u0000\u0000\u05f1\u05f3\u0003"+ - "\u00fe\u007f\u0000\u05f2\u05f4\u0005\u0014\u0000\u0000\u05f3\u05f2\u0001"+ - "\u0000\u0000\u0000\u05f3\u05f4\u0001\u0000\u0000\u0000\u05f4\u00f1\u0001"+ - "\u0000\u0000\u0000\u05f5\u05f6\u0003\u01b8\u00dc\u0000\u05f6\u00f3\u0001"+ - "\u0000\u0000\u0000\u05f7\u05f8\u0005\u0019\u0000\u0000\u05f8\u05f9\u0005"+ - "i\u0000\u0000\u05f9\u05fa\u0003\u00f2y\u0000\u05fa\u00f5\u0001\u0000\u0000"+ - "\u0000\u05fb\u05fc\u0005#\u0000\u0000\u05fc\u05fd\u0005i\u0000\u0000\u05fd"+ - "\u05fe\u0003\u00f2y\u0000\u05fe\u00f7\u0001\u0000\u0000\u0000\u05ff\u0600"+ - "\u0005u\u0000\u0000\u0600\u0601\u0005L\u0000\u0000\u0601\u0602\u0005i"+ - "\u0000\u0000\u0602\u0603\u0003\u00f2y\u0000\u0603\u00f9\u0001\u0000\u0000"+ - "\u0000\u0604\u0605\u0005\u0019\u0000\u0000\u0605\u0609\u0005y\u0000\u0000"+ - "\u0606\u0607\u0005;\u0000\u0000\u0607\u0608\u0005X\u0000\u0000\u0608\u060a"+ - "\u0005*\u0000\u0000\u0609\u0606\u0001\u0000\u0000\u0000\u0609\u060a\u0001"+ - "\u0000\u0000\u0000\u060a\u060b\u0001\u0000\u0000\u0000\u060b\u060d\u0003"+ - "\u00fc~\u0000\u060c\u060e\u0003\u01aa\u00d5\u0000\u060d\u060c\u0001\u0000"+ - "\u0000\u0000\u060d\u060e\u0001\u0000\u0000\u0000\u060e\u060f\u0001\u0000"+ - "\u0000\u0000\u060f\u0610\u0005\u00aa\u0000\u0000\u0610\u0611\u0003\u0100"+ - "\u0080\u0000\u0611\u0612\u0005\u00ab\u0000\u0000\u0612\u0613\u0003\u0116"+ - "\u008b\u0000\u0613\u00fb\u0001\u0000\u0000\u0000\u0614\u0615\u0003\u00fe"+ - "\u007f\u0000\u0615\u0616\u0005\u00a9\u0000\u0000\u0616\u0618\u0001\u0000"+ - "\u0000\u0000\u0617\u0614\u0001\u0000\u0000\u0000\u0617\u0618\u0001\u0000"+ - "\u0000\u0000\u0618\u0619\u0001\u0000\u0000\u0000\u0619\u061a\u0003\u00e6"+ - "s\u0000\u061a\u00fd\u0001\u0000\u0000\u0000\u061b\u061c\u0003\u00e4r\u0000"+ - "\u061c\u00ff\u0001\u0000\u0000\u0000\u061d\u0621\u0003\u0102\u0081\u0000"+ - "\u061e\u0621\u0003\u010c\u0086\u0000\u061f\u0621\u0003\u0108\u0084\u0000"+ - "\u0620\u061d\u0001\u0000\u0000\u0000\u0620\u061e\u0001\u0000\u0000\u0000"+ - "\u0620\u061f\u0001\u0000\u0000\u0000\u0621\u062a\u0001\u0000\u0000\u0000"+ - "\u0622\u0626\u0005\u00a8\u0000\u0000\u0623\u0627\u0003\u0102\u0081\u0000"+ - "\u0624\u0627\u0003\u010c\u0086\u0000\u0625\u0627\u0003\u0108\u0084\u0000"+ - "\u0626\u0623\u0001\u0000\u0000\u0000\u0626\u0624\u0001\u0000\u0000\u0000"+ - "\u0626\u0625\u0001\u0000\u0000\u0000\u0627\u0629\u0001\u0000\u0000\u0000"+ - "\u0628\u0622\u0001\u0000\u0000\u0000\u0629\u062c\u0001\u0000\u0000\u0000"+ - "\u062a\u0628\u0001\u0000\u0000\u0000\u062a\u062b\u0001\u0000\u0000\u0000"+ - "\u062b\u0101\u0001\u0000\u0000\u0000\u062c\u062a\u0001\u0000\u0000\u0000"+ - "\u062d\u062e\u0003\u01b8\u00dc\u0000\u062e\u0634\u0003\u00bc^\u0000\u062f"+ - "\u0635\u0003\u00c2a\u0000\u0630\u0635\u0003\u0126\u0093\u0000\u0631\u0635"+ - "\u0003\u012c\u0096\u0000\u0632\u0635\u0003\u012a\u0095\u0000\u0633\u0635"+ - "\u0003\u0104\u0082\u0000\u0634\u062f\u0001\u0000\u0000\u0000\u0634\u0630"+ - "\u0001\u0000\u0000\u0000\u0634\u0631\u0001\u0000\u0000\u0000\u0634\u0632"+ - "\u0001\u0000\u0000\u0000\u0634\u0633\u0001\u0000\u0000\u0000\u0634\u0635"+ - "\u0001\u0000\u0000\u0000\u0635\u0637\u0001\u0000\u0000\u0000\u0636\u0638"+ - "\u0003\u01aa\u00d5\u0000\u0637\u0636\u0001\u0000\u0000\u0000\u0637\u0638"+ - "\u0001\u0000\u0000\u0000\u0638\u0103\u0001\u0000\u0000\u0000\u0639\u063a"+ - "\u0005\u00aa\u0000\u0000\u063a\u063f\u0003\u0106\u0083\u0000\u063b\u063c"+ - "\u0005\u00a8\u0000\u0000\u063c\u063e\u0003\u0106\u0083\u0000\u063d\u063b"+ - 
"\u0001\u0000\u0000\u0000\u063e\u0641\u0001\u0000\u0000\u0000\u063f\u063d"+ - "\u0001\u0000\u0000\u0000\u063f\u0640\u0001\u0000\u0000\u0000\u0640\u0642"+ - "\u0001\u0000\u0000\u0000\u0641\u063f\u0001\u0000\u0000\u0000\u0642\u0643"+ - "\u0005\u00ab\u0000\u0000\u0643\u0105\u0001\u0000\u0000\u0000\u0644\u0645"+ - "\u0003\u010a\u0085\u0000\u0645\u0646\u0005\r\u0000\u0000\u0646\u0647\u0007"+ - "\f\u0000\u0000\u0647\u0648\u0005S\u0000\u0000\u0648\u0107\u0001\u0000"+ - "\u0000\u0000\u0649\u064a\u0003\u0106\u0083\u0000\u064a\u0109\u0001\u0000"+ - "\u0000\u0000\u064b\u064e\u0003\u01b8\u00dc\u0000\u064c\u064e\u0003\u01b4"+ - "\u00da\u0000\u064d\u064b\u0001\u0000\u0000\u0000\u064d\u064c\u0001\u0000"+ - "\u0000\u0000\u064e\u0656\u0001\u0000\u0000\u0000\u064f\u0652\u0005\u00b1"+ - "\u0000\u0000\u0650\u0653\u0003\u01b8\u00dc\u0000\u0651\u0653\u0003\u01b4"+ - "\u00da\u0000\u0652\u0650\u0001\u0000\u0000\u0000\u0652\u0651\u0001\u0000"+ - "\u0000\u0000\u0653\u0655\u0001\u0000\u0000\u0000\u0654\u064f\u0001\u0000"+ - "\u0000\u0000\u0655\u0658\u0001\u0000\u0000\u0000\u0656\u0654\u0001\u0000"+ - "\u0000\u0000\u0656\u0657\u0001\u0000\u0000\u0000\u0657\u010b\u0001\u0000"+ - "\u0000\u0000\u0658\u0656\u0001\u0000\u0000\u0000\u0659\u065a\u0005g\u0000"+ - "\u0000\u065a\u065b\u0005E\u0000\u0000\u065b\u0660\u0005\u00aa\u0000\u0000"+ - "\u065c\u065e\u0003\u010e\u0087\u0000\u065d\u065f\u0005\u00a8\u0000\u0000"+ - "\u065e\u065d\u0001\u0000\u0000\u0000\u065e\u065f\u0001\u0000\u0000\u0000"+ - "\u065f\u0661\u0001\u0000\u0000\u0000\u0660\u065c\u0001\u0000\u0000\u0000"+ - "\u0660\u0661\u0001\u0000\u0000\u0000\u0661\u0663\u0001\u0000\u0000\u0000"+ - "\u0662\u0664\u0003\u0110\u0088\u0000\u0663\u0662\u0001\u0000\u0000\u0000"+ - "\u0663\u0664\u0001\u0000\u0000\u0000\u0664\u0665\u0001\u0000\u0000\u0000"+ - "\u0665\u0666\u0005\u00ab\u0000\u0000\u0666\u010d\u0001\u0000\u0000\u0000"+ - "\u0667\u0668\u0005v\u0000\u0000\u0668\u0669\u0005\u00aa\u0000\u0000\u0669"+ - "\u066a\u0003\u0110\u0088\u0000\u066a\u066b\u0005\u00ab\u0000\u0000\u066b"+ - "\u0671\u0001\u0000\u0000\u0000\u066c\u066d\u0005\u00aa\u0000\u0000\u066d"+ - "\u066e\u0003\u0110\u0088\u0000\u066e\u066f\u0006\u0087\uffff\uffff\u0000"+ - "\u066f\u0671\u0001\u0000\u0000\u0000\u0670\u0667\u0001\u0000\u0000\u0000"+ - "\u0670\u066c\u0001\u0000\u0000\u0000\u0671\u010f\u0001\u0000\u0000\u0000"+ - "\u0672\u0677\u0003\u0112\u0089\u0000\u0673\u0674\u0005\u00a8\u0000\u0000"+ - "\u0674\u0676\u0003\u0112\u0089\u0000\u0675\u0673\u0001\u0000\u0000\u0000"+ - "\u0676\u0679\u0001\u0000\u0000\u0000\u0677\u0675\u0001\u0000\u0000\u0000"+ - "\u0677\u0678\u0001\u0000\u0000\u0000\u0678\u0111\u0001\u0000\u0000\u0000"+ - "\u0679\u0677\u0001\u0000\u0000\u0000\u067a\u067c\u0003\u01b8\u00dc\u0000"+ - "\u067b\u067d\u0003\u0114\u008a\u0000\u067c\u067b\u0001\u0000\u0000\u0000"+ - "\u067c\u067d\u0001\u0000\u0000\u0000\u067d\u0113\u0001\u0000\u0000\u0000"+ - "\u067e\u067f\u0005\u00aa\u0000\u0000\u067f\u0680\u0005\u00c8\u0000\u0000"+ - "\u0680\u0681\u0005\u00ab\u0000\u0000\u0681\u0115\u0001\u0000\u0000\u0000"+ - "\u0682\u0684\u0003\u0118\u008c\u0000\u0683\u0682\u0001\u0000\u0000\u0000"+ - "\u0683\u0684\u0001\u0000\u0000\u0000\u0684\u0686\u0001\u0000\u0000\u0000"+ - "\u0685\u0687\u0003\u011c\u008e\u0000\u0686\u0685\u0001\u0000\u0000\u0000"+ - "\u0686\u0687\u0001\u0000\u0000\u0000\u0687\u06bf\u0001\u0000\u0000\u0000"+ - "\u0688\u068a\u0003\u011c\u008e\u0000\u0689\u0688\u0001\u0000\u0000\u0000"+ - "\u0689\u068a\u0001\u0000\u0000\u0000\u068a\u068c\u0001\u0000\u0000\u0000"+ - 
"\u068b\u068d\u0003\u0118\u008c\u0000\u068c\u068b\u0001\u0000\u0000\u0000"+ - "\u068c\u068d\u0001\u0000\u0000\u0000\u068d\u06bf\u0001\u0000\u0000\u0000"+ - "\u068e\u0690\u0003\u0118\u008c\u0000\u068f\u068e\u0001\u0000\u0000\u0000"+ - "\u068f\u0690\u0001\u0000\u0000\u0000\u0690\u0692\u0001\u0000\u0000\u0000"+ - "\u0691\u0693\u0003\u0122\u0091\u0000\u0692\u0691\u0001\u0000\u0000\u0000"+ - "\u0692\u0693\u0001\u0000\u0000\u0000\u0693\u06bf\u0001\u0000\u0000\u0000"+ - "\u0694\u0696\u0003\u0122\u0091\u0000\u0695\u0694\u0001\u0000\u0000\u0000"+ - "\u0695\u0696\u0001\u0000\u0000\u0000\u0696\u0698\u0001\u0000\u0000\u0000"+ - "\u0697\u0699\u0003\u0118\u008c\u0000\u0698\u0697\u0001\u0000\u0000\u0000"+ - "\u0698\u0699\u0001\u0000\u0000\u0000\u0699\u06bf\u0001\u0000\u0000\u0000"+ - "\u069a\u069c\u0003\u011c\u008e\u0000\u069b\u069a\u0001\u0000\u0000\u0000"+ - "\u069b\u069c\u0001\u0000\u0000\u0000\u069c\u069e\u0001\u0000\u0000\u0000"+ - "\u069d\u069f\u0003\u0124\u0092\u0000\u069e\u069d\u0001\u0000\u0000\u0000"+ - "\u069e\u069f\u0001\u0000\u0000\u0000\u069f\u06a1\u0001\u0000\u0000\u0000"+ - "\u06a0\u06a2\u0003\u0118\u008c\u0000\u06a1\u06a0\u0001\u0000\u0000\u0000"+ - "\u06a1\u06a2\u0001\u0000\u0000\u0000\u06a2\u06bf\u0001\u0000\u0000\u0000"+ - "\u06a3\u06a5\u0003\u011c\u008e\u0000\u06a4\u06a3\u0001\u0000\u0000\u0000"+ - "\u06a4\u06a5\u0001\u0000\u0000\u0000\u06a5\u06a7\u0001\u0000\u0000\u0000"+ - "\u06a6\u06a8\u0003\u0118\u008c\u0000\u06a7\u06a6\u0001\u0000\u0000\u0000"+ - "\u06a7\u06a8\u0001\u0000\u0000\u0000\u06a8\u06aa\u0001\u0000\u0000\u0000"+ - "\u06a9\u06ab\u0003\u0124\u0092\u0000\u06aa\u06a9\u0001\u0000\u0000\u0000"+ - "\u06aa\u06ab\u0001\u0000\u0000\u0000\u06ab\u06bf\u0001\u0000\u0000\u0000"+ - "\u06ac\u06ae\u0003\u0124\u0092\u0000\u06ad\u06ac\u0001\u0000\u0000\u0000"+ - "\u06ad\u06ae\u0001\u0000\u0000\u0000\u06ae\u06b0\u0001\u0000\u0000\u0000"+ - "\u06af\u06b1\u0003\u0118\u008c\u0000\u06b0\u06af\u0001\u0000\u0000\u0000"+ - "\u06b0\u06b1\u0001\u0000\u0000\u0000\u06b1\u06b3\u0001\u0000\u0000\u0000"+ - "\u06b2\u06b4\u0003\u011c\u008e\u0000\u06b3\u06b2\u0001\u0000\u0000\u0000"+ - "\u06b3\u06b4\u0001\u0000\u0000\u0000\u06b4\u06bf\u0001\u0000\u0000\u0000"+ - "\u06b5\u06b7\u0003\u0118\u008c\u0000\u06b6\u06b5\u0001\u0000\u0000\u0000"+ - "\u06b6\u06b7\u0001\u0000\u0000\u0000\u06b7\u06b9\u0001\u0000\u0000\u0000"+ - "\u06b8\u06ba\u0003\u0124\u0092\u0000\u06b9\u06b8\u0001\u0000\u0000\u0000"+ - "\u06b9\u06ba\u0001\u0000\u0000\u0000\u06ba\u06bc\u0001\u0000\u0000\u0000"+ - "\u06bb\u06bd\u0003\u011c\u008e\u0000\u06bc\u06bb\u0001\u0000\u0000\u0000"+ - "\u06bc\u06bd\u0001\u0000\u0000\u0000\u06bd\u06bf\u0001\u0000\u0000\u0000"+ - "\u06be\u0683\u0001\u0000\u0000\u0000\u06be\u0689\u0001\u0000\u0000\u0000"+ - "\u06be\u068f\u0001\u0000\u0000\u0000\u06be\u0695\u0001\u0000\u0000\u0000"+ - "\u06be\u069b\u0001\u0000\u0000\u0000\u06be\u06a4\u0001\u0000\u0000\u0000"+ - "\u06be\u06ad\u0001\u0000\u0000\u0000\u06be\u06b6\u0001\u0000\u0000\u0000"+ - "\u06bf\u0117\u0001\u0000\u0000\u0000\u06c0\u06c1\u0005\u0085\u0000\u0000"+ - "\u06c1\u06c2\u0005}\u0000\u0000\u06c2\u06c3\u0003\u01ac\u00d6\u0000\u06c3"+ - "\u0119\u0001\u0000\u0000\u0000\u06c4\u06c5\u0003\u01b6\u00db\u0000\u06c5"+ - "\u011b\u0001\u0000\u0000\u0000\u06c6\u06c7\u0005<\u0000\u0000\u06c7\u06c8"+ - "\u0005j\u0000\u0000\u06c8\u06c9\u0003\u011a\u008d\u0000\u06c9\u011d\u0001"+ - "\u0000\u0000\u0000\u06ca\u06cb\u0005\u0006\u0000\u0000\u06cb\u06cc\u0005"+ - "j\u0000\u0000\u06cc\u06cd\u0003\u011a\u008d\u0000\u06cd\u011f\u0001\u0000"+ - 
"\u0000\u0000\u06ce\u06cf\u0005#\u0000\u0000\u06cf\u06d0\u0005j\u0000\u0000"+ - "\u06d0\u06d1\u0003\u011a\u008d\u0000\u06d1\u0121\u0001\u0000\u0000\u0000"+ - "\u06d2\u06d3\u0005\u0089\u0000\u0000\u06d3\u06d4\u0005q\u0000\u0000\u06d4"+ - "\u06d6\u00053\u0000\u0000\u06d5\u06d7\u0005.\u0000\u0000\u06d6\u06d5\u0001"+ - "\u0000\u0000\u0000\u06d6\u06d7\u0001\u0000\u0000\u0000\u06d7\u0123\u0001"+ - "\u0000\u0000\u0000\u06d8\u06d9\u0005\r\u0000\u0000\u06d9\u06da\u0005C"+ - "\u0000\u0000\u06da\u06db\u0005\u0016\u0000\u0000\u06db\u0125\u0001\u0000"+ - "\u0000\u0000\u06dc\u06e4\u00055\u0000\u0000\u06dd\u06e5\u0005\n\u0000"+ - "\u0000\u06de\u06df\u0005\u0011\u0000\u0000\u06df\u06e2\u0005\u001d\u0000"+ - "\u0000\u06e0\u06e1\u0005\\\u0000\u0000\u06e1\u06e3\u0005\u00c5\u0000\u0000"+ - "\u06e2\u06e0\u0001\u0000\u0000\u0000\u06e2\u06e3\u0001\u0000\u0000\u0000"+ - "\u06e3\u06e5\u0001\u0000\u0000\u0000\u06e4\u06dd\u0001\u0000\u0000\u0000"+ - "\u06e4\u06de\u0001\u0000\u0000\u0000\u06e5\u06e6\u0001\u0000\u0000\u0000"+ - "\u06e6\u06e7\u0005\r\u0000\u0000\u06e7\u06f0\u0005:\u0000\u0000\u06e8"+ - "\u06ea\u0005\u00aa\u0000\u0000\u06e9\u06eb\u0003\u0128\u0094\u0000\u06ea"+ - "\u06e9\u0001\u0000\u0000\u0000\u06eb\u06ec\u0001\u0000\u0000\u0000\u06ec"+ - "\u06ea\u0001\u0000\u0000\u0000\u06ec\u06ed\u0001\u0000\u0000\u0000\u06ed"+ - "\u06ee\u0001\u0000\u0000\u0000\u06ee\u06ef\u0005\u00ab\u0000\u0000\u06ef"+ - "\u06f1\u0001\u0000\u0000\u0000\u06f0\u06e8\u0001\u0000\u0000\u0000\u06f0"+ - "\u06f1\u0001\u0000\u0000\u0000\u06f1\u0127\u0001\u0000\u0000\u0000\u06f2"+ - "\u06f3\u0005x\u0000\u0000\u06f3\u06f4\u0005\u0089\u0000\u0000\u06f4\u0708"+ - "\u0003\u01b2\u00d9\u0000\u06f5\u06f6\u0005=\u0000\u0000\u06f6\u06f7\u0005"+ - "\u0011\u0000\u0000\u06f7\u0708\u0003\u01b2\u00d9\u0000\u06f8\u06f9\u0005"+ - "N\u0000\u0000\u06f9\u0708\u0003\u01b2\u00d9\u0000\u06fa\u06fb\u0005W\u0000"+ - "\u0000\u06fb\u0708\u0005N\u0000\u0000\u06fc\u06fd\u0005Q\u0000\u0000\u06fd"+ - "\u0708\u0003\u01b2\u00d9\u0000\u06fe\u06ff\u0005W\u0000\u0000\u06ff\u0708"+ - "\u0005Q\u0000\u0000\u0700\u0701\u0005\u0012\u0000\u0000\u0701\u0708\u0005"+ - "\u00c8\u0000\u0000\u0702\u0703\u0005W\u0000\u0000\u0703\u0708\u0005\u0012"+ - "\u0000\u0000\u0704\u0708\u0005\u001a\u0000\u0000\u0705\u0706\u0005W\u0000"+ - "\u0000\u0706\u0708\u0005\u001a\u0000\u0000\u0707\u06f2\u0001\u0000\u0000"+ - "\u0000\u0707\u06f5\u0001\u0000\u0000\u0000\u0707\u06f8\u0001\u0000\u0000"+ - "\u0000\u0707\u06fa\u0001\u0000\u0000\u0000\u0707\u06fc\u0001\u0000\u0000"+ - "\u0000\u0707\u06fe\u0001\u0000\u0000\u0000\u0707\u0700\u0001\u0000\u0000"+ - "\u0000\u0707\u0702\u0001\u0000\u0000\u0000\u0707\u0704\u0001\u0000\u0000"+ - "\u0000\u0707\u0705\u0001\u0000\u0000\u0000\u0708\u0129\u0001\u0000\u0000"+ - "\u0000\u0709\u070a\u0005\r\u0000\u0000\u070a\u070b\u0005S\u0000\u0000"+ - "\u070b\u012b\u0001\u0000\u0000\u0000\u070c\u070d\u0005\r\u0000\u0000\u070d"+ - "\u0711\u0005\u008c\u0000\u0000\u070e\u070f\u00055\u0000\u0000\u070f\u0710"+ - "\u0005\u0011\u0000\u0000\u0710\u0712\u0005\u001d\u0000\u0000\u0711\u070e"+ - "\u0001\u0000\u0000\u0000\u0711\u0712\u0001\u0000\u0000\u0000\u0712\u012d"+ - "\u0001\u0000\u0000\u0000\u0713\u0714\u0005\t\u0000\u0000\u0714\u0715\u0005"+ - "y\u0000\u0000\u0715\u0716\u0003\u00fc~\u0000\u0716\u0717\u0003\u0130\u0098"+ - "\u0000\u0717\u012f\u0001\u0000\u0000\u0000\u0718\u071f\u0003\u0136\u009b"+ - "\u0000\u0719\u071f\u0003\u0118\u008c\u0000\u071a\u071f\u0003\u011e\u008f"+ - "\u0000\u071b\u071f\u0003\u0120\u0090\u0000\u071c\u071f\u0003\u0132\u0099"+ - 
"\u0000\u071d\u071f\u0003\u0134\u009a\u0000\u071e\u0718\u0001\u0000\u0000"+ - "\u0000\u071e\u0719\u0001\u0000\u0000\u0000\u071e\u071a\u0001\u0000\u0000"+ - "\u0000\u071e\u071b\u0001\u0000\u0000\u0000\u071e\u071c\u0001\u0000\u0000"+ - "\u0000\u071e\u071d\u0001\u0000\u0000\u0000\u071f\u0131\u0001\u0000\u0000"+ - "\u0000\u0720\u0721\u00051\u0000\u0000\u0721\u0723\u0005q\u0000\u0000\u0722"+ - "\u0724\u0005.\u0000\u0000\u0723\u0722\u0001\u0000\u0000\u0000\u0723\u0724"+ - "\u0001\u0000\u0000\u0000\u0724\u0133\u0001\u0000\u0000\u0000\u0725\u0726"+ - "\u0005\u007f\u0000\u0000\u0726\u0727\u0005q\u0000\u0000\u0727\u0135\u0001"+ - "\u0000\u0000\u0000\u0728\u072c\u0005\u00aa\u0000\u0000\u0729\u072d\u0003"+ - "\u0138\u009c\u0000\u072a\u072d\u0003\u013a\u009d\u0000\u072b\u072d\u0003"+ - "\u013c\u009e\u0000\u072c\u0729\u0001\u0000\u0000\u0000\u072c\u072a\u0001"+ - "\u0000\u0000\u0000\u072c\u072b\u0001\u0000\u0000\u0000\u072d\u0736\u0001"+ - "\u0000\u0000\u0000\u072e\u0732\u0005\u00a8\u0000\u0000\u072f\u0733\u0003"+ - "\u0138\u009c\u0000\u0730\u0733\u0003\u013a\u009d\u0000\u0731\u0733\u0003"+ - "\u013c\u009e\u0000\u0732\u072f\u0001\u0000\u0000\u0000\u0732\u0730\u0001"+ - "\u0000\u0000\u0000\u0732\u0731\u0001\u0000\u0000\u0000\u0733\u0735\u0001"+ - "\u0000\u0000\u0000\u0734\u072e\u0001\u0000\u0000\u0000\u0735\u0738\u0001"+ - "\u0000\u0000\u0000\u0736\u0734\u0001\u0000\u0000\u0000\u0736\u0737\u0001"+ - "\u0000\u0000\u0000\u0737\u0739\u0001\u0000\u0000\u0000\u0738\u0736\u0001"+ - "\u0000\u0000\u0000\u0739\u073a\u0005\u00ab\u0000\u0000\u073a\u0137\u0001"+ - "\u0000\u0000\u0000\u073b\u073c\u0005\u0006\u0000\u0000\u073c\u073d\u0003"+ - "\u013e\u009f\u0000\u073d\u0743\u0003\u00bc^\u0000\u073e\u0744\u0003\u00c2"+ - "a\u0000\u073f\u0744\u0003\u0126\u0093\u0000\u0740\u0744\u0003\u012a\u0095"+ - "\u0000\u0741\u0744\u0003\u012c\u0096\u0000\u0742\u0744\u0003\u0104\u0082"+ - "\u0000\u0743\u073e\u0001\u0000\u0000\u0000\u0743\u073f\u0001\u0000\u0000"+ - "\u0000\u0743\u0740\u0001\u0000\u0000\u0000\u0743\u0741\u0001\u0000\u0000"+ - "\u0000\u0743\u0742\u0001\u0000\u0000\u0000\u0743\u0744\u0001\u0000\u0000"+ - "\u0000\u0744\u0746\u0001\u0000\u0000\u0000\u0745\u0747\u0003\u01aa\u00d5"+ - "\u0000\u0746\u0745\u0001\u0000\u0000\u0000\u0746\u0747\u0001\u0000\u0000"+ - "\u0000\u0747\u0139\u0001\u0000\u0000\u0000\u0748\u0749\u0005#\u0000\u0000"+ - "\u0749\u074a\u0003\u013e\u009f\u0000\u074a\u013b\u0001\u0000\u0000\u0000"+ - "\u074b\u074c\u0005R\u0000\u0000\u074c\u0758\u0003\u013e\u009f\u0000\u074d"+ - "\u074f\u0003\u00bc^\u0000\u074e\u0750\u0003\u00c2a\u0000\u074f\u074e\u0001"+ - "\u0000\u0000\u0000\u074f\u0750\u0001\u0000\u0000\u0000\u0750\u0752\u0001"+ - "\u0000\u0000\u0000\u0751\u0753\u0003\u01aa\u00d5\u0000\u0752\u0751\u0001"+ - "\u0000\u0000\u0000\u0752\u0753\u0001\u0000\u0000\u0000\u0753\u0759\u0001"+ - "\u0000\u0000\u0000\u0754\u0759\u0003\u0126\u0093\u0000\u0755\u0759\u0003"+ - "\u012c\u0096\u0000\u0756\u0757\u0005#\u0000\u0000\u0757\u0759\u0005:\u0000"+ - "\u0000\u0758\u074d\u0001\u0000\u0000\u0000\u0758\u0754\u0001\u0000\u0000"+ - "\u0000\u0758\u0755\u0001\u0000\u0000\u0000\u0758\u0756\u0001\u0000\u0000"+ - "\u0000\u0759\u013d\u0001\u0000\u0000\u0000\u075a\u075f\u0003\u0140\u00a0"+ - "\u0000\u075b\u075c\u0005\u00b1\u0000\u0000\u075c\u075e\u0003\u0142\u00a1"+ - "\u0000\u075d\u075b\u0001\u0000\u0000\u0000\u075e\u0761\u0001\u0000\u0000"+ - "\u0000\u075f\u075d\u0001\u0000\u0000\u0000\u075f\u0760\u0001\u0000\u0000"+ - "\u0000\u0760\u013f\u0001\u0000\u0000\u0000\u0761\u075f\u0001\u0000\u0000"+ - 
"\u0000\u0762\u0767\u0003\u01b8\u00dc\u0000\u0763\u0764\u0005\u00ac\u0000"+ - "\u0000\u0764\u0766\u0005\u00ad\u0000\u0000\u0765\u0763\u0001\u0000\u0000"+ - "\u0000\u0766\u0769\u0001\u0000\u0000\u0000\u0767\u0765\u0001\u0000\u0000"+ - "\u0000\u0767\u0768\u0001\u0000\u0000\u0000\u0768\u0141\u0001\u0000\u0000"+ - "\u0000\u0769\u0767\u0001\u0000\u0000\u0000\u076a\u076f\u0003\u01b8\u00dc"+ - "\u0000\u076b\u076c\u0005\u00ac\u0000\u0000\u076c\u076e\u0005\u00ad\u0000"+ - "\u0000\u076d\u076b\u0001\u0000\u0000\u0000\u076e\u0771\u0001\u0000\u0000"+ - "\u0000\u076f\u076d\u0001\u0000\u0000\u0000\u076f\u0770\u0001\u0000\u0000"+ - "\u0000\u0770\u0776\u0001\u0000\u0000\u0000\u0771\u076f\u0001\u0000\u0000"+ - "\u0000\u0772\u0773\u0005\u0086\u0000\u0000\u0773\u0774\u0005\u00aa\u0000"+ - "\u0000\u0774\u0776\u0005\u00ab\u0000\u0000\u0775\u076a\u0001\u0000\u0000"+ - "\u0000\u0775\u0772\u0001\u0000\u0000\u0000\u0776\u0143\u0001\u0000\u0000"+ - "\u0000\u0777\u0778\u0005#\u0000\u0000\u0778\u077b\u0005y\u0000\u0000\u0779"+ - "\u077a\u0005;\u0000\u0000\u077a\u077c\u0005*\u0000\u0000\u077b\u0779\u0001"+ - "\u0000\u0000\u0000\u077b\u077c\u0001\u0000\u0000\u0000\u077c\u077d\u0001"+ - "\u0000\u0000\u0000\u077d\u077e\u0003\u00fc~\u0000\u077e\u0145\u0001\u0000"+ - "\u0000\u0000\u077f\u0780\u0005\u0019\u0000\u0000\u0780\u0784\u0005>\u0000"+ - "\u0000\u0781\u0782\u0005;\u0000\u0000\u0782\u0783\u0005X\u0000\u0000\u0783"+ - "\u0785\u0005*\u0000\u0000\u0784\u0781\u0001\u0000\u0000\u0000\u0784\u0785"+ - "\u0001\u0000\u0000\u0000\u0785\u0786\u0001\u0000\u0000\u0000\u0786\u0787"+ - "\u0003\u0148\u00a4\u0000\u0787\u0788\u0005\\\u0000\u0000\u0788\u079e\u0003"+ - "\u00fc~\u0000\u0789\u078a\u0005\u00aa\u0000\u0000\u078a\u078b\u0003\u014a"+ - "\u00a5\u0000\u078b\u0791\u0005\u00ab\u0000\u0000\u078c\u078e\u0005\u0089"+ - "\u0000\u0000\u078d\u078f\u0005W\u0000\u0000\u078e\u078d\u0001\u0000\u0000"+ - "\u0000\u078e\u078f\u0001\u0000\u0000\u0000\u078f\u0790\u0001\u0000\u0000"+ - "\u0000\u0790\u0792\u0005Y\u0000\u0000\u0791\u078c\u0001\u0000\u0000\u0000"+ - "\u0791\u0792\u0001\u0000\u0000\u0000\u0792\u0798\u0001\u0000\u0000\u0000"+ - "\u0793\u0794\u0005\u0089\u0000\u0000\u0794\u0795\u0005\u008a\u0000\u0000"+ - "\u0795\u0796\u0005G\u0000\u0000\u0796\u0797\u0005d\u0000\u0000\u0797\u0799"+ - "\u0005p\u0000\u0000\u0798\u0793\u0001\u0000\u0000\u0000\u0798\u0799\u0001"+ - "\u0000\u0000\u0000\u0799\u079f\u0001\u0000\u0000\u0000\u079a\u079b\u0005"+ - "\u00aa\u0000\u0000\u079b\u079c\u0003\u014a\u00a5\u0000\u079c\u079d\u0006"+ - "\u00a3\uffff\uffff\u0000\u079d\u079f\u0001\u0000\u0000\u0000\u079e\u0789"+ - "\u0001\u0000\u0000\u0000\u079e\u079a\u0001\u0000\u0000\u0000\u079f\u07a1"+ - "\u0001\u0000\u0000\u0000\u07a0\u07a2\u0003\u01aa\u00d5\u0000\u07a1\u07a0"+ - "\u0001\u0000\u0000\u0000\u07a1\u07a2\u0001\u0000\u0000\u0000\u07a2\u0147"+ - "\u0001\u0000\u0000\u0000\u07a3\u07a4\u0003\u01b8\u00dc\u0000\u07a4\u0149"+ - "\u0001\u0000\u0000\u0000\u07a5\u07aa\u0003\u014c\u00a6\u0000\u07a6\u07a7"+ - "\u0005\u00a8\u0000\u0000\u07a7\u07a9\u0003\u014c\u00a6\u0000\u07a8\u07a6"+ - "\u0001\u0000\u0000\u0000\u07a9\u07ac\u0001\u0000\u0000\u0000\u07aa\u07a8"+ - "\u0001\u0000\u0000\u0000\u07aa\u07ab\u0001\u0000\u0000\u0000\u07ab\u014b"+ - "\u0001\u0000\u0000\u0000\u07ac\u07aa\u0001\u0000\u0000\u0000\u07ad\u07af"+ - "\u0003\u0152\u00a9\u0000\u07ae\u07b0\u0003\u0158\u00ac\u0000\u07af\u07ae"+ - "\u0001\u0000\u0000\u0000\u07af\u07b0\u0001\u0000\u0000\u0000\u07b0\u07b3"+ - "\u0001\u0000\u0000\u0000\u07b1\u07b3\u0003\u014e\u00a7\u0000\u07b2\u07ad"+ - 
"\u0001\u0000\u0000\u0000\u07b2\u07b1\u0001\u0000\u0000\u0000\u07b3\u014d"+ - "\u0001\u0000\u0000\u0000\u07b4\u07b5\u0003\u01b8\u00dc\u0000\u07b5\u07b7"+ - "\u0005\u00aa\u0000\u0000\u07b6\u07b8\u0003\u0152\u00a9\u0000\u07b7\u07b6"+ - "\u0001\u0000\u0000\u0000\u07b7\u07b8\u0001\u0000\u0000\u0000\u07b8\u07ba"+ - "\u0001\u0000\u0000\u0000\u07b9\u07bb\u0003\u0158\u00ac\u0000\u07ba\u07b9"+ - "\u0001\u0000\u0000\u0000\u07ba\u07bb\u0001\u0000\u0000\u0000\u07bb\u07bd"+ - "\u0001\u0000\u0000\u0000\u07bc\u07be\u0003\u0150\u00a8\u0000\u07bd\u07bc"+ - "\u0001\u0000\u0000\u0000\u07bd\u07be\u0001\u0000\u0000\u0000\u07be\u07bf"+ - "\u0001\u0000\u0000\u0000\u07bf\u07c0\u0005\u00ab\u0000\u0000\u07c0\u014f"+ - "\u0001\u0000\u0000\u0000\u07c1\u07c2\u0005\u00a8\u0000\u0000\u07c2\u07c4"+ - "\u0003x<\u0000\u07c3\u07c1\u0001\u0000\u0000\u0000\u07c4\u07c5\u0001\u0000"+ - "\u0000\u0000\u07c5\u07c3\u0001\u0000\u0000\u0000\u07c5\u07c6\u0001\u0000"+ - "\u0000\u0000\u07c6\u0151\u0001\u0000\u0000\u0000\u07c7\u07de\u0003\u00ea"+ - "u\u0000\u07c8\u07ca\u0003\u0154\u00aa\u0000\u07c9\u07cb\u0003\u0156\u00ab"+ - "\u0000\u07ca\u07c9\u0001\u0000\u0000\u0000\u07ca\u07cb\u0001\u0000\u0000"+ - "\u0000\u07cb\u07de\u0001\u0000\u0000\u0000\u07cc\u07cd\u0005$\u0000\u0000"+ - "\u07cd\u07ce\u0005\u00aa\u0000\u0000\u07ce\u07cf\u0003\u00eau\u0000\u07cf"+ - "\u07d1\u0005\u00ab\u0000\u0000\u07d0\u07d2\u0003\u0156\u00ab\u0000\u07d1"+ - "\u07d0\u0001\u0000\u0000\u0000\u07d1\u07d2\u0001\u0000\u0000\u0000\u07d2"+ - "\u07de\u0001\u0000\u0000\u0000\u07d3\u07d4\u0005F\u0000\u0000\u07d4\u07d5"+ - "\u0005\u00aa\u0000\u0000\u07d5\u07d6\u0003\u00eau\u0000\u07d6\u07d7\u0005"+ - "\u00ab\u0000\u0000\u07d7\u07de\u0001\u0000\u0000\u0000\u07d8\u07d9\u0005"+ - "G\u0000\u0000\u07d9\u07da\u0005\u00aa\u0000\u0000\u07da\u07db\u0003\u00ea"+ - "u\u0000\u07db\u07dc\u0005\u00ab\u0000\u0000\u07dc\u07de\u0001\u0000\u0000"+ - "\u0000\u07dd\u07c7\u0001\u0000\u0000\u0000\u07dd\u07c8\u0001\u0000\u0000"+ - "\u0000\u07dd\u07cc\u0001\u0000\u0000\u0000\u07dd\u07d3\u0001\u0000\u0000"+ - "\u0000\u07dd\u07d8\u0001\u0000\u0000\u0000\u07de\u0153\u0001\u0000\u0000"+ - "\u0000\u07df\u07ea\u0003\u00ecv\u0000\u07e0\u07e1\u0005\u00b1\u0000\u0000"+ - "\u07e1\u07e9\u0003\u00ecv\u0000\u07e2\u07e3\u0005\u00b1\u0000\u0000\u07e3"+ - "\u07e4\u0005\u0086\u0000\u0000\u07e4\u07e5\u0005\u00aa\u0000\u0000\u07e5"+ - "\u07e9\u0005\u00ab\u0000\u0000\u07e6\u07e7\u0005\u00ac\u0000\u0000\u07e7"+ - "\u07e9\u0005\u00ad\u0000\u0000\u07e8\u07e0\u0001\u0000\u0000\u0000\u07e8"+ - "\u07e2\u0001\u0000\u0000\u0000\u07e8\u07e6\u0001\u0000\u0000\u0000\u07e9"+ - "\u07ec\u0001\u0000\u0000\u0000\u07ea\u07e8\u0001\u0000\u0000\u0000\u07ea"+ - "\u07eb\u0001\u0000\u0000\u0000\u07eb\u07f7\u0001\u0000\u0000\u0000\u07ec"+ - "\u07ea\u0001\u0000\u0000\u0000\u07ed\u07ee\u0005\u00ac\u0000\u0000\u07ee"+ - "\u07f8\u0005\u00ad\u0000\u0000\u07ef\u07f0\u0005\u00b1\u0000\u0000\u07f0"+ - "\u07f1\u0005\u0086\u0000\u0000\u07f1\u07f2\u0005\u00aa\u0000\u0000\u07f2"+ - "\u07f8\u0005\u00ab\u0000\u0000\u07f3\u07f4\u0005\u00b1\u0000\u0000\u07f4"+ - "\u07f5\u0005G\u0000\u0000\u07f5\u07f6\u0005\u00aa\u0000\u0000\u07f6\u07f8"+ - "\u0005\u00ab\u0000\u0000\u07f7\u07ed\u0001\u0000\u0000\u0000\u07f7\u07ef"+ - "\u0001\u0000\u0000\u0000\u07f7\u07f3\u0001\u0000\u0000\u0000\u07f8\u0155"+ - "\u0001\u0000\u0000\u0000\u07f9\u07fa\u0005\u00b1\u0000\u0000\u07fa\u07fb"+ - "\u0003\u00eau\u0000\u07fb\u0157\u0001\u0000\u0000\u0000\u07fc\u0809\u0005"+ - "\r\u0000\u0000\u07fd\u080a\u0005\u009a\u0000\u0000\u07fe\u080a\u0005\u009b"+ - 
"\u0000\u0000\u07ff\u080a\u0005\u0096\u0000\u0000\u0800\u080a\u0005\u00a0"+ - "\u0000\u0000\u0801\u080a\u0005\u0095\u0000\u0000\u0802\u080a\u0005\u009d"+ - "\u0000\u0000\u0803\u080a\u0005\u00a3\u0000\u0000\u0804\u0806\u0005\u0099"+ - "\u0000\u0000\u0805\u0807\u0003\u01a2\u00d1\u0000\u0806\u0805\u0001\u0000"+ - "\u0000\u0000\u0806\u0807\u0001\u0000\u0000\u0000\u0807\u080a\u0001\u0000"+ - "\u0000\u0000\u0808\u080a\u0005\u009e\u0000\u0000\u0809\u07fd\u0001\u0000"+ - "\u0000\u0000\u0809\u07fe\u0001\u0000\u0000\u0000\u0809\u07ff\u0001\u0000"+ - "\u0000\u0000\u0809\u0800\u0001\u0000\u0000\u0000\u0809\u0801\u0001\u0000"+ - "\u0000\u0000\u0809\u0802\u0001\u0000\u0000\u0000\u0809\u0803\u0001\u0000"+ - "\u0000\u0000\u0809\u0804\u0001\u0000\u0000\u0000\u0809\u0808\u0001\u0000"+ - "\u0000\u0000\u080a\u0159\u0001\u0000\u0000\u0000\u080b\u080c\u0005\u0019"+ - "\u0000\u0000\u080c\u080d\u00054\u0000\u0000\u080d\u0811\u0005>\u0000\u0000"+ - "\u080e\u080f\u0005;\u0000\u0000\u080f\u0810\u0005X\u0000\u0000\u0810\u0812"+ - "\u0005*\u0000\u0000\u0811\u080e\u0001\u0000\u0000\u0000\u0811\u0812\u0001"+ - "\u0000\u0000\u0000\u0812\u0813\u0001\u0000\u0000\u0000\u0813\u0814\u0003"+ - "\u0148\u00a4\u0000\u0814\u0815\u0005\\\u0000\u0000\u0815\u0816\u0003\u00fc"+ - "~\u0000\u0816\u0818\u0003\u015c\u00ae\u0000\u0817\u0819\u0003\u0162\u00b1"+ - "\u0000\u0818\u0817\u0001\u0000\u0000\u0000\u0818\u0819\u0001\u0000\u0000"+ - "\u0000\u0819\u081b\u0001\u0000\u0000\u0000\u081a\u081c\u0005a\u0000\u0000"+ - "\u081b\u081a\u0001\u0000\u0000\u0000\u081b\u081c\u0001\u0000\u0000\u0000"+ - "\u081c\u081e\u0001\u0000\u0000\u0000\u081d\u081f\u0003\u01aa\u00d5\u0000"+ - "\u081e\u081d\u0001\u0000\u0000\u0000\u081e\u081f\u0001\u0000\u0000\u0000"+ - "\u081f\u015b\u0001\u0000\u0000\u0000\u0820\u0821\u0005\u00aa\u0000\u0000"+ - "\u0821\u0822\u0003\u015e\u00af\u0000\u0822\u0823\u0005\u00ab\u0000\u0000"+ - "\u0823\u0829\u0001\u0000\u0000\u0000\u0824\u0825\u0005\u00aa\u0000\u0000"+ - "\u0825\u0826\u0003\u015e\u00af\u0000\u0826\u0827\u0006\u00ae\uffff\uffff"+ - "\u0000\u0827\u0829\u0001\u0000\u0000\u0000\u0828\u0820\u0001\u0000\u0000"+ - "\u0000\u0828\u0824\u0001\u0000\u0000\u0000\u0829\u015d\u0001\u0000\u0000"+ - "\u0000\u082a\u082f\u0003\u0160\u00b0\u0000\u082b\u082c\u0005\u00a8\u0000"+ - "\u0000\u082c\u082e\u0003\u0160\u00b0\u0000\u082d\u082b\u0001\u0000\u0000"+ - "\u0000\u082e\u0831\u0001\u0000\u0000\u0000\u082f\u082d\u0001\u0000\u0000"+ - "\u0000\u082f\u0830\u0001\u0000\u0000\u0000\u0830\u015f\u0001\u0000\u0000"+ - "\u0000\u0831\u082f\u0001\u0000\u0000\u0000\u0832\u0834\u0003\u0152\u00a9"+ - "\u0000\u0833\u0835\u0003\u01a2\u00d1\u0000\u0834\u0833\u0001\u0000\u0000"+ - "\u0000\u0834\u0835\u0001\u0000\u0000\u0000\u0835\u0161\u0001\u0000\u0000"+ - "\u0000\u0836\u083a\u0003\u0164\u00b2\u0000\u0837\u0839\u0003\u0164\u00b2"+ - "\u0000\u0838\u0837\u0001\u0000\u0000\u0000\u0839\u083c\u0001\u0000\u0000"+ - "\u0000\u083a\u0838\u0001\u0000\u0000\u0000\u083a\u083b\u0001\u0000\u0000"+ - "\u0000\u083b\u0163\u0001\u0000\u0000\u0000\u083c\u083a\u0001\u0000\u0000"+ - "\u0000\u083d\u083e\u0005(\u0000\u0000\u083e\u083f\u0005\u00b8\u0000\u0000"+ - "\u083f\u0844\u0005\u00c8\u0000\u0000\u0840\u0841\u0005)\u0000\u0000\u0841"+ - "\u0842\u0005\u00b8\u0000\u0000\u0842\u0844\u0005\u00c8\u0000\u0000\u0843"+ - "\u083d\u0001\u0000\u0000\u0000\u0843\u0840\u0001\u0000\u0000\u0000\u0844"+ - "\u0165\u0001\u0000\u0000\u0000\u0845\u0846\u0005#\u0000\u0000\u0846\u0849"+ - "\u0005>\u0000\u0000\u0847\u0848\u0005;\u0000\u0000\u0848\u084a\u0005*"+ - 
"\u0000\u0000\u0849\u0847\u0001\u0000\u0000\u0000\u0849\u084a\u0001\u0000"+ - "\u0000\u0000\u084a\u084b\u0001\u0000\u0000\u0000\u084b\u084c\u0003\u0148"+ - "\u00a4\u0000\u084c\u084d\u0005\\\u0000\u0000\u084d\u084f\u0003\u00fc~"+ - "\u0000\u084e\u0850\u0005a\u0000\u0000\u084f\u084e\u0001\u0000\u0000\u0000"+ - "\u084f\u0850\u0001\u0000\u0000\u0000\u0850\u0167\u0001\u0000\u0000\u0000"+ - "\u0851\u0854\u0007\r\u0000\u0000\u0852\u0853\u0005\r\u0000\u0000\u0853"+ - "\u0855\u0005C\u0000\u0000\u0854\u0852\u0001\u0000\u0000\u0000\u0854\u0855"+ - "\u0001\u0000\u0000\u0000\u0855\u0867\u0001\u0000\u0000\u0000\u0856\u0857"+ - "\u0005y\u0000\u0000\u0857\u0860\u0003\u00fc~\u0000\u0858\u0859\u0005\u00aa"+ - "\u0000\u0000\u0859\u085a\u0003\u016a\u00b5\u0000\u085a\u085b\u0005\u00ab"+ - "\u0000\u0000\u085b\u0861\u0001\u0000\u0000\u0000\u085c\u085d\u0005\u00aa"+ - "\u0000\u0000\u085d\u085e\u0003\u016a\u00b5\u0000\u085e\u085f\u0006\u00b4"+ - "\uffff\uffff\u0000\u085f\u0861\u0001\u0000\u0000\u0000\u0860\u0858\u0001"+ - "\u0000\u0000\u0000\u0860\u085c\u0001\u0000\u0000\u0000\u0860\u0861\u0001"+ - "\u0000\u0000\u0000\u0861\u0868\u0001\u0000\u0000\u0000\u0862\u0863\u0005"+ - ">\u0000\u0000\u0863\u0864\u0003\u0148\u00a4\u0000\u0864\u0865\u0005\\"+ - "\u0000\u0000\u0865\u0866\u0003\u00fc~\u0000\u0866\u0868\u0001\u0000\u0000"+ - "\u0000\u0867\u0856\u0001\u0000\u0000\u0000\u0867\u0862\u0001\u0000\u0000"+ - "\u0000\u0868\u0169\u0001\u0000\u0000\u0000\u0869\u086e\u0003\u013e\u009f"+ - "\u0000\u086a\u086b\u0005\u00a8\u0000\u0000\u086b\u086d\u0003\u013e\u009f"+ - "\u0000\u086c\u086a\u0001\u0000\u0000\u0000\u086d\u0870\u0001\u0000\u0000"+ - "\u0000\u086e\u086c\u0001\u0000\u0000\u0000\u086e\u086f\u0001\u0000\u0000"+ - "\u0000\u086f\u016b\u0001\u0000\u0000\u0000\u0870\u086e\u0001\u0000\u0000"+ - "\u0000\u0871\u0874\u0005w\u0000\u0000\u0872\u0873\u0005\r\u0000\u0000"+ - "\u0873\u0875\u0005C\u0000\u0000\u0874\u0872\u0001\u0000\u0000\u0000\u0874"+ - "\u0875\u0001\u0000\u0000\u0000\u0875\u0884\u0001\u0000\u0000\u0000\u0876"+ - "\u0885\u0005z\u0000\u0000\u0877\u0885\u0005\u0084\u0000\u0000\u0878\u0885"+ - "\u0005o\u0000\u0000\u0879\u087a\u0005\u0083\u0000\u0000\u087a\u0885\u0003"+ - "\u017c\u00be\u0000\u087b\u087c\u0005n\u0000\u0000\u087c\u0885\u0003\u01b8"+ - "\u00dc\u0000\u087d\u087e\u0005?\u0000\u0000\u087e\u087f\u0005\\\u0000"+ - "\u0000\u087f\u0885\u0003\u00fc~\u0000\u0880\u0881\u0005y\u0000\u0000\u0881"+ - "\u0885\u0003\u00fc~\u0000\u0882\u0885\u0005U\u0000\u0000\u0883\u0885\u0005"+ - "j\u0000\u0000\u0884\u0876\u0001\u0000\u0000\u0000\u0884\u0877\u0001\u0000"+ - "\u0000\u0000\u0884\u0878\u0001\u0000\u0000\u0000\u0884\u0879\u0001\u0000"+ - "\u0000\u0000\u0884\u087b\u0001\u0000\u0000\u0000\u0884\u087d\u0001\u0000"+ - "\u0000\u0000\u0884\u0880\u0001\u0000\u0000\u0000\u0884\u0882\u0001\u0000"+ - "\u0000\u0000\u0884\u0883\u0001\u0000\u0000\u0000\u0885\u016d\u0001\u0000"+ - "\u0000\u0000\u0886\u0887\u0005\u0019\u0000\u0000\u0887\u0888\u0005\u0083"+ - "\u0000\u0000\u0888\u088a\u0003\u0180\u00c0\u0000\u0889\u088b\u0003\u0188"+ - "\u00c4\u0000\u088a\u0889\u0001\u0000\u0000\u0000\u088a\u088b\u0001\u0000"+ - "\u0000\u0000\u088b\u088d\u0001\u0000\u0000\u0000\u088c\u088e\u0005\u0007"+ - "\u0000\u0000\u088d\u088c\u0001\u0000\u0000\u0000\u088d\u088e\u0001\u0000"+ - "\u0000\u0000\u088e\u016f\u0001\u0000\u0000\u0000\u088f\u0890\u0005\u0019"+ - "\u0000\u0000\u0890\u0891\u0005n\u0000\u0000\u0891\u0892\u0003\u01b8\u00dc"+ - "\u0000\u0892\u0171\u0001\u0000\u0000\u0000\u0893\u0894\u0005\t\u0000\u0000"+ - 
"\u0894\u0895\u0005\u0083\u0000\u0000\u0895\u0897\u0003\u017c\u00be\u0000"+ - "\u0896\u0898\u0003\u0186\u00c3\u0000\u0897\u0896\u0001\u0000\u0000\u0000"+ - "\u0897\u0898\u0001\u0000\u0000\u0000\u0898\u089a\u0001\u0000\u0000\u0000"+ - "\u0899\u089b\u0005\u0091\u0000\u0000\u089a\u0899\u0001\u0000\u0000\u0000"+ - "\u089a\u089b\u0001\u0000\u0000\u0000\u089b\u089d\u0001\u0000\u0000\u0000"+ - "\u089c\u089e\u0005\u008f\u0000\u0000\u089d\u089c\u0001\u0000\u0000\u0000"+ - "\u089d\u089e\u0001\u0000\u0000\u0000\u089e\u08a0\u0001\u0000\u0000\u0000"+ - "\u089f\u08a1\u0003\u0184\u00c2\u0000\u08a0\u089f\u0001\u0000\u0000\u0000"+ - "\u08a0\u08a1\u0001\u0000\u0000\u0000\u08a1\u08a3\u0001\u0000\u0000\u0000"+ - "\u08a2\u08a4\u0003\u0188\u00c4\u0000\u08a3\u08a2\u0001\u0000\u0000\u0000"+ - "\u08a3\u08a4\u0001\u0000\u0000\u0000\u08a4\u0173\u0001\u0000\u0000\u0000"+ - "\u08a5\u08a6\u0005#\u0000\u0000\u08a6\u08a7\u0005\u0083\u0000\u0000\u08a7"+ - "\u08a9\u0003\u017c\u00be\u0000\u08a8\u08aa\u0005\u0014\u0000\u0000\u08a9"+ - "\u08a8\u0001\u0000\u0000\u0000\u08a9\u08aa\u0001\u0000\u0000\u0000\u08aa"+ - "\u0175\u0001\u0000\u0000\u0000\u08ab\u08ac\u0005#\u0000\u0000\u08ac\u08ad"+ - "\u0005n\u0000\u0000\u08ad\u08ae\u0003\u01b8\u00dc\u0000\u08ae\u0177\u0001"+ - "\u0000\u0000\u0000\u08af\u08b3\u00056\u0000\u0000\u08b0\u08b4\u0003\u018a"+ - "\u00c5\u0000\u08b1\u08b4\u0003\u018c\u00c6\u0000\u08b2\u08b4\u0003\u018e"+ - "\u00c7\u0000\u08b3\u08b0\u0001\u0000\u0000\u0000\u08b3\u08b1\u0001\u0000"+ - "\u0000\u0000\u08b3\u08b2\u0001\u0000\u0000\u0000\u08b4\u0179\u0001\u0000"+ - "\u0000\u0000\u08b5\u08b9\u0005m\u0000\u0000\u08b6\u08ba\u0003\u0190\u00c8"+ - "\u0000\u08b7\u08ba\u0003\u0192\u00c9\u0000\u08b8\u08ba\u0003\u0194\u00ca"+ - "\u0000\u08b9\u08b6\u0001\u0000\u0000\u0000\u08b9\u08b7\u0001\u0000\u0000"+ - "\u0000\u08b9\u08b8\u0001\u0000\u0000\u0000\u08ba\u017b\u0001\u0000\u0000"+ - "\u0000\u08bb\u08be\u0003\u01b8\u00dc\u0000\u08bc\u08be\u0003\u01b4\u00da"+ - "\u0000\u08bd\u08bb\u0001\u0000\u0000\u0000\u08bd\u08bc\u0001\u0000\u0000"+ - "\u0000\u08be\u017d\u0001\u0000\u0000\u0000\u08bf\u08c0\u00059\u0000\u0000"+ - "\u08c0\u08c1\u0003\u0182\u00c1\u0000\u08c1\u017f\u0001\u0000\u0000\u0000"+ - "\u08c2\u08c3\u0003\u01b8\u00dc\u0000\u08c3\u08c5\u0003\u017e\u00bf\u0000"+ - "\u08c4\u08c6\u0005\u008f\u0000\u0000\u08c5\u08c4\u0001\u0000\u0000\u0000"+ - "\u08c5\u08c6\u0001\u0000\u0000\u0000\u08c6\u08c8\u0001\u0000\u0000\u0000"+ - "\u08c7\u08c9\u0003\u0184\u00c2\u0000\u08c8\u08c7\u0001\u0000\u0000\u0000"+ - "\u08c8\u08c9\u0001\u0000\u0000\u0000\u08c9\u08ce\u0001\u0000\u0000\u0000"+ - "\u08ca\u08cb\u0003\u01b4\u00da\u0000\u08cb\u08cc\u0005\u008e\u0000\u0000"+ - "\u08cc\u08ce\u0001\u0000\u0000\u0000\u08cd\u08c2\u0001\u0000\u0000\u0000"+ - "\u08cd\u08ca\u0001\u0000\u0000\u0000\u08ce\u0181\u0001\u0000\u0000\u0000"+ - "\u08cf\u08d0\u0005\u0011\u0000\u0000\u08d0\u08d1\u0003\u01b4\u00da\u0000"+ - "\u08d1\u0183\u0001\u0000\u0000\u0000\u08d2\u08d3\u0005b\u0000\u0000\u08d3"+ - "\u08d4\u0005J\u0000\u0000\u08d4\u08d5\u0003\u01ac\u00d6\u0000\u08d5\u0185"+ - "\u0001\u0000\u0000\u0000\u08d6\u08d8\u0003\u017e\u00bf\u0000\u08d7\u08d9"+ - "\u0005\u0090\u0000\u0000\u08d8\u08d7\u0001\u0000\u0000\u0000\u08d8\u08d9"+ - "\u0001\u0000\u0000\u0000\u08d9\u0187\u0001\u0000\u0000\u0000\u08da\u08db"+ - "\u0005\u0005\u0000\u0000\u08db\u08dc\u0007\u000e\u0000\u0000\u08dc\u0189"+ - "\u0001\u0000\u0000\u0000\u08dd\u08de\u0003\u01b6\u00db\u0000\u08de\u08df"+ - "\u0005|\u0000\u0000\u08df\u08e0\u0003\u0196\u00cb\u0000\u08e0\u018b\u0001"+ - 
"\u0000\u0000\u0000\u08e1\u08e2\u0003\u0198\u00cc\u0000\u08e2\u08e3\u0005"+ - "|\u0000\u0000\u08e3\u08e4\u0003\u01b8\u00dc\u0000\u08e4\u018d\u0001\u0000"+ - "\u0000\u0000\u08e5\u08e6\u0003\u019c\u00ce\u0000\u08e6\u08ea\u0005\\\u0000"+ - "\u0000\u08e7\u08eb\u0003\u019e\u00cf\u0000\u08e8\u08e9\u0005T\u0000\u0000"+ - "\u08e9\u08eb\u0003\u00fe\u007f\u0000\u08ea\u08e7\u0001\u0000\u0000\u0000"+ - "\u08ea\u08e8\u0001\u0000\u0000\u0000\u08eb\u08ec\u0001\u0000\u0000\u0000"+ - "\u08ec\u08ed\u0005|\u0000\u0000\u08ed\u08ee\u0003\u01b8\u00dc\u0000\u08ee"+ - "\u018f\u0001\u0000\u0000\u0000\u08ef\u08f0\u0003\u01b6\u00db\u0000\u08f0"+ - "\u08f1\u00052\u0000\u0000\u08f1\u08f2\u0003\u0196\u00cb\u0000\u08f2\u0191"+ - "\u0001\u0000\u0000\u0000\u08f3\u08f4\u0003\u0198\u00cc\u0000\u08f4\u08f5"+ - "\u00052\u0000\u0000\u08f5\u08f6\u0003\u01b8\u00dc\u0000\u08f6\u0193\u0001"+ - "\u0000\u0000\u0000\u08f7\u08f8\u0003\u019c\u00ce\u0000\u08f8\u08fc\u0005"+ - "\\\u0000\u0000\u08f9\u08fd\u0003\u019e\u00cf\u0000\u08fa\u08fb\u0005T"+ - "\u0000\u0000\u08fb\u08fd\u0003\u00fe\u007f\u0000\u08fc\u08f9\u0001\u0000"+ - "\u0000\u0000\u08fc\u08fa\u0001\u0000\u0000\u0000\u08fd\u08fe\u0001\u0000"+ - "\u0000\u0000\u08fe\u08ff\u00052\u0000\u0000\u08ff\u0900\u0003\u01b8\u00dc"+ - "\u0000\u0900\u0195\u0001\u0000\u0000\u0000\u0901\u0902\u0005\u0083\u0000"+ - "\u0000\u0902\u0906\u0003\u017c\u00be\u0000\u0903\u0904\u0005n\u0000\u0000"+ - "\u0904\u0906\u0003\u01b8\u00dc\u0000\u0905\u0901\u0001\u0000\u0000\u0000"+ - "\u0905\u0903\u0001\u0000\u0000\u0000\u0906\u0197\u0001\u0000\u0000\u0000"+ - "\u0907\u090c\u0003\u019a\u00cd\u0000\u0908\u0909\u0005\u00a8\u0000\u0000"+ - "\u0909\u090b\u0003\u019a\u00cd\u0000\u090a\u0908\u0001\u0000\u0000\u0000"+ - "\u090b\u090e\u0001\u0000\u0000\u0000\u090c\u090a\u0001\u0000\u0000\u0000"+ - "\u090c\u090d\u0001\u0000\u0000\u0000\u090d\u0199\u0001\u0000\u0000\u0000"+ - "\u090e\u090c\u0001\u0000\u0000\u0000\u090f\u0912\u0003\u01b8\u00dc\u0000"+ - "\u0910\u0912\u0005\u008d\u0000\u0000\u0911\u090f\u0001\u0000\u0000\u0000"+ - "\u0911\u0910\u0001\u0000\u0000\u0000\u0912\u019b\u0001\u0000\u0000\u0000"+ - "\u0913\u0916\u0003\u019a\u00cd\u0000\u0914\u0916\u0005\b\u0000\u0000\u0915"+ - "\u0913\u0001\u0000\u0000\u0000\u0915\u0914\u0001\u0000\u0000\u0000\u0916"+ - "\u091e\u0001\u0000\u0000\u0000\u0917\u091a\u0005\u00a8\u0000\u0000\u0918"+ - "\u091b\u0003\u019a\u00cd\u0000\u0919\u091b\u0005\b\u0000\u0000\u091a\u0918"+ - "\u0001\u0000\u0000\u0000\u091a\u0919\u0001\u0000\u0000\u0000\u091b\u091d"+ - "\u0001\u0000\u0000\u0000\u091c\u0917\u0001\u0000\u0000\u0000\u091d\u0920"+ - "\u0001\u0000\u0000\u0000\u091e\u091c\u0001\u0000\u0000\u0000\u091e\u091f"+ - "\u0001\u0000\u0000\u0000\u091f\u019d\u0001\u0000\u0000\u0000\u0920\u091e"+ - "\u0001\u0000\u0000\u0000\u0921\u0922\u0003\u00fc~\u0000\u0922\u019f\u0001"+ - "\u0000\u0000\u0000\u0923\u092b\u0003\u01a2\u00d1\u0000\u0924\u092b\u0003"+ - "\u01a4\u00d2\u0000\u0925\u092b\u0003\u01b4\u00da\u0000\u0926\u092b\u0003"+ - "\u01b0\u00d8\u0000\u0927\u092b\u0005\u00c7\u0000\u0000\u0928\u092b\u0005"+ - "\u00c6\u0000\u0000\u0929\u092b\u0005\u00c5\u0000\u0000\u092a\u0923\u0001"+ - "\u0000\u0000\u0000\u092a\u0924\u0001\u0000\u0000\u0000\u092a\u0925\u0001"+ - "\u0000\u0000\u0000\u092a\u0926\u0001\u0000\u0000\u0000\u092a\u0927\u0001"+ - "\u0000\u0000\u0000\u092a\u0928\u0001\u0000\u0000\u0000\u092a\u0929\u0001"+ - "\u0000\u0000\u0000\u092b\u01a1\u0001\u0000\u0000\u0000\u092c\u092d\u0005"+ - "\u00ae\u0000\u0000\u092d\u0932\u0003\u01a6\u00d3\u0000\u092e\u092f\u0005"+ - 
"\u00a8\u0000\u0000\u092f\u0931\u0003\u01a6\u00d3\u0000\u0930\u092e\u0001"+ - "\u0000\u0000\u0000\u0931\u0934\u0001\u0000\u0000\u0000\u0932\u0930\u0001"+ - "\u0000\u0000\u0000\u0932\u0933\u0001\u0000\u0000\u0000\u0933\u0935\u0001"+ - "\u0000\u0000\u0000\u0934\u0932\u0001\u0000\u0000\u0000\u0935\u0936\u0005"+ - "\u00af\u0000\u0000\u0936\u093a\u0001\u0000\u0000\u0000\u0937\u0938\u0005"+ - "\u00ae\u0000\u0000\u0938\u093a\u0005\u00af\u0000\u0000\u0939\u092c\u0001"+ - "\u0000\u0000\u0000\u0939\u0937\u0001\u0000\u0000\u0000\u093a\u01a3\u0001"+ - "\u0000\u0000\u0000\u093b\u093c\u0005\u00ac\u0000\u0000\u093c\u0941\u0003"+ - "\u01a8\u00d4\u0000\u093d\u093e\u0005\u00a8\u0000\u0000\u093e\u0940\u0003"+ - "\u01a8\u00d4\u0000\u093f\u093d\u0001\u0000\u0000\u0000\u0940\u0943\u0001"+ - "\u0000\u0000\u0000\u0941\u093f\u0001\u0000\u0000\u0000\u0941\u0942\u0001"+ - "\u0000\u0000\u0000\u0942\u0944\u0001\u0000\u0000\u0000\u0943\u0941\u0001"+ - "\u0000\u0000\u0000\u0944\u0945\u0005\u00ad\u0000\u0000\u0945\u0949\u0001"+ - "\u0000\u0000\u0000\u0946\u0947\u0005\u00ac\u0000\u0000\u0947\u0949\u0005"+ - "\u00ad\u0000\u0000\u0948\u093b\u0001\u0000\u0000\u0000\u0948\u0946\u0001"+ - "\u0000\u0000\u0000\u0949\u01a5\u0001\u0000\u0000\u0000\u094a\u094b\u0005"+ - "\u00cb\u0000\u0000\u094b\u094c\u0005\u00a9\u0000\u0000\u094c\u094d\u0003"+ - "\u01a8\u00d4\u0000\u094d\u01a7\u0001\u0000\u0000\u0000\u094e\u0956\u0003"+ - "\u01a2\u00d1\u0000\u094f\u0956\u0003\u01a4\u00d2\u0000\u0950\u0956\u0005"+ - "\u00cb\u0000\u0000\u0951\u0956\u0003\u01b0\u00d8\u0000\u0952\u0956\u0005"+ - "\u00c7\u0000\u0000\u0953\u0956\u0005\u00c6\u0000\u0000\u0954\u0956\u0005"+ - "\u00c5\u0000\u0000\u0955\u094e\u0001\u0000\u0000\u0000\u0955\u094f\u0001"+ - "\u0000\u0000\u0000\u0955\u0950\u0001\u0000\u0000\u0000\u0955\u0951\u0001"+ - "\u0000\u0000\u0000\u0955\u0952\u0001\u0000\u0000\u0000\u0955\u0953\u0001"+ - "\u0000\u0000\u0000\u0955\u0954\u0001\u0000\u0000\u0000\u0956\u01a9\u0001"+ - "\u0000\u0000\u0000\u0957\u0958\u0005\u0017\u0000\u0000\u0958\u0959\u0003"+ - "\u01b4\u00da\u0000\u0959\u01ab\u0001\u0000\u0000\u0000\u095a\u095b\u0005"+ - "\u00c8\u0000\u0000\u095b\u095c\u0003\u01ae\u00d7\u0000\u095c\u01ad\u0001"+ - "\u0000\u0000\u0000\u095d\u095e\u0007\u000f\u0000\u0000\u095e\u01af\u0001"+ - "\u0000\u0000\u0000\u095f\u0961\u0005\u00c1\u0000\u0000\u0960\u095f\u0001"+ - "\u0000\u0000\u0000\u0960\u0961\u0001\u0000\u0000\u0000\u0961\u0962\u0001"+ - "\u0000\u0000\u0000\u0962\u0963\u0007\u0010\u0000\u0000\u0963\u01b1\u0001"+ - "\u0000\u0000\u0000\u0964\u0966\u0007\u0003\u0000\u0000\u0965\u0964\u0001"+ - "\u0000\u0000\u0000\u0965\u0966\u0001\u0000\u0000\u0000\u0966\u0967\u0001"+ - "\u0000\u0000\u0000\u0967\u0968\u0005\u00c8\u0000\u0000\u0968\u01b3\u0001"+ - "\u0000\u0000\u0000\u0969\u096a\u0007\u0011\u0000\u0000\u096a\u01b5\u0001"+ - "\u0000\u0000\u0000\u096b\u0970\u0003\u01b8\u00dc\u0000\u096c\u096d\u0005"+ - "\u00a8\u0000\u0000\u096d\u096f\u0003\u01b8\u00dc\u0000\u096e\u096c\u0001"+ - "\u0000\u0000\u0000\u096f\u0972\u0001\u0000\u0000\u0000\u0970\u096e\u0001"+ - "\u0000\u0000\u0000\u0970\u0971\u0001\u0000\u0000\u0000\u0971\u01b7\u0001"+ - "\u0000\u0000\u0000\u0972\u0970\u0001\u0000\u0000\u0000\u0973\u0a05\u0005"+ - "\u0005\u0000\u0000\u0974\u0a05\u0005\u0006\u0000\u0000\u0975\u0a05\u0005"+ - "\u0007\u0000\u0000\u0976\u0a05\u0005\b\u0000\u0000\u0977\u0a05\u0005\t"+ - "\u0000\u0000\u0978\u0a05\u0005\n\u0000\u0000\u0979\u0a05\u0005\u000b\u0000"+ - "\u0000\u097a\u0a05\u0005\f\u0000\u0000\u097b\u0a05\u0005\u00a2\u0000\u0000"+ - 
"\u097c\u0a05\u0005\u00a3\u0000\u0000\u097d\u0a05\u0005\u00a4\u0000\u0000"+ - "\u097e\u0a05\u0005\u00a5\u0000\u0000\u097f\u0a05\u0005\u000f\u0000\u0000"+ - "\u0980\u0a05\u0005\r\u0000\u0000\u0981\u0a05\u0005\u000e\u0000\u0000\u0982"+ - "\u0a05\u0005\u0010\u0000\u0000\u0983\u0a05\u0005\u0011\u0000\u0000\u0984"+ - "\u0a05\u0005\u0012\u0000\u0000\u0985\u0a05\u0005\u0013\u0000\u0000\u0986"+ - "\u0a05\u0005\u0015\u0000\u0000\u0987\u0a05\u0005\u0016\u0000\u0000\u0988"+ - "\u0a05\u0005\u0017\u0000\u0000\u0989\u0a05\u0005\u0018\u0000\u0000\u098a"+ - "\u0a05\u0005\u0019\u0000\u0000\u098b\u0a05\u0005\u001a\u0000\u0000\u098c"+ - "\u0a05\u0005\u001b\u0000\u0000\u098d\u0a05\u0005\u001c\u0000\u0000\u098e"+ - "\u0a05\u0005\u001d\u0000\u0000\u098f\u0a05\u0005\u001e\u0000\u0000\u0990"+ - "\u0a05\u0005\u001f\u0000\u0000\u0991\u0a05\u0005 \u0000\u0000\u0992\u0a05"+ - "\u0005!\u0000\u0000\u0993\u0a05\u0005\"\u0000\u0000\u0994\u0a05\u0005"+ - "#\u0000\u0000\u0995\u0a05\u0005$\u0000\u0000\u0996\u0a05\u0005%\u0000"+ - "\u0000\u0997\u0a05\u0005&\u0000\u0000\u0998\u0a05\u0005\'\u0000\u0000"+ - "\u0999\u0a05\u0005(\u0000\u0000\u099a\u0a05\u0005)\u0000\u0000\u099b\u0a05"+ - "\u0005*\u0000\u0000\u099c\u0a05\u0005+\u0000\u0000\u099d\u0a05\u0005,"+ - "\u0000\u0000\u099e\u0a05\u0005-\u0000\u0000\u099f\u0a05\u00051\u0000\u0000"+ - "\u09a0\u0a05\u00052\u0000\u0000\u09a1\u0a05\u00053\u0000\u0000\u09a2\u0a05"+ - "\u00054\u0000\u0000\u09a3\u0a05\u00055\u0000\u0000\u09a4\u0a05\u00056"+ - "\u0000\u0000\u09a5\u0a05\u00057\u0000\u0000\u09a6\u0a05\u00058\u0000\u0000"+ - "\u09a7\u0a05\u00059\u0000\u0000\u09a8\u0a05\u0005:\u0000\u0000\u09a9\u0a05"+ - "\u0005;\u0000\u0000\u09aa\u0a05\u0005=\u0000\u0000\u09ab\u0a05\u0005>"+ - "\u0000"; + "\u0514\u0515\u0001\u0000\u0000\u0000\u0515\u051a\u0003\u00b2Y\u0000\u0516"+ + "\u0518\u0005\u0003\u0000\u0000\u0517\u0516\u0001\u0000\u0000\u0000\u0517"+ + "\u0518\u0001\u0000\u0000\u0000\u0518\u0519\u0001\u0000\u0000\u0000\u0519"+ + "\u051b\u0003\u00b4Z\u0000\u051a\u0517\u0001\u0000\u0000\u0000\u051a\u051b"+ + "\u0001\u0000\u0000\u0000\u051b\u051d\u0001\u0000\u0000\u0000\u051c\u051e"+ + "\u0005(\u0000\u0000\u051d\u051c\u0001\u0000\u0000\u0000\u051d\u051e\u0001"+ + "\u0000\u0000\u0000\u051e\u051f\u0001\u0000\u0000\u0000\u051f\u0520\u0003"+ + "\f\u0006\u0000\u0520\u00a7\u0001\u0000\u0000\u0000\u0521\u0523\u0005F"+ + "\u0000\u0000\u0522\u0521\u0001\u0000\u0000\u0000\u0522\u0523\u0001\u0000"+ + "\u0000\u0000\u0523\u0524\u0001\u0000\u0000\u0000\u0524\u0526\u0003\u00b2"+ + "Y\u0000\u0525\u0527\u00050\u0000\u0000\u0526\u0525\u0001\u0000\u0000\u0000"+ + "\u0526\u0527\u0001\u0000\u0000\u0000\u0527\u0528\u0001\u0000\u0000\u0000"+ + "\u0528\u0529\u0003\f\u0006\u0000\u0529\u00a9\u0001\u0000\u0000\u0000\u052a"+ + "\u052b\u0003\u00b2Y\u0000\u052b\u00ab\u0001\u0000\u0000\u0000\u052c\u052d"+ + "\u0003\u00b2Y\u0000\u052d\u052e\u0005\u008e\u0000\u0000\u052e\u052f\u0005"+ + "h\u0000\u0000\u052f\u0530\u0003\u00aeW\u0000\u0530\u00ad\u0001\u0000\u0000"+ + "\u0000\u0531\u0536\u0003~?\u0000\u0532\u0536\u0003|>\u0000\u0533\u0536"+ + "\u0003x<\u0000\u0534\u0536\u0003z=\u0000\u0535\u0531\u0001\u0000\u0000"+ + "\u0000\u0535\u0532\u0001\u0000\u0000\u0000\u0535\u0533\u0001\u0000\u0000"+ + "\u0000\u0535\u0534\u0001\u0000\u0000\u0000\u0536\u00af\u0001\u0000\u0000"+ + "\u0000\u0537\u0538\u0003`0\u0000\u0538\u0539\u0007\b\u0000\u0000\u0539"+ + "\u053e\u0001\u0000\u0000\u0000\u053a\u053b\u0005\u008a\u0000\u0000\u053b"+ + "\u053c\u0005~\u0000\u0000\u053c\u053e\u0005\u001f\u0000\u0000\u053d\u0537"+ + 
"\u0001\u0000\u0000\u0000\u053d\u053a\u0001\u0000\u0000\u0000\u053e\u00b1"+ + "\u0001\u0000\u0000\u0000\u053f\u0540\u0003f3\u0000\u0540\u00b3\u0001\u0000"+ + "\u0000\u0000\u0541\u0542\u0003`0\u0000\u0542\u00b5\u0001\u0000\u0000\u0000"+ + "\u0543\u0545\u0003\u0006\u0003\u0000\u0544\u0543\u0001\u0000\u0000\u0000"+ + "\u0544\u0545\u0001\u0000\u0000\u0000\u0545\u0546\u0001\u0000\u0000\u0000"+ + "\u0546\u0547\u0005 \u0000\u0000\u0547\u0548\u00056\u0000\u0000\u0548\u054d"+ + "\u0003\u00fc~\u0000\u0549\u054b\u0005\u000e\u0000\u0000\u054a\u0549\u0001"+ + "\u0000\u0000\u0000\u054a\u054b\u0001\u0000\u0000\u0000\u054b\u054c\u0001"+ + "\u0000\u0000\u0000\u054c\u054e\u0003\"\u0011\u0000\u054d\u054a\u0001\u0000"+ + "\u0000\u0000\u054d\u054e\u0001\u0000\u0000\u0000\u054e\u0551\u0001\u0000"+ + "\u0000\u0000\u054f\u0550\u0005\u008d\u0000\u0000\u0550\u0552\u0003\f\u0006"+ + "\u0000\u0551\u054f\u0001\u0000\u0000\u0000\u0551\u0552\u0001\u0000\u0000"+ + "\u0000\u0552\u0554\u0001\u0000\u0000\u0000\u0553\u0555\u0003\u00b8\\\u0000"+ + "\u0554\u0553\u0001\u0000\u0000\u0000\u0554\u0555\u0001\u0000\u0000\u0000"+ + "\u0555\u00b7\u0001\u0000\u0000\u0000\u0556\u0557\u0005q\u0000\u0000\u0557"+ + "\u0558\u0003*\u0015\u0000\u0558\u00b9\u0001\u0000\u0000\u0000\u0559\u055b"+ + "\u0003\u00bc^\u0000\u055a\u055c\u0007\t\u0000\u0000\u055b\u055a\u0001"+ + "\u0000\u0000\u0000\u055b\u055c\u0001\u0000\u0000\u0000\u055c\u00bb\u0001"+ + "\u0000\u0000\u0000\u055d\u056d\u0003\u00d8l\u0000\u055e\u056d\u0003\u00ca"+ + "e\u0000\u055f\u056d\u0003\u00d6k\u0000\u0560\u056d\u0003\u00d4j\u0000"+ + "\u0561\u056d\u0003\u00d0h\u0000\u0562\u056d\u0003\u00ccf\u0000\u0563\u056d"+ + "\u0003\u00ceg\u0000\u0564\u056d\u0003\u00c8d\u0000\u0565\u056d\u0003\u00be"+ + "_\u0000\u0566\u056d\u0003\u00d2i\u0000\u0567\u056d\u0003\u00dam\u0000"+ + "\u0568\u056d\u0003\u00dcn\u0000\u0569\u056d\u0003\u00deo\u0000\u056a\u056d"+ + "\u0003\u00e0p\u0000\u056b\u056d\u0003\u00e2q\u0000\u056c\u055d\u0001\u0000"+ + "\u0000\u0000\u056c\u055e\u0001\u0000\u0000\u0000\u056c\u055f\u0001\u0000"+ + "\u0000\u0000\u056c\u0560\u0001\u0000\u0000\u0000\u056c\u0561\u0001\u0000"+ + "\u0000\u0000\u056c\u0562\u0001\u0000\u0000\u0000\u056c\u0563\u0001\u0000"+ + "\u0000\u0000\u056c\u0564\u0001\u0000\u0000\u0000\u056c\u0565\u0001\u0000"+ + "\u0000\u0000\u056c\u0566\u0001\u0000\u0000\u0000\u056c\u0567\u0001\u0000"+ + "\u0000\u0000\u056c\u0568\u0001\u0000\u0000\u0000\u056c\u0569\u0001\u0000"+ + "\u0000\u0000\u056c\u056a\u0001\u0000\u0000\u0000\u056c\u056b\u0001\u0000"+ + "\u0000\u0000\u056d\u00bd\u0001\u0000\u0000\u0000\u056e\u056f\u0005\u00a4"+ + "\u0000\u0000\u056f\u0570\u0005\u00af\u0000\u0000\u0570\u0575\u0003\u00c0"+ + "`\u0000\u0571\u0572\u0005\u00ad\u0000\u0000\u0572\u0574\u0003\u00c0`\u0000"+ + "\u0573\u0571\u0001\u0000\u0000\u0000\u0574\u0577\u0001\u0000\u0000\u0000"+ + "\u0575\u0573\u0001\u0000\u0000\u0000\u0575\u0576\u0001\u0000\u0000\u0000"+ + "\u0576\u0578\u0001\u0000\u0000\u0000\u0577\u0575\u0001\u0000\u0000\u0000"+ + "\u0578\u0579\u0005\u00b0\u0000\u0000\u0579\u00bf\u0001\u0000\u0000\u0000"+ + "\u057a\u057b\u0003\u01c2\u00e1\u0000\u057b\u057d\u0003\u00bc^\u0000\u057c"+ + "\u057e\u0003\u00c2a\u0000\u057d\u057c\u0001\u0000\u0000\u0000\u057d\u057e"+ + "\u0001\u0000\u0000\u0000\u057e\u0580\u0001\u0000\u0000\u0000\u057f\u0581"+ + "\u0003\u01b4\u00da\u0000\u0580\u057f\u0001\u0000\u0000\u0000\u0580\u0581"+ + "\u0001\u0000\u0000\u0000\u0581\u00c1\u0001\u0000\u0000\u0000\u0582\u0584"+ + "\u0003\u00c4b\u0000\u0583\u0585\u0003\u00c6c\u0000\u0584\u0583\u0001\u0000"+ + 
"\u0000\u0000\u0584\u0585\u0001\u0000\u0000\u0000\u0585\u058b\u0001\u0000"+ + "\u0000\u0000\u0586\u0588\u0003\u00c6c\u0000\u0587\u0589\u0003\u00c4b\u0000"+ + "\u0588\u0587\u0001\u0000\u0000\u0000\u0588\u0589\u0001\u0000\u0000\u0000"+ + "\u0589\u058b\u0001\u0000\u0000\u0000\u058a\u0582\u0001\u0000\u0000\u0000"+ + "\u058a\u0586\u0001\u0000\u0000\u0000\u058b\u00c3\u0001\u0000\u0000\u0000"+ + "\u058c\u0592\u0005\u001f\u0000\u0000\u058d\u0593\u0003\u01ba\u00dd\u0000"+ + "\u058e\u0593\u0003\u01be\u00df\u0000\u058f\u0593\u0005\u00cc\u0000\u0000"+ + "\u0590\u0593\u0005\u00cb\u0000\u0000\u0591\u0593\u0003\u01c2\u00e1\u0000"+ + "\u0592\u058d\u0001\u0000\u0000\u0000\u0592\u058e\u0001\u0000\u0000\u0000"+ + "\u0592\u058f\u0001\u0000\u0000\u0000\u0592\u0590\u0001\u0000\u0000\u0000"+ + "\u0592\u0591\u0001\u0000\u0000\u0000\u0593\u00c5\u0001\u0000\u0000\u0000"+ + "\u0594\u0595\u0005]\u0000\u0000\u0595\u0596\u0005\u00ca\u0000\u0000\u0596"+ + "\u00c7\u0001\u0000\u0000\u0000\u0597\u0598\u0005\u00a1\u0000\u0000\u0598"+ + "\u0599\u0005\u00af\u0000\u0000\u0599\u059a\u0003\u00bc^\u0000\u059a\u059b"+ + "\u0005\u00b0\u0000\u0000\u059b\u00c9\u0001\u0000\u0000\u0000\u059c\u059d"+ + "\u0005\u0098\u0000\u0000\u059d\u059e\u0005\u00af\u0000\u0000\u059e\u059f"+ + "\u0003\u00bc^\u0000\u059f\u05a0\u0005\u00b0\u0000\u0000\u05a0\u00cb\u0001"+ + "\u0000\u0000\u0000\u05a1\u05a2\u0007\n\u0000\u0000\u05a2\u00cd\u0001\u0000"+ + "\u0000\u0000\u05a3\u05a4\u0005H\u0000\u0000\u05a4\u00cf\u0001\u0000\u0000"+ + "\u0000\u05a5\u05a6\u0007\u000b\u0000\u0000\u05a6\u00d1\u0001\u0000\u0000"+ + "\u0000\u05a7\u05a8\u0005\u00a5\u0000\u0000\u05a8\u00d3\u0001\u0000\u0000"+ + "\u0000\u05a9\u05aa\u0005\u009c\u0000\u0000\u05aa\u05ab\u0005\u00af\u0000"+ + "\u0000\u05ab\u05ac\u0003\u01c0\u00e0\u0000\u05ac\u05ad\u0005\u00b0\u0000"+ + "\u0000\u05ad\u05b4\u0001\u0000\u0000\u0000\u05ae\u05af\u0005\u009c\u0000"+ + "\u0000\u05af\u05b0\u0005\u00af\u0000\u0000\u05b0\u05b1\u0003\u01c0\u00e0"+ + "\u0000\u05b1\u05b2\u0006j\uffff\uffff\u0000\u05b2\u05b4\u0001\u0000\u0000"+ + "\u0000\u05b3\u05a9\u0001\u0000\u0000\u0000\u05b3\u05ae\u0001\u0000\u0000"+ + "\u0000\u05b4\u00d5\u0001\u0000\u0000\u0000\u05b5\u05b6\u0005\u009a\u0000"+ + "\u0000\u05b6\u00d7\u0001\u0000\u0000\u0000\u05b7\u05bb\u0005\u0099\u0000"+ + "\u0000\u05b8\u05b9\u0005\u00af\u0000\u0000\u05b9\u05ba\u0005\u00cd\u0000"+ + "\u0000\u05ba\u05bc\u0005\u00b0\u0000\u0000\u05bb\u05b8\u0001\u0000\u0000"+ + "\u0000\u05bb\u05bc\u0001\u0000\u0000\u0000\u05bc\u00d9\u0001\u0000\u0000"+ + "\u0000\u05bd\u05c1\u0005\u00a6\u0000\u0000\u05be\u05bf\u0005\u00af\u0000"+ + "\u0000\u05bf\u05c0\u0005\u00cd\u0000\u0000\u05c0\u05c2\u0005\u00b0\u0000"+ + "\u0000\u05c1\u05be\u0001\u0000\u0000\u0000\u05c1\u05c2\u0001\u0000\u0000"+ + "\u0000\u05c2\u00db\u0001\u0000\u0000\u0000\u05c3\u05c4\u0005\u00a7\u0000"+ + "\u0000\u05c4\u00dd\u0001\u0000\u0000\u0000\u05c5\u05c6\u0005\u00a8\u0000"+ + "\u0000\u05c6\u00df\u0001\u0000\u0000\u0000\u05c7\u05c8\u0005\u00a9\u0000"+ + "\u0000\u05c8\u00e1\u0001\u0000\u0000\u0000\u05c9\u05ca\u0005\u00aa\u0000"+ + "\u0000\u05ca\u00e3\u0001\u0000\u0000\u0000\u05cb\u05d0\u0003\u01c2\u00e1"+ + "\u0000\u05cc\u05cd\u0005\u00b6\u0000\u0000\u05cd\u05cf\u0003\u01c2\u00e1"+ + "\u0000\u05ce\u05cc\u0001\u0000\u0000\u0000\u05cf\u05d2\u0001\u0000\u0000"+ + "\u0000\u05d0\u05ce\u0001\u0000\u0000\u0000\u05d0\u05d1\u0001\u0000\u0000"+ + "\u0000\u05d1\u00e5\u0001\u0000\u0000\u0000\u05d2\u05d0\u0001\u0000\u0000"+ + "\u0000\u05d3\u05d8\u0003\u00e8t\u0000\u05d4\u05d5\u0005\u00b6\u0000\u0000"+ + 
"\u05d5\u05d7\u0003\u00e8t\u0000\u05d6\u05d4\u0001\u0000\u0000\u0000\u05d7"+ + "\u05da\u0001\u0000\u0000\u0000\u05d8\u05d6\u0001\u0000\u0000\u0000\u05d8"+ + "\u05d9\u0001\u0000\u0000\u0000\u05d9\u00e7\u0001\u0000\u0000\u0000\u05da"+ + "\u05d8\u0001\u0000\u0000\u0000\u05db\u05dd\u0005\u00d2\u0000\u0000\u05dc"+ + "\u05db\u0001\u0000\u0000\u0000\u05dc\u05dd\u0001\u0000\u0000\u0000\u05dd"+ + "\u05de\u0001\u0000\u0000\u0000\u05de\u05df\u0003\u01c2\u00e1\u0000\u05df"+ + "\u00e9\u0001\u0000\u0000\u0000\u05e0\u05e5\u0003\u00ecv\u0000\u05e1\u05e2"+ + "\u0005\u00b6\u0000\u0000\u05e2\u05e4\u0003\u00ecv\u0000\u05e3\u05e1\u0001"+ + "\u0000\u0000\u0000\u05e4\u05e7\u0001\u0000\u0000\u0000\u05e5\u05e3\u0001"+ + "\u0000\u0000\u0000\u05e5\u05e6\u0001\u0000\u0000\u0000\u05e6\u00eb\u0001"+ + "\u0000\u0000\u0000\u05e7\u05e5\u0001\u0000\u0000\u0000\u05e8\u05eb\u0003"+ + "\u01c2\u00e1\u0000\u05e9\u05eb\u0005\u00d0\u0000\u0000\u05ea\u05e8\u0001"+ + "\u0000\u0000\u0000\u05ea\u05e9\u0001\u0000\u0000\u0000\u05eb\u00ed\u0001"+ + "\u0000\u0000\u0000\u05ec\u05ed\u0005\u001b\u0000\u0000\u05ed\u05f1\u0005"+ + "Y\u0000\u0000\u05ee\u05ef\u0005?\u0000\u0000\u05ef\u05f0\u0005]\u0000"+ + "\u0000\u05f0\u05f2\u0005.\u0000\u0000\u05f1\u05ee\u0001\u0000\u0000\u0000"+ + "\u05f1\u05f2\u0001\u0000\u0000\u0000\u05f2\u05f3\u0001\u0000\u0000\u0000"+ + "\u05f3\u05f4\u0003\u00fe\u007f\u0000\u05f4\u00ef\u0001\u0000\u0000\u0000"+ + "\u05f5\u05f6\u0005&\u0000\u0000\u05f6\u05f9\u0005Y\u0000\u0000\u05f7\u05f8"+ + "\u0005?\u0000\u0000\u05f8\u05fa\u0005.\u0000\u0000\u05f9\u05f7\u0001\u0000"+ + "\u0000\u0000\u05f9\u05fa\u0001\u0000\u0000\u0000\u05fa\u05fb\u0001\u0000"+ + "\u0000\u0000\u05fb\u05fd\u0003\u00fe\u007f\u0000\u05fc\u05fe\u0005\u0016"+ + "\u0000\u0000\u05fd\u05fc\u0001\u0000\u0000\u0000\u05fd\u05fe\u0001\u0000"+ + "\u0000\u0000\u05fe\u00f1\u0001\u0000\u0000\u0000\u05ff\u0600\u0003\u01c2"+ + "\u00e1\u0000\u0600\u00f3\u0001\u0000\u0000\u0000\u0601\u0602\u0005\u001b"+ + "\u0000\u0000\u0602\u0603\u0005n\u0000\u0000\u0603\u0604\u0003\u00f2y\u0000"+ + "\u0604\u00f5\u0001\u0000\u0000\u0000\u0605\u0606\u0005&\u0000\u0000\u0606"+ + "\u0607\u0005n\u0000\u0000\u0607\u0608\u0003\u00f2y\u0000\u0608\u00f7\u0001"+ + "\u0000\u0000\u0000\u0609\u060a\u0005z\u0000\u0000\u060a\u060b\u0005Q\u0000"+ + "\u0000\u060b\u060c\u0005n\u0000\u0000\u060c\u060d\u0003\u00f2y\u0000\u060d"+ + "\u00f9\u0001\u0000\u0000\u0000\u060e\u060f\u0005\u001b\u0000\u0000\u060f"+ + "\u0613\u0005~\u0000\u0000\u0610\u0611\u0005?\u0000\u0000\u0611\u0612\u0005"+ + "]\u0000\u0000\u0612\u0614\u0005.\u0000\u0000\u0613\u0610\u0001\u0000\u0000"+ + "\u0000\u0613\u0614\u0001\u0000\u0000\u0000\u0614\u0615\u0001\u0000\u0000"+ + "\u0000\u0615\u0617\u0003\u00fc~\u0000\u0616\u0618\u0003\u01b4\u00da\u0000"+ + "\u0617\u0616\u0001\u0000\u0000\u0000\u0617\u0618\u0001\u0000\u0000\u0000"+ + "\u0618\u0619\u0001\u0000\u0000\u0000\u0619\u061a\u0005\u00af\u0000\u0000"+ + "\u061a\u061b\u0003\u0100\u0080\u0000\u061b\u061d\u0005\u00b0\u0000\u0000"+ + "\u061c\u061e\u0003\u0116\u008b\u0000\u061d\u061c\u0001\u0000\u0000\u0000"+ + "\u061d\u061e\u0001\u0000\u0000\u0000\u061e\u00fb\u0001\u0000\u0000\u0000"+ + "\u061f\u0620\u0003\u00fe\u007f\u0000\u0620\u0621\u0005\u00ae\u0000\u0000"+ + "\u0621\u0623\u0001\u0000\u0000\u0000\u0622\u061f\u0001\u0000\u0000\u0000"+ + "\u0622\u0623\u0001\u0000\u0000\u0000\u0623\u0624\u0001\u0000\u0000\u0000"+ + "\u0624\u0625\u0003\u00e6s\u0000\u0625\u00fd\u0001\u0000\u0000\u0000\u0626"+ + "\u0627\u0003\u00e4r\u0000\u0627\u00ff\u0001\u0000\u0000\u0000\u0628\u062c"+ + 
"\u0003\u0102\u0081\u0000\u0629\u062c\u0003\u010c\u0086\u0000\u062a\u062c"+ + "\u0003\u0108\u0084\u0000\u062b\u0628\u0001\u0000\u0000\u0000\u062b\u0629"+ + "\u0001\u0000\u0000\u0000\u062b\u062a\u0001\u0000\u0000\u0000\u062c\u0635"+ + "\u0001\u0000\u0000\u0000\u062d\u0631\u0005\u00ad\u0000\u0000\u062e\u0632"+ + "\u0003\u0102\u0081\u0000\u062f\u0632\u0003\u010c\u0086\u0000\u0630\u0632"+ + "\u0003\u0108\u0084\u0000\u0631\u062e\u0001\u0000\u0000\u0000\u0631\u062f"+ + "\u0001\u0000\u0000\u0000\u0631\u0630\u0001\u0000\u0000\u0000\u0632\u0634"+ + "\u0001\u0000\u0000\u0000\u0633\u062d\u0001\u0000\u0000\u0000\u0634\u0637"+ + "\u0001\u0000\u0000\u0000\u0635\u0633\u0001\u0000\u0000\u0000\u0635\u0636"+ + "\u0001\u0000\u0000\u0000\u0636\u0101\u0001\u0000\u0000\u0000\u0637\u0635"+ + "\u0001\u0000\u0000\u0000\u0638\u0639\u0003\u01c2\u00e1\u0000\u0639\u063f"+ + "\u0003\u00bc^\u0000\u063a\u0640\u0003\u00c2a\u0000\u063b\u0640\u0003\u0128"+ + "\u0094\u0000\u063c\u0640\u0003\u012e\u0097\u0000\u063d\u0640\u0003\u012c"+ + "\u0096\u0000\u063e\u0640\u0003\u0104\u0082\u0000\u063f\u063a\u0001\u0000"+ + "\u0000\u0000\u063f\u063b\u0001\u0000\u0000\u0000\u063f\u063c\u0001\u0000"+ + "\u0000\u0000\u063f\u063d\u0001\u0000\u0000\u0000\u063f\u063e\u0001\u0000"+ + "\u0000\u0000\u063f\u0640\u0001\u0000\u0000\u0000\u0640\u0642\u0001\u0000"+ + "\u0000\u0000\u0641\u0643\u0003\u01b4\u00da\u0000\u0642\u0641\u0001\u0000"+ + "\u0000\u0000\u0642\u0643\u0001\u0000\u0000\u0000\u0643\u0103\u0001\u0000"+ + "\u0000\u0000\u0644\u0645\u0005\u00af\u0000\u0000\u0645\u064a\u0003\u0106"+ + "\u0083\u0000\u0646\u0647\u0005\u00ad\u0000\u0000\u0647\u0649\u0003\u0106"+ + "\u0083\u0000\u0648\u0646\u0001\u0000\u0000\u0000\u0649\u064c\u0001\u0000"+ + "\u0000\u0000\u064a\u0648\u0001\u0000\u0000\u0000\u064a\u064b\u0001\u0000"+ + "\u0000\u0000\u064b\u064d\u0001\u0000\u0000\u0000\u064c\u064a\u0001\u0000"+ + "\u0000\u0000\u064d\u064e\u0005\u00b0\u0000\u0000\u064e\u0105\u0001\u0000"+ + "\u0000\u0000\u064f\u0650\u0003\u010a\u0085\u0000\u0650\u0651\u0005\u000e"+ + "\u0000\u0000\u0651\u0652\u0007\f\u0000\u0000\u0652\u0653\u0005X\u0000"+ + "\u0000\u0653\u0107\u0001\u0000\u0000\u0000\u0654\u0655\u0003\u0106\u0083"+ + "\u0000\u0655\u0109\u0001\u0000\u0000\u0000\u0656\u0659\u0003\u01c2\u00e1"+ + "\u0000\u0657\u0659\u0003\u01be\u00df\u0000\u0658\u0656\u0001\u0000\u0000"+ + "\u0000\u0658\u0657\u0001\u0000\u0000\u0000\u0659\u0661\u0001\u0000\u0000"+ + "\u0000\u065a\u065d\u0005\u00b6\u0000\u0000\u065b\u065e\u0003\u01c2\u00e1"+ + "\u0000\u065c\u065e\u0003\u01be\u00df\u0000\u065d\u065b\u0001\u0000\u0000"+ + "\u0000\u065d\u065c\u0001\u0000\u0000\u0000\u065e\u0660\u0001\u0000\u0000"+ + "\u0000\u065f\u065a\u0001\u0000\u0000\u0000\u0660\u0663\u0001\u0000\u0000"+ + "\u0000\u0661\u065f\u0001\u0000\u0000\u0000\u0661\u0662\u0001\u0000\u0000"+ + "\u0000\u0662\u010b\u0001\u0000\u0000\u0000\u0663\u0661\u0001\u0000\u0000"+ + "\u0000\u0664\u0665\u0005l\u0000\u0000\u0665\u0666\u0005J\u0000\u0000\u0666"+ + "\u066b\u0005\u00af\u0000\u0000\u0667\u0669\u0003\u010e\u0087\u0000\u0668"+ + "\u066a\u0005\u00ad\u0000\u0000\u0669\u0668\u0001\u0000\u0000\u0000\u0669"+ + "\u066a\u0001\u0000\u0000\u0000\u066a\u066c\u0001\u0000\u0000\u0000\u066b"+ + "\u0667\u0001\u0000\u0000\u0000\u066b\u066c\u0001\u0000\u0000\u0000\u066c"+ + "\u066e\u0001\u0000\u0000\u0000\u066d\u066f\u0003\u0110\u0088\u0000\u066e"+ + "\u066d\u0001\u0000\u0000\u0000\u066e\u066f\u0001\u0000\u0000\u0000\u066f"+ + "\u0670\u0001\u0000\u0000\u0000\u0670\u0671\u0005\u00b0\u0000\u0000\u0671"+ + 
"\u010d\u0001\u0000\u0000\u0000\u0672\u0673\u0005{\u0000\u0000\u0673\u0674"+ + "\u0005\u00af\u0000\u0000\u0674\u0675\u0003\u0110\u0088\u0000\u0675\u0676"+ + "\u0005\u00b0\u0000\u0000\u0676\u067c\u0001\u0000\u0000\u0000\u0677\u0678"+ + "\u0005\u00af\u0000\u0000\u0678\u0679\u0003\u0110\u0088\u0000\u0679\u067a"+ + "\u0006\u0087\uffff\uffff\u0000\u067a\u067c\u0001\u0000\u0000\u0000\u067b"+ + "\u0672\u0001\u0000\u0000\u0000\u067b\u0677\u0001\u0000\u0000\u0000\u067c"+ + "\u010f\u0001\u0000\u0000\u0000\u067d\u0682\u0003\u0112\u0089\u0000\u067e"+ + "\u067f\u0005\u00ad\u0000\u0000\u067f\u0681\u0003\u0112\u0089\u0000\u0680"+ + "\u067e\u0001\u0000\u0000\u0000\u0681\u0684\u0001\u0000\u0000\u0000\u0682"+ + "\u0680\u0001\u0000\u0000\u0000\u0682\u0683\u0001\u0000\u0000\u0000\u0683"+ + "\u0111\u0001\u0000\u0000\u0000\u0684\u0682\u0001\u0000\u0000\u0000\u0685"+ + "\u0687\u0003\u01c2\u00e1\u0000\u0686\u0688\u0003\u0114\u008a\u0000\u0687"+ + "\u0686\u0001\u0000\u0000\u0000\u0687\u0688\u0001\u0000\u0000\u0000\u0688"+ + "\u0113\u0001\u0000\u0000\u0000\u0689\u068a\u0005\u00af\u0000\u0000\u068a"+ + "\u068b\u0005\u00cd\u0000\u0000\u068b\u068c\u0005\u00b0\u0000\u0000\u068c"+ + "\u0115\u0001\u0000\u0000\u0000\u068d\u0693\u0003\u0118\u008c\u0000\u068e"+ + "\u0693\u0003\u011c\u008e\u0000\u068f\u0693\u0003\u011e\u008f\u0000\u0690"+ + "\u0693\u0003\u0120\u0090\u0000\u0691\u0693\u0003\u0122\u0091\u0000\u0692"+ + "\u068d\u0001\u0000\u0000\u0000\u0692\u068e\u0001\u0000\u0000\u0000\u0692"+ + "\u068f\u0001\u0000\u0000\u0000\u0692\u0690\u0001\u0000\u0000\u0000\u0692"+ + "\u0691\u0001\u0000\u0000\u0000\u0693\u0694\u0001\u0000\u0000\u0000\u0694"+ + "\u0692\u0001\u0000\u0000\u0000\u0694\u0695\u0001\u0000\u0000\u0000\u0695"+ + "\u0117\u0001\u0000\u0000\u0000\u0696\u0697\u0005\u008a\u0000\u0000\u0697"+ + "\u0698\u0005\u0082\u0000\u0000\u0698\u0699\u0003\u01b6\u00db\u0000\u0699"+ + "\u0119\u0001\u0000\u0000\u0000\u069a\u069b\u0003\u01c0\u00e0\u0000\u069b"+ + "\u011b\u0001\u0000\u0000\u0000\u069c\u069d\u0005A\u0000\u0000\u069d\u069e"+ + "\u0005o\u0000\u0000\u069e\u069f\u0003\u011a\u008d\u0000\u069f\u011d\u0001"+ + "\u0000\u0000\u0000\u06a0\u06a1\u0005\u008e\u0000\u0000\u06a1\u06a2\u0005"+ + "v\u0000\u0000\u06a2\u06a4\u00057\u0000\u0000\u06a3\u06a5\u00052\u0000"+ + "\u0000\u06a4\u06a3\u0001\u0000\u0000\u0000\u06a4\u06a5\u0001\u0000\u0000"+ + "\u0000\u06a5\u011f\u0001\u0000\u0000\u0000\u06a6\u06a7\u0005\u000e\u0000"+ + "\u0000\u06a7\u06a8\u0005H\u0000\u0000\u06a8\u06a9\u0005\u0018\u0000\u0000"+ + "\u06a9\u0121\u0001\u0000\u0000\u0000\u06aa\u06ab\u0005*\u0000\u0000\u06ab"+ + "\u06ac\u0005\u0011\u0000\u0000\u06ac\u06ae\u0005@\u0000\u0000\u06ad\u06af"+ + "\u0003\u0124\u0092\u0000\u06ae\u06ad\u0001\u0000\u0000\u0000\u06ae\u06af"+ + "\u0001\u0000\u0000\u0000\u06af\u0123\u0001\u0000\u0000\u0000\u06b0\u06b1"+ + "\u0005\u008a\u0000\u0000\u06b1\u06b2\u0005\u0082\u0000\u0000\u06b2\u06b3"+ + "\u0003\u01b6\u00db\u0000\u06b3\u0125\u0001\u0000\u0000\u0000\u06b4\u06b5"+ + "\u0005$\u0000\u0000\u06b5\u06b6\u0005\u0011\u0000\u0000\u06b6\u06b7\u0005"+ + "@\u0000\u0000\u06b7\u0127\u0001\u0000\u0000\u0000\u06b8\u06c0\u00059\u0000"+ + "\u0000\u06b9\u06c1\u0005\u000b\u0000\u0000\u06ba\u06bb\u0005\u0013\u0000"+ + "\u0000\u06bb\u06be\u0005\u001f\u0000\u0000\u06bc\u06bd\u0005a\u0000\u0000"+ + "\u06bd\u06bf\u0005\u00ca\u0000\u0000\u06be\u06bc\u0001\u0000\u0000\u0000"+ + "\u06be\u06bf\u0001\u0000\u0000\u0000\u06bf\u06c1\u0001\u0000\u0000\u0000"+ + "\u06c0\u06b9\u0001\u0000\u0000\u0000\u06c0\u06ba\u0001\u0000\u0000\u0000"+ + 
"\u06c1\u06c2\u0001\u0000\u0000\u0000\u06c2\u06c3\u0005\u000e\u0000\u0000"+ + "\u06c3\u06cc\u0005>\u0000\u0000\u06c4\u06c6\u0005\u00af\u0000\u0000\u06c5"+ + "\u06c7\u0003\u012a\u0095\u0000\u06c6\u06c5\u0001\u0000\u0000\u0000\u06c7"+ + "\u06c8\u0001\u0000\u0000\u0000\u06c8\u06c6\u0001\u0000\u0000\u0000\u06c8"+ + "\u06c9\u0001\u0000\u0000\u0000\u06c9\u06ca\u0001\u0000\u0000\u0000\u06ca"+ + "\u06cb\u0005\u00b0\u0000\u0000\u06cb\u06cd\u0001\u0000\u0000\u0000\u06cc"+ + "\u06c4\u0001\u0000\u0000\u0000\u06cc\u06cd\u0001\u0000\u0000\u0000\u06cd"+ + "\u0129\u0001\u0000\u0000\u0000\u06ce\u06cf\u0005}\u0000\u0000\u06cf\u06d0"+ + "\u0005\u008e\u0000\u0000\u06d0\u06e4\u0003\u01bc\u00de\u0000\u06d1\u06d2"+ + "\u0005B\u0000\u0000\u06d2\u06d3\u0005\u0013\u0000\u0000\u06d3\u06e4\u0003"+ + "\u01bc\u00de\u0000\u06d4\u06d5\u0005S\u0000\u0000\u06d5\u06e4\u0003\u01bc"+ + "\u00de\u0000\u06d6\u06d7\u0005\\\u0000\u0000\u06d7\u06e4\u0005S\u0000"+ + "\u0000\u06d8\u06d9\u0005V\u0000\u0000\u06d9\u06e4\u0003\u01bc\u00de\u0000"+ + "\u06da\u06db\u0005\\\u0000\u0000\u06db\u06e4\u0005V\u0000\u0000\u06dc"+ + "\u06dd\u0005\u0014\u0000\u0000\u06dd\u06e4\u0005\u00cd\u0000\u0000\u06de"+ + "\u06df\u0005\\\u0000\u0000\u06df\u06e4\u0005\u0014\u0000\u0000\u06e0\u06e4"+ + "\u0005\u001c\u0000\u0000\u06e1\u06e2\u0005\\\u0000\u0000\u06e2\u06e4\u0005"+ + "\u001c\u0000\u0000\u06e3\u06ce\u0001\u0000\u0000\u0000\u06e3\u06d1\u0001"+ + "\u0000\u0000\u0000\u06e3\u06d4\u0001\u0000\u0000\u0000\u06e3\u06d6\u0001"+ + "\u0000\u0000\u0000\u06e3\u06d8\u0001\u0000\u0000\u0000\u06e3\u06da\u0001"+ + "\u0000\u0000\u0000\u06e3\u06dc\u0001\u0000\u0000\u0000\u06e3\u06de\u0001"+ + "\u0000\u0000\u0000\u06e3\u06e0\u0001\u0000\u0000\u0000\u06e3\u06e1\u0001"+ + "\u0000\u0000\u0000\u06e4\u012b\u0001\u0000\u0000\u0000\u06e5\u06e6\u0005"+ + "\u000e\u0000\u0000\u06e6\u06e7\u0005X\u0000\u0000\u06e7\u012d\u0001\u0000"+ + "\u0000\u0000\u06e8\u06e9\u0005\u000e\u0000\u0000\u06e9\u06ed\u0005\u0091"+ + "\u0000\u0000\u06ea\u06eb\u00059\u0000\u0000\u06eb\u06ec\u0005\u0013\u0000"+ + "\u0000\u06ec\u06ee\u0005\u001f\u0000\u0000\u06ed\u06ea\u0001\u0000\u0000"+ + "\u0000\u06ed\u06ee\u0001\u0000\u0000\u0000\u06ee\u012f\u0001\u0000\u0000"+ + "\u0000\u06ef\u06f0\u0005\n\u0000\u0000\u06f0\u06f1\u0005~\u0000\u0000"+ + "\u06f1\u06f2\u0003\u00fc~\u0000\u06f2\u06f3\u0003\u0132\u0099\u0000\u06f3"+ + "\u0131\u0001\u0000\u0000\u0000\u06f4\u06fd\u0003\u013c\u009e\u0000\u06f5"+ + "\u06fd\u0003\u0118\u008c\u0000\u06f6\u06fd\u0003\u0138\u009c\u0000\u06f7"+ + "\u06fd\u0003\u013a\u009d\u0000\u06f8\u06fd\u0003\u0134\u009a\u0000\u06f9"+ + "\u06fd\u0003\u0136\u009b\u0000\u06fa\u06fd\u0003\u0122\u0091\u0000\u06fb"+ + "\u06fd\u0003\u0126\u0093\u0000\u06fc\u06f4\u0001\u0000\u0000\u0000\u06fc"+ + "\u06f5\u0001\u0000\u0000\u0000\u06fc\u06f6\u0001\u0000\u0000\u0000\u06fc"+ + "\u06f7\u0001\u0000\u0000\u0000\u06fc\u06f8\u0001\u0000\u0000\u0000\u06fc"+ + "\u06f9\u0001\u0000\u0000\u0000\u06fc\u06fa\u0001\u0000\u0000\u0000\u06fc"+ + "\u06fb\u0001\u0000\u0000\u0000\u06fd\u0133\u0001\u0000\u0000\u0000\u06fe"+ + "\u06ff\u00055\u0000\u0000\u06ff\u0701\u0005v\u0000\u0000\u0700\u0702\u0005"+ + "2\u0000\u0000\u0701\u0700\u0001\u0000\u0000\u0000\u0701\u0702\u0001\u0000"+ + "\u0000\u0000\u0702\u0135\u0001\u0000\u0000\u0000\u0703\u0704\u0005\u0084"+ + "\u0000\u0000\u0704\u0705\u0005v\u0000\u0000\u0705\u0137\u0001\u0000\u0000"+ + "\u0000\u0706\u0707\u0005\u0007\u0000\u0000\u0707\u0708\u0005o\u0000\u0000"+ + "\u0708\u0709\u0003\u011a\u008d\u0000\u0709\u0139\u0001\u0000\u0000\u0000"+ + 
"\u070a\u070b\u0005&\u0000\u0000\u070b\u070c\u0005o\u0000\u0000\u070c\u070d"+ + "\u0003\u011a\u008d\u0000\u070d\u013b\u0001\u0000\u0000\u0000\u070e\u0712"+ + "\u0005\u00af\u0000\u0000\u070f\u0713\u0003\u013e\u009f\u0000\u0710\u0713"+ + "\u0003\u0140\u00a0\u0000\u0711\u0713\u0003\u0142\u00a1\u0000\u0712\u070f"+ + "\u0001\u0000\u0000\u0000\u0712\u0710\u0001\u0000\u0000\u0000\u0712\u0711"+ + "\u0001\u0000\u0000\u0000\u0713\u071c\u0001\u0000\u0000\u0000\u0714\u0718"+ + "\u0005\u00ad\u0000\u0000\u0715\u0719\u0003\u013e\u009f\u0000\u0716\u0719"+ + "\u0003\u0140\u00a0\u0000\u0717\u0719\u0003\u0142\u00a1\u0000\u0718\u0715"+ + "\u0001\u0000\u0000\u0000\u0718\u0716\u0001\u0000\u0000\u0000\u0718\u0717"+ + "\u0001\u0000\u0000\u0000\u0719\u071b\u0001\u0000\u0000\u0000\u071a\u0714"+ + "\u0001\u0000\u0000\u0000\u071b\u071e\u0001\u0000\u0000\u0000\u071c\u071a"+ + "\u0001\u0000\u0000\u0000\u071c\u071d\u0001\u0000\u0000\u0000\u071d\u071f"+ + "\u0001\u0000\u0000\u0000\u071e\u071c\u0001\u0000\u0000\u0000\u071f\u0720"+ + "\u0005\u00b0\u0000\u0000\u0720\u013d\u0001\u0000\u0000\u0000\u0721\u0722"+ + "\u0005\u0007\u0000\u0000\u0722\u0723\u0003\u0144\u00a2\u0000\u0723\u0729"+ + "\u0003\u00bc^\u0000\u0724\u072a\u0003\u00c2a\u0000\u0725\u072a\u0003\u0128"+ + "\u0094\u0000\u0726\u072a\u0003\u012c\u0096\u0000\u0727\u072a\u0003\u012e"+ + "\u0097\u0000\u0728\u072a\u0003\u0104\u0082\u0000\u0729\u0724\u0001\u0000"+ + "\u0000\u0000\u0729\u0725\u0001\u0000\u0000\u0000\u0729\u0726\u0001\u0000"+ + "\u0000\u0000\u0729\u0727\u0001\u0000\u0000\u0000\u0729\u0728\u0001\u0000"+ + "\u0000\u0000\u0729\u072a\u0001\u0000\u0000\u0000\u072a\u072c\u0001\u0000"+ + "\u0000\u0000\u072b\u072d\u0003\u01b4\u00da\u0000\u072c\u072b\u0001\u0000"+ + "\u0000\u0000\u072c\u072d\u0001\u0000\u0000\u0000\u072d\u013f\u0001\u0000"+ + "\u0000\u0000\u072e\u072f\u0005&\u0000\u0000\u072f\u0730\u0003\u0144\u00a2"+ + "\u0000\u0730\u0141\u0001\u0000\u0000\u0000\u0731\u0732\u0005W\u0000\u0000"+ + "\u0732\u073e\u0003\u0144\u00a2\u0000\u0733\u0735\u0003\u00bc^\u0000\u0734"+ + "\u0736\u0003\u00c2a\u0000\u0735\u0734\u0001\u0000\u0000\u0000\u0735\u0736"+ + "\u0001\u0000\u0000\u0000\u0736\u0738\u0001\u0000\u0000\u0000\u0737\u0739"+ + "\u0003\u01b4\u00da\u0000\u0738\u0737\u0001\u0000\u0000\u0000\u0738\u0739"+ + "\u0001\u0000\u0000\u0000\u0739\u073f\u0001\u0000\u0000\u0000\u073a\u073f"+ + "\u0003\u0128\u0094\u0000\u073b\u073f\u0003\u012e\u0097\u0000\u073c\u073d"+ + "\u0005&\u0000\u0000\u073d\u073f\u0005>\u0000\u0000\u073e\u0733\u0001\u0000"+ + "\u0000\u0000\u073e\u073a\u0001\u0000\u0000\u0000\u073e\u073b\u0001\u0000"+ + "\u0000\u0000\u073e\u073c\u0001\u0000\u0000\u0000\u073f\u0143\u0001\u0000"+ + "\u0000\u0000\u0740\u0745\u0003\u0146\u00a3\u0000\u0741\u0742\u0005\u00b6"+ + "\u0000\u0000\u0742\u0744\u0003\u0148\u00a4\u0000\u0743\u0741\u0001\u0000"+ + "\u0000\u0000\u0744\u0747\u0001\u0000\u0000\u0000\u0745\u0743\u0001\u0000"+ + "\u0000\u0000\u0745\u0746\u0001\u0000\u0000\u0000\u0746\u0145\u0001\u0000"+ + "\u0000\u0000\u0747\u0745\u0001\u0000\u0000\u0000\u0748\u074d\u0003\u01c2"+ + "\u00e1\u0000\u0749\u074a\u0005\u00b1\u0000\u0000\u074a\u074c\u0005\u00b2"+ + "\u0000\u0000\u074b\u0749\u0001\u0000\u0000\u0000\u074c\u074f\u0001\u0000"+ + "\u0000\u0000\u074d\u074b\u0001\u0000\u0000\u0000\u074d\u074e\u0001\u0000"+ + "\u0000\u0000\u074e\u0147\u0001\u0000\u0000\u0000\u074f\u074d\u0001\u0000"+ + "\u0000\u0000\u0750\u0755\u0003\u01c2\u00e1\u0000\u0751\u0752\u0005\u00b1"+ + "\u0000\u0000\u0752\u0754\u0005\u00b2\u0000\u0000\u0753\u0751\u0001\u0000"+ + 
"\u0000\u0000\u0754\u0757\u0001\u0000\u0000\u0000\u0755\u0753\u0001\u0000"+ + "\u0000\u0000\u0755\u0756\u0001\u0000\u0000\u0000\u0756\u075c\u0001\u0000"+ + "\u0000\u0000\u0757\u0755\u0001\u0000\u0000\u0000\u0758\u0759\u0005\u008b"+ + "\u0000\u0000\u0759\u075a\u0005\u00af\u0000\u0000\u075a\u075c\u0005\u00b0"+ + "\u0000\u0000\u075b\u0750\u0001\u0000\u0000\u0000\u075b\u0758\u0001\u0000"+ + "\u0000\u0000\u075c\u0149\u0001\u0000\u0000\u0000\u075d\u075e\u0005&\u0000"+ + "\u0000\u075e\u0761\u0005~\u0000\u0000\u075f\u0760\u0005?\u0000\u0000\u0760"+ + "\u0762\u0005.\u0000\u0000\u0761\u075f\u0001\u0000\u0000\u0000\u0761\u0762"+ + "\u0001\u0000\u0000\u0000\u0762\u0763\u0001\u0000\u0000\u0000\u0763\u0764"+ + "\u0003\u00fc~\u0000\u0764\u014b\u0001\u0000\u0000\u0000\u0765\u0766\u0005"+ + "\u001b\u0000\u0000\u0766\u076a\u0005C\u0000\u0000\u0767\u0768\u0005?\u0000"+ + "\u0000\u0768\u0769\u0005]\u0000\u0000\u0769\u076b\u0005.\u0000\u0000\u076a"+ + "\u0767\u0001\u0000\u0000\u0000\u076a\u076b\u0001\u0000\u0000\u0000\u076b"+ + "\u076c\u0001\u0000\u0000\u0000\u076c\u076d\u0003\u014e\u00a7\u0000\u076d"+ + "\u076e\u0005a\u0000\u0000\u076e\u0784\u0003\u00fc~\u0000\u076f\u0770\u0005"+ + "\u00af\u0000\u0000\u0770\u0771\u0003\u0150\u00a8\u0000\u0771\u0777\u0005"+ + "\u00b0\u0000\u0000\u0772\u0774\u0005\u008e\u0000\u0000\u0773\u0775\u0005"+ + "\\\u0000\u0000\u0774\u0773\u0001\u0000\u0000\u0000\u0774\u0775\u0001\u0000"+ + "\u0000\u0000\u0775\u0776\u0001\u0000\u0000\u0000\u0776\u0778\u0005^\u0000"+ + "\u0000\u0777\u0772\u0001\u0000\u0000\u0000\u0777\u0778\u0001\u0000\u0000"+ + "\u0000\u0778\u077e\u0001\u0000\u0000\u0000\u0779\u077a\u0005\u008e\u0000"+ + "\u0000\u077a\u077b\u0005\u008f\u0000\u0000\u077b\u077c\u0005L\u0000\u0000"+ + "\u077c\u077d\u0005i\u0000\u0000\u077d\u077f\u0005u\u0000\u0000\u077e\u0779"+ + "\u0001\u0000\u0000\u0000\u077e\u077f\u0001\u0000\u0000\u0000\u077f\u0785"+ + "\u0001\u0000\u0000\u0000\u0780\u0781\u0005\u00af\u0000\u0000\u0781\u0782"+ + "\u0003\u0150\u00a8\u0000\u0782\u0783\u0006\u00a6\uffff\uffff\u0000\u0783"+ + "\u0785\u0001\u0000\u0000\u0000\u0784\u076f\u0001\u0000\u0000\u0000\u0784"+ + "\u0780\u0001\u0000\u0000\u0000\u0785\u0787\u0001\u0000\u0000\u0000\u0786"+ + "\u0788\u0003\u01b4\u00da\u0000\u0787\u0786\u0001\u0000\u0000\u0000\u0787"+ + "\u0788\u0001\u0000\u0000\u0000\u0788\u014d\u0001\u0000\u0000\u0000\u0789"+ + "\u078a\u0003\u01c2\u00e1\u0000\u078a\u014f\u0001\u0000\u0000\u0000\u078b"+ + "\u0790\u0003\u0152\u00a9\u0000\u078c\u078d\u0005\u00ad\u0000\u0000\u078d"+ + "\u078f\u0003\u0152\u00a9\u0000\u078e\u078c\u0001\u0000\u0000\u0000\u078f"+ + "\u0792\u0001\u0000\u0000\u0000\u0790\u078e\u0001\u0000\u0000\u0000\u0790"+ + "\u0791\u0001\u0000\u0000\u0000\u0791\u0151\u0001\u0000\u0000\u0000\u0792"+ + "\u0790\u0001\u0000\u0000\u0000\u0793\u0795\u0003\u0158\u00ac\u0000\u0794"+ + "\u0796\u0003\u0162\u00b1\u0000\u0795\u0794\u0001\u0000\u0000\u0000\u0795"+ + "\u0796\u0001\u0000\u0000\u0000\u0796\u0799\u0001\u0000\u0000\u0000\u0797"+ + "\u0799\u0003\u0154\u00aa\u0000\u0798\u0793\u0001\u0000\u0000\u0000\u0798"+ + "\u0797\u0001\u0000\u0000\u0000\u0799\u0153\u0001\u0000\u0000\u0000\u079a"+ + "\u079b\u0003\u01c2\u00e1\u0000\u079b\u079d\u0005\u00af\u0000\u0000\u079c"+ + "\u079e\u0003\u0158\u00ac\u0000\u079d\u079c\u0001\u0000\u0000\u0000\u079d"+ + "\u079e\u0001\u0000\u0000\u0000\u079e\u07a0\u0001\u0000\u0000\u0000\u079f"+ + "\u07a1\u0003\u0162\u00b1\u0000\u07a0\u079f\u0001\u0000\u0000\u0000\u07a0"+ + "\u07a1\u0001\u0000\u0000\u0000\u07a1\u07a3\u0001\u0000\u0000\u0000\u07a2"+ + 
"\u07a4\u0003\u0156\u00ab\u0000\u07a3\u07a2\u0001\u0000\u0000\u0000\u07a3"+ + "\u07a4\u0001\u0000\u0000\u0000\u07a4\u07a5\u0001\u0000\u0000\u0000\u07a5"+ + "\u07a6\u0005\u00b0\u0000\u0000\u07a6\u0155\u0001\u0000\u0000\u0000\u07a7"+ + "\u07a8\u0005\u00ad\u0000\u0000\u07a8\u07aa\u0003x<\u0000\u07a9\u07a7\u0001"+ + "\u0000\u0000\u0000\u07aa\u07ab\u0001\u0000\u0000\u0000\u07ab\u07a9\u0001"+ + "\u0000\u0000\u0000\u07ab\u07ac\u0001\u0000\u0000\u0000\u07ac\u0157\u0001"+ + "\u0000\u0000\u0000\u07ad\u07af\u0003\u015c\u00ae\u0000\u07ae\u07ad\u0001"+ + "\u0000\u0000\u0000\u07ae\u07af\u0001\u0000\u0000\u0000\u07af\u07b5\u0001"+ + "\u0000\u0000\u0000\u07b0\u07b6\u0003\u00eau\u0000\u07b1\u07b3\u0003\u015e"+ + "\u00af\u0000\u07b2\u07b4\u0003\u0160\u00b0\u0000\u07b3\u07b2\u0001\u0000"+ + "\u0000\u0000\u07b3\u07b4\u0001\u0000\u0000\u0000\u07b4\u07b6\u0001\u0000"+ + "\u0000\u0000\u07b5\u07b0\u0001\u0000\u0000\u0000\u07b5\u07b1\u0001\u0000"+ + "\u0000\u0000\u07b6\u07b9\u0001\u0000\u0000\u0000\u07b7\u07b9\u0003\u015a"+ + "\u00ad\u0000\u07b8\u07ae\u0001\u0000\u0000\u0000\u07b8\u07b7\u0001\u0000"+ + "\u0000\u0000\u07b9\u0159\u0001\u0000\u0000\u0000\u07ba\u07bb\u0005\'\u0000"+ + "\u0000\u07bb\u07bc\u0005\u00af\u0000\u0000\u07bc\u07bd\u0003\u00eau\u0000"+ + "\u07bd\u07bf\u0005\u00b0\u0000\u0000\u07be\u07c0\u0003\u0160\u00b0\u0000"+ + "\u07bf\u07be\u0001\u0000\u0000\u0000\u07bf\u07c0\u0001\u0000\u0000\u0000"+ + "\u07c0\u07cc\u0001\u0000\u0000\u0000\u07c1\u07c2\u0005K\u0000\u0000\u07c2"+ + "\u07c3\u0005\u00af\u0000\u0000\u07c3\u07c4\u0003\u00eau\u0000\u07c4\u07c5"+ + "\u0005\u00b0\u0000\u0000\u07c5\u07cc\u0001\u0000\u0000\u0000\u07c6\u07c7"+ + "\u0005L\u0000\u0000\u07c7\u07c8\u0005\u00af\u0000\u0000\u07c8\u07c9\u0003"+ + "\u00eau\u0000\u07c9\u07ca\u0005\u00b0\u0000\u0000\u07ca\u07cc\u0001\u0000"+ + "\u0000\u0000\u07cb\u07ba\u0001\u0000\u0000\u0000\u07cb\u07c1\u0001\u0000"+ + "\u0000\u0000\u07cb\u07c6\u0001\u0000\u0000\u0000\u07cc\u015b\u0001\u0000"+ + "\u0000\u0000\u07cd\u07ce\u0005\u0004\u0000\u0000\u07ce\u015d\u0001\u0000"+ + "\u0000\u0000\u07cf\u07da\u0003\u00ecv\u0000\u07d0\u07d1\u0005\u00b6\u0000"+ + "\u0000\u07d1\u07d9\u0003\u00ecv\u0000\u07d2\u07d3\u0005\u00b6\u0000\u0000"+ + "\u07d3\u07d4\u0005\u008b\u0000\u0000\u07d4\u07d5\u0005\u00af\u0000\u0000"+ + "\u07d5\u07d9\u0005\u00b0\u0000\u0000\u07d6\u07d7\u0005\u00b1\u0000\u0000"+ + "\u07d7\u07d9\u0005\u00b2\u0000\u0000\u07d8\u07d0\u0001\u0000\u0000\u0000"+ + "\u07d8\u07d2\u0001\u0000\u0000\u0000\u07d8\u07d6\u0001\u0000\u0000\u0000"+ + "\u07d9\u07dc\u0001\u0000\u0000\u0000\u07da\u07d8\u0001\u0000\u0000\u0000"+ + "\u07da\u07db\u0001\u0000\u0000\u0000\u07db\u07e7\u0001\u0000\u0000\u0000"+ + "\u07dc\u07da\u0001\u0000\u0000\u0000\u07dd\u07de\u0005\u00b1\u0000\u0000"+ + "\u07de\u07e8\u0005\u00b2\u0000\u0000\u07df\u07e0\u0005\u00b6\u0000\u0000"+ + "\u07e0\u07e1\u0005\u008b\u0000\u0000\u07e1\u07e2\u0005\u00af\u0000\u0000"+ + "\u07e2\u07e8\u0005\u00b0\u0000\u0000\u07e3\u07e4\u0005\u00b6\u0000\u0000"+ + "\u07e4\u07e5\u0005L\u0000\u0000\u07e5\u07e6\u0005\u00af\u0000\u0000\u07e6"+ + "\u07e8\u0005\u00b0\u0000\u0000\u07e7\u07dd\u0001\u0000\u0000\u0000\u07e7"+ + "\u07df\u0001\u0000\u0000\u0000\u07e7\u07e3\u0001\u0000\u0000\u0000\u07e8"+ + "\u015f\u0001\u0000\u0000\u0000\u07e9\u07ea\u0005\u00b6\u0000\u0000\u07ea"+ + "\u07eb\u0003\u00eau\u0000\u07eb\u0161\u0001\u0000\u0000\u0000\u07ec\u07f9"+ + "\u0005\u000e\u0000\u0000\u07ed\u07fa\u0005\u009f\u0000\u0000\u07ee\u07fa"+ + "\u0005\u00a0\u0000\u0000\u07ef\u07fa\u0005\u009b\u0000\u0000\u07f0\u07fa"+ + 
"\u0005\u00a5\u0000\u0000\u07f1\u07fa\u0005\u009a\u0000\u0000\u07f2\u07fa"+ + "\u0005\u00a2\u0000\u0000\u07f3\u07fa\u0005\u00a8\u0000\u0000\u07f4\u07f6"+ + "\u0005\u009e\u0000\u0000\u07f5\u07f7\u0003\u01ac\u00d6\u0000\u07f6\u07f5"+ + "\u0001\u0000\u0000\u0000\u07f6\u07f7\u0001\u0000\u0000\u0000\u07f7\u07fa"+ + "\u0001\u0000\u0000\u0000\u07f8\u07fa\u0005\u00a3\u0000\u0000\u07f9\u07ed"+ + "\u0001\u0000\u0000\u0000\u07f9\u07ee\u0001\u0000\u0000\u0000\u07f9\u07ef"+ + "\u0001\u0000\u0000\u0000\u07f9\u07f0\u0001\u0000\u0000\u0000\u07f9\u07f1"+ + "\u0001\u0000\u0000\u0000\u07f9\u07f2\u0001\u0000\u0000\u0000\u07f9\u07f3"+ + "\u0001\u0000\u0000\u0000\u07f9\u07f4\u0001\u0000\u0000\u0000\u07f9\u07f8"+ + "\u0001\u0000\u0000\u0000\u07fa\u0163\u0001\u0000\u0000\u0000\u07fb\u07fc"+ + "\u0005\u001b\u0000\u0000\u07fc\u07fd\u00058\u0000\u0000\u07fd\u0801\u0005"+ + "C\u0000\u0000\u07fe\u07ff\u0005?\u0000\u0000\u07ff\u0800\u0005]\u0000"+ + "\u0000\u0800\u0802\u0005.\u0000\u0000\u0801\u07fe\u0001\u0000\u0000\u0000"+ + "\u0801\u0802\u0001\u0000\u0000\u0000\u0802\u0803\u0001\u0000\u0000\u0000"+ + "\u0803\u0804\u0003\u014e\u00a7\u0000\u0804\u0805\u0005a\u0000\u0000\u0805"+ + "\u0806\u0003\u00fc~\u0000\u0806\u0808\u0003\u0166\u00b3\u0000\u0807\u0809"+ + "\u0003\u016c\u00b6\u0000\u0808\u0807\u0001\u0000\u0000\u0000\u0808\u0809"+ + "\u0001\u0000\u0000\u0000\u0809\u080b\u0001\u0000\u0000\u0000\u080a\u080c"+ + "\u0005f\u0000\u0000\u080b\u080a\u0001\u0000\u0000\u0000\u080b\u080c\u0001"+ + "\u0000\u0000\u0000\u080c\u080e\u0001\u0000\u0000\u0000\u080d\u080f\u0003"+ + "\u01b4\u00da\u0000\u080e\u080d\u0001\u0000\u0000\u0000\u080e\u080f\u0001"+ + "\u0000\u0000\u0000\u080f\u0165\u0001\u0000\u0000\u0000\u0810\u0811\u0005"+ + "\u00af\u0000\u0000\u0811\u0812\u0003\u0168\u00b4\u0000\u0812\u0813\u0005"+ + "\u00b0\u0000\u0000\u0813\u0819\u0001\u0000\u0000\u0000\u0814\u0815\u0005"+ + "\u00af\u0000\u0000\u0815\u0816\u0003\u0168\u00b4\u0000\u0816\u0817\u0006"+ + "\u00b3\uffff\uffff\u0000\u0817\u0819\u0001\u0000\u0000\u0000\u0818\u0810"+ + "\u0001\u0000\u0000\u0000\u0818\u0814\u0001\u0000\u0000\u0000\u0819\u0167"+ + "\u0001\u0000\u0000\u0000\u081a\u081f\u0003\u016a\u00b5\u0000\u081b\u081c"+ + "\u0005\u00ad\u0000\u0000\u081c\u081e\u0003\u016a\u00b5\u0000\u081d\u081b"+ + "\u0001\u0000\u0000\u0000\u081e\u0821\u0001\u0000\u0000\u0000\u081f\u081d"+ + "\u0001\u0000\u0000\u0000\u081f\u0820\u0001\u0000\u0000\u0000\u0820\u0169"+ + "\u0001\u0000\u0000\u0000\u0821\u081f\u0001\u0000\u0000\u0000\u0822\u0824"+ + "\u0003\u0158\u00ac\u0000\u0823\u0825\u0003\u01ac\u00d6\u0000\u0824\u0823"+ + "\u0001\u0000\u0000\u0000\u0824\u0825\u0001\u0000\u0000\u0000\u0825\u016b"+ + "\u0001\u0000\u0000\u0000\u0826\u082a\u0003\u016e\u00b7\u0000\u0827\u0829"+ + "\u0003\u016e\u00b7\u0000\u0828\u0827\u0001\u0000\u0000\u0000\u0829\u082c"+ + "\u0001\u0000\u0000\u0000\u082a\u0828\u0001\u0000\u0000\u0000\u082a\u082b"+ + "\u0001\u0000\u0000\u0000\u082b\u016d\u0001\u0000\u0000\u0000\u082c\u082a"+ + "\u0001\u0000\u0000\u0000\u082d\u082e\u0005,\u0000\u0000\u082e\u082f\u0005"+ + "\u00bd\u0000\u0000\u082f\u0834\u0005\u00cd\u0000\u0000\u0830\u0831\u0005"+ + "-\u0000\u0000\u0831\u0832\u0005\u00bd\u0000\u0000\u0832\u0834\u0005\u00cd"+ + "\u0000\u0000\u0833\u082d\u0001\u0000\u0000\u0000\u0833\u0830\u0001\u0000"+ + "\u0000\u0000\u0834\u016f\u0001\u0000\u0000\u0000\u0835\u0836\u0005&\u0000"+ + "\u0000\u0836\u0839\u0005C\u0000\u0000\u0837\u0838\u0005?\u0000\u0000\u0838"+ + "\u083a\u0005.\u0000\u0000\u0839\u0837\u0001\u0000\u0000\u0000\u0839\u083a"+ + 
"\u0001\u0000\u0000\u0000\u083a\u083b\u0001\u0000\u0000\u0000\u083b\u083c"+ + "\u0003\u014e\u00a7\u0000\u083c\u083d\u0005a\u0000\u0000\u083d\u083f\u0003"+ + "\u00fc~\u0000\u083e\u0840\u0005f\u0000\u0000\u083f\u083e\u0001\u0000\u0000"+ + "\u0000\u083f\u0840\u0001\u0000\u0000\u0000\u0840\u0171\u0001\u0000\u0000"+ + "\u0000\u0841\u0844\u0007\r\u0000\u0000\u0842\u0843\u0005\u000e\u0000\u0000"+ + "\u0843\u0845\u0005H\u0000\u0000\u0844\u0842\u0001\u0000\u0000\u0000\u0844"+ + "\u0845\u0001\u0000\u0000\u0000\u0845\u0857\u0001\u0000\u0000\u0000\u0846"+ + "\u0847\u0005~\u0000\u0000\u0847\u0850\u0003\u00fc~\u0000\u0848\u0849\u0005"+ + "\u00af\u0000\u0000\u0849\u084a\u0003\u0174\u00ba\u0000\u084a\u084b\u0005"+ + "\u00b0\u0000\u0000\u084b\u0851\u0001\u0000\u0000\u0000\u084c\u084d\u0005"+ + "\u00af\u0000\u0000\u084d\u084e\u0003\u0174\u00ba\u0000\u084e\u084f\u0006"+ + "\u00b9\uffff\uffff\u0000\u084f\u0851\u0001\u0000\u0000\u0000\u0850\u0848"+ + "\u0001\u0000\u0000\u0000\u0850\u084c\u0001\u0000\u0000\u0000\u0850\u0851"+ + "\u0001\u0000\u0000\u0000\u0851\u0858\u0001\u0000\u0000\u0000\u0852\u0853"+ + "\u0005C\u0000\u0000\u0853\u0854\u0003\u014e\u00a7\u0000\u0854\u0855\u0005"+ + "a\u0000\u0000\u0855\u0856\u0003\u00fc~\u0000\u0856\u0858\u0001\u0000\u0000"+ + "\u0000\u0857\u0846\u0001\u0000\u0000\u0000\u0857\u0852\u0001\u0000\u0000"+ + "\u0000\u0858\u0173\u0001\u0000\u0000\u0000\u0859\u085e\u0003\u0144\u00a2"+ + "\u0000\u085a\u085b\u0005\u00ad\u0000\u0000\u085b\u085d\u0003\u0144\u00a2"+ + "\u0000\u085c\u085a\u0001\u0000\u0000\u0000\u085d\u0860\u0001\u0000\u0000"+ + "\u0000\u085e\u085c\u0001\u0000\u0000\u0000\u085e\u085f\u0001\u0000\u0000"+ + "\u0000\u085f\u0175\u0001\u0000\u0000\u0000\u0860\u085e\u0001\u0000\u0000"+ + "\u0000\u0861\u0864\u0005|\u0000\u0000\u0862\u0863\u0005\u000e\u0000\u0000"+ + "\u0863\u0865\u0005H\u0000\u0000\u0864\u0862\u0001\u0000\u0000\u0000\u0864"+ + "\u0865\u0001\u0000\u0000\u0000\u0865\u0874\u0001\u0000\u0000\u0000\u0866"+ + "\u0875\u0005\u007f\u0000\u0000\u0867\u0875\u0005\u0089\u0000\u0000\u0868"+ + "\u0875\u0005t\u0000\u0000\u0869\u086a\u0005\u0088\u0000\u0000\u086a\u0875"+ + "\u0003\u0186\u00c3\u0000\u086b\u086c\u0005s\u0000\u0000\u086c\u0875\u0003"+ + "\u01c2\u00e1\u0000\u086d\u086e\u0005D\u0000\u0000\u086e\u086f\u0005a\u0000"+ + "\u0000\u086f\u0875\u0003\u00fc~\u0000\u0870\u0871\u0005~\u0000\u0000\u0871"+ + "\u0875\u0003\u00fc~\u0000\u0872\u0875\u0005Z\u0000\u0000\u0873\u0875\u0005"+ + "o\u0000\u0000\u0874\u0866\u0001\u0000\u0000\u0000\u0874\u0867\u0001\u0000"+ + "\u0000\u0000\u0874\u0868\u0001\u0000\u0000\u0000\u0874\u0869\u0001\u0000"+ + "\u0000\u0000\u0874\u086b\u0001\u0000\u0000\u0000\u0874\u086d\u0001\u0000"+ + "\u0000\u0000\u0874\u0870\u0001\u0000\u0000\u0000\u0874\u0872\u0001\u0000"+ + "\u0000\u0000\u0874\u0873\u0001\u0000\u0000\u0000\u0875\u0177\u0001\u0000"+ + "\u0000\u0000\u0876\u0877\u0005\u001b\u0000\u0000\u0877\u0878\u0005\u0088"+ + "\u0000\u0000\u0878\u087a\u0003\u018a\u00c5\u0000\u0879\u087b\u0003\u0192"+ + "\u00c9\u0000\u087a\u0879\u0001\u0000\u0000\u0000\u087a\u087b\u0001\u0000"+ + "\u0000\u0000\u087b\u087d\u0001\u0000\u0000\u0000\u087c\u087e\u0005\b\u0000"+ + "\u0000\u087d\u087c\u0001\u0000\u0000\u0000\u087d\u087e\u0001\u0000\u0000"+ + "\u0000\u087e\u0179\u0001\u0000\u0000\u0000\u087f\u0880\u0005\u001b\u0000"+ + "\u0000\u0880\u0881\u0005s\u0000\u0000\u0881\u0882\u0003\u01c2\u00e1\u0000"+ + "\u0882\u017b\u0001\u0000\u0000\u0000\u0883\u0884\u0005\n\u0000\u0000\u0884"+ + "\u0885\u0005\u0088\u0000\u0000\u0885\u0887\u0003\u0186\u00c3\u0000\u0886"+ + 
"\u0888\u0003\u0190\u00c8\u0000\u0887\u0886\u0001\u0000\u0000\u0000\u0887"+ + "\u0888\u0001\u0000\u0000\u0000\u0888\u088a\u0001\u0000\u0000\u0000\u0889"+ + "\u088b\u0005\u0096\u0000\u0000\u088a\u0889\u0001\u0000\u0000\u0000\u088a"+ + "\u088b\u0001\u0000\u0000\u0000\u088b\u088d\u0001\u0000\u0000\u0000\u088c"+ + "\u088e\u0005\u0094\u0000\u0000\u088d\u088c\u0001\u0000\u0000\u0000\u088d"+ + "\u088e\u0001\u0000\u0000\u0000\u088e\u0890\u0001\u0000\u0000\u0000\u088f"+ + "\u0891\u0003\u018e\u00c7\u0000\u0890\u088f\u0001\u0000\u0000\u0000\u0890"+ + "\u0891\u0001\u0000\u0000\u0000\u0891\u0893\u0001\u0000\u0000\u0000\u0892"+ + "\u0894\u0003\u0192\u00c9\u0000\u0893\u0892\u0001\u0000\u0000\u0000\u0893"+ + "\u0894\u0001\u0000\u0000\u0000\u0894\u017d\u0001\u0000\u0000\u0000\u0895"+ + "\u0896\u0005&\u0000\u0000\u0896\u0897\u0005\u0088\u0000\u0000\u0897\u0899"+ + "\u0003\u0186\u00c3\u0000\u0898\u089a\u0005\u0016\u0000\u0000\u0899\u0898"+ + "\u0001\u0000\u0000\u0000\u0899\u089a\u0001\u0000\u0000\u0000\u089a\u017f"+ + "\u0001\u0000\u0000\u0000\u089b\u089c\u0005&\u0000\u0000\u089c\u089d\u0005"+ + "s\u0000\u0000\u089d\u089e\u0003\u01c2\u00e1\u0000\u089e\u0181\u0001\u0000"+ + "\u0000\u0000\u089f\u08a3\u0005:\u0000\u0000\u08a0\u08a4\u0003\u0194\u00ca"+ + "\u0000\u08a1\u08a4\u0003\u0196\u00cb\u0000\u08a2\u08a4\u0003\u0198\u00cc"+ + "\u0000\u08a3\u08a0\u0001\u0000\u0000\u0000\u08a3\u08a1\u0001\u0000\u0000"+ + "\u0000\u08a3\u08a2\u0001\u0000\u0000\u0000\u08a4\u0183\u0001\u0000\u0000"+ + "\u0000\u08a5\u08a9\u0005r\u0000\u0000\u08a6\u08aa\u0003\u019a\u00cd\u0000"+ + "\u08a7\u08aa\u0003\u019c\u00ce\u0000\u08a8\u08aa\u0003\u019e\u00cf\u0000"+ + "\u08a9\u08a6\u0001\u0000\u0000\u0000\u08a9\u08a7\u0001\u0000\u0000\u0000"+ + "\u08a9\u08a8\u0001\u0000\u0000\u0000\u08aa\u0185\u0001\u0000\u0000\u0000"+ + "\u08ab\u08ae\u0003\u01c2\u00e1\u0000\u08ac\u08ae\u0003\u01be\u00df\u0000"+ + "\u08ad\u08ab\u0001\u0000\u0000\u0000\u08ad\u08ac\u0001\u0000\u0000\u0000"+ + "\u08ae\u0187\u0001\u0000\u0000\u0000\u08af\u08b0\u0005=\u0000\u0000\u08b0"+ + "\u08b1\u0003\u018c\u00c6\u0000\u08b1\u0189\u0001\u0000\u0000\u0000\u08b2"+ + "\u08b3\u0003\u01c2\u00e1\u0000\u08b3\u08b5\u0003\u0188\u00c4\u0000\u08b4"+ + "\u08b6\u0005\u0094\u0000\u0000\u08b5\u08b4\u0001\u0000\u0000\u0000\u08b5"+ + "\u08b6\u0001\u0000\u0000\u0000\u08b6\u08b8\u0001\u0000\u0000\u0000\u08b7"+ + "\u08b9\u0003\u018e\u00c7\u0000\u08b8\u08b7\u0001\u0000\u0000\u0000\u08b8"+ + "\u08b9\u0001\u0000\u0000\u0000\u08b9\u08be\u0001\u0000\u0000\u0000\u08ba"+ + "\u08bb\u0003\u01be\u00df\u0000\u08bb\u08bc\u0005\u0093\u0000\u0000\u08bc"+ + "\u08be\u0001\u0000\u0000\u0000\u08bd\u08b2\u0001\u0000\u0000\u0000\u08bd"+ + "\u08ba\u0001\u0000\u0000\u0000\u08be\u018b\u0001\u0000\u0000\u0000\u08bf"+ + "\u08c0\u0005\u0013\u0000\u0000\u08c0\u08c1\u0003\u01be\u00df\u0000\u08c1"+ + "\u018d\u0001\u0000\u0000\u0000\u08c2\u08c3\u0005g\u0000\u0000\u08c3\u08c4"+ + "\u0005O\u0000\u0000\u08c4\u08c5\u0003\u01b6\u00db\u0000\u08c5\u018f\u0001"+ + "\u0000\u0000\u0000\u08c6\u08c8\u0003\u0188\u00c4\u0000\u08c7\u08c9\u0005"+ + "\u0095\u0000\u0000\u08c8\u08c7\u0001\u0000\u0000\u0000\u08c8\u08c9\u0001"+ + "\u0000\u0000\u0000\u08c9\u0191\u0001\u0000\u0000\u0000\u08ca\u08cb\u0005"+ + "\u0006\u0000\u0000\u08cb\u08cc\u0007\u000e\u0000\u0000\u08cc\u0193\u0001"+ + "\u0000\u0000\u0000\u08cd\u08ce\u0003\u01c0\u00e0\u0000\u08ce\u08cf\u0005"+ + "\u0081\u0000\u0000\u08cf\u08d0\u0003\u01a0\u00d0\u0000\u08d0\u0195\u0001"+ + "\u0000\u0000\u0000\u08d1\u08d2\u0003\u01a2\u00d1\u0000\u08d2\u08d3\u0005"+ + 
"\u0081\u0000\u0000\u08d3\u08d4\u0003\u01c2\u00e1\u0000\u08d4\u0197\u0001"+ + "\u0000\u0000\u0000\u08d5\u08d6\u0003\u01a6\u00d3\u0000\u08d6\u08da\u0005"+ + "a\u0000\u0000\u08d7\u08db\u0003\u01a8\u00d4\u0000\u08d8\u08d9\u0005Y\u0000"+ + "\u0000\u08d9\u08db\u0003\u00fe\u007f\u0000\u08da\u08d7\u0001\u0000\u0000"+ + "\u0000\u08da\u08d8\u0001\u0000\u0000\u0000\u08db\u08dc\u0001\u0000\u0000"+ + "\u0000\u08dc\u08dd\u0005\u0081\u0000\u0000\u08dd\u08de\u0003\u01c2\u00e1"+ + "\u0000\u08de\u0199\u0001\u0000\u0000\u0000\u08df\u08e0\u0003\u01c0\u00e0"+ + "\u0000\u08e0\u08e1\u00056\u0000\u0000\u08e1\u08e2\u0003\u01a0\u00d0\u0000"+ + "\u08e2\u019b\u0001\u0000\u0000\u0000\u08e3\u08e4\u0003\u01a2\u00d1\u0000"+ + "\u08e4\u08e5\u00056\u0000\u0000\u08e5\u08e6\u0003\u01c2\u00e1\u0000\u08e6"+ + "\u019d\u0001\u0000\u0000\u0000\u08e7\u08e8\u0003\u01a6\u00d3\u0000\u08e8"+ + "\u08ec\u0005a\u0000\u0000\u08e9\u08ed\u0003\u01a8\u00d4\u0000\u08ea\u08eb"+ + "\u0005Y\u0000\u0000\u08eb\u08ed\u0003\u00fe\u007f\u0000\u08ec\u08e9\u0001"+ + "\u0000\u0000\u0000\u08ec\u08ea\u0001\u0000\u0000\u0000\u08ed\u08ee\u0001"+ + "\u0000\u0000\u0000\u08ee\u08ef\u00056\u0000\u0000\u08ef\u08f0\u0003\u01c2"+ + "\u00e1\u0000\u08f0\u019f\u0001\u0000\u0000\u0000\u08f1\u08f2\u0005\u0088"+ + "\u0000\u0000\u08f2\u08f6\u0003\u0186\u00c3\u0000\u08f3\u08f4\u0005s\u0000"+ + "\u0000\u08f4\u08f6\u0003\u01c2\u00e1\u0000\u08f5\u08f1\u0001\u0000\u0000"+ + "\u0000\u08f5\u08f3\u0001\u0000\u0000\u0000\u08f6\u01a1\u0001\u0000\u0000"+ + "\u0000\u08f7\u08fc\u0003\u01a4\u00d2\u0000\u08f8\u08f9\u0005\u00ad\u0000"+ + "\u0000\u08f9\u08fb\u0003\u01a4\u00d2\u0000\u08fa\u08f8\u0001\u0000\u0000"+ + "\u0000\u08fb\u08fe\u0001\u0000\u0000\u0000\u08fc\u08fa\u0001\u0000\u0000"+ + "\u0000\u08fc\u08fd\u0001\u0000\u0000\u0000\u08fd\u01a3\u0001\u0000\u0000"+ + "\u0000\u08fe\u08fc\u0001\u0000\u0000\u0000\u08ff\u0902\u0003\u01c2\u00e1"+ + "\u0000\u0900\u0902\u0005\u0092\u0000\u0000\u0901\u08ff\u0001\u0000\u0000"+ + "\u0000\u0901\u0900\u0001\u0000\u0000\u0000\u0902\u01a5\u0001\u0000\u0000"+ + "\u0000\u0903\u0906\u0003\u01a4\u00d2\u0000\u0904\u0906\u0005\t\u0000\u0000"+ + "\u0905\u0903\u0001\u0000\u0000\u0000\u0905\u0904\u0001\u0000\u0000\u0000"+ + "\u0906\u090e\u0001\u0000\u0000\u0000\u0907\u090a\u0005\u00ad\u0000\u0000"+ + "\u0908\u090b\u0003\u01a4\u00d2\u0000\u0909\u090b\u0005\t\u0000\u0000\u090a"+ + "\u0908\u0001\u0000\u0000\u0000\u090a\u0909\u0001\u0000\u0000\u0000\u090b"+ + "\u090d\u0001\u0000\u0000\u0000\u090c\u0907\u0001\u0000\u0000\u0000\u090d"+ + "\u0910\u0001\u0000\u0000\u0000\u090e\u090c\u0001\u0000\u0000\u0000\u090e"+ + "\u090f\u0001\u0000\u0000\u0000\u090f\u01a7\u0001\u0000\u0000\u0000\u0910"+ + "\u090e\u0001\u0000\u0000\u0000\u0911\u0912\u0003\u00fc~\u0000\u0912\u01a9"+ + "\u0001\u0000\u0000\u0000\u0913\u091b\u0003\u01ac\u00d6\u0000\u0914\u091b"+ + "\u0003\u01ae\u00d7\u0000\u0915\u091b\u0003\u01be\u00df\u0000\u0916\u091b"+ + "\u0003\u01ba\u00dd\u0000\u0917\u091b\u0005\u00cc\u0000\u0000\u0918\u091b"+ + "\u0005\u00cb\u0000\u0000\u0919\u091b\u0005\u00ca\u0000\u0000\u091a\u0913"+ + "\u0001\u0000\u0000\u0000\u091a\u0914\u0001\u0000\u0000\u0000\u091a\u0915"+ + "\u0001\u0000\u0000\u0000\u091a\u0916\u0001\u0000\u0000\u0000\u091a\u0917"+ + "\u0001\u0000\u0000\u0000\u091a\u0918\u0001\u0000\u0000\u0000\u091a\u0919"+ + "\u0001\u0000\u0000\u0000\u091b\u01ab\u0001\u0000\u0000\u0000\u091c\u091d"+ + "\u0005\u00b3\u0000\u0000\u091d\u0922\u0003\u01b0\u00d8\u0000\u091e\u091f"+ + "\u0005\u00ad\u0000\u0000\u091f\u0921\u0003\u01b0\u00d8\u0000\u0920\u091e"+ + 
"\u0001\u0000\u0000\u0000\u0921\u0924\u0001\u0000\u0000\u0000\u0922\u0920"+ + "\u0001\u0000\u0000\u0000\u0922\u0923\u0001\u0000\u0000\u0000\u0923\u0925"+ + "\u0001\u0000\u0000\u0000\u0924\u0922\u0001\u0000\u0000\u0000\u0925\u0926"+ + "\u0005\u00b4\u0000\u0000\u0926\u092a\u0001\u0000\u0000\u0000\u0927\u0928"+ + "\u0005\u00b3\u0000\u0000\u0928\u092a\u0005\u00b4\u0000\u0000\u0929\u091c"+ + "\u0001\u0000\u0000\u0000\u0929\u0927\u0001\u0000\u0000\u0000\u092a\u01ad"+ + "\u0001\u0000\u0000\u0000\u092b\u092c\u0005\u00b1\u0000\u0000\u092c\u0931"+ + "\u0003\u01b2\u00d9\u0000\u092d\u092e\u0005\u00ad\u0000\u0000\u092e\u0930"+ + "\u0003\u01b2\u00d9\u0000\u092f\u092d\u0001\u0000\u0000\u0000\u0930\u0933"+ + "\u0001\u0000\u0000\u0000\u0931\u092f\u0001\u0000\u0000\u0000\u0931\u0932"+ + "\u0001\u0000\u0000\u0000\u0932\u0934\u0001\u0000\u0000\u0000\u0933\u0931"+ + "\u0001\u0000\u0000\u0000\u0934\u0935\u0005\u00b2\u0000\u0000\u0935\u0939"+ + "\u0001\u0000\u0000\u0000\u0936\u0937\u0005\u00b1\u0000\u0000\u0937\u0939"+ + "\u0005\u00b2\u0000\u0000\u0938\u092b\u0001\u0000\u0000\u0000\u0938\u0936"+ + "\u0001\u0000\u0000\u0000\u0939\u01af\u0001\u0000\u0000\u0000\u093a\u093b"+ + "\u0005\u00d0\u0000\u0000\u093b\u093c\u0005\u00ae\u0000\u0000\u093c\u093d"+ + "\u0003\u01b2\u00d9\u0000\u093d\u01b1\u0001\u0000\u0000\u0000\u093e\u0946"+ + "\u0003\u01ac\u00d6\u0000\u093f\u0946\u0003\u01ae\u00d7\u0000\u0940\u0946"+ + "\u0005\u00d0\u0000\u0000\u0941\u0946\u0003\u01ba\u00dd\u0000\u0942\u0946"+ + "\u0005\u00cc\u0000\u0000\u0943\u0946\u0005\u00cb\u0000\u0000\u0944\u0946"+ + "\u0005\u00ca\u0000\u0000\u0945\u093e\u0001\u0000\u0000\u0000\u0945\u093f"+ + "\u0001\u0000\u0000\u0000\u0945\u0940\u0001\u0000\u0000\u0000\u0945\u0941"+ + "\u0001\u0000\u0000\u0000\u0945\u0942\u0001\u0000\u0000\u0000\u0945\u0943"+ + "\u0001\u0000\u0000\u0000\u0945\u0944\u0001\u0000\u0000\u0000\u0946\u01b3"+ + "\u0001\u0000\u0000\u0000\u0947\u0948\u0005\u0019\u0000\u0000\u0948\u0949"+ + "\u0003\u01be\u00df\u0000\u0949\u01b5\u0001\u0000\u0000\u0000\u094a\u094b"+ + "\u0005\u00cd\u0000\u0000\u094b\u094c\u0003\u01b8\u00dc\u0000\u094c\u01b7"+ + "\u0001\u0000\u0000\u0000\u094d\u094e\u0007\u000f\u0000\u0000\u094e\u01b9"+ + "\u0001\u0000\u0000\u0000\u094f\u0951\u0005\u00c6\u0000\u0000\u0950\u094f"+ + "\u0001\u0000\u0000\u0000\u0950\u0951\u0001\u0000\u0000\u0000\u0951\u0952"+ + "\u0001\u0000\u0000\u0000\u0952\u0953\u0007\u0010\u0000\u0000\u0953\u01bb"+ + "\u0001\u0000\u0000\u0000\u0954\u0956\u0007\u0003\u0000\u0000\u0955\u0954"+ + "\u0001\u0000\u0000\u0000\u0955\u0956\u0001\u0000\u0000\u0000\u0956\u0957"+ + "\u0001\u0000\u0000\u0000\u0957\u0958\u0005\u00cd\u0000\u0000\u0958\u01bd"+ + "\u0001\u0000\u0000\u0000\u0959\u095a\u0007\u0011\u0000\u0000\u095a\u01bf"+ + "\u0001\u0000\u0000\u0000\u095b\u0960\u0003\u01c2\u00e1\u0000\u095c\u095d"+ + "\u0005\u00ad\u0000\u0000\u095d\u095f\u0003\u01c2\u00e1\u0000\u095e\u095c"+ + "\u0001\u0000\u0000\u0000\u095f\u0962\u0001\u0000\u0000\u0000\u0960\u095e"+ + "\u0001\u0000\u0000\u0000\u0960\u0961\u0001\u0000\u0000\u0000\u0961\u01c1"+ + "\u0001\u0000\u0000\u0000\u0962\u0960\u0001\u0000\u0000\u0000\u0963\u09f9"+ + "\u0005\u0006\u0000\u0000\u0964\u09f9\u0005\u0007\u0000\u0000\u0965\u09f9"+ + "\u0005\b\u0000\u0000\u0966\u09f9\u0005\t\u0000\u0000\u0967\u09f9\u0005"+ + "\n\u0000\u0000\u0968\u09f9\u0005\u000b\u0000\u0000\u0969\u09f9\u0005\f"+ + "\u0000\u0000\u096a\u09f9\u0005\r\u0000\u0000\u096b\u09f9\u0005\u00a7\u0000"+ + "\u0000\u096c\u09f9\u0005\u00a8\u0000\u0000\u096d\u09f9\u0005\u00a9\u0000"+ + 
"\u0000\u096e\u09f9\u0005\u00aa\u0000\u0000\u096f\u09f9\u0005\u0010\u0000"+ + "\u0000\u0970\u09f9\u0005\u000e\u0000\u0000\u0971\u09f9\u0005\u000f\u0000"+ + "\u0000\u0972\u09f9\u0005\u0011\u0000\u0000\u0973\u09f9\u0005\u0012\u0000"+ + "\u0000\u0974\u09f9\u0005\u0013\u0000\u0000\u0975\u09f9\u0005\u0014\u0000"+ + "\u0000\u0976\u09f9\u0005\u0015\u0000\u0000\u0977\u09f9\u0005\u0017\u0000"+ + "\u0000\u0978\u09f9\u0005\u0018\u0000\u0000\u0979\u09f9\u0005\u0019\u0000"+ + "\u0000\u097a\u09f9\u0005\u001a\u0000\u0000\u097b\u09f9\u0005\u001b\u0000"+ + "\u0000\u097c\u09f9\u0005\u001c\u0000\u0000\u097d\u09f9\u0005\u001d\u0000"+ + "\u0000\u097e\u09f9\u0005\u001e\u0000\u0000\u097f\u09f9\u0005\u001f\u0000"+ + "\u0000\u0980\u09f9\u0005 \u0000\u0000\u0981\u09f9\u0005!\u0000\u0000\u0982"+ + "\u09f9\u0005\"\u0000\u0000\u0983\u09f9\u0005#\u0000\u0000\u0984\u09f9"+ + "\u0005$\u0000\u0000\u0985\u09f9\u0005%\u0000\u0000\u0986\u09f9\u0005&"+ + "\u0000\u0000\u0987\u09f9\u0005\'\u0000\u0000\u0988\u09f9\u0005(\u0000"+ + "\u0000\u0989\u09f9\u0005)\u0000\u0000\u098a\u09f9\u0005*\u0000\u0000\u098b"+ + "\u09f9\u0005+\u0000\u0000\u098c\u09f9\u0005,\u0000\u0000\u098d\u09f9\u0005"+ + "-\u0000\u0000\u098e\u09f9\u0005.\u0000\u0000\u098f\u09f9\u0005/\u0000"+ + "\u0000\u0990\u09f9\u00050\u0000\u0000\u0991\u09f9\u00051\u0000\u0000\u0992"+ + "\u09f9\u00055\u0000\u0000\u0993\u09f9\u00056\u0000\u0000\u0994\u09f9\u0005"+ + "7\u0000\u0000\u0995\u09f9\u00058\u0000\u0000\u0996\u09f9\u00059\u0000"+ + "\u0000\u0997\u09f9\u0005:\u0000\u0000\u0998\u09f9\u0005;\u0000\u0000\u0999"+ + "\u09f9\u0005<\u0000\u0000\u099a\u09f9\u0005=\u0000\u0000\u099b\u09f9\u0005"+ + ">\u0000\u0000\u099c\u09f9\u0005?\u0000\u0000\u099d\u09f9\u0005B\u0000"+ + "\u0000\u099e\u09f9\u0005@\u0000\u0000\u099f\u09f9\u0005C\u0000\u0000\u09a0"+ + "\u09f9\u0005D\u0000\u0000\u09a1\u09f9\u0005E\u0000\u0000\u09a2\u09f9\u0005"+ + "F\u0000\u0000\u09a3\u09f9\u0005A\u0000\u0000\u09a4\u09f9\u0005G\u0000"+ + "\u0000\u09a5\u09f9\u0005H\u0000\u0000\u09a6\u09f9\u0005J\u0000\u0000\u09a7"+ + "\u09f9\u0005K\u0000\u0000\u09a8\u09f9\u0005L\u0000\u0000\u09a9\u09f9\u0005"+ + "O\u0000\u0000\u09aa\u09f9\u0005M\u0000\u0000\u09ab\u09f9\u0005P\u0000"+ + "\u0000\u09ac\u09f9\u0005Q\u0000\u0000\u09ad\u09f9\u0005R\u0000\u0000\u09ae"+ + "\u09f9\u0005T\u0000\u0000\u09af\u09f9\u0005U\u0000\u0000\u09b0\u09f9\u0005"+ + "W\u0000\u0000\u09b1\u09b2\u0005X\u0000\u0000\u09b2\u09f9\u0005Y\u0000"+ + "\u0000\u09b3\u09f9\u0005Z\u0000\u0000\u09b4\u09f9\u0005[\u0000\u0000\u09b5"+ + "\u09f9\u0005\\\u0000\u0000\u09b6\u09f9\u0005]\u0000\u0000\u09b7\u09f9"+ + "\u0005^\u0000\u0000\u09b8\u09f9\u0005`\u0000\u0000\u09b9\u09f9\u0005_"+ + "\u0000\u0000\u09ba\u09f9\u0005a\u0000\u0000\u09bb\u09f9\u0005c\u0000\u0000"+ + "\u09bc\u09f9\u0005d\u0000\u0000\u09bd\u09f9\u0005f\u0000\u0000\u09be\u09f9"+ + "\u0005i\u0000\u0000\u09bf\u09f9\u0005g\u0000\u0000\u09c0\u09f9\u0005h"+ + "\u0000\u0000\u09c1\u09f9\u0005l\u0000\u0000\u09c2\u09f9\u0005m\u0000\u0000"+ + "\u09c3\u09f9\u0005\u00c8\u0000\u0000\u09c4\u09f9\u0005n\u0000\u0000\u09c5"+ + "\u09f9\u0005o\u0000\u0000\u09c6\u09f9\u0005p\u0000\u0000\u09c7\u09f9"; private static final String _serializedATNSegment1 = - "\u0000\u09ac\u0a05\u0005?\u0000\u0000\u09ad\u0a05\u0005@\u0000\u0000\u09ae"+ - "\u0a05\u0005A\u0000\u0000\u09af\u0a05\u0005<\u0000\u0000\u09b0\u0a05\u0005"+ - "B\u0000\u0000\u09b1\u0a05\u0005C\u0000\u0000\u09b2\u0a05\u0005E\u0000"+ - "\u0000\u09b3\u0a05\u0005F\u0000\u0000\u09b4\u0a05\u0005G\u0000\u0000\u09b5"+ - 
"\u0a05\u0005J\u0000\u0000\u09b6\u0a05\u0005H\u0000\u0000\u09b7\u0a05\u0005"+ - "K\u0000\u0000\u09b8\u0a05\u0005L\u0000\u0000\u09b9\u0a05\u0005M\u0000"+ - "\u0000\u09ba\u0a05\u0005O\u0000\u0000\u09bb\u0a05\u0005P\u0000\u0000\u09bc"+ - "\u0a05\u0005R\u0000\u0000\u09bd\u09be\u0005S\u0000\u0000\u09be\u0a05\u0005"+ - "T\u0000\u0000\u09bf\u0a05\u0005U\u0000\u0000\u09c0\u0a05\u0005V\u0000"+ - "\u0000\u09c1\u0a05\u0005W\u0000\u0000\u09c2\u0a05\u0005X\u0000\u0000\u09c3"+ - "\u0a05\u0005Y\u0000\u0000\u09c4\u0a05\u0005[\u0000\u0000\u09c5\u0a05\u0005"+ - "Z\u0000\u0000\u09c6\u0a05\u0005\\\u0000\u0000\u09c7\u0a05\u0005^\u0000"+ - "\u0000\u09c8\u0a05\u0005_\u0000\u0000\u09c9\u0a05\u0005a\u0000\u0000\u09ca"+ - "\u0a05\u0005d\u0000\u0000\u09cb\u0a05\u0005b\u0000\u0000\u09cc\u0a05\u0005"+ - "c\u0000\u0000\u09cd\u0a05\u0005g\u0000\u0000\u09ce\u0a05\u0005h\u0000"+ - "\u0000\u09cf\u0a05\u0005\u00c3\u0000\u0000\u09d0\u0a05\u0005i\u0000\u0000"+ - "\u09d1\u0a05\u0005j\u0000\u0000\u09d2\u0a05\u0005k\u0000\u0000\u09d3\u0a05"+ - "\u0005l\u0000\u0000\u09d4\u0a05\u0005p\u0000\u0000\u09d5\u0a05\u0005n"+ - "\u0000\u0000\u09d6\u0a05\u0005o\u0000\u0000\u09d7\u0a05\u0005m\u0000\u0000"+ - "\u09d8\u0a05\u0005q\u0000\u0000\u09d9\u0a05\u0005r\u0000\u0000\u09da\u0a05"+ - "\u0005s\u0000\u0000\u09db\u0a05\u0005t\u0000\u0000\u09dc\u0a05\u0005u"+ - "\u0000\u0000\u09dd\u0a05\u0005v\u0000\u0000\u09de\u0a05\u0005w\u0000\u0000"+ - "\u09df\u0a05\u0005x\u0000\u0000\u09e0\u0a05\u0005y\u0000\u0000\u09e1\u0a05"+ - "\u0005z\u0000\u0000\u09e2\u0a05\u0005{\u0000\u0000\u09e3\u0a05\u0005|"+ - "\u0000\u0000\u09e4\u0a05\u0005}\u0000\u0000\u09e5\u0a05\u0005~\u0000\u0000"+ - "\u09e6\u0a05\u0005\u007f\u0000\u0000\u09e7\u0a05\u0005\u0080\u0000\u0000"+ - "\u09e8\u0a05\u0005\u008a\u0000\u0000\u09e9\u0a05\u0005\u008b\u0000\u0000"+ - "\u09ea\u0a05\u0005\u0081\u0000\u0000\u09eb\u0a05\u0005\u0082\u0000\u0000"+ - "\u09ec\u0a05\u0005\u0083\u0000\u0000\u09ed\u0a05\u0005\u0084\u0000\u0000"+ - "\u09ee\u0a05\u0005\u0085\u0000\u0000\u09ef\u0a05\u0005\u0086\u0000\u0000"+ - "\u09f0\u0a05\u0005\u0087\u0000\u0000\u09f1\u0a05\u0005\u0088\u0000\u0000"+ - "\u09f2\u0a05\u0005\u0089\u0000\u0000\u09f3\u0a05\u0005\u0093\u0000\u0000"+ - "\u09f4\u0a05\u0005\u0094\u0000\u0000\u09f5\u0a05\u0005\u0095\u0000\u0000"+ - "\u09f6\u0a05\u0005\u0096\u0000\u0000\u09f7\u0a05\u0005\u0097\u0000\u0000"+ - "\u09f8\u0a05\u0005\u0098\u0000\u0000\u09f9\u0a05\u0005\u0099\u0000\u0000"+ - "\u09fa\u0a05\u0005\u009b\u0000\u0000\u09fb\u0a05\u0005\u009a\u0000\u0000"+ - "\u09fc\u0a05\u0005\u009c\u0000\u0000\u09fd\u0a05\u0005\u009d\u0000\u0000"+ - "\u09fe\u0a05\u0005\u009e\u0000\u0000\u09ff\u0a05\u0005\u009f\u0000\u0000"+ - "\u0a00\u0a05\u0005\u00a0\u0000\u0000\u0a01\u0a05\u0005\u00a1\u0000\u0000"+ - "\u0a02\u0a05\u0005\u00a6\u0000\u0000\u0a03\u0a05\u0005\u00ce\u0000\u0000"+ - "\u0a04\u0973\u0001\u0000\u0000\u0000\u0a04\u0974\u0001\u0000\u0000\u0000"+ - "\u0a04\u0975\u0001\u0000\u0000\u0000\u0a04\u0976\u0001\u0000\u0000\u0000"+ - "\u0a04\u0977\u0001\u0000\u0000\u0000\u0a04\u0978\u0001\u0000\u0000\u0000"+ - "\u0a04\u0979\u0001\u0000\u0000\u0000\u0a04\u097a\u0001\u0000\u0000\u0000"+ - "\u0a04\u097b\u0001\u0000\u0000\u0000\u0a04\u097c\u0001\u0000\u0000\u0000"+ - "\u0a04\u097d\u0001\u0000\u0000\u0000\u0a04\u097e\u0001\u0000\u0000\u0000"+ - "\u0a04\u097f\u0001\u0000\u0000\u0000\u0a04\u0980\u0001\u0000\u0000\u0000"+ - "\u0a04\u0981\u0001\u0000\u0000\u0000\u0a04\u0982\u0001\u0000\u0000\u0000"+ - "\u0a04\u0983\u0001\u0000\u0000\u0000\u0a04\u0984\u0001\u0000\u0000\u0000"+ - 
"\u0a04\u0985\u0001\u0000\u0000\u0000\u0a04\u0986\u0001\u0000\u0000\u0000"+ - "\u0a04\u0987\u0001\u0000\u0000\u0000\u0a04\u0988\u0001\u0000\u0000\u0000"+ - "\u0a04\u0989\u0001\u0000\u0000\u0000\u0a04\u098a\u0001\u0000\u0000\u0000"+ - "\u0a04\u098b\u0001\u0000\u0000\u0000\u0a04\u098c\u0001\u0000\u0000\u0000"+ - "\u0a04\u098d\u0001\u0000\u0000\u0000\u0a04\u098e\u0001\u0000\u0000\u0000"+ - "\u0a04\u098f\u0001\u0000\u0000\u0000\u0a04\u0990\u0001\u0000\u0000\u0000"+ - "\u0a04\u0991\u0001\u0000\u0000\u0000\u0a04\u0992\u0001\u0000\u0000\u0000"+ - "\u0a04\u0993\u0001\u0000\u0000\u0000\u0a04\u0994\u0001\u0000\u0000\u0000"+ - "\u0a04\u0995\u0001\u0000\u0000\u0000\u0a04\u0996\u0001\u0000\u0000\u0000"+ - "\u0a04\u0997\u0001\u0000\u0000\u0000\u0a04\u0998\u0001\u0000\u0000\u0000"+ - "\u0a04\u0999\u0001\u0000\u0000\u0000\u0a04\u099a\u0001\u0000\u0000\u0000"+ - "\u0a04\u099b\u0001\u0000\u0000\u0000\u0a04\u099c\u0001\u0000\u0000\u0000"+ - "\u0a04\u099d\u0001\u0000\u0000\u0000\u0a04\u099e\u0001\u0000\u0000\u0000"+ - "\u0a04\u099f\u0001\u0000\u0000\u0000\u0a04\u09a0\u0001\u0000\u0000\u0000"+ - "\u0a04\u09a1\u0001\u0000\u0000\u0000\u0a04\u09a2\u0001\u0000\u0000\u0000"+ - "\u0a04\u09a3\u0001\u0000\u0000\u0000\u0a04\u09a4\u0001\u0000\u0000\u0000"+ - "\u0a04\u09a5\u0001\u0000\u0000\u0000\u0a04\u09a6\u0001\u0000\u0000\u0000"+ - "\u0a04\u09a7\u0001\u0000\u0000\u0000\u0a04\u09a8\u0001\u0000\u0000\u0000"+ - "\u0a04\u09a9\u0001\u0000\u0000\u0000\u0a04\u09aa\u0001\u0000\u0000\u0000"+ - "\u0a04\u09ab\u0001\u0000\u0000\u0000\u0a04\u09ac\u0001\u0000\u0000\u0000"+ - "\u0a04\u09ad\u0001\u0000\u0000\u0000\u0a04\u09ae\u0001\u0000\u0000\u0000"+ - "\u0a04\u09af\u0001\u0000\u0000\u0000\u0a04\u09b0\u0001\u0000\u0000\u0000"+ - "\u0a04\u09b1\u0001\u0000\u0000\u0000\u0a04\u09b2\u0001\u0000\u0000\u0000"+ - "\u0a04\u09b3\u0001\u0000\u0000\u0000\u0a04\u09b4\u0001\u0000\u0000\u0000"+ - "\u0a04\u09b5\u0001\u0000\u0000\u0000\u0a04\u09b6\u0001\u0000\u0000\u0000"+ - "\u0a04\u09b7\u0001\u0000\u0000\u0000\u0a04\u09b8\u0001\u0000\u0000\u0000"+ - "\u0a04\u09b9\u0001\u0000\u0000\u0000\u0a04\u09ba\u0001\u0000\u0000\u0000"+ - "\u0a04\u09bb\u0001\u0000\u0000\u0000\u0a04\u09bc\u0001\u0000\u0000\u0000"+ - "\u0a04\u09bd\u0001\u0000\u0000\u0000\u0a04\u09bf\u0001\u0000\u0000\u0000"+ - "\u0a04\u09c0\u0001\u0000\u0000\u0000\u0a04\u09c1\u0001\u0000\u0000\u0000"+ - "\u0a04\u09c2\u0001\u0000\u0000\u0000\u0a04\u09c3\u0001\u0000\u0000\u0000"+ - "\u0a04\u09c4\u0001\u0000\u0000\u0000\u0a04\u09c5\u0001\u0000\u0000\u0000"+ - "\u0a04\u09c6\u0001\u0000\u0000\u0000\u0a04\u09c7\u0001\u0000\u0000\u0000"+ - "\u0a04\u09c8\u0001\u0000\u0000\u0000\u0a04\u09c9\u0001\u0000\u0000\u0000"+ - "\u0a04\u09ca\u0001\u0000\u0000\u0000\u0a04\u09cb\u0001\u0000\u0000\u0000"+ - "\u0a04\u09cc\u0001\u0000\u0000\u0000\u0a04\u09cd\u0001\u0000\u0000\u0000"+ - "\u0a04\u09ce\u0001\u0000\u0000\u0000\u0a04\u09cf\u0001\u0000\u0000\u0000"+ - "\u0a04\u09d0\u0001\u0000\u0000\u0000\u0a04\u09d1\u0001\u0000\u0000\u0000"+ - "\u0a04\u09d2\u0001\u0000\u0000\u0000\u0a04\u09d3\u0001\u0000\u0000\u0000"+ - "\u0a04\u09d4\u0001\u0000\u0000\u0000\u0a04\u09d5\u0001\u0000\u0000\u0000"+ - "\u0a04\u09d6\u0001\u0000\u0000\u0000\u0a04\u09d7\u0001\u0000\u0000\u0000"+ - "\u0a04\u09d8\u0001\u0000\u0000\u0000\u0a04\u09d9\u0001\u0000\u0000\u0000"+ - "\u0a04\u09da\u0001\u0000\u0000\u0000\u0a04\u09db\u0001\u0000\u0000\u0000"+ - "\u0a04\u09dc\u0001\u0000\u0000\u0000\u0a04\u09dd\u0001\u0000\u0000\u0000"+ - "\u0a04\u09de\u0001\u0000\u0000\u0000\u0a04\u09df\u0001\u0000\u0000\u0000"+ - 
"\u0a04\u09e0\u0001\u0000\u0000\u0000\u0a04\u09e1\u0001\u0000\u0000\u0000"+ - "\u0a04\u09e2\u0001\u0000\u0000\u0000\u0a04\u09e3\u0001\u0000\u0000\u0000"+ - "\u0a04\u09e4\u0001\u0000\u0000\u0000\u0a04\u09e5\u0001\u0000\u0000\u0000"+ - "\u0a04\u09e6\u0001\u0000\u0000\u0000\u0a04\u09e7\u0001\u0000\u0000\u0000"+ - "\u0a04\u09e8\u0001\u0000\u0000\u0000\u0a04\u09e9\u0001\u0000\u0000\u0000"+ - "\u0a04\u09ea\u0001\u0000\u0000\u0000\u0a04\u09eb\u0001\u0000\u0000\u0000"+ - "\u0a04\u09ec\u0001\u0000\u0000\u0000\u0a04\u09ed\u0001\u0000\u0000\u0000"+ - "\u0a04\u09ee\u0001\u0000\u0000\u0000\u0a04\u09ef\u0001\u0000\u0000\u0000"+ - "\u0a04\u09f0\u0001\u0000\u0000\u0000\u0a04\u09f1\u0001\u0000\u0000\u0000"+ - "\u0a04\u09f2\u0001\u0000\u0000\u0000\u0a04\u09f3\u0001\u0000\u0000\u0000"+ - "\u0a04\u09f4\u0001\u0000\u0000\u0000\u0a04\u09f5\u0001\u0000\u0000\u0000"+ - "\u0a04\u09f6\u0001\u0000\u0000\u0000\u0a04\u09f7\u0001\u0000\u0000\u0000"+ - "\u0a04\u09f8\u0001\u0000\u0000\u0000\u0a04\u09f9\u0001\u0000\u0000\u0000"+ - "\u0a04\u09fa\u0001\u0000\u0000\u0000\u0a04\u09fb\u0001\u0000\u0000\u0000"+ - "\u0a04\u09fc\u0001\u0000\u0000\u0000\u0a04\u09fd\u0001\u0000\u0000\u0000"+ - "\u0a04\u09fe\u0001\u0000\u0000\u0000\u0a04\u09ff\u0001\u0000\u0000\u0000"+ - "\u0a04\u0a00\u0001\u0000\u0000\u0000\u0a04\u0a01\u0001\u0000\u0000\u0000"+ - "\u0a04\u0a02\u0001\u0000\u0000\u0000\u0a04\u0a03\u0001\u0000\u0000\u0000"+ - "\u0a05\u0a09\u0001\u0000\u0000\u0000\u0a06\u0a07\u0005\u00cf\u0000\u0000"+ - "\u0a07\u0a09\u0006\u00dc\uffff\uffff\u0000\u0a08\u0a04\u0001\u0000\u0000"+ - "\u0000\u0a08\u0a06\u0001\u0000\u0000\u0000\u0a09\u01b9\u0001\u0000\u0000"+ - "\u0000\u0117\u01d6\u01d9\u01e5\u01f3\u01f6\u01f9\u01fc\u01ff\u0207\u020d"+ - "\u0212\u0216\u021c\u0227\u022e\u0237\u023f\u0247\u0250\u0254\u0257\u025b"+ - "\u0261\u0268\u026e\u027a\u027d\u0288\u028b\u0291\u029c\u02b1\u02b4\u02b8"+ - "\u02c4\u02c8\u02cc\u02d5\u02e6\u02f1\u02f5\u02fc\u02ff\u0306\u0311\u0315"+ - "\u031f\u0324\u032e\u0338\u0343\u0350\u035b\u0360\u036b\u036f\u0373\u0378"+ - "\u037d\u0387\u038f\u0397\u039d\u03a2\u03a4\u03aa\u03b1\u03b6\u03bc\u03c0"+ - "\u03c4\u03ca\u03dc\u03e2\u03e4\u03eb\u03f1\u03f7\u0407\u040e\u041c\u0428"+ - "\u042b\u0446\u044b\u0462\u0468\u046b\u0473\u0478\u0481\u0488\u048b\u048f"+ - "\u0496\u049e\u04a1\u04a6\u04a9\u04b0\u04b6\u04c0\u04c4\u04cc\u04d0\u04d8"+ - "\u04dc\u04e4\u04e8\u04f1\u04f5\u04ff\u0502\u0509\u050d\u0510\u0513\u0518"+ - "\u051c\u052b\u0533\u053a\u0540\u0543\u0547\u054a\u0551\u0562\u056b\u0573"+ - "\u0576\u057a\u057e\u0580\u0588\u05a9\u05b1\u05b7\u05c6\u05ce\u05d2\u05db"+ - "\u05e0\u05e7\u05ef\u05f3\u0609\u060d\u0617\u0620\u0626\u062a\u0634\u0637"+ - "\u063f\u064d\u0652\u0656\u065e\u0660\u0663\u0670\u0677\u067c\u0683\u0686"+ - "\u0689\u068c\u068f\u0692\u0695\u0698\u069b\u069e\u06a1\u06a4\u06a7\u06aa"+ - "\u06ad\u06b0\u06b3\u06b6\u06b9\u06bc\u06be\u06d6\u06e2\u06e4\u06ec\u06f0"+ - "\u0707\u0711\u071e\u0723\u072c\u0732\u0736\u0743\u0746\u074f\u0752\u0758"+ - "\u075f\u0767\u076f\u0775\u077b\u0784\u078e\u0791\u0798\u079e\u07a1\u07aa"+ - "\u07af\u07b2\u07b7\u07ba\u07bd\u07c5\u07ca\u07d1\u07dd\u07e8\u07ea\u07f7"+ - "\u0806\u0809\u0811\u0818\u081b\u081e\u0828\u082f\u0834\u083a\u0843\u0849"+ - "\u084f\u0854\u0860\u0867\u086e\u0874\u0884\u088a\u088d\u0897\u089a\u089d"+ - "\u08a0\u08a3\u08a9\u08b3\u08b9\u08bd\u08c5\u08c8\u08cd\u08d8\u08ea\u08fc"+ - "\u0905\u090c\u0911\u0915\u091a\u091e\u092a\u0932\u0939\u0941\u0948\u0955"+ - "\u0960\u0965\u0970\u0a04\u0a08"; + "\u0005q\u0000\u0000\u09c8\u09f9\u0005u\u0000\u0000\u09c9\u09f9\u0005s"+ + 
"\u0000\u0000\u09ca\u09f9\u0005t\u0000\u0000\u09cb\u09f9\u0005r\u0000\u0000"+ + "\u09cc\u09f9\u0005v\u0000\u0000\u09cd\u09f9\u0005w\u0000\u0000\u09ce\u09f9"+ + "\u0005x\u0000\u0000\u09cf\u09f9\u0005y\u0000\u0000\u09d0\u09f9\u0005z"+ + "\u0000\u0000\u09d1\u09f9\u0005{\u0000\u0000\u09d2\u09f9\u0005|\u0000\u0000"+ + "\u09d3\u09f9\u0005}\u0000\u0000\u09d4\u09f9\u0005~\u0000\u0000\u09d5\u09f9"+ + "\u0005\u007f\u0000\u0000\u09d6\u09f9\u0005\u0080\u0000\u0000\u09d7\u09f9"+ + "\u0005\u0081\u0000\u0000\u09d8\u09f9\u0005\u0082\u0000\u0000\u09d9\u09f9"+ + "\u0005\u0083\u0000\u0000\u09da\u09f9\u0005\u0084\u0000\u0000\u09db\u09f9"+ + "\u0005\u0085\u0000\u0000\u09dc\u09f9\u0005\u008f\u0000\u0000\u09dd\u09f9"+ + "\u0005\u0090\u0000\u0000\u09de\u09f9\u0005\u0086\u0000\u0000\u09df\u09f9"+ + "\u0005\u0087\u0000\u0000\u09e0\u09f9\u0005\u0088\u0000\u0000\u09e1\u09f9"+ + "\u0005\u0089\u0000\u0000\u09e2\u09f9\u0005\u008a\u0000\u0000\u09e3\u09f9"+ + "\u0005\u008b\u0000\u0000\u09e4\u09f9\u0005\u008c\u0000\u0000\u09e5\u09f9"+ + "\u0005\u008d\u0000\u0000\u09e6\u09f9\u0005\u008e\u0000\u0000\u09e7\u09f9"+ + "\u0005\u0098\u0000\u0000\u09e8\u09f9\u0005\u0099\u0000\u0000\u09e9\u09f9"+ + "\u0005\u009a\u0000\u0000\u09ea\u09f9\u0005\u009b\u0000\u0000\u09eb\u09f9"+ + "\u0005\u009c\u0000\u0000\u09ec\u09f9\u0005\u009d\u0000\u0000\u09ed\u09f9"+ + "\u0005\u009e\u0000\u0000\u09ee\u09f9\u0005\u00a0\u0000\u0000\u09ef\u09f9"+ + "\u0005\u009f\u0000\u0000\u09f0\u09f9\u0005\u00a1\u0000\u0000\u09f1\u09f9"+ + "\u0005\u00a2\u0000\u0000\u09f2\u09f9\u0005\u00a3\u0000\u0000\u09f3\u09f9"+ + "\u0005\u00a4\u0000\u0000\u09f4\u09f9\u0005\u00a5\u0000\u0000\u09f5\u09f9"+ + "\u0005\u00a6\u0000\u0000\u09f6\u09f9\u0005\u00ab\u0000\u0000\u09f7\u09f9"+ + "\u0005\u00d3\u0000\u0000\u09f8\u0963\u0001\u0000\u0000\u0000\u09f8\u0964"+ + "\u0001\u0000\u0000\u0000\u09f8\u0965\u0001\u0000\u0000\u0000\u09f8\u0966"+ + "\u0001\u0000\u0000\u0000\u09f8\u0967\u0001\u0000\u0000\u0000\u09f8\u0968"+ + "\u0001\u0000\u0000\u0000\u09f8\u0969\u0001\u0000\u0000\u0000\u09f8\u096a"+ + "\u0001\u0000\u0000\u0000\u09f8\u096b\u0001\u0000\u0000\u0000\u09f8\u096c"+ + "\u0001\u0000\u0000\u0000\u09f8\u096d\u0001\u0000\u0000\u0000\u09f8\u096e"+ + "\u0001\u0000\u0000\u0000\u09f8\u096f\u0001\u0000\u0000\u0000\u09f8\u0970"+ + "\u0001\u0000\u0000\u0000\u09f8\u0971\u0001\u0000\u0000\u0000\u09f8\u0972"+ + "\u0001\u0000\u0000\u0000\u09f8\u0973\u0001\u0000\u0000\u0000\u09f8\u0974"+ + "\u0001\u0000\u0000\u0000\u09f8\u0975\u0001\u0000\u0000\u0000\u09f8\u0976"+ + "\u0001\u0000\u0000\u0000\u09f8\u0977\u0001\u0000\u0000\u0000\u09f8\u0978"+ + "\u0001\u0000\u0000\u0000\u09f8\u0979\u0001\u0000\u0000\u0000\u09f8\u097a"+ + "\u0001\u0000\u0000\u0000\u09f8\u097b\u0001\u0000\u0000\u0000\u09f8\u097c"+ + "\u0001\u0000\u0000\u0000\u09f8\u097d\u0001\u0000\u0000\u0000\u09f8\u097e"+ + "\u0001\u0000\u0000\u0000\u09f8\u097f\u0001\u0000\u0000\u0000\u09f8\u0980"+ + "\u0001\u0000\u0000\u0000\u09f8\u0981\u0001\u0000\u0000\u0000\u09f8\u0982"+ + "\u0001\u0000\u0000\u0000\u09f8\u0983\u0001\u0000\u0000\u0000\u09f8\u0984"+ + "\u0001\u0000\u0000\u0000\u09f8\u0985\u0001\u0000\u0000\u0000\u09f8\u0986"+ + "\u0001\u0000\u0000\u0000\u09f8\u0987\u0001\u0000\u0000\u0000\u09f8\u0988"+ + "\u0001\u0000\u0000\u0000\u09f8\u0989\u0001\u0000\u0000\u0000\u09f8\u098a"+ + "\u0001\u0000\u0000\u0000\u09f8\u098b\u0001\u0000\u0000\u0000\u09f8\u098c"+ + "\u0001\u0000\u0000\u0000\u09f8\u098d\u0001\u0000\u0000\u0000\u09f8\u098e"+ + "\u0001\u0000\u0000\u0000\u09f8\u098f\u0001\u0000\u0000\u0000\u09f8\u0990"+ + 
"\u0001\u0000\u0000\u0000\u09f8\u0991\u0001\u0000\u0000\u0000\u09f8\u0992"+ + "\u0001\u0000\u0000\u0000\u09f8\u0993\u0001\u0000\u0000\u0000\u09f8\u0994"+ + "\u0001\u0000\u0000\u0000\u09f8\u0995\u0001\u0000\u0000\u0000\u09f8\u0996"+ + "\u0001\u0000\u0000\u0000\u09f8\u0997\u0001\u0000\u0000\u0000\u09f8\u0998"+ + "\u0001\u0000\u0000\u0000\u09f8\u0999\u0001\u0000\u0000\u0000\u09f8\u099a"+ + "\u0001\u0000\u0000\u0000\u09f8\u099b\u0001\u0000\u0000\u0000\u09f8\u099c"+ + "\u0001\u0000\u0000\u0000\u09f8\u099d\u0001\u0000\u0000\u0000\u09f8\u099e"+ + "\u0001\u0000\u0000\u0000\u09f8\u099f\u0001\u0000\u0000\u0000\u09f8\u09a0"+ + "\u0001\u0000\u0000\u0000\u09f8\u09a1\u0001\u0000\u0000\u0000\u09f8\u09a2"+ + "\u0001\u0000\u0000\u0000\u09f8\u09a3\u0001\u0000\u0000\u0000\u09f8\u09a4"+ + "\u0001\u0000\u0000\u0000\u09f8\u09a5\u0001\u0000\u0000\u0000\u09f8\u09a6"+ + "\u0001\u0000\u0000\u0000\u09f8\u09a7\u0001\u0000\u0000\u0000\u09f8\u09a8"+ + "\u0001\u0000\u0000\u0000\u09f8\u09a9\u0001\u0000\u0000\u0000\u09f8\u09aa"+ + "\u0001\u0000\u0000\u0000\u09f8\u09ab\u0001\u0000\u0000\u0000\u09f8\u09ac"+ + "\u0001\u0000\u0000\u0000\u09f8\u09ad\u0001\u0000\u0000\u0000\u09f8\u09ae"+ + "\u0001\u0000\u0000\u0000\u09f8\u09af\u0001\u0000\u0000\u0000\u09f8\u09b0"+ + "\u0001\u0000\u0000\u0000\u09f8\u09b1\u0001\u0000\u0000\u0000\u09f8\u09b3"+ + "\u0001\u0000\u0000\u0000\u09f8\u09b4\u0001\u0000\u0000\u0000\u09f8\u09b5"+ + "\u0001\u0000\u0000\u0000\u09f8\u09b6\u0001\u0000\u0000\u0000\u09f8\u09b7"+ + "\u0001\u0000\u0000\u0000\u09f8\u09b8\u0001\u0000\u0000\u0000\u09f8\u09b9"+ + "\u0001\u0000\u0000\u0000\u09f8\u09ba\u0001\u0000\u0000\u0000\u09f8\u09bb"+ + "\u0001\u0000\u0000\u0000\u09f8\u09bc\u0001\u0000\u0000\u0000\u09f8\u09bd"+ + "\u0001\u0000\u0000\u0000\u09f8\u09be\u0001\u0000\u0000\u0000\u09f8\u09bf"+ + "\u0001\u0000\u0000\u0000\u09f8\u09c0\u0001\u0000\u0000\u0000\u09f8\u09c1"+ + "\u0001\u0000\u0000\u0000\u09f8\u09c2\u0001\u0000\u0000\u0000\u09f8\u09c3"+ + "\u0001\u0000\u0000\u0000\u09f8\u09c4\u0001\u0000\u0000\u0000\u09f8\u09c5"+ + "\u0001\u0000\u0000\u0000\u09f8\u09c6\u0001\u0000\u0000\u0000\u09f8\u09c7"+ + "\u0001\u0000\u0000\u0000\u09f8\u09c8\u0001\u0000\u0000\u0000\u09f8\u09c9"+ + "\u0001\u0000\u0000\u0000\u09f8\u09ca\u0001\u0000\u0000\u0000\u09f8\u09cb"+ + "\u0001\u0000\u0000\u0000\u09f8\u09cc\u0001\u0000\u0000\u0000\u09f8\u09cd"+ + "\u0001\u0000\u0000\u0000\u09f8\u09ce\u0001\u0000\u0000\u0000\u09f8\u09cf"+ + "\u0001\u0000\u0000\u0000\u09f8\u09d0\u0001\u0000\u0000\u0000\u09f8\u09d1"+ + "\u0001\u0000\u0000\u0000\u09f8\u09d2\u0001\u0000\u0000\u0000\u09f8\u09d3"+ + "\u0001\u0000\u0000\u0000\u09f8\u09d4\u0001\u0000\u0000\u0000\u09f8\u09d5"+ + "\u0001\u0000\u0000\u0000\u09f8\u09d6\u0001\u0000\u0000\u0000\u09f8\u09d7"+ + "\u0001\u0000\u0000\u0000\u09f8\u09d8\u0001\u0000\u0000\u0000\u09f8\u09d9"+ + "\u0001\u0000\u0000\u0000\u09f8\u09da\u0001\u0000\u0000\u0000\u09f8\u09db"+ + "\u0001\u0000\u0000\u0000\u09f8\u09dc\u0001\u0000\u0000\u0000\u09f8\u09dd"+ + "\u0001\u0000\u0000\u0000\u09f8\u09de\u0001\u0000\u0000\u0000\u09f8\u09df"+ + "\u0001\u0000\u0000\u0000\u09f8\u09e0\u0001\u0000\u0000\u0000\u09f8\u09e1"+ + "\u0001\u0000\u0000\u0000\u09f8\u09e2\u0001\u0000\u0000\u0000\u09f8\u09e3"+ + "\u0001\u0000\u0000\u0000\u09f8\u09e4\u0001\u0000\u0000\u0000\u09f8\u09e5"+ + "\u0001\u0000\u0000\u0000\u09f8\u09e6\u0001\u0000\u0000\u0000\u09f8\u09e7"+ + "\u0001\u0000\u0000\u0000\u09f8\u09e8\u0001\u0000\u0000\u0000\u09f8\u09e9"+ + "\u0001\u0000\u0000\u0000\u09f8\u09ea\u0001\u0000\u0000\u0000\u09f8\u09eb"+ + 
"\u0001\u0000\u0000\u0000\u09f8\u09ec\u0001\u0000\u0000\u0000\u09f8\u09ed"+ + "\u0001\u0000\u0000\u0000\u09f8\u09ee\u0001\u0000\u0000\u0000\u09f8\u09ef"+ + "\u0001\u0000\u0000\u0000\u09f8\u09f0\u0001\u0000\u0000\u0000\u09f8\u09f1"+ + "\u0001\u0000\u0000\u0000\u09f8\u09f2\u0001\u0000\u0000\u0000\u09f8\u09f3"+ + "\u0001\u0000\u0000\u0000\u09f8\u09f4\u0001\u0000\u0000\u0000\u09f8\u09f5"+ + "\u0001\u0000\u0000\u0000\u09f8\u09f6\u0001\u0000\u0000\u0000\u09f8\u09f7"+ + "\u0001\u0000\u0000\u0000\u09f9\u09fd\u0001\u0000\u0000\u0000\u09fa\u09fb"+ + "\u0005\u00d4\u0000\u0000\u09fb\u09fd\u0006\u00e1\uffff\uffff\u0000\u09fc"+ + "\u09f8\u0001\u0000\u0000\u0000\u09fc\u09fa\u0001\u0000\u0000\u0000\u09fd"+ + "\u01c3\u0001\u0000\u0000\u0000\u0109\u01e0\u01e3\u01ef\u01fd\u0200\u0203"+ + "\u0206\u0209\u0211\u0217\u021c\u0220\u0226\u0231\u0238\u0241\u0249\u0251"+ + "\u025a\u025e\u0261\u0265\u026b\u0272\u0278\u0284\u0287\u0292\u0295\u029b"+ + "\u02a6\u02bb\u02be\u02c2\u02ce\u02d2\u02d6\u02df\u02f0\u02fb\u02ff\u0306"+ + "\u0309\u0310\u031b\u031f\u0329\u032e\u0338\u0342\u034d\u035a\u0365\u036a"+ + "\u0375\u0379\u037d\u0382\u0387\u0391\u0399\u03a1\u03a7\u03ac\u03ae\u03b4"+ + "\u03bb\u03c0\u03c6\u03ca\u03ce\u03d4\u03e6\u03ec\u03ee\u03f5\u03fb\u0401"+ + "\u0411\u0418\u0426\u0432\u0435\u0450\u0455\u046c\u0472\u0475\u047d\u0482"+ + "\u048b\u0492\u0495\u0499\u04a0\u04a8\u04ab\u04b0\u04b3\u04ba\u04c0\u04ca"+ + "\u04ce\u04d6\u04da\u04e2\u04e6\u04ee\u04f2\u04fb\u04ff\u0509\u050c\u0513"+ + "\u0517\u051a\u051d\u0522\u0526\u0535\u053d\u0544\u054a\u054d\u0551\u0554"+ + "\u055b\u056c\u0575\u057d\u0580\u0584\u0588\u058a\u0592\u05b3\u05bb\u05c1"+ + "\u05d0\u05d8\u05dc\u05e5\u05ea\u05f1\u05f9\u05fd\u0613\u0617\u061d\u0622"+ + "\u062b\u0631\u0635\u063f\u0642\u064a\u0658\u065d\u0661\u0669\u066b\u066e"+ + "\u067b\u0682\u0687\u0692\u0694\u06a4\u06ae\u06be\u06c0\u06c8\u06cc\u06e3"+ + "\u06ed\u06fc\u0701\u0712\u0718\u071c\u0729\u072c\u0735\u0738\u073e\u0745"+ + "\u074d\u0755\u075b\u0761\u076a\u0774\u0777\u077e\u0784\u0787\u0790\u0795"+ + "\u0798\u079d\u07a0\u07a3\u07ab\u07ae\u07b3\u07b5\u07b8\u07bf\u07cb\u07d8"+ + "\u07da\u07e7\u07f6\u07f9\u0801\u0808\u080b\u080e\u0818\u081f\u0824\u082a"+ + "\u0833\u0839\u083f\u0844\u0850\u0857\u085e\u0864\u0874\u087a\u087d\u0887"+ + "\u088a\u088d\u0890\u0893\u0899\u08a3\u08a9\u08ad\u08b5\u08b8\u08bd\u08c8"+ + "\u08da\u08ec\u08f5\u08fc\u0901\u0905\u090a\u090e\u091a\u0922\u0929\u0931"+ + "\u0938\u0945\u0950\u0955\u0960\u09f8\u09fc"; public static final String _serializedATN = Utils.join( new String[] { _serializedATNSegment0, diff --git a/kvmain/src/main/java/oracle/kv/impl/query/runtime/FuncCountIter.java b/kvmain/src/main/java/oracle/kv/impl/query/runtime/FuncCountIter.java index 0ce8fceb..9ad0a2bd 100644 --- a/kvmain/src/main/java/oracle/kv/impl/query/runtime/FuncCountIter.java +++ b/kvmain/src/main/java/oracle/kv/impl/query/runtime/FuncCountIter.java @@ -16,7 +16,6 @@ import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; -import java.util.Objects; import oracle.kv.impl.api.table.DisplayFormatter; import oracle.kv.impl.api.table.FieldDefImpl; @@ -165,22 +164,4 @@ protected void displayContent( boolean verbose) { displayInputIter(sb, formatter, verbose, theInput); } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (!super.equals(obj) || !(obj instanceof FuncCountIter)) { - return false; - } - final FuncCountIter other = (FuncCountIter) obj; - return Objects.equals(theInput, other.theInput) && - (theFuncCode == 
other.theFuncCode); - } - - @Override - public int hashCode() { - return Objects.hash(super.hashCode(), theInput, theFuncCode); - } } diff --git a/kvmain/src/main/java/oracle/kv/impl/query/runtime/FuncCreationTimeIter.java b/kvmain/src/main/java/oracle/kv/impl/query/runtime/FuncCreationTimeIter.java new file mode 100644 index 00000000..b5712d1b --- /dev/null +++ b/kvmain/src/main/java/oracle/kv/impl/query/runtime/FuncCreationTimeIter.java @@ -0,0 +1,153 @@ +/*- + * Copyright (C) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle NoSQL + * Database made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/nosqldb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle NoSQL Database for a copy of the license and + * additional information. + */ + +package oracle.kv.impl.query.runtime; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.sql.Timestamp; + +import oracle.kv.impl.api.table.DisplayFormatter; +import oracle.kv.impl.api.table.FieldDefImpl; +import oracle.kv.impl.api.table.FieldValueImpl; +import oracle.kv.impl.api.table.NullValueImpl; +import oracle.kv.impl.api.table.RowImpl; +import oracle.kv.impl.api.table.TimestampValueImpl; +import oracle.kv.impl.api.table.TupleValue; +import oracle.kv.impl.query.QueryException; +import oracle.kv.impl.query.compiler.Expr; + +/** + * Runtime implementation of creation_time function. + */ +public class FuncCreationTimeIter extends SingleInputPlanIter { + + private final PlanIter theInput; + + public FuncCreationTimeIter(Expr e, int resultReg, PlanIter input) { + super(e, resultReg); + theInput = input; + } + + /** + * FastExternalizable constructor. + */ + FuncCreationTimeIter(DataInput in, short serialVersion) throws + IOException { + super(in, serialVersion); + theInput = deserializeIter(in, serialVersion); + } + + /** + * FastExternalizable writer. Must call superclass method first to + * write common elements. 
+ */ + @Override + public void writeFastExternal(DataOutput out, short serialVersion) + throws IOException { + + super.writeFastExternal(out, serialVersion); + } + + @Override + protected PlanIter getInput() { + return theInput; + } + + @Override + public PlanIterKind getKind() { + return PlanIterKind.FUNC_CREATION_TIME; + } + + @Override + public void open(RuntimeControlBlock rcb) { + rcb.setState(theStatePos, new PlanIterState()); + theInput.open(rcb); + } + + @Override + public boolean next(RuntimeControlBlock rcb) { + + PlanIterState state = rcb.getState(theStatePos); + + if (state.isDone()) { + return false; + } + + boolean more = theInput.next(rcb); + + if (!more) { + state.done(); + return false; + } + + FieldValueImpl row = rcb.getRegVal(theInput.getResultReg()); + long creationTime; + + /* row may be null in case it is a row of a non-target table in a + * join query*/ + if (row == NullValueImpl.getInstance()) { + rcb.setRegVal(theResultReg, row); + state.done(); + return true; + } + + if (row.isTuple()) { + creationTime = ((TupleValue)row).getCreationTime(); + } else if (row.isRecord()) { + creationTime = ((RowImpl)row).getCreationTime(); + } else if (row.isJsonRowMap()) { + creationTime = row.asJsonRowMap().getCreationTime(); + } else { + throw new QueryException( + "Input to the creation_time() function is not a row", + getLocation()); + } + + TimestampValueImpl tsv = FieldDefImpl.Constants.timestampDefs[3]. + createTimestamp(new Timestamp(creationTime)); + + rcb.setRegVal(theResultReg, tsv); + state.done(); + return true; + } + + @Override + public void reset(RuntimeControlBlock rcb) { + + theInput.reset(rcb); + PlanIterState state = rcb.getState(theStatePos); + state.reset(this); + } + + @Override + public void close(RuntimeControlBlock rcb) { + + PlanIterState state = rcb.getState(theStatePos); + if (state == null) { + return; + } + + theInput.close(rcb); + state.close(); + } + + @Override + protected void displayContent( + StringBuilder sb, + DisplayFormatter formatter, + boolean verbose) { + displayInputIter(sb, formatter, verbose, theInput); + } +} diff --git a/kvmain/src/main/java/oracle/kv/impl/query/runtime/FuncCreationTimeMillisIter.java b/kvmain/src/main/java/oracle/kv/impl/query/runtime/FuncCreationTimeMillisIter.java new file mode 100644 index 00000000..5922c019 --- /dev/null +++ b/kvmain/src/main/java/oracle/kv/impl/query/runtime/FuncCreationTimeMillisIter.java @@ -0,0 +1,144 @@ +/*- + * Copyright (C) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle NoSQL + * Database made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/nosqldb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle NoSQL Database for a copy of the license and + * additional information. + */ + +package oracle.kv.impl.query.runtime; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +import oracle.kv.impl.api.table.DisplayFormatter; +import oracle.kv.impl.api.table.FieldDefImpl; +import oracle.kv.impl.api.table.FieldValueImpl; +import oracle.kv.impl.api.table.NullValueImpl; +import oracle.kv.impl.api.table.RowImpl; +import oracle.kv.impl.api.table.TupleValue; +import oracle.kv.impl.query.QueryException; +import oracle.kv.impl.query.compiler.Expr; + +/** + * Runtime implementation of creation_time_millis function. 
+ */ +public class FuncCreationTimeMillisIter extends SingleInputPlanIter { + + private final PlanIter theInput; + + public FuncCreationTimeMillisIter(Expr e, int resultReg, PlanIter input) { + super(e, resultReg); + theInput = input; + } + + /** + * FastExternalizable constructor. + */ + FuncCreationTimeMillisIter(DataInput in, short serialVersion) + throws IOException { + super(in, serialVersion); + theInput = deserializeIter(in, serialVersion); + } + + /** + * FastExternalizable writer. Must call superclass method first to + * write common elements. + */ + @Override + public void writeFastExternal(DataOutput out, short serialVersion) + throws IOException { + + super.writeFastExternal(out, serialVersion); + } + + @Override + protected PlanIter getInput() { + return theInput; + } + + @Override + public PlanIterKind getKind() { + return PlanIterKind.FUNC_CREATION_TIME_MILLIS; + } + + @Override + public void open(RuntimeControlBlock rcb) { + rcb.setState(theStatePos, new PlanIterState()); + theInput.open(rcb); + } + + @Override + public boolean next(RuntimeControlBlock rcb) { + + PlanIterState state = rcb.getState(theStatePos); + + if (state.isDone()) { + return false; + } + + boolean more = theInput.next(rcb); + assert(more); + + FieldValueImpl row = rcb.getRegVal(theInput.getResultReg()); + long creationTime; + + if (row == NullValueImpl.getInstance()) { + rcb.setRegVal(theResultReg, row); + state.done(); + return true; + } + + if (row.isTuple()) { + creationTime = ((TupleValue)row).getCreationTime(); + } else if (row.isRecord()) { + creationTime = ((RowImpl)row).getCreationTime(); + } else if (row.isJsonRowMap()) { + creationTime = row.asJsonRowMap().getCreationTime(); + } else { + throw new QueryException( + "Input to the creation_time_millis() function is not a row", + getLocation()); + } + + rcb.setRegVal(theResultReg, + FieldDefImpl.Constants.longDef.createLong(creationTime)); + state.done(); + return true; + } + + @Override + public void reset(RuntimeControlBlock rcb) { + + theInput.reset(rcb); + PlanIterState state = rcb.getState(theStatePos); + state.reset(this); + } + + @Override + public void close(RuntimeControlBlock rcb) { + + PlanIterState state = rcb.getState(theStatePos); + if (state == null) { + return; + } + + theInput.close(rcb); + state.close(); + } + + @Override + protected void displayContent( + StringBuilder sb, + DisplayFormatter formatter, + boolean verbose) { + displayInputIter(sb, formatter, verbose, theInput); + } +} + diff --git a/kvmain/src/main/java/oracle/kv/impl/query/runtime/FuncRowMetadataIter.java b/kvmain/src/main/java/oracle/kv/impl/query/runtime/FuncRowMetadataIter.java new file mode 100644 index 00000000..fcc4b828 --- /dev/null +++ b/kvmain/src/main/java/oracle/kv/impl/query/runtime/FuncRowMetadataIter.java @@ -0,0 +1,159 @@ +/*- + * Copyright (C) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle NoSQL + * Database made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/nosqldb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle NoSQL Database for a copy of the license and + * additional information. 
+ */ + +package oracle.kv.impl.query.runtime; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +import oracle.kv.impl.api.table.DisplayFormatter; +import oracle.kv.impl.api.table.FieldDefImpl; +import oracle.kv.impl.api.table.FieldValueImpl; +import oracle.kv.impl.api.table.NullValueImpl; +import oracle.kv.impl.api.table.RowImpl; +import oracle.kv.impl.api.table.TupleValue; +import oracle.kv.impl.query.QueryException; +import oracle.kv.impl.query.compiler.Expr; +import oracle.kv.table.FieldValue; +import oracle.kv.table.FieldValueFactory; + +/** + * Iterator for row_metadata() function + */ +public class FuncRowMetadataIter extends SingleInputPlanIter { + + private final PlanIter theInput; + + public FuncRowMetadataIter(Expr e, int resultReg, PlanIter input) { + super(e, resultReg); + theInput = input; + } + + /** + * FastExternalizable constructor. + */ + FuncRowMetadataIter(DataInput in, short serialVersion) throws IOException { + super(in, serialVersion); + theInput = deserializeIter(in, serialVersion); + } + + /** + * FastExternalizable writer. Must call superclass method first to + * write common elements. + */ + @Override + public void writeFastExternal(DataOutput out, short serialVersion) + throws IOException { + + super.writeFastExternal(out, serialVersion); + } + + @Override + protected PlanIter getInput() { + return theInput; + } + + @Override + public PlanIterKind getKind() { + return PlanIterKind.FUNC_ROW_METADATA; + } + + @Override + public void open(RuntimeControlBlock rcb) { + rcb.setState(theStatePos, new PlanIterState()); + theInput.open(rcb); + } + + @SuppressWarnings("static-access") + @Override + public boolean next(RuntimeControlBlock rcb) { + + PlanIterState state = rcb.getState(theStatePos); + + if (state.isDone()) { + return false; + } + + boolean more = theInput.next(rcb); + + if (!more) { + state.done(); + return false; + } + + FieldValueImpl row = rcb.getRegVal(theInput.getResultReg()); + + /* row may be null in case it is a row of a non-target table in a + * join query*/ + if (row == NullValueImpl.getInstance()) { + rcb.setRegVal(theResultReg, row); + state.done(); + return true; + } + + String rowMetadata = null; + + if (row.isTuple()) { + rowMetadata = ((TupleValue)row).getRowMetadata(); + } else if (row.isRecord()) { + rowMetadata = ((RowImpl)row).getRowMetadata(); + } else if (row.isJsonRowMap()) { + rowMetadata = row.asJsonRowMap().getJColl().getRowMetadata(); + } else { + throw new QueryException( + "Input to the row_metadata() function is not a row", + getLocation()); + } + + FieldValue fv; + if (rowMetadata == null) { + fv = FieldValueFactory.createJsonNull(); + } else { + fv = FieldDefImpl.Constants.jsonDef.createValueFromString( + rowMetadata, FieldDefImpl.Constants.jsonDef); + } + + rcb.setRegVal(theResultReg, (FieldValueImpl) fv); + state.done(); + return true; + } + + @Override + public void reset(RuntimeControlBlock rcb) { + + theInput.reset(rcb); + PlanIterState state = rcb.getState(theStatePos); + state.reset(this); + } + + @Override + public void close(RuntimeControlBlock rcb) { + + PlanIterState state = rcb.getState(theStatePos); + if (state == null) { + return; + } + + theInput.close(rcb); + state.close(); + } + + @Override + protected void displayContent( + StringBuilder sb, + DisplayFormatter formatter, + boolean verbose) { + displayInputIter(sb, formatter, verbose, theInput); + } +} \ No newline at end of file diff --git a/kvmain/src/main/java/oracle/kv/impl/query/runtime/InsertRowIter.java 
b/kvmain/src/main/java/oracle/kv/impl/query/runtime/InsertRowIter.java index d436e8c8..97a0d8d5 100644 --- a/kvmain/src/main/java/oracle/kv/impl/query/runtime/InsertRowIter.java +++ b/kvmain/src/main/java/oracle/kv/impl/query/runtime/InsertRowIter.java @@ -399,6 +399,8 @@ public boolean nextInternal(RuntimeControlBlock rcb, boolean local) { table.setUUIDDefaultValue(row); } + row.setRowMetadata(rcb.getRowMetadata()); + if (rcb.getTraceLevel() >= 1) { rcb.trace("Row to insert =\n" + row); } @@ -556,6 +558,7 @@ private boolean processResult( initRowFromValueBytes(row, res.getPreviousValue().toByteArray(), res.getPreviousExpirationTime(), + res.getPreviousCreationTime(), res.getPreviousModificationTime(), res.getPreviousVersion(), state.thePid.getPartitionId(), @@ -570,6 +573,7 @@ private boolean processResult( } else { row.setVersion(res.getNewVersion()); row.setExpirationTime(res.getNewExpirationTime()); + row.setCreationTime(res.getNewCreationTime()); row.setModificationTime(res.getNewModificationTime()); row.setPartition(state.thePid.getPartitionId()); row.setShard(res.getShard()); diff --git a/kvmain/src/main/java/oracle/kv/impl/query/runtime/NestedLoopJoinIter.java b/kvmain/src/main/java/oracle/kv/impl/query/runtime/NestedLoopJoinIter.java index 4f757562..3f11bd9c 100644 --- a/kvmain/src/main/java/oracle/kv/impl/query/runtime/NestedLoopJoinIter.java +++ b/kvmain/src/main/java/oracle/kv/impl/query/runtime/NestedLoopJoinIter.java @@ -141,11 +141,14 @@ public void reset(RuntimeControlBlock rcb) { NestedLoopJoinState state = (NestedLoopJoinState)rcb.getState(theStatePos); state.reset(this); + ResumeInfo ri = rcb.getResumeInfo(); for (int i = 0; i < theBranches.length; ++i) { - theBranches[i].reset(rcb); + ri.ensureTableRI(i); } - ResumeInfo ri = rcb.getResumeInfo(); + for (int i = 0; i < theBranches.length; ++i) { + theBranches[i].reset(rcb); + } for (int i = 0; i < theBranches.length - 1; ++i) { if (ri.getPrimResumeKey(i+1) != null) { @@ -201,7 +204,7 @@ public boolean next(RuntimeControlBlock rcb) { rcb.trace("Value for join branch " + branch + " = " + branchRes); } - + for (JoinPred pred : theJoinPreds) { if (pred.theOuterBranch != branch) { continue; diff --git a/kvmain/src/main/java/oracle/kv/impl/query/runtime/PartitionUnionIter.java b/kvmain/src/main/java/oracle/kv/impl/query/runtime/PartitionUnionIter.java index 3cddc053..39cd5040 100644 --- a/kvmain/src/main/java/oracle/kv/impl/query/runtime/PartitionUnionIter.java +++ b/kvmain/src/main/java/oracle/kv/impl/query/runtime/PartitionUnionIter.java @@ -535,12 +535,31 @@ public boolean sortingNext( } } + /* + * Stop fetching records when one of the following conditions is + * met: + * - Reached the size or batch limit + * - For inner join queries, all local partitions have been + * processed. This is because the theInputIter.reset(rcb) + * before fetch from next partition will clear the resume + * keys of inner tables. + * + * If all local partitions are done, create an empty ResumeInfo + * object to signal the end of sort phase 1. + * The proxy will then use the partitions bitmap in ResumeInfo to + * locate the next partition to process. 
+ */ + boolean allPartitionsDone = + (numDonePartitions == state.theRepPids.length); if (rcb.getReachedLimit() || (batchSize > 0 && state.theNumResults >= batchSize && - !rcb.cannotSuspend())) { + !rcb.cannotSuspend()) || + (allPartitionsDone && + ri.numTables() > 1 && + !rcb.cannotSuspend())) { - if (numDonePartitions == state.theRepPids.length) { + if (allPartitionsDone) { ri = new ResumeInfo(rcb); rcb.setResumeInfo(ri); ri.setPartitionsBitmap(partitionsBitmap); diff --git a/kvmain/src/main/java/oracle/kv/impl/query/runtime/PlanIter.java b/kvmain/src/main/java/oracle/kv/impl/query/runtime/PlanIter.java index afbca9ec..b8bf5466 100644 --- a/kvmain/src/main/java/oracle/kv/impl/query/runtime/PlanIter.java +++ b/kvmain/src/main/java/oracle/kv/impl/query/runtime/PlanIter.java @@ -268,7 +268,12 @@ public static enum PlanIterKind { FUNC_LAST_DAY_OF_MONTH(84), FUNC_TIMESTAMP_BUCKET(85), - NESTED_LOOP_JOIN(86); + NESTED_LOOP_JOIN(86), + FUNC_ROW_METADATA(87), + + FUNC_CREATION_TIME(88), + FUNC_CREATION_TIME_MILLIS(89); + private static final PlanIterKind[] VALUES = values(); @@ -938,6 +943,12 @@ public static PlanIter deserializeIter(DataInput in, short serialVersion) case FUNC_EXPIRATION_TIME_MILLIS: iter = new FuncExpirationTimeMillisIter(in, serialVersion); break; + case FUNC_CREATION_TIME: + iter = new FuncCreationTimeIter(in, serialVersion); + break; + case FUNC_CREATION_TIME_MILLIS: + iter = new FuncCreationTimeMillisIter(in, serialVersion); + break; case FUNC_CURRENT_TIME_MILLIS: iter = new FuncCurrentTimeMillisIter(in, serialVersion); break; @@ -1082,6 +1093,9 @@ public static PlanIter deserializeIter(DataInput in, short serialVersion) case NESTED_LOOP_JOIN: iter = new NestedLoopJoinIter(in, serialVersion); break; + case FUNC_ROW_METADATA: + iter = new FuncRowMetadataIter(in, serialVersion); + break; default: throw new IllegalArgumentException( "Unknown query iterator kind: " + kind + diff --git a/kvmain/src/main/java/oracle/kv/impl/query/runtime/ReceiveIter.java b/kvmain/src/main/java/oracle/kv/impl/query/runtime/ReceiveIter.java index a98931d0..cccf1af1 100644 --- a/kvmain/src/main/java/oracle/kv/impl/query/runtime/ReceiveIter.java +++ b/kvmain/src/main/java/oracle/kv/impl/query/runtime/ReceiveIter.java @@ -1017,7 +1017,8 @@ protected Request makeReadRequest() { rcb.getRegionId(), rcb.doTombstone(), rcb.getMaxServerMemoryConsumption(), - theIsUpdate); + theIsUpdate, + rcb.getRowMetadata()); if (theIsUpdate) { final Request req = @@ -1282,7 +1283,8 @@ protected Request makeReadRequest() { rcb.getRegionId(), rcb.doTombstone(), rcb.getMaxServerMemoryConsumption(), - theIsUpdate); + theIsUpdate, + rcb.getRowMetadata()); final ExecuteOptions exeOptions = rcb.getExecuteOptions(); if (theIsUpdate) { @@ -2834,13 +2836,21 @@ private class SequentialShardsIterator Set shardIds = rcb.getShardSet(); - if (shardIds != null) { + /* A simple query run in parallel may have a shardId set */ + final boolean isSimpleQuery = + rcb.getExecuteOptions().getIsSimpleQuery(); + + if (shardIds != null && !isSimpleQuery) { /* For a sorting query, the id of the target shard is sent * by the driver, and only this shard will be scanned during * the current batch). If this is the 1st batch of a virtual * scan, the driver also sends the spec of this scan, and we * have to initialize the resume info accordingly. 
*/ - assert(shardIds.size() == 1); + if (shardIds.size() > 1) { + throw new QueryException( + "Use of multiple shards in a single query requires " + + "a simple query, as set in ExecuteOptions"); + } theShard = shardIds.iterator().next(); sid = theShard; VirtualScan vs = rcb.getDriverVirtualScan(); @@ -2867,7 +2877,14 @@ private class SequentialShardsIterator * (both base and virtual shards). So, we have to build this * array here to find the target shard and initialize the * shard scan. */ - theShards = baseTopo.getSortedRepGroupIds(); + /* if shardIds was passed in, use that set. This can happen + * on a parallel query or use of an input split */ + if (shardIds != null) { + theShards = new ArrayList(shardIds.size()); + theShards.addAll(shardIds); + } else { + theShards = baseTopo.getSortedRepGroupIds(); + } int baseVSID = 1 + theShards.get(theShards.size()-1).getGroupId(); int numVirtualScans = ri.numVirtualScans(); if (numVirtualScans > 0) { @@ -3270,7 +3287,8 @@ Request createRequest() { theRCB.getRegionId(), theRCB.doTombstone(), theRCB.getMaxServerMemoryConsumption(), - theIsUpdate); + theIsUpdate, + theRCB.getRowMetadata()); final Consistency consistency = theRCB.getConsistency(); final Durability durability = theRCB.getDurability(); diff --git a/kvmain/src/main/java/oracle/kv/impl/query/runtime/RuntimeControlBlock.java b/kvmain/src/main/java/oracle/kv/impl/query/runtime/RuntimeControlBlock.java index d2368243..37acf786 100644 --- a/kvmain/src/main/java/oracle/kv/impl/query/runtime/RuntimeControlBlock.java +++ b/kvmain/src/main/java/oracle/kv/impl/query/runtime/RuntimeControlBlock.java @@ -543,6 +543,12 @@ public int getUpdateLimit() { theExecuteOptions.getUpdateLimit()); } + public String getRowMetadata() { + return (isServerRCB() ? + theQueryOp.getRowMetadata() : + theExecuteOptions.getRowMetadata()); + } + public String getQueryName() { return (isServerRCB() ? theQueryOp.getQueryName() : diff --git a/kvmain/src/main/java/oracle/kv/impl/query/runtime/server/ServerDeleteRowIter.java b/kvmain/src/main/java/oracle/kv/impl/query/runtime/server/ServerDeleteRowIter.java index 822d88d0..790ddee7 100644 --- a/kvmain/src/main/java/oracle/kv/impl/query/runtime/server/ServerDeleteRowIter.java +++ b/kvmain/src/main/java/oracle/kv/impl/query/runtime/server/ServerDeleteRowIter.java @@ -198,7 +198,7 @@ private void deleteRow(RuntimeControlBlock rcb) { } Delete op = new Delete(key.getKeyBytes(), Choice.NONE, theTable.getId(), - rcb.doTombstone()); + rcb.doTombstone(), rcb.getRowMetadata()); /* * Configure the resource tracker of Delete op with the * tracker of TableQuery. diff --git a/kvmain/src/main/java/oracle/kv/impl/query/runtime/server/ServerTableIter.java b/kvmain/src/main/java/oracle/kv/impl/query/runtime/server/ServerTableIter.java index eb5858b5..2e92e3d8 100644 --- a/kvmain/src/main/java/oracle/kv/impl/query/runtime/server/ServerTableIter.java +++ b/kvmain/src/main/java/oracle/kv/impl/query/runtime/server/ServerTableIter.java @@ -1757,6 +1757,7 @@ public boolean simpleNext(RuntimeControlBlock rcb, PlanIterState state) { srow.addPrimKeyAndPropertyFields( theScanner.expirationTime(), + theScanner.creationTime(), theScanner.modificationTime(), theScanner.partitionId(), (theIndex == null ? 
@@ -1773,6 +1774,7 @@ public boolean simpleNext(RuntimeControlBlock rcb, PlanIterState state) { TupleValue tv = (TupleValue)rcb.getRegVal(resultReg); tv.setExpirationTime(theScanner.expirationTime()); tv.setModificationTime(theScanner.modificationTime()); + tv.setCreationTime(theScanner.creationTime()); tv.setPartition(theScanner.partitionId()); tv.setIndexStorageSize(theScanner.indexStorageSize()); @@ -1822,6 +1824,7 @@ public boolean simpleNext(RuntimeControlBlock rcb, PlanIterState state) { srow.addPrimKeyAndPropertyFields( theScanner.expirationTime(), + theScanner.creationTime(), theScanner.modificationTime(), theScanner.partitionId(), theScanner.rowStorageSize(), @@ -1835,6 +1838,7 @@ public boolean simpleNext(RuntimeControlBlock rcb, PlanIterState state) { TupleValue tv = (TupleValue)rcb.getRegVal(resultReg); tv.setExpirationTime(theScanner.expirationTime()); + tv.setCreationTime(theScanner.creationTime()); tv.setModificationTime(theScanner.modificationTime()); tv.setPartition(theScanner.partitionId()); tv.setIndexStorageSize(theScanner.indexStorageSize()); @@ -1895,10 +1899,12 @@ public boolean simpleNext(RuntimeControlBlock rcb, PlanIterState state) { rcb.getRegVal(theResultReg); tv.setExpirationTime(tableRow.getExpirationTime()); tv.setModificationTime(tableRow.getLastModificationTime()); + tv.setCreationTime(tableRow.getCreationTime()); tv.setPartition(tableRow.getPartition()); tv.setStorageSize(tableRow.getStorageSize()); tv.setIndexStorageSize(theScanner.indexStorageSize()); tv.setVersion(tableRow.getVersion()); + tv.setRowMetadata(tableRow.getRowMetadata()); } else { rcb.setRegVal(theResultReg, tableRow); } diff --git a/kvmain/src/main/java/oracle/kv/impl/query/runtime/server/ServerUpdateRowIter.java b/kvmain/src/main/java/oracle/kv/impl/query/runtime/server/ServerUpdateRowIter.java index 80b1172a..0e657a42 100644 --- a/kvmain/src/main/java/oracle/kv/impl/query/runtime/server/ServerUpdateRowIter.java +++ b/kvmain/src/main/java/oracle/kv/impl/query/runtime/server/ServerUpdateRowIter.java @@ -174,6 +174,7 @@ public boolean next(RuntimeControlBlock rcb) { } row = (RowImpl)inVal; + row.setRowMetadata(rcb.getRowMetadata()); put = createPut(row, rcb); diff --git a/kvmain/src/main/java/oracle/kv/impl/query/runtime/server/TableScannerFactory.java b/kvmain/src/main/java/oracle/kv/impl/query/runtime/server/TableScannerFactory.java index c1b8f768..b1f26112 100644 --- a/kvmain/src/main/java/oracle/kv/impl/query/runtime/server/TableScannerFactory.java +++ b/kvmain/src/main/java/oracle/kv/impl/query/runtime/server/TableScannerFactory.java @@ -185,6 +185,13 @@ public boolean next(TableImpl forTable) public long modificationTime(); + /** + * Return the creation time of the current row. The result is valid + * only if this method is called after currentTableRow() has been + * called. 
+ */ + public long creationTime(); + public int partitionId(); public int rowStorageSize(); @@ -437,6 +444,11 @@ public long modificationTime() { return theGetResult.getModificationTime(); } + @Override + public long creationTime() { + return theGetResult.getCreationTime(); + } + @Override public int partitionId() { if (thePid.getPartitionId() >= 0) { @@ -520,6 +532,7 @@ public RowImpl getTableRow() throws SizeLimitException { return theTable.initRowFromValueBytes(theTableRow, data, expirationTime(), + creationTime(), modificationTime(), rowVersion(), partitionId(), @@ -531,6 +544,7 @@ public RowImpl getTableRow() throws SizeLimitException { if (!theTable.initRowFromKeyValueBytes(thePrimKey, data, expirationTime(), + creationTime(), modificationTime(), rowVersion(), partitionId(), @@ -914,6 +928,11 @@ public long modificationTime() { return theScanner.getModificationTime(); } + @Override + public long creationTime() { + return theScanner.getCreationTime(); + } + @Override public int partitionId() { return thePid.getPartitionId(); @@ -998,6 +1017,7 @@ public RowImpl getTableRow() throws SizeLimitException { return theTable.initRowFromValueBytes(theTableRow, theDataEntry.getData(), expirationTime(), + creationTime(), modificationTime(), rowVersion(), partitionId(), @@ -1426,6 +1446,11 @@ public long modificationTime() { return theScanner.getModificationTime(); } + @Override + public long creationTime() { + return theScanner.getCreationTime(); + } + @Override public int partitionId() { return theHandlersManager.getRepNode(). @@ -1541,6 +1566,7 @@ public RowImpl getTableRow() throws SizeLimitException { if (!theTable.initRowFromKeyValueBytes(theBinaryPrimKey, data, expirationTime(), + creationTime(), modificationTime(), rowVersion(), partitionId(), diff --git a/kvmain/src/main/java/oracle/kv/impl/rep/RepNode.java b/kvmain/src/main/java/oracle/kv/impl/rep/RepNode.java index 901a9736..0d8aa192 100644 --- a/kvmain/src/main/java/oracle/kv/impl/rep/RepNode.java +++ b/kvmain/src/main/java/oracle/kv/impl/rep/RepNode.java @@ -69,6 +69,8 @@ import oracle.kv.impl.security.metadata.SecurityMetadataInfo; import oracle.kv.impl.security.util.KerberosPrincipals; import oracle.kv.impl.security.util.SNKrbInstance; +import oracle.kv.impl.test.TestHook; +import oracle.kv.impl.test.TestHookExecute; import oracle.kv.impl.test.TestStatus; import oracle.kv.impl.tif.TextIndexFeederManager; import oracle.kv.impl.tif.TextIndexFeederTopoTracker; @@ -135,6 +137,13 @@ public class RepNode implements TopologyManager.PostUpdateListener, TopologyManager.PreUpdateListener { + /** + * Test hook executed before sending NOP to a node in a specified rep group. + * A use case is to decide which node to send the NOP to in a test avoiding + * sending the NOP to a random node. + */ + public static volatile TestHook SEND_RG_NOP_HOOK = null; + /* Number of retries for DB operations. */ private static final int NUM_DB_OP_RETRIES = 100; @@ -448,15 +457,7 @@ void updateDbHandles(ReplicatedEnvironment repEnv) { public void preUpdate(Topology newTopology) { /* Don't wait, we just care about the state: master or not */ final ReplicatedEnvironment env = envManager.getEnv(0); - try { - if ((env == null) || !env.getState().isMaster()) { - return; - } - } catch (EnvironmentFailureException e) { - /* It's in the process of being re-established. */ - return; - } catch (IllegalStateException iae) { - /* A closed environment. 
*/ + if (!isMaster(env)) { return; } @@ -465,12 +466,45 @@ public void preUpdate(Topology newTopology) { checkPartitionChanges(new RepGroupId(repNodeId.getGroupId()), newTopology); } catch (IllegalStateException ise) { + if (!isAuthoritativeMaster(env)) { + getLogger().log(Level.INFO, + () -> String.format("Non-authoratitive master " + + "failed partition change check: %s", ise)); + throw new OperationFaultException("not authoritative master"); + } /* The Topology checks failed, force a shutdown. */ getExceptionHandler(). uncaughtException(Thread.currentThread(), ise); } } + private boolean isMaster(ReplicatedEnvironment env) { + try { + if ((env == null) || !env.getState().isMaster()) { + return false; + } + } catch (EnvironmentFailureException e) { + /* It's in the process of being re-established. */ + return false; + } catch (IllegalStateException iae) { + /* A closed environment. */ + return false; + } + return true; + } + + private boolean isAuthoritativeMaster(ReplicatedEnvironment env) { + try { + return getIsAuthoritativeMaster(env); + } catch (EnvironmentFailureException e) { + /* It's in the process of being re-established. */ + return false; + } catch (IllegalStateException iae) { + /* A closed environment. */ + return false; + } + } + /** * Implements the Listener method for topology changes. It's invoked by * the TopologyManager and should not be invoked directly. @@ -667,6 +701,8 @@ public TextIndexFeederManager getTextIndexFeederManager() { * @return true if the operation was successful */ public boolean sendNOP(RepGroupId groupId) { + assert TestHookExecute.doHookIfSet(SEND_RG_NOP_HOOK, this); + final RepGroupState rgs = requestDispatcher.getRepGroupStateTable(). getGroupState(groupId); final LoginManager lm = @@ -733,7 +769,6 @@ boolean sendNOP(RepNodeId rnId) { ex); } return false; - } /** @@ -744,8 +779,8 @@ boolean sendNOP(RepNodeId rnId) { * @return the rep node state */ public RepNodeState getMaster(RepGroupId groupId) { - return requestDispatcher.getRepGroupStateTable(). - getGroupState(groupId).getMaster(); + return requestDispatcher.getRepGroupStateTable().getGroupState(groupId) + .getMaster(); } /** @@ -753,7 +788,7 @@ public RepNodeState getMaster(RepGroupId groupId) { */ public RepGroupState getRepGroupState(RepGroupId groupId) { return requestDispatcher.getRepGroupStateTable().getGroupState(groupId); - } + } /** * Invoked when the replicated environment has been invalidated due to an @@ -823,7 +858,14 @@ public RepImpl getEnvImpl(long timeoutMs) { * authoritative master, without waiting for the environment. 
*/ public boolean getIsAuthoritativeMaster() { - final RepImpl repImpl = getEnvImpl(0); + return getIsAuthoritativeMaster(getEnv(0)); + } + + public boolean getIsAuthoritativeMaster(ReplicatedEnvironment env) { + if (env == null) { + return false; + } + final RepImpl repImpl = RepInternal.getRepImpl(env); if (repImpl == null) { return false; } diff --git a/kvmain/src/main/java/oracle/kv/impl/rep/RepNodeSecurity.java b/kvmain/src/main/java/oracle/kv/impl/rep/RepNodeSecurity.java index 98eb2da0..896bae8d 100644 --- a/kvmain/src/main/java/oracle/kv/impl/rep/RepNodeSecurity.java +++ b/kvmain/src/main/java/oracle/kv/impl/rep/RepNodeSecurity.java @@ -452,9 +452,13 @@ public Subject verifyUser(Subject subj) { final KVStoreUser user = secMd.getUser(userPrinc.getName()); - if (user == null || !user.isEnabled()) { - logger.info( - "User " + userPrinc.getName() + " is not valid"); + if (user == null) { + logger.info("User=" + userPrinc.getName() + " does not exist" ); + return null; + } + + if (!user.isEnabled()) { + logger.info("User=" + userPrinc.getName() + " not enabled"); return null; } @@ -495,7 +499,11 @@ public KVStoreUser loadUserFromStore(String userName) { @Override public void logMessage(Level level, String msg) { - logger.log(level, msg); + logger.log(level, lm(msg)); + } + + private String lm(String msg) { + return "[RepNodePasswordAuthenticator] " + msg; } } diff --git a/kvmain/src/main/java/oracle/kv/impl/rep/admin/RepNodeAdminImpl.java b/kvmain/src/main/java/oracle/kv/impl/rep/admin/RepNodeAdminImpl.java index 3a3a3f30..14a2c20e 100644 --- a/kvmain/src/main/java/oracle/kv/impl/rep/admin/RepNodeAdminImpl.java +++ b/kvmain/src/main/java/oracle/kv/impl/rep/admin/RepNodeAdminImpl.java @@ -63,6 +63,8 @@ import oracle.kv.impl.security.annotations.SecureInternalMethod; import oracle.kv.impl.security.util.KerberosPrincipals; import oracle.kv.impl.test.RemoteTestInterface; +import oracle.kv.impl.test.TestHook; +import oracle.kv.impl.test.TestHookExecute; import oracle.kv.impl.topo.PartitionId; import oracle.kv.impl.topo.RepGroupId; import oracle.kv.impl.topo.RepNodeId; @@ -100,6 +102,11 @@ public class RepNodeAdminImpl extends VersionedRemoteImpl implements RepNodeAdmin { + /** + * Test hook executed before serving ping. + */ + public static volatile TestHook PING_HOOK = null; + /** * The repNode being administered */ @@ -466,6 +473,9 @@ public RepNodeStatus ping(AuthContext authCtx, short serialVersion) { @Override public RepNodeStatus execute() { + + assert TestHookExecute.doHookIfSet(PING_HOOK, repNodeService); + ServiceStatus status = repNodeService.getStatusTracker().getServiceStatus(); State state = State.DETACHED; diff --git a/kvmain/src/main/java/oracle/kv/impl/rep/migration/MigrationManager.java b/kvmain/src/main/java/oracle/kv/impl/rep/migration/MigrationManager.java index 03e187da..d3d2ee5e 100644 --- a/kvmain/src/main/java/oracle/kv/impl/rep/migration/MigrationManager.java +++ b/kvmain/src/main/java/oracle/kv/impl/rep/migration/MigrationManager.java @@ -97,6 +97,11 @@ */ public class MigrationManager implements Localizer { + /** + * Overrides the min delay between migration start for testing. + */ + public static volatile long MINIMUM_DELAY_OVERRIDE = -1; + private final Logger logger; private static final int NUM_DB_OP_RETRIES = 100; @@ -2114,8 +2119,8 @@ synchronized void submitNew(MigrationTarget target) { * is available (delay == 0). 
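The new test hooks (RepNode.SEND_RG_NOP_HOOK, RepNodeAdminImpl.PING_HOOK) and MigrationManager.MINIMUM_DELAY_OVERRIDE exist to make migration timing and NOP/ping routing deterministic in tests. A minimal sketch of how a test might drive them is below; it assumes TestHook exposes a single doHook(T) callback (consistent with the TestHookExecute.doHookIfSet(...) call sites in this patch), and the delay value and console output are purely illustrative.

import oracle.kv.impl.rep.admin.RepNodeAdminImpl;
import oracle.kv.impl.rep.migration.MigrationManager;
import oracle.kv.impl.test.TestHook;

public class MigrationTestHookSketch {

    /** Runs a test body with a shortened migration delay and a ping observer installed. */
    public static void runWithFastMigration(Runnable testBody) {
        final long savedDelay = MigrationManager.MINIMUM_DELAY_OVERRIDE;
        try {
            /* Shrink the delay between migration starts; -1 means "not overridden". */
            MigrationManager.MINIMUM_DELAY_OVERRIDE = 10;
            /* Observe each ping served by the RN admin (the hook argument is the service). */
            RepNodeAdminImpl.PING_HOOK = new TestHook<Object>() {
                @Override
                public void doHook(Object repNodeService) {
                    System.out.println("ping served by " + repNodeService);
                }
            };
            testBody.run();
        } finally {
            /* Always restore the production defaults. */
            MigrationManager.MINIMUM_DELAY_OVERRIDE = savedDelay;
            RepNodeAdminImpl.PING_HOOK = null;
        }
    }
}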
*/ if (target.getSource().equals(lastSource)) { - delay = MINIMUM_DELAY + adjustment; - adjustment += MINIMUM_DELAY; + delay = getMinimumDelay() + adjustment; + adjustment += getMinimumDelay(); } else { lastSource = target.getSource(); adjustment = 0; @@ -2180,8 +2185,8 @@ protected void afterExecute(Runnable r, Throwable t) { * because the configuration parameters may have been set with * a time < the minimum. */ - if (delay < MINIMUM_DELAY) { - delay = MINIMUM_DELAY; + if (delay < getMinimumDelay()) { + delay = getMinimumDelay(); } /* @@ -2418,4 +2423,40 @@ public void commit(Transaction t) { pgt.refreshTableFromDB(); } } + + /** + * Returns the target executor. Creates the executor if necessary. Currently + * only used for testing. + */ + public synchronized TargetExecutor getTargetExecutor() { + if ((targetExecutor == null) || targetExecutor.isTerminated()) { + targetExecutor = new TargetExecutor(); + } + return targetExecutor; + } + + /** + * Returns the target monitor executor. Creates the executor if necessary. + * Currently only used for testing. + */ + public synchronized TargetMonitorExecutor getTargetMonitorExecutor() { + if (targetMonitorExecutor == null) { + targetMonitorExecutor = + new TargetMonitorExecutor(this, repNode, logger); + } + return targetMonitorExecutor; + } + + /** + * Returns the mininum delay based on MINIMUM_DELAY_OVERRIDE. If not + * overriden, then MINIMUM_DELAY will be used. Currently only used for + * testing. + */ + public long getMinimumDelay() { + final long override = MINIMUM_DELAY_OVERRIDE; + if (override != -1) { + return override; + } + return MINIMUM_DELAY; + } } diff --git a/kvmain/src/main/java/oracle/kv/impl/rep/migration/MigrationSource.java b/kvmain/src/main/java/oracle/kv/impl/rep/migration/MigrationSource.java index af1da9ec..6ce73de8 100644 --- a/kvmain/src/main/java/oracle/kv/impl/rep/migration/MigrationSource.java +++ b/kvmain/src/main/java/oracle/kv/impl/rep/migration/MigrationSource.java @@ -146,6 +146,18 @@ public class MigrationSource implements Runnable { private final static long WAIT_PENDING_OPS_TIMEOUT_MS = 10 * 60 * 1000; + /** + * Test hook that throws an IOException before sending the EOD, + * setting the target migration state to ERROR. + */ + public static TestHook eodSendFailureHook = null; + + /** + * Test hook to inhibit calling manager.monitorTarget() to cancel + * initialising the TargetMonitorExecutor in persistTransferComplete. + */ + public static TestHook noMonitorTargetHook = null; + /** * Test hook that can wait to simulate waiting for pending operations * with no-op stream migration handler to complete. @@ -492,6 +504,7 @@ public void run() { * is updated in transferComplete(). KVSTORE-1244 */ lastKey = null; + sendLastRecordMarker(); } /* @@ -537,6 +550,17 @@ public void run() { } } + private void sendLastRecordMarker() { + try { + writeOp(OP.LAST_RECORD_MARKER); + logger.log(Level.INFO, + "Sent last record marker for {0}", partitionId); + stream.flush(); + } catch (IOException ioe) { + error(ioe); + } + } + /** * Closes the channel, logging any resulting exceptions. 
*/ @@ -658,6 +682,10 @@ private void transferComplete() { endVLSNSeq = pgt.getLastVLSN(); genNum = pg.getGenNum().getNumber(); } + + /* run test hook in unit test only */ + assert TestHookExecute.doHookIfSet(eodSendFailureHook, partitionId); + sendEOD(endVLSNSeq, genNum); if (eod /* eod successfully sent */ && @@ -713,12 +741,14 @@ private synchronized boolean sendCopy(Cursor cursor, assert !result.isTombstone(); sendCopy(key, value, getVLSNFromCursor(cursor, false), + 0L /*creationTime*/, 0L /*modificationTime*/, result.getExpirationTime(), false /*isTombstone*/); } else { sendCopy(key, value, 0L /*vlsn*/, + result.getCreationTime(), result.getModificationTime(), result.getExpirationTime(), result.isTombstone()); @@ -731,6 +761,7 @@ private synchronized boolean sendCopy(Cursor cursor, private void sendCopy(DatabaseEntry key, DatabaseEntry value, long vlsn, + long creationTime, long modificationTime, long expirationTime, boolean isTombstone) { @@ -739,6 +770,7 @@ private void sendCopy(DatabaseEntry key, writeOp(OP.COPY); writeDbEntry(key); writeDbEntry(value); + writeCreationTime(creationTime); writeModificationTime(modificationTime); writeExpirationTime(expirationTime); writeTombstoneFlag(isTombstone); @@ -798,6 +830,7 @@ synchronized boolean sendPut(long txnId, DatabaseEntry key, DatabaseEntry value, long vlsn, + long creationTime, long modificationTime, long expirationTime, boolean isTombstone) { @@ -813,6 +846,7 @@ synchronized boolean sendPut(long txnId, writeOp(OP.PUT, txnId); writeDbEntry(key); writeDbEntry(value); + writeCreationTime(creationTime); writeModificationTime(modificationTime); writeExpirationTime(expirationTime); writeTombstoneFlag(isTombstone); @@ -1061,6 +1095,14 @@ private void writeExpirationTime(long expTime) throws IOException { stream.writeLong(expTime); } + private void writeCreationTime(long creationTime) throws IOException { + assert Thread.holdsLock(this); + /* creation time only needed for migration */ + if (!transferOnly) { + stream.writeLong(creationTime); + } + } + private void writeModificationTime(long modificationTime) throws IOException { assert Thread.holdsLock(this); /* modification time only need for migration */ @@ -1071,7 +1113,7 @@ private void writeModificationTime(long modificationTime) throws IOException { private void writeTombstoneFlag(boolean isTombstone) throws IOException { assert Thread.holdsLock(this); - /* Only write the tombstone flag duing migration */ + /* Only write the tombstone flag during migration */ if (!transferOnly) { stream.writeBoolean(isTombstone); } @@ -1136,6 +1178,12 @@ private boolean persistTransferComplete() { */ manager.criticalUpdate(); + /* run test hook in unit test only */ + assert TestHookExecute.doHookIfSet(noMonitorTargetHook, partitionId); + if (noMonitorTargetHook != null) { + return true; + } + /* * Now that the record has been persisted and the local topology * updated, monitor the target for failure so that this may be undone. diff --git a/kvmain/src/main/java/oracle/kv/impl/rep/migration/MigrationStreamHandle.java b/kvmain/src/main/java/oracle/kv/impl/rep/migration/MigrationStreamHandle.java index 36d6f549..f2f1afe6 100644 --- a/kvmain/src/main/java/oracle/kv/impl/rep/migration/MigrationStreamHandle.java +++ b/kvmain/src/main/java/oracle/kv/impl/rep/migration/MigrationStreamHandle.java @@ -106,24 +106,25 @@ private MigrationStreamHandle() { /** * Inserts a PUT record into migration stream if partition migration - * is in progress. Otherwise this method does nothing. + * is in progress. 
Otherwise, this method does nothing. * * @param key * @param value - * @param modificationTime * @param vlsn + * @param creationTime + * @param modificationTime * @param expirationTime * @param isTombstone */ public void addPut(DatabaseEntry key, DatabaseEntry value, - long vlsn, long modificationTime, + long vlsn, long creationTime, long modificationTime, long expirationTime, boolean isTombstone) { /* NOOP */ } /** * Inserts a DELETE record into migration stream if partition migration - * is in progress. Otherwise this method does nothing. + * is in progress. Otherwise, this method does nothing. * * @param key * @param cursor @@ -134,7 +135,7 @@ public void addDelete(DatabaseEntry key, Cursor cursor) { /** * Inserts a PREPARE message into the migration stream if partition - * migration is in progress. Otherwise this method does nothing. This + * migration is in progress. Otherwise, this method does nothing. This * method should be invoked before the client transaction is committed. * The PREPARE message signals that the operations associated with this * transaction are about to be committed. No further operations can be @@ -212,14 +213,14 @@ private MigratingHandle(MigrationSource source, Transaction txn) { @Override public void addPut(DatabaseEntry key, DatabaseEntry value, - long vlsn, long modificationTime, + long vlsn, long creationTime, long modificationTime, long expirationTime, boolean isTombstone) { assert !prepared; assert key != null; assert value != null; if (source.sendPut(txn.getId(), key, value, - vlsn, modificationTime, - expirationTime, isTombstone)) { + vlsn, creationTime, modificationTime, + expirationTime,isTombstone)) { opsSent++; } } diff --git a/kvmain/src/main/java/oracle/kv/impl/rep/migration/MigrationTarget.java b/kvmain/src/main/java/oracle/kv/impl/rep/migration/MigrationTarget.java index fca188b7..35b9ab0c 100644 --- a/kvmain/src/main/java/oracle/kv/impl/rep/migration/MigrationTarget.java +++ b/kvmain/src/main/java/oracle/kv/impl/rep/migration/MigrationTarget.java @@ -40,6 +40,8 @@ import oracle.kv.impl.rep.migration.generation.PartitionGeneration; import oracle.kv.impl.rep.migration.generation.PartitionGenerationTable; import oracle.kv.impl.rep.migration.generation.PartitionMDException; +import oracle.kv.impl.test.TestHook; +import oracle.kv.impl.test.TestHookExecute; import oracle.kv.impl.topo.PartitionId; import oracle.kv.impl.topo.RepGroupId; import oracle.kv.impl.topo.Topology; @@ -139,7 +141,12 @@ * Case 4 - EoD sent after 3 is the usual steady state while the DB copy is * in progress. */ -class MigrationTarget implements Callable { +public class MigrationTarget implements Callable { + + /** + * Test hook executed before persist migration record durable. 
+ */ + public static volatile TestHook PERSIST_HOOK = null; private final Logger logger; private final RateLimitingLogger rateLimitingLogger; @@ -1017,6 +1024,9 @@ private void consumeOps(Reader reader) throws Exception { */ private boolean persistTargetDurable(Reader.EoD eod) { assert !Thread.holdsLock(this); + + assert TestHookExecute.doHookIfSet(PERSIST_HOOK, MigrationTarget.this); + final TransactionConfig txnConfig = new TransactionConfig(); txnConfig.setConsistencyPolicy( NoConsistencyRequiredPolicy.NO_CONSISTENCY); @@ -1177,12 +1187,16 @@ private class Reader implements Runnable { private final DataInputStream stream; + /* The last record marker from the source*/ + private volatile boolean lastRecordMarker; + /* For general use to avoid constructing DatabaseEntrys in the OPS */ private final DatabaseEntry keyEntry = new DatabaseEntry(); private final DatabaseEntry valueEntry = new DatabaseEntry(); Reader(DataInputStream stream) { this.stream = stream; + lastRecordMarker = false; } @Override @@ -1196,6 +1210,25 @@ public void run() { logger.log(Level.INFO, "Exception processing migration stream for " + partitionId, ex); + + /* If we do not cancel the migration when the EOD is not + * received and last record marker from source has been + * received, it returns the migration state as PENDING + * rather than ERROR, which is not correct and may lead to + * issues. For instance, in the case of a migration failure, + * it may leave a partition unavailable for an extended + * period of time (several minutes). + * PM state flow : + * PENDING --> RUNNING --> ERROR + */ + if (!eodReceived && lastRecordMarker) { + logger.log(Level.WARNING, + String.format( + "EOD not received from source %s. " + + "Migration cancelled for partition %s.", + sourceName, partitionId)); + cancel(false); + } } /* @@ -1203,6 +1236,11 @@ public void run() { * to exit so that the target thread can handle the issue. */ setStopped(); + logger.log(Level.INFO, + String.format( + "Migration stopped for partition %s. 
" + + "Current migration state is %s.", + partitionId, getState().toString())); } } @@ -1232,6 +1270,7 @@ private void processStream() throws Exception { copyOps++; insert(new CopyOp(readDbEntry(), readDbEntry(), + readCreationTime(), readModificationTime(), readExpirationTime(), readTombstoneFlag())); @@ -1241,6 +1280,7 @@ private void processStream() throws Exception { insert(new PutOp(readTxnId(), readDbEntry(), readDbEntry(), + readCreationTime(), readModificationTime(), readExpirationTime(), readTombstoneFlag())); @@ -1262,6 +1302,13 @@ private void processStream() throws Exception { resolve(readTxnId(), false); break; } + case LAST_RECORD_MARKER: { + logger.log(Level.INFO, + "Received last record marker for {0}", + partitionId); + lastRecordMarker = true; + break; + } case EOD : { logger.log(Level.INFO, "Received EOD for {0}", partitionId); @@ -1332,6 +1379,10 @@ private long readTxnId() throws IOException { return stream.readLong(); } + private long readCreationTime() throws IOException { + return stream.readLong(); + } + private long readModificationTime() throws IOException { return stream.readLong(); } @@ -1498,15 +1549,18 @@ public String toString() { private class CopyOp extends Op { final byte[] key; final byte[] value; + final long rowCreationTime; final long modificationTime; final long expirationTime; final boolean isTombstone; CopyOp(byte[] key, byte[] value, + long rowCreationTime, long modificationTime, long expirationTime, boolean isTombstone) { this.key = key; this.value = value; + this.rowCreationTime = rowCreationTime; this.modificationTime = modificationTime; this.expirationTime = expirationTime; this.isTombstone = isTombstone; @@ -1522,7 +1576,8 @@ void execute() { valueEntry.setData(value); partitionDb.put(getBatchTxn(), keyEntry, valueEntry, Put.OVERWRITE, - getWriteOptions(modificationTime, + getWriteOptions(rowCreationTime, + modificationTime, expirationTime, isTombstone)); tracker.addWriteBytes(key.length + value.length, 0); @@ -1570,16 +1625,19 @@ protected Transaction getTransaction() { private class PutOp extends TxnOp { final byte[] key; final byte[] value; + final long rowCreationTime; final long modificationTime; final long expirationTime; final boolean isTombstone; PutOp(long txnId, byte[] key, byte[] value, + long rowCreationTime, long modificationTime, long expirationTime, boolean isTombstone) { super(txnId); this.key = key; this.value = value; + this.rowCreationTime = rowCreationTime; this.modificationTime = modificationTime; this.expirationTime = expirationTime; this.isTombstone = isTombstone; @@ -1595,7 +1653,8 @@ void execute() { valueEntry.setData(value); partitionDb.put(getTransaction(), keyEntry, valueEntry, Put.OVERWRITE, - getWriteOptions(modificationTime, + getWriteOptions(rowCreationTime, + modificationTime, expirationTime, isTombstone)); tracker.addWriteBytes(key.length + value.length, 0); @@ -1820,11 +1879,13 @@ private Reader newReader(DataInputStream stream) { * Returns a JE WriteOptions object set with the specified expirationTime * and tombstone flag. The instance returned is a singleton. */ - private WriteOptions getWriteOptions(long modificationTime, + private WriteOptions getWriteOptions(long rowCreationTime, + long modificationTime, long expirationTime, boolean isTombstone) { /* writeOptions is already initialized with setUpdateTTL(true) */ - return writeOptions.setModificationTime(modificationTime). + return writeOptions.setCreationTime(rowCreationTime). + setModificationTime(modificationTime). 
setExpirationTime(expirationTime, null). setTombstone(isTombstone); } diff --git a/kvmain/src/main/java/oracle/kv/impl/rep/migration/TargetMonitorExecutor.java b/kvmain/src/main/java/oracle/kv/impl/rep/migration/TargetMonitorExecutor.java index bc8f0b39..f2185f3b 100644 --- a/kvmain/src/main/java/oracle/kv/impl/rep/migration/TargetMonitorExecutor.java +++ b/kvmain/src/main/java/oracle/kv/impl/rep/migration/TargetMonitorExecutor.java @@ -32,6 +32,9 @@ import oracle.kv.impl.rep.admin.RepNodeAdminAPI; import oracle.kv.impl.rep.migration.PartitionMigrations.MigrationRecord; import oracle.kv.impl.rep.migration.PartitionMigrations.TargetRecord; +import oracle.kv.impl.test.TestHook; +import oracle.kv.impl.test.TestHookExecute; +import oracle.kv.impl.topo.PartitionId; import oracle.kv.impl.topo.RepGroupId; import oracle.kv.impl.util.KVThreadFactory; import oracle.kv.impl.util.registry.RegistryUtils; @@ -45,7 +48,7 @@ */ public class TargetMonitorExecutor extends ScheduledThreadPoolExecutor { - private final static long POLL_PERIOD = 2L; /* 2 seconds */ + public final static long POLL_PERIOD = 2L; /* 2 seconds */ /** * The core pool size of the executor. ScheduledThreadPoolExecutor acts as a @@ -53,13 +56,20 @@ public class TargetMonitorExecutor extends ScheduledThreadPoolExecutor { * are changing the executor and maximum pool size, the getNonStaleMaster() * method needs to be re-considered. */ - private final static int MONITOR_EXECUTOR_CONCURRENCY = 1; + public final static int MONITOR_EXECUTOR_CONCURRENCY = 1; private final MigrationManager manager; private final RepNode repNode; private final Logger logger; private final RepGroupId sourceRGId; + /** + * Test hook to check that the TargetMonitorExecutor#failed method is + * called in case the partition migration state is set to ERROR in case + * of migration failure. + */ + public static TestHook checkRemoveRecordHook = null; + TargetMonitorExecutor(MigrationManager manager, RepNode repNode, Logger logger) { @@ -517,6 +527,10 @@ private void failed(MigrationRecord record, "target returned {1}, removing completed record", new Object[] {record, state}); } + + /* run test hook in unit test only */ + assert TestHookExecute.doHookIfSet(checkRemoveRecordHook, + record.getPartitionId()); manager.notifyPartitionMigrationRestore(record.getPartitionId()); /* migration failed, will restart, and need to update PGT */ manager.removeRecord(record.getPartitionId(), record.getId(), diff --git a/kvmain/src/main/java/oracle/kv/impl/rep/migration/TransferProtocol.java b/kvmain/src/main/java/oracle/kv/impl/rep/migration/TransferProtocol.java index ffa8e33c..f8b85e34 100644 --- a/kvmain/src/main/java/oracle/kv/impl/rep/migration/TransferProtocol.java +++ b/kvmain/src/main/java/oracle/kv/impl/rep/migration/TransferProtocol.java @@ -52,8 +52,12 @@ public class TransferProtocol { * times. * * Version 6 as of release 23.3, add target description. + * + * Version 7 as of release 25.3, add last record marker and support for + * creation time in row metadata. + * */ - static final int VERSION = 6; + static final int VERSION = 7; /* Constant used to indicate a transfer only request */ static final RepNodeId TRANSFER_ONLY_TARGET = new RepNodeId(0, 0); @@ -355,7 +359,13 @@ public enum OP { * End of Data. The partition migration data transfer is complete and * no further messages will be sent from the source. */ - EOD(6); + EOD(6), + + /** + * Last record marker. This informs that source shard has sent the last + * record. 
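Because the MigrationSource writer and the MigrationTarget.Reader changes above both touch the per-record wire layout, it may help to see the VERSION 7 field order in one place. The sketch below is illustrative only: the byte-sized op code and the length-prefixed entries are assumptions of this sketch (the real encodings are internal to writeOp/writeDbEntry), but the field order for a migration COPY record, op, key, value, creation time, modification time, expiration time, tombstone flag, mirrors this patch, where creation time is the VERSION 7 addition and the times plus the tombstone flag are written only for migrations, not transfer-only streams. The real op codes live in TransferProtocol.OP (EOD=6, LAST_RECORD_MARKER=7), and LAST_RECORD_MARKER and EOD carry no payload.

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

/** Illustrative framing of one migration COPY record; not the real classes. */
public class MigrationWireSketch {

    static void writeCopy(DataOutputStream out, int copyOpCode,
                          byte[] key, byte[] value,
                          long creationTime, long modificationTime,
                          long expirationTime, boolean isTombstone)
        throws IOException {

        out.writeByte(copyOpCode);       /* OP (assumed one-byte encoding here) */
        out.writeInt(key.length);        /* key entry (assumed length-prefixed) */
        out.write(key);
        out.writeInt(value.length);      /* value entry */
        out.write(value);
        out.writeLong(creationTime);     /* new in VERSION 7 */
        out.writeLong(modificationTime);
        out.writeLong(expirationTime);
        out.writeBoolean(isTombstone);
    }

    static void readCopy(DataInputStream in) throws IOException {
        /* The target must consume the fields in exactly the same order. */
        byte[] key = new byte[in.readInt()];
        in.readFully(key);
        byte[] value = new byte[in.readInt()];
        in.readFully(value);
        long creationTime = in.readLong();      /* readCreationTime() in the patch */
        long modificationTime = in.readLong();
        long expirationTime = in.readLong();
        boolean isTombstone = in.readBoolean();
        System.out.printf("copy: %d key bytes, %d value bytes, created=%d, tombstone=%b%n",
                          key.length, value.length, creationTime, isTombstone);
        /* modificationTime/expirationTime would feed the JE WriteOptions on the target. */
    }
}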
+ */ + LAST_RECORD_MARKER(7); private static OP[] VALUES = values(); diff --git a/kvmain/src/main/java/oracle/kv/impl/rep/stats/IntermediateTableSizeUpdate.java b/kvmain/src/main/java/oracle/kv/impl/rep/stats/IntermediateTableSizeUpdate.java index ea5e3221..a72fbb40 100644 --- a/kvmain/src/main/java/oracle/kv/impl/rep/stats/IntermediateTableSizeUpdate.java +++ b/kvmain/src/main/java/oracle/kv/impl/rep/stats/IntermediateTableSizeUpdate.java @@ -20,9 +20,13 @@ import static oracle.kv.impl.systables.TableStatsPartitionDesc.COL_NAME_TABLE_SIZE_WITH_TOMBSTONES; import static oracle.kv.impl.systables.TableStatsPartitionDesc.TABLE_NAME; +import java.util.Collections; import java.util.Map; import java.util.Map.Entry; +import java.util.concurrent.TimeUnit; import java.util.logging.Logger; +import java.util.stream.IntStream; + import oracle.kv.Consistency; import oracle.kv.impl.api.table.TableImpl; @@ -38,8 +42,31 @@ import oracle.kv.table.TableAPI; /** - * Updates the size field in table stats records from data collected during - * operations by the resource collection framework. + * Updates the size field in table stats records from write deltas. The write + * delta is computed by accumulating the record size from put and delete + * operations, which is collected in ResourceCollector. + * + * The update interval is controlled by + * KeyStatsCollector.statsSizeUpdateInterval. + * + * This class also updates the ResourceCollector.partitionOverages parameter, + * which is used to limit the partition size of a table. + * + * To further optimize and reduce the number of stats record operations, the + * update is skipped if the write delta is between NEGATIVE_THRESHOLD_BYTES and + * POSITIVE_THRESHOLD_BYTES. However, this can leave the + * ResourceCollector.partitionOverages state not even eventually consistent. There + * are two sources of inconsistency: (1) The write deltas do not account for + * TTL. (2) The updates done by this class do not tolerate RN failure and master + * transfer. Such inconsistency can only be resolved by reading from the stats + * records recently updated by a partition stats scan, regardless of the + * write delta value. Because a partition stats scan can be done by any RN holding + * a lease, we cannot resolve this issue from local state within this + * RN. Hence, we need to resort to a periodic forced update approach. To + * reduce the number of forced updates as well, the forced update interval + * should be set in proportion to the partition stats scan + * interval. This is controlled by + * KeyStatsCollector.statsSizeForceUpdateInterval. */ class IntermediateTableSizeUpdate { @@ -47,7 +74,7 @@ class IntermediateTableSizeUpdate { new ReadOptions(Consistency.ABSOLUTE, 0, null); private final static long MB = 1024L * 1024L; - + /* * Thresholds for how much delta bytes need to be before we fo an update. * The thresholds are meant reduce the load of intermediate updates. The */ final static long POSITIVE_THRESHOLD_BYTES = 1 * MB; final static long NEGATIVE_THRESHOLD_BYTES = 2 * -MB; - + private final RepNode repNode; private final TableAPI tableAPI; private final Logger logger; + private long lastForcedUpdateNanos = System.nanoTime(); + private volatile boolean stop = false; IntermediateTableSizeUpdate(RepNode repNode, @@ -75,7 +104,7 @@ class IntermediateTableSizeUpdate { /* * Do an intermediate table size update.
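The runUpdate() change is easier to follow as a single predicate: a partition's stats record is rewritten when the accumulated write delta falls outside the two thresholds, or unconditionally once the forced-update interval has elapsed (the caller passes statsSizeForceUpdateInterval, which KeyStatsCollector derives as statsGatherInterval / 10, with a floor of 1). The standalone sketch below re-states that decision using the constants shown in this file; the sample intervals in main() are illustrative.

import java.util.concurrent.TimeUnit;

/** Standalone sketch of the per-partition update decision in IntermediateTableSizeUpdate. */
public class SizeUpdateDecisionSketch {

    static final long MB = 1024L * 1024L;
    static final long POSITIVE_THRESHOLD_BYTES = 1 * MB;   /* as in the patch */
    static final long NEGATIVE_THRESHOLD_BYTES = 2 * -MB;  /* as in the patch */

    /**
     * Returns true if the stats record for a partition should be rewritten.
     *
     * @param totalDeltaBytes accumulated write delta for the partition
     * @param nanosSinceLastForcedUpdate elapsed time since the last forced update
     * @param forcedUpdateIntervalMillis typically statsGatherInterval / 10
     */
    static boolean shouldUpdate(long totalDeltaBytes,
                                long nanosSinceLastForcedUpdate,
                                long forcedUpdateIntervalMillis) {
        final boolean forceUpdate =
            TimeUnit.NANOSECONDS.toMillis(nanosSinceLastForcedUpdate)
                > forcedUpdateIntervalMillis;
        final boolean withinThresholds =
            (totalDeltaBytes < POSITIVE_THRESHOLD_BYTES) &&
            (totalDeltaBytes > NEGATIVE_THRESHOLD_BYTES);
        /* Skip small deltas unless a forced update is due. */
        return !withinThresholds || forceUpdate;
    }

    public static void main(String[] args) {
        /* 512 KB delta, 5 minutes since the last forced update, 1 hour interval: skipped. */
        System.out.println(shouldUpdate(512 * 1024L,
                                        TimeUnit.MINUTES.toNanos(5),
                                        TimeUnit.HOURS.toMillis(1)));   // false
        /* Same delta once the forced-update interval has elapsed: updated. */
        System.out.println(shouldUpdate(512 * 1024L,
                                        TimeUnit.MINUTES.toNanos(90),
                                        TimeUnit.HOURS.toMillis(1)));   // true
    }
}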
*/ - void runUpdate() { + void runUpdate(long forcedUpdateIntervalMillis) { final TableManager tm = repNode.getTableManager(); final TableImpl tableStatsTable = tm.getTable(null, TABLE_NAME, 0); if (tableStatsTable == null) { @@ -89,6 +118,10 @@ void runUpdate() { } logger.fine("Running IntermediateTableSizeUpdate"); + final long sinceLastForcedUpdateMillis = TimeUnit.NANOSECONDS + .toMillis(System.nanoTime() - lastForcedUpdateNanos); + final boolean forceUpdate = + sinceLastForcedUpdateMillis > forcedUpdateIntervalMillis; /* * For each table hierarchy that has a resource collector, update the * table size in each tables stats record. @@ -103,10 +136,14 @@ void runUpdate() { continue; } if (table.isTop()) { - updateSize(table, (TopCollector)entry.getValue(), - collectorMap, tableStatsTable); + updateSize(table, (TopCollector) entry.getValue(), collectorMap, + tableStatsTable, forceUpdate); } } + + if (forceUpdate) { + lastForcedUpdateNanos = System.nanoTime(); + } } /** @@ -116,34 +153,58 @@ void runUpdate() { private void updateSize(TableImpl topTable, TopCollector tc, Map collectorMap, - Table tableStatsTable) { - /* - * For each partition that had activity, update the stats record - * and check for partition limits. - */ - for (int pid : tc.getActivePartitions()) { - final long totalDelta = tc.getTotalSizeDelta(pid); - - /* - * Update only when the change is over a threshold. - */ - if ((totalDelta < POSITIVE_THRESHOLD_BYTES) && - (totalDelta > NEGATIVE_THRESHOLD_BYTES)) { - continue; - } - + Table tableStatsTable, + boolean forceUpdate) { + + final IntStream updatePids; + if (tc.getActivePartitions().isEmpty()) { /* - * Since we are doing an update we can reset the total delta. The - * individual table deltas are reset in updateTable(). + * If there is no write delta, the active partitions will be empty + * (see ResourceCollector#getTotalSizeDelta). In this case, we + * update all partitions on this shard. Always obatin the topology + * considering partition migrations. */ - tc.resetTotalSizeDelta(pid); - final TableSizeResult res = - updateTable(topTable, pid, collectorMap, tableStatsTable); - /* use the table size with tombstone to check the limit */ - tc.checkPartitionLimit(res.sizeTb, pid); + updatePids = repNode.getTopology() + .getPartitionsInShard(repNode.getRepNodeId().getGroupId(), + Collections.emptySet()) + .stream().mapToInt(p -> p.getPartitionId()); + } else { + updatePids = + tc.getActivePartitions().stream().mapToInt(i -> i); + } + updatePids.forEach(i -> updatePartitionSize(topTable, tc, collectorMap, + tableStatsTable, forceUpdate, i)); + } + + private void updatePartitionSize(TableImpl topTable, + TopCollector tc, + Map collectorMap, + Table tableStatsTable, + boolean forceUpdate, + int pid) { + + final long totalDelta = tc.getTotalSizeDelta(pid); + /* + * Update only when the change is over a threshold or is a force update. + */ + if ((totalDelta < POSITIVE_THRESHOLD_BYTES) && + (totalDelta > NEGATIVE_THRESHOLD_BYTES) && + !forceUpdate) + { + return; } + + /* + * Since we are doing an update we can reset the total delta. The + * individual table deltas are reset in updateTable(). + */ + tc.resetTotalSizeDelta(pid); + final TableSizeResult res = + updateTable(topTable, pid, collectorMap, tableStatsTable); + /* Use the table size with tombstone to check the limit. */ + tc.checkPartitionLimit(res.sizeTb, pid); } - + /** * Updates the stats records for the specified table and its children * if there has been activity on the specified partition. 
Returns the @@ -163,7 +224,7 @@ private TableSizeResult updateTable( /* * Size of the partition based on the stats record adjusted by * a size delta from the resource collector. - */ + */ long size = 0L; long sizeTb; @@ -174,9 +235,9 @@ private TableSizeResult updateTable( final PrimaryKey key = tableStatsTable.createPrimaryKey(); key.put(COL_NAME_TABLE_NAME, table.getFullNamespaceName()); key.put(COL_NAME_PARTITION_ID, pid); - + final ResourceCollector rc = collectorMap.get(table.getId()); - + /* * The row could be null if this is a new table and a * partition scan hasn't run since creation. diff --git a/kvmain/src/main/java/oracle/kv/impl/rep/stats/KeyStatsCollector.java b/kvmain/src/main/java/oracle/kv/impl/rep/stats/KeyStatsCollector.java index 9fd45cb1..d5fd0c4d 100644 --- a/kvmain/src/main/java/oracle/kv/impl/rep/stats/KeyStatsCollector.java +++ b/kvmain/src/main/java/oracle/kv/impl/rep/stats/KeyStatsCollector.java @@ -137,7 +137,7 @@ public class KeyStatsCollector implements ParameterListener { private TableAPI tableAPI; private final KVStoreCreator creator; - + final ResourceTracker aggregateThroughputTracker; /* The map is to store the table name and table pairs */ @@ -154,14 +154,46 @@ public class KeyStatsCollector implements ParameterListener { private ScanningThread scanningThread; /* Variables to control scanning which are loaded from parameters */ + + /** + * Whether the stats collection is enabld. + */ private volatile boolean statsEnabled; + /** + * The interval for stats scan. The next scan interval start is computed + * with this parameter which is used to coordinate with the lease. See + * StatsScan#checkLease. + */ private volatile long statsGatherInterval; + /** + * The duration of the lease. Only the RN holding an unexpired lease can do + * the stats scan. + */ private volatile long statsLeaseDuration; - /* It is used to control the sleep time or waiting time in scanning */ + /** + * The duration for the next poll to check if we should start a scan. + */ private volatile long statsSleepWaitDuration; - + /** + * Whether to update table size according to write delta. See + * ResourceCollector.sizeDeltaMap and IntermediateTableSizeUpdate. + */ private volatile boolean updateTableSizes; + /** + * The duration to check whether to do a table size update with + * IntermediateTableSizeUpdate. + */ private volatile long statsSizeUpdateInterval; + /** + * The duration to force a table size update with + * IntermediateTableSizeUpdate. We set this value to 1/10 of the + * statsGatherInterval. The heuristic is to make sure we do a forced update + * pretty soon when the new results of the stats scan from this RN or other + * RNs is available while on the other hand not doing too many forced + * updates. See the comment on the IntermediateTableSizeUpdate class for + * more information. + */ + private volatile long statsSizeForceUpdateInterval; /* TTL for lease records. 
*/ private volatile TimeToLive leaseTTL; @@ -280,13 +312,18 @@ private void loadStatsParametersAndStart(RepNodeParams repNodeParams) { "ms is equal to or greater than the gathering" + " interval, updates will not be done"); updateTableSizes = false; - } else if (statsSizeUpdateInterval < MIN_SIZE_UPDATE_INTERVAL_MS) { + } else if (!testIgnoreMinimumDurations && + statsSizeUpdateInterval < MIN_SIZE_UPDATE_INTERVAL_MS) + { logger.warning("The table size update interval: " + statsSizeUpdateInterval + "ms is less than the minimum, setting it to " + MIN_SIZE_UPDATE_INTERVAL_MS + "ms"); statsSizeUpdateInterval = MIN_SIZE_UPDATE_INTERVAL_MS; } + + statsSizeForceUpdateInterval = + Math.max(statsGatherInterval / 10, 1); } final long hours = TimeUnit.MILLISECONDS.toHours(statsGatherInterval); @@ -398,7 +435,6 @@ private boolean foundUserTables() { * obsolete statistics info from all tables */ private long scan(long intervalStart) { - /* Check whether can scan */ if (isShutdown() || !isRNActive()) { return 0L; @@ -676,7 +712,7 @@ private void updateTableSizes() { updater = tableSizeUpdater; } } - updater.runUpdate(); + updater.runUpdate(statsSizeForceUpdateInterval); } /** @@ -994,13 +1030,13 @@ private Set scanDatabase(Database db, PrimaryKey pKey = null; while ((status == OperationStatus.SUCCESS) && !isShutdown()) { - + /* Track the cost of the read for load management */ final int storageSize = DbInternal.getCursorImpl(cursor).getStorageSize(); aggregateThroughputTracker.addReadBytes(storageSize, false); - + byte[] keyData = keyEntry.getData(); if (target.findTargetTable(keyData) == null) { break; diff --git a/kvmain/src/main/java/oracle/kv/impl/rep/subscription/partreader/PartitionReader.java b/kvmain/src/main/java/oracle/kv/impl/rep/subscription/partreader/PartitionReader.java index 09f5d6f9..1458e815 100644 --- a/kvmain/src/main/java/oracle/kv/impl/rep/subscription/partreader/PartitionReader.java +++ b/kvmain/src/main/java/oracle/kv/impl/rep/subscription/partreader/PartitionReader.java @@ -313,6 +313,12 @@ private void processStream() throws IOException { status.incrCommittedTXNs(); break; } + case LAST_RECORD_MARKER : { + logger.log(Level.INFO, + "Received last record marker for {0}", + new Object[]{partitionId}); + break; + } case EOD : { done = true; logger.log(Level.INFO, "Receive EOD for {0}", diff --git a/kvmain/src/main/java/oracle/kv/impl/rep/table/IndexKeyCreator.java b/kvmain/src/main/java/oracle/kv/impl/rep/table/IndexKeyCreator.java index e82d3174..54e61dce 100644 --- a/kvmain/src/main/java/oracle/kv/impl/rep/table/IndexKeyCreator.java +++ b/kvmain/src/main/java/oracle/kv/impl/rep/table/IndexKeyCreator.java @@ -98,6 +98,7 @@ final void setIndex(IndexImpl newIndex) { public boolean createSecondaryKey(SecondaryDatabase secondaryDb, DatabaseEntry key, DatabaseEntry data, + long creationTime, long modTime, long expTime, int size, @@ -105,6 +106,7 @@ public boolean createSecondaryKey(SecondaryDatabase secondaryDb, byte[] res = index.extractIndexKey(key.getData(), (data != null ? 
data.getData() : null), + creationTime, modTime, expTime, size, @@ -123,6 +125,7 @@ public boolean createSecondaryKey(SecondaryDatabase secondaryDb, public void createSecondaryKeys(SecondaryDatabase secondaryDb, DatabaseEntry key, DatabaseEntry data, + long creationTime, long modTime, long expTime, int size, @@ -140,6 +143,7 @@ public void createSecondaryKeys(SecondaryDatabase secondaryDb, */ List res = index.extractIndexKeys(key.getData(), data.getData(), + creationTime, modTime, expTime, size, diff --git a/kvmain/src/main/java/oracle/kv/impl/rep/table/MaintenanceThread.java b/kvmain/src/main/java/oracle/kv/impl/rep/table/MaintenanceThread.java index bb75a756..fe08bb53 100644 --- a/kvmain/src/main/java/oracle/kv/impl/rep/table/MaintenanceThread.java +++ b/kvmain/src/main/java/oracle/kv/impl/rep/table/MaintenanceThread.java @@ -1241,6 +1241,7 @@ private boolean populate(TableIterate op, lastKey.setData(keyBytes); scanner.getDatabase(). populateSecondaries(txn, lastKey, dentry, + scanner.getCreationTime(), scanner.getModificationTime(), scanner.getExpirationTime(), scanner.getCurrentStorageSize(), diff --git a/kvmain/src/main/java/oracle/kv/impl/rep/table/ResourceCollector.java b/kvmain/src/main/java/oracle/kv/impl/rep/table/ResourceCollector.java index 584eb53a..340830d0 100644 --- a/kvmain/src/main/java/oracle/kv/impl/rep/table/ResourceCollector.java +++ b/kvmain/src/main/java/oracle/kv/impl/rep/table/ResourceCollector.java @@ -48,6 +48,12 @@ */ public abstract class ResourceCollector implements ResourceTracker { + /** + * Scaling the partition size limit for unit testing to avoid creating a + * huge amount of partitions. + */ + public static volatile int partitionSizeLimitScaling = 1; + /* * The amount of time that a throughout cap is valid. After * this time, the cap should be re-calculated. @@ -55,7 +61,7 @@ public abstract class ResourceCollector implements ResourceTracker { * Public for unit test. */ public static final int CAP_TIME_SEC = 7; - + /** * Map of counters to keep track of table size based on operations. * There is a counter per-partition. Entries are added as needed and @@ -86,13 +92,13 @@ public int addWriteBytes(int bytes, } return addWriteBytes(bytes, nIndexWrites); } - + /* * Called when write bytes are added to a partition on any table in * a hierarchy . Subclass can use this call to total the deltas. */ protected abstract void totalDelta(int pid, int deltaBytes); - + /** * Gets the size delta for the specified partition. */ @@ -108,7 +114,7 @@ public long getSizeDelta(int pid) { } return ret; } - + /** * Gets and resets the size delta for the specified partition. */ @@ -124,7 +130,7 @@ public long getAndResetSizeDelta(int pid) { } return ret; } - + /** * Checks if the operation is permitted on the specified partition. * Checks if accessed is permitted and whether a size or throughput limit @@ -134,7 +140,7 @@ public long getAndResetSizeDelta(int pid) { */ public abstract void checkOperation(InternalOperation internalOp, PartitionId partitionId); - + /** * Returns true if there is read or write throttling at this time. If * checkAccess is true, access is checked and an exception is thrown if @@ -143,7 +149,7 @@ public abstract void checkOperation(InternalOperation internalOp, */ public abstract boolean isThrottled(boolean checkAccess, PartitionId partitionId); - + /** * Records table size and throughput rates. 
*/ @@ -173,13 +179,13 @@ public abstract boolean isThrottled(boolean checkAccess, */ public static class TopCollector extends ResourceCollector { private static final int KB = 1024; - + /* Use to convert bytes to MB */ private static final long MB = KB * KB; /* Use to convert bytes to GB */ private static final long GB = MB * KB; - + /* Array size must be power of 2 */ private static final int ARRAY_SIZE = 8; @@ -191,7 +197,7 @@ public static class TopCollector extends ResourceCollector { /* Minimum cap to allow some progress. */ private static final int MIN_THROUGHPUT_CAP = 2; - + private final RepNode repNode; /* @@ -375,7 +381,7 @@ private void updateLocalFactors(Topology topo) { nNodes = (nodes == 0) ? 1 : nodes; nPartitions = topo.getNumPartitions(); } - + /* * Updates the partition size limit. */ @@ -390,10 +396,10 @@ void updatePartitionSizeLimit(int defaultPartitionSizePercent) { int newLimitMB = getPartitionSizeLimitMB(limits.getSizeLimit(), defaultPartitionSizePercent, nPartitions); - + final boolean increased = newLimitMB > partitionSizeLimitMB; partitionSizeLimitMB = newLimitMB; - + /* * If the partition limit increased, clear out any overages to * re-enable writes. Note that the size may still be over the @@ -403,11 +409,11 @@ void updatePartitionSizeLimit(int defaultPartitionSizePercent) { partitionOverages.clear(); } } - + /** * Gets the per-partition size limit based on the input values. * Public access for unit test. - * + * * @param tableLimitGB - table limit * @param percentOver - percent allowed over the limit * @param nPartitions - the number of partitions @@ -419,13 +425,13 @@ public static int getPartitionSizeLimitMB(int tableLimitGB, long limit = tableLimitGB * GB; limit += (limit * percentOver) / 100; limit /= nPartitions; - + /* Convert to MB, rounding up */ int perPartLimitMB = (int)(limit / MB); if ((limit % MB) > 0) { perPartLimitMB++; } - return perPartLimitMB; + return perPartLimitMB / partitionSizeLimitScaling; } /* @@ -439,7 +445,7 @@ private synchronized void updateSize(long newSizeBytes, return; } reportedTableSize = newSizeBytes; - + final TableLimits limits = table.getTableLimits(); if (limits.hasSizeLimit()) { sizeLimitExceeded = @@ -456,7 +462,7 @@ private synchronized void updateSize(long newSizeBytes, sizeReported.set(true); } } - + /** * Returns the set of partitions which have had some write activity * since the last reset. @@ -465,7 +471,7 @@ public Set getActivePartitions() { final Map map = totalDeltaMap; return map.keySet(); } - + /** * Returns the total size delta for the given partition. This includes * deltas from writes to the top level and child tables. @@ -482,7 +488,7 @@ public long getTotalSizeDelta(int pid) { } return ret; } - + /** * Resets the total size delta for the specified partition. Note: This * will only reset the total delta map entry. 
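Since getPartitionSizeLimitMB() now also divides by partitionSizeLimitScaling, a worked example may help: with a 1 GB table limit, 10 percent allowed overage and 100 partitions, the per-partition budget is 1.1 GB / 100 rounded up to whole megabytes, i.e. 12 MB, and a test that sets partitionSizeLimitScaling = 4 would see 3 MB. The sketch below simply re-runs the arithmetic shown in the patch with those illustrative numbers.

/** Re-runs the arithmetic of ResourceCollector.TopCollector.getPartitionSizeLimitMB(). */
public class PartitionSizeLimitExample {

    static final long KB = 1024;
    static final long MB = KB * KB;
    static final long GB = MB * KB;

    static int partitionSizeLimitMB(int tableLimitGB, int percentOver,
                                    int nPartitions, int scaling) {
        long limit = tableLimitGB * GB;
        limit += (limit * percentOver) / 100;   /* allowed overage */
        limit /= nPartitions;
        int perPartLimitMB = (int) (limit / MB);
        if ((limit % MB) > 0) {
            perPartLimitMB++;                   /* round up to whole MB */
        }
        return perPartLimitMB / scaling;        /* scaling is 1 in production */
    }

    public static void main(String[] args) {
        /* 1 GB table limit, 10% overage, 100 partitions -> 12 MB per partition. */
        System.out.println(partitionSizeLimitMB(1, 10, 100, 1));
        /* A unit test setting partitionSizeLimitScaling = 4 would see 3 MB. */
        System.out.println(partitionSizeLimitMB(1, 10, 100, 4));
    }
}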
It relies on a call to @@ -508,7 +514,7 @@ protected void totalDelta(int pid, int deltaBytes) { map.computeIfAbsent(pid, k -> new AtomicLong()); counter.addAndGet(deltaBytes); } - + @Override public int addReadBytes(int bytes, boolean isAbsolute) { /* Record aggregated RN throughput */ @@ -776,7 +782,7 @@ private void checkAccess(TableLimits limits, table.getName(), limits.isReadAllowed(), "Table " + table.getName() + " is read-only"); } - + if (!checkSize) { return; } @@ -799,7 +805,7 @@ private void checkAccess(TableLimits limits, ", size limit: " + limitGB + "GB, table size: " + sizeGB + "GB"); } - + if (partitionId == null) { return; } @@ -825,7 +831,7 @@ private void checkAccess(TableLimits limits, overageMB + "MB"); } } - + /** * Records whether a partition size is over the shard-key size limit. */ @@ -1148,7 +1154,7 @@ public String toString() { } } } - + /** * Collector for child tables. For most operations this class defers to * to the top collector in the hierarchy. The primary use of this instance @@ -1157,13 +1163,13 @@ public String toString() { static class ChildCollector extends ResourceCollector { private final TopCollector topCollector; - + ChildCollector(ResourceTracker parent) { topCollector = (parent instanceof TopCollector) ? (TopCollector)parent : ((ChildCollector)parent).topCollector; } - + @Override protected void totalDelta(int pid, int deltaBytes) { topCollector.totalDelta(pid, deltaBytes); @@ -1188,24 +1194,24 @@ public int getReadKBToAdd(int bytes, boolean isAbsolute) { public void addReadUnits(int units) { topCollector.addReadUnits(units); } - + @Override public void checkOperation(InternalOperation internalOp, PartitionId partitionId) { topCollector.checkOperation(internalOp, partitionId); } - + @Override public boolean isThrottled(boolean checkAccess, PartitionId partitionId) { return topCollector.isThrottled(checkAccess, partitionId); } - + @Override void report(UsageRecord ur, long reportTimeNanos) { topCollector.report(ur, reportTimeNanos); } - + @Override public String toString() { return "ChildCollector[]"; diff --git a/kvmain/src/main/java/oracle/kv/impl/rep/table/TableManager.java b/kvmain/src/main/java/oracle/kv/impl/rep/table/TableManager.java index e0b57b3a..2ed0944f 100644 --- a/kvmain/src/main/java/oracle/kv/impl/rep/table/TableManager.java +++ b/kvmain/src/main/java/oracle/kv/impl/rep/table/TableManager.java @@ -1343,10 +1343,10 @@ public Collection getSecondaries( if (skip) { continue; } - - //logger.info("Updating Index " + dbName); } + // logger.info("XXX Updating Index " + dbName + " with id = " + idxId); + final SecondaryDatabase db = getSecondaryDb(dbName); if (db == null) { /* Throwing RNUnavailableException should cause a retry */ diff --git a/kvmain/src/main/java/oracle/kv/impl/security/AccessCheckUtils.java b/kvmain/src/main/java/oracle/kv/impl/security/AccessCheckUtils.java index e3672c44..9d32d7a3 100644 --- a/kvmain/src/main/java/oracle/kv/impl/security/AccessCheckUtils.java +++ b/kvmain/src/main/java/oracle/kv/impl/security/AccessCheckUtils.java @@ -83,7 +83,7 @@ private abstract static class PlanContext extends ResourceContext { * Constructs a OperationContext of plan operation. 
* * @param plan plan to be operated - * @param desc description of the operation + * @param opDesc description of the operation */ PlanContext(Plan plan, String opDesc) { super(plan, "Plan"); @@ -266,10 +266,10 @@ public static void logSecurityError(KVSecurityException kvse, * @param execCtx execution context * @param logger RateLimitingLogger to eliminate excessive log message */ - public static void logSecurityError(String msg, - String opDesc, - ExecutionContext execCtx, - RateLimitingLogger logger) { + static void logSecurityError(String msg, + String opDesc, + ExecutionContext execCtx, + RateLimitingLogger logger) { if (logger.getInternalLogger() != null) { String authHost = ""; String userName = ""; diff --git a/kvmain/src/main/java/oracle/kv/impl/security/KVStorePrivilegeLabel.java b/kvmain/src/main/java/oracle/kv/impl/security/KVStorePrivilegeLabel.java index 5aa2d7d2..3da831c4 100644 --- a/kvmain/src/main/java/oracle/kv/impl/security/KVStorePrivilegeLabel.java +++ b/kvmain/src/main/java/oracle/kv/impl/security/KVStorePrivilegeLabel.java @@ -135,27 +135,27 @@ public enum KVStorePrivilegeLabel implements FastExternalizable { READ_TABLE(18, TABLE), /** - * Privilege for deleting key-values in a specific tables + * Privilege for deleting key-values in a specific table */ DELETE_TABLE(19, TABLE), /** - * Privilege for putting key-values in a specific tables + * Privilege for putting key-values in a specific table */ INSERT_TABLE(20, TABLE), /** - * Privilege for evolving a specific tables + * Privilege for evolving a specific table */ EVOLVE_TABLE(21, TABLE), /** - * Privilege for creating index on a specific tables + * Privilege for creating index on a specific table */ CREATE_INDEX(22, TABLE), /** - * Privilege for dropping index on a specific tables + * Privilege for dropping index on a specific table */ DROP_INDEX(23, TABLE), diff --git a/kvmain/src/main/java/oracle/kv/impl/security/NamespacePrivilege.java b/kvmain/src/main/java/oracle/kv/impl/security/NamespacePrivilege.java index b20c90dc..e7e4c384 100644 --- a/kvmain/src/main/java/oracle/kv/impl/security/NamespacePrivilege.java +++ b/kvmain/src/main/java/oracle/kv/impl/security/NamespacePrivilege.java @@ -151,7 +151,7 @@ public String getNamespace() { * Gets a specific namespace privilege instance according to the specific * label and namespace information. It is used in the case that builds a * namespace privilege instance according to user-input privilege name. In - * other cases, it is recommend to directly get the instances via + * other cases, it is recommended to directly get the instances via * constructors for efficiency. * * @param privLabel label of the privilege diff --git a/kvmain/src/main/java/oracle/kv/impl/security/TablePrivilege.java b/kvmain/src/main/java/oracle/kv/impl/security/TablePrivilege.java index 0363f2f1..b8a621ae 100644 --- a/kvmain/src/main/java/oracle/kv/impl/security/TablePrivilege.java +++ b/kvmain/src/main/java/oracle/kv/impl/security/TablePrivilege.java @@ -118,6 +118,42 @@ public TablePrivilege createPrivilege(long id, }); } + /* + * A convenient map of table privilege label and the implying namespace + * privilege label. 
+ */ + private static final EnumMap + tableNsPrivsMap = new EnumMap<>(KVStorePrivilegeLabel.class); + static { + tableNsPrivsMap.put(KVStorePrivilegeLabel.DELETE_TABLE, + KVStorePrivilegeLabel.DELETE_IN_NAMESPACE); + tableNsPrivsMap.put(KVStorePrivilegeLabel.READ_TABLE, + KVStorePrivilegeLabel.READ_IN_NAMESPACE); + tableNsPrivsMap.put(KVStorePrivilegeLabel.INSERT_TABLE, + KVStorePrivilegeLabel.INSERT_IN_NAMESPACE); + tableNsPrivsMap.put(KVStorePrivilegeLabel.EVOLVE_TABLE, + KVStorePrivilegeLabel.EVOLVE_TABLE_IN_NAMESPACE); + tableNsPrivsMap.put(KVStorePrivilegeLabel.CREATE_INDEX, + KVStorePrivilegeLabel.CREATE_INDEX_IN_NAMESPACE); + tableNsPrivsMap.put(KVStorePrivilegeLabel.DROP_INDEX, + KVStorePrivilegeLabel.DROP_INDEX_IN_NAMESPACE); + } + + /** + * Get implying namespace privilege label of given table privilege label. + */ + public static KVStorePrivilegeLabel + implyingNamespacePrivLabel(KVStorePrivilegeLabel tablePrivLabel) { + final KVStorePrivilegeLabel nsPrivLabel = + tableNsPrivsMap.get(tablePrivLabel); + if (nsPrivLabel == null) { + throw new IllegalStateException( + "Privilege implication code error, " + tablePrivLabel + + " doesn't have an implying namespace privilege label defined"); + } + return nsPrivLabel; + } + private TablePrivilege(KVStorePrivilegeLabel privLabel, long tableId, String tableNamespace, @@ -158,11 +194,12 @@ public void writeFastExternal(DataOutput out, short serialVersion) * Gets a specific table privilege instance according to the specific * label and table information. It is used in the case that builds a table * privilege instance according to user-input privilege name. In other - * cases, it is recommend to directly get the instances via constructors + * cases, it is recommended to directly get the instances via constructors * for efficiency. * * @param privLabel label of the privilege * @param tableId table id + * @param tableNamespace table namespace * @param tableName table name * @return table privilege instance specified by the label */ diff --git a/kvmain/src/main/java/oracle/kv/impl/sna/ProcessServiceManager.java b/kvmain/src/main/java/oracle/kv/impl/sna/ProcessServiceManager.java index 924f1b06..6b6a0afd 100644 --- a/kvmain/src/main/java/oracle/kv/impl/sna/ProcessServiceManager.java +++ b/kvmain/src/main/java/oracle/kv/impl/sna/ProcessServiceManager.java @@ -17,6 +17,7 @@ import java.io.File; import java.io.FileOutputStream; +import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -148,7 +149,7 @@ protected void onExit() { } @Override - protected void onRestart() { + protected void onRestart() { final ResourceId rId = service.getResourceId(); if (rId instanceof RepNodeId) { @@ -161,7 +162,20 @@ protected void onRestart() { generateStatusChange(ServiceStatus.ERROR_RESTARTING); } service.resetHandles(); - service.resetParameters(false); + try { + service.resetParameters(false); + } catch (IllegalStateException illegalStateException) { + if (illegalStateException.getCause() != null && + (illegalStateException.getCause() instanceof IOException)) { + /* + * ignore file IO exception. Ignore file IO exception since + * the configuration file may be corrupted or removed which + * is the exact cause of the shutdown. 
+ */ + logger.warning("Reset parameter filed with IO exception: " + + illegalStateException.getMessage()); + } + } if (service.resetOnRestart() || (createExecArgsHook != null)) { mgr.reset(); } diff --git a/kvmain/src/main/java/oracle/kv/impl/tif/FeederSubscriptionCbk.java b/kvmain/src/main/java/oracle/kv/impl/tif/FeederSubscriptionCbk.java index 7c705ec2..dc1690c2 100644 --- a/kvmain/src/main/java/oracle/kv/impl/tif/FeederSubscriptionCbk.java +++ b/kvmain/src/main/java/oracle/kv/impl/tif/FeederSubscriptionCbk.java @@ -52,6 +52,10 @@ class FeederSubscriptionCbk implements SubscriptionCallback { * @param dbId id of database the entry belongs to * @param ts timestamp of the last update * @param exp expiration time in ms, not used in TIF + * @param beforeImgEnabled unused + * @param valBeforeImg unused + * @param tsBeforeImg unused + * @param expBeforeImg unused */ @Override public void processPut(long vlsn, byte[] key, byte[] value, long txnId, @@ -70,6 +74,10 @@ public void processPut(long vlsn, byte[] key, byte[] value, long txnId, * @param txnId id of txn the entry belongs to * @param dbId id of database the entry belongs to * @param ts timestamp of the last update + * @param beforeImgEnabled unused + * @param valBeforeImg unused + * @param tsBeforeImg unused + * @param expBeforeImg unused */ @Override public void processDel(long vlsn, byte[] key, byte[] val, long txnId, @@ -84,9 +92,10 @@ public void processDel(long vlsn, byte[] key, byte[] val, long txnId, * * @param vlsn VLSN of commit entry * @param txnId id of txn to commit + * @param ts timestamp of commit */ @Override - public void processCommit(long vlsn, long txnId) { + public void processCommit(long vlsn, long txnId, long ts) { processEntry(new DataItem(vlsn, txnId, LOG_TXN_COMMIT)); } @@ -95,9 +104,10 @@ public void processCommit(long vlsn, long txnId) { * * @param vlsn VLSN of abort entry * @param txnId id of txn to abort + * @param ts timestamp of abort */ @Override - public void processAbort(long vlsn, long txnId) { + public void processAbort(long vlsn, long txnId, long ts) { processEntry(new DataItem(vlsn, txnId, LOG_TXN_ABORT)); } diff --git a/kvmain/src/main/java/oracle/kv/impl/util/SerialVersion.java b/kvmain/src/main/java/oracle/kv/impl/util/SerialVersion.java index 60cd8437..68ad7a06 100644 --- a/kvmain/src/main/java/oracle/kv/impl/util/SerialVersion.java +++ b/kvmain/src/main/java/oracle/kv/impl/util/SerialVersion.java @@ -622,12 +622,33 @@ public class SerialVersion { public static final short QUERY_VERSION_17 = V37; + /** + * Introduced at R25.3/V38 + * - Support before image for tables in Streams API + * - Support for row metadata + * - Introduces row creationTime + */ + public static final short V38 = 38; + static { init(V38, KVVersion.R25_3); } + + /** + * New support of before image for tables + */ + public static final short BEFORE_IMAGE_VERSION = V38; + public static final short ROW_METADATA_VERSION = V38; + public static final short QUERY_VERSION_18 = V38; + + /** + * Introduces row creationTime + */ + public static final short CREATION_TIME_VER = V38; + /** * When adding a new version and updating DEFAULT_CURRENT, be sure to make * corresponding changes in KVVersion as well as the files referenced from * there to add a new release version. See {@link KVVersion#CURRENT_VERSION} */ - private static final short DEFAULT_CURRENT = V37; + private static final short DEFAULT_CURRENT = V38; /* * The default earliest supported serial version. 
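For illustration only (not part of the patch): the new V38 constants above are normally consulted to gate serialization of the new fields on the negotiated serial version. A minimal sketch, assuming a writer that follows the writeFastExternal(DataOutput, short) convention used elsewhere in kvmain; the CreationTimeWriter class and its creationTime field are hypothetical:

    // Hypothetical holder that gates the new creation time on serial version;
    // only peers that negotiated R25.3 (V38) or later receive the field.
    import java.io.DataOutput;
    import java.io.IOException;
    import oracle.kv.impl.util.SerialVersion;

    class CreationTimeWriter {
        private final long creationTime;

        CreationTimeWriter(long creationTime) {
            this.creationTime = creationTime;
        }

        void writeFastExternal(DataOutput out, short serialVersion)
            throws IOException {
            if (serialVersion >= SerialVersion.CREATION_TIME_VER) {
                /* new in V38: row creation time in ms since the epoch */
                out.writeLong(creationTime);
            }
            /* pre-V38 peers see the old wire format unchanged */
        }
    }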
diff --git a/kvmain/src/main/java/oracle/kv/impl/xregion/agent/RegionAgentConfig.java b/kvmain/src/main/java/oracle/kv/impl/xregion/agent/RegionAgentConfig.java index 57a3f9a0..374409eb 100644 --- a/kvmain/src/main/java/oracle/kv/impl/xregion/agent/RegionAgentConfig.java +++ b/kvmain/src/main/java/oracle/kv/impl/xregion/agent/RegionAgentConfig.java @@ -192,7 +192,7 @@ RegionInfo getTarget() { * Gets the set of tables that the agent needs to subscribe at the * beginning * - * @return the set of subscribed tables + * @return array of subscribed tables */ String[] getTables() { return tables.toArray(new String[0]); @@ -292,7 +292,7 @@ public String toString() { ", ckpt enabled=" + enableCkptTable + ", ckpt interval ops=" + ckptIntvOps + ", ckpt interval secs=" + ckptIntvSecs + - ", local write only=" + localWritesOnly; + ", local writes only=" + localWritesOnly; } /** @@ -344,8 +344,7 @@ public static class Builder { private String securityConfig; /** - * Unit test only, - * + * Unit test only * true if checkpoint is enabled, false otherwise. */ private boolean enableCkptTable; @@ -521,7 +520,7 @@ public Builder setTables(Set tbs) { * * @return this instance */ - Builder setLocalWritesOnly(boolean localWritesOnly) { + public Builder setLocalWritesOnly(boolean localWritesOnly) { this.localWritesOnly = localWritesOnly; return this; } diff --git a/kvmain/src/main/java/oracle/kv/impl/xregion/agent/RegionAgentThread.java b/kvmain/src/main/java/oracle/kv/impl/xregion/agent/RegionAgentThread.java index d0548414..1303680f 100644 --- a/kvmain/src/main/java/oracle/kv/impl/xregion/agent/RegionAgentThread.java +++ b/kvmain/src/main/java/oracle/kv/impl/xregion/agent/RegionAgentThread.java @@ -368,6 +368,7 @@ protected int initiateSoftShutdown() { public void run() { logger.info(lm("Region agent starts, mode=" + config.getStartMode() + + ", local writes only=" + config.isLocalWritesOnly() + ", tables=" + Arrays.toString(config.getTables()))); try { @@ -777,6 +778,14 @@ public RegionInfo getHost() { return config.getHost(); } + /** + * Unit test only + * @return if cascading replication is turned on + */ + public boolean isCascadingReplication() { + return !config.isLocalWritesOnly(); + } + /** * Returns the target region * diff --git a/kvmain/src/main/java/oracle/kv/impl/xregion/agent/mrt/MRTSubscriber.java b/kvmain/src/main/java/oracle/kv/impl/xregion/agent/mrt/MRTSubscriber.java index 5fbd8d9f..76463fad 100644 --- a/kvmain/src/main/java/oracle/kv/impl/xregion/agent/mrt/MRTSubscriber.java +++ b/kvmain/src/main/java/oracle/kv/impl/xregion/agent/mrt/MRTSubscriber.java @@ -144,7 +144,7 @@ public class MRTSubscriber extends BaseRegionAgentSubscriber { * the hash of the operation's primary key modulo the number of queues. * These queues are used to make sure that operations on the same key are * performed in order. - * + *

    * Since Java guarantees that access to an immutable object stored in a * final field is thread safe after construction, access to the array * elements is safe because they are only set in the constructor. But that @@ -882,11 +882,25 @@ private Row convertRow(Row srcRow, /* set new region id */ ((RowImpl) tgtRow).setRegionId(regionId); - /* log loopback rows, may need a separate stat */ + /* set the row metadata */ + tgtRow.setRowMetadata(srcRow.getRowMetadata()); + /* log looped back rows and track stats */ if (regionId == Region.LOCAL_REGION_ID) { final String tb = tgtRow.getTable().getFullNamespaceName(); - final String msg = "Loopback rows from table=" + tb + " in " + - (initialization ? "transfer" : "streaming"); + final MRTableMetrics tbm = getMetrics().getTableMetrics(tb); + if (!initialization) { + /* only track loop back stats in streaming */ + if (StreamOperation.Type.PUT.equals(type)) { + tbm.incrLoopbackPuts(1); + } else { + tbm.incrLoopbackDels(1); + } + } + final String stage = initialization ? "transfer" : "streaming"; + final String msg = "Looped back writes from table=" + tb + + ", type=" + type + + ", source region=" + srcRegion + + ", stage=" + stage; rlLogger.log(tb, Level.INFO, lm(msg)); } @@ -1185,7 +1199,8 @@ private void putDelResolveComplete(StreamOperation source, final String msg = "Cannot write to table=" + tbName + " at target store=" + tgtStoreName + " after # of attempts=" + attempts + - ", will retry, error=" + fe; + ", will retry, waitMs=" + WAIT_BEFORE_RETRY_MS + + ", error=" + fe; rlLogger.log(tbName + fe.getFaultClassName(), Level.WARNING, lm(msg)); scheduleRetry(source, tgtRow.getTable(), attempts); diff --git a/kvmain/src/main/java/oracle/kv/impl/xregion/service/JsonConfig.java b/kvmain/src/main/java/oracle/kv/impl/xregion/service/JsonConfig.java index 58936c50..1c7ba97d 100644 --- a/kvmain/src/main/java/oracle/kv/impl/xregion/service/JsonConfig.java +++ b/kvmain/src/main/java/oracle/kv/impl/xregion/service/JsonConfig.java @@ -188,8 +188,14 @@ public class JsonConfig implements Serializable { /** Checkpoint interval in streamed ops */ public static final int DEFAULT_CKPT_INTERVAL_OPS = 1024 * 1024; private int checkpointIntvOps = DEFAULT_CKPT_INTERVAL_OPS; - - + /** + * True if cascading replication is on. That means, when an agent + * stream changes from a remote region, in addition to all changes that + * originated on that remote region, all changes originated from other + * regions and replicated to the remote region will also be streamed. The + * default if false, meaning cascading replication is off. + */ + private boolean cascadingReplication = false; /* derived agent root path, not part of json config file */ private transient String agentRoot; @@ -434,6 +440,14 @@ public String getDurability() { return durability; } + /** + * Returns true if cascading replication is on, or false otherwise + * @return if cascading replication is on. + */ + public boolean getCascadingRep() { + return cascadingReplication; + } + /** * Returns initial heap size in MB if the XRegion service is running in * background @@ -506,6 +520,16 @@ public void setDurability(String durability) { this.durability = durability; } + /** + * Test only + * Sets if cascading replication should be turned on or not + * @param cascadingReplication true if cascading replication is on, false + * otherwise. 
+ */ + public void setCascadingReplication(boolean cascadingReplication) { + this.cascadingReplication = cascadingReplication; + } + /** * Test only * diff --git a/kvmain/src/main/java/oracle/kv/impl/xregion/service/MRTableMetrics.java b/kvmain/src/main/java/oracle/kv/impl/xregion/service/MRTableMetrics.java index 6afeba0c..b5abb0c8 100644 --- a/kvmain/src/main/java/oracle/kv/impl/xregion/service/MRTableMetrics.java +++ b/kvmain/src/main/java/oracle/kv/impl/xregion/service/MRTableMetrics.java @@ -56,6 +56,10 @@ public class MRTableMetrics extends JsonMetricsHeader { private volatile long streamBytes = 0; /** # of persisted streamed bytes to target */ private volatile long persistStreamBytes = 0; + /** # of looped back puts */ + private volatile long loopbackPuts = 0; + /** # of looped back deletes */ + private volatile long loopbackDels = 0; /** per-region initialization statistics */ private final ConcurrentMap initialization = @@ -125,6 +129,14 @@ public long getPersistStreamBytes() { return persistStreamBytes; } + public long getLoopbackPuts() { + return loopbackPuts; + } + + public long getLoopbackDels() { + return loopbackDels; + } + /* used by json */ public Map getInitialization() { return new ConcurrentSkipListMap<>(initialization); @@ -140,6 +152,8 @@ public void aggregateStreamStat(@NonNull MRTableMetrics metrics) { incrDels(metrics.getDels()); incrWinPuts(metrics.getWinPuts()); incrWinDels(metrics.getWinDels()); + incrLoopbackPuts(metrics.getLoopbackPuts()); + incrLoopbackDels(metrics.getLoopbackDels()); incrIncompatibleRows(metrics.getIncompatibleRows()); incrStreamBytes(metrics.getStreamBytes()); incrPersistStreamBytes(metrics.getPersistStreamBytes()); @@ -181,6 +195,14 @@ public synchronized void incrPersistStreamBytes(long delta) { persistStreamBytes += delta; } + public synchronized void incrLoopbackPuts(long delta) { + loopbackPuts += delta; + } + + public synchronized void incrLoopbackDels(long delta) { + loopbackDels += delta; + } + @Override public String toString() { return JsonUtils.print(this, true); @@ -199,6 +221,8 @@ public boolean equals(Object obj) { dels == other.dels && winPuts == other.winPuts && winDels == other.winDels && + loopbackPuts == other.loopbackPuts && + loopbackDels == other.loopbackDels && streamBytes == other.streamBytes && persistStreamBytes == other.persistStreamBytes && incompatibleRows == other.incompatibleRows && @@ -214,10 +238,11 @@ public int hashCode() { Long.hashCode(dels) + Long.hashCode(winPuts) + Long.hashCode(winDels) + + Long.hashCode(loopbackPuts) + + Long.hashCode(loopbackDels) + Long.hashCode(streamBytes) + Long.hashCode(persistStreamBytes) + Long.hashCode(incompatibleRows) + initialization.hashCode(); } - } diff --git a/kvmain/src/main/java/oracle/kv/impl/xregion/service/ReqRespManager.java b/kvmain/src/main/java/oracle/kv/impl/xregion/service/ReqRespManager.java index 895c1b7b..760dcba1 100644 --- a/kvmain/src/main/java/oracle/kv/impl/xregion/service/ReqRespManager.java +++ b/kvmain/src/main/java/oracle/kv/impl/xregion/service/ReqRespManager.java @@ -857,6 +857,14 @@ private void submitRegionRequest(long reqId, if (regions.isEmpty()) { /* an unknown remote region */ final String region = mdMan.getRegionName(rid); + if (mdMan.getJsonConf().getCascadingRep()) { + final String msg = "Region=" + region + " in req id=" + reqId + + " is unknown, Ok since cascading " + + "replication is on"; + logger.info(lm(logPrefix(reqId) + msg)); + resp.postSuccResp(); + return; + } final String err = "Region=" + region + " in req id=" + reqId + 
" is unknown, please add to the config file " + "before creating it"; diff --git a/kvmain/src/main/java/oracle/kv/impl/xregion/service/ServiceMDMan.java b/kvmain/src/main/java/oracle/kv/impl/xregion/service/ServiceMDMan.java index 5d8c8aa7..59db79bf 100644 --- a/kvmain/src/main/java/oracle/kv/impl/xregion/service/ServiceMDMan.java +++ b/kvmain/src/main/java/oracle/kv/impl/xregion/service/ServiceMDMan.java @@ -59,6 +59,7 @@ import oracle.kv.impl.api.table.FieldDefImpl; import oracle.kv.impl.api.table.TableAPIImpl; import oracle.kv.impl.api.table.TableImpl; +import oracle.kv.impl.pubsub.PublishingUnit; import oracle.kv.impl.systables.MRTableInitCkptDesc; import oracle.kv.impl.test.ExceptionTestHook; import oracle.kv.impl.test.ExceptionTestHookExecute; @@ -1694,14 +1695,38 @@ public Set

    getTablesResumeInit(String region) { * @return set of table checkpoints */ private Set readInitCkptWithRetry(String region) { + int attempt = 0; while (!closed) { try { - return readAllInitCkpt(region); + attempt++; + final Set ret = readAllInitCkpt(region); + final int finalAttempt = attempt; + logger.fine(() -> lm("Fetched all table init checkpoint for " + + "region=" + region + + ", attempts=" + finalAttempt)); + return ret; } catch (StoreIteratorException | FaultException exp) { - logger.info(lm("Rescan the table checkpoints, error=" + exp + + logger.info(lm("Rescan the table checkpoints" + + ", after sleep ms=" + POLL_INTERVAL_MS + + ", region=" + region + + ", attempt=" + attempt + + ", error=" + exp + ", cause=" + exp.getCause())); + try { + synchronized (this) { + wait(POLL_INTERVAL_MS); + } + } catch (InterruptedException e) { + logger.info(lm("Interrupted in sleeping in reading table" + + " init checkpoint for region=" + region + + ", attempts=" + attempt)); + break; + } } } + logger.info(lm("Return empty table init checkpoint for " + + "region=" + region + ", service md manager" + + " closed=" + closed + ", attempts=" + attempt)); return Collections.emptySet(); } @@ -2064,10 +2089,7 @@ public static Set getTbNames(Collection
    tables) { /* Get all tables in the hierarchy, including the top level table. */ public void getAllChildTables(Table topLevelTable, Set
    tables) { - tables.add(topLevelTable); - for (Table child : topLevelTable.getChildTables().values()) { - getAllChildTables(child, tables); - } + PublishingUnit.getAllChildTables(topLevelTable, tables); } /** @@ -2079,7 +2101,7 @@ public void getAllChildTables(Table topLevelTable, Set
    tables) { */ public TableInitCheckpoint readCkptAnyAgent(String table, String region) { - final Set allCkpt = readAllInitCkpt(region); + final Set allCkpt = readInitCkptWithRetry(region); final Set ckpt = allCkpt.stream().filter(t -> t.getTable().equals(table)) .collect(Collectors.toSet()); diff --git a/kvmain/src/main/java/oracle/kv/impl/xregion/service/StatsManager.java b/kvmain/src/main/java/oracle/kv/impl/xregion/service/StatsManager.java index 6ce7550e..54c28554 100644 --- a/kvmain/src/main/java/oracle/kv/impl/xregion/service/StatsManager.java +++ b/kvmain/src/main/java/oracle/kv/impl/xregion/service/StatsManager.java @@ -37,16 +37,20 @@ import oracle.kv.impl.api.KVStoreImpl; import oracle.kv.impl.param.DurationParameter; import oracle.kv.impl.pubsub.NoSQLSubscriptionImpl; +import oracle.kv.impl.pubsub.PublishingUnit; +import oracle.kv.impl.pubsub.ReplicationStreamConsumer; import oracle.kv.impl.util.KVThreadFactory; import oracle.kv.impl.util.ScheduleStart; import oracle.kv.impl.util.server.LoggerUtils; import oracle.kv.impl.xregion.agent.BaseRegionAgentMetrics; import oracle.kv.impl.xregion.agent.BaseRegionAgentSubscriber; import oracle.kv.impl.xregion.agent.RegionAgentThread; +import oracle.kv.impl.xregion.agent.mrt.MRTSubscriber; import oracle.kv.impl.xregion.stat.JsonRegionStat; import oracle.kv.impl.xregion.stat.ReqRespStat; import oracle.kv.impl.xregion.stat.TableInitStat; import oracle.kv.pubsub.NoSQLSubscriberId; +import oracle.kv.pubsub.StreamPosition; import oracle.kv.table.Row; import oracle.kv.table.Table; import oracle.kv.table.TableAPI; @@ -230,7 +234,6 @@ public void shutdown() { "failed, error=" + ie)); } executor.shutdown(); - logger.fine(() -> lm("Stats report manager shuts down")); } @@ -282,6 +285,8 @@ public long collect() { /* build service aggregate stat */ collectServiceAgentStat(ams, now); + /* log memory stat */ + logMemStat(); return now; } @@ -351,6 +356,12 @@ private void collectServiceAgentStat(Set ams, sm.setDels(all.stream().mapToLong(MRTableMetrics::getDels).sum()); sm.setWinPuts(all.stream().mapToLong(MRTableMetrics::getWinPuts).sum()); sm.setWinDels(all.stream().mapToLong(MRTableMetrics::getWinDels).sum()); + sm.setLoopbackPuts(all.stream() + .mapToLong(MRTableMetrics::getLoopbackPuts) + .sum()); + sm.setLoopbackDels(all.stream() + .mapToLong(MRTableMetrics::getLoopbackDels) + .sum()); sm.setStreamBytes( all.stream().mapToLong(MRTableMetrics::getStreamBytes) .sum()); @@ -596,4 +607,109 @@ public void setSysTable(Table sys) { } sysTable = sys; } + + private void logMemStat() { + final StringBuilder sb = new StringBuilder("Memory stat:\n"); + sb.append(getHeapStat()).append("\n"); + for (RegionAgentThread ra : parent.getAllAgents()) { + sb.append(getRegionStat(ra)).append("\n"); + } + logger.info(lm(sb.toString())); + } + + private String getHeapStat() { + final long heapSize = Runtime.getRuntime().totalMemory(); + final long heapMaxSize = Runtime.getRuntime().maxMemory(); + final long heapFreeSize = Runtime.getRuntime().freeMemory(); + final float usage = (heapSize * 1.0f / heapMaxSize) * 100; + final int nproc = Runtime.getRuntime().availableProcessors(); + return "[JVM] #processors=" + nproc + + ", heap size=" + heapSize + + ", max heap="+ heapMaxSize + + ", free heap=" + heapFreeSize + + ", usage percent=" + usage; + } + + private String getRegionStat(RegionAgentThread ra) { + final String regionName = ra.getSourceRegion().getName(); + final StringBuilder sb = new StringBuilder("Region=" + regionName); + final MRTSubscriber sub = 
ra.getSubscriber(); + if (sub == null) { + return sb.append("No MR subscriber").toString(); + } + final NoSQLSubscriptionImpl stream = + (NoSQLSubscriptionImpl) sub.getSubscription(); + if (stream == null) { + return sb.append("Stream not created").toString(); + } + final PublishingUnit pu = stream.getParentPU(); + if (pu == null) { + return sb.append("Stream not initialized").toString(); + } + + /* output queue stat */ + final PublishingUnit.BoundedOutputQueue queue = pu.getOutputQueue(); + if (queue == null) { + return sb.append("Output queue not initialized").toString(); + } + sb.append("\n").append("\t"); + final long currSizeBytes = queue.getCurrSizeBytes(); + final long maxSizeBytes = queue.getMaxSizeBytes(); + final long queueOps = queue.getBoundedQueue().size(); + final float full = (currSizeBytes * 1.0f) / maxSizeBytes * 100; + sb.append("[OutputQueueStat] #queued ops=").append(queueOps) + .append(", curr size=").append(currSizeBytes) + .append(", max size=").append(maxSizeBytes) + .append(", fullness%=").append(full); + + /* OTB stat */ + sb.append("\n").append("\t"); + sb.append("[OTB] total size bytes=").append(getOTBSize(pu)) + .append(", #shards=").append(pu.getConsumers().size()); + + /* VLSN stat */ + sb.append("\n").append("\t"); + sb.append("[VLSN]").append(getVLSNStat(pu)); + return sb.toString(); + } + + private long getOTBSize(PublishingUnit pu) { + if (pu == null) { + return 0; + } + return pu.getConsumers().values().stream() + .mapToLong(rsc -> rsc.getTxnBuffer() + .computeSize()) + .sum(); + } + + private String getVLSNStat(PublishingUnit pu) { + if (pu == null) { + return "Stream not initialized"; + } + final MRTSubscriber sub = (MRTSubscriber) pu.getSubscriber(); + if (sub == null) { + return "No MR subscriber"; + } + final NoSQLSubscriptionImpl subscription = + (NoSQLSubscriptionImpl) sub.getSubscription(); + if (subscription == null) { + return "Stream not created"; + } + final StringBuilder ret = new StringBuilder(); + final StreamPosition curr = subscription.getCurrentPosition(); + for (ReplicationStreamConsumer rsc : pu.getConsumers().values()) { + final int gid = rsc.getRepGroupId().getGroupId(); + final long lastStreamVLSN = rsc.getRSCStat().getLastStreamedVLSN(); + final StreamPosition.ShardPosition sp = curr.getShardPosition(gid); + final long lastDeliveredVLSN = (sp == null ? 
0 : sp.getVLSN()); + final long diff = lastStreamVLSN - lastDeliveredVLSN; + ret.append(" shard=").append(gid) + .append("[last streamed=").append(lastStreamVLSN) + .append(", last delivered=").append(lastDeliveredVLSN) + .append(", #buffered=").append(diff) + .append("]"); + } + return ret.toString(); + } } diff --git a/kvmain/src/main/java/oracle/kv/impl/xregion/service/XRegionService.java b/kvmain/src/main/java/oracle/kv/impl/xregion/service/XRegionService.java index 369ce04a..53b01b99 100644 --- a/kvmain/src/main/java/oracle/kv/impl/xregion/service/XRegionService.java +++ b/kvmain/src/main/java/oracle/kv/impl/xregion/service/XRegionService.java @@ -995,7 +995,8 @@ private void createRegionAgent(RegionInfo source, new RegionAgentConfig.Builder(sid, type, mode, host, source, target, config) .setTables(ServiceMDMan.getTbNames(tables)) - .setSecurityConfig(source.getSecurity()); + .setSecurityConfig(source.getSecurity()) + .setLocalWritesOnly(!config.getCascadingRep()); final RegionAgentConfig conf = builder.build(); logger.info(lm("Agent (source region=" + source.getName() + ") configuration=" + conf)); diff --git a/kvmain/src/main/java/oracle/kv/impl/xregion/service/XRegionServiceMetrics.java b/kvmain/src/main/java/oracle/kv/impl/xregion/service/XRegionServiceMetrics.java index 519284c6..2bc62b2e 100644 --- a/kvmain/src/main/java/oracle/kv/impl/xregion/service/XRegionServiceMetrics.java +++ b/kvmain/src/main/java/oracle/kv/impl/xregion/service/XRegionServiceMetrics.java @@ -66,6 +66,14 @@ public class XRegionServiceMetrics extends JsonMetricsHeader * # remote deletes that win conflict resolution */ private volatile long winDels = 0; + /** + * # of looped back put operations + */ + private volatile long loopbackPuts = 0; + /** + * # of looped back delete operations + */ + private volatile long loopbackDels = 0; /** * # incompatible rows from remote */ @@ -109,10 +117,18 @@ public void setWinPuts(long val) { winPuts = val; } + public void setLoopbackPuts(long val) { + loopbackPuts = val; + } + public void setWinDels(long val) { winDels = val; } + public void setLoopbackDels(long val) { + loopbackDels = val; + } + public void setStreamBytes(long val) { streamBytes = val; } @@ -157,7 +173,14 @@ public long getDels() { public long getWinPuts() { return winPuts; } - + @Override + public long getLoopbackPuts() { + return loopbackPuts; + } + @Override + public long getLoopbackDels() { + return loopbackDels; + } @Override public long getWinDels() { return winDels; @@ -251,6 +274,8 @@ public boolean equals(Object obj) { dels == other.dels && winPuts == other.winPuts && winDels == other.winDels && + loopbackPuts == other.loopbackPuts && + loopbackDels == other.loopbackDels && regionStat.equals(other.regionStat); } @@ -266,6 +291,8 @@ public int hashCode() { Long.hashCode(dels) + Long.hashCode(winPuts) + Long.hashCode(winDels) + + Long.hashCode(loopbackPuts) + + Long.hashCode(loopbackDels) + regionStat.hashCode(); } } diff --git a/kvmain/src/main/java/oracle/kv/pubsub/NoSQLPublisher.java b/kvmain/src/main/java/oracle/kv/pubsub/NoSQLPublisher.java index 834861d9..ade84f47 100644 --- a/kvmain/src/main/java/oracle/kv/pubsub/NoSQLPublisher.java +++ b/kvmain/src/main/java/oracle/kv/pubsub/NoSQLPublisher.java @@ -378,10 +378,11 @@ public static String ensureDir(String parent, * * @throws PublisherFailureException if unable to create a publisher */ - static NoSQLPublisher get(NoSQLPublisherConfig config, - LoginCredentials loginCred, - boolean allowMultiPub, - Logger logger) throws 
PublisherFailureException { + public static NoSQLPublisher get(NoSQLPublisherConfig config, + LoginCredentials loginCred, + boolean allowMultiPub, + Logger logger) + throws PublisherFailureException { final String pubId = makePublisherId(config.getPublisherId(), config.getStoreName()); diff --git a/kvmain/src/main/java/oracle/kv/pubsub/NoSQLSubscription.java b/kvmain/src/main/java/oracle/kv/pubsub/NoSQLSubscription.java index c71263ac..d5210de5 100644 --- a/kvmain/src/main/java/oracle/kv/pubsub/NoSQLSubscription.java +++ b/kvmain/src/main/java/oracle/kv/pubsub/NoSQLSubscription.java @@ -192,6 +192,20 @@ public interface NoSQLSubscription extends Subscription { */ void subscribeTable(String tableName); + /** + * Adds a subscribed table to the running subscription, and specifies whether to + * stream transactions. This method is identical to + * {@link #subscribeTable(String)}, except that the user can specify whether the + * writes to the subscribed table should be streamed as transactions or not. + * @param tableName the name of the table to subscribe, which is either + * a non-prefixed name that specifies a table in the + * default namespace, or a name with the namespace + * prefix and a colon followed by the table name. + * @param streamTxn true to stream transactions, false to stream write + * operations in {@link StreamOperation}. + */ + void subscribeTable(String tableName, boolean streamTxn); + /** * Removes a table from the set of subscribed tables for a running * subscription. The subscription will apply the change to every shard in diff --git a/kvmain/src/main/java/oracle/kv/pubsub/NoSQLSubscriptionConfig.java b/kvmain/src/main/java/oracle/kv/pubsub/NoSQLSubscriptionConfig.java index 0ceed64a..67a82e68 100644 --- a/kvmain/src/main/java/oracle/kv/pubsub/NoSQLSubscriptionConfig.java +++ b/kvmain/src/main/java/oracle/kv/pubsub/NoSQLSubscriptionConfig.java @@ -34,6 +34,36 @@ */ public class NoSQLSubscriptionConfig { + /** + * Delivery mode of subscribed tables + */ + public enum StreamDeliveryMode { + /** + * When a table is subscribed and configured with this delivery mode, + * every single write to the subscribed table will be delivered in + * a stream event {@link oracle.kv.pubsub.StreamOperation.PutEvent} or + * {@link oracle.kv.pubsub.StreamOperation.DeleteEvent}. This is the + * default delivery mode and does not require users to explicitly set + * it. + */ + SINGLE_WRITE_EVENT, + + /** + * When a table is subscribed and configured with this delivery mode, + * all write events to the table and its child tables, if any, + * including single key writes, will be grouped into their respective + * transactions. All writes in a transaction + * will be delivered as a single stream event + * {@link oracle.kv.pubsub.StreamOperation.TransactionEvent}. For + * a single key write, the group size will be 1. + *

    + * The table configured with this delivery mode must be a subscribed + * table in the subscription, and must be a top table, otherwise the + * subscription would fail with {@link SubscriptionFailureException}. + */ + GROUP_ALL_IN_TRANSACTION + } + /** * Default empty stream lifetime in seconds */ @@ -47,13 +77,19 @@ public class NoSQLSubscriptionConfig { * Default total memory size in megabytes of output queues for all shards */ public final static int DEFAULT_OUTPUT_QUEUE_SIZE_MB = 256; - + /*** + * Default to not include previous image in the stream events + */ + private final static boolean DEFAULT_INCLUDE_BEFORE_IMAGE = false; /** * Default flag to use external operations for elastic operations in * kvstore. */ private final static boolean DEFAULT_EXTERNAL_CKPT_ELASTICITY = false; - + /** + * Default flag to include aborted transaction in streaming transaction + */ + private final static boolean DEFAULT_STREAM_ABORT_TXN = false; /** * Default max connect. After discussion the Streams API should * favor service availability over stream consistence that would @@ -215,6 +251,24 @@ public class NoSQLSubscriptionConfig { */ private final boolean useExtCkptElasticity; + /** + * True if to include before row in the stream event, false otherwise. + */ + private final boolean includeBeforeImage; + + /** + * Subscribed tables for which the subscription would stream transaction + * instead of single write operation per streaming event. + */ + private final Set streamTxnTables; + + /** + * True to stream aborted transactions for tables in + * {@link #streamTxnTables}, false to ignore aborted transactions. + */ + private final boolean streamAbortTxn; + + private NoSQLSubscriptionConfig(Builder builder) { subscriberId = builder.subscriberId; @@ -232,6 +286,10 @@ private NoSQLSubscriptionConfig(Builder builder) { statReportIntvOps = builder.statReportIntvOps; outputQueueSzMB = builder.outputQueueSzMB; useExtCkptElasticity = builder.useExtCkptElasticity; + includeBeforeImage = builder.includeBeforeImage; + streamAbortTxn = builder.streamAbortTxn; + streamTxnTables = new HashSet<>(); + streamTxnTables.addAll(builder.streamTxnTables); /* * If FROM_(EXACT_)STREAM_POSITION, user must specify a stream @@ -394,7 +452,9 @@ public String toString() { streamMode.equals(NoSQLStreamMode.FROM_EXACT_CHECKPOINT)) { return "Subscription=" + subscriberId + " configured to stream from checkpoint with stream mode=" + - streamMode + ", subscribed tables=" + tableNames; + streamMode + ", subscribed tables=" + tableNames + + ", before image=" + includeBeforeImage + + ", local writes only=" + localWritesOnly; } if (streamMode.equals(NoSQLStreamMode.FROM_STREAM_POSITION) || @@ -402,12 +462,16 @@ public String toString() { return "Subscription=" + subscriberId + " configured to stream from position=" + initialPosition + " with stream mode=" + streamMode + ", subscribed " + - "tables=" + tableNames; + "tables=" + tableNames + + ", before image=" + includeBeforeImage + + ", local writes only=" + localWritesOnly; } return "Subscription=" + subscriberId + " configured to stream with stream mode=" + streamMode + - ", subscribed tables=" + tableNames; + ", subscribed tables=" + tableNames + + ", before image=" + includeBeforeImage + + ", local writes only=" + localWritesOnly; } /** @@ -666,6 +730,43 @@ public boolean getUseExtCkptForElasticity() { return useExtCkptElasticity; } + /** + * Returns true if the stream should include before image, false + * otherwise. 
+ */ + public boolean getIncludeBeforeImage() { + return includeBeforeImage; + } + + /** + * @hidden + *

Returns true if writes to the given table are streamed as transactions, one + * transaction per {@link StreamOperation}; false otherwise + */ + public boolean getStreamTxn(String table) { + return streamTxnTables.contains(table); + } + + /** + * @hidden + *

Returns the set of subscribed tables that stream transactions, one + * transaction per {@link StreamOperation}. + */ + public Set<String> getStreamTxnTables() { + return streamTxnTables; + } + + /** + * @hidden + *

    + * Returns true to to include aborted transaction in stream, false to + * ignore aborted transactions. Only effective when subscription is + * configured to stream transactions. + */ + public boolean getStreamAbortTxn() { + return streamAbortTxn; + } + /** * Uses mapper to generate checkpoint table map * @param sid subscriber id @@ -760,6 +861,9 @@ public static class Builder { private long statReportIntvOps = DEFAULT_REPORT_INTV_OPS; private int outputQueueSzMB = DEFAULT_OUTPUT_QUEUE_SIZE_MB; private boolean useExtCkptElasticity = DEFAULT_EXTERNAL_CKPT_ELASTICITY; + private boolean includeBeforeImage = DEFAULT_INCLUDE_BEFORE_IMAGE; + private boolean streamAbortTxn = DEFAULT_STREAM_ABORT_TXN; + private final Set streamTxnTables = new HashSet<>(); /** * Makes a builder for NoSQLSubscriptionConfig with required @@ -1178,6 +1282,72 @@ public Builder setUseExternalCheckpointForElasticity() { return this; } + /** + * Sets if the subscription should include before image of subscribed + * tables in stream events represented by {@link StreamOperation}. By + * default, subscription does not include before image of subscribed + * tables in {@link StreamOperation}. The before image will be + * included in subscription only if 1) it is enabled in the + * corresponding subscribed table, and 2) the subscription is + * configured to include it in stream events. For any stream event, + * users can check if the before image is enabled by + * {@link StreamOperation#isBeforeImageEnabled()}, and check if + * subscription is configured to include before image by + * {@link StreamOperation#includeBeforeImage()}. + * @param includeBeforeImage true to include before image in + * subscription, false otherwise. + * @return this instance + */ + public Builder setIncludeBeforeImage(boolean includeBeforeImage) { + this.includeBeforeImage = includeBeforeImage; + return this; + } + + /** + * Sets stream delivery mode {@link StreamDeliveryMode} for given + * tables. The tables must be the subscribed tables by either + * {@link #setSubscribedTables(String...)} or + * {@link #setSubscribedTables(Set)}. + * @param mode stream delivery mode + * @param tables name of tables + */ + public Builder setStreamDeliveryMode(StreamDeliveryMode mode, + String... tables) { + + if (mode == StreamDeliveryMode.SINGLE_WRITE_EVENT) { + /* default mode, nothing to do */ + return this; + } + + if (mode == StreamDeliveryMode.GROUP_ALL_IN_TRANSACTION) { + for (String table : tables) { + if (subscribedTables != null && + !subscribedTables.contains(table)) { + throw new IllegalArgumentException( + "Table=" + table + " is subscribed to stream " + + "transactions but is not in the subscribed table " + + "list=" + subscribedTables); + } + streamTxnTables.add(table); + } + return this; + } + + throw new IllegalArgumentException("Unsupported stream delivery " + + "mode=" + mode); + } + + /** + * @hidden + * + * Sets to include aborted transactions in subscription for the + * subscribed tables configured to stream transactions. + */ + public Builder setIncludeAbortTxn() { + streamAbortTxn = true; + return this; + } + /** * Normalizes the table name from user by making it a qualified name. 
* If the name space is not set by user, the system default namespace diff --git a/kvmain/src/main/java/oracle/kv/pubsub/StreamOperation.java b/kvmain/src/main/java/oracle/kv/pubsub/StreamOperation.java index 5a9a16c2..1711aabc 100644 --- a/kvmain/src/main/java/oracle/kv/pubsub/StreamOperation.java +++ b/kvmain/src/main/java/oracle/kv/pubsub/StreamOperation.java @@ -13,10 +13,14 @@ package oracle.kv.pubsub; +import java.util.List; + import oracle.kv.impl.api.table.TableImpl; import oracle.kv.impl.topo.RepGroupId; import oracle.kv.table.PrimaryKey; import oracle.kv.table.Row; +import oracle.kv.table.Table; +import oracle.kv.txn.TransactionId; /** * The operation (Put, Delete) that was delivered over the NoSQL stream. @@ -96,6 +100,56 @@ public interface StreamOperation { */ String toJsonString(); + /** + * Returns true if the current stream is configured to include before + * image, false otherwise + * @return true if the current stream is configured to include before + * image, false otherwise + */ + boolean includeBeforeImage(); + + /** + * Returns true if before image is enabled for the write operation in + * this event {@link StreamOperation} to the subscribed table in kvstore. + * Returns false if otherwise. + * + * @return true if the before image is enabled, or false. + */ + boolean isBeforeImageEnabled(); + + /** + * Returns true if the before image is enabled for the write operation + * in this event {@link StreamOperation} but the before image has + * expired. If the before image is disabled for the table or the + * subscription is not configured to include before image, it returns + * false. For insert operations to the subscribed table, for which the + * before image is null, this method returns false. + * + * @return true if the before image is enabled and expired, or false. + */ + boolean isBeforeImageExpired(); + + /** + * Returns the before image associated with the stream event represented + * by {@link StreamOperation}, or null if + * 1. the stream is not configured to include before image, or + * 2. the before image is not enabled for the event, or + * 3. the before image is enabled while it does not exist for operations + * like insert. + * @return the before image associated with the stream event, or null. + */ + Row getBeforeImage(); + + /** + * @hidden + * + * Returns the table instance associated with the operation + * @return table instance + */ + default Table getTable() { + throw new UnsupportedOperationException(); + } + /** * The type of the operation. */ @@ -115,7 +169,12 @@ enum Type { * * An internally generated stream operation */ - INTERNAL + INTERNAL, + + /** + * A {@link TransactionEvent} operation + */ + TRANSACTION } /** @@ -141,6 +200,17 @@ enum Type { */ DeleteEvent asDelete(); + /** + * Converts this operation to a {@link TransactionEvent}. + * + * @return this operation as a Transaction + * @throws IllegalArgumentException if this operation is not a Transaction + */ + default TransactionEvent asTransaction() { + throw new IllegalArgumentException( + "This operation is not a transaction"); + } + /** * Used to signal a Put operation */ @@ -157,7 +227,7 @@ interface PutEvent extends StreamOperation { * @hidden * * Internal use only. - * + *

    * Returns the primary key associated with the put operation in bytes. *

    * The format of bytes should be defined in the @@ -169,7 +239,7 @@ interface PutEvent extends StreamOperation { * @hidden * * Internal use only. - * + *

    * Returns the Row associated with the put operation in bytes. *

    * The format of bytes should be defined in the @@ -181,7 +251,7 @@ interface PutEvent extends StreamOperation { * @hidden * * Internal use only - * + *

    * Returns an estimated storage size in bytes of the row in the put * operation */ @@ -201,7 +271,7 @@ interface DeleteEvent extends StreamOperation { * @hidden * * Internal use only. - * + *

    * Returns the primary key associated with the delete operation * in bytes. *

    @@ -214,13 +284,57 @@ interface DeleteEvent extends StreamOperation { * @hidden * * Internal use only - * + *

    * Returns an estimated storage size in bytes of the primary key in the * delete operation */ long getPrimaryKeySize(); } + /** + * Used to signal a Transaction operation + */ + interface TransactionEvent extends StreamOperation { + + /** + * Returns the transaction id + * @return transaction id + */ + TransactionId getTransactionId(); + + /** + * Returns the type of the transaction as {@link TransactionType} + * @return transaction type + */ + TransactionType getTransactionType(); + + /** + * Returns the number of write operations in the transaction + * @return the number of write operations + */ + long getNumOperations(); + + /** + * Returns an ordered list of write operations in the transaction. + * All write operations are on the same order they are performed in + * the source kvstore. + * @return ordered list of write operations. + */ + List getOperations(); + + /** + * Types of transaction + */ + enum TransactionType { + + /** committed transaction */ + COMMIT, + + /** aborted transaction */ + ABORT + } + } + /** * A SequenceId uniquely identifies a stream operation associated with a * Publisher. @@ -275,7 +389,12 @@ PutEvent getPutEvent(NoSQLSubscriberId subscriberId, byte[] value, SequenceId sequenceId, long lastModificationTime, - long expirationTime); + long expirationTime, + boolean includeBeforeImage, + boolean beforeImgEnabled, + byte[] valBeforeImg, + long tsBeforeImg, + long expBeforeImg); /** * Get a DeleteEvent from given key. @@ -288,6 +407,54 @@ DeleteEvent getDeleteEvent(NoSQLSubscriberId subscriberId, SequenceId sequenceId, long lastModificationTime, long expirationTime, - boolean exactTable); + boolean exactTable, + boolean includeBeforeImage, + boolean beforeImgEnabled, + byte[] valBeforeImg, + long tsBeforeImg, + long expBeforeImg); + } + + /** + * @hidden + * + * Returns true if following conditions are all met, false otherwise + * 1. before image is enabled for the table; + * 2. subscription configured to include before image; + * 3. before image exists for the given {@link StreamOperation}; + * 4. before image has already expired. 
+ * + * @param beforeImgEnabled true if before image is enabled + * @param inclBeforeImage true if before image is included in subscription + * @param beforeImgExpMs before image expiration time, or 0 + * @param beforeImg before image, or null if not available + * @return true if the before image has expired, false otherwise + */ + static boolean isBeforeImageExpired(boolean beforeImgEnabled, + boolean inclBeforeImage, + long beforeImgExpMs, + Row beforeImg) { + if (!beforeImgEnabled) { + return false; + } + if (!inclBeforeImage) { + return false; + } + if (beforeImg == null) { + if (beforeImgExpMs != 0) { + throw new IllegalArgumentException( + "Null before image with a non-zero expiration time=" + + beforeImgExpMs); + } + /* insert op */ + return false; + } + + /* we have a non-null before image */ + if (beforeImgExpMs == 0) { + throw new IllegalArgumentException( + "Non-null before image without a valid expiration time"); + } + return System.currentTimeMillis() >= beforeImgExpMs; } } diff --git a/kvmain/src/main/java/oracle/kv/pubsub/StreamPosition.java b/kvmain/src/main/java/oracle/kv/pubsub/StreamPosition.java index 07975c0c..8c841f23 100644 --- a/kvmain/src/main/java/oracle/kv/pubsub/StreamPosition.java +++ b/kvmain/src/main/java/oracle/kv/pubsub/StreamPosition.java @@ -273,7 +273,7 @@ public int hashCode() { @Override public String toString() { - return "{" + storeName + "(id=" + storeId + "): " + + return "{" + storeName + "(id=" + storeId + "):" + shardPositionToString() +"}"; } diff --git a/kvmain/src/main/java/oracle/kv/query/ExecuteOptions.java b/kvmain/src/main/java/oracle/kv/query/ExecuteOptions.java index 5e29ca86..5022bcbb 100644 --- a/kvmain/src/main/java/oracle/kv/query/ExecuteOptions.java +++ b/kvmain/src/main/java/oracle/kv/query/ExecuteOptions.java @@ -27,6 +27,7 @@ import oracle.kv.impl.api.KVStoreImpl; import oracle.kv.impl.api.table.GeometryUtils; import oracle.kv.impl.api.table.Region; +import oracle.kv.impl.api.table.TableJsonUtils; import oracle.kv.impl.query.runtime.ResumeInfo.VirtualScan; import oracle.kv.impl.security.AuthContext; import oracle.kv.impl.util.contextlogger.LogContext; @@ -200,8 +201,21 @@ public class ExecuteOptions { */ private boolean allowCRDT = false; + /* + * Optionally set to indicate that the query is simple. This is only used + * by the ReceiverIterator to determine processing of queries against + * a set of shards. It is mostly for use by the proxy and SDKs but + * will work for internal callers as well. + */ + private boolean isSimpleQuery = false; + private boolean inTestMode; + /* + * Optionally set rowMetadata to be used for INSERT and UPDATE operations. + */ + private String rowMetadata; + public ExecuteOptions() {} /** @@ -253,6 +267,58 @@ public Durability getDurability() { return durability; } + /** + * This method is **EXPERIMENTAL** and its behavior, signature, or + * even its existence may change without prior notice in future versions. + * Use with caution.

+ * + * Sets the row metadata to use for the operation. This setting only applies + * if the query modifies a row using an INSERT, UPDATE, or UPSERT statement. + * If the query is read-only, it is ignored. + * This is an optional parameter.

+ * + * Row metadata is associated with a certain version of a row. Any subsequent + * write operation will use its own row metadata value. If not specified, + * null will be used by default. + * NOTE that if you have previously written a record with metadata and a + * subsequent write does not supply metadata, the metadata associated with + * the row will be null. Therefore, if you wish to have metadata associated + * with every write operation, you must supply a valid JSON construct to + * this method.

    + * + * @param rowMetadata the row metadata, must be null or in a valid JSON + * construct: object, array, string, number, true, false or null, + * otherwise an IllegalArgumentException is thrown. + * @throws IllegalArgumentException if rowMetadata not null and invalid + * JSON Object format + * + * @return this + */ + public ExecuteOptions setRowMetadata(String rowMetadata) { + if (rowMetadata == null) { + this.rowMetadata = null; + return this; + } + + TableJsonUtils.validateJsonConstruct(rowMetadata); + this.rowMetadata = rowMetadata; + return this; + } + + /** + * This method is **EXPERIMENTAL** and its behavior, signature, or + * even its existence may change without prior notice in future versions. + * Use with caution.

    + * + * Returns the row metadata value set for this request, or null if not set. + * + * @return the row metadata value + */ + public String getRowMetadata() { + return rowMetadata; + } + + /** * The {@code timeout} parameter is an upper bound on the time interval for * processing one of the KVStore.execute(...) methods @@ -1072,6 +1138,25 @@ public boolean allowCRDT() { return allowCRDT; } + /** + * Returns whether or not the query is "simple" which means it does not + * do sorting or aggregation + * @hidden + */ + public boolean getIsSimpleQuery() { + return isSimpleQuery; + } + + /** + * Sets whether the query is "simple" which means that it does not do + * sorting or aggregation + * @hidden + */ + public ExecuteOptions setIsSimpleQuery(boolean isSimple) { + this.isSimpleQuery = isSimple; + return this; + } + /** * @hidden */ diff --git a/kvmain/src/main/java/oracle/kv/shell/ExecuteCommand.java b/kvmain/src/main/java/oracle/kv/shell/ExecuteCommand.java index 6c13cd34..b3a26aaa 100644 --- a/kvmain/src/main/java/oracle/kv/shell/ExecuteCommand.java +++ b/kvmain/src/main/java/oracle/kv/shell/ExecuteCommand.java @@ -86,8 +86,7 @@ public String execute(String[] args, Shell shell) exec.executeDdl(statement.toCharArray(), cmd.getNamespace(), null, /* ExecuteOptions */ - null, /* TableLimits */ - cmd.getLoginManager())); + null /* TableLimits */)); return displayResults(cmd, sr); } catch (RemoteException e) { throw new FaultException(e.getMessage(), e, false); diff --git a/kvmain/src/main/java/oracle/kv/shell/GetCommand.java b/kvmain/src/main/java/oracle/kv/shell/GetCommand.java index 227167d4..8c0401ea 100644 --- a/kvmain/src/main/java/oracle/kv/shell/GetCommand.java +++ b/kvmain/src/main/java/oracle/kv/shell/GetCommand.java @@ -367,7 +367,9 @@ record += CommandUtils.createURI(kvv.getKey()) + eol; if (value.getFormat() == Value.Format.NONE || value.getFormat() == Value.Format.TABLE || value.getFormat() == Value.Format.TABLE_V1 || - value.getFormat() == Value.Format.MULTI_REGION_TABLE) { + value.getFormat() == Value.Format.MULTI_REGION_TABLE || + value.getFormat() == Value.Format.TABLE_V5 + ) { record += printableString(value.getValue()); } else { // report unsupported error diff --git a/kvmain/src/main/java/oracle/kv/stats/ServiceAgentMetrics.java b/kvmain/src/main/java/oracle/kv/stats/ServiceAgentMetrics.java index 0de1c652..51424723 100644 --- a/kvmain/src/main/java/oracle/kv/stats/ServiceAgentMetrics.java +++ b/kvmain/src/main/java/oracle/kv/stats/ServiceAgentMetrics.java @@ -84,6 +84,22 @@ public interface ServiceAgentMetrics { */ long getWinDels(); + /** + * Returns the total number of looped back put operations originated at + * the local region. + * @return the total number of looped back put operations originated at + * the local region. + */ + long getLoopbackPuts(); + + /** + * Returns the total number of looped back delete operations originated at + * the local region. + * @return the total number of looped back delete operations originated at + * the local region. + */ + long getLoopbackDels(); + /** * Returns the total bytes received from source * diff --git a/kvmain/src/main/java/oracle/kv/table/FieldValueFactory.java b/kvmain/src/main/java/oracle/kv/table/FieldValueFactory.java index 6f94e9ca..c0120173 100644 --- a/kvmain/src/main/java/oracle/kv/table/FieldValueFactory.java +++ b/kvmain/src/main/java/oracle/kv/table/FieldValueFactory.java @@ -291,7 +291,7 @@ public static FieldValue createValueFromJson( * schema. Such fields will remain unset in the record value. 
* If type is BINARY then the value must be a base64 encoded value. * - * Note: This methods doesn't handle arbitrary JSON, it has to comply to + * Note: This method doesn't handle arbitrary JSON, it has to comply to * the given type. Also, top level null is not supported. * * @param type the type definition of the instance. diff --git a/kvmain/src/main/java/oracle/kv/table/Row.java b/kvmain/src/main/java/oracle/kv/table/Row.java index 846aaa1a..645fffa7 100644 --- a/kvmain/src/main/java/oracle/kv/table/Row.java +++ b/kvmain/src/main/java/oracle/kv/table/Row.java @@ -128,13 +128,31 @@ public interface Row extends RecordValue { * or table iterator (e.g. {@link TableAPI#tableIterator}) call. It will * also be set after a successful put of the row (e.g. {@link TableAPI#put}. * - * @return the expiration time in milliseconds since January 1, 1970, + * @return the expiration time in milliseconds since January 1, 1970 GMT, * or zero if the record never expires * * @since 4.0 */ public long getExpirationTime(); + /** + * Returns the creation time of the row or zero if it is not + * available. If the row was written by a version of the + * system older than 25.3 the creation time will be equal to the + * modification time, if it was written by a system older than 19.5 it will + * be zero. + *

    + * The creation time is set automatically by the server when the row is + * first successfully created. + * + * @return the creation time in milliseconds since the epoch January 1st, + * 1970 GMT or 0 if not available. + * + * @since 25.3 + * @hidden + */ + public long getCreationTime(); + /** * Returns the last modification time of the row or zero if it is not * available or not yet set. If the row was written by a version of the @@ -144,12 +162,50 @@ public interface Row extends RecordValue { * The modification time is set if this row was returned by a * get (e.g. {@link TableAPI#get}) or table iterator * (e.g. {@link TableAPI#tableIterator}) call. It will also be valid after - * a successful put of the row (e.g. {@link TableAPI#put}. + * a successful put of the row (e.g. {@link TableAPI#put}). * - * @return the last update time in milliseconds since January 1, 1970 or - * 0 if not available. + * @return the last update time in milliseconds since January 1st, 1970 GMT + * or 0 if not available. * * @since 22.1 */ public long getLastModificationTime(); + + /** + * This method is **EXPERIMENTAL** and its behavior, signature, or + * even its existence may change without prior notice in future versions. + * Use with caution.
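As an illustration only (not part of this patch): reading the new creation time next to the existing modification time after a get, assuming a TableAPI handle and an invented users table with an integer primary key id. Per the javadoc above, rows written by servers older than 25.3 report a creation time equal to the modification time, and rows written by servers older than 19.5 report zero.

import oracle.kv.table.PrimaryKey;
import oracle.kv.table.Row;
import oracle.kv.table.TableAPI;

public class CreationTimeSketch {

    /* Print creation and modification times for one row, both in
     * milliseconds since January 1st, 1970 GMT (0 if not available). */
    static void printTimes(TableAPI tableAPI) {
        PrimaryKey key = tableAPI.getTable("users").createPrimaryKey();
        key.put("id", 42);

        Row row = tableAPI.get(key, null /* default ReadOptions */);
        if (row != null) {
            System.out.println("created:  " + row.getCreationTime());
            System.out.println("modified: " + row.getLastModificationTime());
        }
    }
}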

+ * + * Sets row metadata associated with the row (for insert or update + * operations) or primary key (for delete operations). Row metadata is + * associated with a certain version of a row. Any subsequent write operation + * will use its own row metadata value. If not specified, null will be used + * by default. + * NOTE that if you have previously written a record with metadata and a + * subsequent write does not supply metadata, the metadata associated with + * the row will be null. Therefore, if you wish to have metadata + * associated with every write operation, you must supply a valid JSON + * construct to this method.

+ * + * @param rowMetadata the row metadata, must be null or a valid JSON + * construct: object, array, string, number, true, false or null, + * otherwise an IllegalArgumentException is thrown. + * @throws IllegalArgumentException if rowMetadata is not null and is not + * a valid JSON construct + * @since 25.3 + */ + public void setRowMetadata(String rowMetadata); + + /** + * This method is **EXPERIMENTAL** and its behavior, signature, or + * even its existence may change without prior notice in future versions. + * Use with caution.
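A further illustration-only sketch (not part of this patch) of the points above: a rewrite that omits metadata leaves the row's metadata null, a delete can carry metadata on its primary key (the TableAPI javadoc below references PrimaryKey.setRowMetadata(String)), and a value that is not a valid JSON construct is rejected with IllegalArgumentException. The users table and all values are invented.

import oracle.kv.table.PrimaryKey;
import oracle.kv.table.Row;
import oracle.kv.table.Table;
import oracle.kv.table.TableAPI;

public class RowMetadataWriteSketch {

    static void writeAndDelete(TableAPI tableAPI) {
        Table users = tableAPI.getTable("users");

        /* First write carries metadata. */
        Row row = users.createRow();
        row.put("id", 42);
        row.put("name", "Alice");
        row.setRowMetadata("{\"channel\":\"import\"}");
        tableAPI.put(row, null, null);

        /* Rewriting the same row without metadata leaves the stored
         * metadata for the new row version as null. */
        Row rewrite = users.createRow();
        rewrite.put("id", 42);
        rewrite.put("name", "Alice B.");
        tableAPI.put(rewrite, null, null);

        /* A delete can carry its own metadata via the primary key. */
        PrimaryKey key = users.createPrimaryKey();
        key.put("id", 42);
        key.setRowMetadata("{\"deletedBy\":\"cleanup-job\"}");
        tableAPI.delete(key, null, null);

        try {
            /* Not a valid JSON construct: rejected up front. */
            key.setRowMetadata("{not json");
        } catch (IllegalArgumentException expected) {
            System.out.println("rejected: " + expected.getMessage());
        }
    }
}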

    + * + * Returns the metadata associated with the row. + * + * @return the metadata, or null if not set + * + * @since 25.3 + */ + public String getRowMetadata(); } diff --git a/kvmain/src/main/java/oracle/kv/table/TableAPI.java b/kvmain/src/main/java/oracle/kv/table/TableAPI.java index a3442566..16cca384 100644 --- a/kvmain/src/main/java/oracle/kv/table/TableAPI.java +++ b/kvmain/src/main/java/oracle/kv/table/TableAPI.java @@ -1214,7 +1214,8 @@ Publisher tableKeysIteratorAsync( * @return the version of the new row value * * @throws IllegalArgumentException if the row does not have a complete - * primary key or is otherwise invalid + * primary key or is otherwise invalid, or if + * {@link Row#setRowMetadata(String)} is not a valid JSON construct. * * @see Write exceptions */ @@ -1235,7 +1236,8 @@ Version put(Row row, * *

      *
    • {@link IllegalArgumentException} - if the row does not have a - * complete primary key or is otherwise invalid + * complete primary key or is otherwise invalid, or if + * {@link Row#setRowMetadata(String)} is not a valid JSON construct. * *
    • {@link FaultException} - for one of the standard write exceptions @@ -1318,7 +1320,8 @@ CompletableFuture putAsync(Row row, * value is present and the put is unsuccessful * * @throws IllegalArgumentException if the row does not have a complete - * primary key or is otherwise invalid + * primary key or is otherwise invalid, or if + * {@link Row#setRowMetadata(String)} is not a valid JSON construct. * * @see Write exceptions */ @@ -1340,7 +1343,8 @@ Version putIfAbsent(Row row, * *
        *
      • {@link IllegalArgumentException} - if the row does not have a - * complete primary key or is otherwise invalid + * complete primary key or is otherwise invalid, or if + * {@link Row#setRowMetadata(String)} is not a valid JSON construct. * *
      • {@link FaultException} - for one of the standard write exceptions @@ -1424,7 +1428,8 @@ CompletableFuture putIfAbsentAsync(Row row, * existing row and the put is unsuccessful * * @throws IllegalArgumentException if the {@code Row} does not have - * a complete primary key or is otherwise invalid + * a complete primary key or is otherwise invalid, or if + * {@link Row#setRowMetadata(String)} is not a valid JSON construct. * * @see Write exceptions */ @@ -1445,7 +1450,8 @@ Version putIfPresent(Row row, * *
          *
        • {@link IllegalArgumentException} - if the {@code Row} does not have - * a complete primary key or is otherwise invalid + * a complete primary key or is otherwise invalid, or if + * {@link Row#setRowMetadata(String)} is not a valid JSON construct. * *
        • {@link FaultException} - for one of the standard write exceptions @@ -1533,7 +1539,8 @@ CompletableFuture putIfPresentAsync(Row row, * not match and the put is unsuccessful * * @throws IllegalArgumentException if the {@code Row} does not have - * a complete primary key or is otherwise invalid + * a complete primary key or is otherwise invalid, or if + * {@link Row#setRowMetadata(String)} is not a valid JSON construct. * * @see Write exceptions */ @@ -1557,7 +1564,8 @@ Version putIfVersion(Row row, * *
            *
          • {@link IllegalArgumentException} - if the {@code Row} does not have - * a complete primary key or is otherwise invalid + * a complete primary key or is otherwise invalid, or if + * {@link Row#setRowMetadata(String)} is not a valid JSON construct. * *
          • {@link FaultException} - for one of the standard write exceptions @@ -1625,7 +1633,8 @@ CompletableFuture putIfVersionAsync(Row row, * @return {@code true} if the row existed and was deleted, and {@code * false} otherwise * - * @throws IllegalArgumentException if the primary key is not complete + * @throws IllegalArgumentException if the primary key is not complete or if + * {@link PrimaryKey#setRowMetadata(String)} is not a valid JSON construct. * * @see Write exceptions */ @@ -1645,7 +1654,8 @@ boolean delete(PrimaryKey key, * *
              *
            • {@link IllegalArgumentException} - if the primary key is not - * complete + * complete or if {@link PrimaryKey#setRowMetadata(String)} is not a valid + * JSON construct. * *
            • {@link FaultException} - for one of the standard write exceptions @@ -1698,7 +1708,8 @@ CompletableFuture deleteAsync(PrimaryKey key, * @return {@code true} if the row existed, and its version matched {@code * matchVersion} and was successfully deleted, and {@code false} otherwise * - * @throws IllegalArgumentException if the primary key is not complete + * @throws IllegalArgumentException if the primary key is not complete or if + * {@link PrimaryKey#setRowMetadata(String)} is not a valid JSON construct. * * @see Write exceptions */ @@ -1721,7 +1732,8 @@ boolean deleteIfVersion(PrimaryKey key, * *
                *
              • {@link IllegalArgumentException} - if the primary key is not - * complete + * complete or if {@link PrimaryKey#setRowMetadata(String)} is not a valid + * JSON construct. * *
              • {@link FaultException} - for one of the standard write exceptions @@ -1758,7 +1770,7 @@ CompletableFuture deleteIfVersionAsync(PrimaryKey key, /** * Deletes multiple rows from a table in an atomic operation. The - * key used may be partial but must contain all of the fields that are + * key used may be partial but must contain all the fields that are * in the shard key. * * @param key the primary key for the row to delete @@ -1775,7 +1787,8 @@ CompletableFuture deleteIfVersionAsync(PrimaryKey key, * @return the number of rows deleted from the table * * @throws IllegalArgumentException if the primary key is malformed or does - * not contain all shard key fields + * not contain all shard key fields or if + * {@link PrimaryKey#setRowMetadata(String)} is not a valid JSON construct. * * @see Write exceptions */ @@ -1786,7 +1799,7 @@ int multiDelete(PrimaryKey key, /** * Deletes multiple rows from a table in an atomic operation, returning a * future to manage the asynchronous operation. The key used may be partial - * but must contain all of the fields that are in the shard key. + * but must contain all the fields that are in the shard key. * *

                The result supplied to the future is the number of rows deleted from * the table. @@ -1796,7 +1809,8 @@ int multiDelete(PrimaryKey key, * *

                  *
                • {@link IllegalArgumentException} - if the primary key is malformed - * or does not contain all shard key fields + * or does not contain all shard key fields or if + * {@link PrimaryKey#setRowMetadata(String)} is not a valid JSON construct. * *
                • {@link FaultException} - for one of the standard write exceptions @@ -1828,7 +1842,7 @@ CompletableFuture multiDeleteAsync(PrimaryKey key, /** * Returns a {@code TableOperationFactory} to create operations passed - * to {@link #execute}. Not all operations must use the same table but + * to {@link #execute}. Not all operations must use the same table, but * they must all use the same shard portion of the primary key. * * @return an empty {@code TableOperationFactory} diff --git a/kvmain/src/main/java/oracle/kv/table/WriteOptions.java b/kvmain/src/main/java/oracle/kv/table/WriteOptions.java index 9a981634..71e9dbf7 100644 --- a/kvmain/src/main/java/oracle/kv/table/WriteOptions.java +++ b/kvmain/src/main/java/oracle/kv/table/WriteOptions.java @@ -382,13 +382,13 @@ public int getRegionId() { return regionId; } - /** - * Returns whether put tombstone instead of deleting, used for delete - * operation on external multi-region table. - * - * @hidden - */ - public boolean doTombstone() { + /** + * Returns whether put tombstone instead of deleting, used for delete + * operation on external multi-region table. + * + * @hidden + */ + public boolean doTombstone() { return doTombstone; } } diff --git a/kvmain/src/main/java/oracle/kv/txn/TransactionId.java b/kvmain/src/main/java/oracle/kv/txn/TransactionId.java new file mode 100644 index 00000000..2f56126f --- /dev/null +++ b/kvmain/src/main/java/oracle/kv/txn/TransactionId.java @@ -0,0 +1,37 @@ +/*- + * Copyright (C) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle NoSQL + * Database made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/nosqldb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle NoSQL Database for a copy of the license and + * additional information. + */ + +package oracle.kv.txn; + +/** + * Interface that represents a transaction id. The transaction id is a + * store-wide unique id associated with a particular transaction. + */ +public interface TransactionId { + + /** + * @return the id of shard where the transaction is performed + */ + int getShardId(); + + /** + * @return the transaction id generated by internal JE env + */ + long getShardTxnId(); + + /** + * @return the timestamp of the transaction, which is from the entry of + * commit (abort) for committed (aborted) transaction. + */ + long getTimestamp(); +} diff --git a/kvmain/src/main/java/oracle/kv/txn/TransactionIdImpl.java b/kvmain/src/main/java/oracle/kv/txn/TransactionIdImpl.java new file mode 100644 index 00000000..e73b54e7 --- /dev/null +++ b/kvmain/src/main/java/oracle/kv/txn/TransactionIdImpl.java @@ -0,0 +1,86 @@ +/*- + * Copyright (C) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + * This file was distributed by Oracle as part of a version of Oracle NoSQL + * Database made available at: + * + * http://www.oracle.com/technetwork/database/database-technologies/nosqldb/downloads/index.html + * + * Please see the LICENSE file included in the top-level directory of the + * appropriate version of Oracle NoSQL Database for a copy of the license and + * additional information. + */ + +package oracle.kv.txn; + +import java.util.UUID; + +import oracle.kv.impl.util.FormatUtils; + +/** + * Object that represents a transaction id. The transaction id is a unique + * id associated with a particular transaction. 
+ */ +public class TransactionIdImpl implements TransactionId { + + /** + * Id of shard where the transaction is performed + */ + private final int shardId; + /** + * Transaction id generated by internal JE env + */ + private final long txnId; + /** + * Timestamp of commit (abort) for committed(aborted) transaction. + */ + private final long timestamp; + /** + * Optional UUID + */ + private final UUID uuid; + + public TransactionIdImpl(int shardId, + long txnId, + long timestamp) { + this.shardId = shardId; + this.txnId = txnId; + this.timestamp = timestamp; + uuid = UUID.randomUUID(); + } + + /** + * Returns id of shard where the transaction is performed + * @return shard id + */ + @Override + public int getShardId() { + return shardId; + } + + /** + * Returns the internal transaction id generated by JE environment. The + * id is shard-wide unique and + * @return the internal transaction id + */ + @Override + public long getShardTxnId() { + return txnId; + } + + @Override + public long getTimestamp() { + return timestamp; + } + + public UUID getUuid() { + return uuid; + } + + @Override + public String toString() { + return "Txn[shard=" + shardId + ", id=" + txnId + + ", timestamp=" + FormatUtils.formatPerfTime(timestamp) + + " (" + timestamp + ")]"; + } +} diff --git a/kvmain/src/main/java/oracle/kv/util/shell/Shell.java b/kvmain/src/main/java/oracle/kv/util/shell/Shell.java index 123bcc6b..03c19f53 100644 --- a/kvmain/src/main/java/oracle/kv/util/shell/Shell.java +++ b/kvmain/src/main/java/oracle/kv/util/shell/Shell.java @@ -768,7 +768,6 @@ public void runLine(String line) private void runLine(String line, boolean checkQuotesMatch) throws ShellException { - exitCode = EXIT_OK; if (line.length() > 0 && !isComment(line)) { String[] splitArgs; try { @@ -823,7 +822,13 @@ protected String run(String commandName, String[] args, String line) cmdArgs = checkCommonFlags(cmdArgs); try { final String result = command.execute(cmdArgs, this, line); - exitCode = command.getExitCode(); + /* + * Do not overwrite exitCode in case of exit command, so that + * the exitCode of previous command is retained. 
+ */ + if (!(command instanceof ExitCommand)) { + exitCode = command.getExitCode(); + } return result; } catch (CommandNotFoundException cnfe) { /* diff --git a/kvstore/pom.xml b/kvstore/pom.xml index 52423f27..124bc4d5 100644 --- a/kvstore/pom.xml +++ b/kvstore/pom.xml @@ -7,7 +7,7 @@ com.oracle.nosql kv - 25.1.13 + 25.3.21 kvstore diff --git a/kvtest/kvclient-IT/pom.xml b/kvtest/kvclient-IT/pom.xml index c617f387..2fa37120 100644 --- a/kvtest/kvclient-IT/pom.xml +++ b/kvtest/kvclient-IT/pom.xml @@ -7,7 +7,7 @@ com.oracle.nosql kvtest - 25.1.13 + 25.3.21 kvclient-IT diff --git a/kvtest/kvclient-IT/src/main/java/oracle/kv/ValueTest.java b/kvtest/kvclient-IT/src/main/java/oracle/kv/ValueTest.java index 10170810..12e7fedd 100644 --- a/kvtest/kvclient-IT/src/main/java/oracle/kv/ValueTest.java +++ b/kvtest/kvclient-IT/src/main/java/oracle/kv/ValueTest.java @@ -12,6 +12,7 @@ import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; @@ -19,6 +20,7 @@ import java.io.DataOutputStream; import java.io.IOException; +import oracle.kv.impl.api.table.Region; import org.junit.Test; import oracle.kv.impl.util.SerialVersion; @@ -28,6 +30,8 @@ */ public class ValueTest extends TestBase { + private static String RMD = "{\"rm\":1}"; + @Override public void setUp() throws Exception { @@ -50,25 +54,106 @@ public void testSerialization() doSerializationTest(Value.Format.NONE, new byte[] { 1 }); doSerializationTest(Value.Format.NONE, new byte[] { 1, 2 }); doSerializationTest(Value.Format.NONE, new byte[] { 1, 2, 3 }); + + /* Format.AVR, deprecated not symmetrically serializable */ + + /* With Format.TABLE, any byte array is allowed. */ + doSerializationTest(Value.Format.TABLE, new byte[0]); + doSerializationTest(Value.Format.TABLE, new byte[] { 1 }); + doSerializationTest(Value.Format.TABLE, new byte[] { 1, 2 }); + doSerializationTest(Value.Format.TABLE, new byte[] { 1, 2, 3 }); + + /* With Format.TABLE_V1, any byte array is allowed. */ + doSerializationTest(Value.Format.TABLE_V1, new byte[0]); + doSerializationTest(Value.Format.TABLE_V1, new byte[] { 1 }); + doSerializationTest(Value.Format.TABLE_V1, new byte[] { 1, 2 }); + doSerializationTest(Value.Format.TABLE_V1, new byte[] { 1, 2, 3 }); + + /* With Format.MULTI_REGION_TABLE, any byte array is allowed. */ + doSerializationTest(Value.Format.MULTI_REGION_TABLE, new byte[0], Region.REGION_ID_START); + doSerializationTest(Value.Format.MULTI_REGION_TABLE, new byte[] { 1 }, Region.LOCAL_REGION_ID); + doSerializationTest(Value.Format.MULTI_REGION_TABLE, new byte[] { 1, 2 }, Region.LOCAL_REGION_ID); + doSerializationTest(Value.Format.MULTI_REGION_TABLE, new byte[] { 1, 2, 3 }, Region.REGION_ID_START); + + /* With Format.TABLE_V5, any byte array is allowed. 
*/ + // with present regionId and metadata string + doSerializationTest(Value.Format.TABLE_V5, new byte[0], Region.LOCAL_REGION_ID, RMD); + doSerializationTest(Value.Format.TABLE_V5, new byte[] { 1 }, Region.LOCAL_REGION_ID, RMD); + doSerializationTest(Value.Format.TABLE_V5, new byte[] { 1, 2 }, Region.REGION_ID_START, RMD); + doSerializationTest(Value.Format.TABLE_V5, new byte[] { 1, 2, 3 }, Region.LOCAL_REGION_ID, RMD); + // without regionId but with metadata string + doSerializationTest(Value.Format.TABLE_V5, new byte[0], Region.NULL_REGION_ID, RMD); + doSerializationTest(Value.Format.TABLE_V5, new byte[] { 1 }, Region.NULL_REGION_ID, RMD); + doSerializationTest(Value.Format.TABLE_V5, new byte[] { 1, 2 }, Region.NULL_REGION_ID, RMD); + doSerializationTest(Value.Format.TABLE_V5, new byte[] { 1, 2, 3 }, Region.NULL_REGION_ID, RMD); + // with regionId but without metadata + doSerializationTest(Value.Format.TABLE_V5, new byte[0], Region.LOCAL_REGION_ID, null); + doSerializationTest(Value.Format.TABLE_V5, new byte[] { 1 }, Region.REGION_ID_START, null); + doSerializationTest(Value.Format.TABLE_V5, new byte[] { 1, 2 }, Region.LOCAL_REGION_ID, null); + doSerializationTest(Value.Format.TABLE_V5, new byte[] { 1, 2, 3 }, 3, null); + // without either regionId and without metadata + doSerializationTest(Value.Format.TABLE_V5, new byte[0], Region.NULL_REGION_ID, null); + doSerializationTest(Value.Format.TABLE_V5, new byte[] { 1 }, Region.NULL_REGION_ID, null); + doSerializationTest(Value.Format.TABLE_V5, new byte[] { 1, 2 }, Region.NULL_REGION_ID, null); + doSerializationTest(Value.Format.TABLE_V5, new byte[] { 1, 2, 3 }, Region.NULL_REGION_ID, null); } private void doSerializationTest(Value.Format format, final byte[] val) throws IOException { + doSerializationTest(format, val, Region.NULL_REGION_ID, null); + } + + private void doSerializationTest(Value.Format format, final byte[] val, int regionId) + throws IOException { + doSerializationTest(format, val, regionId, null); + } + + @SuppressWarnings("deprecation") + private void doSerializationTest(Value.Format format, final byte[] val, + int regionId, String metadata) + throws IOException { /* Serialize with toByteArray. */ final Value v1; final int extraLen; if (format == Value.Format.NONE) { v1 = Value.createValue(val); - extraLen = 1; + extraLen = 1 /* format */; + } else if (format == Value.Format.AVRO) { + v1 = Value.internalCreateValue(val, format, Region.NULL_REGION_ID, null); + extraLen = 0 /* AVRO includes format */; + } else if (format == Value.Format.TABLE || + format == Value.Format.TABLE_V1) { + v1 = Value.internalCreateValue(val, format, Region.NULL_REGION_ID, null); + extraLen = 1 /* format */ ; + } else if (format == Value.Format.MULTI_REGION_TABLE) { + v1 = Value.internalCreateValue(val, format, regionId); + extraLen = 1 /* format*/ + 1 /* regionId small int */; + } else if (format == Value.Format.TABLE_V5) { + v1 = Value.internalCreateValue(val, format, regionId, metadata); + extraLen = 1 /* format */ + 1 /* bitset */ + + (regionId == Region.NULL_REGION_ID ? 0 : 1 /* regionId small int */) + + (metadata != null ? 
1 /* metadata strLength */ + metadata.length() /* metadata str */ : 0) ; } else { - v1 = Value.internalCreateValue(val, format); - extraLen = 0; + throw new IllegalArgumentException("Unknown format: " + format); } + + assertSame(format, v1.getFormat()); assertArrayEquals(val, v1.getValue()); final byte[] bytes = v1.toByteArray(); assertEquals(val.length + extraLen, bytes.length); + assertEquals(regionId != Region.NULL_REGION_ID, Value.hasRegionId(bytes)); + assertEquals(metadata != null, Value.hasRowMetadata(bytes)); + assertEquals(regionId, Value.getRegionIdFromByteArray(bytes)); + int offset = + format == Value.Format.MULTI_REGION_TABLE ? 2 : + format == Value.Format.TABLE_V5 ? 2 + + (regionId == Region.NULL_REGION_ID ? 0 : 1 ) + + (metadata == null ? 0 : 1 + metadata.length()) : + 1 + ; + assertEquals(offset, Value.getValueOffset(bytes)); /* Deserialize with fromByteArray. */ final Value v2 = Value.fromByteArray(bytes); @@ -76,6 +161,16 @@ private void doSerializationTest(Value.Format format, final byte[] val) assertEquals(v1.hashCode(), v2.hashCode()); assertArrayEquals(val, v2.getValue()); assertSame(format, v2.getFormat()); + assertTrue(v2.getFormat() != Value.Format.MULTI_REGION_TABLE || + v2.getRegionId() == regionId); + assertTrue(v2.getFormat() != Value.Format.TABLE_V5 || + v2.getRegionId() == regionId && + (metadata == null && v2.getRowMetadata() == null || + metadata != null && metadata.equals(v2.getRowMetadata()))); + assertTrue(v2.getRegionId() == Region.NULL_REGION_ID || + v2.getRegionId() == regionId); + assertTrue( (metadata == null && v2.getRowMetadata() == null) || + (metadata != null && metadata.equals(v2.getRowMetadata()))); /* Serialize with writeFastExternal. */ ByteArrayOutputStream baos = new ByteArrayOutputStream(50); @@ -102,6 +197,19 @@ private void doSerializationTest(Value.Format format, final byte[] val) assertEquals(v1.hashCode(), v3.hashCode()); assertArrayEquals(val, v3.getValue()); assertSame(format, v3.getFormat()); + assertTrue(v3.getFormat() != Value.Format.MULTI_REGION_TABLE || + v3.getRegionId() == regionId); + assertTrue(v3.getFormat() != Value.Format.TABLE_V5 || + v3.getRegionId() == regionId && + (metadata == null && v3.getRowMetadata() == null || + metadata != null && metadata.equals(v3.getRowMetadata()))); + + assertTrue(v3.getRegionId() == Region.NULL_REGION_ID || + v3.getRegionId() == regionId); + assertEquals(metadata, v3.getRowMetadata()); + assertEquals(regionId != Region.NULL_REGION_ID, Value.hasRegionId(v3.toByteArray())); + assertEquals(metadata != null, Value.hasRowMetadata(v3.toByteArray())); + assertEquals(regionId, Value.getRegionIdFromByteArray(v3.toByteArray())); /* Deserialize with readFastExternal. 
*/ bais = new ByteArrayInputStream(v3Serial); @@ -110,6 +218,9 @@ private void doSerializationTest(Value.Format format, final byte[] val) assertEquals("Expected EOF after reading serialized object", -1, dis.read()); assertArrayEquals(bytes, v3Bytes); + assertEquals(regionId != Region.NULL_REGION_ID, Value.hasRegionId(v3Bytes)); + assertEquals(metadata != null, Value.hasRowMetadata(v3Bytes)); + assertEquals(regionId, Value.getRegionIdFromByteArray(v3Bytes)); } @Test @@ -134,6 +245,55 @@ public void testSerialVersion() { serialVersionChecker( /* Format: MULTI_REGION_TABLE */ Value.fromByteArray(new byte[] { 3, 90, 100 }), - SerialVersion.MINIMUM, 0xcd1b552a8341734aL)); + SerialVersion.MINIMUM, 0xcd1b552a8341734aL), + serialVersionChecker( + /* Format: TABLE_V5 */ + Value.fromByteArray(new byte[] {4, 3, 1, 8, 123, 34, 114, 109, 34, 58, 49, 125, 1, 2, 3}), + SerialVersion.ROW_METADATA_VERSION,0xe7a099d979a1edcaL), + serialVersionChecker( + /* Format: TABLE_V5 */ + Value.fromByteArray(new byte[] {4, 2, 8, 123, 34, 114, 109, 34, 58, 49, 125, 1, 2, 3}), + SerialVersion.ROW_METADATA_VERSION,0xd4155254ce137292L) + ); + } + + @Test + public void testValueOffset() { + Value value = Value.internalCreateValue(new byte[] {1,2,3}, Value.Format.TABLE_V5, Region.LOCAL_REGION_ID, RMD); + + byte[] serialized = value.toByteArray(); + int offset = Value.getValueOffset(serialized); + assertTrue(offset >= 0); + assertEquals(serialized.length - 3, offset); + + value = Value.internalCreateValue(new byte[] {1,2,3}, Value.Format.TABLE_V5, Region.LOCAL_REGION_ID, null); + serialized = value.toByteArray(); + offset = Value.getValueOffset(serialized); + assertTrue(offset >= 0); + assertEquals(serialized.length - 3, offset); + + value = Value.internalCreateValue(new byte[] {1,2,3}, Value.Format.TABLE_V5, Region.NULL_REGION_ID, RMD); + serialized = value.toByteArray(); + offset = Value.getValueOffset(serialized); + assertTrue(offset >= 0); + assertEquals(serialized.length - 3, offset); + + value = Value.internalCreateValue(new byte[] {1,2,3}, Value.Format.TABLE_V5, Region.NULL_REGION_ID, null); + serialized = value.toByteArray(); + offset = Value.getValueOffset(serialized); + assertTrue(offset >= 0); + assertEquals(serialized.length - 3, offset); + + value = Value.internalCreateValue(new byte[] {1,2,3}, Value.Format.MULTI_REGION_TABLE, 1, null); + serialized = value.toByteArray(); + offset = Value.getValueOffset(serialized); + assertTrue(offset >= 0); + assertEquals(serialized.length - 3, offset); + + value = Value.internalCreateValue(new byte[] {1,2,3}, Value.Format.TABLE_V1, Region.NULL_REGION_ID); + serialized = value.toByteArray(); + offset = Value.getValueOffset(serialized); + assertTrue(offset >= 0); + assertEquals(serialized.length - 3, offset); } } diff --git a/kvtest/kvclient-IT/src/main/java/oracle/kv/impl/api/ops/OpsSerialTest.java b/kvtest/kvclient-IT/src/main/java/oracle/kv/impl/api/ops/OpsSerialTest.java index 035f0f55..2f492c59 100644 --- a/kvtest/kvclient-IT/src/main/java/oracle/kv/impl/api/ops/OpsSerialTest.java +++ b/kvtest/kvclient-IT/src/main/java/oracle/kv/impl/api/ops/OpsSerialTest.java @@ -10,6 +10,7 @@ import static java.util.Collections.singletonList; import static oracle.kv.impl.util.SerialTestUtils.serialVersionChecker; import static oracle.kv.impl.util.SerialVersion.CLOUD_MR_TABLE; +import static oracle.kv.impl.util.SerialVersion.ROW_METADATA_VERSION; import java.util.Collections; import java.util.List; @@ -47,16 +48,19 @@ public void testDelete() { checkOps(serialVersionChecker( new 
Delete(KEY_BYTES, Choice.VALUE), SerialVersion.MINIMUM, 0x4c952989d59e23a1L, - CLOUD_MR_TABLE, 0xa0dada8a5d6bb4cbL), + CLOUD_MR_TABLE, 0xa0dada8a5d6bb4cbL, + ROW_METADATA_VERSION, 0x22fe249ac2907e8L), serialVersionChecker( new Delete(KEY_BYTES, Choice.ALL, TABLE_ID, - false /* doTombstone */), + false /* doTombstone */, null /* rowMetadata */), SerialVersion.MINIMUM, 0xc10e687be7c11fd7L, - CLOUD_MR_TABLE, 0xb5e3328ea7588e25L), + CLOUD_MR_TABLE, 0xb5e3328ea7588e25L, + ROW_METADATA_VERSION, 0x57e5ff46962347ceL), serialVersionChecker( new Delete(KEY_BYTES, Choice.ALL, TABLE_ID, - true /* doTombstone */), - CLOUD_MR_TABLE, 0x86c55edb51d0e544L)); + true /* doTombstone */, null /* rowMetadata */), + CLOUD_MR_TABLE, 0x86c55edb51d0e544L, + ROW_METADATA_VERSION, 0x670ce448218a0a40L)); } @Test @@ -64,16 +68,21 @@ public void testDeleteIfVersion() { checkOps(serialVersionChecker( new DeleteIfVersion(KEY_BYTES, Choice.VALUE, VERSION), SerialVersion.MINIMUM, 0xb6a2cf000c45c209L, - CLOUD_MR_TABLE, 0xc0d34f8c460721feL), + CLOUD_MR_TABLE, 0xc0d34f8c460721feL, + ROW_METADATA_VERSION, 0x7c9feb5f08881e80L), serialVersionChecker( new DeleteIfVersion(KEY_BYTES, Choice.VALUE, VERSION, - TABLE_ID, false /* doTombstone */), + TABLE_ID, false /* doTombstone */, + null /* rowMetadata */), SerialVersion.MINIMUM, 0x971cbeda8ede03bL, - CLOUD_MR_TABLE, 0x79a3aef8e614aeffL), + CLOUD_MR_TABLE, 0x79a3aef8e614aeffL, + ROW_METADATA_VERSION, 0x2511a05b1b53e6f7L), serialVersionChecker( new DeleteIfVersion(KEY_BYTES, Choice.VALUE, VERSION, - TABLE_ID, true /* doTombstone */), - CLOUD_MR_TABLE, 0xdac26edb2c1e6c91L)); + TABLE_ID, true /* doTombstone */, + null /* rowMetadata */), + CLOUD_MR_TABLE, 0xdac26edb2c1e6c91L, + ROW_METADATA_VERSION, 0x4672dca893599b70L)); } @Test @@ -294,6 +303,7 @@ public void testPutBatch() { VALUE_BYTES, 1 /* ttlVal */, TimeToLive.HOURS_ORDINAL, + 0 /* creationTime */, 0 /* mod time */, false /* isTombstone */, -1 /* streamId */); @@ -302,6 +312,7 @@ public void testPutBatch() { VALUE_BYTES, 1 /* ttlVal */, TimeToLive.HOURS_ORDINAL, + 0 /* creationTime */, 12345L /* mod time */, true /* isTombstone */, -1 /* streamId */); @@ -314,7 +325,8 @@ public void testPutBatch() { 0 /* localRegionId */ ), SerialVersion.MINIMUM, 0xc44845d191603b9bL, - SerialVersion.BULK_PUT_RESOLVE, 0xe6eb825dae9ba8c5L), + SerialVersion.BULK_PUT_RESOLVE, 0xe6eb825dae9ba8c5L, + SerialVersion.CREATION_TIME_VER, 0x64137f6745c83c26L), serialVersionChecker( new PutBatch(singletonList(kvPair), new long[] { TABLE_ID }, @@ -323,7 +335,8 @@ public void testPutBatch() { 0 /* localRegionId */ ), SerialVersion.MINIMUM, 0xf6c32911ff2ba5b4L, - SerialVersion.BULK_PUT_RESOLVE, 0xbdeb11e582a8e7f7L), + SerialVersion.BULK_PUT_RESOLVE, 0xbdeb11e582a8e7f7L, + SerialVersion.CREATION_TIME_VER, 0xb891815470aab01cL), serialVersionChecker( new PutBatch(singletonList(kvPair), new long[] { TABLE_ID }, @@ -331,7 +344,8 @@ public void testPutBatch() { true /* usePutResolve */, 1 /* localRegionId */ ), - SerialVersion.BULK_PUT_RESOLVE, 0x533dbaa3e1916793L), + SerialVersion.BULK_PUT_RESOLVE, 0x533dbaa3e1916793L, + SerialVersion.CREATION_TIME_VER, 0xb678c859b775c2d4L), serialVersionChecker( new PutBatch(singletonList(kvPairNew), new long[] { TABLE_ID }, @@ -339,7 +353,8 @@ public void testPutBatch() { true /* usePutResolve */, 1 /* localRegionId */ ), - SerialVersion.BULK_PUT_RESOLVE, 0x3ee4206c17be7b5bL)); + SerialVersion.BULK_PUT_RESOLVE, 0x3ee4206c17be7b5bL, + SerialVersion.CREATION_TIME_VER, 0xa3a29583759a1173L)); } @Test @@ -389,10 +404,12 @@ public void 
testPutResolve() { 1 /* expirationTimeMs */, true /* updateTTL */, false /* isTombstone */, + 0 /* creationTime */, 2 /* timestamp */, Region.NULL_REGION_ID), SerialVersion.MINIMUM, 0x20e1758fb369dcf5L, - CLOUD_MR_TABLE, 0x3606734cab6b7d93L), + CLOUD_MR_TABLE, 0x3606734cab6b7d93L, + SerialVersion.CREATION_TIME_VER, 0x3d59d52bacc906baL), serialVersionChecker( new PutResolve(KEY_BYTES, VALUE, @@ -401,9 +418,11 @@ public void testPutResolve() { 1 /* expirationTimeMs */, true /* updateTTL */, false /* isTombstone */, + 0 /* creationTime */, 2 /* timestamp */, regionId), - CLOUD_MR_TABLE, 0x6c1ddf4968294fe2L)); + CLOUD_MR_TABLE, 0x6c1ddf4968294fe2L, + SerialVersion.CREATION_TIME_VER, 0x61e5b0df88952406L)); } @Test diff --git a/kvtest/kvclient-IT/src/main/java/oracle/kv/impl/api/ops/ResultSerialTest.java b/kvtest/kvclient-IT/src/main/java/oracle/kv/impl/api/ops/ResultSerialTest.java index bdad8ad6..3575102a 100644 --- a/kvtest/kvclient-IT/src/main/java/oracle/kv/impl/api/ops/ResultSerialTest.java +++ b/kvtest/kvclient-IT/src/main/java/oracle/kv/impl/api/ops/ResultSerialTest.java @@ -51,18 +51,21 @@ public class ResultSerialTest extends TestBase { new ResultValueVersion(VALUE_BYTES, VERSION, 3 /* expirationTime */, + 0 /* creationTime */, 0 /* modificationTime */, -1 /* storageSize */); private static final ResultValueVersion RESULT_VALUE_VERSION_MOD = new ResultValueVersion(VALUE_BYTES, VERSION, 3 /* expirationTime */, + 0 /* creationTime */, 4 /* modificationTime */, -1 /* storageSize */); private static final ResultValueVersion RESULT_VALUE_VERSION_STORAGE = new ResultValueVersion(VALUE_BYTES, VERSION, 3 /* expirationTime */, + 0 /* creationTime */, 4 /* modificationTime */, 5 /* storageSize */); private static final ResultKeyValueVersion RESULT_KEY_VALUE_VERSION_EXP = @@ -70,6 +73,7 @@ public class ResultSerialTest extends TestBase { VALUE_BYTES, VERSION, 3 /* expirationTime */, + 0 /* creationTime */, 0 /* modificationTime */, false /* isTombstone */); private static final ResultKeyValueVersion RESULT_KEY_VALUE_VERSION_MOD = @@ -77,6 +81,7 @@ public class ResultSerialTest extends TestBase { VALUE_BYTES, VERSION, 3 /* expirationTime */, + 0 /* creationTime */, 4 /* modificationTime */, false /* isTombstone */); private static final ResultKeyValueVersion RESULT_KEY_VALUE_VERSION_TB = @@ -84,6 +89,7 @@ public class ResultSerialTest extends TestBase { VALUE_BYTES, VERSION, 3 /* expirationTime */, + 0 /* creationTime */, 4 /* modificationTime */, true /* isTombstone */); /** @@ -101,19 +107,22 @@ public void testGetResult() { 1 /* readKB */, 2 /* writeKB */, RESULT_VALUE_VERSION_EXP), - SerialVersion.MINIMUM, 0x1c79a005443f68cbL), + SerialVersion.MINIMUM, 0x1c79a005443f68cbL, + SerialVersion.CREATION_TIME_VER, 0xde540db894a75b62L), serialVersionChecker( new GetResult(OpCode.GET, 1 /* readKB */, 2 /* writeKB */, RESULT_VALUE_VERSION_MOD), - SerialVersion.MINIMUM, 0xa6b456f464e3c768L), + SerialVersion.MINIMUM, 0xa6b456f464e3c768L, + SerialVersion.CREATION_TIME_VER, 0x8364304311a59ecbL), serialVersionChecker( new GetResult(OpCode.GET, 1 /* readKB */, 2 /* writeKB */, RESULT_VALUE_VERSION_STORAGE), - SerialVersion.MINIMUM, 0x5512c63d261c2e17L)); + SerialVersion.MINIMUM, 0x5512c63d261c2e17L, + SerialVersion.CREATION_TIME_VER, 0x419284c566f0ab2aL)); } @Test @@ -127,10 +136,12 @@ public void testPutResult() { VERSION, 10 /* expTime */, false /* wasUpdate */, + 0 /* creationTime */, 0 /* modificationTime */, -1 /* storageSize */, -1 /* shard */), - SerialVersion.MINIMUM, 0x961816f3e524fa7dL), + 
SerialVersion.MINIMUM, 0x961816f3e524fa7dL, + SerialVersion.CREATION_TIME_VER, 0xa5db3f0a4ed8585bL), serialVersionChecker( new PutResult(OpCode.PUT, 1 /* readKB */, @@ -139,10 +150,12 @@ public void testPutResult() { VERSION, 10 /* expTime */, false /* wasUpdate */, + 0 /* creationTime */, 11 /* modificationTime */, -1 /* storageSize */, -1 /* shard */), - SerialVersion.MINIMUM, 0x65051cf4a2615e1dL), + SerialVersion.MINIMUM, 0x65051cf4a2615e1dL, + SerialVersion.CREATION_TIME_VER, 0x31ce10e68f5cdc30L), serialVersionChecker( new PutResult(OpCode.PUT, 1 /* readKB */, @@ -151,10 +164,13 @@ public void testPutResult() { VERSION, 10 /* expTime */, false /* wasUpdate */, + 0 /* creationTime */, 11 /* modificationTime */, 12 /* storageSize */, 13 /* shard */), - SerialVersion.MINIMUM, 0x4035eb2620422159L)); + SerialVersion.MINIMUM, 0x4035eb2620422159L, + SerialVersion.CREATION_TIME_VER, 0x8c868ff28db608eeL) + ); } @Test @@ -166,21 +182,24 @@ public void testDeleteResult() { 2 /* writeKB */, RESULT_VALUE_VERSION_EXP, true /* success */), - SerialVersion.MINIMUM, 0xd6b58ee93336952dL), + SerialVersion.MINIMUM, 0xd6b58ee93336952dL, + SerialVersion.CREATION_TIME_VER, 0x8b397364d5e8377bL), serialVersionChecker( new DeleteResult(OpCode.DELETE, 1 /* readKB */, 2 /* writeKB */, RESULT_VALUE_VERSION_MOD, true /* success */), - SerialVersion.MINIMUM, 0x66bbe208857f246dL), + SerialVersion.MINIMUM, 0x66bbe208857f246dL, + SerialVersion.CREATION_TIME_VER, 0x84a782aa3e15ce8eL), serialVersionChecker( new DeleteResult(OpCode.DELETE, 1 /* readKB */, 2 /* writeKB */, RESULT_VALUE_VERSION_STORAGE, true /* success */), - SerialVersion.MINIMUM, 0x25cb8a869de8fb84L)); + SerialVersion.MINIMUM, 0x25cb8a869de8fb84L, + SerialVersion.CREATION_TIME_VER, 0xec077950c1437c36L)); } @Test @@ -222,7 +241,8 @@ public void testExecuteResult() { 2 /* writeKB */, RESULT_VALUE_VERSION_EXP, true /* success */))), - SerialVersion.MINIMUM, 0xfbc341b841ec19b0L), + SerialVersion.MINIMUM, 0xfbc341b841ec19b0L, + SerialVersion.CREATION_TIME_VER, 0x5b203d39eb69f638L), serialVersionChecker( new ExecuteResult( OpCode.EXECUTE, @@ -234,7 +254,8 @@ public void testExecuteResult() { 2 /* writeKB */, RESULT_VALUE_VERSION_EXP, false /* success */)), - SerialVersion.MINIMUM, 0xdb0549d4ee8b9d3aL)); + SerialVersion.MINIMUM, 0xdb0549d4ee8b9d3aL, + SerialVersion.CREATION_TIME_VER, 0x213d33e8bb48e0c3L)); } @Test @@ -265,7 +286,8 @@ public void testIterateResult() { true /* moreElements */), SerialVersion.MINIMUM, 0x748e650ff4aabeeaL, SerialVersion.TABLE_ITERATOR_TOMBSTONES_VER, - 0x226a94684e4b9475L), + 0x226a94684e4b9475L, + SerialVersion.CREATION_TIME_VER, 0x82ee31160536c72bL), serialVersionChecker( new IterateResult(OpCode.STORE_ITERATE, 1 /* readKB */, @@ -274,7 +296,8 @@ public void testIterateResult() { true /* moreElements */), SerialVersion.MINIMUM, 0x967a8e1f86a512bbL, SerialVersion.TABLE_ITERATOR_TOMBSTONES_VER, - 0x451c2971bf01bc22L), + 0x451c2971bf01bc22L, + SerialVersion.CREATION_TIME_VER, 0xe1239651aed9af8dL), serialVersionChecker( new IterateResult(OpCode.STORE_ITERATE, 1 /* readKB */, @@ -282,7 +305,9 @@ public void testIterateResult() { singletonList(RESULT_KEY_VALUE_VERSION_TB), true /* moreElements */), SerialVersion.TABLE_ITERATOR_TOMBSTONES_VER, - 0xb430af80b284951fL)); + 0xb430af80b284951fL, + SerialVersion.CREATION_TIME_VER, + 0x6f01f80e490f46b2L)); } @Test @@ -332,11 +357,13 @@ public void testIndexRowsIterateResult() { VALUE_BYTES, VERSION, 3 /* expirationTime */, + 0 /* creationTime */, 0 /* modificationTime */)), true /* moreElements 
*/), SerialVersion.MINIMUM, 0xfba1899f70ebbb03L, SerialVersion.TABLE_ITERATOR_TOMBSTONES_VER, - 0xe9d5fdbc25f9656cL), + 0xe9d5fdbc25f9656cL, + SerialVersion.CREATION_TIME_VER, 0xb9e5b3fd47c10af7L), serialVersionChecker( new IndexRowsIterateResult( OpCode.INDEX_ITERATE, @@ -349,11 +376,14 @@ public void testIndexRowsIterateResult() { VALUE_BYTES, VERSION, 3 /* expirationTime */, + 0 /* creationTime */, 4 /* modificationTime */)), true /* moreElements */), SerialVersion.MINIMUM, 0xe14fa82463ae23d0L, SerialVersion.TABLE_ITERATOR_TOMBSTONES_VER, - 0xb85aa6567512024aL)); + 0xb85aa6567512024aL, + SerialVersion.CREATION_TIME_VER, 0x33829f3f9a4ac107L) + ); } @Test @@ -369,7 +399,8 @@ public void testBulkGetIterateResult() { 3 /* resumeParentKeyIndex */), SerialVersion.MINIMUM, 0x37c53d0c655f61b3L, SerialVersion.TABLE_ITERATOR_TOMBSTONES_VER, - 0xe6bb01ba5d56a3bdL), + 0xe6bb01ba5d56a3bdL, + SerialVersion.CREATION_TIME_VER, 0xc807b4a234215c05L), serialVersionChecker( new BulkGetIterateResult( OpCode.MULTI_GET_BATCH, @@ -380,7 +411,8 @@ public void testBulkGetIterateResult() { 3 /* resumeParentKeyIndex */), SerialVersion.MINIMUM, 0x7b10fe7bbe8748ebL, SerialVersion.TABLE_ITERATOR_TOMBSTONES_VER, - 0xf86b4a12b5d94633L)); + 0xf86b4a12b5d94633L, + SerialVersion.CREATION_TIME_VER, 0x9b93f66276b338a7L)); } @Test diff --git a/kvtest/kvdatacheck-IT/pom.xml b/kvtest/kvdatacheck-IT/pom.xml index b2854d3f..a64d7a41 100644 --- a/kvtest/kvdatacheck-IT/pom.xml +++ b/kvtest/kvdatacheck-IT/pom.xml @@ -7,7 +7,7 @@ com.oracle.nosql kvtest - 25.1.13 + 25.3.21 kvdatacheck-IT diff --git a/kvtest/kvquery-IT/pom.xml b/kvtest/kvquery-IT/pom.xml index bde4966f..9193427a 100644 --- a/kvtest/kvquery-IT/pom.xml +++ b/kvtest/kvquery-IT/pom.xml @@ -7,7 +7,7 @@ com.oracle.nosql kvtest - 25.1.13 + 25.3.21 kvquery-IT diff --git a/kvtest/kvquery-IT/src/main/java/qt/framework/JsonLoaderKV.java b/kvtest/kvquery-IT/src/main/java/qt/framework/JsonLoaderKV.java index 29ed3b86..e85e6333 100644 --- a/kvtest/kvquery-IT/src/main/java/qt/framework/JsonLoaderKV.java +++ b/kvtest/kvquery-IT/src/main/java/qt/framework/JsonLoaderKV.java @@ -14,6 +14,7 @@ import oracle.kv.FaultException; import oracle.kv.MetadataNotFoundException; import oracle.kv.impl.api.table.FieldDefImpl; +import oracle.kv.impl.api.table.NullJsonValueImpl; import oracle.kv.table.FieldDef; import oracle.kv.table.FieldValue; import oracle.kv.table.Row; @@ -28,6 +29,8 @@ public class JsonLoaderKV extends JsonLoader{ private final TableAPI tableImpl; + private final String dataFile; + private WriteOptions option; /** @@ -41,9 +44,9 @@ public class JsonLoaderKV extends JsonLoader{ * * @return a map of table name and its corresponding record count loaded. */ - public static Map loadJsonFromFile(final TableAPI tableImpl, - final String fileName, - final WriteOptions options) + public static Map loadJsonFromFile(TableAPI tableImpl, + String fileName, + WriteOptions options) throws IllegalArgumentException, IOException, FaultException { return loadJsonFromFile(tableImpl, null, fileName, options); @@ -56,14 +59,14 @@ public static Map loadJsonFromFile(final TableAPI tableImpl, * method, the difference is that 2nd argument "table" is provided to * specify the target table to load records to. 
*/ - public static Map loadJsonFromFile(final TableAPI tableImpl, - final Table table, - final String fileName, - final WriteOptions options) + public static Map loadJsonFromFile(TableAPI tableImpl, + Table table, + String fileName, + WriteOptions options) throws IllegalArgumentException, IOException, FaultException { - return new JsonLoaderKV(tableImpl).loadJsonToTables(table, fileName, - options, true); + return new JsonLoaderKV(tableImpl, fileName). + loadJsonToTables(table, options, true); } /** @@ -77,9 +80,9 @@ public static Map loadJsonFromFile(final TableAPI tableImpl, * * @return a map of table name and its corresponding record count loaded. */ - public static Map loadCSVFromFile(final TableAPI tableImpl, - final String fileName, - final WriteOptions options) + public static Map loadCSVFromFile(TableAPI tableImpl, + String fileName, + WriteOptions options) throws IllegalArgumentException, IOException, FaultException { return loadCSVFromFile(tableImpl, null, fileName, options); @@ -92,18 +95,19 @@ public static Map loadCSVFromFile(final TableAPI tableImpl, * method, the difference is that 2nd argument "table" is provided to * specify the target table to load records to. */ - public static Map loadCSVFromFile(final TableAPI tableImpl, - final Table table, - final String fileName, - final WriteOptions options) + public static Map loadCSVFromFile(TableAPI tableImpl, + Table table, + String fileName, + WriteOptions options) throws IllegalArgumentException, IOException, RuntimeException { - return new JsonLoaderKV(tableImpl).loadCSVToTables(table, fileName, - options, true); + return new JsonLoaderKV(tableImpl, fileName). + loadCSVToTables(table, options, true); } - public JsonLoaderKV(final TableAPI tableImpl) { + public JsonLoaderKV(TableAPI tableImpl, String dataFile) { this.tableImpl = tableImpl; + this.dataFile = dataFile; } private void setWriteOptions(WriteOptions option) { @@ -114,7 +118,6 @@ private void setWriteOptions(WriteOptions option) { * Load JSON records from a file to tables. * * @param table the initial table to which JSON records are loaded. - * @param fileName the file contains JSON records. * @param options the WriteOptions used to put records. * @param exitOnFailure the flag indicates if exits if a record is * failed to put. @@ -122,13 +125,12 @@ private void setWriteOptions(WriteOptions option) { * @return A map of table name and count of records loaded. */ public Map loadJsonToTables(Table table, - String fileName, WriteOptions options, boolean exitOnFailure) throws IllegalArgumentException, IOException, FaultException { setWriteOptions(options); - return loadRecordsFromFile(table, fileName, Type.JSON, + return loadRecordsFromFile(table, dataFile, Type.JSON, false, exitOnFailure); } @@ -137,7 +139,6 @@ public Map loadJsonToTables(Table table, * * @param table the target table to which CSV records are loaded, the * records of other tables will be skipped. - * @param fileName the file contains CSV records. * @param options the WriteOptions used to put records. * @param exitOnFailure the flag indicates if exits if a record is * failed to put. @@ -145,13 +146,12 @@ public Map loadJsonToTables(Table table, * @return A map of table name and count of records loaded. 
*/ public Map loadCSVToTables(Table table, - String fileName, WriteOptions options, boolean exitOnFailure) throws IllegalArgumentException, IOException, FaultException { setWriteOptions(options); - return loadRecordsFromFile(table, fileName, Type.CSV, + return loadRecordsFromFile(table, dataFile, Type.CSV, false, exitOnFailure); } @@ -207,6 +207,15 @@ public boolean putRecord(Object target, String rowLine, Type type) Row row = createRow((Table)target, rowLine, type); + if (dataFile.contains("row_metadata")) { + FieldValue info = row.get("info"); + if (info == null) { + info = NullJsonValueImpl.getInstance(); + } + String metadata = info.toJsonString(false); + row.setRowMetadata(metadata); + } + int retry = 0; while (true) { try { diff --git a/kvtest/kvquery-IT/src/main/java/qt/framework/QTDefaultImpl.java b/kvtest/kvquery-IT/src/main/java/qt/framework/QTDefaultImpl.java index 580cfc78..8d1b1247 100644 --- a/kvtest/kvquery-IT/src/main/java/qt/framework/QTDefaultImpl.java +++ b/kvtest/kvquery-IT/src/main/java/qt/framework/QTDefaultImpl.java @@ -70,7 +70,7 @@ public void before() { if (beforeDataProp != null) { File dataFile = new File(configFile.getParentFile(), - beforeDataProp); + beforeDataProp); if (!dataFile.exists() || !dataFile.isFile()) throw new IllegalArgumentException("Property before-data-file" + " doesn't reference a valid file."); diff --git a/kvtest/kvquery-IT/src/main/resources/cases/gb/q/all b/kvtest/kvquery-IT/src/main/resources/cases/gb/q/all index 3494fc2d..1279855d 100644 --- a/kvtest/kvquery-IT/src/main/resources/cases/gb/q/all +++ b/kvtest/kvquery-IT/src/main/resources/cases/gb/q/all @@ -47,7 +47,8 @@ group by f.xact.acctno collect04.q ################ select f.xact.state, - array_collect(seq_transform(f.xact.items[], $.qty * $.price)) as amounts, + array_collect(seq_transform(f.xact.items[], + cast($.qty * $.price as integer))) as amounts, count(*) as cnt from bar f group by f.xact.state @@ -65,6 +66,44 @@ group by f.xact.state +################ +collect07.q +################ +select array_collect({ "id1" : f.id1, + "id2" : f.id2, + "id3" : f.id3, + "prodcat" : f.xact.prodcat, + "year" : f.xact.year, + "str1" : "1xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", + "str2" : "2xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", + "str3" : "3xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", + "str4" : "4xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }) as collect, + count(*) as cnt +from Foo f + + + +################ +collect08.q +################ +select f.xact.state, + array_collect({ "items" : f.xact.items, "acctno" : f.xact.acctno }) as collect +from bar f +group by f.xact.state + + + +################ +collect09.q +################ +select b.xact.state, array_collect(b.xact.city) as cities +from bar b +group by b.xact.state +order by b.xact.state desc + + + ################ collect_d01.q ################ @@ -451,7 +490,8 @@ empty02.q select count(*) as cnt, sum(e.record.long) as sum, avg(e.record.long) as avg, - min(id2) as min + min(id2) as min, + array_collect(e.record.long) as collect from empty e @@ -471,7 +511,8 @@ empty04.q ################ select sum(f.xact.item.discount) as sum, count(*) as cnt, - avg(id3) as avg + avg(id3) as avg, + array_collect(f.xact.item.discount) as collect from Foo f where id1 < 0 @@ -731,6 +772,44 @@ where id1 < 1 and id2 < 2 +################ +gb16.q +################ +select count(b.j.i) +from xyz b +group by b.s + + + +################ +gb17.q +################ +select acctno, count(b.xact.year) +from boo b +group by acctno + + + 
+################ +gb18.q +################ +select b.xact.state, count(*) as count +from bar b +group by b.xact.state +order by b.xact.state desc nulls last + + + +################ +gb19.q +################ +select b.xact.state, count(*) as count +from bar b +group by b.xact.state +order by b.xact.state nulls first + + + ################ jgb01.q ################ @@ -902,6 +981,18 @@ where f.xact.year = 2000 and f.xact.items[].qty >any 2 +################ +jgb25.q +################ +select f.xact.storeid, count(*) +from Foo f +group by f.xact.storeid +order by f.xact.storeid +limit 3 +offset 1 + + + ################ noidx01.q ################ @@ -1182,6 +1273,20 @@ group by f.xact.year +################ +noidx_collect05.q +################ +select array_collect({ "id1" : f.id1, + "id2" : f.id2, + "id3" : f.id3, + "acctno" : f.xact.acctno, + "amount" : f.xact.item.qty * f.xact.item.price + }) as accounts, + count(*) as cnt +from Foo f + + + ################ noidx_collect_d01.q ################ @@ -1439,13 +1544,23 @@ group by id1, id2 +################ +onepart06.q +################ +select f.xact.state, count(*) as cnt +from Foo f +where id1 = 0 +group by f.xact.state + + + ################ seq_sort01.q ################ select f.xact.acctno, - seq_sort(collect({ "prodcat" : f.xact.prodcat, - "year" : f.xact.year - })[]) as collect, + seq_sort(array_collect({ "prodcat" : f.xact.prodcat, + "year" : f.xact.year + })[]) as collect, count(*) as cnt from Foo f group by f.xact.acctno @@ -1456,9 +1571,9 @@ group by f.xact.acctno seq_sort02.q ################ select f.xact.acctno, - [seq_sort(collect({ "prodcat" : f.xact.prodcat, - "qty" : f.xact.item.qty - })[]) + [seq_sort(array_collect({ "prodcat" : f.xact.prodcat, + "qty" : f.xact.item.qty + })[]) ] as collect, count(*) as cnt from Foo f @@ -1471,7 +1586,7 @@ group by f.xact.acctno seq_sort03.q ################ select f.xact.state, - seq_sort(collect(seq_transform(f.xact.items[], $.qty * $.price))[]) as amounts, + seq_sort(array_collect(seq_transform(f.xact.items[], $.qty * $.price))[]) as amounts, count(*) as cnt from bar f group by f.xact.state @@ -1482,7 +1597,7 @@ group by f.xact.state seq_sort04.q ################ select f.xact.state, - seq_sort(collect([seq_transform(f.xact.items[], $.qty * $.price)])[]) as amounts, + seq_sort(array_collect([seq_transform(f.xact.items[], $.qty * $.price)])[]) as amounts, count(*) as cnt from bar f group by f.xact.state @@ -1493,7 +1608,7 @@ group by f.xact.state seq_sort05.q ################ select f.xact.prodcat, - seq_sort(collect(f.xact.acctno)[]) as accounts, + seq_sort(array_collect(f.xact.acctno)[]) as accounts, count(*) as cnt from Foo f where f.xact.year = 2000 @@ -1505,9 +1620,9 @@ group by f.xact.prodcat seq_sort06.q ################ select f.xact.prodcat, - seq_sort(collect({ "acctno" : f.xact.acctno, - "amount" : f.xact.item.qty * f.xact.item.price - })[]) as accounts, + seq_sort(array_collect({ "acctno" : f.xact.acctno, + "amount" : f.xact.item.qty * f.xact.item.price + })[]) as accounts, count(*) as cnt from Foo f where f.xact.year = 2000 @@ -1519,7 +1634,8 @@ group by f.xact.prodcat seq_sort07.q ################ select f.xact.year, - seq_sort(collect(seq_transform(f.xact.items[], $.qty * $.price))[]) as amounts, + seq_sort(array_collect(seq_transform(f.xact.items[], + cast($.qty * $.price as integer)))[]) as amounts, count(*) as cnt from bar f group by f.xact.year diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/after.ddl 
b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/after.ddl new file mode 100644 index 00000000..80170295 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/after.ddl @@ -0,0 +1,21 @@ +drop table company.department.team.employee + +drop table company.department.team + +drop table company.department + +drop table company.reviews + +drop table company.project + +drop table company.client + +drop table company.skill + +drop table company.no_records + +drop table company.null_records + +drop table company + +drop table org diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/before.data b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/before.data new file mode 100644 index 00000000..37f99dd8 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/before.data @@ -0,0 +1,421 @@ + +################## Data for Functional Testing ################ + +Table : company + +{ + "company_id": 1, + "name": "TechCorp", + "head_office_location": {"city": "NY", "country": "USA"}, + "founders": ["Alice", "Bob"] +} +{ + "company_id": 2, + "name": "InnovateX", + "head_office_location": {"city": "San Francisco", "country": "USA"}, + "founders": ["Charlie", "David"] +} +{ + "company_id": 3, + "name": "NextGen Solutions", + "head_office_location": {"city": "London", "country": "UK"}, + "founders": ["Emma", "Frank"] +} +{ + "company_id": 4, + "name": "FutureTech", + "head_office_location": {"city": "Berlin", "country": "Germany"}, + "founders": ["Grace", "Henry"] +} +{ + "company_id": 5, + "name": "Oracle", + "head_office_location": null, + "founders": ["Larry", "Safra"] +} + +Table : company.department + +{ + "company_id": 1, + "department_id": 1, + "name": "Engineering", + "budget_breakdown": {"RnD": 500000.78, "Infrastructure": 200000.0}, + "established": "2024-05-20T00:00:00" +} +{ + "company_id": 2, + "department_id": 1, + "name": "Engineering", + "budget_breakdown": {"RnD": 600000.798, "Infrastructure": 250000.879}, + "established": "2021-02-28T23:59:59" +} +{ + "company_id": 2, + "department_id": 2, + "name": "Marketing", + "budget_breakdown": {"Advertising": 300000.25, "Market Research": 150000.75}, + "established": "2016-10-19T09:18:05.5555" +} +{ + "company_id": 3, + "department_id": 1, + "name": "Engineering", + "budget_breakdown": {"RnD": 700000.367, "Infrastructure": 300000.776}, + "established": "2020-02-28T23:59:59.123456" +} +{ + "company_id": 3, + "department_id": 2, + "name": "Sales", + "budget_breakdown": {"Salaries": 400000.4, "Training": 100000.68688}, + "established": "2023-05-28T00:13:12" +} + + +Table : company.reviews + +{ + "company_id": 1, + "review_id": 1, + "emp_id": 1, + "feedback": { + "reviewer_emp_id": 1, + "rating": 4.8, + "comments": "Excellent AI model development", + "history": [{ "date": "2025-02-15", "previous_rating": "4.5" }] + } +} +{ + "company_id": 2, + "review_id": 1, + "emp_id": 1, + "feedback": { + "reviewer_emp_id": 2, + "rating": 4.7, + "comments": "Great contributions to software architecture", + "history": [{ "date": "2025-01-10", "previous_rating": "4.5" }] + } +} +{ + "company_id": 2, + "review_id": 2, + "emp_id": 2, + "feedback": { + "reviewer_emp_id": 1, + "rating": 4.5, + "comments": "Strong leadership in digital marketing campaigns", + "history": [{ "date": "2025-02-05", "previous_rating": "4.2" }] + } +} +{ + "company_id": 3, + "review_id": 3, + "emp_id": 1, + "feedback": { + "reviewer_emp_id": 2, + "rating": 4.9, + "comments": "Outstanding performance in cloud security", 
+ "history": [{ "date": "2025-01-20", "previous_rating": "4.6" }] + } +} +{ + "company_id": 3, + "review_id": 4, + "emp_id": 2, + "feedback": { + "reviewer_emp_id": 1, + "rating": 4.6, + "comments": "Excellent client relationship management", + "history": [{ "date": "2025-02-08", "previous_rating": "4.3" }] + } +} + +Table : company.department.team + +{ + "company_id": 1, + "department_id": 1, + "team_id": 1, + "name": "AI Research", + "technologies_used": ["Python", "TensorFlow"] +} +{ + "company_id": 2, + "department_id": 1, + "team_id": 1, + "name": "Software Development", + "technologies_used": ["Java"] +} +{ + "company_id": 2, + "department_id": 2, + "team_id": 2, + "name": "Digital Marketing", + "technologies_used": ["Google Ads", "SEO Tools"] +} +{ + "company_id": 3, + "department_id": 1, + "team_id": 1, + "name": "Cloud Engineering", + "technologies_used": ["AWS", "Docker", "Kubernetes"] +} +{ + "company_id": 3, + "department_id": 2, + "team_id": 2, + "name": "Enterprise Sales", + "technologies_used": ["Salesforce", "HubSpot"] +} + +Table : company.department.team.employee + +{ + "company_id": 1, + "department_id": 1, + "team_id": 1, + "emp_id": 1, + "name": "Alice", + "projects": [1, 2, 5], + "skills": [1, 2, 3], + "contact_info": {"phone": "1234567890", "email": "alice@techcorp.com"} +} +{ + "company_id": 2, + "department_id": 1, + "team_id": 1, + "emp_id": 1, + "name": "Charlie", + "projects": [3, 4], + "skills": [4, 6], + "contact_info": {"phone": "2345678901", "email": "charlie@innovatex.com"} +} +{ + "company_id": 2, + "department_id": 2, + "team_id": 2, + "emp_id": 2, + "name": "David", + "projects": [7], + "skills": [4, 5, 6], + "contact_info": {"phone": "3456789012", "email": "david@innovatex.com"} +} +{ + "company_id": 3, + "department_id": 1, + "team_id": 1, + "emp_id": 1, + "name": "Emma", + "projects": [5, 7], + "skills": [7, 8], + "contact_info": {"phone": "4567890123", "email": "emma@nextgensolutions.com"} +} +{ + "company_id": 3, + "department_id": 2, + "team_id": 2, + "emp_id": 2, + "name": "Frank", + "projects": [5], + "skills": [9], + "contact_info": {"phone": "5678901234", "email": "frank@nextgensolutions.com"} +} + +Table : company.project + +{ + "company_id": 1, + "project_id": 1, + "client_id": 1, + "name": "AI Chatbot", + "project_milestones": {"Phase1": "Completed", "Phase2": "Ongoing"} +} +{ + "company_id": 1, + "project_id": 2, + "client_id": 2, + "name": "E-commerce Recommendation System", + "project_milestones": {"Phase1": "Completed", "Phase2": "Testing", "Phase3": "Pending"} +} +{ + "company_id": 1, + "project_id": 5, + "client_id": 1, + "name": "Messaging App", + "project_milestones": {"Phase1": "Completed", "Phase2": "Ongoing"} +} +{ + "company_id": 2, + "project_id": 3, + "client_id": 3, + "name": "Cloud Migration", + "project_milestones": {"Phase1": "Ongoing", "Phase2": "Pending"} +} +{ + "company_id": 2, + "project_id": 4, + "client_id": 4, + "name": "Healthcare Analytics Dashboard", + "project_milestones": {"Phase1": "Completed", "Phase2": "Completed", "Phase3": "Deployment"} +} +{ + "company_id": 2, + "project_id": 7, + "client_id": 1, + "name": "Fine Grain Auth", + "project_milestones": {"Phase1": "Completed", "Phase2": "Completed"} +} +{ + "company_id": 3, + "project_id": 5, + "client_id": 5, + "name": "Cybersecurity Threat Detection", + "project_milestones": {"Phase1": "Research", "Phase2": "Implementation"} +} +{ + "company_id": 3, + "project_id": 7, + "client_id": 5, + "name": "Firewall", + "project_milestones": {"Phase1": "Ongoing"} +} + 
+Table : company.client + +{ + "company_id": 1, + "client_id": 1, + "name": "RetailCorp", + "preferred_contact_methods": ["Email", "Phone"] +} +{ + "company_id": 1, + "client_id": 2, + "name": "HealthPlus", + "preferred_contact_methods": ["Phone", "Video Call"] +} +{ + "company_id": 2, + "client_id": 3, + "name": "FinTech Solutions", + "preferred_contact_methods": ["Email", "Chat"] +} +{ + "company_id": 2, + "client_id": 4, + "name": "EduLearn", + "preferred_contact_methods": ["Email", "In-Person Meeting"] +} +{ + "company_id": 3, + "client_id": 5, + "name": "AutoMotive AI", + "preferred_contact_methods": ["Phone", "Chat"] +} + +Table : company.skill + +{ + "company_id": 1, + "skill_id": 1, + "skill_value": 10, + "name": "Machine Learning" +} +{ + "company_id": 1, + "skill_id": 2, + "skill_value": 20, + "name": "Data Analysis" +} +{ + "company_id": 1, + "skill_id": 3, + "skill_value": 30, + "name": "Programming" +} +{ + "company_id": 2, + "skill_id": 4, + "skill_value": 40, + "name": "Cloud Computing" +} +{ + "company_id": 2, + "skill_id": 5, + "skill_value": 50, + "name": "Cybersecurity" +} +{ + "company_id": 2, + "skill_id": 6, + "skill_value": 60, + "name": "AI" +} +{ + "company_id": 3, + "skill_id": 7, + "skill_value": 70, + "name": "Software Development" +} +{ + "company_id": 3, + "skill_id": 8, + "skill_value": 80, + "name": "Project Management" +} +{ + "company_id": 3, + "skill_id": 9, + "skill_value": 90, + "name": "Web Development" +} + +Table : company.null_records + +{ + "company_id": 1, + "record_id": 1, + "value": null +} +{ + "company_id": 2, + "record_id": 2, + "value": null +} +{ + "company_id": 2, + "record_id": 1, + "value": 2147483647 +} +{ + "company_id": 3, + "record_id": 3, + "value": 2147483647 +} +{ + "company_id": 4, + "record_id": 4, + "value": -2147483648 +} +{ + "company_id": 5, + "record_id": 5, + "value": null +} + +Table : org + +{ + "company_id": 1, + "name": "Oracle" +} +{ + "company_id": 2, + "name": "Google" +} +{ + "company_id": 3, + "name": "Apple" +} + diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/before.ddl b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/before.ddl new file mode 100644 index 00000000..ed934090 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/before.ddl @@ -0,0 +1,81 @@ + +############### Tables for Functional Testing ############# + +CREATE TABLE company ( + company_id INTEGER, + name STRING, + head_office_location MAP(STRING), + founders ARRAY(STRING), + PRIMARY KEY (company_id) +) + +CREATE TABLE company.department ( + department_id INTEGER, + name STRING, + budget_breakdown MAP(DOUBLE), + established TIMESTAMP(4), + PRIMARY KEY (department_id) +) + +CREATE TABLE company.reviews ( + review_id LONG, + emp_id LONG, + feedback JSON, + PRIMARY KEY (review_id) +) + +CREATE TABLE company.project ( + project_id INTEGER, + client_id INTEGER, + name STRING, + project_milestones MAP(STRING), + PRIMARY KEY (project_id) +) + +CREATE TABLE company.client ( + client_id INTEGER, + name STRING, + preferred_contact_methods ARRAY(STRING), + PRIMARY KEY (client_id) +) + +CREATE TABLE company.skill ( + skill_id INTEGER, + skill_value INTEGER, + name STRING, + PRIMARY KEY (skill_id) +) + +CREATE TABLE company.department.team ( + team_id INTEGER, + name STRING, + technologies_used ARRAY(STRING), + PRIMARY KEY (team_id) +) + +CREATE TABLE company.department.team.employee ( + emp_id LONG, + name STRING, + projects ARRAY(INTEGER), + skills ARRAY(INTEGER), + contact_info MAP(STRING), + PRIMARY 
KEY (emp_id) +) + +CREATE TABLE company.no_records ( + record_id INTEGER, + PRIMARY KEY (record_id) +) + +CREATE TABLE company.null_records ( + record_id INTEGER, + value INTEGER, + PRIMARY KEY (record_id) +) + +CREATE TABLE org ( + company_id INTEGER, + name STRING, + PRIMARY KEY (company_id) +) + diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/err01.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/err01.r new file mode 100644 index 00000000..9a8a79e2 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/err01.r @@ -0,0 +1 @@ +Compile-Exception \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/err02.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/err02.r new file mode 100644 index 00000000..9a8a79e2 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/err02.r @@ -0,0 +1 @@ +Compile-Exception \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/err03.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/err03.r new file mode 100644 index 00000000..9a8a79e2 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/err03.r @@ -0,0 +1 @@ +Compile-Exception \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/err04.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/err04.r new file mode 100644 index 00000000..9a8a79e2 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/err04.r @@ -0,0 +1 @@ +Compile-Exception \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/err05.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/err05.r new file mode 100644 index 00000000..9a8a79e2 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/err05.r @@ -0,0 +1 @@ +Compile-Exception \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/loj01.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/loj01.r new file mode 100644 index 00000000..b3517698 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/loj01.r @@ -0,0 +1,133 @@ +compiled-query-plan +{ +"query file" : "idc_inner_join/q/loj01.q", +"plan" : +{ + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_PARTITIONS", + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "NESTED_LOOP_JOIN", + "join predicates" : [ + { "outerBranch" :0, "outerExpr" : 0, "innerVar" : 0 } + ], + "branches" : [ + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company", + "row variable" : "$$c", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$$c", + "SELECT expressions" : [ + { + "field name" : "outerJoinVal1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$c" + } + } + } + ] + }, + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.department", + "row variable" : "$$d", + 
"index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {"company_id":0}, + "range conditions" : {} + } + ], + "key bind expressions" : [ + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar0" + } + ], + "map of key bind expressions" : [ + [ 0 ] + ], + "descendant tables" : [ + { "table" : "company.department.team", "row variable" : "$$t", "covering primary index" : true } + ], + "position in join" : 1 + }, + "FROM variables" : ["$$d", "$$t"], + "SELECT expressions" : [ + + ] + } + ] + + }, + "FROM variable" : "$join-0", + "SELECT expressions" : [ + { + "field name" : "company_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$c" + } + } + }, + { + "field name" : "department_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "department_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$d" + } + } + }, + { + "field name" : "team_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "team_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$t" + } + } + } + ] + } +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/loj02.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/loj02.r new file mode 100644 index 00000000..bca00c59 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/loj02.r @@ -0,0 +1,133 @@ +compiled-query-plan +{ +"query file" : "idc_inner_join/q/loj02.q", +"plan" : +{ + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_PARTITIONS", + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "NESTED_LOOP_JOIN", + "join predicates" : [ + { "outerBranch" :0, "outerExpr" : 0, "innerVar" : 0 } + ], + "branches" : [ + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company", + "row variable" : "$$c", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$$c", + "SELECT expressions" : [ + { + "field name" : "outerJoinVal1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$c" + } + } + } + ] + }, + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.department", + "row variable" : "$$d", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {"company_id":0}, + "range conditions" : {} + } + ], + "key bind expressions" : [ + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar0" + } + ], + "map of key bind expressions" : [ + [ 0 ] + ], + "descendant tables" : [ + { "table" : "company.department.team.employee", "row variable" : "$$e", "covering primary index" : true } + ], + "position in join" : 1 + }, + "FROM variables" : ["$$d", "$$e"], + "SELECT expressions" : [ + + ] + } + ] + + }, + "FROM variable" : "$join-0", + "SELECT expressions" : [ + { + "field name" : "company_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" 
: "$$c" + } + } + }, + { + "field name" : "department_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "department_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$d" + } + } + }, + { + "field name" : "emp_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "emp_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + } + ] + } +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/loj03.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/loj03.r new file mode 100644 index 00000000..77d63199 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/loj03.r @@ -0,0 +1,156 @@ +compiled-query-plan + +{ +"query file" : "idc_inner_join/q/loj03.q", +"plan" : +{ + "iterator kind" : "SORT", + "order by fields at positions" : [ 0, 1, 2 ], + "input iterator" : + { + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_PARTITIONS", + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "NESTED_LOOP_JOIN", + "join predicates" : [ + { "outerBranch" :0, "outerExpr" : 0, "innerVar" : 0 }, + { "outerBranch" :0, "outerExpr" : 1, "innerVar" : 1 } + ], + "branches" : [ + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.department.team.employee", + "row variable" : "$$e", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "ancestor tables" : [ + { "table" : "company.department.team", "row variable" : "$$t", "covering primary index" : true } ], + "position in join" : 0 + }, + "FROM variables" : ["$$t", "$$e"], + "SELECT expressions" : [ + { + "field name" : "outerJoinVal1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + }, + { + "field name" : "outerJoinVal2", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "department_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + } + ] + }, + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.department", + "row variable" : "$$d", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {"company_id":0,"department_id":0}, + "range conditions" : {} + } + ], + "key bind expressions" : [ + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar0" + }, + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar1" + } + ], + "map of key bind expressions" : [ + [ 0, 1 ] + ], + "position in join" : 1 + }, + "FROM variable" : "$$d", + "SELECT expressions" : [ + + ] + } + ] + + }, + "FROM variable" : "$join-0", + "SELECT expressions" : [ + { + "field name" : "company_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$d" + } + } + }, + { + "field name" : "team_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "team_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$t" + } + } + }, + { + "field name" : "emp_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + 
"field name" : "emp_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + } + ] + } + } +} +} \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/loj04.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/loj04.r new file mode 100644 index 00000000..24a1594f --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/loj04.r @@ -0,0 +1,138 @@ +compiled-query-plan + +{ +"query file" : "idc_inner_join/q/loj04.q", +"plan" : +{ + "iterator kind" : "SORT", + "order by fields at positions" : [ 0, 1, 2 ], + "input iterator" : + { + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_PARTITIONS", + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "NESTED_LOOP_JOIN", + "join predicates" : [ + { "outerBranch" :0, "outerExpr" : 0, "innerVar" : 0 } + ], + "branches" : [ + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.department.team.employee", + "row variable" : "$$e", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "ancestor tables" : [ + { "table" : "company.department", "row variable" : "$$d", "covering primary index" : true } ], + "position in join" : 0 + }, + "FROM variables" : ["$$d", "$$e"], + "SELECT expressions" : [ + { + "field name" : "outerJoinVal1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + } + ] + }, + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company", + "row variable" : "$$c", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {"company_id":0}, + "range conditions" : {} + } + ], + "key bind expressions" : [ + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar0" + } + ], + "map of key bind expressions" : [ + [ 0 ] + ], + "position in join" : 1 + }, + "FROM variable" : "$$c", + "SELECT expressions" : [ + + ] + } + ] + + }, + "FROM variable" : "$join-0", + "SELECT expressions" : [ + { + "field name" : "company_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$c" + } + } + }, + { + "field name" : "department_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "department_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$d" + } + } + }, + { + "field name" : "emp_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "emp_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + } + ] + } + } +} +} \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/loj05.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/loj05.r new file mode 100644 index 00000000..2a652326 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/loj05.r @@ -0,0 +1,186 @@ +compiled-query-plan + +{ +"query file" : "idc_inner_join/q/loj05.q", +"plan" : +{ + "iterator kind" : "SORT", + "order by fields at positions" : [ 0, 1, 2, 3 ], + "input iterator" : + { + "iterator kind" : 
"RECEIVE", + "distribution kind" : "ALL_PARTITIONS", + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "NESTED_LOOP_JOIN", + "join predicates" : [ + { "outerBranch" :0, "outerExpr" : 0, "innerVar" : 0 }, + { "outerBranch" :0, "outerExpr" : 1, "innerVar" : 1 } + ], + "branches" : [ + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company", + "row variable" : "$$c", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "descendant tables" : [ + { "table" : "company.department", "row variable" : "$$d", "covering primary index" : true }, + { "table" : "company.department.team.employee", "row variable" : "$$e", "covering primary index" : true } + ], + "position in join" : 0 + }, + "FROM variables" : ["$$c", "$$d", "$$e"], + "SELECT expressions" : [ + { + "field name" : "outerJoinVal1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$c" + } + } + }, + { + "field name" : "outerJoinVal2", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "emp_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + } + ] + }, + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.reviews", + "row variable" : "$$r", + "index used" : "primary index", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : {"company_id":0}, + "range conditions" : {} + } + ], + "key bind expressions" : [ + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar0" + } + ], + "map of key bind expressions" : [ + [ 0 ] + ], + "position in join" : 1 + }, + "FROM variable" : "$$r", + "WHERE" : + { + "iterator kind" : "EQUAL", + "left operand" : + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar1" + }, + "right operand" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "emp_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$r" + } + } + }, + "SELECT expressions" : [ + + ] + } + ] + + }, + "FROM variable" : "$join-0", + "SELECT expressions" : [ + { + "field name" : "company_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$c" + } + } + }, + { + "field name" : "department_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "department_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$d" + } + } + }, + { + "field name" : "emp_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "emp_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + }, + { + "field name" : "review_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "review_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$r" + } + } + } + ] + } + } +} +} \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/loj06.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/loj06.r new file mode 100644 index 00000000..9b8c2248 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/loj06.r @@ 
-0,0 +1,195 @@ +compiled-query-plan + +{ +"query file" : "idc_inner_join/q/loj06.q", +"plan" : +{ + "iterator kind" : "SORT", + "order by fields at positions" : [ 0, 1, 2, 3 ], + "input iterator" : + { + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_PARTITIONS", + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "NESTED_LOOP_JOIN", + "join predicates" : [ + { "outerBranch" :0, "outerExpr" : 0, "innerVar" : 0 }, + { "outerBranch" :0, "outerExpr" : 1, "innerVar" : 1 } + ], + "branches" : [ + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.reviews", + "row variable" : "$$r", + "index used" : "primary index", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$$r", + "SELECT expressions" : [ + { + "field name" : "outerJoinVal1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$r" + } + } + }, + { + "field name" : "outerJoinVal2", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "emp_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$r" + } + } + } + ] + }, + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.department.team.employee", + "row variable" : "$$e", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "ancestor tables" : [ + { "table" : "company", "row variable" : "$$c", "covering primary index" : true }, + { "table" : "company.department", "row variable" : "$$d", "covering primary index" : true } ], + "index filtering predicate" : + { + "iterator kind" : "EQUAL", + "left operand" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "emp_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + }, + "right operand" : + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar1" + } + }, + "position in join" : 1 + }, + "FROM variables" : ["$$c", "$$d", "$$e"], + "WHERE" : + { + "iterator kind" : "EQUAL", + "left operand" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$c" + } + }, + "right operand" : + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar0" + } + }, + "SELECT expressions" : [ + + ] + } + ] + + }, + "FROM variable" : "$join-0", + "SELECT expressions" : [ + { + "field name" : "company_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$c" + } + } + }, + { + "field name" : "department_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "department_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$d" + } + } + }, + { + "field name" : "emp_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "emp_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + }, + { + "field name" : "review_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "review_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + 
"variable" : "$$r" + } + } + } + ] + } + } +} +} \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/loj07.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/loj07.r new file mode 100644 index 00000000..60a260cc --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/loj07.r @@ -0,0 +1,206 @@ +compiled-query-plan + +{ +"query file" : "idc_inner_join/q/loj07.q", +"plan" : +{ + "iterator kind" : "SORT", + "order by fields at positions" : [ 0, 1, 2, 3, 4 ], + "input iterator" : + { + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_PARTITIONS", + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "NESTED_LOOP_JOIN", + "join predicates" : [ + { "outerBranch" :0, "outerExpr" : 0, "innerVar" : 0 }, + { "outerBranch" :0, "outerExpr" : 1, "innerVar" : 1 } + ], + "branches" : [ + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.project", + "row variable" : "$$p", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$$p", + "SELECT expressions" : [ + { + "field name" : "outerJoinVal1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$p" + } + } + }, + { + "field name" : "outerJoinVal2", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "project_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$p" + } + } + } + ] + }, + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.department.team", + "row variable" : "$$t", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {"company_id":0}, + "range conditions" : {} + } + ], + "key bind expressions" : [ + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar0" + } + ], + "map of key bind expressions" : [ + [ 0 ] + ], + "ancestor tables" : [ + { "table" : "company.department", "row variable" : "$$d", "covering primary index" : true } ], + "descendant tables" : [ + { "table" : "company.department.team.employee", "row variable" : "$$e", "covering primary index" : false } + ], + "position in join" : 1 + }, + "FROM variables" : ["$$d", "$$t", "$$e"], + "WHERE" : + { + "iterator kind" : "IN", + "left-hand-side expressions" : [ + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar1" + } + ], + "right-hand-side expressions" : [ + { + "iterator kind" : "ARRAY_FILTER", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "projects", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + } + ] + }, + "SELECT expressions" : [ + + ] + } + ] + + }, + "FROM variable" : "$join-0", + "SELECT expressions" : [ + { + "field name" : "company_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$t" + } + } + }, + { + "field name" : "department_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "department_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$t" + } + } + }, + { + "field name" 
: "team_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "team_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$t" + } + } + }, + { + "field name" : "emp_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "emp_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + }, + { + "field name" : "project_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "project_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$p" + } + } + } + ] + } + } +} +} \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/loj08.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/loj08.r new file mode 100644 index 00000000..42dfad3e --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/loj08.r @@ -0,0 +1,204 @@ +compiled-query-plan + +{ +"query file" : "idc_inner_join/q/loj08.q", +"plan" : +{ + "iterator kind" : "SORT", + "order by fields at positions" : [ 0, 1, 2 ], + "input iterator" : + { + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_PARTITIONS", + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "NESTED_LOOP_JOIN", + "join predicates" : [ + { "outerBranch" :0, "outerExpr" : 0, "innerVar" : 0 }, + { "outerBranch" :0, "outerExpr" : 1, "innerVar" : 1 } + ], + "branches" : [ + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.reviews", + "row variable" : "$$r", + "index used" : "primary index", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$$r", + "SELECT expressions" : [ + { + "field name" : "outerJoinVal1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "emp_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$r" + } + } + }, + { + "field name" : "outerJoinVal2", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$r" + } + } + } + ] + }, + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company", + "row variable" : "$$c", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {"company_id":0}, + "range conditions" : {} + } + ], + "key bind expressions" : [ + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar1" + } + ], + "map of key bind expressions" : [ + [ 0 ] + ], + "descendant tables" : [ + { "table" : "company.department.team.employee", "row variable" : "$$e", "covering primary index" : false } + ], + "ON Predicate for table company.department.team.employee" : + { + "iterator kind" : "GREATER_OR_EQUAL", + "left operand" : + { + "iterator kind" : "FUNC_SIZE", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "projects", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + }, + "right operand" : + { + "iterator kind" : "FUNC_SIZE", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "skills", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + } + }, + "position in join" : 1 + }, + "FROM 
variables" : ["$$c", "$$e"], + "WHERE" : + { + "iterator kind" : "EQUAL", + "left operand" : + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar0" + }, + "right operand" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "emp_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + }, + "SELECT expressions" : [ + + ] + } + ] + + }, + "FROM variable" : "$join-0", + "SELECT expressions" : [ + { + "field name" : "company_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$c" + } + } + }, + { + "field name" : "emp_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "emp_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + }, + { + "field name" : "review_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "review_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$r" + } + } + } + ] + } + } +} +} \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/loj09.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/loj09.r new file mode 100644 index 00000000..1d970096 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/loj09.r @@ -0,0 +1,175 @@ +compiled-query-plan + +{ +"query file" : "idc_inner_join/q/loj09.q", +"plan" : +{ + "iterator kind" : "SORT", + "order by fields at positions" : [ 0, 1, 2 ], + "input iterator" : + { + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_PARTITIONS", + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "NESTED_LOOP_JOIN", + "join predicates" : [ + { "outerBranch" :0, "outerExpr" : 0, "innerVar" : 0 }, + { "outerBranch" :0, "outerExpr" : 1, "innerVar" : 1 } + ], + "branches" : [ + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company", + "row variable" : "$$c1", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "descendant tables" : [ + { "table" : "company.reviews", "row variable" : "$$r", "covering primary index" : false } + ], + "position in join" : 0 + }, + "FROM variables" : ["$$c1", "$$r"], + "SELECT expressions" : [ + { + "field name" : "outerJoinVal1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$c1" + } + } + }, + { + "field name" : "outerJoinVal2", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "emp_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$r" + } + } + } + ] + }, + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company", + "row variable" : "$$c2", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {"company_id":0}, + "range conditions" : {} + } + ], + "key bind expressions" : [ + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar0" + } + ], + "map of key bind expressions" : [ + [ 0 ] + ], + "descendant tables" : [ + { "table" : "company.department.team.employee", "row variable" : "$$e", "covering primary index" : true } + ], + "position in 
join" : 1 + }, + "FROM variables" : ["$$c2", "$$e"], + "WHERE" : + { + "iterator kind" : "EQUAL", + "left operand" : + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar1" + }, + "right operand" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "emp_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + }, + "SELECT expressions" : [ + + ] + } + ] + + }, + "FROM variable" : "$join-0", + "SELECT expressions" : [ + { + "field name" : "company_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$c1" + } + } + }, + { + "field name" : "review_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "review_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$r" + } + } + }, + { + "field name" : "emp_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "emp_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + } + ] + } + } +} +} \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/loj10.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/loj10.r new file mode 100644 index 00000000..7e4ccd4e --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/loj10.r @@ -0,0 +1,175 @@ +compiled-query-plan + +{ +"query file" : "idc_inner_join/q/loj10.q", +"plan" : +{ + "iterator kind" : "SORT", + "order by fields at positions" : [ 0, 1, 2 ], + "input iterator" : + { + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_PARTITIONS", + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "NESTED_LOOP_JOIN", + "join predicates" : [ + { "outerBranch" :0, "outerExpr" : 0, "innerVar" : 0 }, + { "outerBranch" :0, "outerExpr" : 1, "innerVar" : 1 } + ], + "branches" : [ + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company", + "row variable" : "$$c1", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "descendant tables" : [ + { "table" : "company.department", "row variable" : "$$d", "covering primary index" : true } + ], + "position in join" : 0 + }, + "FROM variables" : ["$$c1", "$$d"], + "SELECT expressions" : [ + { + "field name" : "outerJoinVal1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$c1" + } + } + }, + { + "field name" : "outerJoinVal2", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "department_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$d" + } + } + } + ] + }, + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company", + "row variable" : "$$c2", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {"company_id":0}, + "range conditions" : {} + } + ], + "key bind expressions" : [ + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar0" + } + ], + "map of key bind expressions" : [ + [ 0 ] + ], + "descendant tables" : [ + { "table" : "company.department.team", "row variable" : "$$t", "covering primary index" : true 
} + ], + "position in join" : 1 + }, + "FROM variables" : ["$$c2", "$$t"], + "WHERE" : + { + "iterator kind" : "EQUAL", + "left operand" : + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar1" + }, + "right operand" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "department_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$t" + } + } + }, + "SELECT expressions" : [ + + ] + } + ] + + }, + "FROM variable" : "$join-0", + "SELECT expressions" : [ + { + "field name" : "company_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$c1" + } + } + }, + { + "field name" : "department_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "department_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$d" + } + } + }, + { + "field name" : "team_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "team_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$t" + } + } + } + ] + } + } +} +} \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/loj11.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/loj11.r new file mode 100644 index 00000000..00d35864 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/loj11.r @@ -0,0 +1,269 @@ +compiled-query-plan + +{ +"query file" : "idc_inner_join/q/loj11.q", +"plan" : +{ + "iterator kind" : "SORT", + "order by fields at positions" : [ 0, 1, 2 ], + "input iterator" : + { + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_PARTITIONS", + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "NESTED_LOOP_JOIN", + "join predicates" : [ + { "outerBranch" :0, "outerExpr" : 0, "innerVar" : 0 }, + { "outerBranch" :1, "outerExpr" : 0, "innerVar" : 1 }, + { "outerBranch" :0, "outerExpr" : 1, "innerVar" : 2 }, + { "outerBranch" :1, "outerExpr" : 1, "innerVar" : 3 } + ], + "branches" : [ + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.reviews", + "row variable" : "$$r", + "index used" : "primary index", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$$r", + "SELECT expressions" : [ + { + "field name" : "outerJoinVal1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$r" + } + } + }, + { + "field name" : "outerJoinVal2", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "emp_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$r" + } + } + } + ] + }, + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company", + "row variable" : "$$c2", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {"company_id":0}, + "range conditions" : {} + } + ], + "key bind expressions" : [ + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar0" + } + ], + "map of key bind expressions" : [ + [ 0 ] + ], + "descendant tables" : [ + { "table" : "company.department", "row variable" : "$$d", "covering primary 
index" : true } + ], + "position in join" : 1 + }, + "FROM variables" : ["$$c2", "$$d"], + "SELECT expressions" : [ + { + "field name" : "outerJoinVal1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$c2" + } + } + }, + { + "field name" : "outerJoinVal2", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "department_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$d" + } + } + } + ] + }, + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company", + "row variable" : "$$c3", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {"company_id":0}, + "range conditions" : {} + } + ], + "key bind expressions" : [ + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar1" + } + ], + "map of key bind expressions" : [ + [ 0 ] + ], + "descendant tables" : [ + { "table" : "company.department.team.employee", "row variable" : "$$e", "covering primary index" : true } + ], + "position in join" : 2 + }, + "FROM variables" : ["$$c3", "$$e"], + "WHERE" : + { + "iterator kind" : "AND", + "input iterators" : [ + { + "iterator kind" : "EQUAL", + "left operand" : + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar2" + }, + "right operand" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "emp_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + }, + { + "iterator kind" : "EQUAL", + "left operand" : + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar3" + }, + "right operand" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "department_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + } + ] + }, + "SELECT expressions" : [ + + ] + } + ] + + }, + "FROM variable" : "$join-0", + "SELECT expressions" : [ + { + "field name" : "company_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$r" + } + } + }, + { + "field name" : "department_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "department_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$d" + } + } + }, + { + "field name" : "emp_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "emp_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + }, + { + "field name" : "review_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "review_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$r" + } + } + } + ] + } + } +} +} \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/nt01.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/nt01.r new file mode 100644 index 00000000..c924c1fb --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/nt01.r @@ -0,0 +1,176 @@ +compiled-query-plan + +{ +"query file" : "idc_inner_join/q/nt01.q", +"plan" : +{ + "iterator kind" : "SORT", + "order by fields at positions" : [ 0, 1, 2, 3, 4 ], + "input iterator" : + { + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_PARTITIONS", + "input iterator" : + { 
+ "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "NESTED_LOOP_JOIN", + "join predicates" : [ + { "outerBranch" :0, "outerExpr" : 0, "innerVar" : 0 } + ], + "branches" : [ + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company", + "row variable" : "$$c", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$$c", + "SELECT expressions" : [ + { + "field name" : "outerJoinVal1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$c" + } + } + } + ] + }, + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.department", + "row variable" : "$$d", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "descendant tables" : [ + { "table" : "company.department.team", "row variable" : "$$t", "covering primary index" : true }, + { "table" : "company.department.team.employee", "row variable" : "$$e", "covering primary index" : true } + ], + "position in join" : 1 + }, + "FROM variables" : ["$$d", "$$t", "$$e"], + "WHERE" : + { + "iterator kind" : "EQUAL", + "left operand" : + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar0" + }, + "right operand" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + }, + "SELECT expressions" : [ + + ] + } + ] + + }, + "FROM variable" : "$join-0", + "SELECT expressions" : [ + { + "field name" : "company_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$c" + } + } + }, + { + "field name" : "dept_comp_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$d" + } + } + }, + { + "field name" : "department_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "department_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$d" + } + } + }, + { + "field name" : "team_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "team_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$t" + } + } + }, + { + "field name" : "emp_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "emp_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + } + ] + } + } +} +} \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/nt02.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/nt02.r new file mode 100644 index 00000000..1e9ff974 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/nt02.r @@ -0,0 +1,211 @@ +compiled-query-plan + +{ +"query file" : "idc_inner_join/q/nt02.q", +"plan" : +{ + "iterator kind" : "SORT", + "order by fields at positions" : [ 0, 1, 2, 5, 3 ], + "input iterator" : + { + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_PARTITIONS", + "input iterator" : + { + 
"iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "NESTED_LOOP_JOIN", + "join predicates" : [ + { "outerBranch" :0, "outerExpr" : 0, "innerVar" : 0 }, + { "outerBranch" :0, "outerExpr" : 1, "innerVar" : 1 } + ], + "branches" : [ + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.department.team.employee", + "row variable" : "$$e", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "ancestor tables" : [ + { "table" : "company", "row variable" : "$$c", "covering primary index" : true }, + { "table" : "company.department", "row variable" : "$$d", "covering primary index" : true } ], + "position in join" : 0 + }, + "FROM variables" : ["$$c", "$$d", "$$e"], + "SELECT expressions" : [ + { + "field name" : "outerJoinVal1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$c" + } + } + }, + { + "field name" : "outerJoinVal2", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "team_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + } + ] + }, + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.department.team", + "row variable" : "$$t", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {"company_id":0}, + "range conditions" : {} + } + ], + "key bind expressions" : [ + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar0" + } + ], + "map of key bind expressions" : [ + [ 0 ] + ], + "index filtering predicate" : + { + "iterator kind" : "EQUAL", + "left operand" : + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar1" + }, + "right operand" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "team_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$t" + } + } + }, + "position in join" : 1 + }, + "FROM variable" : "$$t", + "SELECT expressions" : [ + + ] + } + ] + + }, + "FROM variable" : "$join-0", + "SELECT expressions" : [ + { + "field name" : "company_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$c" + } + } + }, + { + "field name" : "dept_comp_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$d" + } + } + }, + { + "field name" : "department_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "department_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$d" + } + } + }, + { + "field name" : "emp_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "emp_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + }, + { + "field name" : "emp_team_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "team_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + }, + { + "field name" : "team_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "team_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + 
"variable" : "$$t" + } + } + } + ] + } + } +} +} \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/nt03.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/nt03.r new file mode 100644 index 00000000..ca944062 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/nt03.r @@ -0,0 +1,210 @@ +compiled-query-plan + +{ +"query file" : "idc_inner_join/q/nt03.q", +"plan" : +{ + "iterator kind" : "SORT", + "order by fields at positions" : [ 0, 1, 2 ], + "input iterator" : + { + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_PARTITIONS", + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "NESTED_LOOP_JOIN", + "join predicates" : [ + { "outerBranch" :0, "outerExpr" : 0, "innerVar" : 0 }, + { "outerBranch" :0, "outerExpr" : 1, "innerVar" : 1 }, + { "outerBranch" :0, "outerExpr" : 2, "innerVar" : 2 } + ], + "branches" : [ + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company", + "row variable" : "$$c", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "descendant tables" : [ + { "table" : "company.department.team", "row variable" : "$$t", "covering primary index" : true }, + { "table" : "company.reviews", "row variable" : "$$r", "covering primary index" : false } + ], + "position in join" : 0 + }, + "FROM variables" : ["$$c", "$$t", "$$r"], + "SELECT expressions" : [ + { + "field name" : "outerJoinVal1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$t" + } + } + }, + { + "field name" : "outerJoinVal2", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "team_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$t" + } + } + }, + { + "field name" : "outerJoinVal3", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "emp_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$r" + } + } + } + ] + }, + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.department.team.employee", + "row variable" : "$$e", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {"company_id":0}, + "range conditions" : {} + } + ], + "key bind expressions" : [ + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar0" + } + ], + "map of key bind expressions" : [ + [ 0 ] + ], + "index filtering predicate" : + { + "iterator kind" : "AND", + "input iterators" : [ + { + "iterator kind" : "EQUAL", + "left operand" : + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar1" + }, + "right operand" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "team_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + }, + { + "iterator kind" : "EQUAL", + "left operand" : + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar2" + }, + "right operand" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "emp_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + } + ] + }, + "position in join" : 1 + }, + "FROM variable" : "$$e", + "SELECT expressions" : [ + + ] + } + ] + + }, + 
"FROM variable" : "$join-0", + "SELECT expressions" : [ + { + "field name" : "company_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$c" + } + } + }, + { + "field name" : "team_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "team_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$t" + } + } + }, + { + "field name" : "emp_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "emp_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + } + ] + } + } +} +} \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/nt04.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/nt04.r new file mode 100644 index 00000000..f75b8f69 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/nt04.r @@ -0,0 +1,177 @@ +compiled-query-plan + +{ +"query file" : "idc_inner_join/q/nt04.q", +"plan" : +{ + "iterator kind" : "SORT", + "order by fields at positions" : [ 0, 2, 3, 4 ], + "input iterator" : + { + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_PARTITIONS", + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "NESTED_LOOP_JOIN", + "join predicates" : [ + { "outerBranch" :0, "outerExpr" : 0, "innerVar" : 0 } + ], + "branches" : [ + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company", + "row variable" : "$$c", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$$c", + "SELECT expressions" : [ + { + "field name" : "outerJoinVal1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$c" + } + } + } + ] + }, + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.department.team", + "row variable" : "$$t", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "ancestor tables" : [ + { "table" : "company.department", "row variable" : "$$d", "covering primary index" : true } ], + "descendant tables" : [ + { "table" : "company.department.team.employee", "row variable" : "$$e", "covering primary index" : true } + ], + "position in join" : 1 + }, + "FROM variables" : ["$$d", "$$t", "$$e"], + "WHERE" : + { + "iterator kind" : "EQUAL", + "left operand" : + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar0" + }, + "right operand" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + }, + "SELECT expressions" : [ + + ] + } + ] + + }, + "FROM variable" : "$join-0", + "SELECT expressions" : [ + { + "field name" : "company_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$c" + } + } + }, + { + "field name" : "tcomp_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { 
+ "iterator kind" : "VAR_REF", + "variable" : "$$t" + } + } + }, + { + "field name" : "department_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "department_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$d" + } + } + }, + { + "field name" : "team_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "team_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$t" + } + } + }, + { + "field name" : "emp_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "emp_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + } + ] + } + } +} +} \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/nt05.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/nt05.r new file mode 100644 index 00000000..779f6ec5 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/nt05.r @@ -0,0 +1,256 @@ +compiled-query-plan + +{ +"query file" : "idc_inner_join/q/nt05.q", +"plan" : +{ + "iterator kind" : "SORT", + "order by fields at positions" : [ 0, 1, 2, 3, 4 ], + "input iterator" : + { + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_PARTITIONS", + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "NESTED_LOOP_JOIN", + "join predicates" : [ + { "outerBranch" :0, "outerExpr" : 0, "innerVar" : 0 }, + { "outerBranch" :0, "outerExpr" : 1, "innerVar" : 1 } + ], + "branches" : [ + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.reviews", + "row variable" : "$$r", + "index used" : "primary index", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$$r", + "SELECT expressions" : [ + { + "field name" : "outerJoinVal1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$r" + } + } + }, + { + "field name" : "outerJoinVal2", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "emp_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$r" + } + } + } + ] + }, + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.department.team", + "row variable" : "$$t", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "ancestor tables" : [ + { "table" : "company.department", "row variable" : "$$d", "covering primary index" : false } ], + "descendant tables" : [ + { "table" : "company.department.team.employee", "row variable" : "$$e", "covering primary index" : false } + ], + "ON Predicate for table company.department" : + { + "iterator kind" : "GREATER_THAN", + "left operand" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "established", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$d" + } + }, + "right operand" : + { + "iterator kind" : "CONST", + "value" : "2016-10-20T00:00:00.000000000Z" + } + }, + "ON Predicate for table company.department.team.employee" : + { + "iterator kind" : "GREATER_THAN", + "left operand" : + { + "iterator kind" : "FUNC_SIZE", + "input iterator" : + { + "iterator 
kind" : "FIELD_STEP", + "field name" : "projects", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + }, + "right operand" : + { + "iterator kind" : "CONST", + "value" : 1 + } + }, + "position in join" : 1 + }, + "FROM variables" : ["$$d", "$$t", "$$e"], + "WHERE" : + { + "iterator kind" : "AND", + "input iterators" : [ + { + "iterator kind" : "EQUAL", + "left operand" : + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar0" + }, + "right operand" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + }, + { + "iterator kind" : "EQUAL", + "left operand" : + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar1" + }, + "right operand" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "emp_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + } + ] + }, + "SELECT expressions" : [ + + ] + } + ] + + }, + "FROM variable" : "$join-0", + "SELECT expressions" : [ + { + "field name" : "company_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$t" + } + } + }, + { + "field name" : "department_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "department_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$d" + } + } + }, + { + "field name" : "team_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "team_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$t" + } + } + }, + { + "field name" : "emp_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "emp_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + }, + { + "field name" : "review_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "review_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$r" + } + } + } + ] + } + } +} +} \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/nt06.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/nt06.r new file mode 100644 index 00000000..0e730cf3 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/nt06.r @@ -0,0 +1,215 @@ +compiled-query-plan + +{ +"query file" : "idc_inner_join/q/nt06.q", +"plan" : +{ + "iterator kind" : "SORT", + "order by fields at positions" : [ 0, 1, 2 ], + "input iterator" : + { + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_PARTITIONS", + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "NESTED_LOOP_JOIN", + "join predicates" : [ + { "outerBranch" :0, "outerExpr" : 0, "innerVar" : 0 }, + { "outerBranch" :0, "outerExpr" : 1, "innerVar" : 1 } + ], + "branches" : [ + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company", + "row variable" : "$$c1", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "descendant tables" : [ + { "table" : "company.department.team.employee", "row variable" : "$$e", "covering primary index" : false } + ], + "position in join" : 0 + }, + "FROM variables" : ["$$c1", "$$e"], + 
"SELECT expressions" : [ + { + "field name" : "outerJoinVal1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$c1" + } + } + }, + { + "field name" : "outerJoinVal2", + "field expression" : + { + "iterator kind" : "ARRAY_CONSTRUCTOR", + "conditional" : true, + "input iterators" : [ + { + "iterator kind" : "ARRAY_FILTER", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "projects", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + } + ] + } + } + ] + }, + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company", + "row variable" : "$$c2", + "index used" : "primary index", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : {"company_id":0}, + "range conditions" : {} + } + ], + "key bind expressions" : [ + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar0" + } + ], + "map of key bind expressions" : [ + [ 0 ] + ], + "descendant tables" : [ + { "table" : "company.project", "row variable" : "$$p", "covering primary index" : false } + ], + "ON Predicate for table company.project" : + { + "iterator kind" : "EQUAL", + "left operand" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "Phase1", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "project_milestones", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$p" + } + } + }, + "right operand" : + { + "iterator kind" : "CONST", + "value" : "Completed" + } + }, + "position in join" : 1 + }, + "FROM variables" : ["$$c2", "$$p"], + "WHERE" : + { + "iterator kind" : "IN", + "left-hand-side expressions" : [ + { + "iterator kind" : "FIELD_STEP", + "field name" : "project_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$p" + } + } + ], + "right-hand-side expressions" : [ + { + "iterator kind" : "ARRAY_FILTER", + "input iterator" : + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar1" + } + } + ] + }, + "SELECT expressions" : [ + + ] + } + ] + + }, + "FROM variable" : "$join-0", + "SELECT expressions" : [ + { + "field name" : "company_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$c1" + } + } + }, + { + "field name" : "emp_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "emp_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + }, + { + "field name" : "project_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "project_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$p" + } + } + } + ] + } + } +} +} \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q1.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q1.r new file mode 100644 index 00000000..b23957dc --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q1.r @@ -0,0 +1,130 @@ +compiled-query-plan +{ +"query file" : "idc_inner_join/q/q1.q", +"plan" : +{ + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_PARTITIONS", + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "NESTED_LOOP_JOIN", + "join predicates" : [ + { 
"outerBranch" :0, "outerExpr" : 0, "innerVar" : 0 } + ], + "branches" : [ + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company", + "row variable" : "$$c", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$$c", + "SELECT expressions" : [ + { + "field name" : "outerJoinVal1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$c" + } + } + } + ] + }, + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.department", + "row variable" : "$$d", + "index used" : "primary index", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : {"company_id":0}, + "range conditions" : {} + } + ], + "key bind expressions" : [ + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar0" + } + ], + "map of key bind expressions" : [ + [ 0 ] + ], + "position in join" : 1 + }, + "FROM variable" : "$$d", + "SELECT expressions" : [ + + ] + } + ] + + }, + "FROM variable" : "$join-0", + "SELECT expressions" : [ + { + "field name" : "company_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$d" + } + } + }, + { + "field name" : "department_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "department_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$d" + } + } + }, + { + "field name" : "name", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "name", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$d" + } + } + } + ] + } +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q10.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q10.r new file mode 100644 index 00000000..35f0b7fe --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q10.r @@ -0,0 +1,265 @@ +compiled-query-plan +{ +"query file" : "idc_inner_join/q/q10.q", +"plan" : +{ + "iterator kind" : "SORT", + "order by fields at positions" : [ 0, 1, 4 ], + "input iterator" : + { + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_PARTITIONS", + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "NESTED_LOOP_JOIN", + "join predicates" : [ + { "outerBranch" :0, "outerExpr" : 0, "innerVar" : 0 }, + { "outerBranch" :1, "outerExpr" : 0, "innerVar" : 1 }, + { "outerBranch" :1, "outerExpr" : 1, "innerVar" : 2 } + ], + "branches" : [ + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.department.team.employee", + "row variable" : "$$e", + "index used" : "primary index", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$$e", + "SELECT expressions" : [ + { + "field name" : "outerJoinVal1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + } + ] + }, + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : 
"TABLE", + "target table" : "company.project", + "row variable" : "$$p", + "index used" : "primary index", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : {"company_id":0}, + "range conditions" : {} + } + ], + "key bind expressions" : [ + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar0" + } + ], + "map of key bind expressions" : [ + [ 0 ] + ], + "position in join" : 1 + }, + "FROM variable" : "$$p", + "SELECT expressions" : [ + { + "field name" : "outerJoinVal1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$p" + } + } + }, + { + "field name" : "outerJoinVal2", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "client_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$p" + } + } + } + ] + }, + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.client", + "row variable" : "$$c", + "index used" : "primary index", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : {"company_id":0,"client_id":0}, + "range conditions" : {} + } + ], + "key bind expressions" : [ + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar1" + }, + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar2" + } + ], + "map of key bind expressions" : [ + [ 0, 1 ] + ], + "position in join" : 2 + }, + "FROM variable" : "$$c", + "SELECT expressions" : [ + + ] + } + ] + + }, + "FROM variable" : "$join-0", + "FROM" : + { + "iterator kind" : "ARRAY_FILTER", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "projects", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + }, + "FROM variable" : "$proj", + "WHERE" : + { + "iterator kind" : "EQUAL", + "left operand" : + { + "iterator kind" : "VAR_REF", + "variable" : "$proj" + }, + "right operand" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "project_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$p" + } + } + }, + "SELECT expressions" : [ + { + "field name" : "company_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + }, + { + "field name" : "emp_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "emp_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + }, + { + "field name" : "project_id", + "field expression" : + { + "iterator kind" : "VAR_REF", + "variable" : "$proj" + } + }, + { + "field name" : "project_name", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "name", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$p" + } + } + }, + { + "field name" : "client_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "client_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$c" + } + } + }, + { + "field name" : "client_name", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "name", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$c" + } + } + } + ] + } + } +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q11.r 
b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q11.r new file mode 100644 index 00000000..85c424c6 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q11.r @@ -0,0 +1,187 @@ +compiled-query-plan +{ +"query file" : "idc_inner_join/q/q11.q", +"plan" : +{ + "iterator kind" : "SORT", + "order by fields at positions" : [ 0, 1 ], + "input iterator" : + { + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_PARTITIONS", + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "NESTED_LOOP_JOIN", + "join predicates" : [ + { "outerBranch" :0, "outerExpr" : 0, "innerVar" : 0 }, + { "outerBranch" :0, "outerExpr" : 1, "innerVar" : 1 } + ], + "branches" : [ + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.department.team.employee", + "row variable" : "$$e", + "index used" : "primary index", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$$e", + "SELECT expressions" : [ + { + "field name" : "outerJoinVal1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + }, + { + "field name" : "outerJoinVal2", + "field expression" : + { + "iterator kind" : "ARRAY_CONSTRUCTOR", + "conditional" : true, + "input iterators" : [ + { + "iterator kind" : "ARRAY_FILTER", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "skills", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + } + ] + } + } + ] + }, + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.skill", + "row variable" : "$$s", + "index used" : "primary index", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : {"company_id":0,"skill_id":0}, + "range conditions" : {} + } + ], + "key bind expressions" : [ + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar0" + }, + { + "iterator kind" : "ARRAY_FILTER", + "input iterator" : + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar1" + } + } + ], + "map of key bind expressions" : [ + [ 0, 1 ] + ], + "bind info for in3 operator" : [ + { + "theNumComps" : 1, + "thePushedComps" : [ 0 ], + "theIndexFieldPositions" : [ 1 ] + } + ], + "position in join" : 1 + }, + "FROM variable" : "$$s", + "SELECT expressions" : [ + + ] + } + ] + + }, + "FROM variable" : "$join-0", + "SELECT expressions" : [ + { + "field name" : "company_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + }, + { + "field name" : "emp_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "emp_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + }, + { + "field name" : "emp_name", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "name", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + }, + { + "field name" : "skill", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "name", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$s" + } + } + } + ] + } + } +} +} diff --git 
a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q12.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q12.r new file mode 100644 index 00000000..be7cb5b8 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q12.r @@ -0,0 +1,273 @@ +compiled-query-plan +{ +"query file" : "idc_inner_join/q/q12.q", +"plan" : +{ + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "SORT", + "order by fields at positions" : [ 0, 4, 2 ], + "input iterator" : + { + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_PARTITIONS", + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "NESTED_LOOP_JOIN", + "join predicates" : [ + { "outerBranch" :0, "outerExpr" : 0, "innerVar" : 0 }, + { "outerBranch" :0, "outerExpr" : 1, "innerVar" : 1 } + ], + "branches" : [ + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.project", + "row variable" : "$$p", + "index used" : "primary index", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$$p", + "SELECT expressions" : [ + { + "field name" : "outerJoinVal1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$p" + } + } + }, + { + "field name" : "outerJoinVal2", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "client_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$p" + } + } + } + ] + }, + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.client", + "row variable" : "$$c", + "index used" : "primary index", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : {"company_id":0,"client_id":0}, + "range conditions" : {} + } + ], + "key bind expressions" : [ + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar0" + }, + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar1" + } + ], + "map of key bind expressions" : [ + [ 0, 1 ] + ], + "position in join" : 1 + }, + "FROM variable" : "$$c", + "WHERE" : + { + "iterator kind" : "IN", + "left-hand-side expressions" : [ + { + "iterator kind" : "CONST", + "value" : "Email" + } + ], + "right-hand-side expressions" : [ + { + "iterator kind" : "ARRAY_FILTER", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "preferred_contact_methods", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$c" + } + } + } + ] + }, + "SELECT expressions" : [ + + ] + } + ] + + }, + "FROM variable" : "$join-0", + "FROM" : + { + "iterator kind" : "VALUES", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "project_milestones", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$p" + } + } + }, + "FROM variable" : "$progress", + "SELECT expressions" : [ + { + "field name" : "company_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$c" + } + } + }, + { + "field name" : "name", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "name", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$c" + } + } + }, + { + "field name" : 
"project_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "project_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$p" + } + } + }, + { + "field name" : "progress", + "field expression" : + { + "iterator kind" : "VAR_REF", + "variable" : "$progress" + } + }, + { + "field name" : "sort_gen", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "client_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$c" + } + } + } + ] + } + } + }, + "FROM variable" : "$from-1", + "SELECT expressions" : [ + { + "field name" : "company_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$from-1" + } + } + }, + { + "field name" : "name", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "name", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$from-1" + } + } + }, + { + "field name" : "project_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "project_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$from-1" + } + } + }, + { + "field name" : "progress", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "progress", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$from-1" + } + } + } + ] +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q13.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q13.r new file mode 100644 index 00000000..e10afb80 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q13.r @@ -0,0 +1,423 @@ +compiled-query-plan +{ +"query file" : "idc_inner_join/q/q13.q", +"plan" : +{ + "iterator kind" : "GROUP", + "input variable" : "$gb-3", + "input iterator" : + { + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_PARTITIONS", + "input iterator" : + { + "iterator kind" : "GROUP", + "input variable" : "$gb-2", + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "NESTED_LOOP_JOIN", + "join predicates" : [ + { "outerBranch" :0, "outerExpr" : 0, "innerVar" : 0 }, + { "outerBranch" :1, "outerExpr" : 0, "innerVar" : 1 }, + { "outerBranch" :0, "outerExpr" : 1, "innerVar" : 2 } + ], + "branches" : [ + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.department.team", + "row variable" : "$$t", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$$t", + "SELECT expressions" : [ + { + "field name" : "outerJoinVal1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$t" + } + } + }, + { + "field name" : "outerJoinVal2", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "team_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$t" + } + } + } + ] + }, + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.department.team.employee", + "row variable" : "$$e", + "index used" : "primary index", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : 
{"company_id":0}, + "range conditions" : {} + } + ], + "key bind expressions" : [ + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar0" + } + ], + "map of key bind expressions" : [ + [ 0 ] + ], + "index filtering predicate" : + { + "iterator kind" : "EQUAL", + "left operand" : + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar2" + }, + "right operand" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "team_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + }, + "position in join" : 1 + }, + "FROM variable" : "$$e", + "SELECT expressions" : [ + { + "field name" : "outerJoinVal1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + } + ] + }, + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.skill", + "row variable" : "$$s", + "index used" : "primary index", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : {"company_id":0}, + "range conditions" : {} + } + ], + "key bind expressions" : [ + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar1" + } + ], + "map of key bind expressions" : [ + [ 0 ] + ], + "position in join" : 2 + }, + "FROM variable" : "$$s", + "SELECT expressions" : [ + + ] + } + ] + + }, + "FROM variable" : "$join-0", + "FROM" : + { + "iterator kind" : "ARRAY_FILTER", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "skills", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + }, + "FROM variable" : "$s", + "WHERE" : + { + "iterator kind" : "EQUAL", + "left operand" : + { + "iterator kind" : "VAR_REF", + "variable" : "$s" + }, + "right operand" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "skill_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$s" + } + } + }, + "SELECT expressions" : [ + { + "field name" : "company_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$t" + } + } + }, + { + "field name" : "team_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "team_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$t" + } + } + }, + { + "field name" : "count", + "field expression" : + { + "iterator kind" : "CONST", + "value" : 1 + } + }, + { + "field name" : "total_skill", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "skill_value", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$s" + } + } + }, + { + "field name" : "min", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "skill_value", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$s" + } + } + }, + { + "field name" : "max", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "skill_value", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$s" + } + } + } + ] + }, + "grouping expressions" : [ + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$gb-2" + } + }, + { + "iterator kind" : "FIELD_STEP", + "field name" : "team_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + 
"variable" : "$gb-2" + } + } + ], + "aggregate functions" : [ + { + "iterator kind" : "FUNC_COUNT_STAR" + }, + { + "iterator kind" : "FUNC_SUM", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "total_skill", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$gb-2" + } + } + }, + { + "iterator kind" : "FN_MIN", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "min", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$gb-2" + } + } + }, + { + "iterator kind" : "FN_MAX", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "max", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$gb-2" + } + } + } + ] + } + }, + "grouping expressions" : [ + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$gb-3" + } + }, + { + "iterator kind" : "FIELD_STEP", + "field name" : "team_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$gb-3" + } + } + ], + "aggregate functions" : [ + { + "iterator kind" : "FUNC_SUM", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "count", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$gb-3" + } + } + }, + { + "iterator kind" : "FUNC_SUM", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "total_skill", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$gb-3" + } + } + }, + { + "iterator kind" : "FN_MIN", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "min", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$gb-3" + } + } + }, + { + "iterator kind" : "FN_MAX", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "max", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$gb-3" + } + } + } + ] +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q14.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q14.r new file mode 100644 index 00000000..98ea36da --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q14.r @@ -0,0 +1,147 @@ +compiled-query-plan +{ +"query file" : "idc_inner_join/q/q14.q", +"plan" : +{ + "iterator kind" : "SORT", + "order by fields at positions" : [ 0 ], + "input iterator" : + { + "iterator kind" : "GROUP", + "input variable" : "$gb-3", + "input iterator" : + { + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_PARTITIONS", + "input iterator" : + { + "iterator kind" : "GROUP", + "input variable" : "$gb-2", + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "NESTED_LOOP_JOIN", + "join predicates" : [ + { "outerBranch" :0, "outerExpr" : 0, "innerVar" : 0 } + ], + "branches" : [ + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company", + "row variable" : "$$c", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$$c", + "SELECT expressions" : [ + { + "field name" : "outerJoinVal1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$c" + } + } + } + ] + }, + { + "iterator kind" : 
"SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.department", + "row variable" : "$$d", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {"company_id":0}, + "range conditions" : {} + } + ], + "key bind expressions" : [ + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar0" + } + ], + "map of key bind expressions" : [ + [ 0 ] + ], + "position in join" : 1 + }, + "FROM variable" : "$$d", + "SELECT expressions" : [ + + ] + } + ] + + }, + "FROM variable" : "$join-0", + "SELECT expressions" : [ + { + "field name" : "company_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$c" + } + } + } + ] + }, + "grouping expressions" : [ + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$gb-2" + } + } + ], + "aggregate functions" : [ + + ] + } + }, + "grouping expressions" : [ + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$gb-3" + } + } + ], + "aggregate functions" : [ + + ] + } +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q15.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q15.r new file mode 100644 index 00000000..7c2ab3e5 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q15.r @@ -0,0 +1,134 @@ +compiled-query-plan +{ +"query file" : "idc_inner_join/q/q15.q", +"plan" : +{ + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_PARTITIONS", + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "NESTED_LOOP_JOIN", + "join predicates" : [ + { "outerBranch" :0, "outerExpr" : 0, "innerVar" : 0 } + ], + "branches" : [ + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company", + "row variable" : "$$c", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$$c", + "SELECT expressions" : [ + { + "field name" : "outerJoinVal1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$c" + } + } + } + ] + }, + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.department.team", + "row variable" : "$$t", + "index used" : "primary index", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : {"company_id":0}, + "range conditions" : {} + } + ], + "key bind expressions" : [ + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar0" + } + ], + "map of key bind expressions" : [ + [ 0 ] + ], + "position in join" : 1 + }, + "FROM variable" : "$$t", + "SELECT expressions" : [ + + ] + } + ] + + }, + "FROM variable" : "$join-0", + "SELECT expressions" : [ + { + "field name" : "company_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$c" + } + } + }, + { + "field name" : "team_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : 
"team_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$t" + } + } + }, + { + "field name" : "num_tech", + "field expression" : + { + "iterator kind" : "FUNC_SIZE", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "technologies_used", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$t" + } + } + } + } + ] + } +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q16.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q16.r new file mode 100644 index 00000000..e2528067 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q16.r @@ -0,0 +1,195 @@ +compiled-query-plan + +{ +"query file" : "idc_inner_join/q/q16.q", +"plan" : +{ + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_PARTITIONS", + "order by fields at positions" : [ 0, 1 ], + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "NESTED_LOOP_JOIN", + "join predicates" : [ + { "outerBranch" :0, "outerExpr" : 0, "innerVar" : 0 } + ], + "branches" : [ + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company", + "row variable" : "$$c", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$$c", + "SELECT expressions" : [ + { + "field name" : "outerJoinVal1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$c" + } + } + } + ] + }, + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.department", + "row variable" : "$$d", + "index used" : "primary index", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : {"company_id":0}, + "range conditions" : {} + } + ], + "key bind expressions" : [ + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar0" + } + ], + "map of key bind expressions" : [ + [ 0 ] + ], + "position in join" : 1 + }, + "FROM variable" : "$$d", + "WHERE" : + { + "iterator kind" : "AND", + "input iterators" : [ + { + "iterator kind" : "GREATER_THAN", + "left operand" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "established", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$d" + } + }, + "right operand" : + { + "iterator kind" : "CONST", + "value" : "2016-09-01T00:00:00.000000000Z" + } + }, + { + "iterator kind" : "LESS_THAN", + "left operand" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "established", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$d" + } + }, + "right operand" : + { + "iterator kind" : "CONST", + "value" : "2024-04-05T00:00:00.000000000Z" + } + } + ] + }, + "SELECT expressions" : [ + + ] + } + ] + + }, + "FROM variable" : "$join-0", + "SELECT expressions" : [ + { + "field name" : "company_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$c" + } + } + }, + { + "field name" : "department_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "department_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$d" + } + } + }, + { + "field name" : "year", + "field 
expression" : + { + "iterator kind" : "FUNC_EXTRACT_FROM_TIMESTAMP", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "established", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$d" + } + } + } + }, + { + "field name" : "month", + "field expression" : + { + "iterator kind" : "FUNC_EXTRACT_FROM_TIMESTAMP", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "established", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$d" + } + } + } + } + ] + } +} +} \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q17.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q17.r new file mode 100644 index 00000000..03c9385d --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q17.r @@ -0,0 +1,134 @@ +compiled-query-plan +{ +"query file" : "idc_inner_join/q/q17.q", +"plan" : +{ + "iterator kind" : "SORT", + "order by fields at positions" : [ 0, 1 ], + "input iterator" : + { + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_PARTITIONS", + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "NESTED_LOOP_JOIN", + "join predicates" : [ + { "outerBranch" :0, "outerExpr" : 0, "innerVar" : 0 } + ], + "branches" : [ + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company", + "row variable" : "$$c", + "index used" : "primary index", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$$c", + "SELECT expressions" : [ + { + "field name" : "outerJoinVal1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$c" + } + } + } + ] + }, + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.department", + "row variable" : "$d", + "index used" : "primary index", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : {"company_id":0}, + "range conditions" : {} + } + ], + "key bind expressions" : [ + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar0" + } + ], + "map of key bind expressions" : [ + [ 0 ] + ], + "position in join" : 1 + }, + "FROM variable" : "$d", + "SELECT expressions" : [ + + ] + } + ] + + }, + "FROM variable" : "$join-0", + "SELECT expressions" : [ + { + "field name" : "company_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$c" + } + } + }, + { + "field name" : "department_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "department_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$d" + } + } + }, + { + "field name" : "exp_time", + "field expression" : + { + "iterator kind" : "FUNC_EXPIRATION_TIME", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$d" + } + } + } + ] + } + } +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q18.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q18.r new file mode 100644 index 00000000..57104b19 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q18.r @@ -0,0 
+1,107 @@ +compiled-query-plan +{ +"query file" : "idc_inner_join/q/q18.q", +"plan" : +{ + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_PARTITIONS", + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "NESTED_LOOP_JOIN", + "join predicates" : [ + { "outerBranch" :0, "outerExpr" : 0, "innerVar" : 0 } + ], + "branches" : [ + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company", + "row variable" : "$$c", + "index used" : "primary index", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$$c", + "SELECT expressions" : [ + { + "field name" : "outerJoinVal1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$c" + } + } + } + ] + }, + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.no_records", + "row variable" : "$$nr", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {"company_id":0}, + "range conditions" : {} + } + ], + "key bind expressions" : [ + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar0" + } + ], + "map of key bind expressions" : [ + [ 0 ] + ], + "position in join" : 1 + }, + "FROM variable" : "$$nr", + "SELECT expressions" : [ + + ] + } + ] + + }, + "FROM variable" : "$join-0", + "SELECT expressions" : [ + { + "field name" : "c", + "field expression" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$c" + } + }, + { + "field name" : "nr", + "field expression" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$nr" + } + } + ] + } +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q19.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q19.r new file mode 100644 index 00000000..0abf9934 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q19.r @@ -0,0 +1,172 @@ +compiled-query-plan +{ +"query file" : "idc_inner_join/q/q19.q", +"plan" : +{ + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_PARTITIONS", + "order by fields at positions" : [ 0 ], + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "NESTED_LOOP_JOIN", + "join predicates" : [ + { "outerBranch" :0, "outerExpr" : 0, "innerVar" : 0 } + ], + "branches" : [ + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company", + "row variable" : "$$c", + "index used" : "primary index", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$$c", + "WHERE" : + { + "iterator kind" : "OP_IS_NOT_NULL", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "head_office_location", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$c" + } + } + }, + "SELECT expressions" : [ + { + "field name" : "outerJoinVal1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$c" + } + } + } + ] + }, + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.null_records", + "row variable" : 
"$$nr", + "index used" : "primary index", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : {"company_id":0}, + "range conditions" : {} + } + ], + "key bind expressions" : [ + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar0" + } + ], + "map of key bind expressions" : [ + [ 0 ] + ], + "position in join" : 1 + }, + "FROM variable" : "$$nr", + "WHERE" : + { + "iterator kind" : "OP_IS_NULL", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "value", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$nr" + } + } + }, + "SELECT expressions" : [ + + ] + } + ] + + }, + "FROM variable" : "$join-0", + "SELECT expressions" : [ + { + "field name" : "company_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$c" + } + } + }, + { + "field name" : "head_office_location", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "head_office_location", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$c" + } + } + }, + { + "field name" : "record_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "record_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$nr" + } + } + }, + { + "field name" : "value", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "value", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$nr" + } + } + } + ] + } +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q2.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q2.r new file mode 100644 index 00000000..4dd6d4b8 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q2.r @@ -0,0 +1,130 @@ +compiled-query-plan +{ +"query file" : "idc_inner_join/q/q2.q", +"plan" : +{ + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_PARTITIONS", + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "NESTED_LOOP_JOIN", + "join predicates" : [ + { "outerBranch" :0, "outerExpr" : 0, "innerVar" : 0 } + ], + "branches" : [ + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.department", + "row variable" : "$$d", + "index used" : "primary index", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$$d", + "SELECT expressions" : [ + { + "field name" : "outerJoinVal1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$d" + } + } + } + ] + }, + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company", + "row variable" : "$$c", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {"company_id":0}, + "range conditions" : {} + } + ], + "key bind expressions" : [ + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar0" + } + ], + "map of key bind expressions" : [ + [ 0 ] + ], + "position in join" : 1 + }, + "FROM variable" : "$$c", + "SELECT expressions" : [ + + ] + } + ] + + }, + "FROM variable" : "$join-0", + "SELECT expressions" : [ + { + "field name" : 
"company_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$d" + } + } + }, + { + "field name" : "department_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "department_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$d" + } + } + }, + { + "field name" : "name", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "name", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$d" + } + } + } + ] + } +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q20.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q20.r new file mode 100644 index 00000000..6e489e31 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q20.r @@ -0,0 +1,203 @@ +compiled-query-plan +{ +"query file" : "idc_inner_join/q/q20.q", +"plan" : +{ + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_PARTITIONS", + "order by fields at positions" : [ 0, 1 ], + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "NESTED_LOOP_JOIN", + "join predicates" : [ + { "outerBranch" :0, "outerExpr" : 0, "innerVar" : 0 }, + { "outerBranch" :0, "outerExpr" : 1, "innerVar" : 1 } + ], + "branches" : [ + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.reviews", + "row variable" : "$$r1", + "index used" : "primary index", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$$r1", + "SELECT expressions" : [ + { + "field name" : "outerJoinVal1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$r1" + } + } + }, + { + "field name" : "outerJoinVal2", + "field expression" : + { + "iterator kind" : "ARRAY_CONSTRUCTOR", + "conditional" : true, + "input iterators" : [ + { + "iterator kind" : "FIELD_STEP", + "field name" : "reviewer_emp_id", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "feedback", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$r1" + } + } + } + ] + } + } + ] + }, + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.reviews", + "row variable" : "$$r2", + "index used" : "primary index", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : {"company_id":0}, + "range conditions" : {} + } + ], + "key bind expressions" : [ + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar0" + } + ], + "map of key bind expressions" : [ + [ 0 ] + ], + "position in join" : 1 + }, + "FROM variable" : "$$r2", + "WHERE" : + { + "iterator kind" : "EQUAL", + "left operand" : + { + "iterator kind" : "ARRAY_FILTER", + "input iterator" : + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar1" + } + }, + "right operand" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "emp_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$r2" + } + } + }, + "SELECT expressions" : [ + + ] + } + ] + + }, + "FROM variable" : "$join-0", + "SELECT expressions" : [ + { + "field name" : "company_id", + "field expression" : + { + "iterator kind" : 
"FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$r1" + } + } + }, + { + "field name" : "review_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "review_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$r1" + } + } + }, + { + "field name" : "emp_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "emp_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$r1" + } + } + }, + { + "field name" : "comments", + "field expression" : + { + "iterator kind" : "ARRAY_CONSTRUCTOR", + "conditional" : true, + "input iterators" : [ + { + "iterator kind" : "FIELD_STEP", + "field name" : "comments", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "feedback", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$r2" + } + } + } + ] + } + } + ] + } +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q21.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q21.r new file mode 100644 index 00000000..bfaa7284 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q21.r @@ -0,0 +1,228 @@ +compiled-query-plan + +{ +"query file" : "idc_inner_join/q/q21.q", +"plan" : +{ + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_PARTITIONS", + "order by fields at positions" : [ 0, 1, 3 ], + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "NESTED_LOOP_JOIN", + "join predicates" : [ + { "outerBranch" :0, "outerExpr" : 0, "innerVar" : 0 } + ], + "branches" : [ + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.department", + "row variable" : "$d", + "index used" : "primary index", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$d", + "SELECT expressions" : [ + { + "field name" : "outerJoinVal1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$d" + } + } + } + ] + }, + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.null_records", + "row variable" : "$nr", + "index used" : "primary index", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : {"company_id":0}, + "range conditions" : {} + } + ], + "key bind expressions" : [ + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar0" + } + ], + "map of key bind expressions" : [ + [ 0 ] + ], + "position in join" : 1 + }, + "FROM variable" : "$nr", + "WHERE" : + { + "iterator kind" : "AND", + "input iterators" : [ + { + "iterator kind" : "OP_IS_NOT_NULL", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "value", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$nr" + } + } + }, + { + "iterator kind" : "OR", + "input iterators" : [ + { + "iterator kind" : "EQUAL", + "left operand" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "value", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$nr" + } + }, + "right operand" : + { + "iterator kind" : "CONST", + "value" : 2147483647 + } + }, + { + "iterator kind" : "EQUAL", + "left operand" : + { + "iterator kind" : 
"FIELD_STEP", + "field name" : "value", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$nr" + } + }, + "right operand" : + { + "iterator kind" : "CONST", + "value" : -2147483648 + } + } + ] + } + ] + }, + "SELECT expressions" : [ + + ] + } + ] + + }, + "FROM variable" : "$join-0", + "FROM" : + { + "iterator kind" : "KEYS", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "budget_breakdown", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$d" + } + } + }, + "FROM variable" : "$budget_sector", + "SELECT expressions" : [ + { + "field name" : "company_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$d" + } + } + }, + { + "field name" : "department_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "department_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$d" + } + } + }, + { + "field name" : "budget_sector", + "field expression" : + { + "iterator kind" : "VAR_REF", + "variable" : "$budget_sector" + } + }, + { + "field name" : "record_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "record_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$nr" + } + } + }, + { + "field name" : "value", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "value", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$nr" + } + } + } + ] + } +} +} \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q22.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q22.r new file mode 100644 index 00000000..47953269 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q22.r @@ -0,0 +1,280 @@ +compiled-query-plan +{ +"query file" : "idc_inner_join/q/q22.q", +"plan" : +{ + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_PARTITIONS", + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "NESTED_LOOP_JOIN", + "join predicates" : [ + { "outerBranch" :0, "outerExpr" : 0, "innerVar" : 0 }, + { "outerBranch" :1, "outerExpr" : 0, "innerVar" : 1 }, + { "outerBranch" :2, "outerExpr" : 0, "innerVar" : 2 }, + { "outerBranch" :3, "outerExpr" : 0, "innerVar" : 3 }, + { "outerBranch" :4, "outerExpr" : 0, "innerVar" : 4 } + ], + "branches" : [ + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company", + "row variable" : "$$c", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$$c", + "SELECT expressions" : [ + { + "field name" : "outerJoinVal1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$c" + } + } + } + ] + }, + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.department", + "row variable" : "$$d", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {"company_id":0}, + "range conditions" : {} + } + ], + "key bind expressions" : [ + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar0" 
+ } + ], + "map of key bind expressions" : [ + [ 0 ] + ], + "position in join" : 1 + }, + "FROM variable" : "$$d", + "SELECT expressions" : [ + { + "field name" : "outerJoinVal1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$d" + } + } + } + ] + }, + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.reviews", + "row variable" : "$$r", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {"company_id":0}, + "range conditions" : {} + } + ], + "key bind expressions" : [ + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar1" + } + ], + "map of key bind expressions" : [ + [ 0 ] + ], + "position in join" : 2 + }, + "FROM variable" : "$$r", + "SELECT expressions" : [ + { + "field name" : "outerJoinVal1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$r" + } + } + } + ] + }, + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.department.team", + "row variable" : "$$t", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {"company_id":0}, + "range conditions" : {} + } + ], + "key bind expressions" : [ + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar2" + } + ], + "map of key bind expressions" : [ + [ 0 ] + ], + "position in join" : 3 + }, + "FROM variable" : "$$t", + "SELECT expressions" : [ + { + "field name" : "outerJoinVal1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$t" + } + } + } + ] + }, + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.project", + "row variable" : "$$p", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {"company_id":0}, + "range conditions" : {} + } + ], + "key bind expressions" : [ + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar3" + } + ], + "map of key bind expressions" : [ + [ 0 ] + ], + "position in join" : 4 + }, + "FROM variable" : "$$p", + "SELECT expressions" : [ + { + "field name" : "outerJoinVal1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$p" + } + } + } + ] + }, + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.department.team.employee", + "row variable" : "$$e", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {"company_id":0}, + "range conditions" : {} + } + ], + "key bind expressions" : [ + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar4" + } + ], + "map of key bind expressions" : [ + [ 0 ] + ], + "position in join" : 5 + }, + "FROM variable" : "$$e", + "SELECT expressions" : [ + + ] + } + ] + + }, + "FROM variable" : "$join-0", + "SELECT expressions" : [ + { + "field name" : "company_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + 
"variable" : "$$c" + } + } + } + ] + } +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q3.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q3.r new file mode 100644 index 00000000..2edbf19c --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q3.r @@ -0,0 +1,136 @@ +compiled-query-plan + +{ +"query file" : "idc_inner_join/q/q3.q", +"plan" : +{ + "iterator kind" : "SORT", + "order by fields at positions" : [ 0, 1 ], + "input iterator" : + { + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_PARTITIONS", + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "NESTED_LOOP_JOIN", + "join predicates" : [ + { "outerBranch" :0, "outerExpr" : 0, "innerVar" : 0 } + ], + "branches" : [ + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company", + "row variable" : "$$c", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$$c", + "SELECT expressions" : [ + { + "field name" : "outerJoinVal1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$c" + } + } + } + ] + }, + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.department.team.employee", + "row variable" : "$$e", + "index used" : "primary index", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : {"company_id":0}, + "range conditions" : {} + } + ], + "key bind expressions" : [ + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar0" + } + ], + "map of key bind expressions" : [ + [ 0 ] + ], + "position in join" : 1 + }, + "FROM variable" : "$$e", + "SELECT expressions" : [ + + ] + } + ] + + }, + "FROM variable" : "$join-0", + "SELECT expressions" : [ + { + "field name" : "company_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + }, + { + "field name" : "emp_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "emp_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + }, + { + "field name" : "name", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "name", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + } + ] + } + } +} +} \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q4.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q4.r new file mode 100644 index 00000000..27a700a9 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q4.r @@ -0,0 +1,136 @@ +compiled-query-plan + +{ +"query file" : "idc_inner_join/q/q4.q", +"plan" : +{ + "iterator kind" : "SORT", + "order by fields at positions" : [ 0, 1 ], + "input iterator" : + { + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_PARTITIONS", + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "NESTED_LOOP_JOIN", + "join predicates" : [ + { "outerBranch" :0, "outerExpr" : 0, "innerVar" : 0 } + ], + "branches" : [ + { + "iterator kind" : "SELECT", + 
"FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.department.team.employee", + "row variable" : "$$e", + "index used" : "primary index", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$$e", + "SELECT expressions" : [ + { + "field name" : "outerJoinVal1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + } + ] + }, + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company", + "row variable" : "$$c", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {"company_id":0}, + "range conditions" : {} + } + ], + "key bind expressions" : [ + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar0" + } + ], + "map of key bind expressions" : [ + [ 0 ] + ], + "position in join" : 1 + }, + "FROM variable" : "$$c", + "SELECT expressions" : [ + + ] + } + ] + + }, + "FROM variable" : "$join-0", + "SELECT expressions" : [ + { + "field name" : "company_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + }, + { + "field name" : "emp_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "emp_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + }, + { + "field name" : "name", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "name", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + } + ] + } + } +} +} \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q5.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q5.r new file mode 100644 index 00000000..92e596ed --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q5.r @@ -0,0 +1,136 @@ +compiled-query-plan + +{ +"query file" : "idc_inner_join/q/q5.q", +"plan" : +{ + "iterator kind" : "SORT", + "order by fields at positions" : [ 0, 1, 2 ], + "input iterator" : + { + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_PARTITIONS", + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "NESTED_LOOP_JOIN", + "join predicates" : [ + { "outerBranch" :0, "outerExpr" : 0, "innerVar" : 0 } + ], + "branches" : [ + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.department.team.employee", + "row variable" : "$$e", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$$e", + "SELECT expressions" : [ + { + "field name" : "outerJoinVal1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + } + ] + }, + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.reviews", + "row variable" : "$$r", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : 
{"company_id":0}, + "range conditions" : {} + } + ], + "key bind expressions" : [ + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar0" + } + ], + "map of key bind expressions" : [ + [ 0 ] + ], + "position in join" : 1 + }, + "FROM variable" : "$$r", + "SELECT expressions" : [ + + ] + } + ] + + }, + "FROM variable" : "$join-0", + "SELECT expressions" : [ + { + "field name" : "company_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + }, + { + "field name" : "emp_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "emp_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + }, + { + "field name" : "review_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "review_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$r" + } + } + } + ] + } + } +} +} \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q6.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q6.r new file mode 100644 index 00000000..486ad8a8 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q6.r @@ -0,0 +1,136 @@ +compiled-query-plan + +{ +"query file" : "idc_inner_join/q/q6.q", +"plan" : +{ + "iterator kind" : "SORT", + "order by fields at positions" : [ 0, 1, 2 ], + "input iterator" : + { + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_PARTITIONS", + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "NESTED_LOOP_JOIN", + "join predicates" : [ + { "outerBranch" :0, "outerExpr" : 0, "innerVar" : 0 } + ], + "branches" : [ + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.reviews", + "row variable" : "$$r", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$$r", + "SELECT expressions" : [ + { + "field name" : "outerJoinVal1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$r" + } + } + } + ] + }, + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.department.team.employee", + "row variable" : "$$e", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {"company_id":0}, + "range conditions" : {} + } + ], + "key bind expressions" : [ + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar0" + } + ], + "map of key bind expressions" : [ + [ 0 ] + ], + "position in join" : 1 + }, + "FROM variable" : "$$e", + "SELECT expressions" : [ + + ] + } + ] + + }, + "FROM variable" : "$join-0", + "SELECT expressions" : [ + { + "field name" : "company_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + }, + { + "field name" : "emp_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "emp_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + }, + { + "field name" : 
"review_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "review_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$r" + } + } + } + ] + } + } +} +} \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q7.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q7.r new file mode 100644 index 00000000..3ab70e81 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q7.r @@ -0,0 +1,258 @@ +compiled-query-plan + +{ +"query file" : "idc_inner_join/q/q7.q", +"plan" : +{ + "iterator kind" : "SORT", + "order by fields at positions" : [ 0, 1, 2, 4 ], + "input iterator" : + { + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_PARTITIONS", + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "NESTED_LOOP_JOIN", + "join predicates" : [ + { "outerBranch" :0, "outerExpr" : 0, "innerVar" : 0 }, + { "outerBranch" :0, "outerExpr" : 1, "innerVar" : 1 }, + { "outerBranch" :0, "outerExpr" : 2, "innerVar" : 2 }, + { "outerBranch" :0, "outerExpr" : 3, "innerVar" : 3 } + ], + "branches" : [ + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.department.team.employee", + "row variable" : "$$e", + "index used" : "primary index", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$$e", + "SELECT expressions" : [ + { + "field name" : "outerJoinVal1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + }, + { + "field name" : "outerJoinVal2", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + }, + { + "field name" : "outerJoinVal3", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "department_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + }, + { + "field name" : "outerJoinVal4", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "emp_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + } + ] + }, + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.department", + "row variable" : "$$d", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {"company_id":0,"department_id":0}, + "range conditions" : {} + } + ], + "key bind expressions" : [ + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar0" + }, + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar2" + } + ], + "map of key bind expressions" : [ + [ 0, 1 ] + ], + "position in join" : 1 + }, + "FROM variable" : "$$d", + "SELECT expressions" : [ + + ] + }, + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.reviews", + "row variable" : "$$r", + "index used" : "primary index", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : {"company_id":0}, + "range conditions" : {} + } + ], + "key bind expressions" : [ + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar1" + 
} + ], + "map of key bind expressions" : [ + [ 0 ] + ], + "position in join" : 2 + }, + "FROM variable" : "$$r", + "WHERE" : + { + "iterator kind" : "EQUAL", + "left operand" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "emp_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$r" + } + }, + "right operand" : + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar3" + } + }, + "SELECT expressions" : [ + + ] + } + ] + + }, + "FROM variable" : "$join-0", + "SELECT expressions" : [ + { + "field name" : "company_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + }, + { + "field name" : "department_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "department_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$d" + } + } + }, + { + "field name" : "emp_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "emp_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + }, + { + "field name" : "name", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "name", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + }, + { + "field name" : "review_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "review_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$r" + } + } + } + ] + } + } +} +} \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q8.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q8.r new file mode 100644 index 00000000..d4a8649c --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q8.r @@ -0,0 +1,245 @@ +compiled-query-plan +{ +"query file" : "idc_inner_join/q/q8.q", +"plan" : +{ + "iterator kind" : "SORT", + "order by fields at positions" : [ 0, 1 ], + "input iterator" : + { + "iterator kind" : "GROUP", + "input variable" : "$gb-3", + "input iterator" : + { + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_PARTITIONS", + "input iterator" : + { + "iterator kind" : "GROUP", + "input variable" : "$gb-2", + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "NESTED_LOOP_JOIN", + "join predicates" : [ + { "outerBranch" :0, "outerExpr" : 0, "innerVar" : 0 }, + { "outerBranch" :0, "outerExpr" : 1, "innerVar" : 1 } + ], + "branches" : [ + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.department.team.employee", + "row variable" : "$$e", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$$e", + "SELECT expressions" : [ + { + "field name" : "outerJoinVal1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + }, + { + "field name" : "outerJoinVal2", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + } + ] + }, + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator 
kind" : "TABLE", + "target table" : "company.department", + "row variable" : "$$d", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {"company_id":0}, + "range conditions" : {} + } + ], + "key bind expressions" : [ + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar0" + } + ], + "map of key bind expressions" : [ + [ 0 ] + ], + "position in join" : 1 + }, + "FROM variable" : "$$d", + "SELECT expressions" : [ + + ] + }, + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.reviews", + "row variable" : "$$r", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {"company_id":0}, + "range conditions" : {} + } + ], + "key bind expressions" : [ + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar1" + } + ], + "map of key bind expressions" : [ + [ 0 ] + ], + "position in join" : 2 + }, + "FROM variable" : "$$r", + "SELECT expressions" : [ + + ] + } + ] + + }, + "FROM variable" : "$join-0", + "SELECT expressions" : [ + { + "field name" : "company_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$e" + } + } + }, + { + "field name" : "department_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "department_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$d" + } + } + }, + { + "field name" : "count", + "field expression" : + { + "iterator kind" : "CONST", + "value" : 1 + } + } + ] + }, + "grouping expressions" : [ + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$gb-2" + } + }, + { + "iterator kind" : "FIELD_STEP", + "field name" : "department_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$gb-2" + } + } + ], + "aggregate functions" : [ + { + "iterator kind" : "FUNC_COUNT_STAR" + } + ] + } + }, + "grouping expressions" : [ + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$gb-3" + } + }, + { + "iterator kind" : "FIELD_STEP", + "field name" : "department_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$gb-3" + } + } + ], + "aggregate functions" : [ + { + "iterator kind" : "FUNC_SUM", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "count", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$gb-3" + } + } + } + ] + } +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q9.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q9.r new file mode 100644 index 00000000..c962c003 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/explans/q9.r @@ -0,0 +1,281 @@ +compiled-query-plan + +{ +"query file" : "idc_inner_join/q/q9.q", +"plan" : +{ + "iterator kind" : "SORT", + "order by fields at positions" : [ 0, 1 ], + "input iterator" : + { + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_PARTITIONS", + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "NESTED_LOOP_JOIN", + "join predicates" : [ + { "outerBranch" :0, "outerExpr" : 0, "innerVar" : 0 }, + { "outerBranch" :0, "outerExpr" : 1, "innerVar" : 1 }, + { 
"outerBranch" :0, "outerExpr" : 2, "innerVar" : 2 }, + { "outerBranch" :1, "outerExpr" : 0, "innerVar" : 3 } + ], + "branches" : [ + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.department.team.employee", + "row variable" : "$$emp", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$$emp", + "SELECT expressions" : [ + { + "field name" : "outerJoinVal1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$emp" + } + } + }, + { + "field name" : "outerJoinVal2", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$emp" + } + } + }, + { + "field name" : "outerJoinVal3", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "emp_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$emp" + } + } + } + ] + }, + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.department.team.employee", + "row variable" : "$$reviewer", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {"company_id":0}, + "range conditions" : {} + } + ], + "key bind expressions" : [ + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar1" + } + ], + "map of key bind expressions" : [ + [ 0 ] + ], + "position in join" : 1 + }, + "FROM variable" : "$$reviewer", + "SELECT expressions" : [ + { + "field name" : "outerJoinVal1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "emp_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$reviewer" + } + } + } + ] + }, + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "company.reviews", + "row variable" : "$$r", + "index used" : "primary index", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : {"company_id":0}, + "range conditions" : {} + } + ], + "key bind expressions" : [ + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar0" + } + ], + "map of key bind expressions" : [ + [ 0 ] + ], + "position in join" : 2 + }, + "FROM variable" : "$$r", + "WHERE" : + { + "iterator kind" : "AND", + "input iterators" : [ + { + "iterator kind" : "EQUAL", + "left operand" : + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar2" + }, + "right operand" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "emp_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$r" + } + } + }, + { + "iterator kind" : "EQUAL", + "left operand" : + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar3" + }, + "right operand" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "reviewer_emp_id", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "feedback", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$r" + } + } + } + } + ] + }, + "SELECT expressions" : [ + + ] + } + ] + + }, + "FROM variable" : "$join-0", + "SELECT expressions" : [ + { + "field name" : "company_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : 
"company_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$emp" + } + } + }, + { + "field name" : "emp_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "emp_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$emp" + } + } + }, + { + "field name" : "rev_dept_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "department_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$reviewer" + } + } + }, + { + "field name" : "rev_emp_id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "emp_id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$reviewer" + } + } + }, + { + "field name" : "feedback", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "feedback", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$r" + } + } + } + ] + } + } +} +} \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/err01.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/err01.r new file mode 100644 index 00000000..9a8a79e2 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/err01.r @@ -0,0 +1 @@ +Compile-Exception \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/err02.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/err02.r new file mode 100644 index 00000000..9a8a79e2 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/err02.r @@ -0,0 +1 @@ +Compile-Exception \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/err03.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/err03.r new file mode 100644 index 00000000..9a8a79e2 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/err03.r @@ -0,0 +1 @@ +Compile-Exception \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/err04.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/err04.r new file mode 100644 index 00000000..9a8a79e2 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/err04.r @@ -0,0 +1 @@ +Compile-Exception \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/err05.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/err05.r new file mode 100644 index 00000000..9a8a79e2 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/err05.r @@ -0,0 +1 @@ +Compile-Exception \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/loj01.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/loj01.r new file mode 100644 index 00000000..625c0d95 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/loj01.r @@ -0,0 +1,6 @@ +unordered-result +{"company_id":3,"department_id":1,"team_id":1} +{"company_id":3,"department_id":2,"team_id":2} +{"company_id":1,"department_id":1,"team_id":1} +{"company_id":2,"department_id":1,"team_id":1} +{"company_id":2,"department_id":2,"team_id":2} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/loj02.r 
b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/loj02.r new file mode 100644 index 00000000..e01c6084 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/loj02.r @@ -0,0 +1,6 @@ +unordered-result +{"company_id":1,"department_id":1,"emp_id":1} +{"company_id":3,"department_id":1,"emp_id":1} +{"company_id":3,"department_id":2,"emp_id":2} +{"company_id":2,"department_id":1,"emp_id":1} +{"company_id":2,"department_id":2,"emp_id":2} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/loj03.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/loj03.r new file mode 100644 index 00000000..7f1e613b --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/loj03.r @@ -0,0 +1,6 @@ +unordered-result +{"company_id":1,"team_id":1,"emp_id":1} +{"company_id":2,"team_id":1,"emp_id":1} +{"company_id":2,"team_id":2,"emp_id":2} +{"company_id":3,"team_id":1,"emp_id":1} +{"company_id":3,"team_id":2,"emp_id":2} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/loj04.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/loj04.r new file mode 100644 index 00000000..b3b444c0 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/loj04.r @@ -0,0 +1,6 @@ +unordered-result +{"company_id":1,"department_id":1,"emp_id":1} +{"company_id":2,"department_id":1,"emp_id":1} +{"company_id":2,"department_id":2,"emp_id":2} +{"company_id":3,"department_id":1,"emp_id":1} +{"company_id":3,"department_id":2,"emp_id":2} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/loj05.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/loj05.r new file mode 100644 index 00000000..87cd5d95 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/loj05.r @@ -0,0 +1,6 @@ +unordered-result +{"company_id":1,"department_id":1,"emp_id":1,"review_id":1} +{"company_id":2,"department_id":1,"emp_id":1,"review_id":1} +{"company_id":2,"department_id":2,"emp_id":2,"review_id":2} +{"company_id":3,"department_id":1,"emp_id":1,"review_id":3} +{"company_id":3,"department_id":2,"emp_id":2,"review_id":4} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/loj06.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/loj06.r new file mode 100644 index 00000000..87cd5d95 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/loj06.r @@ -0,0 +1,6 @@ +unordered-result +{"company_id":1,"department_id":1,"emp_id":1,"review_id":1} +{"company_id":2,"department_id":1,"emp_id":1,"review_id":1} +{"company_id":2,"department_id":2,"emp_id":2,"review_id":2} +{"company_id":3,"department_id":1,"emp_id":1,"review_id":3} +{"company_id":3,"department_id":2,"emp_id":2,"review_id":4} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/loj07.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/loj07.r new file mode 100644 index 00000000..22c7ff86 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/loj07.r @@ -0,0 +1,10 @@ +unordered-result +{"company_id":1,"department_id":1,"team_id":1,"emp_id":1,"project_id":1} +{"company_id":1,"department_id":1,"team_id":1,"emp_id":1,"project_id":2} +{"company_id":1,"department_id":1,"team_id":1,"emp_id":1,"project_id":5} +{"company_id":2,"department_id":1,"team_id":1,"emp_id":1,"project_id":3} 
+{"company_id":2,"department_id":1,"team_id":1,"emp_id":1,"project_id":4} +{"company_id":2,"department_id":2,"team_id":2,"emp_id":2,"project_id":7} +{"company_id":3,"department_id":1,"team_id":1,"emp_id":1,"project_id":5} +{"company_id":3,"department_id":1,"team_id":1,"emp_id":1,"project_id":7} +{"company_id":3,"department_id":2,"team_id":2,"emp_id":2,"project_id":5} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/loj08.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/loj08.r new file mode 100644 index 00000000..9b64f92b --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/loj08.r @@ -0,0 +1,5 @@ +unordered-result +{"company_id":1,"emp_id":1,"review_id":1} +{"company_id":2,"emp_id":1,"review_id":1} +{"company_id":3,"emp_id":1,"review_id":3} +{"company_id":3,"emp_id":2,"review_id":4} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/loj09.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/loj09.r new file mode 100644 index 00000000..3ba087b6 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/loj09.r @@ -0,0 +1,6 @@ +unordered-result +{"company_id":1,"review_id":1,"emp_id":1} +{"company_id":2,"review_id":1,"emp_id":1} +{"company_id":2,"review_id":2,"emp_id":2} +{"company_id":3,"review_id":3,"emp_id":1} +{"company_id":3,"review_id":4,"emp_id":2} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/loj10.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/loj10.r new file mode 100644 index 00000000..461e221e --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/loj10.r @@ -0,0 +1,6 @@ +unordered-result +{"company_id":1,"department_id":1,"team_id":1} +{"company_id":2,"department_id":1,"team_id":1} +{"company_id":2,"department_id":2,"team_id":2} +{"company_id":3,"department_id":1,"team_id":1} +{"company_id":3,"department_id":2,"team_id":2} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/loj11.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/loj11.r new file mode 100644 index 00000000..87cd5d95 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/loj11.r @@ -0,0 +1,6 @@ +unordered-result +{"company_id":1,"department_id":1,"emp_id":1,"review_id":1} +{"company_id":2,"department_id":1,"emp_id":1,"review_id":1} +{"company_id":2,"department_id":2,"emp_id":2,"review_id":2} +{"company_id":3,"department_id":1,"emp_id":1,"review_id":3} +{"company_id":3,"department_id":2,"emp_id":2,"review_id":4} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/nt01.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/nt01.r new file mode 100644 index 00000000..ebbf1f20 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/nt01.r @@ -0,0 +1,6 @@ +unordered-result +{"company_id":1,"dept_comp_id":1,"department_id":1,"team_id":1,"emp_id":1} +{"company_id":2,"dept_comp_id":2,"department_id":1,"team_id":1,"emp_id":1} +{"company_id":2,"dept_comp_id":2,"department_id":2,"team_id":2,"emp_id":2} +{"company_id":3,"dept_comp_id":3,"department_id":1,"team_id":1,"emp_id":1} +{"company_id":3,"dept_comp_id":3,"department_id":2,"team_id":2,"emp_id":2} \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/nt02.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/nt02.r new file mode 
100644 index 00000000..2215a4b5 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/nt02.r @@ -0,0 +1,6 @@ +unordered-result +{"company_id":1,"dept_comp_id":1,"department_id":1,"emp_id":1,"emp_team_id":1,"team_id":1} +{"company_id":2,"dept_comp_id":2,"department_id":1,"emp_id":1,"emp_team_id":1,"team_id":1} +{"company_id":2,"dept_comp_id":2,"department_id":2,"emp_id":2,"emp_team_id":2,"team_id":2} +{"company_id":3,"dept_comp_id":3,"department_id":1,"emp_id":1,"emp_team_id":1,"team_id":1} +{"company_id":3,"dept_comp_id":3,"department_id":2,"emp_id":2,"emp_team_id":2,"team_id":2} \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/nt03.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/nt03.r new file mode 100644 index 00000000..cff754bb --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/nt03.r @@ -0,0 +1 @@ +unordered-result diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/nt04.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/nt04.r new file mode 100644 index 00000000..599520e3 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/nt04.r @@ -0,0 +1,6 @@ +unordered-result +{"company_id":1,"tcomp_id":1,"department_id":1,"team_id":1,"emp_id":1} +{"company_id":2,"tcomp_id":2,"department_id":1,"team_id":1,"emp_id":1} +{"company_id":2,"tcomp_id":2,"department_id":2,"team_id":2,"emp_id":2} +{"company_id":3,"tcomp_id":3,"department_id":1,"team_id":1,"emp_id":1} +{"company_id":3,"tcomp_id":3,"department_id":2,"team_id":2,"emp_id":2} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/nt05.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/nt05.r new file mode 100644 index 00000000..66689c22 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/nt05.r @@ -0,0 +1,4 @@ +unordered-result +{"company_id":1,"department_id":1,"team_id":1,"emp_id":1,"review_id":1} +{"company_id":2,"department_id":1,"team_id":1,"emp_id":1,"review_id":1} +{"company_id":3,"department_id":1,"team_id":1,"emp_id":1,"review_id":3} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/nt06.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/nt06.r new file mode 100644 index 00000000..5d9cd0ea --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/nt06.r @@ -0,0 +1,6 @@ +unordered-result +{"company_id":1,"emp_id":1,"project_id":1} +{"company_id":1,"emp_id":1,"project_id":2} +{"company_id":1,"emp_id":1,"project_id":5} +{"company_id":2,"emp_id":1,"project_id":4} +{"company_id":2,"emp_id":2,"project_id":7} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q1.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q1.r new file mode 100644 index 00000000..cb50540f --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q1.r @@ -0,0 +1,6 @@ +unordered-result +{"company_id":1,"department_id":1,"name":"Engineering"} +{"company_id":2,"department_id":1,"name":"Engineering"} +{"company_id":2,"department_id":2,"name":"Marketing"} +{"company_id":3,"department_id":1,"name":"Engineering"} +{"company_id":3,"department_id":2,"name":"Sales"} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q10.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q10.r new file mode 100644 index 
00000000..c68a9d24 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q10.r @@ -0,0 +1,9 @@ +unordered-result +{"company_id":1,"emp_id":1,"project_id":1,"project_name":"AI Chatbot","client_id":1,"client_name":"RetailCorp"} +{"company_id":1,"emp_id":1,"project_id":5,"project_name":"Messaging App","client_id":1,"client_name":"RetailCorp"} +{"company_id":1,"emp_id":1,"project_id":2,"project_name":"E-commerce Recommendation System","client_id":2,"client_name":"HealthPlus"} +{"company_id":2,"emp_id":1,"project_id":3,"project_name":"Cloud Migration","client_id":3,"client_name":"FinTech Solutions"} +{"company_id":2,"emp_id":1,"project_id":4,"project_name":"Healthcare Analytics Dashboard","client_id":4,"client_name":"EduLearn"} +{"company_id":3,"emp_id":1,"project_id":5,"project_name":"Cybersecurity Threat Detection","client_id":5,"client_name":"AutoMotive AI"} +{"company_id":3,"emp_id":1,"project_id":7,"project_name":"Firewall","client_id":5,"client_name":"AutoMotive AI"} +{"company_id":3,"emp_id":2,"project_id":5,"project_name":"Cybersecurity Threat Detection","client_id":5,"client_name":"AutoMotive AI"} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q11.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q11.r new file mode 100644 index 00000000..56e367c0 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q11.r @@ -0,0 +1,12 @@ +unordered-result +{"company_id":1,"emp_id":1,"emp_name":"Alice","skill":"Machine Learning"} +{"company_id":1,"emp_id":1,"emp_name":"Alice","skill":"Data Analysis"} +{"company_id":1,"emp_id":1,"emp_name":"Alice","skill":"Programming"} +{"company_id":2,"emp_id":1,"emp_name":"Charlie","skill":"Cloud Computing"} +{"company_id":2,"emp_id":1,"emp_name":"Charlie","skill":"AI"} +{"company_id":2,"emp_id":2,"emp_name":"David","skill":"Cloud Computing"} +{"company_id":2,"emp_id":2,"emp_name":"David","skill":"Cybersecurity"} +{"company_id":2,"emp_id":2,"emp_name":"David","skill":"AI"} +{"company_id":3,"emp_id":1,"emp_name":"Emma","skill":"Software Development"} +{"company_id":3,"emp_id":1,"emp_name":"Emma","skill":"Project Management"} +{"company_id":3,"emp_id":2,"emp_name":"Frank","skill":"Web Development"} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q12.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q12.r new file mode 100644 index 00000000..c123d409 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q12.r @@ -0,0 +1,10 @@ +unordered-result +{"company_id":1,"name":"RetailCorp","project_id":1,"progress":"Completed"} +{"company_id":1,"name":"RetailCorp","project_id":1,"progress":"Ongoing"} +{"company_id":1,"name":"RetailCorp","project_id":5,"progress":"Completed"} +{"company_id":1,"name":"RetailCorp","project_id":5,"progress":"Ongoing"} +{"company_id":2,"name":"FinTech Solutions","project_id":3,"progress":"Ongoing"} +{"company_id":2,"name":"FinTech Solutions","project_id":3,"progress":"Pending"} +{"company_id":2,"name":"EduLearn","project_id":4,"progress":"Completed"} +{"company_id":2,"name":"EduLearn","project_id":4,"progress":"Completed"} +{"company_id":2,"name":"EduLearn","project_id":4,"progress":"Deployment"} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q13.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q13.r new file mode 100644 index 00000000..bbc318d5 --- /dev/null +++ 
b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q13.r @@ -0,0 +1,6 @@ +unordered-result +{"company_id":1,"team_id":1,"count":3,"total_skill":60,"min":10,"max":30} +{"company_id":2,"team_id":1,"count":2,"total_skill":100,"min":40,"max":60} +{"company_id":2,"team_id":2,"count":3,"total_skill":150,"min":40,"max":60} +{"company_id":3,"team_id":1,"count":2,"total_skill":150,"min":70,"max":80} +{"company_id":3,"team_id":2,"count":1,"total_skill":90,"min":90,"max":90} \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q14.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q14.r new file mode 100644 index 00000000..dab4659a --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q14.r @@ -0,0 +1,4 @@ +unordered-result +{"company_id":3} +{"company_id":2} +{"company_id":1} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q15.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q15.r new file mode 100644 index 00000000..628a610a --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q15.r @@ -0,0 +1,6 @@ +unordered-result +{"company_id":1,"team_id":1,"num_tech":2} +{"company_id":2,"team_id":1,"num_tech":1} +{"company_id":2,"team_id":2,"num_tech":2} +{"company_id":3,"team_id":1,"num_tech":3} +{"company_id":3,"team_id":2,"num_tech":2} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q16.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q16.r new file mode 100644 index 00000000..32013641 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q16.r @@ -0,0 +1,5 @@ +unordered-result +{"company_id":3,"department_id":2,"year":2023,"month":5} +{"company_id":3,"department_id":1,"year":2020,"month":2} +{"company_id":2,"department_id":2,"year":2016,"month":10} +{"company_id":2,"department_id":1,"year":2021,"month":2} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q18.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q18.r new file mode 100644 index 00000000..cff754bb --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q18.r @@ -0,0 +1 @@ +unordered-result diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q19.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q19.r new file mode 100644 index 00000000..8f6ae18d --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q19.r @@ -0,0 +1,3 @@ +unordered-result +{"company_id":1,"head_office_location":{"city":"NY","country":"USA"},"record_id":1,"value":null} +{"company_id":2,"head_office_location":{"city":"San Francisco","country":"USA"},"record_id":2,"value":null} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q2.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q2.r new file mode 100644 index 00000000..cb50540f --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q2.r @@ -0,0 +1,6 @@ +unordered-result +{"company_id":1,"department_id":1,"name":"Engineering"} +{"company_id":2,"department_id":1,"name":"Engineering"} +{"company_id":2,"department_id":2,"name":"Marketing"} +{"company_id":3,"department_id":1,"name":"Engineering"} +{"company_id":3,"department_id":2,"name":"Sales"} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q20.r 
b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q20.r new file mode 100644 index 00000000..2a1e0836 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q20.r @@ -0,0 +1,6 @@ +unordered-result +{"company_id":1,"review_id":1,"emp_id":1,"comments":"Excellent AI model development"} +{"company_id":2,"review_id":1,"emp_id":1,"comments":"Strong leadership in digital marketing campaigns"} +{"company_id":2,"review_id":2,"emp_id":2,"comments":"Great contributions to software architecture"} +{"company_id":3,"review_id":3,"emp_id":1,"comments":"Excellent client relationship management"} +{"company_id":3,"review_id":4,"emp_id":2,"comments":"Outstanding performance in cloud security"} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q21.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q21.r new file mode 100644 index 00000000..7926c01c --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q21.r @@ -0,0 +1,9 @@ +unordered-result +{"company_id":2,"department_id":1,"budget_sector":"Infrastructure","record_id":1,"value":2147483647} +{"company_id":2,"department_id":1,"budget_sector":"RnD","record_id":1,"value":2147483647} +{"company_id":2,"department_id":2,"budget_sector":"Advertising","record_id":1,"value":2147483647} +{"company_id":2,"department_id":2,"budget_sector":"Market Research","record_id":1,"value":2147483647} +{"company_id":3,"department_id":1,"budget_sector":"Infrastructure","record_id":3,"value":2147483647} +{"company_id":3,"department_id":1,"budget_sector":"RnD","record_id":3,"value":2147483647} +{"company_id":3,"department_id":2,"budget_sector":"Salaries","record_id":3,"value":2147483647} +{"company_id":3,"department_id":2,"budget_sector":"Training","record_id":3,"value":2147483647} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q22.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q22.r new file mode 100644 index 00000000..fd3ae577 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q22.r @@ -0,0 +1,84 @@ +unordered-result +{"company_id":2} +{"company_id":2} +{"company_id":2} +{"company_id":2} +{"company_id":2} +{"company_id":3} +{"company_id":3} +{"company_id":3} +{"company_id":3} +{"company_id":3} +{"company_id":1} +{"company_id":1} +{"company_id":1} +{"company_id":3} +{"company_id":3} +{"company_id":3} +{"company_id":3} +{"company_id":3} +{"company_id":2} +{"company_id":2} +{"company_id":2} +{"company_id":2} +{"company_id":2} +{"company_id":3} +{"company_id":3} +{"company_id":3} +{"company_id":3} +{"company_id":3} +{"company_id":2} +{"company_id":2} +{"company_id":2} +{"company_id":2} +{"company_id":2} +{"company_id":3} +{"company_id":3} +{"company_id":3} +{"company_id":3} +{"company_id":3} +{"company_id":2} +{"company_id":2} +{"company_id":2} +{"company_id":2} +{"company_id":2} +{"company_id":3} +{"company_id":3} +{"company_id":3} +{"company_id":3} +{"company_id":3} +{"company_id":2} +{"company_id":2} +{"company_id":2} +{"company_id":2} +{"company_id":2} +{"company_id":3} +{"company_id":3} +{"company_id":3} +{"company_id":3} +{"company_id":3} +{"company_id":2} +{"company_id":2} +{"company_id":2} +{"company_id":2} +{"company_id":2} +{"company_id":3} +{"company_id":3} +{"company_id":2} +{"company_id":2} +{"company_id":2} +{"company_id":2} +{"company_id":2} +{"company_id":2} +{"company_id":2} +{"company_id":2} +{"company_id":2} +{"company_id":2} +{"company_id":2} +{"company_id":2} 
+{"company_id":2} +{"company_id":2} +{"company_id":2} +{"company_id":2} +{"company_id":2} +{"company_id":2} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q3.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q3.r new file mode 100644 index 00000000..1d229e7c --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q3.r @@ -0,0 +1,6 @@ +unordered-result +{"company_id":1,"emp_id":1,"name":"Alice"} +{"company_id":2,"emp_id":1,"name":"Charlie"} +{"company_id":2,"emp_id":2,"name":"David"} +{"company_id":3,"emp_id":1,"name":"Emma"} +{"company_id":3,"emp_id":2,"name":"Frank"} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q4.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q4.r new file mode 100644 index 00000000..1d229e7c --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q4.r @@ -0,0 +1,6 @@ +unordered-result +{"company_id":1,"emp_id":1,"name":"Alice"} +{"company_id":2,"emp_id":1,"name":"Charlie"} +{"company_id":2,"emp_id":2,"name":"David"} +{"company_id":3,"emp_id":1,"name":"Emma"} +{"company_id":3,"emp_id":2,"name":"Frank"} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q5.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q5.r new file mode 100644 index 00000000..c7e8b0e7 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q5.r @@ -0,0 +1,10 @@ +unordered-result +{"company_id":1,"emp_id":1,"review_id":1} +{"company_id":2,"emp_id":1,"review_id":1} +{"company_id":2,"emp_id":1,"review_id":2} +{"company_id":2,"emp_id":2,"review_id":1} +{"company_id":2,"emp_id":2,"review_id":2} +{"company_id":3,"emp_id":1,"review_id":3} +{"company_id":3,"emp_id":1,"review_id":4} +{"company_id":3,"emp_id":2,"review_id":3} +{"company_id":3,"emp_id":2,"review_id":4} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q6.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q6.r new file mode 100644 index 00000000..c7e8b0e7 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q6.r @@ -0,0 +1,10 @@ +unordered-result +{"company_id":1,"emp_id":1,"review_id":1} +{"company_id":2,"emp_id":1,"review_id":1} +{"company_id":2,"emp_id":1,"review_id":2} +{"company_id":2,"emp_id":2,"review_id":1} +{"company_id":2,"emp_id":2,"review_id":2} +{"company_id":3,"emp_id":1,"review_id":3} +{"company_id":3,"emp_id":1,"review_id":4} +{"company_id":3,"emp_id":2,"review_id":3} +{"company_id":3,"emp_id":2,"review_id":4} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q7.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q7.r new file mode 100644 index 00000000..a8b65ff3 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q7.r @@ -0,0 +1,6 @@ +unordered-result +{"company_id":1,"department_id":1,"emp_id":1,"name":"Alice","review_id":1} +{"company_id":2,"department_id":1,"emp_id":1,"name":"Charlie","review_id":1} +{"company_id":2,"department_id":2,"emp_id":2,"name":"David","review_id":2} +{"company_id":3,"department_id":1,"emp_id":1,"name":"Emma","review_id":3} +{"company_id":3,"department_id":2,"emp_id":2,"name":"Frank","review_id":4} \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q8.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q8.r new file mode 100644 index 
00000000..e9a32e40 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q8.r @@ -0,0 +1,6 @@ +unordered-result +{"company_id":1,"department_id":1,"count":1} +{"company_id":2,"department_id":2,"count":4} +{"company_id":2,"department_id":1,"count":4} +{"company_id":3,"department_id":2,"count":4} +{"company_id":3,"department_id":1,"count":4} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q9.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q9.r new file mode 100644 index 00000000..ae87fbdb --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/expres/q9.r @@ -0,0 +1,6 @@ +unordered-result +{"company_id":1,"emp_id":1,"rev_dept_id":1,"rev_emp_id":1,"feedback":{"comments":"Excellent AI model development","history":[{"date":"2025-02-15","previous_rating":"4.5"}],"rating":4.8,"reviewer_emp_id":1}} +{"company_id":2,"emp_id":1,"rev_dept_id":2,"rev_emp_id":2,"feedback":{"comments":"Great contributions to software architecture","history":[{"date":"2025-01-10","previous_rating":"4.5"}],"rating":4.7,"reviewer_emp_id":2}} +{"company_id":2,"emp_id":2,"rev_dept_id":1,"rev_emp_id":1,"feedback":{"comments":"Strong leadership in digital marketing campaigns","history":[{"date":"2025-02-05","previous_rating":"4.2"}],"rating":4.5,"reviewer_emp_id":1}} +{"company_id":3,"emp_id":1,"rev_dept_id":2,"rev_emp_id":2,"feedback":{"comments":"Outstanding performance in cloud security","history":[{"date":"2025-01-20","previous_rating":"4.6"}],"rating":4.9,"reviewer_emp_id":2}} +{"company_id":3,"emp_id":2,"rev_dept_id":1,"rev_emp_id":1,"feedback":{"comments":"Excellent client relationship management","history":[{"date":"2025-02-08","previous_rating":"4.3"}],"rating":4.6,"reviewer_emp_id":1}} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/err01.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/err01.q new file mode 100644 index 00000000..87bc20d0 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/err01.q @@ -0,0 +1,3 @@ +select * +from company, company.department d +where company.company_id = d.department_id \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/err02.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/err02.q new file mode 100644 index 00000000..f796bd7d --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/err02.q @@ -0,0 +1,3 @@ +select d.company_id, d.department_id, d.name +from company c, company.department d on department_id > 1 +where c.company_id = d.company_id \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/err03.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/err03.q new file mode 100644 index 00000000..c7175d71 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/err03.q @@ -0,0 +1,3 @@ +select * +from company c, company.department c +where c.company_id = c.company_id \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/err04.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/err04.q new file mode 100644 index 00000000..19910a8b --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/err04.q @@ -0,0 +1,3 @@ +select c.company_id, d.department_id, t.team_id +from company c, (company.department d left outer join company.department.team t on d.company_id = t.company_id 
and d.department_id = t.department_id) +where c.company_id = d.company_id \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/err05.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/err05.q new file mode 100644 index 00000000..6669d808 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/err05.q @@ -0,0 +1,3 @@ +select * +from company c, org g +where c.company_id = g.company_id \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/loj01.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/loj01.q new file mode 100644 index 00000000..4ab50425 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/loj01.q @@ -0,0 +1,4 @@ +select c.company_id, d.department_id, t.team_id +from company c, company.department d left outer join company.department.team t +on d.company_id = t.company_id and d.department_id = t.department_id +where c.company_id = d.company_id \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/loj02.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/loj02.q new file mode 100644 index 00000000..3e006593 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/loj02.q @@ -0,0 +1,4 @@ +select c.company_id, d.department_id, e.emp_id +from company c, company.department d left outer join company.department.team.employee e +on d.company_id = e.company_id and d.department_id = e.department_id +where c.company_id = d.company_id \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/loj03.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/loj03.q new file mode 100644 index 00000000..b1ee0bc7 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/loj03.q @@ -0,0 +1,5 @@ +select d.company_id, t.team_id, e.emp_id +from company.department.team.employee e left outer join company.department.team t on e.company_id = t.company_id and e.department_id = t.department_id and e.team_id = t.team_id, company.department d +where d.company_id = e.company_id and + d.department_id = e.department_id +order by d.company_id, t.team_id, e.emp_id \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/loj04.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/loj04.q new file mode 100644 index 00000000..cb76d8a0 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/loj04.q @@ -0,0 +1,4 @@ +select c.company_id, d.department_id, e.emp_id +from company.department.team.employee e left outer join company.department d on e.company_id = d.company_id and e.department_id = d.department_id, company c +where c.company_id = e.company_id +order by c.company_id, d.department_id, e.emp_id \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/loj05.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/loj05.q new file mode 100644 index 00000000..2431f28b --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/loj05.q @@ -0,0 +1,5 @@ +select c.company_id, d.department_id, e.emp_id, r.review_id +from company c left outer join company.department d on c.company_id = d.company_id left outer join company.department.team.employee e on d.company_id = e.company_id and d.department_id = e.department_id, company.reviews r +where c.company_id = 
r.company_id and + e.emp_id = r.emp_id +order by c.company_id, d.department_id, e.emp_id, r.review_id \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/loj06.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/loj06.q new file mode 100644 index 00000000..508cb42d --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/loj06.q @@ -0,0 +1,5 @@ +select c.company_id, d.department_id, e.emp_id, r.review_id +from company.reviews r, company.department.team.employee e left outer join company c on e.company_id = c.company_id left outer join company.department d on e.company_id = d.company_id and e.department_id = d.department_id +where c.company_id = r.company_id and + e.emp_id = r.emp_id +order by c.company_id, d.department_id, e.emp_id, r.review_id \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/loj07.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/loj07.q new file mode 100644 index 00000000..28aa178f --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/loj07.q @@ -0,0 +1,5 @@ +select t.company_id, t.department_id, t.team_id, e.emp_id, p.project_id +from company.project p, company.department.team t left outer join company.department d on t.company_id = d.company_id and t.department_id = d.department_id left outer join company.department.team.employee e on t.company_id = e.company_id and t.department_id = e.department_id and t.team_id = e.team_id +where p.company_id = t.company_id and + p.project_id in e.projects[] +order by t.company_id, t.department_id, t.team_id, e.emp_id, p.project_id \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/loj08.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/loj08.q new file mode 100644 index 00000000..8f021182 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/loj08.q @@ -0,0 +1,7 @@ +select c.company_id, e.emp_id, r.review_id +from company.reviews r, company c left outer join company.department.team.employee e on c.company_id = e.company_id and size(e.projects) >= size(e.skills) +where r.emp_id = e.emp_id and + c.company_id = r.company_id + +order by c.company_id, e.emp_id, r.review_id + diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/loj09.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/loj09.q new file mode 100644 index 00000000..a789a05f --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/loj09.q @@ -0,0 +1,6 @@ +select c1.company_id, r.review_id, e.emp_id +from company c1 left outer join company.reviews r on c1.company_id = r.company_id, company c2 left outer join company.department.team.employee e on c2.company_id = e.company_id +where c1.company_id = c2.company_id and + r.emp_id = e.emp_id +order by c1.company_id, r.review_id, e.emp_id + diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/loj10.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/loj10.q new file mode 100644 index 00000000..ca1c44ee --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/loj10.q @@ -0,0 +1,5 @@ +select c1.company_id, d.department_id, t.team_id +from company c1 left outer join company.department d on c1.company_id = d.company_id, nested tables(company c2 descendants(company.department.team t)) +where c1.company_id = c2.company_id and + d.department_id = t.department_id +order by 
c1.company_id, d.department_id, t.team_id \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/loj11.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/loj11.q new file mode 100644 index 00000000..4807df76 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/loj11.q @@ -0,0 +1,7 @@ +select r.company_id, d.department_id, e.emp_id, r.review_id +from company.reviews r, company c2 left outer join company.department d on c2.company_id = d.company_id, nested tables(company c3 descendants(company.department.team.employee e)) +where r.company_id = c2.company_id and + c2.company_id = c3.company_id and + r.emp_id = e.emp_id and + d.department_id = e.department_id +order by r.company_id, d.department_id, e.emp_id diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/nt01.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/nt01.q new file mode 100644 index 00000000..aa0b5d54 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/nt01.q @@ -0,0 +1,4 @@ +select c.company_id, d.company_id as dept_comp_id, d.department_id, t.team_id, e.emp_id +from company c, nested tables(company.department d descendants(company.department.team t, company.department.team.employee e)) +where c.company_id = e.company_id +order by c.company_id, d.company_id, d.department_id, t.team_id, e.emp_id \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/nt02.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/nt02.q new file mode 100644 index 00000000..61710a80 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/nt02.q @@ -0,0 +1,5 @@ +select c.company_id, d.company_id as dept_comp_id, d.department_id, e.emp_id, e.team_id as emp_team_id, t.team_id +from nested tables(company.department.team.employee e ancestors(company.department d, company c)), company.department.team t +where t.company_id = c.company_id and + e.team_id = t.team_id +order by c.company_id, d.company_id, d.department_id, t.team_id, e.emp_id \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/nt03.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/nt03.q new file mode 100644 index 00000000..b8019adb --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/nt03.q @@ -0,0 +1,6 @@ +select c.company_id, t.team_id, e.emp_id +from nested tables(company c descendants(company.department.team t, company.reviews r)), company.department.team.employee e +where t.company_id = e.company_id and + t.team_id = e.team_id and + r.emp_id = e.emp_id +order by c.company_id desc, t.team_id, e.emp_id \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/nt04.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/nt04.q new file mode 100644 index 00000000..a4087dd5 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/nt04.q @@ -0,0 +1,4 @@ +select c.company_id, t.company_id as tcomp_id, d.department_id, t.team_id, e.emp_id +from company c, nested tables(company.department.team t ancestors(company.department d) descendants(company.department.team.employee e)) +where c.company_id = e.company_id +order by c.company_id, d.department_id, t.team_id, e.emp_id \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/nt05.q 
b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/nt05.q new file mode 100644 index 00000000..f527a8bf --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/nt05.q @@ -0,0 +1,5 @@ +select t.company_id, d.department_id, t.team_id, e.emp_id, r.review_id +from company.reviews r, nested tables(company.department.team t ancestors(company.department d on d.established > cast("2016-10-20" AS TIMESTAMP)) descendants(company.department.team.employee e on size(e.projects) > 1)) +where r.company_id = e.company_id and + r.emp_id = e.emp_id +order by t.company_id, d.department_id, t.team_id, e.emp_id, r.review_id \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/nt06.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/nt06.q new file mode 100644 index 00000000..e9577663 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/nt06.q @@ -0,0 +1,6 @@ +select c1.company_id, e.emp_id, p.project_id +from nested tables(company c1 descendants(company.department.team.employee e)), nested tables(company c2 descendants(company.project p on p.project_milestones.Phase1 = "Completed") +) +where c1.company_id = c2.company_id and + p.project_id in e.projects[] +order by c1.company_id, e.emp_id, p.project_id diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q1.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q1.q new file mode 100644 index 00000000..beceba64 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q1.q @@ -0,0 +1,3 @@ +select d.company_id, d.department_id, d.name +from company c, company.department d +where c.company_id = d.company_id \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q10.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q10.q new file mode 100644 index 00000000..70e85764 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q10.q @@ -0,0 +1,7 @@ +select e.company_id, e.emp_id, $proj as project_id, p.name as project_name, c.client_id, c.name as client_name +from company.department.team.employee e, company.project p, company.client c, unnest(e.projects[] as $proj) +where e.company_id = p.company_id and + p.company_id = c.company_id and + $proj = p.project_id and + p.client_id = c.client_id +order by e.company_id, e.emp_id, c.client_id diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q11.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q11.q new file mode 100644 index 00000000..a945f9ee --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q11.q @@ -0,0 +1,5 @@ +select e.company_id, e.emp_id, e.name as emp_name, s.name as skill +from company.department.team.employee e, company.skill s +where e.company_id = s.company_id and + s.skill_id in e.skills[] +order by e.company_id, emp_id \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q12.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q12.q new file mode 100644 index 00000000..fcf10aeb --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q12.q @@ -0,0 +1,6 @@ +select c.company_id, c.name, p.project_id, $progress +from company.project p, company.client c, unnest(p.project_milestones.values() as $progress) +where p.company_id = c.company_id and + p.client_id = c.client_id and + "Email" in c.preferred_contact_methods[] 
+order by c.company_id, c.client_id, p.project_id diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q13.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q13.q new file mode 100644 index 00000000..8907735d --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q13.q @@ -0,0 +1,7 @@ +select t.company_id, t.team_id, count(*) as count, sum(s.skill_value) as total_skill, min(s.skill_value) as min, max(s.skill_value) as max +from company.department.team t, company.department.team.employee e, company.skill s, unnest(e.skills[] as $s) +where t.company_id = e.company_id and + e.company_id = s.company_id and + t.team_id = e.team_id and + $s = s.skill_id +group by t.company_id, t.team_id \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q14.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q14.q new file mode 100644 index 00000000..2e766f01 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q14.q @@ -0,0 +1,4 @@ +select distinct c.company_id +from company c, company.department d +where c.company_id = d.company_id +order by c.company_id desc \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q15.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q15.q new file mode 100644 index 00000000..7b2ab542 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q15.q @@ -0,0 +1,3 @@ +select c.company_id, t.team_id, size(t.technologies_used) as num_tech +from company c, company.department.team t +where c.company_id = t.company_id \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q16.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q16.q new file mode 100644 index 00000000..11470996 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q16.q @@ -0,0 +1,6 @@ +select c.company_id, d.department_id, year(established) as year, month(established) as month +from company c, company.department d +where c.company_id = d.company_id and + established > cast("2016-09-01" AS TIMESTAMP) and + established < cast("2024-04-05" AS TIMESTAMP) +order by c.company_id desc, department_id desc diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q18.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q18.q new file mode 100644 index 00000000..68084bf1 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q18.q @@ -0,0 +1,3 @@ +select * +from company c, company.no_records nr +where c.company_id = nr.company_id \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q19.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q19.q new file mode 100644 index 00000000..19f42108 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q19.q @@ -0,0 +1,7 @@ +select c.company_id, c.head_office_location, nr.record_id, nr.value +from company c, company.null_records nr +where c.company_id = nr.company_id and + nr.value is null and + c.head_office_location is not null +order by c.company_id + diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q2.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q2.q new file mode 100644 index 00000000..c7500267 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q2.q @@ -0,0 +1,3 @@ +select 
d.company_id, d.department_id, d.name +from company.department d, company c +where c.company_id = d.company_id \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q20.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q20.q new file mode 100644 index 00000000..468aeca7 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q20.q @@ -0,0 +1,5 @@ +select r1.company_id, r1.review_id, r1.emp_id, r2.feedback.comments +from company.reviews r1, company.reviews r2 +where r1.company_id = r2.company_id and + r1.feedback.reviewer_emp_id = r2.emp_id +order by r1.company_id, r1.review_id \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q21.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q21.q new file mode 100644 index 00000000..41229008 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q21.q @@ -0,0 +1,6 @@ +select $d.company_id, $d.department_id, $budget_sector, $nr.record_id, $nr.value +from company.department $d, company.null_records $nr, $d.budget_breakdown.keys() as $budget_sector +where $d.company_id = $nr.company_id and + $nr.value is not null and + ( $nr.value = 2147483647 or $nr.value = -2147483648) +order by $d.company_id, $d.department_id, $nr.record_id \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q22.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q22.q new file mode 100644 index 00000000..b86e786f --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q22.q @@ -0,0 +1,7 @@ +select c.company_id +from company c, company.department d, company.reviews r, company.department.team t, company.project p, company.department.team.employee e +where c.company_id = d.company_id and + d.company_id = r.company_id and + r.company_id = t.company_id and + t.company_id = p.company_id and + p.company_id = e.company_id \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q3.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q3.q new file mode 100644 index 00000000..a359069f --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q3.q @@ -0,0 +1,4 @@ +select e.company_id, e.emp_id, e.name +from company c, company.department.team.employee e +where c.company_id = e.company_id +order by e.company_id, e.emp_id \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q4.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q4.q new file mode 100644 index 00000000..bfb58ad3 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q4.q @@ -0,0 +1,4 @@ +select e.company_id, e.emp_id, e.name +from company.department.team.employee e, company c +where c.company_id = e.company_id +order by e.company_id, e.emp_id \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q5.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q5.q new file mode 100644 index 00000000..ea4b7c14 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q5.q @@ -0,0 +1,4 @@ +select e.company_id, e.emp_id, r.review_id +from company.department.team.employee e, company.reviews r +where r.company_id = e.company_id +order by e.company_id, e.emp_id, r.review_id \ No newline at end of file diff --git 
a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q6.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q6.q new file mode 100644 index 00000000..79b63cc7 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q6.q @@ -0,0 +1,4 @@ +select e.company_id, e.emp_id, r.review_id +from company.reviews r, company.department.team.employee e +where r.company_id = e.company_id +order by e.company_id, e.emp_id, r.review_id \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q7.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q7.q new file mode 100644 index 00000000..6cbfbf54 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q7.q @@ -0,0 +1,7 @@ +select e.company_id, d.department_id, e.emp_id, e.name, r.review_id +from company.department.team.employee e, company.department d, company.reviews r +where e.company_id = d.company_id and + e.company_id = r.company_id and + d.department_id = e.department_id and + r.emp_id = e.emp_id +order by e.company_id, d.department_id, e.emp_id, r.review_id \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q8.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q8.q new file mode 100644 index 00000000..7ecda68f --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q8.q @@ -0,0 +1,5 @@ +select e.company_id, d.department_id, count(*) as count +from company.department.team.employee e, company.department d, company.reviews r +where e.company_id = d.company_id and e.company_id = r.company_id +group by e.company_id, d.department_id +order by e.company_id, d.department_id desc \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q9.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q9.q new file mode 100644 index 00000000..5581c6bb --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/q/q9.q @@ -0,0 +1,9 @@ +select emp.company_id, emp.emp_id, reviewer.department_id as rev_dept_id, +reviewer.emp_id as rev_emp_id, r.feedback +from company.department.team.employee emp, company.department.team.employee +reviewer, company.reviews r +where emp.company_id = r.company_id and + emp.company_id = reviewer.company_id and + emp.emp_id = r.emp_id and + reviewer.emp_id = r.feedback.reviewer_emp_id +order by emp.company_id, emp.emp_id diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/test.config b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/test.config new file mode 100644 index 00000000..10162b82 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_inner_join/test.config @@ -0,0 +1,13 @@ +# +# Contains tests for inner joins +# + +before-ddl-file = before.ddl +before-data-file = before.data + +run-inner_join = q() = expres + +compile-inner_join = q() = explans + +after-ddl-file = after.ddl + diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_maths/explans/idx_atan2_dv.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_maths/explans/idx_atan2_dv.r index e4b23b6f..4910e272 100644 --- a/kvtest/kvquery-IT/src/main/resources/cases/idc_maths/explans/idx_atan2_dv.r +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_maths/explans/idx_atan2_dv.r @@ -24,37 +24,9 @@ compiled-query-plan "index scans" : [ { "equality conditions" : {}, - "range conditions" : {} + "range conditions" : { "atan2#dv@,3" : { "start value" : 0.0, "start
inclusive" : false } } } ], - "index filtering predicate" : - { - "iterator kind" : "GREATER_THAN", - "left operand" : - { - "iterator kind" : "ATAN2", - "input iterators" : [ - { - "iterator kind" : "FIELD_STEP", - "field name" : "atan2#dv@,3", - "input iterator" : - { - "iterator kind" : "VAR_REF", - "variable" : "$$t_idx" - } - }, - { - "iterator kind" : "CONST", - "value" : 3 - } - ] - }, - "right operand" : - { - "iterator kind" : "CONST", - "value" : 0.0 - } - }, "position in join" : 0 }, "FROM variable" : "$$t_idx", @@ -76,22 +48,13 @@ compiled-query-plan "field name" : "Column_2", "field expression" : { - "iterator kind" : "ATAN2", - "input iterators" : [ - { - "iterator kind" : "FIELD_STEP", - "field name" : "atan2#dv@,3", - "input iterator" : - { - "iterator kind" : "VAR_REF", - "variable" : "$$t_idx" - } - }, - { - "iterator kind" : "CONST", - "value" : 3 - } - ] + "iterator kind" : "FIELD_STEP", + "field name" : "atan2#dv@,3", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$t_idx" + } } } ] diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/after.ddl b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/after.ddl new file mode 100644 index 00000000..9fe7efe0 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/after.ddl @@ -0,0 +1,11 @@ +drop table users + +drop table jsoncol + +drop table limit + +drop table Foo.Child + +drop table Foo + +drop table A diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/before.data b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/before.data new file mode 100644 index 00000000..1b305fd2 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/before.data @@ -0,0 +1,1562 @@ +################## Data for Functional Testing ################ + +Table: users +{ + "sid1": 0, + "sid2": 1, + "pid1": 0, + "pid2": 1, + "name": "Newton", + "age": 20, + "seqNo": 56, + "info": { + "email": "paulsmith@yahoo.com", + "phones" : [ + { "areacode":415, "number":2840060, "kind":"office" }, + { "areacode":650, "number":3789021, "kind":"mobile" }, + { "areacode":415, "number":6096010, "kind":"home" } + ], + "address": { + "street": "560 Box Street Idledale", + "state": "North Dakota", + "zipcode": 15934 + }, + "height": 170, + "friends": [ + "Sharpe", + "Tamera" + ], + "favoriteColor": "green" + } +} +{ + "sid1": 0, + "sid2": 1, + "pid1": 0, + "pid2": 2, + "name": "Koch", + "age": 34, + "seqNo": 3, + "info": { + "email": "kochsmith@yahoo.com", + "phones" : [ + { "areacode":443, "number":2840313, "kind":"office" }, + { "areacode":212, "number":2993747, "kind":"mobile" }, + { "areacode":764, "number":8028673, "kind":"home" } + ], + "address": { + "street": "584 Beayer Place Fontanelle", + "state": "New York", + "zipcode": 42601 + }, + "height": 170, + "friends": [ + "Obrien" + ], + "favoriteColor": "red" + } +} +{ + "sid1": 0, + "sid2": 1, + "pid1": 0, + "pid2": 3, + "name": "Fischer", + "age": 28, + "seqNo": 45, + "info": { + "email": "fischersmith@yahoo.com", + "phones" : [ + { "areacode":756, "number":8474873, "kind":"office" }, + { "areacode":894, "number":5746587, "kind":"mobile" }, + { "areacode":918, "number":3298890, "kind":"home" } + ], + "address": { + "street": "996 Sedgwick Place Elfrida", + "state": "Iowa", + "zipcode": 15871 + }, + "height": 168, + "friends": [ + "Bianca" + ], + "favoriteColor": "green" + } +} +{ + "sid1": 1, + "sid2": 2, + "pid1": 1, + "pid2": 1, + "name": "Hardin", + "age": 28, + 
"seqNo": 34, + "info": { + "email": "jacksmith@yahoo.com", + "phones" : [ + { "areacode":345, "number":3854985, "kind":"office" }, + { "areacode":487, "number":5984589, "kind":"mobile" }, + { "areacode":839, "number":9820420, "kind":"home" } + ], + "address": { + "street": "923 Ridgewood Avenue Cumberland", + "state": "Nevada", + "zipcode": 43839 + }, + "height": 172, + "friends": [], + "favoriteColor": "red" + } +} +{ + "sid1": 1, + "sid2": 2, + "pid1": 1, + "pid2": 2, + "name": "Fowler", + "age": 39, + "seqNo": 89, + "info": { + "email": "stuartsmith@yahoo.com", + "phones" : [ + { "areacode":443, "number":5486085, "kind":"office" }, + { "areacode":857, "number":8745875, "kind":"mobile" }, + { "areacode":871, "number":6475754, "kind":"home" } + ], + "address": { + "street": "422 Brooklyn Avenue Westmoreland", + "state": "Maryland", + "zipcode": 22206 + }, + "height": 167, + "friends": [ + "Colette" + ], + "favoriteColor": "blue" + } +} +{ + "sid1": 1, + "sid2": 2, + "pid1": 1, + "pid2": 3, + "name": "Rosemarie", + "age": 26, + "seqNo": 0, + "info": { + "email": "rosemariesmith@yahoo.com", + "phones" : [ + { "areacode":857, "number":2840060, "kind":"office" }, + { "areacode":344, "number":3789021, "kind":"mobile" }, + { "areacode":543, "number":6096010, "kind":"home" } + ], + "address": { + "street": "960 Ferry Place Cornfields", + "state": "Palau", + "zipcode": 39198 + }, + "height": 162, + "friends": [ + "Cathryn" + ], + "favoriteColor": "blue" + } +} +{ + "sid1": 2, + "sid2": 3, + "pid1": 2, + "pid2": 1, + "name": "Caitlin", + "age": 36, + "seqNo": 1, + "info": { + "email": "caitlinsmith@yahoo.com", + "phones" : [ + { "areacode":424, "number":5455450, "kind":"office" }, + { "areacode":424, "number":4545521, "kind":"mobile" }, + { "areacode":645, "number":5545450, "kind":"home" } + ], + "address": { + "street": "360 Herkimer Court Bethany", + "state": "Wisconsin", + "zipcode": 35536 + }, + "height": 160, + "friends": [ + "Kidd" + ], + "favoriteColor": "blue" + } +} +{ + "sid1": 2, + "sid2": 3, + "pid1": 2, + "pid2": 2, + "name": "Maddox", + "age": 23, + "seqNo": 2, + "info": { + "email": "vinsmith@yahoo.com", + "phones" : [ + { "areacode":635, "number":2840060, "kind":"office" }, + { "areacode":875, "number":3545545, "kind":"mobile" }, + { "areacode":578, "number":4575488, "kind":"home" } + ], + "address": { + "street": "118 Thatford Avenue Neahkahnie", + "state": "Nebraska", + "zipcode": 31685 + }, + "height": 172, + "friends": [], + "favoriteColor": "blue" + } +} +{ + "sid1": 2, + "sid2": 3, + "pid1": 2, + "pid2": 3, + "name": "Trudy", + "age": 38, + "seqNo": 3, + "info": { + "email": "trudysmith@yahoo.com", + "phones" : [ + { "areacode":344, "number":9034994, "kind":"office" }, + { "areacode":432, "number":8328090, "kind":"mobile" }, + { "areacode":876, "number":4543535, "kind":"home" } + ], + "address": { + "street": "839 Bond Street Selma", + "state": "Pennsylvania", + "zipcode": 26918 + }, + "height": "169", + "friends": [], + "favoriteColor": "red" + } +} +{ + "sid1": 3, + "sid2": 4, + "pid1": 3, + "pid2": 1, + "name": "Ernestine", + "age": 22, + "seqNo": 48, + "info": { + "email": "chrissmith@yahoo.com", + "phones" : [ + { "areacode":434, "number":5343434, "kind":"office" }, + { "areacode":545, "number":2332323, "kind":"mobile" }, + { "areacode":323, "number":5433443, "kind":"home" } + ], + "address": { + "street": "345 Belmont Avenue Accoville", + "state": "American Samoa", + "zipcode": 43222 + }, + "height": 178, + "friends": [ + "Herman", + "Carmela" + ], + "favoriteColor": 
"green" + } +} +{ + "sid1": 3, + "sid2": 4, + "pid1": 3, + "pid2": 2, + "name": "Yash", + "age": 22, + "seqNo": 7, + "info": { + "email": "yashaga@yahoo.com", + "phones" : [ + { "areacode":444, "number":1111111, "kind":"office" }, + { "areacode":555, "number":2222222, "kind":"mobile" }, + { "areacode":666, "number":3333333, "kind":"home" } + ], + "address": { + "street": "345 RC Vyas Colony", + "state": "Rajasthan", + "zipcode": "unknown" + }, + "height": 178, + "friends": [ + "Kunal", + "Rajdeep" + ], + "favoriteColor": "black" + } +} + +Table: jsoncol + +{ + "majorKey1": "k1", + "majorKey2": "k2", + "minorKey": "m1", + "address": { + "name": "yash", + "pin": 247452, + "phones" : [ + { "areacode":831, "number":6734846, "kind":"mobile" } + ] + }, + "firstThread": false, + "index": 997, + "shard": null, + "partition": null +} +{ + "majorKey1": "k1", + "majorKey2": "k2", + "minorKey": "m2", + "address": { + "pin": 754, + "phones" : [ + { "areacode":745, "number":8747929, "kind":"mobile" } + ] + }, + "firstThread": false, + "index": 9, + "shard": null, + "partition": null +} +{ + "majorKey1": "j1", + "majorKey2": "j2", + "minorKey": "l1", + "address": { + "pin": 8437, + "phones" : { + "contact1": { "areacode":368, "number":8674289, "kind":"mobile"}, + "contact2": { "areacode":368, "number":9673449, "kind":"office"} + } + }, + "firstThread": false, + "index": 1, + "shard": null, + "partition": null +} +{ + "majorKey1": "j1", + "majorKey2": "j2", + "minorKey": "l2", + "address": { + "pin": 743683, + "phones" : [ + { "areacode":435, "number":7437849, "kind":"mobile"} + ] + }, + "firstThread": false, + "index": 56, + "shard": null, + "partition": null +} +{ + "majorKey1": "i1", + "majorKey2": "i2", + "minorKey": "n1", + "address": { + "pin": 5647, + "phones" : { + "contact1": { "areacode":501, "number":3123234, "kind":"mobile"}, + "contact2": { "areacode":501, "number":1332444, "kind":"office"} + } + }, + "firstThread": true, + "index": 89, + "shard": null, + "partition": null +} +{ + "majorKey1": "i1", + "majorKey2": "i2", + "minorKey": "n2", + "address": { + "pin": 9838, + "phones" : { + "contact1": { "areacode":435, "number":7437849, "kind":"mobile"} + } + }, + "firstThread": false, + "index": 498, + "shard": null, + "partition": null +} + +Table: Foo + +{ + "id" : 1, + "sid" : 0, + "record" : { "long": 40, "int" : 20, "string" : "aef", "bool" : true, "float": 5}, + "info": + { + "firstName":"first0", "lastName":"last0","age":10, + "address": + { + "city": "San Fransisco", + "state" : "CA", + "phones" : [ { "areacode" : 408, "number" : 50, "kind" : "work" }, + "650-234-4556", + 650234455 + ] + }, + "children": + { + "John" : { "age" : 10, "school" : "sch_1", "friends" : ["Anna", "John", "Maria"]}, + "Mary" : { "age" : 7, "school" : "sch_3", "friends" : ["Anna", "Mark"]} + } + } +} +{ + "id" : 2, + "sid" : 0, + "record" : { "long" : 50, "int" : 20, "string" : "xyz", "bool" : true, "float" : 3}, + "info": + { + "firstName":"first1", "lastName":"last1","age":11, + "address": + { + "city" : "Boston", + "state" : "MA", + "phones" : [ { "areacode" : 304, "number" : 30, "kind" : "work" }, + { "areacode" : 560, "number" : 55, "kind" : "work" } ] + }, + "children": + { + "Anna" : { "age" : 9, "school" : "sch_1", "friends" : ["Bobby", "John"]}, + "Dave" : { "age" : 15, "school" : "sch_3", "friends" : ["Bill", "Sam"]} + } + } +} + +Table: Foo.Child + +{ + "id" : 1, + "id2" : 2, + "sid" : 0, + "str" : "child1", + "ts" : "2000-12-20T12:30:01.234", + "info": + { + "firstName":"Yash", 
"lastName":"Agarwal","age":10, + "address": + { + "city": "San Fransisco", + "state" : "CA", + "phones" : [ + { "areacode" : 408, "number" : 80, "kind" : "work" } + ] + }, + "children": + { + "John" : { "age" : 10, "school" : "sch_1", "friends" : ["Anna", "John", "Maria"]}, + "Lisa" : null, + "Mary" : 7 + } + } +} +{ + "id" : 2, + "id2" : 3, + "sid" : 0, + "str" : "child1", + "ts" : "2001-12-20T12:30:01.235", + "info": + { + "firstName":"Kunal", "lastName":"Gupta","age":30, + "address": + { + "city": "San Jose", + "state" : "CA", + "phones" : [ + { "areacode" : 408, "number" : 70, "kind" : "work" }, + { "areacode" : 415, "number" : 74, "kind" : "home" } + ] + }, + "children": + { + "Kathy" : { "age" : 10, "school" : "sch_3", "friends" : ["Anna", "Mark", "Maria"]}, + "Will" : { "age" : 2, "school" : "sch_1", "friends" : ["Ada"]} + } + } +} + +Table: limit + +{"sid": 0, "pid": 0, "age": 97} +{"sid": 0, "pid": 1, "age": 23} +{"sid": 0, "pid": 2, "age": 69} +{"sid": 0, "pid": 3, "age": 90} +{"sid": 0, "pid": 4, "age": 97} +{"sid": 0, "pid": 5, "age": 34} +{"sid": 0, "pid": 6, "age": 29} +{"sid": 0, "pid": 7, "age": 62} +{"sid": 0, "pid": 8, "age": 23} +{"sid": 0, "pid": 9, "age": 22} +{"sid": 0, "pid": 10, "age": 36} +{"sid": 0, "pid": 11, "age": 34} +{"sid": 0, "pid": 12, "age": 31} +{"sid": 0, "pid": 13, "age": 32} +{"sid": 0, "pid": 14, "age": 74} +{"sid": 0, "pid": 15, "age": 20} +{"sid": 0, "pid": 16, "age": 71} +{"sid": 0, "pid": 17, "age": 17} +{"sid": 0, "pid": 18, "age": 44} +{"sid": 0, "pid": 19, "age": 18} +{"sid": 0, "pid": 20, "age": 56} +{"sid": 0, "pid": 21, "age": 23} +{"sid": 0, "pid": 22, "age": 32} +{"sid": 0, "pid": 23, "age": 43} +{"sid": 0, "pid": 24, "age": 7} +{"sid": 0, "pid": 25, "age": 36} +{"sid": 0, "pid": 26, "age": 94} +{"sid": 0, "pid": 27, "age": 82} +{"sid": 0, "pid": 28, "age": 71} +{"sid": 0, "pid": 29, "age": 47} +{"sid": 0, "pid": 30, "age": 31} +{"sid": 0, "pid": 31, "age": 69} +{"sid": 0, "pid": 32, "age": 22} +{"sid": 0, "pid": 33, "age": 52} +{"sid": 0, "pid": 34, "age": 11} +{"sid": 0, "pid": 35, "age": 19} +{"sid": 0, "pid": 36, "age": 39} +{"sid": 0, "pid": 37, "age": 92} +{"sid": 0, "pid": 38, "age": 81} +{"sid": 0, "pid": 39, "age": 62} +{"sid": 0, "pid": 40, "age": 14} +{"sid": 0, "pid": 41, "age": 69} +{"sid": 0, "pid": 42, "age": 97} +{"sid": 0, "pid": 43, "age": 98} +{"sid": 0, "pid": 44, "age": 53} +{"sid": 0, "pid": 45, "age": 23} +{"sid": 0, "pid": 46, "age": 18} +{"sid": 0, "pid": 47, "age": 24} +{"sid": 0, "pid": 48, "age": 93} +{"sid": 0, "pid": 49, "age": 14} +{"sid": 0, "pid": 50, "age": 94} +{"sid": 0, "pid": 51, "age": 49} +{"sid": 0, "pid": 52, "age": 38} +{"sid": 0, "pid": 53, "age": 78} +{"sid": 0, "pid": 54, "age": 44} +{"sid": 0, "pid": 55, "age": 97} +{"sid": 0, "pid": 56, "age": 15} +{"sid": 0, "pid": 57, "age": 90} +{"sid": 0, "pid": 58, "age": 79} +{"sid": 0, "pid": 59, "age": 86} +{"sid": 0, "pid": 60, "age": 37} +{"sid": 0, "pid": 61, "age": 62} +{"sid": 0, "pid": 62, "age": 7} +{"sid": 0, "pid": 63, "age": 59} +{"sid": 0, "pid": 64, "age": 67} +{"sid": 0, "pid": 65, "age": 18} +{"sid": 0, "pid": 66, "age": 79} +{"sid": 0, "pid": 67, "age": 6} +{"sid": 0, "pid": 68, "age": 63} +{"sid": 0, "pid": 69, "age": 12} +{"sid": 0, "pid": 70, "age": 68} +{"sid": 0, "pid": 71, "age": 29} +{"sid": 0, "pid": 72, "age": 34} +{"sid": 0, "pid": 73, "age": 17} +{"sid": 0, "pid": 74, "age": 27} +{"sid": 0, "pid": 75, "age": 87} +{"sid": 0, "pid": 76, "age": 93} +{"sid": 0, "pid": 77, "age": 97} +{"sid": 0, "pid": 78, "age": 64} +{"sid": 0, 
"pid": 79, "age": 86} +{"sid": 0, "pid": 80, "age": 64} +{"sid": 0, "pid": 81, "age": 58} +{"sid": 0, "pid": 82, "age": 87} +{"sid": 0, "pid": 83, "age": 54} +{"sid": 0, "pid": 84, "age": 37} +{"sid": 0, "pid": 85, "age": 32} +{"sid": 0, "pid": 86, "age": 3} +{"sid": 0, "pid": 87, "age": 4} +{"sid": 0, "pid": 88, "age": 22} +{"sid": 0, "pid": 89, "age": 34} +{"sid": 0, "pid": 90, "age": 90} +{"sid": 0, "pid": 91, "age": 60} +{"sid": 0, "pid": 92, "age": 96} +{"sid": 0, "pid": 93, "age": 50} +{"sid": 0, "pid": 94, "age": 71} +{"sid": 0, "pid": 95, "age": 15} +{"sid": 0, "pid": 96, "age": 20} +{"sid": 0, "pid": 97, "age": 2} +{"sid": 0, "pid": 98, "age": 21} +{"sid": 0, "pid": 99, "age": 83} +{"sid": 0, "pid": 100, "age": 15} +{"sid": 0, "pid": 101, "age": 42} +{"sid": 0, "pid": 102, "age": 65} +{"sid": 0, "pid": 103, "age": 1} +{"sid": 0, "pid": 104, "age": 59} +{"sid": 0, "pid": 105, "age": 44} +{"sid": 0, "pid": 106, "age": 88} +{"sid": 0, "pid": 107, "age": 52} +{"sid": 0, "pid": 108, "age": 42} +{"sid": 0, "pid": 109, "age": 52} +{"sid": 0, "pid": 110, "age": 90} +{"sid": 0, "pid": 111, "age": 58} +{"sid": 0, "pid": 112, "age": 63} +{"sid": 0, "pid": 113, "age": 78} +{"sid": 0, "pid": 114, "age": 12} +{"sid": 0, "pid": 115, "age": 0} +{"sid": 0, "pid": 116, "age": 62} +{"sid": 0, "pid": 117, "age": 15} +{"sid": 0, "pid": 118, "age": 4} +{"sid": 0, "pid": 119, "age": 84} +{"sid": 0, "pid": 120, "age": 49} +{"sid": 0, "pid": 121, "age": 46} +{"sid": 0, "pid": 122, "age": 96} +{"sid": 0, "pid": 123, "age": 97} +{"sid": 0, "pid": 124, "age": 96} +{"sid": 0, "pid": 125, "age": 20} +{"sid": 0, "pid": 126, "age": 13} +{"sid": 0, "pid": 127, "age": 69} +{"sid": 0, "pid": 128, "age": 22} +{"sid": 0, "pid": 129, "age": 34} +{"sid": 0, "pid": 130, "age": 4} +{"sid": 0, "pid": 131, "age": 37} +{"sid": 0, "pid": 132, "age": 76} +{"sid": 0, "pid": 133, "age": 69} +{"sid": 0, "pid": 134, "age": 90} +{"sid": 0, "pid": 135, "age": 88} +{"sid": 0, "pid": 136, "age": 14} +{"sid": 0, "pid": 137, "age": 79} +{"sid": 0, "pid": 138, "age": 40} +{"sid": 0, "pid": 139, "age": 8} +{"sid": 0, "pid": 140, "age": 83} +{"sid": 0, "pid": 141, "age": 31} +{"sid": 0, "pid": 142, "age": 66} +{"sid": 0, "pid": 143, "age": 46} +{"sid": 0, "pid": 144, "age": 61} +{"sid": 0, "pid": 145, "age": 78} +{"sid": 0, "pid": 146, "age": 46} +{"sid": 0, "pid": 147, "age": 23} +{"sid": 0, "pid": 148, "age": 45} +{"sid": 0, "pid": 149, "age": 2} +{"sid": 0, "pid": 150, "age": 59} +{"sid": 0, "pid": 151, "age": 94} +{"sid": 0, "pid": 152, "age": 1} +{"sid": 0, "pid": 153, "age": 56} +{"sid": 0, "pid": 154, "age": 43} +{"sid": 0, "pid": 155, "age": 49} +{"sid": 0, "pid": 156, "age": 76} +{"sid": 0, "pid": 157, "age": 56} +{"sid": 0, "pid": 158, "age": 18} +{"sid": 0, "pid": 159, "age": 50} +{"sid": 0, "pid": 160, "age": 91} +{"sid": 0, "pid": 161, "age": 23} +{"sid": 0, "pid": 162, "age": 40} +{"sid": 0, "pid": 163, "age": 19} +{"sid": 0, "pid": 164, "age": 44} +{"sid": 0, "pid": 165, "age": 30} +{"sid": 0, "pid": 166, "age": 7} +{"sid": 0, "pid": 167, "age": 58} +{"sid": 0, "pid": 168, "age": 9} +{"sid": 0, "pid": 169, "age": 0} +{"sid": 0, "pid": 170, "age": 18} +{"sid": 0, "pid": 171, "age": 93} +{"sid": 0, "pid": 172, "age": 83} +{"sid": 0, "pid": 173, "age": 84} +{"sid": 0, "pid": 174, "age": 91} +{"sid": 0, "pid": 175, "age": 96} +{"sid": 0, "pid": 176, "age": 14} +{"sid": 0, "pid": 177, "age": 38} +{"sid": 0, "pid": 178, "age": 19} +{"sid": 0, "pid": 179, "age": 59} +{"sid": 0, "pid": 180, "age": 92} +{"sid": 0, "pid": 181, "age": 
78} +{"sid": 0, "pid": 182, "age": 5} +{"sid": 0, "pid": 183, "age": 93} +{"sid": 0, "pid": 184, "age": 86} +{"sid": 0, "pid": 185, "age": 49} +{"sid": 0, "pid": 186, "age": 43} +{"sid": 0, "pid": 187, "age": 14} +{"sid": 0, "pid": 188, "age": 57} +{"sid": 0, "pid": 189, "age": 13} +{"sid": 0, "pid": 190, "age": 65} +{"sid": 0, "pid": 191, "age": 0} +{"sid": 0, "pid": 192, "age": 36} +{"sid": 0, "pid": 193, "age": 5} +{"sid": 0, "pid": 194, "age": 20} +{"sid": 0, "pid": 195, "age": 81} +{"sid": 0, "pid": 196, "age": 35} +{"sid": 0, "pid": 197, "age": 79} +{"sid": 0, "pid": 198, "age": 91} +{"sid": 0, "pid": 199, "age": 97} +{"sid": 0, "pid": 200, "age": 31} +{"sid": 0, "pid": 201, "age": 10} +{"sid": 0, "pid": 202, "age": 42} +{"sid": 0, "pid": 203, "age": 14} +{"sid": 0, "pid": 204, "age": 46} +{"sid": 0, "pid": 205, "age": 33} +{"sid": 0, "pid": 206, "age": 10} +{"sid": 0, "pid": 207, "age": 61} +{"sid": 0, "pid": 208, "age": 23} +{"sid": 0, "pid": 209, "age": 81} +{"sid": 0, "pid": 210, "age": 72} +{"sid": 0, "pid": 211, "age": 16} +{"sid": 0, "pid": 212, "age": 12} +{"sid": 0, "pid": 213, "age": 30} +{"sid": 0, "pid": 214, "age": 61} +{"sid": 0, "pid": 215, "age": 98} +{"sid": 0, "pid": 216, "age": 31} +{"sid": 0, "pid": 217, "age": 4} +{"sid": 0, "pid": 218, "age": 13} +{"sid": 0, "pid": 219, "age": 88} +{"sid": 0, "pid": 220, "age": 70} +{"sid": 0, "pid": 221, "age": 30} +{"sid": 0, "pid": 222, "age": 89} +{"sid": 0, "pid": 223, "age": 6} +{"sid": 0, "pid": 224, "age": 87} +{"sid": 0, "pid": 225, "age": 9} +{"sid": 0, "pid": 226, "age": 39} +{"sid": 0, "pid": 227, "age": 22} +{"sid": 0, "pid": 228, "age": 40} +{"sid": 0, "pid": 229, "age": 83} +{"sid": 0, "pid": 230, "age": 71} +{"sid": 0, "pid": 231, "age": 72} +{"sid": 0, "pid": 232, "age": 93} +{"sid": 0, "pid": 233, "age": 13} +{"sid": 0, "pid": 234, "age": 38} +{"sid": 0, "pid": 235, "age": 91} +{"sid": 0, "pid": 236, "age": 47} +{"sid": 0, "pid": 237, "age": 1} +{"sid": 0, "pid": 238, "age": 4} +{"sid": 0, "pid": 239, "age": 70} +{"sid": 0, "pid": 240, "age": 82} +{"sid": 0, "pid": 241, "age": 77} +{"sid": 0, "pid": 242, "age": 86} +{"sid": 0, "pid": 243, "age": 94} +{"sid": 0, "pid": 244, "age": 59} +{"sid": 0, "pid": 245, "age": 48} +{"sid": 0, "pid": 246, "age": 45} +{"sid": 0, "pid": 247, "age": 90} +{"sid": 0, "pid": 248, "age": 4} +{"sid": 0, "pid": 249, "age": 10} +{"sid": 0, "pid": 250, "age": 78} +{"sid": 0, "pid": 251, "age": 74} +{"sid": 0, "pid": 252, "age": 40} +{"sid": 0, "pid": 253, "age": 67} +{"sid": 0, "pid": 254, "age": 33} +{"sid": 0, "pid": 255, "age": 27} +{"sid": 0, "pid": 256, "age": 28} +{"sid": 0, "pid": 257, "age": 24} +{"sid": 0, "pid": 258, "age": 1} +{"sid": 0, "pid": 259, "age": 69} +{"sid": 0, "pid": 260, "age": 7} +{"sid": 0, "pid": 261, "age": 73} +{"sid": 0, "pid": 262, "age": 41} +{"sid": 0, "pid": 263, "age": 52} +{"sid": 0, "pid": 264, "age": 86} +{"sid": 0, "pid": 265, "age": 31} +{"sid": 0, "pid": 266, "age": 44} +{"sid": 0, "pid": 267, "age": 85} +{"sid": 0, "pid": 268, "age": 32} +{"sid": 0, "pid": 269, "age": 48} +{"sid": 0, "pid": 270, "age": 56} +{"sid": 0, "pid": 271, "age": 67} +{"sid": 0, "pid": 272, "age": 77} +{"sid": 0, "pid": 273, "age": 94} +{"sid": 0, "pid": 274, "age": 61} +{"sid": 0, "pid": 275, "age": 36} +{"sid": 0, "pid": 276, "age": 94} +{"sid": 0, "pid": 277, "age": 58} +{"sid": 0, "pid": 278, "age": 78} +{"sid": 0, "pid": 279, "age": 99} +{"sid": 0, "pid": 280, "age": 68} +{"sid": 0, "pid": 281, "age": 57} +{"sid": 0, "pid": 282, "age": 25} +{"sid": 0, "pid": 283, 
"age": 60} +{"sid": 0, "pid": 284, "age": 24} +{"sid": 0, "pid": 285, "age": 58} +{"sid": 0, "pid": 286, "age": 39} +{"sid": 0, "pid": 287, "age": 53} +{"sid": 0, "pid": 288, "age": 35} +{"sid": 0, "pid": 289, "age": 41} +{"sid": 0, "pid": 290, "age": 74} +{"sid": 0, "pid": 291, "age": 42} +{"sid": 0, "pid": 292, "age": 66} +{"sid": 0, "pid": 293, "age": 15} +{"sid": 0, "pid": 294, "age": 47} +{"sid": 0, "pid": 295, "age": 4} +{"sid": 0, "pid": 296, "age": 98} +{"sid": 0, "pid": 297, "age": 43} +{"sid": 0, "pid": 298, "age": 90} +{"sid": 0, "pid": 299, "age": 31} +{"sid": 0, "pid": 300, "age": 43} +{"sid": 0, "pid": 301, "age": 46} +{"sid": 0, "pid": 302, "age": 50} +{"sid": 0, "pid": 303, "age": 21} +{"sid": 0, "pid": 304, "age": 40} +{"sid": 0, "pid": 305, "age": 11} +{"sid": 0, "pid": 306, "age": 9} +{"sid": 0, "pid": 307, "age": 35} +{"sid": 0, "pid": 308, "age": 22} +{"sid": 0, "pid": 309, "age": 88} +{"sid": 0, "pid": 310, "age": 86} +{"sid": 0, "pid": 311, "age": 90} +{"sid": 0, "pid": 312, "age": 45} +{"sid": 0, "pid": 313, "age": 11} +{"sid": 0, "pid": 314, "age": 3} +{"sid": 0, "pid": 315, "age": 21} +{"sid": 0, "pid": 316, "age": 22} +{"sid": 0, "pid": 317, "age": 42} +{"sid": 0, "pid": 318, "age": 26} +{"sid": 0, "pid": 319, "age": 9} +{"sid": 0, "pid": 320, "age": 83} +{"sid": 0, "pid": 321, "age": 0} +{"sid": 0, "pid": 322, "age": 3} +{"sid": 0, "pid": 323, "age": 1} +{"sid": 0, "pid": 324, "age": 67} +{"sid": 0, "pid": 325, "age": 50} +{"sid": 0, "pid": 326, "age": 6} +{"sid": 0, "pid": 327, "age": 66} +{"sid": 0, "pid": 328, "age": 93} +{"sid": 0, "pid": 329, "age": 96} +{"sid": 0, "pid": 330, "age": 49} +{"sid": 0, "pid": 331, "age": 37} +{"sid": 0, "pid": 332, "age": 94} +{"sid": 0, "pid": 333, "age": 99} +{"sid": 0, "pid": 334, "age": 10} +{"sid": 0, "pid": 335, "age": 34} +{"sid": 0, "pid": 336, "age": 62} +{"sid": 0, "pid": 337, "age": 71} +{"sid": 0, "pid": 338, "age": 21} +{"sid": 0, "pid": 339, "age": 84} +{"sid": 0, "pid": 340, "age": 59} +{"sid": 0, "pid": 341, "age": 59} +{"sid": 0, "pid": 342, "age": 27} +{"sid": 0, "pid": 343, "age": 56} +{"sid": 0, "pid": 344, "age": 23} +{"sid": 0, "pid": 345, "age": 30} +{"sid": 0, "pid": 346, "age": 30} +{"sid": 0, "pid": 347, "age": 97} +{"sid": 0, "pid": 348, "age": 72} +{"sid": 0, "pid": 349, "age": 56} +{"sid": 0, "pid": 350, "age": 6} +{"sid": 0, "pid": 351, "age": 8} +{"sid": 0, "pid": 352, "age": 57} +{"sid": 0, "pid": 353, "age": 9} +{"sid": 0, "pid": 354, "age": 9} +{"sid": 0, "pid": 355, "age": 76} +{"sid": 0, "pid": 356, "age": 60} +{"sid": 0, "pid": 357, "age": 15} +{"sid": 0, "pid": 358, "age": 42} +{"sid": 0, "pid": 359, "age": 5} +{"sid": 0, "pid": 360, "age": 63} +{"sid": 0, "pid": 361, "age": 91} +{"sid": 0, "pid": 362, "age": 42} +{"sid": 0, "pid": 363, "age": 57} +{"sid": 0, "pid": 364, "age": 42} +{"sid": 0, "pid": 365, "age": 4} +{"sid": 0, "pid": 366, "age": 44} +{"sid": 0, "pid": 367, "age": 5} +{"sid": 0, "pid": 368, "age": 76} +{"sid": 0, "pid": 369, "age": 65} +{"sid": 0, "pid": 370, "age": 89} +{"sid": 0, "pid": 371, "age": 35} +{"sid": 0, "pid": 372, "age": 77} +{"sid": 0, "pid": 373, "age": 68} +{"sid": 0, "pid": 374, "age": 44} +{"sid": 0, "pid": 375, "age": 0} +{"sid": 0, "pid": 376, "age": 98} +{"sid": 0, "pid": 377, "age": 26} +{"sid": 0, "pid": 378, "age": 97} +{"sid": 0, "pid": 379, "age": 71} +{"sid": 0, "pid": 380, "age": 82} +{"sid": 0, "pid": 381, "age": 3} +{"sid": 0, "pid": 382, "age": 79} +{"sid": 0, "pid": 383, "age": 91} +{"sid": 0, "pid": 384, "age": 64} +{"sid": 0, "pid": 385, 
"age": 40} +{"sid": 0, "pid": 386, "age": 68} +{"sid": 0, "pid": 387, "age": 76} +{"sid": 0, "pid": 388, "age": 8} +{"sid": 0, "pid": 389, "age": 62} +{"sid": 0, "pid": 390, "age": 82} +{"sid": 0, "pid": 391, "age": 71} +{"sid": 0, "pid": 392, "age": 6} +{"sid": 0, "pid": 393, "age": 76} +{"sid": 0, "pid": 394, "age": 29} +{"sid": 0, "pid": 395, "age": 48} +{"sid": 0, "pid": 396, "age": 81} +{"sid": 0, "pid": 397, "age": 73} +{"sid": 0, "pid": 398, "age": 5} +{"sid": 0, "pid": 399, "age": 57} +{"sid": 0, "pid": 400, "age": 90} +{"sid": 0, "pid": 401, "age": 47} +{"sid": 0, "pid": 402, "age": 44} +{"sid": 0, "pid": 403, "age": 67} +{"sid": 0, "pid": 404, "age": 15} +{"sid": 0, "pid": 405, "age": 88} +{"sid": 0, "pid": 406, "age": 67} +{"sid": 0, "pid": 407, "age": 14} +{"sid": 0, "pid": 408, "age": 14} +{"sid": 0, "pid": 409, "age": 64} +{"sid": 0, "pid": 410, "age": 37} +{"sid": 0, "pid": 411, "age": 49} +{"sid": 0, "pid": 412, "age": 19} +{"sid": 0, "pid": 413, "age": 68} +{"sid": 0, "pid": 414, "age": 40} +{"sid": 0, "pid": 415, "age": 36} +{"sid": 0, "pid": 416, "age": 8} +{"sid": 0, "pid": 417, "age": 60} +{"sid": 0, "pid": 418, "age": 12} +{"sid": 0, "pid": 419, "age": 16} +{"sid": 0, "pid": 420, "age": 23} +{"sid": 0, "pid": 421, "age": 94} +{"sid": 0, "pid": 422, "age": 88} +{"sid": 0, "pid": 423, "age": 81} +{"sid": 0, "pid": 424, "age": 71} +{"sid": 0, "pid": 425, "age": 69} +{"sid": 0, "pid": 426, "age": 29} +{"sid": 0, "pid": 427, "age": 52} +{"sid": 0, "pid": 428, "age": 42} +{"sid": 0, "pid": 429, "age": 35} +{"sid": 0, "pid": 430, "age": 61} +{"sid": 0, "pid": 431, "age": 32} +{"sid": 0, "pid": 432, "age": 82} +{"sid": 0, "pid": 433, "age": 5} +{"sid": 0, "pid": 434, "age": 52} +{"sid": 0, "pid": 435, "age": 97} +{"sid": 0, "pid": 436, "age": 46} +{"sid": 0, "pid": 437, "age": 19} +{"sid": 0, "pid": 438, "age": 63} +{"sid": 0, "pid": 439, "age": 12} +{"sid": 0, "pid": 440, "age": 36} +{"sid": 0, "pid": 441, "age": 0} +{"sid": 0, "pid": 442, "age": 61} +{"sid": 0, "pid": 443, "age": 7} +{"sid": 0, "pid": 444, "age": 20} +{"sid": 0, "pid": 445, "age": 54} +{"sid": 0, "pid": 446, "age": 43} +{"sid": 0, "pid": 447, "age": 29} +{"sid": 0, "pid": 448, "age": 66} +{"sid": 0, "pid": 449, "age": 56} +{"sid": 0, "pid": 450, "age": 97} +{"sid": 0, "pid": 451, "age": 41} +{"sid": 0, "pid": 452, "age": 2} +{"sid": 0, "pid": 453, "age": 37} +{"sid": 0, "pid": 454, "age": 22} +{"sid": 0, "pid": 455, "age": 73} +{"sid": 0, "pid": 456, "age": 6} +{"sid": 0, "pid": 457, "age": 52} +{"sid": 0, "pid": 458, "age": 77} +{"sid": 0, "pid": 459, "age": 0} +{"sid": 0, "pid": 460, "age": 39} +{"sid": 0, "pid": 461, "age": 38} +{"sid": 0, "pid": 462, "age": 85} +{"sid": 0, "pid": 463, "age": 73} +{"sid": 0, "pid": 464, "age": 96} +{"sid": 0, "pid": 465, "age": 37} +{"sid": 0, "pid": 466, "age": 70} +{"sid": 0, "pid": 467, "age": 42} +{"sid": 0, "pid": 468, "age": 8} +{"sid": 0, "pid": 469, "age": 86} +{"sid": 0, "pid": 470, "age": 54} +{"sid": 0, "pid": 471, "age": 44} +{"sid": 0, "pid": 472, "age": 38} +{"sid": 0, "pid": 473, "age": 68} +{"sid": 0, "pid": 474, "age": 4} +{"sid": 0, "pid": 475, "age": 59} +{"sid": 0, "pid": 476, "age": 74} +{"sid": 0, "pid": 477, "age": 47} +{"sid": 0, "pid": 478, "age": 40} +{"sid": 0, "pid": 479, "age": 40} +{"sid": 0, "pid": 480, "age": 55} +{"sid": 0, "pid": 481, "age": 37} +{"sid": 0, "pid": 482, "age": 82} +{"sid": 0, "pid": 483, "age": 58} +{"sid": 0, "pid": 484, "age": 27} +{"sid": 0, "pid": 485, "age": 4} +{"sid": 0, "pid": 486, "age": 83} +{"sid": 0, "pid": 
487, "age": 33} +{"sid": 0, "pid": 488, "age": 8} +{"sid": 0, "pid": 489, "age": 13} +{"sid": 0, "pid": 490, "age": 86} +{"sid": 0, "pid": 491, "age": 47} +{"sid": 0, "pid": 492, "age": 3} +{"sid": 0, "pid": 493, "age": 71} +{"sid": 0, "pid": 494, "age": 72} +{"sid": 0, "pid": 495, "age": 99} +{"sid": 0, "pid": 496, "age": 8} +{"sid": 0, "pid": 497, "age": 43} +{"sid": 0, "pid": 498, "age": 41} +{"sid": 0, "pid": 499, "age": 16} +{"sid": 0, "pid": 500, "age": 81} +{"sid": 0, "pid": 501, "age": 48} +{"sid": 0, "pid": 502, "age": 13} +{"sid": 0, "pid": 503, "age": 19} +{"sid": 0, "pid": 504, "age": 68} +{"sid": 0, "pid": 505, "age": 17} +{"sid": 0, "pid": 506, "age": 78} +{"sid": 0, "pid": 507, "age": 42} +{"sid": 0, "pid": 508, "age": 16} +{"sid": 0, "pid": 509, "age": 18} +{"sid": 0, "pid": 510, "age": 82} +{"sid": 0, "pid": 511, "age": 24} +{"sid": 0, "pid": 512, "age": 8} +{"sid": 0, "pid": 513, "age": 16} +{"sid": 0, "pid": 514, "age": 82} +{"sid": 0, "pid": 515, "age": 35} +{"sid": 0, "pid": 516, "age": 21} +{"sid": 0, "pid": 517, "age": 17} +{"sid": 0, "pid": 518, "age": 20} +{"sid": 0, "pid": 519, "age": 81} +{"sid": 0, "pid": 520, "age": 30} +{"sid": 0, "pid": 521, "age": 6} +{"sid": 0, "pid": 522, "age": 81} +{"sid": 0, "pid": 523, "age": 34} +{"sid": 0, "pid": 524, "age": 77} +{"sid": 0, "pid": 525, "age": 53} +{"sid": 0, "pid": 526, "age": 85} +{"sid": 0, "pid": 527, "age": 37} +{"sid": 0, "pid": 528, "age": 48} +{"sid": 0, "pid": 529, "age": 79} +{"sid": 0, "pid": 530, "age": 6} +{"sid": 0, "pid": 531, "age": 29} +{"sid": 0, "pid": 532, "age": 27} +{"sid": 0, "pid": 533, "age": 71} +{"sid": 0, "pid": 534, "age": 1} +{"sid": 0, "pid": 535, "age": 95} +{"sid": 0, "pid": 536, "age": 40} +{"sid": 0, "pid": 537, "age": 79} +{"sid": 0, "pid": 538, "age": 37} +{"sid": 0, "pid": 539, "age": 56} +{"sid": 0, "pid": 540, "age": 50} +{"sid": 0, "pid": 541, "age": 71} +{"sid": 0, "pid": 542, "age": 80} +{"sid": 0, "pid": 543, "age": 58} +{"sid": 0, "pid": 544, "age": 88} +{"sid": 0, "pid": 545, "age": 14} +{"sid": 0, "pid": 546, "age": 93} +{"sid": 0, "pid": 547, "age": 61} +{"sid": 0, "pid": 548, "age": 84} +{"sid": 0, "pid": 549, "age": 13} +{"sid": 0, "pid": 550, "age": 94} +{"sid": 0, "pid": 551, "age": 66} +{"sid": 0, "pid": 552, "age": 72} +{"sid": 0, "pid": 553, "age": 75} +{"sid": 0, "pid": 554, "age": 52} +{"sid": 0, "pid": 555, "age": 49} +{"sid": 0, "pid": 556, "age": 81} +{"sid": 0, "pid": 557, "age": 38} +{"sid": 0, "pid": 558, "age": 39} +{"sid": 0, "pid": 559, "age": 29} +{"sid": 0, "pid": 560, "age": 17} +{"sid": 0, "pid": 561, "age": 45} +{"sid": 0, "pid": 562, "age": 11} +{"sid": 0, "pid": 563, "age": 44} +{"sid": 0, "pid": 564, "age": 68} +{"sid": 0, "pid": 565, "age": 12} +{"sid": 0, "pid": 566, "age": 91} +{"sid": 0, "pid": 567, "age": 8} +{"sid": 0, "pid": 568, "age": 43} +{"sid": 0, "pid": 569, "age": 80} +{"sid": 0, "pid": 570, "age": 16} +{"sid": 0, "pid": 571, "age": 93} +{"sid": 0, "pid": 572, "age": 3} +{"sid": 0, "pid": 573, "age": 97} +{"sid": 0, "pid": 574, "age": 3} +{"sid": 0, "pid": 575, "age": 91} +{"sid": 0, "pid": 576, "age": 63} +{"sid": 0, "pid": 577, "age": 48} +{"sid": 0, "pid": 578, "age": 4} +{"sid": 0, "pid": 579, "age": 47} +{"sid": 0, "pid": 580, "age": 14} +{"sid": 0, "pid": 581, "age": 99} +{"sid": 0, "pid": 582, "age": 14} +{"sid": 0, "pid": 583, "age": 86} +{"sid": 0, "pid": 584, "age": 26} +{"sid": 0, "pid": 585, "age": 66} +{"sid": 0, "pid": 586, "age": 35} +{"sid": 0, "pid": 587, "age": 7} +{"sid": 0, "pid": 588, "age": 56} +{"sid": 0, 
"pid": 589, "age": 26} +{"sid": 0, "pid": 590, "age": 89} +{"sid": 0, "pid": 591, "age": 73} +{"sid": 0, "pid": 592, "age": 23} +{"sid": 0, "pid": 593, "age": 52} +{"sid": 0, "pid": 594, "age": 69} +{"sid": 0, "pid": 595, "age": 91} +{"sid": 0, "pid": 596, "age": 16} +{"sid": 0, "pid": 597, "age": 60} +{"sid": 0, "pid": 598, "age": 99} +{"sid": 0, "pid": 599, "age": 59} +{"sid": 0, "pid": 600, "age": 92} +{"sid": 0, "pid": 601, "age": 16} +{"sid": 0, "pid": 602, "age": 5} +{"sid": 0, "pid": 603, "age": 96} +{"sid": 0, "pid": 604, "age": 65} +{"sid": 0, "pid": 605, "age": 60} +{"sid": 0, "pid": 606, "age": 39} +{"sid": 0, "pid": 607, "age": 80} +{"sid": 0, "pid": 608, "age": 9} +{"sid": 0, "pid": 609, "age": 44} +{"sid": 0, "pid": 610, "age": 80} +{"sid": 0, "pid": 611, "age": 23} +{"sid": 0, "pid": 612, "age": 95} +{"sid": 0, "pid": 613, "age": 94} +{"sid": 0, "pid": 614, "age": 9} +{"sid": 0, "pid": 615, "age": 73} +{"sid": 0, "pid": 616, "age": 12} +{"sid": 0, "pid": 617, "age": 96} +{"sid": 0, "pid": 618, "age": 81} +{"sid": 0, "pid": 619, "age": 69} +{"sid": 0, "pid": 620, "age": 75} +{"sid": 0, "pid": 621, "age": 22} +{"sid": 0, "pid": 622, "age": 94} +{"sid": 0, "pid": 623, "age": 98} +{"sid": 0, "pid": 624, "age": 74} +{"sid": 0, "pid": 625, "age": 64} +{"sid": 0, "pid": 626, "age": 42} +{"sid": 0, "pid": 627, "age": 42} +{"sid": 0, "pid": 628, "age": 76} +{"sid": 0, "pid": 629, "age": 41} +{"sid": 0, "pid": 630, "age": 53} +{"sid": 0, "pid": 631, "age": 21} +{"sid": 0, "pid": 632, "age": 9} +{"sid": 0, "pid": 633, "age": 58} +{"sid": 0, "pid": 634, "age": 17} +{"sid": 0, "pid": 635, "age": 26} +{"sid": 0, "pid": 636, "age": 71} +{"sid": 0, "pid": 637, "age": 56} +{"sid": 0, "pid": 638, "age": 7} +{"sid": 0, "pid": 639, "age": 80} +{"sid": 0, "pid": 640, "age": 52} +{"sid": 0, "pid": 641, "age": 87} +{"sid": 0, "pid": 642, "age": 55} +{"sid": 0, "pid": 643, "age": 99} +{"sid": 0, "pid": 644, "age": 33} +{"sid": 0, "pid": 645, "age": 16} +{"sid": 0, "pid": 646, "age": 73} +{"sid": 0, "pid": 647, "age": 97} +{"sid": 0, "pid": 648, "age": 12} +{"sid": 0, "pid": 649, "age": 6} +{"sid": 0, "pid": 650, "age": 18} +{"sid": 0, "pid": 651, "age": 39} +{"sid": 0, "pid": 652, "age": 28} +{"sid": 0, "pid": 653, "age": 65} +{"sid": 0, "pid": 654, "age": 38} +{"sid": 0, "pid": 655, "age": 54} +{"sid": 0, "pid": 656, "age": 29} +{"sid": 0, "pid": 657, "age": 80} +{"sid": 0, "pid": 658, "age": 96} +{"sid": 0, "pid": 659, "age": 57} +{"sid": 0, "pid": 660, "age": 73} +{"sid": 0, "pid": 661, "age": 49} +{"sid": 0, "pid": 662, "age": 78} +{"sid": 0, "pid": 663, "age": 35} +{"sid": 0, "pid": 664, "age": 60} +{"sid": 0, "pid": 665, "age": 47} +{"sid": 0, "pid": 666, "age": 61} +{"sid": 0, "pid": 667, "age": 31} +{"sid": 0, "pid": 668, "age": 56} +{"sid": 0, "pid": 669, "age": 20} +{"sid": 0, "pid": 670, "age": 63} +{"sid": 0, "pid": 671, "age": 60} +{"sid": 0, "pid": 672, "age": 59} +{"sid": 0, "pid": 673, "age": 18} +{"sid": 0, "pid": 674, "age": 60} +{"sid": 0, "pid": 675, "age": 44} +{"sid": 0, "pid": 676, "age": 34} +{"sid": 0, "pid": 677, "age": 33} +{"sid": 0, "pid": 678, "age": 94} +{"sid": 0, "pid": 679, "age": 98} +{"sid": 0, "pid": 680, "age": 39} +{"sid": 0, "pid": 681, "age": 12} +{"sid": 0, "pid": 682, "age": 38} +{"sid": 0, "pid": 683, "age": 19} +{"sid": 0, "pid": 684, "age": 77} +{"sid": 0, "pid": 685, "age": 28} +{"sid": 0, "pid": 686, "age": 25} +{"sid": 0, "pid": 687, "age": 58} +{"sid": 0, "pid": 688, "age": 60} +{"sid": 0, "pid": 689, "age": 73} +{"sid": 0, "pid": 690, "age": 16} 
+{"sid": 0, "pid": 691, "age": 33} +{"sid": 0, "pid": 692, "age": 74} +{"sid": 0, "pid": 693, "age": 46} +{"sid": 0, "pid": 694, "age": 68} +{"sid": 0, "pid": 695, "age": 34} +{"sid": 0, "pid": 696, "age": 94} +{"sid": 0, "pid": 697, "age": 82} +{"sid": 0, "pid": 698, "age": 17} +{"sid": 0, "pid": 699, "age": 2} +{"sid": 0, "pid": 700, "age": 2} +{"sid": 0, "pid": 701, "age": 32} +{"sid": 0, "pid": 702, "age": 62} +{"sid": 0, "pid": 703, "age": 14} +{"sid": 0, "pid": 704, "age": 50} +{"sid": 0, "pid": 705, "age": 22} +{"sid": 0, "pid": 706, "age": 58} +{"sid": 0, "pid": 707, "age": 36} +{"sid": 0, "pid": 708, "age": 7} +{"sid": 0, "pid": 709, "age": 52} +{"sid": 0, "pid": 710, "age": 87} +{"sid": 0, "pid": 711, "age": 98} +{"sid": 0, "pid": 712, "age": 17} +{"sid": 0, "pid": 713, "age": 77} +{"sid": 0, "pid": 714, "age": 17} +{"sid": 0, "pid": 715, "age": 46} +{"sid": 0, "pid": 716, "age": 5} +{"sid": 0, "pid": 717, "age": 94} +{"sid": 0, "pid": 718, "age": 5} +{"sid": 0, "pid": 719, "age": 65} +{"sid": 0, "pid": 720, "age": 67} +{"sid": 0, "pid": 721, "age": 73} +{"sid": 0, "pid": 722, "age": 50} +{"sid": 0, "pid": 723, "age": 42} +{"sid": 0, "pid": 724, "age": 71} +{"sid": 0, "pid": 725, "age": 71} +{"sid": 0, "pid": 726, "age": 28} +{"sid": 0, "pid": 727, "age": 65} +{"sid": 0, "pid": 728, "age": 53} +{"sid": 0, "pid": 729, "age": 98} +{"sid": 0, "pid": 730, "age": 67} +{"sid": 0, "pid": 731, "age": 7} +{"sid": 0, "pid": 732, "age": 30} +{"sid": 0, "pid": 733, "age": 30} +{"sid": 0, "pid": 734, "age": 21} +{"sid": 0, "pid": 735, "age": 33} +{"sid": 0, "pid": 736, "age": 4} +{"sid": 0, "pid": 737, "age": 32} +{"sid": 0, "pid": 738, "age": 21} +{"sid": 0, "pid": 739, "age": 12} +{"sid": 0, "pid": 740, "age": 36} +{"sid": 0, "pid": 741, "age": 8} +{"sid": 0, "pid": 742, "age": 10} +{"sid": 0, "pid": 743, "age": 5} +{"sid": 0, "pid": 744, "age": 37} +{"sid": 0, "pid": 745, "age": 80} +{"sid": 0, "pid": 746, "age": 52} +{"sid": 0, "pid": 747, "age": 42} +{"sid": 0, "pid": 748, "age": 26} +{"sid": 0, "pid": 749, "age": 57} +{"sid": 0, "pid": 750, "age": 59} +{"sid": 0, "pid": 751, "age": 94} +{"sid": 0, "pid": 752, "age": 82} +{"sid": 0, "pid": 753, "age": 62} +{"sid": 0, "pid": 754, "age": 88} +{"sid": 0, "pid": 755, "age": 53} +{"sid": 0, "pid": 756, "age": 33} +{"sid": 0, "pid": 757, "age": 16} +{"sid": 0, "pid": 758, "age": 71} +{"sid": 0, "pid": 759, "age": 38} +{"sid": 0, "pid": 760, "age": 14} +{"sid": 0, "pid": 761, "age": 90} +{"sid": 0, "pid": 762, "age": 45} +{"sid": 0, "pid": 763, "age": 97} +{"sid": 0, "pid": 764, "age": 72} +{"sid": 0, "pid": 765, "age": 19} +{"sid": 0, "pid": 766, "age": 30} +{"sid": 0, "pid": 767, "age": 77} +{"sid": 0, "pid": 768, "age": 3} +{"sid": 0, "pid": 769, "age": 51} +{"sid": 0, "pid": 770, "age": 41} +{"sid": 0, "pid": 771, "age": 39} +{"sid": 0, "pid": 772, "age": 12} +{"sid": 0, "pid": 773, "age": 3} +{"sid": 0, "pid": 774, "age": 45} +{"sid": 0, "pid": 775, "age": 49} +{"sid": 0, "pid": 776, "age": 83} +{"sid": 0, "pid": 777, "age": 49} +{"sid": 0, "pid": 778, "age": 44} +{"sid": 0, "pid": 779, "age": 10} +{"sid": 0, "pid": 780, "age": 58} +{"sid": 0, "pid": 781, "age": 55} +{"sid": 0, "pid": 782, "age": 56} +{"sid": 0, "pid": 783, "age": 40} +{"sid": 0, "pid": 784, "age": 17} +{"sid": 0, "pid": 785, "age": 44} +{"sid": 0, "pid": 786, "age": 45} +{"sid": 0, "pid": 787, "age": 50} +{"sid": 0, "pid": 788, "age": 12} +{"sid": 0, "pid": 789, "age": 16} +{"sid": 0, "pid": 790, "age": 88} +{"sid": 0, "pid": 791, "age": 79} +{"sid": 0, "pid": 792, "age": 
59} +{"sid": 0, "pid": 793, "age": 86} +{"sid": 0, "pid": 794, "age": 76} +{"sid": 0, "pid": 795, "age": 31} +{"sid": 0, "pid": 796, "age": 57} +{"sid": 0, "pid": 797, "age": 58} +{"sid": 0, "pid": 798, "age": 60} +{"sid": 0, "pid": 799, "age": 60} +{"sid": 0, "pid": 800, "age": 9} +{"sid": 0, "pid": 801, "age": 1} +{"sid": 0, "pid": 802, "age": 51} +{"sid": 0, "pid": 803, "age": 73} +{"sid": 0, "pid": 804, "age": 5} +{"sid": 0, "pid": 805, "age": 48} +{"sid": 0, "pid": 806, "age": 23} +{"sid": 0, "pid": 807, "age": 40} +{"sid": 0, "pid": 808, "age": 97} +{"sid": 0, "pid": 809, "age": 19} +{"sid": 0, "pid": 810, "age": 50} +{"sid": 0, "pid": 811, "age": 55} +{"sid": 0, "pid": 812, "age": 74} +{"sid": 0, "pid": 813, "age": 58} +{"sid": 0, "pid": 814, "age": 47} +{"sid": 0, "pid": 815, "age": 92} +{"sid": 0, "pid": 816, "age": 2} +{"sid": 0, "pid": 817, "age": 45} +{"sid": 0, "pid": 818, "age": 94} +{"sid": 0, "pid": 819, "age": 67} +{"sid": 0, "pid": 820, "age": 61} +{"sid": 0, "pid": 821, "age": 35} +{"sid": 0, "pid": 822, "age": 46} +{"sid": 0, "pid": 823, "age": 72} +{"sid": 0, "pid": 824, "age": 73} +{"sid": 0, "pid": 825, "age": 74} +{"sid": 0, "pid": 826, "age": 4} +{"sid": 0, "pid": 827, "age": 30} +{"sid": 0, "pid": 828, "age": 32} +{"sid": 0, "pid": 829, "age": 64} +{"sid": 0, "pid": 830, "age": 90} +{"sid": 0, "pid": 831, "age": 93} +{"sid": 0, "pid": 832, "age": 66} +{"sid": 0, "pid": 833, "age": 93} +{"sid": 0, "pid": 834, "age": 67} +{"sid": 0, "pid": 835, "age": 23} +{"sid": 0, "pid": 836, "age": 42} +{"sid": 0, "pid": 837, "age": 42} +{"sid": 0, "pid": 838, "age": 63} +{"sid": 0, "pid": 839, "age": 39} +{"sid": 0, "pid": 840, "age": 61} +{"sid": 0, "pid": 841, "age": 66} +{"sid": 0, "pid": 842, "age": 47} +{"sid": 0, "pid": 843, "age": 87} +{"sid": 0, "pid": 844, "age": 24} +{"sid": 0, "pid": 845, "age": 46} +{"sid": 0, "pid": 846, "age": 31} +{"sid": 0, "pid": 847, "age": 27} +{"sid": 0, "pid": 848, "age": 91} +{"sid": 0, "pid": 849, "age": 78} +{"sid": 0, "pid": 850, "age": 46} +{"sid": 0, "pid": 851, "age": 5} +{"sid": 0, "pid": 852, "age": 13} +{"sid": 0, "pid": 853, "age": 92} +{"sid": 0, "pid": 854, "age": 77} +{"sid": 0, "pid": 855, "age": 38} +{"sid": 0, "pid": 856, "age": 66} +{"sid": 0, "pid": 857, "age": 81} +{"sid": 0, "pid": 858, "age": 68} +{"sid": 0, "pid": 859, "age": 98} +{"sid": 0, "pid": 860, "age": 98} +{"sid": 0, "pid": 861, "age": 10} +{"sid": 0, "pid": 862, "age": 43} +{"sid": 0, "pid": 863, "age": 16} +{"sid": 0, "pid": 864, "age": 3} +{"sid": 0, "pid": 865, "age": 10} +{"sid": 0, "pid": 866, "age": 39} +{"sid": 0, "pid": 867, "age": 97} +{"sid": 0, "pid": 868, "age": 4} +{"sid": 0, "pid": 869, "age": 2} +{"sid": 0, "pid": 870, "age": 89} +{"sid": 0, "pid": 871, "age": 17} +{"sid": 0, "pid": 872, "age": 68} +{"sid": 0, "pid": 873, "age": 88} +{"sid": 0, "pid": 874, "age": 5} +{"sid": 0, "pid": 875, "age": 93} +{"sid": 0, "pid": 876, "age": 34} +{"sid": 0, "pid": 877, "age": 36} +{"sid": 0, "pid": 878, "age": 72} +{"sid": 0, "pid": 879, "age": 78} +{"sid": 0, "pid": 880, "age": 14} +{"sid": 0, "pid": 881, "age": 70} +{"sid": 0, "pid": 882, "age": 83} +{"sid": 0, "pid": 883, "age": 79} +{"sid": 0, "pid": 884, "age": 62} +{"sid": 0, "pid": 885, "age": 12} +{"sid": 0, "pid": 886, "age": 17} +{"sid": 0, "pid": 887, "age": 80} +{"sid": 0, "pid": 888, "age": 94} +{"sid": 0, "pid": 889, "age": 37} +{"sid": 0, "pid": 890, "age": 30} +{"sid": 0, "pid": 891, "age": 44} +{"sid": 0, "pid": 892, "age": 99} +{"sid": 0, "pid": 893, "age": 73} +{"sid": 0, "pid": 894, 
"age": 60} +{"sid": 0, "pid": 895, "age": 55} +{"sid": 0, "pid": 896, "age": 36} +{"sid": 0, "pid": 897, "age": 51} +{"sid": 0, "pid": 898, "age": 52} +{"sid": 0, "pid": 899, "age": 40} +{"sid": 0, "pid": 900, "age": 53} +{"sid": 0, "pid": 901, "age": 93} +{"sid": 0, "pid": 902, "age": 58} +{"sid": 0, "pid": 903, "age": 74} +{"sid": 0, "pid": 904, "age": 81} +{"sid": 0, "pid": 905, "age": 15} +{"sid": 0, "pid": 906, "age": 19} +{"sid": 0, "pid": 907, "age": 16} +{"sid": 0, "pid": 908, "age": 51} +{"sid": 0, "pid": 909, "age": 43} +{"sid": 0, "pid": 910, "age": 46} +{"sid": 0, "pid": 911, "age": 66} +{"sid": 0, "pid": 912, "age": 13} +{"sid": 0, "pid": 913, "age": 29} +{"sid": 0, "pid": 914, "age": 45} +{"sid": 0, "pid": 915, "age": 75} +{"sid": 0, "pid": 916, "age": 41} +{"sid": 0, "pid": 917, "age": 15} +{"sid": 0, "pid": 918, "age": 7} +{"sid": 0, "pid": 919, "age": 87} +{"sid": 0, "pid": 920, "age": 4} +{"sid": 0, "pid": 921, "age": 37} +{"sid": 0, "pid": 922, "age": 83} +{"sid": 0, "pid": 923, "age": 4} +{"sid": 0, "pid": 924, "age": 10} +{"sid": 0, "pid": 925, "age": 43} +{"sid": 0, "pid": 926, "age": 59} +{"sid": 0, "pid": 927, "age": 46} +{"sid": 0, "pid": 928, "age": 94} +{"sid": 0, "pid": 929, "age": 63} +{"sid": 0, "pid": 930, "age": 39} +{"sid": 0, "pid": 931, "age": 0} +{"sid": 0, "pid": 932, "age": 57} +{"sid": 0, "pid": 933, "age": 49} +{"sid": 0, "pid": 934, "age": 74} +{"sid": 0, "pid": 935, "age": 38} +{"sid": 0, "pid": 936, "age": 64} +{"sid": 0, "pid": 937, "age": 45} +{"sid": 0, "pid": 938, "age": 6} +{"sid": 0, "pid": 939, "age": 15} +{"sid": 0, "pid": 940, "age": 88} +{"sid": 0, "pid": 941, "age": 52} +{"sid": 0, "pid": 942, "age": 33} +{"sid": 0, "pid": 943, "age": 1} +{"sid": 0, "pid": 944, "age": 33} +{"sid": 0, "pid": 945, "age": 31} +{"sid": 0, "pid": 946, "age": 28} +{"sid": 0, "pid": 947, "age": 27} +{"sid": 0, "pid": 948, "age": 98} +{"sid": 0, "pid": 949, "age": 35} +{"sid": 0, "pid": 950, "age": 14} +{"sid": 0, "pid": 951, "age": 2} +{"sid": 0, "pid": 952, "age": 72} +{"sid": 0, "pid": 953, "age": 98} +{"sid": 0, "pid": 954, "age": 6} +{"sid": 0, "pid": 955, "age": 34} +{"sid": 0, "pid": 956, "age": 93} +{"sid": 0, "pid": 957, "age": 17} +{"sid": 0, "pid": 958, "age": 81} +{"sid": 0, "pid": 959, "age": 40} +{"sid": 0, "pid": 960, "age": 33} +{"sid": 0, "pid": 961, "age": 72} +{"sid": 0, "pid": 962, "age": 40} +{"sid": 0, "pid": 963, "age": 42} +{"sid": 0, "pid": 964, "age": 21} +{"sid": 0, "pid": 965, "age": 66} +{"sid": 0, "pid": 966, "age": 80} +{"sid": 0, "pid": 967, "age": 37} +{"sid": 0, "pid": 968, "age": 11} +{"sid": 0, "pid": 969, "age": 39} +{"sid": 0, "pid": 970, "age": 52} +{"sid": 0, "pid": 971, "age": 51} +{"sid": 0, "pid": 972, "age": 43} +{"sid": 0, "pid": 973, "age": 38} +{"sid": 0, "pid": 974, "age": 4} +{"sid": 0, "pid": 975, "age": 77} +{"sid": 0, "pid": 976, "age": 69} +{"sid": 0, "pid": 977, "age": 32} +{"sid": 0, "pid": 978, "age": 4} +{"sid": 0, "pid": 979, "age": 67} +{"sid": 0, "pid": 980, "age": 19} +{"sid": 0, "pid": 981, "age": 70} +{"sid": 0, "pid": 982, "age": 69} +{"sid": 0, "pid": 983, "age": 43} +{"sid": 0, "pid": 984, "age": 20} +{"sid": 0, "pid": 985, "age": 28} +{"sid": 0, "pid": 986, "age": 77} +{"sid": 0, "pid": 987, "age": 14} +{"sid": 0, "pid": 988, "age": 97} +{"sid": 0, "pid": 989, "age": 10} +{"sid": 0, "pid": 990, "age": 6} +{"sid": 0, "pid": 991, "age": 82} +{"sid": 0, "pid": 992, "age": 34} +{"sid": 0, "pid": 993, "age": 46} +{"sid": 0, "pid": 994, "age": 24} +{"sid": 0, "pid": 995, "age": 55} +{"sid": 0, "pid": 
996, "age": 64} +{"sid": 0, "pid": 997, "age": 57} +{"sid": 0, "pid": 998, "age": 92} +{"sid": 0, "pid": 999, "age": 27} +{"sid": 0, "pid": 1000, "age": 96} + +################## Data for Fuzz Testing ################ + +Table : A + +{ + "prim": 1, + "ida1": "VDVFY#BHJ###hbvfh$%^&*(((()&*@&#vbhdbjvbh", + "ida2": -9223372036854775808, + "ida3": ["123", "", "", "", "", " ", "$&`||\\vjfdnk" ,"456", "789"], + "ida4": NaN, + "ida5": {} +} +{ + "prim": 2, + "ida1": "VDVFY#BHJ###hbvfh$%^&*(((()&*@&#vbhdbjvbh", + "ida2": -9223372036854775808, + "ida3": ["123", "", "", "", "", " ", "$&`||\\vjfdnk" ,"456", "789"], + "ida4": 74924657.8745757874678, + "ida5": {"ey@#₹🚀": 23233.323} +} +{ + "prim": 1, + "ida1": "dfhvbjbdk b vjk vdbvjkdfbvkdbfb ", + "ida2": 9223372036854775807, + "ida3": [], + "ida4": 0.0, + "ida5": { + "subid1": 3.4028235E+38, + "subid2": 1.17549435E-38, + "subid3": -3.4028235E+38, + "subid4": -1.17549435E-38, + "subid5": 0.0, + "subid6": -0.0, + "subid7": 1.4012985E-45, + "subid8": -1.4012985E-45, + "subid9": NaN + } +} +{ + "prim": 2, + "ida1": "dfhvbjbdk b vjk vdbvjkdfbvkdbfb ", + "ida2": 9223372036854775807, + "ida3": [], + "ida4": 4.9406564584124654E-324, + "ida5": {"outer": -5} +} +{ + "prim": 1, + "ida1": "&`b|nm./^*@!\u6F22\u5B57 \uD83D\uDE00\u6F22", + "ida2": 0, + "ida3": ["yash"], + "ida4": +INF, + "ida5": {"key": -56} +} +{ + "prim": 2, + "ida1": "&`b|nm./^*@!\u6F22\u5B57 \uD83D\uDE00\u6F22", + "ida2": 0, + "ida3": ["yash"], + "ida4": -INF, + "ida5": {"key1": 94500.89, "key1": -48959.0} +} + + diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/before.ddl b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/before.ddl new file mode 100644 index 00000000..cd419178 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/before.ddl @@ -0,0 +1,58 @@ +CREATE TABLE users( + sid1 INTEGER, + sid2 INTEGER, + pid1 INTEGER, + pid2 INTEGER, + name STRING, + age INTEGER, + seqNo INTEGER GENERATED BY DEFAULT ON NULL AS IDENTITY, + info JSON, + PRIMARY KEY(SHARD(sid1,sid2), pid1, pid2) +) + +CREATE INDEX idxCode ON users(info.code AS INTEGER) + +CREATE INDEX idxAge ON users(age) + +create table if not exists jsoncol( + majorKey1 string, + majorKey2 string, + minorKey string, + primary key(shard(majorKey1,majorKey2),minorKey)) as json collection + +create index idx_index on jsoncol(index as INTEGER) + +CREATE TABLE limit( + sid INTEGER, + pid INTEGER, + age INTEGER, + PRIMARY KEY (SHARD(sid), pid) +) + +CREATE TABLE Foo( + id INTEGER, + sid INTEGER, + record RECORD(long LONG, int INTEGER, string STRING, bool BOOLEAN, float FLOAT), + info JSON, + primary key (SHARD(sid),id) +) + +CREATE TABLE Foo.Child( + id2 integer, + str string, + ts timestamp(3), + info JSON, + primary key(id2) +) + +############### Tables for Fuzz Testing ############# + +CREATE TABLE A ( + prim INTEGER, + ida1 STRING, + ida2 LONG, + ida3 ARRAY(STRING), + ida4 DOUBLE, + ida5 MAP(DOUBLE), + PRIMARY KEY (SHARD(ida1), prim) +) \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/fuzz01.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/fuzz01.r new file mode 100644 index 00000000..f0a3b471 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/fuzz01.r @@ -0,0 +1,167 @@ +compiled-query-plan +{ +"query file" : "idc_multirow_update/q/fuzz01.q", +"plan" : +{ + "iterator kind" : "RECEIVE", + "distribution kind" : "SINGLE_PARTITION", + "input 
iterator" : + { + "iterator kind" : "UPDATE_ROW", + "indexes to update" : [ ], + "update clauses" : [ + { + "iterator kind" : "SET", + "clone new values" : false, + "theIsMRCounterDec" : false, + "theJsonMRCounterColPos" : -1, + "theIsJsonMRCounterUpdate" : false, + "target iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "ida2", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$a" + } + }, + "new value iterator" : + { + "iterator kind" : "CONST", + "value" : -9223372036854775808 + } + }, + { + "iterator kind" : "ADD", + "clone new values" : false, + "theIsMRCounterDec" : false, + "theJsonMRCounterColPos" : -1, + "theIsJsonMRCounterUpdate" : false, + "target iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "ida3", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$a" + } + }, + "position iterator" : + { + "iterator kind" : "CONST", + "value" : 5 + }, + "new value iterator" : + { + "iterator kind" : "CONST", + "value" : "!@#$%^&*()" + } + }, + { + "iterator kind" : "SET", + "clone new values" : false, + "theIsMRCounterDec" : false, + "theJsonMRCounterColPos" : -1, + "theIsJsonMRCounterUpdate" : false, + "target iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "ida4", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$a" + } + }, + "new value iterator" : + { + "iterator kind" : "MULTIPLY_DIVIDE", + "operations and operands" : [ + { + "operation" : "*", + "operand" : + { + "iterator kind" : "CONST", + "value" : -1.0 + } + }, + { + "operation" : "/", + "operand" : + { + "iterator kind" : "CONST", + "value" : 0.0 + } + } + ] + } + }, + { + "iterator kind" : "PUT", + "clone new values" : false, + "theIsMRCounterDec" : false, + "theJsonMRCounterColPos" : -1, + "theIsJsonMRCounterUpdate" : false, + "target iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "ida5", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$a" + } + }, + "new value iterator" : + { + "iterator kind" : "MAP_CONSTRUCTOR", + "input iterators" : [ + { + "iterator kind" : "CONST", + "value" : "new_entry" + }, + { + "iterator kind" : "CONST", + "value" : NaN + } + ] + } + } + ], + "update TTL" : false, + "isCompletePrimaryKey" : false, + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "A", + "row variable" : "$$a", + "index used" : "primary index", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : {"ida1":"dfhvbjbdk b vjk vdbvjkdfbvkdbfb "}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$$a", + "SELECT expressions" : [ + { + "field name" : "a", + "field expression" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$a" + } + } + ] + } + } +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/fuzz02.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/fuzz02.r new file mode 100644 index 00000000..a0811b1c --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/fuzz02.r @@ -0,0 +1,150 @@ +compiled-query-plan +{ +"query file" : "idc_multirow_update/q/fuzz02.q", +"plan" : +{ + "iterator kind" : "RECEIVE", + "distribution kind" : "SINGLE_PARTITION", + "input iterator" : + { + "iterator kind" : "UPDATE_ROW", + "indexes to update" : [ ], + "update clauses" : [ + { + "iterator kind" : "SET", + "clone new values" : false, + "theIsMRCounterDec" : false, 
+ "theJsonMRCounterColPos" : -1, + "theIsJsonMRCounterUpdate" : false, + "target iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "ida2", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$a" + } + }, + "new value iterator" : + { + "iterator kind" : "CONST", + "value" : 9223372036854775807 + } + }, + { + "iterator kind" : "ADD", + "clone new values" : false, + "theIsMRCounterDec" : false, + "theJsonMRCounterColPos" : -1, + "theIsJsonMRCounterUpdate" : false, + "target iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "ida3", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$a" + } + }, + "position iterator" : + { + "iterator kind" : "CONST", + "value" : 1 + }, + "new value iterator" : + { + "iterator kind" : "CONST", + "value" : "!@#$%^&*()" + } + }, + { + "iterator kind" : "SET", + "clone new values" : false, + "theIsMRCounterDec" : false, + "theJsonMRCounterColPos" : -1, + "theIsJsonMRCounterUpdate" : false, + "target iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "ida4", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$a" + } + }, + "new value iterator" : + { + "iterator kind" : "CONST", + "value" : 5.0987 + } + }, + { + "iterator kind" : "PUT", + "clone new values" : false, + "theIsMRCounterDec" : false, + "theJsonMRCounterColPos" : -1, + "theIsJsonMRCounterUpdate" : false, + "target iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "ida5", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$a" + } + }, + "new value iterator" : + { + "iterator kind" : "MAP_CONSTRUCTOR", + "input iterators" : [ + { + "iterator kind" : "CONST", + "value" : "new_entry" + }, + { + "iterator kind" : "CONST", + "value" : -3.4028235E38 + } + ] + } + } + ], + "update TTL" : false, + "isCompletePrimaryKey" : false, + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "A", + "row variable" : "$$a", + "index used" : "primary index", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : {"ida1":"VDVFY#BHJ###hbvfh$%^&*(((()&*@&#vbhdbjvbh"}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$$a", + "SELECT expressions" : [ + { + "field name" : "a", + "field expression" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$a" + } + } + ] + } + } +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/fuzz03.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/fuzz03.r new file mode 100644 index 00000000..770d89d7 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/fuzz03.r @@ -0,0 +1,150 @@ +compiled-query-plan +{ +"query file" : "idc_multirow_update/q/fuzz03.q", +"plan" : +{ + "iterator kind" : "RECEIVE", + "distribution kind" : "SINGLE_PARTITION", + "input iterator" : + { + "iterator kind" : "UPDATE_ROW", + "indexes to update" : [ ], + "update clauses" : [ + { + "iterator kind" : "SET", + "clone new values" : false, + "theIsMRCounterDec" : false, + "theJsonMRCounterColPos" : -1, + "theIsJsonMRCounterUpdate" : false, + "target iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "ida2", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$a" + } + }, + "new value iterator" : + { + "iterator kind" : "CONST", + "value" : 4793594583598 + } + }, + { + "iterator kind" : "ADD", + "clone new values" : false, 
+ "theIsMRCounterDec" : false, + "theJsonMRCounterColPos" : -1, + "theIsJsonMRCounterUpdate" : false, + "target iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "ida3", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$a" + } + }, + "position iterator" : + { + "iterator kind" : "CONST", + "value" : 1 + }, + "new value iterator" : + { + "iterator kind" : "CONST", + "value" : "!@#$%^&*()" + } + }, + { + "iterator kind" : "SET", + "clone new values" : false, + "theIsMRCounterDec" : false, + "theJsonMRCounterColPos" : -1, + "theIsJsonMRCounterUpdate" : false, + "target iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "ida4", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$a" + } + }, + "new value iterator" : + { + "iterator kind" : "CONST", + "value" : NaN + } + }, + { + "iterator kind" : "PUT", + "clone new values" : false, + "theIsMRCounterDec" : false, + "theJsonMRCounterColPos" : -1, + "theIsJsonMRCounterUpdate" : false, + "target iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "ida5", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$a" + } + }, + "new value iterator" : + { + "iterator kind" : "MAP_CONSTRUCTOR", + "input iterators" : [ + { + "iterator kind" : "CONST", + "value" : "new_entry" + }, + { + "iterator kind" : "CONST", + "value" : NaN + } + ] + } + } + ], + "update TTL" : false, + "isCompletePrimaryKey" : false, + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "A", + "row variable" : "$$a", + "index used" : "primary index", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : {"ida1":"&`b|nm./^*@!漢字 😀漢"}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$$a", + "SELECT expressions" : [ + { + "field name" : "a", + "field expression" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$a" + } + } + ] + } + } +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q1.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q1.r new file mode 100644 index 00000000..9a8a79e2 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q1.r @@ -0,0 +1 @@ +Compile-Exception \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q10.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q10.r new file mode 100644 index 00000000..b1988f26 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q10.r @@ -0,0 +1,70 @@ +compiled-query-plan +{ +"query file" : "idc_multirow_update/q/q10.q", +"plan" : +{ + "iterator kind" : "RECEIVE", + "distribution kind" : "SINGLE_PARTITION", + "input iterator" : + { + "iterator kind" : "UPDATE_ROW", + "indexes to update" : [ ], + "update clauses" : [ + { + "iterator kind" : "SET", + "clone new values" : false, + "theIsMRCounterDec" : false, + "theJsonMRCounterColPos" : -1, + "theIsJsonMRCounterUpdate" : false, + "target iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "seqNo", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$users" + } + }, + "new value iterator" : + { + "iterator kind" : "CONST", + "value" : null + } + } + ], + "update TTL" : false, + "isCompletePrimaryKey" : false, + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator 
kind" : "TABLE", + "target table" : "users", + "row variable" : "$$users", + "index used" : "primary index", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : {"sid1":0,"sid2":1}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$$users", + "SELECT expressions" : [ + { + "field name" : "users", + "field expression" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$users" + } + } + ] + } + } +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q11.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q11.r new file mode 100644 index 00000000..125b4e9c --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q11.r @@ -0,0 +1,102 @@ +compiled-query-plan +{ +"query file" : "idc_multirow_update/q/q11.q", +"plan" : +{ + "iterator kind" : "RECEIVE", + "distribution kind" : "SINGLE_PARTITION", + "input iterator" : + { + "iterator kind" : "UPDATE_ROW", + "indexes to update" : [ ], + "update clauses" : [ + { + "iterator kind" : "SET", + "clone new values" : false, + "theIsMRCounterDec" : false, + "theJsonMRCounterColPos" : -1, + "theIsJsonMRCounterUpdate" : false, + "target iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "height", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "info", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$u" + } + } + }, + "new value iterator" : + { + "iterator kind" : "ADD_SUBTRACT", + "operations and operands" : [ + { + "operation" : "+", + "operand" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "height", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "info", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$u" + } + } + } + }, + { + "operation" : "+", + "operand" : + { + "iterator kind" : "CONST", + "value" : 1 + } + } + ] + } + } + ], + "update TTL" : false, + "isCompletePrimaryKey" : false, + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "users", + "row variable" : "$$u", + "index used" : "primary index", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : {"sid1":2,"sid2":3}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$$u", + "SELECT expressions" : [ + { + "field name" : "u", + "field expression" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$u" + } + } + ] + } + } +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q12.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q12.r new file mode 100644 index 00000000..f48485fc --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q12.r @@ -0,0 +1,92 @@ +compiled-query-plan +{ +"query file" : "idc_multirow_update/q/q12.q", +"plan" : +{ + "iterator kind" : "RECEIVE", + "distribution kind" : "SINGLE_PARTITION", + "input iterator" : + { + "iterator kind" : "UPDATE_ROW", + "indexes to update" : [ ], + "update clauses" : [ + { + "iterator kind" : "SET", + "clone new values" : false, + "theIsMRCounterDec" : false, + "theJsonMRCounterColPos" : -1, + "theIsJsonMRCounterUpdate" : false, + "target iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "age", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$limit" + } + }, + "new value iterator" : + { + "iterator 
kind" : "ADD_SUBTRACT", + "operations and operands" : [ + { + "operation" : "+", + "operand" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "age", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$limit" + } + } + }, + { + "operation" : "+", + "operand" : + { + "iterator kind" : "CONST", + "value" : 1 + } + } + ] + } + } + ], + "update TTL" : false, + "isCompletePrimaryKey" : false, + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "limit", + "row variable" : "$$limit", + "index used" : "primary index", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : {"sid":0}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$$limit", + "SELECT expressions" : [ + { + "field name" : "limit", + "field expression" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$limit" + } + } + ] + } + } +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q13.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q13.r new file mode 100644 index 00000000..8d77a7ba --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q13.r @@ -0,0 +1,92 @@ +compiled-query-plan +{ +"query file" : "idc_multirow_update/q/q13.q", +"plan" : +{ + "iterator kind" : "RECEIVE", + "distribution kind" : "SINGLE_PARTITION", + "input iterator" : + { + "iterator kind" : "UPDATE_ROW", + "indexes to update" : [ ], + "update clauses" : [ + { + "iterator kind" : "SET", + "clone new values" : false, + "theIsMRCounterDec" : false, + "theJsonMRCounterColPos" : -1, + "theIsJsonMRCounterUpdate" : false, + "target iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "age", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$limit" + } + }, + "new value iterator" : + { + "iterator kind" : "ADD_SUBTRACT", + "operations and operands" : [ + { + "operation" : "+", + "operand" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "age", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$limit" + } + } + }, + { + "operation" : "+", + "operand" : + { + "iterator kind" : "CONST", + "value" : 1 + } + } + ] + } + } + ], + "update TTL" : false, + "isCompletePrimaryKey" : false, + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "limit", + "row variable" : "$$limit", + "index used" : "primary index", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : {"sid":0}, + "range conditions" : { "pid" : { "end value" : 1000, "end inclusive" : false } } + } + ], + "position in join" : 0 + }, + "FROM variable" : "$$limit", + "SELECT expressions" : [ + { + "field name" : "limit", + "field expression" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$limit" + } + } + ] + } + } +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q14.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q14.r new file mode 100644 index 00000000..cfd49750 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q14.r @@ -0,0 +1,136 @@ +compiled-query-plan +{ +"query file" : "idc_multirow_update/q/q14.q", +"plan" : +{ + "iterator kind" : "RECEIVE", + "distribution kind" : "SINGLE_PARTITION", + "input iterator" : + { + "iterator kind" : "UPDATE_ROW", + "indexes to update" : [ "idx_index" ], + 
"update clauses" : [ + { + "iterator kind" : "PUT", + "clone new values" : false, + "theIsMRCounterDec" : false, + "theJsonMRCounterColPos" : -1, + "theIsJsonMRCounterUpdate" : false, + "target iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$j" + }, + "new value iterator" : + { + "iterator kind" : "MAP_CONSTRUCTOR", + "input iterators" : [ + { + "iterator kind" : "CONST", + "value" : "address" + }, + { + "iterator kind" : "MAP_CONSTRUCTOR", + "input iterators" : [ + { + "iterator kind" : "CONST", + "value" : "city" + }, + { + "iterator kind" : "CONST", + "value" : "Burlington" + }, + { + "iterator kind" : "CONST", + "value" : "State" + }, + { + "iterator kind" : "CONST", + "value" : "MA" + } + ] + } + ] + } + }, + { + "iterator kind" : "SET", + "clone new values" : false, + "theIsMRCounterDec" : false, + "theJsonMRCounterColPos" : -1, + "theIsJsonMRCounterUpdate" : false, + "target iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "firstThread", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$j" + } + }, + "new value iterator" : + { + "iterator kind" : "CONST", + "value" : true + } + }, + { + "iterator kind" : "REMOVE", + "clone new values" : false, + "theIsMRCounterDec" : false, + "theJsonMRCounterColPos" : -1, + "theIsJsonMRCounterUpdate" : false, + "target iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "index", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$j" + } + } + } + ], + "update TTL" : true, + "TimeUnit" : "DAYS", + "TTL iterator" : + { + "iterator kind" : "CONST", + "value" : 8 + }, + "isCompletePrimaryKey" : false, + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "jsoncol", + "row variable" : "$j", + "index used" : "primary index", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : {"majorKey1":"k1","majorKey2":"k2"}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$j", + "SELECT expressions" : [ + { + "field name" : "j", + "field expression" : + { + "iterator kind" : "VAR_REF", + "variable" : "$j" + } + } + ] + } + } +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q15.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q15.r new file mode 100644 index 00000000..1d1cf39e --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q15.r @@ -0,0 +1,220 @@ +compiled-query-plan +{ +"query file" : "idc_multirow_update/q/q15.q", +"plan" : +{ + "iterator kind" : "RECEIVE", + "distribution kind" : "SINGLE_PARTITION", + "input iterator" : + { + "iterator kind" : "UPDATE_ROW", + "indexes to update" : [ ], + "update clauses" : [ + { + "iterator kind" : "ADD", + "clone new values" : false, + "theIsMRCounterDec" : false, + "theJsonMRCounterColPos" : -1, + "theIsJsonMRCounterUpdate" : false, + "target iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "phones", + "input iterator" : + { + "iterator kind" : "ARRAY_FILTER", + "predicate iterator" : + { + "iterator kind" : "IS_OF_TYPE", + "target types" : [ + { + "type" : { "Array" : + "Any" + }, + "quantifier" : "", + "only" : false + } + ], + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "phones", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$element" + } + } + }, + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : 
"address", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$j" + } + } + } + }, + "new value iterator" : + { + "iterator kind" : "MAP_CONSTRUCTOR", + "input iterators" : [ + { + "iterator kind" : "CONST", + "value" : "areacode" + }, + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$areacode" + }, + { + "iterator kind" : "CONST", + "value" : "number" + }, + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$number" + }, + { + "iterator kind" : "CONST", + "value" : "kind" + }, + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$kind" + } + ] + } + }, + { + "iterator kind" : "SET", + "clone new values" : false, + "theIsMRCounterDec" : false, + "theJsonMRCounterColPos" : -1, + "theIsJsonMRCounterUpdate" : false, + "target iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "phones", + "input iterator" : + { + "iterator kind" : "ARRAY_FILTER", + "predicate iterator" : + { + "iterator kind" : "IS_OF_TYPE", + "target types" : [ + { + "type" : { "Map" : + "Any" + }, + "quantifier" : "", + "only" : false + } + ], + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "phones", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$element" + } + } + }, + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "address", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$j" + } + } + } + }, + "new value iterator" : + { + "iterator kind" : "ARRAY_CONSTRUCTOR", + "conditional" : false, + "input iterators" : [ + { + "iterator kind" : "VALUES", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$" + } + }, + { + "iterator kind" : "MAP_CONSTRUCTOR", + "input iterators" : [ + { + "iterator kind" : "CONST", + "value" : "areacode" + }, + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$areacode" + }, + { + "iterator kind" : "CONST", + "value" : "number" + }, + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$number" + }, + { + "iterator kind" : "CONST", + "value" : "kind" + }, + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$kind" + } + ] + } + ] + } + } + ], + "update TTL" : false, + "isCompletePrimaryKey" : false, + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "jsoncol", + "row variable" : "$$j", + "index used" : "primary index", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : {"majorKey1":"j1","majorKey2":"j2"}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$$j", + "SELECT expressions" : [ + { + "field name" : "j", + "field expression" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$j" + } + } + ] + } + } +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q17.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q17.r new file mode 100644 index 00000000..fe5aabd8 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q17.r @@ -0,0 +1,294 @@ +compiled-query-plan +{ +"query file" : "idc_multirow_update/q/q17.q", +"plan" : +{ + "iterator kind" : "RECEIVE", + "distribution kind" : "SINGLE_PARTITION", + "input iterator" : + { + "iterator kind" : "UPDATE_ROW", + "indexes to update" : [ ], + "update clauses" : [ + { + "iterator kind" : "SET", + "clone new values" : false, + "theIsMRCounterDec" : false, + "theJsonMRCounterColPos" : -1, + "theIsJsonMRCounterUpdate" : false, + 
"target iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "str", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$fc" + } + }, + "new value iterator" : + { + "iterator kind" : "STRING_CONCAT", + "input iterators" : [ + { + "iterator kind" : "VAR_REF", + "variable" : "$" + }, + { + "iterator kind" : "CONST", + "value" : " of parent Foo" + } + ] + } + }, + { + "iterator kind" : "SET", + "clone new values" : false, + "theIsMRCounterDec" : false, + "theJsonMRCounterColPos" : -1, + "theIsJsonMRCounterUpdate" : false, + "target iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "areacode", + "input iterator" : + { + "iterator kind" : "ARRAY_FILTER", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "phones", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "address", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "info", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$fc" + } + } + } + } + } + }, + "new value iterator" : + { + "iterator kind" : "ADD_SUBTRACT", + "operations and operands" : [ + { + "operation" : "+", + "operand" : + { + "iterator kind" : "VAR_REF", + "variable" : "$" + } + }, + { + "operation" : "+", + "operand" : + { + "iterator kind" : "CONST", + "value" : 1 + } + } + ] + } + }, + { + "iterator kind" : "ADD", + "clone new values" : false, + "theIsMRCounterDec" : false, + "theJsonMRCounterColPos" : -1, + "theIsJsonMRCounterUpdate" : false, + "target iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "phones", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "address", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "info", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$fc" + } + } + } + }, + "new value iterator" : + { + "iterator kind" : "SEQ_CONCAT", + "input iterators" : [ + { + "iterator kind" : "MAP_CONSTRUCTOR", + "input iterators" : [ + { + "iterator kind" : "CONST", + "value" : "areacode" + }, + { + "iterator kind" : "CONST", + "value" : 570 + }, + { + "iterator kind" : "CONST", + "value" : "number" + }, + { + "iterator kind" : "CONST", + "value" : 51 + } + ] + }, + { + "iterator kind" : "MAP_CONSTRUCTOR", + "input iterators" : [ + { + "iterator kind" : "CONST", + "value" : "areacode" + }, + { + "iterator kind" : "CONST", + "value" : 580 + }, + { + "iterator kind" : "CONST", + "value" : "number" + }, + { + "iterator kind" : "CONST", + "value" : 51 + } + ] + } + ] + } + }, + { + "iterator kind" : "PUT", + "clone new values" : false, + "theIsMRCounterDec" : false, + "theJsonMRCounterColPos" : -1, + "theIsJsonMRCounterUpdate" : false, + "target iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "children", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "info", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$fc" + } + } + }, + "new value iterator" : + { + "iterator kind" : "SEQ_CONCAT", + "input iterators" : [ + { + "iterator kind" : "MAP_CONSTRUCTOR", + "input iterators" : [ + { + "iterator kind" : "CONST", + "value" : "Rahul" + }, + { + "iterator kind" : "CONST", + "value" : 22 + } + ] + }, + { + "iterator kind" : "MAP_CONSTRUCTOR", + "input iterators" : [ + { + "iterator kind" : "CONST", + "value" : "Trump" + }, + { + "iterator kind" : "CONST", + "value" : null + } + ] + } + ] + } + }, + { + "iterator kind" : "REMOVE", + "clone new values" : 
false, + "theIsMRCounterDec" : false, + "theJsonMRCounterColPos" : -1, + "theIsJsonMRCounterUpdate" : false, + "target iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "lastName", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "info", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$fc" + } + } + } + } + ], + "update TTL" : true, + "TimeUnit" : "DAYS", + "TTL iterator" : + { + "iterator kind" : "CONST", + "value" : 5 + }, + "isCompletePrimaryKey" : false, + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "Foo.Child", + "row variable" : "$$fc", + "index used" : "primary index", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : {"sid":0}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$$fc", + "SELECT expressions" : [ + { + "field name" : "fc", + "field expression" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$fc" + } + } + ] + } + } +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q18.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q18.r new file mode 100644 index 00000000..d538ec3b --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q18.r @@ -0,0 +1,324 @@ +compiled-query-plan +{ +"query file" : "idc_multirow_update/q/q18.q", +"plan" : +{ + "iterator kind" : "RECEIVE", + "distribution kind" : "SINGLE_PARTITION", + "input iterator" : + { + "iterator kind" : "UPDATE_ROW", + "indexes to update" : [ ], + "update clauses" : [ + { + "iterator kind" : "SET", + "clone new values" : false, + "theIsMRCounterDec" : false, + "theJsonMRCounterColPos" : -1, + "theIsJsonMRCounterUpdate" : false, + "target iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "int", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "record", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$f" + } + } + }, + "new value iterator" : + { + "iterator kind" : "ADD_SUBTRACT", + "operations and operands" : [ + { + "operation" : "+", + "operand" : + { + "iterator kind" : "MULTIPLY_DIVIDE", + "operations and operands" : [ + { + "operation" : "*", + "operand" : + { + "iterator kind" : "VAR_REF", + "variable" : "$" + } + }, + { + "operation" : "/", + "operand" : + { + "iterator kind" : "CONST", + "value" : 10 + } + } + ] + } + }, + { + "operation" : "+", + "operand" : + { + "iterator kind" : "CONST", + "value" : 100 + } + } + ] + } + }, + { + "iterator kind" : "SET", + "clone new values" : false, + "theIsMRCounterDec" : false, + "theJsonMRCounterColPos" : -1, + "theIsJsonMRCounterUpdate" : false, + "target iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "areacode", + "input iterator" : + { + "iterator kind" : "ARRAY_FILTER", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "phones", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "address", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "info", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$f" + } + } + } + } + } + }, + "new value iterator" : + { + "iterator kind" : "ADD_SUBTRACT", + "operations and operands" : [ + { + "operation" : "+", + "operand" : + { + "iterator kind" : "VAR_REF", + "variable" : "$" + } + }, + { + "operation" : "+", + "operand" : + { + "iterator kind" : 
"CONST", + "value" : 1 + } + } + ] + } + }, + { + "iterator kind" : "ADD", + "clone new values" : false, + "theIsMRCounterDec" : false, + "theJsonMRCounterColPos" : -1, + "theIsJsonMRCounterUpdate" : false, + "target iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "phones", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "address", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "info", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$f" + } + } + } + }, + "new value iterator" : + { + "iterator kind" : "SEQ_CONCAT", + "input iterators" : [ + { + "iterator kind" : "MAP_CONSTRUCTOR", + "input iterators" : [ + { + "iterator kind" : "CONST", + "value" : "areacode" + }, + { + "iterator kind" : "CONST", + "value" : 570 + }, + { + "iterator kind" : "CONST", + "value" : "number" + }, + { + "iterator kind" : "CONST", + "value" : 51 + } + ] + }, + { + "iterator kind" : "MAP_CONSTRUCTOR", + "input iterators" : [ + { + "iterator kind" : "CONST", + "value" : "areacode" + }, + { + "iterator kind" : "CONST", + "value" : 580 + }, + { + "iterator kind" : "CONST", + "value" : "number" + }, + { + "iterator kind" : "CONST", + "value" : 51 + } + ] + } + ] + } + }, + { + "iterator kind" : "PUT", + "clone new values" : false, + "theIsMRCounterDec" : false, + "theJsonMRCounterColPos" : -1, + "theIsJsonMRCounterUpdate" : false, + "target iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "children", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "info", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$f" + } + } + }, + "new value iterator" : + { + "iterator kind" : "SEQ_CONCAT", + "input iterators" : [ + { + "iterator kind" : "MAP_CONSTRUCTOR", + "input iterators" : [ + { + "iterator kind" : "CONST", + "value" : "Rahul" + }, + { + "iterator kind" : "CONST", + "value" : 22 + } + ] + }, + { + "iterator kind" : "MAP_CONSTRUCTOR", + "input iterators" : [ + { + "iterator kind" : "CONST", + "value" : "Trump" + }, + { + "iterator kind" : "CONST", + "value" : null + } + ] + } + ] + } + }, + { + "iterator kind" : "REMOVE", + "clone new values" : false, + "theIsMRCounterDec" : false, + "theJsonMRCounterColPos" : -1, + "theIsJsonMRCounterUpdate" : false, + "target iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "lastName", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "info", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$f" + } + } + } + } + ], + "update TTL" : true, + "TimeUnit" : "DAYS", + "TTL iterator" : + { + "iterator kind" : "CONST", + "value" : 10 + }, + "isCompletePrimaryKey" : false, + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "Foo", + "row variable" : "$$f", + "index used" : "primary index", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : {"sid":0}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$$f", + "SELECT expressions" : [ + { + "field name" : "f", + "field expression" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$f" + } + } + ] + } + } +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q19.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q19.r new file mode 100644 index 00000000..9a8a79e2 --- /dev/null +++ 
b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q19.r @@ -0,0 +1 @@ +Compile-Exception \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q2.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q2.r new file mode 100644 index 00000000..e80ba61a --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q2.r @@ -0,0 +1,124 @@ +compiled-query-plan +{ +"query file" : "idc_multirow_update/q/q2.q", +"plan" : +{ + "iterator kind" : "RECEIVE", + "distribution kind" : "SINGLE_PARTITION", + "input iterator" : + { + "iterator kind" : "UPDATE_ROW", + "indexes to update" : [ "idxAge" ], + "update clauses" : [ + { + "iterator kind" : "SET", + "clone new values" : false, + "theIsMRCounterDec" : false, + "theJsonMRCounterColPos" : -1, + "theIsJsonMRCounterUpdate" : false, + "target iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "name", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$users" + } + }, + "new value iterator" : + { + "iterator kind" : "FN_UPPER", + "input iterators" : [ + { + "iterator kind" : "FIELD_STEP", + "field name" : "name", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$users" + } + } + ] + } + }, + { + "iterator kind" : "SET", + "clone new values" : false, + "theIsMRCounterDec" : false, + "theJsonMRCounterColPos" : -1, + "theIsJsonMRCounterUpdate" : false, + "target iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "age", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$users" + } + }, + "new value iterator" : + { + "iterator kind" : "ADD_SUBTRACT", + "operations and operands" : [ + { + "operation" : "+", + "operand" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "age", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$users" + } + } + }, + { + "operation" : "+", + "operand" : + { + "iterator kind" : "CONST", + "value" : 1 + } + } + ] + } + } + ], + "update TTL" : false, + "isCompletePrimaryKey" : false, + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "users", + "row variable" : "$$users", + "index used" : "primary index", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : {"sid1":3,"sid2":4}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$$users", + "SELECT expressions" : [ + { + "field name" : "users", + "field expression" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$users" + } + } + ] + } + } +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q20.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q20.r new file mode 100644 index 00000000..9a8a79e2 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q20.r @@ -0,0 +1 @@ +Compile-Exception \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q3.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q3.r new file mode 100644 index 00000000..64ecfdde --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q3.r @@ -0,0 +1,275 @@ +compiled-query-plan +{ +"query file" : "idc_multirow_update/q/q3.q", +"plan" : +{ + "iterator kind" : "RECEIVE", + "distribution kind" : "SINGLE_PARTITION", + "input 
iterator" : + { + "iterator kind" : "UPDATE_ROW", + "indexes to update" : [ ], + "update clauses" : [ + { + "iterator kind" : "SET", + "clone new values" : false, + "theIsMRCounterDec" : false, + "theJsonMRCounterColPos" : -1, + "theIsJsonMRCounterUpdate" : false, + "target iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "areacode", + "input iterator" : + { + "iterator kind" : "ARRAY_FILTER", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "phones", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "info", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$u" + } + } + } + } + }, + "new value iterator" : + { + "iterator kind" : "ADD_SUBTRACT", + "operations and operands" : [ + { + "operation" : "+", + "operand" : + { + "iterator kind" : "VAR_REF", + "variable" : "$" + } + }, + { + "operation" : "+", + "operand" : + { + "iterator kind" : "CONST", + "value" : 1 + } + } + ] + } + }, + { + "iterator kind" : "ADD", + "clone new values" : false, + "theIsMRCounterDec" : false, + "theJsonMRCounterColPos" : -1, + "theIsJsonMRCounterUpdate" : false, + "target iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "friends", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "info", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$u" + } + } + }, + "new value iterator" : + { + "iterator kind" : "SEQ_CONCAT", + "input iterators" : [ + { + "iterator kind" : "CONST", + "value" : "Ada" + }, + { + "iterator kind" : "CONST", + "value" : "Aris" + } + ] + } + }, + { + "iterator kind" : "ADD", + "clone new values" : false, + "theIsMRCounterDec" : false, + "theJsonMRCounterColPos" : -1, + "theIsJsonMRCounterUpdate" : false, + "target iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "phones", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "info", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$u" + } + } + }, + "position iterator" : + { + "iterator kind" : "CONST", + "value" : 2 + }, + "new value iterator" : + { + "iterator kind" : "MAP_CONSTRUCTOR", + "input iterators" : [ + { + "iterator kind" : "CONST", + "value" : "areacode" + }, + { + "iterator kind" : "CONST", + "value" : 876 + }, + { + "iterator kind" : "CONST", + "value" : "number" + }, + { + "iterator kind" : "CONST", + "value" : 3872730 + }, + { + "iterator kind" : "CONST", + "value" : "kind" + }, + { + "iterator kind" : "CONST", + "value" : "home2" + } + ] + } + }, + { + "iterator kind" : "PUT", + "clone new values" : false, + "theIsMRCounterDec" : false, + "theJsonMRCounterColPos" : -1, + "theIsJsonMRCounterUpdate" : false, + "target iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "address", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "info", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$u" + } + } + }, + "new value iterator" : + { + "iterator kind" : "MAP_CONSTRUCTOR", + "input iterators" : [ + { + "iterator kind" : "CONST", + "value" : "country" + }, + { + "iterator kind" : "CONST", + "value" : "USA" + } + ] + } + }, + { + "iterator kind" : "REMOVE", + "clone new values" : false, + "theIsMRCounterDec" : false, + "theJsonMRCounterColPos" : -1, + "theIsJsonMRCounterUpdate" : false, + "target iterator" : + { + "iterator kind" : "ARRAY_FILTER", + "predicate iterator" : + { + "iterator kind" : "EQUAL", + "left operand" : + { + "iterator kind" : 
"FIELD_STEP", + "field name" : "kind", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$element" + } + }, + "right operand" : + { + "iterator kind" : "CONST", + "value" : "office" + } + }, + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "phones", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "info", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$u" + } + } + } + } + } + ], + "update TTL" : false, + "isCompletePrimaryKey" : false, + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "users", + "row variable" : "$$u", + "index used" : "primary index", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : {"sid1":0,"sid2":1}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$$u", + "SELECT expressions" : [ + { + "field name" : "u", + "field expression" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$u" + } + } + ] + } + } +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q4.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q4.r new file mode 100644 index 00000000..a6b09182 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q4.r @@ -0,0 +1,143 @@ +compiled-query-plan +{ +"query file" : "idc_multirow_update/q/q4.q", +"plan" : +{ + "iterator kind" : "RECEIVE", + "distribution kind" : "SINGLE_PARTITION", + "input iterator" : + { + "iterator kind" : "UPDATE_ROW", + "indexes to update" : [ ], + "update clauses" : [ + { + "iterator kind" : "ADD", + "clone new values" : false, + "theIsMRCounterDec" : false, + "theJsonMRCounterColPos" : -1, + "theIsJsonMRCounterUpdate" : false, + "target iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "friends", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "info", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$t" + } + } + }, + "new value iterator" : + { + "iterator kind" : "CONST", + "value" : "Jerry" + } + }, + { + "iterator kind" : "PUT", + "clone new values" : false, + "theIsMRCounterDec" : false, + "theJsonMRCounterColPos" : -1, + "theIsJsonMRCounterUpdate" : false, + "target iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "info", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$t" + } + }, + "new value iterator" : + { + "iterator kind" : "MAP_CONSTRUCTOR", + "input iterators" : [ + { + "iterator kind" : "CONST", + "value" : "hobbies" + }, + { + "iterator kind" : "ARRAY_CONSTRUCTOR", + "conditional" : false, + "input iterators" : [ + { + "iterator kind" : "CONST", + "value" : "Cooking" + }, + { + "iterator kind" : "CONST", + "value" : "Music" + } + ] + } + ] + } + }, + { + "iterator kind" : "REMOVE", + "clone new values" : false, + "theIsMRCounterDec" : false, + "theJsonMRCounterColPos" : -1, + "theIsJsonMRCounterUpdate" : false, + "target iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "street", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "address", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "info", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$t" + } + } + } + } + } + ], + "update TTL" : false, + "isCompletePrimaryKey" : false, + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : 
+ { + "iterator kind" : "TABLE", + "target table" : "users", + "row variable" : "$$t", + "index used" : "primary index", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : {"sid1":3,"sid2":4,"pid1":3}, + "range conditions" : { "pid2" : { "end value" : 3, "end inclusive" : false } } + } + ], + "position in join" : 0 + }, + "FROM variable" : "$$t", + "SELECT expressions" : [ + { + "field name" : "t", + "field expression" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$t" + } + } + ] + } + } +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q5.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q5.r new file mode 100644 index 00000000..9a8a79e2 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q5.r @@ -0,0 +1 @@ +Compile-Exception \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q6.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q6.r new file mode 100644 index 00000000..810dc555 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q6.r @@ -0,0 +1,92 @@ +compiled-query-plan +{ +"query file" : "idc_multirow_update/q/q6.q", +"plan" : +{ + "iterator kind" : "RECEIVE", + "distribution kind" : "SINGLE_PARTITION", + "input iterator" : + { + "iterator kind" : "UPDATE_ROW", + "indexes to update" : [ "idxAge" ], + "update clauses" : [ + { + "iterator kind" : "SET", + "clone new values" : false, + "theIsMRCounterDec" : false, + "theJsonMRCounterColPos" : -1, + "theIsJsonMRCounterUpdate" : false, + "target iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "age", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$users" + } + }, + "new value iterator" : + { + "iterator kind" : "ADD_SUBTRACT", + "operations and operands" : [ + { + "operation" : "+", + "operand" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "age", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$users" + } + } + }, + { + "operation" : "+", + "operand" : + { + "iterator kind" : "CONST", + "value" : 1 + } + } + ] + } + } + ], + "update TTL" : false, + "isCompletePrimaryKey" : false, + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "users", + "row variable" : "$$users", + "index used" : "primary index", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : {"sid1":1,"sid2":2,"pid1":1}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$$users", + "SELECT expressions" : [ + { + "field name" : "users", + "field expression" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$users" + } + } + ] + } + } +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q7.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q7.r new file mode 100644 index 00000000..c0663a3a --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q7.r @@ -0,0 +1,107 @@ +compiled-query-plan +{ +"query file" : "idc_multirow_update/q/q7.q", +"plan" : +{ + "iterator kind" : "RECEIVE", + "distribution kind" : "SINGLE_PARTITION", + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "UPDATE_ROW", + "indexes to update" : [ "idxAge" ], + "update clauses" : [ + { + "iterator kind" : "SET", + 
"clone new values" : false, + "theIsMRCounterDec" : false, + "theJsonMRCounterColPos" : -1, + "theIsJsonMRCounterUpdate" : false, + "target iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "age", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$u" + } + }, + "new value iterator" : + { + "iterator kind" : "ADD_SUBTRACT", + "operations and operands" : [ + { + "operation" : "+", + "operand" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "age", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$u" + } + } + }, + { + "operation" : "+", + "operand" : + { + "iterator kind" : "CONST", + "value" : 1 + } + } + ] + } + } + ], + "update TTL" : false, + "isCompletePrimaryKey" : true, + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "users", + "row variable" : "$$u", + "index used" : "primary index", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : {"sid1":2,"sid2":3,"pid1":2,"pid2":1}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$$u", + "SELECT expressions" : [ + { + "field name" : "u", + "field expression" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$u" + } + } + ] + } + }, + "FROM variable" : "$$u", + "SELECT expressions" : [ + { + "field name" : "u", + "field expression" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$u" + } + } + ] + } +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q8.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q8.r new file mode 100644 index 00000000..6da08b66 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q8.r @@ -0,0 +1,55 @@ +compiled-query-plan +{ +"query file" : "idc_multirow_update/q/q8.q", +"plan" : +{ + "iterator kind" : "RECEIVE", + "distribution kind" : "SINGLE_PARTITION", + "input iterator" : + { + "iterator kind" : "UPDATE_ROW", + "indexes to update" : [ "idxAge", "idxCode" ], + "update clauses" : [ + + ], + "update TTL" : true, + "TimeUnit" : "DAYS", + "TTL iterator" : + { + "iterator kind" : "CONST", + "value" : 2 + }, + "isCompletePrimaryKey" : false, + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "users", + "row variable" : "$$u", + "index used" : "primary index", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : {"sid1":1,"sid2":2}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$$u", + "SELECT expressions" : [ + { + "field name" : "u", + "field expression" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$u" + } + } + ] + } + } +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q9.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q9.r new file mode 100644 index 00000000..245757ea --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/explans/q9.r @@ -0,0 +1,100 @@ +compiled-query-plan +{ +"query file" : "idc_multirow_update/q/q9.q", +"plan" : +{ + "iterator kind" : "RECEIVE", + "distribution kind" : "SINGLE_PARTITION", + "input iterator" : + { + "iterator kind" : "UPDATE_ROW", + "indexes to update" : [ "idxCode" ], + "update clauses" : [ + { + "iterator kind" : "PUT", + "clone new values" : true, + "theIsMRCounterDec" : false, + "theJsonMRCounterColPos" : -1, + "theIsJsonMRCounterUpdate" 
: false, + "target iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "info", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$t" + } + }, + "new value iterator" : + { + "iterator kind" : "MAP_CONSTRUCTOR", + "input iterators" : [ + { + "iterator kind" : "CONST", + "value" : "code" + }, + { + "iterator kind" : "ARRAY_CONSTRUCTOR", + "conditional" : true, + "input iterators" : [ + { + "iterator kind" : "FIELD_STEP", + "field name" : "zipcode", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "address", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "info", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$t" + } + } + } + } + ] + } + ] + } + } + ], + "update TTL" : false, + "isCompletePrimaryKey" : false, + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "users", + "row variable" : "$$t", + "index used" : "primary index", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : {"sid1":3,"sid2":4}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$$t", + "SELECT expressions" : [ + { + "field name" : "t", + "field expression" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$t" + } + } + ] + } + } +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/fuzz01.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/fuzz01.r new file mode 100644 index 00000000..79b0604f --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/fuzz01.r @@ -0,0 +1,3 @@ +unordered-result +{"prim":1,"ida1":"dfhvbjbdk b vjk vdbvjkdfbvkdbfb ","ida2":-9223372036854775808,"ida3":["!@#$%^&*()"],"ida4":-Infinity,"ida5":{"new_entry":NaN,"subid1":3.4028235E38,"subid2":1.17549435E-38,"subid3":-3.4028235E38,"subid4":-1.17549435E-38,"subid5":0.0,"subid6":-0.0,"subid7":1.4012985E-45,"subid8":-1.4012985E-45,"subid9":NaN}} +{"prim":2,"ida1":"dfhvbjbdk b vjk vdbvjkdfbvkdbfb ","ida2":-9223372036854775808,"ida3":["!@#$%^&*()"],"ida4":-Infinity,"ida5":{"new_entry":NaN,"outer":-5.0}} \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/fuzz02.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/fuzz02.r new file mode 100644 index 00000000..2226163a --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/fuzz02.r @@ -0,0 +1,3 @@ +unordered-result +{"prim":1,"ida1":"VDVFY#BHJ###hbvfh$%^&*(((()&*@&#vbhdbjvbh","ida2":9223372036854775807,"ida3":["123","!@#$%^&*()","","","",""," ","$&`||\\vjfdnk","456","789"],"ida4":5.0987,"ida5":{"new_entry":-3.4028235E38}} +{"prim":2,"ida1":"VDVFY#BHJ###hbvfh$%^&*(((()&*@&#vbhdbjvbh","ida2":9223372036854775807,"ida3":["123","!@#$%^&*()","","","",""," ","$&`||\\vjfdnk","456","789"],"ida4":5.0987,"ida5":{"ey@#₹🚀":23233.323,"new_entry":-3.4028235E38}} \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/fuzz03.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/fuzz03.r new file mode 100644 index 00000000..614a061e --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/fuzz03.r @@ -0,0 +1,3 @@ +unordered-result +{"prim":1,"ida1":"&`b|nm./^*@!漢字 😀漢","ida2":4793594583598,"ida3":["yash","!@#$%^&*()"],"ida4":NaN,"ida5":{"key":-56.0,"new_entry":NaN}} 
+{"prim":2,"ida1":"&`b|nm./^*@!漢字 😀漢","ida2":4793594583598,"ida3":["yash","!@#$%^&*()"],"ida4":NaN,"ida5":{"key1":-48959.0,"new_entry":NaN}} \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q1.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q1.r new file mode 100644 index 00000000..9a8a79e2 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q1.r @@ -0,0 +1 @@ +Compile-Exception \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q10.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q10.r new file mode 100644 index 00000000..91204e3b --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q10.r @@ -0,0 +1,4 @@ +unordered-result +{"pid1":0,"pid2":1,"seqNo":1} +{"pid1":0,"pid2":2,"seqNo":2} +{"pid1":0,"pid2":3,"seqNo":3} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q11.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q11.r new file mode 100644 index 00000000..cfdf146c --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q11.r @@ -0,0 +1 @@ +Runtime-Exception \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q12.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q12.r new file mode 100644 index 00000000..cfdf146c --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q12.r @@ -0,0 +1 @@ +Runtime-Exception \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q13.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q13.r new file mode 100644 index 00000000..45e53470 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q13.r @@ -0,0 +1,2 @@ +unordered-result +{"NumRowsUpdated":1000} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q14.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q14.r new file mode 100644 index 00000000..ff516ed6 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q14.r @@ -0,0 +1,3 @@ +unordered-result +{"j":{"address":{"State":"MA","city":"Burlington"},"firstThread":true,"majorKey1":"k1","majorKey2":"k2","minorKey":"m1","partition":null,"shard":null},"remaining_days":8} +{"j":{"address":{"State":"MA","city":"Burlington"},"firstThread":true,"majorKey1":"k1","majorKey2":"k2","minorKey":"m2","partition":null,"shard":null},"remaining_days":8} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q15.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q15.r new file mode 100644 index 00000000..809eaa48 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q15.r @@ -0,0 +1,3 @@ +unordered-result +{"minorKey":"l1","address":{"phones":[{"areacode":368,"kind":"mobile","number":8674289},{"areacode":368,"kind":"office","number":9673449},{"areacode":234,"kind":"home","number":1234567}],"pin":8437}} +{"minorKey":"l2","address":{"phones":[{"areacode":435,"kind":"mobile","number":7437849},{"areacode":234,"kind":"home","number":1234567}],"pin":743683}} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q17.r 
b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q17.r new file mode 100644 index 00000000..1fc933b9 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q17.r @@ -0,0 +1,3 @@ +unordered-result +{"fc":{"sid":0,"id":1,"id2":2,"str":"child1 of parent Foo","ts":"2000-12-20T12:30:01.234","info":{"address":{"city":"San Fransisco","phones":[{"areacode":409,"kind":"work","number":80},{"areacode":570,"number":51},{"areacode":580,"number":51}],"state":"CA"},"age":10,"children":{"John":{"age":10,"friends":["Anna","John","Maria"],"school":"sch_1"},"Lisa":null,"Mary":7,"Rahul":22,"Trump":null},"firstName":"Yash"}},"remaining_days":5} +{"fc":{"sid":0,"id":2,"id2":3,"str":"child1 of parent Foo","ts":"2001-12-20T12:30:01.235","info":{"address":{"city":"San Jose","phones":[{"areacode":409,"kind":"work","number":70},{"areacode":416,"kind":"home","number":74},{"areacode":570,"number":51},{"areacode":580,"number":51}],"state":"CA"},"age":30,"children":{"Kathy":{"age":10,"friends":["Anna","Mark","Maria"],"school":"sch_3"},"Rahul":22,"Trump":null,"Will":{"age":2,"friends":["Ada"],"school":"sch_1"}},"firstName":"Kunal"}},"remaining_days":5} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q18.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q18.r new file mode 100644 index 00000000..59973456 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q18.r @@ -0,0 +1,3 @@ +unordered-result +{"f":{"id":1,"sid":0,"record":{"long":40,"int":102,"string":"aef","bool":true,"float":5.0},"info":{"address":{"city":"San Fransisco","phones":[{"areacode":409,"kind":"work","number":50},"650-234-4556",650234455,{"areacode":570,"number":51},{"areacode":580,"number":51}],"state":"CA"},"age":10,"children":{"John":{"age":10,"friends":["Anna","John","Maria"],"school":"sch_1"},"Mary":{"age":7,"friends":["Anna","Mark"],"school":"sch_3"},"Rahul":22,"Trump":null},"firstName":"first0"}},"remaining_days":10} +{"f":{"id":2,"sid":0,"record":{"long":50,"int":102,"string":"xyz","bool":true,"float":3.0},"info":{"address":{"city":"Boston","phones":[{"areacode":305,"kind":"work","number":30},{"areacode":561,"kind":"work","number":55},{"areacode":570,"number":51},{"areacode":580,"number":51}],"state":"MA"},"age":11,"children":{"Anna":{"age":9,"friends":["Bobby","John"],"school":"sch_1"},"Dave":{"age":15,"friends":["Bill","Sam"],"school":"sch_3"},"Rahul":22,"Trump":null},"firstName":"first1"}},"remaining_days":10} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q19.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q19.r new file mode 100644 index 00000000..9a8a79e2 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q19.r @@ -0,0 +1 @@ +Compile-Exception \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q2.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q2.r new file mode 100644 index 00000000..d4be0a8f --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q2.r @@ -0,0 +1,3 @@ +unordered-result +{"name":"ERNESTINE","age":23,"seqNo":48} +{"name":"YASH","age":23,"seqNo":7} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q20.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q20.r new file mode 100644 index 00000000..553beb92 
--- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q20.r @@ -0,0 +1 @@ +Compile-Exception \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q3.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q3.r new file mode 100644 index 00000000..50d0e796 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q3.r @@ -0,0 +1,4 @@ +unordered-result +{"pid1":0,"pid2":1,"info":{"address":{"country":"USA","state":"North Dakota","street":"560 Box Street Idledale","zipcode":15934},"email":"paulsmith@yahoo.com","favoriteColor":"green","friends":["Sharpe","Tamera","Ada","Aris"],"height":170,"phones":[{"areacode":651,"kind":"mobile","number":3789021},{"areacode":876,"kind":"home2","number":3872730},{"areacode":416,"kind":"home","number":6096010}]},"days":-1} +{"pid1":0,"pid2":2,"info":{"address":{"country":"USA","state":"New York","street":"584 Beayer Place Fontanelle","zipcode":42601},"email":"kochsmith@yahoo.com","favoriteColor":"red","friends":["Obrien","Ada","Aris"],"height":170,"phones":[{"areacode":213,"kind":"mobile","number":2993747},{"areacode":876,"kind":"home2","number":3872730},{"areacode":765,"kind":"home","number":8028673}]},"days":-1} +{"pid1":0,"pid2":3,"info":{"address":{"country":"USA","state":"Iowa","street":"996 Sedgwick Place Elfrida","zipcode":15871},"email":"fischersmith@yahoo.com","favoriteColor":"green","friends":["Bianca","Ada","Aris"],"height":168,"phones":[{"areacode":895,"kind":"mobile","number":5746587},{"areacode":876,"kind":"home2","number":3872730},{"areacode":919,"kind":"home","number":3298890}]},"days":-1} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q4.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q4.r new file mode 100644 index 00000000..e9d71d62 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q4.r @@ -0,0 +1,3 @@ +unordered-result +{"pid1":3,"pid2":1,"info":{"address":{"state":"American Samoa","zipcode":43222},"email":"chrissmith@yahoo.com","favoriteColor":"green","friends":["Herman","Carmela","Jerry"],"height":178,"hobbies":["Cooking","Music"],"phones":[{"areacode":434,"kind":"office","number":5343434},{"areacode":545,"kind":"mobile","number":2332323},{"areacode":323,"kind":"home","number":5433443}]}} +{"pid1":3,"pid2":2,"info":{"address":{"state":"Rajasthan","zipcode":"unknown"},"email":"yashaga@yahoo.com","favoriteColor":"black","friends":["Kunal","Rajdeep","Jerry"],"height":178,"hobbies":["Cooking","Music"],"phones":[{"areacode":444,"kind":"office","number":1111111},{"areacode":555,"kind":"mobile","number":2222222},{"areacode":666,"kind":"home","number":3333333}]}} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q5.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q5.r new file mode 100644 index 00000000..9a8a79e2 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q5.r @@ -0,0 +1 @@ +Compile-Exception \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q6.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q6.r new file mode 100644 index 00000000..7b1cfca0 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q6.r @@ -0,0 +1,4 @@ +unordered-result +{"pid1":1,"pid2":3,"age":27} 
+{"pid1":1,"pid2":1,"age":29} +{"pid1":1,"pid2":2,"age":40} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q7.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q7.r new file mode 100644 index 00000000..00f19aa5 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q7.r @@ -0,0 +1,2 @@ +unordered-result +{"sid1":2,"sid2":3,"pid1":2,"pid2":1,"name":"Caitlin","age":37,"seqNo":1,"info":{"address":{"state":"Wisconsin","street":"360 Herkimer Court Bethany","zipcode":35536},"email":"caitlinsmith@yahoo.com","favoriteColor":"blue","friends":["Kidd"],"height":160,"phones":[{"areacode":424,"kind":"office","number":5455450},{"areacode":424,"kind":"mobile","number":4545521},{"areacode":645,"kind":"home","number":5545450}]}} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q8.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q8.r new file mode 100644 index 00000000..8de7ac9b --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q8.r @@ -0,0 +1,4 @@ +unordered-result +{"name":"Hardin","days":2} +{"name":"Fowler","days":2} +{"name":"Rosemarie","days":2} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q9.r b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q9.r new file mode 100644 index 00000000..cfdf146c --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/expres/q9.r @@ -0,0 +1 @@ +Runtime-Exception \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/fuzz01.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/fuzz01.q new file mode 100644 index 00000000..9321c38b --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/fuzz01.q @@ -0,0 +1,10 @@ +update A a +set ida2 = -9223372036854775808, +add a.ida3 5 "!@#$%^&*()", +set ida4 = -1.0 / 0.0, +put a.ida5 {"new_entry": cast('NaN' as double)} +where ida1 = "dfhvbjbdk b vjk vdbvjkdfbvkdbfb " + +select * +from A +where ida1 = "dfhvbjbdk b vjk vdbvjkdfbvkdbfb " \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/fuzz02.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/fuzz02.q new file mode 100644 index 00000000..51a2f887 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/fuzz02.q @@ -0,0 +1,10 @@ +update A a +set ida2 = 9223372036854775807, +add a.ida3 1 "!@#$%^&*()", +set ida4 = 5.0987, +put a.ida5 {"new_entry": cast(-3.4028235E+38 as double)} +where ida1 = "VDVFY#BHJ###hbvfh$%^&*(((()&*@&#vbhdbjvbh" + +select * +from A +where ida1 = "VDVFY#BHJ###hbvfh$%^&*(((()&*@&#vbhdbjvbh" \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/fuzz03.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/fuzz03.q new file mode 100644 index 00000000..65e18e6f --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/fuzz03.q @@ -0,0 +1,10 @@ +update A a +set ida2 = 4793594583598, +add a.ida3 1 "!@#$%^&*()", +set ida4 = cast('NaN' as double), +put a.ida5 {"new_entry": cast('NaN' as double)} +where ida1 = "&`b|nm./^*@!\u6F22\u5B57 \uD83D\uDE00\u6F22" + +select * +from A +where ida1 = "&`b|nm./^*@!\u6F22\u5B57 \uD83D\uDE00\u6F22" \ No newline at end of file diff --git 
a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q1.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q1.q new file mode 100644 index 00000000..69ffd511 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q1.q @@ -0,0 +1,9 @@ +# +# Update fail: +# Full shard key must be provided for update +# + +update users +set name = upper(name), +set age = age + 1 +where sid1 = 0 \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q10.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q10.q new file mode 100644 index 00000000..50df89a9 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q10.q @@ -0,0 +1,7 @@ +update users +set seqNo = null +where sid1 = 0 and sid2 = 1 + +select pid1, pid2, seqNo +from users +where sid1 = 0 and sid2 = 1 \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q11.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q11.q new file mode 100644 index 00000000..5ce99bb9 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q11.q @@ -0,0 +1,9 @@ +# +# Update fail: +# Some rows fail, while others succeed +# + +update users u +set u.info.height = u.info.height + 1 +where sid1 = 2 and sid2 = 3 + diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q12.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q12.q new file mode 100644 index 00000000..8cb89e66 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q12.q @@ -0,0 +1,9 @@ +# +# Update fail: +# The number of affected rows exceeds the +# MAX limit (1000) +# + +update limit +set age = age + 1 +where sid = 0 \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q13.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q13.q new file mode 100644 index 00000000..a338175c --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q13.q @@ -0,0 +1,3 @@ +update limit +set age = age + 1 +where sid = 0 and pid < 1000 \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q14.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q14.q new file mode 100644 index 00000000..bdbce3cf --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q14.q @@ -0,0 +1,10 @@ +update jsoncol $j +put $j {"address":{"city":"Burlington", "State":"MA"}}, +set firstThread = true, +remove index, +set ttl 8 days +where majorKey1 = "k1" and majorKey2 = "k2" + +select $j, remaining_days($j) as remaining_days +from jsoncol $j +where majorKey1 = "k1" and majorKey2 = "k2" \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q15.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q15.q new file mode 100644 index 00000000..0bef24b0 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q15.q @@ -0,0 +1,10 @@ +update jsoncol j +add j.address[$element.phones is of type (ARRAY(any))].phones + { "areacode" : $areacode, "number" : $number, "kind" : $kind }, +set j.address[$element.phones is of type (MAP(any))].phones = + [$.values(), { "areacode" : $areacode, "number" : $number, "kind" : $kind }] +where majorKey1 = "j1" and majorKey2 = "j2" + +select 
minorKey, address +from jsoncol +where majorKey1 = "j1" and majorKey2 = "j2" \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q17.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q17.q new file mode 100644 index 00000000..da8e294d --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q17.q @@ -0,0 +1,12 @@ +update Foo.Child fc +set fc.str = $ || ' of parent Foo', +set fc.info.address.phones[].areacode = $ + 1, +set ttl 5 days, +add fc.info.address.phones seq_concat({ "areacode" : 570, "number" : 51 }, { "areacode" : 580, "number" : 51 }), +put fc.info.children seq_concat({"Rahul": 22},{"Trump": null}), +remove fc.info.lastName +where sid = 0 + +select $fc, remaining_days($fc) as remaining_days +from Foo.Child $fc +where sid = 0 \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q18.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q18.q new file mode 100644 index 00000000..f100d27a --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q18.q @@ -0,0 +1,12 @@ +update Foo f +set f.record.int = $ /10 + 100, +set f.info.address.phones[].areacode = $ + 1, +set ttl 10 days, +add f.info.address.phones seq_concat({ "areacode" : 570, "number" : 51 }, { "areacode" : 580, "number" : 51 }), +put f.info.children seq_concat({"Rahul": 22},{"Trump": null}), +remove f.info.lastName +where sid = 0 + +select $f, remaining_days($f) as remaining_days +from Foo $f +where sid = 0 \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q19.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q19.q new file mode 100644 index 00000000..ace5e921 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q19.q @@ -0,0 +1,7 @@ +update users u +set u.info.height = $ + 5 +where pid1 = 0 and u.info.height = 170 + +select pid1, pid2, u.info.height +from users u +where pid1 = 0 and u.info.height = 170 \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q2.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q2.q new file mode 100644 index 00000000..f4913d3e --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q2.q @@ -0,0 +1,8 @@ +update users +set name = upper(name), +set age = age + 1 +where sid1 = 3 and sid2 = 4 + +select name, age, seqNo +from users +where sid1 = 3 and sid2 = 4 \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q20.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q20.q new file mode 100644 index 00000000..55a768df --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q20.q @@ -0,0 +1,7 @@ +update users u +set u.info.height = $ + 5 +where sid1 = 3 and (sid2 = 4 or sid2 = 5) + +select pid1, pid2, u.info.height +from users u +where sid1 = 3 and (sid2 = 4 or sid2 = 5) \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q3.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q3.q new file mode 100644 index 00000000..f030cf7c --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q3.q @@ -0,0 +1,11 @@ +update users u +set u.info.phones[].areacode = $ + 1, +add u.info.friends seq_concat("Ada", "Aris"), +add 
u.info.phones 2 { "areacode":876, "number":3872730, "kind":"home2" }, +put u.info.address { "country": "USA" }, +remove u.info.phones [$element.kind = "office"] +where sid1 = 0 and sid2 = 1 + +select pid1, pid2, $u.info, remaining_days($u) as days +from users $u +where sid1 = 0 and sid2 = 1 \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q4.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q4.q new file mode 100644 index 00000000..3c71918b --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q4.q @@ -0,0 +1,9 @@ +update users t +add t.info.friends 'Jerry', +put t.info {"hobbies":["Cooking", "Music"]}, +remove t.info.address.street +where sid1 = 3 and sid2 = 4 and pid1 = 3 and pid2 < 3 + +select pid1, pid2, info +from users +where sid1 = 3 and sid2 = 4 diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q5.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q5.q new file mode 100644 index 00000000..2cb58577 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q5.q @@ -0,0 +1,10 @@ +# +# Update fail: +# RETURNING clause is not supported unless the complete primary key is +# specified in the WHERE clause. +# + +update users +set age = age + 1 +where sid1 = 0 and sid2 = 1 and pid1 = 0 +returning * diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q6.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q6.q new file mode 100644 index 00000000..3bb4b85e --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q6.q @@ -0,0 +1,7 @@ +update users +set age = age + 1 +where sid1 = 1 and sid2 = 2 and pid1 = 1 + +select pid1, pid2, age +from users +where sid1 = 1 and sid2 = 2 diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q7.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q7.q new file mode 100644 index 00000000..89da8e50 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q7.q @@ -0,0 +1,4 @@ +update users u +set age = age + 1 +where sid1 = 2 and sid2 = 3 and pid1 = 2 and pid2 = 1 +returning * \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q8.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q8.q new file mode 100644 index 00000000..57b82a4a --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q8.q @@ -0,0 +1,7 @@ +update users u +set ttl 2 days +where sid1 = 1 and sid2 = 2 + +select name, remaining_days($u) as days +from users $u +where sid1 = 1 and sid2 = 2 \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q9.q b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q9.q new file mode 100644 index 00000000..1079b952 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/q/q9.q @@ -0,0 +1,8 @@ +# +# Update fail: +# Invalid type for JSON index field +# + +update users t +put t.info {"code":t.info.address.zipcode} +where sid1 = 3 and sid2 = 4 \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/test.config b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/test.config new file mode 100644 index 00000000..e55ad5e4 --- /dev/null +++ 
b/kvtest/kvquery-IT/src/main/resources/cases/idc_multirow_update/test.config @@ -0,0 +1,22 @@ +# +# Contains tests for multi-row updates +# + +before-ddl-file = before.ddl +before-data-file = before.data + +run-mupd = q() = expres + +compile-mupd = q() = explans + +after-ddl-file = after.ddl + +var-$pid2 = 5 + +var-$areacode = 234 + +var-$number = 1234567 + +var-$kind = "home" + + diff --git a/kvtest/kvquery-IT/src/main/resources/cases/inner_joins/explans/lq01.r b/kvtest/kvquery-IT/src/main/resources/cases/inner_joins/explans/lq01.r index fffe3f5c..c7dcf5ba 100644 --- a/kvtest/kvquery-IT/src/main/resources/cases/inner_joins/explans/lq01.r +++ b/kvtest/kvquery-IT/src/main/resources/cases/inner_joins/explans/lq01.r @@ -1,4 +1,5 @@ compiled-query-plan + { "query file" : "inner_joins/q/lq01.q", "plan" : @@ -81,7 +82,8 @@ compiled-query-plan "target table" : "profile.messages", "row variable" : "$$msgs1", "index used" : "idx2_msgs_receivers", - "covering index" : false, + "covering index" : true, + "index row variable" : "$$msgs1_idx", "index scans" : [ { "equality conditions" : {"content.receivers[]":""}, @@ -99,18 +101,18 @@ compiled-query-plan ], "position in join" : 1 }, - "FROM variable" : "$$msgs1", + "FROM variable" : "$$msgs1_idx", "SELECT expressions" : [ { "field name" : "outerJoinVal1", "field expression" : { "iterator kind" : "FIELD_STEP", - "field name" : "uid", + "field name" : "#uid", "input iterator" : { "iterator kind" : "VAR_REF", - "variable" : "$$msgs1" + "variable" : "$$msgs1_idx" } } } @@ -207,11 +209,11 @@ compiled-query-plan "field expression" : { "iterator kind" : "FIELD_STEP", - "field name" : "msgid", + "field name" : "#msgid", "input iterator" : { "iterator kind" : "VAR_REF", - "variable" : "$$msgs1" + "variable" : "$$msgs1_idx" } } }, @@ -332,4 +334,4 @@ compiled-query-plan "value" : 10 } } -} +} \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/inner_joins/explans/lq02.r b/kvtest/kvquery-IT/src/main/resources/cases/inner_joins/explans/lq02.r index 21a54218..8f841cf4 100644 --- a/kvtest/kvquery-IT/src/main/resources/cases/inner_joins/explans/lq02.r +++ b/kvtest/kvquery-IT/src/main/resources/cases/inner_joins/explans/lq02.r @@ -1,4 +1,5 @@ compiled-query-plan + { "query file" : "inner_joins/q/lq02.q", "plan" : @@ -84,7 +85,8 @@ compiled-query-plan "target table" : "profile.messages", "row variable" : "$$msgs1", "index used" : "idx2_msgs_receivers", - "covering index" : false, + "covering index" : true, + "index row variable" : "$$msgs1_idx", "index scans" : [ { "equality conditions" : {"content.receivers[]":""}, @@ -102,18 +104,18 @@ compiled-query-plan ], "position in join" : 1 }, - "FROM variable" : "$$msgs1", + "FROM variable" : "$$msgs1_idx", "SELECT expressions" : [ { "field name" : "outerJoinVal1", "field expression" : { "iterator kind" : "FIELD_STEP", - "field name" : "uid", + "field name" : "#uid", "input iterator" : { "iterator kind" : "VAR_REF", - "variable" : "$$msgs1" + "variable" : "$$msgs1_idx" } } } @@ -210,11 +212,11 @@ compiled-query-plan "field expression" : { "iterator kind" : "FIELD_STEP", - "field name" : "msgid", + "field name" : "#msgid", "input iterator" : { "iterator kind" : "VAR_REF", - "variable" : "$$msgs1" + "variable" : "$$msgs1_idx" } } }, @@ -301,4 +303,4 @@ compiled-query-plan "value" : 10 } } -} +} \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/inner_joins/explans/lq03.r b/kvtest/kvquery-IT/src/main/resources/cases/inner_joins/explans/lq03.r index 
fef0fa3c..43264b51 100644 --- a/kvtest/kvquery-IT/src/main/resources/cases/inner_joins/explans/lq03.r +++ b/kvtest/kvquery-IT/src/main/resources/cases/inner_joins/explans/lq03.r @@ -1,4 +1,5 @@ compiled-query-plan + { "query file" : "inner_joins/q/lq03.q", "plan" : @@ -77,7 +78,8 @@ compiled-query-plan "target table" : "profile.messages", "row variable" : "$$msgs1", "index used" : "idx2_msgs_receivers", - "covering index" : false, + "covering index" : true, + "index row variable" : "$$msgs1_idx", "index scans" : [ { "equality conditions" : {"content.receivers[]":""}, @@ -95,18 +97,18 @@ compiled-query-plan ], "position in join" : 1 }, - "FROM variable" : "$$msgs1", + "FROM variable" : "$$msgs1_idx", "SELECT expressions" : [ { "field name" : "outerJoinVal1", "field expression" : { "iterator kind" : "FIELD_STEP", - "field name" : "uid", + "field name" : "#uid", "input iterator" : { "iterator kind" : "VAR_REF", - "variable" : "$$msgs1" + "variable" : "$$msgs1_idx" } } } @@ -217,11 +219,11 @@ compiled-query-plan "field expression" : { "iterator kind" : "FIELD_STEP", - "field name" : "msgid", + "field name" : "#msgid", "input iterator" : { "iterator kind" : "VAR_REF", - "variable" : "$$msgs1" + "variable" : "$$msgs1_idx" } } }, @@ -251,4 +253,4 @@ compiled-query-plan } } } -} +} \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/inner_joins/explans/lq04.r b/kvtest/kvquery-IT/src/main/resources/cases/inner_joins/explans/lq04.r index acad456b..132b46c3 100644 --- a/kvtest/kvquery-IT/src/main/resources/cases/inner_joins/explans/lq04.r +++ b/kvtest/kvquery-IT/src/main/resources/cases/inner_joins/explans/lq04.r @@ -1,4 +1,5 @@ compiled-query-plan + { "query file" : "inner_joins/q/lq04.q", "plan" : @@ -83,7 +84,8 @@ compiled-query-plan "target table" : "profile.messages", "row variable" : "$$msgs1", "index used" : "idx2_msgs_receivers", - "covering index" : false, + "covering index" : true, + "index row variable" : "$$msgs1_idx", "index scans" : [ { "equality conditions" : {"content.receivers[]":""}, @@ -101,18 +103,18 @@ compiled-query-plan ], "position in join" : 1 }, - "FROM variable" : "$$msgs1", + "FROM variable" : "$$msgs1_idx", "SELECT expressions" : [ { "field name" : "outerJoinVal1", "field expression" : { "iterator kind" : "FIELD_STEP", - "field name" : "uid", + "field name" : "#uid", "input iterator" : { "iterator kind" : "VAR_REF", - "variable" : "$$msgs1" + "variable" : "$$msgs1_idx" } } } @@ -232,11 +234,11 @@ compiled-query-plan "field expression" : { "iterator kind" : "FIELD_STEP", - "field name" : "msgid", + "field name" : "#msgid", "input iterator" : { "iterator kind" : "VAR_REF", - "variable" : "$$msgs1" + "variable" : "$$msgs1_idx" } } }, @@ -266,4 +268,4 @@ compiled-query-plan } } } -} +} \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/inner_joins/explans/lq05.r b/kvtest/kvquery-IT/src/main/resources/cases/inner_joins/explans/lq05.r index b7b80b90..d85e4b03 100644 --- a/kvtest/kvquery-IT/src/main/resources/cases/inner_joins/explans/lq05.r +++ b/kvtest/kvquery-IT/src/main/resources/cases/inner_joins/explans/lq05.r @@ -1,4 +1,5 @@ compiled-query-plan + { "query file" : "inner_joins/q/lq05.q", "plan" : @@ -83,7 +84,8 @@ compiled-query-plan "target table" : "profile.messages", "row variable" : "$$msgs1", "index used" : "idx2_msgs_receivers", - "covering index" : false, + "covering index" : true, + "index row variable" : "$$msgs1_idx", "index scans" : [ { "equality conditions" : {"content.receivers[]":""}, @@ -101,18 
+103,18 @@ compiled-query-plan ], "position in join" : 1 }, - "FROM variable" : "$$msgs1", + "FROM variable" : "$$msgs1_idx", "SELECT expressions" : [ { "field name" : "outerJoinVal1", "field expression" : { "iterator kind" : "FIELD_STEP", - "field name" : "uid", + "field name" : "#uid", "input iterator" : { "iterator kind" : "VAR_REF", - "variable" : "$$msgs1" + "variable" : "$$msgs1_idx" } } } @@ -232,11 +234,11 @@ compiled-query-plan "field expression" : { "iterator kind" : "FIELD_STEP", - "field name" : "msgid", + "field name" : "#msgid", "input iterator" : { "iterator kind" : "VAR_REF", - "variable" : "$$msgs1" + "variable" : "$$msgs1_idx" } } }, @@ -266,4 +268,4 @@ compiled-query-plan } } } -} +} \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/inner_joins/explans/oq01.r b/kvtest/kvquery-IT/src/main/resources/cases/inner_joins/explans/oq01.r index bb0a5309..027b530e 100644 --- a/kvtest/kvquery-IT/src/main/resources/cases/inner_joins/explans/oq01.r +++ b/kvtest/kvquery-IT/src/main/resources/cases/inner_joins/explans/oq01.r @@ -1,4 +1,5 @@ compiled-query-plan + { "query file" : "inner_joins/q/oq01.q", "plan" : @@ -81,7 +82,8 @@ compiled-query-plan "target table" : "profile.messages", "row variable" : "$$msgs1", "index used" : "idx2_msgs_receivers", - "covering index" : false, + "covering index" : true, + "index row variable" : "$$msgs1_idx", "index scans" : [ { "equality conditions" : {"content.receivers[]":""}, @@ -99,18 +101,18 @@ compiled-query-plan ], "position in join" : 1 }, - "FROM variable" : "$$msgs1", + "FROM variable" : "$$msgs1_idx", "SELECT expressions" : [ { "field name" : "outerJoinVal1", "field expression" : { "iterator kind" : "FIELD_STEP", - "field name" : "uid", + "field name" : "#uid", "input iterator" : { "iterator kind" : "VAR_REF", - "variable" : "$$msgs1" + "variable" : "$$msgs1_idx" } } } @@ -207,11 +209,11 @@ compiled-query-plan "field expression" : { "iterator kind" : "FIELD_STEP", - "field name" : "msgid", + "field name" : "#msgid", "input iterator" : { "iterator kind" : "VAR_REF", - "variable" : "$$msgs1" + "variable" : "$$msgs1_idx" } } }, @@ -300,4 +302,4 @@ compiled-query-plan } ] } -} +} \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/inner_joins/explans/oq02.r b/kvtest/kvquery-IT/src/main/resources/cases/inner_joins/explans/oq02.r index d175b171..4a289715 100644 --- a/kvtest/kvquery-IT/src/main/resources/cases/inner_joins/explans/oq02.r +++ b/kvtest/kvquery-IT/src/main/resources/cases/inner_joins/explans/oq02.r @@ -1,4 +1,5 @@ compiled-query-plan + { "query file" : "inner_joins/q/oq02.q", "plan" : @@ -81,7 +82,8 @@ compiled-query-plan "target table" : "profile.messages", "row variable" : "$$msgs1", "index used" : "idx2_msgs_receivers", - "covering index" : false, + "covering index" : true, + "index row variable" : "$$msgs1_idx", "index scans" : [ { "equality conditions" : {"content.receivers[]":""}, @@ -99,18 +101,18 @@ compiled-query-plan ], "position in join" : 1 }, - "FROM variable" : "$$msgs1", + "FROM variable" : "$$msgs1_idx", "SELECT expressions" : [ { "field name" : "outerJoinVal1", "field expression" : { "iterator kind" : "FIELD_STEP", - "field name" : "uid", + "field name" : "#uid", "input iterator" : { "iterator kind" : "VAR_REF", - "variable" : "$$msgs1" + "variable" : "$$msgs1_idx" } } } @@ -207,11 +209,11 @@ compiled-query-plan "field expression" : { "iterator kind" : "FIELD_STEP", - "field name" : "msgid", + "field name" : "#msgid", "input iterator" : { "iterator 
kind" : "VAR_REF", - "variable" : "$$msgs1" + "variable" : "$$msgs1_idx" } } }, @@ -232,4 +234,4 @@ compiled-query-plan } } } -} +} \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/inner_joins/explans/oq03.r b/kvtest/kvquery-IT/src/main/resources/cases/inner_joins/explans/oq03.r index 25374514..f7786116 100644 --- a/kvtest/kvquery-IT/src/main/resources/cases/inner_joins/explans/oq03.r +++ b/kvtest/kvquery-IT/src/main/resources/cases/inner_joins/explans/oq03.r @@ -1,4 +1,5 @@ compiled-query-plan + { "query file" : "inner_joins/q/oq03.q", "plan" : @@ -84,7 +85,8 @@ compiled-query-plan "target table" : "profile.messages", "row variable" : "$$msgs1", "index used" : "idx2_msgs_receivers", - "covering index" : false, + "covering index" : true, + "index row variable" : "$$msgs1_idx", "index scans" : [ { "equality conditions" : {"content.receivers[]":""}, @@ -102,18 +104,18 @@ compiled-query-plan ], "position in join" : 1 }, - "FROM variable" : "$$msgs1", + "FROM variable" : "$$msgs1_idx", "SELECT expressions" : [ { "field name" : "outerJoinVal1", "field expression" : { "iterator kind" : "FIELD_STEP", - "field name" : "uid", + "field name" : "#uid", "input iterator" : { "iterator kind" : "VAR_REF", - "variable" : "$$msgs1" + "variable" : "$$msgs1_idx" } } } @@ -192,11 +194,11 @@ compiled-query-plan "field expression" : { "iterator kind" : "FIELD_STEP", - "field name" : "msgid", + "field name" : "#msgid", "input iterator" : { "iterator kind" : "VAR_REF", - "variable" : "$$msgs1" + "variable" : "$$msgs1_idx" } } }, @@ -278,4 +280,4 @@ compiled-query-plan } ] } -} +} \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/inner_joins/explans/oq04.r b/kvtest/kvquery-IT/src/main/resources/cases/inner_joins/explans/oq04.r index 2d3d5f45..450c05d2 100644 --- a/kvtest/kvquery-IT/src/main/resources/cases/inner_joins/explans/oq04.r +++ b/kvtest/kvquery-IT/src/main/resources/cases/inner_joins/explans/oq04.r @@ -1,4 +1,5 @@ compiled-query-plan + { "query file" : "inner_joins/q/oq04.q", "plan" : @@ -151,7 +152,7 @@ compiled-query-plan "target table" : "profile.messages", "row variable" : "$$msgs1", "index used" : "idx2_msgs_receivers", - "covering index" : false, + "covering index" : true, "index row variable" : "$$msgs1_idx", "index scans" : [ { @@ -189,7 +190,7 @@ compiled-query-plan }, "position in join" : 2 }, - "FROM variable" : "$$msgs1", + "FROM variable" : "$$msgs1_idx", "SELECT expressions" : [ ] @@ -235,11 +236,11 @@ compiled-query-plan "field expression" : { "iterator kind" : "FIELD_STEP", - "field name" : "msgid", + "field name" : "#msgid", "input iterator" : { "iterator kind" : "VAR_REF", - "variable" : "$$msgs1" + "variable" : "$$msgs1_idx" } } }, @@ -329,4 +330,4 @@ compiled-query-plan } ] } -} +} \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/inner_joins/explans/oq05.r b/kvtest/kvquery-IT/src/main/resources/cases/inner_joins/explans/oq05.r index c75dacf1..9da2722d 100644 --- a/kvtest/kvquery-IT/src/main/resources/cases/inner_joins/explans/oq05.r +++ b/kvtest/kvquery-IT/src/main/resources/cases/inner_joins/explans/oq05.r @@ -82,7 +82,8 @@ compiled-query-plan "target table" : "profile.messages", "row variable" : "$$msgs1", "index used" : "idx1_msgs_sender", - "covering index" : false, + "covering index" : true, + "index row variable" : "$$msgs1_idx", "index scans" : [ { "equality conditions" : {}, @@ -100,18 +101,18 @@ compiled-query-plan ], "position in join" : 1 }, - "FROM variable" : "$$msgs1", 
+ "FROM variable" : "$$msgs1_idx", "SELECT expressions" : [ { "field name" : "outerJoinVal1", "field expression" : { "iterator kind" : "FIELD_STEP", - "field name" : "uid", + "field name" : "#uid", "input iterator" : { "iterator kind" : "VAR_REF", - "variable" : "$$msgs1" + "variable" : "$$msgs1_idx" } } } @@ -195,16 +196,11 @@ compiled-query-plan "field expression" : { "iterator kind" : "FIELD_STEP", - "field name" : "sender", + "field name" : "content.sender", "input iterator" : { - "iterator kind" : "FIELD_STEP", - "field name" : "content", - "input iterator" : - { - "iterator kind" : "VAR_REF", - "variable" : "$$msgs1" - } + "iterator kind" : "VAR_REF", + "variable" : "$$msgs1_idx" } } }, @@ -226,11 +222,11 @@ compiled-query-plan "field expression" : { "iterator kind" : "FIELD_STEP", - "field name" : "msgid", + "field name" : "#msgid", "input iterator" : { "iterator kind" : "VAR_REF", - "variable" : "$$msgs1" + "variable" : "$$msgs1_idx" } } }, diff --git a/kvtest/kvquery-IT/src/main/resources/cases/inner_joins/explans/oq06.r b/kvtest/kvquery-IT/src/main/resources/cases/inner_joins/explans/oq06.r index bc88c32f..a963580c 100644 --- a/kvtest/kvquery-IT/src/main/resources/cases/inner_joins/explans/oq06.r +++ b/kvtest/kvquery-IT/src/main/resources/cases/inner_joins/explans/oq06.r @@ -1,257 +1,185 @@ compiled-query-plan + { "query file" : "inner_joins/q/oq06.q", "plan" : { - "iterator kind" : "SELECT", - "FROM" : + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_SHARDS", + "order by fields at positions" : [ 0, 1, 2, 1 ], + "input iterator" : { - "iterator kind" : "RECEIVE", - "distribution kind" : "ALL_SHARDS", - "order by fields at positions" : [ 0, 5, 2, 1 ], - "input iterator" : + "iterator kind" : "SELECT", + "FROM" : { - "iterator kind" : "SELECT", - "FROM" : - { - "iterator kind" : "NESTED_LOOP_JOIN", - "join predicates" : [ - { "outerBranch" :1, "outerExpr" : 0, "innerVar" : 0 }, - { "outerBranch" :0, "outerExpr" : 0, "innerVar" : 1 }, - { "outerBranch" :0, "outerExpr" : 1, "innerVar" : 2 } - ], - "branches" : [ + "iterator kind" : "NESTED_LOOP_JOIN", + "join predicates" : [ + { "outerBranch" :1, "outerExpr" : 0, "innerVar" : 0 }, + { "outerBranch" :0, "outerExpr" : 0, "innerVar" : 1 }, + { "outerBranch" :0, "outerExpr" : 1, "innerVar" : 2 } + ], + "branches" : [ + { + "iterator kind" : "SELECT", + "FROM" : { - "iterator kind" : "SELECT", - "FROM" : - { - "iterator kind" : "TABLE", - "target table" : "profile.messages", - "row variable" : "$$msgs2", - "index used" : "idx3_msgs_size", - "covering index" : false, - "index scans" : [ - { - "equality conditions" : {}, - "range conditions" : { "content.size" : { "start value" : 30, "start inclusive" : true } } - } - ], - "position in join" : 0 - }, - "FROM variable" : "$$msgs2", - "SELECT expressions" : [ + "iterator kind" : "TABLE", + "target table" : "profile.messages", + "row variable" : "$$msgs2", + "index used" : "idx3_msgs_size", + "covering index" : false, + "index scans" : [ { - "field name" : "outerJoinVal1", - "field expression" : - { - "iterator kind" : "FIELD_STEP", - "field name" : "uid", - "input iterator" : - { - "iterator kind" : "VAR_REF", - "variable" : "$$msgs2" - } - } - }, - { - "field name" : "outerJoinVal2", - "field expression" : - { - "iterator kind" : "FIELD_STEP", - "field name" : "sender", - "input iterator" : - { - "iterator kind" : "FIELD_STEP", - "field name" : "content", - "input iterator" : - { - "iterator kind" : "VAR_REF", - "variable" : "$$msgs2" - } - } - } + "equality conditions" : {}, + 
"range conditions" : { "content.size" : { "start value" : 30, "start inclusive" : true } } } - ] + ], + "position in join" : 0 }, - { - "iterator kind" : "SELECT", - "FROM" : + "FROM variable" : "$$msgs2", + "SELECT expressions" : [ { - "iterator kind" : "TABLE", - "target table" : "profile.messages", - "row variable" : "$$msgs1", - "index used" : "idx1_msgs_sender", - "covering index" : false, - "index scans" : [ - { - "equality conditions" : {}, - "range conditions" : { "content.sender" : { "start value" : "", "start inclusive" : true } } - } - ], - "key bind expressions" : [ - { - "iterator kind" : "EXTERNAL_VAR_REF", - "variable" : "$innerJoinVar2" - } - ], - "map of key bind expressions" : [ - [ 0, -1 ] - ], - "position in join" : 1 - }, - "FROM variable" : "$$msgs1", - "SELECT expressions" : [ + "field name" : "outerJoinVal1", + "field expression" : { - "field name" : "outerJoinVal1", - "field expression" : + "iterator kind" : "FIELD_STEP", + "field name" : "uid", + "input iterator" : { - "iterator kind" : "FIELD_STEP", - "field name" : "uid", - "input iterator" : - { - "iterator kind" : "VAR_REF", - "variable" : "$$msgs1" - } + "iterator kind" : "VAR_REF", + "variable" : "$$msgs2" } } - ] - }, - { - "iterator kind" : "SELECT", - "FROM" : + }, { - "iterator kind" : "TABLE", - "target table" : "profile", - "row variable" : "$$p", - "index used" : "primary index", - "covering index" : false, - "index scans" : [ - { - "equality conditions" : {"uid":0}, - "range conditions" : {} - } - ], - "key bind expressions" : [ - { - "iterator kind" : "EXTERNAL_VAR_REF", - "variable" : "$innerJoinVar1" - } - ], - "map of key bind expressions" : [ - [ 0 ] - ], - "index filtering predicate" : + "field name" : "outerJoinVal2", + "field expression" : { - "iterator kind" : "EQUAL", - "left operand" : - { - "iterator kind" : "EXTERNAL_VAR_REF", - "variable" : "$innerJoinVar0" - }, - "right operand" : + "iterator kind" : "FIELD_STEP", + "field name" : "sender", + "input iterator" : { "iterator kind" : "FIELD_STEP", - "field name" : "uid", + "field name" : "content", "input iterator" : { "iterator kind" : "VAR_REF", - "variable" : "$$p" + "variable" : "$$msgs2" } } - }, - "position in join" : 2 - }, - "FROM variable" : "$$p", - "SELECT expressions" : [ - - ] - } - ] - - }, - "FROM variable" : "$join-0", - "SELECT expressions" : [ - { - "field name" : "size", - "field expression" : - { - "iterator kind" : "FIELD_STEP", - "field name" : "size", - "input iterator" : - { - "iterator kind" : "FIELD_STEP", - "field name" : "content", - "input iterator" : - { - "iterator kind" : "VAR_REF", - "variable" : "$$msgs2" } } - } + ] }, { - "field name" : "sender", - "field expression" : + "iterator kind" : "SELECT", + "FROM" : { - "iterator kind" : "FIELD_STEP", - "field name" : "sender", - "input iterator" : - { - "iterator kind" : "FIELD_STEP", - "field name" : "content", - "input iterator" : + "iterator kind" : "TABLE", + "target table" : "profile.messages", + "row variable" : "$$msgs1", + "index used" : "idx1_msgs_sender", + "covering index" : true, + "index row variable" : "$$msgs1_idx", + "index scans" : [ { - "iterator kind" : "VAR_REF", - "variable" : "$$msgs1" + "equality conditions" : {}, + "range conditions" : { "content.sender" : { "start value" : "", "start inclusive" : true } } } - } - } - }, - { - "field name" : "msg2", - "field expression" : - { - "iterator kind" : "FIELD_STEP", - "field name" : "msgid", - "input iterator" : - { - "iterator kind" : "VAR_REF", - "variable" : "$$msgs2" - } - } - }, - { - 
"field name" : "msg1", - "field expression" : - { - "iterator kind" : "FIELD_STEP", - "field name" : "msgid", - "input iterator" : + ], + "key bind expressions" : [ + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar2" + } + ], + "map of key bind expressions" : [ + [ 0, -1 ] + ], + "position in join" : 1 + }, + "FROM variable" : "$$msgs1_idx", + "SELECT expressions" : [ { - "iterator kind" : "VAR_REF", - "variable" : "$$msgs1" + "field name" : "outerJoinVal1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "#uid", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$msgs1_idx" + } + } } - } + ] }, { - "field name" : "userName", - "field expression" : + "iterator kind" : "SELECT", + "FROM" : { - "iterator kind" : "FIELD_STEP", - "field name" : "userName", - "input iterator" : + "iterator kind" : "TABLE", + "target table" : "profile", + "row variable" : "$$p", + "index used" : "primary index", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : {"uid":0}, + "range conditions" : {} + } + ], + "key bind expressions" : [ + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar1" + } + ], + "map of key bind expressions" : [ + [ 0 ] + ], + "index filtering predicate" : { - "iterator kind" : "VAR_REF", - "variable" : "$$p" - } - } - }, + "iterator kind" : "EQUAL", + "left operand" : + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar0" + }, + "right operand" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "uid", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$p" + } + } + }, + "position in join" : 2 + }, + "FROM variable" : "$$p", + "SELECT expressions" : [ + + ] + } + ] + + }, + "FROM variable" : "$join-0", + "SELECT expressions" : [ + { + "field name" : "size", + "field expression" : { - "field name" : "sort_gen", - "field expression" : + "iterator kind" : "FIELD_STEP", + "field name" : "size", + "input iterator" : { "iterator kind" : "FIELD_STEP", - "field name" : "uid", + "field name" : "content", "input iterator" : { "iterator kind" : "VAR_REF", @@ -259,76 +187,60 @@ compiled-query-plan } } } - ] - } - }, - "FROM variable" : "$from-1", - "SELECT expressions" : [ - { - "field name" : "size", - "field expression" : - { - "iterator kind" : "FIELD_STEP", - "field name" : "size", - "input iterator" : - { - "iterator kind" : "VAR_REF", - "variable" : "$from-1" - } - } - }, - { - "field name" : "sender", - "field expression" : + }, { - "iterator kind" : "FIELD_STEP", "field name" : "sender", - "input iterator" : + "field expression" : { - "iterator kind" : "VAR_REF", - "variable" : "$from-1" + "iterator kind" : "FIELD_STEP", + "field name" : "content.sender", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$msgs1_idx" + } } - } - }, - { - "field name" : "msg2", - "field expression" : + }, { - "iterator kind" : "FIELD_STEP", "field name" : "msg2", - "input iterator" : + "field expression" : { - "iterator kind" : "VAR_REF", - "variable" : "$from-1" + "iterator kind" : "FIELD_STEP", + "field name" : "msgid", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$msgs2" + } } - } - }, - { - "field name" : "msg1", - "field expression" : + }, { - "iterator kind" : "FIELD_STEP", "field name" : "msg1", - "input iterator" : + "field expression" : { - "iterator kind" : "VAR_REF", - "variable" : "$from-1" + "iterator kind" : "FIELD_STEP", + "field name" : "#msgid", + "input iterator" : + { + 
"iterator kind" : "VAR_REF", + "variable" : "$$msgs1_idx" + } } - } - }, - { - "field name" : "userName", - "field expression" : + }, { - "iterator kind" : "FIELD_STEP", "field name" : "userName", - "input iterator" : + "field expression" : { - "iterator kind" : "VAR_REF", - "variable" : "$from-1" + "iterator kind" : "FIELD_STEP", + "field name" : "userName", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$p" + } } } - } - ] -} + ] + } } +} \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/inner_joins/explans/q25.r b/kvtest/kvquery-IT/src/main/resources/cases/inner_joins/explans/q25.r index 904b398a..d0bd667d 100644 --- a/kvtest/kvquery-IT/src/main/resources/cases/inner_joins/explans/q25.r +++ b/kvtest/kvquery-IT/src/main/resources/cases/inner_joins/explans/q25.r @@ -62,26 +62,37 @@ compiled-query-plan "iterator kind" : "TABLE", "target table" : "A.B", "row variable" : "$$b", - "index used" : "primary index", - "covering index" : false, + "index used" : "idxS", + "covering index" : true, + "index row variable" : "$$b_idx", "index scans" : [ { - "equality conditions" : {"sid":""}, + "equality conditions" : {}, "range conditions" : {} } ], - "key bind expressions" : [ + "index filtering predicate" : + { + "iterator kind" : "EQUAL", + "left operand" : { "iterator kind" : "EXTERNAL_VAR_REF", "variable" : "$innerJoinVar0" + }, + "right operand" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "#sid", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$b_idx" + } } - ], - "map of key bind expressions" : [ - [ 0 ] - ], + }, "position in join" : 1 }, - "FROM variable" : "$$b", + "FROM variable" : "$$b_idx", "SELECT expressions" : [ ] @@ -137,8 +148,52 @@ compiled-query-plan "field name" : "b", "field expression" : { - "iterator kind" : "VAR_REF", - "variable" : "$$b" + "iterator kind" : "RECORD_CONSTRUCTOR", + "type" : { "Record" : { + "sid" : "String", + "id" : "Integer", + "bid" : "Integer", + "s" : "String" + } + }, + "input iterators" : [ + { + "iterator kind" : "FIELD_STEP", + "field name" : "#sid", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$b_idx" + } + }, + { + "iterator kind" : "FIELD_STEP", + "field name" : "#id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$b_idx" + } + }, + { + "iterator kind" : "FIELD_STEP", + "field name" : "#bid", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$b_idx" + } + }, + { + "iterator kind" : "FIELD_STEP", + "field name" : "s", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$b_idx" + } + } + ] } }, { diff --git a/kvtest/kvquery-IT/src/main/resources/cases/inner_joins/explans/q26.r b/kvtest/kvquery-IT/src/main/resources/cases/inner_joins/explans/q26.r index 0505c3e2..9fd7a36f 100644 --- a/kvtest/kvquery-IT/src/main/resources/cases/inner_joins/explans/q26.r +++ b/kvtest/kvquery-IT/src/main/resources/cases/inner_joins/explans/q26.r @@ -1,4 +1,5 @@ compiled-query-plan + { "query file" : "inner_joins/q/q26.q", "plan" : @@ -64,26 +65,37 @@ compiled-query-plan "iterator kind" : "TABLE", "target table" : "A.B", "row variable" : "$$b", - "index used" : "primary index", - "covering index" : false, + "index used" : "idxS", + "covering index" : true, + "index row variable" : "$$b_idx", "index scans" : [ { - "equality conditions" : {"sid":""}, + "equality conditions" : {}, "range conditions" : {} } ], - "key bind expressions" : [ + "index filtering predicate" : + { + 
"iterator kind" : "EQUAL", + "left operand" : { "iterator kind" : "EXTERNAL_VAR_REF", "variable" : "$innerJoinVar0" + }, + "right operand" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "#sid", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$b_idx" + } } - ], - "map of key bind expressions" : [ - [ 0 ] - ], + }, "position in join" : 1 }, - "FROM variable" : "$$b", + "FROM variable" : "$$b_idx", "SELECT expressions" : [ ] @@ -139,8 +151,52 @@ compiled-query-plan "field name" : "b", "field expression" : { - "iterator kind" : "VAR_REF", - "variable" : "$$b" + "iterator kind" : "RECORD_CONSTRUCTOR", + "type" : { "Record" : { + "sid" : "String", + "id" : "Integer", + "bid" : "Integer", + "s" : "String" + } + }, + "input iterators" : [ + { + "iterator kind" : "FIELD_STEP", + "field name" : "#sid", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$b_idx" + } + }, + { + "iterator kind" : "FIELD_STEP", + "field name" : "#id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$b_idx" + } + }, + { + "iterator kind" : "FIELD_STEP", + "field name" : "#bid", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$b_idx" + } + }, + { + "iterator kind" : "FIELD_STEP", + "field name" : "s", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$b_idx" + } + } + ] } }, { @@ -190,4 +246,4 @@ compiled-query-plan } ] } -} +} \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/insert/explans/ins03r.r b/kvtest/kvquery-IT/src/main/resources/cases/insert/explans/ins03r.r index c0ff1742..35962718 100644 --- a/kvtest/kvquery-IT/src/main/resources/cases/insert/explans/ins03r.r +++ b/kvtest/kvquery-IT/src/main/resources/cases/insert/explans/ins03r.r @@ -131,7 +131,7 @@ compiled-query-plan "right operand" : { "iterator kind" : "CONST", - "value" : 145 + "value" : 150 } } ] @@ -139,4 +139,4 @@ compiled-query-plan } ] } -} \ No newline at end of file +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/insert/q/ins03r.q b/kvtest/kvquery-IT/src/main/resources/cases/insert/q/ins03r.q index 25754ace..c1679047 100644 --- a/kvtest/kvquery-IT/src/main/resources/cases/insert/q/ins03r.q +++ b/kvtest/kvquery-IT/src/main/resources/cases/insert/q/ins03r.q @@ -13,4 +13,4 @@ insert into foo $f values ( ) returning id1, id2, 120 <= remaining_hours($f) and remaining_hours($f) < 144, - 135 <= row_storage_size($f) and row_storage_size($f) <= 145 as row_size + 135 <= row_storage_size($f) and row_storage_size($f) <= 150 as row_size diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/before.ddl b/kvtest/kvquery-IT/src/main/resources/cases/joins/before.ddl index 9340edd4..28c3adb5 100644 --- a/kvtest/kvquery-IT/src/main/resources/cases/joins/before.ddl +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/before.ddl @@ -60,6 +60,8 @@ create index a_idx_c1_a2 on A(c1, a2) create index a_idx_a1_a2_c1 on A(a1, a2, c1) +create index b_idx_b1 on A.B(b1) + create index d_idx_d2 on A.B.C.D(d2) create index d_idx_d2_idb_c3 on A.B.C.D(d2, idb, c3) diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/inner01.r b/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/inner01.r new file mode 100644 index 00000000..568a51d8 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/inner01.r @@ -0,0 +1,131 @@ +compiled-query-plan +{ +"query file" : "joins/q/inner01.q", +"plan" : +{ + "iterator kind" : "RECEIVE", + "distribution kind" : 
"ALL_PARTITIONS", + "order by fields at positions" : [ 0, 2 ], + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "NESTED_LOOP_JOIN", + "join predicates" : [ + { "outerBranch" :0, "outerExpr" : 0, "innerVar" : 0 } + ], + "branches" : [ + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "A", + "row variable" : "$$a", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$$a", + "SELECT expressions" : [ + { + "field name" : "outerJoinVal1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "ida", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$a" + } + } + } + ] + }, + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "A.B", + "row variable" : "$$b", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {"ida":0}, + "range conditions" : {} + } + ], + "key bind expressions" : [ + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar0" + } + ], + "map of key bind expressions" : [ + [ 0 ] + ], + "position in join" : 1 + }, + "FROM variable" : "$$b", + "SELECT expressions" : [ + + ] + } + ] + + }, + "FROM variable" : "$join-0", + "SELECT expressions" : [ + { + "field name" : "ida", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "ida", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$a" + } + } + }, + { + "field name" : "Column_2", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "ida", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$b" + } + } + }, + { + "field name" : "idb", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "idb", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$b" + } + } + } + ] + } +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/inner02.r b/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/inner02.r new file mode 100644 index 00000000..4a41da41 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/inner02.r @@ -0,0 +1,131 @@ +compiled-query-plan +{ +"query file" : "joins/q/inner02.q", +"plan" : +{ + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_PARTITIONS", + "order by fields at positions" : [ 0, 2 ], + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "NESTED_LOOP_JOIN", + "join predicates" : [ + { "outerBranch" :0, "outerExpr" : 0, "innerVar" : 0 } + ], + "branches" : [ + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "A", + "row variable" : "$$a", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$$a", + "SELECT expressions" : [ + { + "field name" : "outerJoinVal1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "ida", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$a" + } + } + } + ] + }, + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "A.B", + "row variable" : "$$b", + "index used" : "primary index", + 
"covering index" : true, + "index scans" : [ + { + "equality conditions" : {"ida":0}, + "range conditions" : {} + } + ], + "key bind expressions" : [ + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar0" + } + ], + "map of key bind expressions" : [ + [ 0 ] + ], + "position in join" : 1 + }, + "FROM variable" : "$$b", + "SELECT expressions" : [ + + ] + } + ] + + }, + "FROM variable" : "$join-0", + "SELECT expressions" : [ + { + "field name" : "ida", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "ida", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$a" + } + } + }, + { + "field name" : "Column_2", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "ida", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$b" + } + } + }, + { + "field name" : "idb", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "idb", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$b" + } + } + } + ] + } +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/inner03.r b/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/inner03.r new file mode 100644 index 00000000..941ab280 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/inner03.r @@ -0,0 +1,135 @@ +compiled-query-plan +{ +"query file" : "joins/q/inner03.q", +"plan" : +{ + "iterator kind" : "SORT", + "order by fields at positions" : [ 0, 2 ], + "input iterator" : + { + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_PARTITIONS", + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "NESTED_LOOP_JOIN", + "join predicates" : [ + { "outerBranch" :0, "outerExpr" : 0, "innerVar" : 0 } + ], + "branches" : [ + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "A.B", + "row variable" : "$$b", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$$b", + "SELECT expressions" : [ + { + "field name" : "outerJoinVal1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "ida", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$b" + } + } + } + ] + }, + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "A", + "row variable" : "$$a", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {"ida":0}, + "range conditions" : {} + } + ], + "key bind expressions" : [ + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar0" + } + ], + "map of key bind expressions" : [ + [ 0 ] + ], + "position in join" : 1 + }, + "FROM variable" : "$$a", + "SELECT expressions" : [ + + ] + } + ] + + }, + "FROM variable" : "$join-0", + "SELECT expressions" : [ + { + "field name" : "ida", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "ida", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$a" + } + } + }, + { + "field name" : "Column_2", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "ida", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$b" + } + } + }, + { + "field name" : "idb", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "idb", + 
"input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$b" + } + } + } + ] + } + } +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/inner04.r b/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/inner04.r new file mode 100644 index 00000000..74d6607e --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/inner04.r @@ -0,0 +1,131 @@ +compiled-query-plan +{ +"query file" : "joins/q/inner04.q", +"plan" : +{ + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_PARTITIONS", + "order by fields at positions" : [ 1, 2 ], + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "NESTED_LOOP_JOIN", + "join predicates" : [ + { "outerBranch" :0, "outerExpr" : 0, "innerVar" : 0 } + ], + "branches" : [ + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "A.B", + "row variable" : "$$b", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$$b", + "SELECT expressions" : [ + { + "field name" : "outerJoinVal1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "ida", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$b" + } + } + } + ] + }, + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "A", + "row variable" : "$$a", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {"ida":0}, + "range conditions" : {} + } + ], + "key bind expressions" : [ + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$innerJoinVar0" + } + ], + "map of key bind expressions" : [ + [ 0 ] + ], + "position in join" : 1 + }, + "FROM variable" : "$$a", + "SELECT expressions" : [ + + ] + } + ] + + }, + "FROM variable" : "$join-0", + "SELECT expressions" : [ + { + "field name" : "ida", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "ida", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$a" + } + } + }, + { + "field name" : "Column_2", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "ida", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$b" + } + } + }, + { + "field name" : "idb", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "idb", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$b" + } + } + } + ] + } +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lina17.r b/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lina17.r new file mode 100644 index 00000000..840bcd4b --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lina17.r @@ -0,0 +1,108 @@ +compiled-query-plan +{ +"query file" : "joins/q/lina17.q", +"plan" : +{ + "iterator kind" : "SORT", + "order by fields at positions" : [ 0, 1 ], + "input iterator" : + { + "iterator kind" : "RECEIVE", + "distribution kind" : "SINGLE_PARTITION", + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "A.B", + "row variable" : "$$b", + "index used" : "b_idx_b1", + "covering index" : true, + "index row variable" : "$$b_idx", + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "ancestor tables" : [ + { "table" : "A", "row 
variable" : "$$a", "covering primary index" : false } ], + "index filtering predicate" : + { + "iterator kind" : "EQUAL", + "left operand" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "#ida", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$b_idx" + } + }, + "right operand" : + { + "iterator kind" : "CONST", + "value" : 10 + } + }, + "position in join" : 0 + }, + "FROM variables" : ["$$a", "$$b_idx"], + "WHERE" : + { + "iterator kind" : "EQUAL", + "left operand" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "b1", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$b_idx" + } + }, + "right operand" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "a1", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$a" + } + } + }, + "SELECT expressions" : [ + { + "field name" : "ida", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "#ida", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$b_idx" + } + } + }, + { + "field name" : "idb", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "#idb", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$b_idx" + } + } + } + ] + } + } +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lind24.r b/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lind24.r index 0f4ecf6a..1bf007e4 100644 --- a/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lind24.r +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lind24.r @@ -19,7 +19,7 @@ compiled-query-plan "target table" : "A", "row variable" : "$$a", "index used" : "primary index", - "covering index" : false, + "covering index" : true, "index scans" : [ { "equality conditions" : {"ida":40}, diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lind25.r b/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lind25.r new file mode 100644 index 00000000..f3d34844 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lind25.r @@ -0,0 +1,99 @@ +compiled-query-plan +{ +"query file" : "joins/q/lind25.q", +"plan" : +{ + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_PARTITIONS", + "order by fields at positions" : [ 0 ], + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "A", + "row variable" : "$$a", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "descendant tables" : [ + { "table" : "A.B", "row variable" : "$$b", "covering primary index" : true } + ], + "position in join" : 0 + }, + "FROM variables" : ["$$a", "$$b"], + "SELECT expressions" : [ + { + "field name" : "ida", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "ida", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$a" + } + } + }, + { + "field name" : "Column_2", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "ida", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$b" + } + } + }, + { + "field name" : "idb", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "idb", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$b" + } + } + } + ], + "LIMIT" : + { + "iterator kind" : "CONST", + "value" : 10 + } + 
} + }, + "FROM variable" : "$from-0", + "SELECT expressions" : [ + { + "field name" : "$from-0", + "field expression" : + { + "iterator kind" : "VAR_REF", + "variable" : "$from-0" + } + } + ], + "LIMIT" : + { + "iterator kind" : "CONST", + "value" : 10 + } +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lind26.r b/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lind26.r new file mode 100644 index 00000000..29471083 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lind26.r @@ -0,0 +1,99 @@ +compiled-query-plan +{ +"query file" : "joins/q/lind26.q", +"plan" : +{ + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_PARTITIONS", + "order by fields at positions" : [ 0 ], + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "A", + "row variable" : "$$a", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "descendant tables" : [ + { "table" : "A.B", "row variable" : "$$b", "covering primary index" : true } + ], + "position in join" : 0 + }, + "FROM variables" : ["$$a", "$$b"], + "SELECT expressions" : [ + { + "field name" : "ida", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "ida", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$a" + } + } + }, + { + "field name" : "Column_2", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "ida", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$b" + } + } + }, + { + "field name" : "idb", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "idb", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$b" + } + } + } + ], + "LIMIT" : + { + "iterator kind" : "CONST", + "value" : 10 + } + } + }, + "FROM variable" : "$from-0", + "SELECT expressions" : [ + { + "field name" : "$from-0", + "field expression" : + { + "iterator kind" : "VAR_REF", + "variable" : "$from-0" + } + } + ], + "LIMIT" : + { + "iterator kind" : "CONST", + "value" : 10 + } +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lind27.r b/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lind27.r new file mode 100644 index 00000000..4fbb2c89 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lind27.r @@ -0,0 +1,121 @@ +compiled-query-plan +{ +"query file" : "joins/q/lind27.q", +"plan" : +{ + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_PARTITIONS", + "order by fields at positions" : [ 0, 1 ], + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "A", + "row variable" : "$$a", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "descendant tables" : [ + { "table" : "A.B", "row variable" : "$$b", "covering primary index" : true }, + { "table" : "A.B.C", "row variable" : "$$c", "covering primary index" : true } + ], + "position in join" : 0 + }, + "FROM variables" : ["$$a", "$$b", "$$c"], + "GROUP BY" : "Grouping by the first 2 expressions in the SELECT list", + "SELECT expressions" : [ + { + "field name" : "ida", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "ida", + 
"input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$a" + } + } + }, + { + "field name" : "idb", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "idb", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$b" + } + } + }, + { + "field name" : "cnt", + "field expression" : + { + "iterator kind" : "FUNC_COUNT_STAR" + } + } + ] + } + }, + "FROM variable" : "$from-1", + "GROUP BY" : "Grouping by the first 2 expressions in the SELECT list", + "SELECT expressions" : [ + { + "field name" : "ida", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "ida", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$from-1" + } + } + }, + { + "field name" : "idb", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "idb", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$from-1" + } + } + }, + { + "field name" : "cnt", + "field expression" : + { + "iterator kind" : "FUNC_SUM", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "cnt", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$from-1" + } + } + } + } + ] +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lind28.r b/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lind28.r new file mode 100644 index 00000000..c76a1249 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lind28.r @@ -0,0 +1,186 @@ +compiled-query-plan +{ +"query file" : "joins/q/lind28.q", +"plan" : +{ + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_PARTITIONS", + "order by fields at positions" : [ 0, 1 ], + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "A", + "row variable" : "$$a", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "descendant tables" : [ + { "table" : "A.B", "row variable" : "$$b", "covering primary index" : true }, + { "table" : "A.B.C", "row variable" : "$$c", "covering primary index" : true } + ], + "position in join" : 0 + }, + "FROM variables" : ["$$a", "$$b", "$$c"], + "GROUP BY" : "Grouping by the first 2 expressions in the SELECT list", + "SELECT expressions" : [ + { + "field name" : "ida", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "ida", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$a" + } + } + }, + { + "field name" : "idb", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "idb", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$b" + } + } + }, + { + "field name" : "cnt", + "field expression" : + { + "iterator kind" : "FN_COUNT", + "input iterator" : + { + "iterator kind" : "CASE", + "clauses" : [ + { + "when iterator" : + { + "iterator kind" : "OP_IS_NULL", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "idb", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$b" + } + } + }, + "then iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "idb", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$b" + } + } + }, + { + "when iterator" : + { + "iterator kind" : "OP_IS_NULL", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "idc", + "input 
iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$c" + } + } + }, + "then iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "idc", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$c" + } + } + }, + { + "else iterator" : + { + "iterator kind" : "CONST", + "value" : 1 + } + } + ] + } + } + } + ] + } + }, + "FROM variable" : "$from-1", + "GROUP BY" : "Grouping by the first 2 expressions in the SELECT list", + "SELECT expressions" : [ + { + "field name" : "ida", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "ida", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$from-1" + } + } + }, + { + "field name" : "idb", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "idb", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$from-1" + } + } + }, + { + "field name" : "cnt", + "field expression" : + { + "iterator kind" : "FUNC_SUM", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "cnt", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$from-1" + } + } + } + } + ] +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lind29.r b/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lind29.r new file mode 100644 index 00000000..82cf6744 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lind29.r @@ -0,0 +1,213 @@ +compiled-query-plan +{ +"query file" : "joins/q/lind29.q", +"plan" : +{ + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_SHARDS", + "order by fields at positions" : [ 0, 1, 2 ], + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "A", + "row variable" : "$$a", + "index used" : "a_idx_a2", + "covering index" : true, + "index row variable" : "$$a_idx", + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "descendant tables" : [ + { "table" : "A.B", "row variable" : "$$b", "covering primary index" : true }, + { "table" : "A.B.C", "row variable" : "$$c", "covering primary index" : true } + ], + "position in join" : 0 + }, + "FROM variables" : ["$$a_idx", "$$b", "$$c"], + "GROUP BY" : "Grouping by the first 3 expressions in the SELECT list", + "SELECT expressions" : [ + { + "field name" : "a2", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "a2", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$a_idx" + } + } + }, + { + "field name" : "ida", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "ida", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$b" + } + } + }, + { + "field name" : "idb", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "idb", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$b" + } + } + }, + { + "field name" : "cnt", + "field expression" : + { + "iterator kind" : "FN_COUNT", + "input iterator" : + { + "iterator kind" : "CASE", + "clauses" : [ + { + "when iterator" : + { + "iterator kind" : "OP_IS_NULL", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "idb", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$b" + } + } + }, + "then iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "idb", + "input iterator" : + { + "iterator kind" : "VAR_REF", 
+ "variable" : "$$b" + } + } + }, + { + "when iterator" : + { + "iterator kind" : "OP_IS_NULL", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "idc", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$c" + } + } + }, + "then iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "idc", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$c" + } + } + }, + { + "else iterator" : + { + "iterator kind" : "CONST", + "value" : 1 + } + } + ] + } + } + } + ] + } + }, + "FROM variable" : "$from-1", + "GROUP BY" : "Grouping by the first 3 expressions in the SELECT list", + "SELECT expressions" : [ + { + "field name" : "a2", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "a2", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$from-1" + } + } + }, + { + "field name" : "ida", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "ida", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$from-1" + } + } + }, + { + "field name" : "idb", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "idb", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$from-1" + } + } + }, + { + "field name" : "cnt", + "field expression" : + { + "iterator kind" : "FUNC_SUM", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "cnt", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$from-1" + } + } + } + } + ] +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lind30.r b/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lind30.r new file mode 100644 index 00000000..1ef921d2 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lind30.r @@ -0,0 +1,213 @@ +compiled-query-plan +{ +"query file" : "joins/q/lind30.q", +"plan" : +{ + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_SHARDS", + "order by fields at positions" : [ 0, 1, 2 ], + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "A", + "row variable" : "$$a", + "index used" : "a_idx_a2", + "covering index" : true, + "index row variable" : "$$a_idx", + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "descendant tables" : [ + { "table" : "A.B", "row variable" : "$$b", "covering primary index" : true }, + { "table" : "A.B.C", "row variable" : "$$c", "covering primary index" : true } + ], + "position in join" : 0 + }, + "FROM variables" : ["$$a_idx", "$$b", "$$c"], + "GROUP BY" : "Grouping by the first 3 expressions in the SELECT list", + "SELECT expressions" : [ + { + "field name" : "a2", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "a2", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$a_idx" + } + } + }, + { + "field name" : "ida", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "#ida", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$a_idx" + } + } + }, + { + "field name" : "idb", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "idb", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$b" + } + } + }, + { + "field name" : "cnt", + "field expression" : + { + "iterator kind" : "FN_COUNT", + "input iterator" : + { + "iterator kind" 
: "CASE", + "clauses" : [ + { + "when iterator" : + { + "iterator kind" : "OP_IS_NULL", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "idb", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$b" + } + } + }, + "then iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "idb", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$b" + } + } + }, + { + "when iterator" : + { + "iterator kind" : "OP_IS_NULL", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "idc", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$c" + } + } + }, + "then iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "idc", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$c" + } + } + }, + { + "else iterator" : + { + "iterator kind" : "CONST", + "value" : 1 + } + } + ] + } + } + } + ] + } + }, + "FROM variable" : "$from-1", + "GROUP BY" : "Grouping by the first 3 expressions in the SELECT list", + "SELECT expressions" : [ + { + "field name" : "a2", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "a2", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$from-1" + } + } + }, + { + "field name" : "ida", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "ida", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$from-1" + } + } + }, + { + "field name" : "idb", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "idb", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$from-1" + } + } + }, + { + "field name" : "cnt", + "field expression" : + { + "iterator kind" : "FUNC_SUM", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "cnt", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$from-1" + } + } + } + } + ] +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lind31.r b/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lind31.r new file mode 100644 index 00000000..f59cecf6 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lind31.r @@ -0,0 +1,73 @@ +compiled-query-plan +{ +"query file" : "joins/q/lind31.q", +"plan" : +{ + "iterator kind" : "RECEIVE", + "distribution kind" : "SINGLE_PARTITION", + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "A", + "row variable" : "$$a", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {"ida":40}, + "range conditions" : {} + } + ], + "descendant tables" : [ + { "table" : "A.B", "row variable" : "$$b", "covering primary index" : true } + ], + "position in join" : 0 + }, + "FROM variables" : ["$$a", "$$b"], + "SELECT expressions" : [ + { + "field name" : "ida", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "ida", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$a" + } + } + }, + { + "field name" : "Column_2", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "ida", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$b" + } + } + }, + { + "field name" : "idb", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "idb", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$b" + } + } + } + ] + } +} 
+} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lind32.r b/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lind32.r new file mode 100644 index 00000000..3659a63a --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lind32.r @@ -0,0 +1,94 @@ +compiled-query-plan +{ +"query file" : "joins/q/lind32.q", +"plan" : +{ + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_PARTITIONS", + "order by fields at positions" : [ 0 ], + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "A", + "row variable" : "$$a", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "descendant tables" : [ + { "table" : "A.B", "row variable" : "$$b", "covering primary index" : true }, + { "table" : "A.B.C", "row variable" : "$$c", "covering primary index" : true } + ], + "ON Predicate for table A.B" : + { + "iterator kind" : "EQUAL", + "left operand" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "idb", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$b" + } + }, + "right operand" : + { + "iterator kind" : "CONST", + "value" : 0 + } + }, + "position in join" : 0 + }, + "FROM variables" : ["$$a", "$$b", "$$c"], + "SELECT expressions" : [ + { + "field name" : "ida", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "ida", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$a" + } + } + }, + { + "field name" : "idb", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "idb", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$b" + } + } + }, + { + "field name" : "idc", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "idc", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$c" + } + } + } + ] + } +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lind33.r b/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lind33.r new file mode 100644 index 00000000..dc4a60a6 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lind33.r @@ -0,0 +1,123 @@ +compiled-query-plan +{ +"query file" : "joins/q/lind33.q", +"plan" : +{ + "iterator kind" : "SORT", + "order by fields at positions" : [ 0 ], + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_PARTITIONS", + "order by fields at positions" : [ 0 ], + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "A", + "row variable" : "$$a", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "descendant tables" : [ + { "table" : "A.B", "row variable" : "$$b", "covering primary index" : true } + ], + "position in join" : 0 + }, + "FROM variables" : ["$$a", "$$b"], + "WHERE" : + { + "iterator kind" : "OP_IS_NOT_NULL", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "idb", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$b" + } + } + }, + "GROUP BY" : "Grouping by the first expression in the SELECT list", + "SELECT expressions" : [ + { + "field name" : "ida", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "ida", + 
"input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$a" + } + } + }, + { + "field name" : "count", + "field expression" : + { + "iterator kind" : "FN_COUNT", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "idb", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$b" + } + } + } + } + ] + } + }, + "FROM variable" : "$from-1", + "GROUP BY" : "Grouping by the first expression in the SELECT list", + "SELECT expressions" : [ + { + "field name" : "ida", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "ida", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$from-1" + } + } + }, + { + "field name" : "count", + "field expression" : + { + "iterator kind" : "FUNC_SUM", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "count", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$from-1" + } + } + } + } + ] + } +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lind34.r b/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lind34.r new file mode 100644 index 00000000..76b68578 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lind34.r @@ -0,0 +1,117 @@ +compiled-query-plan +{ +"query file" : "joins/q/lind34.q", +"plan" : +{ + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_PARTITIONS", + "order by fields at positions" : [ 0 ], + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "A.B", + "row variable" : "$$b", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "ancestor tables" : [ + { "table" : "A", "row variable" : "$$a", "covering primary index" : true } ], + "position in join" : 0 + }, + "FROM variables" : ["$$a", "$$b"], + "WHERE" : + { + "iterator kind" : "OP_IS_NOT_NULL", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "ida", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$a" + } + } + }, + "GROUP BY" : "Grouping by the first expression in the SELECT list", + "SELECT expressions" : [ + { + "field name" : "ida", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "ida", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$b" + } + } + }, + { + "field name" : "count", + "field expression" : + { + "iterator kind" : "FN_COUNT", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "idb", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$b" + } + } + } + } + ] + } + }, + "FROM variable" : "$from-1", + "GROUP BY" : "Grouping by the first expression in the SELECT list", + "SELECT expressions" : [ + { + "field name" : "ida", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "ida", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$from-1" + } + } + }, + { + "field name" : "count", + "field expression" : + { + "iterator kind" : "FUNC_SUM", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "count", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$from-1" + } + } + } + } + ] +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lind35.r 
b/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lind35.r new file mode 100644 index 00000000..1672e4a0 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lind35.r @@ -0,0 +1,121 @@ +compiled-query-plan +{ +"query file" : "joins/q/lind35.q", +"plan" : +{ + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_SHARDS", + "order by fields at positions" : [ 0, 1 ], + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "A", + "row variable" : "$$a", + "index used" : "a_idx_c1", + "covering index" : true, + "index row variable" : "$$a_idx", + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "descendant tables" : [ + { "table" : "A.B", "row variable" : "$$b", "covering primary index" : true } + ], + "position in join" : 0 + }, + "FROM variables" : ["$$a_idx", "$$b"], + "GROUP BY" : "Grouping by the first 2 expressions in the SELECT list", + "SELECT expressions" : [ + { + "field name" : "c1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "c1", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$a_idx" + } + } + }, + { + "field name" : "ida", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "#ida", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$a_idx" + } + } + }, + { + "field name" : "cnt", + "field expression" : + { + "iterator kind" : "FUNC_COUNT_STAR" + } + } + ] + } + }, + "FROM variable" : "$from-1", + "GROUP BY" : "Grouping by the first 2 expressions in the SELECT list", + "SELECT expressions" : [ + { + "field name" : "c1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "c1", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$from-1" + } + } + }, + { + "field name" : "ida", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "ida", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$from-1" + } + } + }, + { + "field name" : "cnt", + "field expression" : + { + "iterator kind" : "FUNC_SUM", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "cnt", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$from-1" + } + } + } + } + ] +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lind36.r b/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lind36.r new file mode 100644 index 00000000..ee7b5eb8 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/lind36.r @@ -0,0 +1,121 @@ +compiled-query-plan +{ +"query file" : "joins/q/lind36.q", +"plan" : +{ + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_SHARDS", + "order by fields at positions" : [ 0, 1 ], + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "A", + "row variable" : "$$a", + "index used" : "a_idx_c1", + "covering index" : true, + "index row variable" : "$$a_idx", + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "descendant tables" : [ + { "table" : "A.B", "row variable" : "$$b", "covering primary index" : true } + ], + "position in join" : 0 + }, + "FROM variables" : ["$$a_idx", "$$b"], + "GROUP BY" : "Grouping by the first 2 expressions in the SELECT list", + "SELECT expressions" : [ + { + 
"field name" : "c1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "c1", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$a_idx" + } + } + }, + { + "field name" : "ida", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "#ida", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$a_idx" + } + } + }, + { + "field name" : "cnt", + "field expression" : + { + "iterator kind" : "FUNC_COUNT_STAR" + } + } + ] + } + }, + "FROM variable" : "$from-1", + "GROUP BY" : "Grouping by the first 2 expressions in the SELECT list", + "SELECT expressions" : [ + { + "field name" : "c1", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "c1", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$from-1" + } + } + }, + { + "field name" : "ida", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "ida", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$from-1" + } + } + }, + { + "field name" : "cnt", + "field expression" : + { + "iterator kind" : "FUNC_SUM", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "cnt", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$from-1" + } + } + } + } + ] +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/treead01.r b/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/treead01.r index 36877985..50807e9a 100644 --- a/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/treead01.r +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/treead01.r @@ -4,212 +4,208 @@ compiled-query-plan "query file" : "joins/q/treead01.q", "plan" : { - "iterator kind" : "SORT", - "order by fields at positions" : [ 0, 1, 5, 6, 9 ], + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_PARTITIONS", + "order by fields at positions" : [ 0 ], "input iterator" : { - "iterator kind" : "RECEIVE", - "distribution kind" : "ALL_PARTITIONS", - "input iterator" : + "iterator kind" : "SELECT", + "FROM" : { - "iterator kind" : "SELECT", - "FROM" : + "iterator kind" : "TABLE", + "target table" : "A.B", + "row variable" : "$$b", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "ancestor tables" : [ + { "table" : "A", "row variable" : "$$a", "covering primary index" : true } ], + "descendant tables" : [ + { "table" : "A.B.C.D", "row variable" : "$$d", "covering primary index" : true }, + { "table" : "A.B.E", "row variable" : "$$e", "covering primary index" : true } + ], + "ON Predicate for table A.B.C.D" : { - "iterator kind" : "TABLE", - "target table" : "A.B", - "row variable" : "$$b", - "index used" : "primary index", - "covering index" : false, - "index scans" : [ + "iterator kind" : "AND", + "input iterators" : [ { - "equality conditions" : {}, - "range conditions" : {} - } - ], - "ancestor tables" : [ - { "table" : "A", "row variable" : "$$a", "covering primary index" : true } ], - "descendant tables" : [ - { "table" : "A.B.C.D", "row variable" : "$$d", "covering primary index" : true }, - { "table" : "A.B.E", "row variable" : "$$e", "covering primary index" : true } - ], - "ON Predicate for table A.B.C.D" : - { - "iterator kind" : "AND", - "input iterators" : [ + "iterator kind" : "NOT_EQUAL", + "left operand" : { - "iterator kind" : "NOT_EQUAL", - "left operand" : + "iterator kind" : "FIELD_STEP", + "field name" 
: "ida", + "input iterator" : { - "iterator kind" : "FIELD_STEP", - "field name" : "ida", - "input iterator" : - { - "iterator kind" : "VAR_REF", - "variable" : "$$a" - } - }, - "right operand" : - { - "iterator kind" : "CONST", - "value" : 15 + "iterator kind" : "VAR_REF", + "variable" : "$$a" } }, + "right operand" : { - "iterator kind" : "LESS_OR_EQUAL", - "left operand" : - { - "iterator kind" : "FIELD_STEP", - "field name" : "idd", - "input iterator" : - { - "iterator kind" : "VAR_REF", - "variable" : "$$d" - } - }, - "right operand" : + "iterator kind" : "CONST", + "value" : 15 + } + }, + { + "iterator kind" : "LESS_OR_EQUAL", + "left operand" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "idd", + "input iterator" : { - "iterator kind" : "EXTERNAL_VAR_REF", - "variable" : "$ext1" + "iterator kind" : "VAR_REF", + "variable" : "$$d" } + }, + "right operand" : + { + "iterator kind" : "EXTERNAL_VAR_REF", + "variable" : "$ext1" } - ] - }, - "position in join" : 0 + } + ] }, - "FROM variables" : ["$$a", "$$b", "$$d", "$$e"], - "SELECT expressions" : [ + "position in join" : 0 + }, + "FROM variables" : ["$$a", "$$b", "$$d", "$$e"], + "SELECT expressions" : [ + { + "field name" : "b_ida", + "field expression" : { - "field name" : "b_ida", - "field expression" : + "iterator kind" : "FIELD_STEP", + "field name" : "ida", + "input iterator" : { - "iterator kind" : "FIELD_STEP", - "field name" : "ida", - "input iterator" : - { - "iterator kind" : "VAR_REF", - "variable" : "$$b" - } + "iterator kind" : "VAR_REF", + "variable" : "$$b" } - }, + } + }, + { + "field name" : "b_idb", + "field expression" : { - "field name" : "b_idb", - "field expression" : + "iterator kind" : "FIELD_STEP", + "field name" : "idb", + "input iterator" : { - "iterator kind" : "FIELD_STEP", - "field name" : "idb", - "input iterator" : - { - "iterator kind" : "VAR_REF", - "variable" : "$$b" - } + "iterator kind" : "VAR_REF", + "variable" : "$$b" } - }, + } + }, + { + "field name" : "a_ida", + "field expression" : { - "field name" : "a_ida", - "field expression" : + "iterator kind" : "FIELD_STEP", + "field name" : "ida", + "input iterator" : { - "iterator kind" : "FIELD_STEP", - "field name" : "ida", - "input iterator" : - { - "iterator kind" : "VAR_REF", - "variable" : "$$a" - } + "iterator kind" : "VAR_REF", + "variable" : "$$a" } - }, + } + }, + { + "field name" : "d_ida", + "field expression" : { - "field name" : "d_ida", - "field expression" : + "iterator kind" : "FIELD_STEP", + "field name" : "ida", + "input iterator" : { - "iterator kind" : "FIELD_STEP", - "field name" : "ida", - "input iterator" : - { - "iterator kind" : "VAR_REF", - "variable" : "$$d" - } + "iterator kind" : "VAR_REF", + "variable" : "$$d" } - }, + } + }, + { + "field name" : "d_idb", + "field expression" : { - "field name" : "d_idb", - "field expression" : + "iterator kind" : "FIELD_STEP", + "field name" : "idb", + "input iterator" : { - "iterator kind" : "FIELD_STEP", - "field name" : "idb", - "input iterator" : - { - "iterator kind" : "VAR_REF", - "variable" : "$$d" - } + "iterator kind" : "VAR_REF", + "variable" : "$$d" } - }, + } + }, + { + "field name" : "d_idc", + "field expression" : { - "field name" : "d_idc", - "field expression" : + "iterator kind" : "FIELD_STEP", + "field name" : "idc", + "input iterator" : { - "iterator kind" : "FIELD_STEP", - "field name" : "idc", - "input iterator" : - { - "iterator kind" : "VAR_REF", - "variable" : "$$d" - } + "iterator kind" : "VAR_REF", + "variable" : "$$d" } - }, + } + }, + { + "field 
name" : "d_idd", + "field expression" : { - "field name" : "d_idd", - "field expression" : + "iterator kind" : "FIELD_STEP", + "field name" : "idd", + "input iterator" : { - "iterator kind" : "FIELD_STEP", - "field name" : "idd", - "input iterator" : - { - "iterator kind" : "VAR_REF", - "variable" : "$$d" - } + "iterator kind" : "VAR_REF", + "variable" : "$$d" } - }, + } + }, + { + "field name" : "e_ida", + "field expression" : { - "field name" : "e_ida", - "field expression" : + "iterator kind" : "FIELD_STEP", + "field name" : "ida", + "input iterator" : { - "iterator kind" : "FIELD_STEP", - "field name" : "ida", - "input iterator" : - { - "iterator kind" : "VAR_REF", - "variable" : "$$e" - } + "iterator kind" : "VAR_REF", + "variable" : "$$e" } - }, + } + }, + { + "field name" : "e_idb", + "field expression" : { - "field name" : "e_idb", - "field expression" : + "iterator kind" : "FIELD_STEP", + "field name" : "idb", + "input iterator" : { - "iterator kind" : "FIELD_STEP", - "field name" : "idb", - "input iterator" : - { - "iterator kind" : "VAR_REF", - "variable" : "$$e" - } + "iterator kind" : "VAR_REF", + "variable" : "$$e" } - }, + } + }, + { + "field name" : "e_ide", + "field expression" : { - "field name" : "e_ide", - "field expression" : + "iterator kind" : "FIELD_STEP", + "field name" : "ide", + "input iterator" : { - "iterator kind" : "FIELD_STEP", - "field name" : "ide", - "input iterator" : - { - "iterator kind" : "VAR_REF", - "variable" : "$$e" - } + "iterator kind" : "VAR_REF", + "variable" : "$$e" } } - ] - } + } + ] } } } \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/treed03.r b/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/treed03.r index 9025d408..f142f55b 100644 --- a/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/treed03.r +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/treed03.r @@ -19,7 +19,7 @@ compiled-query-plan "target table" : "A", "row variable" : "$$a", "index used" : "primary index", - "covering index" : false, + "covering index" : true, "index scans" : [ { "equality conditions" : {}, diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/treed11.r b/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/treed11.r new file mode 100644 index 00000000..bca0ee1a --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/treed11.r @@ -0,0 +1,88 @@ +compiled-query-plan +{ +"query file" : "joins/q/treed11.q", +"plan" : +{ + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_PARTITIONS", + "order by fields at positions" : [ 0 ], + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "A", + "row variable" : "$$a", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "descendant tables" : [ + { "table" : "A.B", "row variable" : "$$b", "covering primary index" : true }, + { "table" : "A.G", "row variable" : "$$g", "covering primary index" : true } + ], + "position in join" : 0 + }, + "FROM variables" : ["$$a", "$$b", "$$g"], + "SELECT expressions" : [ + { + "field name" : "ida", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "ida", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$a" + } + } + }, + { + "field name" : "Column_2", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "ida", + "input 
iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$b" + } + } + }, + { + "field name" : "idb", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "idb", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$b" + } + } + }, + { + "field name" : "idg", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "idg", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$g" + } + } + } + ] + } +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/treed12.r b/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/treed12.r new file mode 100644 index 00000000..ceeaadd2 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/treed12.r @@ -0,0 +1,92 @@ +compiled-query-plan +{ +"query file" : "joins/q/treed12.q", +"plan" : +{ + "iterator kind" : "SORT", + "order by fields at positions" : [ 0, 3 ], + "input iterator" : + { + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_PARTITIONS", + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "A", + "row variable" : "$$a", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "descendant tables" : [ + { "table" : "A.B", "row variable" : "$$b", "covering primary index" : true }, + { "table" : "A.G", "row variable" : "$$g", "covering primary index" : true } + ], + "position in join" : 0 + }, + "FROM variables" : ["$$a", "$$b", "$$g"], + "SELECT expressions" : [ + { + "field name" : "ida", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "ida", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$a" + } + } + }, + { + "field name" : "Column_2", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "ida", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$b" + } + } + }, + { + "field name" : "idb", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "idb", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$b" + } + } + }, + { + "field name" : "idg", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "idg", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$g" + } + } + } + ] + } + } +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/treed13.r b/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/treed13.r new file mode 100644 index 00000000..d68d8090 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/explans/treed13.r @@ -0,0 +1,88 @@ +compiled-query-plan +{ +"query file" : "joins/q/treed13.q", +"plan" : +{ + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_PARTITIONS", + "order by fields at positions" : [ 0 ], + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "A", + "row variable" : "$$a", + "index used" : "primary index", + "covering index" : true, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "descendant tables" : [ + { "table" : "A.B", "row variable" : "$$b", "covering primary index" : true }, + { "table" : "A.G", "row variable" : "$$g", "covering primary index" : true } + ], + "position in join" : 0 + }, + "FROM variables" : ["$$a", "$$b", "$$g"], + "SELECT expressions" : [ + { + 
"field name" : "ida", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "ida", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$a" + } + } + }, + { + "field name" : "Column_2", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "ida", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$b" + } + } + }, + { + "field name" : "idb", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "idb", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$b" + } + } + }, + { + "field name" : "idg", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "idg", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$$g" + } + } + } + ] + } +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/inner01.r b/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/inner01.r new file mode 100644 index 00000000..405e008c --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/inner01.r @@ -0,0 +1,12 @@ +ordered-result +{"ida":0,"Column_2":0,"idb":0} +{"ida":0,"Column_2":0,"idb":5} +{"ida":0,"Column_2":0,"idb":10} +{"ida":0,"Column_2":0,"idb":15} +{"ida":10,"Column_2":10,"idb":4} +{"ida":40,"Column_2":40,"idb":0} +{"ida":40,"Column_2":40,"idb":5} +{"ida":40,"Column_2":40,"idb":6} +{"ida":40,"Column_2":40,"idb":7} +{"ida":40,"Column_2":40,"idb":8} +{"ida":40,"Column_2":40,"idb":9} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/inner02.r b/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/inner02.r new file mode 100644 index 00000000..56a2c724 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/inner02.r @@ -0,0 +1,12 @@ +unordered-result +{"ida":40,"Column_2":40,"idb":9} +{"ida":40,"Column_2":40,"idb":8} +{"ida":40,"Column_2":40,"idb":7} +{"ida":40,"Column_2":40,"idb":6} +{"ida":40,"Column_2":40,"idb":5} +{"ida":40,"Column_2":40,"idb":0} +{"ida":10,"Column_2":10,"idb":4} +{"ida":0,"Column_2":0,"idb":15} +{"ida":0,"Column_2":0,"idb":10} +{"ida":0,"Column_2":0,"idb":5} +{"ida":0,"Column_2":0,"idb":0} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/inner03.r b/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/inner03.r new file mode 100644 index 00000000..56a2c724 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/inner03.r @@ -0,0 +1,12 @@ +unordered-result +{"ida":40,"Column_2":40,"idb":9} +{"ida":40,"Column_2":40,"idb":8} +{"ida":40,"Column_2":40,"idb":7} +{"ida":40,"Column_2":40,"idb":6} +{"ida":40,"Column_2":40,"idb":5} +{"ida":40,"Column_2":40,"idb":0} +{"ida":10,"Column_2":10,"idb":4} +{"ida":0,"Column_2":0,"idb":15} +{"ida":0,"Column_2":0,"idb":10} +{"ida":0,"Column_2":0,"idb":5} +{"ida":0,"Column_2":0,"idb":0} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/inner04.r b/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/inner04.r new file mode 100644 index 00000000..56a2c724 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/inner04.r @@ -0,0 +1,12 @@ +unordered-result +{"ida":40,"Column_2":40,"idb":9} +{"ida":40,"Column_2":40,"idb":8} +{"ida":40,"Column_2":40,"idb":7} +{"ida":40,"Column_2":40,"idb":6} +{"ida":40,"Column_2":40,"idb":5} +{"ida":40,"Column_2":40,"idb":0} +{"ida":10,"Column_2":10,"idb":4} +{"ida":0,"Column_2":0,"idb":15} +{"ida":0,"Column_2":0,"idb":10} +{"ida":0,"Column_2":0,"idb":5} +{"ida":0,"Column_2":0,"idb":0} diff 
--git a/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/lina17.r b/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/lina17.r new file mode 100644 index 00000000..cff754bb --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/lina17.r @@ -0,0 +1 @@ +unordered-result diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/lind25.r b/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/lind25.r new file mode 100644 index 00000000..877c1238 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/lind25.r @@ -0,0 +1,11 @@ +ordered-result +{"ida":0,"Column_2":0,"idb":0} +{"ida":0,"Column_2":0,"idb":5} +{"ida":0,"Column_2":0,"idb":10} +{"ida":0,"Column_2":0,"idb":15} +{"ida":5,"Column_2":null,"idb":null} +{"ida":10,"Column_2":10,"idb":4} +{"ida":15,"Column_2":null,"idb":null} +{"ida":20,"Column_2":null,"idb":null} +{"ida":25,"Column_2":null,"idb":null} +{"ida":30,"Column_2":null,"idb":null} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/lind26.r b/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/lind26.r new file mode 100644 index 00000000..877c1238 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/lind26.r @@ -0,0 +1,11 @@ +ordered-result +{"ida":0,"Column_2":0,"idb":0} +{"ida":0,"Column_2":0,"idb":5} +{"ida":0,"Column_2":0,"idb":10} +{"ida":0,"Column_2":0,"idb":15} +{"ida":5,"Column_2":null,"idb":null} +{"ida":10,"Column_2":10,"idb":4} +{"ida":15,"Column_2":null,"idb":null} +{"ida":20,"Column_2":null,"idb":null} +{"ida":25,"Column_2":null,"idb":null} +{"ida":30,"Column_2":null,"idb":null} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/lind27.r b/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/lind27.r new file mode 100644 index 00000000..6f300f1a --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/lind27.r @@ -0,0 +1,29 @@ +unordered-result +{"ida":0,"idb":0,"cnt":2} +{"ida":0,"idb":5,"cnt":4} +{"ida":0,"idb":10,"cnt":1} +{"ida":0,"idb":15,"cnt":1} +{"ida":5,"idb":null,"cnt":1} +{"ida":10,"idb":4,"cnt":1} +{"ida":15,"idb":null,"cnt":1} +{"ida":20,"idb":null,"cnt":1} +{"ida":25,"idb":null,"cnt":1} +{"ida":30,"idb":null,"cnt":1} +{"ida":35,"idb":null,"cnt":1} +{"ida":40,"idb":0,"cnt":1} +{"ida":40,"idb":5,"cnt":1} +{"ida":40,"idb":6,"cnt":1} +{"ida":40,"idb":7,"cnt":1} +{"ida":40,"idb":8,"cnt":1} +{"ida":40,"idb":9,"cnt":2} +{"ida":45,"idb":null,"cnt":1} +{"ida":50,"idb":null,"cnt":1} +{"ida":55,"idb":null,"cnt":1} +{"ida":60,"idb":null,"cnt":1} +{"ida":65,"idb":null,"cnt":1} +{"ida":70,"idb":null,"cnt":1} +{"ida":75,"idb":null,"cnt":1} +{"ida":80,"idb":null,"cnt":1} +{"ida":85,"idb":null,"cnt":1} +{"ida":90,"idb":null,"cnt":1} +{"ida":95,"idb":null,"cnt":1} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/lind28.r b/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/lind28.r new file mode 100644 index 00000000..4e08ade4 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/lind28.r @@ -0,0 +1,29 @@ +unordered-result +{"ida":0,"idb":0,"cnt":2} +{"ida":0,"idb":5,"cnt":4} +{"ida":0,"idb":10,"cnt":0} +{"ida":0,"idb":15,"cnt":1} +{"ida":5,"idb":null,"cnt":0} +{"ida":10,"idb":4,"cnt":0} +{"ida":15,"idb":null,"cnt":0} +{"ida":20,"idb":null,"cnt":0} +{"ida":25,"idb":null,"cnt":0} +{"ida":30,"idb":null,"cnt":0} +{"ida":35,"idb":null,"cnt":0} +{"ida":40,"idb":0,"cnt":1} +{"ida":40,"idb":5,"cnt":0} +{"ida":40,"idb":6,"cnt":0} +{"ida":40,"idb":7,"cnt":0} +{"ida":40,"idb":8,"cnt":0} 
+{"ida":40,"idb":9,"cnt":2} +{"ida":45,"idb":null,"cnt":0} +{"ida":50,"idb":null,"cnt":0} +{"ida":55,"idb":null,"cnt":0} +{"ida":60,"idb":null,"cnt":0} +{"ida":65,"idb":null,"cnt":0} +{"ida":70,"idb":null,"cnt":0} +{"ida":75,"idb":null,"cnt":0} +{"ida":80,"idb":null,"cnt":0} +{"ida":85,"idb":null,"cnt":0} +{"ida":90,"idb":null,"cnt":0} +{"ida":95,"idb":null,"cnt":0} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/lind29.r b/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/lind29.r new file mode 100644 index 00000000..e6174ede --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/lind29.r @@ -0,0 +1,23 @@ +unordered-result +{"a2":-7,"ida":null,"idb":null,"cnt":0} +{"a2":-5,"ida":null,"idb":null,"cnt":0} +{"a2":5,"ida":0,"idb":0,"cnt":2} +{"a2":5,"ida":0,"idb":5,"cnt":4} +{"a2":5,"ida":0,"idb":10,"cnt":0} +{"a2":5,"ida":0,"idb":15,"cnt":1} +{"a2":5,"ida":null,"idb":null,"cnt":0} +{"a2":8,"ida":null,"idb":null,"cnt":0} +{"a2":11,"ida":null,"idb":null,"cnt":0} +{"a2":13,"ida":null,"idb":null,"cnt":0} +{"a2":17,"ida":null,"idb":null,"cnt":0} +{"a2":21,"ida":null,"idb":null,"cnt":0} +{"a2":23,"ida":null,"idb":null,"cnt":0} +{"a2":35,"ida":null,"idb":null,"cnt":0} +{"a2":35,"ida":40,"idb":0,"cnt":1} +{"a2":35,"ida":40,"idb":5,"cnt":0} +{"a2":35,"ida":40,"idb":6,"cnt":0} +{"a2":35,"ida":40,"idb":7,"cnt":0} +{"a2":35,"ida":40,"idb":8,"cnt":0} +{"a2":35,"ida":40,"idb":9,"cnt":2} +{"a2":37,"ida":null,"idb":null,"cnt":0} +{"a2":42,"ida":10,"idb":4,"cnt":0} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/lind30.r b/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/lind30.r new file mode 100644 index 00000000..33dc05f9 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/lind30.r @@ -0,0 +1,29 @@ +unordered-result +{"a2":-7,"ida":15,"idb":null,"cnt":0} +{"a2":-7,"ida":25,"idb":null,"cnt":0} +{"a2":-5,"ida":45,"idb":null,"cnt":0} +{"a2":5,"ida":0,"idb":0,"cnt":2} +{"a2":5,"ida":0,"idb":5,"cnt":4} +{"a2":5,"ida":0,"idb":10,"cnt":0} +{"a2":5,"ida":0,"idb":15,"cnt":1} +{"a2":5,"ida":30,"idb":null,"cnt":0} +{"a2":5,"ida":35,"idb":null,"cnt":0} +{"a2":5,"ida":85,"idb":null,"cnt":0} +{"a2":8,"ida":95,"idb":null,"cnt":0} +{"a2":11,"ida":60,"idb":null,"cnt":0} +{"a2":11,"ida":65,"idb":null,"cnt":0} +{"a2":13,"ida":55,"idb":null,"cnt":0} +{"a2":17,"ida":50,"idb":null,"cnt":0} +{"a2":21,"ida":70,"idb":null,"cnt":0} +{"a2":23,"ida":20,"idb":null,"cnt":0} +{"a2":23,"ida":75,"idb":null,"cnt":0} +{"a2":23,"ida":90,"idb":null,"cnt":0} +{"a2":35,"ida":5,"idb":null,"cnt":0} +{"a2":35,"ida":40,"idb":0,"cnt":1} +{"a2":35,"ida":40,"idb":5,"cnt":0} +{"a2":35,"ida":40,"idb":6,"cnt":0} +{"a2":35,"ida":40,"idb":7,"cnt":0} +{"a2":35,"ida":40,"idb":8,"cnt":0} +{"a2":35,"ida":40,"idb":9,"cnt":2} +{"a2":37,"ida":80,"idb":null,"cnt":0} +{"a2":42,"ida":10,"idb":4,"cnt":0} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/lind31.r b/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/lind31.r new file mode 100644 index 00000000..c3f9a224 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/lind31.r @@ -0,0 +1,7 @@ +unordered-result +{"ida":40,"Column_2":40,"idb":0} +{"ida":40,"Column_2":40,"idb":5} +{"ida":40,"Column_2":40,"idb":6} +{"ida":40,"Column_2":40,"idb":7} +{"ida":40,"Column_2":40,"idb":8} +{"ida":40,"Column_2":40,"idb":9} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/lind32.r b/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/lind32.r new file mode 
100644 index 00000000..ce8c5db0 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/lind32.r @@ -0,0 +1,22 @@ +unordered-result +{"ida":0,"idb":0,"idc":0} +{"ida":0,"idb":0,"idc":5} +{"ida":5,"idb":null,"idc":null} +{"ida":10,"idb":null,"idc":null} +{"ida":15,"idb":null,"idc":null} +{"ida":20,"idb":null,"idc":null} +{"ida":25,"idb":null,"idc":null} +{"ida":30,"idb":null,"idc":null} +{"ida":35,"idb":null,"idc":null} +{"ida":40,"idb":0,"idc":0} +{"ida":45,"idb":null,"idc":null} +{"ida":50,"idb":null,"idc":null} +{"ida":55,"idb":null,"idc":null} +{"ida":60,"idb":null,"idc":null} +{"ida":65,"idb":null,"idc":null} +{"ida":70,"idb":null,"idc":null} +{"ida":75,"idb":null,"idc":null} +{"ida":80,"idb":null,"idc":null} +{"ida":85,"idb":null,"idc":null} +{"ida":90,"idb":null,"idc":null} +{"ida":95,"idb":null,"idc":null} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/lind33.r b/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/lind33.r new file mode 100644 index 00000000..a5048573 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/lind33.r @@ -0,0 +1,4 @@ +unordered-result +{"ida":40,"count":6} +{"ida":10,"count":1} +{"ida":0,"count":4} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/lind34.r b/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/lind34.r new file mode 100644 index 00000000..b0f0fade --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/lind34.r @@ -0,0 +1,4 @@ +ordered-result +{"ida":40,"count":6} +{"ida":10,"count":1} +{"ida":0,"count":4} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/lind35.r b/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/lind35.r new file mode 100644 index 00000000..1d10db19 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/lind35.r @@ -0,0 +1,21 @@ +ordered-result +{"c1":-12,"ida":95,"cnt":1} +{"c1":-8,"ida":20,"cnt":1} +{"c1":8,"ida":0,"cnt":4} +{"c1":8,"ida":5,"cnt":1} +{"c1":8,"ida":50,"cnt":1} +{"c1":8,"ida":90,"cnt":1} +{"c1":12,"ida":25,"cnt":1} +{"c1":12,"ida":30,"cnt":1} +{"c1":14,"ida":40,"cnt":6} +{"c1":14,"ida":45,"cnt":1} +{"c1":18,"ida":10,"cnt":1} +{"c1":18,"ida":85,"cnt":1} +{"c1":20,"ida":35,"cnt":1} +{"c1":23,"ida":75,"cnt":1} +{"c1":28,"ida":15,"cnt":1} +{"c1":35,"ida":65,"cnt":1} +{"c1":35,"ida":70,"cnt":1} +{"c1":37,"ida":80,"cnt":1} +{"c1":80,"ida":55,"cnt":1} +{"c1":881,"ida":60,"cnt":1} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/lind36.r b/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/lind36.r new file mode 100644 index 00000000..4fd76d48 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/lind36.r @@ -0,0 +1,21 @@ +ordered-result +{"c1":881,"ida":60,"cnt":1} +{"c1":80,"ida":55,"cnt":1} +{"c1":37,"ida":80,"cnt":1} +{"c1":35,"ida":70,"cnt":1} +{"c1":35,"ida":65,"cnt":1} +{"c1":28,"ida":15,"cnt":1} +{"c1":23,"ida":75,"cnt":1} +{"c1":20,"ida":35,"cnt":1} +{"c1":18,"ida":85,"cnt":1} +{"c1":18,"ida":10,"cnt":1} +{"c1":14,"ida":45,"cnt":1} +{"c1":14,"ida":40,"cnt":6} +{"c1":12,"ida":30,"cnt":1} +{"c1":12,"ida":25,"cnt":1} +{"c1":8,"ida":90,"cnt":1} +{"c1":8,"ida":50,"cnt":1} +{"c1":8,"ida":5,"cnt":1} +{"c1":8,"ida":0,"cnt":4} +{"c1":-8,"ida":20,"cnt":1} +{"c1":-12,"ida":95,"cnt":1} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/treed11.r b/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/treed11.r new file mode 100644 index 00000000..3e3a3abe --- /dev/null +++ 
b/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/treed11.r @@ -0,0 +1,39 @@ +ordered-result +{"ida":0,"Column_2":0,"idb":0,"idg":null} +{"ida":0,"Column_2":0,"idb":5,"idg":null} +{"ida":0,"Column_2":0,"idb":10,"idg":null} +{"ida":0,"Column_2":0,"idb":15,"idg":null} +{"ida":0,"Column_2":null,"idb":null,"idg":3} +{"ida":0,"Column_2":null,"idb":null,"idg":7} +{"ida":0,"Column_2":null,"idb":null,"idg":10} +{"ida":0,"Column_2":null,"idb":null,"idg":15} +{"ida":0,"Column_2":null,"idb":null,"idg":21} +{"ida":5,"Column_2":null,"idb":null,"idg":2} +{"ida":10,"Column_2":10,"idb":4,"idg":null} +{"ida":15,"Column_2":null,"idb":null,"idg":null} +{"ida":20,"Column_2":null,"idb":null,"idg":null} +{"ida":25,"Column_2":null,"idb":null,"idg":null} +{"ida":30,"Column_2":null,"idb":null,"idg":null} +{"ida":35,"Column_2":null,"idb":null,"idg":null} +{"ida":40,"Column_2":40,"idb":0,"idg":null} +{"ida":40,"Column_2":40,"idb":5,"idg":null} +{"ida":40,"Column_2":40,"idb":6,"idg":null} +{"ida":40,"Column_2":40,"idb":7,"idg":null} +{"ida":40,"Column_2":40,"idb":8,"idg":null} +{"ida":40,"Column_2":40,"idb":9,"idg":null} +{"ida":40,"Column_2":null,"idb":null,"idg":3} +{"ida":40,"Column_2":null,"idb":null,"idg":7} +{"ida":40,"Column_2":null,"idb":null,"idg":10} +{"ida":40,"Column_2":null,"idb":null,"idg":15} +{"ida":40,"Column_2":null,"idb":null,"idg":21} +{"ida":45,"Column_2":null,"idb":null,"idg":null} +{"ida":50,"Column_2":null,"idb":null,"idg":null} +{"ida":55,"Column_2":null,"idb":null,"idg":null} +{"ida":60,"Column_2":null,"idb":null,"idg":null} +{"ida":65,"Column_2":null,"idb":null,"idg":null} +{"ida":70,"Column_2":null,"idb":null,"idg":null} +{"ida":75,"Column_2":null,"idb":null,"idg":null} +{"ida":80,"Column_2":null,"idb":null,"idg":null} +{"ida":85,"Column_2":null,"idb":null,"idg":null} +{"ida":90,"Column_2":null,"idb":null,"idg":null} +{"ida":95,"Column_2":null,"idb":null,"idg":null} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/treed12.r b/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/treed12.r new file mode 100644 index 00000000..bb855546 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/treed12.r @@ -0,0 +1,39 @@ +unordered-result +{"ida":0,"Column_2":null,"idb":null,"idg":3} +{"ida":0,"Column_2":null,"idb":null,"idg":7} +{"ida":0,"Column_2":null,"idb":null,"idg":10} +{"ida":0,"Column_2":null,"idb":null,"idg":15} +{"ida":0,"Column_2":null,"idb":null,"idg":21} +{"ida":0,"Column_2":0,"idb":0,"idg":null} +{"ida":0,"Column_2":0,"idb":5,"idg":null} +{"ida":0,"Column_2":0,"idb":10,"idg":null} +{"ida":0,"Column_2":0,"idb":15,"idg":null} +{"ida":5,"Column_2":null,"idb":null,"idg":2} +{"ida":10,"Column_2":10,"idb":4,"idg":null} +{"ida":15,"Column_2":null,"idb":null,"idg":null} +{"ida":20,"Column_2":null,"idb":null,"idg":null} +{"ida":25,"Column_2":null,"idb":null,"idg":null} +{"ida":30,"Column_2":null,"idb":null,"idg":null} +{"ida":35,"Column_2":null,"idb":null,"idg":null} +{"ida":40,"Column_2":null,"idb":null,"idg":3} +{"ida":40,"Column_2":null,"idb":null,"idg":7} +{"ida":40,"Column_2":null,"idb":null,"idg":10} +{"ida":40,"Column_2":null,"idb":null,"idg":15} +{"ida":40,"Column_2":null,"idb":null,"idg":21} +{"ida":40,"Column_2":40,"idb":0,"idg":null} +{"ida":40,"Column_2":40,"idb":5,"idg":null} +{"ida":40,"Column_2":40,"idb":6,"idg":null} +{"ida":40,"Column_2":40,"idb":7,"idg":null} +{"ida":40,"Column_2":40,"idb":8,"idg":null} +{"ida":40,"Column_2":40,"idb":9,"idg":null} +{"ida":45,"Column_2":null,"idb":null,"idg":null} 
+{"ida":50,"Column_2":null,"idb":null,"idg":null} +{"ida":55,"Column_2":null,"idb":null,"idg":null} +{"ida":60,"Column_2":null,"idb":null,"idg":null} +{"ida":65,"Column_2":null,"idb":null,"idg":null} +{"ida":70,"Column_2":null,"idb":null,"idg":null} +{"ida":75,"Column_2":null,"idb":null,"idg":null} +{"ida":80,"Column_2":null,"idb":null,"idg":null} +{"ida":85,"Column_2":null,"idb":null,"idg":null} +{"ida":90,"Column_2":null,"idb":null,"idg":null} +{"ida":95,"Column_2":null,"idb":null,"idg":null} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/treed13.r b/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/treed13.r new file mode 100644 index 00000000..3e3a3abe --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/expres/treed13.r @@ -0,0 +1,39 @@ +ordered-result +{"ida":0,"Column_2":0,"idb":0,"idg":null} +{"ida":0,"Column_2":0,"idb":5,"idg":null} +{"ida":0,"Column_2":0,"idb":10,"idg":null} +{"ida":0,"Column_2":0,"idb":15,"idg":null} +{"ida":0,"Column_2":null,"idb":null,"idg":3} +{"ida":0,"Column_2":null,"idb":null,"idg":7} +{"ida":0,"Column_2":null,"idb":null,"idg":10} +{"ida":0,"Column_2":null,"idb":null,"idg":15} +{"ida":0,"Column_2":null,"idb":null,"idg":21} +{"ida":5,"Column_2":null,"idb":null,"idg":2} +{"ida":10,"Column_2":10,"idb":4,"idg":null} +{"ida":15,"Column_2":null,"idb":null,"idg":null} +{"ida":20,"Column_2":null,"idb":null,"idg":null} +{"ida":25,"Column_2":null,"idb":null,"idg":null} +{"ida":30,"Column_2":null,"idb":null,"idg":null} +{"ida":35,"Column_2":null,"idb":null,"idg":null} +{"ida":40,"Column_2":40,"idb":0,"idg":null} +{"ida":40,"Column_2":40,"idb":5,"idg":null} +{"ida":40,"Column_2":40,"idb":6,"idg":null} +{"ida":40,"Column_2":40,"idb":7,"idg":null} +{"ida":40,"Column_2":40,"idb":8,"idg":null} +{"ida":40,"Column_2":40,"idb":9,"idg":null} +{"ida":40,"Column_2":null,"idb":null,"idg":3} +{"ida":40,"Column_2":null,"idb":null,"idg":7} +{"ida":40,"Column_2":null,"idb":null,"idg":10} +{"ida":40,"Column_2":null,"idb":null,"idg":15} +{"ida":40,"Column_2":null,"idb":null,"idg":21} +{"ida":45,"Column_2":null,"idb":null,"idg":null} +{"ida":50,"Column_2":null,"idb":null,"idg":null} +{"ida":55,"Column_2":null,"idb":null,"idg":null} +{"ida":60,"Column_2":null,"idb":null,"idg":null} +{"ida":65,"Column_2":null,"idb":null,"idg":null} +{"ida":70,"Column_2":null,"idb":null,"idg":null} +{"ida":75,"Column_2":null,"idb":null,"idg":null} +{"ida":80,"Column_2":null,"idb":null,"idg":null} +{"ida":85,"Column_2":null,"idb":null,"idg":null} +{"ida":90,"Column_2":null,"idb":null,"idg":null} +{"ida":95,"Column_2":null,"idb":null,"idg":null} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/q/all b/kvtest/kvquery-IT/src/main/resources/cases/joins/q/all index 7a50fcd9..51ad5dd7 100644 --- a/kvtest/kvquery-IT/src/main/resources/cases/joins/q/all +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/q/all @@ -1,3 +1,11 @@ +################ +26844.q +################ +select * +from nested tables (X x descendants (X.Y y)) + + + ################ err01.q ################ @@ -43,6 +51,32 @@ order by a.ida +################ +err05.q +################ +select * +from nested tables(A a descendants(A.B b, A.B.C c, A.B.C.D d)) +where a.ida != 40 +order by a.ida desc + + + +################ +err06.q +################ +select * +from nested tables (A.B b1 ancestors (A.B b2)) + + + +################ +err07.q +################ +select * +from nested tables (A.B ancestors (A.B)) + + + ################ lina01.q ################ @@ -170,6 +204,8 @@ from nested 
tables(A.B.C.D d ancestors(A a, A.B b, A.B.C c)) where d.d2 > 15 and d.c3 < 100 + + ################ lina12.q ################ @@ -184,6 +220,7 @@ from nested tables(A.B.C.D d ancestors(A a, A.B b, A.B.C c)) where d.d2 > 15 and d.c3 < 100 + ################ lina13.q ################ @@ -193,6 +230,46 @@ order by c.ida desc +################ +lina14.q +################ +select * +from nested tables(A.B.C c ancestors(A a, A.B b)) +order by c.ida + + + +################ +lina15.q +################ +select d.ida as d_ida, d.idb as d_idb, d.idc as d_idc, d.idd as d_idd, + a.ida as a_ida, a.c1 as a_c1, + b.ida as b_ida, b.idb as b_idb, b.c1 as b_c1, + c.ida as c_ida, c.idb as c_idb, c.idc as c_idc +from nested tables(A.B.C.D d ancestors(A a, A.B b, A.B.C c)) +where d.d2 > 15 and d.c3 < 100 + + + +################ +lina16.q +################ +select * +from nested tables(A.B.C.D d ancestors(A)) +where d.d2 < 10 + + +################ +lina17.q +################ +select b.ida, b.idb +from nested tables(A.B b ancestors(A a)) +where b.ida = 10 and + b.b1 = a.a1 +order by b.ida, b.idb + + + ################ linad01.q ################ @@ -230,6 +307,58 @@ order by c.ida, c.idb +################ +linad04.q +################ +# row_storage_size on target, ancestor, and descendant tables +# index_storage_size on anscestor table + +select $b.ida as b_ida, $b.idb as b_idb, + $a.ida as a_ida, $a.a1, + $c.ida as c_ida, $c.idb as c_idb, $c.idc as c_idc, + row_storage_size($a) < row_storage_size($b) and + row_storage_size($b) < row_storage_size($c) as c_size, + 20 < index_storage_size($a, "a_idx_a2") and + index_storage_size($a, "a_idx_a2") < 30 as size_a_idx_a2 +from nested tables(A.B $b ancestors(A $a) descendants(A.B.C $c)) +where $b.ida != 40 +order by $b.ida, $b.idb + + + +################ +linad05.q +################ +# index_storage_size on descendant table + +select $b.ida as b_ida, $b.idb as b_idb, + $a.ida as a_ida, $a.a1, + $d.ida as d_ida, $d.idb as d_idb, $d.idc as d_idc, $d.idd as d_idd, + 50 <= index_storage_size($d, "d_idx_d2_idb_c3") and + index_storage_size($d, "d_idx_d2_idb_c3") < 65 as size_d_idx_d2_idb_c3 +from nested tables(A.B $b ancestors(A $a) descendants(A.B.C.D $d)) +where $b.ida != 40 +order by $b.ida, $b.idb + + + +################ +linad06.q +################ +# index_storage_size on target and on secondary index used by query + +select $a.ida as a_ida, $a.a1, + $d.ida as d_ida, $d.idb as d_idb, $d.idc as d_idc, $d.idd as d_idd, + 20 < index_storage_size($a, "a_idx_c1") and + index_storage_size($a, "a_idx_c1") < 28 as size_a_idx_c1, + 50 < index_storage_size($d, "d_idx_d2_idb_c3") and + index_storage_size($d, "d_idx_d2_idb_c3") < 65 as size_d_idx_d2_idb_c3 +from nested tables(A $a descendants(A.B.C.D $d)) +where $a.c1 > 10 +order by $a.ida + + + ################ lind01.q ################ @@ -444,10 +573,12 @@ order by a.ida limit 10 + ################ lind21.q ################ -select a.ida as a_ida, a.c1, +select /*+ FORCE_INDEX(A a_idx_c1_a2) */ + a.ida as a_ida, a.c1, b.ida as b_ida, b.idb as b_idb, c.ida as c_ida, c.idb as c_idb, c.idc as c_idc, d.ida as d_ida, d.idb as d_idb, d.idc as d_idc, d.idd as d_idd @@ -455,6 +586,106 @@ from nested tables(A a descendants(A.B b, A.B.C c, A.B.C.D d)) order by a.c1 + +################ +lind22.q +################ +select a.ida as a_ida, b.ida as b_ida, b.idb as b_idb , b.c1 as b_c1 +from nested tables (A as a descendants(A.B as b)) +order by a.c1, a.ida + + + +################ +lind23.q +################ +select /*+ force_primary_index(A) 
*/ * +from nested tables(A a descendants(A.B b)) +order by a.ida desc + + + +################ +lind24.q +################ +select a.ida as a_ida, + b.ida as b_ida, b.idb as b_idb +from nested tables(A a descendants(A.B b)) +where a.ida = 40 +order by b.idb desc + + + +################ +lind25.q +################ +select /* FORCE_PRIMARY_INDEX(A) */ + a.ida, b.ida, b.idb +from nested tables(A a descendants(A.B b)) +order by a.ida, b.ida, b.idb +limit 10 + + + +################ +lind26.q +################ +select /* FORCE_PRIMARY_INDEX(A) */ + a.ida, b.ida, b.idb +from nested tables(A a descendants(A.B b)) +order by a.ida, b.idb +limit 10 + + + +################ +lind27.q +################ +select a.ida, b.idb, count(*) as cnt +from nested tables(A a descendants(A.B b, A.B.C c)) +group by a.ida, b.idb + + + +################ +lind28.q +################ +select a.ida, b.idb, count(case + when b.idb is null then b.idb + when c.idc is null then c.idc + else 1 + end) as cnt +from nested tables(A a descendants(A.B b, A.B.C c)) +group by a.ida, b.idb + + + +################ +lind29.q +################ +select a.a2, b.ida, b.idb, count(case + when b.idb is null then b.idb + when c.idc is null then c.idc + else 1 + end) as cnt +from nested tables(A a descendants(A.B b, A.B.C c)) +group by a.a2, b.ida, b.idb + + + +################ +lind30.q +################ +select a.a2, a.ida, b.idb, count(case + when b.idb is null then b.idb + when c.idc is null then c.idc + else 1 + end) as cnt +from nested tables(A a descendants(A.B b, A.B.C c)) +group by a.a2, a.ida, b.idb + + + ################ treead01.q ################ @@ -466,7 +697,7 @@ select b.ida as b_ida, b.idb as b_idb, from nested tables(A.B b ancestors(A a) descendants(A.B.C.D d on a.ida != 15 and d.idd <= $ext1, A.B.E e)) -order by b.ida, b.idb +order by b.ida, b.idb, d.idc, d.idd, e.ide @@ -513,7 +744,7 @@ from nested tables(A a descendants(A.B.C.D d, A.B.C c, A.B.E e, A.G.H h)) -order by a.ida +order by a.ida, b.idb, c.idc, d.idd, e.ide, g.idg, j.idj, h.idh @@ -565,6 +796,7 @@ from nested tables(A a descendants(A.B.C.D d on d1 > 10, order by a.ida + ################ treed07.q ################ @@ -576,10 +808,12 @@ from nested tables(A a descendants(A.B b, A.B.C c, A.G.J.K k)) where a.a2 > 30 + ################ treed08.q ################ -select a.ida as a_ida, a.a1, +select /*+ FORCE_INDEX(A a_idx_a2) */ + a.ida as a_ida, a.a1, b.ida as b_ida, b.idb as b_idb, c.ida as c_ida, c.idb as c_idb, c.idc as c_idc, k.ida as k_ida, k.idg as k_idg, k.idj as k_idj, k.idk as k_idk @@ -587,6 +821,7 @@ from nested tables(A a descendants(A.B b, A.B.C c, A.G.J.K k)) where a.a2 < 30 + ################ treed09.q ################ @@ -597,3 +832,72 @@ select a.ida as a_ida, a.a2, a.a1, from nested tables(A a descendants(A.B b, A.B.C c, A.G.J.K k)) where a.a2 < 30 order by a.a2 desc + + + +################ +treed10.q +################ +select a.ida as a_ida, a.a1, + b.ida as b_ida, b.idb as b_idb, + c.ida as c_ida, c.idb as c_idb, c.idc as c_idc, + k.ida as k_ida, k.idg as k_idg, k.idj as k_idj, k.idk as k_idk +from nested tables(A a descendants(A.B b, A.B.C c, A.G.J.K k)) +where a.a2 < 30 + + + +################ +treed11.q +################ +select /* FORCE_PRIMARY_INDEX(A) */ + a.ida, b.ida, b.idb, g.idg +from nested tables(A a descendants(A.B b, A.G g)) +order by a.ida, b.idb + + + +################ +treed12.q +################ +select /* FORCE_PRIMARY_INDEX(A) */ + a.ida, b.ida, b.idb, g.idg +from nested tables(A a descendants(A.B b, A.G g)) +order by a.ida, 
g.idg + + + +################ +treed13.q +################ +select /* FORCE_PRIMARY_INDEX(A) */ + a.ida, b.ida, b.idb, g.idg +from nested tables(A a descendants(A.B b, A.G g)) +order by a.ida, b.idb, g.idg + + + +################ +unnest01.q +################ +select p.idp, c.idc, c.a1, $pa +from NESTED TABLES(P p descendants(P.C c)), p.arr[] as $pa + + + +################ +unnest02.q +################ +select p.idp, c.idc, c.a1, $pa +from NESTED TABLES(P p descendants(P.C c)), c.arr[] as $pa + + + +################ +unnest03.q +################ +select p.idp, c.idc, c.a1, $pa, $ca +from NESTED TABLES(P p descendants(P.C c)), p.arr[] as $pa, c.arr[] as $ca + + + diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/q/inner01.q b/kvtest/kvquery-IT/src/main/resources/cases/joins/q/inner01.q new file mode 100644 index 00000000..7759b5c2 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/q/inner01.q @@ -0,0 +1,4 @@ +select a.ida, b.ida, b.idb +from A a, A.B b +where a.ida = b.ida +order by a.ida, b.idb diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/q/inner02.q b/kvtest/kvquery-IT/src/main/resources/cases/joins/q/inner02.q new file mode 100644 index 00000000..b39e2099 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/q/inner02.q @@ -0,0 +1,4 @@ +select a.ida, b.ida, b.idb +from A a, A.B b +where a.ida = b.ida +order by a.ida desc, b.idb desc diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/q/inner03.q b/kvtest/kvquery-IT/src/main/resources/cases/joins/q/inner03.q new file mode 100644 index 00000000..953d0f38 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/q/inner03.q @@ -0,0 +1,4 @@ +select a.ida, b.ida, b.idb +from A.B b, A a +where a.ida = b.ida +order by a.ida desc, b.idb desc diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/q/inner04.q b/kvtest/kvquery-IT/src/main/resources/cases/joins/q/inner04.q new file mode 100644 index 00000000..2dd1b773 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/q/inner04.q @@ -0,0 +1,4 @@ +select a.ida, b.ida, b.idb +from A.B b, A a +where a.ida = b.ida +order by b.ida desc, b.idb desc diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/q/lina17.q b/kvtest/kvquery-IT/src/main/resources/cases/joins/q/lina17.q new file mode 100644 index 00000000..c3fcbc68 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/q/lina17.q @@ -0,0 +1,5 @@ +select b.ida, b.idb +from nested tables(A.B b ancestors(A a)) +where b.ida = 10 and + b.b1 = a.a1 +order by b.ida, b.idb diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/q/lind25.q b/kvtest/kvquery-IT/src/main/resources/cases/joins/q/lind25.q new file mode 100644 index 00000000..32b9fe52 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/q/lind25.q @@ -0,0 +1,5 @@ +select /* FORCE_PRIMARY_INDEX(A) */ + a.ida, b.ida, b.idb +from nested tables(A a descendants(A.B b)) +order by a.ida, b.ida, b.idb +limit 10 diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/q/lind26.q b/kvtest/kvquery-IT/src/main/resources/cases/joins/q/lind26.q new file mode 100644 index 00000000..3227a392 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/q/lind26.q @@ -0,0 +1,5 @@ +select /* FORCE_PRIMARY_INDEX(A) */ + a.ida, b.ida, b.idb +from nested tables(A a descendants(A.B b)) +order by a.ida, b.idb +limit 10 diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/q/lind27.q b/kvtest/kvquery-IT/src/main/resources/cases/joins/q/lind27.q new file 
mode 100644 index 00000000..a7b249be --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/q/lind27.q @@ -0,0 +1,3 @@ +select a.ida, b.idb, count(*) as cnt +from nested tables(A a descendants(A.B b, A.B.C c)) +group by a.ida, b.idb diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/q/lind28.q b/kvtest/kvquery-IT/src/main/resources/cases/joins/q/lind28.q new file mode 100644 index 00000000..920e1820 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/q/lind28.q @@ -0,0 +1,7 @@ +select a.ida, b.idb, count(case + when b.idb is null then b.idb + when c.idc is null then c.idc + else 1 + end) as cnt +from nested tables(A a descendants(A.B b, A.B.C c)) +group by a.ida, b.idb diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/q/lind29.q b/kvtest/kvquery-IT/src/main/resources/cases/joins/q/lind29.q new file mode 100644 index 00000000..5356ca46 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/q/lind29.q @@ -0,0 +1,7 @@ +select a.a2, b.ida, b.idb, count(case + when b.idb is null then b.idb + when c.idc is null then c.idc + else 1 + end) as cnt +from nested tables(A a descendants(A.B b, A.B.C c)) +group by a.a2, b.ida, b.idb diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/q/lind30.q b/kvtest/kvquery-IT/src/main/resources/cases/joins/q/lind30.q new file mode 100644 index 00000000..27e54706 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/q/lind30.q @@ -0,0 +1,7 @@ +select a.a2, a.ida, b.idb, count(case + when b.idb is null then b.idb + when c.idc is null then c.idc + else 1 + end) as cnt +from nested tables(A a descendants(A.B b, A.B.C c)) +group by a.a2, a.ida, b.idb diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/q/lind31.q b/kvtest/kvquery-IT/src/main/resources/cases/joins/q/lind31.q new file mode 100644 index 00000000..840e3c39 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/q/lind31.q @@ -0,0 +1,4 @@ +select a.ida, b.ida, b.idb +from nested tables(A a descendants(A.B b)) +where a.ida = 40 +order by b.idb diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/q/lind32.q b/kvtest/kvquery-IT/src/main/resources/cases/joins/q/lind32.q new file mode 100644 index 00000000..aa548f03 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/q/lind32.q @@ -0,0 +1,3 @@ +select a.ida, b.idb, c.idc +from nested tables(A a descendants(A.B b on b.idb = 0, A.B.C c)) +order by a.ida, b.idb, c.idc diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/q/lind33.q b/kvtest/kvquery-IT/src/main/resources/cases/joins/q/lind33.q new file mode 100644 index 00000000..523496de --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/q/lind33.q @@ -0,0 +1,5 @@ +select a.ida, count(b.idb) as count +from nested tables(A a descendants(A.B b)) +where b.idb is not null +group by a.ida +order by a.ida desc diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/q/lind34.q b/kvtest/kvquery-IT/src/main/resources/cases/joins/q/lind34.q new file mode 100644 index 00000000..73e60ba9 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/q/lind34.q @@ -0,0 +1,5 @@ +select b.ida, count(b.idb) as count +from nested tables(A.B b ancestors(A a)) +where a.ida is not null +group by b.ida +order by b.ida desc diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/q/lind35.q b/kvtest/kvquery-IT/src/main/resources/cases/joins/q/lind35.q new file mode 100644 index 00000000..30b71608 --- /dev/null +++ 
b/kvtest/kvquery-IT/src/main/resources/cases/joins/q/lind35.q @@ -0,0 +1,6 @@ +select a.c1, a.ida, count(*) as cnt +from nested tables (A as a descendants(A.B as b)) +group by a.c1, a.ida +order by a.c1, a.ida + + diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/q/lind36.q b/kvtest/kvquery-IT/src/main/resources/cases/joins/q/lind36.q new file mode 100644 index 00000000..f09470b8 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/q/lind36.q @@ -0,0 +1,4 @@ +select a.c1, a.ida, count(*) as cnt +from nested tables (A as a descendants(A.B as b)) +group by a.c1, a.ida +order by a.c1 desc, a.ida desc diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/q/treed11.q b/kvtest/kvquery-IT/src/main/resources/cases/joins/q/treed11.q new file mode 100644 index 00000000..4d7ecd82 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/q/treed11.q @@ -0,0 +1,4 @@ +select /* FORCE_PRIMARY_INDEX(A) */ + a.ida, b.ida, b.idb, g.idg +from nested tables(A a descendants(A.B b, A.G g)) +order by a.ida, b.idb diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/q/treed12.q b/kvtest/kvquery-IT/src/main/resources/cases/joins/q/treed12.q new file mode 100644 index 00000000..3508daa0 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/q/treed12.q @@ -0,0 +1,4 @@ +select /* FORCE_PRIMARY_INDEX(A) */ + a.ida, b.ida, b.idb, g.idg +from nested tables(A a descendants(A.B b, A.G g)) +order by a.ida, g.idg diff --git a/kvtest/kvquery-IT/src/main/resources/cases/joins/q/treed13.q b/kvtest/kvquery-IT/src/main/resources/cases/joins/q/treed13.q new file mode 100644 index 00000000..188401d8 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/joins/q/treed13.q @@ -0,0 +1,4 @@ +select /* FORCE_PRIMARY_INDEX(A) */ + a.ida, b.ida, b.idb, g.idg +from nested tables(A a descendants(A.B b, A.G g)) +order by a.ida, b.idb, g.idg diff --git a/kvtest/kvquery-IT/src/main/resources/cases/json_idx/q/all b/kvtest/kvquery-IT/src/main/resources/cases/json_idx/q/all index 4918f1ab..b9f71009 100644 --- a/kvtest/kvquery-IT/src/main/resources/cases/json_idx/q/all +++ b/kvtest/kvquery-IT/src/main/resources/cases/json_idx/q/all @@ -1450,3 +1450,13 @@ where id = 3 +################ +upd06.q +################ +declare $a string; +update foo f +put f.info.children.values() { $a : 3 } +where id = 2 + + + diff --git a/kvtest/kvquery-IT/src/main/resources/cases/maths/explans/idx_power01.r b/kvtest/kvquery-IT/src/main/resources/cases/maths/explans/idx_power01.r index af085ffd..f1937e72 100644 --- a/kvtest/kvquery-IT/src/main/resources/cases/maths/explans/idx_power01.r +++ b/kvtest/kvquery-IT/src/main/resources/cases/maths/explans/idx_power01.r @@ -15,56 +15,27 @@ compiled-query-plan "target table" : "math_test", "row variable" : "$$math_test", "index used" : "idx_power_ic", - "covering index" : true, - "index row variable" : "$$math_test_idx", + "covering index" : false, "index scans" : [ { "equality conditions" : {}, - "range conditions" : {} + "range conditions" : { "power#ic@,2" : { "start value" : 1000.0, "start inclusive" : false } } } ], - "index filtering predicate" : - { - "iterator kind" : "GREATER_THAN", - "left operand" : - { - "iterator kind" : "POWER", - "input iterators" : [ - { - "iterator kind" : "FIELD_STEP", - "field name" : "power#ic@,2", - "input iterator" : - { - "iterator kind" : "VAR_REF", - "variable" : "$$math_test_idx" - } - }, - { - "iterator kind" : "CONST", - "value" : 2 - } - ] - }, - "right operand" : - { - "iterator kind" : "CONST", - 
"value" : 1000.0 - } - }, "position in join" : 0 }, - "FROM variable" : "$$math_test_idx", + "FROM variable" : "$$math_test", "SELECT expressions" : [ { "field name" : "id", "field expression" : { "iterator kind" : "FIELD_STEP", - "field name" : "#id", + "field name" : "id", "input iterator" : { "iterator kind" : "VAR_REF", - "variable" : "$$math_test_idx" + "variable" : "$$math_test" } } }, @@ -73,11 +44,11 @@ compiled-query-plan "field expression" : { "iterator kind" : "FIELD_STEP", - "field name" : "power#ic@,2", + "field name" : "ic", "input iterator" : { "iterator kind" : "VAR_REF", - "variable" : "$$math_test_idx" + "variable" : "$$math_test" } } }, @@ -95,11 +66,11 @@ compiled-query-plan "input iterators" : [ { "iterator kind" : "FIELD_STEP", - "field name" : "power#ic@,2", + "field name" : "ic", "input iterator" : { "iterator kind" : "VAR_REF", - "variable" : "$$math_test_idx" + "variable" : "$$math_test" } }, { diff --git a/kvtest/kvquery-IT/src/main/resources/cases/maths/explans/idx_power02.r b/kvtest/kvquery-IT/src/main/resources/cases/maths/explans/idx_power02.r index eb3ae284..50ddadfe 100644 --- a/kvtest/kvquery-IT/src/main/resources/cases/maths/explans/idx_power02.r +++ b/kvtest/kvquery-IT/src/main/resources/cases/maths/explans/idx_power02.r @@ -5,7 +5,7 @@ compiled-query-plan "plan" : { "iterator kind" : "RECEIVE", - "distribution kind" : "ALL_SHARDS", + "distribution kind" : "ALL_PARTITIONS", "input iterator" : { "iterator kind" : "SELECT", @@ -14,57 +14,56 @@ compiled-query-plan "iterator kind" : "TABLE", "target table" : "math_test", "row variable" : "$$math_test", - "index used" : "idx_power_ic", - "covering index" : true, - "index row variable" : "$$math_test_idx", + "index used" : "primary index", + "covering index" : false, "index scans" : [ { "equality conditions" : {}, "range conditions" : {} } ], - "index filtering predicate" : + "position in join" : 0 + }, + "FROM variable" : "$$math_test", + "WHERE" : + { + "iterator kind" : "GREATER_THAN", + "left operand" : { - "iterator kind" : "GREATER_THAN", - "left operand" : - { - "iterator kind" : "POWER", - "input iterators" : [ - { - "iterator kind" : "FIELD_STEP", - "field name" : "power#ic@,2", - "input iterator" : - { - "iterator kind" : "VAR_REF", - "variable" : "$$math_test_idx" - } - }, + "iterator kind" : "POWER", + "input iterators" : [ + { + "iterator kind" : "FIELD_STEP", + "field name" : "ic", + "input iterator" : { - "iterator kind" : "CONST", - "value" : 3 + "iterator kind" : "VAR_REF", + "variable" : "$$math_test" } - ] - }, - "right operand" : - { - "iterator kind" : "CONST", - "value" : 1000.0 - } + }, + { + "iterator kind" : "CONST", + "value" : 3 + } + ] }, - "position in join" : 0 + "right operand" : + { + "iterator kind" : "CONST", + "value" : 1000.0 + } }, - "FROM variable" : "$$math_test_idx", "SELECT expressions" : [ { "field name" : "id", "field expression" : { "iterator kind" : "FIELD_STEP", - "field name" : "#id", + "field name" : "id", "input iterator" : { "iterator kind" : "VAR_REF", - "variable" : "$$math_test_idx" + "variable" : "$$math_test" } } }, @@ -73,11 +72,11 @@ compiled-query-plan "field expression" : { "iterator kind" : "FIELD_STEP", - "field name" : "power#ic@,2", + "field name" : "ic", "input iterator" : { "iterator kind" : "VAR_REF", - "variable" : "$$math_test_idx" + "variable" : "$$math_test" } } }, @@ -89,11 +88,11 @@ compiled-query-plan "input iterators" : [ { "iterator kind" : "FIELD_STEP", - "field name" : "power#ic@,2", + "field name" : "ic", "input iterator" : { 
"iterator kind" : "VAR_REF", - "variable" : "$$math_test_idx" + "variable" : "$$math_test" } }, { diff --git a/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/after.ddl b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/after.ddl new file mode 100644 index 00000000..86ff10f0 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/after.ddl @@ -0,0 +1,3 @@ +drop table foo + +drop table bar diff --git a/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/before.data b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/before.data new file mode 100644 index 00000000..201369d7 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/before.data @@ -0,0 +1,586 @@ +### +# Table and records can be on multiple lines but they must be delimited by an +# empty line. +# +# Line "Table: table-name" defines the table name to be used and it must +# precede the records. Multiple tables can be populated in the same file. +### + +Table: Foo + +{ + "id":0, + "info": + { + "firstName":"first0", "lastName":"last0","age":10, + "address": + { + "city": "San Fransisco", + "state" : "CA", + "phones" : [ { "areacode" : 408, "number" : 50, "kind" : "home" }, + { "areacode" : 650, "number" : 51, "kind" : "work" }, + { "areacode" : null, "number" : 52, "kind" : "home" }, + { "areacode" : 510, "number" : 53, "kind" : "home" }, + { "areacode" : 415, "number" : 54 }, + "650-234-4556" + ] + }, + "children": + { + "Anna" : { "age" : 10, "school" : "sch_1", "friends" : ["Anna", "John", "Maria"]}, + "Lisa" : { "age" : 12, "friends" : ["Ada"]}, + "Mary" : { "age" : 7, "school" : "sch_3", "friends" : ["Anna", "Mark"]} + } + } +} + +{ + "id":1, + "info": + { + "firstName":"first1", "lastName":"last1","age":11, + "address": + { + "city" : "Boston", + "state" : "MA", + "phones" : [ { "areacode" : 304, "number" : 30, "kind" : "work" }, + { "areacode" : 318, "number" : 31, "kind" : "work" }, + { "areacode" : 400, "number" : 41, "kind" : "home" }, + { "areacode" : 450, "number" : 43, "kind" : "home" }, + { "areacode" : 480, "number" : 44, "kind" : "work" }, + { "areacode" : 500, "number" : 51, "kind" : "work" }, + { "areacode" : 520, "number" : 52, "kind" : "home" }, + { "areacode" : 550, "number" : 53, "kind" : "home" }, + { "areacode" : 560, "number" : 55, "kind" : "work" } ] + }, + "children": + { + "Anna" : { "age" : 9, "school" : "sch_1", "friends" : ["Bobby", "John", null]}, + "Mark" : { "age" : 4, "school" : "sch_1", "friends" : ["George"]}, + "Dave" : { "age" : 15, "school" : "sch_3", "friends" : ["Bill", "Sam"]} + } + } +} + +{ + "id":2, + "info": + { + "firstName":"first2", "lastName":"last2","age":12, + "address": + { + "city" : "Portland", + "state" : "OR", + "phones" : [ { "areacode" : 104, "number" : 10, "kind" : "home" }, + { "areacode" : 118, "number" : 11, "kind" : "work" } ] + }, + "children": + { + } + } +} + +{ + "id":3, + "info": + { + "firstName":"first3", "lastName":"last3","age":13, + "address": + { + "city" : "Seattle", + "state" : "WA", + "phones" : null + }, + "children": + { + "George" : { "age" : 7, "school" : "sch_2", "friends" : ["Bill", "Mark"]}, + "Matt" : { "age" : 14, "school" : "sch_2", "friends" : ["Bill"]} + } + } +} + +{ + "id":4, + "info": + { + "firstName":"first4", "lastName":"last4","age":15, + "address": + { + "city" : "Salem", + "state" : "MA", + "phones" : [ { "areacode" : 400, "number" : 41, "kind" : "home" }, + { "areacode" : 460, "number" : 44, "kind" : "work" }, + { "areacode" : 500, "number" : 50, "kind" : "work" }, + { 
"areacode" : 570, "number" : 51 }, + { "areacode" : 580, "number" : 51 }, + { "areacode" : 600, "number" : 61, "kind" : "work" }, + { "areacode" : 610, "number" : 62, "kind" : "home" }, + { "areacode" : 620, "number" : 63, "kind" : "work" } ] + }, + "children": + { + "Anna" : { "age" : 9, "school" : "sch_2", "friends" : ["George", "John"]}, + "Mark" : { "age" : 9, "school" : "sch_1", "friends" : ["George"]} + } + } +} + +{ + "id":5, + "info": + { + "firstName":"first5", "lastName":"last5","age":11, + "address": + { + "city" : "Salem", + "state" : "MA", + "phones" : [ { "areacode" : 605, "number" : 60, "kind" : "work" }, + { "areacode" : 520, "number" : 62 } ] + }, + "children": + { + "Anna" : { "age" : 29, "school" : "sch_1", "friends" : ["Bobby", "Mark"]}, + "Mark" : { "age" : 14, "school" : "sch_2", "friends" : ["George"]}, + "Dave" : { "age" : 16 }, + "Tim" : { "age" : 8, "school" : "sch_2", "hobbies" : ["football", "tennis"]}, + "Julie" : { "age" : 12, "school" : "sch_2", "scores" : [1, 3.5, 4.2, 2]} + } + } +} + +{ + "id":6, + "info": + { + "firstName":"first6", "lastName":"last6","age":11, + "address": + { + "city" : "Albany", + "state" : "NY", + "phones" : [ { "areacode" : 605, "number" : 60, "kind" : "work" }, + { "areacode" : 629, "number" : 62, "kind" : "work" } ] + } + } +} + +{ + "id":7, + "info": + { + "firstName":"first7", "lastName":"last7","age":30, + "address": + { + "city": "San Jose", + "state" : "CA", + "phones" : [ { "areacode" : 408, "number" : 70, "kind" : "work" }, + { "areacode" : 408, "number" : 71, "kind" : "home" }, + { "areacode" : 408, "number" : 72 }, + { "areacode" : 510, "number" : 73, "kind" : "home" }, + { "areacode" : 415, "number" : 74, "kind" : "home" } ] + }, + "children": + { + "Kathy" : { "age" : 10, "school" : "sch_3", "friends" : ["Anna", "Mark", "Maria"]}, + "Will" : { "age" : 2, "school" : "sch_1", "friends" : ["Ada"]} + } + } +} + +{ + "id":8, + "info": + { + "firstName":"first8", "lastName":"last8","age":10, + "address": + { + "city": "San Fransisco", + "state" : "CA", + "phones" : { "areacode" : 650, "number" : 80, "kind" : "work" } + }, + "children": + { + "John" : { "age" : 10, "school" : "sch_1", "friends" : ["Anna", "John", "Maria"]}, + "Anna" : null, + "Mary" : 7 + } + } +} + +{ + "id":9, + "info": + { + "firstName":"first8", "lastName":"last8","age":null, + "address": + { + "city": "San Fransisco", + "state" : null, + "phones" : { "areacode" : 408, "number" : 80, "kind" : "work" } + }, + "children": + { + "John" : { "age" : 10, "school" : "sch_1", "friends" : ["Anna", "John", "Maria"]}, + "Lisa" : null, + "Anna" : 7 + } + } +} + +{ + "id":10, + "info": + { + "firstName":"first8", "lastName":"last8","age":null, + "address": + { + "city": "San Fransisco", + "phones" : { "areacode" : 408, "number" : 80 } + }, + "children": + { + "Anna" : { "age" : 10, "school" : "sch_1", "friends" : []}, + "Lisa" : null, + "Mary" : 7 + } + } +} + + +{ + "id":11, + "info": + { + "firstName":"first8", "lastName":"last8", + "address": + { + "city": "Portland", + "phones" : { "areacode" : 408, "number" : 80, "kind" : "work" } + }, + "children": + { + "Anna" : { "age" : 10, "school" : "sch_1"}, + "Lisa" : null, + "Mary" : 7 + } + } +} + +{ + "id":12, + "info": + { + "firstName":"first8", "lastName":"last8", + "address": + { + "city": "San Fransisco", + "state" : null, + "phones" : { "areacode" : 408, "number" : 80, "kind" : "home" } + }, + "children": + { + "Anna" : { "age" : 10, "school" : "sch_2", "friends" : null}, + "Lisa" : null, + "Mary" : 7 + } + } +} + 
+{ + "id":17, + "info": + { + "firstName" : "first8", "lastName" : "last8", "age" : 10, + "address" : "somewhere", + "children" : "none" + } +} + + +Table: Bar + +{ + "id":0, + "info": + { + "firstName":"first0", "lastName":"last0","age":10, + "address": + { + "city": "San Fransisco", + "state" : "CA", + "phones" : [ { "areacode" : 408, "number" : 50, "kind" : "home" }, + { "areacode" : 650, "number" : 51, "kind" : "work" }, + { "areacode" : null, "number" : 52, "kind" : "home" }, + { "areacode" : 510, "number" : 53, "kind" : "home" }, + { "areacode" : 415, "number" : 54 }, + "650-234-4556" + ] + }, + "children": + { + "Anna" : { "age" : 10, "school" : "sch_1", "friends" : ["Anna", "John", "Maria"]}, + "Lisa" : { "age" : 12, "friends" : ["Ada"]}, + "Mary" : { "age" : 7, "school" : "sch_3", "friends" : ["Anna", "Mark"]} + } + } +} + +{ + "id":1, + "info": + { + "firstName":"first1", "lastName":"last1","age":11, + "address": + { + "city" : "Boston", + "state" : "MA", + "phones" : [ { "areacode" : 304, "number" : 30, "kind" : "work" }, + { "areacode" : 318, "number" : 31, "kind" : "work" }, + { "areacode" : 400, "number" : 41, "kind" : "home" }, + { "areacode" : 450, "number" : 43, "kind" : "home" }, + { "areacode" : 480, "number" : 44, "kind" : "work" }, + { "areacode" : 500, "number" : 51, "kind" : "work" }, + { "areacode" : 520, "number" : 52, "kind" : "home" }, + { "areacode" : 550, "number" : 53, "kind" : "home" }, + { "areacode" : 560, "number" : 55, "kind" : "work" } ] + }, + "children": + { + "Anna" : { "age" : 9, "school" : "sch_1", "friends" : ["Bobby", "John", null]}, + "Mark" : { "age" : 4, "school" : "sch_1", "friends" : ["George"]}, + "Dave" : { "age" : 15, "school" : "sch_3", "friends" : ["Bill", "Sam"]} + } + } +} + +{ + "id":2, + "info": + { + "firstName":"first2", "lastName":"last2","age":12, + "address": + { + "city" : "Portland", + "state" : "OR", + "phones" : [ { "areacode" : 104, "number" : 10, "kind" : "home" }, + { "areacode" : 118, "number" : 11, "kind" : "work" } ] + }, + "children": + { + } + } +} + +{ + "id":3, + "info": + { + "firstName":"first3", "lastName":"last3","age":13, + "address": + { + "city" : "Seattle", + "state" : "WA", + "phones" : null + }, + "children": + { + "George" : { "age" : 7, "school" : "sch_2", "friends" : ["Bill", "Mark"]}, + "Matt" : { "age" : 14, "school" : "sch_2", "friends" : ["Bill"]} + } + } +} + +{ + "id":4, + "info": + { + "firstName":"first4", "lastName":"last4","age":15, + "address": + { + "city" : "Salem", + "state" : "MA", + "phones" : [ { "areacode" : 400, "number" : 41, "kind" : "home" }, + { "areacode" : 460, "number" : 44, "kind" : "work" }, + { "areacode" : 500, "number" : 50, "kind" : "work" }, + { "areacode" : 570, "number" : 51 }, + { "areacode" : 580, "number" : 51 }, + { "areacode" : 600, "number" : 61, "kind" : "work" }, + { "areacode" : 610, "number" : 62, "kind" : "home" }, + { "areacode" : 620, "number" : 63, "kind" : "work" } ] + }, + "children": + { + "Anna" : { "age" : 9, "school" : "sch_2", "friends" : ["George", "John"]}, + "Mark" : { "age" : 9, "school" : "sch_1", "friends" : ["George"]} + } + } +} + +{ + "id":5, + "info": + { + "firstName":"first5", "lastName":"last5","age":11, + "address": + { + "city" : "Salem", + "state" : "MA", + "phones" : [ { "areacode" : 605, "number" : 60, "kind" : "work" }, + { "areacode" : 520, "number" : 62 } ] + }, + "children": + { + "Anna" : { "age" : 29, "school" : "sch_1", "friends" : ["Bobby", "Mark"]}, + "Mark" : { "age" : 14, "school" : "sch_2", "friends" : ["George"]}, + 
"Dave" : { "age" : 16 }, + "Tim" : { "age" : 8, "school" : "sch_2", "hobbies" : ["football", "tennis"]}, + "Julie" : { "age" : 12, "school" : "sch_2", "scores" : [1, 3.5, 4.2, 2]} + } + } +} + +{ + "id":6, + "info": + { + "firstName":"first6", "lastName":"last6","age":11, + "address": + { + "city" : "Albany", + "state" : "NY", + "phones" : [ { "areacode" : 605, "number" : 60, "kind" : "work" }, + { "areacode" : 629, "number" : 62, "kind" : "work" } ] + } + } +} + +{ + "id":7, + "info": + { + "firstName":"first7", "lastName":"last7","age":30, + "address": + { + "city": "San Jose", + "state" : "CA", + "phones" : [ { "areacode" : 408, "number" : 70, "kind" : "work" }, + { "areacode" : 408, "number" : 71, "kind" : "home" }, + { "areacode" : 408, "number" : 72 }, + { "areacode" : 510, "number" : 73, "kind" : "home" }, + { "areacode" : 415, "number" : 74, "kind" : "home" } ] + }, + "children": + { + "Kathy" : { "age" : 10, "school" : "sch_3", "friends" : ["Anna", "Mark", "Maria"]}, + "Will" : { "age" : 2, "school" : "sch_1", "friends" : ["Ada"]} + } + } +} + +{ + "id":8, + "info": + { + "firstName":"first8", "lastName":"last8","age":10, + "address": + { + "city": "San Fransisco", + "state" : "CA", + "phones" : { "areacode" : 650, "number" : 80, "kind" : "work" } + }, + "children": + { + "John" : { "age" : 10, "school" : "sch_1", "friends" : ["Anna", "John", "Maria"]}, + "Anna" : null, + "Mary" : 7 + } + } +} + +{ + "id":9, + "info": + { + "firstName":"first8", "lastName":"last8","age":null, + "address": + { + "city": "San Fransisco", + "state" : null, + "phones" : { "areacode" : 408, "number" : 80, "kind" : "work" } + }, + "children": + { + "John" : { "age" : 10, "school" : "sch_1", "friends" : ["Anna", "John", "Maria"]}, + "Lisa" : null, + "Anna" : 7 + } + } +} + +{ + "id":10, + "info": + { + "firstName":"first8", "lastName":"last8","age":null, + "address": + { + "city": "San Fransisco", + "phones" : { "areacode" : 408, "number" : 80 } + }, + "children": + { + "Anna" : { "age" : 10, "school" : "sch_1", "friends" : []}, + "Lisa" : null, + "Mary" : 7 + } + } +} + + +{ + "id":11, + "info": + { + "firstName":"first8", "lastName":"last8", + "address": + { + "city": "Portland", + "phones" : { "areacode" : 408, "number" : 80, "kind" : "work" } + }, + "children": + { + "Anna" : { "age" : 10, "school" : "sch_1"}, + "Lisa" : null, + "Mary" : 7 + } + } +} + +{ + "id":12, + "info": + { + "firstName":"first8", "lastName":"last8", + "address": + { + "city": "San Fransisco", + "state" : null, + "phones" : { "areacode" : 408, "number" : 80, "kind" : "home" } + }, + "children": + { + "Anna" : { "age" : 10, "school" : "sch_2", "friends" : null}, + "Lisa" : null, + "Mary" : 7 + } + } +} + +{ + "id":17, + "info": + { + "firstName" : "first8", "lastName" : "last8", "age" : 10, + "address" : "somewhere", + "children" : "none" + } +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/before.ddl b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/before.ddl new file mode 100644 index 00000000..e74bd26f --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/before.ddl @@ -0,0 +1,99 @@ + +CREATE TABLE Foo( + id INTEGER, + info JSON, + primary key (id) +) + +create index idx_state_city_age on foo ( + row_metadata().address.state as string, + row_metadata().address.city as string, + row_metadata().age as integer) + +create index idx_state_areacode_age on foo ( + row_metadata().address.state as string, + row_metadata().address.phones[].areacode as integer, + row_metadata().age 
as integer) + +create index idx_areacode_kind on foo ( + row_metadata().address.phones[].areacode as integer, + row_metadata().address.phones[].kind as string) +with unique keys per row + +create index idx_areacode_kind_2 on foo ( + info.address.phones[].areacode as integer, + info.address.phones[].kind as string) +with unique keys per row + + +create index idx_children_anna_friends on foo ( + row_metadata().children.Anna.friends[] as string) + +create index idx_kids_anna_friends on foo ( + row_metadata().children.anna.friends[] as string) + +create index idx_children_both on foo ( + row_metadata().children.keys(), + row_metadata().children.values().age as long, + row_metadata().children.values().school as string) + +create index idx_children_values on foo ( + row_metadata().children.values().age as long, + row_metadata().children.values().school as string) + +create index idx_anna_areacode on foo ( + row_metadata().children.Anna.age as long, + row_metadata().address.phones[].areacode as integer) + +create index idx_children_keys on foo ( + row_metadata().address.city as string, + row_metadata().children.keys()) + + +CREATE TABLE Bar(id INTEGER, primary key (id)) as json collection + +create index idx_state_city_age on bar ( + row_metadata().address.state as string, + row_metadata().address.city as string, + row_metadata().age as integer) + +create index idx_state_areacode_age on bar ( + row_metadata().address.state as string, + row_metadata().address.phones[].areacode as integer, + row_metadata().age as integer) + +create index idx_areacode_kind on bar ( + row_metadata().address.phones[].areacode as integer, + row_metadata().address.phones[].kind as string) +with unique keys per row + +create index idx_areacode_kind_2 on bar ( + info.address.phones[].areacode as integer, + info.address.phones[].kind as string) +with unique keys per row + + +create index idx_children_anna_friends on bar ( + row_metadata().children.Anna.friends[] as string) + +create index idx_kids_anna_friends on bar ( + row_metadata().children.anna.friends[] as string) + +create index idx_children_both on bar ( + row_metadata().children.keys(), + row_metadata().children.values().age as long, + row_metadata().children.values().school as string) + +create index idx_children_values on bar ( + row_metadata().children.values().age as long, + row_metadata().children.values().school as string) + +create index idx_anna_areacode on bar ( + row_metadata().children.Anna.age as long, + row_metadata().address.phones[].areacode as integer) + +create index idx_children_keys on bar ( + row_metadata().address.city as string, + row_metadata().children.keys()) + + diff --git a/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/explans/aq02.r b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/explans/aq02.r new file mode 100644 index 00000000..e435ef58 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/explans/aq02.r @@ -0,0 +1,45 @@ +compiled-query-plan +{ +"query file" : "row_metadata/q/aq02.q", +"plan" : +{ + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_SHARDS", + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "Foo", + "row variable" : "$f", + "index used" : "idx_state_areacode_age", + "covering index" : true, + "index row variable" : "$f_idx", + "index scans" : [ + { + "equality conditions" : {"row_metadata().address.state":"CA","row_metadata().address.phones[].areacode":650}, + "range conditions" : {} + } + ], + 
"position in join" : 0 + }, + "FROM variable" : "$f_idx", + "SELECT expressions" : [ + { + "field name" : "id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "#id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$f_idx" + } + } + } + ] + } +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/explans/jc_aq02.r b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/explans/jc_aq02.r new file mode 100644 index 00000000..d1453261 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/explans/jc_aq02.r @@ -0,0 +1,46 @@ +compiled-query-plan + +{ +"query file" : "row_metadata/q/jc_aq02.q", +"plan" : +{ + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_SHARDS", + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "Bar", + "row variable" : "$f", + "index used" : "idx_state_areacode_age", + "covering index" : true, + "index row variable" : "$f_idx", + "index scans" : [ + { + "equality conditions" : {"row_metadata().address.state":"CA","row_metadata().address.phones[].areacode":650}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$f_idx", + "SELECT expressions" : [ + { + "field name" : "id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "#id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$f_idx" + } + } + } + ] + } +} +} \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/explans/nex06.r b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/explans/nex06.r new file mode 100644 index 00000000..0e4193e6 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/explans/nex06.r @@ -0,0 +1,45 @@ +compiled-query-plan +{ +"query file" : "row_metadata/q/nex06.q", +"plan" : +{ + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_SHARDS", + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "Foo", + "row variable" : "$f", + "index used" : "idx_areacode_kind", + "covering index" : true, + "index row variable" : "$f_idx", + "index scans" : [ + { + "equality conditions" : {"row_metadata().address.phones[].areacode":415,"row_metadata().address.phones[].kind":"EMPTY"}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$f_idx", + "SELECT expressions" : [ + { + "field name" : "id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "#id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$f_idx" + } + } + } + ] + } +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/explans/partial_mq01.r b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/explans/partial_mq01.r new file mode 100644 index 00000000..bb922758 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/explans/partial_mq01.r @@ -0,0 +1,83 @@ +compiled-query-plan +{ +"query file" : "row_metadata/q/partial_mq01.q", +"plan" : +{ + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_SHARDS", + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "Foo", + "row variable" : "$f", + "index used" : "idx_children_both", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : 
{"row_metadata().children.keys()":"Anna","row_metadata().children.values().age":10}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$f", + "WHERE" : + { + "iterator kind" : "EQUAL", + "left operand" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "age", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "Anna", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "children", + "input iterator" : + { + "iterator kind" : "ARRAY_SLICE", + "low bound" : 3, + "high bound" : 3, + "input iterator" : + { + "iterator kind" : "FUNC_ROW_METADATA", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$f" + } + } + } + } + } + }, + "right operand" : + { + "iterator kind" : "CONST", + "value" : 10 + } + }, + "SELECT expressions" : [ + { + "field name" : "id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$f" + } + } + } + ] + } +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/explans/q01.r b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/explans/q01.r new file mode 100644 index 00000000..19a15535 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/explans/q01.r @@ -0,0 +1,68 @@ +compiled-query-plan +{ +"query file" : "row_metadata/q/q01.q", +"plan" : +{ + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_SHARDS", + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "Foo", + "row variable" : "$t", + "index used" : "idx_state_city_age", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : {"row_metadata().address.state":"CA"}, + "range conditions" : { "row_metadata().address.city" : { "start value" : "G", "start inclusive" : false } } + } + ], + "position in join" : 0 + }, + "FROM variable" : "$t", + "SELECT expressions" : [ + { + "field name" : "id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$t" + } + } + }, + { + "field name" : "age", + "field expression" : + { + "iterator kind" : "ARRAY_CONSTRUCTOR", + "conditional" : true, + "input iterators" : [ + { + "iterator kind" : "FIELD_STEP", + "field name" : "age", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "info", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$t" + } + } + } + ] + } + } + ] + } +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/explans/sort01.r b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/explans/sort01.r new file mode 100644 index 00000000..0b332c6d --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/explans/sort01.r @@ -0,0 +1,137 @@ +compiled-query-plan +{ +"query file" : "row_metadata/q/sort01.q", +"plan" : +{ + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_SHARDS", + "order by fields at positions" : [ 2, 3, 1 ], + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "Foo", + "row variable" : "$t", + "index used" : "idx_state_city_age", + "covering index" : true, + "index row variable" : "$t_idx", + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "index filtering predicate" : 
+ { + "iterator kind" : "GREATER_THAN", + "left operand" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "row_metadata().age", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$t_idx" + } + }, + "right operand" : + { + "iterator kind" : "CONST", + "value" : 10 + } + }, + "position in join" : 0 + }, + "FROM variable" : "$t_idx", + "SELECT expressions" : [ + { + "field name" : "id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "#id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$t_idx" + } + } + }, + { + "field name" : "age", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "row_metadata().age", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$t_idx" + } + } + }, + { + "field name" : "sort_gen", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "row_metadata().address.state", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$t_idx" + } + } + }, + { + "field name" : "sort_gen0", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "row_metadata().address.city", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$t_idx" + } + } + } + ] + } + }, + "FROM variable" : "$from-0", + "SELECT expressions" : [ + { + "field name" : "id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$from-0" + } + } + }, + { + "field name" : "age", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "age", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$from-0" + } + } + } + ] +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/explans/sort16.r b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/explans/sort16.r new file mode 100644 index 00000000..4303c4d2 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/explans/sort16.r @@ -0,0 +1,65 @@ +compiled-query-plan +{ +"query file" : "row_metadata/q/sort16.q", +"plan" : +{ + "iterator kind" : "SORT", + "order by fields at positions" : [ 0 ], + "input iterator" : + { + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_SHARDS", + "distinct by fields at positions" : [ 0 ], + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "Foo", + "row variable" : "$f", + "index used" : "idx_areacode_kind", + "covering index" : true, + "index row variable" : "$f_idx", + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : { "row_metadata().address.phones[].areacode" : { "start value" : 415, "start inclusive" : true } } + } + ], + "index filtering predicate" : + { + "iterator kind" : "OP_EXISTS", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "row_metadata().address.phones[].kind", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$f_idx" + } + } + }, + "position in join" : 0 + }, + "FROM variable" : "$f_idx", + "SELECT expressions" : [ + { + "field name" : "id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "#id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$f_idx" + } + } + } + ] + } + } +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/explans/unnest01.r 
b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/explans/unnest01.r new file mode 100644 index 00000000..32e7c19a --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/explans/unnest01.r @@ -0,0 +1,133 @@ +compiled-query-plan +{ +"query file" : "row_metadata/q/unnest01.q", +"plan" : +{ + "iterator kind" : "GROUP", + "input variable" : "$gb-2", + "input iterator" : + { + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_PARTITIONS", + "input iterator" : + { + "iterator kind" : "GROUP", + "input variable" : "$gb-1", + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "Foo", + "row variable" : "$f", + "index used" : "primary index", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$f", + "FROM" : + { + "iterator kind" : "ARRAY_FILTER", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "phones", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "address", + "input iterator" : + { + "iterator kind" : "FUNC_ROW_METADATA", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$f" + } + } + } + } + }, + "FROM variable" : "$phone", + "SELECT expressions" : [ + { + "field name" : "areacode", + "field expression" : + { + "iterator kind" : "PROMOTE", + "target type" : "Any", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "areacode", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$phone" + } + } + } + }, + { + "field name" : "cnt", + "field expression" : + { + "iterator kind" : "CONST", + "value" : 1 + } + } + ] + }, + "grouping expressions" : [ + { + "iterator kind" : "FIELD_STEP", + "field name" : "areacode", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$gb-1" + } + } + ], + "aggregate functions" : [ + { + "iterator kind" : "FUNC_COUNT_STAR" + } + ] + } + }, + "grouping expressions" : [ + { + "iterator kind" : "FIELD_STEP", + "field name" : "areacode", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$gb-2" + } + } + ], + "aggregate functions" : [ + { + "iterator kind" : "FUNC_SUM", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "cnt", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$gb-2" + } + } + } + ] +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/explans/unnest02.r b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/explans/unnest02.r new file mode 100644 index 00000000..9877f1f0 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/explans/unnest02.r @@ -0,0 +1,92 @@ +compiled-query-plan +{ +"query file" : "row_metadata/q/unnest02.q", +"plan" : +{ + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_SHARDS", + "order by fields at positions" : [ 0 ], + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "Foo", + "row variable" : "$f", + "index used" : "idx_areacode_kind", + "covering index" : true, + "index row variable" : "$f_idx", + "index scans" : [ + { + "equality conditions" : {}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$f_idx", + "GROUP BY" : "Grouping by the first expression in the SELECT list", + "SELECT expressions" : [ + { + 
"field name" : "areacode", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "row_metadata().address.phones[].areacode", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$f_idx" + } + } + }, + { + "field name" : "cnt", + "field expression" : + { + "iterator kind" : "FUNC_COUNT_STAR" + } + } + ] + } + }, + "FROM variable" : "$from-1", + "GROUP BY" : "Grouping by the first expression in the SELECT list", + "SELECT expressions" : [ + { + "field name" : "areacode", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "areacode", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$from-1" + } + } + }, + { + "field name" : "cnt", + "field expression" : + { + "iterator kind" : "FUNC_SUM", + "input iterator" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "cnt", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$from-1" + } + } + } + } + ] +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/expres/aq02.r b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/expres/aq02.r new file mode 100644 index 00000000..cf8b4374 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/expres/aq02.r @@ -0,0 +1,3 @@ +unordered-result +{"id":0} +{"id":8} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/expres/jc_aq02.r b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/expres/jc_aq02.r new file mode 100644 index 00000000..cf8b4374 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/expres/jc_aq02.r @@ -0,0 +1,3 @@ +unordered-result +{"id":0} +{"id":8} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/expres/nex06.r b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/expres/nex06.r new file mode 100644 index 00000000..b3e12d59 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/expres/nex06.r @@ -0,0 +1,2 @@ +unordered-result +{"id":0} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/expres/partial_mq01.r b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/expres/partial_mq01.r new file mode 100644 index 00000000..cff754bb --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/expres/partial_mq01.r @@ -0,0 +1 @@ +unordered-result diff --git a/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/expres/q01.r b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/expres/q01.r new file mode 100644 index 00000000..06c1f6c8 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/expres/q01.r @@ -0,0 +1,4 @@ +unordered-result +{"id":0,"age":10} +{"id":8,"age":10} +{"id":7,"age":30} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/expres/sort01.r b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/expres/sort01.r new file mode 100644 index 00000000..a6a10024 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/expres/sort01.r @@ -0,0 +1,8 @@ +ordered-result +{"id":7,"age":30} +{"id":1,"age":11} +{"id":5,"age":11} +{"id":4,"age":15} +{"id":6,"age":11} +{"id":2,"age":12} +{"id":3,"age":13} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/expres/sort16.r b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/expres/sort16.r new file mode 100644 index 00000000..4c71a9fa --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/expres/sort16.r @@ -0,0 +1,8 @@ +ordered-result +{"id":0} +{"id":1} 
+{"id":4} +{"id":5} +{"id":6} +{"id":7} +{"id":8} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/expres/unnest01.r b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/expres/unnest01.r new file mode 100644 index 00000000..6f6211ae --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/expres/unnest01.r @@ -0,0 +1,25 @@ +unordered-result +{"areacode":null,"cnt":1} +{"areacode":104,"cnt":1} +{"areacode":118,"cnt":1} +{"areacode":304,"cnt":1} +{"areacode":318,"cnt":1} +{"areacode":400,"cnt":2} +{"areacode":408,"cnt":8} +{"areacode":415,"cnt":2} +{"areacode":450,"cnt":1} +{"areacode":460,"cnt":1} +{"areacode":480,"cnt":1} +{"areacode":500,"cnt":2} +{"areacode":510,"cnt":2} +{"areacode":520,"cnt":2} +{"areacode":550,"cnt":1} +{"areacode":560,"cnt":1} +{"areacode":570,"cnt":1} +{"areacode":580,"cnt":1} +{"areacode":600,"cnt":1} +{"areacode":605,"cnt":2} +{"areacode":610,"cnt":1} +{"areacode":620,"cnt":1} +{"areacode":629,"cnt":1} +{"areacode":650,"cnt":2} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/expres/unnest02.r b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/expres/unnest02.r new file mode 100644 index 00000000..6f6211ae --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/expres/unnest02.r @@ -0,0 +1,25 @@ +unordered-result +{"areacode":null,"cnt":1} +{"areacode":104,"cnt":1} +{"areacode":118,"cnt":1} +{"areacode":304,"cnt":1} +{"areacode":318,"cnt":1} +{"areacode":400,"cnt":2} +{"areacode":408,"cnt":8} +{"areacode":415,"cnt":2} +{"areacode":450,"cnt":1} +{"areacode":460,"cnt":1} +{"areacode":480,"cnt":1} +{"areacode":500,"cnt":2} +{"areacode":510,"cnt":2} +{"areacode":520,"cnt":2} +{"areacode":550,"cnt":1} +{"areacode":560,"cnt":1} +{"areacode":570,"cnt":1} +{"areacode":580,"cnt":1} +{"areacode":600,"cnt":1} +{"areacode":605,"cnt":2} +{"areacode":610,"cnt":1} +{"areacode":620,"cnt":1} +{"areacode":629,"cnt":1} +{"areacode":650,"cnt":2} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/q/aq02.q b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/q/aq02.q new file mode 100644 index 00000000..7df1153a --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/q/aq02.q @@ -0,0 +1,4 @@ +select id +from Foo $f +where row_metadata($f).address.state = "CA" and + row_metadata($f).address.phones.areacode =any 650 diff --git a/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/q/jc_aq02.q b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/q/jc_aq02.q new file mode 100644 index 00000000..0abd42d9 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/q/jc_aq02.q @@ -0,0 +1,4 @@ +select id +from bar $f +where row_metadata($f).address.state = "CA" and + row_metadata($f).address.phones.areacode =any 650 diff --git a/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/q/nex06.q b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/q/nex06.q new file mode 100644 index 00000000..59a2e5e3 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/q/nex06.q @@ -0,0 +1,4 @@ +select id +from foo $f +where exists row_metadata($f).address.phones[not exists $element.kind and + $element.areacode = 415] diff --git a/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/q/partial_mq01.q b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/q/partial_mq01.q new file mode 100644 index 00000000..526fc785 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/q/partial_mq01.q @@ -0,0 +1,3 
@@ +select id +from foo $f +where row_metadata($f)[3].children.Anna.age = 10 diff --git a/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/q/q01.q b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/q/q01.q new file mode 100644 index 00000000..eddcfec7 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/q/q01.q @@ -0,0 +1,8 @@ +# +# partial key and range, plus always-true preds +# +select id, $t.info.age +from foo $t +where row_metadata($t).address.state = "CA" and + row_metadata($t).address.city > "F" and + row_metadata($t).address.city > "G" diff --git a/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/q/sort01.q b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/q/sort01.q new file mode 100644 index 00000000..ff4e4f00 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/q/sort01.q @@ -0,0 +1,6 @@ +select id, row_metadata($t).age +from foo $t +where row_metadata($t).age > 10 +order by row_metadata($t).address.state, + row_metadata($t).address.city, + row_metadata($t).age diff --git a/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/q/sort16.q b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/q/sort16.q new file mode 100644 index 00000000..250e9a0d --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/q/sort16.q @@ -0,0 +1,5 @@ +select id +from foo $f +where exists row_metadata($f).address.phones[$element.areacode >= 415 and + exists $element.kind] +order by id diff --git a/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/q/unnest01.q b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/q/unnest01.q new file mode 100644 index 00000000..ed75fb45 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/q/unnest01.q @@ -0,0 +1,3 @@ +select $phone.areacode, count(*) as cnt +from foo $f, row_metadata($f).address.phones[] as $phone +group by $phone.areacode diff --git a/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/q/unnest02.q b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/q/unnest02.q new file mode 100644 index 00000000..0705e347 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/q/unnest02.q @@ -0,0 +1,7 @@ +select $phone.areacode, count(*) as cnt +from foo $f, unnest(row_metadata($f).address.phones[] as $phone) +group by $phone.areacode + +#select $phone.areacode, count(*) as cnt +#from foo $f, unnest($f.info.address.phones[] as $phone) +#group by $phone.areacode diff --git a/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/test.config b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/test.config new file mode 100644 index 00000000..d50bc7e2 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/row_metadata/test.config @@ -0,0 +1,42 @@ +# +# Contains tests about field steps, slice steps and filter steps +# + +before-ddl-file = before.ddl +before-data-file = before.data + +run-json-index = q() = expres + +compile-josn-index-plans = q() = explans + +after-ddl-file = after.ddl + +var-$ext1 = type:string:"MA" + +var-$state1 = type:int:3 + +var-$state2 = type:json:""WA"" + +var-$state3 = type:double:5.5 + +var-$bool_false = type:boolean:false + +var-$bool_true = type:boolean:true + +var-$arr = type:json:[ 1, 2, 3] + +var-$jnull = jnull + +var-$$0_qstn01.q=3 + +var-$$1_qstn01.q="MA" + +var-$$0_qstn02.q=jnull + +var-$$1_qstn02.q=jnull + +var-$$1_qstn03.q=4 + +var-$$2_qstn03.q="MA" + +var-$a="a" diff --git a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/before.ddl 
b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/before.ddl index 79b1e183..ba7a25e5 100644 --- a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/before.ddl +++ b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/before.ddl @@ -46,4 +46,8 @@ create index idx_city_phones on Boo ( address.city as string, address.phones[].work as integer) +create index idx_creation_time on Boo (creation_time()) + +create index idx_creation_time_millis on Boo (creation_time_millis()) + create index idx_mod_time on Boo (modification_time()) diff --git a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/explans/jc_ct01.r b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/explans/jc_ct01.r new file mode 100644 index 00000000..6774f5ad --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/explans/jc_ct01.r @@ -0,0 +1,90 @@ +compiled-query-plan +{ +"query file" : "rowprops/q/jc_ct01.q", +"plan" : +{ + "iterator kind" : "RECEIVE", + "distribution kind" : "ALL_SHARDS", + "input iterator" : + { + "iterator kind" : "SELECT", + "FROM" : + { + "iterator kind" : "TABLE", + "target table" : "Boo", + "row variable" : "$f", + "index used" : "idx_state_city_age", + "covering index" : false, + "index scans" : [ + { + "equality conditions" : {"address.state":"MA"}, + "range conditions" : {} + } + ], + "position in join" : 0 + }, + "FROM variable" : "$f", + "SELECT expressions" : [ + { + "field name" : "id", + "field expression" : + { + "iterator kind" : "FIELD_STEP", + "field name" : "id", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$f" + } + } + }, + { + "field name" : "Column_2", + "field expression" : + { + "iterator kind" : "GREATER_OR_EQUAL", + "left operand" : + { + "iterator kind" : "FUNC_EXTRACT_FROM_TIMESTAMP", + "input iterator" : + { + "iterator kind" : "FUNC_CREATION_TIME", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$f" + } + } + }, + "right operand" : + { + "iterator kind" : "CONST", + "value" : 2020 + } + } + }, + { + "field name" : "Column_3", + "field expression" : + { + "iterator kind" : "GREATER_THAN", + "left operand" : + { + "iterator kind" : "FUNC_CREATION_TIME_MILLIS", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$f" + } + }, + "right operand" : + { + "iterator kind" : "CONST", + "value" : 1700000000 + } + } + } + ] + } +} +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/explans/jc_xins02.r b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/explans/jc_xins02.r index e5cb7b4e..72409a5b 100644 --- a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/explans/jc_xins02.r +++ b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/explans/jc_xins02.r @@ -44,11 +44,42 @@ compiled-query-plan "field name" : "row_size", "field expression" : { - "iterator kind" : "FUNC_ROW_STORAGE_SIZE", - "input iterator" : + "iterator kind" : "LESS_OR_EQUAL", + "left operand" : { - "iterator kind" : "VAR_REF", - "variable" : "$f" + "iterator kind" : "ABS", + "input iterators" : [ + { + "iterator kind" : "ADD_SUBTRACT", + "operations and operands" : [ + { + "operation" : "+", + "operand" : + { + "iterator kind" : "FUNC_ROW_STORAGE_SIZE", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$f" + } + } + }, + { + "operation" : "-", + "operand" : + { + "iterator kind" : "CONST", + "value" : 320 + } + } + ] + } + ] + }, + "right operand" : + { + "iterator kind" : "CONST", + "value" : 1 } } }, diff --git a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/explans/jc_xupd01.r 
b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/explans/jc_xupd01.r index 88031175..dd210ee3 100644 --- a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/explans/jc_xupd01.r +++ b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/explans/jc_xupd01.r @@ -1,5 +1,4 @@ compiled-query-plan - { "query file" : "rowprops/q/jc_xupd01.q", "plan" : @@ -12,7 +11,7 @@ compiled-query-plan "FROM" : { "iterator kind" : "UPDATE_ROW", - "indexes to update" : [ "idx_city_phones", "idx_mod_time", "idx_state_city_age" ], + "indexes to update" : [ "idx_city_phones", "idx_creation_time", "idx_creation_time_millis", "idx_mod_time", "idx_state_city_age" ], "update clauses" : [ { "iterator kind" : "SET", @@ -210,11 +209,42 @@ compiled-query-plan "field name" : "row_size", "field expression" : { - "iterator kind" : "FUNC_ROW_STORAGE_SIZE", - "input iterator" : + "iterator kind" : "LESS_OR_EQUAL", + "left operand" : { - "iterator kind" : "VAR_REF", - "variable" : "$f" + "iterator kind" : "ABS", + "input iterators" : [ + { + "iterator kind" : "ADD_SUBTRACT", + "operations and operands" : [ + { + "operation" : "+", + "operand" : + { + "iterator kind" : "FUNC_ROW_STORAGE_SIZE", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$f" + } + } + }, + { + "operation" : "-", + "operand" : + { + "iterator kind" : "CONST", + "value" : 335 + } + } + ] + } + ] + }, + "right operand" : + { + "iterator kind" : "CONST", + "value" : 1 } } }, @@ -301,4 +331,4 @@ compiled-query-plan ] } } -} \ No newline at end of file +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/explans/jc_xxdel01.r b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/explans/jc_xxdel01.r index 772d83e4..5cd55f38 100644 --- a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/explans/jc_xxdel01.r +++ b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/explans/jc_xxdel01.r @@ -65,16 +65,16 @@ compiled-query-plan "field name" : "row_size", "field expression" : { - "iterator kind" : "MULTIPLY_DIVIDE", - "operations and operands" : [ - { - "operation" : "*", - "operand" : + "iterator kind" : "LESS_OR_EQUAL", + "left operand" : + { + "iterator kind" : "ABS", + "input iterators" : [ { - "iterator kind" : "MULTIPLY_DIVIDE", + "iterator kind" : "ADD_SUBTRACT", "operations and operands" : [ { - "operation" : "*", + "operation" : "+", "operand" : { "iterator kind" : "FUNC_ROW_STORAGE_SIZE", @@ -86,25 +86,22 @@ compiled-query-plan } }, { - "operation" : "/", + "operation" : "-", "operand" : { "iterator kind" : "CONST", - "value" : 10 + "value" : 382 } } ] } - }, - { - "operation" : "*", - "operand" : - { - "iterator kind" : "CONST", - "value" : 10 - } - } - ] + ] + }, + "right operand" : + { + "iterator kind" : "CONST", + "value" : 1 + } } }, { @@ -191,4 +188,4 @@ compiled-query-plan } } } -} \ No newline at end of file +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/explans/jc_xxdel02.r b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/explans/jc_xxdel02.r index 23877d18..73dd9935 100644 --- a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/explans/jc_xxdel02.r +++ b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/explans/jc_xxdel02.r @@ -66,43 +66,40 @@ compiled-query-plan "field name" : "row_size", "field expression" : { - "iterator kind" : "MULTIPLY_DIVIDE", - "operations and operands" : [ + "iterator kind" : "AND", + "input iterators" : [ { - "operation" : "*", - "operand" : + "iterator kind" : "GREATER_OR_EQUAL", + "left operand" : { - "iterator kind" : "MULTIPLY_DIVIDE", - 
"operations and operands" : [ - { - "operation" : "*", - "operand" : - { - "iterator kind" : "FUNC_ROW_STORAGE_SIZE", - "input iterator" : - { - "iterator kind" : "VAR_REF", - "variable" : "$f" - } - } - }, - { - "operation" : "/", - "operand" : - { - "iterator kind" : "CONST", - "value" : 10 - } - } - ] + "iterator kind" : "FUNC_ROW_STORAGE_SIZE", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$f" + } + }, + "right operand" : + { + "iterator kind" : "CONST", + "value" : 319 } }, { - "operation" : "*", - "operand" : + "iterator kind" : "LESS_OR_EQUAL", + "left operand" : + { + "iterator kind" : "FUNC_ROW_STORAGE_SIZE", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$f" + } + }, + "right operand" : { "iterator kind" : "CONST", - "value" : 10 + "value" : 457 } } ] @@ -192,4 +189,4 @@ compiled-query-plan } } } -} \ No newline at end of file +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/explans/xins02.r b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/explans/xins02.r index d0519923..b9019980 100644 --- a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/explans/xins02.r +++ b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/explans/xins02.r @@ -47,11 +47,42 @@ compiled-query-plan "field name" : "row_size", "field expression" : { - "iterator kind" : "FUNC_ROW_STORAGE_SIZE", - "input iterator" : + "iterator kind" : "LESS_OR_EQUAL", + "left operand" : { - "iterator kind" : "VAR_REF", - "variable" : "$f" + "iterator kind" : "ABS", + "input iterators" : [ + { + "iterator kind" : "ADD_SUBTRACT", + "operations and operands" : [ + { + "operation" : "+", + "operand" : + { + "iterator kind" : "FUNC_ROW_STORAGE_SIZE", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$f" + } + } + }, + { + "operation" : "-", + "operand" : + { + "iterator kind" : "CONST", + "value" : 166 + } + } + ] + } + ] + }, + "right operand" : + { + "iterator kind" : "CONST", + "value" : 1 } } }, @@ -92,11 +123,53 @@ compiled-query-plan } }, { - "field name" : "mod_time", + "field name" : "creation_time", + "field expression" : + { + "iterator kind" : "GREATER_OR_EQUAL", + "left operand" : + { + "iterator kind" : "FUNC_CREATION_TIME", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$f" + } + }, + "right operand" : + { + "iterator kind" : "CONST", + "value" : "2020-09-01T00:00:00.000Z" + } + } + }, + { + "field name" : "creation_ms", "field expression" : { "iterator kind" : "GREATER_THAN", "left operand" : + { + "iterator kind" : "FUNC_CREATION_TIME_MILLIS", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$f" + } + }, + "right operand" : + { + "iterator kind" : "CONST", + "value" : 1700000000 + } + } + }, + { + "field name" : "mod_time", + "field expression" : + { + "iterator kind" : "GREATER_OR_EQUAL", + "left operand" : { "iterator kind" : "FUNC_MOD_TIME", "input iterator" : @@ -107,7 +180,8 @@ compiled-query-plan }, "right operand" : { - "iterator kind" : "FUNC_CURRENT_TIME" + "iterator kind" : "CONST", + "value" : "2020-09-01T00:00:00.000Z" } } } diff --git a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/explans/xupd01.r b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/explans/xupd01.r index 25883f4c..a34452f1 100644 --- a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/explans/xupd01.r +++ b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/explans/xupd01.r @@ -1,5 +1,4 @@ compiled-query-plan - { "query file" : "rowprops/q/xupd01.q", "plan" : @@ -210,11 +209,42 
@@ compiled-query-plan "field name" : "row_size", "field expression" : { - "iterator kind" : "FUNC_ROW_STORAGE_SIZE", - "input iterator" : + "iterator kind" : "LESS_OR_EQUAL", + "left operand" : { - "iterator kind" : "VAR_REF", - "variable" : "$f" + "iterator kind" : "ABS", + "input iterators" : [ + { + "iterator kind" : "ADD_SUBTRACT", + "operations and operands" : [ + { + "operation" : "+", + "operand" : + { + "iterator kind" : "FUNC_ROW_STORAGE_SIZE", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$f" + } + } + }, + { + "operation" : "-", + "operand" : + { + "iterator kind" : "CONST", + "value" : 172 + } + } + ] + } + ] + }, + "right operand" : + { + "iterator kind" : "CONST", + "value" : 1 } } }, @@ -279,11 +309,53 @@ compiled-query-plan } }, { - "field name" : "mod_time", + "field name" : "creation_time", + "field expression" : + { + "iterator kind" : "GREATER_OR_EQUAL", + "left operand" : + { + "iterator kind" : "FUNC_CREATION_TIME", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$f" + } + }, + "right operand" : + { + "iterator kind" : "CONST", + "value" : "2020-09-01T00:00:00.000Z" + } + } + }, + { + "field name" : "creation_ms", "field expression" : { "iterator kind" : "GREATER_THAN", "left operand" : + { + "iterator kind" : "FUNC_CREATION_TIME_MILLIS", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$f" + } + }, + "right operand" : + { + "iterator kind" : "CONST", + "value" : 1700000000 + } + } + }, + { + "field name" : "mod_time", + "field expression" : + { + "iterator kind" : "GREATER_OR_EQUAL", + "left operand" : { "iterator kind" : "FUNC_MOD_TIME", "input iterator" : @@ -294,11 +366,12 @@ compiled-query-plan }, "right operand" : { - "iterator kind" : "FUNC_CURRENT_TIME" + "iterator kind" : "CONST", + "value" : "2020-09-01T00:00:00.000Z" } } } ] } } -} \ No newline at end of file +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/explans/xxdel01.r b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/explans/xxdel01.r index f675312d..f88c8ce4 100644 --- a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/explans/xxdel01.r +++ b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/explans/xxdel01.r @@ -1,5 +1,4 @@ compiled-query-plan - { "query file" : "rowprops/q/xxdel01.q", "plan" : @@ -65,43 +64,43 @@ compiled-query-plan "field name" : "row_size", "field expression" : { - "iterator kind" : "OR", - "input iterators" : [ - { - "iterator kind" : "EQUAL", - "left operand" : - { - "iterator kind" : "FUNC_ROW_STORAGE_SIZE", - "input iterator" : - { - "iterator kind" : "VAR_REF", - "variable" : "$f" - } - }, - "right operand" : - { - "iterator kind" : "CONST", - "value" : 180 - } - }, - { - "iterator kind" : "EQUAL", - "left operand" : - { - "iterator kind" : "FUNC_ROW_STORAGE_SIZE", - "input iterator" : - { - "iterator kind" : "VAR_REF", - "variable" : "$f" - } - }, - "right operand" : + "iterator kind" : "LESS_OR_EQUAL", + "left operand" : + { + "iterator kind" : "ABS", + "input iterators" : [ { - "iterator kind" : "CONST", - "value" : 181 + "iterator kind" : "ADD_SUBTRACT", + "operations and operands" : [ + { + "operation" : "+", + "operand" : + { + "iterator kind" : "FUNC_ROW_STORAGE_SIZE", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$f" + } + } + }, + { + "operation" : "-", + "operand" : + { + "iterator kind" : "CONST", + "value" : 182 + } + } + ] } - } - ] + ] + }, + "right operand" : + { + "iterator kind" : "CONST", + "value" : 1 + } } }, { @@ -165,11 
+164,53 @@ compiled-query-plan } }, { - "field name" : "mod_time", + "field name" : "creation_time", + "field expression" : + { + "iterator kind" : "GREATER_OR_EQUAL", + "left operand" : + { + "iterator kind" : "FUNC_CREATION_TIME", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$f" + } + }, + "right operand" : + { + "iterator kind" : "CONST", + "value" : "2020-09-01T00:00:00.000Z" + } + } + }, + { + "field name" : "creation_ms", "field expression" : { "iterator kind" : "GREATER_THAN", "left operand" : + { + "iterator kind" : "FUNC_CREATION_TIME_MILLIS", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$f" + } + }, + "right operand" : + { + "iterator kind" : "CONST", + "value" : 1700000000 + } + } + }, + { + "field name" : "mod_time", + "field expression" : + { + "iterator kind" : "GREATER_OR_EQUAL", + "left operand" : { "iterator kind" : "FUNC_MOD_TIME", "input iterator" : @@ -180,7 +221,8 @@ compiled-query-plan }, "right operand" : { - "iterator kind" : "FUNC_CURRENT_TIME" + "iterator kind" : "CONST", + "value" : "2020-09-01T00:00:00.000Z" } } } @@ -188,4 +230,4 @@ compiled-query-plan } } } -} \ No newline at end of file +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/explans/xxdel02.r b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/explans/xxdel02.r index af5cd9a7..6c82285b 100644 --- a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/explans/xxdel02.r +++ b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/explans/xxdel02.r @@ -1,5 +1,4 @@ compiled-query-plan - { "query file" : "rowprops/q/xxdel02.q", "plan" : @@ -66,12 +65,43 @@ compiled-query-plan "field name" : "row_size", "field expression" : { - "iterator kind" : "FUNC_ROW_STORAGE_SIZE", - "input iterator" : - { - "iterator kind" : "VAR_REF", - "variable" : "$f" - } + "iterator kind" : "AND", + "input iterators" : [ + { + "iterator kind" : "GREATER_OR_EQUAL", + "left operand" : + { + "iterator kind" : "FUNC_ROW_STORAGE_SIZE", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$f" + } + }, + "right operand" : + { + "iterator kind" : "CONST", + "value" : 165 + } + }, + { + "iterator kind" : "LESS_OR_EQUAL", + "left operand" : + { + "iterator kind" : "FUNC_ROW_STORAGE_SIZE", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$f" + } + }, + "right operand" : + { + "iterator kind" : "CONST", + "value" : 206 + } + } + ] } }, { @@ -135,11 +165,53 @@ compiled-query-plan } }, { - "field name" : "mod_time", + "field name" : "creation_time", + "field expression" : + { + "iterator kind" : "GREATER_OR_EQUAL", + "left operand" : + { + "iterator kind" : "FUNC_CREATION_TIME", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$f" + } + }, + "right operand" : + { + "iterator kind" : "CONST", + "value" : "2020-09-01T00:00:00.000Z" + } + } + }, + { + "field name" : "creation_ms", "field expression" : { "iterator kind" : "GREATER_THAN", "left operand" : + { + "iterator kind" : "FUNC_CREATION_TIME_MILLIS", + "input iterator" : + { + "iterator kind" : "VAR_REF", + "variable" : "$f" + } + }, + "right operand" : + { + "iterator kind" : "CONST", + "value" : 1700000000 + } + } + }, + { + "field name" : "mod_time", + "field expression" : + { + "iterator kind" : "GREATER_OR_EQUAL", + "left operand" : { "iterator kind" : "FUNC_MOD_TIME", "input iterator" : @@ -150,7 +222,8 @@ compiled-query-plan }, "right operand" : { - "iterator kind" : "FUNC_CURRENT_TIME" + "iterator kind" : "CONST", + "value" : 
"2020-09-01T00:00:00.000Z" } } } @@ -158,4 +231,4 @@ compiled-query-plan } } } -} \ No newline at end of file +} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/expres/jc_ct01.r b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/expres/jc_ct01.r new file mode 100644 index 00000000..5ff0a1d6 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/expres/jc_ct01.r @@ -0,0 +1,4 @@ +unordered-result +{"id":1,"Column_2":false,"Column_3":false} +{"id":5,"Column_2":false,"Column_3":false} +{"id":4,"Column_2":false,"Column_3":false} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/expres/jc_mod02.r b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/expres/jc_mod02.r index 92c6db72..b0df96e9 100644 --- a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/expres/jc_mod02.r +++ b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/expres/jc_mod02.r @@ -1,7 +1,7 @@ unordered-result -{"id":1,"Column_2":false} -{"id":5,"Column_2":false} -{"id":3,"Column_2":false} {"id":0,"Column_2":false} +{"id":1,"Column_2":false} {"id":2,"Column_2":false} -{"id":4,"Column_2":false} \ No newline at end of file +{"id":3,"Column_2":false} +{"id":4,"Column_2":false} +{"id":5,"Column_2":false} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/expres/jc_xins02.r b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/expres/jc_xins02.r index cdcd52f0..7af606fa 100644 --- a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/expres/jc_xins02.r +++ b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/expres/jc_xins02.r @@ -1,2 +1,2 @@ unordered-result -{"row_size":319,"part":2,"shard":1,"expiration":5,"mod_time":false} +{"row_size":true,"part":2,"shard":1,"expiration":5,"mod_time":false} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/expres/jc_xupd01.r b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/expres/jc_xupd01.r index 370750a4..5f847306 100644 --- a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/expres/jc_xupd01.r +++ b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/expres/jc_xupd01.r @@ -1,2 +1,2 @@ unordered-result -{"age":16,"address":{"city":"Seattle","phones":[{"home":1231423,"work":3445},{"home":1231423,"work":3446},{"home":1231423,"work":3447}],"ptr":"phones","state":"WA"},"row_size":334,"isize_cp":102,"isize_sca":36,"part":1,"shard":1,"expiration":5,"mod_time":false} +{"age":16,"address":{"city":"Seattle","phones":[{"home":1231423,"work":3445},{"home":1231423,"work":3446},{"home":1231423,"work":3447}],"ptr":"phones","state":"WA"},"row_size":true,"isize_cp":102,"isize_sca":36,"part":1,"shard":1,"expiration":5,"mod_time":false} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/expres/jc_xxdel01.r b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/expres/jc_xxdel01.r index e8c9bc01..df724313 100644 --- a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/expres/jc_xxdel01.r +++ b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/expres/jc_xxdel01.r @@ -1,2 +1,2 @@ unordered-result -{"id":1,"row_size":380,"isize_cp":32,"isize_sca":35,"part":1,"shard":1,"expiration":5,"mod_time":false} +{"id":1,"row_size":true,"isize_cp":32,"isize_sca":35,"part":1,"shard":1,"expiration":5,"mod_time":false} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/expres/jc_xxdel02.r b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/expres/jc_xxdel02.r index 8f8a567b..d7560989 100644 --- a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/expres/jc_xxdel02.r +++ 
b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/expres/jc_xxdel02.r @@ -1,3 +1,3 @@ unordered-result -{"id":0,"row_size":450,"isize_sca":42,"isize_cp":200,"part":5,"shard":1,"expiration":5,"mod_time":false} -{"id":100,"row_size":310,"isize_sca":43,"isize_cp":205,"part":2,"shard":1,"expiration":5,"mod_time":false} +{"id":0,"row_size":true,"isize_sca":42,"isize_cp":200,"part":5,"shard":1,"expiration":5,"mod_time":false} +{"id":100,"row_size":true,"isize_sca":43,"isize_cp":205,"part":2,"shard":1,"expiration":5,"mod_time":false} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/expres/xins02.r b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/expres/xins02.r index 3ef00731..8729ccfc 100644 --- a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/expres/xins02.r +++ b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/expres/xins02.r @@ -1,2 +1,2 @@ unordered-result -{"row_size":164,"part":6,"shard":2,"expiration":5,"mod_time":false} \ No newline at end of file +{"row_size":true,"part":6,"shard":2,"expiration":5,"creation_time":false,"creation_ms":false,"mod_time":true} \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/expres/xupd01.r b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/expres/xupd01.r index 51a94111..ba6c1e55 100644 --- a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/expres/xupd01.r +++ b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/expres/xupd01.r @@ -1,2 +1,2 @@ unordered-result -{"age":16,"address":{"city":"Seattle","state":"WA","phones":[{"work":3445,"home":1231423},{"work":3446,"home":1231423},{"work":3447,"home":1231423}],"ptr":"phones"},"row_size":170,"isize_cp":102,"isize_sca":36,"part":7,"shard":2,"expiration":5,"mod_time":false} \ No newline at end of file +{"age":16,"address":{"city":"Seattle","state":"WA","phones":[{"work":3445,"home":1231423},{"work":3446,"home":1231423},{"work":3447,"home":1231423}],"ptr":"phones"},"row_size":true,"isize_cp":102,"isize_sca":36,"part":7,"shard":2,"expiration":5,"creation_time":false,"creation_ms":false,"mod_time":true} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/expres/xxdel01.r b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/expres/xxdel01.r index dea8aaed..f5c108d6 100644 --- a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/expres/xxdel01.r +++ b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/expres/xxdel01.r @@ -1,2 +1,2 @@ unordered-result -{"id":1,"row_size":true,"isize_cp":32,"isize_sca":35,"part":3,"shard":1,"expiration":5,"mod_time":false} +{"id":1,"row_size":true,"isize_cp":32,"isize_sca":35,"part":3,"shard":1,"expiration":5,"creation_time":false,"creation_ms":false,"mod_time":true} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/expres/xxdel02.r b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/expres/xxdel02.r index a9f2681a..a7ff8f88 100644 --- a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/expres/xxdel02.r +++ b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/expres/xxdel02.r @@ -1,3 +1,3 @@ unordered-result -{"id":100,"row_size":164,"isize_sca":43,"isize_cp":205,"part":6,"shard":2,"expiration":5,"mod_time":false} -{"id":0,"row_size":203,"isize_sca":42,"isize_cp":200,"part":8,"shard":2,"expiration":5,"mod_time":false} \ No newline at end of file +{"id":100,"row_size":true,"isize_sca":43,"isize_cp":205,"part":6,"shard":2,"expiration":5,"creation_time":false,"creation_ms":false,"mod_time":true} 
+{"id":0,"row_size":true,"isize_sca":42,"isize_cp":200,"part":8,"shard":2,"expiration":5,"creation_time":false,"creation_ms":false,"mod_time":true} diff --git a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/q/all b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/q/all index 3275385d..9a0d62ea 100644 --- a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/q/all +++ b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/q/all @@ -11,27 +11,27 @@ where partition($f) = $p ################ ext_part04.q ################ -declare $p1 integer; // 2 +declare $p1 integer; // 3 select id from foo $f -where partition($f) = $p1 and id = 3 +where partition($f) = $p1 and id = 1 ################ ext_part06.q ################ -declare $p1 integer; // 2 +declare $p1 integer; // 3 select id from foo $f -where $p1 <= partition($f) and partition($f) < 5 +where $p1 <= partition($f) and partition($f) < 7 ################ ext_part07.q ################ -declare $p1 integer; // 2 +declare $p1 integer; // 3 $p2 long; // 15 select id from foo $f @@ -39,7 +39,6 @@ where $p1 <= partition($f) and partition($f) < $p2 - ################ ext_part08.q ################ @@ -87,14 +86,13 @@ ext_shard02.q declare $s1 integer; // = 1 select id, partition($f) as part from foo $f -where shard($f) = $s1 and partition($f) = 2 +where shard($f) = $s1 and partition($f) = 3 ################ ext_shard03.q ################ - declare $s1 integer; // = 1 select id, partition($f) as part from foo $f @@ -112,11 +110,10 @@ from foo $f - ################ isize02.q ################ -select id, +select id, 170 <= row_storage_size($f) and row_storage_size($f) <= 185 as row_size, index_storage_size($f, "idx_state_city_age") as index_size from foo $f @@ -153,7 +150,6 @@ where $f.children.Anna.iage < 10 - ################ isize06.q ################ @@ -211,7 +207,7 @@ from foo $f ################ isize12.q ################ -select shard($f) as shard, +select shard($f) as shard, sum(index_storage_size($f, "idx_state_city_age")) as index_size from foo $f group by shard($f) @@ -221,7 +217,7 @@ group by shard($f) ################ isize13.q ################ -select shard($f) as shard, +select shard($f) as shard, sum(index_storage_size($f, "idx_city_phones")) as index_size from foo $f group by shard($f) @@ -232,7 +228,7 @@ group by shard($f) isize14.q ################ select /*+ FORCE_PRIMARY_INDEX(foo) */ - shard($f) as shard, + shard($f) as shard, sum(index_storage_size($f, "idx_city_phones")) as index_size from foo $f group by shard($f) @@ -243,7 +239,7 @@ group by shard($f) isize15.q ################ select partition($f) as part, - count(*) as cnt, + count(*) as cnt, avg(index_storage_size($f, "idx_city_phones")) as index_size from foo $f group by partition($f) @@ -258,6 +254,446 @@ from foo $f +################ +isize17.q +################ +select index_storage_size($f, "idx_city_phones") as index_size +from foo $f + + + +################ +isize18.q +################ +select row_storage_size($t) as Row_Storage_Size, + index_storage_size($t,"idx_t_user1") as IDX_SIZE +from t_user1 $t + + + +################ +jc_ct01.q +################ +select id, + year(creation_time($f)) >= 2020, + creation_time_millis($f) > 1700000000 +from boo $f +where $f.address.state = "MA" + + + +################ +jc_isize01.q +################ +select id, + 32 <= index_storage_size($f, "idx_state_city_age") and + index_storage_size($f, "idx_state_city_age") <= 39 as index_size +from boo $f + + + +################ +jc_isize02.q +################ +select id, + 330 <= 
row_storage_size($f) and row_storage_size($f) <= 450 as row_size, + index_storage_size($f, "idx_state_city_age") as index_size +from boo $f + + + +################ +jc_isize03.q +################ +select id, + 40 <= index_storage_size($f, "idx_state_city_age") and + index_storage_size($f, "idx_state_city_age") <= 43 as index_size +from boo $f +where index_storage_size($f, "idx_state_city_age") > 38 + + + +################ +jc_isize04.q +################ +select id, firstName, + index_storage_size($f, "idx_state_city_age") as index_size +from boo $f + + + +################ +jc_isize05.q +################ +select id, + index_storage_size($f, "idx_state_city_age") as index_size +from boo $f +where $f.children.Anna.iage < 10 + + + +################ +jc_isize06.q +################ +select id, index_storage_size($f, "idx_city_phones") as isize +from boo $f + + + +################ +jc_isize07.q +################ +select /*+ force_primary_index(boo) */ + id, index_storage_size($f, "idx_city_phones") as isize +from boo $f + + + +################ +jc_isize08.q +################ +select id, index_storage_size($f, "idx_city_phones") as isize, firstName +from boo $f +order by firstName + + + +################ +jc_isize09.q +################ +select id, index_storage_size($f, "idx_city_phones") as isize, firstName +from boo $f +order by firstName +limit 3 +offset 2 + + + +################ +jc_isize10.q +################ +select id, index_storage_size($f, "idx_city_phones") as isize +from boo $f +where index_storage_size($f, "idx_city_phones") > 40 + + + +################ +jc_isize11.q +################ +select id, 2 * index_storage_size($f, "idx_city_phones") as isize +from boo $f + + + +################ +jc_isize12.q +################ +select shard($f) as shard, + sum(index_storage_size($f, "idx_state_city_age")) as index_size +from boo $f +group by shard($f) + + + +################ +jc_isize13.q +################ +select shard($f) as shard, + sum(index_storage_size($f, "idx_city_phones")) as index_size +from boo $f +group by shard($f) + + + +################ +jc_isize14.q +################ +select /*+ FORCE_PRIMARY_INDEX(boo) */ + shard($f) as shard, + sum(index_storage_size($f, "idx_city_phones")) as index_size +from boo $f +group by shard($f) + + + +################ +jc_isize15.q +################ +select partition($f) as part, + count(*) as cnt, + avg(index_storage_size($f, "idx_city_phones")) as index_size +from boo $f +group by partition($f) + + + +################ +jc_isize16.q +################ +select sum(index_storage_size($f, "idx_city_phones")) as index_size +from boo $f + + + +################ +jc_isize17.q +################ +select index_storage_size($f, "idx_city_phones") as index_size +from boo $f + + + +################ +jc_mod01.q +################ +select id, + year(modification_time($f)) >= 2020 +from boo $f +where $f.address.state = "MA" + + + +################ +jc_mod02.q +################ +select id, modification_time($f) > current_time() +from boo $f + + + +################ +jc_part01.q +################ +select id, partition($f) as part +from boo $f +where $f.address.state = "MA" + + + +################ +jc_part02.q +################ +select id, partition($f) as part +from boo $f + + + +################ +jc_part03.q +################ +select id, partition($f) as part +from boo $f +where partition($f) = 1 + + + +################ +jc_part04.q +################ +select id +from boo $f +where partition($f) = 2 and id = 3 + + + +################ +jc_part05.q +################ 
+declare $ext2 integer; // 3 +select id +from boo $f +where partition($f) = 2 and id = $ext2 + + + +################ +jc_part06.q +################ +select id +from boo $f +where 2 <= partition($f) and partition($f) < 5 + + + +################ +jc_part07.q +################ +select id +from boo $f +where 2 <= partition($f) and partition($f) < 15 + + + +################ +jc_part08.q +################ +select id +from boo $f +where 10 < partition($f) + + + +################ +jc_part09.q +################ +select id +from boo $f +where 10 <= partition($f) + + + +################ +jc_shard01.q +################ +select id, shard($f) as shard, partition($f) as part +from boo $f + + + +################ +jc_ttl01.q +################ +select id, remaining_days($f) +from boo $f +where $f.address.state = "CA" + + + +################ +jc_ttl02.q +################ +select id, 2 * remaining_days($f) as days, remaining_hours($f) < 15 as hours +from boo $f +where $f.address.state = "MA" and 2 * remaining_days($f) > 3 + + + +################ +jc_ttl03.q +################ +select id, year(expiration_time($f)) >= 2020 +from boo $f +where $f.address.state = "CA" + + + +################ +jc_ttl04.q +################ +select id, remaining_days($f) +from boo $f + + + +################ +jc_xins01.q +################ +insert into boo $f values ( + 100, + { + "firstName" : "first100", + "lastName" : "last100", + "age" : 33, + "address" : + { + "city": "San Fransisco", + "state" : "CA", + "phones" : [ { "work" : 504, "home" : 50 }, + { "work" : 518, "home" : 51 }, + { "work" : 528, "home" : 52 }, + { "work" : 538, "home" : 53 }, + { "work" : 548, "home" : 54 } ] + } + } +) +returning row_storage_size($f) as row_size, + index_storage_size($f, "idx_state_city_age") as sca_size + + + +################ +jc_xins02.q +################ +insert into boo $f values ( + 100, + { + "firstName" : "first100", + "lastName" : "last100", + "age" : 33, + "address" : + { + "city": "San Fransisco", + "state" : "CA", + "phones" : [ { "work" : 504, "home" : 50 }, + { "work" : 518, "home" : 51 }, + { "work" : 528, "home" : 52 }, + { "work" : 538, "home" : 53 }, + { "work" : 548, "home" : 54 } ] + } + } +) +returning (abs(row_storage_size($f) - 320) <= 1) as row_size, + partition($f) as part, + shard($f) as shard, + remaining_days($f) as expiration, + modification_time($f) > current_time() as mod_time + + + +################ +jc_xupd01.q +################ +update boo $f +set age = $ + 3, +add $f.address.phones seq_concat({ "work" : 3445, "home" : 1231423 }, + { "work" : 3446, "home" : 1231423 }, + { "work" : 3447, "home" : 1231423 }) +where id = 3 +returning age, + $f.address, + (abs(row_storage_size($f) - 335) <= 1) as row_size, + index_storage_size($f, "idx_city_phones") as isize_cp, + index_storage_size($f, "idx_state_city_age") as isize_sca, + partition($f) as part, + shard($f) as shard, + remaining_days($f) as expiration, + modification_time($f) > current_time() as mod_time + + + +################ +jc_xxdel01.q +################ +delete from boo $f +where index_storage_size($f, "idx_city_phones") < 40 +returning id, + (abs(row_storage_size($f) - 382) <= 1) as row_size, + index_storage_size($f, "idx_city_phones") as isize_cp, + index_storage_size($f, "idx_state_city_age") as isize_sca, + partition($f) as part, + shard($f) as shard, + remaining_days($f) as expiration, + modification_time($f) > current_time() as mod_time + + + +################ +jc_xxdel02.q +################ +delete from boo $f +where index_storage_size($f, 
"idx_state_city_age") > 40 +returning id, + (row_storage_size($f) >= 319 and row_storage_size($f) <= 457) as row_size, + index_storage_size($f, "idx_state_city_age") as isize_sca, + index_storage_size($f, "idx_city_phones") as isize_cp, + partition($f) as part, + shard($f) as shard, + remaining_days($f) as expiration, + modification_time($f) > current_time() as mod_time + + + ################ mod01.q ################ @@ -271,9 +707,7 @@ where $f.address.state = "MA" ################ mod02.q ################ -select id, - year(modification_time($f)) >= 2020 and - month(modification_time($f)) >= 6 +select id, modification_time($f) > current_time() from foo $f @@ -386,6 +820,24 @@ where partition($f) <= 10 +################ +part13.q +################ +select id, partition($f) as part +from foo $f +where $f.address.state = "MA" and partition($f) = 3 + + + +################ +part14.q +################ +select id, partition($f) as part +from foo $f +where $f.address.state = "MA" and 2 <= partition($f) and partition($f) <= 3 + + + ################ rsize01.q ################ @@ -417,7 +869,7 @@ shard02.q ################ select id, partition($f) as part from foo $f -where shard($f) = 1 and partition($f) = 2 +where shard($f) = 1 and partition($f) = 3 @@ -511,7 +963,6 @@ where $f.address.state = "MA" and 2 * remaining_days($f) > 3 - ################ ttl03.q ################ @@ -529,11 +980,18 @@ from foo $f +################ +vers01.q +################ +select id, version($f) = row_version($f) +from foo $f + + + ################ xins01.q ################ insert into foo $f values (100, "first100", "last100", 33, "lastName", - { "city": "San Fransisco", "state" : "CA", @@ -555,7 +1013,6 @@ returning row_storage_size($f) as row_size, xins02.q ################ insert into foo $f values (100, "first100", "last100", 33, "lastName", - { "city": "San Fransisco", "state" : "CA", @@ -568,12 +1025,13 @@ insert into foo $f values (100, "first100", "last100", 33, "lastName", }, {} ) -returning row_storage_size($f) as row_size, +returning (abs(row_storage_size($f) - 166) <= 1) as row_size, partition($f) as part, shard($f) as shard, remaining_days($f) as expiration, - year(modification_time($f)) >= 2020 and - month(modification_time($f)) >= 9 as mod_time + creation_time($f) >= "2020-9-1" as creation_time, + creation_time_millis($f) > 1700000000 as creation_ms, + modification_time($f) >= '2020-9-1' as mod_time @@ -588,14 +1046,15 @@ add $f.address.phones seq_concat({ "work" : 3445, "home" : 1231423 }, where id = 3 returning age, $f.address, - row_storage_size($f) as row_size, + (abs(row_storage_size($f) - 172) <= 1) as row_size, index_storage_size($f, "idx_city_phones") as isize_cp, index_storage_size($f, "idx_state_city_age") as isize_sca, partition($f) as part, shard($f) as shard, remaining_days($f) as expiration, - year(modification_time($f)) >= 2020 and - month(modification_time($f)) >= 9 as mod_time + creation_time($f) >= '2020-9-1' as creation_time, + creation_time_millis($f) > 1700000000 as creation_ms, + modification_time($f) >= '2020-9-1' as mod_time @@ -605,14 +1064,15 @@ xxdel01.q delete from foo $f where index_storage_size($f, "idx_city_phones") < 40 returning id, - row_storage_size($f) as row_size, + (abs(row_storage_size($f) - 182) <= 1) as row_size, index_storage_size($f, "idx_city_phones") as isize_cp, index_storage_size($f, "idx_state_city_age") as isize_sca, partition($f) as part, shard($f) as shard, remaining_days($f) as expiration, - year(modification_time($f)) >= 2020 and - 
month(modification_time($f)) >= 9 as mod_time + creation_time($f) >= '2020-9-1' as creation_time, + creation_time_millis($f) > 1700000000 as creation_ms, + modification_time($f) >= '2020-9-1' as mod_time @@ -622,14 +1082,15 @@ xxdel02.q delete from foo $f where index_storage_size($f, "idx_state_city_age") > 40 returning id, - row_storage_size($f) as row_size, + (row_storage_size($f) >=165 and row_storage_size($f) <= 206) as row_size, index_storage_size($f, "idx_state_city_age") as isize_sca, index_storage_size($f, "idx_city_phones") as isize_cp, partition($f) as part, shard($f) as shard, remaining_days($f) as expiration, - year(modification_time($f)) >= 2020 and - month(modification_time($f)) >= 9 as mod_time + creation_time($f) >= '2020-9-1' as creation_time, + creation_time_millis($f) > 1700000000 as creation_ms, + modification_time($f) >= '2020-9-1' as mod_time diff --git a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/q/jc_ct01.q b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/q/jc_ct01.q new file mode 100644 index 00000000..f84f25b4 --- /dev/null +++ b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/q/jc_ct01.q @@ -0,0 +1,5 @@ +select id, + year(creation_time($f)) >= 2020, + creation_time_millis($f) > 1700000000 +from boo $f +where $f.address.state = "MA" \ No newline at end of file diff --git a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/q/jc_xins02.q b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/q/jc_xins02.q index 09e99177..3b5dec4e 100644 --- a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/q/jc_xins02.q +++ b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/q/jc_xins02.q @@ -16,7 +16,7 @@ insert into boo $f values ( } } ) -returning row_storage_size($f) as row_size, +returning (abs(row_storage_size($f) - 320) <= 1) as row_size, partition($f) as part, shard($f) as shard, remaining_days($f) as expiration, diff --git a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/q/jc_xupd01.q b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/q/jc_xupd01.q index 0ed05b1a..d1a4e2de 100644 --- a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/q/jc_xupd01.q +++ b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/q/jc_xupd01.q @@ -6,7 +6,7 @@ add $f.address.phones seq_concat({ "work" : 3445, "home" : 1231423 }, where id = 3 returning age, $f.address, - row_storage_size($f) as row_size, + (abs(row_storage_size($f) - 335) <= 1) as row_size, index_storage_size($f, "idx_city_phones") as isize_cp, index_storage_size($f, "idx_state_city_age") as isize_sca, partition($f) as part, diff --git a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/q/jc_xxdel01.q b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/q/jc_xxdel01.q index 0c8d2480..e2f8175d 100644 --- a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/q/jc_xxdel01.q +++ b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/q/jc_xxdel01.q @@ -1,7 +1,7 @@ delete from boo $f where index_storage_size($f, "idx_city_phones") < 40 returning id, - (row_storage_size($f) / 10) * 10 as row_size, + (abs(row_storage_size($f) - 382) <= 1) as row_size, index_storage_size($f, "idx_city_phones") as isize_cp, index_storage_size($f, "idx_state_city_age") as isize_sca, partition($f) as part, diff --git a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/q/jc_xxdel02.q b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/q/jc_xxdel02.q index 76a155a1..545e8075 100644 --- a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/q/jc_xxdel02.q +++ 
b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/q/jc_xxdel02.q @@ -1,7 +1,7 @@ delete from boo $f where index_storage_size($f, "idx_state_city_age") > 40 returning id, - (row_storage_size($f) / 10) * 10 as row_size, + (row_storage_size($f) >= 319 and row_storage_size($f) <= 457) as row_size, index_storage_size($f, "idx_state_city_age") as isize_sca, index_storage_size($f, "idx_city_phones") as isize_cp, partition($f) as part, diff --git a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/q/xins02.q b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/q/xins02.q index aeaa064c..71aaf58d 100644 --- a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/q/xins02.q +++ b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/q/xins02.q @@ -12,8 +12,10 @@ insert into foo $f values (100, "first100", "last100", 33, "lastName", }, {} ) -returning row_storage_size($f) as row_size, +returning (abs(row_storage_size($f) - 166) <= 1) as row_size, partition($f) as part, shard($f) as shard, remaining_days($f) as expiration, - modification_time($f) > current_time() as mod_time + creation_time($f) >= "2020-9-1" as creation_time, + creation_time_millis($f) > 1700000000 as creation_ms, + modification_time($f) >= '2020-9-1' as mod_time diff --git a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/q/xupd01.q b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/q/xupd01.q index 19fd99ad..3ecf5cdf 100644 --- a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/q/xupd01.q +++ b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/q/xupd01.q @@ -6,10 +6,12 @@ add $f.address.phones seq_concat({ "work" : 3445, "home" : 1231423 }, where id = 3 returning age, $f.address, - row_storage_size($f) as row_size, + (abs(row_storage_size($f) - 172) <= 1) as row_size, index_storage_size($f, "idx_city_phones") as isize_cp, index_storage_size($f, "idx_state_city_age") as isize_sca, partition($f) as part, shard($f) as shard, remaining_days($f) as expiration, - modification_time($f) > current_time() as mod_time + creation_time($f) >= '2020-9-1' as creation_time, + creation_time_millis($f) > 1700000000 as creation_ms, + modification_time($f) >= '2020-9-1' as mod_time diff --git a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/q/xxdel01.q b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/q/xxdel01.q index 77986ae9..669e4177 100644 --- a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/q/xxdel01.q +++ b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/q/xxdel01.q @@ -1,10 +1,12 @@ delete from foo $f where index_storage_size($f, "idx_city_phones") < 40 returning id, - row_storage_size($f) = 180 or row_storage_size($f) = 181 as row_size, + (abs(row_storage_size($f) - 182) <= 1) as row_size, index_storage_size($f, "idx_city_phones") as isize_cp, index_storage_size($f, "idx_state_city_age") as isize_sca, partition($f) as part, shard($f) as shard, remaining_days($f) as expiration, - modification_time($f) > current_time() as mod_time + creation_time($f) >= '2020-9-1' as creation_time, + creation_time_millis($f) > 1700000000 as creation_ms, + modification_time($f) >= '2020-9-1' as mod_time diff --git a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/q/xxdel02.q b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/q/xxdel02.q index b323a084..8ceda4c9 100644 --- a/kvtest/kvquery-IT/src/main/resources/cases/rowprops/q/xxdel02.q +++ b/kvtest/kvquery-IT/src/main/resources/cases/rowprops/q/xxdel02.q @@ -1,10 +1,12 @@ delete from foo $f where index_storage_size($f, "idx_state_city_age") > 40 returning id, - 
row_storage_size($f) as row_size, + (row_storage_size($f) >=165 and row_storage_size($f) <= 206) as row_size, index_storage_size($f, "idx_state_city_age") as isize_sca, index_storage_size($f, "idx_city_phones") as isize_cp, partition($f) as part, shard($f) as shard, remaining_days($f) as expiration, - modification_time($f) > current_time() as mod_time + creation_time($f) >= '2020-9-1' as creation_time, + creation_time_millis($f) > 1700000000 as creation_ms, + modification_time($f) >= '2020-9-1' as mod_time diff --git a/kvtest/kvstore-IT/pom.xml b/kvtest/kvstore-IT/pom.xml index 62d01eb7..99193cb6 100644 --- a/kvtest/kvstore-IT/pom.xml +++ b/kvtest/kvstore-IT/pom.xml @@ -7,7 +7,7 @@ com.oracle.nosql kvtest - 25.1.13 + 25.3.21 kvstore-IT diff --git a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/admin/DdlSyntaxTest.java b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/admin/DdlSyntaxTest.java index 309aecb1..75bd8ada 100644 --- a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/admin/DdlSyntaxTest.java +++ b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/admin/DdlSyntaxTest.java @@ -40,6 +40,7 @@ import oracle.kv.impl.api.table.TableBuilderBase; import oracle.kv.impl.api.table.TableEvolver; import oracle.kv.impl.api.table.TableImpl; +import oracle.kv.impl.api.table.TableJsonUtils; import oracle.kv.impl.api.table.TableMetadata; import oracle.kv.impl.api.table.TableMetadataHelper; import oracle.kv.impl.query.compiler.CompilerAPI; @@ -1238,6 +1239,154 @@ public void testIndexDDLGenerator() { assertTrue(idxDdl.contains("WITH UNIQUE KEYS PER ROW")); } + @Test + public void testFunctionalIndex() { + TableImpl table = TableBuilder.createTableBuilder("foo") + .addInteger("id") + .addString("s") + .addInteger("i") + .addTimestamp("ts", 3) + .addJson("j", null) + .primaryKey("id") + .buildTable(); + + /* idx0: functional index */ + String[] idx0Fields = new String[] { + "s", + "ts", + "expiration_time#" + }; + String[] idx0ExternalFields = new String[] { + "s", + "ts", + "expiration_time()" + }; + checkFunctionalIndex(table, "idx0", idx0Fields, null, + idx0ExternalFields, false, true); + + /* idx1: functional index */ + String[] idx1Fields = new String[] { + "length#s", + "substring#s@,1,2", + "power#i@,2", + "timestamp_round#ts@,2", + "modification_time#" + }; + String[] idx1ExternalFields = new String[] { + "length(s)", + "substring(s,1,2)", + "power(i,2)", + "timestamp_round(ts,2)", + "modification_time()" + }; + checkFunctionalIndex(table, "idx1", idx1Fields, null, + idx1ExternalFields, true, false); + + /* idx2: functional index on json fields */ + String[] idx2Fields = new String[] { + "trunc#j.n.d@,3", + "substring#j.m[].values().name@,3", + "power#j.\"#\".a.\"@\"@,2" + }; + FieldDef.Type[] idx2Types = new FieldDef.Type[] { + FieldDef.Type.DOUBLE, + FieldDef.Type.ANY_ATOMIC, + FieldDef.Type.INTEGER + }; + String[] idx2ExternalFields = new String[] { + "trunc(j.n.d AS Double,3)", + "substring(j.m[].values().name AS AnyAtomic,3)", + "power(j.\"#\".a.\"@\" AS Integer,2)" + }; + checkFunctionalIndex(table, "idx2", idx2Fields, idx2Types, + idx2ExternalFields, true, false); + + /* idx3: index on json field */ + String[] idx3Fields = new String[] { + "j.n.d", + "j.m[].values().name", + "j.m[].keys()" + }; + FieldDef.Type[] idx3Types = new FieldDef.Type[] { + FieldDef.Type.INTEGER, + FieldDef.Type.STRING, + null + }; + String[] idx3ExternalFields = new String[] { + "j.n.d AS Integer", + "j.m[].values().name AS String", + "j.m[].keys()" + }; + checkFunctionalIndex(table, "idx3", idx3Fields, idx3Types, + 
idx3ExternalFields, true, false); + } + + private void checkFunctionalIndex(TableImpl table, + String indexName, + String[] internalFields, + FieldDef.Type[] types, + String[] externalFields, + boolean indexNulls, + boolean isUnique) { + + IndexImpl impl = new IndexImpl(indexName, table, + Arrays.asList(internalFields), + (types != null ? + Arrays.asList(types) : null), + indexNulls, isUnique, null); + table.addIndex(impl); + + /* Verify the index ddl generated by DDLGenerator */ + String ddl = genIndexDdl(indexName, table.getFullName(), externalFields, + indexNulls, isUnique); + + DDLGenerator ddlGen = new DDLGenerator(table, false); + List indexDdls = ddlGen.getAllIndexDDL(); + assertTrue(indexDdls.size() > 0); + assertEquals(ddl, indexDdls.get(indexDdls.size() - 1)); + + /* Check the tabular format output of index */ + String result = impl.formatIndex(false); + for (String field : externalFields) { + assertTrue("Result=" + result + ", Field=" + field, + result.contains(field)); + } + + /* Check the JSON metadata of index */ + String tableJson = table.toJsonString(true); + TableImpl newTable = TableJsonUtils.fromJsonString(tableJson, null); + assertEquals(table.getIndexes().size(), newTable.getIndexes().size()); + assertEquals(table.getIndex(indexName), newTable.getIndex(indexName)); + } + + /* Returns create index ddl based on the provided information */ + private String genIndexDdl(String indexName, + String tableName, + String[] externalFields, + boolean indexNulls, + boolean isUnique) { + StringBuilder sb = new StringBuilder(); + sb.append("CREATE INDEX ").append(indexName).append(" ON ") + .append(tableName).append("("); + boolean first = true; + for (String field : externalFields) { + if (first) { + first = false; + } else { + sb.append(", "); + } + sb.append(field); + } + sb.append(")"); + if (!indexNulls) { + sb.append(" WITH NO NULLS"); + } + if (isUnique) { + sb.append(" WITH UNIQUE KEYS PER ROW"); + } + return sb.toString(); + } + /** * Generates alter table (add field | drop field) ddl */ @@ -1694,18 +1843,21 @@ public void testDDLGenAlterTTL() { /* ALTER TABLE test USING TTL 30 DAYS */ te = TableEvolver.createTableEvolver(oldT); te.setDefaultTTL(TimeToLive.ofDays(30)); + te.setUpdateTableTTL(); newT = te.evolveTable(); tables.add(newT.clone()); /* ALTER TABLE test USING TTL 365 DAYS */ te = TableEvolver.createTableEvolver(newT); te.setDefaultTTL(TimeToLive.ofDays(365)); + te.setUpdateTableTTL(); newT = te.evolveTable(); tables.add(newT.clone()); /* ALTER TABLE test USING TTL 0 DAYS */ te = TableEvolver.createTableEvolver(newT); te.setDefaultTTL(TimeToLive.DO_NOT_EXPIRE); + te.setUpdateTableTTL(); newT = te.evolveTable(); tables.add(newT.clone()); @@ -1730,6 +1882,7 @@ public void testDDLGenAlterTTL() { tables.add(newT.clone()); te.setDefaultTTL(TimeToLive.ofDays(3)); + te.setUpdateTableTTL(); newT = te.evolveTable(); tables.add(newT.clone()); @@ -2153,6 +2306,7 @@ private TableImpl addToMetadata(TableImpl table) { table.getShardKey(), table.getFieldMap(), null, /* TTL */ + null, /*beforeImageTTL*/ null, /* limits */ false, 0, null, null /* owner */); } diff --git a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/admin/SecuredAdminClientTest.java b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/admin/SecuredAdminClientTest.java index f4d91d5d..74ba7dbe 100644 --- a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/admin/SecuredAdminClientTest.java +++ b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/admin/SecuredAdminClientTest.java @@ -2052,6 +2052,7 @@ private void 
evolveTable(final TableEvolver evolver, evolver.getTableVersion(), table.getFieldMap(), table.getDefaultTTL(), + table.getBeforeImageTTL(), table.getRemoteRegions()); execPlan(cs, shouldSucceed, planId, "EvolveTable"); } diff --git a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/admin/client/CommandShellTest.java b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/admin/client/CommandShellTest.java index fd812728..10659009 100644 --- a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/admin/client/CommandShellTest.java +++ b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/admin/client/CommandShellTest.java @@ -7,6 +7,7 @@ package oracle.kv.impl.admin.client; +import static oracle.kv.impl.util.CommandParser.JSON_FLAG; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; @@ -53,6 +54,7 @@ import oracle.kv.util.shell.CommonShell.VerboseCommand; import oracle.kv.util.shell.Shell; import oracle.kv.util.shell.Shell.CommandComparator; +import oracle.kv.util.shell.Shell.ExitCommand; import oracle.kv.util.shell.ShellCommand; import oracle.kv.util.shell.ShellException; @@ -898,6 +900,32 @@ public void testCommandShellCsfParams() throws Exception { } } + @Test + public void testCommandShellRetainExitCode() { + final ByteArrayOutputStream outStream = new ByteArrayOutputStream(); + final PrintStream cmdOut = new PrintStream(outStream); + final CommandShell shell = getTestShell(System.in, cmdOut); + final ExitCommand exitCommand = new ExitCommand(); + + /* Set exit code to 1 */ + final String msg = "A message"; + final IllegalArgumentException iae = new IllegalArgumentException(msg); + final String line = "Error line"; + shell.handleUnknownException(line, iae); + + try { + shell.run(exitCommand.getCommandName(), + new String[] { exitCommand.getCommandName() }); + assertEquals(Shell.EXIT_UNKNOWN, shell.getExitCode()); + + shell.run(exitCommand.getCommandName(), + new String[] { exitCommand.getCommandName(), JSON_FLAG }); + assertEquals(Shell.EXIT_UNKNOWN, shell.getExitCode()); + } catch (ShellException se) { + fail("unexpected exception: " + se); + } + } + private static class TestCommandShell extends CommandShell { TestCommandShell(InputStream input, PrintStream output) { super(input, output); diff --git a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/AsyncRequestHandlerTest.java b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/AsyncRequestHandlerTest.java index e2c0681c..5415038b 100644 --- a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/AsyncRequestHandlerTest.java +++ b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/AsyncRequestHandlerTest.java @@ -297,7 +297,7 @@ public CompletableFuture execute(Request request, final ResultValueVersion rvv = new ResultValueVersion(new byte[] { 0, 3 }, new Version(new UUID(4, 5), 6, rnId, 7), - 8, 0, -1); + 8, 0, 0,-1); final Result result = new Result.GetResult(OpCode.GET, 9, 10, rvv); final Response response = new Response( diff --git a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/RequestHandlerTest.java b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/RequestHandlerTest.java index 39f666f3..eeeb7ff7 100644 --- a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/RequestHandlerTest.java +++ b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/RequestHandlerTest.java @@ -343,11 +343,8 @@ public void testRequestTimeoutOnInsufficientAcks() new OperationsStatsTracker()); config.startupRHs(); - handler.setPreCommitTestHook(new TestHook() { - @Override - public void 
doHook(RepImpl arg) { - throw new InsufficientAcksException("request timeout"); - } + handler.setPreCommitTestHook(arg -> { + throw new InsufficientAcksException("request timeout"); }); InternalOperation op = new Get(new byte[0]); @@ -384,21 +381,18 @@ public void testRateLimitingLog() try { config.startupRHs(); - handler.setPreCommitTestHook(new TestHook() { - @Override - public void doHook(RepImpl arg) { - try { - readyLatch.countDown(); - - /* Wait for the EFE */ - efeLatch.await(); - - /* Simulate a NPE following the EFE */ - throw new NullPointerException("test"); - } catch (InterruptedException e) { - failTest.compareAndSet(null, - "Unexpected interrupt"); - } + handler.setPreCommitTestHook(arg -> { + try { + readyLatch.countDown(); + + /* Wait for the EFE */ + efeLatch.await(); + + /* Simulate a NPE following the EFE */ + throw new NullPointerException("test"); + } catch (InterruptedException e) { + failTest.compareAndSet(null, + "Unexpected interrupt"); } }); final Value v = Value.createValue(new byte[0]); diff --git a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/ops/ClientTestServices.java b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/ops/ClientTestServices.java index 83081f05..20e95cef 100644 --- a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/ops/ClientTestServices.java +++ b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/ops/ClientTestServices.java @@ -639,7 +639,7 @@ private void runPlan(Admin admin1, * -stopAllReplicas * See #stopAllReplicas * - * And an optionally flag: + * And an optional flag: * * -security * To build and start a secured store. diff --git a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/security/OpAccessCheckTestUtils.java b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/security/OpAccessCheckTestUtils.java index 68ed865c..8f164d0e 100644 --- a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/security/OpAccessCheckTestUtils.java +++ b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/security/OpAccessCheckTestUtils.java @@ -1117,7 +1117,7 @@ private static KeyCounts countKVVIter(Iterator iter) { return new KeyCounts(userCount, serverCount); } - private static abstract class TestDeniedExecution { + protected static abstract class TestDeniedExecution { void exec() { try { perform(); @@ -1160,7 +1160,7 @@ private static class TestKVStream extends TestStream { /** * Implementation of EntryStream used for TableAPI.putBulk(). */ - private static class TestRowStream extends TestStream { + static class TestRowStream extends TestStream { TestRowStream(Row... 
entries) { super(entries); } diff --git a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/security/SecureTableOpsTest.java b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/security/SecureTableOpsTest.java index 14e622ac..68684743 100644 --- a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/security/SecureTableOpsTest.java +++ b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/security/SecureTableOpsTest.java @@ -6,16 +6,9 @@ */ package oracle.kv.impl.api.security; -import static oracle.kv.impl.security.KVStorePrivilegeLabel.CREATE_ANY_INDEX; -import static oracle.kv.impl.security.KVStorePrivilegeLabel.CREATE_ANY_TABLE; -import static oracle.kv.impl.security.KVStorePrivilegeLabel.DELETE_ANY_TABLE; -import static oracle.kv.impl.security.KVStorePrivilegeLabel.DELETE_TABLE; -import static oracle.kv.impl.security.KVStorePrivilegeLabel.INSERT_ANY_TABLE; -import static oracle.kv.impl.security.KVStorePrivilegeLabel.INSERT_TABLE; -import static oracle.kv.impl.security.KVStorePrivilegeLabel.READ_ANY; -import static oracle.kv.impl.security.KVStorePrivilegeLabel.READ_ANY_TABLE; -import static oracle.kv.impl.security.KVStorePrivilegeLabel.READ_TABLE; -import static oracle.kv.impl.security.KVStorePrivilegeLabel.WRITE_ANY; +import static oracle.kv.impl.api.security.OpAccessCheckTestUtils.testDeniedTableDeleteOps; +import static oracle.kv.impl.api.security.OpAccessCheckTestUtils.testValidTableDeleteOps; +import static oracle.kv.impl.security.KVStorePrivilegeLabel.*; import static oracle.kv.util.DDLTestUtils.execStatement; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.not; @@ -23,8 +16,11 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; +import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -46,7 +42,9 @@ import oracle.kv.impl.security.KVStorePrivilege.PrivilegeType; import oracle.kv.impl.security.KVStorePrivilegeLabel; import oracle.kv.impl.security.SecureTestBase; +import oracle.kv.query.ExecuteOptions; import oracle.kv.table.IndexKey; +import oracle.kv.table.MultiRowOptions; import oracle.kv.table.PrimaryKey; import oracle.kv.table.Row; import oracle.kv.table.Table; @@ -96,6 +94,11 @@ public class SecureTableOpsTest extends SecureTestBase { private static final String TEST_TABLE_B_ROW_JSON_STR = "{\"id\":1, \"name\":\"jim\", \"salary\":3000}"; + private static final String NS = "ns"; + private static final String NS_TEST_TABLE = NS + ":" + TEST_TABLE; + private static final String NS_TEST_CHILD_TABLE = + NS + ":" + TEST_CHILD_TABLE; + private static final String NS_TEST_TTL_TABLE = NS + ":" + TEST_TTL_TABLE; private static final Map tableRowMap = new HashMap(); @@ -105,6 +108,9 @@ public class SecureTableOpsTest extends SecureTestBase { tableRowMap.put(TEST_TABLE, TEST_TABLE_ROW_JSON_STR); tableRowMap.put(TEST_TTL_TABLE, TEST_TABLE_ROW_JSON_STR); tableRowMap.put(TEST_TABLE_B, TEST_TABLE_B_ROW_JSON_STR); + tableRowMap.put(NS_TEST_TABLE, TEST_TABLE_ROW_JSON_STR); + tableRowMap.put(NS_TEST_CHILD_TABLE, TEST_CHILD_TABLE_ROW_JSON_STR); + tableRowMap.put(NS_TEST_TTL_TABLE, TEST_TABLE_ROW_JSON_STR); } private static final String TEST_ROLE = "testrole"; @@ -282,6 +288,131 @@ public void testTableInsertPermission() throws Exception { } } + private static String RMD = "{\"row metadata\":1}"; + + @Test + public void 
testTableRowMetadata() throws Exception { + final Row row = createOneRowForTable(NS_TEST_TABLE); + final PrimaryKey pk = row.createPrimaryKey(); + pk.setRowMetadata(RMD); + + /* Removed all privileges */ + revokePrivFromRole(TEST_ROLE, INSERT_ANY_TABLE, DELETE_ANY_TABLE); + revokePrivFromRole(TEST_ROLE, NS_TEST_TABLE, INSERT_TABLE, DELETE_TABLE); + + row.setRowMetadata(RMD); + /* all inserts and deletes with row_metadata ops are denied */ + testDeniedInsertOps(testUserStore, row); + testDeniedTableDeleteOps(testUserStore, pk); + + /* tests table privileges */ + try { + /* grant INSERT_TABLE, insert with row_metadata should work */ + grantPrivToRole(TEST_ROLE, NS_TEST_TABLE, INSERT_TABLE); + testValidInsertOps(row); + testDeniedTableDeleteOps(testUserStore, pk); + revokePrivFromRole(TEST_ROLE, NS_TEST_TABLE, INSERT_TABLE); + + /* grant DELETE_TABLE, delete with row_metadata should work */ + grantPrivToRole(TEST_ROLE, NS_TEST_TABLE, DELETE_TABLE); + testDeniedInsertOps(testUserStore, row); + populateTableWithOneRow(superUserStore, row); + testValidTableDeleteOps(testUserStore, superUserStore, pk); + cleanOneRowFromTable(superUserStore, row); + revokePrivFromRole(TEST_ROLE, NS_TEST_TABLE, DELETE_TABLE); + + /* + * grant INSERT_TABLE and DELETE_TABLE, both insert and + * delete with row_metadata should work + */ + grantPrivToRole(TEST_ROLE, NS_TEST_TABLE, INSERT_TABLE, + DELETE_TABLE); + testValidInsertOps(row); + populateTableWithOneRow(superUserStore, row); + testValidTableDeleteOps(testUserStore, superUserStore, pk); + cleanOneRowFromTable(superUserStore, row); + revokePrivFromRole(TEST_ROLE, NS_TEST_TABLE, INSERT_TABLE, + DELETE_TABLE); + } finally { + revokePrivFromRole(TEST_ROLE, NS_TEST_TABLE, INSERT_TABLE, + DELETE_TABLE); + testDeniedInsertOps(testUserStore, row); + testDeniedTableDeleteOps(testUserStore, pk); + cleanOneRowFromTable(superUserStore, row); + } + + /* test namespace privileges */ + try { + /* grant INSERT_IN_NAMESPACE, insert with row_metadata should work */ + grantNsPrivToRole(TEST_ROLE, NS, INSERT_IN_NAMESPACE); + testValidInsertOps(row); + testDeniedTableDeleteOps(testUserStore, pk); + revokeNsPrivFromRole(TEST_ROLE, NS, INSERT_IN_NAMESPACE); + + /* grant DELETE_IN_NAMESPACE, delete with row_metadata should work */ + grantNsPrivToRole(TEST_ROLE, NS, DELETE_IN_NAMESPACE); + testDeniedInsertOps(testUserStore, row); + populateTableWithOneRow(superUserStore, row); + testValidTableDeleteOps(testUserStore, superUserStore, pk); + cleanOneRowFromTable(superUserStore, row); + revokeNsPrivFromRole(TEST_ROLE, NS, DELETE_IN_NAMESPACE); + + /* + * grant INSERT_IN_NAMESPACE and DELETE_IN_NAMESPACE, both insert + * and delete with row_metadata should work + */ + grantNsPrivToRole(TEST_ROLE, NS, INSERT_IN_NAMESPACE, + DELETE_IN_NAMESPACE); + testValidInsertOps(row); + populateTableWithOneRow(superUserStore, row); + testValidTableDeleteOps(testUserStore, superUserStore, pk); + cleanOneRowFromTable(superUserStore, row); + revokeNsPrivFromRole(TEST_ROLE, NS, INSERT_IN_NAMESPACE, + DELETE_IN_NAMESPACE); + } finally { + revokeNsPrivFromRole(TEST_ROLE, NS, INSERT_IN_NAMESPACE, + DELETE_IN_NAMESPACE); + testDeniedInsertOps(testUserStore, row); + testDeniedDeleteOps(testUserStore, row); + cleanOneRowFromTable(superUserStore, row); + } + + /* test system privileges */ + try { + revokePrivFromRole(TEST_ROLE, INSERT_ANY_TABLE, DELETE_ANY_TABLE); + + /* grant INSERT_ANY_TABLE, insert with row_metadata should work */ + grantPrivToRole(TEST_ROLE, INSERT_ANY_TABLE); + testValidInsertOps(row); + 
testDeniedTableDeleteOps(testUserStore, pk); + revokePrivFromRole(TEST_ROLE, INSERT_ANY_TABLE); + + /* grant DELETE_ANY_TABLE, delete with row_metadata should work */ + grantPrivToRole(TEST_ROLE, DELETE_ANY_TABLE); + testDeniedInsertOps(testUserStore, row); + populateTableWithOneRow(superUserStore, row); + testValidTableDeleteOps(testUserStore, superUserStore, pk); + cleanOneRowFromTable(superUserStore, row); + revokePrivFromRole(TEST_ROLE, DELETE_ANY_TABLE); + + /* + * grant INSERT_ANY_TABLE and DELETE_ANY_TABLE, both insert and + * delete with row_metadata should work + */ + grantPrivToRole(TEST_ROLE, INSERT_ANY_TABLE, DELETE_ANY_TABLE); + testValidInsertOps(row); + populateTableWithOneRow(superUserStore, row); + testValidTableDeleteOps(testUserStore, superUserStore, pk); + cleanOneRowFromTable(superUserStore, row); + revokePrivFromRole(TEST_ROLE, INSERT_ANY_TABLE, DELETE_ANY_TABLE); + } finally { + revokePrivFromRole(TEST_ROLE, INSERT_ANY_TABLE, DELETE_ANY_TABLE); + testDeniedInsertOps(testUserStore, row); + testDeniedDeleteOps(testUserStore, row); + cleanOneRowFromTable(superUserStore, row); + } + } + @Test public void testTableDeletePermission() throws Exception { final Row row = createOneRowForTable(TEST_TABLE); @@ -327,11 +458,155 @@ public void testTableDeletePermission() throws Exception { } } + @Test + public void testMultiDelete() throws Exception { + final Row parentRow = createOneRowForTable(NS_TEST_TABLE); + final Row childRow = createOneRowForTable(NS_TEST_CHILD_TABLE); + final PrimaryKey parentKey = parentRow.createPrimaryKey(); + final PrimaryKey childKey = childRow.createPrimaryKey(); + + populateTableWithOneRow(superUserStore, parentRow); + populateTableWithOneRow(superUserStore, childRow); + + revokePrivFromRole(TEST_ROLE, WRITE_ANY, DELETE_ANY_TABLE); + revokePrivFromRole(TEST_ROLE, NS_TEST_TABLE, DELETE_TABLE); + revokePrivFromRole(TEST_ROLE, NS_TEST_CHILD_TABLE, DELETE_TABLE); + + try { + testMultiDeleteInternal(parentRow, childRow, parentKey, childKey, + DELETE_TABLE, DELETE_IN_NAMESPACE, + DELETE_ANY_TABLE); + } finally { + cleanOneRowFromTable(superUserStore, parentRow); + cleanOneRowFromTable(superUserStore, childRow); + } + } + + private void testMultiDeleteInternal(Row parentRow, + Row childRow, + PrimaryKey parentKey, + PrimaryKey childKey, + KVStorePrivilegeLabel tablePriv, + KVStorePrivilegeLabel nsPriv, + KVStorePrivilegeLabel sysPriv) + throws Exception { + + final String parentTable = parentRow.getTable().getFullNamespaceName(); + final String childTable = childRow.getTable().getFullNamespaceName(); + + testDeniedMultiDelete(parentKey, childKey); + + grantPrivToRole(TEST_ROLE, parentTable, tablePriv); + try { + testDeniedMultiDeleteChild(parentKey, childKey); + } finally { + revokePrivFromRole(TEST_ROLE, parentTable, tablePriv); + } + + grantPrivToRole(TEST_ROLE, childTable, tablePriv); + try { + testDeniedMultiDeleteParent(parentKey, childKey); + } finally { + revokePrivFromRole(TEST_ROLE, childTable, tablePriv); + } + + grantPrivToRole(TEST_ROLE, parentTable, tablePriv); + grantPrivToRole(TEST_ROLE, childTable, tablePriv); + try { + testValidMultiDelete(parentRow, childRow); + } finally { + revokePrivFromRole(TEST_ROLE, parentTable, tablePriv); + revokePrivFromRole(TEST_ROLE, childTable, tablePriv); + } + + grantNsPrivToRole(TEST_ROLE, NS, nsPriv); + try { + testValidMultiDelete(parentRow, childRow); + } finally { + revokeNsPrivFromRole(TEST_ROLE, NS, nsPriv); + } + + grantPrivToRole(TEST_ROLE, sysPriv); + try { + testValidMultiDelete(parentRow, 
childRow); + } finally { + revokePrivFromRole(TEST_ROLE, sysPriv); + } + } + + private void testDeniedMultiDelete(PrimaryKey parentKey, + PrimaryKey childKey) { + final TableAPI testTableAPI = testUserStore.getTableAPI(); + new OpAccessCheckTestUtils.TestDeniedExecution() { + @Override + void perform() { + testTableAPI.multiDelete(parentKey, null, null); + } + }.exec(); + new OpAccessCheckTestUtils.TestDeniedExecution() { + @Override + void perform() { + testTableAPI.multiDelete(childKey, null, null); + } + }.exec(); + } + + private void testDeniedMultiDeleteChild(PrimaryKey parentKey, + PrimaryKey childKey) { + final MultiRowOptions includeChild = new MultiRowOptions( + null, null, Arrays.asList(childKey.getTable())); + new OpAccessCheckTestUtils.TestDeniedExecution() { + @Override + void perform() { + testUserStore.getTableAPI().multiDelete( + parentKey, includeChild, null); + } + }.exec(); + } + + private void testDeniedMultiDeleteParent(PrimaryKey parentKey, + PrimaryKey childKey) { + final MultiRowOptions includeParent = new MultiRowOptions( + null, Arrays.asList(parentKey.getTable()), null); + new OpAccessCheckTestUtils.TestDeniedExecution() { + @Override + void perform() { + testUserStore.getTableAPI().multiDelete( + childKey, includeParent, null); + } + }.exec(); + } + + private void testValidMultiDelete(Row parentRow, Row childRow) { + final PrimaryKey parentKey = parentRow.createPrimaryKey(); + final PrimaryKey childKey = childRow.createPrimaryKey(); + final TableAPI testTableAPI = testUserStore.getTableAPI(); + final MultiRowOptions includeParent = new MultiRowOptions( + null, Arrays.asList(parentRow.getTable()), null); + final MultiRowOptions includeChild = new MultiRowOptions( + null, null, Arrays.asList(childRow.getTable())); + try { + assertEquals(2, testTableAPI.multiDelete( + parentKey, includeChild,null)); + } finally { + populateTableWithOneRow(superUserStore, parentRow); + populateTableWithOneRow(superUserStore, childRow); + } + try { + assertEquals(2, testTableAPI.multiDelete( + childKey, includeParent, null)); + } finally { + populateTableWithOneRow(superUserStore, parentRow); + populateTableWithOneRow(superUserStore, childRow); + } + } + @Test public void testPrivOnParentAndChildTable() throws Exception { final Row row = createOneRowForTable(TEST_CHILD_TABLE); final IndexKey idxKey = - row.getTable().getIndex(TEST_CHILD_TABLE_INDEX).createIndexKey(); + row.getTable().getIndex(TEST_CHILD_TABLE_INDEX) + .createIndexKey(); /* Test table read */ revokePrivFromRole(TEST_ROLE, READ_ANY, READ_ANY_TABLE); @@ -387,7 +662,8 @@ public void testPrivOnParentAndChildTable() throws Exception { testValidInsertOps(row); } finally { - revokePrivFromRole(TEST_ROLE, TEST_TABLE, INSERT_TABLE, READ_TABLE); + revokePrivFromRole(TEST_ROLE, TEST_TABLE, INSERT_TABLE, + READ_TABLE); revokePrivFromRole(TEST_ROLE, TEST_CHILD_TABLE, INSERT_TABLE); } @@ -414,7 +690,8 @@ public void testPrivOnParentAndChildTable() throws Exception { testValidDeleteOps(row); } finally { - revokePrivFromRole(TEST_ROLE, TEST_TABLE, DELETE_TABLE, READ_TABLE); + revokePrivFromRole(TEST_ROLE, TEST_TABLE, DELETE_TABLE, + READ_TABLE); cleanOneRowFromTable(superUserStore, row); } } @@ -573,137 +850,329 @@ private void doPutOp(KVStoreImpl storeImpl, } @Test - public void testTTLAccessControl() throws Exception { - Row row = createOneRowForTable(TEST_TTL_TABLE); + public void testTTLTableAccessControl() throws Exception { + testTTLAccessControl(TEST_TTL_TABLE, TEST_TABLE); + } + + @Test + public void 
testTTLNamespaceAccessControl() throws Exception { + testTTLAccessControl(NS_TEST_TTL_TABLE, NS_TEST_TABLE); + } + + private void testTTLAccessControl(String ttlTable, String nonTtlTable) + throws Exception { + + Row ttlRow = createOneRowForTable(ttlTable); + Row nonTTLRow = createOneRowForTable(nonTtlTable); /* Removed all privileges enabling table insert */ revokePrivFromRole(TEST_ROLE, WRITE_ANY, INSERT_ANY_TABLE, DELETE_ANY_TABLE); - revokePrivFromRole(TEST_ROLE, TEST_TTL_TABLE, + revokePrivFromRole(TEST_ROLE, ttlTable, INSERT_TABLE, DELETE_TABLE); - revokePrivFromRole(TEST_ROLE, TEST_TABLE, + revokePrivFromRole(TEST_ROLE, nonTtlTable, INSERT_TABLE, DELETE_TABLE); + revokeNsPrivFromRole(TEST_ROLE, NS, INSERT_IN_NAMESPACE, + DELETE_IN_NAMESPACE); /* all insert ops are denied */ - testDeniedInsertOps(testUserStore, row); + testDeniedInsertOps(testUserStore, ttlRow); /* * test insert_table only, all insert ops against a table * having ttl defined are denied */ - try { - grantPrivToRole(TEST_ROLE, TEST_TTL_TABLE, INSERT_TABLE); - testDeniedTTLInsertOps(testUserStore, row); - } finally { - revokePrivFromRole(TEST_ROLE, TEST_TTL_TABLE, INSERT_TABLE); - testDeniedInsertOps(testUserStore, row); - } + testDeniedTTLWithoutDelete(ttlRow, nonTTLRow); /* * test insert_table only, all insert ops with explicitly set ttl * as zero against a table having default ttl defined are passed */ - row.setTTL(TimeToLive.DO_NOT_EXPIRE); - try { - grantPrivToRole(TEST_ROLE, TEST_TTL_TABLE, INSERT_TABLE); - testValidInsertOps(row); - } finally { - revokePrivFromRole(TEST_ROLE, TEST_TTL_TABLE, INSERT_TABLE); - testDeniedInsertOps(testUserStore, row); - } + ttlRow.setTTL(TimeToLive.DO_NOT_EXPIRE); + testValidNoTTL(ttlRow); /* * test insert_table and delete_table, all insert ops against a table * having ttl defined are passed */ - row = createOneRowForTable(TEST_TTL_TABLE); - try { - grantPrivToRole(TEST_ROLE, TEST_TTL_TABLE, - INSERT_TABLE, DELETE_TABLE); - testValidInsertOps(row); - } finally { - revokePrivFromRole(TEST_ROLE, TEST_TTL_TABLE, - INSERT_TABLE, DELETE_TABLE); - testDeniedInsertOps(testUserStore, row); - } + ttlRow = createOneRowForTable(ttlTable); + testValidTTLWithDelete(ttlRow); /* * test insert_table only, try perform inserts with explicitly TTL * setting against a table having TTL default defined. */ - row.setTTL(TimeToLive.ofDays(10)); - try { - grantPrivToRole(TEST_ROLE, TEST_TTL_TABLE, INSERT_TABLE); - testDeniedTTLInsertOps(testUserStore, row); - } finally { - revokePrivFromRole(TEST_ROLE, TEST_TTL_TABLE, INSERT_TABLE); - testDeniedInsertOps(testUserStore, row); - } + ttlRow.setTTL(TimeToLive.ofDays(10)); + testDeniedTTLWithoutDelete(ttlRow, nonTTLRow); /* * test insert_table and delete_table, try perform inserts with * explicitly TTL setting against a table having TTL default passed. */ - try { - grantPrivToRole(TEST_ROLE, TEST_TTL_TABLE, - INSERT_TABLE, DELETE_TABLE); - testValidInsertOps(row); - } finally { - revokePrivFromRole(TEST_ROLE, TEST_TTL_TABLE, - INSERT_TABLE, DELETE_TABLE); - testDeniedInsertOps(testUserStore, row); - } + testValidTTLWithDelete(ttlRow); /* * test insert_table only, try perform inserts without TTL setting * against a table not having TTL default defined are passed. 
*/ - row = createOneRowForTable(TEST_TABLE); - try { - grantPrivToRole(TEST_ROLE, TEST_TABLE, INSERT_TABLE); - testValidInsertOps(row); - } finally { - revokePrivFromRole(TEST_ROLE, TEST_TABLE, INSERT_TABLE); - testDeniedInsertOps(testUserStore, row); - } + nonTTLRow = createOneRowForTable(nonTtlTable); + testValidNoTTL(nonTTLRow); /* * test insert_table only, try perform inserts with TTL as zero setting * against a table not having TTL default defined are passed. */ - row.setTTL(TimeToLive.ofDays(0)); + nonTTLRow.setTTL(TimeToLive.ofDays(0)); + testValidNoTTL(nonTTLRow); + + /* + * test insert_table only, try perform inserts with TTL setting against + * a table not having TTL default defined are denied. + */ + ttlRow = createOneRowForTable(ttlTable); + ttlRow.setTTL(TimeToLive.ofDays(10)); + testDeniedTTLWithoutDelete(ttlRow, nonTTLRow); + + /* + * test insert_table and delete_table only, try perform inserts with + * TTL setting against a table not having TTL default defined are passed. + */ + testValidTTLWithDelete(ttlRow); + } + + private void testDeniedTTLWithoutDelete(Row ttlRow, Row nonTTLRow) + throws Exception { + String ttlTable = ttlRow.getTable().getFullNamespaceName(); + String nonTTLTable = nonTTLRow.getTable().getFullNamespaceName(); try { - grantPrivToRole(TEST_ROLE, TEST_TABLE, INSERT_TABLE); - testValidInsertOps(row); + grantPrivToRole(TEST_ROLE, ttlTable, INSERT_TABLE); + testDeniedTTLInsertOps(testUserStore, ttlRow); + + grantPrivToRole(TEST_ROLE, nonTTLTable, INSERT_TABLE); + testValidInsertOps(nonTTLRow); } finally { - revokePrivFromRole(TEST_ROLE, TEST_TABLE, INSERT_TABLE); - testDeniedInsertOps(testUserStore, row); + revokePrivFromRole(TEST_ROLE, ttlTable, INSERT_TABLE); + testDeniedInsertOps(testUserStore, ttlRow); + + revokePrivFromRole(TEST_ROLE, nonTTLTable, INSERT_TABLE); + testDeniedInsertOps(testUserStore, nonTTLRow); + } + if (ttlRow.getTable().getNamespace().equals(NS)) { + try { + grantNsPrivToRole(TEST_ROLE, NS, INSERT_IN_NAMESPACE); + testDeniedTTLInsertOps(testUserStore, ttlRow); + testValidInsertOps(nonTTLRow); + } finally { + revokeNsPrivFromRole(TEST_ROLE, NS, INSERT_IN_NAMESPACE); + testDeniedInsertOps(testUserStore, ttlRow); + testDeniedInsertOps(testUserStore, nonTTLRow); + } } + try { + grantPrivToRole(TEST_ROLE, INSERT_ANY_TABLE); + testDeniedTTLInsertOps(testUserStore, ttlRow); + + grantPrivToRole(TEST_ROLE, INSERT_ANY_TABLE); + testValidInsertOps(nonTTLRow); + } finally { + revokePrivFromRole(TEST_ROLE, INSERT_ANY_TABLE); + testDeniedInsertOps(testUserStore, ttlRow); + + revokePrivFromRole(TEST_ROLE, INSERT_ANY_TABLE); + testDeniedInsertOps(testUserStore, nonTTLRow); + } + try { + grantPrivToRole(TEST_ROLE, WRITE_ANY); + testValidInsertOps(ttlRow); + grantPrivToRole(TEST_ROLE, WRITE_ANY); + testValidInsertOps(nonTTLRow); + } finally { + revokePrivFromRole(TEST_ROLE, WRITE_ANY); + testDeniedInsertOps(testUserStore, ttlRow); + revokePrivFromRole(TEST_ROLE, WRITE_ANY); + testDeniedInsertOps(testUserStore, nonTTLRow); + } + } + + private void testValidNoTTL(Row nonTtlRow) throws Exception { + String table = nonTtlRow.getTable().getFullNamespaceName(); + try { + grantPrivToRole(TEST_ROLE, table, INSERT_TABLE); + testValidInsertOps(nonTtlRow); + } finally { + revokePrivFromRole(TEST_ROLE, table, INSERT_TABLE); + testDeniedInsertOps(testUserStore, nonTtlRow); + } + if (nonTtlRow.getTable().getNamespace().equals(NS)) { + try { + grantNsPrivToRole(TEST_ROLE, NS, INSERT_IN_NAMESPACE); + testValidInsertOps(nonTtlRow); + } finally { + 
revokeNsPrivFromRole(TEST_ROLE, NS, INSERT_IN_NAMESPACE); + testDeniedInsertOps(testUserStore, nonTtlRow); + } + } + try { + grantPrivToRole(TEST_ROLE, INSERT_ANY_TABLE); + testValidInsertOps(nonTtlRow); + } finally { + revokePrivFromRole(TEST_ROLE, INSERT_ANY_TABLE); + testDeniedInsertOps(testUserStore, nonTtlRow); + } + try { + grantPrivToRole(TEST_ROLE, WRITE_ANY); + testValidInsertOps(nonTtlRow); + } finally { + revokePrivFromRole(TEST_ROLE, WRITE_ANY); + testDeniedInsertOps(testUserStore, nonTtlRow); + } + } + + private void testValidTTLWithDelete(Row ttlRow) throws Exception { + String table = ttlRow.getTable().getFullNamespaceName(); + try { + grantPrivToRole(TEST_ROLE, table, INSERT_TABLE, DELETE_TABLE); + testValidInsertOps(ttlRow); + } finally { + revokePrivFromRole(TEST_ROLE, table, INSERT_TABLE, DELETE_TABLE); + testDeniedInsertOps(testUserStore, ttlRow); + } + if (ttlRow.getTable().getNamespace().equals(NS)) { + try { + grantNsPrivToRole(TEST_ROLE, NS, INSERT_IN_NAMESPACE, + DELETE_IN_NAMESPACE); + testValidInsertOps(ttlRow); + } finally { + revokeNsPrivFromRole(TEST_ROLE, NS, INSERT_IN_NAMESPACE, + DELETE_IN_NAMESPACE); + testDeniedInsertOps(testUserStore, ttlRow); + } + } + try { + grantPrivToRole(TEST_ROLE, INSERT_ANY_TABLE, DELETE_ANY_TABLE); + testValidInsertOps(ttlRow); + } finally { + revokePrivFromRole(TEST_ROLE, INSERT_ANY_TABLE, DELETE_ANY_TABLE); + testDeniedInsertOps(testUserStore, ttlRow); + } + try { + grantPrivToRole(TEST_ROLE, WRITE_ANY); + testValidInsertOps(ttlRow); + } finally { + revokePrivFromRole(TEST_ROLE, WRITE_ANY); + testDeniedInsertOps(testUserStore, ttlRow); + } + } + + @Test + public void testBulkPutTTLAccessControl() + throws Exception { /* - * test insert_table only, try perform inserts with TTL setting against - * a table not having TTL default defined are denied. + * Test against a table without default TTL setting. + */ + Row row = createOneRowForTable(TEST_TABLE); + + /* Removed all privileges enabling table insert */ + revokePrivFromRole(TEST_ROLE, WRITE_ANY, + INSERT_ANY_TABLE, DELETE_ANY_TABLE); + revokePrivFromRole(TEST_ROLE, TEST_TABLE, + INSERT_TABLE, DELETE_TABLE); + testDeniedInsertOps(testUserStore, row); + + Table testTable = superUserStore.getTableAPI().getTable(TEST_TABLE); + TableAPI superUserTableAPI = superUserStore.getTableAPI(); + assertNotNull(testTable); + + Row ttlRow = testTable.createRow(); + ttlRow.put("id", 2); + ttlRow.put("name", "bob"); + ttlRow.setTTL(TimeToLive.ofDays(10)); + + TableAPI tableAPI = testUserStore.getTableAPI(); + OpAccessCheckTestUtils.TestRowStream stream = + new OpAccessCheckTestUtils.TestRowStream(row, ttlRow); + + /* + * Stream has two rows, one with valid TTL, the other not, without + * DELETE_TABLE privilege, the operation should fail. */ - row.setTTL(TimeToLive.ofDays(10)); try { grantPrivToRole(TEST_ROLE, TEST_TABLE, INSERT_TABLE); - testDeniedTTLInsertOps(testUserStore, row); + tableAPI.put(Collections.singletonList(stream), null); + fail("expected"); + } catch (FaultException fe) { + assertTrue(fe.getCause() instanceof UnauthorizedException); + assertTrue(stream.isCaughtException()); } finally { revokePrivFromRole(TEST_ROLE, TEST_TABLE, INSERT_TABLE); testDeniedInsertOps(testUserStore, row); } - /* - * test insert_table and delete_table only, try perform inserts with - * TTL setting against a table not having TTL default defined are passed. 
- */ + try { grantPrivToRole(TEST_ROLE, TEST_TABLE, INSERT_TABLE, DELETE_TABLE); - testValidInsertOps(row); + tableAPI.put(Collections.singletonList(stream), null); + assertTrue(stream.isCompleted()); } finally { revokePrivFromRole(TEST_ROLE, TEST_TABLE, INSERT_TABLE, DELETE_TABLE); testDeniedInsertOps(testUserStore, row); + superUserTableAPI.delete(row.createPrimaryKey(), null, null); + superUserTableAPI.delete(ttlRow.createPrimaryKey(), null, null); + } + + /* + * Test against a table with default TTL setting. + */ + ttlRow = createOneRowForTable(TEST_TTL_TABLE); + + /* Removed all privileges enabling table insert */ + revokePrivFromRole(TEST_ROLE, WRITE_ANY, + INSERT_ANY_TABLE, DELETE_ANY_TABLE); + revokePrivFromRole(TEST_ROLE, TEST_TTL_TABLE, + INSERT_TABLE, DELETE_TABLE); + revokePrivFromRole(TEST_ROLE, TEST_TABLE, + INSERT_TABLE, DELETE_TABLE); + + /* all insert ops are denied */ + testDeniedInsertOps(testUserStore, ttlRow); + testTable = superUserStore.getTableAPI().getTable(TEST_TABLE); + assertNotNull(testTable); + + row = testTable.createRow(); + row.put("id", 2); + row.put("name", "bob"); + row.setTTL(TimeToLive.DO_NOT_EXPIRE); + stream = new OpAccessCheckTestUtils.TestRowStream(row, ttlRow); + + /* + * Stream has two rows, one with valid default TTL, the other has TTL + * as 0, without DELETE_TABLE privilege, the operation should fail. + */ + try { + grantPrivToRole(TEST_ROLE, TEST_TTL_TABLE, INSERT_TABLE); + tableAPI.put(Collections.singletonList(stream), null); + fail("expected"); + } catch (FaultException fe) { + assertTrue(fe.getCause() instanceof UnauthorizedException); + assertTrue(stream.isCaughtException()); + } finally { + revokePrivFromRole(TEST_ROLE, TEST_TTL_TABLE, INSERT_TABLE); + testDeniedInsertOps(testUserStore, row); + } + + try { + grantPrivToRole(TEST_ROLE, TEST_TTL_TABLE, + INSERT_TABLE, DELETE_TABLE); + tableAPI.put(Collections.singletonList(stream), null); + assertTrue(stream.isCompleted()); + } finally { + revokePrivFromRole(TEST_ROLE, TEST_TTL_TABLE, + INSERT_TABLE, DELETE_TABLE); + testDeniedInsertOps(testUserStore, row); + superUserTableAPI.delete(row.createPrimaryKey(), null, null); + superUserTableAPI.delete(ttlRow.createPrimaryKey(), null, null); } } @@ -750,6 +1219,7 @@ private void testDeniedTTLInsertOps(KVStore store, Row row) * Prepares test tables and roles. */ private static void prepareTest() throws Exception { + execStatement(superUserStore, "create namespace " + NS); execStatement(superUserStore, "create table " + TEST_TABLE + TEST_TABLE_DEF); execStatement(superUserStore, @@ -773,6 +1243,19 @@ private static void prepareTest() throws Exception { "grant " + TEST_ROLE + " to user " + TEST_USER); execStatement(superUserStore, "grant readwrite to user " + SUPER_USER); + + final ExecuteOptions options = new ExecuteOptions().setNamespace(NS); + execStatement(superUserStore, + "create table " + TEST_TABLE + TEST_TABLE_DEF, + options); + execStatement(superUserStore, + "create table " + TEST_CHILD_TABLE + + TEST_CHILD_TABLE_DEF, + options); + execStatement(superUserStore, + "create table " + TEST_TTL_TABLE + TEST_TABLE_DEF + + " using ttl 5 days", + options); } /* Creates a row for test table */ @@ -818,6 +1301,18 @@ private static void revokePrivFromRole(String role, } } + private static void revokeNsPrivFromRole(String role, + String ns, + KVStorePrivilegeLabel... 
nsPriv) + throws Exception { + for (KVStorePrivilegeLabel label : nsPriv) { + execStatement(superUserStore, + "revoke " + label + " on NAMESPACE " + ns + + " from " + role); + assertRoleHasNoPriv(role, toNamespacePrivStr(label, ns)); + } + } + private static void grantPrivToRole(String role, KVStorePrivilegeLabel... sysPriv) throws Exception { @@ -840,6 +1335,18 @@ private static void grantPrivToRole(String role, } } + private static void grantNsPrivToRole(String role, + String ns, + KVStorePrivilegeLabel... nsPriv) + throws Exception { + for (KVStorePrivilegeLabel label : nsPriv) { + execStatement(superUserStore, + "grant " + label + " on NAMESPACE " + ns + + " to " + role); + assertRoleHasPriv(role, toNamespacePrivStr(label, ns)); + } + } + private static void assertRoleHasNoPriv(String role, String privStr) { final StatementResult result = @@ -863,7 +1370,7 @@ private static String toTablePrivStr(KVStorePrivilegeLabel tablePriv, /* Keep this method around in case we need it some time */ @SuppressWarnings("unused") private static String toNamespacePrivStr(KVStorePrivilegeLabel nsPriv, - String namespace) { + String namespace) { assertEquals(nsPriv.getType(), PrivilegeType.NAMESPACE); return String.format("%s(%s)", nsPriv, namespace); } diff --git a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/table/CreationTimeTest.java b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/table/CreationTimeTest.java new file mode 100644 index 00000000..e71c39d4 --- /dev/null +++ b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/table/CreationTimeTest.java @@ -0,0 +1,1118 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + */ + +package oracle.kv.impl.api.table; + +/** + * Tests Creation-Time feature against data store.
                  + */ +public class CreationTimeTest extends TableTestBase { + + // todo: Uncomment these tests when creation time feature is enabled. + + // private Table table; + // private Table child1; + // private Table child2; + // private long idCounter = System.currentTimeMillis(); + + // private static final String CREATE_TABLE = + // "CREATE TABLE IF NOT EXISTS CreationTimeTest(sk1 STRING, sk2 STRING, " + // + "pk2 STRING, s2 STRING, primary key(shard(sk1,sk2), pk2))"; + // private static final String CREATE_CHILD1_TABLE = + // "CREATE TABLE IF NOT EXISTS CreationTimeTest.CTChildOne(child1 STRING, " + + // " primary key(child1))"; + // private static final String CREATE_CHILD2_TABLE = + // "CREATE TABLE IF NOT EXISTS CreationTimeTest.CTChildOne.CTChildTwo( " + + // "child2 STRING, primary key(child2))"; + // private static final String CREATE_INDEX = + // "CREATE INDEX IF NOT EXISTS s2index ON CreationTimeTest(s2)"; + // private static final String CREATE_CHILD1_INDEX = + // "CREATE INDEX IF NOT EXISTS child1index ON CreationTimeTest.CTChildOne(child1)"; + + // private static final String TABLE_NAME = "CreationTimeTest"; + // private static final String SHARD_KEY_1 = "sk1"; + // private static final String SHARD_KEY_2 = "sk2"; + // private static final String PK2_FIELD = "pk2"; + // private static final String STRING_FIELD = "s2"; + + // private static final String CH1_TABLE_NAME = "CreationTimeTest.CTChildOne"; + // private static final String CH1_KEY = "child1"; + + // private static final String CH2_TABLE_NAME = "CreationTimeTest.CTChildOne.CTChildTwo"; + // private static final String CH2_KEY = "child2"; + + + // private Table tableC; + // private long idCounterC = System.currentTimeMillis(); + + // private static final String CREATE_TABLEC = + // "CREATE TABLE IF NOT EXISTS CreationTimeTestC(sk1 STRING, sk2 STRING, " + // + "pk2 STRING, primary key(shard(sk1,sk2), pk2)) AS JSON COLLECTION"; + + // private static final String CREATE_INDEXC = + // "CREATE INDEX IF NOT EXISTS s2indexC ON CreationTimeTestC(s2 as ANYATOMIC)"; + + // private static final String TABLE_NAMEC = "CreationTimeTestC"; + + + // @BeforeClass + // public static void staticSetUp() throws Exception { + // TableTestBase.staticSetUp(3,3,2, false, false, true); + // } + + + // @Test + // public void testNewRowCTOnPut() { + // createTable(); + + // long startTime = System.currentTimeMillis(); + // Row row = createRow(); + // tableImpl.put(row, null, null); + // long interval = System.currentTimeMillis() - startTime; + + + // PrimaryKey pk = table.createPrimaryKey(row); + // Row dbRow = tableImpl.get(pk, null); + // long ct = dbRow.getCreationTime(); + // assertTrue(ct >= startTime); + // assertTrue(ct - startTime <= interval); + // } + + // @Test + // public void testExistingRowCTOnSimplePut() { + // createTable(); + + // long startTime = System.currentTimeMillis(); + // Row row = createRow(); + // tableImpl.put(row, null, null); + // long interval = System.currentTimeMillis() - startTime; + + // // Put same row again, check same creation date + // ReturnRow prevRow = table.createReturnRow(Choice.ALL); + // tableImpl.putIfPresent(row, prevRow, null); + // assertTrue(prevRow.getCreationTime() >= startTime); + // assertTrue(prevRow.getCreationTime() - startTime <= interval); + + // // Put same row again, check same creation date + // prevRow = table.createReturnRow(Choice.VALUE); + // tableImpl.putIfPresent(row, prevRow, null); + // assertTrue(prevRow.getCreationTime() >= startTime); + // 
assertTrue(prevRow.getCreationTime() - startTime <= interval); + + // // Put same row again, check same creation date + // prevRow = table.createReturnRow(Choice.ALL); + // tableImpl.putIfPresent(row, prevRow, null); + // assertTrue(prevRow.getCreationTime() >= startTime); + // assertTrue(prevRow.getCreationTime() - startTime <= interval); + // } + + // @Test + // public void testNewRowCTOnPutIfAbsent() { + // createTable(); + + // long startTime = System.currentTimeMillis(); + // Row row = createRow(); + // // putIfAbsent + // tableImpl.putIfAbsent(row, null, null); + // long interval = System.currentTimeMillis() - startTime; + + // // get and check the creation time + // PrimaryKey pk = table.createPrimaryKey(row); + // Row dbRow = tableImpl.get(pk, null); + // assertTrue(dbRow.getCreationTime() >= startTime); + // assertTrue(dbRow.getCreationTime() - startTime <= interval); + + // // Put same row again, check same creation date + // ReturnRow prevRow = table.createReturnRow(Choice.ALL); + // tableImpl.putIfPresent(row, prevRow, null); + // assertTrue(prevRow.getCreationTime() >= startTime); + // assertTrue(prevRow.getCreationTime() - startTime <= interval); + + + // // get the row again + // dbRow = tableImpl.get(pk, null); + // assertTrue(dbRow.getCreationTime() >= startTime); + // assertTrue(dbRow.getCreationTime() - startTime <= interval); + // } + + + // @Test + // public void testBatchPutSameOption() throws Exception { + // createTable(); + // List puts = new ArrayList<>(); + // List pks = new ArrayList<>(); + // TableOperationFactory factory = tableImpl.getTableOperationFactory(); + + // long startTime = System.currentTimeMillis(); + // int N = 10; + // for (int i = 0; i < N; i++) { + // Row row = createRow(); + // pks.add(row.createPrimaryKey()); + // puts.add(factory.createPut(row, null, false)); + // } + + // tableImpl.execute(puts, null); + // long interval = System.currentTimeMillis() - startTime; + + // for (PrimaryKey pk2 : pks) { + // Row dbRow = tableImpl.get(pk2, null); + // assertTrue(dbRow.getCreationTime() >= startTime); + // assertTrue(dbRow.getCreationTime() - startTime <= interval); + // } + // } + + // /* + // * Test execute(List) to parent and child tables, tables + // * has different TTL. 
+ // */ + // @Test + // public void testBatchPutParentChildTables() throws Exception { + + // createTable(); + // createChildTables(); + + // List puts = new ArrayList<>(); + // List pks = new ArrayList<>(); + // TableOperationFactory factory = tableImpl.getTableOperationFactory(); + // WriteOptions options = new WriteOptions(); + + // long startTime = System.currentTimeMillis(); + // /* Use default table TTL */ + // int N = 2; + // for (int i = 0; i < N; i++) { + // Row row = createRow(); + // pks.add(row.createPrimaryKey()); + // puts.add(factory.createPut(row, null, false)); + + // Row child1Row = child1.createRow(row); + // child1Row.put("child1", String.valueOf(N)); + // pks.add(child1Row.createPrimaryKey()); + // puts.add(factory.createPut(child1Row, null, false)); + + // Row child2Row = child2.createRow(child1Row); + // child2Row.put("child2", String.valueOf(N)); + // pks.add(child2Row.createPrimaryKey()); + // puts.add(factory.createPut(child2Row, null, false)); + // } + + // tableImpl.execute(puts, options); + // long interval = System.currentTimeMillis() - startTime; + + // for (PrimaryKey pk2 : pks) { + // Row dbRow = tableImpl.get(pk2, null); + // assertTrue(dbRow.getCreationTime() >= startTime); + // assertTrue(dbRow.getCreationTime() - startTime <= interval); + // } + // } + + + // /** + // * This is a somewhat long set of tests in a single test case. + // * They exercise the access to creation time via all the + // * supported iteration paths. These include: + // * 1. table scan (both key and row) + // * 2. multi-get (both key and row) + // * 3. bulk get (both key and row) + // * 4. index scan (both key and row) + // * In addition it tests for creation time in rows and keys returned + // * from ancestor and child tables. + // * + // * To perform this test there are 3 tables -- parent, child, and grandchild. + // * The basic iteration tests are done on the parent. The tests for + // * ancestor and child retrieval are done on the middle table -- the child. + // * + // * When populated all tables have the same number of rows inserted. + // */ + // @Test + // public void testTableIterator() { + // final int numRows = 20; + // createTable(); + // createChildTables(); + + // long startTime = System.currentTimeMillis(); + // /* + // * Populate the parent table + // */ + // for (int i = 0; i < numRows; i++) { + // Row row = createRow(); + // tableImpl.put(row, null, null); + // } + // long interval = System.currentTimeMillis() - startTime; + + // /* + // * Table iterator + // */ + // TableIterator iter = + // tableImpl.tableIterator(table.createPrimaryKey(), null, null); + // assertCTRows(iter, startTime, interval, numRows); + + // /* + // * Index iterator + // */ + // Index index = table.getIndex("s2index"); + // iter = tableImpl.tableIterator(index.createIndexKey(), null, null); + // assertCTRows(iter, startTime, interval, numRows); + + // /* + // * Multi-get + // */ + // PrimaryKey pkey = table.createPrimaryKey(); + // pkey.put(SHARD_KEY_1, "sk1"); + // pkey.put(SHARD_KEY_2, "sk2"); + // List rows = tableImpl.multiGet(pkey, null, null); + // assertEquals(numRows, rows.size()); + // for (Row row : rows) { + // assertTrue(row.getCreationTime() >= startTime); + // assertTrue(row.getCreationTime() - startTime <= interval); + // } + + // /* + // * Multi-get keys + // * + // * Use the result rows to populate the keys array used for bulk get. + // * Also use the results to populate the child and grandchild tables. + // * They are used for ancestor/child table record retrieval, below. 
+ // */ + // /* for bulk get */ + // List keys = new ArrayList<>(); + + // pkey = table.createPrimaryKey(); + // pkey.put(SHARD_KEY_1, "sk1"); + // pkey.put(SHARD_KEY_2, "sk2"); + // List pkeys = tableImpl.multiGetKeys(pkey, null, null); + // assertTrue(pkeys.size() == numRows); + // int count = 0; + // for (PrimaryKey key : pkeys) { + // keys.add(key); /* bulk get */ + + // /* + // * Add rows to the child tables. They share the primary key of the + // * parent. + // */ + // Row child1Row = child1.createRow(key); + // child1Row.put("child1", Integer.toString(count)); + // tableImpl.put(child1Row, null, null); + + // Row child2Row = child2.createRow(child1Row); + // child2Row.put("child2", Integer.toString(count)); + // tableImpl.put(child2Row, null, null); + + // ++count; + // } + // long intervalTotal = System.currentTimeMillis() - startTime; + + // /* + // * Bulk get iterator + // */ + // assertEquals(numRows, keys.size()); + // iter = tableImpl.tableIterator(keys.iterator(), null, null); + // assertCTRows(iter, startTime, interval, numRows); + + // assertEquals(numRows, keys.size()); + + // /* + // * Test cases for parent/child tables + // */ + + // /* + // * assert that the child tables are populated as expected + // */ + // assertEquals(numRows, countTableRecords(child1.createPrimaryKey(), child1)); + // assertEquals(numRows, countTableRecords(child2.createPrimaryKey(), child2)); + + // /* + // * Iterate the middle child, adding both the parent and child tables + // * to the targets. This exercises code in both directions sufficiently. + // * + // * Test this for all the same combinations above. Some of these share + // * code paths, but the extra coverage doesn't hurt. + // */ + + // /* + // * Table keys first + // */ + // List
<Table> ancestors = Collections.singletonList(table); + // List<Table>
    children = Collections.singletonList(child2); + // MultiRowOptions mro = new MultiRowOptions(null, ancestors, children); + + // /* + // * Table rows + // */ + // iter = + // tableImpl.tableIterator(child1.createPrimaryKey(), mro, null); + // assertCTRows(iter, startTime, intervalTotal, numRows * 3); + + // /* + // * Multi-get + // */ + // pkey = child1.createPrimaryKey(); + // pkey.put(SHARD_KEY_1, "sk1"); + // pkey.put(SHARD_KEY_2, "sk2"); + // rows = tableImpl.multiGet(pkey, mro, null); + // assertEquals(numRows * 3, rows.size()); + // for (Row row : rows) { + // assertTrue(row.getCreationTime() >= startTime); + // assertTrue(row.getCreationTime() - startTime <= intervalTotal); + // } + + // /* + // * Index iterator + // * + // * An index was created on "child1" in the middle child table. + // * + // * NOTE: index scans cannot return child tables, so modify the + // * MultiRowOptions to only contain an ancestor. + // */ + // mro = new MultiRowOptions(null, ancestors, null); + // index = child1.getIndex("child1index"); + // iter = tableImpl.tableIterator(index.createIndexKey(), mro, null); + // assertCTRows(iter, startTime, intervalTotal, numRows * 2); + // } + + // @Test + // public void testPreviousRowOnPut() { + // createTable(); + + // Row row = createRow(); + // long startTime = System.currentTimeMillis(); + // tableImpl.put(row, null, null); + // long interval = System.currentTimeMillis() - startTime; + + // ReturnRow rr = table.createReturnRow(Choice.ALL); + + // tableImpl.put(row, rr, null); + + // assertTrue(rr.getCreationTime() >= startTime); + // assertTrue(rr.getCreationTime() - startTime <= interval); + // } + + // @Test + // public void testPreviousRowOnPutIfAbsent() { + // createTable(); + + // for (Choice choice : Choice.values()) { + // Row row = createRow(); + // ReturnRow rr = table.createReturnRow(choice); + // tableImpl.put(row, rr, null); + + // assertNull(rr.getVersion()); + // assertEquals(0, rr.getCreationTime()); + // } + // } + + // @Test + // public void testPreviousRowOnPutIfPresent() { + // createTable(); + + // long startTime = System.currentTimeMillis(); + // Row row = createRow(); + // tableImpl.put(row, null, null); + // long interval = System.currentTimeMillis() - startTime; + + + // ReturnRow rr = table.createReturnRow(Choice.ALL); + // tableImpl.putIfPresent(row, rr, null); + + // assertTrue(rr.getCreationTime() >= startTime); + // assertTrue(rr.getCreationTime() - startTime <= interval); + // } + + // @Test + // public void testPreviousRowOnPutIfVersion() { + // createTable(); + + // long startTime = System.currentTimeMillis(); + // Row row = createRow(); + // Version prevVersion = tableImpl.put(row, null, null); + // long interval = System.currentTimeMillis() - startTime; + + // Version currentVersion = tableImpl.put(row, null, null); + + // ReturnRow rr = table.createReturnRow(Choice.ALL); + + // Row newRow = tableImpl.get(row.createPrimaryKey(), null); + // assertTrue(newRow.getCreationTime() >= startTime); + // assertTrue(newRow.getCreationTime() - startTime <= interval); + + // /* + // * Use old version. This will fail, which means that return row + // * is available + // */ + // Version newVersion = + // tableImpl.putIfVersion(row, prevVersion, rr, null); + // assertNull(newVersion); + // assertTrue(rr.getCreationTime() >= startTime); + // assertTrue(rr.getCreationTime() - startTime <= interval); + + // /* + // * Use the correct version, which will succeed. 
Return row is empty + // */ + // currentVersion = + // tableImpl.putIfVersion(row, currentVersion, rr, null); + // assertNotNull(currentVersion); + // // because the put went through there is no returnInfo + // assertTrue(rr.getCreationTime() == 0); + // } + + // @Test + // public void testPreviousRowOnPutInBatch() throws Exception { + // createTable(); + + // TableOperationFactory factory = tableImpl.getTableOperationFactory(); + + // List puts = new ArrayList(); + + // long startTime = System.currentTimeMillis(); + // Row row = createRow(); + // Version prevVersion = tableImpl.put(row, null, null); + // assertNotNull(prevVersion); + // TableOperation op = factory.createPut(row, Choice.ALL, false); + // puts.add(op); + + // List results = + // tableImpl.execute(puts, null); + // long interval = System.currentTimeMillis() - startTime; + + // TableOperationResult r = results.get(0); + // assertTrue(r.getSuccess()); + // Row rr = r.getPreviousRow(); + // if (rr != null) { + // assertTrue(rr.getCreationTime() >= startTime); + // assertTrue(rr.getCreationTime() - startTime <= interval); + // } + // } + + // @Test + // public void testPreviousRowOnPutIfPresentInBatch() throws Exception { + // createTable(); + + // TableOperationFactory factory = tableImpl.getTableOperationFactory(); + + // List puts = new ArrayList(); + + // long startTime = System.currentTimeMillis(); + // Row row = createRow(); + // tableImpl.put(row, null, null); + // TableOperation op = factory.createPutIfPresent(row, Choice.ALL, false); + // puts.add(op); + + // List results = + // tableImpl.execute(puts, null); + // long interval = System.currentTimeMillis() - startTime; + + // TableOperationResult r = results.get(0); + // assertTrue(r.getSuccess()); + // Row rr = r.getPreviousRow(); + + // if (rr != null) { + // assertTrue(rr.getCreationTime() >= startTime); + // assertTrue(rr.getCreationTime() - startTime <= interval); + // } + // } + + // /* this tests both delete and deleteIfVersion */ + // @Test + // public void testPreviousRowOnDelete() { + // createTable(); + // Row row = createRow(); + + // long startTime = System.currentTimeMillis(); + // Version prevVersion = tableImpl.put(row, null, null); + // long interval = System.currentTimeMillis() - startTime; + // tableImpl.put(row, null, null); + + // ReturnRow rr = table.createReturnRow(Choice.ALL); + + // /* this will fail, which means ReturnRow is available */ + // boolean deleted = tableImpl.deleteIfVersion(row.createPrimaryKey(), + // prevVersion, + // rr, + // null); + // assertFalse(deleted); + // assertTrue(rr.getCreationTime() >= startTime); + // assertTrue(rr.getCreationTime() - startTime <= interval); + + // /* this will succeed, return row is available */ + // deleted = tableImpl.delete(row.createPrimaryKey(), + // rr, + // null); + // assertTrue(deleted); + // assertTrue(rr.getCreationTime() >= startTime); + // assertTrue(rr.getCreationTime() - startTime <= interval); + // } + + // @Test + // public void testQueryFunction() { + // createTable(); + + // long startTime = System.currentTimeMillis(); + // Row row = createRow(); + // tableImpl.put(row, null, null); + + // Row dbRow = tableImpl.get(row.createPrimaryKey(), null); + + // long interval = System.currentTimeMillis() - startTime; + // assertTrue(dbRow.getCreationTime() >= startTime); + // assertTrue(dbRow.getCreationTime() - startTime <= interval); + + // // query top level table + // try { + // int rows = 0; + // TableIterator it = + // store.execute("SELECT creation_time($t) as ct, " + + // 
"creation_time_millis($t) as ctm, " + + // "modification_time($t) as mt " + + // " FROM " + TABLE_NAME + " $t") + // .get() + // .iterator(); + // while(it.hasNext()) { + // RecordValue r = it.next(); + // long ctm = r.get("ctm").asLong().get(); + // long ct = r.get("ct").asTimestamp().get().getTime(); + // long mt = r.get("mt").asTimestamp().get().getTime(); + + // assertTrue(" ctm: " + (ctm - startTime) + + // " should be >= 0", ctm >= startTime); + // assertTrue(" creationTime: " + (ctm - startTime) + + // " not in expected interval: " + interval, + // ctm - startTime <= interval); + + // assertTrue(" ct: " + (ct - startTime) + + // " should be >= 0", ct >= startTime); + // assertTrue(" ct: " + (ct - startTime) + + // " not in expected interval: " + interval, + // ct - startTime <= interval); + + // assertTrue(" mt: " + (mt - startTime) + + // " should be >= 0", mt >= startTime); + // assertTrue(" mt: " + (mt - startTime) + + // " not in expected interval: " + interval, + // mt - startTime <= interval); + + // rows++; + // } + // assertEquals(1, rows); + // } catch (Throwable e) { + // e.printStackTrace(); + // fail("Got exception during query iteration: " + e); + // } + + + // // child tables + // createChildTables(); + // long startTimeCh = System.currentTimeMillis(); + + // /* for bulk get */ + // List keys = new ArrayList(); + + // PrimaryKey pkey = table.createPrimaryKey(); + // pkey.put(SHARD_KEY_1, "sk1"); + // pkey.put(SHARD_KEY_2, "sk2"); + // List pkeys = tableImpl.multiGetKeys(pkey, null, null); + // assertEquals(1, pkeys.size()); + + // int count = 0; + // for (PrimaryKey key : pkeys) { + // assertTrue(key.getCreationTime() == 0); + + // keys.add(key); /* bulk get */ + + // /* + // * Add rows to the child tables. They share the primary key of the + // * parent. 
+ // */ + // Row child1Row = child1.createRow(key); + // child1Row.put(CH1_KEY, Integer.toString(count)); + // tableImpl.put(child1Row, null, null); + + // Row child2Row = child2.createRow(child1Row); + // child2Row.put(CH2_KEY, Integer.toString(count)); + // tableImpl.put(child2Row, null, null); + + // ++count; + // } + // long intervalCh = System.currentTimeMillis() - startTimeCh; + + // // query child tables + // try { + // int rows = 0; + // TableIterator it = + // store.execute("SELECT creation_time($t) as ct, " + + // "creation_time_millis($t) as ctm FROM " + CH1_TABLE_NAME + + // " $t") + // .get() + // .iterator(); + // while(it.hasNext()) { + // RecordValue r = it.next(); + // long ctm = r.get("ctm").asLong().get(); + // long ct = r.get("ct").asTimestamp().get().getTime(); + + // assertTrue(ctm >= startTimeCh); + // assertTrue(ctm - startTimeCh <= intervalCh); + // assertTrue(ct >= startTimeCh); + // assertTrue(ct - startTimeCh <= intervalCh); + // rows++; + // } + // assertEquals(1, rows); + + // rows = 0; + // it = + // store.execute("SELECT creation_time($t) as ct, " + + // "creation_time_millis($t) as ctm FROM " + CH2_TABLE_NAME + + // " $t") + // .get() + // .iterator(); + // while(it.hasNext()) { + // RecordValue r = it.next(); + + // long ctm = r.get("ctm").asLong().get(); + // long ct = r.get("ct").asTimestamp().get().getTime(); + + // assertTrue(ctm >= startTimeCh); + // assertTrue(ctm - startTimeCh <= intervalCh); + // assertTrue(ct >= startTimeCh); + // assertTrue(ct - startTimeCh <= intervalCh); + // rows++; + // } + // assertEquals(1, rows); + // } catch (Throwable e) { + // e.printStackTrace(); + // fail("Got exception during query iteration: " + e); + // } + // } + + // @Test + // public void testQueryFunctionCollections() { + // createTableC(); + + // long startTime = System.currentTimeMillis(); + // Row row = createRowC(); + // tableImpl.put(row, null, null); + + // Row dbRow = tableImpl.get(row.createPrimaryKey(), null); + + // long interval = System.currentTimeMillis() - startTime; + // assertTrue(dbRow.getCreationTime() >= startTime); + // assertTrue(dbRow.getCreationTime() - startTime <= interval); + + // // query top level table + // try { + // int rows = 0; + // TableIterator it = + // store.execute("SELECT creation_time($t) as ct, " + + // "creation_time_millis($t) as ctm, " + + // "modification_time($t) as mt " + + // " FROM " + TABLE_NAMEC + " $t") + // .get() + // .iterator(); + // while(it.hasNext()) { + // RecordValue r = it.next(); + // long ctm = r.get("ctm").asLong().get(); + // long ct = r.get("ct").asTimestamp().get().getTime(); + // long mt = r.get("mt").asTimestamp().get().getTime(); + + // assertTrue(" ctm: " + (ctm - startTime) + + // " should be >= 0", ctm >= startTime); + // assertTrue(" creationTime: " + (ctm - startTime) + + // " not in expected interval: " + interval, + // ctm - startTime <= interval); + + // assertTrue(" ct: " + (ct - startTime) + + // " should be >= 0", ct >= startTime); + // assertTrue(" ct: " + (ct - startTime) + + // " not in expected interval: " + interval, + // ct - startTime <= interval); + + // assertTrue(" mt: " + (mt - startTime) + + // " should be >= 0", mt >= startTime); + // assertTrue(" mt: " + (mt - startTime) + + // " not in expected interval: " + interval, + // mt - startTime <= interval); + + // rows++; + // } + // assertEquals(1, rows); + // } catch (Throwable e) { + // e.printStackTrace(); + // fail("Got exception during query iteration: " + e); + // } + + + // /* for bulk get */ + // PrimaryKey pkey 
= tableC.createPrimaryKey(); + // pkey.put(SHARD_KEY_1, "sk1"); + // pkey.put(SHARD_KEY_2, "sk2"); + // List pkeys = tableImpl.multiGetKeys(pkey, null, null); + // assertEquals(1, pkeys.size()); + // } + + // private static void assertCTRows(TableIterator iter, long expectedTime, + // long interval, + // int expected) { + // int count = 0; + // while (iter.hasNext()) { + // Row row = iter.next(); + // assertTrue(" creationTime: " + + // (row.getCreationTime() - expectedTime) + + // " not in expected interval: " + interval,(row.getCreationTime() - expectedTime) >= 0); + // assertTrue(" creationTime: " + + // (row.getCreationTime() - expectedTime) + + // " not in expected interval: " + interval, + // row.getCreationTime() - expectedTime <= interval); + // count++; + // } + // assertTrue(count == expected); + // } + + // private Table createTable() { + // String query = CREATE_TABLE; + + // executeDdl(query); + // executeDml("DELETE FROM " + TABLE_NAME); + // executeDdl(CREATE_INDEX); + // table = getTable(TABLE_NAME); + + // assertTrue(table != null); + // assertTrue(table.getDefaultTTL() == null); + + // return table; + // } + + // private void createChildTables() { + // String query = CREATE_CHILD1_TABLE; + // executeDdl(query); + // query = CREATE_CHILD2_TABLE; + // executeDdl(query); + // executeDml("DELETE FROM CreationTimeTest.CTChildOne"); + // executeDml("DELETE FROM CreationTimeTest.CTChildOne.CTChildTwo"); + // executeDdl(CREATE_CHILD1_INDEX); + // child1 = getTable("CreationTimeTest.CTChildOne"); + // child2 = getTable("CreationTimeTest.CTChildOne.CTChildTwo"); + // assertTrue(child1 != null && child2 != null); + // } + + // /** + // * Creates a new row and populates fields with values. + // */ + // private Row createRow() { + // Row row = table.createRow(); + // long l = ++idCounter; + // return populate(row, + // SHARD_KEY_1, "sk1", + // SHARD_KEY_2, "sk2", + // PK2_FIELD, "SSN-" + l, + // STRING_FIELD, Long.toString(l)); + // } + + // private Table createTableC() { + // String query = CREATE_TABLEC; + + // executeDdl(query); + // executeDml("DELETE FROM " + TABLE_NAMEC); + // executeDdl(CREATE_INDEXC); + // tableC = getTable(TABLE_NAMEC); + + // assertTrue(tableC != null); + // assertTrue(tableC.getDefaultTTL() == null); + + // return table; + // } + + // private Row createRowC() { + // Row row = tableC.createRow(); + // long l = ++idCounterC; + // return populate(row, + // SHARD_KEY_1, "sk1", + // SHARD_KEY_2, "sk2", + // PK2_FIELD, "SSN-" + l, + // STRING_FIELD, Long.toString(l)); + // } + + // private Row populate(final Row row, String...nvPairs) { + // for (int i = 0; nvPairs != null && i < nvPairs.length; i+=2) { + // row.put(nvPairs[i], nvPairs[i+1]); + // } + // return row; + // } + + + + // @Test + // public void testRegularTableWithIndex() throws InterruptedException { + // String q = "CREATE TABLE IF NOT EXISTS t (i integer, s string, PRIMARY KEY(i))"; + // PreparedStatement ps = store.prepare(q); + // assertNotNull(ps); + + // StatementResult sr = store.executeSync(ps); + // assertNotNull(sr); + + // q = "CREATE INDEX t1idx1 ON t (s)"; + // ps = store.prepare(q); + // assertNotNull(ps); + + // sr = store.executeSync(ps); + // assertNotNull(sr); + + // Thread.sleep(2000); + + // TableAPI api = store.getTableAPI(); + + // Table tableT = api.getTable("t"); + + // Row row = tableT.createRow(); + // row.put("i", 1); + // row.put("s", "string"); + + // // put a few more for query testing + // for(int i = 0; i < 10; i++) { + // Row tableRow = tableT.createRow(); + // 
tableRow.put("i", 20 + i); + // tableRow.put("s", "string5"); + + // api.put(tableRow, null, null); + // } + + // // query + // try { + // int rows = 0; + // TableIterator it = + // store.execute("SELECT i, s, creation_time($t) as ct, " + + // "creation_time_millis($t) as ctm, " + + // "modification_time($t) as mt FROM t $t").get() + // .iterator(); + // while(it.hasNext()) { + // RecordValue r = it.next(); + + // assertEquals("string5", r.get("s").asString().get()); + // assertTrue(r.get("ct").isTimestamp()); + // assertTrue(r.get("ct").asTimestamp().get().getTime() > 0); + // assertTrue(r.get("ctm").isLong()); + // assertTrue(r.get("ctm").asLong().get() > 0); + // assertTrue(r.get("mt").isTimestamp()); + // assertTrue(r.get("mt").asTimestamp().get().getTime() > 0); + + // rows++; + // } + // assertEquals(10, rows); + // } catch (Throwable e) { + // e.printStackTrace(); + // fail("Got exception during query iteration: " + e); + // } + // } + + // @Test + // public void testRegularTableWithCreationTimeIndex() + // throws InterruptedException { + // String q = "CREATE TABLE IF NOT EXISTS tRegWithIdx (i integer, s string, PRIMARY KEY(i))"; + // PreparedStatement ps = store.prepare(q); + // assertNotNull(ps); + + // StatementResult sr = store.executeSync(ps); + // assertNotNull(sr); + + // q = "CREATE INDEX t1idx2 ON tRegWithIdx ( creation_time() )"; + // ps = store.prepare(q); + // assertNotNull(ps); + + // sr = store.executeSync(ps); + // assertNotNull(sr); + + // Thread.sleep(2000); + + // TableAPI api = store.getTableAPI(); + + // Table tableTRegWithIdx = api.getTable("tRegWithIdx"); + + // // put a few more for query testing + // long startTime = System.currentTimeMillis(); + // for(int i = 0; i < 10; i++) { + // Row tableRow = tableTRegWithIdx.createRow(); + // tableRow.put("i", 20 + i); + // tableRow.put("s", "string5"); + + // api.put(tableRow, null, null); + // } + // long interval = System.currentTimeMillis() - startTime; + + // // query + // try { + // int rows = 0; + // q = "SELECT creation_time($t) as ct " + + // "FROM tRegWithIdx $t "; + // ps = store.prepare(q); + // assertTrue(ps.toString().contains("t1idx2")); + + // TableIterator it = store.executeSync(ps).iterator(); + // while(it.hasNext()) { + // RecordValue r = it.next(); + + // assertTrue(r.get("ct").isTimestamp()); + // assertTrue(r.get("ct").asTimestamp().get().getTime() > 0); + // assertTrue(r.get("ct").asTimestamp().get().getTime() - startTime <= interval); + + // rows++; + // } + // assertEquals(10, rows); + // } catch (Throwable e) { + // e.printStackTrace(); + // fail("Got exception during query iteration: " + e); + // } + // } + + // @Test + // public void testCollectionTable() { + // String q = "CREATE TABLE t2 (i integer, " + + // " PRIMARY KEY(i)) AS JSON COLLECTION "; + // PreparedStatement ps = store.prepare(q); + // assertNotNull(ps); + + // StatementResult sr = store.executeSync(ps); + // assertNotNull(sr); + + // TableAPI api = store.getTableAPI(); + + // Table tableT2 = api.getTable("t2"); + + // Row row = tableT2.createRow(); + // row.put("i", 1); + // row.put("s", "string"); + + // // put a few more for query testing + // for(int i = 0; i < 10; i++) { + // Row tableRow = tableT2.createRow(); + // tableRow.put("i", 20 + i); + // tableRow.put("s", "string5"); + // api.put(tableRow, null, null); + // } + + // // query + // try { + // int rows = 0; + // TableIterator it = + // store.execute("SELECT i, $t.s, creation_time($t) as ct, " + + // "creation_time_millis($t) as ctm, " + + // "modification_time($t) 
as mt FROM t2 $t").get() + // .iterator(); + // while(it.hasNext()) { + // RecordValue r = it.next(); + // assertEquals("string5", r.get("s").asString().get()); + + // assertTrue(r.get("ct").isTimestamp()); + // assertTrue(r.get("ct").asTimestamp().get().getTime() > 0); + // assertTrue(r.get("ctm").isLong()); + // assertTrue(r.get("ctm").asLong().get() > 0); + // assertTrue(r.get("mt").isTimestamp()); + // assertTrue(r.get("mt").asTimestamp().get().getTime() > 0); + + // rows++; + // } + // assertEquals(10, rows); + // } catch (Throwable e) { + // fail("Got exception during query iteration: " + e); + // } + + // // put null for row metadata and query again + // for(int i = 0; i < 10; i++) { + // Row tableRow = tableT2.createRow(); + // tableRow.put("i", 20 + i); + // tableRow.put("s", "string6"); + + // api.put(tableRow, null, null); + // } + + // // query + // try { + // int rows = 0; + // TableIterator it = + // store.execute("SELECT i, $t.s," + + // "creation_time($t) as ct, " + + // "creation_time_millis($t) as ctm, " + + // "modification_time($t) as mt FROM t2 $t").get() + // .iterator(); + // while(it.hasNext()) { + // RecordValue r = it.next(); + // assertEquals("string6", r.get("s").asString().get()); + + // assertTrue(r.get("ct").isTimestamp()); + // assertTrue(r.get("ct").asTimestamp().get().getTime() > 0); + // assertTrue(r.get("ctm").isLong()); + // assertTrue(r.get("ctm").asLong().get() > 0); + // assertTrue(r.get("mt").isTimestamp()); + // assertTrue(r.get("mt").asTimestamp().get().getTime() > 0); + + // rows++; + // } + // assertEquals(10, rows); + // } catch (Throwable e) { + // fail("Got exception during query iteration: " + e); + // } + // } + + + // @Test + // public void testCollectionTablewithIndex() { + // String q = "CREATE TABLE t2 (i integer, " + + // " PRIMARY KEY(i)) AS JSON COLLECTION "; + // PreparedStatement ps = store.prepare(q); + // assertNotNull(ps); + + // StatementResult sr = store.executeSync(ps); + // assertNotNull(sr); + + // q = "CREATE INDEX t2idx1 ON t2 (s as ANYATOMIC)"; + // ps = store.prepare(q); + // assertNotNull(ps); + + // sr = store.executeSync(ps); + // assertNotNull(sr); + + // TableAPI api = store.getTableAPI(); + + // Table tableT2 = api.getTable("t2"); + + // Row row = tableT2.createRow(); + // row.put("i", 1); + // row.put("s", "string"); + + // // put a few more for query testing + // for(int i = 0; i < 10; i++) { + // Row tableRow = tableT2.createRow(); + // tableRow.put("i", 20 + i); + // tableRow.put("s", "string5"); + + // api.put(tableRow, null, null); + // } + + // // query + // try { + // int rows = 0; + // TableIterator it = + // store.execute("SELECT i, $t.s, " + + // "creation_time($t) as ct, " + + // "creation_time_millis($t) as ctm, " + + // "modification_time($t) as mt FROM t2 $t").get() + // .iterator(); + // while(it.hasNext()) { + // RecordValue r = it.next(); + // assertEquals("string5", r.get("s").asString().get()); + + // assertTrue(r.get("ct").isTimestamp()); + // assertTrue(r.get("ct").asTimestamp().get().getTime() > 0); + // assertTrue(r.get("ctm").isLong()); + // assertTrue(r.get("ctm").asLong().get() > 0); + // assertTrue(r.get("mt").isTimestamp()); + // assertTrue(r.get("mt").asTimestamp().get().getTime() > 0); + + // rows++; + // } + // assertEquals(10, rows); + // } catch (Throwable e) { + // fail("Got exception during query iteration: " + e); + // } + // } +} + diff --git a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/table/DmlTest.java 
b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/table/DmlTest.java index 77300ad3..ed9b60a0 100644 --- a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/table/DmlTest.java +++ b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/table/DmlTest.java @@ -17,6 +17,7 @@ import static org.junit.Assert.assertSame; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; +import static org.junit.Assume.assumeTrue; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; @@ -29,6 +30,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; +import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Locale; @@ -57,15 +59,18 @@ import oracle.kv.WriteThroughputException; import oracle.kv.impl.api.KVStoreImpl; import oracle.kv.impl.api.ops.Result; +import oracle.kv.impl.api.query.QueryPublisher.QuerySubscription; import oracle.kv.impl.api.query.PreparedStatementImpl; import oracle.kv.impl.api.query.PreparedStatementImpl.DistributionKind; import oracle.kv.impl.api.query.QueryStatementResultImpl; import oracle.kv.impl.query.compiler.CompilerAPI; import oracle.kv.impl.query.runtime.CloudSerializer.FieldValueWriter; import oracle.kv.impl.query.runtime.PlanIter; +import oracle.kv.impl.topo.RepGroupId; import oracle.kv.impl.util.PollCondition; import oracle.kv.impl.util.SerialVersion; import oracle.kv.impl.util.contextlogger.LogContext; +import oracle.kv.impl.util.registry.AsyncControl; import oracle.kv.impl.xregion.XRegionTestBase; import oracle.kv.query.BoundStatement; import oracle.kv.query.ExecuteOptions; @@ -1473,7 +1478,6 @@ private List execQuerySync(PreparedStatement stmt, ExecuteOptions options, int expRB, int expWB) { - if (showResult) { System.out.println("\n" + String.valueOf(((PreparedStatementImpl)stmt).getQueryString())); @@ -4405,10 +4409,10 @@ public void testGeom() { assertTrue(gen1.getDDL().contains("points")); assertTrue(gen2.getDDL().contains("polygons")); for (String s : gen1.getAllIndexDDL()) { - assertTrue(s.contains("POINT")); + assertTrue(s.contains("Point")); } for (String s : gen2.getAllIndexDDL()) { - assertTrue(s.contains("GEOMETRY")); + assertTrue(s.contains("Geometry")); } } @@ -4840,6 +4844,191 @@ public void testSetVariablesByPosition() { assertTrue(results.size() == 1); } + @Test + public void testPartitionedQueries() throws Exception { + assumeTrue("Only when testing async", AsyncControl.serverUseAsync); + final int numRows = 1000; + final int partitionsPerSplit = 2; + final String tableName = "ParallelQuery"; + final String createTable = "create table " + tableName + + "(id integer, primary key(id)) as json collection"; + final String createIndex = "create index idx on " + tableName + + "(name as string)"; + final String query = "select * from " + tableName; + final String indexQuery = "select * from " + tableName + + " where name > 'm'"; + final String forcePrimary = "select /*+ FORCE_PRIMARY_INDEX(" + + tableName + ") */ * from " + tableName + " where name > 'm'"; + executeDdl(createTable, null, true); + executeDdl(createIndex, null, true); + TableLimits limits = new TableLimits(5000, 10000, 1); + ExecutionFuture f = + ((KVStoreImpl)store).setTableLimits(null, tableName, limits); + assertTrue(f.get().isSuccessful()); + TableImpl table = (TableImpl) tableImpl.getTable(tableName); + assertNotNull(table); + + for (int i = 0; i < numRows; i++) { + Row row = table.createRow(); + row.put("id", i); + row.put("name", ("name_" + i)); + row.put("age", (i % 25)); + 
assertNotNull(tableImpl.put(row, null, null)); + } + + /* + * driver version and cloud query are needed to track throughput. + * setting is simple query is needed for use of the execute variant + * that uses Set + */ + ExecuteOptions options = + new ExecuteOptions().setResultsBatchSize(1).setAsync(false). + setDriverQueryVersion(5).setIsCloudQuery(true).setIsSimpleQuery(true); + PreparedStatementImpl ps = + (PreparedStatementImpl) store.prepare(query, options); + assertTrue(ps.getDistributionKind().equals( + PreparedStatementImpl.DistributionKind.ALL_PARTITIONS)); + assertTrue(ps.isSimpleQuery()); + int numPartitions = + ((KVStoreImpl)store).getTopology().getNumPartitions(); + + ArrayList> splits = new ArrayList<>(); + HashSet currentSplit = new HashSet<>(); + splits.add(currentSplit); + for (int i = 0; i < numPartitions; i++) { + if (i > 0 && (i % partitionsPerSplit) == 0) { + currentSplit = new HashSet<>(); + splits.add(currentSplit); + } + currentSplit.add(i + 1); /* partition ids are 1-based */ + } + + int count = 0; + int readKB = 0; + for (HashSet s : splits) { + QueryStatementResultImpl result = + (QueryStatementResultImpl) ps.executeSyncPartitions( + (KVStoreImpl)store, options, s); + Iterator iter = result.iterator(); + while (iter.hasNext()) { + count++; + iter.next(); + } + readKB += result.getReadKB(); + } + assertEquals(numRows, count); + assertEquals(numRows, readKB); + + /* + * all shard query + */ + ps = (PreparedStatementImpl) store.prepare(indexQuery, options); + assertTrue(ps.getDistributionKind().equals( + PreparedStatementImpl.DistributionKind.ALL_SHARDS)); + assertTrue(ps.isSimpleQuery()); + int numShards = + ((KVStoreImpl)store).getTopology().getNumRepGroups(); + + /* use one set of size numShards */ + HashSet currentSsplit = new HashSet<>(); + for (int i = 0; i < numShards; i++) { + currentSsplit.add(new RepGroupId(i+1)); + } + + MySubscriber qsub = new MySubscriber(options.getResultsBatchSize()); + execShardsAsync(ps, options, currentSsplit, qsub); + assertEquals(numRows, qsub.numResults); + assertEquals(numRows, qsub.readKB); + + /* use numShard splits, each with a single entry */ + count = 0; + readKB = 0; + for (int i = 0; i < numShards; i++) { + HashSet split = new HashSet<>(); + split.add(new RepGroupId(i+1)); + qsub = new MySubscriber(options.getResultsBatchSize()); + execShardsAsync(ps, options, split, qsub); + count += qsub.numResults; + readKB += qsub.readKB; + } + assertEquals(numRows, count); + assertEquals(numRows, readKB); + + /* make sure forcing use of primary index results in ALL_PARTITIONS */ + ps = (PreparedStatementImpl) store.prepare(forcePrimary, options); + assertTrue(ps.getDistributionKind().equals( + PreparedStatementImpl.DistributionKind.ALL_PARTITIONS)); + } + + private void execShardsAsync(PreparedStatementImpl ps, + ExecuteOptions options, + HashSet split, + MySubscriber qsub) { + + Publisher qpub = + ((KVStoreImpl)store).executeAsync(ps, options, split); + + qpub.subscribe(qsub); + + synchronized (qsub) { + while (!qsub.isDone) { + try { + qsub.wait(); + } catch (InterruptedException e) { + fail("Interrupted while waiting for async query"); + } + } + } + if (qsub.err != null) { + throw new RuntimeException(qsub.err); + } + } + + private class MySubscriber implements Subscriber { + int numResults; + int readKB; + Throwable err; + Subscription subscription; + final int batchSize; + volatile boolean isDone = false; + + MySubscriber(int batchSize) { + this.batchSize = batchSize; + } + + @Override + public void onSubscribe(Subscription s) { 
+ subscription = s; + s.request(batchSize); + } + + @Override + public void onNext(RecordValue value) { + numResults++; + if (numResults % batchSize == 0) { + subscription.request(batchSize); + } + } + + @Override + public void onError(Throwable e) { + err = e; + isDone = true; + notifyAll(); + } + + @SuppressWarnings("null") + @Override + public synchronized void onComplete() { + QueryStatementResultImpl qres = + ((QuerySubscription)subscription). + getAsyncIterator().getQueryStatementResult(); + readKB = qres.getReadKB(); + isDone = true; + notifyAll(); + } + } + private void runPrepareSizeLimit(String statement, ExecuteOptions options, boolean shouldFail, diff --git a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/table/IndexSerializationTest.java b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/table/IndexSerializationTest.java index e33727e4..d7c86207 100644 --- a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/table/IndexSerializationTest.java +++ b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/table/IndexSerializationTest.java @@ -2171,7 +2171,7 @@ public void testTimestampFields() { byte[] key = table.createKey(row, false).toByteArray(); byte[] data = table.createValue(row).toByteArray(); List indexKeys = - index.extractIndexKeys(key, data, 0, 0, 0, false, maxKeys); + index.extractIndexKeys(key, data, 0, 0, 0, 0, false, maxKeys); for (byte[] buf : indexKeys) { IndexKeyImpl idxKey = index.deserializeIndexKey(buf, false); @@ -3569,7 +3569,7 @@ public void testNumberFields() { byte[] key = table.createKey(row, false).toByteArray(); byte[] data = table.createValue(row).toByteArray(); List indexKeys = - index.extractIndexKeys(key, data, 0, 0, 0, false, maxKeys); + index.extractIndexKeys(key, data, 0, 0, 0, 0, false, maxKeys); for (byte[] buf : indexKeys) { IndexKeyImpl idxKey = index.deserializeIndexKey(buf, false); diff --git a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/table/JsonMetadataTest.java b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/table/JsonMetadataTest.java index bd77f641..66125d87 100644 --- a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/table/JsonMetadataTest.java +++ b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/table/JsonMetadataTest.java @@ -12,6 +12,7 @@ import java.util.Map; +import oracle.kv.impl.api.table.TableImpl.JsonFormatter; import oracle.kv.table.Index; import org.junit.BeforeClass; @@ -284,9 +285,56 @@ public void testJsonIndexFieldSpecialChars() { roundTrip("foo"); } + @Test + public void testFunctionalIndex() { + String ddl = "CREATE TABLE foo (" + + "id INTEGER, " + + "i INTEGER, " + + "s STRING, " + + "ts TIMESTAMP(3), " + + "j JSON, " + + "PRIMARY KEY(id))"; + executeDdl(ddl); + + String[] indexDdls = new String[] { + "CREATE INDEX idx1 ON foo(" + + "length(s), " + + "substring(s, 1, 2), " + + "power(i, 2), " + + "timestamp_round(ts, 2), " + + "modification_time()) " + + "WITH NO NULLS WITH UNIQUE KEYS PER ROW", + + "CREATE INDEX idx2 ON foo (" + + "replace(j.s as STRING, \"nosql\", \"NoSQL\"), " + + "trunc(j.n.d as Double, 3), " + + "substring(j.m[].values().name as AnyAtomic, 3), " + + "power(j.\"#\".a.\"@\" as Long, 2)," + + "j.v as AnyAtomic)" + }; + + for (String indexDdl : indexDdls) { + executeDdl(indexDdl); + } + + roundTrip("foo"); + } + private void roundTrip(String tableName) { + roundTrip(tableName, null); + for (int v = 1; v <= JsonFormatter.CURRENT_VERSION; v++) { + roundTrip(tableName, v); + } + } + + private void roundTrip(String tableName, Integer jsonVersion) { TableImpl table = 
getTable(tableName); - String json = table.toJsonString(false); + String json; + if (jsonVersion != null) { + json = table.toJsonString(true, jsonVersion); + } else { + json = table.toJsonString(true); + } TableImpl newTable = TableJsonUtils.fromJsonString(json, null); assertEquals(table, newTable); if (table.hasIdentityColumn()) { diff --git a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/table/MetadataTest.java b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/table/MetadataTest.java index 987efaaf..e0448b12 100644 --- a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/table/MetadataTest.java +++ b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/table/MetadataTest.java @@ -7,11 +7,11 @@ package oracle.kv.impl.api.table; +import static org.junit.Assert.assertEquals; + import java.util.ArrayList; import java.util.Arrays; -import static org.junit.Assert.assertEquals; - import oracle.kv.TestBase; import org.junit.Test; @@ -82,7 +82,7 @@ public void testTableSeqNum() { assertEquals(table.getSequenceNumber(), md.getSequenceNumber()); md.evolveTable(table, table.numTableVersions(), - table.getFieldMap(), null, "DESCRIPTION", + table.getFieldMap(), null, null, "DESCRIPTION", false, null, null); assertEquals(table.getSequenceNumber(), md.getSequenceNumber()); @@ -130,6 +130,7 @@ private TableImpl addTable(TableMetadata md, TableImpl table, String v) { table.getShardKey(), table.getFieldMap(), null, // TTL + null, /*beforeImageTTL*/ null, // limits false, 0, null, null); diff --git a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/table/NumberUtilsTest.java b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/table/NumberUtilsTest.java index f3aff624..cac3551b 100644 --- a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/table/NumberUtilsTest.java +++ b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/table/NumberUtilsTest.java @@ -367,6 +367,97 @@ public void testNextUpBytes() { } } + /* + * Verify the fix to KVSTORE-2514 + */ + @Test + public void testSerializeNegativeNumberWithZeros() { + /* + * The values in the following array are sorted in descending order + */ + String[] strs = new String[] { + "-2", + "-2.000001", + "-2.0000099", + "-2.00001", + "-2.000099", + "-2.0001", + "-2.00099", + "-2.001", + "-2.0099", + "-2.01", + "-2.099", + "-2.1", + }; + + byte[] prevBytes = null; + + /* Negative values */ + for (String s : strs) { + BigDecimal v0 = new BigDecimal(s); + byte[] bytes = NumberUtils.serialize(v0); + if (prevBytes != null) { + /* check sorting */ + int ret = IndexImpl.compareUnsignedBytes(bytes, prevBytes); + assertTrue(ret < 0); + } + prevBytes = bytes; + + /* check deserialization */ + BigDecimal v1 = (BigDecimal)NumberUtils.deserialize(bytes, true); + assertTrue(v0.compareTo(v1) == 0); + + /* + * Validate the old problematic bytes can be decoded correctly, + * this is to ensure that existing NUMBER values in database can + * be read correctly before reserialization. + * + * For example -2.0001, the difference between the old and new + * bytes is the 3nd byte: 0x80 vs 0x7E + * old: 59 6A 80 74 7F + * new: 59 6A 7E 74 7F + * + * Converting to the old bytes format by replacing 0x7E in the + * current new bytes with 0x80. 
+ */ + byte[] bytes1 = replaceByte(bytes, + 1 /* skipping leading byte*/, + (byte)0x7E, + (byte)0x80); + v1 = (BigDecimal)NumberUtils.deserialize(bytes1, true); + assertTrue(v0.compareTo(v1) == 0); + } + + /* Validate the positive values */ + for (String s : strs) { + BigDecimal v0 = new BigDecimal(s).negate(); + byte[] bytes = NumberUtils.serialize(v0); + if (prevBytes != null) { + int ret = IndexImpl.compareUnsignedBytes(bytes, prevBytes); + assertTrue(ret > 0); + } + prevBytes = bytes; + + BigDecimal v1 = (BigDecimal)NumberUtils.deserialize(bytes, true); + assertTrue(v0.compareTo(v1) == 0); + } + } + + private byte[] replaceByte(byte[] bytes, + int offset, + byte target, + byte replacement) { + byte[] result = new byte[bytes.length]; + for (int i = 0; i < bytes.length; i++) { + if (i >= offset && bytes[i] == target) { + result[i] = replacement; + } else { + result[i] = bytes[i]; + } + } + return result; + } + private byte[] roundTripWriteReadExponent(int sign, int exponent) { int len = NumberUtils.getNumBytesExponent(sign, exponent); diff --git a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/table/TableChangeSerialTest.java b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/table/TableChangeSerialTest.java index 8f349c50..4d7ee791 100644 --- a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/table/TableChangeSerialTest.java +++ b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/table/TableChangeSerialTest.java @@ -9,6 +9,7 @@ import static java.util.Collections.singletonList; import static oracle.kv.impl.util.SerialTestUtils.serialVersionChecker; +import static oracle.kv.impl.util.SerialVersion.BEFORE_IMAGE_VERSION; import static oracle.kv.impl.util.SerialVersion.SCHEMALESS_TABLE_VERSION; import static oracle.kv.util.TestUtils.checkAll; @@ -103,11 +104,13 @@ public void testAddTable() { serialVersionChecker( new AddTable(TABLE, 46 /* seqNum */), SerialVersion.MINIMUM, 0xd54f666087adaf6L, - SCHEMALESS_TABLE_VERSION, 0xb30ed8b6e8485dbL), + SCHEMALESS_TABLE_VERSION, 0xb30ed8b6e8485dbL, + BEFORE_IMAGE_VERSION, 0x36bf1a86ace389eL), serialVersionChecker( new AddTable(REMOTE_TABLE, 47 /* seqNum */), SerialVersion.MINIMUM, 0x301999cf5482ab91L, - SCHEMALESS_TABLE_VERSION, 0xa9b4408458a8f229L)); + SCHEMALESS_TABLE_VERSION, 0xa9b4408458a8f229L, + BEFORE_IMAGE_VERSION, 0xd666ab860686aedL)); } @Test @@ -146,10 +149,12 @@ public void testEvolveTable() { checkAll( serialVersionChecker( new EvolveTable(TABLE, 100), - SerialVersion.MINIMUM, 0xa8799cc74eaa214cL), + SerialVersion.MINIMUM, 0xa8799cc74eaa214cL, + BEFORE_IMAGE_VERSION, 0xaa5cb77ed2f951d1L), serialVersionChecker( new EvolveTable(REMOTE_TABLE, 101), - SerialVersion.MINIMUM, 0xc6024a6a32055764L)); + SerialVersion.MINIMUM, 0xc6024a6a32055764L, + BEFORE_IMAGE_VERSION, 0x737cedf8035376f4L)); } @Test diff --git a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/table/TableFieldValuesSerialTest.java b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/table/TableFieldValuesSerialTest.java index cf47bd85..3fc5c5b4 100644 --- a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/table/TableFieldValuesSerialTest.java +++ b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/table/TableFieldValuesSerialTest.java @@ -11,6 +11,7 @@ import static java.util.Collections.singletonList; import static java.util.Collections.singletonMap; import static oracle.kv.impl.util.SerialTestUtils.serialVersionChecker; +import static oracle.kv.impl.util.SerialVersion.BEFORE_IMAGE_VERSION; import static oracle.kv.impl.util.SerialVersion.QUERY_VERSION_16; import 
static oracle.kv.impl.util.SerialVersion.JSON_COLLECTION_VERSION; import static oracle.kv.impl.util.SerialVersion.SCHEMALESS_TABLE_VERSION; @@ -467,7 +468,8 @@ public void testTableImpl() { table, SerialVersion.MINIMUM, 0x9f996711f7e27749L, SCHEMALESS_TABLE_VERSION, 0x757204f05b7c954fL, - JSON_COLLECTION_VERSION, 0xd4cd21e87d4b06a2L) + JSON_COLLECTION_VERSION, 0xd4cd21e87d4b06a2L, + BEFORE_IMAGE_VERSION, 0x97a989d91bde3426L) .reader((in, sv) -> new TableImpl(in, sv, null /* parent */))); } @@ -495,6 +497,7 @@ public void testTableMetadata() { singletonList("id"), fieldMap, TimeToLive.ofHours(5), + null, /*beforeImageTTL*/ new TableLimits(1, 2, 3, 4, 5, 6, 7), false, /* r2compat */ 4, /* schemaId */ @@ -523,7 +526,8 @@ public void testTableMetadata() { SerialVersion.MINIMUM, 0x2ce5a1bd36005028L, SCHEMALESS_TABLE_VERSION, 0xdd84145d183a8efdL, JSON_COLLECTION_VERSION, 0x85a8ea004f0a6fcaL, - QUERY_VERSION_16, 0x221258a889bcf7d9L), + QUERY_VERSION_16, 0x221258a889bcf7d9L, + BEFORE_IMAGE_VERSION, 0x8a34b03d0f5fdd6fL), serialVersionChecker( new TableMetadata(false /* keepChanges */), SerialVersion.MINIMUM, 0x237c6f38ef9b06a3L, @@ -549,6 +553,7 @@ public void testTableMetadataJsonCollection() { singletonList("id"), jsonCollectionFieldMap, TimeToLive.ofHours(5), + null, /*beforeImageTTL*/ new TableLimits(1, 2, 3, 4, 5, 6, 7), false, /* r2compat */ 0, /* schemaId */ @@ -563,7 +568,8 @@ public void testTableMetadataJsonCollection() { checkAll(serialVersionChecker( tableMetadata1, JSON_COLLECTION_VERSION, 0x6e1320898e122560L, - QUERY_VERSION_16, 0x9e9c33072fe00095L)); + QUERY_VERSION_16, 0x9e9c33072fe00095L, + BEFORE_IMAGE_VERSION, 0x20aba3f03bad211fL)); } @Test diff --git a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/table/TableOpsSerialTest.java b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/table/TableOpsSerialTest.java index e4418b9c..9be65a21 100644 --- a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/table/TableOpsSerialTest.java +++ b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/table/TableOpsSerialTest.java @@ -13,6 +13,7 @@ import static oracle.kv.impl.util.SerialVersion.QUERY_VERSION_14; import static oracle.kv.impl.util.SerialVersion.QUERY_VERSION_16; import static oracle.kv.impl.util.SerialVersion.QUERY_VERSION_17; +import static oracle.kv.impl.util.SerialVersion.ROW_METADATA_VERSION; import java.math.MathContext; import java.util.concurrent.TimeUnit; @@ -126,26 +127,32 @@ public void testMultiDeleteTable() { KEY_RANGE, RESUME_KEY, 7, - false /* doTombstone */), + false /* doTombstone */, + null /* rowMetadata */), SerialVersion.MINIMUM, 0xf1263fcede36a88dL, - CLOUD_MR_TABLE, 0xae8e2fc911747f58L), + CLOUD_MR_TABLE, 0xae8e2fc911747f58L, + ROW_METADATA_VERSION, 0x260462ecb0234090L), serialVersionChecker( new MultiDeleteTable(null, /* parentKey */ new TargetTables(TABLE, null, null), null, /* subRange */ null, /* resumeKey */ 8, - false /* doTombstone */), + false /* doTombstone */, + null /* rowMetadata */), SerialVersion.MINIMUM, 0x648b1dc049dec2bdL, - CLOUD_MR_TABLE, 0xf3b61b1030c629fL), + CLOUD_MR_TABLE, 0xf3b61b1030c629fL, + ROW_METADATA_VERSION, 0xdc4a82ec4c3b4503L), serialVersionChecker( new MultiDeleteTable(null, /* parentKey */ new TargetTables(TABLE, null, null), null, /* subRange */ null, /* resumeKey */ 8, - true /* doTombstone */), - CLOUD_MR_TABLE, 0xb6b9fa2e043df94aL)); + true /* doTombstone */, + null /* rowMetadata */), + CLOUD_MR_TABLE, 0xb6b9fa2e043df94aL, + ROW_METADATA_VERSION, 0xdb30f86c7d4bc186L)); } @Test @@ -382,12 +389,14 @@ public void 
testTableQuery() { Region.NULL_REGION_ID, false /* doTombstone */, 0, - false /* performsWrite */), + false /* performsWrite */, + null /* rowMetadata */), SerialVersion.MINIMUM, 0x3144c2b760b8c32cL, CLOUD_MR_TABLE, 0x790589f5083ee0edL, QUERY_VERSION_14, 0x929514f53334c1e3L, QUERY_VERSION_16, 0xff61efe8a32c1ea7L, - QUERY_VERSION_17, 0x71e479f809d5a33eL), + QUERY_VERSION_17, 0x71e479f809d5a33eL, + ROW_METADATA_VERSION, 0xe5efeda932111fb0L), serialVersionChecker( new TableQuery( "testQuery", @@ -425,10 +434,12 @@ public void testTableQuery() { regionId, true /* doTombstone */, 0, - false /* performsWrite */), + false /* performsWrite */, + null /* rowMetadata */), CLOUD_MR_TABLE, 0x1767f1dc5bbf651eL, QUERY_VERSION_14, 0x8a7933f2058d74c8L, QUERY_VERSION_16, 0x8c562212955c1835L, - QUERY_VERSION_17, 0x3a8b98d37b8fecdbL)); + QUERY_VERSION_17, 0x3a8b98d37b8fecdbL, + ROW_METADATA_VERSION, 0xfb04422829bc5a73L)); } } diff --git a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/table/TableTestBase.java b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/table/TableTestBase.java index 09524a86..e479bfc0 100644 --- a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/table/TableTestBase.java +++ b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/table/TableTestBase.java @@ -119,7 +119,7 @@ public class TableTestBase extends TestBase { static int createStoreCount; static TableAPI restoreTableImpl; static KVStore rsStore; - protected static final int startPort = 13240; + public static final int startPort = 13240; protected static final TableIteratorOptions countOpts = new TableIteratorOptions(Direction.UNORDERED, diff --git a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/table/ValueReaderTest.java b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/table/ValueReaderTest.java index 4c4f0903..77883f30 100644 --- a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/table/ValueReaderTest.java +++ b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/api/table/ValueReaderTest.java @@ -565,6 +565,7 @@ static class TestValueReader implements ValueReader { private long expirationTime; private int regionId; private long modificationTime; + private String rowMetadata; private MapValue root; private Stack nestedNodes; @@ -781,6 +782,10 @@ public void reset() { public void setValue(String value) { } + @Override + public void setCreationTime(long creationTime) { + } + @Override public void setModificationTime(long modificationTime) { this.modificationTime = modificationTime; @@ -796,5 +801,13 @@ public void readCounterCRDT(String fieldName, FieldValueImpl val) { writeValue(fieldName, val); } + + @Override public void setRowMetadata(String rowMetadata) { + this.rowMetadata = rowMetadata; + } + + public String getRowMetadata() { + return rowMetadata; + } } } diff --git a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/async/dialog/nio/NioEndpointHandlerTest.java b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/async/dialog/nio/NioEndpointHandlerTest.java index 30bd598c..344686f5 100644 --- a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/async/dialog/nio/NioEndpointHandlerTest.java +++ b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/async/dialog/nio/NioEndpointHandlerTest.java @@ -548,7 +548,8 @@ private void init(SocketChannel[] channels) throws Exception { "Testing", NioUtil.getRemoteAddress(channels[0]).get(), threadPool.next(), - threadPool.getBackupExecutor(), + threadPool.getBackupSchedExecutor(), + threadPool.getBackupSchedExecutor(), dialogHandlerFactories, clientChannel, new 
DialogResourceManager(Integer.MAX_VALUE), @@ -561,7 +562,8 @@ private void init(SocketChannel[] channels) throws Exception { "Testing", NioUtil.getRemoteAddress(channels[1]).get(), threadPool.next(), - threadPool.getBackupExecutor(), + threadPool.getBackupSchedExecutor(), + threadPool.getBackupSchedExecutor(), dialogHandlerFactories, serverChannel, serverResourceManager, diff --git a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/query/ElasticityDelayTest.java b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/query/ElasticityDelayTest.java index fa639c19..977339d8 100644 --- a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/query/ElasticityDelayTest.java +++ b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/query/ElasticityDelayTest.java @@ -171,6 +171,7 @@ private void createTableAndIndex() { USER_TABLE.getShardKey(), USER_TABLE.getFieldMap(), null, // TTL + null, /*beforeImageTTL*/ null, // limits false, 0, null, null); @@ -270,7 +271,8 @@ private Request makeScanRequest(int baseTopoNum, 0, /* localRegionId */ true, /* localRegionId*/ 10000 /*maxServerMemoryConsumption*/, - false /* performsWrite*/); + false /* performsWrite*/, + null /* rowMetadata */); return ((KVStoreImpl)kvs) .makeReadRequest(op, PartitionId.NULL_ID, consistency, 5000); diff --git a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/query/FailureInjectionTest.java b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/query/FailureInjectionTest.java index 439ecbbf..97d31a29 100644 --- a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/query/FailureInjectionTest.java +++ b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/query/FailureInjectionTest.java @@ -118,6 +118,7 @@ private void createTable() { USER_TABLE.getShardKey(), USER_TABLE.getFieldMap(), null, // TTL + null, /*beforeImageTTL*/ null, // limits false, 0, null, null); diff --git a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/query/shell/ShowDdlCommandTest.java b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/query/shell/ShowDdlCommandTest.java index 034a55ef..0991a6b2 100644 --- a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/query/shell/ShowDdlCommandTest.java +++ b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/query/shell/ShowDdlCommandTest.java @@ -33,7 +33,7 @@ public class ShowDdlCommandTest extends ShellTestBase { "create index idx1 on %s(" + IDX1_FIELD +")"; private final static String IDX2_FIELDS = - "info.age as Integer,info.address[].zipcode as String"; + "info.age AS Integer, info.address[].zipcode AS String"; private final static String IDX2_DDL = "create index idx2 on %s(" + IDX2_FIELDS + ")"; diff --git a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/rep/MetadataTest.java b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/rep/MetadataTest.java index 7863dac3..bd4083f2 100644 --- a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/rep/MetadataTest.java +++ b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/rep/MetadataTest.java @@ -116,6 +116,7 @@ public void testPropagationAlltoAll() { userTable.getShardKey(), userTable.getFieldMap(), null, /* TTL */ + null, /*beforeImageTTL*/ null, /* limits */ false, 0, null, @@ -173,6 +174,7 @@ public void testPropagationAlltoMasters() { userTable.getShardKey(), userTable.getFieldMap(), null, /* TTL */ + null, /*beforeImageTTL*/ null, /* limits */ false, 0, null, @@ -263,6 +265,7 @@ public void testPropagationPull() { userTable.getShardKey(), userTable.getFieldMap(), null, + null, /*beforeImageTTL*/ null, /* limits */ false, 0, null, @@ -331,6 +334,7 @@ private TableImpl addTable(TableMetadata md, TableImpl table) { 
table.getShardKey(), table.getFieldMap(), null, + null, /*beforeImageTTL*/ null, /* limits */ false, 0, null, null); diff --git a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/rep/PartitionMigrationTest.java b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/rep/PartitionMigrationTest.java index 236ae3e9..904adef0 100644 --- a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/rep/PartitionMigrationTest.java +++ b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/rep/PartitionMigrationTest.java @@ -21,12 +21,18 @@ import static org.junit.Assert.fail; import static org.junit.Assume.assumeTrue; +import java.io.IOException; +import java.net.InetSocketAddress; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; +import java.util.concurrent.ScheduledThreadPoolExecutor; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import java.util.logging.Level; @@ -42,19 +48,28 @@ import oracle.kv.RequestTimeoutException; import oracle.kv.StoreIteratorException; import oracle.kv.Value; -import oracle.kv.Value.Format; import oracle.kv.ValueVersion; import oracle.kv.Version; import oracle.kv.impl.api.AggregateThroughputTracker; +import oracle.kv.impl.api.KVStoreImpl; import oracle.kv.impl.api.table.Region; +import oracle.kv.impl.api.table.RowImpl; import oracle.kv.impl.api.table.TableBuilder; import oracle.kv.impl.api.table.TableImpl; import oracle.kv.impl.api.table.TableKey; import oracle.kv.impl.api.table.TableMetadata; +import oracle.kv.impl.fault.OperationFaultException; +import oracle.kv.impl.fault.RNUnavailableException; +import oracle.kv.impl.metadata.Metadata; import oracle.kv.impl.param.ParameterState; +import oracle.kv.impl.rep.admin.RepNodeAdminImpl; import oracle.kv.impl.rep.admin.RepNodeAdmin.PartitionMigrationState; +import oracle.kv.impl.rep.migration.MigrationManager; +import oracle.kv.impl.rep.migration.MigrationTarget; import oracle.kv.impl.rep.migration.PartitionMigrationStatus; +import oracle.kv.impl.rep.migration.TargetMonitorExecutor; import oracle.kv.impl.test.TestHook; +import oracle.kv.impl.topo.Partition; import oracle.kv.impl.topo.PartitionId; import oracle.kv.impl.topo.RepGroupId; import oracle.kv.impl.topo.RepNodeId; @@ -66,7 +81,9 @@ import oracle.kv.table.Index; import oracle.kv.table.IndexKey; import oracle.kv.table.KeyPair; +import oracle.kv.table.PrimaryKey; import oracle.kv.table.Row; +import oracle.kv.table.Table; import oracle.kv.table.TableAPI; import oracle.kv.table.TableIterator; import oracle.kv.table.TableIteratorOptions; @@ -75,6 +92,8 @@ import com.sleepycat.je.DatabaseEntry; import com.sleepycat.je.DatabaseNotFoundException; import com.sleepycat.je.rep.utilint.ServiceDispatcher.Response; +import com.sleepycat.je.rep.utilint.net.SimpleDataChannel; +import com.sleepycat.je.utilint.TestHookAdapter; import org.junit.Test; @@ -789,6 +808,234 @@ public void testClientOp() { assertNull(kvs.get(keys[9], Consistency.ABSOLUTE, 0, null)); } + /** + * Tests that the row creation time remains unchanged after executing client + * operations (PUT/DELETE) and COPY during a partition migration. + *

+ * This test triggers a migration, then single-steps the migration stream
+ * using a read hook. While the migration is running, it performs multiple
+ * client operations:
+ * - PUT operations on rows both before and after the migration cursor
+ * - DELETE operations on rows both before and after the cursor
+ * - INSERT operations on the previously deleted rows, both before and
+ *   after the cursor
+ *
+ * The test primarily verifies that:
+ * - The creation time of each retained row remains unchanged before and
+ *   after partition migration.
    + * It also verifies : + * - Rows updated are correctly reflected in the target + * - Rows deleted are no longer retrievable + * - Topology metadata is updated correctly after migration completion + */ + @Test + public void testRowCreationTimeAfterClientOps() { + config.startRepNodeServices(); + + final RepNode source = config.getRN(sourceId); + final RepNode target = config.getRN(targetId); + + kvs = KVStoreFactory.getStore(config.getKVSConfig()); + final TableAPI tableAPI = kvs.getTableAPI(); + final Table table = createTable(); + + /* The rows contain the final update to be done in the client ops */ + final Row[] rows = getRowsInPartition(table); + final int totalRows = rows.length; + final PrimaryKey[] pk = new PrimaryKey[totalRows]; + final List beforeCreationTime = new ArrayList<>(); + for (int i = 0; i < totalRows; i++) { + PrimaryKey primaryKey = table.createPrimaryKey(); + primaryKey.put("id", rows[i].get("id").asInteger().get()); + pk[i] = primaryKey; + beforeCreationTime.add(tableAPI.get(pk[i],null).getCreationTime()); + } + Arrays.sort(rows, (r1, r2) -> { + int id1 = r1.get("id").asInteger().get(); + int id2 = r2.get("id").asInteger().get(); + return Integer.compare(id1, id2); + }); + + ReadHook readHook = new ReadHook(); + source.getMigrationManager().setReadHook(readHook); + + assertEquals(PartitionMigrationState.PENDING, + target.migratePartition(p1, rg1) + .getPartitionMigrationState()); + + waitForMigrationState(target, p1, PartitionMigrationState.RUNNING); + readHook.waitForHook(); + + /* COPY Operation */ + readHook.releaseHook(); // row[0] + readHook.waitForHook(); + readHook.releaseHook(); // row[1] + readHook.waitForHook(); + readHook.releaseHook(); // row[2] + readHook.waitForHook(); + readHook.releaseHook(); // row[3] + readHook.waitForHook(); + readHook.releaseHook(); // row[4] + + /* Update row behind the cursor - PUT Operation */ + tableAPI.put(rows[0], null, null); + tableAPI.put(rows[1], null, null); + tableAPI.put(rows[2], null, null); + tableAPI.put(rows[3], null, null); + + /* Update rows ahead of cursor */ + tableAPI.put(rows[5], null, null); + tableAPI.put(rows[6], null, null); + tableAPI.put(rows[7], null, null); + + /* Delete rows before the cursor - DELETE Operation */ + tableAPI.delete(pk[0], null, null); + tableAPI.delete(pk[1], null, null); + + /* Delete rows after the cursor - DELETE Operation */ + tableAPI.delete(pk[5], null, null); + tableAPI.delete(pk[6], null, null); + + /* Inserting row before the cursor */ + tableAPI.put(rows[0], null, null); + /* Replacing the creation time of deleted row with newly inserted row */ + beforeCreationTime.set(0, tableAPI.get(pk[0], null).getCreationTime()); + + /* Inserting row after the cursor */ + tableAPI.put(rows[6], null, null); + /* Replacing the creation time of deleted row with newly inserted row */ + beforeCreationTime.set(6, tableAPI.get(pk[6], null).getCreationTime()); + + readHook.waitForHook(); + source.getMigrationManager().setReadHook(null); + readHook.releaseHook(); + waitForMigrationState(target, p1, PartitionMigrationState.SUCCEEDED); + + verifyRowAndCreationTime(tableAPI, totalRows, rows, pk, + beforeCreationTime); + + /* Update topology */ + final Topology topo = config.getTopology(); + topo.updatePartition(p1, new RepGroupId( + target.getRepNodeId().getGroupId())); + source.updateMetadata(topo); + target.updateMetadata(topo); + + verifyRowAndCreationTime(tableAPI, totalRows, rows, pk, + beforeCreationTime); + + assertEquals(2, p1.getComponent(topo).getRepGroupId().getGroupId()); + } + + 
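The single-stepping above relies on a ReadHook that parks the migration source before each record read; that hook class is defined elsewhere in PartitionMigrationTest and is not part of this hunk. As a rough illustration only, a hook with the same waitForHook()/releaseHook() shape could be built from two semaphores along these lines (the class name and the way it would be wired into the source are assumptions, not the store's actual implementation):

    import java.util.concurrent.Semaphore;

    /* Illustrative sketch only: a single-step hook with the same
     * waitForHook()/releaseHook() shape as the ReadHook used above. */
    class SingleStepReadHook {

        /* Released by the hooked thread when it reaches the hook point. */
        private final Semaphore arrived = new Semaphore(0);
        /* Released by the test to let the hooked thread continue. */
        private final Semaphore proceed = new Semaphore(0);

        /* Called from the hooked code path, e.g. before each record read. */
        void doHook() {
            arrived.release();                 /* signal "parked at the hook" */
            proceed.acquireUninterruptibly();  /* wait for the test to step */
        }

        /* Test side: block until the hooked thread is parked. */
        void waitForHook() {
            arrived.acquireUninterruptibly();
        }

        /* Test side: let the parked thread process exactly one more step. */
        void releaseHook() {
            proceed.release();
        }
    }

With this shape, the repeated waitForHook()/releaseHook() calls in the test advance the copy phase one record at a time, which is what lets the test interleave PUT and DELETE calls on either side of the migration cursor.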
/** + * Verifies the state of rows after partition migration by checking: + * - The creation time of all non-deleted rows remains unchanged after PM. + * - Deleted rows (IDs: 0, 1, 5, 6) are no longer retrievable. + * - Rows with additional updates (IDs: 2, 3, 7) have correct final value. + * - Other retained rows have their original values. + */ + private void verifyRowAndCreationTime(TableAPI tableAPI, + int totalRows, + Row[] rows, + PrimaryKey[] pk, + List beforeCreationTime) { + for (int i = 0; i < totalRows; i++) { + /* Deleted rows 1 and 5 should be null after migration */ + if (i == 1 || i == 5) { + assertNull(tableAPI.get(pk[i], null)); + } else { + /* Only row 2,3 and 7 should have the second update . + * Row 0 and 6 were added during migration with the second + * update. + */ + if (i == 2 || i == 3 || i == 7 || i == 0 || i == 6) { + assertEquals( + "SecondUpdate-" + rows[i].get("id").asInteger().get(), + tableAPI.get(pk[i], null).get("val").asString().get()); + } else { + assertEquals( + "FirstUpdate-" + rows[i].get("id").asInteger().get(), + tableAPI.get(pk[i], null).get("val").asString().get()); + } + /* Rows 0, 2, 3, 4, 6, 7, 8, 9 must have same creation time */ + assertEquals((long) beforeCreationTime.get(i), + tableAPI.get(pk[i], null).getCreationTime()); + } + } + } + + /** + * Creating a table in the store created by KVRepTestConfig. + */ + private Table createTable() { + final TableImpl USER_TABLE = + TableBuilder.createTableBuilder("User") + .addInteger("id") + .addString("val") + .primaryKey("id") + .buildTable(); + final RepNode master = config.getMaster(new RepGroupId(1), 100); + final TableMetadata md = (TableMetadata) master + .getMetadata(Metadata.MetadataType.TABLE).getCopy(); + md.addTable(USER_TABLE.getInternalNamespace(), + USER_TABLE.getName(), + USER_TABLE.getParentName(), + USER_TABLE.getPrimaryKey(), + null, // primaryKeySizes + USER_TABLE.getShardKey(), + USER_TABLE.getFieldMap(), + null, // TTL + null, /*beforeImageTTL*/ + null, // limits + false, 0, + null, null); + boolean success; + success = master.updateMetadata(md); + assertTrue(success); + return USER_TABLE; + } + + /** + * Populates 10 rows in the table that belong to the specific + * partition {@code p1}. + *

+ * Each row is inserted and then updated twice to simulate multiple
+ * modifications:
+ * - The initial value is inserted as "Value-{id}".
+ * - Then updated to "FirstUpdate-{id}".
+ * - Then updated to "SecondUpdate-{id}".
+ *

    + * Only rows that fall into the target partition {@code p1} are retained. + * + * @param table the {@code Table} instance representing table + * @return an array of {@code Row} objects after the second update, all + * belonging to partition {@code p1} + */ + private Row[] getRowsInPartition(Table table) { + final List rowsList = new ArrayList<>(); + final TableAPI tableAPI = kvs.getTableAPI(); + KVStoreImpl storeImpl = (KVStoreImpl) kvs; + int i = 0; + while (rowsList.size() < 10) { + final Row row = table.createRow(); + row.put("id", i); + row.put("val", "Value " + i); + if (storeImpl.getPartitionId(((RowImpl) row).getPrimaryKey(false)) + .equals(p1)) { + tableAPI.put(row, null, null); + row.put("val", "FirstUpdate-" + i); + tableAPI.put(row, null, null); + row.put("val", "SecondUpdate-" + i); + rowsList.add(row); + } + i++; + } + return rowsList.toArray(new Row[0]); + } + @Test public void testConflict() { /* Pausing the source may cause the sream to timeout on some systems */ @@ -1252,7 +1499,9 @@ public void testTableInterlock() { null, userTable.getShardKey(), userTable.getFieldMap(), - null, null, + null, + null, /*beforeImageTTL*/ + null, false, 0, null, null /* owner */); @@ -1344,7 +1593,9 @@ public void testAwaitIdle() throws InterruptedException { null, userTable.getShardKey(), userTable.getFieldMap(), - null, null, + null, + null, /*beforeImageTTL*/ + null, false, 0, null, null /* owner */); @@ -1444,7 +1695,9 @@ public void testIndexCreate() throws InterruptedException { null, userTable.getShardKey(), userTable.getFieldMap(), - null, null, + null, + null, /*beforeImageTTL*/ + null, false, 0, null, null /* owner */); @@ -1574,7 +1827,9 @@ public void testTableDelete() { null, userTable.getShardKey(), userTable.getFieldMap(), - null, null, + null, + null, /*beforeImageTTL*/ + null, false, 0, null, null /* owner */); @@ -1665,7 +1920,9 @@ public void testMultiRegionTable() throws InterruptedException { null, userTable.getShardKey(), userTable.getFieldMap(), - null, null, + null, + null, /*beforeImageTTL*/ + null, false, 0, null, null /* owner */, @@ -1777,9 +2034,8 @@ private boolean isTombstone(ValueVersion vv) { return false; } final Value value = vv.getValue(); - return value.getFormat() == Format.MULTI_REGION_TABLE && - Region.isMultiRegionId(value.getRegionId()) && - value.getValue().length == 0; + return Region.isMultiRegionId(value.getRegionId()) && + value.getValue().length == 0; } /** @@ -1820,7 +2076,9 @@ public void testAsyncShardIteration() throws Exception { null, userTable.getShardKey(), userTable.getFieldMap(), - null, null, + null, + null, /*beforeImageTTL*/ + null, false, 0, null, null /* owner */); @@ -2228,4 +2486,385 @@ public void testPartitionUpdateWithLaggingReplica() throws Exception { 2000); assertTopoNumbersEqualOrAfter(localizedTopo, targetUpdatedTopo); } + + /** + * Tests the situation that a master of the target shard is + * network-partitioned before the migration and came back in the middle. + * + * [KVSTORE-2276][KVSTORE-2640] + */ + @Test + public void testCheckTargetWithMasterNetworkPartitioned() throws Exception { + config.startRepNodeServices(); + /* Finds the current master. */ + final RepNode npMaster = config.getMaster(rg2, 1000); + /* + * Blocks the master's target executor so that the migration record will + * be persisted, but the target never runs. + */ + blockTargetExecution(npMaster); + logger.info(String.format("Start blocking target execution of %s", + npMaster.getRepNodeId())); + /* Starts the migration. 
*/ + final RepNode source = config.getMaster(rg1, 1000); + final RepNode oldTarget = npMaster; + final Key k1 = new KeyGenerator(source).getKeys(1)[0]; + final PartitionId k1p = source.getPartitionId(k1.toByteArray()); + logger.info(String.format("Migrate partition %s, from %s to %s", k1p, + source.getRepNodeId(), oldTarget.getRepNodeId())); + assertEquals(PartitionMigrationState.PENDING, + oldTarget.migratePartition(k1p, rg1). + getPartitionMigrationState()); + /* + * Sleep a bit to ensure that the record is written and replicated and + * the target is being blocked still. + */ + Thread.sleep(1000); + assertEquals(PartitionMigrationState.PENDING, + oldTarget.migratePartition(k1p, rg1). + getPartitionMigrationState()); + /* Sets up the send nop hook for the source master. */ + setupNopDestination(source, oldTarget); + /* Network-partition the old target. */ + blockJEHA(npMaster.getRepNodeId()); + /* Now find the new master. */ + final RepNode newTarget = + config.getMaster(rg2, 10000, Set.of(oldTarget.getRepNodeId())); + logger.info(String.format("Migrate partition %s, from %s to %s", k1p, + source.getRepNodeId(), newTarget.getRepNodeId())); + waitForMigrationState(newTarget, k1p, + PartitionMigrationState.SUCCEEDED); + Thread.sleep(2 * TargetMonitorExecutor.POLL_PERIOD * 1000); + /* + * Updates the topology to the official new one and broadcast it to the + * source which will cause an IllegalStateException if the issue was not + * fixed. + */ + final Topology newTopo = config.getTopology().getCopy(); + newTopo.updatePartition(k1p, rg2); + source.updateMetadata(newTopo); + } + + private AtomicBoolean blockJEHA(RepNodeId rnId) { + final int port = getHAPort(rnId); + logger.info(String.format("Start blocking all traffic to %s of %s", + port, rnId)); + return blockChannel(port); + } + + private int getHAPort(RepNodeId rnId) { + return config.getRN(rnId).getRepNodeParams().getHAPort(); + } + + private AtomicBoolean blockChannel(int blockingPort) { + final AtomicBoolean blocked = new AtomicBoolean(true); + tearDowns.add(() -> blocked.set(false)); + tearDowns.add(() -> SimpleDataChannel.ioHook = null); + SimpleDataChannel.ioHook = new TestHookAdapter() { + @Override + public void doIOHook(SimpleDataChannel ch) throws IOException { + final int remotePort; + try { + remotePort = + ((InetSocketAddress) ch.getRemoteAddress()).getPort(); + } catch (IOException e) { + throw new RuntimeException(e); + } + if (remotePort != blockingPort) { + return; + } + if (!blocked.get()) { + return; + } + throw new IOException("blocked"); + } + }; + return blocked; + } + + private AtomicBoolean blockTargetExecution(RepNode repNode) { + final ScheduledThreadPoolExecutor executor = + repNode.getMigrationManager().getTargetExecutor(); + return blockExecution("target execution for " + repNode.getRepNodeId(), + executor); + } + + private AtomicBoolean blockExecution(String name, + ScheduledThreadPoolExecutor executor) + { + final AtomicBoolean blocked = new AtomicBoolean(true); + tearDowns.add(() -> blocked.set(false)); + final int numThreads = executor.getCorePoolSize(); + logger.info(String.format( + "blocking target execution in %s with %s threads", + name, numThreads)); + for (int i = 0; i < numThreads; ++i) { + executor.execute(() -> { + while (blocked.get()) { + try { + Thread.sleep(1000); + } catch (InterruptedException e) { + /* ignore */ + } + } + }); + } + return blocked; + } + + private void setupNopDestination(RepNode source, RepNode target) { + tearDowns.add(() -> RepNode.SEND_RG_NOP_HOOK = null); + 
RepNode.SEND_RG_NOP_HOOK = (rn) -> { + if (!rn.getRepNodeId().equals(source.getRepNodeId())) { + return; + } + source.sendNOP(target.getRepNodeId()); + }; + } + + /** + * Tests that the source can detect target failure quickly even when the old + * target is being network-partitoned. The concern is that the + * TargetMonitorExecutor has only one thread which check one partition at a + * time. If the checks are blocking for a long time, then things could queue + * up preventing a quick failure detection. + */ + @Test + public void testMigrationFailureTargetNetworkPartitioned() + throws Exception + { + /* + * Disable stats collection as it will access a non-existing table, + * throw MetadataNotFoundException to the fault handler and cause node + * restart. + */ + disableStatsCollection(); + /* + * Increase the migration concurrency to speed up the test. We will + * migrate many partitions to overwhelm the TargetMonitorExecutor checks + * if they are blocking. + */ + increaseMigrationConcurrency(); + addPartitions(); + reduceMigrationDelay(); + config.startRepNodeServices(); + /* Sets up to fail the target migration. */ + failMigrationCompletionOnTarget(); + final RepNode source = config.getMaster(rg1, 1000); + final RepNode target = config.getMaster(rg2, 1000); + logger.info(String.format("Running migration from %s to %s", + source.getRepNodeId(), target.getRepNodeId())); + /* + * Blocks the nop execution on the target so that source will encounter + * errors duing target monitoring. + */ + blockRequestExecute(target.getRepNodeId()); + /* + * Blocks the target monitor execution on the source so that we do not + * check the target state until we change target mastership. + */ + final AtomicBoolean targetMonitorBlocked = + blockTargetMonitorExecution(source); + /* Start moving all the partitions on rg1 to rg2. */ + ensureMigrationState(PartitionMigrationState.PENDING, target); + waitMigrationState(PartitionMigrationState.ERROR, target, 10000); + /* + * Block JEHA for the target to force a mastership change. + */ + blockJEHA(target.getRepNodeId()); + final RepNode newTarget = + config.getMaster(rg2, 10000, Set.of(target.getRepNodeId())); + assertTrue("Master not found", newTarget != null); + /* + * Runs the migration again on the new target which will result in error + * because partition is not on the source. We need to re-run due to a + * migration issue. When the old target failed, error() was called and + * the record removed. When the master switch to the new target, it just + * waits there without doing anything. + */ + logger.info( + "Running new migration target on " + newTarget.getRepNodeId()); + ensureMigrationState(PartitionMigrationState.PENDING, newTarget); + /* Unblock the target monitoring. */ + targetMonitorBlocked.set(false); + /* + * Even with nop blocked, the source should detect failure pretty + * quickly and restore the partition. Checking the state at the source + * should return SUCCEEDED indicating the partition is restored instead + * of UNKNOWN. 
+ */ + Thread.sleep(20000); + ensureMigrationState(PartitionMigrationState.SUCCEEDED, source); + } + + private void disableStatsCollection() { + for (RepNode rn : config.getRNs()) { + rn.getRepNodeParams().getMap().setParameter( + ParameterState.RN_SG_ENABLED, "false"); + } + } + + private void increaseMigrationConcurrency() { + for (RepNode rn : config.getRNs()) { + rn.getRepNodeParams().getMap().setParameter( + ParameterState.RN_PM_CONCURRENT_SOURCE_LIMIT, "1000"); + rn.getRepNodeParams().getMap().setParameter( + ParameterState.RN_PM_CONCURRENT_TARGET_LIMIT, "1000"); + } + } + + private void addPartitions() { + for (int i = 0; i < 100; ++i) { + final RepGroupId rgId = (i % 2 == 0) ? rg1 : rg2; + config.getTopology().add(new Partition(rgId)); + } + } + + private void reduceMigrationDelay() { + tearDowns.add(() -> MigrationManager.MINIMUM_DELAY_OVERRIDE = -1); + MigrationManager.MINIMUM_DELAY_OVERRIDE = 0; + } + + private void failMigrationCompletionOnTarget() + { + tearDowns.add(() -> MigrationTarget.PERSIST_HOOK = null); + MigrationTarget.PERSIST_HOOK = (target) -> { + throw new RuntimeException("injected failure"); + }; + } + + private void ensureMigrationState(PartitionMigrationState expected, + RepNode rn) + { + final List partitions = config.getTopology() + .getPartitionsInShard(rg1.getGroupId(), Collections.emptySet()); + for (PartitionId p : partitions) { + final PartitionMigrationState state; + if (rn.getRepNodeId().getGroupId() == rg1.getGroupId()) { + state = rn.getMigrationState(p).getPartitionMigrationState(); + } else { + state = + rn.migratePartition(p, rg1).getPartitionMigrationState(); + } + assertEquals("wrong state for partition " + p, expected, state); + } + } + + private void waitMigrationState(PartitionMigrationState expected, + RepNode rn, + long timeoutMillis) + { + final List partitions = config.getTopology() + .getPartitionsInShard(rg1.getGroupId(), Collections.emptySet()); + for (PartitionId p : partitions) { + waitForMigrationState(rn, p, timeoutMillis, expected); + } + } + + private AtomicBoolean blockRequestExecute(RepNodeId rnId) { + final AtomicBoolean blocked = new AtomicBoolean(true); + tearDowns.add(() -> blocked.set(false)); + config.getRH(rnId).setTestNOPHook((r) -> { + if (!blocked.get()) { + return; + } + try { + Thread.sleep(1000); + } catch (InterruptedException e) { + /* ignore */ + } + throw new RNUnavailableException("blocked"); + }); + return blocked; + } + + private AtomicBoolean blockTargetMonitorExecution(RepNode repNode) { + final ScheduledThreadPoolExecutor executor = + repNode.getMigrationManager().getTargetMonitorExecutor(); + return blockExecution( + "target monitor execution for " + repNode.getRepNodeId(), executor); + } + + /** + * Tests the situation that a master of the source shard is + * network-partitoned but can still make contact with the admin to receive + * topology broadcast. + * + * [KVSTORE-2276] + */ + @Test + public void testTopoUpdateSourceMasterNetworkPartitioned() + throws Exception + { + testTopoUpdateMasterNetworkPartitioned(rg1); + } + + private void testTopoUpdateMasterNetworkPartitioned(RepGroupId npGroup) + throws Exception + { + config.startRepNodeServices(); + /* Finds the current master. */ + final RepNode npMaster = config.getMaster(npGroup, 10); + /* Network-partitions the old master. */ + blockJEHA(npMaster.getRepNodeId()); + blockPing(npMaster.getRepNodeId()); + /* Finds the source and target master and executes the migration. */ + final RepNode source = npGroup.equals(rg1) + ? 
config.getMaster(rg1, 10000, Set.of(npMaster.getRepNodeId())) + : config.getMaster(rg1, 1000); + final RepNode target = npGroup.equals(rg2) + ? config.getMaster(rg2, 10000, Set.of(npMaster.getRepNodeId())) + : config.getMaster(rg2, 1000); + final Key k1 = new KeyGenerator(source).getKeys(1)[0]; + final PartitionId k1p = source.getPartitionId(k1.toByteArray()); + logger.info(String.format("Migrate partition %s, from %s to %s", k1p, + source.getRepNodeId(), target.getRepNodeId())); + assertEquals(PartitionMigrationState.PENDING, + target.migratePartition(k1p, rg1). + getPartitionMigrationState()); + waitForMigrationState(target, k1p, PartitionMigrationState.SUCCEEDED); + /* + * Broadcasts the new topology to the isolated master which will cause + * an IllegalStateException if the issue was not fixed. + */ + final Topology newTopo = config.getTopology().getCopy(); + newTopo.updatePartition(k1p, rg2); + try { + npMaster.updateMetadata(newTopo); + } catch (OperationFaultException e) { + assertTrue(e.getMessage(), + e.getMessage().contains("not authoritative master")); + } + } + + private AtomicBoolean blockPing(RepNodeId rnId) { + final AtomicBoolean blocked = new AtomicBoolean(true); + tearDowns.add(() -> blocked.set(false)); + tearDowns.add(() -> RepNodeAdminImpl.PING_HOOK = null); + RepNodeAdminImpl.PING_HOOK = (rns) -> { + if (!blocked.get()) { + return; + } + if (!rns.getRepNodeId().equals(rnId)) { + return; + } + throw new RNUnavailableException("blocked"); + }; + return blocked; + } + + + /** + * Tests the situation that a master of the target shard is + * network-partitoned but can still make contact with the admin to receive + * topology broadcast. + * + * [KVSTORE-2276] + */ + @Test + public void testTopoUpdateTargetMasterNetworkPartitioned() + throws Exception + { + testTopoUpdateMasterNetworkPartitioned(rg2); + } } diff --git a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/rep/PartitionMigrationTestBase.java b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/rep/PartitionMigrationTestBase.java index 242a7cbe..4eb94f68 100644 --- a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/rep/PartitionMigrationTestBase.java +++ b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/rep/PartitionMigrationTestBase.java @@ -12,6 +12,7 @@ import java.util.Arrays; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.Predicate; import java.util.logging.Logger; @@ -115,10 +116,22 @@ protected boolean condition() { * is thrown. */ static public void - waitForMigrationState(final RepNode target, - final PartitionId pId, - final PartitionMigrationState... requiredStates) { - boolean success = new PollCondition(500, 20000) { + waitForMigrationState(final RepNode target, + final PartitionId pId, + final PartitionMigrationState... requiredStates) + { + waitForMigrationState(target, pId, 20000, requiredStates); + } + + static public void + waitForMigrationState(final RepNode target, + final PartitionId pId, + final long timeoutMillis, + final PartitionMigrationState... 
requiredStates) + { + final AtomicReference observedStatus = + new AtomicReference<>(null); + boolean success = new PollCondition(500, timeoutMillis) { @Override protected boolean condition() { @@ -131,6 +144,7 @@ protected boolean condition() { */ final PartitionMigrationStatus status = target.getMigrationStatus(pId); + observedStatus.set(status); if (status == null) { return false; } @@ -143,9 +157,10 @@ protected boolean condition() { return false; } }.await(); - assertTrue("wait failed for " + pId + " on " + - target.getRepNodeId().getFullName() + - ", state(s): " + Arrays.toString(requiredStates), success); + assertTrue("wait failed for " + pId + " on " + + target.getRepNodeId().getFullName() + ", state(s): " + + Arrays.toString(requiredStates) + ", last observed: " + + observedStatus.get(), success); } /* diff --git a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/rep/RepNodeServiceTest.java b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/rep/RepNodeServiceTest.java index b8a04169..5ace677b 100644 --- a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/rep/RepNodeServiceTest.java +++ b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/rep/RepNodeServiceTest.java @@ -1132,7 +1132,8 @@ public void run() { /** * Test hook to generate an environment failure exception during a request. */ - class EnvFailureExceptionHook implements TestHook { + static class EnvFailureExceptionHook implements + TestHook { int count = 0; @@ -1141,7 +1142,7 @@ class EnvFailureExceptionHook implements TestHook { } @Override - public void doHook(RepImpl arg) { + public void doHook(RequestHandlerImpl.ExecuteRequest arg) { count++; throw EnvironmentFailureException.unexpectedException diff --git a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/rep/TableTest.java b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/rep/TableTest.java index c75b85bb..e6c76e59 100644 --- a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/rep/TableTest.java +++ b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/rep/TableTest.java @@ -896,6 +896,7 @@ public void doHook(TableImpl t) throws IOException { md.evolveTable(table1, table1.numTableVersions(), newFieldMap, table1.getDefaultTTL(), + table1.getBeforeImageTTL(), table1.getDescription(), false, table1.getIdentityColumnInfo(), @@ -1165,7 +1166,9 @@ static TableImpl addTable(TableMetadata md, TableImpl table) { null, table.getShardKey(), table.getFieldMap(), - null, null, + null, + null, /*beforeImageTTL*/ + null, false, 0, null, null); } diff --git a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/rep/migration/EodSendFailureTest.java b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/rep/migration/EodSendFailureTest.java new file mode 100644 index 00000000..e9126229 --- /dev/null +++ b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/rep/migration/EodSendFailureTest.java @@ -0,0 +1,309 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. 
+ * + */ + +package oracle.kv.impl.rep.migration; + +import static org.junit.Assert.assertEquals; + +import java.io.IOException; +import java.nio.ByteBuffer; + +import oracle.kv.KVStore; +import oracle.kv.KVStoreFactory; +import oracle.kv.Key; +import oracle.kv.Value; +import oracle.kv.impl.rep.RepNode; +import oracle.kv.impl.rep.admin.RepNodeAdmin; +import oracle.kv.impl.rep.migration.generation.PartitionGenerationTestBase; +import oracle.kv.impl.topo.PartitionId; +import oracle.kv.impl.topo.RepGroupId; +import oracle.kv.impl.topo.RepNodeId; + +import org.junit.Test; + +/** + * The unit test case tests the scenario of failure in partition migration + * where the failure occurs after the local topology of the source shard has + * been updated, but the EOD is not sent. The source shard is no longer the + * owner of the partition, and due to the failure, the target shard does not + * receive the EOD and cannot take ownership of the migrating partition by + * updating its local topology. As a result, the partition is not owned by + * either the source shard or the target shard. [KVSTORE-456] + * + * The resolution to this issue is to set the migration state to ERROR instead + * of PENDING on the migration target. The ERROR state is received by the + * {@link TargetMonitorExecutor}, which fails the migration and removes the + * source migration record. This rollback restores the previous topology and + * transfers ownership of the partition back to the source shard. + * + * + * Partition migration state flow before fix : + * PENDING --> RUNNING --> PENDING + * + * Partition migration state flow after fix : + * PENDING --> RUNNING --> ERROR + * + * How the unit test works: + * There are three test hooks used in this test. + * + * - MigrationSource.eodSendFailureHook: + * Fails sending the EOD at the source side after persisting the migration + * record and updating the local topology. + * + * - TargetMonitorExecutor.assertRemoveRecordHook: + * Checks whether the TargetMonitorExecutor#failed method calls + * manager.removeRecord. + * + * - MigrationSource.noMonitorTargetHook: + * Makes the source thread exit after MigrationSource#persistTransferComplete + * without starting the TargetMonitorExecutor. + * + * Steps: + * 1. Replication node service is started. + * 2. Data is added to the store because partition migration can have different + * flows with or without data in the store. + * 3. Set the noMonitorTargetHook and eodSendFailureHook. + * + * + * 4. Start the migration for the very first time. + * 5. Verify migration failure due to EOD send failure on the source side, + * and canceling the migration at the target by calling the cancel method, + * resulting in the partition migration state to ERROR. + * 6. The cancel method calls setCanceled, which sets the canceled flag to true + * and calls cleanup. The cleanup method closes the channel and removes the + * partition db at the target. + * 7. Since the Reader thread dropped the partition, executing an operation on + * the partition db results in: + * "java.lang.IllegalStateException: Database was closed." + * 8. There is no try/catch in consumeOps, so this IllegalStateException (which + * in the earlier flow was catching an IOException) propagates to + * runMigration, which catches it in its catch Exception block. + * 9. This catch Exception block calls error(), which calls setCanceled and then + * manager.removeRecord, removing the target migration record from the + * migration db. + * 10. 
The finally block in runMigration returns to its caller, + * MigrationTarget.call. + * 11. Since waitTime < 0, control breaks out of the while loop, and + * MigrationTarget's call method returns null. + * 12. TargetExecutor's afterExecute will not re-schedule the target since null + * was returned by the target thread. + * 13. We are querying for the migration status, calling + * MigrationManager.getMigrationState, which detects that the target is null + * and TargetRecord is null (because manager.removeRecord was called). So, + * getMigrationState returns UNKNOWN repeatedly for a minute. + * 14. Even though the source thread has exited, the TargetMonitor thread in the + * source checks the completed source migration record and sees its + * corresponding target state as UNKNOWN. + * 15. After **1 minute**, the migratePartition task is re-scheduled/re-run. + * 16. The partition migration state change to ERROR is not recognized by the + * TargetMonitorExecutor because it was not initialized. + * 17. Assert that the partition was not owned by the source or target shard. + * + * + * 18. Unset the noMonitorTargetHook. + * 19. Start partition migration for the second time. Since noMonitorTargetHook + * is unset, TargetMonitorExecutor sees the target state as ERROR, leading + * to the failed method being called. The failed method calls + * manager.removeRecord, removing the source migration record and updating + * the local topology to rollback ownership of the partition to the + * source shard. + * 20. The assertRemoveRecordHook confirms that manager.removeRecord was called, + * indicating that the code flow has changed. + * 21. Assert that the partition was now owned by the source shard and not the + * target shard, confirming that the fix worked. + * + * + * 22. Unset the eodSendFailureHook. + * 23. Start the partition migration for third time. Since no error was + * injected, the migration completes normally. + * 24. Assert that the partition is now owned by the target shard and not the + * source shard. + */ + +public class EodSendFailureTest extends PartitionGenerationTestBase { + + private static final PartitionId PARTITION_ID = p1; + + @Override + public void setUp() throws Exception { + super.setUp(); + } + + @Override + public void tearDown() throws Exception { + MigrationSource.eodSendFailureHook = null; + MigrationSource.noMonitorTargetHook = null; + TargetMonitorExecutor.checkRemoveRecordHook = null; + super.tearDown(); + } + + @Test + public void testEodSendFailure() { + /* RepNode service is started */ + config.startRepNodeServices(); + + /* Data is added in the store */ + addDataInStore(); + + /* Setting the hooks required in the first migration run */ + setNoMonitorTargetTestHook(); + setEodSendFailureTestHook(); + + /* Start the migration for the first time */ + startMigration(sourceId, targetId); + trace("Migration started for pid=" + PARTITION_ID); + + /* This will set the migration state to ERROR. But since the + * TargetMonitorExecutor is not initialised this will not be seen by it + * and it will go through another flow as described above steps. + */ + verifyMigrationFailure(sourceId, targetId); + trace("Migration failed for pid=" + PARTITION_ID); + + MigrationSource.noMonitorTargetHook = null; + trace("No monitor target hook unset"); + + setCheckRemoveRecordTestHook(); + + /* Start the migration for the second time */ + startMigration(sourceId, targetId); + trace("Migration started for pid=" + PARTITION_ID); + + /* This will set the migration state to ERROR. 
Since the + * TargetMonitorExecutor is initialised this will be seen by it + * and it will restore the topology of the source shard transferring + * back the ownership of the partition + */ + verifyMigrationFailure(sourceId, targetId); + trace("Migration failed for pid=" + PARTITION_ID); + + MigrationSource.eodSendFailureHook = null; + trace("Eod send failure hook unset"); + + /* Start the migration for the third time */ + startMigration(sourceId, targetId); + trace("Migration started for pid=" + PARTITION_ID); + + verifyMigrationComplete(sourceId, targetId); + trace("Done migrating pid=" + PARTITION_ID + + " from shard=" + sourceId + " to shard=" + targetId); + } + + private void setCheckRemoveRecordTestHook() { + TargetMonitorExecutor.checkRemoveRecordHook = pid1 -> { + if (!PARTITION_ID.equals(pid1)) { + /* not my partition */ + return; + } + trace("Pointer reached failed method in TargetMonitorExecutor"); + }; + } + + private void setEodSendFailureTestHook() { + MigrationSource.eodSendFailureHook = pid1 -> { + if (!PARTITION_ID.equals(pid1)) { + /* not my partition */ + return; + } + trace("Failing sending EOD for partition: " + pid1); + try { + throw new IOException(); + } catch (IOException e) { + throw new RuntimeException(e); + } + }; + } + + private void setNoMonitorTargetTestHook() { + MigrationSource.noMonitorTargetHook = pid1 -> { + if (!PARTITION_ID.equals(pid1)) { + /* not my partition */ + return; + } + trace("No monitor target hook set"); + }; + } + + + private void startMigration(RepNodeId srcId, RepNodeId tgtId) { + + trace("Start migrate pid=" + PARTITION_ID + " from=" + srcId + " t0" + + tgtId); + + final RepGroupId srcGroupId = new RepGroupId(srcId.getGroupId()); + final RepNode target = config.getRN(tgtId); + assertEquals(RepNodeAdmin.PartitionMigrationState.PENDING, + target.migratePartition(PARTITION_ID, srcGroupId) + .getPartitionMigrationState()); + } + + private void verifyMigrationFailure(RepNodeId srcId, RepNodeId tgtId) { + + final RepNode source = config.getRN(srcId); + final RepNode target = config.getRN(tgtId); + + /* The sendEOD failure should have set the migration state to ERROR */ + waitForMigrationState(target, PARTITION_ID, + RepNodeAdmin.PartitionMigrationState.ERROR); + + if (MigrationSource.noMonitorTargetHook != null) { + + /* In the first run, noMonitorTargetHook is set, so + * the partition is not restored instantly. It must + * wait for rescheduling, and it won't be owned by + * either the source or the target. + */ + waitForPartition(target, PARTITION_ID, false); + waitForPartition(source, PARTITION_ID, false); + + } else { + + /* In case of second run, noMonitorTargetHook is not + * set, so the TargetMonitorExecutor will see the + * partition migration state as error and will call + * the fail method, which in turn will remove the + * migration record and update the local topology, + * transferring the ownership back to the source. 
+ */ + + waitForPartition(target, PARTITION_ID, false); + waitForPartition(source, PARTITION_ID, true); + } + } + + private void verifyMigrationComplete(RepNodeId srcId, RepNodeId tgtId) { + final RepNode source = config.getRN(srcId); + final RepNode target = config.getRN(tgtId); + + waitForMigrationState(target, PARTITION_ID, + RepNodeAdmin.PartitionMigrationState.SUCCEEDED); + + /* The source and target should have changed their partition map */ + waitForPartition(target, PARTITION_ID, true); + waitForPartition(source, PARTITION_ID, false); + + /* Should be able to call again and get success */ + assertEquals(RepNodeAdmin.PartitionMigrationState.SUCCEEDED, + target.getMigrationState(PARTITION_ID). + getPartitionMigrationState()); + } + + /** + * Add dummy data in the store + */ + private void addDataInStore() { + KVStore kvStore = KVStoreFactory.getStore(config.getKVSConfig()); + int value = 1; + for (int i = 1; i <= 1000; i++, value++) { + Key key = Key.createKey(String.valueOf(i)); + byte[] byteArray = ByteBuffer.allocate(4).putInt(value).array(); + Value value1 = Value.createValue(byteArray); + kvStore.put(key, value1); + } + trace("Added data in store"); + } +} diff --git a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/rep/migration/RowCreationTimeTest.java b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/rep/migration/RowCreationTimeTest.java new file mode 100644 index 00000000..aa5224e5 --- /dev/null +++ b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/rep/migration/RowCreationTimeTest.java @@ -0,0 +1,318 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + */ + +package oracle.kv.impl.rep.migration; + +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.concurrent.ThreadLocalRandom; +import java.util.logging.FileHandler; + +import oracle.kv.Consistency; +import oracle.kv.Durability; +import oracle.kv.KVStoreFactory; +import oracle.kv.Version; +import oracle.kv.impl.admin.CommandServiceAPI; +import oracle.kv.impl.api.KVStoreImpl; +import oracle.kv.impl.api.table.RowImpl; +import oracle.kv.impl.api.table.TableImpl; +import oracle.kv.impl.api.table.TableTestBase; +import oracle.kv.impl.test.TestStatus; +import oracle.kv.impl.util.TestUtils; +import oracle.kv.impl.util.TopologyPrinter; +import oracle.kv.table.PrimaryKey; +import oracle.kv.table.ReadOptions; +import oracle.kv.table.Table; +import oracle.kv.table.TableAPI; +import oracle.kv.table.WriteOptions; +import oracle.kv.util.CreateStore; +import oracle.nosql.common.contextlogger.LogFormatter; + +import org.junit.Test; + +/** + * This is a integration unit test that verifies the consistency of row + * creation times before and after a partition migration. It uses createStore + * to set up the test environment. + *

    + * The test performs the following steps: + * 1. Inserts and updates rows in a table prior to migration. + * 2. Initiates a partition migration via a topology change. + * 3. Performs concurrent updates to the rows during the partition migration. + * 4. Validates that the row creation times remain unchanged after migration. + *

+ * This test ensures that partition migration preserves row creation time
+ * metadata even under a concurrent write workload.
before and after migration"); + /* Assert that creation time list before and after are of equal size */ + assertEquals("The before and after creation time list do not have the" + + " same size", + beforeMigrationRowsCreationTime.size(), + afterMigrationRowsCreationTime.size() + ); + + /* Assert that both lists contain the same values at each position */ + for (int i = 0; i < beforeMigrationRowsCreationTime.size(); i++) { + assertEquals("Values at row " + i + 1 + " do not match", + beforeMigrationRowsCreationTime.get(i), + afterMigrationRowsCreationTime.get(i)); + } + trace("Creation time of rows before and after migration are same"); + } + + /** + * Create a random row with given ID + */ + private RowImpl makeRandomRow(TableImpl table, int which) { + final RowImpl row = table.createRow(); + row.put("id", which); + row.put("firstName", + "FirstName-" + ThreadLocalRandom.current().nextInt(1, 1000)); + row.put("lastName", + "lastName-" + ThreadLocalRandom.current().nextInt(1, 1000)); + row.put("age", ThreadLocalRandom.current().nextInt(20, 80)); + return row; + } + + + /** + * Insert initial rows and capture their creation time before PM. + */ + private void insertRowsIntoTable(Table tbl) { + final TableAPI tableAPI = store.getTableAPI(); + Table table = tableAPI.getTable(tbl.getName()); + PrimaryKey pk = table.createPrimaryKey(); + for (int i = 0; i < NUM_INIT_ROWS; i++) { + RowImpl row = makeRandomRow((TableImpl) tbl, i); + tableAPI.putIfAbsent(row, null, WRITE_OPTIONS); + pk.put("id", i); + row = (RowImpl) tableAPI.get(pk, READ_OPTIONS); + beforeMigrationRowsCreationTime.add(row.getCreationTime()); + } + } + + /** + * Perform update workload before migration + */ + private void updateWorkLoad(Table tbl) { + final TableAPI tableAPI = store.getTableAPI(); + for (int i = 0; i < NUM_INIT_ROWS; i++) { + final RowImpl row = makeRandomRow((TableImpl) tbl, i); + final Version ver = tableAPI.putIfPresent(row, null, WRITE_OPTIONS); + assertNotNull("Fail to update row=" + row.toJsonString(false), ver); + } + } + + /** + * Set up background writer thread for concurrent updates + */ + private void setUpConcurrentWriteThread(TableAPI tableAPI, + String tableName) { + final Table tblForThread = tableAPI.getTable(tableName); + writerThread = new Thread(() -> { + try { + int counter = 0; + while (!Thread.currentThread().isInterrupted()) { + int id = counter % NUM_INIT_ROWS; + RowImpl row = makeRandomRow((TableImpl) tblForThread, id); + final Version ver = tableAPI.putIfPresent(row, + null, + WRITE_OPTIONS); + assertNotNull("Fail to update row=" + + row.toJsonString(false), ver); + /* + * It provides a reliable point for the thread to detect + * interrupt() signals and exit gracefully, preventing + * it from running indefinitely during long-running + * operations. 
+ */ + Thread.sleep(1); + counter++; + } + } catch (InterruptedException e) { + /* Allow thread to exit gracefully */ + Thread.currentThread().interrupt(); + } catch (Exception e) { + fail("Exception in writer thread: " + e.getMessage()); + } + }); + } + + /** + * Fetch rows after migration and capture their creation time + */ + private void getRowsFromTableAfterMigration(Table tbl) { + final TableAPI tableAPI = store.getTableAPI(); + for (int i = 0; i < NUM_INIT_ROWS; i++) { + Table table = tableAPI.getTable(tbl.getName()); + PrimaryKey pk = table.createPrimaryKey(); + pk.put("id", i); + RowImpl row = (RowImpl) tableAPI.get(pk, READ_OPTIONS); + afterMigrationRowsCreationTime.add(row.getCreationTime()); + assertTrue(row.getCreationTime() != row.getLastModificationTime()); + } + } + + private void trace(String msg) { + logger.info(msg); + + if (trace_on_screen) { + System.out.println(msg); + } + } + + private void traceRowsFromTable(Table tbl) { + final TableAPI tableAPI = store.getTableAPI(); + KVStoreImpl storeImpl = (KVStoreImpl) store; + int rowsToPrint = 100; + for (int i = 0; i < rowsToPrint; i++) { + Table table = tableAPI.getTable(tbl.getName()); + PrimaryKey pk = table.createPrimaryKey(); + pk.put("id", i); + RowImpl row = (RowImpl) tableAPI.get(pk, READ_OPTIONS); + trace("row : " + (i + 1) + " : " + row.toString()); + trace("Creation time: " + row.getCreationTime()); + trace("Modification time: " + row.getLastModificationTime()); + trace("PartitionId : " + + storeImpl.getPartitionId(row.getPrimaryKey(false))); + } + } + + private void addLoggerFileHandler() throws IOException { + final String fileName = "testlog"; + final String path = TestUtils.getTestDir().getAbsolutePath(); + final File loggerFile = new File(new File(path), fileName); + final FileHandler handler = + new FileHandler(loggerFile.getAbsolutePath(), false); + handler.setFormatter(new LogFormatter(null)); + tearDowns.add(() -> logger.removeHandler(handler)); + logger.addHandler(handler); + logger.info("Add test log file handler: path=" + path + + ", log file name=" + fileName + + ", file exits?=" + loggerFile.exists()); + } +} + diff --git a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/rep/migration/generation/PartitionGenerationTestBase.java b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/rep/migration/generation/PartitionGenerationTestBase.java index ec94b775..55bf2520 100644 --- a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/rep/migration/generation/PartitionGenerationTestBase.java +++ b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/rep/migration/generation/PartitionGenerationTestBase.java @@ -44,7 +44,7 @@ public class PartitionGenerationTestBase extends RepNodeTestBase { protected static final PartitionId p3 = new PartitionId(3); protected static final PartitionId p5 = new PartitionId(5); protected static final RepGroupId rg1 = new RepGroupId(1); - static final RepNodeId sourceId = new RepNodeId(1, 1); + protected static final RepNodeId sourceId = new RepNodeId(1, 1); protected static final RepNodeId targetId = new RepNodeId(2, 1); static final RepNodeId rg1Master = new RepNodeId(1, 1); static final RepNodeId rg2Master = new RepNodeId(2, 1); @@ -133,15 +133,23 @@ protected void trace(String msg) { * If the wait times out without reaching the state an assertion error * is thrown. 
*/ - void waitForMigrationState(RepNode rn, - PartitionId pId, - RepNodeAdmin.PartitionMigrationState st) { - boolean success = new PollCondition(1000, 30000) { + protected void waitForMigrationState( + RepNode rn, + PartitionId pId, + RepNodeAdmin.PartitionMigrationState st) { + + final int[] timeElapsedMs = {0}; + boolean success = new PollCondition(1000, 100000) { @Override protected boolean condition() { final PartitionMigrationStatus status = rn.getMigrationStatus(pId); + timeElapsedMs[0] += 1000; + if (timeElapsedMs[0] % 10000 == 0) { + trace("Time elapsed in polling : " + + timeElapsedMs[0] / 1000 + "s"); + } return status != null && status.getState().equals(st); } }.await(); @@ -154,7 +162,8 @@ protected boolean condition() { assert (success); } - void waitForPartition(RepNode rn, PartitionId pId, boolean present) { + protected void waitForPartition(RepNode rn, PartitionId pId, + boolean present) { boolean success = new PollCondition(500, 15000) { @Override diff --git a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/rep/stats/PartitionScanTest.java b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/rep/stats/PartitionScanTest.java index 4f26f7b1..d0f8e385 100644 --- a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/rep/stats/PartitionScanTest.java +++ b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/rep/stats/PartitionScanTest.java @@ -189,7 +189,9 @@ private void addSysTable(RepNode rn, Class c, TableMetadata md) { null, table.getShardKey(), table.getFieldMap(), - null, null, + null, + null, /*beforeImageTTL*/ + null, false, 0, null, null/* owner */, true /* sysTable */, null /* identity col */, diff --git a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/rep/stats/PartitionSizeLimitTest.java b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/rep/stats/PartitionSizeLimitTest.java index 5179e632..7457859f 100644 --- a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/rep/stats/PartitionSizeLimitTest.java +++ b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/rep/stats/PartitionSizeLimitTest.java @@ -15,27 +15,38 @@ import org.junit.Test; import java.rmi.RemoteException; +import java.util.Arrays; import java.util.Random; import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; import java.util.function.Function; +import java.util.stream.StreamSupport; +import oracle.kv.ExecutionFuture; import oracle.kv.KVStore; +import oracle.kv.KVStoreConfig; import oracle.kv.KVStoreFactory; +import oracle.kv.StatementResult; import oracle.kv.TablePartitionSizeLimitException; import oracle.kv.TestBase; import oracle.kv.impl.admin.CommandServiceAPI; +import oracle.kv.impl.api.KVStoreImpl; import oracle.kv.impl.api.table.TableBuilder; import oracle.kv.impl.api.table.TableImpl; import oracle.kv.impl.api.table.TableLimits; import oracle.kv.impl.param.ParameterMap; import oracle.kv.impl.param.ParameterState; +import oracle.kv.impl.rep.table.ResourceCollector; import oracle.kv.impl.rep.table.ResourceCollector.TopCollector; import oracle.kv.impl.systables.TableStatsPartitionDesc; +import oracle.kv.impl.util.CommonLoggerUtils; +import oracle.kv.impl.util.SpeedyTTLTime; import oracle.kv.table.PrimaryKey; import oracle.kv.table.Row; +import oracle.kv.table.Table; import oracle.kv.table.TableAPI; import oracle.kv.table.TableIterator; +import oracle.kv.table.TimeToLive; import oracle.kv.util.CreateStore; import oracle.nosql.common.sklogger.ScheduleStart; @@ -549,4 +560,175 @@ private void setPolicies(CreateStore cstore) { map.setParameter(ParameterState.RN_SG_SLEEP_WAIT, "50 ms"); 
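/* RN_SG_* are the RepNode statistics-gathering parameters; the short, test-only values in this policy map let the key-stats scan (KeyStatsCollector) refresh partition statistics quickly. */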
cstore.setPolicyMap(map); } + + /** + * Tests that the partition size limit check works with TTL. + * + * The approach of computing size from the write delta is inaccurate with + * TTL present (see ResourceCollector.sizeDeltaMap). However, it still + * works eventually by reading from the system table, which is updated by + * a full scan (see KeyStatsCollector#scan). + * + * Test steps: + * + * - Create a table with a size limit of 1G, and a partition size limit of + * 1M via partitionSizeLimitScaling. + * - Insert records with a TTL into a single shard until the partition size + * limit is exceeded. Use SpeedyTTLTime so that TTL time passes at a much + * faster rate than real time. + * - Wait until all rows expire. + * - Inserting rows again should then succeed. + * + * [KVSTORE-2711] + */ + @Test + public void testPartitionSizeLimitCheckWithTTL() throws Exception { + logger.fine("test started"); + KeyStatsCollector.testIgnoreMinimumDurations = true; + ResourceCollector.partitionSizeLimitScaling = 1000; + tearDowns.add(() -> { + KeyStatsCollector.testIgnoreMinimumDurations = false; + ResourceCollector.partitionSizeLimitScaling = 1; + }); + final int port = 5000; + final int partitionSizeLimit = 1 * 1024 * 1024; + createStore = new CreateStore(kvstoreName, port, 1 /* Storage nodes */, + 1 /* rf */, 10 /* partitions */, 1 /* capacity */, + CreateStore.MB_PER_SN, true /* useThreads */, null /* mgmtImpl */); + /* Enable stats */ + setPartitionSizeLimitPolicies(createStore); + createStore.start(); + logger.fine("store started"); + /* Prepare to wait for the stats thread to notice the user table */ + final CompletableFuture foundUserTables = + new CompletableFuture<>(); + KeyStatsCollector.foundUserTablesTestHook = + v -> foundUserTables.complete(null); + /* Create table */ + final KVStore kvstore = KVStoreFactory.getStore( + new KVStoreConfig(kvstoreName, String.format("localhost:%s", port))); + createSkewTable(kvstore, partitionSizeLimit); + logger.fine("table created"); + final TableAPI api = kvstore.getTableAPI(); + final Table table = api.getTable("users"); + /* Before inserting, make JE TTL treat one real second as one hour. */ + final int fakeMillisPerHour = 1000; + final SpeedyTTLTime speedyTime = new SpeedyTTLTime(fakeMillisPerHour); + speedyTime.start(); + /* Wait for the stats collector to notice the user table. */ + foundUserTables.get(120, TimeUnit.SECONDS); + insertUntilLimit(api, table, partitionSizeLimit, + TimeToLive.ofHours(10)); + logger.fine("inserted rows until limit"); + printUserStatsLine(api, TableStatsPartitionDesc.TABLE_NAME); + assertTrue("number of rows in the users table should not be zero", + getNumUserRows(api) != 0); + logger.fine("sleep until rows expired and stats scan happened"); + Thread.sleep(25 * 1000); + printUserStatsLine(api, TableStatsPartitionDesc.TABLE_NAME); + assertTrue("number of rows in the users table should be zero", + getNumUserRows(api) == 0); + logger.fine("all rows expired"); + /* Inserting more rows should succeed.
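+ * The earlier rows have expired and the full-scan stats update has brought + * the recorded partition size back under the scaled limit, so the size + * check passes again.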
*/ + insertRows(api, table, 0, 1000, new String("name"), + TimeToLive.ofHours(10)); + assertTrue("number of rows in the users table should not be zero", + getNumUserRows(api) != 0); + } + + private void setPartitionSizeLimitPolicies(CreateStore cstore) { + ParameterMap map = new ParameterMap(); + map.setParameter(ParameterState.AP_CHECK_ADD_INDEX, "1 s"); + map.setParameter(RN_SG_ENABLED, "true"); + map.setParameter(RN_SG_INCLUDE_STORAGE_SIZE, "true"); + map.setParameter(ParameterState.RN_SG_INTERVAL, "10 s"); + map.setParameter(ParameterState.RN_SG_SIZE_UPDATE_INTERVAL, "2 s"); + map.setParameter(ParameterState.RN_SG_LEASE_DURATION, "5 s"); + map.setParameter(ParameterState.RN_SG_SLEEP_WAIT, "1 s"); + map.setParameter(ParameterState.RN_PARTITION_SIZE_PERCENT, "1"); + cstore.setPolicyMap(map); + } + + private void createSkewTable(KVStore kvstore, int partitionSizeLimit) + throws Exception { + + final String ddl = "create table users " + + "(shardid integer, id integer, name String, " + + " primary key(shard(shardid), id))"; + final int kb = 1024; + /* Sets the throughput limit so that we hit the size limit first. */ + final TableLimits limits = + new TableLimits(TableLimits.NO_LIMIT /* readLimit */, + 5 * partitionSizeLimit / kb /* writeLimit */, + 1 /* sizeLimit in gb */); + final ExecutionFuture future = ((KVStoreImpl) kvstore) + .execute(ddl.toCharArray(), null, limits); + final StatementResult res = future.get(); + assertTrue(res.isSuccessful()); + } + + private void insertUntilLimit(TableAPI api, + Table table, + int partitionSizeLimit, + TimeToLive ttl) { + final int nameBytesLen = 1000; + final byte[] nameBytes = new byte[nameBytesLen]; + Arrays.fill(nameBytes, (byte) 'a'); + final String name = new String(nameBytes); + try { + insertRows(api, table, 0, 5 * partitionSizeLimit / nameBytesLen, + name, ttl); + fail("Expected failure with partition size check"); + } catch (TablePartitionSizeLimitException e) { + logger.fine("insert got exception: " + + CommonLoggerUtils.getStackTrace(e)); + } + } + + private void insertRows(TableAPI api, + Table table, + int startId, + int numRows, + String name, + TimeToLive ttl) { + for (int id = startId; id < startId + numRows; ++id) { + final Row row = table.createRow(); + row.put("shardid", 1); + row.put("id", id); + row.put("name", name); + row.setTTL(ttl); + api.put(row, null, null); + } + } + + private void printUserStatsLine(TableAPI api, String tableName) { + final PrimaryKey pk = waitForTable(api, tableName).createPrimaryKey(); + final TableIterator iter = api.tableIterator(pk, null, null); + try { + while (iter.hasNext()) { + final Row row = iter.next(); + if (row.get(TableStatsPartitionDesc.COL_NAME_TABLE_NAME) + .asString().get().equals("users")) + { + logger.fine(row.toJsonString(false)); + break; + } + } + } finally { + iter.close(); + } + } + + private long getNumUserRows(TableAPI api) { + final PrimaryKey pk = waitForTable(api, "users").createPrimaryKey(); + pk.put("shardid", 1); + + final TableIterator iter = api.tableIterator(pk, null, null); + final Iterable it = () -> iter; + try { + return StreamSupport.stream(it.spliterator(), false).count(); + } finally { + iter.close(); + } + } } diff --git a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/rep/stats/StorageStatTest.java b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/rep/stats/StorageStatTest.java index 0052f7bb..e317ce32 100644 --- a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/rep/stats/StorageStatTest.java +++ 
b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/rep/stats/StorageStatTest.java @@ -364,7 +364,9 @@ private void addSysTable(RepNode rn, Class c, TableMetadata md) { null, table.getShardKey(), table.getFieldMap(), - null, null, + null, + null, /*beforeImageTTL*/ + null, false, 0, null, null/* owner */, true /* sysTable */, null /* identity col */, @@ -384,7 +386,9 @@ private void addUserTable(RepNode rn, TableMetadata md) { null, table.getShardKey(), table.getFieldMap(), - null, null, + null, + null, /*beforeImageTTL*/ + null, false, 0, null, null/* owner */, false /* not sysTable */, null /* identity col */, diff --git a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/rep/stats/TableIndexScanTest.java b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/rep/stats/TableIndexScanTest.java index 59081c18..045804b3 100644 --- a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/rep/stats/TableIndexScanTest.java +++ b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/rep/stats/TableIndexScanTest.java @@ -192,7 +192,9 @@ private void addTable(RepNode rn, Table table, TableMetadata md) { null, table.getShardKey(), ((TableImpl)table).getFieldMap(), - null, null, + null, + null, /*beforeImageTTL*/ + null, false, 0, null, null/* owner */); rn.updateMetadata(clone(md)); @@ -210,7 +212,9 @@ private void addSysTable(RepNode rn, Class c, TableMetadata md) { null, table.getShardKey(), table.getFieldMap(), - null, null, + null, + null, /*beforeImageTTL*/ + null, false, 0, null, null/* owner */, true /* sysTable */, null /* identity col */, diff --git a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/security/PrivilegeTest.java b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/security/PrivilegeTest.java index c5e961d8..4da0884b 100644 --- a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/security/PrivilegeTest.java +++ b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/security/PrivilegeTest.java @@ -15,6 +15,7 @@ import static oracle.kv.util.TestUtils.checkAll; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -37,6 +38,8 @@ import oracle.kv.impl.security.login.LoginToken; import oracle.kv.impl.security.login.SessionId; import oracle.kv.impl.util.RateLimitingLogger; +import oracle.kv.table.TableAPI; + import org.junit.Test; public class PrivilegeTest extends TestBase { @@ -223,6 +226,30 @@ public void testImplication() { privsImpliedByDeleteAnyTable); } + /* + * Tests that all table privilege label has an implying namespace privilege + * label defined and the implying privileges of corresponding table + * privilege have all implying privileges of the namespace privilege. + */ + @Test + public void testTableNamespacePrivsImplication() { + for (KVStorePrivilegeLabel label : KVStorePrivilegeLabel.values()) { + if (label.getType() == KVStorePrivilege.PrivilegeType.TABLE) { + KVStorePrivilegeLabel nsLabel = + TablePrivilege.implyingNamespacePrivLabel(label); + assertNotNull(nsLabel); + TablePrivilege tbPriv = TablePrivilege.get( + label, 0, TableAPI.SYSDEFAULT_NAMESPACE_NAME, "foo"); + NamespacePrivilege nsPriv = NamespacePrivilege.get( + nsLabel, TableAPI.SYSDEFAULT_NAMESPACE_NAME); + + assertTrue(Arrays.asList(tbPriv.implyingPrivileges()) + .containsAll(Arrays.asList( + nsPriv.implyingPrivileges()))); + } + } + } + /* * Tests that an ExecutionContext has all privileges defined by the roles * of its user subject. 
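As a concrete sketch of what the new testTableNamespacePrivsImplication loop asserts, take the table-level read privilege: its implying-privilege set must contain every privilege implied by the corresponding namespace-level privilege. The READ_TABLE/READ_IN_NAMESPACE pairing used here is an assumed example, not taken from the patch:

    TablePrivilege readTable = TablePrivilege.get(
        KVStorePrivilegeLabel.READ_TABLE, 0 /* table id */,
        TableAPI.SYSDEFAULT_NAMESPACE_NAME, "foo");
    NamespacePrivilege readNs = NamespacePrivilege.get(
        KVStorePrivilegeLabel.READ_IN_NAMESPACE,
        TableAPI.SYSDEFAULT_NAMESPACE_NAME);
    assertTrue(Arrays.asList(readTable.implyingPrivileges())
                   .containsAll(Arrays.asList(readNs.implyingPrivileges())));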
diff --git a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/util/KVRepTestConfig.java b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/util/KVRepTestConfig.java index 2b774fb0..d7edb106 100644 --- a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/util/KVRepTestConfig.java +++ b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/util/KVRepTestConfig.java @@ -545,9 +545,21 @@ public RequestHandlerImpl getRH(RepNodeId rnId) { */ public oracle.kv.impl.rep.RepNode getMaster(RepGroupId rgId, long timeoutMillis) + { + return getMaster(rgId, timeoutMillis, Collections.emptySet()); + } + + /** + * Returns the current master of the provided rep group. Returns + * {@code null} if no master found within the provided timeout. + */ + public oracle.kv.impl.rep.RepNode getMaster(RepGroupId rgId, + long timeoutMillis, + Set excluded) { final List targetRNs = getRNs().stream() .filter((rn) -> rn.getRepNodeId().getGroupId() == rgId.getGroupId()) + .filter((rn) -> !excluded.contains(rn.getRepNodeId())) .collect(Collectors.toList()); final AtomicReference master = new AtomicReference<>(null); diff --git a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/util/SerialTestUtils.java b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/util/SerialTestUtils.java index f3bff1a5..64c82a09 100644 --- a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/util/SerialTestUtils.java +++ b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/util/SerialTestUtils.java @@ -483,6 +483,7 @@ void checkSerialVersion(T object, } catch (IOException e) { throw new RuntimeException("Unexpected exception: " + e, e); } catch (IllegalStateException e) { + } catch (IllegalArgumentException e) { } } diff --git a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/util/SpeedyTTLTime.java b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/util/SpeedyTTLTime.java new file mode 100644 index 00000000..600c81b1 --- /dev/null +++ b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/util/SpeedyTTLTime.java @@ -0,0 +1,75 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + */ + +package oracle.kv.impl.util; + +import java.io.IOException; + +import com.sleepycat.je.dbi.TTL; +import com.sleepycat.je.util.TimeSupplier; +import com.sleepycat.je.utilint.TestHook; + +/** + * Sets a TTL.timeTestHook that provides a time that elapses at a different + * rate than normal. Every fakeMillisPerHour after calling this method, JE + * TTL processing will behave as if one hour has elapsed. 
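+ * + * For example (illustrative, with fakeMillisPerHour = 1000): a record + * written with a one-hour TTL expires after roughly one to two real + * seconds, since realTimeToFakeTime advances the fake clock by + * TTL.MILLIS_PER_HOUR for every full fakeMillisPerHour of real elapsed + * time (partial units are truncated by the integer division) and JE + * tracks TTL expiration at hour granularity.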
+ * + * In unit tests using this class, add the following to tearDown: + * TTL.setTimeTestHook(null); + */ +public class SpeedyTTLTime { + + private final long fakeMillisPerHour; + private long baseTime; + + public SpeedyTTLTime(final long fakeMillisPerHour) { + this.fakeMillisPerHour = fakeMillisPerHour; + } + + public long realTimeToFakeTime(final long realTime) { + + assert realTime > baseTime; + + final long elapsed = realTime - baseTime; + + return baseTime + + (TTL.MILLIS_PER_HOUR * (elapsed / fakeMillisPerHour)); + + } + + public void start() { + baseTime = TimeSupplier.currentTimeMillis(); + + TTL.setTimeTestHook(new TestHook() { + + @Override + public Long getHookValue() { + return realTimeToFakeTime(TimeSupplier.currentTimeMillis()); + } + + @Override + public void hookSetup() { + throw new UnsupportedOperationException(); + } + + @Override + public void doIOHook() throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public void doHook() { + throw new UnsupportedOperationException(); + } + + @Override + public void doHook(Long obj) { + throw new UnsupportedOperationException(); + } + }); + } +} diff --git a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/util/SpeedyTTLTimeTest.java b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/util/SpeedyTTLTimeTest.java new file mode 100644 index 00000000..5356990e --- /dev/null +++ b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/util/SpeedyTTLTimeTest.java @@ -0,0 +1,77 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + */ + +package oracle.kv.impl.util; + +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import oracle.kv.KVStore; +import oracle.kv.KVStoreConfig; +import oracle.kv.KVStoreFactory; +import oracle.kv.StatementResult; +import oracle.kv.TestBase; +import oracle.kv.table.PrimaryKey; +import oracle.kv.table.Row; +import oracle.kv.table.Table; +import oracle.kv.table.TableAPI; +import oracle.kv.table.TimeToLive; +import oracle.kv.util.CreateStore; + +import org.junit.Test; + +/** + * Ensures the SpeedyTTLTime works with TTL. + */ +public class SpeedyTTLTimeTest extends TestBase { + + /** + * Tests that SpeedyTTLTime can advance time in the TTL mechanisms as + * expected. + */ + @Test + public void testInsertWithTTL() throws Exception { + /* Creates the store and the table. */ + final String storeName = "kvstore"; + final int port = 5000; + final CreateStore createStore = new CreateStore(storeName, port, + 1 /* nsns */, 1 /* rf */, 1 /* partitions */, 1 /* capacity */, + 2 /* mb */, true /* use threads */, null); + createStore.start(); + final KVStore kvstore = KVStoreFactory.getStore( + new KVStoreConfig(storeName, String.format("localhost:%s", port))); + final String ddl = "create table users " + + "(id integer, name string, primary key (id))"; + final StatementResult result = kvstore.executeSync(ddl); + assertTrue(result.isSuccessful()); + /* + * Starts the TTL simulation with 1 second real time equal to 1 hour TTL + * time. + */ + final SpeedyTTLTime speedyTTLTime = new SpeedyTTLTime(1000); + speedyTTLTime.start(); + /* Inserts a row with 1 hour TTL. 
*/ + final TableAPI api = kvstore.getTableAPI(); + final Table table = api.getTable("users"); + final Row row = table.createRow(); + row.put("id", 1); + row.put("name", "1"); + row.setTTL(TimeToLive.ofHours(1)); + api.put(row, null /* returnRow */, null/* writeOptions */); + final PrimaryKey pk = table.createPrimaryKey(); + pk.put("id", 1); + /* Check immediately and the row should be there. */ + assertNotNull(api.get(pk, null)); + /* + * Sleep two second (just to be safe) and check. The row should be + * expired. + */ + Thread.sleep(2000); + assertNull(api.get(pk, null)); + } +} diff --git a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/util/TestUtils.java b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/util/TestUtils.java index 2f2c54e7..f1eb6e26 100644 --- a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/util/TestUtils.java +++ b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/util/TestUtils.java @@ -1241,6 +1241,10 @@ public CountDownFaultHook(int faultCount, this.faultThreadName = threadName; } + public void resetCounter(int i) { + counter.set(i); + } + @Override public void doHook(Integer unused) { if (faultThreadName != null) { @@ -1250,7 +1254,7 @@ public void doHook(Integer unused) { return; } } - if (counter.decrementAndGet() > 0) { + if (counter.decrementAndGet() >= 0) { throw fault; } } diff --git a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/xregion/XRegionTestBase.java b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/xregion/XRegionTestBase.java index ca80555a..7e2ad67d 100644 --- a/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/xregion/XRegionTestBase.java +++ b/kvtest/kvstore-IT/src/main/java/oracle/kv/impl/xregion/XRegionTestBase.java @@ -96,7 +96,6 @@ import oracle.kv.stats.ServiceAgentMetrics; import oracle.kv.table.FieldDef.Type; import oracle.kv.table.FieldValue; -import oracle.kv.table.PrimaryKey; import oracle.kv.table.Row; import oracle.kv.table.Table; import oracle.kv.table.TableAPI; @@ -169,7 +168,7 @@ public abstract class XRegionTestBase extends PubSubTestBase { protected static final String PITR_1 = NameUtils.makeQualifiedName(NS1, "PITR1"); /** table to advance table id */ - protected static final String RANDOM_TABLE = "MyFoo"; + private static final String RANDOM_TABLE = "MyFoo"; /*---------------------*/ /* Schema Evolution */ /*---------------------*/ @@ -755,10 +754,12 @@ protected Version upsertRow(TableAPI api, String tableName, protected Version upsertRow(TableAPI api, String tableName, String desc, int id, TimeToLive ttl) { final Row row = createRow(api.getTable(tableName), desc, id); + if (useRowMD) { + row.setRowMetadata(TEST_ROW_MD); + } if (ttl == null) { return api.put(row, null, null); } - /* write and set TTL */ final WriteOptions wo = new WriteOptions().setUpdateTTL(true); row.setTTL(ttl); @@ -799,12 +800,6 @@ protected void deleteTable(TableAPI api, String tableName, long rows) { } } - protected boolean deleteRow(TableAPI api, String tableName, int id) { - final PrimaryKey pkey = api.getTable(tableName).createPrimaryKey(); - pkey.put("id", id); - return api.delete(pkey, null, null); - } - protected void initTestTable(KVStore storeHandle, String tableName, int numRows, diff --git a/kvtest/kvstore-IT/src/main/java/oracle/kv/pubsub/PubSubTestBase.java b/kvtest/kvstore-IT/src/main/java/oracle/kv/pubsub/PubSubTestBase.java index d1f0f43e..88cda643 100644 --- a/kvtest/kvstore-IT/src/main/java/oracle/kv/pubsub/PubSubTestBase.java +++ b/kvtest/kvstore-IT/src/main/java/oracle/kv/pubsub/PubSubTestBase.java @@ -31,7 +31,6 @@ import 
java.rmi.RemoteException; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; import java.util.Hashtable; import java.util.Iterator; @@ -87,6 +86,7 @@ import oracle.kv.impl.topo.RepNodeId; import oracle.kv.impl.topo.Topology; import oracle.kv.impl.util.FileNames; +import oracle.kv.impl.util.FormatUtils; import oracle.kv.impl.util.KVRepTestConfig; import oracle.kv.impl.util.PollCondition; import oracle.kv.impl.util.TestUtils; @@ -110,10 +110,14 @@ */ public class PubSubTestBase extends TestBase { + protected static final String USER_TABLE_NAME = "user"; + protected static final String CUSTOMER_TABLE_NAME = "customer"; + public static final String TEST_LOG_FILE = "testlog"; private static final String keyPrefix = "PubSubTestBase_Key_"; - private static final String valPrefix = "PubSubTestBase_Value_"; - + private static final String valPrefix = "PubSubTestBase_Value_"; + protected static final String TEST_ROW_MD = "{\"custom MD\":1}"; + /* no r2compat table */ protected static final boolean r2Compat = false; @@ -137,21 +141,21 @@ public class PubSubTestBase extends TestBase { NameUtils.makeQualifiedName(DEFAULT_TEST_NAME_SPACE, "CheckpointTable"); /* environment config parameters */ - protected int repFactor; - protected int numStorageNodes; - protected int numDataCenters; - protected int numPartitions; - protected int nSecondaryZones; - protected int nShards; + protected int repFactor = 3; + protected int numStorageNodes = 1; + protected int numDataCenters = 1; + protected int numPartitions = 32; + protected int nSecondaryZones = 0; + protected int nShards = 0; - protected volatile boolean traceOnScreen; + protected volatile boolean traceOnScreen = false; protected KVRepTestConfig config; protected KVStore store; protected CreateStore createStore; protected KVStoreConfig kvStoreConfig; protected TableAPI tableAPI; - boolean useFeederFilter; + boolean useFeederFilter = false; private Map testData; @@ -188,6 +192,7 @@ public class PubSubTestBase extends TestBase { protected final Random random = new Random(System.currentTimeMillis()); + protected volatile boolean useRowMD = false; /** * Below are for secure store test only @@ -232,21 +237,59 @@ public void tearDown() throws Exception { super.tearDown(); } - protected void createNamespace(String ns) { + protected void setUseRowMD() { + useRowMD = true; + trace("Set use row metadata"); + } + + protected long getTableId(String tableName) { + final TableAPI tapi = store.getTableAPI(); + final TableImpl tb = (TableImpl) tapi.getTable(tableName); + if (tb == null) { + return -1; + } + return tb.getId(); + } + + protected String logTimestamp(long ts) { + return ts + "(" + FormatUtils.formatDateTime(ts) + ")"; + } + + void createNamespace(String ns) { String ddl = "CREATE NAMESPACE IF NOT EXISTS " + ns; store.executeSync(ddl); } - protected void createTable(KVStore kvs, String testTable) { + protected Table createTable(KVStore kvs, String testTable) { final String ddl = "CREATE TABLE " + testTable + " " + "(id INTEGER, name STRING, age INTEGER, " + "PRIMARY KEY (id))"; kvs.executeSync(ddl); /* Ensure table created */ + final Table ret = kvs.getTableAPI().getTable(testTable); assertNotNull("table " + testTable + " not created", kvs.getTableAPI().getTable(testTable)); - trace("test table " + testTable + " has been created at store=" + + trace("Table=" + testTable + " has been created at store=" + + ((KVStoreImpl)kvs).getTopology().getKVStoreName()); + return ret; + } + + protected Table 
createChildTable(KVStore kvs, String parent, String child) { + final String tb = parent + "." + child; + return createChildTable(kvs, tb); + } + + protected Table createChildTable(KVStore kvs, String tb) { + final String ddl = "CREATE TABLE " + tb + + " (state STRING, address STRING, " + + " PRIMARY KEY (state))"; + kvs.executeSync(ddl); + /* Ensure table created */ + final Table ret = kvs.getTableAPI().getTable(tb); + assertNotNull("table " + tb + " not created", ret); + trace("Table=" + tb + " has been created at store=" + ((KVStoreImpl)kvs).getTopology().getKVStoreName()); + return ret; } protected NoSQLPublisher createPublisher() { @@ -294,18 +337,24 @@ protected Map deleteRows(TableAPI api, return ret; } - private boolean deleteRow(TableAPI api, String table, int id) { + protected boolean deleteRow(TableAPI api, String table, int id) { final PrimaryKey pkey = api.getTable(table).createPrimaryKey(); pkey.put("id", id); + if (useRowMD) { + pkey.setRowMetadata(TEST_ROW_MD); + } return api.delete(pkey, null, null); } - private Version writeRow(TableAPI api, String tableName, - int id, String name, int age) { + private Version writeRow(TableAPI api, String tableName, + int id, String name, int age) { final Row row = api.getTable(tableName).createRow(); row.put("id", id); row.put("name", name); row.put("age", age); + if (useRowMD) { + row.setRowMetadata(TEST_ROW_MD); + } return api.put(row, null, null); } @@ -318,7 +367,9 @@ protected void waitForStreamDone(final TestNoSQLSubscriberBase sub, new PollCondition(TEST_POLL_INTERVAL_MS, TEST_POLL_TIMEOUT_MS) { @Override protected boolean condition() { - return sub.getNumPuts() == total; + final long act = sub.getNumPuts(); + trace("expected=" + total + ", actual=" + act); + return act == total; } }.await(); @@ -334,6 +385,7 @@ protected void createDefaultTestNameSpace() { final StatementResult sr = store.executeSync(stmt); assertNotNull(sr); assertTrue(sr.isSuccessful()); + trace("Default namespace created=" + DEFAULT_TEST_NAME_SPACE); } protected void waitFor(final PollCondition pollCondition) @@ -357,7 +409,7 @@ protected void addTableToMetadata(TableImpl... 
tables) { table.getPrimaryKeySizes(), table.getShardKey(), table.getFieldMap(), - null, null, true, 0, null, null); + null, null, null, true, 0, null, null); } for (RepGroupId repGroupId : config.getTopology().getRepGroupIds()) { @@ -376,9 +428,10 @@ public void prepareTestEnv(boolean loadTestData) { verifyTestData(); } store = KVStoreFactory.getStore(config.getKVSConfig()); - logger.info("Test environment created successfully," + - "\ntopology:\n" + topo2String(config) + - "\ntest data loaded:\n" + loadTestData); + kvStoreConfig = config.getKVSConfig(); + trace("Test environment created successfully," + + "\ntopology:\n" + topo2String(config) + + "\ntest data loaded:\n" + loadTestData); } protected Map insertRowsIntoTable(TableImpl table, @@ -481,6 +534,10 @@ protected void verifyRows(Map expRows, } } + protected void threadTrace(String msg) { + trace("[Thread=" + Thread.currentThread().getName()+ "] " + msg); + } + protected void trace(String message) { trace(INFO, message); } @@ -565,7 +622,7 @@ protected NoSQLStreamFeederFilter getFeederFilter(RepNodeId master, return (NoSQLStreamFeederFilter) feeder.getFeederFilter(); } - private void addLoggerFileHandler() throws IOException { + protected void addLoggerFileHandler() throws IOException { final String fileName = TEST_LOG_FILE; final File loggerFile = new File(new File(testPath), fileName); final FileHandler handler = @@ -686,14 +743,16 @@ protected NoSQLPublisher getPublisherSecureStore(int numInitRows) /* create tables */ createUserTable(adminStore, numInitRows); - trace("User table created with rows " + numInitRows); + trace("User table created with rows=" + numInitRows); createCustomerTable(adminStore, numInitRows); - trace("Customer table created with rows " + numInitRows); + trace("Customer table created with rows=" + numInitRows); /* grant all privileges to user needed to subscribe the table */ - grantCreateAnyTablePriv(adminStore); - grantReadPrivTable(adminStore, Collections.singleton("user")); - trace("User " + USER + " granted privileges to subscribe user table, "); + grantCreateAnyTablePriv(adminStore, USER); + final String role = createRole(adminStore); + grantReadPrivTable(adminStore, role, USER_TABLE_NAME, USER); + trace("User=" + USER + " granted privileges to subscribe user table" + + ", role=" + role); /* create a publisher */ final NoSQLPublisherConfig pubConf = @@ -703,7 +762,7 @@ protected NoSQLPublisher getPublisherSecureStore(int numInitRows) final NoSQLPublisher publisher = NoSQLPublisher.get(pubConf, USER_LOGIN_CRED, logger); - trace("publisher created with user " + USER_LOGIN_CRED.getUsername()); + trace("Publisher created with user=" + USER_LOGIN_CRED.getUsername()); return publisher; } @@ -778,7 +837,7 @@ private RepNode getMaster(KVRepTestConfig conf, RepGroupId repGroupId) { } /* make a random row */ - private RowImpl makeRandomRow(TableImpl table, int which) { + protected RowImpl makeRandomRow(TableImpl table, int which) { RowImpl row = table.createRow(); @@ -873,21 +932,22 @@ protected void startStore(boolean useThread) throws Exception { protected Map createUserTable(KVStore storeHandle, int numRows) { - final String ddl = "CREATE TABLE user " + + final String ddl = "CREATE TABLE " + USER_TABLE_NAME + "(id STRING, firstName STRING, lastName STRING, " + "age INTEGER, PRIMARY KEY (id))"; storeHandle.executeSync(ddl); final TableAPI api = storeHandle.getTableAPI(); /* Ensure table created */ - assertNotNull("table user not created", api.getTable("user")); + assertNotNull("table=" + USER_TABLE_NAME + " not 
created", + api.getTable(USER_TABLE_NAME)); /* load rows into table */ final Map rows = new HashMap<>(); - final TableImpl table = (TableImpl) api.getTable("user"); + final TableImpl table = (TableImpl) api.getTable(USER_TABLE_NAME); for (int i = 0; i < numRows; i++) { final RowImpl row = table.createRow(); - row.put("id", "user" + i); + row.put("id", USER_TABLE_NAME + i); row.put("firstName", "user-first-" + random.nextInt(1000)); row.put("lastName", "user-last-" + @@ -904,19 +964,20 @@ protected Map createUserTable(KVStore storeHandle, } protected void createCustomerTable(KVStore storeHandle, int numRows) { - final String ddl = "CREATE TABLE customer (cid STRING, name STRING, " + - "PRIMARY KEY (cid))"; + final String ddl = "CREATE TABLE " + CUSTOMER_TABLE_NAME + + " (cid STRING, name STRING, PRIMARY KEY (cid))"; storeHandle.executeSync(ddl); final TableAPI api = storeHandle.getTableAPI(); /* Ensure table created */ - assertNotNull("table customer not created", api.getTable("customer")); + assertNotNull("table=" + CUSTOMER_TABLE_NAME + + " not created", api.getTable(CUSTOMER_TABLE_NAME)); /* load rows into table */ - final TableImpl table = (TableImpl) api.getTable("customer"); + final TableImpl table = (TableImpl) api.getTable(CUSTOMER_TABLE_NAME); for (int i = 0; i < numRows; i++) { final RowImpl row = table.createRow(); - row.put("cid", "customer" + i); + row.put("cid", CUSTOMER_TABLE_NAME + i); row.put("name", "customer-name-" + random.nextInt(1000)); api.put(row, null, new WriteOptions(Durability.COMMIT_NO_SYNC, 10000, @@ -977,111 +1038,65 @@ protected SecurityParams getSecureParams(boolean disableClientAuth) { return sp; } - private static void createRole(KVStore adminStore, String role) + protected static String createRole(KVStore adminStore) throws Exception { + final String role = "role" + + UUID.randomUUID().toString().substring(0, 4); execStatement(adminStore, "CREATE ROLE " + role); - } - - protected static String grantReadPrivTable(KVStore adminStore, - Set tables) - - throws Exception { - - final String role = "role" + UUID.randomUUID() - .toString() - .substring(0, 4); - createRole(adminStore, role); - - grantReadPrivTable(adminStore, role, tables); - return role; } - protected static String grantReadPrivTable(KVStore adminStore, - String role, - Set tables) - - throws Exception { - - for (String t : tables) { - grantPrivToRole(adminStore, role, t, - KVStorePrivilegeLabel.READ_TABLE); - } - - grantRoleToUser(adminStore, role, USER); - - return role; + protected static void grantReadPrivTable(KVStore admin, + String role, + String table, + String user) throws Exception { + grantPrivToRole(admin, role, table, KVStorePrivilegeLabel.READ_TABLE); + grantRoleToUser(admin, role, user); } - protected static String grantWritePrivTable(KVStore adminStore, - String role, - String table) - - throws Exception { - - grantPrivToRole(adminStore, role, table, - KVStorePrivilegeLabel.INSERT_TABLE); - - grantRoleToUser(adminStore, role, USER); + protected static void grantWritePrivTable(KVStore admin, + String role, + String table, + String user) throws Exception { + grantPrivToRole(admin, role, table, KVStorePrivilegeLabel.INSERT_TABLE); + grantRoleToUser(admin, role, user); - return role; } - protected static String grantCreateAnyTablePriv(KVStore adminStore) + protected static String grantCreateAnyTablePriv(KVStore admin, String user) throws Exception { - final String role = "role" + UUID.randomUUID() - .toString() - .substring(0, 4); - execStatement(adminStore, "CREATE ROLE " + role); - 
- grantPrivToRole(adminStore, role, - KVStorePrivilegeLabel.CREATE_ANY_TABLE); - - grantRoleToUser(adminStore, role, USER); - + final String role = createRole(admin); + grantPrivToRole(admin, role, KVStorePrivilegeLabel.CREATE_ANY_TABLE); + grantRoleToUser(admin, role, user); return role; } - protected static void grantWriteAnyTablePriv(KVStore adminStore) + protected static void grantWriteAnyTablePriv(KVStore admin, String user) throws Exception { - final String role = "role" + UUID.randomUUID() - .toString() - .substring(0, 4); - execStatement(adminStore, "CREATE ROLE " + role); - - grantPrivToRole(adminStore, role, - KVStorePrivilegeLabel.INSERT_ANY_TABLE); - - grantRoleToUser(adminStore, role, USER); + final String role = createRole(admin); + grantPrivToRole(admin, role, KVStorePrivilegeLabel.INSERT_ANY_TABLE); + grantRoleToUser(admin, role, user); } - protected static void grantReadAnyTablePriv(KVStore adminStore) + protected static void grantReadAnyTablePriv(KVStore admin, String user) throws Exception { - final String role = "role" + UUID.randomUUID() - .toString() - .substring(0, 4); - execStatement(adminStore, "CREATE ROLE " + role); - - grantPrivToRole(adminStore, role, - KVStorePrivilegeLabel.READ_ANY_TABLE); - - grantRoleToUser(adminStore, role, USER); + final String role = createRole(admin); + grantPrivToRole(admin, role, KVStorePrivilegeLabel.READ_ANY_TABLE); + grantRoleToUser(admin, role, user); } - protected static void dropUser(KVStore adminStore) - throws Exception { - execStatement(adminStore, "DROP USER " + USER + " CASCADE"); + protected static void dropUser(KVStore admin, String user) throws Exception { + execStatement(admin, "DROP USER " + user + " CASCADE"); } protected static void grantPrivToRole(KVStore adminStore, - String role, - KVStorePrivilegeLabel label) + String role, + KVStorePrivilegeLabel label) throws Exception { - execStatement(adminStore, - "grant " + label + " to " + role); + execStatement(adminStore, "GRANT " + label + " TO " + role); assertRoleHasPriv(adminStore, role, label.toString()); } @@ -1126,8 +1141,8 @@ private static String revokePrivFromUser(KVStore adminStore, } private static void assertRoleHasPriv(KVStore adminStore, - String role, - String privStr) { + String role, + String privStr) { assertThat(showRole(adminStore, role), containsString(privStr)); } @@ -1135,31 +1150,36 @@ private static void assertRoleHasPriv(KVStore adminStore, protected static void assertRoleHasNoPriv(KVStore adminStore, String role, String privStr) { - assertThat(showRole(adminStore, role), not(containsString(privStr))); } protected static String showRole(KVStore adminStore, String role) { final StatementResult result = adminStore.executeSync("show role " + role); - return result.getResult(); } protected static String showUsers(KVStore adminStore) { + final StatementResult result = adminStore.executeSync("show users"); + return result.getResult(); + } + + protected static String showRoles(KVStore adminStore) { final StatementResult result = - adminStore.executeSync("show users"); + adminStore.executeSync("SHOW ROLES"); + return result.getResult(); + } + protected static String showSingleUser(KVStore adminStore, String user) { + final StatementResult result = + adminStore.executeSync("SHOW USER " + user); return result.getResult(); } protected static void grantRoleToUser(KVStore adminStore, String role, - String usr) - throws Exception { - - execStatement(adminStore, - "grant " + role + " to user " + usr); + String usr) throws Exception { + 
execStatement(adminStore, "grant " + role + " to user " + usr); } /** @@ -1198,7 +1218,7 @@ protected abstract class TestNoSQLSubscriberBase private NoSQLSubscription subscription; - protected boolean isSubscribeSucc; + boolean isSubscribeSucc; protected List recvPutOps; protected List recvDelOps; @@ -1268,7 +1288,10 @@ public void onError(Throwable t) { isSubscribeSucc = false; trace(Level.INFO, "Subscriber " + conf.getSubscriberId() + - " receives an error: " + t.getMessage()); + " receives an error=" + t + "\n" + + ", cause=" + causeOfFailure + + "\nStack\n" + + LoggerUtils.getStackTrace(t)); } @Override @@ -1276,7 +1299,7 @@ public void onWarn(Throwable t) { recvWarnings.add(t); trace(Level.INFO, "Subscriber " + conf.getSubscriberId() + - " receives a warning: " + t.getMessage()); + " receives a warning=" + t); } @Override diff --git a/kvtest/kvstore-IT/src/main/java/oracle/kv/pubsub/StreamRowMDTestBase.java b/kvtest/kvstore-IT/src/main/java/oracle/kv/pubsub/StreamRowMDTestBase.java new file mode 100644 index 00000000..40543916 --- /dev/null +++ b/kvtest/kvstore-IT/src/main/java/oracle/kv/pubsub/StreamRowMDTestBase.java @@ -0,0 +1,358 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved. + * + */ + +package oracle.kv.pubsub; + +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.stream.IntStream; + +import oracle.kv.Consistency; +import oracle.kv.Direction; +import oracle.kv.Durability; +import oracle.kv.KVStoreFactory; +import oracle.kv.Version; +import oracle.kv.impl.api.KVStoreImpl; +import oracle.kv.impl.api.table.RowImpl; +import oracle.kv.impl.api.table.TableImpl; +import oracle.kv.impl.pubsub.NoSQLSubscriptionImpl; +import oracle.kv.impl.pubsub.StreamDelEvent; +import oracle.kv.impl.pubsub.StreamPutEvent; +import oracle.kv.impl.pubsub.StreamSequenceId; +import oracle.kv.impl.util.PollCondition; +import oracle.kv.table.PrimaryKey; +import oracle.kv.table.Row; +import oracle.kv.table.Table; +import oracle.kv.table.TableAPI; +import oracle.kv.table.TableIterator; +import oracle.kv.table.TableIteratorOptions; +import oracle.kv.table.WriteOptions; + +import org.junit.After; +import org.junit.Before; + +public class StreamRowMDTestBase extends PubSubTestBase { + + private static final WriteOptions WRITE_OPTIONS = + new WriteOptions(Durability.COMMIT_NO_SYNC, 10000, MILLISECONDS); + + private static final String TEST_MRT_NAME = "MRTableUser"; + private static final String TEST_TABLE_NAME = "User"; + private static final int INSERTS = 10; + private static final int UPDATES = 10; + private static final int DELETES = 10; + private NoSQLPublisher publisher; + + @Before + @Override + public void setUp() throws Exception { + + super.setUp(); + + /* override default */ + repFactor = 1; + numStorageNodes = 1; + numPartitions = 12; + useFeederFilter = true; + + startStore(); + kvStoreConfig = createKVConfig(createStore); + store = KVStoreFactory.getStore(kvStoreConfig); + createDefaultTestNameSpace(); + tableAPI = store.getTableAPI(); + publisher = null; + } + + @After + @Override + public void tearDown() throws 
Exception { + if (store != null) { + store.close(); + } + if (createStore != null) { + createStore.shutdown(); + } + super.tearDown(); + } + + void testBody(boolean mrTable) { + + /* create table */ + final TableImpl tb = (TableImpl) creatTable(mrTable); + assertEquals(mrTable, tb.isMultiRegion()); + + /* create stream */ + final NoSQLSubscription stream = + createStream(tb.getFullNamespaceName()); + + /* do insert, update and delete */ + doInsertUpdateDelete(tb.getFullNamespaceName()); + + /* stream all operations */ + waitForStreamDone(stream); + + /* verify the row md in streamed put and deletes */ + verifyRowMDInStream(stream); + + if (mrTable) { + verifyRowMDTombstone(); + } + } + + private void doInsertUpdateDelete(String tbName) { + final TableAPI tapi = store.getTableAPI(); + final Table table = tapi.getTable(tbName); + assertNotNull("Not fond table=" + tbName, table); + + /* insert */ + IntStream.range(0, INSERTS).forEach(i -> { + final Row row = putRow(table, i); + trace("Insert row=" + row.toJsonString(false) + + ", row md=" + row.getRowMetadata()); + }); + + /* updates */ + IntStream.range(0, UPDATES).forEach(i -> { + final Row row = putRow(table, i); + trace("Update row=" + row.toJsonString(false) + + ", row md=" + row.getRowMetadata()); + }); + + /* deletes */ + final PrimaryKey pk = table.createPrimaryKey(); + IntStream.range(0, DELETES).forEach(i -> { + pk.put("id", i); + pk.setRowMetadata(TEST_ROW_MD); + final boolean succ = tapi.delete(pk, null, WRITE_OPTIONS); + assertTrue("Fail to delete pk=" + pk.toJsonString(false), succ); + trace("Delete primary key=" + pk.toJsonString(false) + + ", row md=" + pk.getRowMetadata()); + }); + } + + private Row putRow(Table table, int i) { + final TableAPI tapi = store.getTableAPI(); + final Row row = table.createRow(); + row.put("id", i); + row.put("desc", UUID.randomUUID().toString().substring(0, 4)); + row.setRowMetadata(TEST_ROW_MD); + final Version ver = tapi.put(row, null, WRITE_OPTIONS); + assertNotNull("Fail to write row=" + row.toJsonString(false), ver); + return row; + } + + private void verifyRowMDInStream(NoSQLSubscription stream) { + final TestSubscriber subscriber = + (TestSubscriber) (((NoSQLSubscriptionImpl) stream).getSubscriber()); + + final List puts = subscriber.getPutOps(); + + puts.stream().map(sp -> sp.asPut().getRow()) + .forEach(row -> { + trace("[PUT]" + row.toJsonString(false) + + ", row md=" + row.getRowMetadata()); + assertEquals(TEST_ROW_MD, row.getRowMetadata()); + }); + trace("Done verifying all row md in stream put operations"); + + + final List dels = subscriber.getDelOps(); + dels.stream().map(sp -> sp.asDelete().getPrimaryKey()) + .forEach(pk -> { + trace("[DELETE]" + pk.toJsonString(false) + + ", row md=" + pk.getRowMetadata()); + assertEquals(TEST_ROW_MD, pk.getRowMetadata()); + }); + trace("Done verifying all row md in stream delete operations"); + } + + private void waitForStreamDone(NoSQLSubscription stream) { + + final TestSubscriber subscriber = + (TestSubscriber) (((NoSQLSubscriptionImpl) stream).getSubscriber()); + final int expPuts = INSERTS + UPDATES; + final int expDels = DELETES; + try { + waitFor(new PollCondition(TEST_POLL_INTERVAL_MS, + TEST_POLL_TIMEOUT_MS) { + @Override + protected boolean condition() { + final int actPuts = subscriber.getPutOps().size(); + final int actDels = subscriber.getDelOps().size(); + trace("#puts=" + actPuts + "(exp=" + expPuts + ")" + + ", #dels=" + actDels + "(exp=" + expDels + ")"); + return actPuts == expPuts && actDels == expDels; + } + }); + } catch 
(TimeoutException e) { + fail("timeout in waiting"); + } + } + + private NoSQLSubscription createStream(String tableName) { + + ckptTableName = "TestCkptTable"; + /* create a publisher */ + final NoSQLPublisherConfig pconf = + new NoSQLPublisherConfig.Builder(kvStoreConfig, testPath) + .build(); + + publisher = NoSQLPublisher.get(pconf, logger); + trace("publisher created"); + + /* create a subscriber */ + final NoSQLSubscriptionConfig subConf = + new NoSQLSubscriptionConfig.Builder(ckptTableName) + .setSubscribedTables(tableName) + .build(); + final TestSubscriber subscriber = new TestSubscriber(subConf); + trace("subscriber created: " + subscriber.getSubscriptionConfig()); + + publisher.subscribe(subscriber); + final boolean succ = new PollCondition(TEST_POLL_INTERVAL_MS, + TEST_POLL_TIMEOUT_MS) { + @Override + protected boolean condition() { + return subscriber.isSubscriptionSucc(); + } + }.await(); + if (!succ) { + fail("Timeout in waiting"); + } + + final NoSQLSubscription subscription = subscriber.getSubscription(); + subscription.request(Long.MAX_VALUE); + trace("Stream started"); + return subscription; + } + + private Table creatTable(boolean mrTable) { + if (mrTable) { + mrTableRegionSetUp(store); + return createTestTable(TEST_MRT_NAME, true); + } + return createTestTable(TEST_TABLE_NAME, false); + } + + private Table createTestTable(String tableName, boolean mrtable) { + /* create test tables */ + String ddl = "CREATE TABLE " + tableName + " " + + "(id INTEGER, desc STRING, PRIMARY KEY (id))"; + if (mrtable) { + ddl += " IN REGIONS " + LOCAL_REGION + ", " + REMOTE_REGION; + } + store.executeSync(ddl); + /* Ensure table created */ + final Table table = tableAPI.getTable(tableName); + assertNotNull("table " + tableName + " not created", table); + trace("src table " + tableName + " have been created, " + + "ddl=" + ddl); + return table; + } + + private void verifyRowMDTombstone() { + final List rows = getTombstoneFromTable(); + rows.forEach(row -> { + assertNotNull(row); + final RowImpl tombstoneRow = (RowImpl) row; + trace("Tombstone row=" + tombstoneRow.toJsonString(false) + + ", creation=" + tombstoneRow.getCreationTime() + + ", last update=" + tombstoneRow.getLastModificationTime() + + ", region id=" + tombstoneRow.getRegionId() + + ", row md=" + tombstoneRow.getRowMetadata()); + assertTrue(tombstoneRow.isTombstone()); + /* tombstone has some metadata */ + + // todo: uncomment check when creation time feature is enabled + //assertTrue(row.getCreationTime() > 0); + assertTrue(row.getLastModificationTime() > 0); + assertTrue(tombstoneRow.getRegionId() > 0); + assertEquals(TEST_ROW_MD, row.getRowMetadata()); + }); + } + + private List getTombstoneFromTable() { + final List ret = new ArrayList<>(); + final TableAPI tapi = store.getTableAPI(); + final Table table = tapi.getTable(TEST_MRT_NAME); + assertNotNull(table); + + final TableIteratorOptions iter_opt = new TableIteratorOptions( + Direction.FORWARD, + Consistency.ABSOLUTE, + /* iterator timeout upper bounded by store read timeout */ + ((KVStoreImpl) store).getReadTimeoutMs(), + TimeUnit.MILLISECONDS, + 1, + 16); + + /* always include tombstones in MR table initialization */ + iter_opt.setIncludeTombstones(); + + final TableIterator iter = tapi.tableIterator( + table.createPrimaryKey(), null, iter_opt); + while (iter.hasNext()) { + final RowImpl tombstoneRow = (RowImpl) iter.next(); + if (tombstoneRow.isTombstone()) { + assertNotNull(tombstoneRow); + ret.add(tombstoneRow); + } + } + return ret; + } + + private class 
TestSubscriber extends TestNoSQLSubscriberBase { + + private final List putOps; + private final List delOps; + + TestSubscriber(NoSQLSubscriptionConfig config) { + super(config); + putOps = Collections.synchronizedList(new ArrayList<>()); + delOps = Collections.synchronizedList(new ArrayList<>()); + } + + @Override + public void onNext(StreamOperation t) { + if (t instanceof StreamPutEvent) { + putOps.add(t); + return; + } + + if (t instanceof StreamDelEvent) { + delOps.add(t); + return; + } + + throw new IllegalStateException("Receive unsupported stream " + + "operation from shard " + + t.getRepGroupId() + + ", seq: " + + ((StreamSequenceId) + t.getSequenceId()) + .getSequence()); + } + + List getPutOps() { + return putOps; + } + + List getDelOps() { + return delOps; + } + } +} diff --git a/kvtest/kvstore-IT/src/main/java/oracle/kv/table/SecureDDLTest.java b/kvtest/kvstore-IT/src/main/java/oracle/kv/table/SecureDDLTest.java index 3bbf8709..b95b2d9f 100644 --- a/kvtest/kvstore-IT/src/main/java/oracle/kv/table/SecureDDLTest.java +++ b/kvtest/kvstore-IT/src/main/java/oracle/kv/table/SecureDDLTest.java @@ -7,7 +7,9 @@ package oracle.kv.table; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -16,7 +18,9 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import java.util.function.Supplier; +import oracle.kv.AuthenticationRequiredException; import oracle.kv.ExecutionFuture; import oracle.kv.KVSecurityConstants; import oracle.kv.KVSecurityException; @@ -25,9 +29,15 @@ import oracle.kv.KVStoreFactory; import oracle.kv.LoginCredentials; import oracle.kv.PasswordCredentials; +import oracle.kv.ReauthenticateHandler; import oracle.kv.StatementResult; import oracle.kv.TestBase; import oracle.kv.impl.admin.plan.PlanExecutor; +import oracle.kv.impl.api.KVStoreImpl; +import oracle.kv.impl.api.table.TableLimits; +import oracle.kv.impl.client.admin.DdlStatementExecutor; +import oracle.kv.impl.security.login.LoginManager; +import oracle.kv.impl.util.TestUtils; import oracle.kv.util.CreateStore; import org.junit.Test; @@ -38,6 +48,8 @@ * can create thread local related issues. */ public class SecureDDLTest extends DdlExecutionBase { + private static final String adminUser = "ADMIN_USER"; + private static final String adminUserPassword = "NoSql00__1234"; /** * Test execution in a secure store @@ -46,29 +58,17 @@ public class SecureDDLTest extends DdlExecutionBase { @SuppressWarnings("deprecation") @Test public void testSecureDdl() throws Exception { - createStore = new CreateStore(kvstoreName, - startPort, - 1, /* Storage nodes */ - 1, /* RF */ - 2, /* Partitions */ - 1, /* Capacity */ - CreateStore.MB_PER_SN, /* memory */ - true, /* useThreads */ // tODO, change - null, /* mgmtImpl */ - true, /* mgmtPortsShared */ - true); /* secure */ + createStoreWithAdmin(); /* Make users with different roles, and login. 
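(The admin user itself is now created inside createStoreWithAdmin(); only the two non-admin users are added here, and their roles are granted once the store has started.)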
*/ - String adminUser= "ADMIN_USER"; - String adminUserPassword = "NoSql00__1234"; String ddlOkUser = "DDL_OK"; String ddlOkUserPassword = "NoSql00__5678"; String noDdlUser = "NO_DDL_FOR_YOU"; String noDdlUserPassword = "NoSql00__333"; - createStore.addUser(adminUser, adminUserPassword, true /* admin */); createStore.addUser(ddlOkUser, ddlOkUserPassword, false /* admin */); createStore.addUser(noDdlUser, noDdlUserPassword, false /* admin */); createStore.start(); + createStore.grantRoles(ddlOkUser, "readwrite", "dbadmin", "writesystable"); createStore.grantRoles(noDdlUser, "readonly"); @@ -205,6 +205,74 @@ void doStatement() throws InterruptedException, ExecutionException { storeNoDdl.close(); } + @Test + public void testReauthenticate() throws Exception { + createStoreWithAdmin(); + createStore.start(); + final LoginCredentials creds = + new PasswordCredentials(adminUser, adminUserPassword.toCharArray()); + final Reauth reauth = new Reauth(creds); + final KVStore store = loginKVStoreUser(creds, reauth); + tearDowns.add(store::close); + + final String tableName = "people"; + final String statement = "CREATE TABLE IF NOT EXISTS " + tableName + + "(id INTEGER, name STRING, PRIMARY KEY (id))"; + + tearDowns.add(() -> DdlStatementExecutor.beforeExecuteHook = null); + TestUtils.CountDownFaultHook testHook = new TestUtils + .CountDownFaultHook(1 /* fault count */, + new AuthenticationRequiredException("test", + true)); + DdlStatementExecutor.beforeExecuteHook = testHook; + + testExecuteOperation( + store, reauth, testHook, + () -> ((KVStoreImpl)store).execute( + statement.toCharArray(), null, new TableLimits(1, 1, 1))); + + testExecuteOperation( + store, reauth, testHook, () -> store.execute(statement)); + + testExecuteOperation( + store, reauth, testHook, + () -> ((KVStoreImpl)store).setTableLimits( + null, tableName, new TableLimits(1, 2, 3))); + } + + private void testExecuteOperation(KVStore store, + Reauth reauth, + TestUtils.CountDownFaultHook testHook, + Supplier op) + throws Exception { + + reauth.completed = 0; + testHook.resetCounter(1); + LoginManager curLoginManager = + ((KVStoreImpl)store).getDdlStatementExecutor().getLoginManager(); + ExecutionFuture future = op.get(); + checkSuccess(future, future.get()); + assertEquals(1, reauth.completed); + LoginManager renewedLoginManager = + ((KVStoreImpl)store).getDdlStatementExecutor().getLoginManager(); + assertNotEquals(renewedLoginManager, curLoginManager); + } + + private void createStoreWithAdmin() throws Exception { + createStore = new CreateStore(kvstoreName, + startPort, + 1, /* Storage nodes */ + 1, /* RF */ + 2, /* Partitions */ + 1, /* Capacity */ + CreateStore.MB_PER_SN, /* memory */ + true, /* useThreads */ // tODO, change + null, /* mgmtImpl */ + true, /* mgmtPortsShared */ + true); /* secure */ + createStore.addUser(adminUser, adminUserPassword, true /* admin */); + } + /** * Run a code snippet and expect a KVSecurityException. */ @@ -223,12 +291,17 @@ void exec() { abstract void doStatement() throws Exception; } + private KVStore loginKVStoreUser(String userName, String password) { + LoginCredentials creds = + new PasswordCredentials(userName, password.toCharArray()); + return loginKVStoreUser(creds, null); + } + /** * Log this user into a secured store. 
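 * The refactored variant takes pre-built LoginCredentials plus an optional * ReauthenticateHandler (null preserves the old behavior); testReauthenticate * passes a Reauth handler so that the AuthenticationRequiredException injected * by the test hook triggers a transparent re-login.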
*/ - private KVStore loginKVStoreUser(String userName, String password) { - LoginCredentials creds = - new PasswordCredentials(userName, password.toCharArray()); + private KVStore loginKVStoreUser(LoginCredentials creds, + ReauthenticateHandler reauthHandler) { KVStoreConfig kvConfig = new KVStoreConfig(createStore.getStoreName(), createStore.getHostname() + ":" + @@ -241,6 +314,21 @@ private KVStore loginKVStoreUser(String userName, String password) { createStore.getTrustStore().getPath()); kvConfig.setSecurityProperties(props); - return KVStoreFactory.getStore(kvConfig, creds, null); + return KVStoreFactory.getStore(kvConfig, creds, reauthHandler); + } + + private class Reauth implements ReauthenticateHandler { + private volatile int completed ; + private final LoginCredentials creds; + + Reauth(LoginCredentials creds) { + this.creds = creds; + } + + @Override + public void reauthenticate (KVStore store) { + store.login(creds); + completed++; + } } } diff --git a/kvtest/kvstore-IT/src/main/java/oracle/kv/util/DDLTestUtils.java b/kvtest/kvstore-IT/src/main/java/oracle/kv/util/DDLTestUtils.java index d38670a2..b380ba55 100644 --- a/kvtest/kvstore-IT/src/main/java/oracle/kv/util/DDLTestUtils.java +++ b/kvtest/kvstore-IT/src/main/java/oracle/kv/util/DDLTestUtils.java @@ -15,6 +15,7 @@ import oracle.kv.ExecutionFuture; import oracle.kv.KVStore; import oracle.kv.StatementResult; +import oracle.kv.query.ExecuteOptions; import oracle.kv.table.RecordValue; public class DDLTestUtils { @@ -38,6 +39,16 @@ public static void checkSuccess(ExecutionFuture future, assertNotNull(result.toString(), result.getInfo()); } + public static void execStatement(KVStore store, + String statement, + ExecuteOptions options) + throws Exception { + + ExecutionFuture future = store.execute(statement, options); + StatementResult result = future.get(); + checkSuccess(future, result); + } + public static void execStatement(KVStore store, String statement) throws Exception { diff --git a/kvtest/kvstore-IT/src/main/java/oracle/kv/util/TableTestUtils.java b/kvtest/kvstore-IT/src/main/java/oracle/kv/util/TableTestUtils.java index 573b39a2..372d38c3 100644 --- a/kvtest/kvstore-IT/src/main/java/oracle/kv/util/TableTestUtils.java +++ b/kvtest/kvstore-IT/src/main/java/oracle/kv/util/TableTestUtils.java @@ -55,6 +55,7 @@ public static void evolveTable(TableEvolver evolver, evolver.getTableVersion(), table.getFieldMap(), table.getDefaultTTL(), + table.getBeforeImageTTL(), table.getRemoteRegions()); execPlan(cs, planId, "evolveTable", shouldSucceed, storeAPI); } catch (AdminFaultException ice) { diff --git a/kvtest/kvtif-IT/pom.xml b/kvtest/kvtif-IT/pom.xml index 15e3e847..020b2c24 100644 --- a/kvtest/kvtif-IT/pom.xml +++ b/kvtest/kvtif-IT/pom.xml @@ -7,7 +7,7 @@ com.oracle.nosql kvtest - 25.1.13 + 25.3.21 kvtif-IT diff --git a/kvtest/kvtif-IT/src/main/java/oracle/kv/impl/tif/SubscriptionTest.java b/kvtest/kvtif-IT/src/main/java/oracle/kv/impl/tif/SubscriptionTest.java index 5da3d43b..6e8ff1bb 100644 --- a/kvtest/kvtif-IT/src/main/java/oracle/kv/impl/tif/SubscriptionTest.java +++ b/kvtest/kvtif-IT/src/main/java/oracle/kv/impl/tif/SubscriptionTest.java @@ -278,12 +278,12 @@ public void processDel(long vlsn, byte[] key, byte[] val, long txnId, } @Override - public void processCommit(long vlsn, long txnId) { + public void processCommit(long vlsn, long txnId, long ts) { } @Override - public void processAbort(long vlsn, long txnId) { + public void processAbort(long vlsn, long txnId, long ts) { } diff --git 
a/kvtest/kvtif-IT/src/main/java/oracle/kv/impl/tif/TextIndexFeederTestBase.java b/kvtest/kvtif-IT/src/main/java/oracle/kv/impl/tif/TextIndexFeederTestBase.java index 606493d5..787c8823 100644 --- a/kvtest/kvtif-IT/src/main/java/oracle/kv/impl/tif/TextIndexFeederTestBase.java +++ b/kvtest/kvtif-IT/src/main/java/oracle/kv/impl/tif/TextIndexFeederTestBase.java @@ -387,7 +387,9 @@ public void setUp() throws Exception { userTableProto.getPrimaryKeySizes(), userTableProto.getShardKey(), userTableProto.getFieldMap(), - null, null, false, 0, null, null); + null, + null, /*beforeImageTTL*/ + null, false, 0, null, null); jokeTable = metadata.addTable(jokeTableProto.getInternalNamespace(), jokeTableProto.getName(), @@ -396,7 +398,9 @@ public void setUp() throws Exception { jokeTableProto.getPrimaryKeySizes(), jokeTableProto.getShardKey(), jokeTableProto.getFieldMap(), - null, null, false, 0, null, null); + null, + null, /*beforeImageTTL*/ + null, false, 0, null, null); metadata.addTextIndex (null, "FirstNameIndex", userTable.getFullName(), makeTextIndexList @@ -806,7 +810,9 @@ private void createJsonIndexScalar() { jsonTableProtoScalar.getPrimaryKeySizes(), jsonTableProtoScalar.getShardKey(), jsonTableProtoScalar.getFieldMap(), - null, null, false, 0, null, null); + null, + null, /*beforeImageTTL*/ + null, false, 0, null, null); /* * Add a full text index for the json column of the table created * above. @@ -878,7 +884,9 @@ protected void createJsonIndexSenators() { jsonTableProtoSenators.getPrimaryKeySizes(), jsonTableProtoSenators.getShardKey(), jsonTableProtoSenators.getFieldMap(), - null, null, false, 0, null, null); + null, + null, /*beforeImageTTL*/ + null, false, 0, null, null); /* * Add a full text index for the json column of the table of * senator rows created above. If you don't want to index @@ -996,7 +1004,9 @@ protected void createJsonIndexSenatorsForBehaviorsTest() { jsonTableProtoSenators.getPrimaryKeySizes(), jsonTableProtoSenators.getShardKey(), jsonTableProtoSenators.getFieldMap(), - null, null, false, 0, null, null); + null, + null, /*beforeImageTTL*/ + null, false, 0, null, null); /* * Add a full text index for the json column of the table of * senator rows created above. And then create another full text index diff --git a/kvtest/pom.xml b/kvtest/pom.xml index 95dab95c..ac46e79f 100644 --- a/kvtest/pom.xml +++ b/kvtest/pom.xml @@ -7,7 +7,7 @@ com.oracle.nosql kv - 25.1.13 + 25.3.21 kvtest diff --git a/packaging/pom.xml b/packaging/pom.xml index 094f384a..29064ef8 100644 --- a/packaging/pom.xml +++ b/packaging/pom.xml @@ -7,7 +7,7 @@ com.oracle.nosql kv - 25.1.13 + 25.3.21 packaging diff --git a/pom.xml b/pom.xml index 3d5c74c5..0b02c881 100644 --- a/pom.xml +++ b/pom.xml @@ -11,7 +11,7 @@ com.oracle.nosql kv - 25.1.13 + 25.3.21 pom Oracle NoSQL Database diff --git a/recovery/pom.xml b/recovery/pom.xml index 8994e8fb..8012d663 100644 --- a/recovery/pom.xml +++ b/recovery/pom.xml @@ -7,7 +7,7 @@ com.oracle.nosql kv - 25.1.13 + 25.3.21 recovery diff --git a/sql/pom.xml b/sql/pom.xml index da0d6571..4a734863 100644 --- a/sql/pom.xml +++ b/sql/pom.xml @@ -7,7 +7,7 @@ com.oracle.nosql kv - 25.1.13 + 25.3.21 sql