diff --git a/.github/workflows/image-build.yaml b/.github/workflows/image-build.yaml
index d6dc08c541433..02b8064af50f9 100644
--- a/.github/workflows/image-build.yaml
+++ b/.github/workflows/image-build.yaml
@@ -1,7 +1,7 @@
name: image build
env:
- TAG: v0.35.79-gorgias
+ TAG: v0.35.81-gorgias
on:
push:
@@ -42,7 +42,7 @@ jobs:
DOCKER_BUILD_RECORD_UPLOAD: false
DOCKER_BUILD_SUMMARY: true
with:
- push: ${{ github.ref == 'refs/heads/gorgias' }}
+ push: true # ${{ github.ref == 'refs/heads/gorgias' }}
context: rust/cubestore
file: rust/cubestore/gorgias.Dockerfile
tags: |
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8901385b30626..28493f0b2aab6 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,6 +3,42 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.81](https://github.com/cube-js/cube/compare/v0.35.80...v0.35.81) (2024-09-12)
+
+
+### Bug Fixes
+
+* **api-gateway:** fixes an issue where queries to get the total count of results were incorrectly applying sorting from the original query and also were getting default ordering applied when the query ordering was stripped out ([#8060](https://github.com/cube-js/cube/issues/8060)) Thanks [@rdwoodring](https://github.com/rdwoodring)! ([863f370](https://github.com/cube-js/cube/commit/863f3709e97c904f1c800ad98889dc272dbfddbd))
+* **cubesql:** Use load meta with user change for SQL generation calls ([#8693](https://github.com/cube-js/cube/issues/8693)) ([0f7bb3d](https://github.com/cube-js/cube/commit/0f7bb3d3a96447a69835e3c591ebaf67592c3eed))
+* Updated jsonwebtoken in all packages ([#8282](https://github.com/cube-js/cube/issues/8282)) Thanks [@jlloyd-widen](https://github.com/jlloyd-widen)! ([ca7c292](https://github.com/cube-js/cube/commit/ca7c292e0122be50ac7adc9b9d4910623d19f840))
+
+
+### Features
+
+* **cubestore:** Support date_bin function ([#8672](https://github.com/cube-js/cube/issues/8672)) ([64788de](https://github.com/cube-js/cube/commit/64788dea89b0244911518de203929fc5c773cd8f))
+* ksql and rollup pre-aggregations ([#8619](https://github.com/cube-js/cube/issues/8619)) ([cdfbd1e](https://github.com/cube-js/cube/commit/cdfbd1e21ffcf111e40c525f8a391cc0dcee3c11))
+
+
+
+
+
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+
+### Bug Fixes
+
+* **schema-compiler:** propagate FILTER_PARAMS from view to inner cube's SELECT ([#8466](https://github.com/cube-js/cube/issues/8466)) ([c0466fd](https://github.com/cube-js/cube/commit/c0466fde9b7a3834159d7ec592362edcab6d9795))
+
+
+### Features
+
+* **cubesql:** Fill pg_description table with cube and members descriptions ([#8618](https://github.com/cube-js/cube/issues/8618)) ([2288c18](https://github.com/cube-js/cube/commit/2288c18bf30d1f3a3299b235fe9b4405d2cb7463))
+* **cubesql:** Support join with type coercion ([#8608](https://github.com/cube-js/cube/issues/8608)) ([46b3a36](https://github.com/cube-js/cube/commit/46b3a36936f0f00805144714f0dd87a3c50a5e0a))
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
diff --git a/DEPRECATION.md b/DEPRECATION.md
index 67d61b8d3f890..f7486c883afa8 100644
--- a/DEPRECATION.md
+++ b/DEPRECATION.md
@@ -60,6 +60,7 @@ features:
| Deprecated | [Node.js 16](#nodejs-16) | v0.35.0 | |
| Removed | [MySQL-based SQL API](#mysql-based-sql-api) | v0.35.0 | v0.35.0 |
| Removed | [`initApp` hook](#initapp-hook) | v0.35.0 | v0.35.0 |
+| Deprecated | [`/v1/run-scheduled-refresh` REST API endpoint](#v1run-scheduled-refresh-rest-api-endpoint) | v0.35.0 | |
### Node.js 8
@@ -392,4 +393,13 @@ Early prototype of the MySQL-based SQL API is removed in favor of the Postgres-c
**Removed in release: v0.35.0**
-The `initApp` hook is removed as it's not relevant anymore for Docker-based architecture.
\ No newline at end of file
+The `initApp` hook is removed as it's not relevant anymore for Docker-based architecture.
+
+### `/v1/run-scheduled-refresh` REST API endpoint
+
+**Deprecated in release: v0.35.0**
+
+The `/v1/run-scheduled-refresh` REST API endpoint is deprecated as it's not
+relevant anymore for Docker-based architecture. Use the [Orchestration
+API](https://cube.dev/docs/product/apis-integrations/orchestration-api) and
+`/v1/pre-aggregations/jobs` endpoint instead.
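+
+For illustration, a request to the replacement endpoint might look like the
+following sketch. The URL, token, and selector values are placeholders; see the
+Orchestration API documentation for the full request format.
+
+```bash
+# Illustrative only: trigger pre-aggregation builds via the Orchestration API.
+curl -X POST http://localhost:4000/cubejs-api/v1/pre-aggregations/jobs \
+  -H "Authorization: <JWT>" \
+  -H "Content-Type: application/json" \
+  -d '{
+    "action": "post",
+    "selector": {
+      "timezones": ["UTC"],
+      "dataSources": ["default"]
+    }
+  }'
+```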
\ No newline at end of file
diff --git a/README.md b/README.md
index a72e798e5a374..f54d6dceda805 100644
--- a/README.md
+++ b/README.md
@@ -6,14 +6,14 @@ The CI will build and push the image for the `gorgias` branch but only build the
Locally, build and tag the image with:
```bash
-docker build -t us-east1-docker.pkg.dev/gorgias-helpdesk-staging/container-images/cubestore:v0.35.79-gorgias -f rust/cubestore/gorgias.Dockerfile rust/cubestore
-
-docker tag us-east1-docker.pkg.dev/gorgias-helpdesk-staging/container-images/cubestore:v0.35.79-gorgias \
- us-central1-docker.pkg.dev/gorgias-revenue-staging/container-images/cubestore:v0.35.79-gorgias
-docker tag us-east1-docker.pkg.dev/gorgias-helpdesk-staging/container-images/cubestore:v0.35.79-gorgias \
- us-east1-docker.pkg.dev/gorgias-helpdesk-production/container-images/cubestore:v0.35.79-gorgias
-docker tag us-east1-docker.pkg.dev/gorgias-helpdesk-staging/container-images/cubestore:v0.35.79-gorgias \
- us-central1-docker.pkg.dev/gorgias-revenue-production/container-images/cubestore:v0.35.79-gorgias
+docker build -t us-east1-docker.pkg.dev/gorgias-helpdesk-staging/container-images/cubestore:v0.35.81-gorgias -f rust/cubestore/gorgias.Dockerfile rust/cubestore
+
+docker tag us-east1-docker.pkg.dev/gorgias-helpdesk-staging/container-images/cubestore:v0.35.81-gorgias \
+ us-central1-docker.pkg.dev/gorgias-revenue-staging/container-images/cubestore:v0.35.81-gorgias
+docker tag us-east1-docker.pkg.dev/gorgias-helpdesk-staging/container-images/cubestore:v0.35.81-gorgias \
+ us-east1-docker.pkg.dev/gorgias-helpdesk-production/container-images/cubestore:v0.35.81-gorgias
+docker tag us-east1-docker.pkg.dev/gorgias-helpdesk-staging/container-images/cubestore:v0.35.81-gorgias \
+ us-central1-docker.pkg.dev/gorgias-revenue-production/container-images/cubestore:v0.35.81-gorgias
```
Then push the image to the registry with:
@@ -23,10 +23,10 @@ Then push the image to the registry with:
gcloud auth configure-docker us-east1-docker.pkg.dev --quiet
gcloud auth configure-docker us-central1-docker.pkg.dev --quiet
-docker push us-east1-docker.pkg.dev/gorgias-helpdesk-staging/container-images/cubestore:v0.35.79-gorgias
-docker push us-central1-docker.pkg.dev/gorgias-revenue-staging/container-images/cubestore:v0.35.79-gorgias
-docker push us-east1-docker.pkg.dev/gorgias-helpdesk-production/container-images/cubestore:v0.35.79-gorgias
-docker push us-central1-docker.pkg.dev/gorgias-revenue-production/container-images/cubestore:v0.35.79-gorgias
+docker push us-east1-docker.pkg.dev/gorgias-helpdesk-staging/container-images/cubestore:v0.35.81-gorgias
+docker push us-central1-docker.pkg.dev/gorgias-revenue-staging/container-images/cubestore:v0.35.81-gorgias
+docker push us-east1-docker.pkg.dev/gorgias-helpdesk-production/container-images/cubestore:v0.35.81-gorgias
+docker push us-central1-docker.pkg.dev/gorgias-revenue-production/container-images/cubestore:v0.35.81-gorgias
```
@@ -43,6 +43,13 @@ docker push us-central1-docker.pkg.dev/gorgias-revenue-production/container-imag
+
+
+
+
+
+
+
[Website](https://cube.dev?ref=github-readme) • [Getting Started](https://cube.dev/docs/getting-started?ref=github-readme) • [Docs](https://cube.dev/docs?ref=github-readme) • [Examples](https://cube.dev/docs/examples?ref=github-readme) • [Blog](https://cube.dev/blog?ref=github-readme) • [Slack](https://slack.cube.dev?ref=github-readme) • [Twitter](https://twitter.com/the_cube_dev)
[](https://badge.fury.io/js/%40cubejs-backend%2Fserver)
diff --git a/docs/pages/guides/dbt.mdx b/docs/pages/guides/dbt.mdx
index 86616de28f962..36087b2efdb35 100644
--- a/docs/pages/guides/dbt.mdx
+++ b/docs/pages/guides/dbt.mdx
@@ -263,6 +263,7 @@ cubes:
- name: "{{ column.name }}"
sql: "{{ column.sql }}"
type: "{{ column.type }}"
+ description: "{{ column.description }}"
meta:
source: dbt
{% endfor %}
@@ -361,4 +362,4 @@ of the REST API.
[link-dbt-docs-columns]: https://docs.getdbt.com/reference/resource-properties/columns
[link-dbt-materializations]: https://docs.getdbt.com/docs/build/materializations
[link-smart-open]: https://pypi.org/project/smart-open/
-[link-boto3]: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/s3-examples.html
\ No newline at end of file
+[link-boto3]: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/s3-examples.html
diff --git a/docs/pages/guides/style-guide.mdx b/docs/pages/guides/style-guide.mdx
index aeb5886c0f995..69ddbd0a6f12b 100644
--- a/docs/pages/guides/style-guide.mdx
+++ b/docs/pages/guides/style-guide.mdx
@@ -19,8 +19,7 @@ This style guide is intended to be used by:
## Syntax
-- Default to [YAML syntax][ref-syntax-model] for data modeling. Use JavaScript
- syntax for dynamic data models only.
+- Default to [YAML syntax][ref-syntax-model] for data modeling.
- Use [snake case][ref-syntax-naming] when using either YAML or JavaScript
syntax.
- Follow the recommendations on [YAML syntax][self-yaml] and [SQL
diff --git a/docs/pages/product/apis-integrations/mdx-api.mdx b/docs/pages/product/apis-integrations/mdx-api.mdx
index e54dd77ecb301..700348fca11b6 100644
--- a/docs/pages/product/apis-integrations/mdx-api.mdx
+++ b/docs/pages/product/apis-integrations/mdx-api.mdx
@@ -50,9 +50,72 @@ views:
- city
```
+### Dimension keys
+
+You can define a member that will be used as a key for a dimension in the cube's model file.
+
+```yaml
+cubes:
+ - name: users
+ sql_table: USERS
+ public: false
+
+ dimensions:
+ - name: id
+ sql: "{CUBE}.ID"
+ type: number
+ primary_key: true
+
+ - name: first_name
+ sql: FIRST_NAME
+ type: string
+ meta:
+ key_member: users_id
+```
+
+### Dimension labels
+
+You can define a member that will be used as a label for a dimension in the cube's model file.
+
+```yaml
+cubes:
+ - name: users
+ sql_table: USERS
+ public: false
+
+ dimensions:
+ - name: id
+ sql: "{CUBE}.ID"
+ type: number
+ meta:
+ label_member: users_first_name
+```
+
+### Custom properties
+
+You can define custom properties for dimensions in the cube's model file.
+
+```yaml
+cubes:
+ - name: users
+ sql_table: USERS
+ public: false
+
+ dimensions:
+ - name: id
+ sql: "{CUBE}.ID"
+ type: number
+ meta:
+ properties:
+ - name: "Property A"
+ column: users_first_name
+ - name: "Property B"
+ value: users_city
+```
+
### Measure groups
-MDX API supports organizing measures into groups (folders). You can define measure groups in the view's schema file.
+MDX API supports organizing measures into groups (folders). You can define measure groups in the view's model file.
```yaml
views:
diff --git a/docs/pages/product/apis-integrations/sql-api/query-format.mdx b/docs/pages/product/apis-integrations/sql-api/query-format.mdx
index 14469daaa0450..ee22eba7afc18 100644
--- a/docs/pages/product/apis-integrations/sql-api/query-format.mdx
+++ b/docs/pages/product/apis-integrations/sql-api/query-format.mdx
@@ -284,7 +284,7 @@ cubes:
- name: completed_percentage
type: number
- sql: "({completed_count} / NULLIF({count}, 0)) * 100.0"
+ sql: "(100.0 * {CUBE.completed_count} / NULLIF({CUBE.count}, 0))"
format: percent
```
diff --git a/docs/pages/product/data-modeling/dynamic/code-reusability-export-and-import.mdx b/docs/pages/product/data-modeling/dynamic/code-reusability-export-and-import.mdx
index 6cd8d3b20dc45..a5e3933db39bd 100644
--- a/docs/pages/product/data-modeling/dynamic/code-reusability-export-and-import.mdx
+++ b/docs/pages/product/data-modeling/dynamic/code-reusability-export-and-import.mdx
@@ -103,19 +103,20 @@ which is located in a parent directory.
├── README.md
├── cube.js
├── package.json
-└── model
- ├── utils.js
- └── sales
+└── model/
+ ├── shared_utils/
+ │ └── utils.js
+ └── sales/
└── orders.js
```
```javascript
// in model/sales/orders.js
-import { capitalize } from "./schema_utils";
+import { capitalize } from "../shared_utils/utils";
```
```javascript
-// in model/utils.js
+// in model/shared_utils/utils.js
export const capitalize = (s) => s.charAt(0).toUpperCase() + s.slice(1);
```
@@ -124,4 +125,4 @@ export const capitalize = (s) => s.charAt(0).toUpperCase() + s.slice(1);
https://developer.mozilla.org/en-US/docs/web/javascript/reference/statements/export
[mdn-js-es6-import]:
https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Statements/import
-[ref-schema-string-time-dims]: /guides/recipes/data-modeling/string-time-dimensions
\ No newline at end of file
+[ref-schema-string-time-dims]: /guides/recipes/data-modeling/string-time-dimensions
diff --git a/docs/pages/product/data-modeling/syntax.mdx b/docs/pages/product/data-modeling/syntax.mdx
index 65f3fa91eabaa..ffffadbb46487 100644
--- a/docs/pages/product/data-modeling/syntax.mdx
+++ b/docs/pages/product/data-modeling/syntax.mdx
@@ -37,7 +37,8 @@ model
Cube supports two ways to define data model files: with [YAML][wiki-yaml] or
JavaScript syntax. YAML data model files should have the `.yml` extension,
-whereas JavaScript data model files should end with `.js`.
+whereas JavaScript data model files should end with `.js`. You can mix YAML and
+JavaScript files within a single data model.
@@ -63,11 +64,15 @@ cubes:
-You define the data model statically or build [dynamic data
+You can define the data model statically or build [dynamic data
models][ref-dynamic-data-models] programmatically. YAML data models use
[Jinja and Python][ref-dynamic-data-models-jinja] whereas JavaScript data
models use [JavaScript][ref-dynamic-data-models-js].
+It is [recommended][ref-style-guide] to default to YAML syntax because of its
+simplicity and readability. However, JavaScript might provide more flexibility
+for dynamic data modeling.
+
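+For illustration, here is the same minimal cube in both syntaxes (a sketch that
+assumes an `orders` table in the database):
+
+```yaml
+# model/cubes/orders.yml
+cubes:
+  - name: orders
+    sql_table: orders
+
+    measures:
+      - name: count
+        type: count
+```
+
+```javascript
+// model/cubes/orders.js
+cube(`orders`, {
+  sql_table: `orders`,
+
+  measures: {
+    count: {
+      type: `count`
+    }
+  }
+});
+```
+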
## Naming
Common rules apply to names of entities within the data model. All names must:
@@ -586,4 +591,5 @@ defining dynamic data models.
[wiki-yaml]: https://en.wikipedia.org/wiki/YAML
[link-snowflake-listagg]: https://docs.snowflake.com/en/sql-reference/functions/listagg
[link-bigquery-stringagg]: https://cloud.google.com/bigquery/docs/reference/standard-sql/functions-and-operators#string_agg
-[link-sql-udf]: https://en.wikipedia.org/wiki/User-defined_function#Databases
\ No newline at end of file
+[link-sql-udf]: https://en.wikipedia.org/wiki/User-defined_function#Databases
+[ref-style-guide]: /guides/style-guide
\ No newline at end of file
diff --git a/docs/pages/product/getting-started/cloud/create-data-model.mdx b/docs/pages/product/getting-started/cloud/create-data-model.mdx
index 440b1a0a29c4a..ea070e534e3fd 100644
--- a/docs/pages/product/getting-started/cloud/create-data-model.mdx
+++ b/docs/pages/product/getting-started/cloud/create-data-model.mdx
@@ -112,7 +112,7 @@ within the `measures` block.
```yaml
- name: completed_percentage
type: number
- sql: "({completed_count} / NULLIF({count}, 0)) * 100.0"
+ sql: "(100.0 * {CUBE.completed_count} / NULLIF({CUBE.count}, 0))"
format: percent
```
@@ -159,7 +159,7 @@ cubes:
- name: completed_percentage
type: number
- sql: "({completed_count} / NULLIF({count}, 0)) * 100.0"
+ sql: "(100.0 * {CUBE.completed_count} / NULLIF({CUBE.count}, 0))"
format: percent
```
diff --git a/docs/pages/product/getting-started/databricks/create-data-model.mdx b/docs/pages/product/getting-started/databricks/create-data-model.mdx
index 502dd0b076592..8c89d9da9d44d 100644
--- a/docs/pages/product/getting-started/databricks/create-data-model.mdx
+++ b/docs/pages/product/getting-started/databricks/create-data-model.mdx
@@ -107,7 +107,7 @@ within the `measures` block.
```yaml
- name: completed_percentage
type: number
- sql: "({completed_count} / NULLIF({count}, 0)) * 100.0"
+ sql: "(100.0 * {CUBE.completed_count} / NULLIF({CUBE.count}, 0))"
format: percent
```
@@ -154,7 +154,7 @@ cubes:
- name: completed_percentage
type: number
- sql: "({completed_count} / NULLIF({count}, 0)) * 100.0"
+ sql: "(100.0 * {CUBE.completed_count} / NULLIF({CUBE.count}, 0))"
format: percent
```
diff --git a/docs/pages/product/workspace/environments.mdx b/docs/pages/product/workspace/environments.mdx
index 1e93631192c03..4a8f0b6012abb 100644
--- a/docs/pages/product/workspace/environments.mdx
+++ b/docs/pages/product/workspace/environments.mdx
@@ -3,13 +3,17 @@
An environment provides access to your data model. Cube Cloud provides the following environments:
- production (default)
- staging - providing access to the data model on a specific branch.
- Each branch in the repository corresponds to a separate staging environment.
+ Each branch in the repository corresponds to a separate staging environment.
+ Changes must be committed to a branch to be viewable in this environment.
- development - providing access to the data model that you are currently working on.
The development environment is automatically created when you enter [development mode][ref-dev-mode].
- It tracks the branch you're on and is updated automatically when you make changes to the data model.
+ One development environment is allocated per user.
+ It tracks the branch you're currently on and is updated automatically when you save changes to the data model.
+  You cannot query the development environment unless your user is in dev mode on the branch you are trying to access.
Each environment provides its own set of API and SQL API endpoints.
-You can access them on the [Data Model][ref-data-model]'s Overview page and BI Integrations SQL API Connection tab.
+You can find them on the [Data Model][ref-data-model]'s Overview page and on the BI Integrations page's SQL API Connection tab.
+To query a development environment's API endpoints, your user must be in dev mode and on the branch that has the saved changes.
)
+ },
+ toc: {
+ extraContent: (
+
+ )
}
};
diff --git a/lerna.json b/lerna.json
index d0b76af684c9d..3aba405fa2bb9 100644
--- a/lerna.json
+++ b/lerna.json
@@ -1,5 +1,5 @@
{
- "version": "0.35.79",
+ "version": "0.35.81",
"npmClient": "yarn",
"useWorkspaces": true,
"packages": [
diff --git a/packages/cubejs-api-gateway/CHANGELOG.md b/packages/cubejs-api-gateway/CHANGELOG.md
index 69ac919655d59..6ea00c6e49d0e 100644
--- a/packages/cubejs-api-gateway/CHANGELOG.md
+++ b/packages/cubejs-api-gateway/CHANGELOG.md
@@ -3,6 +3,26 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.81](https://github.com/cube-js/cube/compare/v0.35.80...v0.35.81) (2024-09-12)
+
+
+### Bug Fixes
+
+* **api-gateway:** fixes an issue where queries to get the total count of results were incorrectly applying sorting from the original query and also were getting default ordering applied when the query ordering was stripped out ([#8060](https://github.com/cube-js/cube/issues/8060)) Thanks [@rdwoodring](https://github.com/rdwoodring)! ([863f370](https://github.com/cube-js/cube/commit/863f3709e97c904f1c800ad98889dc272dbfddbd))
+* Updated jsonwebtoken in all packages ([#8282](https://github.com/cube-js/cube/issues/8282)) Thanks [@jlloyd-widen](https://github.com/jlloyd-widen)! ([ca7c292](https://github.com/cube-js/cube/commit/ca7c292e0122be50ac7adc9b9d4910623d19f840))
+
+
+
+
+
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+**Note:** Version bump only for package @cubejs-backend/api-gateway
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
diff --git a/packages/cubejs-api-gateway/package.json b/packages/cubejs-api-gateway/package.json
index 503d20fea508b..ff56f251eeec2 100644
--- a/packages/cubejs-api-gateway/package.json
+++ b/packages/cubejs-api-gateway/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/api-gateway",
"description": "Cube.js API Gateway",
"author": "Cube Dev, Inc.",
- "version": "0.35.79",
+ "version": "0.35.81",
"repository": {
"type": "git",
"url": "https://github.com/cube-js/cube.git",
@@ -27,7 +27,7 @@
"dist/src/*"
],
"dependencies": {
- "@cubejs-backend/native": "^0.35.79",
+ "@cubejs-backend/native": "^0.35.81",
"@cubejs-backend/shared": "^0.35.67",
"@ungap/structured-clone": "^0.3.4",
"body-parser": "^1.19.0",
@@ -39,7 +39,7 @@
"http-proxy-middleware": "^3.0.0",
"inflection": "^1.12.0",
"joi": "^17.8.3",
- "jsonwebtoken": "^8.3.0",
+ "jsonwebtoken": "^9.0.2",
"jwk-to-pem": "^2.0.4",
"moment": "^2.24.0",
"moment-timezone": "^0.5.27",
@@ -52,7 +52,7 @@
"@cubejs-backend/linter": "^0.35.0",
"@types/express": "^4.17.9",
"@types/jest": "^27",
- "@types/jsonwebtoken": "^8.5.0",
+ "@types/jsonwebtoken": "^9.0.2",
"@types/jwk-to-pem": "^2.0.0",
"@types/mysql": "^2.15.19",
"@types/node-fetch": "^2.5.8",
diff --git a/packages/cubejs-api-gateway/src/gateway.ts b/packages/cubejs-api-gateway/src/gateway.ts
index 5ee0f2af0ab25..6f2d0fa4e0f26 100644
--- a/packages/cubejs-api-gateway/src/gateway.ts
+++ b/packages/cubejs-api-gateway/src/gateway.ts
@@ -1552,9 +1552,13 @@ class ApiGateway {
if (normalizedQuery.total) {
const normalizedTotal = structuredClone(normalizedQuery);
normalizedTotal.totalQuery = true;
+
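+      // Sorting is meaningless for a total count query and may reference
+      // members stripped from it, so drop any ordering (see #8060).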
+ delete normalizedTotal.order;
+
normalizedTotal.limit = null;
normalizedTotal.rowLimit = null;
normalizedTotal.offset = null;
+
const [totalQuery] = await this.getSqlQueriesInternal(
context,
[normalizedTotal],
diff --git a/packages/cubejs-athena-driver/CHANGELOG.md b/packages/cubejs-athena-driver/CHANGELOG.md
index 26ab5e6b3217c..1cda6c97f52d2 100644
--- a/packages/cubejs-athena-driver/CHANGELOG.md
+++ b/packages/cubejs-athena-driver/CHANGELOG.md
@@ -3,6 +3,22 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.81](https://github.com/cube-js/cube/compare/v0.35.80...v0.35.81) (2024-09-12)
+
+**Note:** Version bump only for package @cubejs-backend/athena-driver
+
+
+
+
+
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+**Note:** Version bump only for package @cubejs-backend/athena-driver
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
**Note:** Version bump only for package @cubejs-backend/athena-driver
diff --git a/packages/cubejs-athena-driver/package.json b/packages/cubejs-athena-driver/package.json
index 7d01cf0cad96a..9e4f327b486fc 100644
--- a/packages/cubejs-athena-driver/package.json
+++ b/packages/cubejs-athena-driver/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/athena-driver",
"description": "Cube.js Athena database driver",
"author": "Cube Dev, Inc.",
- "version": "0.35.79",
+ "version": "0.35.81",
"repository": {
"type": "git",
"url": "https://github.com/cube-js/cube.git",
@@ -31,13 +31,13 @@
"@aws-sdk/client-athena": "^3.22.0",
"@aws-sdk/client-s3": "^3.49.0",
"@aws-sdk/s3-request-presigner": "^3.49.0",
- "@cubejs-backend/base-driver": "^0.35.67",
+ "@cubejs-backend/base-driver": "^0.35.81",
"@cubejs-backend/shared": "^0.35.67",
"sqlstring": "^2.3.1"
},
"devDependencies": {
"@cubejs-backend/linter": "^0.35.0",
- "@cubejs-backend/testing-shared": "^0.35.79",
+ "@cubejs-backend/testing-shared": "^0.35.81",
"@types/ramda": "^0.27.40",
"typescript": "~5.2.2"
},
diff --git a/packages/cubejs-backend-cloud/CHANGELOG.md b/packages/cubejs-backend-cloud/CHANGELOG.md
index 4c6d9c1d2990e..2a3e4e2ca774d 100644
--- a/packages/cubejs-backend-cloud/CHANGELOG.md
+++ b/packages/cubejs-backend-cloud/CHANGELOG.md
@@ -3,6 +3,17 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.81](https://github.com/cube-js/cube/compare/v0.35.80...v0.35.81) (2024-09-12)
+
+
+### Bug Fixes
+
+* Updated jsonwebtoken in all packages ([#8282](https://github.com/cube-js/cube/issues/8282)) Thanks [@jlloyd-widen](https://github.com/jlloyd-widen)! ([ca7c292](https://github.com/cube-js/cube/commit/ca7c292e0122be50ac7adc9b9d4910623d19f840))
+
+
+
+
+
## [0.35.67](https://github.com/cube-js/cube/compare/v0.35.66...v0.35.67) (2024-08-07)
**Note:** Version bump only for package @cubejs-backend/cloud
diff --git a/packages/cubejs-backend-cloud/package.json b/packages/cubejs-backend-cloud/package.json
index 41f126dd162ff..003875a0ea107 100644
--- a/packages/cubejs-backend-cloud/package.json
+++ b/packages/cubejs-backend-cloud/package.json
@@ -1,6 +1,6 @@
{
"name": "@cubejs-backend/cloud",
- "version": "0.35.67",
+ "version": "0.35.81",
"description": "Cube Cloud package",
"main": "dist/src/index.js",
"typings": "dist/src/index.d.ts",
@@ -36,7 +36,7 @@
"chokidar": "^3.5.1",
"env-var": "^6.3.0",
"fs-extra": "^9.1.0",
- "jsonwebtoken": "^8.5.1",
+ "jsonwebtoken": "^9.0.2",
"request": "^2.88.2",
"request-promise": "^4.2.5"
},
diff --git a/packages/cubejs-backend-native/CHANGELOG.md b/packages/cubejs-backend-native/CHANGELOG.md
index 7c9ef9fb7d169..e3c2712ab631e 100644
--- a/packages/cubejs-backend-native/CHANGELOG.md
+++ b/packages/cubejs-backend-native/CHANGELOG.md
@@ -3,6 +3,25 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.81](https://github.com/cube-js/cube/compare/v0.35.80...v0.35.81) (2024-09-12)
+
+**Note:** Version bump only for package @cubejs-backend/native
+
+
+
+
+
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+
+### Features
+
+* **cubesql:** Support join with type coercion ([#8608](https://github.com/cube-js/cube/issues/8608)) ([46b3a36](https://github.com/cube-js/cube/commit/46b3a36936f0f00805144714f0dd87a3c50a5e0a))
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
diff --git a/packages/cubejs-backend-native/Cargo.lock b/packages/cubejs-backend-native/Cargo.lock
index d6c4375c21deb..f3b911b4b5687 100644
--- a/packages/cubejs-backend-native/Cargo.lock
+++ b/packages/cubejs-backend-native/Cargo.lock
@@ -681,7 +681,7 @@ dependencies = [
[[package]]
name = "cube-ext"
version = "1.0.0"
-source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=400fa0d889a8a38ca69f36d5750dfb572fc6018e#400fa0d889a8a38ca69f36d5750dfb572fc6018e"
+source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=dcf3e4aa26fd112043ef26fa4a78db5dbd443c86#dcf3e4aa26fd112043ef26fa4a78db5dbd443c86"
dependencies = [
"arrow",
"chrono",
@@ -788,7 +788,6 @@ dependencies = [
"futures-util",
"hashbrown 0.14.3",
"itertools",
- "lazy_static",
"log",
"lru",
"minijinja",
@@ -839,7 +838,7 @@ checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2"
[[package]]
name = "datafusion"
version = "7.0.0"
-source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=400fa0d889a8a38ca69f36d5750dfb572fc6018e#400fa0d889a8a38ca69f36d5750dfb572fc6018e"
+source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=dcf3e4aa26fd112043ef26fa4a78db5dbd443c86#dcf3e4aa26fd112043ef26fa4a78db5dbd443c86"
dependencies = [
"ahash 0.7.8",
"arrow",
@@ -872,7 +871,7 @@ dependencies = [
[[package]]
name = "datafusion-common"
version = "7.0.0"
-source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=400fa0d889a8a38ca69f36d5750dfb572fc6018e#400fa0d889a8a38ca69f36d5750dfb572fc6018e"
+source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=dcf3e4aa26fd112043ef26fa4a78db5dbd443c86#dcf3e4aa26fd112043ef26fa4a78db5dbd443c86"
dependencies = [
"arrow",
"ordered-float 2.10.1",
@@ -883,7 +882,7 @@ dependencies = [
[[package]]
name = "datafusion-data-access"
version = "1.0.0"
-source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=400fa0d889a8a38ca69f36d5750dfb572fc6018e#400fa0d889a8a38ca69f36d5750dfb572fc6018e"
+source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=dcf3e4aa26fd112043ef26fa4a78db5dbd443c86#dcf3e4aa26fd112043ef26fa4a78db5dbd443c86"
dependencies = [
"async-trait",
"chrono",
@@ -896,7 +895,7 @@ dependencies = [
[[package]]
name = "datafusion-expr"
version = "7.0.0"
-source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=400fa0d889a8a38ca69f36d5750dfb572fc6018e#400fa0d889a8a38ca69f36d5750dfb572fc6018e"
+source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=dcf3e4aa26fd112043ef26fa4a78db5dbd443c86#dcf3e4aa26fd112043ef26fa4a78db5dbd443c86"
dependencies = [
"ahash 0.7.8",
"arrow",
@@ -907,7 +906,7 @@ dependencies = [
[[package]]
name = "datafusion-physical-expr"
version = "7.0.0"
-source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=400fa0d889a8a38ca69f36d5750dfb572fc6018e#400fa0d889a8a38ca69f36d5750dfb572fc6018e"
+source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=dcf3e4aa26fd112043ef26fa4a78db5dbd443c86#dcf3e4aa26fd112043ef26fa4a78db5dbd443c86"
dependencies = [
"ahash 0.7.8",
"arrow",
diff --git a/packages/cubejs-backend-native/package.json b/packages/cubejs-backend-native/package.json
index 13462994322a3..5c52a03acd512 100644
--- a/packages/cubejs-backend-native/package.json
+++ b/packages/cubejs-backend-native/package.json
@@ -1,6 +1,6 @@
{
"name": "@cubejs-backend/native",
- "version": "0.35.79",
+ "version": "0.35.81",
"author": "Cube Dev, Inc.",
"description": "Native module for Cube.js (binding to Rust codebase)",
"main": "dist/js/index.js",
@@ -43,7 +43,7 @@
"uuid": "^8.3.2"
},
"dependencies": {
- "@cubejs-backend/cubesql": "^0.35.79",
+ "@cubejs-backend/cubesql": "^0.35.81",
"@cubejs-backend/shared": "^0.35.67",
"@cubejs-infra/post-installer": "^0.0.7"
},
diff --git a/packages/cubejs-base-driver/CHANGELOG.md b/packages/cubejs-base-driver/CHANGELOG.md
index a37aefd5c410c..0adc734481058 100644
--- a/packages/cubejs-base-driver/CHANGELOG.md
+++ b/packages/cubejs-base-driver/CHANGELOG.md
@@ -3,6 +3,17 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.81](https://github.com/cube-js/cube/compare/v0.35.80...v0.35.81) (2024-09-12)
+
+
+### Features
+
+* ksql and rollup pre-aggregations ([#8619](https://github.com/cube-js/cube/issues/8619)) ([cdfbd1e](https://github.com/cube-js/cube/commit/cdfbd1e21ffcf111e40c525f8a391cc0dcee3c11))
+
+
+
+
+
## [0.35.67](https://github.com/cube-js/cube/compare/v0.35.66...v0.35.67) (2024-08-07)
**Note:** Version bump only for package @cubejs-backend/base-driver
diff --git a/packages/cubejs-base-driver/package.json b/packages/cubejs-base-driver/package.json
index 3c5535e1c6819..a93149d67b705 100644
--- a/packages/cubejs-base-driver/package.json
+++ b/packages/cubejs-base-driver/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/base-driver",
"description": "Cube.js Base Driver",
"author": "Cube Dev, Inc.",
- "version": "0.35.67",
+ "version": "0.35.81",
"repository": {
"type": "git",
"url": "https://github.com/cube-js/cube.git",
diff --git a/packages/cubejs-base-driver/src/driver.interface.ts b/packages/cubejs-base-driver/src/driver.interface.ts
index 0122014c72c87..15aecd3518bc5 100644
--- a/packages/cubejs-base-driver/src/driver.interface.ts
+++ b/packages/cubejs-base-driver/src/driver.interface.ts
@@ -90,6 +90,7 @@ export interface StreamTableData extends DownloadTableBase {
export interface StreamingSourceTableData extends DownloadTableBase {
streamingTable: string;
selectStatement?: string;
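+  // Source table schema ({ tableName, types }); CubeStoreDriver serializes it into a CREATE TABLE statement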
+  sourceTable?: any;
partitions?: number;
streamOffset?: string;
streamingSource: {
@@ -130,6 +131,7 @@ export type StreamOptions = {
export type StreamingSourceOptions = {
streamOffset?: boolean;
+  outputColumnTypes?: TableColumn[];
};
export interface DownloadQueryResultsBase {
diff --git a/packages/cubejs-bigquery-driver/CHANGELOG.md b/packages/cubejs-bigquery-driver/CHANGELOG.md
index de4493c882a65..0e7ec28fc587d 100644
--- a/packages/cubejs-bigquery-driver/CHANGELOG.md
+++ b/packages/cubejs-bigquery-driver/CHANGELOG.md
@@ -3,6 +3,22 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.81](https://github.com/cube-js/cube/compare/v0.35.80...v0.35.81) (2024-09-12)
+
+**Note:** Version bump only for package @cubejs-backend/bigquery-driver
+
+
+
+
+
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+**Note:** Version bump only for package @cubejs-backend/bigquery-driver
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
**Note:** Version bump only for package @cubejs-backend/bigquery-driver
diff --git a/packages/cubejs-bigquery-driver/package.json b/packages/cubejs-bigquery-driver/package.json
index 3a970a7b48da9..541f28f6afea0 100644
--- a/packages/cubejs-bigquery-driver/package.json
+++ b/packages/cubejs-bigquery-driver/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/bigquery-driver",
"description": "Cube.js BigQuery database driver",
"author": "Cube Dev, Inc.",
- "version": "0.35.79",
+ "version": "0.35.81",
"repository": {
"type": "git",
"url": "https://github.com/cube-js/cube.git",
@@ -28,7 +28,7 @@
"main": "index.js",
"types": "dist/src/index.d.ts",
"dependencies": {
- "@cubejs-backend/base-driver": "^0.35.67",
+ "@cubejs-backend/base-driver": "^0.35.81",
"@cubejs-backend/dotenv": "^9.0.2",
"@cubejs-backend/shared": "^0.35.67",
"@google-cloud/bigquery": "^7.7.0",
@@ -36,7 +36,7 @@
"ramda": "^0.27.2"
},
"devDependencies": {
- "@cubejs-backend/testing-shared": "^0.35.79",
+ "@cubejs-backend/testing-shared": "^0.35.81",
"@types/big.js": "^6.2.2",
"@types/dedent": "^0.7.0",
"@types/jest": "^27",
diff --git a/packages/cubejs-cli/CHANGELOG.md b/packages/cubejs-cli/CHANGELOG.md
index 74041a952226f..69ecf15d7842d 100644
--- a/packages/cubejs-cli/CHANGELOG.md
+++ b/packages/cubejs-cli/CHANGELOG.md
@@ -3,6 +3,25 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.81](https://github.com/cube-js/cube/compare/v0.35.80...v0.35.81) (2024-09-12)
+
+
+### Bug Fixes
+
+* Updated jsonwebtoken in all packages ([#8282](https://github.com/cube-js/cube/issues/8282)) Thanks [@jlloyd-widen](https://github.com/jlloyd-widen)! ([ca7c292](https://github.com/cube-js/cube/commit/ca7c292e0122be50ac7adc9b9d4910623d19f840))
+
+
+
+
+
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+**Note:** Version bump only for package cubejs-cli
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
**Note:** Version bump only for package cubejs-cli
diff --git a/packages/cubejs-cli/package.json b/packages/cubejs-cli/package.json
index 19bcab23fb737..c900140eb9c62 100644
--- a/packages/cubejs-cli/package.json
+++ b/packages/cubejs-cli/package.json
@@ -2,7 +2,7 @@
"name": "cubejs-cli",
"description": "Cube.js Command Line Interface",
"author": "Cube Dev, Inc.",
- "version": "0.35.79",
+ "version": "0.35.81",
"repository": {
"type": "git",
"url": "https://github.com/cube-js/cube.git",
@@ -31,7 +31,7 @@
],
"dependencies": {
"@cubejs-backend/dotenv": "^9.0.2",
- "@cubejs-backend/schema-compiler": "^0.35.79",
+ "@cubejs-backend/schema-compiler": "^0.35.81",
"@cubejs-backend/shared": "^0.35.67",
"chalk": "^2.4.2",
"cli-progress": "^3.10",
@@ -39,7 +39,7 @@
"cross-spawn": "^7.0.1",
"fs-extra": "^8.1.0",
"inquirer": "^7.1.0",
- "jsonwebtoken": "^8.5.1",
+ "jsonwebtoken": "^9.0.2",
"request": "^2.88.2",
"request-promise": "^4.2.5",
"semver": "^7.3.2",
@@ -50,14 +50,14 @@
},
"devDependencies": {
"@cubejs-backend/linter": "^0.35.0",
- "@cubejs-backend/server": "^0.35.79",
+ "@cubejs-backend/server": "^0.35.81",
"@oclif/command": "^1.8.0",
"@types/cli-progress": "^3.8.0",
"@types/cross-spawn": "^6.0.2",
"@types/fs-extra": "^9.0.2",
"@types/inquirer": "^7.3.1",
"@types/jest": "^27",
- "@types/jsonwebtoken": "^8.5.0",
+ "@types/jsonwebtoken": "^9.0.2",
"@types/node": "^14",
"@types/request-promise": "^4.1.46",
"@types/semver": "^7.3.4",
diff --git a/packages/cubejs-clickhouse-driver/CHANGELOG.md b/packages/cubejs-clickhouse-driver/CHANGELOG.md
index 4ec7a5b286f11..5f05d1c133453 100644
--- a/packages/cubejs-clickhouse-driver/CHANGELOG.md
+++ b/packages/cubejs-clickhouse-driver/CHANGELOG.md
@@ -3,6 +3,22 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.81](https://github.com/cube-js/cube/compare/v0.35.80...v0.35.81) (2024-09-12)
+
+**Note:** Version bump only for package @cubejs-backend/clickhouse-driver
+
+
+
+
+
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+**Note:** Version bump only for package @cubejs-backend/clickhouse-driver
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
**Note:** Version bump only for package @cubejs-backend/clickhouse-driver
diff --git a/packages/cubejs-clickhouse-driver/package.json b/packages/cubejs-clickhouse-driver/package.json
index 2df31ed99e9a7..83053da15cfb1 100644
--- a/packages/cubejs-clickhouse-driver/package.json
+++ b/packages/cubejs-clickhouse-driver/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/clickhouse-driver",
"description": "Cube.js ClickHouse database driver",
"author": "Cube Dev, Inc.",
- "version": "0.35.79",
+ "version": "0.35.81",
"repository": {
"type": "git",
"url": "https://github.com/cube-js/cube.git",
@@ -28,7 +28,7 @@
},
"dependencies": {
"@cubejs-backend/apla-clickhouse": "^1.7",
- "@cubejs-backend/base-driver": "^0.35.67",
+ "@cubejs-backend/base-driver": "^0.35.81",
"@cubejs-backend/shared": "^0.35.67",
"generic-pool": "^3.6.0",
"moment": "^2.24.0",
@@ -38,7 +38,7 @@
"license": "Apache-2.0",
"devDependencies": {
"@cubejs-backend/linter": "^0.35.0",
- "@cubejs-backend/testing-shared": "^0.35.79",
+ "@cubejs-backend/testing-shared": "^0.35.81",
"@types/jest": "^27",
"jest": "27",
"typescript": "~5.2.2"
diff --git a/packages/cubejs-client-vue/CHANGELOG.md b/packages/cubejs-client-vue/CHANGELOG.md
index 78a74f816b991..3d2d026b17784 100644
--- a/packages/cubejs-client-vue/CHANGELOG.md
+++ b/packages/cubejs-client-vue/CHANGELOG.md
@@ -3,6 +3,17 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.81](https://github.com/cube-js/cube.js/compare/v0.35.80...v0.35.81) (2024-09-12)
+
+
+### Bug Fixes
+
+* Updated jsonwebtoken in all packages ([#8282](https://github.com/cube-js/cube.js/issues/8282)) Thanks [@jlloyd-widen](https://github.com/jlloyd-widen)! ([ca7c292](https://github.com/cube-js/cube.js/commit/ca7c292e0122be50ac7adc9b9d4910623d19f840))
+
+
+
+
+
## [0.35.23](https://github.com/cube-js/cube.js/compare/v0.35.22...v0.35.23) (2024-04-25)
**Note:** Version bump only for package @cubejs-client/vue
diff --git a/packages/cubejs-client-vue/package.json b/packages/cubejs-client-vue/package.json
index 775378a5bc8c1..da6e7bbf08522 100644
--- a/packages/cubejs-client-vue/package.json
+++ b/packages/cubejs-client-vue/package.json
@@ -1,6 +1,6 @@
{
"name": "@cubejs-client/vue",
- "version": "0.35.23",
+ "version": "0.35.81",
"engines": {},
"repository": {
"type": "git",
@@ -33,6 +33,7 @@
"ramda": "^0.27.2"
},
"devDependencies": {
+ "@babel/plugin-proposal-optional-chaining": "^7.21.0",
"@vue/babel-preset-app": "^5",
"@vue/cli-plugin-babel": "^5",
"@vue/cli-plugin-eslint": "^5",
diff --git a/packages/cubejs-crate-driver/CHANGELOG.md b/packages/cubejs-crate-driver/CHANGELOG.md
index 6eff588fda812..1dc48bd61d620 100644
--- a/packages/cubejs-crate-driver/CHANGELOG.md
+++ b/packages/cubejs-crate-driver/CHANGELOG.md
@@ -3,6 +3,22 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.81](https://github.com/cube-js/cube/compare/v0.35.80...v0.35.81) (2024-09-12)
+
+**Note:** Version bump only for package @cubejs-backend/crate-driver
+
+
+
+
+
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+**Note:** Version bump only for package @cubejs-backend/crate-driver
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
**Note:** Version bump only for package @cubejs-backend/crate-driver
diff --git a/packages/cubejs-crate-driver/package.json b/packages/cubejs-crate-driver/package.json
index a37e10f89d2ed..bc4adf31fd1b7 100644
--- a/packages/cubejs-crate-driver/package.json
+++ b/packages/cubejs-crate-driver/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/crate-driver",
"description": "Cube.js Crate database driver",
"author": "Cube Dev, Inc.",
- "version": "0.35.79",
+ "version": "0.35.81",
"repository": {
"type": "git",
"url": "https://github.com/cube-js/cube.git",
@@ -28,14 +28,14 @@
"lint:fix": "eslint --fix src/* --ext .ts"
},
"dependencies": {
- "@cubejs-backend/postgres-driver": "^0.35.79",
+ "@cubejs-backend/postgres-driver": "^0.35.81",
"@cubejs-backend/shared": "^0.35.67",
"pg": "^8.7.1"
},
"license": "Apache-2.0",
"devDependencies": {
"@cubejs-backend/linter": "^0.35.0",
- "@cubejs-backend/testing-shared": "^0.35.79",
+ "@cubejs-backend/testing-shared": "^0.35.81",
"testcontainers": "^10.10.4",
"typescript": "~5.2.2"
},
diff --git a/packages/cubejs-cubestore-driver/CHANGELOG.md b/packages/cubejs-cubestore-driver/CHANGELOG.md
index b56efb3fb5060..fda68cbbf3fc5 100644
--- a/packages/cubejs-cubestore-driver/CHANGELOG.md
+++ b/packages/cubejs-cubestore-driver/CHANGELOG.md
@@ -3,6 +3,17 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.81](https://github.com/cube-js/cube/compare/v0.35.80...v0.35.81) (2024-09-12)
+
+
+### Features
+
+* ksql and rollup pre-aggregations ([#8619](https://github.com/cube-js/cube/issues/8619)) ([cdfbd1e](https://github.com/cube-js/cube/commit/cdfbd1e21ffcf111e40c525f8a391cc0dcee3c11))
+
+
+
+
+
## [0.35.78](https://github.com/cube-js/cube/compare/v0.35.77...v0.35.78) (2024-08-27)
**Note:** Version bump only for package @cubejs-backend/cubestore-driver
diff --git a/packages/cubejs-cubestore-driver/package.json b/packages/cubejs-cubestore-driver/package.json
index c67d864b08188..499315868a162 100644
--- a/packages/cubejs-cubestore-driver/package.json
+++ b/packages/cubejs-cubestore-driver/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/cubestore-driver",
"description": "Cube Store driver",
"author": "Cube Dev, Inc.",
- "version": "0.35.78",
+ "version": "0.35.81",
"repository": {
"type": "git",
"url": "https://github.com/cube-js/cube.git",
@@ -26,8 +26,8 @@
"lint:fix": "eslint --fix src/*.ts"
},
"dependencies": {
- "@cubejs-backend/base-driver": "^0.35.67",
- "@cubejs-backend/cubestore": "^0.35.78",
+ "@cubejs-backend/base-driver": "^0.35.81",
+ "@cubejs-backend/cubestore": "^0.35.81",
"@cubejs-backend/shared": "^0.35.67",
"csv-write-stream": "^2.0.0",
"flatbuffers": "23.3.3",
diff --git a/packages/cubejs-cubestore-driver/src/CubeStoreDriver.ts b/packages/cubejs-cubestore-driver/src/CubeStoreDriver.ts
index 8685735979bb3..a38e4e3cfa36e 100644
--- a/packages/cubejs-cubestore-driver/src/CubeStoreDriver.ts
+++ b/packages/cubejs-cubestore-driver/src/CubeStoreDriver.ts
@@ -46,6 +46,7 @@ type CreateTableOptions = {
files?: string[]
aggregations?: string
selectStatement?: string
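+  // Streaming source table schema ({ tableName, types }); serialized into the source_table WITH option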
+ sourceTable?: any
sealAt?: string
delimiter?: string
};
@@ -118,6 +119,9 @@ export class CubeStoreDriver extends BaseDriver implements DriverInterface {
if (options.selectStatement) {
withEntries.push(`select_statement = ${escape(options.selectStatement)}`);
}
+ if (options.sourceTable) {
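+      // Rebuild the source table DDL from its name and generic column types so Cube Store knows the upstream schema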
+ withEntries.push(`source_table = ${escape(`CREATE TABLE ${options.sourceTable.tableName} (${options.sourceTable.types.map(t => `${t.name} ${this.fromGenericType(t.type)}`).join(', ')})`)}`);
+ }
if (options.streamOffset) {
withEntries.push(`stream_offset = '${options.streamOffset}'`);
}
@@ -431,6 +435,7 @@ export class CubeStoreDriver extends BaseDriver implements DriverInterface {
indexes,
files: locations,
selectStatement: tableData.selectStatement,
+ sourceTable: tableData.sourceTable,
streamOffset: tableData.streamOffset,
sealAt
};
diff --git a/packages/cubejs-databricks-jdbc-driver/CHANGELOG.md b/packages/cubejs-databricks-jdbc-driver/CHANGELOG.md
index 1591c48b9acbc..81215256af731 100644
--- a/packages/cubejs-databricks-jdbc-driver/CHANGELOG.md
+++ b/packages/cubejs-databricks-jdbc-driver/CHANGELOG.md
@@ -3,6 +3,22 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.81](https://github.com/cube-js/cube/compare/v0.35.80...v0.35.81) (2024-09-12)
+
+**Note:** Version bump only for package @cubejs-backend/databricks-jdbc-driver
+
+
+
+
+
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+**Note:** Version bump only for package @cubejs-backend/databricks-jdbc-driver
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
diff --git a/packages/cubejs-databricks-jdbc-driver/package.json b/packages/cubejs-databricks-jdbc-driver/package.json
index dd56a6b414289..668cb4311e32a 100644
--- a/packages/cubejs-databricks-jdbc-driver/package.json
+++ b/packages/cubejs-databricks-jdbc-driver/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/databricks-jdbc-driver",
"description": "Cube.js Databricks database driver",
"author": "Cube Dev, Inc.",
- "version": "0.35.79",
+ "version": "0.35.81",
"license": "Apache-2.0",
"repository": {
"type": "git",
@@ -31,9 +31,9 @@
"@aws-sdk/client-s3": "^3.49.0",
"@aws-sdk/s3-request-presigner": "^3.49.0",
"@azure/storage-blob": "^12.9.0",
- "@cubejs-backend/base-driver": "^0.35.67",
- "@cubejs-backend/jdbc-driver": "^0.35.67",
- "@cubejs-backend/schema-compiler": "^0.35.79",
+ "@cubejs-backend/base-driver": "^0.35.81",
+ "@cubejs-backend/jdbc-driver": "^0.35.81",
+ "@cubejs-backend/schema-compiler": "^0.35.81",
"@cubejs-backend/shared": "^0.35.67",
"node-fetch": "^2.6.1",
"ramda": "^0.27.2",
diff --git a/packages/cubejs-dbt-schema-extension/CHANGELOG.md b/packages/cubejs-dbt-schema-extension/CHANGELOG.md
index b423caab746c8..7bafcd6d30849 100644
--- a/packages/cubejs-dbt-schema-extension/CHANGELOG.md
+++ b/packages/cubejs-dbt-schema-extension/CHANGELOG.md
@@ -3,6 +3,22 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.81](https://github.com/cube-js/cube/compare/v0.35.80...v0.35.81) (2024-09-12)
+
+**Note:** Version bump only for package @cubejs-backend/dbt-schema-extension
+
+
+
+
+
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+**Note:** Version bump only for package @cubejs-backend/dbt-schema-extension
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
**Note:** Version bump only for package @cubejs-backend/dbt-schema-extension
diff --git a/packages/cubejs-dbt-schema-extension/package.json b/packages/cubejs-dbt-schema-extension/package.json
index 04ea86c55770c..23243dc3be509 100644
--- a/packages/cubejs-dbt-schema-extension/package.json
+++ b/packages/cubejs-dbt-schema-extension/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/dbt-schema-extension",
"description": "Cube.js dbt Schema Extension",
"author": "Cube Dev, Inc.",
- "version": "0.35.79",
+ "version": "0.35.81",
"repository": {
"type": "git",
"url": "https://github.com/cube-js/cube.git",
@@ -25,14 +25,14 @@
"lint:fix": "eslint --fix src/* --ext .ts,.js"
},
"dependencies": {
- "@cubejs-backend/schema-compiler": "^0.35.79",
+ "@cubejs-backend/schema-compiler": "^0.35.81",
"fs-extra": "^9.1.0",
"inflection": "^1.12.0",
"node-fetch": "^2.6.1"
},
"devDependencies": {
"@cubejs-backend/linter": "^0.35.0",
- "@cubejs-backend/testing": "^0.35.79",
+ "@cubejs-backend/testing": "^0.35.81",
"@types/generic-pool": "^3.1.9",
"@types/jest": "^27",
"jest": "^27",
diff --git a/packages/cubejs-docker/CHANGELOG.md b/packages/cubejs-docker/CHANGELOG.md
index b33aba18c2b36..68d2aef844284 100644
--- a/packages/cubejs-docker/CHANGELOG.md
+++ b/packages/cubejs-docker/CHANGELOG.md
@@ -3,6 +3,22 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.81](https://github.com/cube-js/cube/compare/v0.35.80...v0.35.81) (2024-09-12)
+
+**Note:** Version bump only for package @cubejs-backend/docker
+
+
+
+
+
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+**Note:** Version bump only for package @cubejs-backend/docker
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
**Note:** Version bump only for package @cubejs-backend/docker
diff --git a/packages/cubejs-docker/package.json b/packages/cubejs-docker/package.json
index dc575b609ea12..d4b1d1250c508 100644
--- a/packages/cubejs-docker/package.json
+++ b/packages/cubejs-docker/package.json
@@ -1,6 +1,6 @@
{
"name": "@cubejs-backend/docker",
- "version": "0.35.79",
+ "version": "0.35.81",
"description": "Cube.js In Docker (virtual package)",
"author": "Cube Dev, Inc.",
"license": "Apache-2.0",
@@ -9,33 +9,33 @@
"node": "^14.0.0 || ^16.0.0 || >=17.0.0"
},
"dependencies": {
- "@cubejs-backend/athena-driver": "^0.35.79",
- "@cubejs-backend/bigquery-driver": "^0.35.79",
- "@cubejs-backend/clickhouse-driver": "^0.35.79",
- "@cubejs-backend/crate-driver": "^0.35.79",
- "@cubejs-backend/databricks-jdbc-driver": "^0.35.79",
- "@cubejs-backend/dbt-schema-extension": "^0.35.79",
- "@cubejs-backend/dremio-driver": "^0.35.79",
- "@cubejs-backend/druid-driver": "^0.35.79",
- "@cubejs-backend/duckdb-driver": "^0.35.79",
- "@cubejs-backend/elasticsearch-driver": "^0.35.67",
- "@cubejs-backend/firebolt-driver": "^0.35.79",
- "@cubejs-backend/hive-driver": "^0.35.67",
- "@cubejs-backend/ksql-driver": "^0.35.79",
- "@cubejs-backend/materialize-driver": "^0.35.79",
- "@cubejs-backend/mongobi-driver": "^0.35.67",
- "@cubejs-backend/mssql-driver": "^0.35.67",
- "@cubejs-backend/mysql-driver": "^0.35.79",
- "@cubejs-backend/oracle-driver": "^0.35.67",
- "@cubejs-backend/postgres-driver": "^0.35.79",
- "@cubejs-backend/prestodb-driver": "^0.35.67",
- "@cubejs-backend/questdb-driver": "^0.35.79",
- "@cubejs-backend/redshift-driver": "^0.35.79",
- "@cubejs-backend/server": "^0.35.79",
- "@cubejs-backend/snowflake-driver": "^0.35.67",
- "@cubejs-backend/sqlite-driver": "^0.35.67",
- "@cubejs-backend/trino-driver": "^0.35.79",
- "cubejs-cli": "^0.35.79",
+ "@cubejs-backend/athena-driver": "^0.35.81",
+ "@cubejs-backend/bigquery-driver": "^0.35.81",
+ "@cubejs-backend/clickhouse-driver": "^0.35.81",
+ "@cubejs-backend/crate-driver": "^0.35.81",
+ "@cubejs-backend/databricks-jdbc-driver": "^0.35.81",
+ "@cubejs-backend/dbt-schema-extension": "^0.35.81",
+ "@cubejs-backend/dremio-driver": "^0.35.81",
+ "@cubejs-backend/druid-driver": "^0.35.81",
+ "@cubejs-backend/duckdb-driver": "^0.35.81",
+ "@cubejs-backend/elasticsearch-driver": "^0.35.81",
+ "@cubejs-backend/firebolt-driver": "^0.35.81",
+ "@cubejs-backend/hive-driver": "^0.35.81",
+ "@cubejs-backend/ksql-driver": "^0.35.81",
+ "@cubejs-backend/materialize-driver": "^0.35.81",
+ "@cubejs-backend/mongobi-driver": "^0.35.81",
+ "@cubejs-backend/mssql-driver": "^0.35.81",
+ "@cubejs-backend/mysql-driver": "^0.35.81",
+ "@cubejs-backend/oracle-driver": "^0.35.81",
+ "@cubejs-backend/postgres-driver": "^0.35.81",
+ "@cubejs-backend/prestodb-driver": "^0.35.81",
+ "@cubejs-backend/questdb-driver": "^0.35.81",
+ "@cubejs-backend/redshift-driver": "^0.35.81",
+ "@cubejs-backend/server": "^0.35.81",
+ "@cubejs-backend/snowflake-driver": "^0.35.81",
+ "@cubejs-backend/sqlite-driver": "^0.35.81",
+ "@cubejs-backend/trino-driver": "^0.35.81",
+ "cubejs-cli": "^0.35.81",
"typescript": "~5.2.2"
},
"resolutions": {
diff --git a/packages/cubejs-dremio-driver/CHANGELOG.md b/packages/cubejs-dremio-driver/CHANGELOG.md
index 9b13d66090b7a..136efbcb62673 100644
--- a/packages/cubejs-dremio-driver/CHANGELOG.md
+++ b/packages/cubejs-dremio-driver/CHANGELOG.md
@@ -3,6 +3,22 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.81](https://github.com/cube-js/cube/compare/v0.35.80...v0.35.81) (2024-09-12)
+
+**Note:** Version bump only for package @cubejs-backend/dremio-driver
+
+
+
+
+
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+**Note:** Version bump only for package @cubejs-backend/dremio-driver
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
**Note:** Version bump only for package @cubejs-backend/dremio-driver
diff --git a/packages/cubejs-dremio-driver/package.json b/packages/cubejs-dremio-driver/package.json
index 81507ebec476f..3e8148253d09b 100644
--- a/packages/cubejs-dremio-driver/package.json
+++ b/packages/cubejs-dremio-driver/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/dremio-driver",
"description": "Cube.js Dremio driver",
"author": "Cube Dev, Inc.",
- "version": "0.35.79",
+ "version": "0.35.81",
"repository": {
"type": "git",
"url": "https://github.com/cube-js/cube.git",
@@ -17,8 +17,8 @@
"lint:fix": "eslint driver/*.js"
},
"dependencies": {
- "@cubejs-backend/base-driver": "^0.35.67",
- "@cubejs-backend/schema-compiler": "^0.35.79",
+ "@cubejs-backend/base-driver": "^0.35.81",
+ "@cubejs-backend/schema-compiler": "^0.35.81",
"@cubejs-backend/shared": "^0.35.67",
"axios": "^0.21.1",
"moment-timezone": "^0.5.31",
diff --git a/packages/cubejs-druid-driver/CHANGELOG.md b/packages/cubejs-druid-driver/CHANGELOG.md
index bfd237b5f65af..138c1ae7cb35e 100644
--- a/packages/cubejs-druid-driver/CHANGELOG.md
+++ b/packages/cubejs-druid-driver/CHANGELOG.md
@@ -3,6 +3,22 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.81](https://github.com/cube-js/cube/compare/v0.35.80...v0.35.81) (2024-09-12)
+
+**Note:** Version bump only for package @cubejs-backend/druid-driver
+
+
+
+
+
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+**Note:** Version bump only for package @cubejs-backend/druid-driver
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
**Note:** Version bump only for package @cubejs-backend/druid-driver
diff --git a/packages/cubejs-druid-driver/package.json b/packages/cubejs-druid-driver/package.json
index a99c69e61e858..68ffad5412b9f 100644
--- a/packages/cubejs-druid-driver/package.json
+++ b/packages/cubejs-druid-driver/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/druid-driver",
"description": "Cube.js Druid database driver",
"author": "Cube Dev, Inc.",
- "version": "0.35.79",
+ "version": "0.35.81",
"license": "Apache-2.0",
"repository": {
"type": "git",
@@ -28,8 +28,8 @@
"dist/src/*"
],
"dependencies": {
- "@cubejs-backend/base-driver": "^0.35.67",
- "@cubejs-backend/schema-compiler": "^0.35.79",
+ "@cubejs-backend/base-driver": "^0.35.81",
+ "@cubejs-backend/schema-compiler": "^0.35.81",
"@cubejs-backend/shared": "^0.35.67",
"axios": "^0.21.1",
"moment-timezone": "^0.5.31"
diff --git a/packages/cubejs-duckdb-driver/CHANGELOG.md b/packages/cubejs-duckdb-driver/CHANGELOG.md
index 25f4fad80a61d..50d173555626e 100644
--- a/packages/cubejs-duckdb-driver/CHANGELOG.md
+++ b/packages/cubejs-duckdb-driver/CHANGELOG.md
@@ -3,6 +3,22 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.81](https://github.com/cube-js/cube/compare/v0.35.80...v0.35.81) (2024-09-12)
+
+**Note:** Version bump only for package @cubejs-backend/duckdb-driver
+
+
+
+
+
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+**Note:** Version bump only for package @cubejs-backend/duckdb-driver
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
**Note:** Version bump only for package @cubejs-backend/duckdb-driver
diff --git a/packages/cubejs-duckdb-driver/package.json b/packages/cubejs-duckdb-driver/package.json
index bfc22c9cbe401..646f2b336a047 100644
--- a/packages/cubejs-duckdb-driver/package.json
+++ b/packages/cubejs-duckdb-driver/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/duckdb-driver",
"description": "Cube DuckDB database driver",
"author": "Cube Dev, Inc.",
- "version": "0.35.79",
+ "version": "0.35.81",
"repository": {
"type": "git",
"url": "https://github.com/cube-js/cube.git",
@@ -27,15 +27,15 @@
"lint:fix": "eslint --fix src/* --ext .ts"
},
"dependencies": {
- "@cubejs-backend/base-driver": "^0.35.67",
- "@cubejs-backend/schema-compiler": "^0.35.79",
+ "@cubejs-backend/base-driver": "^0.35.81",
+ "@cubejs-backend/schema-compiler": "^0.35.81",
"@cubejs-backend/shared": "^0.35.67",
"duckdb": "^1.0.0"
},
"license": "Apache-2.0",
"devDependencies": {
"@cubejs-backend/linter": "^0.35.0",
- "@cubejs-backend/testing-shared": "^0.35.79",
+ "@cubejs-backend/testing-shared": "^0.35.81",
"@types/jest": "^27",
"@types/node": "^16",
"jest": "^27",
diff --git a/packages/cubejs-elasticsearch-driver/CHANGELOG.md b/packages/cubejs-elasticsearch-driver/CHANGELOG.md
index d68a8b3048715..c6695e6e8b34e 100644
--- a/packages/cubejs-elasticsearch-driver/CHANGELOG.md
+++ b/packages/cubejs-elasticsearch-driver/CHANGELOG.md
@@ -3,6 +3,14 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.81](https://github.com/cube-js/cube/compare/v0.35.80...v0.35.81) (2024-09-12)
+
+**Note:** Version bump only for package @cubejs-backend/elasticsearch-driver
+
+
+
+
+
## [0.35.67](https://github.com/cube-js/cube/compare/v0.35.66...v0.35.67) (2024-08-07)
**Note:** Version bump only for package @cubejs-backend/elasticsearch-driver
diff --git a/packages/cubejs-elasticsearch-driver/package.json b/packages/cubejs-elasticsearch-driver/package.json
index 4a5aedd052456..09c904f450efe 100644
--- a/packages/cubejs-elasticsearch-driver/package.json
+++ b/packages/cubejs-elasticsearch-driver/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/elasticsearch-driver",
"description": "Cube.js elasticsearch database driver",
"author": "Cube Dev, Inc.",
- "version": "0.35.67",
+ "version": "0.35.81",
"repository": {
"type": "git",
"url": "https://github.com/cube-js/cube.git",
@@ -23,7 +23,7 @@
"driver"
],
"dependencies": {
- "@cubejs-backend/base-driver": "^0.35.67",
+ "@cubejs-backend/base-driver": "^0.35.81",
"@cubejs-backend/shared": "^0.35.67",
"@elastic/elasticsearch": "7.12.0",
"sqlstring": "^2.3.1"
diff --git a/packages/cubejs-firebolt-driver/CHANGELOG.md b/packages/cubejs-firebolt-driver/CHANGELOG.md
index e2218d3ddd7af..a22ff6b8971fa 100644
--- a/packages/cubejs-firebolt-driver/CHANGELOG.md
+++ b/packages/cubejs-firebolt-driver/CHANGELOG.md
@@ -3,6 +3,22 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.81](https://github.com/cube-js/cube/compare/v0.35.80...v0.35.81) (2024-09-12)
+
+**Note:** Version bump only for package @cubejs-backend/firebolt-driver
+
+
+
+
+
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+**Note:** Version bump only for package @cubejs-backend/firebolt-driver
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
**Note:** Version bump only for package @cubejs-backend/firebolt-driver
diff --git a/packages/cubejs-firebolt-driver/package.json b/packages/cubejs-firebolt-driver/package.json
index 595f54fb95c77..f8b6102ecad0b 100644
--- a/packages/cubejs-firebolt-driver/package.json
+++ b/packages/cubejs-firebolt-driver/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/firebolt-driver",
"description": "Cube.js Firebolt database driver",
"author": "Cube Dev, Inc.",
- "version": "0.35.79",
+ "version": "0.35.81",
"repository": {
"type": "git",
"url": "https://github.com/cube-js/cube.git",
@@ -28,15 +28,15 @@
"lint:fix": "eslint --fix src/* --ext .ts"
},
"dependencies": {
- "@cubejs-backend/base-driver": "^0.35.67",
- "@cubejs-backend/schema-compiler": "^0.35.79",
+ "@cubejs-backend/base-driver": "^0.35.81",
+ "@cubejs-backend/schema-compiler": "^0.35.81",
"@cubejs-backend/shared": "^0.35.67",
"firebolt-sdk": "^1.2.0"
},
"license": "Apache-2.0",
"devDependencies": {
"@cubejs-backend/linter": "^0.35.0",
- "@cubejs-backend/testing-shared": "^0.35.79",
+ "@cubejs-backend/testing-shared": "^0.35.81",
"typescript": "~5.2.2"
},
"publishConfig": {
diff --git a/packages/cubejs-hive-driver/CHANGELOG.md b/packages/cubejs-hive-driver/CHANGELOG.md
index 2f6238db08208..312a9d16b2287 100644
--- a/packages/cubejs-hive-driver/CHANGELOG.md
+++ b/packages/cubejs-hive-driver/CHANGELOG.md
@@ -3,6 +3,14 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.81](https://github.com/cube-js/cube/compare/v0.35.80...v0.35.81) (2024-09-12)
+
+**Note:** Version bump only for package @cubejs-backend/hive-driver
+
+
+
+
+
## [0.35.67](https://github.com/cube-js/cube/compare/v0.35.66...v0.35.67) (2024-08-07)
**Note:** Version bump only for package @cubejs-backend/hive-driver
diff --git a/packages/cubejs-hive-driver/package.json b/packages/cubejs-hive-driver/package.json
index 4d61d08769bcd..a1a0670a0970c 100644
--- a/packages/cubejs-hive-driver/package.json
+++ b/packages/cubejs-hive-driver/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/hive-driver",
"description": "Cube.js Hive database driver",
"author": "Cube Dev, Inc.",
- "version": "0.35.67",
+ "version": "0.35.81",
"repository": {
"type": "git",
"url": "https://github.com/cube-js/cube.git",
@@ -17,7 +17,7 @@
"lint:fix": "eslint --fix src/* --ext .ts"
},
"dependencies": {
- "@cubejs-backend/base-driver": "^0.35.67",
+ "@cubejs-backend/base-driver": "^0.35.81",
"@cubejs-backend/shared": "^0.35.67",
"generic-pool": "^3.6.0",
"jshs2": "^0.4.4",
diff --git a/packages/cubejs-jdbc-driver/CHANGELOG.md b/packages/cubejs-jdbc-driver/CHANGELOG.md
index 4a8f9b683b52d..374682dc88764 100644
--- a/packages/cubejs-jdbc-driver/CHANGELOG.md
+++ b/packages/cubejs-jdbc-driver/CHANGELOG.md
@@ -3,6 +3,14 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.81](https://github.com/cube-js/cube/compare/v0.35.80...v0.35.81) (2024-09-12)
+
+**Note:** Version bump only for package @cubejs-backend/jdbc-driver
+
+
+
+
+
## [0.35.67](https://github.com/cube-js/cube/compare/v0.35.66...v0.35.67) (2024-08-07)
**Note:** Version bump only for package @cubejs-backend/jdbc-driver
diff --git a/packages/cubejs-jdbc-driver/package.json b/packages/cubejs-jdbc-driver/package.json
index c061e7e8d919d..641e57929b6fb 100644
--- a/packages/cubejs-jdbc-driver/package.json
+++ b/packages/cubejs-jdbc-driver/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/jdbc-driver",
"description": "Cube.js JDBC database driver",
"author": "Cube Dev, Inc.",
- "version": "0.35.67",
+ "version": "0.35.81",
"repository": {
"type": "git",
"url": "https://github.com/cube-js/cube.git",
@@ -25,7 +25,7 @@
"index.js"
],
"dependencies": {
- "@cubejs-backend/base-driver": "^0.35.67",
+ "@cubejs-backend/base-driver": "^0.35.81",
"generic-pool": "^3.1.7",
"node-java-maven": "^0.1.2",
"sqlstring": "^2.3.0"
diff --git a/packages/cubejs-ksql-driver/CHANGELOG.md b/packages/cubejs-ksql-driver/CHANGELOG.md
index 5674a1e75b476..5029745be7a2c 100644
--- a/packages/cubejs-ksql-driver/CHANGELOG.md
+++ b/packages/cubejs-ksql-driver/CHANGELOG.md
@@ -3,6 +3,25 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.81](https://github.com/cube-js/cube/compare/v0.35.80...v0.35.81) (2024-09-12)
+
+
+### Features
+
+* ksql and rollup pre-aggregations ([#8619](https://github.com/cube-js/cube/issues/8619)) ([cdfbd1e](https://github.com/cube-js/cube/commit/cdfbd1e21ffcf111e40c525f8a391cc0dcee3c11))
+
+
+
+
+
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+**Note:** Version bump only for package @cubejs-backend/ksql-driver
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
**Note:** Version bump only for package @cubejs-backend/ksql-driver
diff --git a/packages/cubejs-ksql-driver/package.json b/packages/cubejs-ksql-driver/package.json
index 72a3b4e477fc4..ebf7910138f21 100644
--- a/packages/cubejs-ksql-driver/package.json
+++ b/packages/cubejs-ksql-driver/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/ksql-driver",
"description": "Cube.js ksql database driver",
"author": "Cube Dev, Inc.",
- "version": "0.35.79",
+ "version": "0.35.81",
"repository": {
"type": "git",
"url": "https://github.com/cube-js/cube.git",
@@ -25,8 +25,8 @@
"lint:fix": "eslint --fix src/* --ext .ts"
},
"dependencies": {
- "@cubejs-backend/base-driver": "^0.35.67",
- "@cubejs-backend/schema-compiler": "^0.35.79",
+ "@cubejs-backend/base-driver": "^0.35.81",
+ "@cubejs-backend/schema-compiler": "^0.35.81",
"@cubejs-backend/shared": "^0.35.67",
"async-mutex": "0.3.2",
"axios": "^0.21.1",
diff --git a/packages/cubejs-ksql-driver/src/KsqlDriver.ts b/packages/cubejs-ksql-driver/src/KsqlDriver.ts
index 4bce1e812475d..651d29c883ec3 100644
--- a/packages/cubejs-ksql-driver/src/KsqlDriver.ts
+++ b/packages/cubejs-ksql-driver/src/KsqlDriver.ts
@@ -10,7 +10,7 @@ import {
} from '@cubejs-backend/shared';
import {
BaseDriver, DriverCapabilities,
- DriverInterface, QueryOptions,
+ DriverInterface, TableColumn,
} from '@cubejs-backend/base-driver';
import { Kafka } from 'kafkajs';
import sqlstring, { format as formatSql } from 'sqlstring';
@@ -64,6 +64,12 @@ type KsqlDescribeResponse = {
}
};
+type KsqlQueryOptions = {
+ outputColumnTypes?: TableColumn[],
+ streamOffset?: string,
+ selectStatement?: string,
+};
+
/**
* KSQL driver class.
*/
@@ -161,7 +167,7 @@ export class KsqlDriver extends BaseDriver implements DriverInterface {
}
}
- public async query(query: string, values?: unknown[], options: { streamOffset?: string } = {}): Promise<any> {
+ public async query(query: string, values?: unknown[], options: KsqlQueryOptions = {}): Promise<any> {
if (query.toLowerCase().startsWith('select')) {
throw new Error('Select queries for ksql allowed only from Cube Store. In order to query ksql create pre-aggregation first.');
}
@@ -261,13 +267,15 @@ export class KsqlDriver extends BaseDriver implements DriverInterface {
}
// eslint-disable-next-line @typescript-eslint/no-unused-vars
- public loadPreAggregationIntoTable(preAggregationTableName: string, loadSql: string, params: any[], options: any): Promise<any> {
- return this.query(loadSql.replace(preAggregationTableName, this.tableDashName(preAggregationTableName)), params, { streamOffset: options?.streamOffset });
+ public loadPreAggregationIntoTable(preAggregationTableName: string, loadSql: string, params: any[], options: KsqlQueryOptions): Promise<any> {
+ const { streamOffset } = options || {};
+ return this.query(loadSql.replace(preAggregationTableName, this.tableDashName(preAggregationTableName)), params, { streamOffset });
}
// eslint-disable-next-line @typescript-eslint/no-unused-vars
public async downloadTable(table: string, options: any): Promise<any> {
- return this.getStreamingTableData(this.tableDashName(table), { streamOffset: options?.streamOffset });
+ const { streamOffset } = options || {};
+ return this.getStreamingTableData(this.tableDashName(table), { streamOffset });
}
// eslint-disable-next-line @typescript-eslint/no-unused-vars
@@ -278,11 +286,12 @@ export class KsqlDriver extends BaseDriver implements DriverInterface {
}
const selectStatement = sqlstring.format(query, params);
- return this.getStreamingTableData(table, { selectStatement, streamOffset: options?.streamOffset });
+ const { streamOffset, outputColumnTypes } = options || {};
+ return this.getStreamingTableData(table, { selectStatement, streamOffset, outputColumnTypes });
}
- private async getStreamingTableData(streamingTable: string, options: { selectStatement?: string, streamOffset?: string } = {}) {
- const { selectStatement, streamOffset } = options;
+ private async getStreamingTableData(streamingTable: string, options: KsqlQueryOptions = {}) {
+ const { selectStatement, streamOffset, outputColumnTypes } = options;
const describe = await this.describeTable(streamingTable);
const name = this.config.streamingSourceName || 'default';
const kafkaDirectDownload = !!this.config.kafkaHost;
@@ -304,13 +313,20 @@ export class KsqlDriver extends BaseDriver implements DriverInterface {
url: this.config.url
}
};
+ const sourceTableTypes = await this.tableColumnTypes(streamingTable, describe);
+ streamingTable = kafkaDirectDownload ? describe.sourceDescription?.topic : streamingTable;
+
return {
- types: await this.tableColumnTypes(streamingTable, describe),
+ types: outputColumnTypes || sourceTableTypes,
partitions: describe.sourceDescription?.partitions,
- streamingTable: kafkaDirectDownload ? describe.sourceDescription?.topic : streamingTable,
+ streamingTable,
streamOffset,
selectStatement,
- streamingSource
+ streamingSource,
+ sourceTable: outputColumnTypes ? {
+ types: sourceTableTypes,
+ tableName: streamingTable
+ } : null
};
}
@@ -344,7 +360,8 @@ export class KsqlDriver extends BaseDriver implements DriverInterface {
public capabilities(): DriverCapabilities {
return {
- streamingSource: true
+ streamingSource: true,
+ unloadWithoutTempTable: true,
};
}
}
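
The KsqlDriver changes above thread the new KsqlQueryOptions down to getStreamingTableData: declared outputColumnTypes now win over the schema introspected from ksql DESCRIBE, while the introspected schema is preserved under sourceTable. A minimal sketch of that selection logic (assumed shapes, not the driver's exact wiring; sample data is hypothetical):

// When a pre-aggregation declares outputColumnTypes, those types are returned
// as the table's types; the introspected source schema is kept alongside so
// the loader still knows the stream's real layout.
function resolveStreamingTypes(options, sourceTableTypes, streamingTable) {
  const { outputColumnTypes } = options;
  return {
    types: outputColumnTypes || sourceTableTypes,
    sourceTable: outputColumnTypes
      ? { types: sourceTableTypes, tableName: streamingTable }
      : null,
  };
}

// Hypothetical example: declared varchar output over an introspected INT column.
const resolved = resolveStreamingTypes(
  { outputColumnTypes: [{ name: 'status', type: 'varchar(255)' }] },
  [{ name: 'status', type: 'INT' }],
  'ORDERS-STREAM'
);
console.log(resolved.types[0].type);            // 'varchar(255)'
console.log(resolved.sourceTable.types[0].type); // 'INT'
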
diff --git a/packages/cubejs-ksql-driver/src/KsqlQuery.ts b/packages/cubejs-ksql-driver/src/KsqlQuery.ts
index 082a249a78c40..3af3a71f4f16e 100644
--- a/packages/cubejs-ksql-driver/src/KsqlQuery.ts
+++ b/packages/cubejs-ksql-driver/src/KsqlQuery.ts
@@ -55,6 +55,10 @@ export class KsqlQuery extends BaseQuery {
return `\`${name}\``;
}
+ public castToString(sql: string) {
+ return `CAST(${sql} as varchar(255))`;
+ }
+
public concatStringsSql(strings: string[]) {
return `CONCAT(${strings.join(', ')})`;
}
@@ -111,7 +115,7 @@ export class KsqlQuery extends BaseQuery {
}
public static extractTableFromSimpleSelectAsteriskQuery(sql: string) {
- const match = sql.match(/^\s*select\s+\*\s+from\s+([a-zA-Z0-9_\-`".*]+)\s*/i);
+ const match = sql.replace(/\n/g, ' ').match(/^\s*select\s+.*\s+from\s+([a-zA-Z0-9_\-`".*]+)\s*/i);
return match && match[1];
}
}
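
The KsqlQuery change relaxes extractTableFromSimpleSelectAsteriskQuery: the old pattern only recognized a literal `select * from <table>`, while the new one flattens newlines and accepts any projection list, which matters once castToString wraps columns in CAST expressions. A quick illustrative check (hypothetical SQL string):

const OLD = /^\s*select\s+\*\s+from\s+([a-zA-Z0-9_\-`".*]+)\s*/i;
const NEW = /^\s*select\s+.*\s+from\s+([a-zA-Z0-9_\-`".*]+)\s*/i;

const sql = 'SELECT `id`, CAST(`status` as varchar(255))\nFROM orders-stream';
console.log(sql.match(OLD));                        // null: old pattern required a bare "*"
console.log(sql.replace(/\n/g, ' ').match(NEW)[1]); // 'orders-stream'
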
diff --git a/packages/cubejs-materialize-driver/CHANGELOG.md b/packages/cubejs-materialize-driver/CHANGELOG.md
index 3fbdc3a260bc6..9f311ea540091 100644
--- a/packages/cubejs-materialize-driver/CHANGELOG.md
+++ b/packages/cubejs-materialize-driver/CHANGELOG.md
@@ -3,6 +3,22 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.81](https://github.com/cube-js/cube/compare/v0.35.80...v0.35.81) (2024-09-12)
+
+**Note:** Version bump only for package @cubejs-backend/materialize-driver
+
+
+
+
+
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+**Note:** Version bump only for package @cubejs-backend/materialize-driver
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
**Note:** Version bump only for package @cubejs-backend/materialize-driver
diff --git a/packages/cubejs-materialize-driver/package.json b/packages/cubejs-materialize-driver/package.json
index c94a92bb8303b..04f991cd20478 100644
--- a/packages/cubejs-materialize-driver/package.json
+++ b/packages/cubejs-materialize-driver/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/materialize-driver",
"description": "Cube.js Materialize database driver",
"author": "Cube Dev, Inc.",
- "version": "0.35.79",
+ "version": "0.35.81",
"repository": {
"type": "git",
"url": "https://github.com/cube-js/cube.git",
@@ -27,8 +27,8 @@
"lint:fix": "eslint --fix src/* --ext .ts"
},
"dependencies": {
- "@cubejs-backend/base-driver": "^0.35.67",
- "@cubejs-backend/postgres-driver": "^0.35.79",
+ "@cubejs-backend/base-driver": "^0.35.81",
+ "@cubejs-backend/postgres-driver": "^0.35.81",
"@cubejs-backend/shared": "^0.35.67",
"@types/pg": "^8.6.0",
"pg": "^8.6.0",
@@ -38,7 +38,7 @@
"license": "Apache-2.0",
"devDependencies": {
"@cubejs-backend/linter": "^0.35.0",
- "@cubejs-backend/testing": "^0.35.79",
+ "@cubejs-backend/testing": "^0.35.81",
"typescript": "~5.2.2"
},
"publishConfig": {
diff --git a/packages/cubejs-mongobi-driver/CHANGELOG.md b/packages/cubejs-mongobi-driver/CHANGELOG.md
index 46143f1e73e4c..92138a71fef49 100644
--- a/packages/cubejs-mongobi-driver/CHANGELOG.md
+++ b/packages/cubejs-mongobi-driver/CHANGELOG.md
@@ -3,6 +3,14 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.81](https://github.com/cube-js/cube/compare/v0.35.80...v0.35.81) (2024-09-12)
+
+**Note:** Version bump only for package @cubejs-backend/mongobi-driver
+
+
+
+
+
## [0.35.67](https://github.com/cube-js/cube/compare/v0.35.66...v0.35.67) (2024-08-07)
**Note:** Version bump only for package @cubejs-backend/mongobi-driver
diff --git a/packages/cubejs-mongobi-driver/package.json b/packages/cubejs-mongobi-driver/package.json
index de408fa0dcc50..84af7bab1d40c 100644
--- a/packages/cubejs-mongobi-driver/package.json
+++ b/packages/cubejs-mongobi-driver/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/mongobi-driver",
"description": "Cube.js MongoBI driver",
"author": "krunalsabnis@gmail.com",
- "version": "0.35.67",
+ "version": "0.35.81",
"repository": {
"type": "git",
"url": "https://github.com/cube-js/cube.git",
@@ -27,7 +27,7 @@
"integration:mongobi": "jest dist/test"
},
"dependencies": {
- "@cubejs-backend/base-driver": "^0.35.67",
+ "@cubejs-backend/base-driver": "^0.35.81",
"@cubejs-backend/shared": "^0.35.67",
"@types/node": "^16",
"generic-pool": "^3.6.0",
diff --git a/packages/cubejs-mssql-driver/CHANGELOG.md b/packages/cubejs-mssql-driver/CHANGELOG.md
index ccf390eccfce1..58fda6d114329 100644
--- a/packages/cubejs-mssql-driver/CHANGELOG.md
+++ b/packages/cubejs-mssql-driver/CHANGELOG.md
@@ -3,6 +3,14 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.81](https://github.com/cube-js/cube/compare/v0.35.80...v0.35.81) (2024-09-12)
+
+**Note:** Version bump only for package @cubejs-backend/mssql-driver
+
+
+
+
+
## [0.35.67](https://github.com/cube-js/cube/compare/v0.35.66...v0.35.67) (2024-08-07)
**Note:** Version bump only for package @cubejs-backend/mssql-driver
diff --git a/packages/cubejs-mssql-driver/package.json b/packages/cubejs-mssql-driver/package.json
index 29cfa686cdec9..788f99c8a4651 100644
--- a/packages/cubejs-mssql-driver/package.json
+++ b/packages/cubejs-mssql-driver/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/mssql-driver",
"description": "Cube.js MS SQL database driver",
"author": "Cube Dev, Inc.",
- "version": "0.35.67",
+ "version": "0.35.81",
"repository": {
"type": "git",
"url": "https://github.com/cube-js/cube.git",
@@ -13,7 +13,7 @@
},
"main": "driver/MSSqlDriver.js",
"dependencies": {
- "@cubejs-backend/base-driver": "^0.35.67",
+ "@cubejs-backend/base-driver": "^0.35.81",
"mssql": "^10.0.2"
},
"devDependencies": {
diff --git a/packages/cubejs-mysql-aurora-serverless-driver/CHANGELOG.md b/packages/cubejs-mysql-aurora-serverless-driver/CHANGELOG.md
index f32f6d96e5216..28fec98f04048 100644
--- a/packages/cubejs-mysql-aurora-serverless-driver/CHANGELOG.md
+++ b/packages/cubejs-mysql-aurora-serverless-driver/CHANGELOG.md
@@ -3,6 +3,14 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.81](https://github.com/cube-js/cube/compare/v0.35.80...v0.35.81) (2024-09-12)
+
+**Note:** Version bump only for package @cubejs-backend/mysql-aurora-serverless-driver
+
+
+
+
+
## [0.35.67](https://github.com/cube-js/cube/compare/v0.35.66...v0.35.67) (2024-08-07)
**Note:** Version bump only for package @cubejs-backend/mysql-aurora-serverless-driver
diff --git a/packages/cubejs-mysql-aurora-serverless-driver/package.json b/packages/cubejs-mysql-aurora-serverless-driver/package.json
index 2f948fcdd5bab..c26c662040ab1 100644
--- a/packages/cubejs-mysql-aurora-serverless-driver/package.json
+++ b/packages/cubejs-mysql-aurora-serverless-driver/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/mysql-aurora-serverless-driver",
"description": "Cube.js Aurora Serverless Mysql database driver",
"author": "Cube Dev, Inc.",
- "version": "0.35.67",
+ "version": "0.35.81",
"repository": {
"type": "git",
"url": "https://github.com/cube-js/cube.git",
@@ -21,7 +21,7 @@
"lint": "eslint driver/*.js test/*.js"
},
"dependencies": {
- "@cubejs-backend/base-driver": "^0.35.67",
+ "@cubejs-backend/base-driver": "^0.35.81",
"@cubejs-backend/shared": "^0.35.67",
"@types/mysql": "^2.15.15",
"aws-sdk": "^2.787.0",
diff --git a/packages/cubejs-mysql-driver/CHANGELOG.md b/packages/cubejs-mysql-driver/CHANGELOG.md
index 8ad803649b64b..b263af17bc702 100644
--- a/packages/cubejs-mysql-driver/CHANGELOG.md
+++ b/packages/cubejs-mysql-driver/CHANGELOG.md
@@ -3,6 +3,22 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.81](https://github.com/cube-js/cube/compare/v0.35.80...v0.35.81) (2024-09-12)
+
+**Note:** Version bump only for package @cubejs-backend/mysql-driver
+
+
+
+
+
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+**Note:** Version bump only for package @cubejs-backend/mysql-driver
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
**Note:** Version bump only for package @cubejs-backend/mysql-driver
diff --git a/packages/cubejs-mysql-driver/package.json b/packages/cubejs-mysql-driver/package.json
index eb6b465ae64a4..73ee02d657848 100644
--- a/packages/cubejs-mysql-driver/package.json
+++ b/packages/cubejs-mysql-driver/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/mysql-driver",
"description": "Cube.js Mysql database driver",
"author": "Cube Dev, Inc.",
- "version": "0.35.79",
+ "version": "0.35.81",
"repository": {
"type": "git",
"url": "https://github.com/cube-js/cube.git",
@@ -27,7 +27,7 @@
"lint:fix": "eslint --fix src/* test/* --ext .ts,.js"
},
"dependencies": {
- "@cubejs-backend/base-driver": "^0.35.67",
+ "@cubejs-backend/base-driver": "^0.35.81",
"@cubejs-backend/shared": "^0.35.67",
"@types/mysql": "^2.15.21",
"generic-pool": "^3.6.0",
@@ -35,7 +35,7 @@
},
"devDependencies": {
"@cubejs-backend/linter": "^0.35.0",
- "@cubejs-backend/testing-shared": "^0.35.79",
+ "@cubejs-backend/testing-shared": "^0.35.81",
"@types/generic-pool": "^3.1.9",
"@types/jest": "^27",
"jest": "^27",
diff --git a/packages/cubejs-oracle-driver/CHANGELOG.md b/packages/cubejs-oracle-driver/CHANGELOG.md
index edfd1fe839f4b..bfcd992e6aeef 100644
--- a/packages/cubejs-oracle-driver/CHANGELOG.md
+++ b/packages/cubejs-oracle-driver/CHANGELOG.md
@@ -3,6 +3,14 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.81](https://github.com/cube-js/cube/compare/v0.35.80...v0.35.81) (2024-09-12)
+
+**Note:** Version bump only for package @cubejs-backend/oracle-driver
+
+
+
+
+
## [0.35.67](https://github.com/cube-js/cube/compare/v0.35.66...v0.35.67) (2024-08-07)
**Note:** Version bump only for package @cubejs-backend/oracle-driver
diff --git a/packages/cubejs-oracle-driver/package.json b/packages/cubejs-oracle-driver/package.json
index 317717fbbad31..bc2c287863f24 100644
--- a/packages/cubejs-oracle-driver/package.json
+++ b/packages/cubejs-oracle-driver/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/oracle-driver",
"description": "Cube.js oracle database driver",
"author": "Cube Dev, Inc.",
- "version": "0.35.67",
+ "version": "0.35.81",
"repository": {
"type": "git",
"url": "https://github.com/cube-js/cube.git",
@@ -13,7 +13,7 @@
},
"main": "driver/OracleDriver.js",
"dependencies": {
- "@cubejs-backend/base-driver": "^0.35.67",
+ "@cubejs-backend/base-driver": "^0.35.81",
"ramda": "^0.27.0"
},
"optionalDependencies": {
diff --git a/packages/cubejs-playground/CHANGELOG.md b/packages/cubejs-playground/CHANGELOG.md
index 619418fc08250..a39a666f7d245 100644
--- a/packages/cubejs-playground/CHANGELOG.md
+++ b/packages/cubejs-playground/CHANGELOG.md
@@ -3,6 +3,17 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.81](https://github.com/cube-js/cube/compare/v0.35.80...v0.35.81) (2024-09-12)
+
+
+### Bug Fixes
+
+* Updated jsonwebtoken in all packages ([#8282](https://github.com/cube-js/cube/issues/8282)) Thanks [@jlloyd-widen](https://github.com/jlloyd-widen)! ([ca7c292](https://github.com/cube-js/cube/commit/ca7c292e0122be50ac7adc9b9d4910623d19f840))
+
+
+
+
+
## [0.35.48](https://github.com/cube-js/cube/compare/v0.35.47...v0.35.48) (2024-06-14)
**Note:** Version bump only for package @cubejs-client/playground
diff --git a/packages/cubejs-playground/package.json b/packages/cubejs-playground/package.json
index 1f87bf7064e81..b113426c66244 100644
--- a/packages/cubejs-playground/package.json
+++ b/packages/cubejs-playground/package.json
@@ -1,7 +1,7 @@
{
"name": "@cubejs-client/playground",
"author": "Cube Dev, Inc.",
- "version": "0.35.48",
+ "version": "0.35.81",
"engines": {},
"repository": {
"type": "git",
@@ -79,7 +79,7 @@
"eslint-plugin-react": "^7.20.0",
"fs-extra": "^8.1.0",
"graphql": "^15.8.0",
- "jsdom": "^16.7.0",
+ "jsdom": "^24.0.0",
"prismjs": "^1.25.0",
"react": "^17.0.1",
"react-dom": "^17.0.1",
diff --git a/packages/cubejs-postgres-driver/CHANGELOG.md b/packages/cubejs-postgres-driver/CHANGELOG.md
index edda8532676ad..ad06f19002a52 100644
--- a/packages/cubejs-postgres-driver/CHANGELOG.md
+++ b/packages/cubejs-postgres-driver/CHANGELOG.md
@@ -3,6 +3,22 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.81](https://github.com/cube-js/cube/compare/v0.35.80...v0.35.81) (2024-09-12)
+
+**Note:** Version bump only for package @cubejs-backend/postgres-driver
+
+
+
+
+
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+**Note:** Version bump only for package @cubejs-backend/postgres-driver
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
**Note:** Version bump only for package @cubejs-backend/postgres-driver
diff --git a/packages/cubejs-postgres-driver/package.json b/packages/cubejs-postgres-driver/package.json
index e6bd93cf7b632..47b3231ff763e 100644
--- a/packages/cubejs-postgres-driver/package.json
+++ b/packages/cubejs-postgres-driver/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/postgres-driver",
"description": "Cube.js Postgres database driver",
"author": "Cube Dev, Inc.",
- "version": "0.35.79",
+ "version": "0.35.81",
"repository": {
"type": "git",
"url": "https://github.com/cube-js/cube.git",
@@ -27,7 +27,7 @@
"lint:fix": "eslint --fix src/* --ext .ts"
},
"dependencies": {
- "@cubejs-backend/base-driver": "^0.35.67",
+ "@cubejs-backend/base-driver": "^0.35.81",
"@cubejs-backend/shared": "^0.35.67",
"@types/pg": "^8.6.0",
"@types/pg-query-stream": "^1.0.3",
@@ -38,7 +38,7 @@
"license": "Apache-2.0",
"devDependencies": {
"@cubejs-backend/linter": "^0.35.0",
- "@cubejs-backend/testing-shared": "^0.35.79",
+ "@cubejs-backend/testing-shared": "^0.35.81",
"testcontainers": "^10.10.4",
"typescript": "~5.2.2"
},
diff --git a/packages/cubejs-prestodb-driver/CHANGELOG.md b/packages/cubejs-prestodb-driver/CHANGELOG.md
index 435b7c9b8a5a9..d7e71d2aa5825 100644
--- a/packages/cubejs-prestodb-driver/CHANGELOG.md
+++ b/packages/cubejs-prestodb-driver/CHANGELOG.md
@@ -3,6 +3,14 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.81](https://github.com/cube-js/cube/compare/v0.35.80...v0.35.81) (2024-09-12)
+
+**Note:** Version bump only for package @cubejs-backend/prestodb-driver
+
+
+
+
+
## [0.35.67](https://github.com/cube-js/cube/compare/v0.35.66...v0.35.67) (2024-08-07)
**Note:** Version bump only for package @cubejs-backend/prestodb-driver
diff --git a/packages/cubejs-prestodb-driver/package.json b/packages/cubejs-prestodb-driver/package.json
index 1ba6483fe5b17..b9ac4a57e0506 100644
--- a/packages/cubejs-prestodb-driver/package.json
+++ b/packages/cubejs-prestodb-driver/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/prestodb-driver",
"description": "Cube.js Presto database driver",
"author": "Cube Dev, Inc.",
- "version": "0.35.67",
+ "version": "0.35.81",
"repository": {
"type": "git",
"url": "https://github.com/cube-js/cube.git",
@@ -27,7 +27,7 @@
"lint:fix": "eslint --fix src/* --ext .ts"
},
"dependencies": {
- "@cubejs-backend/base-driver": "^0.35.67",
+ "@cubejs-backend/base-driver": "^0.35.81",
"@cubejs-backend/shared": "^0.35.67",
"presto-client": "^0.12.2",
"ramda": "^0.27.0",
diff --git a/packages/cubejs-query-orchestrator/CHANGELOG.md b/packages/cubejs-query-orchestrator/CHANGELOG.md
index 2b748140fc523..4e2d2a5baba8d 100644
--- a/packages/cubejs-query-orchestrator/CHANGELOG.md
+++ b/packages/cubejs-query-orchestrator/CHANGELOG.md
@@ -3,6 +3,17 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.81](https://github.com/cube-js/cube/compare/v0.35.80...v0.35.81) (2024-09-12)
+
+
+### Features
+
+* ksql and rollup pre-aggregations ([#8619](https://github.com/cube-js/cube/issues/8619)) ([cdfbd1e](https://github.com/cube-js/cube/commit/cdfbd1e21ffcf111e40c525f8a391cc0dcee3c11))
+
+
+
+
+
## [0.35.78](https://github.com/cube-js/cube/compare/v0.35.77...v0.35.78) (2024-08-27)
**Note:** Version bump only for package @cubejs-backend/query-orchestrator
diff --git a/packages/cubejs-query-orchestrator/package.json b/packages/cubejs-query-orchestrator/package.json
index ccfcdb58fb9f3..2fcd4e57eb1f2 100644
--- a/packages/cubejs-query-orchestrator/package.json
+++ b/packages/cubejs-query-orchestrator/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/query-orchestrator",
"description": "Cube.js Query Orchestrator and Cache",
"author": "Cube Dev, Inc.",
- "version": "0.35.78",
+ "version": "0.35.81",
"repository": {
"type": "git",
"url": "https://github.com/cube-js/cube.git",
@@ -30,8 +30,8 @@
"dist/src/*"
],
"dependencies": {
- "@cubejs-backend/base-driver": "^0.35.67",
- "@cubejs-backend/cubestore-driver": "^0.35.78",
+ "@cubejs-backend/base-driver": "^0.35.81",
+ "@cubejs-backend/cubestore-driver": "^0.35.81",
"@cubejs-backend/shared": "^0.35.67",
"csv-write-stream": "^2.0.0",
"es5-ext": "0.10.53",
diff --git a/packages/cubejs-query-orchestrator/src/orchestrator/PreAggregations.ts b/packages/cubejs-query-orchestrator/src/orchestrator/PreAggregations.ts
index d660be85bc42b..82935e33a94a9 100644
--- a/packages/cubejs-query-orchestrator/src/orchestrator/PreAggregations.ts
+++ b/packages/cubejs-query-orchestrator/src/orchestrator/PreAggregations.ts
@@ -104,6 +104,9 @@ function getStructureVersion(preAggregation) {
if (preAggregation.streamOffset) {
versionArray.push(preAggregation.streamOffset);
}
+ if (preAggregation.outputColumnTypes) {
+ versionArray.push(preAggregation.outputColumnTypes);
+ }
return version(versionArray.length === 1 ? versionArray[0] : versionArray);
}
@@ -815,6 +818,9 @@ export class PreAggregationLoader {
if (this.preAggregation.streamOffset) {
versionArray.push(this.preAggregation.streamOffset);
}
+ if (this.preAggregation.outputColumnTypes) {
+ versionArray.push(this.preAggregation.outputColumnTypes);
+ }
versionArray.push(invalidationKeys);
return version(versionArray);
}
@@ -964,7 +970,11 @@ export class PreAggregationLoader {
targetTableName,
query,
params,
- { streamOffset: this.preAggregation.streamOffset, ...queryOptions }
+ {
+ streamOffset: this.preAggregation.streamOffset,
+ outputColumnTypes: this.preAggregation.outputColumnTypes,
+ ...queryOptions
+ }
));
await this.createIndexes(client, newVersionEntry, saveCancelFn, queryOptions);
@@ -1107,7 +1117,11 @@ export class PreAggregationLoader {
targetTableName,
query,
params,
- { streamOffset: this.preAggregation.streamOffset, ...queryOptions }
+ {
+ streamOffset: this.preAggregation.streamOffset,
+ outputColumnTypes: this.preAggregation.outputColumnTypes,
+ ...queryOptions
+ }
));
return queryOptions;
@@ -1156,6 +1170,7 @@ export class PreAggregationLoader {
sql,
params, {
streamOffset: this.preAggregation.streamOffset,
+ outputColumnTypes: this.preAggregation.outputColumnTypes,
...queryOptions,
...capabilities,
...this.getStreamingOptions(),
@@ -1261,7 +1276,11 @@ export class PreAggregationLoader {
tableData.rowStream = stream;
}
} else {
- tableData = await saveCancelFn(client.downloadTable(table, { streamOffset: this.preAggregation.streamOffset, ...externalDriverCapabilities }));
+ tableData = await saveCancelFn(client.downloadTable(table, {
+ streamOffset: this.preAggregation.streamOffset,
+ outputColumnTypes: this.preAggregation.outputColumnTypes,
+ ...externalDriverCapabilities
+ }));
}
if (!tableData.types) {
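
Including outputColumnTypes in both version arrays above means a change to the declared output schema yields a new structure version, so the pre-aggregation table is rebuilt instead of being reused with stale column types. A sketch of the idea; the hash below is a hypothetical stand-in for the real version() helper:

const crypto = require('crypto');

// Hypothetical stand-in: hash whatever participates in the structure version.
const version = (v) => crypto.createHash('md5').update(JSON.stringify(v)).digest('hex').slice(0, 8);

function structureVersion(preAggregation) {
  const versionArray = [preAggregation.loadSql];
  if (preAggregation.streamOffset) versionArray.push(preAggregation.streamOffset);
  if (preAggregation.outputColumnTypes) versionArray.push(preAggregation.outputColumnTypes);
  return version(versionArray.length === 1 ? versionArray[0] : versionArray);
}

const base = { loadSql: 'SELECT * FROM orders' };
const typed = { ...base, outputColumnTypes: [{ name: 'status', type: 'varchar(255)' }] };
console.log(structureVersion(base) !== structureVersion(typed)); // true: rebuild required
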
diff --git a/packages/cubejs-questdb-driver/CHANGELOG.md b/packages/cubejs-questdb-driver/CHANGELOG.md
index 1d1549136d208..9ddc4ae416511 100644
--- a/packages/cubejs-questdb-driver/CHANGELOG.md
+++ b/packages/cubejs-questdb-driver/CHANGELOG.md
@@ -3,6 +3,22 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.81](https://github.com/cube-js/cube/compare/v0.35.80...v0.35.81) (2024-09-12)
+
+**Note:** Version bump only for package @cubejs-backend/questdb-driver
+
+
+
+
+
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+**Note:** Version bump only for package @cubejs-backend/questdb-driver
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
**Note:** Version bump only for package @cubejs-backend/questdb-driver
diff --git a/packages/cubejs-questdb-driver/package.json b/packages/cubejs-questdb-driver/package.json
index 93d4b5ce6f70d..6c8d1ee615777 100644
--- a/packages/cubejs-questdb-driver/package.json
+++ b/packages/cubejs-questdb-driver/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/questdb-driver",
"description": "Cube.js QuestDB database driver",
"author": "Cube Dev, Inc.",
- "version": "0.35.79",
+ "version": "0.35.81",
"repository": {
"type": "git",
"url": "https://github.com/cube-js/cube.git",
@@ -27,8 +27,8 @@
"lint:fix": "eslint --fix src/* --ext .ts"
},
"dependencies": {
- "@cubejs-backend/base-driver": "^0.35.67",
- "@cubejs-backend/schema-compiler": "^0.35.79",
+ "@cubejs-backend/base-driver": "^0.35.81",
+ "@cubejs-backend/schema-compiler": "^0.35.81",
"@cubejs-backend/shared": "^0.35.67",
"@types/pg": "^8.6.0",
"moment": "^2.24.0",
@@ -38,7 +38,7 @@
"license": "Apache-2.0",
"devDependencies": {
"@cubejs-backend/linter": "^0.35.0",
- "@cubejs-backend/testing-shared": "^0.35.79",
+ "@cubejs-backend/testing-shared": "^0.35.81",
"testcontainers": "^10.10.4",
"typescript": "~5.2.2"
},
diff --git a/packages/cubejs-redshift-driver/CHANGELOG.md b/packages/cubejs-redshift-driver/CHANGELOG.md
index 1550450f58729..efbd3135674bb 100644
--- a/packages/cubejs-redshift-driver/CHANGELOG.md
+++ b/packages/cubejs-redshift-driver/CHANGELOG.md
@@ -3,6 +3,22 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.81](https://github.com/cube-js/cube/compare/v0.35.80...v0.35.81) (2024-09-12)
+
+**Note:** Version bump only for package @cubejs-backend/redshift-driver
+
+
+
+
+
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+**Note:** Version bump only for package @cubejs-backend/redshift-driver
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
**Note:** Version bump only for package @cubejs-backend/redshift-driver
diff --git a/packages/cubejs-redshift-driver/package.json b/packages/cubejs-redshift-driver/package.json
index 485073b704d8e..a926a8a4d07cb 100644
--- a/packages/cubejs-redshift-driver/package.json
+++ b/packages/cubejs-redshift-driver/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/redshift-driver",
"description": "Cube.js Redshift database driver",
"author": "Cube Dev, Inc.",
- "version": "0.35.79",
+ "version": "0.35.81",
"repository": {
"type": "git",
"url": "https://github.com/cube-js/cube.git",
@@ -27,8 +27,8 @@
"dependencies": {
"@aws-sdk/client-s3": "^3.17.0",
"@aws-sdk/s3-request-presigner": "^3.17.0",
- "@cubejs-backend/base-driver": "^0.35.67",
- "@cubejs-backend/postgres-driver": "^0.35.79",
+ "@cubejs-backend/base-driver": "^0.35.81",
+ "@cubejs-backend/postgres-driver": "^0.35.81",
"@cubejs-backend/shared": "^0.35.67"
},
"license": "Apache-2.0",
diff --git a/packages/cubejs-schema-compiler/CHANGELOG.md b/packages/cubejs-schema-compiler/CHANGELOG.md
index 8d37163770d07..f7d97eca9836a 100644
--- a/packages/cubejs-schema-compiler/CHANGELOG.md
+++ b/packages/cubejs-schema-compiler/CHANGELOG.md
@@ -3,6 +3,33 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.81](https://github.com/cube-js/cube/compare/v0.35.80...v0.35.81) (2024-09-12)
+
+
+### Bug Fixes
+
+* **api-gateway:** fixes an issue where queries to get the total count of results were incorrectly applying sorting from the original query and also were getting default ordering applied when the query ordering was stripped out ([#8060](https://github.com/cube-js/cube/issues/8060)) Thanks [@rdwoodring](https://github.com/rdwoodring)! ([863f370](https://github.com/cube-js/cube/commit/863f3709e97c904f1c800ad98889dc272dbfddbd))
+
+
+### Features
+
+* ksql and rollup pre-aggregations ([#8619](https://github.com/cube-js/cube/issues/8619)) ([cdfbd1e](https://github.com/cube-js/cube/commit/cdfbd1e21ffcf111e40c525f8a391cc0dcee3c11))
+
+
+
+
+
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+
+### Bug Fixes
+
+* **schema-compiler:** propagate FILTER_PARAMS from view to inner cube's SELECT ([#8466](https://github.com/cube-js/cube/issues/8466)) ([c0466fd](https://github.com/cube-js/cube/commit/c0466fde9b7a3834159d7ec592362edcab6d9795))
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
diff --git a/packages/cubejs-schema-compiler/package.json b/packages/cubejs-schema-compiler/package.json
index 02cc4b08ac373..653f011f65be0 100644
--- a/packages/cubejs-schema-compiler/package.json
+++ b/packages/cubejs-schema-compiler/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/schema-compiler",
"description": "Cube schema compiler",
"author": "Cube Dev, Inc.",
- "version": "0.35.79",
+ "version": "0.35.81",
"repository": {
"type": "git",
"url": "https://github.com/cube-js/cube.git",
@@ -40,7 +40,7 @@
"@babel/standalone": "^7.24",
"@babel/traverse": "^7.24",
"@babel/types": "^7.24",
- "@cubejs-backend/native": "^0.35.79",
+ "@cubejs-backend/native": "^0.35.81",
"@cubejs-backend/shared": "^0.35.67",
"antlr4ts": "0.5.0-alpha.4",
"camelcase": "^6.2.0",
@@ -59,7 +59,7 @@
"devDependencies": {
"@cubejs-backend/apla-clickhouse": "^1.7.0",
"@cubejs-backend/linter": "^0.35.0",
- "@cubejs-backend/query-orchestrator": "^0.35.78",
+ "@cubejs-backend/query-orchestrator": "^0.35.81",
"@types/babel__code-frame": "^7.0.6",
"@types/babel__generator": "^7.6.8",
"@types/babel__traverse": "^7.20.5",
diff --git a/packages/cubejs-schema-compiler/src/adapter/BaseGroupFilter.ts b/packages/cubejs-schema-compiler/src/adapter/BaseGroupFilter.ts
index 9dd2bc613a8a4..49f17519739aa 100644
--- a/packages/cubejs-schema-compiler/src/adapter/BaseGroupFilter.ts
+++ b/packages/cubejs-schema-compiler/src/adapter/BaseGroupFilter.ts
@@ -1,5 +1,3 @@
-import R from 'ramda';
-
export class BaseGroupFilter {
protected readonly values: any;
@@ -31,7 +29,7 @@ export class BaseGroupFilter {
return null;
}
return `(${sql})`;
- }).filter(R.identity).join(` ${this.operator.toUpperCase()} `);
+ }).filter(x => x).join(` ${this.operator.toUpperCase()} `);
if (!r.length) {
return null;
diff --git a/packages/cubejs-schema-compiler/src/adapter/BaseQuery.js b/packages/cubejs-schema-compiler/src/adapter/BaseQuery.js
index affda97b5fece..9a7ca0fff16f0 100644
--- a/packages/cubejs-schema-compiler/src/adapter/BaseQuery.js
+++ b/packages/cubejs-schema-compiler/src/adapter/BaseQuery.js
@@ -387,7 +387,7 @@ export class BaseQuery {
}
defaultOrder() {
- if (this.options.preAggregationQuery) {
+ if (this.options.preAggregationQuery || this.options.totalQuery) {
return [];
}
@@ -2180,15 +2180,16 @@ export class BaseQuery {
const memberPathArray = [cubeName, name];
const memberPath = this.cubeEvaluator.pathFromArray(memberPathArray);
let type = memberExpressionType;
- if (!type && this.cubeEvaluator.isMeasure(memberPathArray)) {
- type = 'measure';
- }
- if (!type && this.cubeEvaluator.isDimension(memberPathArray)) {
- type = 'dimension';
- }
- if (!type && this.cubeEvaluator.isSegment(memberPathArray)) {
- type = 'segment';
+ if (!type) {
+ if (this.cubeEvaluator.isMeasure(memberPathArray)) {
+ type = 'measure';
+ } else if (this.cubeEvaluator.isDimension(memberPathArray)) {
+ type = 'dimension';
+ } else if (this.cubeEvaluator.isSegment(memberPathArray)) {
+ type = 'segment';
+ }
}
+
const parentMember = this.safeEvaluateSymbolContext().currentMember;
if (this.safeEvaluateSymbolContext().memberChildren && parentMember) {
this.safeEvaluateSymbolContext().memberChildren[parentMember] = this.safeEvaluateSymbolContext().memberChildren[parentMember] || [];
@@ -2358,7 +2359,7 @@ export class BaseQuery {
/**
* Evaluate escaped SQL-alias for cube or cube's property
- * (measure, dimention).
+ * (measure, dimension).
* @param {string} cubeName
* @returns string
*/
@@ -2745,9 +2746,12 @@ export class BaseQuery {
}
newSubQueryForCube(cube, options) {
- return this.options.queryFactory
- ? this.options.queryFactory.createQuery(cube, this.compilers, this.subQueryOptions(options))
- : this.newSubQuery(options);
+ if (this.options.queryFactory) {
+ options.paramAllocator = null;
+ return this.options.queryFactory.createQuery(cube, this.compilers, this.subQueryOptions(options));
+ }
+
+ return this.newSubQuery(options);
}
subQueryOptions(options) {
@@ -2941,6 +2945,60 @@ export class BaseQuery {
);
}
+ preAggregationOutputColumnTypes(cube, preAggregation) {
+ return this.cacheValue(
+ ['preAggregationOutputColumnTypes', cube, JSON.stringify(preAggregation)],
+ () => {
+ if (!preAggregation.outputColumnTypes) {
+ return null;
+ }
+
+ if (preAggregation.type === 'rollup') {
+ const query = this.preAggregations.rollupPreAggregationQuery(cube, preAggregation);
+
+ const evaluatedMapOutputColumnTypes = preAggregation.outputColumnTypes.reduce((acc, outputColumnType) => {
+ acc.set(outputColumnType.name, outputColumnType);
+ return acc;
+ }, new Map());
+
+ const findSchemaType = member => {
+ const outputSchemaType = evaluatedMapOutputColumnTypes.get(member);
+ if (!outputSchemaType) {
+ throw new UserError(`Output schema type for ${member} not found in pre-aggregation ${preAggregation}`);
+ }
+
+ return {
+ name: this.aliasName(member),
+ type: outputSchemaType.type,
+ };
+ };
+
+ // The order of the output columns is important: it must match the column order in the SELECT statement
+ const outputColumnTypes = [
+ ...(query.dimensions || []).map(d => findSchemaType(d.dimension)),
+ ...(query.timeDimensions || []).map(t => ({
+ name: `${this.aliasName(t.dimension)}_${t.granularity}`,
+ type: 'TIMESTAMP'
+ })),
+ ...(query.measures || []).map(m => findSchemaType(m.measure)),
+ ];
+
+ return outputColumnTypes;
+ }
+ throw new UserError('Output schema is only supported for rollup pre-aggregations');
+ },
+ { inputProps: { }, cache: this.queryCache }
+ );
+ }
+
+ preAggregationUniqueKeyColumns(cube, preAggregation) {
+ if (preAggregation.uniqueKeyColumns) {
+ return preAggregation.uniqueKeyColumns.map(key => this.aliasName(`${cube}.${key}`));
+ }
+
+ return this.dimensionColumns();
+ }
+
preAggregationReadOnly(_cube, _preAggregation) {
return false;
}
@@ -3529,25 +3587,29 @@ export class BaseQuery {
static extractFilterMembers(filter) {
if (filter.operator === 'and' || filter.operator === 'or') {
return filter.values.map(f => BaseQuery.extractFilterMembers(f)).reduce((a, b) => ((a && b) ? { ...a, ...b } : null), {});
- } else if (filter.measure || filter.dimension) {
+ } else if (filter.measure) {
+ return {
+ [filter.measure]: true
+ };
+ } else if (filter.dimension) {
return {
- [filter.measure || filter.dimension]: true
+ [filter.dimension]: true
};
} else {
return null;
}
}
- static findAndSubTreeForFilterGroup(filter, groupMembers, newGroupFilter) {
+ static findAndSubTreeForFilterGroup(filter, groupMembers, newGroupFilter, aliases) {
if ((filter.operator === 'and' || filter.operator === 'or') && !filter.values?.length) {
return null;
}
const filterMembers = BaseQuery.extractFilterMembers(filter);
- if (filterMembers && Object.keys(filterMembers).every(m => groupMembers.indexOf(m) !== -1)) {
+ if (filterMembers && Object.keys(filterMembers).every(m => (groupMembers.indexOf(m) !== -1 || aliases.indexOf(m) !== -1))) {
return filter;
}
if (filter.operator === 'and') {
- const result = filter.values.map(f => BaseQuery.findAndSubTreeForFilterGroup(f, groupMembers, newGroupFilter)).filter(f => !!f);
+ const result = filter.values.map(f => BaseQuery.findAndSubTreeForFilterGroup(f, groupMembers, newGroupFilter, aliases)).filter(f => !!f);
if (!result.length) {
return null;
}
@@ -3572,21 +3634,30 @@ export class BaseQuery {
);
}
- static renderFilterParams(filter, filterParamArgs, allocateParam, newGroupFilter) {
+ static renderFilterParams(filter, filterParamArgs, allocateParam, newGroupFilter, aliases) {
if (!filter) {
return '1 = 1';
}
if (filter.operator === 'and' || filter.operator === 'or') {
const values = filter.values
- .map(f => BaseQuery.renderFilterParams(f, filterParamArgs, allocateParam, newGroupFilter))
+ .map(f => BaseQuery.renderFilterParams(f, filterParamArgs, allocateParam, newGroupFilter, aliases))
.map(v => ({ filterToWhere: () => v }));
return newGroupFilter({ operator: filter.operator, values }).filterToWhere();
}
- const filterParams = filter && filter.filterParams();
- const filterParamArg = filterParamArgs.filter(p => p.__member() === filter.measure || p.__member() === filter.dimension)[0];
+ const filterParams = filter.filterParams();
+ const filterParamArg = filterParamArgs.filter(p => {
+ const member = p.__member();
+ return member === filter.measure ||
+ member === filter.dimension ||
+ (aliases[member] && (
+ aliases[member] === filter.measure ||
+ aliases[member] === filter.dimension
+ ));
+ })[0];
+
if (!filterParamArg) {
throw new Error(`FILTER_PARAMS arg not found for ${filter.measure || filter.dimension}`);
}
@@ -3619,15 +3690,25 @@ export class BaseQuery {
return f.__member();
});
- const filter = BaseQuery.findAndSubTreeForFilterGroup(newGroupFilter({ operator: 'and', values: allFilters }), groupMembers, newGroupFilter);
+ const aliases = allFilters ?
+ allFilters
+ .map(v => (v.query ? v.query.allBackAliasMembersExceptSegments() : {}))
+ .reduce((a, b) => ({ ...a, ...b }), {})
+ : {};
+ const filter = BaseQuery.findAndSubTreeForFilterGroup(
+ newGroupFilter({ operator: 'and', values: allFilters }),
+ groupMembers,
+ newGroupFilter,
+ Object.values(aliases)
+ );
- return `(${BaseQuery.renderFilterParams(filter, filterParamArgs, allocateParam, newGroupFilter)})`;
+ return `(${BaseQuery.renderFilterParams(filter, filterParamArgs, allocateParam, newGroupFilter, aliases)})`;
};
}
static filterProxyFromAllFilters(allFilters, cubeEvaluator, allocateParam, newGroupFilter) {
return new Proxy({}, {
- get: (target, name) => {
+ get: (_target, name) => {
if (name === '_objectWithResolvedProperties') {
return true;
}
@@ -3644,12 +3725,28 @@ export class BaseQuery {
return cubeEvaluator.pathFromArray([cubeNameObj.cube, propertyName]);
},
toString() {
+ // Segments must be excluded because they are evaluated separately in cubeReferenceProxy.
+ // Otherwise this falls into a recursive loop (stack overflow) caused by:
+ // collectFrom() -> traverseSymbol() -> evaluateSymbolSql() ->
+ // evaluateSql() -> resolveSymbolsCall() -> cubeReferenceProxy->toString() ->
+ // evaluateSymbolSql() -> evaluateSql()... -> and back here again
+ const aliases = allFilters ?
+ allFilters
+ .map(v => (v.query ? v.query.allBackAliasMembersExceptSegments() : {}))
+ .reduce((a, b) => ({ ...a, ...b }), {})
+ : {};
+ // Keep only the aliases that relate to this cube
+ const filteredAliases = Object.entries(aliases)
+ .filter(([key, value]) => key.startsWith(cubeNameObj.cube) || value.startsWith(cubeNameObj.cube))
+ .reduce((acc, [key, value]) => ({ ...acc, [key]: value }), {});
const filter = BaseQuery.findAndSubTreeForFilterGroup(
newGroupFilter({ operator: 'and', values: allFilters }),
[cubeEvaluator.pathFromArray([cubeNameObj.cube, propertyName])],
- newGroupFilter
+ newGroupFilter,
+ Object.values(filteredAliases)
);
- return `(${BaseQuery.renderFilterParams(filter, [this], allocateParam, newGroupFilter)})`;
+
+ return `(${BaseQuery.renderFilterParams(filter, [this], allocateParam, newGroupFilter, aliases)})`;
}
})
})
@@ -3657,4 +3754,46 @@ export class BaseQuery {
}
});
}
+
+ flattenAllMembers(excludeSegments = false) {
+ return R.flatten(
+ this.measures
+ .concat(this.dimensions)
+ .concat(excludeSegments ? [] : this.segments)
+ .concat(this.filters)
+ .concat(this.measureFilters)
+ .concat(this.timeDimensions)
+ .map(m => m.getMembers()),
+ );
+ }
+
+ allBackAliasMembersExceptSegments() {
+ return this.backAliasMembers(this.flattenAllMembers(true));
+ }
+
+ allBackAliasMembers() {
+ return this.backAliasMembers(this.flattenAllMembers());
+ }
+
+ backAliasMembers(members) {
+ const query = this;
+ return members.map(
+ member => {
+ const collectedMembers = query
+ .collectFrom([member], query.collectMemberNamesFor.bind(query), 'collectMemberNamesFor');
+ const memberPath = member.expressionPath();
+ let nonAliasSeen = false;
+ return collectedMembers
+ .filter(d => {
+ if (!query.cubeEvaluator.byPathAnyType(d).aliasMember) {
+ nonAliasSeen = true;
+ }
+ return !nonAliasSeen;
+ })
+ .map(d => (
+ { [query.cubeEvaluator.byPathAnyType(d).aliasMember]: memberPath }
+ )).reduce((a, b) => ({ ...a, ...b }), {});
+ }
+ ).reduce((a, b) => ({ ...a, ...b }), {});
+ }
}
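
backAliasMembers, moved into BaseQuery above and split into an ...ExceptSegments variant, maps each underlying cube member to the view member that aliases it; renderFilterParams then uses those pairs so a FILTER_PARAMS placeholder declared against a cube member also matches query filters written against the corresponding view member. A reduced sketch of that matching rule, with hypothetical member paths:

// Reduced sketch: 'orders_view.status' aliases 'orders.status'. The mapping
// direction mirrors the renderFilterParams check above:
// aliases[filterParamMember] === filter.dimension.
const aliases = { 'orders.status': 'orders_view.status' };

function filterParamArgMatches(paramMember, filter) {
  return paramMember === filter.dimension ||
    (aliases[paramMember] && aliases[paramMember] === filter.dimension);
}

console.log(filterParamArgMatches('orders.status', { dimension: 'orders.status' }));      // true: direct hit
console.log(filterParamArgMatches('orders.status', { dimension: 'orders_view.status' })); // true: via the view alias
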
diff --git a/packages/cubejs-schema-compiler/src/adapter/PreAggregations.js b/packages/cubejs-schema-compiler/src/adapter/PreAggregations.js
index bded2c1e0365d..bb8905aab3203 100644
--- a/packages/cubejs-schema-compiler/src/adapter/PreAggregations.js
+++ b/packages/cubejs-schema-compiler/src/adapter/PreAggregations.js
@@ -1,4 +1,5 @@
import R from 'ramda';
+import { FROM_PARTITION_RANGE, TO_PARTITION_RANGE } from '@cubejs-backend/shared';
import { UserError } from '../compiler/UserError';
@@ -157,7 +158,7 @@ export class PreAggregations {
const queryForSqlEvaluation = this.query.preAggregationQueryForSqlEvaluation(cube, preAggregation);
const partitionInvalidateKeyQueries = queryForSqlEvaluation.partitionInvalidateKeyQueries && queryForSqlEvaluation.partitionInvalidateKeyQueries(cube, preAggregation);
- const allBackAliasMembers = PreAggregations.allBackAliasMembers(this.query);
+ const allBackAliasMembers = this.query.allBackAliasMembers();
const matchedTimeDimension = preAggregation.partitionGranularity && !this.hasCumulativeMeasures &&
this.query.timeDimensions.find(td => {
@@ -189,7 +190,7 @@ export class PreAggregations {
const uniqueKeyColumnsDefault = () => null;
const uniqueKeyColumns = ({
- rollup: () => queryForSqlEvaluation.dimensionColumns(),
+ rollup: () => queryForSqlEvaluation.preAggregationUniqueKeyColumns(cube, preAggregation),
originalSql: () => preAggregation.uniqueKeyColumns || null
}[preAggregation.type] || uniqueKeyColumnsDefault)();
@@ -209,6 +210,7 @@ export class PreAggregations {
preAggregationsSchema: queryForSqlEvaluation.preAggregationSchema(),
loadSql: queryForSqlEvaluation.preAggregationLoadSql(cube, preAggregation, tableName),
sql: queryForSqlEvaluation.preAggregationSql(cube, preAggregation),
+ outputColumnTypes: queryForSqlEvaluation.preAggregationOutputColumnTypes(cube, preAggregation),
uniqueKeyColumns,
aggregationsColumns,
dataSource: queryForSqlEvaluation.dataSource,
@@ -219,7 +221,7 @@ export class PreAggregations {
queryForSqlEvaluation.parseSecondDuration(preAggregation.refreshKey.updateWindow),
preAggregationStartEndQueries:
(preAggregation.partitionGranularity || references.timeDimensions[0]?.granularity) &&
- this.refreshRangeQuery().preAggregationStartEndQueries(cube, preAggregation),
+ this.refreshRangeQuery(cube).preAggregationStartEndQueries(cube, preAggregation),
matchedTimeDimensionDateRange:
preAggregation.partitionGranularity && (
matchedTimeDimension && matchedTimeDimension.boundaryDateRangeFormatted() ||
@@ -292,7 +294,7 @@ export class PreAggregations {
static transformQueryToCanUseForm(query) {
const flattenDimensionMembers = this.flattenDimensionMembers(query);
const sortedDimensions = this.squashDimensions(query, flattenDimensionMembers);
- const allBackAliasMembers = this.allBackAliasMembers(query);
+ const allBackAliasMembers = query.allBackAliasMembers();
const measures = query.measures.concat(query.measureFilters);
const measurePaths = R.uniq(this.flattenMembers(measures).map(m => m.expressionPath()));
const collectLeafMeasures = query.collectLeafMeasures.bind(query);
@@ -426,31 +428,6 @@ export class PreAggregations {
);
}
- static backAliasMembers(query, members) {
- return members.map(
- member => {
- const collectedMembers = query
- .collectFrom([member], query.collectMemberNamesFor.bind(query), 'collectMemberNamesFor');
- const memberPath = member.expressionPath();
- let nonAliasSeen = false;
- return collectedMembers
- .filter(d => {
- if (!query.cubeEvaluator.byPathAnyType(d).aliasMember) {
- nonAliasSeen = true;
- }
- return !nonAliasSeen;
- })
- .map(d => (
- { [query.cubeEvaluator.byPathAnyType(d).aliasMember]: memberPath }
- )).reduce((a, b) => ({ ...a, ...b }), {});
- }
- ).reduce((a, b) => ({ ...a, ...b }), {});
- }
-
- static allBackAliasMembers(query) {
- return this.backAliasMembers(query, this.flattenAllMembers(query));
- }
-
static sortTimeDimensionsWithRollupGranularity(timeDimensions) {
return timeDimensions && R.sortBy(
R.prop(0),
@@ -750,18 +727,6 @@ export class PreAggregations {
);
}
- static flattenAllMembers(query) {
- return R.flatten(
- query.measures
- .concat(query.dimensions)
- .concat(query.segments)
- .concat(query.filters)
- .concat(query.measureFilters)
- .concat(query.timeDimensions)
- .map(m => m.getMembers()),
- );
- }
-
// eslint-disable-next-line no-unused-vars
// eslint-disable-next-line @typescript-eslint/no-unused-vars
getCubeLattice(cube, preAggregationName, preAggregation) {
@@ -1078,12 +1043,15 @@ export class PreAggregations {
return { preAggregations, result };
}
- refreshRangeQuery() {
- return this.query.newSubQuery({
- rowLimit: null,
- offset: null,
- preAggregationQuery: true,
- });
+ refreshRangeQuery(cube) {
+ return this.query.newSubQueryForCube(
+ cube,
+ {
+ rowLimit: null,
+ offset: null,
+ preAggregationQuery: true,
+ }
+ );
}
originalSqlPreAggregationQuery(cube, aggregation) {
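
refreshRangeQuery now receives the cube and routes through newSubQueryForCube, so deployments that supply a queryFactory get partition start/end SQL generated by the dialect class registered for that cube instead of a generic sub-query. A hedged sketch of the factory shape newSubQueryForCube calls into (class names and the per-cube mapping are hypothetical):

// queryFactory.createQuery(cube, compilers, options) picks a dialect per cube;
// refreshRangeQuery(cube) now flows through it as well.
class BaseDialectQuery { dialect() { return 'generic'; } }
class KsqlDialectQuery extends BaseDialectQuery { dialect() { return 'ksql'; } }

const queryFactory = {
  createQuery(cube, _compilers, _options) {
    // hypothetical per-cube mapping
    return cube === 'KsqlOrders' ? new KsqlDialectQuery() : new BaseDialectQuery();
  },
};

console.log(queryFactory.createQuery('KsqlOrders', {}, {}).dialect()); // 'ksql'
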
diff --git a/packages/cubejs-schema-compiler/src/compiler/CubeEvaluator.ts b/packages/cubejs-schema-compiler/src/compiler/CubeEvaluator.ts
index 9358e2eae1bde..63c6c97b8c10f 100644
--- a/packages/cubejs-schema-compiler/src/compiler/CubeEvaluator.ts
+++ b/packages/cubejs-schema-compiler/src/compiler/CubeEvaluator.ts
@@ -273,6 +273,12 @@ export class CubeEvaluator extends CubeSymbols {
preAggregation.refreshRangeEnd = preAggregation.buildRangeEnd;
delete preAggregation.buildRangeEnd;
}
+
+ if (preAggregation.outputColumnTypes) {
+ preAggregation.outputColumnTypes.forEach(column => {
+ column.name = this.evaluateReferences(cube.name, column.member, { originalSorting: true });
+ });
+ }
}
}
}
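
CubeEvaluator resolves each outputColumnTypes member reference into a fully-qualified member name at compile time (the transpiler pattern added below makes `member` a resolvable reference). In a data model this would look roughly like the following hypothetical cube; names and types are illustrative only:

// Hypothetical model sketch: outputColumnTypes pins the column types a
// streaming pre-aggregation writes to Cube Store; `member` references are
// resolved to member paths (e.g. 'orders.status') during compilation.
cube(`orders`, {
  sql: `SELECT * FROM orders`,
  measures: {
    count: { type: `count` },
  },
  dimensions: {
    status: { sql: `status`, type: `string` },
  },
  preAggregations: {
    byStatus: {
      type: `rollup`,
      measures: [CUBE.count],
      dimensions: [CUBE.status],
      outputColumnTypes: [
        { member: CUBE.status, type: `varchar(255)` },
        { member: CUBE.count, type: `int` },
      ],
    },
  },
});
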
diff --git a/packages/cubejs-schema-compiler/src/compiler/CubeValidator.ts b/packages/cubejs-schema-compiler/src/compiler/CubeValidator.ts
index c48825440006e..3e79893ea4f8a 100644
--- a/packages/cubejs-schema-compiler/src/compiler/CubeValidator.ts
+++ b/packages/cubejs-schema-compiler/src/compiler/CubeValidator.ts
@@ -248,6 +248,10 @@ const BasePreAggregationWithoutPartitionGranularity = {
},
readOnly: Joi.boolean().strict(),
streamOffset: Joi.any().valid('earliest', 'latest'),
+ outputColumnTypes: Joi.array().items(Joi.object().keys({
+ member: Joi.func().required(),
+ type: Joi.string().required()
+ })),
};
const BasePreAggregation = {
@@ -390,6 +394,7 @@ const RollUpSchema = condition(
measureReferences: Joi.func(),
dimensionReferences: Joi.func(),
segmentReferences: Joi.func(),
+ uniqueKeyColumns: Joi.array().items(Joi.string()),
}),
condition(
(s) => defined(s.timeDimension),
@@ -402,6 +407,7 @@ const RollUpSchema = condition(
measures: Joi.func(),
dimensions: Joi.func(),
segments: Joi.func(),
+ uniqueKeyColumns: Joi.array().items(Joi.string()),
}),
// Rollup with multiple time dimensions
inherit(BasePreAggregation, {
@@ -414,6 +420,7 @@ const RollUpSchema = condition(
measures: Joi.func(),
dimensions: Joi.func(),
segments: Joi.func(),
+ uniqueKeyColumns: Joi.array().items(Joi.string()),
})
)
),
@@ -422,14 +429,16 @@ const RollUpSchema = condition(
type: Joi.any().valid('rollup').required(),
measureReferences: Joi.func(),
dimensionReferences: Joi.func(),
- segmentReferences: Joi.func()
+ segmentReferences: Joi.func(),
+ uniqueKeyColumns: Joi.array().items(Joi.string()),
}),
// Rollup without References postfix
inherit(BasePreAggregation, {
type: Joi.any().valid('rollup').required(),
measures: Joi.func(),
dimensions: Joi.func(),
- segments: Joi.func()
+ segments: Joi.func(),
+ uniqueKeyColumns: Joi.array().items(Joi.string()),
})
)
);
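The validator now accepts two new rollup options: `uniqueKeyColumns`, a list of column names forming the unique key for streaming rollups, and (via `BasePreAggregationWithoutPartitionGranularity` above) `outputColumnTypes`, which pins the rollup's output column types as `{ member, type }` pairs. A sketch combining both, modeled on the `RequestsStream` fixture added later in this diff:

```js
// Sketch: the two options validated by the new Joi rules. The bare member
// references (count, tenant_id, ...) resolve thanks to the transpiler
// pattern added in CubePropContextTranspiler below.
preAggregations: {
  stream: {
    type: `rollup`,
    measures: [count],
    dimensions: [tenant_id, request_id],
    timeDimension: timestamp,
    granularity: `day`,
    uniqueKeyColumns: ['tenant_id', 'request_id'],
    outputColumnTypes: [
      { member: tenant_id, type: 'int' },
      { member: request_id, type: 'text' },
      { member: count, type: 'int' },
    ],
  },
},
```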
diff --git a/packages/cubejs-schema-compiler/src/compiler/transpilers/CubePropContextTranspiler.ts b/packages/cubejs-schema-compiler/src/compiler/transpilers/CubePropContextTranspiler.ts
index dbfa432727903..f55c71dca29c2 100644
--- a/packages/cubejs-schema-compiler/src/compiler/transpilers/CubePropContextTranspiler.ts
+++ b/packages/cubejs-schema-compiler/src/compiler/transpilers/CubePropContextTranspiler.ts
@@ -19,6 +19,7 @@ export const transpiledFieldsPatterns: Array<RegExp> = [
/^(preAggregations|pre_aggregations)\.[_a-zA-Z][_a-zA-Z0-9]*\.indexes\.[_a-zA-Z][_a-zA-Z0-9]*\.columns$/,
/^(preAggregations|pre_aggregations)\.[_a-zA-Z][_a-zA-Z0-9]*\.(timeDimensionReference|timeDimension|time_dimension|segments|dimensions|measures|rollups|segmentReferences|dimensionReferences|measureReferences|rollupReferences)$/,
/^(preAggregations|pre_aggregations)\.[_a-zA-Z][_a-zA-Z0-9]*\.(timeDimensions|time_dimensions)\.\d+\.dimension$/,
+ /^(preAggregations|pre_aggregations)\.[_a-zA-Z][_a-zA-Z0-9]*\.(outputColumnTypes|output_column_types)\.\d+\.member$/,
/^contextMembers$/,
/^includes$/,
/^excludes$/,
diff --git a/packages/cubejs-schema-compiler/test/integration/postgres/pre-aggregations.test.ts b/packages/cubejs-schema-compiler/test/integration/postgres/pre-aggregations.test.ts
index fbc7bcf8d30f6..eb24efc8ff4b3 100644
--- a/packages/cubejs-schema-compiler/test/integration/postgres/pre-aggregations.test.ts
+++ b/packages/cubejs-schema-compiler/test/integration/postgres/pre-aggregations.test.ts
@@ -490,6 +490,8 @@ describe('PreAggregations', () => {
dimensions: [id, source],
timeDimension: createdAt,
granularity: 'day',
+ build_range_start: { sql: "SELECT DATE_SUB(NOW(), interval '96 hour')" },
+ build_range_end: { sql: "SELECT NOW()" },
partitionGranularity: 'day'
}
}
diff --git a/packages/cubejs-schema-compiler/test/integration/postgres/yaml-compiler.test.ts b/packages/cubejs-schema-compiler/test/integration/postgres/yaml-compiler.test.ts
index 74fd630b1379f..bd829ce6ff840 100644
--- a/packages/cubejs-schema-compiler/test/integration/postgres/yaml-compiler.test.ts
+++ b/packages/cubejs-schema-compiler/test/integration/postgres/yaml-compiler.test.ts
@@ -10,7 +10,7 @@ describe('YAMLCompiler', () => {
cubes:
- name: ActiveUsers
sql: "SELECT 1 as user_id, '2022-01-01' as timestamp"
-
+
measures:
- name: weeklyActive
sql: "{CUBE}.user_id"
@@ -62,7 +62,7 @@ cubes:
cubes:
- name: ActiveUsers
sql: "SELECT 1 as user_id, '2022-01-01' as timestamp"
-
+
measures:
- name: weeklyActive
sql: "{CUBE}.user_id"
@@ -83,7 +83,7 @@ cubes:
cubes:
- name: ActiveUsers
sql: "SELECT 1 as user_id, '2022-01-01'::timestamptz as timestamp"
-
+
measures:
- name: withFilter
sql: "{CUBE}.user_id"
@@ -126,7 +126,7 @@ cubes:
cubes:
- name: ActiveUsers
sql: "SELECT 1 as user_id, '2022-01-01' as timestamp"
-
+
measures:
- name: weeklyActive
sql: "{user_id}"
@@ -181,7 +181,7 @@ cubes:
cubes:
- name: ActiveUsers
sql: "SELECT 1 as user_id, '2022-01-01' as timestamp"
-
+
measures:
- name: weeklyActive
sql: "{CUBE.user_id}"
@@ -197,7 +197,7 @@ cubes:
- name: time
sql: "{CUBE}.timestamp"
type: time
-
+
preAggregations:
- name: main
measures:
@@ -248,7 +248,7 @@ cubes:
cubes:
- name: active_users
sql: "SELECT * FROM (SELECT 1 as user_id, '2022-01-01'::timestamptz as \\"timestamp\\") t WHERE {FILTER_PARAMS.active_users.time.filter(\\"timestamp\\")} AND {FILTER_PARAMS.active_users.time.filter(lambda a,b : f'timestamp >= {a}::timestamptz AND timestamp <= {b}::timestamptz')}"
-
+
measures:
- name: weekly_active
sql: "{CUBE.user_id}"
@@ -303,13 +303,20 @@ cubes:
const { compiler, joinGraph, cubeEvaluator } = prepareYamlCompiler(`
cubes:
- name: orders
- sql: "SELECT 1 as id, 1 as customer_id, TO_TIMESTAMP('2022-01-01', 'YYYY-MM-DD') as timestamp WHERE {FILTER_PARAMS.orders.time.filter(\\"timestamp\\")}"
-
+ sql: "SELECT *
+ FROM (
+ SELECT
+ 1 as id,
+ 1 as customer_id,
+ TO_TIMESTAMP('2022-01-01', 'YYYY-MM-DD') as timestamp
+ ) sq
+ WHERE {FILTER_PARAMS.orders.time.filter(\\"timestamp\\")}"
+
joins:
- name: customers
sql: "{CUBE}.customer_id = {customers}.id"
relationship: many_to_one
-
+
measures:
- name: count
type: count
@@ -319,11 +326,11 @@ cubes:
sql: "{CUBE}.id"
type: string
primary_key: true
-
+
- name: time
sql: "{CUBE}.timestamp"
type: time
-
+
preAggregations:
- name: main
measures: [orders.count]
@@ -356,11 +363,11 @@ cubes:
measures:
- name: count
type: count
-
-
+
+
- name: customers
sql: "SELECT 1 as id, 'Foo' as name"
-
+
measures:
- name: count
type: count
@@ -370,11 +377,11 @@ cubes:
sql: id
type: string
primary_key: true
-
+
- name: name
sql: "{CUBE}.name"
type: string
-
+
views:
- name: line_items_view
@@ -385,13 +392,13 @@ views:
- join_path: line_items.orders
prefix: true
includes: "*"
- excludes:
+ excludes:
- count
-
+
- join_path: line_items.orders.customers
alias: aliased_customers
prefix: true
- includes:
+ includes:
- name: name
alias: full_name
`);
@@ -425,12 +432,12 @@ views:
cubes:
- name: BaseUsers
sql: "SELECT 1"
-
+
dimensions:
- name: time
sql: "{CUBE}.timestamp"
type: time
-
+
- name: ActiveUsers
sql: "SELECT 1 as user_id, '2022-01-01' as timestamp"
extends: BaseUsers
@@ -527,9 +534,9 @@ cubes:
type: string
sql: w_id
primary_key: true
-
+
joins:
-
+
- name: Z
sql: "{CUBE}.z_id = {Z}.z_id"
relationship: many_to_one
@@ -550,9 +557,9 @@ cubes:
type: string
sql: m_id
primary_key: true
-
+
joins:
-
+
- name: V
sql: "{CUBE}.v_id = {V}.v_id"
relationship: many_to_one
@@ -560,11 +567,11 @@ cubes:
- name: W
sql: "{CUBE}.w_id = {W}.w_id"
relationship: many_to_one
-
+
- name: Z
sql: >
SELECT 1 as z_id, 'US' as COUNTRY
-
+
dimensions:
- name: country
sql: "{CUBE}.COUNTRY"
@@ -574,7 +581,7 @@ cubes:
sql: "{CUBE}.z_id"
type: string
primaryKey: true
-
+
- name: V
sql: |
SELECT 1 as v_id, 1 as z_id
@@ -595,7 +602,7 @@ cubes:
views:
- name: m_view
-
+
cubes:
- join_path: M
diff --git a/packages/cubejs-schema-compiler/test/unit/base-query.test.ts b/packages/cubejs-schema-compiler/test/unit/base-query.test.ts
index cf879dd21481c..8e74bb852cda3 100644
--- a/packages/cubejs-schema-compiler/test/unit/base-query.test.ts
+++ b/packages/cubejs-schema-compiler/test/unit/base-query.test.ts
@@ -688,21 +688,30 @@ describe('SQL Generation', () => {
/** @type {Compilers} */
const compilers = prepareYamlCompiler(
createSchemaYaml({
- cubes: [
- {
- name: 'Order',
- sql: 'select * from order where {FILTER_PARAMS.Order.type.filter(\'type\')}',
- measures: [{
- name: 'count',
- type: 'count',
- }],
- dimensions: [{
- name: 'type',
- sql: 'type',
- type: 'string'
- }]
- },
- ]
+ cubes: [{
+ name: 'Order',
+ sql: 'select * from order where {FILTER_PARAMS.Order.type.filter(\'type\')}',
+ measures: [{
+ name: 'count',
+ type: 'count',
+ }],
+ dimensions: [{
+ name: 'type',
+ sql: 'type',
+ type: 'string'
+ }]
+ }],
+ views: [{
+ name: 'orders_view',
+ cubes: [{
+ join_path: 'Order',
+ prefix: true,
+ includes: [
+ 'type',
+ 'count',
+ ]
+ }]
+ }]
})
);
@@ -857,6 +866,23 @@ describe('SQL Generation', () => {
const cubeSQL = query.cubeSql('Order');
expect(cubeSQL).toMatch(/\(\s*\(.*type\s*=\s*\$\d\$.*OR.*type\s*=\s*\$\d\$.*\)\s*AND\s*\(.*type\s*=\s*\$\d\$.*OR.*type\s*=\s*\$\d\$.*\)\s*\)/);
});
+
+ it('propagate filter params from view into cube\'s query', async () => {
+ await compilers.compiler.compile();
+ const query = new BaseQuery(compilers, {
+ measures: ['orders_view.Order_count'],
+ filters: [
+ {
+ member: 'orders_view.Order_type',
+ operator: 'equals',
+ values: ['online'],
+ },
+ ],
+ });
+ const cubeSQL = query.cubeSql('Order');
+ expect(cubeSQL).toContain('select * from order where ((type = $0$))');
+ });
});
describe('FILTER_GROUP', () => {
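The new test above covers the FILTER_PARAMS propagation fix: because `orders_view` includes the `Order` cube with `prefix: true`, its members surface as `Order_count` and `Order_type`, and a filter on a view member is now pushed down into the inner cube's `FILTER_PARAMS` placeholder. A usage sketch, assuming the schema compiled above:

```js
// Sketch: the view-level filter reaches the Order cube's own SELECT.
const query = new BaseQuery(compilers, {
  measures: ['orders_view.Order_count'],
  filters: [
    { member: 'orders_view.Order_type', operator: 'equals', values: ['online'] },
  ],
});
// query.cubeSql('Order') renders the cube SQL with the filter inlined:
//   select * from order where ((type = $0$))
```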
diff --git a/packages/cubejs-server-core/CHANGELOG.md b/packages/cubejs-server-core/CHANGELOG.md
index b57a34031151b..449ba41a60dc7 100644
--- a/packages/cubejs-server-core/CHANGELOG.md
+++ b/packages/cubejs-server-core/CHANGELOG.md
@@ -3,6 +3,28 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.81](https://github.com/cube-js/cube/compare/v0.35.80...v0.35.81) (2024-09-12)
+
+
+### Bug Fixes
+
+* Updated jsonwebtoken in all packages ([#8282](https://github.com/cube-js/cube/issues/8282)) Thanks [@jlloyd-widen](https://github.com/jlloyd-widen) ! ([ca7c292](https://github.com/cube-js/cube/commit/ca7c292e0122be50ac7adc9b9d4910623d19f840))
+
+
+
+
+
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+
+### Bug Fixes
+
+* **schema-compiler:** propagate FILTER_PARAMS from view to inner cube's SELECT ([#8466](https://github.com/cube-js/cube/issues/8466)) ([c0466fd](https://github.com/cube-js/cube/commit/c0466fde9b7a3834159d7ec592362edcab6d9795))
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
**Note:** Version bump only for package @cubejs-backend/server-core
diff --git a/packages/cubejs-server-core/package.json b/packages/cubejs-server-core/package.json
index 15bdce1744f0c..73addcd740f58 100644
--- a/packages/cubejs-server-core/package.json
+++ b/packages/cubejs-server-core/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/server-core",
"description": "Cube.js base component to wire all backend components together",
"author": "Cube Dev, Inc.",
- "version": "0.35.79",
+ "version": "0.35.81",
"repository": {
"type": "git",
"url": "https://github.com/cube-js/cube.git",
@@ -29,12 +29,12 @@
"unit": "jest --runInBand --forceExit --coverage dist/test"
},
"dependencies": {
- "@cubejs-backend/api-gateway": "^0.35.79",
- "@cubejs-backend/cloud": "^0.35.67",
+ "@cubejs-backend/api-gateway": "^0.35.81",
+ "@cubejs-backend/cloud": "^0.35.81",
"@cubejs-backend/dotenv": "^9.0.2",
- "@cubejs-backend/native": "^0.35.79",
- "@cubejs-backend/query-orchestrator": "^0.35.78",
- "@cubejs-backend/schema-compiler": "^0.35.79",
+ "@cubejs-backend/native": "^0.35.81",
+ "@cubejs-backend/query-orchestrator": "^0.35.81",
+ "@cubejs-backend/schema-compiler": "^0.35.81",
"@cubejs-backend/shared": "^0.35.67",
"@cubejs-backend/templates": "^0.35.67",
"codesandbox-import-utils": "^2.1.12",
@@ -42,7 +42,7 @@
"fs-extra": "^8.1.0",
"is-docker": "^2.1.1",
"joi": "^17.8.3",
- "jsonwebtoken": "^8.4.0",
+ "jsonwebtoken": "^9.0.2",
"lodash.clonedeep": "^4.5.0",
"lru-cache": "^5.1.1",
"moment": "^2.29.1",
@@ -57,14 +57,14 @@
"ws": "^7.5.3"
},
"devDependencies": {
- "@cubejs-backend/cubestore-driver": "^0.35.78",
+ "@cubejs-backend/cubestore-driver": "^0.35.81",
"@cubejs-backend/linter": "^0.35.0",
- "@cubejs-client/playground": "^0.35.48",
+ "@cubejs-client/playground": "^0.35.81",
"@types/cross-spawn": "^6.0.2",
"@types/express": "^4.17.9",
"@types/fs-extra": "^9.0.8",
"@types/jest": "^27",
- "@types/jsonwebtoken": "^8.5.0",
+ "@types/jsonwebtoken": "^9.0.2",
"@types/lru-cache": "^5.1.0",
"@types/node": "^16",
"@types/node-fetch": "^2.5.7",
diff --git a/packages/cubejs-server-core/src/core/CompilerApi.js b/packages/cubejs-server-core/src/core/CompilerApi.js
index 489c1816d6cb5..401e50ac5d46c 100644
--- a/packages/cubejs-server-core/src/core/CompilerApi.js
+++ b/packages/cubejs-server-core/src/core/CompilerApi.js
@@ -129,7 +129,7 @@ export class CompilerApi {
async getSqlGenerator(query, dataSource) {
const dbType = await this.getDbType(dataSource);
const compilers = await this.getCompilers({ requestId: query.requestId });
- let sqlGenerator = await this.createQueryByDataSource(compilers, query, dataSource);
+ let sqlGenerator = await this.createQueryByDataSource(compilers, query, dataSource, dbType);
if (!sqlGenerator) {
throw new Error(`Unknown dbType: ${dbType}`);
@@ -142,7 +142,8 @@ export class CompilerApi {
sqlGenerator = await this.createQueryByDataSource(
compilers,
query,
- dataSource
+ dataSource,
+ _dbType
);
if (!sqlGenerator) {
@@ -203,8 +204,10 @@ export class CompilerApi {
return cubeEvaluator.scheduledPreAggregations();
}
- async createQueryByDataSource(compilers, query, dataSource) {
- const dbType = await this.getDbType(dataSource);
+ async createQueryByDataSource(compilers, query, dataSource, dbType) {
+ if (!dbType) {
+ dbType = await this.getDbType(dataSource);
+ }
return this.createQuery(compilers, dbType, this.getDialectClass(dataSource, dbType), query);
}
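`createQueryByDataSource` now accepts the already-resolved `dbType` as an optional fourth argument and only falls back to `getDbType(dataSource)` when it is omitted, sparing `getSqlGenerator` a second lookup. Both resulting call styles, as a sketch:

```js
// Sketch of the two call styles after this change:
const dbType = await compilerApi.getDbType(dataSource);

// Reuse the resolved type (what getSqlGenerator does now):
let sqlGenerator = await compilerApi.createQueryByDataSource(compilers, query, dataSource, dbType);

// Or let the method resolve it itself (previous behavior, still supported):
sqlGenerator = await compilerApi.createQueryByDataSource(compilers, query, dataSource);
```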
diff --git a/packages/cubejs-server/CHANGELOG.md b/packages/cubejs-server/CHANGELOG.md
index 9a92b9528d87f..f53c9dc1e24e6 100644
--- a/packages/cubejs-server/CHANGELOG.md
+++ b/packages/cubejs-server/CHANGELOG.md
@@ -3,6 +3,25 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.81](https://github.com/cube-js/cube/compare/v0.35.80...v0.35.81) (2024-09-12)
+
+
+### Bug Fixes
+
+* Updated jsonwebtoken in all packages ([#8282](https://github.com/cube-js/cube/issues/8282)) Thanks [@jlloyd-widen](https://github.com/jlloyd-widen) ! ([ca7c292](https://github.com/cube-js/cube/commit/ca7c292e0122be50ac7adc9b9d4910623d19f840))
+
+
+
+
+
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+**Note:** Version bump only for package @cubejs-backend/server
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
**Note:** Version bump only for package @cubejs-backend/server
diff --git a/packages/cubejs-server/package.json b/packages/cubejs-server/package.json
index 3e7e3a67793ad..d964f142ba953 100644
--- a/packages/cubejs-server/package.json
+++ b/packages/cubejs-server/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/server",
"description": "Cube.js all-in-one server",
"author": "Cube Dev, Inc.",
- "version": "0.35.79",
+ "version": "0.35.81",
"types": "index.d.ts",
"repository": {
"type": "git",
@@ -40,10 +40,10 @@
"jest:shapshot": "jest --updateSnapshot test"
},
"dependencies": {
- "@cubejs-backend/cubestore-driver": "^0.35.78",
+ "@cubejs-backend/cubestore-driver": "^0.35.81",
"@cubejs-backend/dotenv": "^9.0.2",
- "@cubejs-backend/native": "^0.35.79",
- "@cubejs-backend/server-core": "^0.35.79",
+ "@cubejs-backend/native": "^0.35.81",
+ "@cubejs-backend/server-core": "^0.35.81",
"@cubejs-backend/shared": "^0.35.67",
"@oclif/color": "^1.0.0",
"@oclif/command": "^1.8.13",
@@ -55,14 +55,14 @@
"codesandbox-import-utils": "^2.1.12",
"cors": "^2.8.4",
"express": "^4.17.1",
- "jsonwebtoken": "^8.4.0",
+ "jsonwebtoken": "^9.0.2",
"semver": "^7.3.2",
"source-map-support": "^0.5.19",
"ws": "^7.1.2"
},
"devDependencies": {
"@cubejs-backend/linter": "^0.35.0",
- "@cubejs-backend/query-orchestrator": "^0.35.78",
+ "@cubejs-backend/query-orchestrator": "^0.35.81",
"@oclif/dev-cli": "^1.23.1",
"@types/body-parser": "^1.19.0",
"@types/cors": "^2.8.8",
diff --git a/packages/cubejs-snowflake-driver/CHANGELOG.md b/packages/cubejs-snowflake-driver/CHANGELOG.md
index 1164de7a6f989..52e15a1905816 100644
--- a/packages/cubejs-snowflake-driver/CHANGELOG.md
+++ b/packages/cubejs-snowflake-driver/CHANGELOG.md
@@ -3,6 +3,14 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.81](https://github.com/cube-js/cube/compare/v0.35.80...v0.35.81) (2024-09-12)
+
+**Note:** Version bump only for package @cubejs-backend/snowflake-driver
+
+
+
+
+
## [0.35.67](https://github.com/cube-js/cube/compare/v0.35.66...v0.35.67) (2024-08-07)
**Note:** Version bump only for package @cubejs-backend/snowflake-driver
diff --git a/packages/cubejs-snowflake-driver/package.json b/packages/cubejs-snowflake-driver/package.json
index 550a96c0dec80..b4981f854ce00 100644
--- a/packages/cubejs-snowflake-driver/package.json
+++ b/packages/cubejs-snowflake-driver/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/snowflake-driver",
"description": "Cube.js Snowflake database driver",
"author": "Cube Dev, Inc.",
- "version": "0.35.67",
+ "version": "0.35.81",
"repository": {
"type": "git",
"url": "https://github.com/cube-js/cube.git",
@@ -25,7 +25,7 @@
"lint:fix": "eslint --fix src/* --ext .ts"
},
"dependencies": {
- "@cubejs-backend/base-driver": "^0.35.67",
+ "@cubejs-backend/base-driver": "^0.35.81",
"@cubejs-backend/shared": "^0.35.67",
"@google-cloud/storage": "^5.8.5",
"date-fns-timezone": "^0.1.4",
diff --git a/packages/cubejs-sqlite-driver/CHANGELOG.md b/packages/cubejs-sqlite-driver/CHANGELOG.md
index 74dd0ebeb5d21..bfad684f5032f 100644
--- a/packages/cubejs-sqlite-driver/CHANGELOG.md
+++ b/packages/cubejs-sqlite-driver/CHANGELOG.md
@@ -3,6 +3,14 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.81](https://github.com/cube-js/cube/compare/v0.35.80...v0.35.81) (2024-09-12)
+
+**Note:** Version bump only for package @cubejs-backend/sqlite-driver
+
+
+
+
+
## [0.35.67](https://github.com/cube-js/cube/compare/v0.35.66...v0.35.67) (2024-08-07)
**Note:** Version bump only for package @cubejs-backend/sqlite-driver
diff --git a/packages/cubejs-sqlite-driver/package.json b/packages/cubejs-sqlite-driver/package.json
index 15c0d9b49053d..e96ad820d922d 100644
--- a/packages/cubejs-sqlite-driver/package.json
+++ b/packages/cubejs-sqlite-driver/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/sqlite-driver",
"description": "Cube.js Sqlite database driver",
"author": "Cube Dev, Inc.",
- "version": "0.35.67",
+ "version": "0.35.81",
"repository": {
"type": "git",
"url": "https://github.com/cube-js/cube.git",
@@ -17,7 +17,7 @@
"lint": "eslint **/*.js"
},
"dependencies": {
- "@cubejs-backend/base-driver": "^0.35.67",
+ "@cubejs-backend/base-driver": "^0.35.81",
"@cubejs-backend/shared": "^0.35.67",
"sqlite3": "^5.1.7"
},
diff --git a/packages/cubejs-testing-drivers/CHANGELOG.md b/packages/cubejs-testing-drivers/CHANGELOG.md
index 1cdbaa9bc6738..6d63756f01b28 100644
--- a/packages/cubejs-testing-drivers/CHANGELOG.md
+++ b/packages/cubejs-testing-drivers/CHANGELOG.md
@@ -3,6 +3,25 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.81](https://github.com/cube-js/cube/compare/v0.35.80...v0.35.81) (2024-09-12)
+
+
+### Bug Fixes
+
+* Updated jsonwebtoken in all packages ([#8282](https://github.com/cube-js/cube/issues/8282)) Thanks [@jlloyd-widen](https://github.com/jlloyd-widen) ! ([ca7c292](https://github.com/cube-js/cube/commit/ca7c292e0122be50ac7adc9b9d4910623d19f840))
+
+
+
+
+
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+**Note:** Version bump only for package @cubejs-backend/testing-drivers
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
**Note:** Version bump only for package @cubejs-backend/testing-drivers
diff --git a/packages/cubejs-testing-drivers/package.json b/packages/cubejs-testing-drivers/package.json
index af7eba46bd814..28b4c2ec9be12 100644
--- a/packages/cubejs-testing-drivers/package.json
+++ b/packages/cubejs-testing-drivers/package.json
@@ -1,6 +1,6 @@
{
"name": "@cubejs-backend/testing-drivers",
- "version": "0.35.79",
+ "version": "0.35.81",
"description": "Cube.js drivers test suite",
"author": "Cube Dev, Inc.",
"license": "MIT",
@@ -46,22 +46,22 @@
"dist/src"
],
"dependencies": {
- "@cubejs-backend/athena-driver": "^0.35.79",
- "@cubejs-backend/base-driver": "^0.35.67",
- "@cubejs-backend/bigquery-driver": "^0.35.79",
- "@cubejs-backend/clickhouse-driver": "^0.35.79",
- "@cubejs-backend/cubestore-driver": "^0.35.78",
- "@cubejs-backend/databricks-jdbc-driver": "^0.35.79",
+ "@cubejs-backend/athena-driver": "^0.35.81",
+ "@cubejs-backend/base-driver": "^0.35.81",
+ "@cubejs-backend/bigquery-driver": "^0.35.81",
+ "@cubejs-backend/clickhouse-driver": "^0.35.81",
+ "@cubejs-backend/cubestore-driver": "^0.35.81",
+ "@cubejs-backend/databricks-jdbc-driver": "^0.35.81",
"@cubejs-backend/dotenv": "^9.0.2",
"@cubejs-backend/linter": "^0.35.0",
- "@cubejs-backend/mssql-driver": "^0.35.67",
- "@cubejs-backend/mysql-driver": "^0.35.79",
- "@cubejs-backend/postgres-driver": "^0.35.79",
- "@cubejs-backend/query-orchestrator": "^0.35.78",
- "@cubejs-backend/server-core": "^0.35.79",
+ "@cubejs-backend/mssql-driver": "^0.35.81",
+ "@cubejs-backend/mysql-driver": "^0.35.81",
+ "@cubejs-backend/postgres-driver": "^0.35.81",
+ "@cubejs-backend/query-orchestrator": "^0.35.81",
+ "@cubejs-backend/server-core": "^0.35.81",
"@cubejs-backend/shared": "^0.35.67",
- "@cubejs-backend/snowflake-driver": "^0.35.67",
- "@cubejs-backend/testing-shared": "^0.35.79",
+ "@cubejs-backend/snowflake-driver": "^0.35.81",
+ "@cubejs-backend/testing-shared": "^0.35.81",
"@cubejs-client/core": "^0.35.23",
"@cubejs-client/ws-transport": "^0.35.23",
"@jest/globals": "^27",
@@ -70,7 +70,7 @@
"dotenv": "^16.0.3",
"fs-extra": "^11.1.1",
"jest": "^27",
- "jsonwebtoken": "^8.5.1",
+ "jsonwebtoken": "^9.0.2",
"pg": "^8.7.3",
"ramda": "^0.28.0",
"testcontainers": "^10.10.4",
diff --git a/packages/cubejs-testing-shared/CHANGELOG.md b/packages/cubejs-testing-shared/CHANGELOG.md
index bf5413bc5a079..6fc11ebc252f5 100644
--- a/packages/cubejs-testing-shared/CHANGELOG.md
+++ b/packages/cubejs-testing-shared/CHANGELOG.md
@@ -3,6 +3,25 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.81](https://github.com/cube-js/cube/compare/v0.35.80...v0.35.81) (2024-09-12)
+
+
+### Features
+
+* ksql and rollup pre-aggregations ([#8619](https://github.com/cube-js/cube/issues/8619)) ([cdfbd1e](https://github.com/cube-js/cube/commit/cdfbd1e21ffcf111e40c525f8a391cc0dcee3c11))
+
+
+
+
+
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+**Note:** Version bump only for package @cubejs-backend/testing-shared
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
**Note:** Version bump only for package @cubejs-backend/testing-shared
diff --git a/packages/cubejs-testing-shared/package.json b/packages/cubejs-testing-shared/package.json
index 1e43f3e2286cf..857333bb7dc01 100644
--- a/packages/cubejs-testing-shared/package.json
+++ b/packages/cubejs-testing-shared/package.json
@@ -1,6 +1,6 @@
{
"name": "@cubejs-backend/testing-shared",
- "version": "0.35.79",
+ "version": "0.35.81",
"description": "Cube.js Testing Helpers",
"author": "Cube Dev, Inc.",
"license": "Apache-2.0",
@@ -21,12 +21,13 @@
],
"dependencies": {
"@cubejs-backend/dotenv": "^9.0.2",
- "@cubejs-backend/query-orchestrator": "^0.35.78",
- "@cubejs-backend/schema-compiler": "^0.35.79",
+ "@cubejs-backend/query-orchestrator": "^0.35.81",
+ "@cubejs-backend/schema-compiler": "^0.35.81",
"@cubejs-backend/shared": "^0.35.67",
+ "@testcontainers/kafka": "~10.13.0",
"dedent": "^0.7.0",
"node-fetch": "^2.6.7",
- "testcontainers": "^10.10.4"
+ "testcontainers": "^10.13.0"
},
"devDependencies": {
"@cubejs-backend/linter": "^0.35.0",
diff --git a/packages/cubejs-testing-shared/src/db-container-runners/db-runner.abstract.ts b/packages/cubejs-testing-shared/src/db-container-runners/db-runner.abstract.ts
index 4c30b36484083..9ef781b8db039 100644
--- a/packages/cubejs-testing-shared/src/db-container-runners/db-runner.abstract.ts
+++ b/packages/cubejs-testing-shared/src/db-container-runners/db-runner.abstract.ts
@@ -1,3 +1,5 @@
+import { StartedNetwork } from 'testcontainers';
+
export interface ContainerVolumeDefinition {
source: string,
target: string,
@@ -5,6 +7,7 @@ export interface ContainerVolumeDefinition {
}
export interface DBRunnerContainerOptions {
+ network?: StartedNetwork,
volumes?: ContainerVolumeDefinition[],
version?: string,
}
diff --git a/packages/cubejs-testing-shared/src/db-container-runners/index.ts b/packages/cubejs-testing-shared/src/db-container-runners/index.ts
index 5a9194d278e2c..0e2e5a6265c24 100644
--- a/packages/cubejs-testing-shared/src/db-container-runners/index.ts
+++ b/packages/cubejs-testing-shared/src/db-container-runners/index.ts
@@ -9,3 +9,5 @@ export * from './prestodb';
export * from './mssql';
export * from './trino';
export * from './oracle';
+export * from './kafka';
+export * from './ksql';
diff --git a/packages/cubejs-testing-shared/src/db-container-runners/kafka.ts b/packages/cubejs-testing-shared/src/db-container-runners/kafka.ts
new file mode 100644
index 0000000000000..8ee8d8558a393
--- /dev/null
+++ b/packages/cubejs-testing-shared/src/db-container-runners/kafka.ts
@@ -0,0 +1,31 @@
+import { KafkaContainer } from '@testcontainers/kafka';
+import { DbRunnerAbstract, DBRunnerContainerOptions } from './db-runner.abstract';
+
+export class KafkaDBRunner extends DbRunnerAbstract {
+ public static startContainer(options: DBRunnerContainerOptions) {
+ const version = process.env.TEST_KAFKA_VERSION || options.version || '7.6.0';
+
+ const container = new KafkaContainer(`confluentinc/cp-kafka:${version}`)
+ .withKraft()
+ .withEnvironment({
+ KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: '1',
+ KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: '1',
+ KAFKA_NUM_PARTITIONS: '1',
+ KAFKA_DEFAULT_REPLICATION_FACTOR: '1',
+ })
+ .withExposedPorts(9093)
+ .withStartupTimeout(10 * 1000);
+
+ if (options.network) {
+ container.withNetwork(options.network);
+ container.withNetworkAliases('kafka');
+ }
+
+ if (options.volumes) {
+ const binds = options.volumes.map(v => ({ source: v.source, target: v.target, mode: v.bindMode }));
+ container.withBindMounts(binds);
+ }
+
+ return container.start();
+ }
+}
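`KafkaDBRunner` is designed to share a testcontainers network with the ksqlDB runner added below: it registers the `kafka` alias so the ksqlDB server can reach the broker at `kafka:9092`, while host-side tests talk to the mapped port 9093. A wiring sketch matching the lambda smoke test later in this diff:

```js
// Sketch: start Kafka and ksqlDB on one shared network, then seed data.
const { Network } = require('testcontainers');
const { KafkaDBRunner, KsqlDBRunner } = require('@cubejs-backend/testing-shared');

async function startStreamingStack() {
  const network = await new Network().start();
  const dbKafka = await KafkaDBRunner.startContainer({ network });
  const dbKsql = await KsqlDBRunner.startContainer({ network });
  // Creates the REQUESTS stream and seeds two rows (yesterday/today).
  await KsqlDBRunner.loadData(dbKsql);
  return { network, dbKafka, dbKsql };
}
```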
diff --git a/packages/cubejs-testing-shared/src/db-container-runners/ksql.ts b/packages/cubejs-testing-shared/src/db-container-runners/ksql.ts
new file mode 100644
index 0000000000000..70f0c083d7a5a
--- /dev/null
+++ b/packages/cubejs-testing-shared/src/db-container-runners/ksql.ts
@@ -0,0 +1,82 @@
+import fetch from 'node-fetch';
+import { GenericContainer, StartedTestContainer } from 'testcontainers';
+import { pausePromise } from '@cubejs-backend/shared';
+import { DbRunnerAbstract, DBRunnerContainerOptions } from './db-runner.abstract';
+
+export class KsqlDBRunner extends DbRunnerAbstract {
+ public static startContainer(options: DBRunnerContainerOptions) {
+ const version = process.env.TEST_KSQL_VERSION || options.version || '7.6.0';
+
+ const bootstrapServers = 'kafka:9092';
+ const container = new GenericContainer(`confluentinc/cp-ksqldb-server:${version}`)
+ .withEnvironment({
+ KSQL_BOOTSTRAP_SERVERS: bootstrapServers,
+ KSQL_KSQL_STREAMS_BOOTSTRAP_SERVERS: bootstrapServers,
+ KSQL_KSQL_SERVICE_ID: 'service-id',
+ })
+ .withExposedPorts(8088)
+ .withStartupTimeout(30 * 1000);
+
+ if (options.network) {
+ container.withNetwork(options.network);
+ container.withNetworkAliases('ksql');
+ }
+
+ if (options.volumes) {
+ const binds = options.volumes.map(v => ({ source: v.source, target: v.target, mode: v.bindMode }));
+ container.withBindMounts(binds);
+ }
+
+ return container.start();
+ }
+
+ public static async loadData(db: StartedTestContainer) {
+ const ksqlUrl = `http://${db.getHost()}:${db.getMappedPort(8088)}`;
+
+ let attempts = 0;
+ while (attempts < 10) {
+ const res = await fetch(`${ksqlUrl}/ksql`, {
+ method: 'POST',
+ headers: { Accept: 'application/json' },
+ body: JSON.stringify({
+ ksql: 'LIST STREAMS;',
+ streamsProperties: {}
+ })
+ });
+
+ const body = await res.json();
+ if (body.message !== 'KSQL is not yet ready to serve requests.') {
+ console.log('KSQL ready');
+ break;
+ }
+ console.log('KSQL not ready yet');
+ attempts++;
+
+ await pausePromise(300);
+ }
+
+ const resCreateStream = await fetch(`${ksqlUrl}/ksql`, {
+ method: 'POST',
+ headers: { Accept: 'application/json' },
+ body: JSON.stringify({
+ ksql: "CREATE OR REPLACE STREAM REQUESTS (ID STRING, TIMESTAMP TIMESTAMP, TENANT_ID INTEGER, REQUEST_ID STRING) WITH (KAFKA_TOPIC = 'REQUESTS', KEY_FORMAT = 'JSON', PARTITIONS = 1, REPLICAS = 1, VALUE_FORMAT = 'JSON');",
+ streamsProperties: {}
+ })
+ });
+
+ console.log('KSQL CREATE STREAM', await resCreateStream.json());
+
+ const yesterday = new Date(Date.now() - 24 * 60 * 60 * 1000).toJSON();
+ const today = new Date(Date.now() - 1000).toJSON();
+ const resInsertYesterday = await fetch(`${ksqlUrl}/ksql`, {
+ method: 'POST',
+ headers: { Accept: 'application/json' },
+ body: JSON.stringify({
+ ksql: `INSERT INTO REQUESTS VALUES ('1', '${yesterday}', 1, 'req-stream-1');INSERT INTO REQUESTS VALUES ('1', '${today}', 1, 'req-stream-2');`,
+ streamsProperties: {}
+ })
+ });
+
+ console.log('KSQL INSERT', await resInsertYesterday.json());
+ }
+}
diff --git a/packages/cubejs-testing/CHANGELOG.md b/packages/cubejs-testing/CHANGELOG.md
index 9bc1e584d8833..ea18bbfb6298b 100644
--- a/packages/cubejs-testing/CHANGELOG.md
+++ b/packages/cubejs-testing/CHANGELOG.md
@@ -3,6 +3,30 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.81](https://github.com/cube-js/cube/compare/v0.35.80...v0.35.81) (2024-09-12)
+
+
+### Bug Fixes
+
+* Updated jsonwebtoken in all packages ([#8282](https://github.com/cube-js/cube/issues/8282)) Thanks [@jlloyd-widen](https://github.com/jlloyd-widen) ! ([ca7c292](https://github.com/cube-js/cube/commit/ca7c292e0122be50ac7adc9b9d4910623d19f840))
+
+
+### Features
+
+* ksql and rollup pre-aggregations ([#8619](https://github.com/cube-js/cube/issues/8619)) ([cdfbd1e](https://github.com/cube-js/cube/commit/cdfbd1e21ffcf111e40c525f8a391cc0dcee3c11))
+
+
+
+
+
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+**Note:** Version bump only for package @cubejs-backend/testing
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
**Note:** Version bump only for package @cubejs-backend/testing
diff --git a/packages/cubejs-testing/birdbox-fixtures/lambda/cube.js b/packages/cubejs-testing/birdbox-fixtures/lambda/cube.js
index 8044afc431111..813926a2d547c 100644
--- a/packages/cubejs-testing/birdbox-fixtures/lambda/cube.js
+++ b/packages/cubejs-testing/birdbox-fixtures/lambda/cube.js
@@ -1,3 +1,6 @@
+const PostgresDriver = require("@cubejs-backend/postgres-driver");
+const KsqlDriver = require("@cubejs-backend/ksql-driver");
+
module.exports = {
orchestratorOptions: {
preAggregationsOptions: {
@@ -7,4 +10,22 @@ module.exports = {
contextToApiScopes: async () => new Promise((resolve) => {
resolve(['graphql', 'meta', 'data', 'jobs']);
}),
+ dbType: ({ dataSource }) => {
+ if (dataSource === 'default') {
+ return 'postgres';
+ }
+
+ return dataSource || 'postgres';
+ },
+ driverFactory: async ({ dataSource }) => {
+ if (dataSource === "ksql") {
+ return new KsqlDriver({
+ url: process.env.KSQL_URL,
+ kafkaHost: process.env.KSQL_KAFKA_HOST,
+ kafkaUseSsl: false,
+ });
+ }
+
+ return new PostgresDriver();
+ }
};
diff --git a/packages/cubejs-testing/birdbox-fixtures/lambda/schema/Requests.js b/packages/cubejs-testing/birdbox-fixtures/lambda/schema/Requests.js
new file mode 100644
index 0000000000000..bb88ba71c9d6e
--- /dev/null
+++ b/packages/cubejs-testing/birdbox-fixtures/lambda/schema/Requests.js
@@ -0,0 +1,96 @@
+cube("Requests", {
+ sql: `select 1 as tenant_id, 1 as deployment_id, 'req-1' as request_id, (NOW() - INTERVAL '1 day')::timestamp as timestamp
+ UNION ALL
+ select 2 as tenant_id, 1 as deployment_id, 'req-2' as request_id, (NOW() - INTERVAL '2 day')::timestamp as timestamp
+ `,
+ data_source: "postgres",
+ measures: {
+ count: {
+ type: "count",
+ },
+ },
+ dimensions: {
+ tenant_id: {
+ sql: `tenant_id`,
+ type: "number",
+ primaryKey: true,
+ },
+ request_id: {
+ sql: `request_id`,
+ type: "string",
+ primaryKey: true,
+ },
+ timestamp: {
+ sql: `timestamp`,
+ type: "time",
+ },
+ },
+ pre_aggregations: {
+ batch_streaming_lambda: {
+ type: `rollup_lambda`,
+ rollups: [batch, RequestsStream.stream],
+ },
+
+ batch: {
+ external: true,
+ type: "rollup",
+ measures: [count],
+ dimensions: [tenant_id, request_id, timestamp],
+ granularity: "day",
+ time_dimension: Requests.timestamp,
+ partition_granularity: "day",
+ build_range_start: { sql: "SELECT NOW() - INTERVAL '10 day'" },
+ build_range_end: { sql: "SELECT NOW()" },
+ },
+ },
+});
+
+cube("RequestsStream", {
+ dataSource: "ksql",
+
+ sql: `SELECT * FROM REQUESTS`,
+
+ measures: {
+ count: {
+ type: "count",
+ },
+ },
+ dimensions: {
+ tenant_id: {
+ sql: `TENANT_ID`,
+ type: "number",
+ primaryKey: true,
+ },
+ request_id: {
+ sql: `REQUEST_ID`,
+ type: "string",
+ primaryKey: true,
+ },
+ timestamp: {
+ sql: `TIMESTAMP`,
+ type: "time",
+ },
+ },
+ preAggregations: {
+ stream: {
+ streamOffset: "earliest",
+ readOnly: true,
+ external: true,
+ type: `rollup`,
+ measures: [count],
+ dimensions: [tenant_id, request_id, timestamp],
+ time_dimension: RequestsStream.timestamp,
+ granularity: "day",
+ unique_key_columns: ["tenant_id", "request_id"],
+ partition_granularity: "day",
+ build_range_start: { sql: "SELECT DATE_SUB(NOW(), interval '96 hour')" },
+ build_range_end: { sql: "SELECT NOW()" },
+ outputColumnTypes: [
+ { member: tenant_id, type: "int" },
+ { member: request_id, type: "text" },
+ { member: timestamp, type: "timestamp" },
+ { member: count, type: "int" },
+ ],
+ },
+ },
+});
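The `batch_streaming_lambda` pre-aggregation above unions the Postgres-built `batch` rollup with the ksql-fed `RequestsStream.stream` rollup, so a single query over `Requests` serves both historical partitions and fresh streaming rows. The smoke test below issues exactly this query; as a sketch:

```js
// Sketch: one query answered by the lambda union of batch + stream
// (runs inside an async test in the smoke suite below).
const response = await client.load({
  measures: ['Requests.count'],
  dimensions: ['Requests.tenant_id', 'Requests.request_id'],
  timeDimensions: [{ dimension: 'Requests.timestamp', granularity: 'day' }],
});
// Rows come from both the batch partitions (req-1, req-2) and the
// Kafka-backed stream partition (req-stream-2).
```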
diff --git a/packages/cubejs-testing/package.json b/packages/cubejs-testing/package.json
index 1da510b58e11a..33846aa62147c 100644
--- a/packages/cubejs-testing/package.json
+++ b/packages/cubejs-testing/package.json
@@ -1,6 +1,6 @@
{
"name": "@cubejs-backend/testing",
- "version": "0.35.79",
+ "version": "0.35.81",
"description": "Cube.js e2e tests",
"author": "Cube Dev, Inc.",
"license": "Apache-2.0",
@@ -89,20 +89,21 @@
"birdbox-fixtures"
],
"dependencies": {
- "@cubejs-backend/cubestore-driver": "^0.35.78",
+ "@cubejs-backend/cubestore-driver": "^0.35.81",
"@cubejs-backend/dotenv": "^9.0.2",
- "@cubejs-backend/postgres-driver": "^0.35.79",
- "@cubejs-backend/query-orchestrator": "^0.35.78",
- "@cubejs-backend/schema-compiler": "^0.35.79",
+ "@cubejs-backend/ksql-driver": "^0.35.81",
+ "@cubejs-backend/postgres-driver": "^0.35.81",
+ "@cubejs-backend/query-orchestrator": "^0.35.81",
+ "@cubejs-backend/schema-compiler": "^0.35.81",
"@cubejs-backend/shared": "^0.35.67",
- "@cubejs-backend/testing-shared": "^0.35.79",
+ "@cubejs-backend/testing-shared": "^0.35.81",
"@cubejs-client/ws-transport": "^0.35.23",
"dedent": "^0.7.0",
"fs-extra": "^8.1.0",
"http-proxy": "^1.18.1",
"node-fetch": "^2.6.1",
"ramda": "^0.27.2",
- "testcontainers": "^10.10.4",
+ "testcontainers": "^10.13.0",
"yargs": "^17.3.1"
},
"devDependencies": {
@@ -122,7 +123,7 @@
"eslint-plugin-cypress": "^2.12.1",
"globby": "^11.0.4",
"jest": "^27",
- "jsonwebtoken": "^8.5.1",
+ "jsonwebtoken": "^9.0.2",
"jwt-decode": "^3.1.2",
"pg": "^8.7.3",
"typescript": "~5.2.2",
diff --git a/packages/cubejs-testing/src/birdbox.ts b/packages/cubejs-testing/src/birdbox.ts
index 17954fa961d13..b357abab4dd18 100644
--- a/packages/cubejs-testing/src/birdbox.ts
+++ b/packages/cubejs-testing/src/birdbox.ts
@@ -363,7 +363,7 @@ export async function startBirdBoxFromContainer(
proxyServer.on('error', async (err, req, res: any) => {
process.stderr.write(`[Proxy Server] error: ${err}\n`);
- if (!res.headersSent) {
+ if ('headersSent' in res && !res.headersSent) {
res.writeHead(500, { 'content-type': 'application/json' });
}
diff --git a/packages/cubejs-testing/test/smoke-lambda.test.ts b/packages/cubejs-testing/test/smoke-lambda.test.ts
index 3687ab25e5194..05369fec8197e 100644
--- a/packages/cubejs-testing/test/smoke-lambda.test.ts
+++ b/packages/cubejs-testing/test/smoke-lambda.test.ts
@@ -1,8 +1,8 @@
import R from 'ramda';
-import { StartedTestContainer } from 'testcontainers';
+import { StartedTestContainer, Network, StartedNetwork } from 'testcontainers';
import { pausePromise } from '@cubejs-backend/shared';
import fetch from 'node-fetch';
-import { PostgresDBRunner } from '@cubejs-backend/testing-shared';
+import { PostgresDBRunner, KafkaDBRunner, KsqlDBRunner } from '@cubejs-backend/testing-shared';
import cubejs, { CubeApi, Query } from '@cubejs-client/core';
// eslint-disable-next-line import/no-extraneous-dependencies
import { afterAll, beforeAll, expect, jest } from '@jest/globals';
@@ -30,6 +30,9 @@ describe('lambda', () => {
jest.setTimeout(60 * 5 * 1000);
let db: StartedTestContainer;
+ let network: StartedNetwork;
+ let dbKafka: StartedTestContainer;
+ let dbKsql: StartedTestContainer;
let birdbox: BirdBox;
let client: CubeApi;
let postgres: any;
@@ -38,6 +41,13 @@ describe('lambda', () => {
beforeAll(async () => {
db = await PostgresDBRunner.startContainer({});
await PostgresDBRunner.loadEcom(db);
+
+ network = await new Network().start();
+ dbKafka = await KafkaDBRunner.startContainer({ network });
+ dbKsql = await KsqlDBRunner.startContainer({ network });
+
+ await KsqlDBRunner.loadData(dbKsql);
+
birdbox = await getBirdbox(
'postgres',
{
@@ -50,6 +60,8 @@ describe('lambda', () => {
CUBEJS_DB_PASS: 'test',
CUBEJS_ROLLUP_ONLY: 'true',
CUBEJS_REFRESH_WORKER: 'false',
+ KSQL_URL: `http://${dbKsql.getHost()}:${dbKsql.getMappedPort(8088)}`,
+ KSQL_KAFKA_HOST: `${dbKafka.getHost()}:${dbKafka.getMappedPort(9093)}`,
},
{
schemaDir: 'lambda/schema',
@@ -79,9 +91,41 @@ describe('lambda', () => {
afterAll(async () => {
await birdbox.stop();
await db.stop();
+ await dbKafka.stop();
+ await dbKsql.stop();
+ await network.stop();
await cubestore.release();
}, JEST_AFTER_ALL_DEFAULT_TIMEOUT);
+ test('Query lambda with ksql', async () => {
+ const query: Query = {
+ measures: ['Requests.count'],
+ dimensions: ['Requests.tenant_id', 'Requests.request_id'],
+ timeDimensions: [
+ {
+ dimension: 'Requests.timestamp',
+ granularity: 'day'
+ }
+ ],
+ };
+ // First call to trigger the pre-aggregation build
+ await client.load(query);
+ // We have to wait for cubestore to consume the data from Kafka. There is no way to know when it's done right now.
+ await pausePromise(5000);
+
+ const response = await client.load(query);
+
+ // @ts-ignore
+ expect(response.loadResponse.results[0].data.map(i => i['Requests.request_id'])).toEqual([
+ 'req-2',
+ 'req-1',
+ 'req-stream-2'
+ ]);
+
+ // @ts-ignore
+ expect(response.loadResponse.results[0].data.length).toEqual(3);
+ });
+
test('query', async () => {
const query: Query = {
measures: ['Orders.count'],
diff --git a/packages/cubejs-trino-driver/CHANGELOG.md b/packages/cubejs-trino-driver/CHANGELOG.md
index 137635b32a63a..850d14914e8da 100644
--- a/packages/cubejs-trino-driver/CHANGELOG.md
+++ b/packages/cubejs-trino-driver/CHANGELOG.md
@@ -3,6 +3,22 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.81](https://github.com/cube-js/cube/compare/v0.35.80...v0.35.81) (2024-09-12)
+
+**Note:** Version bump only for package @cubejs-backend/trino-driver
+
+
+
+
+
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+**Note:** Version bump only for package @cubejs-backend/trino-driver
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
**Note:** Version bump only for package @cubejs-backend/trino-driver
diff --git a/packages/cubejs-trino-driver/package.json b/packages/cubejs-trino-driver/package.json
index a78b95bcd71c8..306b8432fb015 100644
--- a/packages/cubejs-trino-driver/package.json
+++ b/packages/cubejs-trino-driver/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/trino-driver",
"description": "Cube.js Trino database driver",
"author": "Cube Dev, Inc.",
- "version": "0.35.79",
+ "version": "0.35.81",
"repository": {
"type": "git",
"url": "https://github.com/cube-js/cube.git",
@@ -25,9 +25,9 @@
"lint:fix": "eslint --fix src/* --ext .ts"
},
"dependencies": {
- "@cubejs-backend/base-driver": "^0.35.67",
- "@cubejs-backend/prestodb-driver": "^0.35.67",
- "@cubejs-backend/schema-compiler": "^0.35.79",
+ "@cubejs-backend/base-driver": "^0.35.81",
+ "@cubejs-backend/prestodb-driver": "^0.35.81",
+ "@cubejs-backend/schema-compiler": "^0.35.81",
"@cubejs-backend/shared": "^0.35.67",
"presto-client": "^0.12.2",
"ramda": "^0.27.0",
diff --git a/rust/cubenativeutils/Cargo.lock b/rust/cubenativeutils/Cargo.lock
index 8a7ca561e9dbd..b91bfac98f144 100644
--- a/rust/cubenativeutils/Cargo.lock
+++ b/rust/cubenativeutils/Cargo.lock
@@ -629,7 +629,7 @@ dependencies = [
[[package]]
name = "cube-ext"
version = "1.0.0"
-source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=400fa0d889a8a38ca69f36d5750dfb572fc6018e#400fa0d889a8a38ca69f36d5750dfb572fc6018e"
+source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=dcf3e4aa26fd112043ef26fa4a78db5dbd443c86#dcf3e4aa26fd112043ef26fa4a78db5dbd443c86"
dependencies = [
"arrow",
"chrono",
@@ -706,7 +706,6 @@ dependencies = [
"futures-util",
"hashbrown 0.14.5",
"itertools",
- "lazy_static",
"log",
"lru",
"minijinja",
@@ -718,7 +717,6 @@ dependencies = [
"regex",
"rust_decimal",
"serde",
- "serde_derive",
"serde_json",
"sha1_smol",
"sha2",
@@ -736,7 +734,7 @@ dependencies = [
[[package]]
name = "datafusion"
version = "7.0.0"
-source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=400fa0d889a8a38ca69f36d5750dfb572fc6018e#400fa0d889a8a38ca69f36d5750dfb572fc6018e"
+source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=dcf3e4aa26fd112043ef26fa4a78db5dbd443c86#dcf3e4aa26fd112043ef26fa4a78db5dbd443c86"
dependencies = [
"ahash 0.7.8",
"arrow",
@@ -769,7 +767,7 @@ dependencies = [
[[package]]
name = "datafusion-common"
version = "7.0.0"
-source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=400fa0d889a8a38ca69f36d5750dfb572fc6018e#400fa0d889a8a38ca69f36d5750dfb572fc6018e"
+source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=dcf3e4aa26fd112043ef26fa4a78db5dbd443c86#dcf3e4aa26fd112043ef26fa4a78db5dbd443c86"
dependencies = [
"arrow",
"ordered-float 2.10.1",
@@ -780,7 +778,7 @@ dependencies = [
[[package]]
name = "datafusion-data-access"
version = "1.0.0"
-source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=400fa0d889a8a38ca69f36d5750dfb572fc6018e#400fa0d889a8a38ca69f36d5750dfb572fc6018e"
+source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=dcf3e4aa26fd112043ef26fa4a78db5dbd443c86#dcf3e4aa26fd112043ef26fa4a78db5dbd443c86"
dependencies = [
"async-trait",
"chrono",
@@ -793,7 +791,7 @@ dependencies = [
[[package]]
name = "datafusion-expr"
version = "7.0.0"
-source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=400fa0d889a8a38ca69f36d5750dfb572fc6018e#400fa0d889a8a38ca69f36d5750dfb572fc6018e"
+source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=dcf3e4aa26fd112043ef26fa4a78db5dbd443c86#dcf3e4aa26fd112043ef26fa4a78db5dbd443c86"
dependencies = [
"ahash 0.7.8",
"arrow",
@@ -804,7 +802,7 @@ dependencies = [
[[package]]
name = "datafusion-physical-expr"
version = "7.0.0"
-source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=400fa0d889a8a38ca69f36d5750dfb572fc6018e#400fa0d889a8a38ca69f36d5750dfb572fc6018e"
+source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=dcf3e4aa26fd112043ef26fa4a78db5dbd443c86#dcf3e4aa26fd112043ef26fa4a78db5dbd443c86"
dependencies = [
"ahash 0.7.8",
"arrow",
diff --git a/rust/cubesql/CHANGELOG.md b/rust/cubesql/CHANGELOG.md
index d316a9473e7bb..fc80190997a3d 100644
--- a/rust/cubesql/CHANGELOG.md
+++ b/rust/cubesql/CHANGELOG.md
@@ -3,6 +3,29 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.81](https://github.com/cube-js/cube/compare/v0.35.80...v0.35.81) (2024-09-12)
+
+
+### Bug Fixes
+
+* **cubesql:** Use load meta with user change for SQL generation calls ([#8693](https://github.com/cube-js/cube/issues/8693)) ([0f7bb3d](https://github.com/cube-js/cube/commit/0f7bb3d3a96447a69835e3c591ebaf67592c3eed))
+
+
+
+
+
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+
+### Features
+
+* **cubesql:** Fill pg_description table with cube and members descriptions ([#8618](https://github.com/cube-js/cube/issues/8618)) ([2288c18](https://github.com/cube-js/cube/commit/2288c18bf30d1f3a3299b235fe9b4405d2cb7463))
+* **cubesql:** Support join with type coercion ([#8608](https://github.com/cube-js/cube/issues/8608)) ([46b3a36](https://github.com/cube-js/cube/commit/46b3a36936f0f00805144714f0dd87a3c50a5e0a))
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
**Note:** Version bump only for package @cubejs-backend/cubesql
diff --git a/rust/cubesql/Cargo.lock b/rust/cubesql/Cargo.lock
index 447302de31405..a832246ac7504 100644
--- a/rust/cubesql/Cargo.lock
+++ b/rust/cubesql/Cargo.lock
@@ -721,7 +721,7 @@ dependencies = [
[[package]]
name = "cube-ext"
version = "1.0.0"
-source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=400fa0d889a8a38ca69f36d5750dfb572fc6018e#400fa0d889a8a38ca69f36d5750dfb572fc6018e"
+source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=dcf3e4aa26fd112043ef26fa4a78db5dbd443c86#dcf3e4aa26fd112043ef26fa4a78db5dbd443c86"
dependencies = [
"arrow",
"chrono",
@@ -775,7 +775,6 @@ dependencies = [
"hashbrown 0.14.3",
"insta",
"itertools",
- "lazy_static",
"log",
"lru",
"minijinja",
@@ -852,7 +851,7 @@ dependencies = [
[[package]]
name = "datafusion"
version = "7.0.0"
-source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=400fa0d889a8a38ca69f36d5750dfb572fc6018e#400fa0d889a8a38ca69f36d5750dfb572fc6018e"
+source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=dcf3e4aa26fd112043ef26fa4a78db5dbd443c86#dcf3e4aa26fd112043ef26fa4a78db5dbd443c86"
dependencies = [
"ahash 0.7.8",
"arrow",
@@ -885,7 +884,7 @@ dependencies = [
[[package]]
name = "datafusion-common"
version = "7.0.0"
-source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=400fa0d889a8a38ca69f36d5750dfb572fc6018e#400fa0d889a8a38ca69f36d5750dfb572fc6018e"
+source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=dcf3e4aa26fd112043ef26fa4a78db5dbd443c86#dcf3e4aa26fd112043ef26fa4a78db5dbd443c86"
dependencies = [
"arrow",
"ordered-float 2.10.0",
@@ -896,7 +895,7 @@ dependencies = [
[[package]]
name = "datafusion-data-access"
version = "1.0.0"
-source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=400fa0d889a8a38ca69f36d5750dfb572fc6018e#400fa0d889a8a38ca69f36d5750dfb572fc6018e"
+source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=dcf3e4aa26fd112043ef26fa4a78db5dbd443c86#dcf3e4aa26fd112043ef26fa4a78db5dbd443c86"
dependencies = [
"async-trait",
"chrono",
@@ -909,7 +908,7 @@ dependencies = [
[[package]]
name = "datafusion-expr"
version = "7.0.0"
-source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=400fa0d889a8a38ca69f36d5750dfb572fc6018e#400fa0d889a8a38ca69f36d5750dfb572fc6018e"
+source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=dcf3e4aa26fd112043ef26fa4a78db5dbd443c86#dcf3e4aa26fd112043ef26fa4a78db5dbd443c86"
dependencies = [
"ahash 0.7.8",
"arrow",
@@ -920,7 +919,7 @@ dependencies = [
[[package]]
name = "datafusion-physical-expr"
version = "7.0.0"
-source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=400fa0d889a8a38ca69f36d5750dfb572fc6018e#400fa0d889a8a38ca69f36d5750dfb572fc6018e"
+source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=dcf3e4aa26fd112043ef26fa4a78db5dbd443c86#dcf3e4aa26fd112043ef26fa4a78db5dbd443c86"
dependencies = [
"ahash 0.7.8",
"arrow",
diff --git a/rust/cubesql/cubesql/Cargo.toml b/rust/cubesql/cubesql/Cargo.toml
index 2e70b15dac9dc..e88ffd9fc6346 100644
--- a/rust/cubesql/cubesql/Cargo.toml
+++ b/rust/cubesql/cubesql/Cargo.toml
@@ -10,13 +10,12 @@ homepage = "https://cube.dev"
[dependencies]
arc-swap = "1"
-datafusion = { git = 'https://github.com/cube-js/arrow-datafusion.git', rev = "400fa0d889a8a38ca69f36d5750dfb572fc6018e", default-features = false, features = ["regex_expressions", "unicode_expressions"] }
+datafusion = { git = 'https://github.com/cube-js/arrow-datafusion.git', rev = "dcf3e4aa26fd112043ef26fa4a78db5dbd443c86", default-features = false, features = ["regex_expressions", "unicode_expressions"] }
anyhow = "1.0"
thiserror = "1.0.50"
cubeclient = { path = "../cubeclient" }
pg-srv = { path = "../pg-srv" }
sqlparser = { git = 'https://github.com/cube-js/sqlparser-rs.git', rev = "6a54d27d3b75a04b9f9cbe309a83078aa54b32fd" }
-lazy_static = "1.4.0"
base64 = "0.13.0"
tokio = { version = "^1.35", features = ["full", "rt", "tracing"] }
serde = { version = "^1.0", features = ["derive"] }
diff --git a/rust/cubesql/cubesql/src/compile/engine/context_postgresql.rs b/rust/cubesql/cubesql/src/compile/engine/context_postgresql.rs
index 68dbf97893dd9..2b1b227553d8c 100644
--- a/rust/cubesql/cubesql/src/compile/engine/context_postgresql.rs
+++ b/rust/cubesql/cubesql/src/compile/engine/context_postgresql.rs
@@ -327,7 +327,11 @@ impl DatabaseProtocol {
context.session_state.all_variables(),
)))
}
- "pg_description" => return Some(Arc::new(PgCatalogDescriptionProvider::new())),
+ "pg_description" => {
+ return Some(Arc::new(PgCatalogDescriptionProvider::new(
+ &context.meta.tables,
+ )))
+ }
"pg_constraint" => return Some(Arc::new(PgCatalogConstraintProvider::new())),
"pg_depend" => return Some(Arc::new(PgCatalogDependProvider::new())),
"pg_am" => return Some(Arc::new(PgCatalogAmProvider::new())),
diff --git a/rust/cubesql/cubesql/src/compile/engine/df/wrapper.rs b/rust/cubesql/cubesql/src/compile/engine/df/wrapper.rs
index 869528f7602cd..73123c38d7ced 100644
--- a/rust/cubesql/cubesql/src/compile/engine/df/wrapper.rs
+++ b/rust/cubesql/cubesql/src/compile/engine/df/wrapper.rs
@@ -32,8 +32,16 @@ use itertools::Itertools;
use regex::{Captures, Regex};
use serde::{Deserialize, Serialize};
use std::{
- any::Any, cmp::min, collections::HashMap, convert::TryInto, fmt, future::Future, iter,
- pin::Pin, result, sync::Arc,
+ any::Any,
+ cmp::min,
+ collections::HashMap,
+ convert::TryInto,
+ fmt,
+ future::Future,
+ iter,
+ pin::Pin,
+ result,
+ sync::{Arc, LazyLock},
};
#[derive(Debug, Clone, Deserialize)]
@@ -160,12 +168,12 @@ impl SqlQuery {
}
pub fn finalize_query(&mut self, sql_templates: Arc<SqlTemplates>) -> Result<()> {
+ static REGEX: LazyLock<Regex> = LazyLock::new(|| Regex::new(r"\$(\d+)\$").unwrap());
+
let mut params = Vec::new();
let mut rendered_params = HashMap::new();
- let regex = Regex::new(r"\$(\d+)\$")
- .map_err(|e| DataFusionError::Execution(format!("Can't parse regex: {}", e)))?;
let mut res = Ok(());
- let replaced_sql = regex.replace_all(self.sql.as_str(), |c: &Captures<'_>| {
+ let replaced_sql = REGEX.replace_all(self.sql.as_str(), |c: &Captures<'_>| {
let param = c.get(1).map(|x| x.as_str());
match self.render_param(sql_templates.clone(), param, &rendered_params, params.len()) {
Ok((param_index, param, push_param)) => {
@@ -260,9 +268,7 @@ pub struct SqlGenerationResult {
pub request: TransportLoadRequestQuery,
}
-lazy_static! {
- static ref DATE_PART_REGEX: Regex = Regex::new("^[A-Za-z_ ]+$").unwrap();
-}
+static DATE_PART_REGEX: LazyLock<Regex> = LazyLock::new(|| Regex::new("^[A-Za-z_ ]+$").unwrap());
macro_rules! generate_sql_for_timestamp {
(@generic $value:ident, $value_block:expr, $sql_generator:expr, $sql_query:expr) => {
@@ -459,12 +465,14 @@ impl CubeScanWrapperNode {
node
)));
}
+ let mut meta_with_user = load_request_meta.as_ref().clone();
+ meta_with_user.set_change_user(node.options.change_user.clone());
let sql = transport
.sql(
node.span_id.clone(),
node.request.clone(),
node.auth_context,
- load_request_meta.as_ref().clone(),
+ meta_with_user,
Some(
node.member_fields
.iter()
@@ -837,12 +845,16 @@ impl CubeScanWrapperNode {
}
// TODO time dimensions, filters, segments
+ let mut meta_with_user = load_request_meta.as_ref().clone();
+ meta_with_user.set_change_user(
+ ungrouped_scan_node.options.change_user.clone(),
+ );
let sql_response = transport
.sql(
ungrouped_scan_node.span_id.clone(),
load_request.clone(),
ungrouped_scan_node.auth_context.clone(),
- load_request_meta.as_ref().clone(),
+ meta_with_user,
// TODO use aliases or push everything through names?
None,
Some(sql.values.clone()),
@@ -950,8 +962,9 @@ impl CubeScanWrapperNode {
ungrouped_scan_node: Option<Arc<CubeScanNode>>,
subqueries: Arc<HashMap<String, String>>,
) -> result::Result<(Vec<AliasedColumn>, SqlQuery), CubeError> {
- let non_id_regex = Regex::new(r"[^a-zA-Z0-9_]")
- .map_err(|e| CubeError::internal(format!("Can't parse regex: {}", e)))?;
+ static NON_ID_REGEX: LazyLock<Regex> =
+ LazyLock::new(|| Regex::new(r"[^a-zA-Z0-9_]").unwrap());
+
let mut aliased_columns = Vec::new();
for original_expr in exprs {
let expr = if let Some(column_remapping) = column_remapping.as_ref() {
@@ -1001,7 +1014,7 @@ impl CubeScanWrapperNode {
let alias = if can_rename_columns {
let alias = expr_name(&expr, &schema)?;
- let mut truncated_alias = non_id_regex
+ let mut truncated_alias = NON_ID_REGEX
.replace_all(&alias, "_")
.trim_start_matches("_")
.to_lowercase();
diff --git a/rust/cubesql/cubesql/src/compile/engine/information_schema/postgres/pg_description.rs b/rust/cubesql/cubesql/src/compile/engine/information_schema/postgres/pg_description.rs
index bc9ecebdfed90..b3de6e0d25779 100644
--- a/rust/cubesql/cubesql/src/compile/engine/information_schema/postgres/pg_description.rs
+++ b/rust/cubesql/cubesql/src/compile/engine/information_schema/postgres/pg_description.rs
@@ -1,4 +1,4 @@
-use std::{any::Any, sync::Arc};
+use std::{any::Any, convert::TryFrom, sync::Arc};
use async_trait::async_trait;
@@ -14,10 +14,19 @@ use datafusion::{
physical_plan::{memory::MemoryExec, ExecutionPlan},
};
+use crate::{
+ compile::engine::information_schema::postgres::PG_CLASS_CLASS_OID, transport::CubeMetaTable,
+};
+
+/// See https://www.postgresql.org/docs/16/catalog-pg-description.html
struct PgCatalogDescriptionBuilder {
+ /// The OID of the object this description pertains to
objoid: UInt32Builder,
+ /// The OID of the system catalog this object appears in
classoid: UInt32Builder,
+ /// For a comment on a table column, this is the column number (the objoid and classoid refer to the table itself). For all other object types, this column is zero.
objsubid: Int32Builder,
+ /// Arbitrary text that serves as the description of this object
description: StringBuilder,
}
@@ -33,6 +42,23 @@ impl PgCatalogDescriptionBuilder {
}
}
+ fn add_table(&mut self, table_oid: u32, description: impl AsRef<str>) {
+ self.objoid.append_value(table_oid).unwrap();
+ self.classoid.append_value(PG_CLASS_CLASS_OID).unwrap();
+ self.objsubid.append_value(0).unwrap();
+ self.description.append_value(description).unwrap();
+ }
+
+ fn add_column(&mut self, table_oid: u32, column_idx: usize, description: impl AsRef<str>) {
+ self.objoid.append_value(table_oid).unwrap();
+ self.classoid.append_value(PG_CLASS_CLASS_OID).unwrap();
+ // Column subids starts with 1
+ self.objsubid
+ .append_value(i32::try_from(column_idx).unwrap() + 1)
+ .unwrap();
+ self.description.append_value(description).unwrap();
+ }
+
fn finish(mut self) -> Vec<Arc<dyn Array>> {
let columns: Vec<Arc<dyn Array>> = vec![
Arc::new(self.objoid.finish()),
@@ -50,8 +76,20 @@ pub struct PgCatalogDescriptionProvider {
}
impl PgCatalogDescriptionProvider {
- pub fn new() -> Self {
- let builder = PgCatalogDescriptionBuilder::new();
+ pub fn new(tables: &[CubeMetaTable]) -> Self {
+ let mut builder = PgCatalogDescriptionBuilder::new();
+
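+ // Emit one pg_description row for each table description and one for each column description.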
+ for table in tables {
+ if let Some(description) = &table.description {
+ builder.add_table(table.oid, description);
+ }
+
+ for (idx, column) in table.columns.iter().enumerate() {
+ if let Some(description) = &column.description {
+ builder.add_column(table.oid, idx, description);
+ }
+ }
+ }
Self {
data: Arc::new(builder.finish()),
diff --git a/rust/cubesql/cubesql/src/compile/engine/udf/common.rs b/rust/cubesql/cubesql/src/compile/engine/udf/common.rs
index 3be9c598e23fb..db8c96e279eda 100644
--- a/rust/cubesql/cubesql/src/compile/engine/udf/common.rs
+++ b/rust/cubesql/cubesql/src/compile/engine/udf/common.rs
@@ -1,4 +1,8 @@
-use std::{any::type_name, sync::Arc, thread};
+use std::{
+ any::type_name,
+ sync::{Arc, LazyLock},
+ thread,
+};
use chrono::{Datelike, Days, Duration, Months, NaiveDate, NaiveDateTime, NaiveTime};
use datafusion::{
@@ -3329,17 +3333,18 @@ pub fn create_current_setting_udf() -> ScalarUDF {
}
pub fn create_quote_ident_udf() -> ScalarUDF {
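+ // Identifiers matching this pattern are safe unquoted; anything else is
+ // wrapped in double quotes with embedded quotes doubled.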
+ static RE: LazyLock<Regex> = LazyLock::new(|| Regex::new(r"^[a-z_][a-z0-9_]*$").unwrap());
+
let fun = make_scalar_function(move |args: &[ArrayRef]| {
assert!(args.len() == 1);
let idents = downcast_string_arg!(args[0], "str", i32);
- let re = Regex::new(r"^[a-z_][a-z0-9_]*$").unwrap();
let result = idents
.iter()
.map(|ident| {
ident.map(|ident| {
- if re.is_match(ident) {
+ if RE.is_match(ident) {
return ident.to_string();
}
format!("\"{}\"", ident.replace("\"", "\"\""))
diff --git a/rust/cubesql/cubesql/src/compile/mod.rs b/rust/cubesql/cubesql/src/compile/mod.rs
index 765d3a7d0c3fd..051e4a98126d8 100644
--- a/rust/cubesql/cubesql/src/compile/mod.rs
+++ b/rust/cubesql/cubesql/src/compile/mod.rs
@@ -288,68 +288,6 @@ mod tests {
);
}
- #[tokio::test]
- async fn test_change_user_via_filter() {
- init_testing_logger();
-
- let query_plan = convert_select_to_query_plan(
- "SELECT COUNT(*) as cnt FROM KibanaSampleDataEcommerce WHERE __user = 'gopher'"
- .to_string(),
- DatabaseProtocol::PostgreSQL,
- )
- .await;
-
- let cube_scan = query_plan.as_logical_plan().find_cube_scan();
-
- assert_eq!(cube_scan.options.change_user, Some("gopher".to_string()));
-
- assert_eq!(
- cube_scan.request,
- V1LoadRequestQuery {
- measures: Some(vec!["KibanaSampleDataEcommerce.count".to_string(),]),
- segments: Some(vec![]),
- dimensions: Some(vec![]),
- time_dimensions: None,
- order: None,
- limit: None,
- offset: None,
- filters: None,
- ungrouped: None,
- }
- )
- }
-
- #[tokio::test]
- async fn test_change_user_via_in_filter() {
- init_testing_logger();
-
- let query_plan = convert_select_to_query_plan(
- "SELECT COUNT(*) as cnt FROM KibanaSampleDataEcommerce WHERE __user IN ('gopher')"
- .to_string(),
- DatabaseProtocol::PostgreSQL,
- )
- .await;
-
- let cube_scan = query_plan.as_logical_plan().find_cube_scan();
-
- assert_eq!(cube_scan.options.change_user, Some("gopher".to_string()));
-
- assert_eq!(
- cube_scan.request,
- V1LoadRequestQuery {
- measures: Some(vec!["KibanaSampleDataEcommerce.count".to_string(),]),
- segments: Some(vec![]),
- dimensions: Some(vec![]),
- time_dimensions: None,
- order: None,
- limit: None,
- offset: None,
- filters: None,
- ungrouped: None,
- }
- )
- }
-
#[tokio::test]
async fn test_starts_with() {
init_testing_logger();
@@ -481,92 +419,6 @@ mod tests {
assert!(sql.contains("LOWER("));
}
- #[tokio::test]
- async fn test_change_user_via_in_filter_thoughtspot() {
- init_testing_logger();
-
- let query_plan = convert_select_to_query_plan(
- r#"SELECT COUNT(*) as cnt FROM KibanaSampleDataEcommerce "ta_1" WHERE (LOWER("ta_1"."__user") IN ('gopher')) = TRUE"#.to_string(),
- DatabaseProtocol::PostgreSQL,
- )
- .await;
-
- let expected_request = V1LoadRequestQuery {
- measures: Some(vec!["KibanaSampleDataEcommerce.count".to_string()]),
- segments: Some(vec![]),
- dimensions: Some(vec![]),
- time_dimensions: None,
- order: None,
- limit: None,
- offset: None,
- filters: None,
- ungrouped: None,
- };
-
- let cube_scan = query_plan.as_logical_plan().find_cube_scan();
- assert_eq!(cube_scan.options.change_user, Some("gopher".to_string()));
- assert_eq!(cube_scan.request, expected_request);
-
- let query_plan = convert_select_to_query_plan(
- r#"SELECT COUNT(*) as cnt FROM KibanaSampleDataEcommerce "ta_1" WHERE ((LOWER("ta_1"."__user") IN ('gopher') = TRUE) = TRUE)"#.to_string(),
- DatabaseProtocol::PostgreSQL,
- )
- .await;
-
- let cube_scan = query_plan.as_logical_plan().find_cube_scan();
- assert_eq!(cube_scan.options.change_user, Some("gopher".to_string()));
- assert_eq!(cube_scan.request, expected_request);
- }
-
- #[tokio::test]
- async fn test_change_user_via_filter_and() {
- let query_plan = convert_select_to_query_plan(
- "SELECT COUNT(*) as cnt FROM KibanaSampleDataEcommerce WHERE __user = 'gopher' AND customer_gender = 'male'".to_string(),
- DatabaseProtocol::PostgreSQL,
- )
- .await;
-
- let cube_scan = query_plan.as_logical_plan().find_cube_scan();
-
- assert_eq!(cube_scan.options.change_user, Some("gopher".to_string()));
-
- assert_eq!(
- cube_scan.request,
- V1LoadRequestQuery {
- measures: Some(vec!["KibanaSampleDataEcommerce.count".to_string(),]),
- segments: Some(vec![]),
- dimensions: Some(vec![]),
- time_dimensions: None,
- order: None,
- limit: None,
- offset: None,
- filters: Some(vec![V1LoadRequestQueryFilterItem {
- member: Some("KibanaSampleDataEcommerce.customer_gender".to_string()),
- operator: Some("equals".to_string()),
- values: Some(vec!["male".to_string()]),
- or: None,
- and: None,
- }]),
- ungrouped: None,
- }
- )
- }
-
- #[tokio::test]
- async fn test_change_user_via_filter_or() {
- // OR is not allowed for __user
- let meta = get_test_tenant_ctx();
- let query =
- convert_sql_to_cube_query(
- &"SELECT COUNT(*) as cnt FROM KibanaSampleDataEcommerce WHERE __user = 'gopher' OR customer_gender = 'male'".to_string(),
- meta.clone(),
- get_test_session(DatabaseProtocol::PostgreSQL, meta).await
- ).await;
-
- // TODO: We need to propagate error to result, to assert message
- query.unwrap_err();
- }
-
#[tokio::test]
async fn test_order_alias_for_measure_default() {
let query_plan = convert_select_to_query_plan(
@@ -8806,39 +8658,6 @@ ORDER BY "source"."str0" ASC
)
}
- #[tokio::test]
- async fn test_user_with_join() {
- if !Rewriter::sql_push_down_enabled() {
- return;
- }
- init_testing_logger();
-
- let logical_plan = convert_select_to_query_plan(
- "SELECT aliased.count as c, aliased.user_1 as u1, aliased.user_2 as u2 FROM (SELECT \"KibanaSampleDataEcommerce\".count as count, \"KibanaSampleDataEcommerce\".__user as user_1, Logs.__user as user_2 FROM \"KibanaSampleDataEcommerce\" CROSS JOIN Logs WHERE __user = 'foo') aliased".to_string(),
- DatabaseProtocol::PostgreSQL,
- )
- .await
- .as_logical_plan();
-
- let cube_scan = logical_plan.find_cube_scan();
- assert_eq!(
- cube_scan.request,
- V1LoadRequestQuery {
- measures: Some(vec!["KibanaSampleDataEcommerce.count".to_string()]),
- dimensions: Some(vec![]),
- segments: Some(vec![]),
- time_dimensions: None,
- order: Some(vec![]),
- limit: None,
- offset: None,
- filters: None,
- ungrouped: Some(true),
- }
- );
-
- assert_eq!(cube_scan.options.change_user, Some("foo".to_string()))
- }
-
#[tokio::test]
async fn test_sort_relations() -> Result<(), CubeError> {
init_testing_logger();
diff --git a/rust/cubesql/cubesql/src/compile/parser.rs b/rust/cubesql/cubesql/src/compile/parser.rs
index 53c324ed9bc10..58f79445ee7cc 100644
--- a/rust/cubesql/cubesql/src/compile/parser.rs
+++ b/rust/cubesql/cubesql/src/compile/parser.rs
@@ -1,4 +1,4 @@
-use std::collections::HashMap;
+use std::{collections::HashMap, sync::LazyLock};
use regex::Regex;
use sqlparser::{
@@ -36,9 +36,9 @@ impl Dialect for MySqlDialectWithBackTicks {
}
}
-lazy_static! {
- static ref SIGMA_WORKAROUND: Regex = Regex::new(r#"(?s)^\s*with\s+nsp\sas\s\(.*nspname\s=\s.*\),\s+tbl\sas\s\(.*relname\s=\s.*\).*select\s+attname.*from\spg_attribute.*$"#).unwrap();
-}
+static SIGMA_WORKAROUND: LazyLock<Regex> = LazyLock::new(|| {
+ Regex::new(r#"(?s)^\s*with\s+nsp\sas\s\(.*nspname\s=\s.*\),\s+tbl\sas\s\(.*relname\s=\s.*\).*select\s+attname.*from\spg_attribute.*$"#).unwrap()
+});
pub fn parse_sql_to_statements(
query: &String,
@@ -118,13 +118,18 @@ pub fn parse_sql_to_statements(
// Sigma Computing WITH query workaround
// TODO: remove workaround when subquery is supported in JOIN ON conditions
let query = if SIGMA_WORKAROUND.is_match(&query) {
- let relnamespace_re = Regex::new(r#"(?s)from\spg_catalog\.pg_class\s+where\s+relname\s=\s(?P<relname>'(?:[^']|'')+'|\$\d+)\s+and\s+relnamespace\s=\s\(select\soid\sfrom\snsp\)"#).unwrap();
- let relnamespace_replaced = relnamespace_re.replace(
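+ // Compiled lazily so the regex cost is only paid when the Sigma workaround fires.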
+ static RELNAMESPACE_RE: LazyLock<Regex> = LazyLock::new(|| {
+ Regex::new(r#"(?s)from\spg_catalog\.pg_class\s+where\s+relname\s=\s(?P<relname>'(?:[^']|'')+'|\$\d+)\s+and\s+relnamespace\s=\s\(select\soid\sfrom\snsp\)"#).unwrap()
+ });
+ static ATTRELID_RE: LazyLock<Regex> = LazyLock::new(|| {
+ Regex::new(r#"(?s)left\sjoin\spg_description\son\s+attrelid\s=\sobjoid\sand\s+attnum\s=\sobjsubid\s+where\s+attnum\s>\s0\s+and\s+attrelid\s=\s\(select\soid\sfrom\stbl\)"#).unwrap()
+ });
+
+ let relnamespace_replaced = RELNAMESPACE_RE.replace(
&query,
"from pg_catalog.pg_class join nsp on relnamespace = nsp.oid where relname = $relname",
);
- let attrelid_re = Regex::new(r#"(?s)left\sjoin\spg_description\son\s+attrelid\s=\sobjoid\sand\s+attnum\s=\sobjsubid\s+where\s+attnum\s>\s0\s+and\s+attrelid\s=\s\(select\soid\sfrom\stbl\)"#).unwrap();
- let attrelid_replaced = attrelid_re.replace(&relnamespace_replaced, "left join pg_description on attrelid = objoid and attnum = objsubid join tbl on attrelid = tbl.oid where attnum > 0");
+ let attrelid_replaced = ATTRELID_RE.replace(&relnamespace_replaced, "left join pg_description on attrelid = objoid and attnum = objsubid join tbl on attrelid = tbl.oid where attnum > 0");
attrelid_replaced.to_string()
} else {
query
diff --git a/rust/cubesql/cubesql/src/compile/rewrite/converter.rs b/rust/cubesql/cubesql/src/compile/rewrite/converter.rs
index f60a36b3f537d..32fb6ec754778 100644
--- a/rust/cubesql/cubesql/src/compile/rewrite/converter.rs
+++ b/rust/cubesql/cubesql/src/compile/rewrite/converter.rs
@@ -59,7 +59,7 @@ use std::{
collections::{HashMap, HashSet},
env,
ops::Index,
- sync::Arc,
+ sync::{Arc, LazyLock},
};
pub use super::rewriter::CubeRunner;
@@ -170,8 +170,8 @@ macro_rules! add_plan_list_node {
}};
}
-lazy_static! {
- static ref EXCLUDED_PARAM_VALUES: HashSet<ScalarValue> = vec![
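+// Literal values excluded from parameter extraction: standard granularity
+// names plus small integer literals (0..50), which are too common to parameterize.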
+static EXCLUDED_PARAM_VALUES: LazyLock<HashSet<ScalarValue>> = LazyLock::new(|| {
+ vec![
ScalarValue::Utf8(Some("second".to_string())),
ScalarValue::Utf8(Some("minute".to_string())),
ScalarValue::Utf8(Some("hour".to_string())),
@@ -182,8 +182,8 @@ lazy_static! {
]
.into_iter()
.chain((0..50).map(|i| ScalarValue::Int64(Some(i))))
- .collect();
-}
+ .collect()
+});
pub struct LogicalPlanToLanguageConverter {
graph: EGraph<LogicalPlanLanguage, LogicalPlanAnalysis>,
diff --git a/rust/cubesql/cubesql/src/compile/rewrite/rules/members.rs b/rust/cubesql/cubesql/src/compile/rewrite/rules/members.rs
index 291b7d32a352a..79fa094a995ea 100644
--- a/rust/cubesql/cubesql/src/compile/rewrite/rules/members.rs
+++ b/rust/cubesql/cubesql/src/compile/rewrite/rules/members.rs
@@ -46,7 +46,7 @@ use std::{
collections::{HashMap, HashSet},
fmt::Display,
ops::{Index, IndexMut},
- sync::Arc,
+ sync::{Arc, LazyLock},
};
pub struct MemberRules {
@@ -2857,27 +2857,30 @@ pub fn add_member_error(
]))
}
-lazy_static! {
- static ref STANDARD_GRANULARITIES_PARENTS: HashMap<&'static str, Vec<&'static str>> = [
- (
- "year",
- vec!["year", "quarter", "month", "day", "hour", "minute", "second"]
- ),
- (
- "quarter",
- vec!["quarter", "month", "day", "hour", "minute", "second"]
- ),
- ("month", vec!["month", "day", "hour", "minute", "second"]),
- ("week", vec!["week", "day", "hour", "minute", "second"]),
- ("day", vec!["day", "hour", "minute", "second"]),
- ("hour", vec!["hour", "minute", "second"]),
- ("minute", vec!["minute", "second"]),
- ("second", vec!["second"]),
- ]
- .iter()
- .cloned()
- .collect();
-}
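+// Maps each standard granularity to the granularities it can be derived from:
+// itself plus compatible finer ones ("week" only rolls up from day and below).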
+static STANDARD_GRANULARITIES_PARENTS: LazyLock<HashMap<&'static str, Vec<&'static str>>> =
+ LazyLock::new(|| {
+ [
+ (
+ "year",
+ vec![
+ "year", "quarter", "month", "day", "hour", "minute", "second",
+ ],
+ ),
+ (
+ "quarter",
+ vec!["quarter", "month", "day", "hour", "minute", "second"],
+ ),
+ ("month", vec!["month", "day", "hour", "minute", "second"]),
+ ("week", vec!["week", "day", "hour", "minute", "second"]),
+ ("day", vec!["day", "hour", "minute", "second"]),
+ ("hour", vec!["hour", "minute", "second"]),
+ ("minute", vec!["minute", "second"]),
+ ("second", vec!["second"]),
+ ]
+ .iter()
+ .cloned()
+ .collect()
+ });
pub fn min_granularity(granularity_a: &String, granularity_b: &String) -> Option<String> {
let granularity_a = granularity_a.to_lowercase();
diff --git a/rust/cubesql/cubesql/src/compile/snapshots/cubesql__compile__tests__metabase_pg_class_query.snap b/rust/cubesql/cubesql/src/compile/snapshots/cubesql__compile__tests__metabase_pg_class_query.snap
index c8d28203015fe..d804b18e6e1a5 100644
--- a/rust/cubesql/cubesql/src/compile/snapshots/cubesql__compile__tests__metabase_pg_class_query.snap
+++ b/rust/cubesql/cubesql/src/compile/snapshots/cubesql__compile__tests__metabase_pg_class_query.snap
@@ -2,23 +2,23 @@
source: cubesql/src/compile/mod.rs
expression: "execute_query(\"\n SELECT *\n FROM (\n SELECT n.nspname,\n c.relname,\n a.attname,\n a.atttypid,\n a.attnotnull or (t.typtype = 'd' AND t.typnotnull) AS attnotnull,\n a.atttypmod,\n a.attlen,\n t.typtypmod,\n row_number() OVER (partition BY a.attrelid ORDER BY a.attnum) AS attnum,\n NULLIF(a.attidentity, '') AS attidentity,\n pg_catalog.pg_get_expr(def.adbin, def.adrelid) AS adsrc,\n dsc.description,\n t.typbasetype,\n t.typtype\n FROM pg_catalog.pg_namespace n\n JOIN pg_catalog.pg_class c ON (c.relnamespace = n.oid)\n JOIN pg_catalog.pg_attribute a ON (a.attrelid=c.oid)\n JOIN pg_catalog.pg_type t ON (a.atttypid = t.oid)\n LEFT JOIN pg_catalog.pg_attrdef def ON (a.attrelid=def.adrelid AND a.attnum = def.adnum)\n LEFT JOIN pg_catalog.pg_description dsc ON (c.oid=dsc.objoid AND a.attnum = dsc.objsubid)\n LEFT JOIN pg_catalog.pg_class dc ON (dc.oid=dsc.classoid AND dc.relname='pg_class')\n LEFT JOIN pg_catalog.pg_namespace dn ON (dc.relnamespace=dn.oid AND dn.nspname='pg_catalog')\n WHERE c.relkind IN ('r', 'p', 'v', 'f', 'm') AND a.attnum > 0 AND NOT a.attisdropped AND n.nspname LIKE 'public' AND c.relname LIKE 'KibanaSampleDataEcommerce') c\n WHERE true\n ORDER BY nspname, c.relname, attnum;\n \".to_string(),\nDatabaseProtocol::PostgreSQL).await?"
---
-+---------+---------------------------+--------------------+----------+------------+-----------+--------+-----------+--------+-------------+-------+-------------+-------------+---------+
-| nspname | relname | attname | atttypid | attnotnull | atttypmod | attlen | typtypmod | attnum | attidentity | adsrc | description | typbasetype | typtype |
-+---------+---------------------------+--------------------+----------+------------+-----------+--------+-----------+--------+-------------+-------+-------------+-------------+---------+
-| public | KibanaSampleDataEcommerce | count | 20 | true | -1 | 8 | -1 | 1 | NULL | NULL | NULL | 0 | b |
-| public | KibanaSampleDataEcommerce | maxPrice | 1700 | true | -1 | -1 | -1 | 2 | NULL | NULL | NULL | 0 | b |
-| public | KibanaSampleDataEcommerce | sumPrice | 1700 | true | -1 | -1 | -1 | 3 | NULL | NULL | NULL | 0 | b |
-| public | KibanaSampleDataEcommerce | minPrice | 1700 | true | -1 | -1 | -1 | 4 | NULL | NULL | NULL | 0 | b |
-| public | KibanaSampleDataEcommerce | avgPrice | 1700 | true | -1 | -1 | -1 | 5 | NULL | NULL | NULL | 0 | b |
-| public | KibanaSampleDataEcommerce | countDistinct | 20 | true | -1 | 8 | -1 | 6 | NULL | NULL | NULL | 0 | b |
-| public | KibanaSampleDataEcommerce | order_date | 1114 | false | -1 | 8 | -1 | 7 | NULL | NULL | NULL | 0 | b |
-| public | KibanaSampleDataEcommerce | last_mod | 1114 | false | -1 | 8 | -1 | 8 | NULL | NULL | NULL | 0 | b |
-| public | KibanaSampleDataEcommerce | customer_gender | 25 | false | -1 | -1 | -1 | 9 | NULL | NULL | NULL | 0 | b |
-| public | KibanaSampleDataEcommerce | notes | 25 | false | -1 | -1 | -1 | 10 | NULL | NULL | NULL | 0 | b |
-| public | KibanaSampleDataEcommerce | taxful_total_price | 1700 | false | -1 | -1 | -1 | 11 | NULL | NULL | NULL | 0 | b |
-| public | KibanaSampleDataEcommerce | has_subscription | 16 | false | -1 | 1 | -1 | 12 | NULL | NULL | NULL | 0 | b |
-| public | KibanaSampleDataEcommerce | is_male | 16 | true | -1 | 1 | -1 | 13 | NULL | NULL | NULL | 0 | b |
-| public | KibanaSampleDataEcommerce | is_female | 16 | true | -1 | 1 | -1 | 14 | NULL | NULL | NULL | 0 | b |
-| public | KibanaSampleDataEcommerce | __user | 25 | false | -1 | -1 | -1 | 15 | NULL | NULL | NULL | 0 | b |
-| public | KibanaSampleDataEcommerce | __cubeJoinField | 25 | false | -1 | -1 | -1 | 16 | NULL | NULL | NULL | 0 | b |
-+---------+---------------------------+--------------------+----------+------------+-----------+--------+-----------+--------+-------------+-------+-------------+-------------+---------+
++---------+---------------------------+--------------------+----------+------------+-----------+--------+-----------+--------+-------------+-------+-----------------------------------------------+-------------+---------+
+| nspname | relname | attname | atttypid | attnotnull | atttypmod | attlen | typtypmod | attnum | attidentity | adsrc | description | typbasetype | typtype |
++---------+---------------------------+--------------------+----------+------------+-----------+--------+-----------+--------+-------------+-------+-----------------------------------------------+-------------+---------+
+| public | KibanaSampleDataEcommerce | count | 20 | true | -1 | 8 | -1 | 1 | NULL | NULL | Events count | 0 | b |
+| public | KibanaSampleDataEcommerce | maxPrice | 1700 | true | -1 | -1 | -1 | 2 | NULL | NULL | NULL | 0 | b |
+| public | KibanaSampleDataEcommerce | sumPrice | 1700 | true | -1 | -1 | -1 | 3 | NULL | NULL | NULL | 0 | b |
+| public | KibanaSampleDataEcommerce | minPrice | 1700 | true | -1 | -1 | -1 | 4 | NULL | NULL | NULL | 0 | b |
+| public | KibanaSampleDataEcommerce | avgPrice | 1700 | true | -1 | -1 | -1 | 5 | NULL | NULL | NULL | 0 | b |
+| public | KibanaSampleDataEcommerce | countDistinct | 20 | true | -1 | 8 | -1 | 6 | NULL | NULL | NULL | 0 | b |
+| public | KibanaSampleDataEcommerce | order_date | 1114 | false | -1 | 8 | -1 | 7 | NULL | NULL | NULL | 0 | b |
+| public | KibanaSampleDataEcommerce | last_mod | 1114 | false | -1 | 8 | -1 | 8 | NULL | NULL | NULL | 0 | b |
+| public | KibanaSampleDataEcommerce | customer_gender | 25 | false | -1 | -1 | -1 | 9 | NULL | NULL | Customer gender | 0 | b |
+| public | KibanaSampleDataEcommerce | notes | 25 | false | -1 | -1 | -1 | 10 | NULL | NULL | NULL | 0 | b |
+| public | KibanaSampleDataEcommerce | taxful_total_price | 1700 | false | -1 | -1 | -1 | 11 | NULL | NULL | NULL | 0 | b |
+| public | KibanaSampleDataEcommerce | has_subscription | 16 | false | -1 | 1 | -1 | 12 | NULL | NULL | NULL | 0 | b |
+| public | KibanaSampleDataEcommerce | is_male | 16 | true | -1 | 1 | -1 | 13 | NULL | NULL | Male users segment | 0 | b |
+| public | KibanaSampleDataEcommerce | is_female | 16 | true | -1 | 1 | -1 | 14 | NULL | NULL | NULL | 0 | b |
+| public | KibanaSampleDataEcommerce | __user | 25 | false | -1 | -1 | -1 | 15 | NULL | NULL | Virtual column for security context switching | 0 | b |
+| public | KibanaSampleDataEcommerce | __cubeJoinField | 25 | false | -1 | -1 | -1 | 16 | NULL | NULL | Virtual column for joining cubes | 0 | b |
++---------+---------------------------+--------------------+----------+------------+-----------+--------+-----------+--------+-------------+-------+-----------------------------------------------+-------------+---------+
diff --git a/rust/cubesql/cubesql/src/compile/snapshots/cubesql__compile__tests__pgcatalog_pgdescription_postgres.snap b/rust/cubesql/cubesql/src/compile/snapshots/cubesql__compile__tests__pgcatalog_pgdescription_postgres.snap
index f33ec88a25943..a8c95f329dd65 100644
--- a/rust/cubesql/cubesql/src/compile/snapshots/cubesql__compile__tests__pgcatalog_pgdescription_postgres.snap
+++ b/rust/cubesql/cubesql/src/compile/snapshots/cubesql__compile__tests__pgcatalog_pgdescription_postgres.snap
@@ -1,8 +1,88 @@
---
source: cubesql/src/compile/mod.rs
-expression: "execute_query(\"SELECT * FROM pg_catalog.pg_description\".to_string(),\n DatabaseProtocol::PostgreSQL).await?"
+expression: "execute_query(\"SELECT * FROM pg_catalog.pg_description\".to_string(),\nDatabaseProtocol::PostgreSQL).await?"
---
-+--------+----------+----------+-------------+
-| objoid | classoid | objsubid | description |
-+--------+----------+----------+-------------+
-+--------+----------+----------+-------------+
++--------+----------+----------+-------------------------------------------------------+
+| objoid | classoid | objsubid | description |
++--------+----------+----------+-------------------------------------------------------+
+| 18000 | 1259 | 0 | Sample data for tracking eCommerce orders from Kibana |
+| 18000 | 1259 | 1 | Events count |
+| 18000 | 1259 | 9 | Customer gender |
+| 18000 | 1259 | 13 | Male users segment |
+| 18000 | 1259 | 15 | Virtual column for security context switching |
+| 18000 | 1259 | 16 | Virtual column for joining cubes |
+| 18019 | 1259 | 6 | Virtual column for security context switching |
+| 18019 | 1259 | 7 | Virtual column for joining cubes |
+| 18029 | 1259 | 2 | Virtual column for security context switching |
+| 18029 | 1259 | 3 | Virtual column for joining cubes |
+| 18035 | 1259 | 206 | Virtual column for security context switching |
+| 18035 | 1259 | 207 | Virtual column for joining cubes |
+| 18245 | 1259 | 0 | Test cube with a little bit of everything |
+| 18245 | 1259 | 1 | Test number measure 0 |
+| 18245 | 1259 | 2 | Test max(string) measure 0 |
+| 18245 | 1259 | 3 | Test max(time) measure 0 |
+| 18245 | 1259 | 4 | Test number measure 1 |
+| 18245 | 1259 | 5 | Test max(string) measure 1 |
+| 18245 | 1259 | 6 | Test max(time) measure 1 |
+| 18245 | 1259 | 7 | Test number measure 2 |
+| 18245 | 1259 | 8 | Test max(string) measure 2 |
+| 18245 | 1259 | 9 | Test max(time) measure 2 |
+| 18245 | 1259 | 10 | Test number measure 3 |
+| 18245 | 1259 | 11 | Test max(string) measure 3 |
+| 18245 | 1259 | 12 | Test max(time) measure 3 |
+| 18245 | 1259 | 13 | Test number measure 4 |
+| 18245 | 1259 | 14 | Test max(string) measure 4 |
+| 18245 | 1259 | 15 | Test max(time) measure 4 |
+| 18245 | 1259 | 16 | Test number measure 5 |
+| 18245 | 1259 | 17 | Test max(string) measure 5 |
+| 18245 | 1259 | 18 | Test max(time) measure 5 |
+| 18245 | 1259 | 19 | Test number measure 6 |
+| 18245 | 1259 | 20 | Test max(string) measure 6 |
+| 18245 | 1259 | 21 | Test max(time) measure 6 |
+| 18245 | 1259 | 22 | Test number measure 7 |
+| 18245 | 1259 | 23 | Test max(string) measure 7 |
+| 18245 | 1259 | 24 | Test max(time) measure 7 |
+| 18245 | 1259 | 25 | Test number measure 8 |
+| 18245 | 1259 | 26 | Test max(string) measure 8 |
+| 18245 | 1259 | 27 | Test max(time) measure 8 |
+| 18245 | 1259 | 28 | Test number measure 9 |
+| 18245 | 1259 | 29 | Test max(string) measure 9 |
+| 18245 | 1259 | 30 | Test max(time) measure 9 |
+| 18245 | 1259 | 31 | Test count measure |
+| 18245 | 1259 | 32 | Test maxPrice measure |
+| 18245 | 1259 | 33 | Test minPrice measure |
+| 18245 | 1259 | 34 | Test avgPrice measure |
+| 18245 | 1259 | 35 | Test countDistinct measure |
+| 18245 | 1259 | 36 | Test numeric dimension 0 |
+| 18245 | 1259 | 37 | Test string dimension 0 |
+| 18245 | 1259 | 38 | Test time dimension 0 |
+| 18245 | 1259 | 39 | Test numeric dimension 1 |
+| 18245 | 1259 | 40 | Test string dimension 1 |
+| 18245 | 1259 | 41 | Test time dimension 1 |
+| 18245 | 1259 | 42 | Test numeric dimension 2 |
+| 18245 | 1259 | 43 | Test string dimension 2 |
+| 18245 | 1259 | 44 | Test time dimension 2 |
+| 18245 | 1259 | 45 | Test numeric dimension 3 |
+| 18245 | 1259 | 46 | Test string dimension 3 |
+| 18245 | 1259 | 47 | Test time dimension 3 |
+| 18245 | 1259 | 48 | Test numeric dimension 4 |
+| 18245 | 1259 | 49 | Test string dimension 4 |
+| 18245 | 1259 | 50 | Test time dimension 4 |
+| 18245 | 1259 | 51 | Test numeric dimension 5 |
+| 18245 | 1259 | 52 | Test string dimension 5 |
+| 18245 | 1259 | 53 | Test time dimension 5 |
+| 18245 | 1259 | 54 | Test numeric dimension 6 |
+| 18245 | 1259 | 55 | Test string dimension 6 |
+| 18245 | 1259 | 56 | Test time dimension 6 |
+| 18245 | 1259 | 57 | Test numeric dimension 7 |
+| 18245 | 1259 | 58 | Test string dimension 7 |
+| 18245 | 1259 | 59 | Test time dimension 7 |
+| 18245 | 1259 | 60 | Test numeric dimension 8 |
+| 18245 | 1259 | 61 | Test string dimension 8 |
+| 18245 | 1259 | 62 | Test time dimension 8 |
+| 18245 | 1259 | 63 | Test numeric dimension 9 |
+| 18245 | 1259 | 64 | Test string dimension 9 |
+| 18245 | 1259 | 65 | Test time dimension 9 |
+| 18245 | 1259 | 66 | Virtual column for security context switching |
+| 18245 | 1259 | 67 | Virtual column for joining cubes |
++--------+----------+----------+-------------------------------------------------------+
diff --git a/rust/cubesql/cubesql/src/compile/snapshots/cubesql__compile__tests__thought_spot_table_columns.snap b/rust/cubesql/cubesql/src/compile/snapshots/cubesql__compile__tests__thought_spot_table_columns.snap
index 6e17dd07d170d..ebec7d37ee693 100644
--- a/rust/cubesql/cubesql/src/compile/snapshots/cubesql__compile__tests__thought_spot_table_columns.snap
+++ b/rust/cubesql/cubesql/src/compile/snapshots/cubesql__compile__tests__thought_spot_table_columns.snap
@@ -1,25 +1,24 @@
---
source: cubesql/src/compile/mod.rs
-assertion_line: 6432
-expression: "execute_query(\"SELECT * FROM ( SELECT current_database() AS TABLE_CAT, n.nspname AS TABLE_SCHEM, c.relname as TABLE_NAME , a.attname as COLUMN_NAME, CAST(case typname when 'text' THEN 12 when 'bit' THEN -7 when 'bool' THEN -7 when 'boolean' THEN -7 when 'varchar' THEN 12 when 'character varying' THEN 12 when 'char' THEN 1 when '\\\"char\\\"' THEN 1 when 'character' THEN 1 when 'nchar' THEN 12 when 'bpchar' THEN 1 when 'nvarchar' THEN 12 when 'date' THEN 91 when 'timestamp' THEN 93 when 'timestamp without time zone' THEN 93 when 'smallint' THEN 5 when 'int2' THEN 5 when 'integer' THEN 4 when 'int' THEN 4 when 'int4' THEN 4 when 'bigint' THEN -5 when 'int8' THEN -5 when 'decimal' THEN 3 when 'real' THEN 7 when 'float4' THEN 7 when 'double precision' THEN 8 when 'float8' THEN 8 when 'float' THEN 6 when 'numeric' THEN 2 when '_float4' THEN 2003 when 'timestamptz' THEN 2014 when 'timestamp with time zone' THEN 2014 when '_aclitem' THEN 2003 when '_text' THEN 2003 when 'bytea' THEN -2 when 'oid' THEN -5 when 'name' THEN 12 when '_int4' THEN 2003 when '_int2' THEN 2003 when 'ARRAY' THEN 2003 when 'geometry' THEN -4 when 'super' THEN -16 else 1111 END as SMALLINT) AS DATA_TYPE, t.typname as TYPE_NAME, case typname when 'int4' THEN 10 when 'bit' THEN 1 when 'bool' THEN 1 when 'varchar' THEN atttypmod -4 when 'character varying' THEN atttypmod -4 when 'char' THEN atttypmod -4 when 'character' THEN atttypmod -4 when 'nchar' THEN atttypmod -4 when 'bpchar' THEN atttypmod -4 when 'nvarchar' THEN atttypmod -4 when 'date' THEN 13 when 'timestamp' THEN 29 when 'smallint' THEN 5 when 'int2' THEN 5 when 'integer' THEN 10 when 'int' THEN 10 when 'int4' THEN 10 when 'bigint' THEN 19 when 'int8' THEN 19 when 'decimal' then (atttypmod - 4) >> 16 when 'real' THEN 8 when 'float4' THEN 8 when 'double precision' THEN 17 when 'float8' THEN 17 when 'float' THEN 17 when 'numeric' THEN (atttypmod - 4) >> 16 when '_float4' THEN 8 when 'timestamptz' THEN 35 when 'oid' THEN 10 when '_int4' THEN 10 when '_int2' THEN 5 when 'geometry' THEN NULL when 'super' THEN NULL else 2147483647 end as COLUMN_SIZE , null as BUFFER_LENGTH , case typname when 'float4' then 8 when 'float8' then 17 when 'numeric' then (atttypmod - 4) & 65535 when 'timestamp' then 6 when 'geometry' then NULL when 'super' then NULL else 0 end as DECIMAL_DIGITS, 10 AS NUM_PREC_RADIX , case a.attnotnull OR (t.typtype = 'd' AND t.typnotnull) when 'false' then 1 when NULL then 2 else 0 end AS NULLABLE , dsc.description as REMARKS , pg_catalog.pg_get_expr(def.adbin, def.adrelid) AS COLUMN_DEF, CAST(case typname when 'text' THEN 12 when 'bit' THEN -7 when 'bool' THEN -7 when 'boolean' THEN -7 when 'varchar' THEN 12 when 'character varying' THEN 12 when '\\\"char\\\"' THEN 1 when 'char' THEN 1 when 'character' THEN 1 when 'nchar' THEN 1 when 'bpchar' THEN 1 when 'nvarchar' THEN 12 when 'date' THEN 91 when 'timestamp' THEN 93 when 'timestamp without time zone' THEN 93 when 'smallint' THEN 5 when 'int2' THEN 5 when 'integer' THEN 4 when 'int' THEN 4 when 'int4' THEN 4 when 'bigint' THEN -5 when 'int8' THEN -5 when 'decimal' THEN 3 when 'real' THEN 7 when 'float4' THEN 7 when 'double precision' THEN 8 when 'float8' THEN 8 when 'float' THEN 6 when 'numeric' THEN 2 when '_float4' THEN 2003 when 'timestamptz' THEN 2014 when 'timestamp with time zone' THEN 2014 when '_aclitem' THEN 2003 when '_text' THEN 2003 when 'bytea' THEN -2 when 'oid' THEN -5 when 'name' THEN 12 when '_int4' THEN 2003 when '_int2' THEN 2003 when 'ARRAY' THEN 2003 when 'geometry' THEN 
-4 when 'super' THEN -16 else 1111 END as SMALLINT) AS SQL_DATA_TYPE, CAST(NULL AS SMALLINT) as SQL_DATETIME_SUB , case typname when 'int4' THEN 10 when 'bit' THEN 1 when 'bool' THEN 1 when 'varchar' THEN atttypmod -4 when 'character varying' THEN atttypmod -4 when 'char' THEN atttypmod -4 when 'character' THEN atttypmod -4 when 'nchar' THEN atttypmod -4 when 'bpchar' THEN atttypmod -4 when 'nvarchar' THEN atttypmod -4 when 'date' THEN 13 when 'timestamp' THEN 29 when 'smallint' THEN 5 when 'int2' THEN 5 when 'integer' THEN 10 when 'int' THEN 10 when 'int4' THEN 10 when 'bigint' THEN 19 when 'int8' THEN 19 when 'decimal' then ((atttypmod - 4) >> 16) & 65535 when 'real' THEN 8 when 'float4' THEN 8 when 'double precision' THEN 17 when 'float8' THEN 17 when 'float' THEN 17 when 'numeric' THEN ((atttypmod - 4) >> 16) & 65535 when '_float4' THEN 8 when 'timestamptz' THEN 35 when 'oid' THEN 10 when '_int4' THEN 10 when '_int2' THEN 5 when 'geometry' THEN NULL when 'super' THEN NULL else 2147483647 end as CHAR_OCTET_LENGTH , a.attnum AS ORDINAL_POSITION, case a.attnotnull OR (t.typtype = 'd' AND t.typnotnull) when 'false' then 'YES' when NULL then '' else 'NO' end AS IS_NULLABLE, null as SCOPE_CATALOG , null as SCOPE_SCHEMA , null as SCOPE_TABLE, t.typbasetype AS SOURCE_DATA_TYPE , CASE WHEN left(pg_catalog.pg_get_expr(def.adbin, def.adrelid), 16) = 'default_identity' THEN 'YES' ELSE 'NO' END AS IS_AUTOINCREMENT, IS_AUTOINCREMENT AS IS_GENERATEDCOLUMN FROM pg_catalog.pg_namespace n JOIN pg_catalog.pg_class c ON (c.relnamespace = n.oid) JOIN pg_catalog.pg_attribute a ON (a.attrelid=c.oid) JOIN pg_catalog.pg_type t ON (a.atttypid = t.oid) LEFT JOIN pg_catalog.pg_attrdef def ON (a.attrelid=def.adrelid AND a.attnum = def.adnum) LEFT JOIN pg_catalog.pg_description dsc ON (c.oid=dsc.objoid AND a.attnum = dsc.objsubid) LEFT JOIN pg_catalog.pg_class dc ON (dc.oid=dsc.classoid AND dc.relname='pg_class') LEFT JOIN pg_catalog.pg_namespace dn ON (dc.relnamespace=dn.oid AND dn.nspname='pg_catalog') WHERE a.attnum > 0 AND NOT a.attisdropped AND current_database() = 'cubedb' AND n.nspname LIKE 'public' AND c.relname LIKE 'KibanaSampleDataEcommerce' ORDER BY TABLE_SCHEM,c.relname,attnum ) UNION ALL SELECT current_database()::VARCHAR(128) AS TABLE_CAT, schemaname::varchar(128) AS table_schem, tablename::varchar(128) AS table_name, columnname::varchar(128) AS column_name, CAST(CASE columntype_rep WHEN 'text' THEN 12 WHEN 'bit' THEN -7 WHEN 'bool' THEN -7 WHEN 'boolean' THEN -7 WHEN 'varchar' THEN 12 WHEN 'character varying' THEN 12 WHEN 'char' THEN 1 WHEN 'character' THEN 1 WHEN 'nchar' THEN 1 WHEN 'bpchar' THEN 1 WHEN 'nvarchar' THEN 12 WHEN '\\\"char\\\"' THEN 1 WHEN 'date' THEN 91 WHEN 'timestamp' THEN 93 WHEN 'timestamp without time zone' THEN 93 WHEN 'timestamp with time zone' THEN 2014 WHEN 'smallint' THEN 5 WHEN 'int2' THEN 5 WHEN 'integer' THEN 4 WHEN 'int' THEN 4 WHEN 'int4' THEN 4 WHEN 'bigint' THEN -5 WHEN 'int8' THEN -5 WHEN 'decimal' THEN 3 WHEN 'real' THEN 7 WHEN 'float4' THEN 7 WHEN 'double precision' THEN 8 WHEN 'float8' THEN 8 WHEN 'float' THEN 6 WHEN 'numeric' THEN 2 WHEN 'timestamptz' THEN 2014 WHEN 'bytea' THEN -2 WHEN 'oid' THEN -5 WHEN 'name' THEN 12 WHEN 'ARRAY' THEN 2003 WHEN 'geometry' THEN -4 WHEN 'super' THEN -16 ELSE 1111 END AS SMALLINT) AS DATA_TYPE, COALESCE(NULL,CASE columntype WHEN 'boolean' THEN 'bool' WHEN 'character varying' THEN 'varchar' WHEN '\\\"char\\\"' THEN 'char' WHEN 'smallint' THEN 'int2' WHEN 'integer' THEN 'int4'WHEN 'bigint' THEN 'int8' WHEN 'real' THEN 'float4' 
WHEN 'double precision' THEN 'float8' WHEN 'timestamp without time zone' THEN 'timestamp' WHEN 'timestamp with time zone' THEN 'timestamptz' ELSE columntype END) AS TYPE_NAME, CASE columntype_rep WHEN 'int4' THEN 10 WHEN 'bit' THEN 1 WHEN 'bool' THEN 1WHEN 'boolean' THEN 1WHEN 'varchar' THEN regexp_substr (columntype,'[0-9]+',7)::INTEGER WHEN 'character varying' THEN regexp_substr (columntype,'[0-9]+',7)::INTEGER WHEN 'char' THEN regexp_substr (columntype,'[0-9]+',4)::INTEGER WHEN 'character' THEN regexp_substr (columntype,'[0-9]+',4)::INTEGER WHEN 'nchar' THEN regexp_substr (columntype,'[0-9]+',7)::INTEGER WHEN 'bpchar' THEN regexp_substr (columntype,'[0-9]+',7)::INTEGER WHEN 'nvarchar' THEN regexp_substr (columntype,'[0-9]+',7)::INTEGER WHEN 'date' THEN 13 WHEN 'timestamp' THEN 29 WHEN 'timestamp without time zone' THEN 29 WHEN 'smallint' THEN 5 WHEN 'int2' THEN 5 WHEN 'integer' THEN 10 WHEN 'int' THEN 10 WHEN 'int4' THEN 10 WHEN 'bigint' THEN 19 WHEN 'int8' THEN 19 WHEN 'decimal' THEN regexp_substr (columntype,'[0-9]+',7)::INTEGER WHEN 'real' THEN 8 WHEN 'float4' THEN 8 WHEN 'double precision' THEN 17 WHEN 'float8' THEN 17 WHEN 'float' THEN 17WHEN 'numeric' THEN regexp_substr (columntype,'[0-9]+',7)::INTEGER WHEN '_float4' THEN 8 WHEN 'timestamptz' THEN 35 WHEN 'timestamp with time zone' THEN 35 WHEN 'oid' THEN 10 WHEN '_int4' THEN 10 WHEN '_int2' THEN 5 WHEN 'geometry' THEN NULL WHEN 'super' THEN NULL ELSE 2147483647 END AS COLUMN_SIZE, NULL AS BUFFER_LENGTH, CASE columntype WHEN 'real' THEN 8 WHEN 'float4' THEN 8 WHEN 'double precision' THEN 17 WHEN 'float8' THEN 17 WHEN 'timestamp' THEN 6 WHEN 'timestamp without time zone' THEN 6 WHEN 'geometry' THEN NULL WHEN 'super' THEN NULL ELSE 0 END AS DECIMAL_DIGITS, 10 AS NUM_PREC_RADIX, NULL AS NULLABLE, NULL AS REMARKS, NULL AS COLUMN_DEF, CAST(CASE columntype_rep WHEN 'text' THEN 12 WHEN 'bit' THEN -7 WHEN 'bool' THEN -7 WHEN 'boolean' THEN -7 WHEN 'varchar' THEN 12 WHEN 'character varying' THEN 12 WHEN 'char' THEN 1 WHEN 'character' THEN 1 WHEN 'nchar' THEN 12 WHEN 'bpchar' THEN 1 WHEN 'nvarchar' THEN 12 WHEN '\\\"char\\\"' THEN 1 WHEN 'date' THEN 91 WHEN 'timestamp' THEN 93 WHEN 'timestamp without time zone' THEN 93 WHEN 'timestamp with time zone' THEN 2014 WHEN 'smallint' THEN 5 WHEN 'int2' THEN 5 WHEN 'integer' THEN 4 WHEN 'int' THEN 4 WHEN 'int4' THEN 4 WHEN 'bigint' THEN -5 WHEN 'int8' THEN -5 WHEN 'decimal' THEN 3 WHEN 'real' THEN 7 WHEN 'float4' THEN 7 WHEN 'double precision' THEN 8 WHEN 'float8' THEN 8 WHEN 'float' THEN 6 WHEN 'numeric' THEN 2 WHEN 'timestamptz' THEN 2014 WHEN 'bytea' THEN -2 WHEN 'oid' THEN -5 WHEN 'name' THEN 12 WHEN 'ARRAY' THEN 2003 WHEN 'geometry' THEN -4 WHEN 'super' THEN -4 ELSE 1111 END AS SMALLINT) AS SQL_DATA_TYPE, CAST(NULL AS SMALLINT) AS SQL_DATETIME_SUB, CASE WHEN LEFT (columntype,7) = 'varchar' THEN regexp_substr (columntype,'[0-9]+',7)::INTEGER WHEN LEFT (columntype,4) = 'char' THEN regexp_substr (columntype,'[0-9]+',4)::INTEGER WHEN columntype = 'string' THEN 16383 ELSE NULL END AS CHAR_OCTET_LENGTH, columnnum AS ORDINAL_POSITION, NULL AS IS_NULLABLE, NULL AS SCOPE_CATALOG, NULL AS SCOPE_SCHEMA, NULL AS SCOPE_TABLE, NULL AS SOURCE_DATA_TYPE, 'NO' AS IS_AUTOINCREMENT, 'NO' as IS_GENERATEDCOLUMN FROM (select lbv_cols.schemaname, lbv_cols.tablename, lbv_cols.columnname,REGEXP_REPLACE(REGEXP_REPLACE(lbv_cols.columntype,'\\\\\\\\(.*\\\\\\\\)'),'^_.+','ARRAY') as columntype_rep,columntype, lbv_cols.columnnum from pg_get_late_binding_view_cols() lbv_cols( schemaname name, tablename name, columnname name, 
columntype text, columnnum int)) lbv_columns WHERE true AND current_database() = 'cubedb' AND schemaname LIKE 'public' AND tablename LIKE 'KibanaSampleDataEcommerce';\".to_string(),\n DatabaseProtocol::PostgreSQL).await?"
+expression: "execute_query(\"SELECT * FROM ( SELECT current_database() AS TABLE_CAT, n.nspname AS TABLE_SCHEM, c.relname as TABLE_NAME , a.attname as COLUMN_NAME, CAST(case typname when 'text' THEN 12 when 'bit' THEN -7 when 'bool' THEN -7 when 'boolean' THEN -7 when 'varchar' THEN 12 when 'character varying' THEN 12 when 'char' THEN 1 when '\\\"char\\\"' THEN 1 when 'character' THEN 1 when 'nchar' THEN 12 when 'bpchar' THEN 1 when 'nvarchar' THEN 12 when 'date' THEN 91 when 'timestamp' THEN 93 when 'timestamp without time zone' THEN 93 when 'smallint' THEN 5 when 'int2' THEN 5 when 'integer' THEN 4 when 'int' THEN 4 when 'int4' THEN 4 when 'bigint' THEN -5 when 'int8' THEN -5 when 'decimal' THEN 3 when 'real' THEN 7 when 'float4' THEN 7 when 'double precision' THEN 8 when 'float8' THEN 8 when 'float' THEN 6 when 'numeric' THEN 2 when '_float4' THEN 2003 when 'timestamptz' THEN 2014 when 'timestamp with time zone' THEN 2014 when '_aclitem' THEN 2003 when '_text' THEN 2003 when 'bytea' THEN -2 when 'oid' THEN -5 when 'name' THEN 12 when '_int4' THEN 2003 when '_int2' THEN 2003 when 'ARRAY' THEN 2003 when 'geometry' THEN -4 when 'super' THEN -16 else 1111 END as SMALLINT) AS DATA_TYPE, t.typname as TYPE_NAME, case typname when 'int4' THEN 10 when 'bit' THEN 1 when 'bool' THEN 1 when 'varchar' THEN atttypmod -4 when 'character varying' THEN atttypmod -4 when 'char' THEN atttypmod -4 when 'character' THEN atttypmod -4 when 'nchar' THEN atttypmod -4 when 'bpchar' THEN atttypmod -4 when 'nvarchar' THEN atttypmod -4 when 'date' THEN 13 when 'timestamp' THEN 29 when 'smallint' THEN 5 when 'int2' THEN 5 when 'integer' THEN 10 when 'int' THEN 10 when 'int4' THEN 10 when 'bigint' THEN 19 when 'int8' THEN 19 when 'decimal' then (atttypmod - 4) >> 16 when 'real' THEN 8 when 'float4' THEN 8 when 'double precision' THEN 17 when 'float8' THEN 17 when 'float' THEN 17 when 'numeric' THEN (atttypmod - 4) >> 16 when '_float4' THEN 8 when 'timestamptz' THEN 35 when 'oid' THEN 10 when '_int4' THEN 10 when '_int2' THEN 5 when 'geometry' THEN NULL when 'super' THEN NULL else 2147483647 end as COLUMN_SIZE , null as BUFFER_LENGTH , case typname when 'float4' then 8 when 'float8' then 17 when 'numeric' then (atttypmod - 4) & 65535 when 'timestamp' then 6 when 'geometry' then NULL when 'super' then NULL else 0 end as DECIMAL_DIGITS, 10 AS NUM_PREC_RADIX , case a.attnotnull OR (t.typtype = 'd' AND t.typnotnull) when 'false' then 1 when NULL then 2 else 0 end AS NULLABLE , dsc.description as REMARKS , pg_catalog.pg_get_expr(def.adbin, def.adrelid) AS COLUMN_DEF, CAST(case typname when 'text' THEN 12 when 'bit' THEN -7 when 'bool' THEN -7 when 'boolean' THEN -7 when 'varchar' THEN 12 when 'character varying' THEN 12 when '\\\"char\\\"' THEN 1 when 'char' THEN 1 when 'character' THEN 1 when 'nchar' THEN 1 when 'bpchar' THEN 1 when 'nvarchar' THEN 12 when 'date' THEN 91 when 'timestamp' THEN 93 when 'timestamp without time zone' THEN 93 when 'smallint' THEN 5 when 'int2' THEN 5 when 'integer' THEN 4 when 'int' THEN 4 when 'int4' THEN 4 when 'bigint' THEN -5 when 'int8' THEN -5 when 'decimal' THEN 3 when 'real' THEN 7 when 'float4' THEN 7 when 'double precision' THEN 8 when 'float8' THEN 8 when 'float' THEN 6 when 'numeric' THEN 2 when '_float4' THEN 2003 when 'timestamptz' THEN 2014 when 'timestamp with time zone' THEN 2014 when '_aclitem' THEN 2003 when '_text' THEN 2003 when 'bytea' THEN -2 when 'oid' THEN -5 when 'name' THEN 12 when '_int4' THEN 2003 when '_int2' THEN 2003 when 'ARRAY' THEN 2003 when 'geometry' THEN 
-4 when 'super' THEN -16 else 1111 END as SMALLINT) AS SQL_DATA_TYPE, CAST(NULL AS SMALLINT) as SQL_DATETIME_SUB , case typname when 'int4' THEN 10 when 'bit' THEN 1 when 'bool' THEN 1 when 'varchar' THEN atttypmod -4 when 'character varying' THEN atttypmod -4 when 'char' THEN atttypmod -4 when 'character' THEN atttypmod -4 when 'nchar' THEN atttypmod -4 when 'bpchar' THEN atttypmod -4 when 'nvarchar' THEN atttypmod -4 when 'date' THEN 13 when 'timestamp' THEN 29 when 'smallint' THEN 5 when 'int2' THEN 5 when 'integer' THEN 10 when 'int' THEN 10 when 'int4' THEN 10 when 'bigint' THEN 19 when 'int8' THEN 19 when 'decimal' then ((atttypmod - 4) >> 16) & 65535 when 'real' THEN 8 when 'float4' THEN 8 when 'double precision' THEN 17 when 'float8' THEN 17 when 'float' THEN 17 when 'numeric' THEN ((atttypmod - 4) >> 16) & 65535 when '_float4' THEN 8 when 'timestamptz' THEN 35 when 'oid' THEN 10 when '_int4' THEN 10 when '_int2' THEN 5 when 'geometry' THEN NULL when 'super' THEN NULL else 2147483647 end as CHAR_OCTET_LENGTH , a.attnum AS ORDINAL_POSITION, case a.attnotnull OR (t.typtype = 'd' AND t.typnotnull) when 'false' then 'YES' when NULL then '' else 'NO' end AS IS_NULLABLE, null as SCOPE_CATALOG , null as SCOPE_SCHEMA , null as SCOPE_TABLE, t.typbasetype AS SOURCE_DATA_TYPE , CASE WHEN left(pg_catalog.pg_get_expr(def.adbin, def.adrelid), 16) = 'default_identity' THEN 'YES' ELSE 'NO' END AS IS_AUTOINCREMENT, IS_AUTOINCREMENT AS IS_GENERATEDCOLUMN FROM pg_catalog.pg_namespace n JOIN pg_catalog.pg_class c ON (c.relnamespace = n.oid) JOIN pg_catalog.pg_attribute a ON (a.attrelid=c.oid) JOIN pg_catalog.pg_type t ON (a.atttypid = t.oid) LEFT JOIN pg_catalog.pg_attrdef def ON (a.attrelid=def.adrelid AND a.attnum = def.adnum) LEFT JOIN pg_catalog.pg_description dsc ON (c.oid=dsc.objoid AND a.attnum = dsc.objsubid) LEFT JOIN pg_catalog.pg_class dc ON (dc.oid=dsc.classoid AND dc.relname='pg_class') LEFT JOIN pg_catalog.pg_namespace dn ON (dc.relnamespace=dn.oid AND dn.nspname='pg_catalog') WHERE a.attnum > 0 AND NOT a.attisdropped AND current_database() = 'cubedb' AND n.nspname LIKE 'public' AND c.relname LIKE 'KibanaSampleDataEcommerce' ORDER BY TABLE_SCHEM,c.relname,attnum ) UNION ALL SELECT current_database()::VARCHAR(128) AS TABLE_CAT, schemaname::varchar(128) AS table_schem, tablename::varchar(128) AS table_name, columnname::varchar(128) AS column_name, CAST(CASE columntype_rep WHEN 'text' THEN 12 WHEN 'bit' THEN -7 WHEN 'bool' THEN -7 WHEN 'boolean' THEN -7 WHEN 'varchar' THEN 12 WHEN 'character varying' THEN 12 WHEN 'char' THEN 1 WHEN 'character' THEN 1 WHEN 'nchar' THEN 1 WHEN 'bpchar' THEN 1 WHEN 'nvarchar' THEN 12 WHEN '\\\"char\\\"' THEN 1 WHEN 'date' THEN 91 WHEN 'timestamp' THEN 93 WHEN 'timestamp without time zone' THEN 93 WHEN 'timestamp with time zone' THEN 2014 WHEN 'smallint' THEN 5 WHEN 'int2' THEN 5 WHEN 'integer' THEN 4 WHEN 'int' THEN 4 WHEN 'int4' THEN 4 WHEN 'bigint' THEN -5 WHEN 'int8' THEN -5 WHEN 'decimal' THEN 3 WHEN 'real' THEN 7 WHEN 'float4' THEN 7 WHEN 'double precision' THEN 8 WHEN 'float8' THEN 8 WHEN 'float' THEN 6 WHEN 'numeric' THEN 2 WHEN 'timestamptz' THEN 2014 WHEN 'bytea' THEN -2 WHEN 'oid' THEN -5 WHEN 'name' THEN 12 WHEN 'ARRAY' THEN 2003 WHEN 'geometry' THEN -4 WHEN 'super' THEN -16 ELSE 1111 END AS SMALLINT) AS DATA_TYPE, COALESCE(NULL,CASE columntype WHEN 'boolean' THEN 'bool' WHEN 'character varying' THEN 'varchar' WHEN '\\\"char\\\"' THEN 'char' WHEN 'smallint' THEN 'int2' WHEN 'integer' THEN 'int4'WHEN 'bigint' THEN 'int8' WHEN 'real' THEN 'float4' 
WHEN 'double precision' THEN 'float8' WHEN 'timestamp without time zone' THEN 'timestamp' WHEN 'timestamp with time zone' THEN 'timestamptz' ELSE columntype END) AS TYPE_NAME, CASE columntype_rep WHEN 'int4' THEN 10 WHEN 'bit' THEN 1 WHEN 'bool' THEN 1WHEN 'boolean' THEN 1WHEN 'varchar' THEN regexp_substr (columntype,'[0-9]+',7)::INTEGER WHEN 'character varying' THEN regexp_substr (columntype,'[0-9]+',7)::INTEGER WHEN 'char' THEN regexp_substr (columntype,'[0-9]+',4)::INTEGER WHEN 'character' THEN regexp_substr (columntype,'[0-9]+',4)::INTEGER WHEN 'nchar' THEN regexp_substr (columntype,'[0-9]+',7)::INTEGER WHEN 'bpchar' THEN regexp_substr (columntype,'[0-9]+',7)::INTEGER WHEN 'nvarchar' THEN regexp_substr (columntype,'[0-9]+',7)::INTEGER WHEN 'date' THEN 13 WHEN 'timestamp' THEN 29 WHEN 'timestamp without time zone' THEN 29 WHEN 'smallint' THEN 5 WHEN 'int2' THEN 5 WHEN 'integer' THEN 10 WHEN 'int' THEN 10 WHEN 'int4' THEN 10 WHEN 'bigint' THEN 19 WHEN 'int8' THEN 19 WHEN 'decimal' THEN regexp_substr (columntype,'[0-9]+',7)::INTEGER WHEN 'real' THEN 8 WHEN 'float4' THEN 8 WHEN 'double precision' THEN 17 WHEN 'float8' THEN 17 WHEN 'float' THEN 17WHEN 'numeric' THEN regexp_substr (columntype,'[0-9]+',7)::INTEGER WHEN '_float4' THEN 8 WHEN 'timestamptz' THEN 35 WHEN 'timestamp with time zone' THEN 35 WHEN 'oid' THEN 10 WHEN '_int4' THEN 10 WHEN '_int2' THEN 5 WHEN 'geometry' THEN NULL WHEN 'super' THEN NULL ELSE 2147483647 END AS COLUMN_SIZE, NULL AS BUFFER_LENGTH, CASE columntype WHEN 'real' THEN 8 WHEN 'float4' THEN 8 WHEN 'double precision' THEN 17 WHEN 'float8' THEN 17 WHEN 'timestamp' THEN 6 WHEN 'timestamp without time zone' THEN 6 WHEN 'geometry' THEN NULL WHEN 'super' THEN NULL ELSE 0 END AS DECIMAL_DIGITS, 10 AS NUM_PREC_RADIX, NULL AS NULLABLE, NULL AS REMARKS, NULL AS COLUMN_DEF, CAST(CASE columntype_rep WHEN 'text' THEN 12 WHEN 'bit' THEN -7 WHEN 'bool' THEN -7 WHEN 'boolean' THEN -7 WHEN 'varchar' THEN 12 WHEN 'character varying' THEN 12 WHEN 'char' THEN 1 WHEN 'character' THEN 1 WHEN 'nchar' THEN 12 WHEN 'bpchar' THEN 1 WHEN 'nvarchar' THEN 12 WHEN '\\\"char\\\"' THEN 1 WHEN 'date' THEN 91 WHEN 'timestamp' THEN 93 WHEN 'timestamp without time zone' THEN 93 WHEN 'timestamp with time zone' THEN 2014 WHEN 'smallint' THEN 5 WHEN 'int2' THEN 5 WHEN 'integer' THEN 4 WHEN 'int' THEN 4 WHEN 'int4' THEN 4 WHEN 'bigint' THEN -5 WHEN 'int8' THEN -5 WHEN 'decimal' THEN 3 WHEN 'real' THEN 7 WHEN 'float4' THEN 7 WHEN 'double precision' THEN 8 WHEN 'float8' THEN 8 WHEN 'float' THEN 6 WHEN 'numeric' THEN 2 WHEN 'timestamptz' THEN 2014 WHEN 'bytea' THEN -2 WHEN 'oid' THEN -5 WHEN 'name' THEN 12 WHEN 'ARRAY' THEN 2003 WHEN 'geometry' THEN -4 WHEN 'super' THEN -4 ELSE 1111 END AS SMALLINT) AS SQL_DATA_TYPE, CAST(NULL AS SMALLINT) AS SQL_DATETIME_SUB, CASE WHEN LEFT (columntype,7) = 'varchar' THEN regexp_substr (columntype,'[0-9]+',7)::INTEGER WHEN LEFT (columntype,4) = 'char' THEN regexp_substr (columntype,'[0-9]+',4)::INTEGER WHEN columntype = 'string' THEN 16383 ELSE NULL END AS CHAR_OCTET_LENGTH, columnnum AS ORDINAL_POSITION, NULL AS IS_NULLABLE, NULL AS SCOPE_CATALOG, NULL AS SCOPE_SCHEMA, NULL AS SCOPE_TABLE, NULL AS SOURCE_DATA_TYPE, 'NO' AS IS_AUTOINCREMENT, 'NO' as IS_GENERATEDCOLUMN FROM (select lbv_cols.schemaname, lbv_cols.tablename, lbv_cols.columnname,REGEXP_REPLACE(REGEXP_REPLACE(lbv_cols.columntype,'\\\\\\\\(.*\\\\\\\\)'),'^_.+','ARRAY') as columntype_rep,columntype, lbv_cols.columnnum from pg_get_late_binding_view_cols() lbv_cols( schemaname name, tablename name, columnname name, 
columntype text, columnnum int)) lbv_columns WHERE true AND current_database() = 'cubedb' AND schemaname LIKE 'public' AND tablename LIKE 'KibanaSampleDataEcommerce';\".to_string(),\nDatabaseProtocol::PostgreSQL).await?"
---
-+-----------+-------------+---------------------------+--------------------+-----------+-----------+-------------+---------------+----------------+----------------+----------+---------+------------+---------------+------------------+-------------------+------------------+-------------+---------------+--------------+-------------+------------------+------------------+--------------------+
-| TABLE_CAT | TABLE_SCHEM | TABLE_NAME | COLUMN_NAME | DATA_TYPE | TYPE_NAME | COLUMN_SIZE | BUFFER_LENGTH | DECIMAL_DIGITS | NUM_PREC_RADIX | NULLABLE | REMARKS | COLUMN_DEF | SQL_DATA_TYPE | SQL_DATETIME_SUB | CHAR_OCTET_LENGTH | ORDINAL_POSITION | IS_NULLABLE | SCOPE_CATALOG | SCOPE_SCHEMA | SCOPE_TABLE | SOURCE_DATA_TYPE | IS_AUTOINCREMENT | IS_GENERATEDCOLUMN |
-+-----------+-------------+---------------------------+--------------------+-----------+-----------+-------------+---------------+----------------+----------------+----------+---------+------------+---------------+------------------+-------------------+------------------+-------------+---------------+--------------+-------------+------------------+------------------+--------------------+
-| cubedb | public | KibanaSampleDataEcommerce | count | -5 | int8 | 19 | NULL | 0 | 10 | NULL | NULL | NULL | -5 | NULL | 19 | 1 | NULL | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | maxPrice | 2 | numeric | -1 | NULL | 65531 | 10 | NULL | NULL | NULL | 2 | NULL | 65535 | 2 | NULL | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | sumPrice | 2 | numeric | -1 | NULL | 65531 | 10 | NULL | NULL | NULL | 2 | NULL | 65535 | 3 | NULL | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | minPrice | 2 | numeric | -1 | NULL | 65531 | 10 | NULL | NULL | NULL | 2 | NULL | 65535 | 4 | NULL | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | avgPrice | 2 | numeric | -1 | NULL | 65531 | 10 | NULL | NULL | NULL | 2 | NULL | 65535 | 5 | NULL | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | countDistinct | -5 | int8 | 19 | NULL | 0 | 10 | NULL | NULL | NULL | -5 | NULL | 19 | 6 | NULL | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | order_date | 93 | timestamp | 29 | NULL | 6 | 10 | 1 | NULL | NULL | 93 | NULL | 29 | 7 | YES | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | last_mod | 93 | timestamp | 29 | NULL | 6 | 10 | 1 | NULL | NULL | 93 | NULL | 29 | 8 | YES | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | customer_gender | 12 | text | 2147483647 | NULL | 0 | 10 | 1 | NULL | NULL | 12 | NULL | 2147483647 | 9 | YES | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | notes | 12 | text | 2147483647 | NULL | 0 | 10 | 1 | NULL | NULL | 12 | NULL | 2147483647 | 10 | YES | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | taxful_total_price | 2 | numeric | -1 | NULL | 65531 | 10 | 1 | NULL | NULL | 2 | NULL | 65535 | 11 | YES | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | has_subscription | -7 | bool | 1 | NULL | 0 | 10 | 1 | NULL | NULL | -7 | NULL | 1 | 12 | YES | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | is_male | -7 | bool | 1 | NULL | 0 | 10 | NULL | NULL | NULL | -7 | NULL | 1 | 13 | NULL | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | is_female | -7 | bool | 1 | NULL | 0 | 10 | NULL | NULL | NULL | -7 | NULL | 1 | 14 | NULL | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | __user | 12 | text | 2147483647 | NULL | 0 | 10 | 1 | NULL | NULL | 12 | NULL | 2147483647 | 15 | YES | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | __cubeJoinField | 12 | text | 2147483647 | NULL | 0 | 10 | 1 | NULL | NULL | 12 | NULL | 2147483647 | 16 | YES | NULL | NULL | NULL | 0 | NO | false |
-+-----------+-------------+---------------------------+--------------------+-----------+-----------+-------------+---------------+----------------+----------------+----------+---------+------------+---------------+------------------+-------------------+------------------+-------------+---------------+--------------+-------------+------------------+------------------+--------------------+
++-----------+-------------+---------------------------+--------------------+-----------+-----------+-------------+---------------+----------------+----------------+----------+-----------------------------------------------+------------+---------------+------------------+-------------------+------------------+-------------+---------------+--------------+-------------+------------------+------------------+--------------------+
+| TABLE_CAT | TABLE_SCHEM | TABLE_NAME | COLUMN_NAME | DATA_TYPE | TYPE_NAME | COLUMN_SIZE | BUFFER_LENGTH | DECIMAL_DIGITS | NUM_PREC_RADIX | NULLABLE | REMARKS | COLUMN_DEF | SQL_DATA_TYPE | SQL_DATETIME_SUB | CHAR_OCTET_LENGTH | ORDINAL_POSITION | IS_NULLABLE | SCOPE_CATALOG | SCOPE_SCHEMA | SCOPE_TABLE | SOURCE_DATA_TYPE | IS_AUTOINCREMENT | IS_GENERATEDCOLUMN |
++-----------+-------------+---------------------------+--------------------+-----------+-----------+-------------+---------------+----------------+----------------+----------+-----------------------------------------------+------------+---------------+------------------+-------------------+------------------+-------------+---------------+--------------+-------------+------------------+------------------+--------------------+
+| cubedb | public | KibanaSampleDataEcommerce | count | -5 | int8 | 19 | NULL | 0 | 10 | NULL | Events count | NULL | -5 | NULL | 19 | 1 | NULL | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | maxPrice | 2 | numeric | -1 | NULL | 65531 | 10 | NULL | NULL | NULL | 2 | NULL | 65535 | 2 | NULL | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | sumPrice | 2 | numeric | -1 | NULL | 65531 | 10 | NULL | NULL | NULL | 2 | NULL | 65535 | 3 | NULL | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | minPrice | 2 | numeric | -1 | NULL | 65531 | 10 | NULL | NULL | NULL | 2 | NULL | 65535 | 4 | NULL | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | avgPrice | 2 | numeric | -1 | NULL | 65531 | 10 | NULL | NULL | NULL | 2 | NULL | 65535 | 5 | NULL | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | countDistinct | -5 | int8 | 19 | NULL | 0 | 10 | NULL | NULL | NULL | -5 | NULL | 19 | 6 | NULL | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | order_date | 93 | timestamp | 29 | NULL | 6 | 10 | 1 | NULL | NULL | 93 | NULL | 29 | 7 | YES | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | last_mod | 93 | timestamp | 29 | NULL | 6 | 10 | 1 | NULL | NULL | 93 | NULL | 29 | 8 | YES | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | customer_gender | 12 | text | 2147483647 | NULL | 0 | 10 | 1 | Customer gender | NULL | 12 | NULL | 2147483647 | 9 | YES | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | notes | 12 | text | 2147483647 | NULL | 0 | 10 | 1 | NULL | NULL | 12 | NULL | 2147483647 | 10 | YES | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | taxful_total_price | 2 | numeric | -1 | NULL | 65531 | 10 | 1 | NULL | NULL | 2 | NULL | 65535 | 11 | YES | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | has_subscription | -7 | bool | 1 | NULL | 0 | 10 | 1 | NULL | NULL | -7 | NULL | 1 | 12 | YES | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | is_male | -7 | bool | 1 | NULL | 0 | 10 | NULL | Male users segment | NULL | -7 | NULL | 1 | 13 | NULL | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | is_female | -7 | bool | 1 | NULL | 0 | 10 | NULL | NULL | NULL | -7 | NULL | 1 | 14 | NULL | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | __user | 12 | text | 2147483647 | NULL | 0 | 10 | 1 | Virtual column for security context switching | NULL | 12 | NULL | 2147483647 | 15 | YES | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | __cubeJoinField | 12 | text | 2147483647 | NULL | 0 | 10 | 1 | Virtual column for joining cubes | NULL | 12 | NULL | 2147483647 | 16 | YES | NULL | NULL | NULL | 0 | NO | false |
++-----------+-------------+---------------------------+--------------------+-----------+-----------+-------------+---------------+----------------+----------------+----------+-----------------------------------------------+------------+---------------+------------------+-------------------+------------------+-------------+---------------+--------------+-------------+------------------+------------------+--------------------+
diff --git a/rust/cubesql/cubesql/src/compile/test/mod.rs b/rust/cubesql/cubesql/src/compile/test/mod.rs
index 4b662f89ed15e..1d33e4aa29c4a 100644
--- a/rust/cubesql/cubesql/src/compile/test/mod.rs
+++ b/rust/cubesql/cubesql/src/compile/test/mod.rs
@@ -31,9 +31,13 @@ pub mod rewrite_engine;
#[cfg(test)]
pub mod test_bi_workarounds;
#[cfg(test)]
+pub mod test_df_execution;
+#[cfg(test)]
pub mod test_introspection;
#[cfg(test)]
pub mod test_udfs;
+#[cfg(test)]
+pub mod test_user_change;
pub mod utils;
pub use utils::*;
@@ -41,7 +45,7 @@ pub fn get_test_meta() -> Vec<CubeMeta> {
vec![
CubeMeta {
name: "KibanaSampleDataEcommerce".to_string(),
- description: None,
+ description: Some("Sample data for tracking eCommerce orders from Kibana".to_string()),
title: None,
dimensions: vec![
CubeMetaDimension {
@@ -56,7 +60,7 @@ pub fn get_test_meta() -> Vec {
},
CubeMetaDimension {
name: "KibanaSampleDataEcommerce.customer_gender".to_string(),
- description: None,
+ description: Some("Customer gender".to_string()),
_type: "string".to_string(),
},
CubeMetaDimension {
@@ -79,7 +83,7 @@ pub fn get_test_meta() -> Vec {
CubeMetaMeasure {
name: "KibanaSampleDataEcommerce.count".to_string(),
title: None,
- description: None,
+ description: Some("Events count".to_string()),
_type: "number".to_string(),
agg_type: Some("count".to_string()),
},
@@ -261,24 +265,24 @@ pub fn get_test_meta() -> Vec {
},
CubeMeta {
name: "MultiTypeCube".to_string(),
- description: None,
+ description: Some("Test cube with a little bit of everything".to_string()),
title: None,
dimensions: (0..10)
.flat_map(|i| {
[
CubeMetaDimension {
name: format!("MultiTypeCube.dim_num{}", i),
- description: None,
+ description: Some(format!("Test numeric dimention {i}")),
_type: "number".to_string(),
},
CubeMetaDimension {
name: format!("MultiTypeCube.dim_str{}", i),
- description: None,
+ description: Some(format!("Test string dimention {i}")),
_type: "string".to_string(),
},
CubeMetaDimension {
name: format!("MultiTypeCube.dim_date{}", i),
- description: None,
+ description: Some(format!("Test time dimention {i}")),
_type: "time".to_string(),
},
]
@@ -292,21 +296,21 @@ pub fn get_test_meta() -> Vec {
_type: "number".to_string(),
agg_type: Some("number".to_string()),
title: None,
- description: None,
+ description: Some(format!("Test number measure {i}")),
},
CubeMetaMeasure {
name: format!("MultiTypeCube.measure_str{}", i),
_type: "string".to_string(),
agg_type: Some("max".to_string()),
title: None,
- description: None,
+ description: Some(format!("Test max(string) measure {i}")),
},
CubeMetaMeasure {
name: format!("MultiTypeCube.measure_date{}", i),
_type: "time".to_string(),
agg_type: Some("max".to_string()),
title: None,
- description: None,
+ description: Some(format!("Test max(time) measure {i}")),
},
]
})
@@ -315,35 +319,35 @@ pub fn get_test_meta() -> Vec {
CubeMetaMeasure {
name: "MultiTypeCube.count".to_string(),
title: None,
- description: None,
+ description: Some("Test count measure".to_string()),
_type: "number".to_string(),
agg_type: Some("count".to_string()),
},
CubeMetaMeasure {
name: "MultiTypeCube.maxPrice".to_string(),
title: None,
- description: None,
+ description: Some("Test maxPrice measure".to_string()),
_type: "number".to_string(),
agg_type: Some("max".to_string()),
},
CubeMetaMeasure {
name: "MultiTypeCube.minPrice".to_string(),
title: None,
- description: None,
+ description: Some("Test minPrice measure".to_string()),
_type: "number".to_string(),
agg_type: Some("min".to_string()),
},
CubeMetaMeasure {
name: "MultiTypeCube.avgPrice".to_string(),
title: None,
- description: None,
+ description: Some("Test avgPrice measure".to_string()),
_type: "number".to_string(),
agg_type: Some("avg".to_string()),
},
CubeMetaMeasure {
name: "MultiTypeCube.countDistinct".to_string(),
title: None,
- description: None,
+ description: Some("Test countDistinct measure".to_string()),
_type: "number".to_string(),
agg_type: Some("countDistinct".to_string()),
},
@@ -657,10 +661,19 @@ pub fn get_test_auth() -> Arc<dyn SqlAuthService> {
Arc::new(TestSqlAuth {})
}
+#[derive(Clone, Debug)]
+pub struct TestTransportLoadCall {
+ pub query: TransportLoadRequestQuery,
+ pub sql_query: Option<SqlQuery>,
+ pub ctx: AuthContextRef,
+ pub meta: LoadRequestMeta,
+}
+
#[derive(Debug)]
struct TestConnectionTransport {
meta_context: Arc<MetaContext>,
load_mocks: tokio::sync::Mutex<Vec<(TransportLoadRequestQuery, TransportLoadResponse)>>,
+ load_calls: tokio::sync::Mutex<Vec<TestTransportLoadCall>>,
}
impl TestConnectionTransport {
@@ -668,9 +681,14 @@ impl TestConnectionTransport {
Self {
meta_context,
load_mocks: tokio::sync::Mutex::new(vec![]),
+ load_calls: tokio::sync::Mutex::new(vec![]),
}
}
+ pub async fn load_calls(&self) -> Vec<TestTransportLoadCall> {
+ self.load_calls.lock().await.clone()
+ }
+
pub async fn add_cube_load_mock(
&self,
req: TransportLoadRequestQuery,
@@ -692,13 +710,17 @@ impl TransportService for TestConnectionTransport {
_span_id: Option<Arc<SpanId>>,
query: TransportLoadRequestQuery,
_ctx: AuthContextRef,
- _meta_fields: LoadRequestMeta,
+ meta: LoadRequestMeta,
_member_to_alias: Option<HashMap<String, String>>,
expression_params: Option<Vec<Option<String>>>,
) -> Result<SqlResponse, CubeError> {
+ let inputs = serde_json::json!({
+ "query": query,
+ "meta": meta,
+ });
Ok(SqlResponse {
sql: SqlQuery::new(
- format!("SELECT * FROM {}", serde_json::to_string(&query).unwrap()),
+ format!("SELECT * FROM {}", serde_json::to_string(&inputs).unwrap()),
expression_params.unwrap_or(Vec::new()),
),
})
@@ -710,16 +732,30 @@ impl TransportService for TestConnectionTransport {
_span_id: Option<Arc<SpanId>>,
query: TransportLoadRequestQuery,
sql_query: Option<SqlQuery>,
- _ctx: AuthContextRef,
- _meta_fields: LoadRequestMeta,
+ ctx: AuthContextRef,
+ meta: LoadRequestMeta,
) -> Result<TransportLoadResponse, CubeError> {
- if sql_query.is_some() {
- unimplemented!("load with sql_query");
+ {
+ let mut calls = self.load_calls.lock().await;
+ calls.push(TestTransportLoadCall {
+ query: query.clone(),
+ sql_query: sql_query.clone(),
+ ctx: ctx.clone(),
+ meta: meta.clone(),
+ });
+ }
+
+ if let Some(sql_query) = sql_query {
+ return Err(CubeError::internal(format!(
+ "Test transport does not support load with SQL query: {sql_query:?}"
+ )));
}
let mocks = self.load_mocks.lock().await;
let Some((_req, res)) = mocks.iter().find(|(req, _res)| req == &query) else {
- panic!("Unexpected query: {:?}", query);
+ return Err(CubeError::internal(format!(
+ "Unexpected query in test transport: {query:?}"
+ )));
};
Ok(res.clone())
}
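
The two `load`/`sql` hunks above turn `TestConnectionTransport` into a recording test double: every `load` invocation is captured as a `TestTransportLoadCall` before the mocks are consulted, and the hard `unimplemented!`/`panic!` paths become ordinary `CubeError`s, so an unexpected query surfaces as a regular error the caller can report. A minimal sketch of an assertion built on top of this, assuming a `TestContext` constructed the way other tests in this module construct one; only `load_calls()` and the struct fields from this diff are relied on, and the helper name is hypothetical:

```rust
// Hypothetical helper: verify that every recorded transport load carried the
// expected (changed) user in its LoadRequestMeta. TestTransportLoadCall
// derives Debug, so the meta can be inspected via its Debug output without
// assuming the exact LoadRequestMeta API.
async fn assert_change_user_reached_transport(ctx: &TestContext, expected_user: &str) {
    let calls = ctx.load_calls().await;
    assert!(!calls.is_empty(), "expected at least one transport load call");
    for call in calls {
        let meta_repr = format!("{:?}", call.meta);
        assert!(
            meta_repr.contains(expected_user),
            "load meta {meta_repr} does not mention user {expected_user}"
        );
    }
}
```
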
@@ -860,6 +896,9 @@ impl TestContext {
.or(Some(config_limit));
self.transport.add_cube_load_mock(req, res).await
}
+ pub async fn load_calls(&self) -> Vec<TestTransportLoadCall> {
+ self.transport.load_calls().await
+ }
pub async fn convert_sql_to_cube_query(&self, query: &str) -> CompilationResult<QueryPlan> {
// TODO push to_string() deeper
@@ -915,14 +954,10 @@ impl TestContext {
}
}
-lazy_static! {
- pub static ref TEST_LOGGING_INITIALIZED: std::sync::RwLock<bool> =
- std::sync::RwLock::new(false);
-}
+static TEST_LOGGING_INITIALIZED: std::sync::Once = std::sync::Once::new();
pub fn init_testing_logger() {
- let mut initialized = TEST_LOGGING_INITIALIZED.write().unwrap();
- if !*initialized {
+ TEST_LOGGING_INITIALIZED.call_once(|| {
let log_level = log::Level::Trace;
let logger = simple_logger::SimpleLogger::new()
.with_level(log::Level::Error.to_level_filter())
@@ -933,8 +968,7 @@ pub fn init_testing_logger() {
log::set_boxed_logger(Box::new(logger)).unwrap();
log::set_max_level(log_level.to_level_filter());
- *initialized = true;
- }
+ });
}
pub async fn convert_select_to_query_plan_customized(
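
The last hunk swaps a `lazy_static!` `RwLock<bool>` guard for `std::sync::Once`, which expresses the intent directly: the closure is guaranteed to run exactly once even when many tests race to initialize logging, and there is no flag left to read or write afterwards. A standalone illustration of the same pattern (the names here are illustrative, not from the diff):

```rust
use std::sync::Once;

// One global Once per piece of one-time setup.
static INIT: Once = Once::new();

fn init_once() {
    // call_once blocks concurrent callers until the first call completes,
    // then becomes a cheap no-op for every subsequent call.
    INIT.call_once(|| {
        println!("initialized");
    });
}

fn main() {
    init_once();
    init_once(); // prints nothing the second time
}
```
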
diff --git a/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_df_execution__join_with_coercion.snap b/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_df_execution__join_with_coercion.snap
new file mode 100644
index 0000000000000..2c7e4b8928da7
--- /dev/null
+++ b/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_df_execution__join_with_coercion.snap
@@ -0,0 +1,9 @@
+---
+source: cubesql/src/compile/test/test_df_execution.rs
+expression: "execute_query(r#\"\n WITH\n t1 AS (\n SELECT 1::int2 AS i1\n ),\n t2 AS (\n SELECT 1::int4 AS i2\n )\n SELECT\n *\n FROM\n t1 LEFT JOIN t2 ON (t1.i1 = t2.i2)\n \"#.to_string(),\nDatabaseProtocol::PostgreSQL,).await.unwrap()"
+---
++----+----+
+| i1 | i2 |
++----+----+
+| 1 | 1 |
++----+----+
diff --git a/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_df_execution__triple_join_with_coercion.snap b/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_df_execution__triple_join_with_coercion.snap
new file mode 100644
index 0000000000000..607514df95b8c
--- /dev/null
+++ b/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_df_execution__triple_join_with_coercion.snap
@@ -0,0 +1,9 @@
+---
+source: cubesql/src/compile/test/test_df_execution.rs
+expression: "execute_query(r#\"\n WITH\n t1 AS (\n SELECT 1::int2 AS i1\n ),\n t2 AS (\n SELECT 1::int4 AS i2\n ),\n t3 AS (\n SELECT 1::int8 AS i3\n )\n SELECT\n *\n FROM\n t1\n LEFT JOIN t2 ON (t1.i1 = t2.i2)\n LEFT JOIN t3 ON (t3.i3 = t2.i2)\n \"#.to_string(),\nDatabaseProtocol::PostgreSQL,).await.unwrap()"
+---
++----+----+----+
+| i1 | i2 | i3 |
++----+----+----+
+| 1 | 1 | 1 |
++----+----+----+
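
Both new snapshots are produced by insta from the `expression` shown in their headers: `execute_query` runs the SQL through the PostgreSQL protocol path and the formatted result table is pinned verbatim. A sketch of what the corresponding test in `test_df_execution.rs` plausibly looks like, reconstructed from the snapshot header; the exact test body is not part of this diff:

```rust
// insta derives the snapshot name from the test function name, which matches
// the *__join_with_coercion.snap file added above.
#[tokio::test]
async fn join_with_coercion() {
    init_testing_logger();
    insta::assert_snapshot!(
        execute_query(
            r#"
            WITH
                t1 AS (SELECT 1::int2 AS i1),
                t2 AS (SELECT 1::int4 AS i2)
            SELECT * FROM t1 LEFT JOIN t2 ON (t1.i1 = t2.i2)
            "#
            .to_string(),
            DatabaseProtocol::PostgreSQL,
        )
        .await
        .unwrap()
    );
}
```
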
diff --git a/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__excel_large_select_query.snap b/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__excel_large_select_query.snap
index 6bf1754107e6b..4d8a3a0f57060 100644
--- a/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__excel_large_select_query.snap
+++ b/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__excel_large_select_query.snap
@@ -2,23 +2,23 @@
source: cubesql/src/compile/test/test_introspection.rs
expression: "execute_query(\"\n SELECT\n na.nspname as Schema,\n cl.relname as Table,\n att.attname AS Name,\n att.attnum as Position,\n CASE\n WHEN att.attnotnull = 'f' THEN 'true'\n ELSE 'false'\n END as Nullable,\n CASE\n WHEN\n ty.typname Like 'bit' OR\n ty.typname Like 'varbit' and\n att.atttypmod > 0\n THEN att.atttypmod\n WHEN ty.typname Like 'interval' THEN -1\n WHEN att.atttypmod > 0 THEN att.atttypmod - 4\n ELSE att.atttypmod\n END as Length,\n (information_schema._pg_numeric_precision(information_schema._pg_truetypid(att.*, ty.*), information_schema._pg_truetypmod(att.*, ty.*)))::information_schema.cardinal_number AS Precision,\n (information_schema._pg_numeric_scale(information_schema._pg_truetypid(att.*, ty.*), information_schema._pg_truetypmod(att.*, ty.*)))::information_schema.cardinal_number AS Scale,\n (information_schema._pg_datetime_precision(information_schema._pg_truetypid(att.*, ty.*), information_schema._pg_truetypmod(att.*, ty.*)))::information_schema.cardinal_number AS DatetimeLength,\n CASE\n WHEN att.attnotnull = 'f' THEN 'false'\n ELSE 'true'\n END as IsUnique,\n att.atthasdef as HasDefaultValue,\n att.attisdropped as IsDropped,\n att.attinhcount as ancestorCount,\n att.attndims as Dimension,\n CASE\n WHEN attndims > 0 THEN true\n ELSE false\n END AS isarray,\n CASE\n WHEN ty.typname = 'bpchar' THEN 'char'\n WHEN ty.typname = '_bpchar' THEN '_char'\n ELSE ty.typname\n END as TypeName,\n tn.nspname as TypeSchema,\n et.typname as elementaltypename,\n description as Comment,\n cs.relname AS sername,\n ns.nspname AS serschema,\n att.attidentity as IdentityMode,\n CAST(pg_get_expr(def.adbin, def.adrelid) AS varchar) as DefaultValue,\n (SELECT count(1) FROM pg_type t2 WHERE t2.typname=ty.typname) > 1 AS isdup\n FROM pg_attribute att\n JOIN pg_type ty ON ty.oid=atttypid\n JOIN pg_namespace tn ON tn.oid=ty.typnamespace\n JOIN pg_class cl ON\n cl.oid=attrelid AND\n (\n (cl.relkind = 'r') OR\n (cl.relkind = 's') OR\n (cl.relkind = 'v') OR\n (cl.relkind = 'm') OR\n (cl.relkind = 'f')\n )\n JOIN pg_namespace na ON na.oid=cl.relnamespace\n LEFT OUTER JOIN pg_type et ON et.oid=ty.typelem\n LEFT OUTER JOIN pg_attrdef def ON\n adrelid=attrelid AND\n adnum=attnum\n LEFT OUTER JOIN pg_description des ON\n des.objoid=attrelid AND\n des.objsubid=attnum\n LEFT OUTER JOIN (\n pg_depend\n JOIN pg_class cs ON\n objid=cs.oid AND\n cs.relkind='S' AND\n classid='pg_class'::regclass::oid\n ) ON\n refobjid=attrelid AND\n refobjsubid=attnum\n LEFT OUTER JOIN pg_namespace ns ON ns.oid=cs.relnamespace\n WHERE\n attnum > 0 AND\n attisdropped IS FALSE AND\n cl.relname like E'KibanaSampleDataEcommerce' AND\n na.nspname like E'public' AND\n att.attname like '%'\n ORDER BY attnum\n ;\n \".to_string(),\nDatabaseProtocol::PostgreSQL).await?"
---
-+--------+---------------------------+--------------------+----------+----------+--------+-----------+-------+----------------+----------+-----------------+-----------+---------------+-----------+---------+-----------+------------+-------------------+---------+---------+-----------+--------------+--------------+-------+
-| Schema | Table | Name | Position | Nullable | Length | Precision | Scale | DatetimeLength | IsUnique | HasDefaultValue | IsDropped | ancestorCount | Dimension | isarray | TypeName | TypeSchema | elementaltypename | Comment | sername | serschema | IdentityMode | DefaultValue | isdup |
-+--------+---------------------------+--------------------+----------+----------+--------+-----------+-------+----------------+----------+-----------------+-----------+---------------+-----------+---------+-----------+------------+-------------------+---------+---------+-----------+--------------+--------------+-------+
-| public | KibanaSampleDataEcommerce | count | 1 | false | -1 | 64 | 0 | NULL | true | false | false | 0 | 0 | false | int8 | pg_catalog | NULL | NULL | NULL | NULL | | NULL | false |
-| public | KibanaSampleDataEcommerce | maxPrice | 2 | false | -1 | NULL | NULL | NULL | true | false | false | 0 | 0 | false | numeric | pg_catalog | NULL | NULL | NULL | NULL | | NULL | false |
-| public | KibanaSampleDataEcommerce | sumPrice | 3 | false | -1 | NULL | NULL | NULL | true | false | false | 0 | 0 | false | numeric | pg_catalog | NULL | NULL | NULL | NULL | | NULL | false |
-| public | KibanaSampleDataEcommerce | minPrice | 4 | false | -1 | NULL | NULL | NULL | true | false | false | 0 | 0 | false | numeric | pg_catalog | NULL | NULL | NULL | NULL | | NULL | false |
-| public | KibanaSampleDataEcommerce | avgPrice | 5 | false | -1 | NULL | NULL | NULL | true | false | false | 0 | 0 | false | numeric | pg_catalog | NULL | NULL | NULL | NULL | | NULL | false |
-| public | KibanaSampleDataEcommerce | countDistinct | 6 | false | -1 | 64 | 0 | NULL | true | false | false | 0 | 0 | false | int8 | pg_catalog | NULL | NULL | NULL | NULL | | NULL | false |
-| public | KibanaSampleDataEcommerce | order_date | 7 | true | -1 | NULL | NULL | 6 | false | false | false | 0 | 0 | false | timestamp | pg_catalog | NULL | NULL | NULL | NULL | | NULL | false |
-| public | KibanaSampleDataEcommerce | last_mod | 8 | true | -1 | NULL | NULL | 6 | false | false | false | 0 | 0 | false | timestamp | pg_catalog | NULL | NULL | NULL | NULL | | NULL | false |
-| public | KibanaSampleDataEcommerce | customer_gender | 9 | true | -1 | NULL | NULL | NULL | false | false | false | 0 | 0 | false | text | pg_catalog | NULL | NULL | NULL | NULL | | NULL | false |
-| public | KibanaSampleDataEcommerce | notes | 10 | true | -1 | NULL | NULL | NULL | false | false | false | 0 | 0 | false | text | pg_catalog | NULL | NULL | NULL | NULL | | NULL | false |
-| public | KibanaSampleDataEcommerce | taxful_total_price | 11 | true | -1 | NULL | NULL | NULL | false | false | false | 0 | 0 | false | numeric | pg_catalog | NULL | NULL | NULL | NULL | | NULL | false |
-| public | KibanaSampleDataEcommerce | has_subscription | 12 | true | -1 | NULL | NULL | NULL | false | false | false | 0 | 0 | false | bool | pg_catalog | NULL | NULL | NULL | NULL | | NULL | false |
-| public | KibanaSampleDataEcommerce | is_male | 13 | false | -1 | NULL | NULL | NULL | true | false | false | 0 | 0 | false | bool | pg_catalog | NULL | NULL | NULL | NULL | | NULL | false |
-| public | KibanaSampleDataEcommerce | is_female | 14 | false | -1 | NULL | NULL | NULL | true | false | false | 0 | 0 | false | bool | pg_catalog | NULL | NULL | NULL | NULL | | NULL | false |
-| public | KibanaSampleDataEcommerce | __user | 15 | true | -1 | NULL | NULL | NULL | false | false | false | 0 | 0 | false | text | pg_catalog | NULL | NULL | NULL | NULL | | NULL | false |
-| public | KibanaSampleDataEcommerce | __cubeJoinField | 16 | true | -1 | NULL | NULL | NULL | false | false | false | 0 | 0 | false | text | pg_catalog | NULL | NULL | NULL | NULL | | NULL | false |
-+--------+---------------------------+--------------------+----------+----------+--------+-----------+-------+----------------+----------+-----------------+-----------+---------------+-----------+---------+-----------+------------+-------------------+---------+---------+-----------+--------------+--------------+-------+
++--------+---------------------------+--------------------+----------+----------+--------+-----------+-------+----------------+----------+-----------------+-----------+---------------+-----------+---------+-----------+------------+-------------------+-----------------------------------------------+---------+-----------+--------------+--------------+-------+
+| Schema | Table | Name | Position | Nullable | Length | Precision | Scale | DatetimeLength | IsUnique | HasDefaultValue | IsDropped | ancestorCount | Dimension | isarray | TypeName | TypeSchema | elementaltypename | Comment | sername | serschema | IdentityMode | DefaultValue | isdup |
++--------+---------------------------+--------------------+----------+----------+--------+-----------+-------+----------------+----------+-----------------+-----------+---------------+-----------+---------+-----------+------------+-------------------+-----------------------------------------------+---------+-----------+--------------+--------------+-------+
+| public | KibanaSampleDataEcommerce | count | 1 | false | -1 | 64 | 0 | NULL | true | false | false | 0 | 0 | false | int8 | pg_catalog | NULL | Events count | NULL | NULL | | NULL | false |
+| public | KibanaSampleDataEcommerce | maxPrice | 2 | false | -1 | NULL | NULL | NULL | true | false | false | 0 | 0 | false | numeric | pg_catalog | NULL | NULL | NULL | NULL | | NULL | false |
+| public | KibanaSampleDataEcommerce | sumPrice | 3 | false | -1 | NULL | NULL | NULL | true | false | false | 0 | 0 | false | numeric | pg_catalog | NULL | NULL | NULL | NULL | | NULL | false |
+| public | KibanaSampleDataEcommerce | minPrice | 4 | false | -1 | NULL | NULL | NULL | true | false | false | 0 | 0 | false | numeric | pg_catalog | NULL | NULL | NULL | NULL | | NULL | false |
+| public | KibanaSampleDataEcommerce | avgPrice | 5 | false | -1 | NULL | NULL | NULL | true | false | false | 0 | 0 | false | numeric | pg_catalog | NULL | NULL | NULL | NULL | | NULL | false |
+| public | KibanaSampleDataEcommerce | countDistinct | 6 | false | -1 | 64 | 0 | NULL | true | false | false | 0 | 0 | false | int8 | pg_catalog | NULL | NULL | NULL | NULL | | NULL | false |
+| public | KibanaSampleDataEcommerce | order_date | 7 | true | -1 | NULL | NULL | 6 | false | false | false | 0 | 0 | false | timestamp | pg_catalog | NULL | NULL | NULL | NULL | | NULL | false |
+| public | KibanaSampleDataEcommerce | last_mod | 8 | true | -1 | NULL | NULL | 6 | false | false | false | 0 | 0 | false | timestamp | pg_catalog | NULL | NULL | NULL | NULL | | NULL | false |
+| public | KibanaSampleDataEcommerce | customer_gender | 9 | true | -1 | NULL | NULL | NULL | false | false | false | 0 | 0 | false | text | pg_catalog | NULL | Customer gender | NULL | NULL | | NULL | false |
+| public | KibanaSampleDataEcommerce | notes | 10 | true | -1 | NULL | NULL | NULL | false | false | false | 0 | 0 | false | text | pg_catalog | NULL | NULL | NULL | NULL | | NULL | false |
+| public | KibanaSampleDataEcommerce | taxful_total_price | 11 | true | -1 | NULL | NULL | NULL | false | false | false | 0 | 0 | false | numeric | pg_catalog | NULL | NULL | NULL | NULL | | NULL | false |
+| public | KibanaSampleDataEcommerce | has_subscription | 12 | true | -1 | NULL | NULL | NULL | false | false | false | 0 | 0 | false | bool | pg_catalog | NULL | NULL | NULL | NULL | | NULL | false |
+| public | KibanaSampleDataEcommerce | is_male | 13 | false | -1 | NULL | NULL | NULL | true | false | false | 0 | 0 | false | bool | pg_catalog | NULL | Male users segment | NULL | NULL | | NULL | false |
+| public | KibanaSampleDataEcommerce | is_female | 14 | false | -1 | NULL | NULL | NULL | true | false | false | 0 | 0 | false | bool | pg_catalog | NULL | NULL | NULL | NULL | | NULL | false |
+| public | KibanaSampleDataEcommerce | __user | 15 | true | -1 | NULL | NULL | NULL | false | false | false | 0 | 0 | false | text | pg_catalog | NULL | Virtual column for security context switching | NULL | NULL | | NULL | false |
+| public | KibanaSampleDataEcommerce | __cubeJoinField | 16 | true | -1 | NULL | NULL | NULL | false | false | false | 0 | 0 | false | text | pg_catalog | NULL | Virtual column for joining cubes | NULL | NULL | | NULL | false |
++--------+---------------------------+--------------------+----------+----------+--------+-----------+-------+----------------+----------+-----------------+-----------+---------------+-----------+---------+-----------+------------+-------------------+-----------------------------------------------+---------+-----------+--------------+--------------+-------+
diff --git a/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__excel_select_db_query.snap b/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__excel_select_db_query.snap
index d54385dfdf071..6c3c50c674549 100644
--- a/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__excel_select_db_query.snap
+++ b/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__excel_select_db_query.snap
@@ -2,12 +2,12 @@
source: cubesql/src/compile/test/test_introspection.rs
expression: "execute_query(\"\n SELECT\n 'db' as Database,\n ns.nspname as Schema,\n relname as Name,\n CASE\n WHEN ns.nspname Like E'pg\\\\_catalog' then 'Catalog'\n WHEN ns.nspname Like E'information\\\\_schema' then 'Information'\n WHEN relkind = 'f' then 'Foreign'\n ELSE 'User'\n END as TableType,\n pg_get_userbyid(relowner) AS definer,\n rel.oid as Oid,\n relacl as ACL,\n true as HasOids,\n relhassubclass as HasSubtables,\n reltuples as RowNumber,\n description as Comment,\n relnatts as ColumnNumber,\n relhastriggers as TriggersNumber,\n conname as Constraint,\n conkey as ColumnConstrainsIndexes\n FROM pg_class rel\n INNER JOIN pg_namespace ns ON relnamespace = ns.oid\n LEFT OUTER JOIN pg_description des ON\n des.objoid = rel.oid AND\n des.objsubid = 0\n LEFT OUTER JOIN pg_constraint c ON\n c.conrelid = rel.oid AND\n c.contype = 'p'\n WHERE\n (\n (relkind = 'r') OR\n (relkind = 's') OR\n (relkind = 'f')\n ) AND\n NOT ns.nspname LIKE E'pg\\\\_temp\\\\_%%' AND\n NOT ns.nspname like E'pg\\\\_%' AND\n NOT ns.nspname like E'information\\\\_schema' AND\n ns.nspname::varchar like E'public' AND\n relname::varchar like '%' AND\n pg_get_userbyid(relowner)::varchar like '%'\n ORDER BY relname\n ;\n \".to_string(),\nDatabaseProtocol::PostgreSQL).await?"
---
-+----------+--------+---------------------------+-----------+---------+-------+------+---------+--------------+-----------+---------+--------------+----------------+------------+-------------------------+
-| Database | Schema | Name | TableType | definer | Oid | ACL | HasOids | HasSubtables | RowNumber | Comment | ColumnNumber | TriggersNumber | Constraint | ColumnConstrainsIndexes |
-+----------+--------+---------------------------+-----------+---------+-------+------+---------+--------------+-----------+---------+--------------+----------------+------------+-------------------------+
-| db | public | KibanaSampleDataEcommerce | User | ovr | 18000 | NULL | true | false | -1 | NULL | 16 | false | NULL | NULL |
-| db | public | Logs | User | ovr | 18019 | NULL | true | false | -1 | NULL | 7 | false | NULL | NULL |
-| db | public | MultiTypeCube | User | ovr | 18245 | NULL | true | false | -1 | NULL | 67 | false | NULL | NULL |
-| db | public | NumberCube | User | ovr | 18029 | NULL | true | false | -1 | NULL | 3 | false | NULL | NULL |
-| db | public | WideCube | User | ovr | 18035 | NULL | true | false | -1 | NULL | 207 | false | NULL | NULL |
-+----------+--------+---------------------------+-----------+---------+-------+------+---------+--------------+-----------+---------+--------------+----------------+------------+-------------------------+
++----------+--------+---------------------------+-----------+---------+-------+------+---------+--------------+-----------+-------------------------------------------------------+--------------+----------------+------------+-------------------------+
+| Database | Schema | Name | TableType | definer | Oid | ACL | HasOids | HasSubtables | RowNumber | Comment | ColumnNumber | TriggersNumber | Constraint | ColumnConstrainsIndexes |
++----------+--------+---------------------------+-----------+---------+-------+------+---------+--------------+-----------+-------------------------------------------------------+--------------+----------------+------------+-------------------------+
+| db | public | KibanaSampleDataEcommerce | User | ovr | 18000 | NULL | true | false | -1 | Sample data for tracking eCommerce orders from Kibana | 16 | false | NULL | NULL |
+| db | public | Logs | User | ovr | 18019 | NULL | true | false | -1 | NULL | 7 | false | NULL | NULL |
+| db | public | MultiTypeCube | User | ovr | 18245 | NULL | true | false | -1 | Test cube with a little bit of everything | 67 | false | NULL | NULL |
+| db | public | NumberCube | User | ovr | 18029 | NULL | true | false | -1 | NULL | 3 | false | NULL | NULL |
+| db | public | WideCube | User | ovr | 18035 | NULL | true | false | -1 | NULL | 207 | false | NULL | NULL |
++----------+--------+---------------------------+-----------+---------+-------+------+---------+--------------+-----------+-------------------------------------------------------+--------------+----------------+------------+-------------------------+
diff --git a/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__sigma_computing_with_subquery_query.snap b/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__sigma_computing_with_subquery_query.snap
index dec355a0046fb..57c2822c9acdb 100644
--- a/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__sigma_computing_with_subquery_query.snap
+++ b/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__sigma_computing_with_subquery_query.snap
@@ -2,23 +2,23 @@
source: cubesql/src/compile/test/test_introspection.rs
expression: "execute_query(\"\n with\n nsp as (\n select oid\n from pg_catalog.pg_namespace\n where nspname = 'public'\n ),\n tbl as (\n select oid\n from pg_catalog.pg_class\n where\n relname = 'KibanaSampleDataEcommerce' and\n relnamespace = (select oid from nsp)\n )\n select\n attname,\n typname,\n description\n from pg_attribute a\n join pg_type on atttypid = pg_type.oid\n left join pg_description on\n attrelid = objoid and\n attnum = objsubid\n where\n attnum > 0 and\n attrelid = (select oid from tbl)\n order by attnum\n ;\n \".to_string(),\nDatabaseProtocol::PostgreSQL).await?"
---
-+--------------------+-----------+-------------+
-| attname | typname | description |
-+--------------------+-----------+-------------+
-| count | int8 | NULL |
-| maxPrice | numeric | NULL |
-| sumPrice | numeric | NULL |
-| minPrice | numeric | NULL |
-| avgPrice | numeric | NULL |
-| countDistinct | int8 | NULL |
-| order_date | timestamp | NULL |
-| last_mod | timestamp | NULL |
-| customer_gender | text | NULL |
-| notes | text | NULL |
-| taxful_total_price | numeric | NULL |
-| has_subscription | bool | NULL |
-| is_male | bool | NULL |
-| is_female | bool | NULL |
-| __user | text | NULL |
-| __cubeJoinField | text | NULL |
-+--------------------+-----------+-------------+
++--------------------+-----------+-----------------------------------------------+
+| attname | typname | description |
++--------------------+-----------+-----------------------------------------------+
+| count | int8 | Events count |
+| maxPrice | numeric | NULL |
+| sumPrice | numeric | NULL |
+| minPrice | numeric | NULL |
+| avgPrice | numeric | NULL |
+| countDistinct | int8 | NULL |
+| order_date | timestamp | NULL |
+| last_mod | timestamp | NULL |
+| customer_gender | text | Customer gender |
+| notes | text | NULL |
+| taxful_total_price | numeric | NULL |
+| has_subscription | bool | NULL |
+| is_male | bool | Male users segment |
+| is_female | bool | NULL |
+| __user | text | Virtual column for security context switching |
+| __cubeJoinField | text | Virtual column for joining cubes |
++--------------------+-----------+-----------------------------------------------+
diff --git a/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__sqlalchemy_new_conname_query.snap b/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__sqlalchemy_new_conname_query.snap
index 460faff0ba4a2..39066ea680e91 100644
--- a/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__sqlalchemy_new_conname_query.snap
+++ b/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__sqlalchemy_new_conname_query.snap
@@ -2,23 +2,23 @@
source: cubesql/src/compile/test/test_introspection.rs
expression: "execute_query(r#\"SELECT\n a.attname,\n pg_catalog.format_type(a.atttypid, a.atttypmod),\n (\n SELECT\n pg_catalog.pg_get_expr(d.adbin, d.adrelid)\n FROM\n pg_catalog.pg_attrdef AS d\n WHERE\n d.adrelid = a.attrelid\n AND d.adnum = a.attnum\n AND a.atthasdef\n ) AS DEFAULT,\n a.attnotnull,\n a.attrelid AS table_oid,\n pgd.description AS comment,\n a.attgenerated AS generated,\n (\n SELECT\n json_build_object(\n 'always',\n a.attidentity = 'a',\n 'start',\n s.seqstart,\n 'increment',\n s.seqincrement,\n 'minvalue',\n s.seqmin,\n 'maxvalue',\n s.seqmax,\n 'cache',\n s.seqcache,\n 'cycle',\n s.seqcycle\n )\n FROM\n pg_catalog.pg_sequence AS s\n JOIN pg_catalog.pg_class AS c ON s.seqrelid = c.\"oid\"\n WHERE\n c.relkind = 'S'\n AND a.attidentity <> ''\n AND s.seqrelid = CAST(\n pg_catalog.pg_get_serial_sequence(\n CAST(CAST(a.attrelid AS REGCLASS) AS TEXT),\n a.attname\n ) AS REGCLASS\n )\n ) AS identity_options\n FROM\n pg_catalog.pg_attribute AS a\n LEFT JOIN pg_catalog.pg_description AS pgd ON (\n pgd.objoid = a.attrelid\n AND pgd.objsubid = a.attnum\n )\n WHERE\n a.attrelid = 18000\n AND a.attnum > 0\n AND NOT a.attisdropped\n ORDER BY\n a.attnum\"#.to_string(),\nDatabaseProtocol::PostgreSQL).await?"
---
-+--------------------+-------------------------------------+---------+------------+-----------+---------+-----------+------------------+
-| attname | format_type(a.atttypid,a.atttypmod) | DEFAULT | attnotnull | table_oid | comment | generated | identity_options |
-+--------------------+-------------------------------------+---------+------------+-----------+---------+-----------+------------------+
-| count | bigint | NULL | true | 18000 | NULL | | NULL |
-| maxPrice | numeric | NULL | true | 18000 | NULL | | NULL |
-| sumPrice | numeric | NULL | true | 18000 | NULL | | NULL |
-| minPrice | numeric | NULL | true | 18000 | NULL | | NULL |
-| avgPrice | numeric | NULL | true | 18000 | NULL | | NULL |
-| countDistinct | bigint | NULL | true | 18000 | NULL | | NULL |
-| order_date | timestamp without time zone | NULL | false | 18000 | NULL | | NULL |
-| last_mod | timestamp without time zone | NULL | false | 18000 | NULL | | NULL |
-| customer_gender | text | NULL | false | 18000 | NULL | | NULL |
-| notes | text | NULL | false | 18000 | NULL | | NULL |
-| taxful_total_price | numeric | NULL | false | 18000 | NULL | | NULL |
-| has_subscription | boolean | NULL | false | 18000 | NULL | | NULL |
-| is_male | boolean | NULL | true | 18000 | NULL | | NULL |
-| is_female | boolean | NULL | true | 18000 | NULL | | NULL |
-| __user | text | NULL | false | 18000 | NULL | | NULL |
-| __cubeJoinField | text | NULL | false | 18000 | NULL | | NULL |
-+--------------------+-------------------------------------+---------+------------+-----------+---------+-----------+------------------+
++--------------------+-------------------------------------+---------+------------+-----------+-----------------------------------------------+-----------+------------------+
+| attname | format_type(a.atttypid,a.atttypmod) | DEFAULT | attnotnull | table_oid | comment | generated | identity_options |
++--------------------+-------------------------------------+---------+------------+-----------+-----------------------------------------------+-----------+------------------+
+| count | bigint | NULL | true | 18000 | Events count | | NULL |
+| maxPrice | numeric | NULL | true | 18000 | NULL | | NULL |
+| sumPrice | numeric | NULL | true | 18000 | NULL | | NULL |
+| minPrice | numeric | NULL | true | 18000 | NULL | | NULL |
+| avgPrice | numeric | NULL | true | 18000 | NULL | | NULL |
+| countDistinct | bigint | NULL | true | 18000 | NULL | | NULL |
+| order_date | timestamp without time zone | NULL | false | 18000 | NULL | | NULL |
+| last_mod | timestamp without time zone | NULL | false | 18000 | NULL | | NULL |
+| customer_gender | text | NULL | false | 18000 | Customer gender | | NULL |
+| notes | text | NULL | false | 18000 | NULL | | NULL |
+| taxful_total_price | numeric | NULL | false | 18000 | NULL | | NULL |
+| has_subscription | boolean | NULL | false | 18000 | NULL | | NULL |
+| is_male | boolean | NULL | true | 18000 | Male users segment | | NULL |
+| is_female | boolean | NULL | true | 18000 | NULL | | NULL |
+| __user | text | NULL | false | 18000 | Virtual column for security context switching | | NULL |
+| __cubeJoinField | text | NULL | false | 18000 | Virtual column for joining cubes | | NULL |
++--------------------+-------------------------------------+---------+------------+-----------+-----------------------------------------------+-----------+------------------+
diff --git a/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__superset_subquery.snap b/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__superset_subquery.snap
index 5808a86aacb4e..948c96230dcc4 100644
--- a/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__superset_subquery.snap
+++ b/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__superset_subquery.snap
@@ -2,23 +2,23 @@
source: cubesql/src/compile/test/test_introspection.rs
expression: "execute_query(\"\n SELECT\n a.attname,\n pg_catalog.format_type(a.atttypid, a.atttypmod),\n (\n SELECT pg_catalog.pg_get_expr(d.adbin, d.adrelid)\n FROM pg_catalog.pg_attrdef d\n WHERE\n d.adrelid = a.attrelid AND\n d.adnum = a.attnum AND\n a.atthasdef\n ) AS DEFAULT,\n a.attnotnull,\n a.attnum,\n a.attrelid as table_oid,\n pgd.description as comment,\n a.attgenerated as generated\n FROM pg_catalog.pg_attribute a\n LEFT JOIN pg_catalog.pg_description pgd ON (\n pgd.objoid = a.attrelid AND\n pgd.objsubid = a.attnum\n )\n WHERE\n a.attrelid = 18000\n AND a.attnum > 0\n AND NOT a.attisdropped\n ORDER BY a.attnum\n ;\n \".to_string(),\nDatabaseProtocol::PostgreSQL).await?"
---
-+--------------------+-------------------------------------+---------+------------+--------+-----------+---------+-----------+
-| attname | format_type(a.atttypid,a.atttypmod) | DEFAULT | attnotnull | attnum | table_oid | comment | generated |
-+--------------------+-------------------------------------+---------+------------+--------+-----------+---------+-----------+
-| count | bigint | NULL | true | 1 | 18000 | NULL | |
-| maxPrice | numeric | NULL | true | 2 | 18000 | NULL | |
-| sumPrice | numeric | NULL | true | 3 | 18000 | NULL | |
-| minPrice | numeric | NULL | true | 4 | 18000 | NULL | |
-| avgPrice | numeric | NULL | true | 5 | 18000 | NULL | |
-| countDistinct | bigint | NULL | true | 6 | 18000 | NULL | |
-| order_date | timestamp without time zone | NULL | false | 7 | 18000 | NULL | |
-| last_mod | timestamp without time zone | NULL | false | 8 | 18000 | NULL | |
-| customer_gender | text | NULL | false | 9 | 18000 | NULL | |
-| notes | text | NULL | false | 10 | 18000 | NULL | |
-| taxful_total_price | numeric | NULL | false | 11 | 18000 | NULL | |
-| has_subscription | boolean | NULL | false | 12 | 18000 | NULL | |
-| is_male | boolean | NULL | true | 13 | 18000 | NULL | |
-| is_female | boolean | NULL | true | 14 | 18000 | NULL | |
-| __user | text | NULL | false | 15 | 18000 | NULL | |
-| __cubeJoinField | text | NULL | false | 16 | 18000 | NULL | |
-+--------------------+-------------------------------------+---------+------------+--------+-----------+---------+-----------+
++--------------------+-------------------------------------+---------+------------+--------+-----------+-----------------------------------------------+-----------+
+| attname | format_type(a.atttypid,a.atttypmod) | DEFAULT | attnotnull | attnum | table_oid | comment | generated |
++--------------------+-------------------------------------+---------+------------+--------+-----------+-----------------------------------------------+-----------+
+| count | bigint | NULL | true | 1 | 18000 | Events count | |
+| maxPrice | numeric | NULL | true | 2 | 18000 | NULL | |
+| sumPrice | numeric | NULL | true | 3 | 18000 | NULL | |
+| minPrice | numeric | NULL | true | 4 | 18000 | NULL | |
+| avgPrice | numeric | NULL | true | 5 | 18000 | NULL | |
+| countDistinct | bigint | NULL | true | 6 | 18000 | NULL | |
+| order_date | timestamp without time zone | NULL | false | 7 | 18000 | NULL | |
+| last_mod | timestamp without time zone | NULL | false | 8 | 18000 | NULL | |
+| customer_gender | text | NULL | false | 9 | 18000 | Customer gender | |
+| notes | text | NULL | false | 10 | 18000 | NULL | |
+| taxful_total_price | numeric | NULL | false | 11 | 18000 | NULL | |
+| has_subscription | boolean | NULL | false | 12 | 18000 | NULL | |
+| is_male | boolean | NULL | true | 13 | 18000 | Male users segment | |
+| is_female | boolean | NULL | true | 14 | 18000 | NULL | |
+| __user | text | NULL | false | 15 | 18000 | Virtual column for security context switching | |
+| __cubeJoinField | text | NULL | false | 16 | 18000 | Virtual column for joining cubes | |
++--------------------+-------------------------------------+---------+------------+--------+-----------+-----------------------------------------------+-----------+
diff --git a/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__tableau_regclass_query.snap b/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__tableau_regclass_query.snap
index af41cdf149bf3..fd6a8ec013151 100644
--- a/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__tableau_regclass_query.snap
+++ b/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__tableau_regclass_query.snap
@@ -2,12 +2,12 @@
source: cubesql/src/compile/test/test_introspection.rs
expression: "execute_query(\"SELECT NULL AS TABLE_CAT,\n n.nspname AS TABLE_SCHEM,\n c.relname AS TABLE_NAME,\n CASE n.nspname ~ '^pg_'\n OR n.nspname = 'information_schema'\n WHEN true THEN\n CASE\n WHEN n.nspname = 'pg_catalog'\n OR n.nspname = 'information_schema' THEN\n CASE c.relkind\n WHEN 'r' THEN 'SYSTEM TABLE'\n WHEN 'v' THEN 'SYSTEM VIEW'\n WHEN 'i' THEN 'SYSTEM INDEX'\n ELSE NULL\n end\n WHEN n.nspname = 'pg_toast' THEN\n CASE c.relkind\n WHEN 'r' THEN 'SYSTEM TOAST TABLE'\n WHEN 'i' THEN 'SYSTEM TOAST INDEX'\n ELSE NULL\n end\n ELSE\n CASE c.relkind\n WHEN 'r' THEN 'TEMPORARY TABLE'\n WHEN 'p' THEN 'TEMPORARY TABLE'\n WHEN 'i' THEN 'TEMPORARY INDEX'\n WHEN 'S' THEN 'TEMPORARY SEQUENCE'\n WHEN 'v' THEN 'TEMPORARY VIEW'\n ELSE NULL\n end\n end\n WHEN false THEN\n CASE c.relkind\n WHEN 'r' THEN 'TABLE'\n WHEN 'p' THEN 'PARTITIONED TABLE'\n WHEN 'i' THEN 'INDEX'\n WHEN 'P' THEN 'PARTITIONED INDEX'\n WHEN 'S' THEN 'SEQUENCE'\n WHEN 'v' THEN 'VIEW'\n WHEN 'c' THEN 'TYPE'\n WHEN 'f' THEN 'FOREIGN TABLE'\n WHEN 'm' THEN 'MATERIALIZED VIEW'\n ELSE NULL\n end\n ELSE NULL\n end AS TABLE_TYPE,\n d.description AS REMARKS,\n '' AS TYPE_CAT,\n '' AS TYPE_SCHEM,\n '' AS TYPE_NAME,\n '' AS SELF_REFERENCING_COL_NAME,\n '' AS REF_GENERATION\n FROM pg_catalog.pg_namespace n,\n pg_catalog.pg_class c\n LEFT JOIN pg_catalog.pg_description d\n ON ( c.oid = d.objoid\n AND d.objsubid = 0\n AND d.classoid = 'pg_class' :: regclass )\n WHERE c.relnamespace = n.oid\n AND ( false\n OR ( c.relkind = 'f' )\n OR ( c.relkind = 'm' )\n OR ( c.relkind = 'p'\n AND n.nspname !~ '^pg_'\n AND n.nspname <> 'information_schema' )\n OR ( c.relkind = 'r'\n AND n.nspname !~ '^pg_'\n AND n.nspname <> 'information_schema' )\n OR ( c.relkind = 'v'\n AND n.nspname <> 'pg_catalog'\n AND n.nspname <> 'information_schema' ) )\n ORDER BY TABLE_SCHEM ASC, TABLE_NAME ASC\n ;\".to_string(),\nDatabaseProtocol::PostgreSQL).await?"
---
-+-----------+-------------+---------------------------+------------+---------+----------+------------+-----------+---------------------------+----------------+
-| TABLE_CAT | TABLE_SCHEM | TABLE_NAME | TABLE_TYPE | REMARKS | TYPE_CAT | TYPE_SCHEM | TYPE_NAME | SELF_REFERENCING_COL_NAME | REF_GENERATION |
-+-----------+-------------+---------------------------+------------+---------+----------+------------+-----------+---------------------------+----------------+
-| NULL | public | KibanaSampleDataEcommerce | TABLE | NULL | | | | | |
-| NULL | public | Logs | TABLE | NULL | | | | | |
-| NULL | public | MultiTypeCube | TABLE | NULL | | | | | |
-| NULL | public | NumberCube | TABLE | NULL | | | | | |
-| NULL | public | WideCube | TABLE | NULL | | | | | |
-+-----------+-------------+---------------------------+------------+---------+----------+------------+-----------+---------------------------+----------------+
++-----------+-------------+---------------------------+------------+-------------------------------------------------------+----------+------------+-----------+---------------------------+----------------+
+| TABLE_CAT | TABLE_SCHEM | TABLE_NAME | TABLE_TYPE | REMARKS | TYPE_CAT | TYPE_SCHEM | TYPE_NAME | SELF_REFERENCING_COL_NAME | REF_GENERATION |
++-----------+-------------+---------------------------+------------+-------------------------------------------------------+----------+------------+-----------+---------------------------+----------------+
+| NULL | public | KibanaSampleDataEcommerce | TABLE | Sample data for tracking eCommerce orders from Kibana | | | | | |
+| NULL | public | Logs | TABLE | NULL | | | | | |
+| NULL | public | MultiTypeCube | TABLE | Test cube with a little bit of everything | | | | | |
+| NULL | public | NumberCube | TABLE | NULL | | | | | |
+| NULL | public | WideCube | TABLE | NULL | | | | | |
++-----------+-------------+---------------------------+------------+-------------------------------------------------------+----------+------------+-----------+---------------------------+----------------+
diff --git a/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__thoughtspot_table_introspection.snap b/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__thoughtspot_table_introspection.snap
index 590bbb9bf4a54..631471621aeb1 100644
--- a/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__thoughtspot_table_introspection.snap
+++ b/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__thoughtspot_table_introspection.snap
@@ -2,23 +2,23 @@
source: cubesql/src/compile/test/test_introspection.rs
expression: "execute_query(r#\"\n SELECT *\n FROM (\n SELECT\n current_database() AS TABLE_CAT,\n n.nspname AS TABLE_SCHEM,\n c.relname AS TABLE_NAME,\n a.attname AS COLUMN_NAME,\n CAST(\n CASE typname\n WHEN 'text' THEN 12\n WHEN 'bit' THEN - 7\n WHEN 'bool' THEN - 7\n WHEN 'boolean' THEN - 7\n WHEN 'varchar' THEN 12\n WHEN 'character varying' THEN 12\n WHEN 'char' THEN 1\n WHEN '\"char\"' THEN 1\n WHEN 'character' THEN 1\n WHEN 'nchar' THEN 12\n WHEN 'bpchar' THEN 1\n WHEN 'nvarchar' THEN 12\n WHEN 'date' THEN 91\n WHEN 'time' THEN 92\n WHEN 'time without time zone' THEN 92\n WHEN 'timetz' THEN 2013\n WHEN 'time with time zone' THEN 2013\n WHEN 'timestamp' THEN 93\n WHEN 'timestamp without time zone' THEN 93\n WHEN 'timestamptz' THEN 2014\n WHEN 'timestamp with time zone' THEN 2014\n WHEN 'smallint' THEN 5\n WHEN 'int2' THEN 5\n WHEN 'integer' THEN 4\n WHEN 'int' THEN 4\n WHEN 'int4' THEN 4\n WHEN 'bigint' THEN - 5\n WHEN 'int8' THEN - 5\n WHEN 'decimal' THEN 3\n WHEN 'real' THEN 7\n WHEN 'float4' THEN 7\n WHEN 'double precision' THEN 8\n WHEN 'float8' THEN 8\n WHEN 'float' THEN 6\n WHEN 'numeric' THEN 2\n WHEN '_float4' THEN 2003\n WHEN '_aclitem' THEN 2003\n WHEN '_text' THEN 2003\n WHEN 'bytea' THEN - 2\n WHEN 'oid' THEN - 5\n WHEN 'name' THEN 12\n WHEN '_int4' THEN 2003\n WHEN '_int2' THEN 2003\n WHEN 'ARRAY' THEN 2003\n WHEN 'geometry' THEN - 4\n WHEN 'super' THEN - 16\n WHEN 'varbyte' THEN - 4\n WHEN 'geography' THEN - 4\n ELSE 1111\n END\n AS SMALLINT\n ) AS DATA_TYPE,\n t.typname AS TYPE_NAME,\n CASE typname\n WHEN 'int4' THEN 10\n WHEN 'bit' THEN 1\n WHEN 'bool' THEN 1\n WHEN 'varchar' THEN atttypmod - 4\n WHEN 'character varying' THEN atttypmod - 4\n WHEN 'char' THEN atttypmod - 4\n WHEN 'character' THEN atttypmod - 4\n WHEN 'nchar' THEN atttypmod - 4\n WHEN 'bpchar' THEN atttypmod - 4\n WHEN 'nvarchar' THEN atttypmod - 4\n WHEN 'date' THEN 13\n WHEN 'time' THEN 15\n WHEN 'time without time zone' THEN 15\n WHEN 'timetz' THEN 21\n WHEN 'time with time zone' THEN 21\n WHEN 'timestamp' THEN 29\n WHEN 'timestamp without time zone' THEN 29\n WHEN 'timestamptz' THEN 35\n WHEN 'timestamp with time zone' THEN 35\n WHEN 'smallint' THEN 5\n WHEN 'int2' THEN 5\n WHEN 'integer' THEN 10\n WHEN 'int' THEN 10\n WHEN 'int4' THEN 10\n WHEN 'bigint' THEN 19\n WHEN 'int8' THEN 19\n WHEN 'decimal' THEN (atttypmod - 4) >> 16\n WHEN 'real' THEN 8\n WHEN 'float4' THEN 8\n WHEN 'double precision' THEN 17\n WHEN 'float8' THEN 17\n WHEN 'float' THEN 17\n WHEN 'numeric' THEN (atttypmod - 4) >> 16\n WHEN '_float4' THEN 8\n WHEN 'oid' THEN 10\n WHEN '_int4' THEN 10\n WHEN '_int2' THEN 5\n WHEN 'geometry' THEN NULL\n WHEN 'super' THEN NULL\n WHEN 'varbyte' THEN NULL\n WHEN 'geography' THEN NULL\n ELSE 2147483647\n END AS COLUMN_SIZE,\n NULL AS BUFFER_LENGTH,\n CASE typname\n WHEN 'float4' THEN 8\n WHEN 'float8' THEN 17\n WHEN 'numeric' THEN (atttypmod - 4) & 65535\n WHEN 'time without time zone' THEN 6\n WHEN 'timetz' THEN 6\n WHEN 'time with time zone' THEN 6\n WHEN 'timestamp without time zone' THEN 6\n WHEN 'timestamp' THEN 6\n WHEN 'geometry' THEN NULL\n WHEN 'super' THEN NULL\n WHEN 'varbyte' THEN NULL\n WHEN 'geography' THEN NULL\n ELSE 0\n END AS DECIMAL_DIGITS,\n CASE typname\n WHEN 'varbyte' THEN 2\n WHEN 'geography' THEN 2\n ELSE 10\n END AS NUM_PREC_RADIX,\n CASE a.attnotnull OR (t.typtype = 'd' AND t.typnotnull)\n WHEN 'false' THEN 1\n WHEN NULL THEN 2\n ELSE 0\n END AS NULLABLE,\n dsc.description AS REMARKS,\n pg_catalog.pg_get_expr(def.adbin, def.adrelid) AS COLUMN_DEF,\n CAST(\n CASE 
typname\n WHEN 'text' THEN 12\n WHEN 'bit' THEN - 7\n WHEN 'bool' THEN - 7\n WHEN 'boolean' THEN - 7\n WHEN 'varchar' THEN 12\n WHEN 'character varying' THEN 12\n WHEN '\"char\"' THEN 1\n WHEN 'char' THEN 1\n WHEN 'character' THEN 1\n WHEN 'nchar' THEN 1\n WHEN 'bpchar' THEN 1\n WHEN 'nvarchar' THEN 12\n WHEN 'date' THEN 91\n WHEN 'time' THEN 92\n WHEN 'time without time zone' THEN 92\n WHEN 'timetz' THEN 2013\n WHEN 'time with time zone' THEN 2013\n WHEN 'timestamp with time zone' THEN 2014\n WHEN 'timestamp' THEN 93\n WHEN 'timestamp without time zone' THEN 93\n WHEN 'smallint' THEN 5\n WHEN 'int2' THEN 5\n WHEN 'integer' THEN 4\n WHEN 'int' THEN 4\n WHEN 'int4' THEN 4\n WHEN 'bigint' THEN - 5\n WHEN 'int8' THEN - 5\n WHEN 'decimal' THEN 3\n WHEN 'real' THEN 7\n WHEN 'float4' THEN 7\n WHEN 'double precision' THEN 8\n WHEN 'float8' THEN 8\n WHEN 'float' THEN 6\n WHEN 'numeric' THEN 2\n WHEN '_float4' THEN 2003\n WHEN 'timestamptz' THEN 2014\n WHEN 'timestamp with time zone' THEN 2014\n WHEN '_aclitem' THEN 2003\n WHEN '_text' THEN 2003\n WHEN 'bytea' THEN - 2\n WHEN 'oid' THEN - 5\n WHEN 'name' THEN 12\n WHEN '_int4' THEN 2003\n WHEN '_int2' THEN 2003\n WHEN 'ARRAY' THEN 2003\n WHEN 'geometry' THEN - 4\n WHEN 'super' THEN - 16\n WHEN 'varbyte' THEN - 4\n WHEN 'geography' THEN - 4 ELSE 1111\n END\n AS SMALLINT\n ) AS SQL_DATA_TYPE,\n CAST(NULL AS SMALLINT) AS SQL_DATETIME_SUB,\n CASE typname\n WHEN 'int4' THEN 10\n WHEN 'bit' THEN 1\n WHEN 'bool' THEN 1\n WHEN 'varchar' THEN atttypmod - 4\n WHEN 'character varying' THEN atttypmod - 4\n WHEN 'char' THEN atttypmod - 4\n WHEN 'character' THEN atttypmod - 4\n WHEN 'nchar' THEN atttypmod - 4\n WHEN 'bpchar' THEN atttypmod - 4\n WHEN 'nvarchar' THEN atttypmod - 4\n WHEN 'date' THEN 13\n WHEN 'time' THEN 15\n WHEN 'time without time zone' THEN 15\n WHEN 'timetz' THEN 21\n WHEN 'time with time zone' THEN 21\n WHEN 'timestamp' THEN 29\n WHEN 'timestamp without time zone' THEN 29\n WHEN 'timestamptz' THEN 35\n WHEN 'timestamp with time zone' THEN 35\n WHEN 'smallint' THEN 5\n WHEN 'int2' THEN 5\n WHEN 'integer' THEN 10\n WHEN 'int' THEN 10\n WHEN 'int4' THEN 10\n WHEN 'bigint' THEN 19\n WHEN 'int8' THEN 19\n WHEN 'decimal' THEN ((atttypmod - 4) >> 16) & 65535\n WHEN 'real' THEN 8\n WHEN 'float4' THEN 8\n WHEN 'double precision' THEN 17\n WHEN 'float8' THEN 17\n WHEN 'float' THEN 17\n WHEN 'numeric' THEN ((atttypmod - 4) >> 16) & 65535\n WHEN '_float4' THEN 8\n WHEN 'oid' THEN 10\n WHEN '_int4' THEN 10\n WHEN '_int2' THEN 5\n WHEN 'geometry' THEN NULL\n WHEN 'super' THEN NULL\n WHEN 'varbyte' THEN NULL\n WHEN 'geography' THEN NULL\n ELSE 2147483647\n END AS CHAR_OCTET_LENGTH,\n a.attnum AS ORDINAL_POSITION,\n CASE a.attnotnull OR (t.typtype = 'd' AND t.typnotnull)\n WHEN 'false' THEN 'YES'\n WHEN NULL THEN ''\n ELSE 'NO'\n END AS IS_NULLABLE,\n NULL AS SCOPE_CATALOG,\n NULL AS SCOPE_SCHEMA,\n NULL AS SCOPE_TABLE,\n t.typbasetype AS SOURCE_DATA_TYPE,\n CASE\n WHEN left(pg_catalog.pg_get_expr(def.adbin, def.adrelid), 16) = 'default_identity' THEN 'YES'\n ELSE 'NO'\n END AS IS_AUTOINCREMENT,\n false AS IS_GENERATEDCOLUMN\n FROM pg_catalog.pg_namespace AS n\n JOIN pg_catalog.pg_class AS c ON (c.relnamespace = n.oid)\n JOIN pg_catalog.pg_attribute AS a ON (a.attrelid = c.oid)\n JOIN pg_catalog.pg_type AS t ON (a.atttypid = t.oid)\n LEFT JOIN pg_catalog.pg_attrdef AS def ON (a.attrelid = def.adrelid AND a.attnum = def.adnum)\n LEFT JOIN pg_catalog.pg_description AS dsc ON (c.oid = dsc.objoid AND a.attnum = dsc.objsubid)\n LEFT JOIN pg_catalog.pg_class AS 
dc ON (dc.oid = dsc.classoid AND dc.relname = 'pg_class')\n LEFT JOIN pg_catalog.pg_namespace AS dn ON (dc.relnamespace = dn.oid AND dn.nspname = 'pg_catalog')\n WHERE\n a.attnum > 0 AND\n NOT a.attisdropped AND\n current_database() = 'cubedb' AND\n n.nspname LIKE 'public' AND\n c.relname LIKE 'KibanaSampleDataEcommerce'\n ORDER BY\n TABLE_SCHEM,\n c.relname,\n attnum\n ) AS t\n UNION ALL\n SELECT\n CAST(current_database() AS CHARACTER VARYING(128)) AS TABLE_CAT,\n CAST(schemaname AS CHARACTER VARYING(128)) AS table_schem,\n CAST(tablename AS CHARACTER VARYING(128)) AS table_name,\n CAST(columnname AS CHARACTER VARYING(128)) AS column_name,\n CAST(\n CASE columntype_rep\n WHEN 'text' THEN 12\n WHEN 'bit' THEN - 7\n WHEN 'bool' THEN - 7\n WHEN 'boolean' THEN - 7\n WHEN 'varchar' THEN 12\n WHEN 'character varying' THEN 12\n WHEN 'char' THEN 1\n WHEN 'character' THEN 1\n WHEN 'nchar' THEN 1\n WHEN 'bpchar' THEN 1\n WHEN 'nvarchar' THEN 12\n WHEN '\"char\"' THEN 1\n WHEN 'date' THEN 91\n WHEN 'time' THEN 92\n WHEN 'time without time zone' THEN 92\n WHEN 'timetz' THEN 2013\n WHEN 'time with time zone' THEN 2013\n WHEN 'timestamp' THEN 93\n WHEN 'timestamp without time zone' THEN 93\n WHEN 'timestamptz' THEN 2014\n WHEN 'timestamp with time zone' THEN 2014\n WHEN 'smallint' THEN 5\n WHEN 'int2' THEN 5\n WHEN 'integer' THEN 4\n WHEN 'int' THEN 4\n WHEN 'int4' THEN 4\n WHEN 'bigint' THEN - 5\n WHEN 'int8' THEN - 5\n WHEN 'decimal' THEN 3\n WHEN 'real' THEN 7\n WHEN 'float4' THEN 7\n WHEN 'double precision' THEN 8\n WHEN 'float8' THEN 8\n WHEN 'float' THEN 6\n WHEN 'numeric' THEN 2\n WHEN 'timestamptz' THEN 2014\n WHEN 'bytea' THEN - 2\n WHEN 'oid' THEN - 5\n WHEN 'name' THEN 12\n WHEN 'ARRAY' THEN 2003\n WHEN 'geometry' THEN - 4\n WHEN 'super' THEN - 16\n WHEN 'varbyte' THEN - 4\n WHEN 'geography' THEN - 4\n ELSE 1111\n END\n AS SMALLINT\n ) AS DATA_TYPE,\n COALESCE(\n NULL,\n CASE columntype\n WHEN 'boolean' THEN 'bool'\n WHEN 'character varying' THEN 'varchar'\n WHEN '\"char\"' THEN 'char'\n WHEN 'smallint' THEN 'int2'\n WHEN 'integer' THEN 'int4'\n WHEN 'bigint' THEN 'int8'\n WHEN 'real' THEN 'float4'\n WHEN 'double precision' THEN 'float8'\n WHEN 'time without time zone' THEN 'time'\n WHEN 'time with time zone' THEN 'timetz'\n WHEN 'timestamp without time zone' THEN 'timestamp'\n WHEN 'timestamp with time zone' THEN 'timestamptz'\n ELSE columntype\n END\n ) AS TYPE_NAME,\n CASE columntype_rep\n WHEN 'int4' THEN 10\n WHEN 'bit' THEN 1\n WHEN 'bool' THEN 1\n WHEN 'boolean' THEN 1\n WHEN 'varchar' THEN CAST(isnull(nullif(regexp_substr(columntype, '[0-9]+', 7), ''), '0') AS INT)\n WHEN 'character varying' THEN CAST(isnull(nullif(regexp_substr(columntype, '[0-9]+', 7), ''), '0') AS INT)\n WHEN 'char' THEN CAST(isnull(nullif(regexp_substr(columntype, '[0-9]+', 4), ''), '0') AS INT)\n WHEN 'character' THEN CAST(isnull(nullif(regexp_substr(columntype, '[0-9]+', 4), ''), '0') AS INT)\n WHEN 'nchar' THEN CAST(isnull(nullif(regexp_substr(columntype, '[0-9]+', 7), ''), '0') AS INT)\n WHEN 'bpchar' THEN CAST(isnull(nullif(regexp_substr(columntype, '[0-9]+', 7), ''), '0') AS INT)\n WHEN 'nvarchar' THEN CAST(isnull(nullif(regexp_substr(columntype, '[0-9]+', 7), ''), '0') AS INT)\n WHEN 'date' THEN 13\n WHEN 'time' THEN 15\n WHEN 'time without time zone' THEN 15\n WHEN 'timetz' THEN 21\n WHEN 'timestamp' THEN 29\n WHEN 'timestamp without time zone' THEN 29\n WHEN 'time with time zone' THEN 21\n WHEN 'timestamptz' THEN 35\n WHEN 'timestamp with time zone' THEN 35\n WHEN 'smallint' THEN 5\n WHEN 'int2' THEN 5\n 
WHEN 'integer' THEN 10\n WHEN 'int' THEN 10\n WHEN 'int4' THEN 10\n WHEN 'bigint' THEN 19\n WHEN 'int8' THEN 19\n WHEN 'decimal' THEN CAST(isnull(nullif(regexp_substr(columntype, '[0-9]+', 7), ''), '0') AS INT)\n WHEN 'real' THEN 8\n WHEN 'float4' THEN 8\n WHEN 'double precision' THEN 17\n WHEN 'float8' THEN 17\n WHEN 'float' THEN 17\n WHEN 'numeric' THEN CAST(isnull(nullif(regexp_substr(columntype, '[0-9]+', 7), ''), '0') AS INT)\n WHEN '_float4' THEN 8\n WHEN 'oid' THEN 10\n WHEN '_int4' THEN 10\n WHEN '_int2' THEN 5\n WHEN 'geometry' THEN NULL\n WHEN 'super' THEN NULL\n WHEN 'varbyte' THEN NULL\n WHEN 'geography' THEN NULL\n ELSE 2147483647\n END AS COLUMN_SIZE,\n NULL AS BUFFER_LENGTH,\n CASE REGEXP_REPLACE(columntype, '[()0-9,]')\n WHEN 'real' THEN 8\n WHEN 'float4' THEN 8\n WHEN 'double precision' THEN 17\n WHEN 'float8' THEN 17\n WHEN 'timestamp' THEN 6\n WHEN 'timestamp without time zone' THEN 6\n WHEN 'geometry' THEN NULL\n WHEN 'super' THEN NULL\n WHEN 'numeric' THEN CAST(regexp_substr(columntype, '[0-9]+', charindex(',', columntype)) AS INT)\n WHEN 'varbyte' THEN NULL\n WHEN 'geography' THEN NULL\n ELSE 0\n END AS DECIMAL_DIGITS,\n CASE columntype\n WHEN 'varbyte' THEN 2\n WHEN 'geography' THEN 2\n ELSE 10\n END AS NUM_PREC_RADIX,\n NULL AS NULLABLE,\n NULL AS REMARKS,\n NULL AS COLUMN_DEF,\n CAST(\n CASE columntype_rep\n WHEN 'text' THEN 12\n WHEN 'bit' THEN - 7\n WHEN 'bool' THEN - 7\n WHEN 'boolean' THEN - 7\n WHEN 'varchar' THEN 12\n WHEN 'character varying' THEN 12\n WHEN 'char' THEN 1\n WHEN 'character' THEN 1\n WHEN 'nchar' THEN 12\n WHEN 'bpchar' THEN 1\n WHEN 'nvarchar' THEN 12\n WHEN '\"char\"' THEN 1\n WHEN 'date' THEN 91\n WHEN 'time' THEN 92\n WHEN 'time without time zone' THEN 92\n WHEN 'timetz' THEN 2013\n WHEN 'time with time zone' THEN 2013\n WHEN 'timestamp' THEN 93\n WHEN 'timestamp without time zone' THEN 93\n WHEN 'timestamptz' THEN 2014\n WHEN 'timestamp with time zone' THEN 2014\n WHEN 'smallint' THEN 5\n WHEN 'int2' THEN 5\n WHEN 'integer' THEN 4\n WHEN 'int' THEN 4\n WHEN 'int4' THEN 4\n WHEN 'bigint' THEN - 5\n WHEN 'int8' THEN - 5\n WHEN 'decimal' THEN 3\n WHEN 'real' THEN 7\n WHEN 'float4' THEN 7\n WHEN 'double precision' THEN 8\n WHEN 'float8' THEN 8\n WHEN 'float' THEN 6\n WHEN 'numeric' THEN 2\n WHEN 'bytea' THEN - 2\n WHEN 'oid' THEN - 5\n WHEN 'name' THEN 12\n WHEN 'ARRAY' THEN 2003\n WHEN 'geometry' THEN - 4\n WHEN 'super' THEN - 16\n WHEN 'varbyte' THEN - 4\n WHEN 'geography' THEN - 4\n ELSE 1111\n END\n AS SMALLINT\n ) AS SQL_DATA_TYPE,\n CAST(NULL AS SMALLINT) AS SQL_DATETIME_SUB,\n CASE\n WHEN LEFT(columntype, 7) = 'varchar' THEN CAST(isnull(nullif(regexp_substr(columntype, '[0-9]+', 7), ''), '0') AS INT)\n WHEN LEFT(columntype, 4) = 'char' THEN CAST(isnull(nullif(regexp_substr(columntype, '[0-9]+', 4), ''), '0') AS INT)\n WHEN columntype = 'string' THEN 16383\n ELSE NULL\n END AS CHAR_OCTET_LENGTH,\n columnnum AS ORDINAL_POSITION,\n NULL AS IS_NULLABLE,\n NULL AS SCOPE_CATALOG,\n NULL AS SCOPE_SCHEMA,\n NULL AS SCOPE_TABLE,\n NULL AS SOURCE_DATA_TYPE,\n 'NO' AS IS_AUTOINCREMENT,\n 'NO' AS IS_GENERATEDCOLUMN\n FROM (\n SELECT\n schemaname,\n tablename,\n columnname,\n columntype AS columntype_rep,\n columntype,\n columnnum\n FROM get_late_binding_view_cols_unpacked\n ) AS lbv_columns\n WHERE\n true AND\n current_database() = 'cubedb' AND\n schemaname LIKE 'public' AND\n tablename LIKE 'KibanaSampleDataEcommerce'\n ;\"#.to_string(),\nDatabaseProtocol::PostgreSQL).await?"
---
-+-----------+-------------+---------------------------+--------------------+-----------+-----------+-------------+---------------+----------------+----------------+----------+---------+------------+---------------+------------------+-------------------+------------------+-------------+---------------+--------------+-------------+------------------+------------------+--------------------+
-| TABLE_CAT | TABLE_SCHEM | TABLE_NAME | COLUMN_NAME | DATA_TYPE | TYPE_NAME | COLUMN_SIZE | BUFFER_LENGTH | DECIMAL_DIGITS | NUM_PREC_RADIX | NULLABLE | REMARKS | COLUMN_DEF | SQL_DATA_TYPE | SQL_DATETIME_SUB | CHAR_OCTET_LENGTH | ORDINAL_POSITION | IS_NULLABLE | SCOPE_CATALOG | SCOPE_SCHEMA | SCOPE_TABLE | SOURCE_DATA_TYPE | IS_AUTOINCREMENT | IS_GENERATEDCOLUMN |
-+-----------+-------------+---------------------------+--------------------+-----------+-----------+-------------+---------------+----------------+----------------+----------+---------+------------+---------------+------------------+-------------------+------------------+-------------+---------------+--------------+-------------+------------------+------------------+--------------------+
-| cubedb | public | KibanaSampleDataEcommerce | count | -5 | int8 | 19 | NULL | 0 | 10 | NULL | NULL | NULL | -5 | NULL | 19 | 1 | NULL | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | maxPrice | 2 | numeric | -1 | NULL | 65531 | 10 | NULL | NULL | NULL | 2 | NULL | 65535 | 2 | NULL | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | sumPrice | 2 | numeric | -1 | NULL | 65531 | 10 | NULL | NULL | NULL | 2 | NULL | 65535 | 3 | NULL | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | minPrice | 2 | numeric | -1 | NULL | 65531 | 10 | NULL | NULL | NULL | 2 | NULL | 65535 | 4 | NULL | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | avgPrice | 2 | numeric | -1 | NULL | 65531 | 10 | NULL | NULL | NULL | 2 | NULL | 65535 | 5 | NULL | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | countDistinct | -5 | int8 | 19 | NULL | 0 | 10 | NULL | NULL | NULL | -5 | NULL | 19 | 6 | NULL | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | order_date | 93 | timestamp | 29 | NULL | 6 | 10 | 1 | NULL | NULL | 93 | NULL | 29 | 7 | YES | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | last_mod | 93 | timestamp | 29 | NULL | 6 | 10 | 1 | NULL | NULL | 93 | NULL | 29 | 8 | YES | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | customer_gender | 12 | text | 2147483647 | NULL | 0 | 10 | 1 | NULL | NULL | 12 | NULL | 2147483647 | 9 | YES | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | notes | 12 | text | 2147483647 | NULL | 0 | 10 | 1 | NULL | NULL | 12 | NULL | 2147483647 | 10 | YES | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | taxful_total_price | 2 | numeric | -1 | NULL | 65531 | 10 | 1 | NULL | NULL | 2 | NULL | 65535 | 11 | YES | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | has_subscription | -7 | bool | 1 | NULL | 0 | 10 | 1 | NULL | NULL | -7 | NULL | 1 | 12 | YES | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | is_male | -7 | bool | 1 | NULL | 0 | 10 | NULL | NULL | NULL | -7 | NULL | 1 | 13 | NULL | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | is_female | -7 | bool | 1 | NULL | 0 | 10 | NULL | NULL | NULL | -7 | NULL | 1 | 14 | NULL | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | __user | 12 | text | 2147483647 | NULL | 0 | 10 | 1 | NULL | NULL | 12 | NULL | 2147483647 | 15 | YES | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | __cubeJoinField | 12 | text | 2147483647 | NULL | 0 | 10 | 1 | NULL | NULL | 12 | NULL | 2147483647 | 16 | YES | NULL | NULL | NULL | 0 | NO | false |
-+-----------+-------------+---------------------------+--------------------+-----------+-----------+-------------+---------------+----------------+----------------+----------+---------+------------+---------------+------------------+-------------------+------------------+-------------+---------------+--------------+-------------+------------------+------------------+--------------------+
++-----------+-------------+---------------------------+--------------------+-----------+-----------+-------------+---------------+----------------+----------------+----------+-----------------------------------------------+------------+---------------+------------------+-------------------+------------------+-------------+---------------+--------------+-------------+------------------+------------------+--------------------+
+| TABLE_CAT | TABLE_SCHEM | TABLE_NAME | COLUMN_NAME | DATA_TYPE | TYPE_NAME | COLUMN_SIZE | BUFFER_LENGTH | DECIMAL_DIGITS | NUM_PREC_RADIX | NULLABLE | REMARKS | COLUMN_DEF | SQL_DATA_TYPE | SQL_DATETIME_SUB | CHAR_OCTET_LENGTH | ORDINAL_POSITION | IS_NULLABLE | SCOPE_CATALOG | SCOPE_SCHEMA | SCOPE_TABLE | SOURCE_DATA_TYPE | IS_AUTOINCREMENT | IS_GENERATEDCOLUMN |
++-----------+-------------+---------------------------+--------------------+-----------+-----------+-------------+---------------+----------------+----------------+----------+-----------------------------------------------+------------+---------------+------------------+-------------------+------------------+-------------+---------------+--------------+-------------+------------------+------------------+--------------------+
+| cubedb | public | KibanaSampleDataEcommerce | count | -5 | int8 | 19 | NULL | 0 | 10 | NULL | Events count | NULL | -5 | NULL | 19 | 1 | NULL | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | maxPrice | 2 | numeric | -1 | NULL | 65531 | 10 | NULL | NULL | NULL | 2 | NULL | 65535 | 2 | NULL | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | sumPrice | 2 | numeric | -1 | NULL | 65531 | 10 | NULL | NULL | NULL | 2 | NULL | 65535 | 3 | NULL | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | minPrice | 2 | numeric | -1 | NULL | 65531 | 10 | NULL | NULL | NULL | 2 | NULL | 65535 | 4 | NULL | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | avgPrice | 2 | numeric | -1 | NULL | 65531 | 10 | NULL | NULL | NULL | 2 | NULL | 65535 | 5 | NULL | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | countDistinct | -5 | int8 | 19 | NULL | 0 | 10 | NULL | NULL | NULL | -5 | NULL | 19 | 6 | NULL | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | order_date | 93 | timestamp | 29 | NULL | 6 | 10 | 1 | NULL | NULL | 93 | NULL | 29 | 7 | YES | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | last_mod | 93 | timestamp | 29 | NULL | 6 | 10 | 1 | NULL | NULL | 93 | NULL | 29 | 8 | YES | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | customer_gender | 12 | text | 2147483647 | NULL | 0 | 10 | 1 | Customer gender | NULL | 12 | NULL | 2147483647 | 9 | YES | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | notes | 12 | text | 2147483647 | NULL | 0 | 10 | 1 | NULL | NULL | 12 | NULL | 2147483647 | 10 | YES | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | taxful_total_price | 2 | numeric | -1 | NULL | 65531 | 10 | 1 | NULL | NULL | 2 | NULL | 65535 | 11 | YES | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | has_subscription | -7 | bool | 1 | NULL | 0 | 10 | 1 | NULL | NULL | -7 | NULL | 1 | 12 | YES | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | is_male | -7 | bool | 1 | NULL | 0 | 10 | NULL | Male users segment | NULL | -7 | NULL | 1 | 13 | NULL | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | is_female | -7 | bool | 1 | NULL | 0 | 10 | NULL | NULL | NULL | -7 | NULL | 1 | 14 | NULL | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | __user | 12 | text | 2147483647 | NULL | 0 | 10 | 1 | Virtual column for security context switching | NULL | 12 | NULL | 2147483647 | 15 | YES | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | __cubeJoinField | 12 | text | 2147483647 | NULL | 0 | 10 | 1 | Virtual column for joining cubes | NULL | 12 | NULL | 2147483647 | 16 | YES | NULL | NULL | NULL | 0 | NO | false |
++-----------+-------------+---------------------------+--------------------+-----------+-----------+-------------+---------------+----------------+----------------+----------+-----------------------------------------------+------------+---------------+------------------+-------------------+------------------+-------------+---------------+--------------+-------------+------------------+------------------+--------------------+
diff --git a/rust/cubesql/cubesql/src/compile/test/test_df_execution.rs b/rust/cubesql/cubesql/src/compile/test/test_df_execution.rs
new file mode 100644
index 0000000000000..2558517b6efe6
--- /dev/null
+++ b/rust/cubesql/cubesql/src/compile/test/test_df_execution.rs
@@ -0,0 +1,63 @@
+//! Tests that validate that complex but self-contained queries can be executed correctly by DataFusion
+
+use crate::compile::{
+ test::{execute_query, init_testing_logger},
+ DatabaseProtocol,
+};
+
+#[tokio::test]
+async fn test_join_with_coercion() {
+ init_testing_logger();
+
+ insta::assert_snapshot!(execute_query(
+ // language=PostgreSQL
+ r#"
+ WITH
+ t1 AS (
+ SELECT 1::int2 AS i1
+ ),
+ t2 AS (
+ SELECT 1::int4 AS i2
+ )
+ SELECT
+ *
+ FROM
+ t1 LEFT JOIN t2 ON (t1.i1 = t2.i2)
+ "#
+ .to_string(),
+ DatabaseProtocol::PostgreSQL,
+ )
+ .await
+ .unwrap());
+}
+
+#[tokio::test]
+async fn test_triple_join_with_coercion() {
+ init_testing_logger();
+
+ insta::assert_snapshot!(execute_query(
+ // language=PostgreSQL
+ r#"
+ WITH
+ t1 AS (
+ SELECT 1::int2 AS i1
+ ),
+ t2 AS (
+ SELECT 1::int4 AS i2
+ ),
+ t3 AS (
+ SELECT 1::int8 AS i3
+ )
+ SELECT
+ *
+ FROM
+ t1
+ LEFT JOIN t2 ON (t1.i1 = t2.i2)
+ LEFT JOIN t3 ON (t3.i3 = t2.i2)
+ "#
+ .to_string(),
+ DatabaseProtocol::PostgreSQL,
+ )
+ .await
+ .unwrap());
+}
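
The coercion these tests exercise is the standard numeric-widening rule: when join keys differ in integer width (`int2` vs. `int4` above), the planner casts the narrower side up before comparing. A minimal sketch of the same rule in plain Rust — a hypothetical helper, not the DataFusion implementation:

```rust
// int2 = int4 joins compare only after the narrower key is widened; the cast
// i32::from(i16) is lossless, mirroring the implicit cast the planner inserts.
fn join_keys_match(int2_key: i16, int4_key: i32) -> bool {
    i32::from(int2_key) == int4_key
}

fn main() {
    assert!(join_keys_match(1i16, 1i32)); // the t1.i1 = t2.i2 case above
    assert!(!join_keys_match(-1i16, 1i32));
}
```
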
diff --git a/rust/cubesql/cubesql/src/compile/test/test_user_change.rs b/rust/cubesql/cubesql/src/compile/test/test_user_change.rs
new file mode 100644
index 0000000000000..87585ad463298
--- /dev/null
+++ b/rust/cubesql/cubesql/src/compile/test/test_user_change.rs
@@ -0,0 +1,231 @@
+//! Tests that check user change via __user virtual column
+
+use cubeclient::models::{V1LoadRequestQuery, V1LoadRequestQueryFilterItem};
+use pretty_assertions::assert_eq;
+
+use crate::compile::{
+ convert_sql_to_cube_query,
+ test::{
+ convert_select_to_query_plan, get_test_session, get_test_tenant_ctx, init_testing_logger,
+ utils::LogicalPlanTestUtils, TestContext,
+ },
+ DatabaseProtocol, Rewriter,
+};
+
+#[tokio::test]
+async fn test_change_user_via_filter() {
+ init_testing_logger();
+
+ let query_plan = convert_select_to_query_plan(
+ "SELECT COUNT(*) as cnt FROM KibanaSampleDataEcommerce WHERE __user = 'gopher'".to_string(),
+ DatabaseProtocol::PostgreSQL,
+ )
+ .await;
+
+ let cube_scan = query_plan.as_logical_plan().find_cube_scan();
+
+ assert_eq!(cube_scan.options.change_user, Some("gopher".to_string()));
+
+ assert_eq!(
+ cube_scan.request,
+ V1LoadRequestQuery {
+ measures: Some(vec!["KibanaSampleDataEcommerce.count".to_string(),]),
+ segments: Some(vec![]),
+ dimensions: Some(vec![]),
+ time_dimensions: None,
+ order: None,
+ limit: None,
+ offset: None,
+ filters: None,
+ ungrouped: None,
+ }
+ )
+}
+
+#[tokio::test]
+async fn test_change_user_via_in_filter() {
+ init_testing_logger();
+
+ let query_plan = convert_select_to_query_plan(
+ "SELECT COUNT(*) as cnt FROM KibanaSampleDataEcommerce WHERE __user IN ('gopher')"
+ .to_string(),
+ DatabaseProtocol::PostgreSQL,
+ )
+ .await;
+
+ let cube_scan = query_plan.as_logical_plan().find_cube_scan();
+
+ assert_eq!(cube_scan.options.change_user, Some("gopher".to_string()));
+
+ assert_eq!(
+ cube_scan.request,
+ V1LoadRequestQuery {
+ measures: Some(vec!["KibanaSampleDataEcommerce.count".to_string(),]),
+ segments: Some(vec![]),
+ dimensions: Some(vec![]),
+ time_dimensions: None,
+ order: None,
+ limit: None,
+ offset: None,
+ filters: None,
+ ungrouped: None,
+ }
+ )
+}
+
+#[tokio::test]
+async fn test_change_user_via_in_filter_thoughtspot() {
+ init_testing_logger();
+
+ let query_plan = convert_select_to_query_plan(
+ r#"SELECT COUNT(*) as cnt FROM KibanaSampleDataEcommerce "ta_1" WHERE (LOWER("ta_1"."__user") IN ('gopher')) = TRUE"#.to_string(),
+ DatabaseProtocol::PostgreSQL,
+ )
+ .await;
+
+ let expected_request = V1LoadRequestQuery {
+ measures: Some(vec!["KibanaSampleDataEcommerce.count".to_string()]),
+ segments: Some(vec![]),
+ dimensions: Some(vec![]),
+ time_dimensions: None,
+ order: None,
+ limit: None,
+ offset: None,
+ filters: None,
+ ungrouped: None,
+ };
+
+ let cube_scan = query_plan.as_logical_plan().find_cube_scan();
+ assert_eq!(cube_scan.options.change_user, Some("gopher".to_string()));
+ assert_eq!(cube_scan.request, expected_request);
+
+ let query_plan = convert_select_to_query_plan(
+ r#"SELECT COUNT(*) as cnt FROM KibanaSampleDataEcommerce "ta_1" WHERE ((LOWER("ta_1"."__user") IN ('gopher') = TRUE) = TRUE)"#.to_string(),
+ DatabaseProtocol::PostgreSQL,
+ )
+ .await;
+
+ let cube_scan = query_plan.as_logical_plan().find_cube_scan();
+ assert_eq!(cube_scan.options.change_user, Some("gopher".to_string()));
+ assert_eq!(cube_scan.request, expected_request);
+}
+
+#[tokio::test]
+async fn test_change_user_via_filter_and() {
+ let query_plan = convert_select_to_query_plan(
+ "SELECT COUNT(*) as cnt FROM KibanaSampleDataEcommerce WHERE __user = 'gopher' AND customer_gender = 'male'".to_string(),
+ DatabaseProtocol::PostgreSQL,
+ )
+ .await;
+
+ let cube_scan = query_plan.as_logical_plan().find_cube_scan();
+
+ assert_eq!(cube_scan.options.change_user, Some("gopher".to_string()));
+
+ assert_eq!(
+ cube_scan.request,
+ V1LoadRequestQuery {
+ measures: Some(vec!["KibanaSampleDataEcommerce.count".to_string(),]),
+ segments: Some(vec![]),
+ dimensions: Some(vec![]),
+ time_dimensions: None,
+ order: None,
+ limit: None,
+ offset: None,
+ filters: Some(vec![V1LoadRequestQueryFilterItem {
+ member: Some("KibanaSampleDataEcommerce.customer_gender".to_string()),
+ operator: Some("equals".to_string()),
+ values: Some(vec!["male".to_string()]),
+ or: None,
+ and: None,
+ }]),
+ ungrouped: None,
+ }
+ )
+}
+
+#[tokio::test]
+async fn test_change_user_via_filter_or() {
+ // OR is not allowed for __user
+ let meta = get_test_tenant_ctx();
+ let query =
+ convert_sql_to_cube_query(
+ &"SELECT COUNT(*) as cnt FROM KibanaSampleDataEcommerce WHERE __user = 'gopher' OR customer_gender = 'male'".to_string(),
+ meta.clone(),
+ get_test_session(DatabaseProtocol::PostgreSQL, meta).await
+ ).await;
+
+    // TODO: We need to propagate the error to the result so we can assert on the message
+ query.unwrap_err();
+}
+
+#[tokio::test]
+async fn test_user_with_join() {
+ if !Rewriter::sql_push_down_enabled() {
+ return;
+ }
+ init_testing_logger();
+
+ let logical_plan = convert_select_to_query_plan(
+ "SELECT aliased.count as c, aliased.user_1 as u1, aliased.user_2 as u2 FROM (SELECT \"KibanaSampleDataEcommerce\".count as count, \"KibanaSampleDataEcommerce\".__user as user_1, Logs.__user as user_2 FROM \"KibanaSampleDataEcommerce\" CROSS JOIN Logs WHERE __user = 'foo') aliased".to_string(),
+ DatabaseProtocol::PostgreSQL,
+ )
+ .await
+ .as_logical_plan();
+
+ let cube_scan = logical_plan.find_cube_scan();
+ assert_eq!(
+ cube_scan.request,
+ V1LoadRequestQuery {
+ measures: Some(vec!["KibanaSampleDataEcommerce.count".to_string()]),
+ dimensions: Some(vec![]),
+ segments: Some(vec![]),
+ time_dimensions: None,
+ order: Some(vec![]),
+ limit: None,
+ offset: None,
+ filters: None,
+ ungrouped: Some(true),
+ }
+ );
+
+ assert_eq!(cube_scan.options.change_user, Some("foo".to_string()))
+}
+
+/// This should test that a query with CubeScanWrapper uses the proper change_user for both SQL generation and execution calls
+#[tokio::test]
+async fn test_user_change_sql_generation() {
+ if !Rewriter::sql_push_down_enabled() {
+ return;
+ }
+ init_testing_logger();
+
+ let context = TestContext::new(DatabaseProtocol::PostgreSQL).await;
+
+ context
+ .execute_query(
+ // language=PostgreSQL
+ r#"
+SELECT
+ COALESCE(customer_gender, 'N/A'),
+ AVG(avgPrice)
+FROM
+ KibanaSampleDataEcommerce
+WHERE
+ __user = 'gopher'
+GROUP BY 1
+;
+ "#
+ .to_string(),
+ )
+ .await
+ .expect_err("Test transport does not support load with SQL");
+
+ let load_calls = context.load_calls().await;
+ assert_eq!(load_calls.len(), 1);
+ let sql_query = load_calls[0].sql_query.as_ref().unwrap();
+    // This should be copied from the load meta into the query by TestConnectionTransport::sql
+    // Its presence means that SQL generation used the changed user
+ assert!(sql_query.sql.contains(r#""changeUser":"gopher""#));
+ assert_eq!(load_calls[0].meta.change_user(), Some("gopher".to_string()));
+}
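
The pattern asserted throughout this file: an equality (or single-value `IN`) filter on the virtual `__user` column never reaches the load request's filter list; it is lifted into `change_user` instead, while ordinary member filters pass through. A self-contained sketch of that extraction, with all types hypothetical stand-ins rather than the cubesql rewrite rules:

```rust
// Hypothetical sketch of the extraction these tests exercise: an equality filter
// on the virtual __user column is lifted out of the filter list and becomes the
// query's change_user; ordinary member filters stay in the load request.
#[derive(Debug)]
enum Filter {
    Eq { member: String, value: String },
}

fn extract_change_user(filters: Vec<Filter>) -> (Option<String>, Vec<Filter>) {
    let mut change_user = None;
    let mut rest = Vec::new();
    for filter in filters {
        match filter {
            Filter::Eq { ref member, ref value } if member.as_str() == "__user" => {
                change_user = Some(value.clone());
            }
            other => rest.push(other),
        }
    }
    (change_user, rest)
}

fn main() {
    let (user, rest) = extract_change_user(vec![
        Filter::Eq { member: "__user".into(), value: "gopher".into() },
        Filter::Eq { member: "customer_gender".into(), value: "male".into() },
    ]);
    assert_eq!(user, Some("gopher".to_string()));
    assert_eq!(rest.len(), 1); // customer_gender still goes to the load request
}
```
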
diff --git a/rust/cubesql/cubesql/src/config/mod.rs b/rust/cubesql/cubesql/src/config/mod.rs
index a314c5841c7f4..1f73528e00df1 100644
--- a/rust/cubesql/cubesql/src/config/mod.rs
+++ b/rust/cubesql/cubesql/src/config/mod.rs
@@ -237,11 +237,6 @@ impl ConfigObj for ConfigObjImpl {
}
}
-lazy_static! {
-    pub static ref TEST_LOGGING_INITIALIZED: tokio::sync::RwLock<bool> =
- tokio::sync::RwLock::new(false);
-}
-
impl Config {
pub fn default() -> Config {
Config {
diff --git a/rust/cubesql/cubesql/src/lib.rs b/rust/cubesql/cubesql/src/lib.rs
index 1252d2bc77e63..ae7f986c9256c 100644
--- a/rust/cubesql/cubesql/src/lib.rs
+++ b/rust/cubesql/cubesql/src/lib.rs
@@ -15,8 +15,6 @@
// trace_macros!(false);
-#[macro_use]
-extern crate lazy_static;
extern crate core;
pub mod compile;
diff --git a/rust/cubesql/cubesql/src/sql/session.rs b/rust/cubesql/cubesql/src/sql/session.rs
index a265722e2956f..75a4541ffce85 100644
--- a/rust/cubesql/cubesql/src/sql/session.rs
+++ b/rust/cubesql/cubesql/src/sql/session.rs
@@ -3,7 +3,7 @@ use log::trace;
use rand::Rng;
use std::{
collections::HashMap,
- sync::{Arc, RwLock as RwLockSync, Weak},
+ sync::{Arc, LazyLock, RwLock as RwLockSync, Weak},
time::{Duration, SystemTime},
};
use tokio_util::sync::CancellationToken;
@@ -23,8 +23,6 @@ use crate::{
RWLockAsync,
};
-extern crate lazy_static;
-
#[derive(Debug, Clone)]
pub struct SessionProperties {
    user: Option<String>,
@@ -37,10 +35,10 @@ impl SessionProperties {
}
}
-lazy_static! {
- static ref POSTGRES_DEFAULT_VARIABLES: DatabaseVariables = postgres_default_session_variables();
- static ref MYSQL_DEFAULT_VARIABLES: DatabaseVariables = mysql_default_session_variables();
-}
+static POSTGRES_DEFAULT_VARIABLES: LazyLock<DatabaseVariables> =
+ LazyLock::new(postgres_default_session_variables);
+static MYSQL_DEFAULT_VARIABLES: LazyLock<DatabaseVariables> =
+ LazyLock::new(mysql_default_session_variables);
#[derive(Debug)]
pub enum TransactionState {
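
The lazy_static-to-LazyLock change here (and in telemetry/mod.rs below) is mechanical: `std::sync::LazyLock`, stable since Rust 1.80, replaces both the macro and the extra crate. A minimal before/after sketch, assuming Rust 1.80+ and using a hypothetical stand-in for `DatabaseVariables`:

```rust
use std::sync::LazyLock;

// Hypothetical stand-in for DatabaseVariables to keep the sketch self-contained.
#[derive(Debug)]
struct Variables(Vec<(&'static str, &'static str)>);

fn default_variables() -> Variables {
    Variables(vec![("application_name", "cubesql")])
}

// Before: lazy_static! { static ref DEFAULTS: Variables = default_variables(); }
// After: LazyLock initializes on first deref; no macro, no extra dependency.
static DEFAULTS: LazyLock<Variables> = LazyLock::new(default_variables);

fn main() {
    // First access runs default_variables(); later accesses reuse the value.
    println!("{:?}", *DEFAULTS);
}
```
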
diff --git a/rust/cubesql/cubesql/src/telemetry/mod.rs b/rust/cubesql/cubesql/src/telemetry/mod.rs
index 76eaa795e8c8b..5fc813889dc39 100644
--- a/rust/cubesql/cubesql/src/telemetry/mod.rs
+++ b/rust/cubesql/cubesql/src/telemetry/mod.rs
@@ -1,12 +1,14 @@
use crate::{compile::DatabaseProtocolDetails, sql::SessionState, CubeError};
use arc_swap::ArcSwap;
use log::{Level, LevelFilter};
-use std::{collections::HashMap, fmt::Debug, sync::Arc};
-
-lazy_static! {
-    static ref REPORTER: ArcSwap<Box<dyn LogReporter>> =
- ArcSwap::from_pointee(Box::new(LocalReporter::new()));
-}
+use std::{
+ collections::HashMap,
+ fmt::Debug,
+ sync::{Arc, LazyLock},
+};
+
+static REPORTER: LazyLock<ArcSwap<Box<dyn LogReporter>>> =
+ LazyLock::new(|| ArcSwap::from_pointee(Box::new(LocalReporter::new())));
pub trait LogReporter: Send + Sync + Debug {
    fn log(&self, event: String, properties: HashMap<String, String>, level: Level);
diff --git a/rust/cubesql/cubesql/src/transport/ctx.rs b/rust/cubesql/cubesql/src/transport/ctx.rs
index ae8141c571fc5..a3ceee2a4c7f4 100644
--- a/rust/cubesql/cubesql/src/transport/ctx.rs
+++ b/rust/cubesql/cubesql/src/transport/ctx.rs
@@ -22,6 +22,7 @@ pub struct CubeMetaTable {
pub record_oid: u32,
pub array_handler_oid: u32,
pub name: String,
+    pub description: Option<String>,
     pub columns: Vec<CubeMetaColumn>,
}
@@ -29,6 +30,7 @@ pub struct CubeMetaTable {
pub struct CubeMetaColumn {
pub oid: u32,
pub name: String,
+    pub description: Option<String>,
pub column_type: ColumnType,
pub can_be_null: bool,
}
@@ -49,12 +51,14 @@ impl MetaContext {
record_oid: oid_iter.next().unwrap_or(0),
array_handler_oid: oid_iter.next().unwrap_or(0),
name: cube.name.clone(),
+ description: cube.description.clone(),
columns: cube
.get_columns()
.iter()
.map(|column| CubeMetaColumn {
oid: oid_iter.next().unwrap_or(0),
name: column.get_name().clone(),
+ description: column.get_description().clone(),
column_type: column.get_column_type().clone(),
can_be_null: column.sql_can_be_null(),
})
diff --git a/rust/cubesql/cubesql/src/transport/ext.rs b/rust/cubesql/cubesql/src/transport/ext.rs
index 654069f8f6092..98e2d823f64d4 100644
--- a/rust/cubesql/cubesql/src/transport/ext.rs
+++ b/rust/cubesql/cubesql/src/transport/ext.rs
@@ -199,7 +199,7 @@ impl V1CubeMetaExt for CubeMeta {
columns.push(CubeColumn {
member_name: measure.name.clone(),
name: measure.get_real_name(),
- description: None,
+ description: measure.description.clone(),
column_type: measure.get_sql_type(),
can_be_null: false,
});
@@ -209,7 +209,7 @@ impl V1CubeMetaExt for CubeMeta {
columns.push(CubeColumn {
member_name: dimension.name.clone(),
name: dimension.get_real_name(),
- description: None,
+ description: dimension.description.clone(),
column_type: dimension.get_sql_type(),
can_be_null: dimension.sql_can_be_null(),
});
@@ -219,7 +219,7 @@ impl V1CubeMetaExt for CubeMeta {
columns.push(CubeColumn {
member_name: segment.name.clone(),
name: segment.get_real_name(),
- description: None,
+ description: segment.description.clone(),
column_type: ColumnType::Boolean,
can_be_null: false,
});
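
With descriptions now carried through `CubeMetaColumn`, they surface wherever the SQL API serves catalog metadata — the `pg_description` fill and the populated REMARKS column in the getColumns snapshot above. A hedged sketch of reading them back over a Postgres connection with tokio-postgres; the host, port, and credentials are assumptions, not part of this diff:

```rust
use tokio_postgres::NoTls;

#[tokio::main]
async fn main() -> Result<(), tokio_postgres::Error> {
    let (client, connection) =
        tokio_postgres::connect("host=127.0.0.1 port=15432 user=user password=pass", NoTls)
            .await?;
    tokio::spawn(connection);

    // col_description(table_oid, column_number) reads pg_description, which this
    // release fills with cube and member descriptions.
    let rows = client
        .query(
            "SELECT a.attname, col_description(a.attrelid, a.attnum) \
             FROM pg_attribute a JOIN pg_class c ON c.oid = a.attrelid \
             WHERE c.relname = 'KibanaSampleDataEcommerce' AND a.attnum > 0",
            &[],
        )
        .await?;
    for row in rows {
        let column: &str = row.get(0);
        let description: Option<&str> = row.get(1);
        println!("{column}: {description:?}");
    }
    Ok(())
}
```
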
diff --git a/rust/cubesql/package.json b/rust/cubesql/package.json
index b0adb96c4ead5..9ca698045b1b7 100644
--- a/rust/cubesql/package.json
+++ b/rust/cubesql/package.json
@@ -1,6 +1,6 @@
{
"name": "@cubejs-backend/cubesql",
- "version": "0.35.79",
+ "version": "0.35.81",
"description": "SQL API for Cube as proxy over MySQL protocol.",
"engines": {
"node": "^12.0.0 || ^14.0.0 || >=16.0.0"
diff --git a/rust/cubesqlplanner/Cargo.lock b/rust/cubesqlplanner/Cargo.lock
index c5e3de6e44247..58acf427b2091 100644
--- a/rust/cubesqlplanner/Cargo.lock
+++ b/rust/cubesqlplanner/Cargo.lock
@@ -639,7 +639,7 @@ dependencies = [
[[package]]
name = "cube-ext"
version = "1.0.0"
-source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=400fa0d889a8a38ca69f36d5750dfb572fc6018e#400fa0d889a8a38ca69f36d5750dfb572fc6018e"
+source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=dcf3e4aa26fd112043ef26fa4a78db5dbd443c86#dcf3e4aa26fd112043ef26fa4a78db5dbd443c86"
dependencies = [
"arrow",
"chrono",
@@ -714,7 +714,6 @@ dependencies = [
"futures-util",
"hashbrown 0.14.5",
"itertools",
- "lazy_static",
"log",
"lru",
"minijinja",
@@ -726,7 +725,6 @@ dependencies = [
"regex",
"rust_decimal",
"serde",
- "serde_derive",
"serde_json",
"sha1_smol",
"sha2",
@@ -760,7 +758,7 @@ dependencies = [
[[package]]
name = "datafusion"
version = "7.0.0"
-source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=400fa0d889a8a38ca69f36d5750dfb572fc6018e#400fa0d889a8a38ca69f36d5750dfb572fc6018e"
+source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=dcf3e4aa26fd112043ef26fa4a78db5dbd443c86#dcf3e4aa26fd112043ef26fa4a78db5dbd443c86"
dependencies = [
"ahash 0.7.8",
"arrow",
@@ -793,7 +791,7 @@ dependencies = [
[[package]]
name = "datafusion-common"
version = "7.0.0"
-source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=400fa0d889a8a38ca69f36d5750dfb572fc6018e#400fa0d889a8a38ca69f36d5750dfb572fc6018e"
+source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=dcf3e4aa26fd112043ef26fa4a78db5dbd443c86#dcf3e4aa26fd112043ef26fa4a78db5dbd443c86"
dependencies = [
"arrow",
"ordered-float 2.10.1",
@@ -804,7 +802,7 @@ dependencies = [
[[package]]
name = "datafusion-data-access"
version = "1.0.0"
-source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=400fa0d889a8a38ca69f36d5750dfb572fc6018e#400fa0d889a8a38ca69f36d5750dfb572fc6018e"
+source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=dcf3e4aa26fd112043ef26fa4a78db5dbd443c86#dcf3e4aa26fd112043ef26fa4a78db5dbd443c86"
dependencies = [
"async-trait",
"chrono",
@@ -817,7 +815,7 @@ dependencies = [
[[package]]
name = "datafusion-expr"
version = "7.0.0"
-source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=400fa0d889a8a38ca69f36d5750dfb572fc6018e#400fa0d889a8a38ca69f36d5750dfb572fc6018e"
+source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=dcf3e4aa26fd112043ef26fa4a78db5dbd443c86#dcf3e4aa26fd112043ef26fa4a78db5dbd443c86"
dependencies = [
"ahash 0.7.8",
"arrow",
@@ -828,7 +826,7 @@ dependencies = [
[[package]]
name = "datafusion-physical-expr"
version = "7.0.0"
-source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=400fa0d889a8a38ca69f36d5750dfb572fc6018e#400fa0d889a8a38ca69f36d5750dfb572fc6018e"
+source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=dcf3e4aa26fd112043ef26fa4a78db5dbd443c86#dcf3e4aa26fd112043ef26fa4a78db5dbd443c86"
dependencies = [
"ahash 0.7.8",
"arrow",
diff --git a/rust/cubesqlplanner/cubesqlplanner/Cargo.toml b/rust/cubesqlplanner/cubesqlplanner/Cargo.toml
index 5d4218eb4f086..309e341b5f4fd 100644
--- a/rust/cubesqlplanner/cubesqlplanner/Cargo.toml
+++ b/rust/cubesqlplanner/cubesqlplanner/Cargo.toml
@@ -6,7 +6,7 @@ edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
-datafusion = { git = 'https://github.com/cube-js/arrow-datafusion.git', rev = "400fa0d889a8a38ca69f36d5750dfb572fc6018e", default-features = false, features = ["regex_expressions", "unicode_expressions"] }
+datafusion = { git = 'https://github.com/cube-js/arrow-datafusion.git', rev = "dcf3e4aa26fd112043ef26fa4a78db5dbd443c86", default-features = false, features = ["regex_expressions", "unicode_expressions"] }
tokio = { version = "^1.35", features = ["full", "rt", "tracing"] }
itertools = "0.10.2"
cubeclient = { path = "../../cubesql/cubeclient" }
diff --git a/rust/cubestore/CHANGELOG.md b/rust/cubestore/CHANGELOG.md
index e7957e3191801..59130e6a8c5fe 100644
--- a/rust/cubestore/CHANGELOG.md
+++ b/rust/cubestore/CHANGELOG.md
@@ -3,6 +3,17 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.81](https://github.com/cube-js/cube/compare/v0.35.80...v0.35.81) (2024-09-12)
+
+
+### Features
+
+* **cubestore:** Support date_bin function ([#8672](https://github.com/cube-js/cube/issues/8672)) ([64788de](https://github.com/cube-js/cube/commit/64788dea89b0244911518de203929fc5c773cd8f))
+
+
+
+
+
## [0.35.78](https://github.com/cube-js/cube/compare/v0.35.77...v0.35.78) (2024-08-27)
**Note:** Version bump only for package @cubejs-backend/cubestore
diff --git a/rust/cubestore/Cargo.lock b/rust/cubestore/Cargo.lock
index 6c1adcd965db7..0e31cc3d251af 100644
--- a/rust/cubestore/Cargo.lock
+++ b/rust/cubestore/Cargo.lock
@@ -54,6 +54,41 @@ version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234"
+[[package]]
+name = "aead"
+version = "0.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0"
+dependencies = [
+ "crypto-common",
+ "generic-array 0.14.4",
+]
+
+[[package]]
+name = "aes"
+version = "0.8.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0"
+dependencies = [
+ "cfg-if 1.0.0",
+ "cipher",
+ "cpufeatures 0.2.5",
+]
+
+[[package]]
+name = "aes-gcm"
+version = "0.10.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "831010a0f742e1209b3bcea8fab6a8e149051ba6099432c8cb2cc117dec3ead1"
+dependencies = [
+ "aead",
+ "aes",
+ "cipher",
+ "ctr",
+ "ghash",
+ "subtle",
+]
+
[[package]]
name = "ahash"
version = "0.7.4"
@@ -125,7 +160,7 @@ checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b"
[[package]]
name = "arrow"
version = "5.0.0"
-source = "git+https://github.com/cube-js/arrow-rs?branch=cube#ba5455c2b7bf693f67cab4a7616e3ce41fd97e8c"
+source = "git+https://github.com/cube-js/arrow-rs.git?branch=cube#9d6173caa1756600981f245c43197bb4d52dcac7"
dependencies = [
"bitflags 1.3.2",
"chrono",
@@ -667,6 +702,16 @@ dependencies = [
"half",
]
+[[package]]
+name = "cipher"
+version = "0.4.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad"
+dependencies = [
+ "crypto-common",
+ "inout",
+]
+
[[package]]
name = "clang-sys"
version = "1.7.0"
@@ -1017,6 +1062,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3"
dependencies = [
"generic-array 0.14.4",
+ "rand_core 0.6.3",
"typenum",
]
@@ -1052,6 +1098,15 @@ dependencies = [
"syn 1.0.107",
]
+[[package]]
+name = "ctr"
+version = "0.9.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0369ee1ad671834580515889b80f2ea915f23b8be8d0daa4bbaf2ac5c7590835"
+dependencies = [
+ "cipher",
+]
+
[[package]]
name = "cubedatasketches"
version = "0.1.0"
@@ -1146,7 +1201,7 @@ dependencies = [
"msql-srv",
"nanoid",
"num 0.3.1",
- "parquet-format",
+ "parquet-format 2.6.1",
"parse-size",
"paste",
"pin-project",
@@ -1269,7 +1324,7 @@ checksum = "c2e66c9d817f1720209181c316d28635c050fa304f9c79e47a520882661b7308"
[[package]]
name = "datafusion"
version = "4.0.0-SNAPSHOT"
-source = "git+https://github.com/cube-js/arrow-datafusion?branch=cube#11027d53f93c550d86e32ebf75e3a54cef6c8546"
+source = "git+https://github.com/cube-js/arrow-datafusion?branch=cube#faf7acb5a3f3d4976711f6faf76c7750b22b0eda"
dependencies = [
"ahash",
"arrow",
@@ -1841,6 +1896,16 @@ dependencies = [
"wasi 0.11.0+wasi-snapshot-preview1",
]
+[[package]]
+name = "ghash"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f0d8a4362ccb29cb0b265253fb0a2728f592895ee6854fd9bc13f2ffda266ff1"
+dependencies = [
+ "opaque-debug 0.3.0",
+ "polyval",
+]
+
[[package]]
name = "gimli"
version = "0.25.0"
@@ -2211,6 +2276,15 @@ dependencies = [
"unindent",
]
+[[package]]
+name = "inout"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5"
+dependencies = [
+ "generic-array 0.14.4",
+]
+
[[package]]
name = "instant"
version = "0.1.10"
@@ -2337,6 +2411,15 @@ dependencies = [
"simple_asn1",
]
+[[package]]
+name = "keccak"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654"
+dependencies = [
+ "cpufeatures 0.2.5",
+]
+
[[package]]
name = "kernel32-sys"
version = "0.2.2"
@@ -3238,8 +3321,9 @@ dependencies = [
[[package]]
name = "parquet"
version = "5.0.0"
-source = "git+https://github.com/cube-js/arrow-rs?branch=cube#ba5455c2b7bf693f67cab4a7616e3ce41fd97e8c"
+source = "git+https://github.com/cube-js/arrow-rs.git?branch=cube#9d6173caa1756600981f245c43197bb4d52dcac7"
dependencies = [
+ "aes-gcm",
"arrow",
"base64 0.13.0",
"brotli",
@@ -3248,8 +3332,10 @@ dependencies = [
"flate2",
"lz4",
"num-bigint 0.4.3",
- "parquet-format",
+ "parquet-format 4.0.0",
"rand 0.8.4",
+ "serde",
+ "sha3",
"snap",
"thrift",
"zstd",
@@ -3264,6 +3350,15 @@ dependencies = [
"thrift",
]
+[[package]]
+name = "parquet-format"
+version = "4.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1f0c06cdcd5460967c485f9c40a821746f5955ad81990533c7fae95dbd9bc0b5"
+dependencies = [
+ "thrift",
+]
+
[[package]]
name = "parse-size"
version = "1.0.0"
@@ -3431,6 +3526,18 @@ dependencies = [
"winapi 0.3.9",
]
+[[package]]
+name = "polyval"
+version = "0.6.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9d1fe60d06143b2430aa532c94cfe9e29783047f06c0d7fd359a9a51b729fa25"
+dependencies = [
+ "cfg-if 1.0.0",
+ "cpufeatures 0.2.5",
+ "opaque-debug 0.3.0",
+ "universal-hash",
+]
+
[[package]]
name = "powerfmt"
version = "0.2.0"
@@ -4438,6 +4545,16 @@ dependencies = [
"digest 0.10.7",
]
+[[package]]
+name = "sha3"
+version = "0.10.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60"
+dependencies = [
+ "digest 0.10.7",
+ "keccak",
+]
+
[[package]]
name = "shared_child"
version = "1.0.0"
@@ -5236,6 +5353,16 @@ version = "0.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "514672a55d7380da379785a4d70ca8386c8883ff7eaae877be4d2081cebe73d8"
+[[package]]
+name = "universal-hash"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fc1de2c688dc15305988b563c3854064043356019f97a4b46276fe734c4f07ea"
+dependencies = [
+ "crypto-common",
+ "subtle",
+]
+
[[package]]
name = "untrusted"
version = "0.7.1"
diff --git a/rust/cubestore/cubestore-sql-tests/src/tests.rs b/rust/cubestore/cubestore-sql-tests/src/tests.rs
index 8d918cd7926c8..7b662d463c4d8 100644
--- a/rust/cubestore/cubestore-sql-tests/src/tests.rs
+++ b/rust/cubestore/cubestore-sql-tests/src/tests.rs
@@ -203,6 +203,7 @@ pub fn sql_tests() -> Vec<(&'static str, TestFn)> {
t("float_index", float_index),
t("float_order", float_order),
t("date_add", date_add),
+ t("date_bin", date_bin),
t("now", now),
t("dump", dump),
t("unsorted_merge_assertion", unsorted_merge_assertion),
@@ -2515,7 +2516,7 @@ async fn create_table_with_location_and_hyperloglog(service: Box<dyn SqlClient>)
.await
.unwrap();
let _ = service
- .exec_query(&format!("CREATE TABLE hll.locations (id int, hll hyperloglog, hll_base hyperloglog) LOCATION {}",
+ .exec_query(&format!("CREATE TABLE hll.locations (id int, hll hyperloglog, hll_base hyperloglog) LOCATION {}",
paths
.into_iter()
.map(|p| format!("'{}'", p.to_string_lossy()))
@@ -2567,7 +2568,7 @@ async fn create_table_with_location_and_hyperloglog_postgress(service: Box<dyn SqlClient>) {
);
}
+async fn date_bin(service: Box<dyn SqlClient>) {
+ let check_fn = |interval, source, origin, expected| {
+ let expected = timestamp_from_string(expected).unwrap();
+ let service = &service;
+ async move {
+ let actual = service
+ .exec_query(&format!(
+ "SELECT DATE_BIN(INTERVAL '{}', CAST('{}' as TIMESTAMP), CAST('{}' as TIMESTAMP))",
+ interval, source, origin
+ ))
+ .await
+ .unwrap();
+ assert_eq!(to_rows(&actual), rows(&[expected]));
+ }
+ };
+
+ // Common dates
+ check_fn(
+ "1 month",
+ "2024-01-21T01:00:00Z",
+ "2024-01-01T01:00:00Z",
+ "2024-01-01T01:00:00Z",
+ )
+ .await;
+ check_fn(
+ "1 month",
+ "2023-11-21T01:00:00Z",
+ "2024-01-01T01:00:00Z",
+ "2023-11-01T01:00:00Z",
+ )
+ .await;
+ check_fn(
+ "1 month",
+ "2024-02-21T01:00:00Z",
+ "2024-01-01T01:00:00Z",
+ "2024-02-01T01:00:00Z",
+ )
+ .await;
+ check_fn(
+ "2 month",
+ "2024-04-25T01:00:00Z",
+ "2024-01-20T01:00:00Z",
+ "2024-03-20T01:00:00Z",
+ )
+ .await;
+ check_fn(
+ "2 month",
+ "2024-04-15T01:00:00Z",
+ "2024-01-20T01:00:00Z",
+ "2024-03-20T01:00:00Z",
+ )
+ .await;
+ check_fn(
+ "2 month",
+ "2024-05-25T01:00:00Z",
+ "2024-01-20T01:00:00Z",
+ "2024-05-20T01:00:00Z",
+ )
+ .await;
+ check_fn(
+ "2 month",
+ "2024-05-15T01:00:00Z",
+ "2024-01-20T01:00:00Z",
+ "2024-03-20T01:00:00Z",
+ )
+ .await;
+ check_fn(
+ "2 month",
+ "2023-11-25T01:00:00Z",
+ "2024-01-20T01:00:00Z",
+ "2023-11-20T01:00:00Z",
+ )
+ .await;
+ check_fn(
+ "2 month",
+ "2023-11-15T01:00:00Z",
+ "2024-01-20T01:00:00Z",
+ "2023-09-20T01:00:00Z",
+ )
+ .await;
+ check_fn(
+ "10 day",
+ "2024-01-25T01:00:00Z",
+ "2024-01-01T01:00:00Z",
+ "2024-01-21T01:00:00Z",
+ )
+ .await;
+ check_fn(
+ "10 day 2 hour 5 minute 10 second",
+ "2024-01-15T01:00:00Z",
+ "2024-01-01T01:00:00Z",
+ "2024-01-11T03:05:10.000Z",
+ )
+ .await;
+ check_fn(
+ "10 day 2 hour 5 minute 10 second",
+ "2024-01-30T01:00:00Z",
+ "2024-01-01T01:00:00Z",
+ "2024-01-21T05:10:20.000Z",
+ )
+ .await;
+ check_fn(
+ "10 day 2 hour 5 minute 10 second",
+ "2023-12-30T01:00:00Z",
+ "2024-01-01T01:00:00Z",
+ "2023-12-21T22:54:50.000Z",
+ )
+ .await;
+
+ // Nulls
+ let r = service
+ .exec_query(
+ "SELECT DATE_BIN(INTERVAL '1 month', CAST(NULL as timestamp), CAST('2023-12-30T01:00:00Z' AS timestamp))",
+ )
+ .await
+ .unwrap();
+ assert_eq!(to_rows(&r), rows(&[(NULL)]));
+
+ // Invalid number of args
+ service.exec_query("SELECT DATE_BIN(1)").await.unwrap_err();
+ service
+ .exec_query("SELECT DATE_BIN(1, 2)")
+ .await
+ .unwrap_err();
+ service
+ .exec_query("SELECT DATE_BIN(1, 2, 3, 4, 5)")
+ .await
+ .unwrap_err();
+
+ // Invalid types
+ service
+ .exec_query("SELECT DATE_BIN(NULL, CAST('2023-12-30T01:00:00Z' AS timestamp), CAST('2023-12-30T01:00:00Z' AS timestamp))")
+ .await
+ .unwrap_err();
+ service
+ .exec_query(
+ "SELECT DATE_BIN(INTERVAL '1 month', 1, CAST('2023-12-30T01:00:00Z' AS timestamp))",
+ )
+ .await
+ .unwrap_err();
+ service
+ .exec_query(
+ "SELECT DATE_BIN(INTERVAL '1 month', CAST('2023-12-30T01:00:00Z' AS timestamp), true)",
+ )
+ .await
+ .unwrap_err();
+
+ // Columnar data
+ service.exec_query("CREATE SCHEMA s").await.unwrap();
+ service
+ .exec_query("CREATE TABLE s.data(t timestamp)")
+ .await
+ .unwrap();
+ service
+ .exec_query(
+ "INSERT INTO s.data(t) VALUES ('2024-01-21T01:00:00Z'), ('2023-11-21T01:00:00Z'), ('2024-02-21T01:00:00Z'), (NULL)",
+ )
+ .await
+ .unwrap();
+ let r = service
+ .exec_query("SELECT DATE_BIN(INTERVAL '1 month', t, CAST('2024-01-01T01:00:00Z' AS timestamp)) FROM s.data ORDER BY 1")
+ .await
+ .unwrap();
+ assert_eq!(
+ to_rows(&r),
+ rows(&[
+ Some(timestamp_from_string("2023-11-01T01:00:00Z").unwrap()),
+ Some(timestamp_from_string("2024-01-01T01:00:00Z").unwrap()),
+ Some(timestamp_from_string("2024-02-01T01:00:00Z").unwrap()),
+ None,
+ ]),
+ );
+}
+
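
The expected values above follow the usual DATE_BIN contract: `source` is snapped down to the start of the interval-wide bin anchored at `origin`, with timestamps before the origin still rounding downward (see the 2023 cases). A self-contained sketch of that arithmetic for fixed-width intervals using chrono; month-sized intervals need calendar math and are outside this sketch, and the helper name is hypothetical:

```rust
use chrono::{DateTime, Duration, Utc};

// date_bin for fixed-width intervals: snap `source` down to the bin anchored at
// `origin`. Euclidean division makes pre-origin timestamps round downward too.
fn date_bin_fixed(interval: Duration, source: DateTime<Utc>, origin: DateTime<Utc>) -> DateTime<Utc> {
    let width = interval.num_seconds();
    let offset = (source - origin).num_seconds();
    let bins = offset.div_euclid(width);
    origin + Duration::seconds(bins * width)
}

fn main() {
    let origin = "2024-01-01T01:00:00Z".parse::<DateTime<Utc>>().unwrap();
    let source = "2024-01-25T01:00:00Z".parse::<DateTime<Utc>>().unwrap();
    // 10-day bins anchored at Jan 1 01:00 put Jan 25 into the bin starting
    // Jan 21 01:00, matching the "10 day" case in the test above.
    assert_eq!(
        date_bin_fixed(Duration::days(10), source, origin).to_rfc3339(),
        "2024-01-21T01:00:00+00:00"
    );
}
```
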
 async fn unsorted_merge_assertion(service: Box<dyn SqlClient>) {
service.exec_query("CREATE SCHEMA s").await.unwrap();
service
@@ -6014,7 +6189,7 @@ async fn unique_key_and_multi_partitions(service: Box<dyn SqlClient>) {
.exec_query(
"SELECT a, b FROM (
SELECT * FROM test.unique_parts1
- UNION ALL
+ UNION ALL
SELECT * FROM test.unique_parts2
) `tt` GROUP BY 1, 2 ORDER BY 1, 2 LIMIT 100",
)
@@ -6074,7 +6249,7 @@ async fn unique_key_and_multi_partitions_hash_aggregate(service: Box<dyn SqlClient>) {
service
.exec_query("CREATE TABLE s.Orders(a int, b int, c int, a_sum int, a_max int, a_min int, a_merge HYPERLOGLOG)
AGGREGATIONS(sum(a_sum), max(a_max), min(a_min), merge(a_merge))
- INDEX reg_index (a, b)
+ INDEX reg_index (a, b)
AGGREGATE INDEX aggr_index (a, b)
")
.await
@@ -6239,7 +6414,7 @@ async fn aggregate_index(service: Box<dyn SqlClient>) {
.exec_query(
"CREATE TABLE s.Orders(a int, b int, c int, a_sum int, a_max int, a_min int)
AGGREGATIONS(sum(a_sum), max(a_max), min(a_min))
- INDEX reg_index (a, b)
+ INDEX reg_index (a, b)
AGGREGATE INDEX aggr_index (a, b)
",
)
@@ -6335,7 +6510,7 @@ async fn aggregate_index_with_hll_bytes(service: Box<dyn SqlClient>) {
.exec_query(
"CREATE TABLE s.Orders(a int, b int, hll bytes)
AGGREGATIONS(merge(hll))
- AGGREGATE INDEX agg_index (a, b)
+ AGGREGATE INDEX agg_index (a, b)
",
)
.await
@@ -7098,9 +7273,9 @@ async fn limit_pushdown_group(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT id, SUM(n) FROM (
- SELECT * FROM foo.pushdown1
+ SELECT * FROM foo.pushdown1
union all
- SELECT * FROM foo.pushdown2
+ SELECT * FROM foo.pushdown2
) as `tb` GROUP BY 1 LIMIT 3",
None,
false,
@@ -7162,9 +7337,9 @@ async fn limit_pushdown_group_order(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT a `aa`, b, SUM(n) FROM (
- SELECT * FROM foo.pushdown_group1
+ SELECT * FROM foo.pushdown_group1
union all
- SELECT * FROM foo.pushdown_group2
+ SELECT * FROM foo.pushdown_group2
) as `tb` GROUP BY 1, 2 ORDER BY 1 LIMIT 3",
Some("ind1"),
true,
@@ -7198,9 +7373,9 @@ async fn limit_pushdown_group_order(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT a, b, SUM(n) FROM (
- SELECT * FROM foo.pushdown_group1
+ SELECT * FROM foo.pushdown_group1
union all
- SELECT * FROM foo.pushdown_group2
+ SELECT * FROM foo.pushdown_group2
) as `tb` GROUP BY 1, 2 ORDER BY 1, 2 LIMIT 3",
Some("ind1"),
true,
@@ -7235,9 +7410,9 @@ async fn limit_pushdown_group_order(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT a, b, SUM(n) FROM (
- SELECT * FROM foo.pushdown_group1
+ SELECT * FROM foo.pushdown_group1
union all
- SELECT * FROM foo.pushdown_group2
+ SELECT * FROM foo.pushdown_group2
) as `tb` GROUP BY 1, 2 ORDER BY 2 LIMIT 3",
Some("ind1"),
false,
@@ -7270,9 +7445,9 @@ async fn limit_pushdown_group_order(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT a, b, SUM(n) FROM (
- SELECT * FROM foo.pushdown_group1
+ SELECT * FROM foo.pushdown_group1
union all
- SELECT * FROM foo.pushdown_group2
+ SELECT * FROM foo.pushdown_group2
) as `tb` GROUP BY 1, 2 ORDER BY 1, 2 DESC LIMIT 3",
Some("ind1"),
false,
@@ -7305,9 +7480,9 @@ async fn limit_pushdown_group_order(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT a, b, SUM(n) FROM (
- SELECT * FROM foo.pushdown_group1
+ SELECT * FROM foo.pushdown_group1
union all
- SELECT * FROM foo.pushdown_group2
+ SELECT * FROM foo.pushdown_group2
) as `tb` GROUP BY 1, 2 ORDER BY 1 DESC, 2 DESC LIMIT 3",
Some("ind1"),
true,
@@ -7340,9 +7515,9 @@ async fn limit_pushdown_group_order(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT a, b, SUM(n) FROM (
- SELECT * FROM foo.pushdown_group1
+ SELECT * FROM foo.pushdown_group1
union all
- SELECT * FROM foo.pushdown_group2
+ SELECT * FROM foo.pushdown_group2
) as `tb` GROUP BY 1, 2 ORDER BY 1 DESC LIMIT 3",
Some("ind1"),
true,
@@ -7376,9 +7551,9 @@ async fn limit_pushdown_group_order(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT a, b, n FROM (
- SELECT * FROM foo.pushdown_group1
+ SELECT * FROM foo.pushdown_group1
union all
- SELECT * FROM foo.pushdown_group2
+ SELECT * FROM foo.pushdown_group2
) as `tb` GROUP BY 1, 2,3 ORDER BY 1 DESC, 2 DESC, 3 DESC LIMIT 3",
Some("default"),
true,
@@ -7412,9 +7587,9 @@ async fn limit_pushdown_group_order(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT b, SUM(n) FROM (
- SELECT * FROM foo.pushdown_group1
+ SELECT * FROM foo.pushdown_group1
union all
- SELECT * FROM foo.pushdown_group2
+ SELECT * FROM foo.pushdown_group2
) as `tb` GROUP BY 1 ORDER BY 1 LIMIT 3",
Some("ind2"),
true,
@@ -7436,9 +7611,9 @@ async fn limit_pushdown_group_order(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT a, b, n FROM (
- SELECT * FROM foo.pushdown_group1
+ SELECT * FROM foo.pushdown_group1
union all
- SELECT * FROM foo.pushdown_group2
+ SELECT * FROM foo.pushdown_group2
) as `tb` GROUP BY 1, 2, 3 ORDER BY 1, 2 LIMIT 3",
Some("default"),
true,
@@ -7470,9 +7645,9 @@ async fn limit_pushdown_group_order(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT a, b, n FROM (
- SELECT * FROM foo.pushdown_group1
+ SELECT * FROM foo.pushdown_group1
union all
- SELECT * FROM foo.pushdown_group2
+ SELECT * FROM foo.pushdown_group2
) as `tb` GROUP BY 3, 1, 2 ORDER BY 1, 2 LIMIT 3",
Some("default"),
true,
@@ -7546,10 +7721,10 @@ async fn limit_pushdown_group_where_order(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT a, b, SUM(c) FROM (
- SELECT * FROM foo.pushdown_where_group1
+ SELECT * FROM foo.pushdown_where_group1
union all
- SELECT * FROM foo.pushdown_where_group2
- ) as `tb`
+ SELECT * FROM foo.pushdown_where_group2
+ ) as `tb`
WHERE a = 12
GROUP BY 1, 2 ORDER BY 2 LIMIT 3",
Some("ind1"),
@@ -7583,10 +7758,10 @@ async fn limit_pushdown_group_where_order(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT a, b, SUM(c) FROM (
- SELECT * FROM foo.pushdown_where_group1
+ SELECT * FROM foo.pushdown_where_group1
union all
- SELECT * FROM foo.pushdown_where_group2
- ) as `tb`
+ SELECT * FROM foo.pushdown_where_group2
+ ) as `tb`
WHERE a = 12
GROUP BY 1, 2 ORDER BY 2 DESC LIMIT 3",
Some("ind1"),
@@ -7620,10 +7795,10 @@ async fn limit_pushdown_group_where_order(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT a, c FROM (
- SELECT * FROM foo.pushdown_where_group1
+ SELECT * FROM foo.pushdown_where_group1
union all
- SELECT * FROM foo.pushdown_where_group2
- ) as `tb`
+ SELECT * FROM foo.pushdown_where_group2
+ ) as `tb`
WHERE b = 18
GROUP BY a, b, c ORDER BY a, c LIMIT 3",
Some("ind1"),
@@ -7645,10 +7820,10 @@ async fn limit_pushdown_group_where_order(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT a, c FROM (
- SELECT * FROM foo.pushdown_where_group1
+ SELECT * FROM foo.pushdown_where_group1
union all
- SELECT * FROM foo.pushdown_where_group2
- ) as `tb`
+ SELECT * FROM foo.pushdown_where_group2
+ ) as `tb`
WHERE b = 18
GROUP BY a, b, c ORDER BY a DESC, c LIMIT 3",
Some("ind1"),
@@ -7670,10 +7845,10 @@ async fn limit_pushdown_group_where_order(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT a, c FROM (
- SELECT * FROM foo.pushdown_where_group1
+ SELECT * FROM foo.pushdown_where_group1
union all
- SELECT * FROM foo.pushdown_where_group2
- ) as `tb`
+ SELECT * FROM foo.pushdown_where_group2
+ ) as `tb`
WHERE b = 18
GROUP BY a, b, c ORDER BY a DESC, c DESC LIMIT 3",
Some("ind1"),
@@ -7696,10 +7871,10 @@ async fn limit_pushdown_group_where_order(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT c FROM (
- SELECT * FROM foo.pushdown_where_group1
+ SELECT * FROM foo.pushdown_where_group1
union all
- SELECT * FROM foo.pushdown_where_group2
- ) as `tb`
+ SELECT * FROM foo.pushdown_where_group2
+ ) as `tb`
WHERE a = 11 and b = 18
GROUP BY a, b, c ORDER BY c LIMIT 3",
Some("ind1"),
@@ -7720,10 +7895,10 @@ async fn limit_pushdown_group_where_order(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT c FROM (
- SELECT * FROM foo.pushdown_where_group1
+ SELECT * FROM foo.pushdown_where_group1
union all
- SELECT * FROM foo.pushdown_where_group2
- ) as `tb`
+ SELECT * FROM foo.pushdown_where_group2
+ ) as `tb`
WHERE a = 11 and b = 18
GROUP BY a, b, c ORDER BY c DESC LIMIT 3",
Some("ind1"),
@@ -7744,10 +7919,10 @@ async fn limit_pushdown_group_where_order(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT c FROM (
- SELECT * FROM foo.pushdown_where_group1
+ SELECT * FROM foo.pushdown_where_group1
union all
- SELECT * FROM foo.pushdown_where_group2
- ) as `tb`
+ SELECT * FROM foo.pushdown_where_group2
+ ) as `tb`
WHERE a = 11 and b = 18
GROUP BY b, a, c ORDER BY c LIMIT 3",
Some("ind1"),
@@ -7769,10 +7944,10 @@ async fn limit_pushdown_group_where_order(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT c FROM (
- SELECT * FROM foo.pushdown_where_group1
+ SELECT * FROM foo.pushdown_where_group1
union all
- SELECT * FROM foo.pushdown_where_group2
- ) as `tb`
+ SELECT * FROM foo.pushdown_where_group2
+ ) as `tb`
WHERE a >= 11 and a < 12 and b = 18
GROUP BY a, b, c ORDER BY c LIMIT 3",
Some("ind1"),
@@ -7794,10 +7969,10 @@ async fn limit_pushdown_group_where_order(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT b FROM (
- SELECT * FROM foo.pushdown_where_group1
+ SELECT * FROM foo.pushdown_where_group1
union all
- SELECT * FROM foo.pushdown_where_group2
- ) as `tb`
+ SELECT * FROM foo.pushdown_where_group2
+ ) as `tb`
WHERE c = 11
GROUP BY b, c ORDER BY b LIMIT 3",
Some("ind2"),
@@ -7855,10 +8030,10 @@ async fn limit_pushdown_without_group(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT a aaa, b bbbb, c FROM (
- SELECT * FROM foo.pushdown_where_group1
+ SELECT * FROM foo.pushdown_where_group1
union all
- SELECT * FROM foo.pushdown_where_group2
- ) as `tb`
+ SELECT * FROM foo.pushdown_where_group2
+ ) as `tb`
WHERE a = 12
ORDER BY 2 LIMIT 4",
Some("ind1"),
@@ -7898,10 +8073,10 @@ async fn limit_pushdown_without_group(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT a, b, c FROM (
- SELECT * FROM foo.pushdown_where_group1
+ SELECT * FROM foo.pushdown_where_group1
union all
- SELECT * FROM foo.pushdown_where_group2
- ) as `tb`
+ SELECT * FROM foo.pushdown_where_group2
+ ) as `tb`
ORDER BY 3 LIMIT 3",
Some("ind2"),
true,
@@ -7935,10 +8110,10 @@ async fn limit_pushdown_without_group(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT a, b, c FROM (
- SELECT * FROM foo.pushdown_where_group1
+ SELECT * FROM foo.pushdown_where_group1
union all
- SELECT * FROM foo.pushdown_where_group2
- ) as `tb`
+ SELECT * FROM foo.pushdown_where_group2
+ ) as `tb`
ORDER BY 3 DESC LIMIT 3",
Some("ind2"),
true,
@@ -7972,10 +8147,10 @@ async fn limit_pushdown_without_group(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT a, b, c FROM (
- SELECT * FROM foo.pushdown_where_group1
+ SELECT * FROM foo.pushdown_where_group1
union all
- SELECT * FROM foo.pushdown_where_group2
- ) as `tb`
+ SELECT * FROM foo.pushdown_where_group2
+ ) as `tb`
ORDER BY 1, 2 LIMIT 3",
Some("ind1"),
true,
@@ -8008,10 +8183,10 @@ async fn limit_pushdown_without_group(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT a, b, c FROM (
- SELECT * FROM foo.pushdown_where_group1
+ SELECT * FROM foo.pushdown_where_group1
union all
- SELECT * FROM foo.pushdown_where_group2
- ) as `tb`
+ SELECT * FROM foo.pushdown_where_group2
+ ) as `tb`
ORDER BY 1, 2 LIMIT 2 OFFSET 1",
Some("ind1"),
true,
@@ -8039,10 +8214,10 @@ async fn limit_pushdown_without_group(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT a, b, c FROM (
- SELECT * FROM foo.pushdown_where_group1
+ SELECT * FROM foo.pushdown_where_group1
union all
- SELECT * FROM foo.pushdown_where_group2
- ) as `tb`
+ SELECT * FROM foo.pushdown_where_group2
+ ) as `tb`
WHERE b = 20
ORDER BY 1 LIMIT 3",
Some("ind1"),
@@ -8071,10 +8246,10 @@ async fn limit_pushdown_without_group(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT a, b, c FROM (
- SELECT * FROM foo.pushdown_where_group1
+ SELECT * FROM foo.pushdown_where_group1
union all
- SELECT * FROM foo.pushdown_where_group2
- ) as `tb`
+ SELECT * FROM foo.pushdown_where_group2
+ ) as `tb`
WHERE b = 20
ORDER BY 1, 3 LIMIT 3",
Some("ind1"),
@@ -8145,10 +8320,10 @@ async fn limit_pushdown_without_group_resort(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT a aaa, b bbbb, c FROM (
- SELECT * FROM foo.pushdown_where_group1
+ SELECT * FROM foo.pushdown_where_group1
union all
- SELECT * FROM foo.pushdown_where_group2
- ) as `tb`
+ SELECT * FROM foo.pushdown_where_group2
+ ) as `tb`
WHERE a = 12
ORDER BY 2 desc LIMIT 4",
Some("ind1"),
@@ -8188,10 +8363,10 @@ async fn limit_pushdown_without_group_resort(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT a aaa, b bbbb, c FROM (
- SELECT * FROM foo.pushdown_where_group1
+ SELECT * FROM foo.pushdown_where_group1
union all
- SELECT * FROM foo.pushdown_where_group2
- ) as `tb`
+ SELECT * FROM foo.pushdown_where_group2
+ ) as `tb`
ORDER BY 1 desc, 2 desc LIMIT 3",
Some("ind1"),
true,
@@ -8225,10 +8400,10 @@ async fn limit_pushdown_without_group_resort(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT a, b, c FROM (
- SELECT * FROM foo.pushdown_where_group1
+ SELECT * FROM foo.pushdown_where_group1
union all
- SELECT * FROM foo.pushdown_where_group2
- ) as `tb`
+ SELECT * FROM foo.pushdown_where_group2
+ ) as `tb`
ORDER BY 2 LIMIT 2",
Some("ind1"),
false,
@@ -8298,10 +8473,10 @@ async fn limit_pushdown_unique_key(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT a, b, c FROM (
- SELECT * FROM foo.pushdown_where_group1
+ SELECT * FROM foo.pushdown_where_group1
union all
- SELECT * FROM foo.pushdown_where_group2
- ) as `tb`
+ SELECT * FROM foo.pushdown_where_group2
+ ) as `tb`
WHERE a = 12
ORDER BY 2 LIMIT 4",
Some("ind1"),
@@ -8336,10 +8511,10 @@ async fn limit_pushdown_unique_key(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT a, b, c FROM (
- SELECT * FROM foo.pushdown_where_group1
+ SELECT * FROM foo.pushdown_where_group1
union all
- SELECT * FROM foo.pushdown_where_group2
- ) as `tb`
+ SELECT * FROM foo.pushdown_where_group2
+ ) as `tb`
ORDER BY 3 LIMIT 3",
Some("ind1"),
false,
@@ -8372,10 +8547,10 @@ async fn limit_pushdown_unique_key(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT c FROM (
- SELECT * FROM foo.pushdown_where_group1
+ SELECT * FROM foo.pushdown_where_group1
union all
- SELECT * FROM foo.pushdown_where_group2
- ) as `tb`
+ SELECT * FROM foo.pushdown_where_group2
+ ) as `tb`
WHERE a = 11 and b = 18
GROUP BY b, a, c ORDER BY c LIMIT 3",
Some("ind1"),
@@ -8390,10 +8565,10 @@ async fn limit_pushdown_unique_key(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT a, b, SUM(c) FROM (
- SELECT * FROM foo.pushdown_where_group1
+ SELECT * FROM foo.pushdown_where_group1
union all
- SELECT * FROM foo.pushdown_where_group2
- ) as `tb`
+ SELECT * FROM foo.pushdown_where_group2
+ ) as `tb`
WHERE a = 12
GROUP BY 1, 2 ORDER BY 2 LIMIT 3",
Some("ind1"),
@@ -8428,10 +8603,10 @@ async fn limit_pushdown_unique_key(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT a, b, SUM(c) FROM (
- SELECT * FROM foo.pushdown_where_group1
+ SELECT * FROM foo.pushdown_where_group1
union all
- SELECT * FROM foo.pushdown_where_group2
- ) as `tb`
+ SELECT * FROM foo.pushdown_where_group2
+ ) as `tb`
GROUP BY 1, 2 ORDER BY 2 LIMIT 3",
Some("ind1"),
false,
@@ -8464,10 +8639,10 @@ async fn limit_pushdown_unique_key(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT a, b, SUM(c) FROM (
- SELECT * FROM foo.pushdown_where_group1
+ SELECT * FROM foo.pushdown_where_group1
union all
- SELECT * FROM foo.pushdown_where_group2
- ) as `tb`
+ SELECT * FROM foo.pushdown_where_group2
+ ) as `tb`
GROUP BY 1, 2 ORDER BY 1 LIMIT 3",
Some("ind1"),
true,
diff --git a/rust/cubestore/cubestore/src/config/mod.rs b/rust/cubestore/cubestore/src/config/mod.rs
index 7e4597a5a0e49..ed9d87caff2df 100644
--- a/rust/cubestore/cubestore/src/config/mod.rs
+++ b/rust/cubestore/cubestore/src/config/mod.rs
@@ -32,11 +32,15 @@ use crate::remotefs::{LocalDirRemoteFs, RemoteFs};
use crate::scheduler::SchedulerImpl;
use crate::sql::cache::SqlResultCache;
use crate::sql::{SqlService, SqlServiceImpl};
+use crate::sql::{TableExtensionService, TableExtensionServiceImpl};
use crate::store::compaction::{CompactionService, CompactionServiceImpl};
use crate::store::{ChunkDataStore, ChunkStore, WALDataStore, WALStore};
use crate::streaming::kafka::{KafkaClientService, KafkaClientServiceImpl};
use crate::streaming::{KsqlClient, KsqlClientImpl, StreamingService, StreamingServiceImpl};
-use crate::table::parquet::{CubestoreParquetMetadataCache, CubestoreParquetMetadataCacheImpl};
+use crate::table::parquet::{
+ CubestoreMetadataCacheFactory, CubestoreMetadataCacheFactoryImpl,
+ CubestoreParquetMetadataCache, CubestoreParquetMetadataCacheImpl,
+};
use crate::telemetry::tracing::{TracingHelper, TracingHelperImpl};
use crate::telemetry::{
start_agent_event_loop, start_track_event_loop, stop_agent_event_loop, stop_track_event_loop,
@@ -45,7 +49,7 @@ use crate::util::memory::{MemoryHandler, MemoryHandlerImpl};
use crate::CubeError;
use cuberockstore::rocksdb::{Options, DB};
use datafusion::cube_ext;
-use datafusion::physical_plan::parquet::{LruParquetMetadataCache, NoopParquetMetadataCache};
+use datafusion::physical_plan::parquet::BasicMetadataCacheFactory;
use futures::future::join_all;
use log::Level;
use log::{debug, error};
@@ -535,7 +539,6 @@ pub trait ConfigObj: DIService {
fn remote_files_cleanup_interval_secs(&self) -> u64;
fn local_files_cleanup_size_threshold(&self) -> u64;
-
fn local_files_cleanup_delay_secs(&self) -> u64;
fn remote_files_cleanup_delay_secs(&self) -> u64;
@@ -2002,11 +2005,16 @@ impl Config {
self.injector
         .register_typed::<dyn ChunkDataStore, _, _, _>(async move |i| {
+            let metadata_cache_factory = i
+                .get_service_typed::<dyn CubestoreMetadataCacheFactory>()
+                .await
+                .cache_factory();
ChunkStore::new(
i.get_service_typed().await,
i.get_service_typed().await,
i.get_service_typed().await,
i.get_service_typed().await,
+ metadata_cache_factory,
                i.get_service_typed::<dyn ConfigObj>()
.await
.wal_split_threshold() as usize,
@@ -2017,10 +2025,14 @@ impl Config {
self.injector
            .register_typed::<dyn CubestoreParquetMetadataCache, _, _, _>(async move |i| {
                let c = i.get_service_typed::<dyn ConfigObj>().await;
+                let metadata_cache_factory = i
+                    .get_service_typed::<dyn CubestoreMetadataCacheFactory>()
+ .await
+ .cache_factory();
CubestoreParquetMetadataCacheImpl::new(
match c.metadata_cache_max_capacity_bytes() {
- 0 => NoopParquetMetadataCache::new(),
- max_cached_metadata => LruParquetMetadataCache::new(
+ 0 => metadata_cache_factory.make_noop_cache(),
+ max_cached_metadata => metadata_cache_factory.make_lru_cache(
max_cached_metadata,
Duration::from_secs(c.metadata_cache_time_to_idle_secs()),
),
@@ -2031,11 +2043,16 @@ impl Config {
self.injector
            .register_typed::<dyn CompactionService, _, _, _>(async move |i| {
+                let metadata_cache_factory = i
+                    .get_service_typed::<dyn CubestoreMetadataCacheFactory>()
+ .await
+ .cache_factory();
CompactionServiceImpl::new(
i.get_service_typed().await,
i.get_service_typed().await,
i.get_service_typed().await,
i.get_service_typed().await,
+ metadata_cache_factory,
)
})
.await;
@@ -2060,6 +2077,12 @@ impl Config {
})
.await;
+ self.injector
+            .register_typed::<dyn TableExtensionService, _, _, _>(async move |_| {
+ TableExtensionServiceImpl::new()
+ })
+ .await;
+
self.injector
            .register_typed::<dyn StreamingService, _, _, _>(async move |i| {
StreamingServiceImpl::new(
@@ -2068,6 +2091,9 @@ impl Config {
i.get_service_typed().await,
i.get_service_typed().await,
i.get_service_typed().await,
+                i.get_service_typed::<dyn CubestoreMetadataCacheFactory>()
+ .await
+ .cache_factory(),
)
})
.await;
@@ -2136,6 +2162,12 @@ impl Config {
}
pub async fn configure_common(&self) {
+ self.injector
+            .register_typed::<dyn CubestoreMetadataCacheFactory, _, _, _>(async move |_| {
+ CubestoreMetadataCacheFactoryImpl::new(Arc::new(BasicMetadataCacheFactory::new()))
+ })
+ .await;
+
self.injector
            .register_typed_with_default::<dyn RemoteFs, _, _, _>(async move |i| {
QueueRemoteFs::new(
@@ -2160,18 +2192,29 @@ impl Config {
let query_cache_to_move = query_cache.clone();
self.injector
            .register_typed::<dyn QueryPlanner, _, _, _>(async move |i| {
+                let metadata_cache_factory = i
+                    .get_service_typed::<dyn CubestoreMetadataCacheFactory>()
+ .await
+ .cache_factory();
QueryPlannerImpl::new(
i.get_service_typed().await,
i.get_service_typed().await,
i.get_service_typed().await,
query_cache_to_move,
+ metadata_cache_factory,
)
})
.await;
self.injector
            .register_typed_with_default::<dyn QueryExecutor, _, _, _>(async move |i| {
-                QueryExecutorImpl::new(i.get_service_typed().await, i.get_service_typed().await)
+                QueryExecutorImpl::new(
+                    i.get_service_typed::<dyn CubestoreMetadataCacheFactory>()
+ .await
+ .cache_factory(),
+ i.get_service_typed().await,
+ i.get_service_typed().await,
+ )
})
.await;
@@ -2210,6 +2253,7 @@ impl Config {
i.get_service_typed().await,
i.get_service_typed().await,
i.get_service_typed().await,
+ i.get_service_typed().await,
c.wal_split_threshold() as usize,
Duration::from_secs(c.query_timeout()),
Duration::from_secs(c.import_job_timeout() * 2),
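
Note on the wiring above: `CubestoreMetadataCacheFactory` is registered once in `configure_common`, and every consumer (`ChunkStore`, `CubestoreParquetMetadataCache`, `CompactionService`, `StreamingService`, `QueryPlanner`, `QueryExecutor`) pulls that same instance and calls `cache_factory()` on it, so the parquet metadata cache becomes a single swappable seam. A minimal sketch of overriding it in a downstream build (the `CustomMetadataCacheFactory` type is hypothetical; the registration shape matches this diff):

    self.injector
        .register_typed::<dyn CubestoreMetadataCacheFactory, _, _, _>(async move |_| {
            // Wrap a custom DataFusion MetadataCacheFactory instead of the basic one.
            CubestoreMetadataCacheFactoryImpl::new(Arc::new(CustomMetadataCacheFactory::new()))
        })
        .await;
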
diff --git a/rust/cubestore/cubestore/src/metastore/mod.rs b/rust/cubestore/cubestore/src/metastore/mod.rs
index 7e115c465a55b..30d300914493d 100644
--- a/rust/cubestore/cubestore/src/metastore/mod.rs
+++ b/rust/cubestore/cubestore/src/metastore/mod.rs
@@ -856,6 +856,7 @@ pub trait MetaStore: DIService + Send + Sync {
        partition_split_threshold: Option<u64>,
        trace_obj: Option<String>,
        drop_if_exists: bool,
+        extension: Option<String>,
    ) -> Result<IdRow<Table>, CubeError>;
    async fn table_ready(&self, id: u64, is_ready: bool) -> Result<IdRow<Table>, CubeError>;
    async fn seal_table(&self, id: u64) -> Result<IdRow<Table>, CubeError>;
@@ -2087,6 +2088,7 @@ impl MetaStore for RocksMetaStore {
        partition_split_threshold: Option<u64>,
        trace_obj: Option<String>,
        drop_if_exists: bool,
+        extension: Option<String>,
    ) -> Result<IdRow<Table>, CubeError> {
self.write_operation(move |db_ref, batch_pipe| {
batch_pipe.invalidate_tables_cache();
@@ -2189,6 +2191,7 @@ impl MetaStore for RocksMetaStore {
aggregate_column_indices,
seq_column_index,
partition_split_threshold,
+ extension,
);
let table_id = rocks_table.insert(table, batch_pipe)?;
@@ -5143,6 +5146,7 @@ mod tests {
None,
None,
false,
+ None,
)
.await
.unwrap();
@@ -5166,6 +5170,7 @@ mod tests {
None,
None,
false,
+ None,
)
.await
.unwrap();
@@ -5290,6 +5295,7 @@ mod tests {
None,
None,
false,
+ None,
)
.await
.unwrap();
@@ -5315,6 +5321,7 @@ mod tests {
None,
None,
false,
+ None,
)
.await
.is_err());
@@ -5406,6 +5413,7 @@ mod tests {
None,
None,
false,
+ None,
)
.await
.unwrap();
@@ -5498,6 +5506,7 @@ mod tests {
None,
None,
false,
+ None,
)
.await
.unwrap();
@@ -5571,6 +5580,7 @@ mod tests {
None,
None,
false,
+ None,
)
.await
.is_err());
@@ -5594,6 +5604,7 @@ mod tests {
None,
None,
false,
+ None,
)
.await
.is_err());
@@ -5620,6 +5631,7 @@ mod tests {
None,
None,
false,
+ None,
)
.await
.is_err());
@@ -6104,6 +6116,7 @@ mod tests {
None,
None,
false,
+ None,
)
.await
.unwrap();
@@ -6326,6 +6339,7 @@ mod tests {
None,
None,
false,
+ None,
)
.await
.unwrap();
@@ -6467,6 +6481,7 @@ mod tests {
None,
None,
false,
+ None,
)
.await
.unwrap();
diff --git a/rust/cubestore/cubestore/src/metastore/table.rs b/rust/cubestore/cubestore/src/metastore/table.rs
index ad0b1709dc5a5..c0e464fadca87 100644
--- a/rust/cubestore/cubestore/src/metastore/table.rs
+++ b/rust/cubestore/cubestore/src/metastore/table.rs
@@ -153,7 +153,9 @@ pub struct Table {
#[serde(default)]
    location_download_sizes: Option<Vec<u64>>,
#[serde(default)]
-    partition_split_threshold: Option<u64>
+    partition_split_threshold: Option<u64>,
+ #[serde(default)]
+    extension: Option<String> // TODO: Make this an Option<serde_json::Value> or Option<JsonValue>? We have some problems implementing Hash.
}
}
@@ -190,6 +192,7 @@ impl Table {
        aggregate_column_indices: Vec<u64>,
        seq_column_index: Option<u64>,
        partition_split_threshold: Option<u64>,
+        extension: Option<String>,
) -> Table {
let location_download_sizes = locations.as_ref().map(|locations| vec![0; locations.len()]);
Table {
@@ -212,6 +215,7 @@ impl Table {
seq_column_index,
location_download_sizes,
partition_split_threshold,
+ extension,
}
}
    pub fn get_columns(&self) -> &Vec<Column> {
@@ -312,6 +316,10 @@ impl Table {
&self.select_statement
}
+    pub fn extension(&self) -> &Option<String> {
+ &self.extension
+ }
+
    pub fn source_columns(&self) -> &Option<Vec<Column>> {
&self.source_columns
}
diff --git a/rust/cubestore/cubestore/src/queryplanner/info_schema/system_tables.rs b/rust/cubestore/cubestore/src/queryplanner/info_schema/system_tables.rs
index 6fb259c8957c2..55060cb065add 100644
--- a/rust/cubestore/cubestore/src/queryplanner/info_schema/system_tables.rs
+++ b/rust/cubestore/cubestore/src/queryplanner/info_schema/system_tables.rs
@@ -54,6 +54,7 @@ impl InfoSchemaTableDef for SystemTablesTableDef {
),
Field::new("sealed", DataType::Boolean, false),
Field::new("select_statement", DataType::Utf8, false),
+ Field::new("extension", DataType::Utf8, true),
]
}
@@ -247,6 +248,14 @@ impl InfoSchemaTableDef for SystemTablesTableDef {
                        .collect::<Vec<_>>(),
))
}),
+ Box::new(|tables| {
+ Arc::new(StringArray::from(
+ tables
+ .iter()
+ .map(|row| row.table.get_row().extension().as_ref().map(|t| t.as_str()))
+                        .collect::<Vec<_>>(),
+ ))
+ }),
]
}
}
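
The new `extension` column is appended as a nullable Utf8 field at the end of `system.tables`, so it can be checked with a plain query. A sketch in the style of the cubestore SQL tests (the `exec_query` harness call is assumed from the surrounding test suite):

    let rows = service
        .exec_query("SELECT extension FROM system.tables")
        .await?;
    // Tables created without an extension come back as NULL here.
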
diff --git a/rust/cubestore/cubestore/src/queryplanner/mod.rs b/rust/cubestore/cubestore/src/queryplanner/mod.rs
index b661ab0393ab2..c8547e7df8c69 100644
--- a/rust/cubestore/cubestore/src/queryplanner/mod.rs
+++ b/rust/cubestore/cubestore/src/queryplanner/mod.rs
@@ -3,6 +3,7 @@ mod optimizations;
pub mod panic;
mod partition_filter;
mod planning;
+use datafusion::physical_plan::parquet::MetadataCacheFactory;
pub use planning::PlanningMeta;
mod check_memory;
pub mod physical_plan_flags;
@@ -98,6 +99,7 @@ pub struct QueryPlannerImpl {
    cache_store: Arc<dyn CacheStore>,
    config: Arc<dyn ConfigObj>,
    cache: Arc<SqlResultCache>,
+    metadata_cache_factory: Arc<dyn MetadataCacheFactory>,
}
crate::di_service!(QueryPlannerImpl, [QueryPlanner]);
@@ -179,12 +181,14 @@ impl QueryPlannerImpl {
        cache_store: Arc<dyn CacheStore>,
        config: Arc<dyn ConfigObj>,
        cache: Arc<SqlResultCache>,
+        metadata_cache_factory: Arc<dyn MetadataCacheFactory>,
    ) -> Arc<QueryPlannerImpl> {
Arc::new(QueryPlannerImpl {
meta_store,
cache_store,
config,
cache,
+ metadata_cache_factory,
})
}
}
@@ -193,6 +197,7 @@ impl QueryPlannerImpl {
    async fn execution_context(&self) -> Result<Arc<ExecutionContext>, CubeError> {
Ok(Arc::new(ExecutionContext::with_config(
ExecutionConfig::new()
+ .with_metadata_cache_factory(self.metadata_cache_factory.clone())
.add_optimizer_rule(Arc::new(MaterializeNow {}))
.add_optimizer_rule(Arc::new(FlattenUnion {})),
)))
@@ -294,6 +299,7 @@ impl ContextProvider for MetaStoreSchemaProvider {
Vec::new(),
None,
None,
+ None,
),
),
schema: Arc::new(IdRow::new(0, metastore::Schema::new(schema.to_string()))),
@@ -409,6 +415,7 @@ impl ContextProvider for MetaStoreSchemaProvider {
"unix_timestamp" | "UNIX_TIMESTAMP" => CubeScalarUDFKind::UnixTimestamp,
"date_add" | "DATE_ADD" => CubeScalarUDFKind::DateAdd,
"date_sub" | "DATE_SUB" => CubeScalarUDFKind::DateSub,
+ "date_bin" | "DATE_BIN" => CubeScalarUDFKind::DateBin,
_ => return None,
};
return Some(Arc::new(scalar_udf_by_kind(kind).descriptor()));
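
With the UDF registered under both spellings, `DATE_BIN(interval, source, origin)` becomes callable from the router: it returns the start of the `interval`-sized bin, aligned to `origin`, that contains `source`. A hedged usage sketch (the `foo.events` table and `ts` column are hypothetical; `to_timestamp` already exists in CubeStore):

    let r = service
        .exec_query(
            "SELECT DATE_BIN(INTERVAL '15 minute', ts, to_timestamp('2024-01-01T00:00:00.000Z')) \
             FROM foo.events",
        )
        .await?;
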
diff --git a/rust/cubestore/cubestore/src/queryplanner/planning.rs b/rust/cubestore/cubestore/src/queryplanner/planning.rs
index 2efcb66ea60b1..a35b96837115f 100644
--- a/rust/cubestore/cubestore/src/queryplanner/planning.rs
+++ b/rust/cubestore/cubestore/src/queryplanner/planning.rs
@@ -2142,6 +2142,7 @@ pub mod tests {
Vec::new(),
None,
None,
+ None,
));
i.indices.push(
Index::try_new(
@@ -2193,6 +2194,7 @@ pub mod tests {
Vec::new(),
None,
None,
+ None,
));
i.indices.push(
@@ -2250,6 +2252,7 @@ pub mod tests {
Vec::new(),
None,
None,
+ None,
));
i
diff --git a/rust/cubestore/cubestore/src/queryplanner/query_executor.rs b/rust/cubestore/cubestore/src/queryplanner/query_executor.rs
index c58dc44971468..4bf2755c49add 100644
--- a/rust/cubestore/cubestore/src/queryplanner/query_executor.rs
+++ b/rust/cubestore/cubestore/src/queryplanner/query_executor.rs
@@ -44,7 +44,7 @@ use datafusion::physical_plan::memory::MemoryExec;
use datafusion::physical_plan::merge::MergeExec;
use datafusion::physical_plan::merge_sort::{LastRowByUniqueKeyExec, MergeSortExec};
use datafusion::physical_plan::parquet::{
- NoopParquetMetadataCache, ParquetExec, ParquetMetadataCache,
+ MetadataCacheFactory, NoopParquetMetadataCache, ParquetExec, ParquetMetadataCache,
};
use datafusion::physical_plan::projection::ProjectionExec;
use datafusion::physical_plan::{
@@ -105,6 +105,8 @@ pub trait QueryExecutor: DIService + Send + Sync {
crate::di_service!(MockQueryExecutor, [QueryExecutor]);
pub struct QueryExecutorImpl {
+ // TODO: Why do we need a MetadataCacheFactory when we have a ParquetMetadataCache?
+    metadata_cache_factory: Arc<dyn MetadataCacheFactory>,
    parquet_metadata_cache: Arc<dyn CubestoreParquetMetadataCache>,
    memory_handler: Arc<dyn MemoryHandler>,
}
@@ -312,10 +314,12 @@ impl QueryExecutor for QueryExecutorImpl {
impl QueryExecutorImpl {
pub fn new(
+        metadata_cache_factory: Arc<dyn MetadataCacheFactory>,
        parquet_metadata_cache: Arc<dyn CubestoreParquetMetadataCache>,
        memory_handler: Arc<dyn MemoryHandler>,
    ) -> Arc<QueryExecutorImpl> {
Arc::new(QueryExecutorImpl {
+ metadata_cache_factory,
parquet_metadata_cache,
memory_handler,
})
@@ -328,6 +332,7 @@ impl QueryExecutorImpl {
    ) -> Result<Arc<ExecutionContext>, CubeError> {
Ok(Arc::new(ExecutionContext::with_config(
ExecutionConfig::new()
+ .with_metadata_cache_factory(self.metadata_cache_factory.clone())
.with_batch_size(4096)
.with_concurrency(1)
.with_query_planner(Arc::new(CubeQueryPlanner::new_on_router(
@@ -345,6 +350,7 @@ impl QueryExecutorImpl {
    ) -> Result<Arc<ExecutionContext>, CubeError> {
Ok(Arc::new(ExecutionContext::with_config(
ExecutionConfig::new()
+ .with_metadata_cache_factory(self.metadata_cache_factory.clone())
.with_batch_size(4096)
.with_concurrency(1)
.with_query_planner(Arc::new(CubeQueryPlanner::new_on_worker(
diff --git a/rust/cubestore/cubestore/src/queryplanner/test_utils.rs b/rust/cubestore/cubestore/src/queryplanner/test_utils.rs
index d5e1c891fb688..f23219aeec260 100644
--- a/rust/cubestore/cubestore/src/queryplanner/test_utils.rs
+++ b/rust/cubestore/cubestore/src/queryplanner/test_utils.rs
@@ -105,6 +105,7 @@ impl MetaStore for MetaStoreMock {
        _partition_split_threshold: Option<u64>,
        _trace_obj: Option<String>,
        _drop_if_exists: bool,
+        _extension: Option<String>,
    ) -> Result<IdRow<Table>, CubeError> {
panic!("MetaStore mock!")
}
diff --git a/rust/cubestore/cubestore/src/queryplanner/udfs.rs b/rust/cubestore/cubestore/src/queryplanner/udfs.rs
index f5df60b97c6f6..d35ade5f4dee9 100644
--- a/rust/cubestore/cubestore/src/queryplanner/udfs.rs
+++ b/rust/cubestore/cubestore/src/queryplanner/udfs.rs
@@ -1,8 +1,10 @@
use crate::queryplanner::coalesce::{coalesce, SUPPORTED_COALESCE_TYPES};
use crate::queryplanner::hll::{Hll, HllUnion};
use crate::CubeError;
-use chrono::{TimeZone, Utc};
-use datafusion::arrow::array::{Array, BinaryArray, TimestampNanosecondArray, UInt64Builder};
+use chrono::{Datelike, Duration, Months, NaiveDateTime, TimeZone, Utc};
+use datafusion::arrow::array::{
+ Array, ArrayRef, BinaryArray, TimestampNanosecondArray, UInt64Builder,
+};
use datafusion::arrow::datatypes::{DataType, IntervalUnit, TimeUnit};
use datafusion::cube_ext::datetime::{date_addsub_array, date_addsub_scalar};
use datafusion::error::DataFusionError;
@@ -24,6 +26,7 @@ pub enum CubeScalarUDFKind {
UnixTimestamp,
DateAdd,
DateSub,
+ DateBin,
}
pub trait CubeScalarUDF {
@@ -40,6 +43,7 @@ pub fn scalar_udf_by_kind(k: CubeScalarUDFKind) -> Box {
CubeScalarUDFKind::UnixTimestamp => Box::new(UnixTimestamp {}),
CubeScalarUDFKind::DateAdd => Box::new(DateAddSub { is_add: true }),
CubeScalarUDFKind::DateSub => Box::new(DateAddSub { is_add: false }),
+ CubeScalarUDFKind::DateBin => Box::new(DateBin {}),
}
}
@@ -63,6 +67,9 @@ pub fn scalar_kind_by_name(n: &str) -> Option {
if n == "DATE_SUB" {
return Some(CubeScalarUDFKind::DateSub);
}
+ if n == "DATE_BIN" {
+ return Some(CubeScalarUDFKind::DateBin);
+ }
return None;
}
@@ -192,6 +199,233 @@ impl CubeScalarUDF for UnixTimestamp {
}
}
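+/// Decodes an IntervalDayTime interval: the packed i64 carries whole days in
+/// its upper 32 bits and milliseconds in its lower 32 bits.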
+fn interval_dt_duration(i: &i64) -> Duration {
+ let days: i64 = i.signum() * (i.abs() >> 32);
+ let millis: i64 = i.signum() * ((i.abs() << 32) >> 32);
+ let duration = Duration::days(days) + Duration::milliseconds(millis);
+
+ duration
+}
+
+fn calc_intervals(start: NaiveDateTime, end: NaiveDateTime, interval: i32) -> i32 {
+ let years_diff = end.year() - start.year();
+ let months_diff = end.month() as i32 - start.month() as i32;
+ let mut total_months = years_diff * 12 + months_diff;
+
+ if total_months > 0 && end.day() < start.day() {
+ total_months -= 1; // If the day in the final date is less, reduce by 1 month
+ }
+
+ let rem = months_diff % interval;
+ let mut num_intervals = total_months / interval;
+
+ if num_intervals < 0 && rem == 0 && end.day() < start.day() {
+ num_intervals -= 1;
+ }
+
+ num_intervals
+}
+
+/// Calculate date_bin timestamp for source date for year-month interval
+fn calc_bin_timestamp_ym(origin: NaiveDateTime, source: &i64, interval: i32) -> NaiveDateTime {
+ let timestamp =
+ NaiveDateTime::from_timestamp(*source / 1_000_000_000, (*source % 1_000_000_000) as u32);
+ let num_intervals = calc_intervals(origin, timestamp, interval);
+ let nearest_date = if num_intervals >= 0 {
+ origin
+ .date()
+ .checked_add_months(Months::new((num_intervals * interval) as u32))
+ .unwrap_or(origin.date())
+ } else {
+ origin
+ .date()
+ .checked_sub_months(Months::new((-num_intervals * interval) as u32))
+ .unwrap_or(origin.date())
+ };
+
+ NaiveDateTime::new(nearest_date, origin.time())
+}
+
+/// Calculate date_bin timestamp for source date for date-time interval
+fn calc_bin_timestamp_dt(origin: NaiveDateTime, source: &i64, interval: &i64) -> NaiveDateTime {
+ let timestamp =
+ NaiveDateTime::from_timestamp(*source / 1_000_000_000, (*source % 1_000_000_000) as u32);
+ let diff = timestamp - origin;
+ let interval_duration = interval_dt_duration(&interval);
+ let num_intervals =
+ diff.num_nanoseconds().unwrap_or(0) / interval_duration.num_nanoseconds().unwrap_or(1);
+ let mut nearest_timestamp = origin
+ .checked_add_signed(interval_duration * num_intervals as i32)
+ .unwrap_or(origin);
+
+ if diff.num_nanoseconds().unwrap_or(0) < 0 {
+ nearest_timestamp = nearest_timestamp
+ .checked_sub_signed(interval_duration)
+ .unwrap_or(origin);
+ }
+
+ nearest_timestamp
+}
+
+struct DateBin {}
+impl DateBin {
+ fn signature() -> Signature {
+ Signature::OneOf(vec![
+ Signature::Exact(vec![
+ DataType::Interval(IntervalUnit::YearMonth),
+ DataType::Timestamp(TimeUnit::Nanosecond, None),
+ DataType::Timestamp(TimeUnit::Nanosecond, None),
+ ]),
+ Signature::Exact(vec![
+ DataType::Interval(IntervalUnit::DayTime),
+ DataType::Timestamp(TimeUnit::Nanosecond, None),
+ DataType::Timestamp(TimeUnit::Nanosecond, None),
+ ]),
+ ])
+ }
+}
+impl CubeScalarUDF for DateBin {
+ fn kind(&self) -> CubeScalarUDFKind {
+ CubeScalarUDFKind::DateBin
+ }
+
+ fn name(&self) -> &str {
+ "DATE_BIN"
+ }
+
+ fn descriptor(&self) -> ScalarUDF {
+ return ScalarUDF {
+ name: self.name().to_string(),
+ signature: Self::signature(),
+ return_type: Arc::new(|_| {
+ Ok(Arc::new(DataType::Timestamp(TimeUnit::Nanosecond, None)))
+ }),
+ fun: Arc::new(move |inputs| {
+ assert_eq!(inputs.len(), 3);
+ let interval = match &inputs[0] {
+ ColumnarValue::Scalar(i) => i.clone(),
+ _ => {
+ // We leave this case out for simplicity.
+ // CubeStore does not allow intervals inside tables, so this is super rare.
+ return Err(DataFusionError::Execution(format!(
+ "Only scalar intervals are supported in DATE_BIN"
+ )));
+ }
+ };
+
+ let origin = match &inputs[2] {
+ ColumnarValue::Scalar(ScalarValue::TimestampNanosecond(Some(o))) => {
+ NaiveDateTime::from_timestamp(
+ *o / 1_000_000_000,
+ (*o % 1_000_000_000) as u32,
+ )
+ }
+ ColumnarValue::Scalar(ScalarValue::TimestampNanosecond(None)) => {
+ return Err(DataFusionError::Execution(format!(
+ "Third argument (origin) of DATE_BIN must be a non-null timestamp"
+ )));
+ }
+ _ => {
+ // Leaving out other rare cases.
+ // The initial need for the date_bin comes from custom granularities support
+ // and there will always be a scalar origin point
+ return Err(DataFusionError::Execution(format!(
+ "Only scalar origins are supported in DATE_BIN"
+ )));
+ }
+ };
+
+ match interval {
+ ScalarValue::IntervalYearMonth(Some(interval)) => match &inputs[1] {
+ ColumnarValue::Scalar(ScalarValue::TimestampNanosecond(None)) => Ok(
+ ColumnarValue::Scalar(ScalarValue::TimestampNanosecond(None)),
+ ),
+ ColumnarValue::Scalar(ScalarValue::TimestampNanosecond(Some(t))) => {
+ let nearest_timestamp = calc_bin_timestamp_ym(origin, t, interval);
+
+ Ok(ColumnarValue::Scalar(ScalarValue::TimestampNanosecond(
+ Some(nearest_timestamp.timestamp_nanos()),
+ )))
+ }
+ ColumnarValue::Array(arr)
+                            if arr.as_any().is::<TimestampNanosecondArray>() =>
+ {
+ let ts_array = arr
+ .as_any()
+                                .downcast_ref::<TimestampNanosecondArray>()
+ .unwrap();
+
+ let mut builder = TimestampNanosecondArray::builder(ts_array.len());
+
+ for i in 0..ts_array.len() {
+ if ts_array.is_null(i) {
+ builder.append_null()?;
+ } else {
+ let ts = ts_array.value(i);
+ let nearest_timestamp =
+ calc_bin_timestamp_ym(origin, &ts, interval);
+ builder.append_value(nearest_timestamp.timestamp_nanos())?;
+ }
+ }
+
+ Ok(ColumnarValue::Array(Arc::new(builder.finish()) as ArrayRef))
+ }
+ _ => {
+ return Err(DataFusionError::Execution(format!(
+ "Second argument of DATE_BIN must be a non-null timestamp"
+ )));
+ }
+ },
+ ScalarValue::IntervalDayTime(Some(interval)) => match &inputs[1] {
+ ColumnarValue::Scalar(ScalarValue::TimestampNanosecond(None)) => Ok(
+ ColumnarValue::Scalar(ScalarValue::TimestampNanosecond(None)),
+ ),
+ ColumnarValue::Scalar(ScalarValue::TimestampNanosecond(Some(t))) => {
+ let nearest_timestamp = calc_bin_timestamp_dt(origin, t, &interval);
+
+ Ok(ColumnarValue::Scalar(ScalarValue::TimestampNanosecond(
+ Some(nearest_timestamp.timestamp_nanos()),
+ )))
+ }
+ ColumnarValue::Array(arr)
+                            if arr.as_any().is::<TimestampNanosecondArray>() =>
+ {
+ let ts_array = arr
+ .as_any()
+                                .downcast_ref::<TimestampNanosecondArray>()
+ .unwrap();
+
+ let mut builder = TimestampNanosecondArray::builder(ts_array.len());
+
+ for i in 0..ts_array.len() {
+ if ts_array.is_null(i) {
+ builder.append_null()?;
+ } else {
+ let ts = ts_array.value(i);
+ let nearest_timestamp =
+ calc_bin_timestamp_dt(origin, &ts, &interval);
+ builder.append_value(nearest_timestamp.timestamp_nanos())?;
+ }
+ }
+
+ Ok(ColumnarValue::Array(Arc::new(builder.finish()) as ArrayRef))
+ }
+ _ => {
+ return Err(DataFusionError::Execution(format!(
+ "Second argument of DATE_BIN must be a non-null timestamp"
+ )));
+ }
+ },
+ _ => Err(DataFusionError::Execution(format!(
+ "Unsupported interval type: {:?}",
+ interval
+ ))),
+ }
+ }),
+ };
+ }
+}
+
struct DateAddSub {
is_add: bool,
}
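
A worked example of the day-time path above, written as a hypothetical unit test for `calc_bin_timestamp_dt` (values chosen for illustration; a one-day interval packs as `days = 1` in the upper 32 bits of the i64):

    #[test]
    fn date_bin_day_time_example() {
        let one_day: i64 = 1i64 << 32; // days = 1, millis = 0
        let origin =
            NaiveDateTime::parse_from_str("2024-01-01 00:00:00", "%Y-%m-%d %H:%M:%S").unwrap();
        let source =
            NaiveDateTime::parse_from_str("2024-01-03 17:30:00", "%Y-%m-%d %H:%M:%S").unwrap();
        // diff = 2 days 17.5 hours, so num_intervals truncates to 2 and the
        // bin starts at origin + 2 days.
        let binned = calc_bin_timestamp_dt(origin, &source.timestamp_nanos(), &one_day);
        assert_eq!(
            binned,
            NaiveDateTime::parse_from_str("2024-01-03 00:00:00", "%Y-%m-%d %H:%M:%S").unwrap()
        );
    }

For a source before the origin, the extra `checked_sub_signed` step shifts the result one interval down, so binning still rounds toward negative infinity rather than toward zero.
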
diff --git a/rust/cubestore/cubestore/src/sql/mod.rs b/rust/cubestore/cubestore/src/sql/mod.rs
index 90d382c1b277b..8c53ccb6d9bb6 100644
--- a/rust/cubestore/cubestore/src/sql/mod.rs
+++ b/rust/cubestore/cubestore/src/sql/mod.rs
@@ -80,6 +80,7 @@ use crate::sql::cachestore::CacheStoreSqlService;
use crate::util::metrics;
use mockall::automock;
use table_creator::{convert_columns_type, TableCreator};
+pub use table_creator::{TableExtensionService, TableExtensionServiceImpl};
#[automock]
#[async_trait]
@@ -187,6 +188,7 @@ impl SqlServiceImpl {
        query_executor: Arc<dyn QueryExecutor>,
        cluster: Arc<dyn Cluster>,
        import_service: Arc<dyn ImportService>,
+        table_extension_service: Arc<dyn TableExtensionService>,
        config_obj: Arc<dyn ConfigObj>,
        remote_fs: Arc<dyn RemoteFs>,
rows_per_chunk: usize,
@@ -205,6 +207,7 @@ impl SqlServiceImpl {
db.clone(),
cluster.clone(),
import_service,
+ table_extension_service,
config_obj.clone(),
create_table_timeout,
cache.clone(),
@@ -1659,11 +1662,13 @@ mod tests {
use crate::store::compaction::CompactionService;
use async_compression::tokio::write::GzipEncoder;
use cuberockstore::rocksdb::{Options, DB};
+ use datafusion::physical_plan::parquet::BasicMetadataCacheFactory;
use futures_timer::Delay;
use itertools::Itertools;
use pretty_assertions::assert_eq;
use rand::distributions::Alphanumeric;
use rand::{thread_rng, Rng};
+ use table_creator::TableExtensionServiceImpl;
use tokio::io::{AsyncWriteExt, BufWriter};
use uuid::Uuid;
@@ -1723,6 +1728,7 @@ mod tests {
remote_fs.clone(),
Arc::new(MockCluster::new()),
config.config_obj(),
+ Arc::new(BasicMetadataCacheFactory::new()),
rows_per_chunk,
);
let limits = Arc::new(ConcurrencyLimits::new(4));
@@ -1735,6 +1741,7 @@ mod tests {
Arc::new(MockQueryExecutor::new()),
Arc::new(MockCluster::new()),
Arc::new(MockImportService::new()),
+ TableExtensionServiceImpl::new(),
config.config_obj(),
remote_fs.clone(),
rows_per_chunk,
@@ -1800,6 +1807,7 @@ mod tests {
remote_fs.clone(),
Arc::new(MockCluster::new()),
config.config_obj(),
+ Arc::new(BasicMetadataCacheFactory::new()),
rows_per_chunk,
);
let limits = Arc::new(ConcurrencyLimits::new(4));
@@ -1812,6 +1820,7 @@ mod tests {
Arc::new(MockQueryExecutor::new()),
Arc::new(MockCluster::new()),
Arc::new(MockImportService::new()),
+ TableExtensionServiceImpl::new(),
config.config_obj(),
remote_fs.clone(),
rows_per_chunk,
@@ -1861,6 +1870,7 @@ mod tests {
TableValue::String("NULL".to_string()),
TableValue::String("NULL".to_string()),
TableValue::String("NULL".to_string()),
+ TableValue::String("NULL".to_string()),
]));
}
@@ -1907,6 +1917,7 @@ mod tests {
remote_fs.clone(),
Arc::new(MockCluster::new()),
config.config_obj(),
+ Arc::new(BasicMetadataCacheFactory::new()),
rows_per_chunk,
);
let limits = Arc::new(ConcurrencyLimits::new(4));
@@ -1919,6 +1930,7 @@ mod tests {
Arc::new(MockQueryExecutor::new()),
Arc::new(MockCluster::new()),
Arc::new(MockImportService::new()),
+ TableExtensionServiceImpl::new(),
config.config_obj(),
remote_fs.clone(),
rows_per_chunk,
@@ -1968,6 +1980,7 @@ mod tests {
TableValue::String("NULL".to_string()),
TableValue::String("NULL".to_string()),
TableValue::String("NULL".to_string()),
+ TableValue::String("NULL".to_string()),
]));
}
diff --git a/rust/cubestore/cubestore/src/sql/table_creator.rs b/rust/cubestore/cubestore/src/sql/table_creator.rs
index a39db27f3b9f9..a7cef4bc156ef 100644
--- a/rust/cubestore/cubestore/src/sql/table_creator.rs
+++ b/rust/cubestore/cubestore/src/sql/table_creator.rs
@@ -2,6 +2,7 @@ use std::sync::Arc;
use std::time::Duration;
use crate::cluster::{Cluster, JobEvent, JobResultListener};
+use crate::config::injection::DIService;
use crate::config::ConfigObj;
use crate::import::ImportService;
use crate::metastore::job::JobType;
@@ -14,11 +15,35 @@ use crate::sql::cache::SqlResultCache;
use crate::sql::parser::{CubeStoreParser, PartitionedIndexRef};
use crate::telemetry::incoming_traffic_agent_event;
use crate::CubeError;
+use async_trait::async_trait;
use chrono::{DateTime, Utc};
use futures::future::join_all;
use sqlparser::ast::*;
use std::mem::take;
+#[async_trait]
+pub trait TableExtensionService: DIService + Send + Sync {
+    async fn get_extension(&self) -> Option<serde_json::Value>;
+}
+
+pub struct TableExtensionServiceImpl;
+
+impl TableExtensionServiceImpl {
    pub fn new() -> Arc<Self> {
+ Arc::new(Self {})
+ }
+}
+
+#[async_trait]
+impl TableExtensionService for TableExtensionServiceImpl {
+    async fn get_extension(&self) -> Option<serde_json::Value> {
+ None
+ }
+}
+
+crate::di_service!(TableExtensionServiceImpl, [TableExtensionService]);
+
enum FinalizeExternalTableResult {
Ok,
Orphaned,
@@ -27,6 +52,7 @@ pub struct TableCreator {
    db: Arc<dyn MetaStore>,
    cluster: Arc<dyn Cluster>,
    import_service: Arc<dyn ImportService>,
+    table_extension_service: Arc<dyn TableExtensionService>,
    config_obj: Arc<dyn ConfigObj>,
    create_table_timeout: Duration,
    cache: Arc<SqlResultCache>,
@@ -37,6 +63,7 @@ impl TableCreator {
        db: Arc<dyn MetaStore>,
        cluster: Arc<dyn Cluster>,
        import_service: Arc<dyn ImportService>,
+        table_extension_service: Arc<dyn TableExtensionService>,
        config_obj: Arc<dyn ConfigObj>,
        create_table_timeout: Duration,
        cache: Arc<SqlResultCache>,
@@ -45,6 +72,7 @@ impl TableCreator {
db,
cluster,
import_service,
+ table_extension_service,
config_obj,
create_table_timeout,
cache,
@@ -70,6 +98,8 @@ impl TableCreator {
        partitioned_index: Option<PartitionedIndexRef>,
        trace_obj: &Option<String>,
    ) -> Result<IdRow<Table>, CubeError> {
+        let extension: Option<serde_json::Value> =
+ self.table_extension_service.get_extension().await;
if !if_not_exists {
return self
.create_table_loop(
@@ -90,6 +120,7 @@ impl TableCreator {
aggregates,
partitioned_index,
&trace_obj,
+ &extension,
)
.await;
}
@@ -126,6 +157,7 @@ impl TableCreator {
aggregates,
partitioned_index,
&trace_obj,
+ &extension,
)
.await
})
@@ -151,6 +183,7 @@ impl TableCreator {
        aggregates: Option<Vec<(Ident, Ident)>>,
        partitioned_index: Option<PartitionedIndexRef>,
        trace_obj: &Option<String>,
+        extension: &Option<serde_json::Value>,
    ) -> Result<IdRow<Table>, CubeError> {
let mut retries = 0;
let max_retries = self.config_obj.create_table_max_retries();
@@ -179,6 +212,7 @@ impl TableCreator {
aggregates.clone(),
partitioned_index.clone(),
trace_obj,
+ extension,
)
.await?;
@@ -251,6 +285,7 @@ impl TableCreator {
        aggregates: Option<Vec<(Ident, Ident)>>,
        partitioned_index: Option<PartitionedIndexRef>,
        trace_obj: &Option<String>,
+        extension: &Option<serde_json::Value>,
    ) -> Result<IdRow<Table>, CubeError> {
let columns_to_set = convert_columns_type(columns)?;
let mut indexes_to_create = Vec::new();
@@ -369,6 +404,7 @@ impl TableCreator {
None,
None,
false,
+ extension.as_ref().map(|json_value| json_value.to_string()),
)
.await;
}
@@ -449,6 +485,7 @@ impl TableCreator {
partition_split_threshold,
trace_obj_to_save,
if_not_exists,
+ extension.as_ref().map(|json_value| json_value.to_string()),
)
.await?;
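
The default `TableExtensionServiceImpl` returns `None`, so stock CREATE TABLE behavior is unchanged; the trait exists so a fork can attach metadata to every created table. A minimal sketch of an override (the struct and payload are hypothetical; `serde_json::Value` is the extension type assumed in this diff, stringified via `json_value.to_string()` before it reaches the metastore):

    pub struct StaticTableExtensionService;

    #[async_trait]
    impl TableExtensionService for StaticTableExtensionService {
        async fn get_extension(&self) -> Option<serde_json::Value> {
            // Attached (stringified) to every table created on this node.
            Some(serde_json::json!({ "owner": "ingest-pipeline" }))
        }
    }

    crate::di_service!(StaticTableExtensionService, [TableExtensionService]);
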
diff --git a/rust/cubestore/cubestore/src/store/compaction.rs b/rust/cubestore/cubestore/src/store/compaction.rs
index f451fd236c891..e533679f386a6 100644
--- a/rust/cubestore/cubestore/src/store/compaction.rs
+++ b/rust/cubestore/cubestore/src/store/compaction.rs
@@ -34,7 +34,7 @@ use datafusion::physical_plan::hash_aggregate::{
};
use datafusion::physical_plan::memory::MemoryExec;
use datafusion::physical_plan::merge_sort::{LastRowByUniqueKeyExec, MergeSortExec};
-use datafusion::physical_plan::parquet::ParquetExec;
+use datafusion::physical_plan::parquet::{MetadataCacheFactory, ParquetExec};
use datafusion::physical_plan::union::UnionExec;
use datafusion::physical_plan::{
AggregateExpr, ExecutionPlan, PhysicalExpr, SendableRecordBatchStream,
@@ -75,6 +75,7 @@ pub struct CompactionServiceImpl {
    chunk_store: Arc<dyn ChunkDataStore>,
    remote_fs: Arc<dyn RemoteFs>,
    config: Arc<dyn ConfigObj>,
+    metadata_cache_factory: Arc<dyn MetadataCacheFactory>,
}
crate::di_service!(CompactionServiceImpl, [CompactionService]);
@@ -85,12 +86,14 @@ impl CompactionServiceImpl {
        chunk_store: Arc<dyn ChunkDataStore>,
        remote_fs: Arc<dyn RemoteFs>,
        config: Arc<dyn ConfigObj>,
+        metadata_cache_factory: Arc<dyn MetadataCacheFactory>,
    ) -> Arc<CompactionServiceImpl> {
Arc::new(CompactionServiceImpl {
meta_store,
chunk_store,
remote_fs,
config,
+ metadata_cache_factory,
})
}
@@ -571,7 +574,11 @@ impl CompactionService for CompactionServiceImpl {
}
}
- let store = ParquetTableStore::new(index.get_row().clone(), ROW_GROUP_SIZE);
+ let store = ParquetTableStore::new(
+ index.get_row().clone(),
+ ROW_GROUP_SIZE,
+ self.metadata_cache_factory.clone(),
+ );
let old_partition_remote = match &new_chunk {
Some(_) => None,
None => partition.get_row().get_full_name(partition.get_id()),
@@ -644,13 +651,14 @@ impl CompactionService for CompactionServiceImpl {
let schema = Arc::new(arrow_schema(index.get_row()));
        let main_table: Arc<dyn ExecutionPlan> = match old_partition_local {
Some(file) => {
- let parquet_exec = Arc::new(ParquetExec::try_from_path(
+ let parquet_exec = Arc::new(ParquetExec::try_from_path_with_cache(
file.as_str(),
None,
None,
ROW_GROUP_SIZE,
1,
None,
+ self.metadata_cache_factory.make_noop_cache(),
)?);
Arc::new(TraceDataLoadedExec::new(
@@ -854,7 +862,7 @@ impl CompactionService for CompactionServiceImpl {
// TODO deactivate corrupt tables
let files = download_files(&partitions, self.remote_fs.clone()).await?;
let keys = find_partition_keys(
- keys_with_counts(&files, key_len).await?,
+ keys_with_counts(&files, self.metadata_cache_factory.as_ref(), key_len).await?,
key_len,
// TODO should it respect table partition_split_threshold?
self.config.partition_split_threshold() as usize,
@@ -897,6 +905,7 @@ impl CompactionService for CompactionServiceImpl {
let mut s = MultiSplit::new(
self.meta_store.clone(),
self.remote_fs.clone(),
+ self.metadata_cache_factory.clone(),
keys,
key_len,
multi_partition_id,
@@ -939,6 +948,7 @@ impl CompactionService for CompactionServiceImpl {
let mut s = MultiSplit::new(
self.meta_store.clone(),
self.remote_fs.clone(),
+ self.metadata_cache_factory.clone(),
keys,
key_len,
multi_partition_id,
@@ -983,19 +993,21 @@ async fn find_partition_keys(
async fn read_files(
files: &[String],
+ metadata_cache_factory: &dyn MetadataCacheFactory,
key_len: usize,
    projection: Option<Vec<usize>>,
) -> Result<Arc<dyn ExecutionPlan>, CubeError> {
assert!(!files.is_empty());
    let mut inputs = Vec::<Arc<dyn ExecutionPlan>>::with_capacity(files.len());
for f in files {
- inputs.push(Arc::new(ParquetExec::try_from_files(
+ inputs.push(Arc::new(ParquetExec::try_from_files_with_cache(
&[f.as_str()],
projection.clone(),
None,
ROW_GROUP_SIZE,
1,
None,
+ metadata_cache_factory.make_noop_cache(),
)?));
}
let plan = Arc::new(UnionExec::new(inputs));
@@ -1012,10 +1024,17 @@ async fn read_files(
/// this key in the input files.
async fn keys_with_counts(
files: &[String],
+ metadata_cache_factory: &dyn MetadataCacheFactory,
key_len: usize,
) -> Result<HashAggregateExec, CubeError> {
let projection = (0..key_len).collect_vec();
- let plan = read_files(files, key_len, Some(projection.clone())).await?;
+ let plan = read_files(
+ files,
+ metadata_cache_factory,
+ key_len,
+ Some(projection.clone()),
+ )
+ .await?;
let fields = plan.schema();
let fields = fields.fields();
@@ -1404,6 +1423,8 @@ mod tests {
use datafusion::arrow::datatypes::Schema;
use datafusion::arrow::record_batch::RecordBatch;
use datafusion::physical_plan::collect;
+ use datafusion::physical_plan::parquet::BasicMetadataCacheFactory;
+ use datafusion::physical_plan::parquet::NoopParquetMetadataCache;
use std::fs;
use std::path::{Path, PathBuf};
@@ -1436,6 +1457,7 @@ mod tests {
None,
None,
false,
+ None,
)
.await
.unwrap();
@@ -1517,6 +1539,7 @@ mod tests {
Arc::new(chunk_store),
remote_fs,
Arc::new(config),
+ Arc::new(BasicMetadataCacheFactory::new()),
);
compaction_service
.compact(1, DataLoadedSize::new())
@@ -1656,6 +1679,7 @@ mod tests {
remote_fs.clone(),
Arc::new(cluster),
config.config_obj(),
+ Arc::new(BasicMetadataCacheFactory::new()),
10,
);
metastore
@@ -1682,6 +1706,7 @@ mod tests {
None,
None,
false,
+ None,
)
.await
.unwrap();
@@ -1742,6 +1767,7 @@ mod tests {
chunk_store.clone(),
remote_fs,
config.config_obj(),
+ Arc::new(BasicMetadataCacheFactory::new()),
);
compaction_service
.compact_in_memory_chunks(partition.get_id())
@@ -1829,6 +1855,7 @@ mod tests {
remote_fs.clone(),
Arc::new(MockCluster::new()),
config.config_obj(),
+ Arc::new(BasicMetadataCacheFactory::new()),
50,
);
@@ -1867,6 +1894,7 @@ mod tests {
None,
None,
false,
+ None,
)
.await
.unwrap();
@@ -1930,6 +1958,7 @@ mod tests {
chunk_store.clone(),
remote_fs.clone(),
config.config_obj(),
+ Arc::new(BasicMetadataCacheFactory::new()),
);
compaction_service
.compact(partition.get_id(), DataLoadedSize::new())
@@ -1953,8 +1982,16 @@ mod tests {
.await
.unwrap();
let reader = Arc::new(
- ParquetExec::try_from_path(local.as_str(), None, None, ROW_GROUP_SIZE, 1, None)
- .unwrap(),
+ ParquetExec::try_from_path_with_cache(
+ local.as_str(),
+ None,
+ None,
+ ROW_GROUP_SIZE,
+ 1,
+ None,
+ NoopParquetMetadataCache::new(),
+ )
+ .unwrap(),
);
let res_data = &collect(reader).await.unwrap()[0];
@@ -2152,6 +2189,7 @@ mod tests {
struct MultiSplit {
    meta: Arc<dyn MetaStore>,
    fs: Arc<dyn RemoteFs>,
+    metadata_cache_factory: Arc<dyn MetadataCacheFactory>,
    keys: Vec<Row>,
key_len: usize,
multi_partition_id: u64,
@@ -2167,6 +2205,7 @@ impl MultiSplit {
fn new(
        meta: Arc<dyn MetaStore>,
        fs: Arc<dyn RemoteFs>,
+        metadata_cache_factory: Arc<dyn MetadataCacheFactory>,
        keys: Vec<Row>,
key_len: usize,
multi_partition_id: u64,
@@ -2176,6 +2215,7 @@ impl MultiSplit {
MultiSplit {
meta,
fs,
+ metadata_cache_factory,
keys,
key_len,
multi_partition_id,
@@ -2229,10 +2269,15 @@ impl MultiSplit {
}
});
- let store = ParquetTableStore::new(p.index.get_row().clone(), ROW_GROUP_SIZE);
+ let store = ParquetTableStore::new(
+ p.index.get_row().clone(),
+ ROW_GROUP_SIZE,
+ self.metadata_cache_factory.clone(),
+ );
let records = if !in_files.is_empty() {
read_files(
            &in_files.into_iter().map(|(f, _)| f).collect::<Vec<_>>(),
+ self.metadata_cache_factory.as_ref(),
self.key_len,
None,
)
diff --git a/rust/cubestore/cubestore/src/store/mod.rs b/rust/cubestore/cubestore/src/store/mod.rs
index 559daa784cbe5..d5393c37a23a7 100644
--- a/rust/cubestore/cubestore/src/store/mod.rs
+++ b/rust/cubestore/cubestore/src/store/mod.rs
@@ -10,6 +10,7 @@ use datafusion::physical_plan::hash_aggregate::{
AggregateMode, AggregateStrategy, HashAggregateExec,
};
use datafusion::physical_plan::memory::MemoryExec;
+use datafusion::physical_plan::parquet::MetadataCacheFactory;
use datafusion::physical_plan::{ExecutionPlan, PhysicalExpr};
use serde::{de, Deserialize, Serialize};
extern crate bincode;
@@ -182,6 +183,7 @@ pub struct ChunkStore {
    remote_fs: Arc<dyn RemoteFs>,
    cluster: Arc<dyn Cluster>,
    config: Arc<dyn ConfigObj>,
+    metadata_cache_factory: Arc<dyn MetadataCacheFactory>,
    memory_chunks: RwLock<HashMap<u64, DataFrame>>,
chunk_size: usize,
}
@@ -342,6 +344,7 @@ impl ChunkStore {
        remote_fs: Arc<dyn RemoteFs>,
        cluster: Arc<dyn Cluster>,
        config: Arc<dyn ConfigObj>,
+        metadata_cache_factory: Arc<dyn MetadataCacheFactory>,
        chunk_size: usize,
    ) -> Arc<ChunkStore> {
let store = ChunkStore {
@@ -349,6 +352,7 @@ impl ChunkStore {
remote_fs,
cluster,
config,
+ metadata_cache_factory,
memory_chunks: RwLock::new(HashMap::new()),
chunk_size,
};
@@ -588,8 +592,10 @@ impl ChunkDataStore for ChunkStore {
)))])
} else {
let (local_file, index) = self.download_chunk(chunk, partition, index).await?;
+            let metadata_cache_factory: Arc<dyn MetadataCacheFactory> =
+ self.metadata_cache_factory.clone();
Ok(cube_ext::spawn_blocking(move || -> Result<_, CubeError> {
- let parquet = ParquetTableStore::new(index, ROW_GROUP_SIZE);
+ let parquet = ParquetTableStore::new(index, ROW_GROUP_SIZE, metadata_cache_factory);
Ok(parquet.read_columns(&local_file)?)
})
.await??)
@@ -804,6 +810,7 @@ mod tests {
use crate::{metastore::ColumnType, table::TableValue};
use cuberockstore::rocksdb::{Options, DB};
use datafusion::arrow::array::{Int64Array, StringArray};
+ use datafusion::physical_plan::parquet::BasicMetadataCacheFactory;
use std::fs;
use std::path::{Path, PathBuf};
@@ -888,6 +895,7 @@ mod tests {
None,
None,
false,
+ None,
)
.await
.unwrap();
@@ -942,6 +950,7 @@ mod tests {
remote_fs.clone(),
Arc::new(MockCluster::new()),
config.config_obj(),
+ Arc::new(BasicMetadataCacheFactory::new()),
10,
);
@@ -984,6 +993,7 @@ mod tests {
None,
None,
false,
+ None,
)
.await
.unwrap();
@@ -1044,6 +1054,7 @@ mod tests {
remote_fs.clone(),
Arc::new(MockCluster::new()),
config.config_obj(),
+ Arc::new(BasicMetadataCacheFactory::new()),
10,
);
@@ -1094,6 +1105,7 @@ mod tests {
None,
None,
false,
+ None,
)
.await
.unwrap();
@@ -1372,8 +1384,14 @@ impl ChunkStore {
let local_file = self.remote_fs.temp_upload_path(remote_path.clone()).await?;
let local_file = scopeguard::guard(local_file, ensure_temp_file_is_dropped);
let local_file_copy = local_file.clone();
+        let metadata_cache_factory: Arc<dyn MetadataCacheFactory> =
+ self.metadata_cache_factory.clone();
cube_ext::spawn_blocking(move || -> Result<(), CubeError> {
- let parquet = ParquetTableStore::new(index.get_row().clone(), ROW_GROUP_SIZE);
+ let parquet = ParquetTableStore::new(
+ index.get_row().clone(),
+ ROW_GROUP_SIZE,
+ metadata_cache_factory,
+ );
parquet.write_data(&local_file_copy, data)?;
Ok(())
})
diff --git a/rust/cubestore/cubestore/src/streaming/kafka.rs b/rust/cubestore/cubestore/src/streaming/kafka.rs
index a6c12a5dfe7e2..f3e9b57d39411 100644
--- a/rust/cubestore/cubestore/src/streaming/kafka.rs
+++ b/rust/cubestore/cubestore/src/streaming/kafka.rs
@@ -11,6 +11,7 @@ use async_std::stream;
use async_trait::async_trait;
use datafusion::arrow::array::ArrayRef;
use datafusion::cube_ext;
+use datafusion::physical_plan::parquet::MetadataCacheFactory;
use futures::Stream;
use json::object::Object;
use json::JsonValue;
@@ -59,6 +60,7 @@ impl KafkaStreamingSource {
        kafka_client: Arc<dyn KafkaClientService>,
        use_ssl: bool,
        trace_obj: Option<String>,
+        metadata_cache_factory: Arc<dyn MetadataCacheFactory>,
    ) -> Result<Self, CubeError> {
let (post_processing_plan, columns, unique_key_columns, seq_column_index) =
if let Some(select_statement) = select_statement {
@@ -69,7 +71,7 @@ impl KafkaStreamingSource {
columns.clone(),
source_columns,
);
- let plan = planner.build(select_statement.clone())?;
+ let plan = planner.build(select_statement.clone(), metadata_cache_factory)?;
let columns = plan.source_columns().clone();
let seq_column_index = plan.source_seq_column_index();
let unique_columns = plan.source_unique_columns().clone();
diff --git a/rust/cubestore/cubestore/src/streaming/kafka_post_processing.rs b/rust/cubestore/cubestore/src/streaming/kafka_post_processing.rs
index ab5034c06287e..4f47517e62e9e 100644
--- a/rust/cubestore/cubestore/src/streaming/kafka_post_processing.rs
+++ b/rust/cubestore/cubestore/src/streaming/kafka_post_processing.rs
@@ -10,8 +10,9 @@ use datafusion::logical_plan::{
};
use datafusion::physical_plan::empty::EmptyExec;
use datafusion::physical_plan::memory::MemoryExec;
+use datafusion::physical_plan::parquet::MetadataCacheFactory;
use datafusion::physical_plan::{collect, ExecutionPlan};
-use datafusion::prelude::ExecutionContext;
+use datafusion::prelude::{ExecutionConfig, ExecutionContext};
use datafusion::sql::parser::Statement as DFStatement;
use datafusion::sql::planner::SqlToRel;
use sqlparser::ast::Expr as SQExpr;
@@ -126,7 +127,11 @@ impl KafkaPostProcessPlanner {
}
}
- pub fn build(&self, select_statement: String) -> Result