Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
13 changes: 13 additions & 0 deletions cratedb_toolkit/info/cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@

from cratedb_toolkit import DatabaseCluster
from cratedb_toolkit.info.core import InfoContainer, JobInfoContainer, LogContainer
from cratedb_toolkit.info.job import TableTraffic
from cratedb_toolkit.util.app import make_cli
from cratedb_toolkit.util.cli import make_command
from cratedb_toolkit.util.data import jd
Expand Down Expand Up @@ -85,6 +86,18 @@ def job_information(ctx: click.Context):
jd(sample.to_dict())


@make_command(cli, "table-traffic", "Display information about table use.")
@click.pass_context
def table_traffic(ctx: click.Context):
    """
    Display aggregate information about table usage, derived from `sys.jobs_log`.
    """
    # NOTE(review): unlike the sibling info commands, this one does not honor
    # the `scrub` option yet; `TableTraffic` would need to accept it first.
    # The previously read-but-unused local `scrub` (Ruff F841) was removed.
    dc = DatabaseCluster.from_options(ctx.meta["address"])
    traffic = TableTraffic(adapter=dc.adapter)
    traffic.render()

Comment on lines +89 to +99
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🛠️ Refactor suggestion

Wire scrub, return structured data, and emit JSON like other info commands.

  • scrub is read but unused (Ruff F841).
  • TableTraffic.render() currently logs and prints in its implementation; the CLI should output structured JSON like the other commands.

Refactor to pass scrub, return data, and call jd().

Apply this diff:

-@make_command(cli, "table-traffic", "Display information about table use.")
+@make_command(cli, "table-traffic", "Display information about table usage.")
 @click.pass_context
 def table_traffic(ctx: click.Context):
     """
-    Display ad hoc job information.
+    Display aggregate table usage derived from sys.jobs_log.
     """
     scrub = ctx.meta.get("scrub", False)
     dc = DatabaseCluster.from_options(ctx.meta["address"])
-    traffic = TableTraffic(adapter=dc.adapter)
-    traffic.render()
+    traffic = TableTraffic(adapter=dc.adapter, scrub=scrub)
+    data = traffic.render()
+    jd(data)
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
@make_command(cli, "table-traffic", "Display information about table use.")
@click.pass_context
def table_traffic(ctx: click.Context):
"""
Display ad hoc job information.
"""
scrub = ctx.meta.get("scrub", False)
dc = DatabaseCluster.from_options(ctx.meta["address"])
traffic = TableTraffic(adapter=dc.adapter)
traffic.render()
@make_command(cli, "table-traffic", "Display information about table usage.")
@click.pass_context
def table_traffic(ctx: click.Context):
"""
Display aggregate table usage derived from sys.jobs_log.
"""
scrub = ctx.meta.get("scrub", False)
dc = DatabaseCluster.from_options(ctx.meta["address"])
traffic = TableTraffic(adapter=dc.adapter, scrub=scrub)
data = traffic.render()
jd(data)
🧰 Tools
🪛 Ruff (0.12.2)

95-95: Local variable scrub is assigned to but never used

Remove assignment to unused variable scrub

(F841)

🤖 Prompt for AI Agents
In cratedb_toolkit/info/cli.py around lines 89-99, the CLI reads scrub but
doesn't use it and calls TableTraffic.render() which prints/logs instead of
returning structured data; update the function to pass scrub into TableTraffic
(e.g., TableTraffic(adapter=dc.adapter, scrub=scrub)), call the TableTraffic
method that returns structured data (assign it to a variable), emit that data
via jd(data) like the other info commands, and return the data so the scrub
variable is actually used and the output is JSON-serializable.


@make_command(cli, "serve", help_serve)
@click.option("--listen", type=click.STRING, default=None, help="HTTP server listen address")
@click.option("--reload", is_flag=True, help="Dynamically reload changed files")
Expand Down
192 changes: 192 additions & 0 deletions cratedb_toolkit/info/job.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,192 @@
import dataclasses
import logging
import time
from typing import List, Any
import polars as pl
import attr
import sqlparse
from boltons.iterutils import flatten
from sqlparse.tokens import Keyword

from cratedb_toolkit.util.database import DatabaseAdapter, get_table_names

logger = logging.getLogger(__name__)


@attr.define
class Operation:
    """
    A single parsed SQL statement, classified by operation type
    together with the table names it references.
    """

    # The SQL operation keyword, e.g. SELECT, INSERT, UPDATE.
    op: str
    # The raw SQL statement text.
    stmt: str
    # Table names referenced by the statement, as reported by the parser.
    tables_symbols: List[str] = attr.field(factory=list)
    # tables_effective: List[str] = attr.field(factory=list)

@attr.define
class Operations:
    """
    A collection of parsed SQL operations, with aggregation helpers.
    """

    # The parsed operations, one per analyzed SQL statement.
    data: List[Operation]

    def foo(self) -> pl.DataFrame:
        """
        Summarize statement counts per table.

        Explodes the per-statement `tables_symbols` list column first, so
        that each referenced table is counted individually instead of
        grouping on whole per-statement arrays. Returns the summary as a
        Polars DataFrame with columns `tables_symbols` and `count`,
        sorted by descending count.
        """
        records = [attr.asdict(operation) for operation in self.data]
        if not records:
            # Nothing to analyze: return an empty, well-shaped frame.
            return pl.DataFrame({"tables_symbols": [], "count": []})
        frame = pl.from_records(records).explode("tables_symbols")
        return (
            frame.group_by("tables_symbols")
            .agg(pl.len().alias("count"))
            .sort("count", descending=True)
        )

Comment on lines +27 to +34
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🛠️ Refactor suggestion

Replace prints and fragile DataFrame SQL with a deterministic aggregation API.

  • print in library code is undesirable (Ruff T201).
  • Grouping by the list column tables_symbols counts per-statement arrays, not per-table usage.
  • df.sql("... FROM self ...") is brittle; prefer a stable Polars API.

Refactor to explode the list column, then group and count, and return a DataFrame.

Apply this diff:

-    def foo(self):
-        fj = [attr.asdict(j) for j in self.data]
-        df = pl.from_records(fj)
-        print(df)
-        #grouped = df.group_by("tables_symbols").agg([pl.sum("tables_symbols"), pl.sum("op")])
-        grouped = df.sql("SELECT tables_symbols, COUNT(op) FROM self GROUP BY tables_symbols")
-        print(grouped)
+    def summarize_by_table(self) -> pl.DataFrame:
+        fj = [attr.asdict(j) for j in self.data]
+        if not fj:
+            return pl.DataFrame({"tables_symbols": [], "count": []})
+        df = pl.from_records(fj)
+        # Count statements per table (explode list column first, then group).
+        df = df.explode("tables_symbols")
+        return (
+            df.group_by("tables_symbols")
+              .agg(pl.len().alias("count"))
+              .sort("count", descending=True)
+        )
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
def foo(self):
fj = [attr.asdict(j) for j in self.data]
df = pl.from_records(fj)
print(df)
#grouped = df.group_by("tables_symbols").agg([pl.sum("tables_symbols"), pl.sum("op")])
grouped = df.sql("SELECT tables_symbols, COUNT(op) FROM self GROUP BY tables_symbols")
print(grouped)
def summarize_by_table(self) -> pl.DataFrame:
fj = [attr.asdict(j) for j in self.data]
if not fj:
return pl.DataFrame({"tables_symbols": [], "count": []})
- df = pl.from_records(fj)
- # Count statements per table (explode list column first, then group).
- df = df.explode("tables_symbols")
- return (
- df.group_by("tables_symbols")
- .agg(pl.len().alias("count"))
- .sort("count", descending=True)
df = pl.from_records(fj)
# Count statements per table (explode list column first, then group).
df = df.explode("tables_symbols")
return (
df.group_by("tables_symbols")
.agg(pl.len().alias("count"))
.sort("count", descending=True)
)
🧰 Tools
🪛 Ruff (0.12.2)

30-30: print found

Remove print

(T201)


31-31: Found commented-out code

Remove commented-out code

(ERA001)


33-33: print found

Remove print

(T201)


class TableTraffic:
    """
    Report about table usage ("traffic"), derived from recent entries
    in CrateDB's `sys.jobs_log` system table.
    """

    def __init__(self, adapter: DatabaseAdapter):
        # Database adapter used to query `sys.jobs_log`.
        self.adapter = adapter

Comment on lines +36 to +40
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🛠️ Refactor suggestion

Pass scrub via constructor for consistent PII handling.

Expose scrub on TableTraffic to allow the CLI to control redaction.

Apply this diff:

-class TableTraffic:
-
-    def __init__(self, adapter: DatabaseAdapter):
-        self.adapter = adapter
+class TableTraffic:
+
+    def __init__(self, adapter: DatabaseAdapter, scrub: bool = False):
+        self.adapter = adapter
+        self.scrub = scrub
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
class TableTraffic:
def __init__(self, adapter: DatabaseAdapter):
self.adapter = adapter
class TableTraffic:
def __init__(self, adapter: DatabaseAdapter, scrub: bool = False):
self.adapter = adapter
self.scrub = scrub
🤖 Prompt for AI Agents
In cratedb_toolkit/info/job.py around lines 36 to 40, TableTraffic's constructor
must accept and store a scrub parameter so the CLI can control PII redaction;
add a scrub parameter (typed appropriately, e.g., Callable[[str], str] or a
Scrubber type) to __init__, assign it to self.scrub, and update any code that
constructs TableTraffic to pass the desired scrub function (for example the
adapter's scrub or a CLI-provided scrub). Ensure the attribute is used wherever
redaction is performed and update type hints/imports as needed.

def read_jobs_database(self, begin: int = 0, end: int = 0):
    """
    Read recent job records from `sys.jobs_log`.

    begin: Start of the time window, in epoch milliseconds.
           Defaults to ten hours before now (600 * 60 * 1000 ms).
    end:   End of the time window, in epoch milliseconds. Defaults to now.

    Returns job records as a list of dictionaries, ordered by `ended`
    ascending.
    """
    logger.info("Reading sys.jobs_log")
    # Current time in epoch milliseconds, matching sys.jobs_log timestamps.
    now = int(time.time() * 1000)
    end = end or now
    begin = begin or now - 600 * 60 * 1000
    # Exclude introspection queries against system tables, and restrict
    # to jobs that finished within the [begin, end] window.
    stmt = (
        f"SELECT "
        f"started, ended, classification, stmt, username, node "
        f"FROM sys.jobs_log "
        f"WHERE "
        f"stmt NOT LIKE '%sys.%' AND "
        f"stmt NOT LIKE '%information_schema.%' "
        f"AND ended BETWEEN {begin} AND {end} "
        f"ORDER BY ended ASC"
    )
    return self.adapter.run_sql(stmt, records=True)

def read_jobs(self, jobs) -> List[Operation]:
    """
    Parse the SQL statement of each job record into an `Operation`.
    """
    return [self.parse_expression(job["stmt"]) for job in jobs]

Comment on lines +58 to +64
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🛠️ Refactor suggestion

Redact raw SQL when scrubbing and prepare data for aggregation.

Use scrub to avoid leaking query text and deduplicate tables per statement to count per-table usage once per statement.

Apply this diff:

     def read_jobs(self, jobs) -> List[Operation]:
         result = []
         for job in jobs:
             sql = job["stmt"]
-            result.append(self.parse_expression(sql))
+            op = self.parse_expression(sql)
+            if self.scrub:
+                op.stmt = None  # redact raw SQL text
+            result.append(op)
         return result
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
def read_jobs(self, jobs) -> List[Operation]:
result = []
for job in jobs:
sql = job["stmt"]
result.append(self.parse_expression(sql))
return result
def read_jobs(self, jobs) -> List[Operation]:
result = []
for job in jobs:
sql = job["stmt"]
op = self.parse_expression(sql)
if self.scrub:
op.stmt = None # redact raw SQL text
result.append(op)
return result
🤖 Prompt for AI Agents
In cratedb_toolkit/info/job.py around lines 58 to 64, the code returns raw SQL
and may double-count tables; call scrub(sql) before parsing to avoid leaking
query text (e.g., scrubbed = scrub(sql)) and pass the scrubbed SQL to
parse_expression, then deduplicate any per-operation table list so each table is
counted once per statement (e.g., replace op.tables with a unique list/set)
before appending to the result; ensure the returned Operation objects contain
scrubbed SQL and deduplicated tables for aggregation.

@staticmethod
def parse_expression(sql: str) -> Operation:
    """
    Parse a single SQL statement into an `Operation`, classifying its
    operation type and collecting the table names it references.
    """
    logger.debug("Analyzing SQL: %s", sql)
    classifier = SqlStatementClassifier(expression=sql)
    if not classifier.operation:
        logger.warning("Unable to determine operation: %s", sql)
    if not classifier.table_names:
        logger.warning("Unable to determine table names: %s", sql)
    # Deduplicate table names while preserving order, so that a table
    # referenced multiple times within one statement is counted only
    # once during downstream aggregation.
    table_names = list(dict.fromkeys(classifier.table_names))
    return Operation(
        op=classifier.operation,
        stmt=sql,
        tables_symbols=table_names,
    )

Comment on lines +65 to +78
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue

Harden parsing: default unknown op, deduplicate table names.

Avoid None for op, and ensure tables per statement are unique to prevent overcounting.

Apply this diff:

     @staticmethod
     def parse_expression(sql: str) -> Operation:
         logger.debug(f"Analyzing SQL: {sql}")
         classifier = SqlStatementClassifier(expression=sql)
         if not classifier.operation:
             logger.warning(f"Unable to determine operation: {sql}")
         if not classifier.table_names:
             logger.warning(f"Unable to determine table names: {sql}")
-        return Operation(
-            op=classifier.operation,
+        table_names = list(dict.fromkeys(classifier.table_names))
+        return Operation(
+            op=classifier.operation or "UNKNOWN",
             stmt=sql,
-            tables_symbols=classifier.table_names,
+            tables_symbols=table_names,
         )
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
@staticmethod
def parse_expression(sql: str) -> Operation:
logger.debug(f"Analyzing SQL: {sql}")
classifier = SqlStatementClassifier(expression=sql)
if not classifier.operation:
logger.warning(f"Unable to determine operation: {sql}")
if not classifier.table_names:
logger.warning(f"Unable to determine table names: {sql}")
return Operation(
op=classifier.operation,
stmt=sql,
tables_symbols=classifier.table_names,
)
@staticmethod
def parse_expression(sql: str) -> Operation:
logger.debug(f"Analyzing SQL: {sql}")
classifier = SqlStatementClassifier(expression=sql)
if not classifier.operation:
logger.warning(f"Unable to determine operation: {sql}")
if not classifier.table_names:
logger.warning(f"Unable to determine table names: {sql}")
table_names = list(dict.fromkeys(classifier.table_names))
return Operation(
op=classifier.operation or "UNKNOWN",
stmt=sql,
tables_symbols=table_names,
)
🤖 Prompt for AI Agents
In cratedb_toolkit/info/job.py around lines 65 to 78, the parse_expression
function currently can return None for the operation and duplicate table names;
ensure you set a default op value (e.g., "UNKNOWN" or Operation.UNKNOWN) when
classifier.operation is falsy, and replace the tables_symbols assignment with a
deduplicated list preserving order (e.g., use dict.fromkeys or an OrderedDict)
so repeated table names are collapsed before constructing the Operation; keep
the existing logging but still return the safe default op and the unique tables
list.

def analyze_jobs(self, ops: Operations):
    """
    Aggregate the parsed operations and return the summary.
    """
    # Propagate the aggregation result instead of discarding it;
    # previously this returned None, so `render` logged "Result: None".
    return ops.foo()

def render(self):
    """
    Read recent jobs, parse their SQL statements, and report aggregate
    table usage. Returns the aggregation result to the caller.
    """
    jobs = self.read_jobs_database()
    logger.info("Analyzing %s jobs", len(jobs))
    ops = Operations(self.read_jobs(jobs))
    summary = self.analyze_jobs(ops)
    logger.info("Result: %s", summary)
    return summary

Comment on lines +79 to +88
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🛠️ Refactor suggestion

Make analyze/render return structured results; avoid side-effect-only logging.

Currently analyze_jobs returns None and render() logs that None. Return the computed summary and let the CLI serialize it.

Apply this diff:

-    def analyze_jobs(self, ops: Operations):
-        ops.foo()
+    def analyze_jobs(self, ops: Operations) -> pl.DataFrame:
+        return ops.summarize_by_table()
 
-    def render(self):
+    def render(self):
         jobs = self.read_jobs_database()
         logger.info(f"Analyzing {len(jobs)} jobs")
-        ops = Operations(self.read_jobs(jobs))
-        jobsa = self.analyze_jobs(ops)
-        logger.info(f"Result: {jobsa}")
+        ops = Operations(self.read_jobs(jobs))
+        summary_df = self.analyze_jobs(ops)
+        return summary_df.to_dicts()
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
def analyze_jobs(self, ops: Operations):
ops.foo()
def render(self):
jobs = self.read_jobs_database()
logger.info(f"Analyzing {len(jobs)} jobs")
ops = Operations(self.read_jobs(jobs))
jobsa = self.analyze_jobs(ops)
logger.info(f"Result: {jobsa}")
def analyze_jobs(self, ops: Operations) -> pl.DataFrame:
return ops.summarize_by_table()
def render(self):
jobs = self.read_jobs_database()
logger.info(f"Analyzing {len(jobs)} jobs")
- ops = Operations(self.read_jobs(jobs))
- jobsa = self.analyze_jobs(ops)
ops = Operations(self.read_jobs(jobs))
summary_df = self.analyze_jobs(ops)
return summary_df.to_dicts()
🤖 Prompt for AI Agents
In cratedb_toolkit/info/job.py around lines 79 to 88, analyze_jobs currently
calls ops.foo() and returns None, and render() logs that None; change
analyze_jobs to compute and return a structured summary (e.g., dict or
dataclass) of the operations instead of returning None, and update render() to
call result = self.analyze_jobs(ops), stop printing/logging the raw None, and
return that structured result so the CLI can serialize it; ensure no
side-effect-only logging remains — keep logging minimal or move it to the
caller.


@dataclasses.dataclass
class SqlStatementClassifier:
    """
    Helper to classify an SQL statement.

    Here, most importantly: Provide the `is_dql` property that
    signals truthfulness for read-only SQL SELECT statements only.
    """

    # The SQL expression to classify.
    expression: str
    # When enabled, `is_dql` unconditionally returns true.
    permit_all: bool = False

    # Cached result of `sqlparse.parse()`, computed lazily on first use.
    _parsed_sqlparse: Any = dataclasses.field(init=False, default=None)

    def __post_init__(self) -> None:
        # Normalize: treat None as the empty expression, and trim whitespace.
        if self.expression is None:
            self.expression = ""
        if self.expression:
            self.expression = self.expression.strip()

    def parse_sqlparse(self) -> List[sqlparse.sql.Statement]:
        """
        Parse expression using traditional `sqlparse` library.

        The parse result is cached after the first invocation.
        """
        if self._parsed_sqlparse is None:
            self._parsed_sqlparse = sqlparse.parse(self.expression)
        return self._parsed_sqlparse

@property
def is_dql(self) -> bool:
    """
    Is it a DQL statement, which effectively invokes read-only operations only?
    """

    # An empty expression is not a statement at all, hence not DQL.
    if not self.expression:
        return False

    # Explicit opt-out: treat any statement as permitted.
    if self.permit_all:
        return True

    # Check if the expression is valid and if it's a DQL/SELECT statement,
    # also trying to consider `SELECT ... INTO ...` and evasive
    # `SELECT * FROM users; \uff1b DROP TABLE users` statements.
    return self.is_select and not self.is_camouflage

@property
def is_select(self) -> bool:
    """
    Whether the expression is an SQL SELECT statement,
    as determined from the parsed operation type.
    """
    return self.operation == "SELECT"

@property
def operation(self) -> str:
    """
    The SQL operation: SELECT, INSERT, UPDATE, DELETE, CREATE, etc.

    Returns an empty string when the expression contains no parseable
    statement (e.g. when it is empty).
    """
    parsed = self.parse_sqlparse()
    # sqlparse.parse("") yields an empty list; guard against IndexError.
    if not parsed:
        return ""
    return parsed[0].get_type().upper()

Comment on lines +143 to +149
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue

Guard against empty parses in operation to prevent IndexError.

sqlparse.parse("") returns an empty list; parsed[0] would crash.

Apply this diff:

     @property
     def operation(self) -> str:
         """
         The SQL operation: SELECT, INSERT, UPDATE, DELETE, CREATE, etc.
         """
-        parsed = self.parse_sqlparse()
-        return parsed[0].get_type().upper()
+        parsed = self.parse_sqlparse()
+        if not parsed:
+            return ""
+        return (parsed[0].get_type() or "").upper()
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
def operation(self) -> str:
"""
The SQL operation: SELECT, INSERT, UPDATE, DELETE, CREATE, etc.
"""
parsed = self.parse_sqlparse()
return parsed[0].get_type().upper()
@property
def operation(self) -> str:
"""
The SQL operation: SELECT, INSERT, UPDATE, DELETE, CREATE, etc.
"""
parsed = self.parse_sqlparse()
if not parsed:
return ""
return (parsed[0].get_type() or "").upper()
🤖 Prompt for AI Agents
In cratedb_toolkit/info/job.py around lines 143-149, the operation() method
currently assumes sqlparse.parse() returns at least one statement and accessing
parsed[0] can raise IndexError for empty SQL; update the method to check whether
parsed is non-empty before accessing parsed[0] and return an empty string (or a
sensible default) if it is empty, otherwise call get_type().upper() on the first
parsed statement; ensure the guard covers both an empty list and any unexpected
None so no IndexError occurs.

@property
def table_names(self) -> List[str]:
    """
    The names of the tables referenced by the expression,
    flattened across all contained statements.
    """
    return flatten(get_table_names(self.expression))

@property
def is_camouflage(self) -> bool:
    """
    Innocent-looking `SELECT` statements can evade filters.

    True when the statement is a `SELECT ... INTO ...`, or when the
    expression smuggles in additional statements.
    """
    return self.is_select_into or self.is_evasive

@property
def is_select_into(self) -> bool:
    """
    Use traditional `sqlparse` for catching `SELECT ... INTO ...` statements.
    Examples:
        SELECT * INTO foobar FROM bazqux
        SELECT * FROM bazqux INTO foobar
    """
    parsed = self.parse_sqlparse()
    # sqlparse.parse("") yields an empty list; guard against IndexError.
    if not parsed:
        return False
    # Flatten all tokens (including nested ones) and match on type+value.
    return any(
        token.ttype is Keyword and token.value.upper() == "INTO"
        for token in parsed[0].flatten()
    )
Comment on lines +165 to +177
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🛠️ Refactor suggestion

Guard is_select_into against empty parses.

Prevent potential IndexError when no statements are parsed.

Apply this diff:

         # Flatten all tokens (including nested ones) and match on type+value.
-        statement = self.parse_sqlparse()[0]
+        parsed = self.parse_sqlparse()
+        if not parsed:
+            return False
+        statement = parsed[0]
         return any(
             token.ttype is Keyword and token.value.upper() == "INTO"
             for token in statement.flatten()
         )
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
def is_select_into(self) -> bool:
"""
Use traditional `sqlparse` for catching `SELECT ... INTO ...` statements.
Examples:
SELECT * INTO foobar FROM bazqux
SELECT * FROM bazqux INTO foobar
"""
# Flatten all tokens (including nested ones) and match on type+value.
statement = self.parse_sqlparse()[0]
return any(
token.ttype is Keyword and token.value.upper() == "INTO"
for token in statement.flatten()
)
def is_select_into(self) -> bool:
"""
Use traditional `sqlparse` for catching `SELECT ... INTO ...` statements.
Examples:
SELECT * INTO foobar FROM bazqux
SELECT * FROM bazqux INTO foobar
"""
# Flatten all tokens (including nested ones) and match on type+value.
parsed = self.parse_sqlparse()
if not parsed:
return False
statement = parsed[0]
return any(
token.ttype is Keyword and token.value.upper() == "INTO"
for token in statement.flatten()
)
🤖 Prompt for AI Agents
In cratedb_toolkit/info/job.py around lines 165 to 177, the is_select_into
method assumes parse_sqlparse() always returns at least one statement and can
raise IndexError; add a guard to handle empty or falsy parses by checking the
result (e.g., assign statements = self.parse_sqlparse(); if not statements:
return False) before accessing statements[0], then proceed with the existing
flatten-and-check logic so the method safely returns False when no statements
are parsed.


@property
def is_evasive(self) -> bool:
    """
    Use traditional `sqlparse` for catching evasive SQL statements.

    Multiple statements within a single expression are rejected,
    because they may smuggle in harmful operations, e.g. for
    potential SQL injections.

    Examples:

        SELECT * FROM users; \uff1b DROP TABLE users
    """
    parsed = self.parse_sqlparse()
    # More than one parsed statement means the expression attempts to
    # execute several commands at once.
    return len(parsed) > 1
2 changes: 1 addition & 1 deletion cratedb_toolkit/util/database.py
Original file line number Diff line number Diff line change
Expand Up @@ -483,6 +483,6 @@ def get_table_names(sql: str) -> t.List[t.List[str]]:
for statement in statements:
local_names = []
for table in statement.metadata.tables:
local_names.append(table.name)
local_names.append(table.fqn)
names.append(local_names)
return names
Loading