From 09a1c3ad069ba1bb71a1cb7736dc2080c08e2b0d Mon Sep 17 00:00:00 2001
From: Ian <47256454+IanRFerguson@users.noreply.github.com>
Date: Sun, 8 Feb 2026 16:55:46 -0500
Subject: [PATCH 1/3] fix streaming logic + passing tests

---
 klondike/scripts/stream_csv_to_database.py   |  22 ++-
 tests/scripts/test_stream_csv_to_bigquery.py | 159 ++++++++-----------
 2 files changed, 84 insertions(+), 97 deletions(-)

diff --git a/klondike/scripts/stream_csv_to_database.py b/klondike/scripts/stream_csv_to_database.py
index 7bc0f6a..3b7d0ba 100644
--- a/klondike/scripts/stream_csv_to_database.py
+++ b/klondike/scripts/stream_csv_to_database.py
@@ -32,17 +32,24 @@ def stream_csv_to_database(
         **bigquery_kwargs: Additional keyword arguments to pass to the BigQuery
             upload method (e.g., write_disposition, etc.).
     """
-    lazy_df = pl.scan_csv(
-        csv_path,
-        separator=csv_separator,
-        infer_schema_length=infer_schema_length,
-    )
-
     records_written = 0
+    skip_rows_count = 0
     logger.info(
         f"Streaming data from {csv_path} to table {destination_table_name} in batches of {batch_size}..."
     )
-    for batch_df in lazy_df.collect().iter_slices(n_rows=batch_size):
+
+    while True:
+        batch_df = pl.read_csv(
+            csv_path,
+            separator=csv_separator,
+            infer_schema_length=infer_schema_length,
+            skip_rows_after_header=skip_rows_count,
+            n_rows=batch_size,
+        )
+
+        if len(batch_df) == 0:
+            break
+
         logger.debug(f"Uploading batch to table {destination_table_name}...")
         connector.write_dataframe(
             df=batch_df,
@@ -50,6 +57,7 @@ def stream_csv_to_database(
             **bigquery_kwargs,
         )
         records_written += len(batch_df)
+        skip_rows_count += len(batch_df)
 
     logger.info(
         f"Finished streaming {records_written} rows to {destination_table_name}"
diff --git a/tests/scripts/test_stream_csv_to_bigquery.py b/tests/scripts/test_stream_csv_to_bigquery.py
index b4675d0..55724b3 100644
--- a/tests/scripts/test_stream_csv_to_bigquery.py
+++ b/tests/scripts/test_stream_csv_to_bigquery.py
@@ -32,15 +32,13 @@ def setUp(self):
             ]
         )
 
-    @patch("klondike.scripts.stream_csv_to_database.pl.scan_csv")
-    def test_stream_csv_basic(self, mock_scan_csv):
+    @patch("klondike.scripts.stream_csv_to_database.pl.read_csv")
+    def test_stream_csv_basic(self, mock_read_csv):
         """
         Test basic CSV streaming with default parameters
         """
-        # Mock the lazy dataframe and batches
-        mock_lazy_df = MagicMock()
-        mock_lazy_df.collect().iter_slices.return_value = [self.batch_1, self.batch_2]
-        mock_scan_csv.return_value = mock_lazy_df
+        # Mock read_csv to return batches then empty dataframe
+        mock_read_csv.side_effect = [self.batch_1, self.batch_2, pl.DataFrame()]
 
         # Execute the function
         stream_csv_to_database(
@@ -49,15 +47,16 @@ def test_stream_csv_basic(self, mock_scan_csv):
             destination_table_name="test_dataset.test_table",
         )
 
-        # Verify scan_csv was called with correct parameters
-        mock_scan_csv.assert_called_once_with(
-            "/path/to/test.csv",
-            separator=",",
-            infer_schema_length=0,
-        )
+        # Verify read_csv was called with correct parameters
+        self.assertEqual(mock_read_csv.call_count, 3)
 
-        # Verify iter_slices was called with default batch size
-        mock_lazy_df.collect().iter_slices.assert_called_once_with(n_rows=10_000)
+        # Check first call
+        first_call = mock_read_csv.call_args_list[0]
+        self.assertEqual(first_call[0][0], "/path/to/test.csv")
+        self.assertEqual(first_call[1]["separator"], ",")
+        self.assertEqual(first_call[1]["infer_schema_length"], 0)
+        self.assertEqual(first_call[1]["skip_rows_after_header"], 0)
+        self.assertEqual(first_call[1]["n_rows"], 10_000)
 
         # Verify write_dataframe was called twice (once per batch)
         self.assertEqual(self.mock_bq_connector.write_dataframe.call_count, 2)
@@ -67,18 +66,18 @@ def test_stream_csv_basic(self, mock_scan_csv):
         self.assertEqual(calls[0][1]["table_name"], "test_dataset.test_table")
         self.assertEqual(calls[1][1]["table_name"], "test_dataset.test_table")
 
-    @patch("klondike.scripts.stream_csv_to_database.pl.scan_csv")
-    def test_stream_csv_custom_batch_size(self, mock_scan_csv):
+    @patch("klondike.scripts.stream_csv_to_database.pl.read_csv")
+    def test_stream_csv_custom_batch_size(self, mock_read_csv):
         """
         Test CSV streaming with custom batch size
         """
-        mock_lazy_df = MagicMock()
-        mock_lazy_df.collect().iter_slices.return_value = [
+        # Mock read_csv to return batches then empty dataframe
+        mock_read_csv.side_effect = [
             self.batch_1,
             self.batch_2,
             self.batch_3,
+            pl.DataFrame(),
         ]
-        mock_scan_csv.return_value = mock_lazy_df
 
         # Execute with custom batch size
         stream_csv_to_database(
@@ -88,20 +87,20 @@ def test_stream_csv_custom_batch_size(self, mock_scan_csv):
             batch_size=500,
         )
 
-        # Verify iter_slices was called with custom batch size
-        mock_lazy_df.collect().iter_slices.assert_called_once_with(n_rows=500)
+        # Verify read_csv was called with custom batch size
+        self.assertTrue(
+            any(call[1]["n_rows"] == 500 for call in mock_read_csv.call_args_list)
+        )
 
         # Verify write_dataframe was called three times
         self.assertEqual(self.mock_bq_connector.write_dataframe.call_count, 3)
 
-    @patch("klondike.scripts.stream_csv_to_database.pl.scan_csv")
-    def test_stream_csv_custom_separator(self, mock_scan_csv):
+    @patch("klondike.scripts.stream_csv_to_database.pl.read_csv")
+    def test_stream_csv_custom_separator(self, mock_read_csv):
         """
         Test CSV streaming with custom separator
         """
-        mock_lazy_df = MagicMock()
-        mock_lazy_df.collect().iter_slices.return_value = [self.batch_1]
-        mock_scan_csv.return_value = mock_lazy_df
+        mock_read_csv.side_effect = [self.batch_1, pl.DataFrame()]
 
         # Execute with custom separator
         stream_csv_to_database(
@@ -111,21 +110,18 @@ def test_stream_csv_custom_separator(self, mock_scan_csv):
             csv_separator="\t",
         )
 
-        # Verify scan_csv was called with tab separator
-        mock_scan_csv.assert_called_once_with(
-            "/path/to/test.tsv",
-            separator="\t",
-            infer_schema_length=0,
-        )
+        # Verify read_csv was called with tab separator
+        first_call = mock_read_csv.call_args_list[0]
+        self.assertEqual(first_call[0][0], "/path/to/test.tsv")
+        self.assertEqual(first_call[1]["separator"], "\t")
+        self.assertEqual(first_call[1]["infer_schema_length"], 0)
 
-    @patch("klondike.scripts.stream_csv_to_database.pl.scan_csv")
-    def test_stream_csv_with_schema_inference(self, mock_scan_csv):
+    @patch("klondike.scripts.stream_csv_to_database.pl.read_csv")
+    def test_stream_csv_with_schema_inference(self, mock_read_csv):
         """
         Test CSV streaming with schema inference
         """
-        mock_lazy_df = MagicMock()
-        mock_lazy_df.collect().iter_slices.return_value = [self.batch_1]
-        mock_scan_csv.return_value = mock_lazy_df
+        mock_read_csv.side_effect = [self.batch_1, pl.DataFrame()]
 
         # Execute with schema inference
         stream_csv_to_database(
@@ -135,21 +131,18 @@ def test_stream_csv_with_schema_inference(self, mock_scan_csv):
             infer_schema_length=1000,
         )
 
-        # Verify scan_csv was called with infer_schema_length
-        mock_scan_csv.assert_called_once_with(
-            "/path/to/test.csv",
-            separator=",",
-            infer_schema_length=1000,
-        )
+        # Verify read_csv was called with infer_schema_length
+        first_call = mock_read_csv.call_args_list[0]
+        self.assertEqual(first_call[0][0], "/path/to/test.csv")
+        self.assertEqual(first_call[1]["separator"], ",")
+        self.assertEqual(first_call[1]["infer_schema_length"], 1000)
 
-    @patch("klondike.scripts.stream_csv_to_database.pl.scan_csv")
-    def test_stream_csv_with_bigquery_kwargs(self, mock_scan_csv):
+    @patch("klondike.scripts.stream_csv_to_database.pl.read_csv")
+    def test_stream_csv_with_bigquery_kwargs(self, mock_read_csv):
         """
         Test CSV streaming with additional BigQuery kwargs
         """
-        mock_lazy_df = MagicMock()
-        mock_lazy_df.collect().iter_slices.return_value = [self.batch_1, self.batch_2]
-        mock_scan_csv.return_value = mock_lazy_df
+        mock_read_csv.side_effect = [self.batch_1, self.batch_2, pl.DataFrame()]
 
         # Execute with BigQuery kwargs
         stream_csv_to_database(
@@ -166,14 +159,12 @@ def test_stream_csv_with_bigquery_kwargs(self, mock_scan_csv):
         self.assertEqual(call_args[1]["if_exists"], "append")
         self.assertEqual(call_args[1]["max_bad_records"], 10)
 
-    @patch("klondike.scripts.stream_csv_to_database.pl.scan_csv")
-    def test_stream_csv_single_batch(self, mock_scan_csv):
+    @patch("klondike.scripts.stream_csv_to_database.pl.read_csv")
+    def test_stream_csv_single_batch(self, mock_read_csv):
         """
         Test CSV streaming with single batch (small file)
         """
-        mock_lazy_df = MagicMock()
-        mock_lazy_df.collect().iter_slices.return_value = [self.batch_1]
-        mock_scan_csv.return_value = mock_lazy_df
+        mock_read_csv.side_effect = [self.batch_1, pl.DataFrame()]
 
         # Execute
         stream_csv_to_database(
@@ -185,14 +176,12 @@ def test_stream_csv_single_batch(self, mock_scan_csv):
         # Verify write_dataframe was called only once
         self.assertEqual(self.mock_bq_connector.write_dataframe.call_count, 1)
 
-    @patch("klondike.scripts.stream_csv_to_database.pl.scan_csv")
-    def test_stream_csv_empty_batches(self, mock_scan_csv):
+    @patch("klondike.scripts.stream_csv_to_database.pl.read_csv")
+    def test_stream_csv_empty_batches(self, mock_read_csv):
         """
         Test CSV streaming with no batches (empty file)
         """
-        mock_lazy_df = MagicMock()
-        mock_lazy_df.collect().iter_slices.return_value = []
-        mock_scan_csv.return_value = mock_lazy_df
+        mock_read_csv.return_value = pl.DataFrame()
 
         # Execute
         stream_csv_to_database(
@@ -204,14 +193,12 @@ def test_stream_csv_empty_batches(self, mock_scan_csv):
         # Verify write_dataframe was never called
         self.mock_bq_connector.write_dataframe.assert_not_called()
 
-    @patch("klondike.scripts.stream_csv_to_database.pl.scan_csv")
-    def test_stream_csv_qualified_table_name(self, mock_scan_csv):
+    @patch("klondike.scripts.stream_csv_to_database.pl.read_csv")
+    def test_stream_csv_qualified_table_name(self, mock_read_csv):
         """
         Test that table name is correctly qualified with dataset
         """
-        mock_lazy_df = MagicMock()
-        mock_lazy_df.collect().iter_slices.return_value = [self.batch_1]
-        mock_scan_csv.return_value = mock_lazy_df
+        mock_read_csv.side_effect = [self.batch_1, pl.DataFrame()]
 
         # Execute with different dataset and table names
         stream_csv_to_database(
@@ -224,18 +211,17 @@ def test_stream_csv_qualified_table_name(self, mock_scan_csv):
         call_args = self.mock_bq_connector.write_dataframe.call_args
         self.assertEqual(call_args[1]["table_name"], "my_dataset.my_table")
 
-    @patch("klondike.scripts.stream_csv_to_database.pl.scan_csv")
-    def test_stream_csv_batch_ordering(self, mock_scan_csv):
+    @patch("klondike.scripts.stream_csv_to_database.pl.read_csv")
+    def test_stream_csv_batch_ordering(self, mock_read_csv):
         """
         Test that batches are processed in correct order
         """
-        mock_lazy_df = MagicMock()
-        mock_lazy_df.collect().iter_slices.return_value = [
+        mock_read_csv.side_effect = [
             self.batch_1,
             self.batch_2,
             self.batch_3,
+            pl.DataFrame(),
         ]
-        mock_scan_csv.return_value = mock_lazy_df
 
         # Execute
         stream_csv_to_database(
@@ -253,16 +239,14 @@ def test_stream_csv_batch_ordering(self, mock_scan_csv):
         # but we can verify the number of calls matches the number of batches
         self.assertEqual(len(calls), 3)
 
-    @patch("klondike.scripts.stream_csv_to_database.pl.scan_csv")
-    def test_stream_csv_large_batch_count(self, mock_scan_csv):
+    @patch("klondike.scripts.stream_csv_to_database.pl.read_csv")
+    def test_stream_csv_large_batch_count(self, mock_read_csv):
         """
         Test streaming with many batches
         """
-        mock_lazy_df = MagicMock()
-        # Simulate 100 batches
-        batches = [self.batch_1 for _ in range(100)]
-        mock_lazy_df.collect().iter_slices.return_value = batches
-        mock_scan_csv.return_value = mock_lazy_df
+        # Simulate 100 batches plus empty dataframe at end
+        batches = [self.batch_1 for _ in range(100)] + [pl.DataFrame()]
+        mock_read_csv.side_effect = batches
 
         # Execute
         stream_csv_to_database(
@@ -275,15 +259,11 @@ def test_stream_csv_large_batch_count(self, mock_scan_csv):
         # Verify write_dataframe was called 100 times
         self.assertEqual(self.mock_bq_connector.write_dataframe.call_count, 100)
 
-    @patch("klondike.scripts.stream_csv_to_database.pl.scan_csv")
-    def test_stream_csv_different_file_paths(self, mock_scan_csv):
+    @patch("klondike.scripts.stream_csv_to_database.pl.read_csv")
+    def test_stream_csv_different_file_paths(self, mock_read_csv):
         """
         Test with various file path formats
         """
-        mock_lazy_df = MagicMock()
-        mock_lazy_df.collect().iter_slices.return_value = [self.batch_1]
-        mock_scan_csv.return_value = mock_lazy_df
-
         test_paths = [
             "/absolute/path/to/file.csv",
             "relative/path/file.csv",
             "./current/dir/file.csv",
             "../parent/dir/file.csv",
             "/path/with spaces/file.csv",
         ]
 
         for path in test_paths:
-            mock_scan_csv.reset_mock()
+            mock_read_csv.reset_mock()
+            mock_read_csv.side_effect = [self.batch_1, pl.DataFrame()]
             self.mock_bq_connector.reset_mock()
 
             stream_csv_to_database(
@@ -302,23 +283,21 @@ def test_stream_csv_different_file_paths(self, mock_scan_csv):
                 destination_table_name="test_dataset.test_table",
             )
 
-            # Verify scan_csv was called with the correct path
-            mock_scan_csv.assert_called_once()
-            self.assertEqual(mock_scan_csv.call_args[0][0], path)
+            # Verify read_csv was called with the correct path
+            self.assertTrue(mock_read_csv.called)
+            self.assertEqual(mock_read_csv.call_args_list[0][0][0], path)
 
-    @patch("klondike.scripts.stream_csv_to_database.pl.scan_csv")
-    def test_stream_csv_preserves_dataframe_content(self, mock_scan_csv):
+    @patch("klondike.scripts.stream_csv_to_database.pl.read_csv")
+    def test_stream_csv_preserves_dataframe_content(self, mock_read_csv):
         """
         Test that dataframe content is preserved during streaming
         """
-        mock_lazy_df = MagicMock()
         test_df = pl.DataFrame(
             [
                 {"id": 99, "name": "Test", "value": 999, "active": True},
             ]
         )
-        mock_lazy_df.collect().iter_slices.return_value = [test_df]
-        mock_scan_csv.return_value = mock_lazy_df
+        mock_read_csv.side_effect = [test_df, pl.DataFrame()]
 
         # Execute
         stream_csv_to_database(

From 9bbddefcbb5328ea3f07ee99c8f5256eb6b7537f Mon Sep 17 00:00:00 2001
From: Ian <47256454+IanRFerguson@users.noreply.github.com>
Date: Sun, 8 Feb 2026 17:02:23 -0500
Subject: [PATCH 2/3] update semantic version

---
 pyproject.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pyproject.toml b/pyproject.toml
index 0412bf2..4379e95 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "klondike"
-version = "0.3.0" +version = "0.3.1" authors = [{ "name" = "Ian Richard Ferguson", "email" = "IAN@ianferguson.dev" }] description = "Klondike is a suite of database connectors, powered by Polars" requires-python = "~=3.12.0" From eea8f735b367116ad99008903063a6e8bcff7b0a Mon Sep 17 00:00:00 2001 From: Ian <47256454+IanRFerguson@users.noreply.github.com> Date: Sun, 8 Feb 2026 17:31:05 -0500 Subject: [PATCH 3/3] add dev dependencies + local helpers --- devops/create_development_file.py | 71 +++++++++++++++++++++++++++++++ pyproject.toml | 3 ++ uv.lock | 44 ++++++++++++++++++- 3 files changed, 117 insertions(+), 1 deletion(-) create mode 100644 devops/create_development_file.py diff --git a/devops/create_development_file.py b/devops/create_development_file.py new file mode 100644 index 0000000..5a396c0 --- /dev/null +++ b/devops/create_development_file.py @@ -0,0 +1,71 @@ +import os +from uuid import uuid4 + +import click +import polars as pl +from faker import Faker +from tqdm import tqdm + +##### + +FAKE_CLIENT = Faker("en_us") + + +def generate_fake_data(num_records: int) -> pl.DataFrame: + """ + Generates a Polars DataFrame of fake PII data for testing purposes. + + Args: + num_records: The number of fake records to generate. + + Returns: + A Polars DataFrame containing the generated fake data. + """ + + output = [] + for _ in tqdm(range(num_records), desc="Generating fake data", unit="record"): + output.append( + { + "id": uuid4().hex, + "name": FAKE_CLIENT.name(), + "address": FAKE_CLIENT.address(), + "city": FAKE_CLIENT.city(), + "state": FAKE_CLIENT.state(), + "email": FAKE_CLIENT.email(), + "phone": FAKE_CLIENT.phone_number(), + } + ) + + return pl.DataFrame(output) + + +@click.command() +@click.option( + "--output-dir", default=".", help="Directory to save the generated CSV file." +) +@click.option( + "--output-filename", default="fake_data.csv", help="Name of the generated CSV file." 
+)
+@click.option(
+    "--num-records",
+    default=10_000_000,
+    help="Number of fake records to generate (default: 10,000,000).",
+)
+def cli(output_dir: str, output_filename: str, num_records: int) -> None:
+    """Generate a CSV file with fake data for testing purposes."""
+    df = generate_fake_data(num_records)
+
+    # Combine the output directory and filename to create the full output path
+    output_path = os.path.join(output_dir, output_filename)
+
+    # Save the output DataFrame to a CSV file
+    df.write_csv(output_path)
+
+    # Notify the end user
+    click.echo(f"Generated {num_records} fake records and saved to {output_path}")
+
+
+#####
+
+if __name__ == "__main__":
+    cli()
diff --git a/pyproject.toml b/pyproject.toml
index 4379e95..a08e372 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -40,6 +40,9 @@ dev = [
     "mypy~=1.9.0",  # Strict static type checking
     "pre-commit~=3.4.0",
     "jupyter>=1.1.1",
+    "faker==40.4.0",
+    "tqdm==4.67.3",
+    "click==8.3.1",
 ]
 
 [tool.ruff]
diff --git a/uv.lock b/uv.lock
index 18e97f9..a573999 100644
--- a/uv.lock
+++ b/uv.lock
@@ -239,6 +239,18 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/0a/4c/925909008ed5a988ccbb72dcc897407e5d6d3bd72410d69e051fc0c14647/charset_normalizer-3.4.4-py3-none-any.whl", hash = "sha256:7a32c560861a02ff789ad905a2fe94e3f840803362c84fecf1851cb4cf3dc37f", size = 53402, upload-time = "2025-10-14T04:42:31.76Z" },
 ]
 
+[[package]]
+name = "click"
+version = "8.3.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "colorama", marker = "sys_platform == 'win32'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/3d/fa/656b739db8587d7b5dfa22e22ed02566950fbfbcdc20311993483657a5c0/click-8.3.1.tar.gz", hash = "sha256:12ff4785d337a1bb490bb7e9c2b1ee5da3112e94a8622f26a6c77f5d2fc6842a", size = 295065, upload-time = "2025-11-15T20:45:42.706Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/98/78/01c019cdb5d6498122777c1a43056ebb3ebfeef2076d9d026bfe15583b2b/click-8.3.1-py3-none-any.whl", hash = "sha256:981153a64e25f12d547d3426c367a4857371575ee7ad18df2a6183ab0545b2a6", size = 108274, upload-time = "2025-11-15T20:45:41.139Z" },
+]
+
 [[package]]
 name = "colorama"
 version = "0.4.6"
@@ -391,6 +403,18 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/c1/ea/53f2148663b321f21b5a606bd5f191517cf40b7072c0497d3c92c4a13b1e/executing-2.2.1-py2.py3-none-any.whl", hash = "sha256:760643d3452b4d777d295bb167ccc74c64a81df23fb5e08eff250c425a4b2017", size = 28317, upload-time = "2025-09-01T09:48:08.5Z" },
 ]
 
+[[package]]
+name = "faker"
+version = "40.4.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "tzdata", marker = "sys_platform == 'win32'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/fc/7e/dccb7013c9f3d66f2e379383600629fec75e4da2698548bdbf2041ea4b51/faker-40.4.0.tar.gz", hash = "sha256:76f8e74a3df28c3e2ec2caafa956e19e37a132fdc7ea067bc41783affcfee364", size = 1952221, upload-time = "2026-02-06T23:30:15.515Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/ac/63/58efa67c10fb27810d34351b7a10f85f109a7f7e2a07dc3773952459c47b/faker-40.4.0-py3-none-any.whl", hash = "sha256:486d43c67ebbb136bc932406418744f9a0bdf2c07f77703ea78b58b77e9aa443", size = 1987060, upload-time = "2026-02-06T23:30:13.44Z" },
+]
+
 [[package]]
 name = "fastjsonschema"
 version = "2.21.2"
@@ -1014,7 +1038,7 @@ wheels = [
 
 [[package]]
 name = "klondike"
-version = "0.3.0"
+version = "0.3.1"
 source = { editable = "." }
 dependencies = [
     { name = "colorlogger" },
@@ -1027,6 +1051,8 @@ dependencies = [
 
 [package.dev-dependencies]
 dev = [
+    { name = "click" },
+    { name = "faker" },
     { name = "jupyter" },
     { name = "mypy" },
     { name = "pre-commit" },
@@ -1034,6 +1060,7 @@ dev = [
     { name = "pytest-cov" },
     { name = "pytest-watcher" },
     { name = "ruff" },
+    { name = "tqdm" },
 ]
 
 [package.metadata]
@@ -1048,6 +1075,8 @@ requires-dist = [
 
 [package.metadata.requires-dev]
 dev = [
+    { name = "click", specifier = "==8.3.1" },
+    { name = "faker", specifier = "==40.4.0" },
     { name = "jupyter", specifier = ">=1.1.1" },
     { name = "mypy", specifier = "~=1.9.0" },
     { name = "pre-commit", specifier = "~=3.4.0" },
@@ -1055,6 +1084,7 @@ dev = [
     { name = "pytest-cov", specifier = "~=4.1.0" },
     { name = "pytest-watcher", specifier = "~=0.4.0" },
     { name = "ruff", specifier = "~=0.4.0" },
+    { name = "tqdm", specifier = "==4.67.3" },
 ]
 
 [[package]]
@@ -1890,6 +1920,18 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/50/49/8dc3fd90902f70084bd2cd059d576ddb4f8bb44c2c7c0e33a11422acb17e/tornado-6.5.4-cp39-abi3-win_arm64.whl", hash = "sha256:053e6e16701eb6cbe641f308f4c1a9541f91b6261991160391bfc342e8a551a1", size = 445910, upload-time = "2025-12-15T19:21:02.571Z" },
 ]
 
+[[package]]
+name = "tqdm"
+version = "4.67.3"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "colorama", marker = "sys_platform == 'win32'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/09/a9/6ba95a270c6f1fbcd8dac228323f2777d886cb206987444e4bce66338dd4/tqdm-4.67.3.tar.gz", hash = "sha256:7d825f03f89244ef73f1d4ce193cb1774a8179fd96f31d7e1dcde62092b960bb", size = 169598, upload-time = "2026-02-03T17:35:53.048Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/16/e1/3079a9ff9b8e11b846c6ac5c8b5bfb7ff225eee721825310c91b3b50304f/tqdm-4.67.3-py3-none-any.whl", hash = "sha256:ee1e4c0e59148062281c49d80b25b67771a127c85fc9676d3be5f243206826bf", size = 78374, upload-time = "2026-02-03T17:35:50.982Z" },
+]
+
 [[package]]
 name = "traitlets"
 version = "5.14.3"
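
Taken together, the series gives a local workflow: generate a large fake-PII CSV with devops/create_development_file.py, then push it into BigQuery in fixed-size batches with stream_csv_to_database. A minimal usage sketch follows; it assumes the klondike BigQuery connector is importable as shown and that a GCP project and credentials are configured in the environment, neither of which is confirmed by these patches:

    # Generate a smaller-than-default test file first, e.g.:
    #   python devops/create_development_file.py --output-dir /tmp --num-records 100000

    from klondike import BigQueryConnector  # assumed export path for the connector class
    from klondike.scripts.stream_csv_to_database import stream_csv_to_database

    connector = BigQueryConnector()  # assumes project/credentials are picked up from the environment

    # Stream the generated CSV into a destination table in 10,000-row batches
    stream_csv_to_database(
        csv_path="/tmp/fake_data.csv",
        connector=connector,
        destination_table_name="dev_dataset.fake_people",  # hypothetical dataset.table name
        batch_size=10_000,
    )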