diff --git a/.github/workflows/unittest.yml b/.github/workflows/unittest.yml
index 518cec63125..2455f7abc4c 100644
--- a/.github/workflows/unittest.yml
+++ b/.github/workflows/unittest.yml
@@ -14,7 +14,7 @@ jobs:
     runs-on: ubuntu-22.04
     strategy:
       matrix:
-        python: ['3.9', '3.10', '3.11', '3.12', '3.13']
+        python: ['3.10', '3.11', '3.12', '3.13']
     steps:
     - name: Checkout
       uses: actions/checkout@v4
diff --git a/.kokoro/samples/python3.7/common.cfg b/.kokoro/samples/python3.7/common.cfg
deleted file mode 100644
index 09d7af02ba9..00000000000
--- a/.kokoro/samples/python3.7/common.cfg
+++ /dev/null
@@ -1,40 +0,0 @@
-# Format: //devtools/kokoro/config/proto/build.proto
-
-# Build logs will be here
-action {
-  define_artifacts {
-    regex: "**/*sponge_log.xml"
-  }
-}
-
-# Specify which tests to run
-env_vars: {
-  key: "RUN_TESTS_SESSION"
-  value: "py-3.7"
-}
-
-# Declare build specific Cloud project.
-env_vars: {
-  key: "BUILD_SPECIFIC_GCLOUD_PROJECT"
-  value: "python-docs-samples-tests-py37"
-}
-
-env_vars: {
-  key: "TRAMPOLINE_BUILD_FILE"
-  value: "github/python-bigquery-dataframes/.kokoro/test-samples.sh"
-}
-
-# Configure the docker image for kokoro-trampoline.
-env_vars: {
-  key: "TRAMPOLINE_IMAGE"
-  value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker"
-}
-
-# Download secrets for samples
-gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples"
-
-# Download trampoline resources.
-gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline"
-
-# Use the trampoline script to run in docker.
-build_file: "python-bigquery-dataframes/.kokoro/trampoline_v2.sh"
\ No newline at end of file
diff --git a/.kokoro/samples/python3.7/continuous.cfg b/.kokoro/samples/python3.7/continuous.cfg
deleted file mode 100644
index a1c8d9759c8..00000000000
--- a/.kokoro/samples/python3.7/continuous.cfg
+++ /dev/null
@@ -1,6 +0,0 @@
-# Format: //devtools/kokoro/config/proto/build.proto
-
-env_vars: {
-  key: "INSTALL_LIBRARY_FROM_SOURCE"
-  value: "True"
-}
\ No newline at end of file
diff --git a/.kokoro/samples/python3.7/periodic-head.cfg b/.kokoro/samples/python3.7/periodic-head.cfg
deleted file mode 100644
index 123a35fbd3d..00000000000
--- a/.kokoro/samples/python3.7/periodic-head.cfg
+++ /dev/null
@@ -1,11 +0,0 @@
-# Format: //devtools/kokoro/config/proto/build.proto
-
-env_vars: {
-  key: "INSTALL_LIBRARY_FROM_SOURCE"
-  value: "True"
-}
-
-env_vars: {
-  key: "TRAMPOLINE_BUILD_FILE"
-  value: "github/python-bigquery-dataframes/.kokoro/test-samples-against-head.sh"
-}
diff --git a/.kokoro/samples/python3.7/periodic.cfg b/.kokoro/samples/python3.7/periodic.cfg
deleted file mode 100644
index 71cd1e597e3..00000000000
--- a/.kokoro/samples/python3.7/periodic.cfg
+++ /dev/null
@@ -1,6 +0,0 @@
-# Format: //devtools/kokoro/config/proto/build.proto
-
-env_vars: {
-  key: "INSTALL_LIBRARY_FROM_SOURCE"
-  value: "False"
-}
diff --git a/.kokoro/samples/python3.7/presubmit.cfg b/.kokoro/samples/python3.7/presubmit.cfg
deleted file mode 100644
index a1c8d9759c8..00000000000
--- a/.kokoro/samples/python3.7/presubmit.cfg
+++ /dev/null
@@ -1,6 +0,0 @@
-# Format: //devtools/kokoro/config/proto/build.proto
-
-env_vars: {
-  key: "INSTALL_LIBRARY_FROM_SOURCE"
-  value: "True"
-}
\ No newline at end of file
diff --git a/.kokoro/samples/python3.8/common.cfg b/.kokoro/samples/python3.8/common.cfg
deleted file mode 100644
index 976d9ce8c5c..00000000000
--- a/.kokoro/samples/python3.8/common.cfg
+++ /dev/null
@@ -1,40 +0,0 @@
-# Format: //devtools/kokoro/config/proto/build.proto
-
-# Build logs will be here
-action {
-  define_artifacts {
-    regex: "**/*sponge_log.xml"
-  }
-}
-
-# Specify which tests to run
-env_vars: {
-  key: "RUN_TESTS_SESSION"
-  value: "py-3.8"
-}
-
-# Declare build specific Cloud project.
-env_vars: {
-  key: "BUILD_SPECIFIC_GCLOUD_PROJECT"
-  value: "python-docs-samples-tests-py38"
-}
-
-env_vars: {
-  key: "TRAMPOLINE_BUILD_FILE"
-  value: "github/python-bigquery-dataframes/.kokoro/test-samples.sh"
-}
-
-# Configure the docker image for kokoro-trampoline.
-env_vars: {
-  key: "TRAMPOLINE_IMAGE"
-  value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker"
-}
-
-# Download secrets for samples
-gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples"
-
-# Download trampoline resources.
-gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline"
-
-# Use the trampoline script to run in docker.
-build_file: "python-bigquery-dataframes/.kokoro/trampoline_v2.sh"
\ No newline at end of file
diff --git a/.kokoro/samples/python3.8/continuous.cfg b/.kokoro/samples/python3.8/continuous.cfg
deleted file mode 100644
index a1c8d9759c8..00000000000
--- a/.kokoro/samples/python3.8/continuous.cfg
+++ /dev/null
@@ -1,6 +0,0 @@
-# Format: //devtools/kokoro/config/proto/build.proto
-
-env_vars: {
-  key: "INSTALL_LIBRARY_FROM_SOURCE"
-  value: "True"
-}
\ No newline at end of file
diff --git a/.kokoro/samples/python3.8/periodic-head.cfg b/.kokoro/samples/python3.8/periodic-head.cfg
deleted file mode 100644
index 123a35fbd3d..00000000000
--- a/.kokoro/samples/python3.8/periodic-head.cfg
+++ /dev/null
@@ -1,11 +0,0 @@
-# Format: //devtools/kokoro/config/proto/build.proto
-
-env_vars: {
-  key: "INSTALL_LIBRARY_FROM_SOURCE"
-  value: "True"
-}
-
-env_vars: {
-  key: "TRAMPOLINE_BUILD_FILE"
-  value: "github/python-bigquery-dataframes/.kokoro/test-samples-against-head.sh"
-}
diff --git a/.kokoro/samples/python3.8/periodic.cfg b/.kokoro/samples/python3.8/periodic.cfg
deleted file mode 100644
index 71cd1e597e3..00000000000
--- a/.kokoro/samples/python3.8/periodic.cfg
+++ /dev/null
@@ -1,6 +0,0 @@
-# Format: //devtools/kokoro/config/proto/build.proto
-
-env_vars: {
-  key: "INSTALL_LIBRARY_FROM_SOURCE"
-  value: "False"
-}
diff --git a/.kokoro/samples/python3.8/presubmit.cfg b/.kokoro/samples/python3.8/presubmit.cfg
deleted file mode 100644
index a1c8d9759c8..00000000000
--- a/.kokoro/samples/python3.8/presubmit.cfg
+++ /dev/null
@@ -1,6 +0,0 @@
-# Format: //devtools/kokoro/config/proto/build.proto
-
-env_vars: {
-  key: "INSTALL_LIBRARY_FROM_SOURCE"
-  value: "True"
-}
\ No newline at end of file
diff --git a/.kokoro/samples/python3.9/common.cfg b/.kokoro/samples/python3.9/common.cfg
deleted file mode 100644
index 603cfffa280..00000000000
--- a/.kokoro/samples/python3.9/common.cfg
+++ /dev/null
@@ -1,40 +0,0 @@
-# Format: //devtools/kokoro/config/proto/build.proto
-
-# Build logs will be here
-action {
-  define_artifacts {
-    regex: "**/*sponge_log.xml"
-  }
-}
-
-# Specify which tests to run
-env_vars: {
-  key: "RUN_TESTS_SESSION"
-  value: "py-3.9"
-}
-
-# Declare build specific Cloud project.
-env_vars: {
-  key: "BUILD_SPECIFIC_GCLOUD_PROJECT"
-  value: "python-docs-samples-tests-py39"
-}
-
-env_vars: {
-  key: "TRAMPOLINE_BUILD_FILE"
-  value: "github/python-bigquery-dataframes/.kokoro/test-samples.sh"
-}
-
-# Configure the docker image for kokoro-trampoline.
-env_vars: {
-  key: "TRAMPOLINE_IMAGE"
-  value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker"
-}
-
-# Download secrets for samples
-gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples"
-
-# Download trampoline resources.
-gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline"
-
-# Use the trampoline script to run in docker.
-build_file: "python-bigquery-dataframes/.kokoro/trampoline_v2.sh"
\ No newline at end of file
diff --git a/.kokoro/samples/python3.9/continuous.cfg b/.kokoro/samples/python3.9/continuous.cfg
deleted file mode 100644
index a1c8d9759c8..00000000000
--- a/.kokoro/samples/python3.9/continuous.cfg
+++ /dev/null
@@ -1,6 +0,0 @@
-# Format: //devtools/kokoro/config/proto/build.proto
-
-env_vars: {
-  key: "INSTALL_LIBRARY_FROM_SOURCE"
-  value: "True"
-}
\ No newline at end of file
diff --git a/.kokoro/samples/python3.9/periodic-head.cfg b/.kokoro/samples/python3.9/periodic-head.cfg
deleted file mode 100644
index 123a35fbd3d..00000000000
--- a/.kokoro/samples/python3.9/periodic-head.cfg
+++ /dev/null
@@ -1,11 +0,0 @@
-# Format: //devtools/kokoro/config/proto/build.proto
-
-env_vars: {
-  key: "INSTALL_LIBRARY_FROM_SOURCE"
-  value: "True"
-}
-
-env_vars: {
-  key: "TRAMPOLINE_BUILD_FILE"
-  value: "github/python-bigquery-dataframes/.kokoro/test-samples-against-head.sh"
-}
diff --git a/.kokoro/samples/python3.9/periodic.cfg b/.kokoro/samples/python3.9/periodic.cfg
deleted file mode 100644
index 71cd1e597e3..00000000000
--- a/.kokoro/samples/python3.9/periodic.cfg
+++ /dev/null
@@ -1,6 +0,0 @@
-# Format: //devtools/kokoro/config/proto/build.proto
-
-env_vars: {
-  key: "INSTALL_LIBRARY_FROM_SOURCE"
-  value: "False"
-}
diff --git a/.kokoro/samples/python3.9/presubmit.cfg b/.kokoro/samples/python3.9/presubmit.cfg
deleted file mode 100644
index a1c8d9759c8..00000000000
--- a/.kokoro/samples/python3.9/presubmit.cfg
+++ /dev/null
@@ -1,6 +0,0 @@
-# Format: //devtools/kokoro/config/proto/build.proto
-
-env_vars: {
-  key: "INSTALL_LIBRARY_FROM_SOURCE"
-  value: "True"
-}
\ No newline at end of file
diff --git a/.kokoro/test-samples-impl.sh b/.kokoro/test-samples-impl.sh
index 53e365bc4e7..97cdc9c13fe 100755
--- a/.kokoro/test-samples-impl.sh
+++ b/.kokoro/test-samples-impl.sh
@@ -34,7 +34,7 @@ env | grep KOKORO
 
 # Install nox
 # `virtualenv==20.26.6` is added for Python 3.7 compatibility
-python3.9 -m pip install --upgrade --quiet nox virtualenv==20.26.6
+python3.10 -m pip install --upgrade --quiet nox virtualenv==20.26.6
 
 # Use secrets acessor service account to get secrets
 if [[ -f "${KOKORO_GFILE_DIR}/secrets_viewer_service_account.json" ]]; then
@@ -77,7 +77,7 @@ for file in samples/**/requirements.txt; do
     echo "------------------------------------------------------------"
 
     # Use nox to execute the tests for the project.
-    python3.9 -m nox -s "$RUN_TESTS_SESSION"
+    python3.10 -m nox -s "$RUN_TESTS_SESSION"
     EXIT=$?
 
     # If this is a periodic build, send the test log to the FlakyBot.
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index 5374e7e3770..7ac410bbf7a 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -22,7 +22,7 @@ In order to add a feature:
   documentation.
 
 - The feature must work fully on the following CPython versions:
-  3.9, 3.10, 3.11, 3.12 and 3.13 on both UNIX and Windows.
+  3.10, 3.11, 3.12 and 3.13 on both UNIX and Windows.
 
 - The feature must not add unnecessary dependencies (where
   "unnecessary" is of course subjective, but new dependencies should
@@ -148,7 +148,7 @@ Running System Tests
 
 .. note::
 
-    System tests are only configured to run under Python 3.9, 3.11, 3.12 and 3.13.
+    System tests are only configured to run under Python 3.10, 3.11, 3.12 and 3.13.
     For expediency, we do not run them in older versions of Python 3.
 
 This alone will not run the tests. You'll need to change some local
@@ -258,13 +258,11 @@ Supported Python Versions
 
 We support:
 
-- `Python 3.9`_
 - `Python 3.10`_
 - `Python 3.11`_
 - `Python 3.12`_
 - `Python 3.13`_
 
-.. _Python 3.9: https://docs.python.org/3.9/
 .. _Python 3.10: https://docs.python.org/3.10/
 .. _Python 3.11: https://docs.python.org/3.11/
 .. _Python 3.12: https://docs.python.org/3.12/
@@ -276,7 +274,7 @@ Supported versions can be found in our ``noxfile.py`` `config`_.
 
 .. _config: https://github.com/googleapis/python-bigquery-dataframes/blob/main/noxfile.py
 
-We also explicitly decided to support Python 3 beginning with version 3.9.
+We also explicitly decided to support Python 3 beginning with version 3.10.
 Reasons for this include:
 
 - Encouraging use of newest versions of Python 3
diff --git a/bigframes/core/blocks.py b/bigframes/core/blocks.py
index 5bac1a06f1e..ff7f2b9899b 100644
--- a/bigframes/core/blocks.py
+++ b/bigframes/core/blocks.py
@@ -140,6 +140,7 @@ def __init__(
         column_labels: typing.Union[pd.Index, typing.Iterable[Label]],
         index_labels: typing.Union[pd.Index, typing.Iterable[Label], None] = None,
         *,
+        value_columns: Optional[Iterable[str]] = None,
         transpose_cache: Optional[Block] = None,
     ):
         """Construct a block object, will create default index if no index columns specified."""
@@ -158,7 +159,13 @@
             if index_labels
             else tuple([None for _ in index_columns])
         )
-        self._expr = self._normalize_expression(expr, self._index_columns)
+        if value_columns is None:
+            value_columns = [
+                col_id for col_id in expr.column_ids if col_id not in index_columns
+            ]
+        self._expr = self._normalize_expression(
+            expr, self._index_columns, value_columns
+        )
         # Use pandas index to more easily replicate column indexing, especially for hierarchical column index
         self._column_labels = (
             column_labels.copy()
@@ -1114,13 +1121,15 @@ def project_exprs(
         labels: Union[Sequence[Label], pd.Index],
         drop=False,
     ) -> Block:
-        new_array, _ = self.expr.compute_values(exprs)
+        new_array, new_cols = self.expr.compute_values(exprs)
         if drop:
            new_array = new_array.drop_columns(self.value_columns)
+        new_val_cols = new_cols if drop else (*self.value_columns, *new_cols)
 
         return Block(
             new_array,
             index_columns=self.index_columns,
+            value_columns=new_val_cols,
             column_labels=labels
             if drop
             else self.column_labels.append(pd.Index(labels)),
@@ -1542,17 +1551,13 @@ def _get_labels_for_columns(self, column_ids: typing.Sequence[str]) -> pd.Index:
     def _normalize_expression(
         self,
         expr: core.ArrayValue,
-        index_columns: typing.Sequence[str],
-        assert_value_size: typing.Optional[int] = None,
+        index_columns: Iterable[str],
+        value_columns: Iterable[str],
     ):
         """Normalizes expression by moving index columns to left."""
-        value_columns = [
-            col_id for col_id in expr.column_ids if col_id not in index_columns
-        ]
-        if (assert_value_size is not None) and (
-            len(value_columns) != assert_value_size
-        ):
-            raise ValueError("Unexpected number of value columns.")
+        normalized_ids = (*index_columns, *value_columns)
+        if tuple(expr.column_ids) == normalized_ids:
+            return expr
         return expr.select_columns([*index_columns, *value_columns])
 
     def grouped_head(
diff --git a/bigframes/operations/aggregations.py b/bigframes/operations/aggregations.py
index 5fe83302638..eee710b2882 100644
--- a/bigframes/operations/aggregations.py
+++ b/bigframes/operations/aggregations.py
@@ -205,7 +205,7 @@ def output_type(self, *input_types: dtypes.ExpressionType) -> dtypes.ExpressionT
             return dtypes.TIMEDELTA_DTYPE
 
         if dtypes.is_numeric(input_types[0]):
-            if pd.api.types.is_bool_dtype(input_types[0]):
+            if pd.api.types.is_bool_dtype(input_types[0]):  # type: ignore
                 return dtypes.INT_DTYPE
             return input_types[0]
 
@@ -224,7 +224,7 @@ def output_type(self, *input_types: dtypes.ExpressionType) -> dtypes.ExpressionT
         # These will change if median is changed to exact implementation.
         if not dtypes.is_orderable(input_types[0]):
             raise TypeError(f"Type {input_types[0]} is not orderable")
-        if pd.api.types.is_bool_dtype(input_types[0]):
+        if pd.api.types.is_bool_dtype(input_types[0]):  # type: ignore
             return dtypes.INT_DTYPE
         else:
             return input_types[0]
diff --git a/noxfile.py b/noxfile.py
index 00ada18a469..a8a1a84987e 100644
--- a/noxfile.py
+++ b/noxfile.py
@@ -123,7 +123,7 @@
     # TODO(tswast): Consider removing this when unit_noextras and cover is run
     # from GitHub actions.
     "unit_noextras",
-    "system-3.9",  # No extras.
+    "system-3.10",  # No extras.
     f"system-{LATEST_FULLY_SUPPORTED_PYTHON}",  # All extras.
     "cover",
     # TODO(b/401609005): remove
@@ -661,9 +661,7 @@ def prerelease(session: nox.sessions.Session, tests_path, extra_pytest_options=(
     # version, the first version we test with in the unit tests sessions has a
     # constraints file containing all dependencies and extras.
     with open(
-        CURRENT_DIRECTORY
-        / "testing"
-        / f"constraints-{UNIT_TEST_PYTHON_VERSIONS[0]}.txt",
+        CURRENT_DIRECTORY / "testing" / f"constraints-{DEFAULT_PYTHON_VERSION}.txt",
         encoding="utf-8",
     ) as constraints_file:
         constraints_text = constraints_file.read()
diff --git a/scripts/test_publish_api_coverage.py b/scripts/test_publish_api_coverage.py
index 6e366b6854e..6abecd0ac40 100644
--- a/scripts/test_publish_api_coverage.py
+++ b/scripts/test_publish_api_coverage.py
@@ -31,10 +31,8 @@ def api_coverage_df():
     reason="Issues with installing sklearn for this test in python 3.13",
 )
 def test_api_coverage_produces_expected_schema(api_coverage_df):
-    if sys.version.split(".")[:2] == ["3", "9"]:
-        pytest.skip(
-            "Python 3.9 uses older pandas without good microsecond timestamp support."
-        )
+    # Older pandas has different timestamp default precision
+    pytest.importorskip("pandas", minversion="2.0.0")
 
     pandas.testing.assert_series_equal(
         api_coverage_df.dtypes,
@@ -56,6 +54,8 @@ def test_api_coverage_produces_expected_schema(api_coverage_df):
                 "release_version": "string",
             },
         ),
+        # String dtype behavior not consistent across pandas versions
+        check_dtype=False,
     )
diff --git a/setup.py b/setup.py
index 090b1035364..38c2a6032ec 100644
--- a/setup.py
+++ b/setup.py
@@ -33,7 +33,7 @@
 # 'Development Status :: 5 - Production/Stable'
 release_status = "Development Status :: 5 - Production/Stable"
 dependencies = [
-    # please keep these in sync with the minimum versions in testing/constraints-3.9.txt
+    # please keep these in sync with the minimum versions in testing/constraints-3.10.txt
     "cloudpickle >= 2.0.0",
     "fsspec >=2023.3.0",
     "gcsfs >=2023.3.0, !=2025.5.0",
@@ -133,7 +133,6 @@
     "License :: OSI Approved :: Apache Software License",
     "Programming Language :: Python",
     "Programming Language :: Python :: 3",
-    "Programming Language :: Python :: 3.9",
     "Programming Language :: Python :: 3.10",
     "Programming Language :: Python :: 3.11",
     "Programming Language :: Python :: 3.12",
@@ -149,7 +148,7 @@
         "bigframes_vendored": "third_party/bigframes_vendored",
     },
     packages=packages,
-    python_requires=">=3.9",
+    python_requires=">=3.10",
     include_package_data=True,
     zip_safe=False,
 )
diff --git a/testing/constraints-3.10.txt b/testing/constraints-3.10.txt
index 4810d6461b8..b74ff81063b 100644
--- a/testing/constraints-3.10.txt
+++ b/testing/constraints-3.10.txt
@@ -1,18 +1,39 @@
 # When we drop Python 3.9,
 # please keep these in sync with the minimum versions in setup.py
-google-auth==2.27.0
-ipykernel==5.5.6
-ipython==7.34.0
-notebook==6.5.5
-pandas==2.1.4
-pandas-stubs==2.1.4.231227
-portpicker==1.5.2
-requests==2.32.3
-tornado==6.3.3
-absl-py==1.4.0
-debugpy==1.6.6
+cloudpickle==2.0.0
+fsspec==2023.3.0
+gcsfs==2023.3.0
+geopandas==0.12.2
+google-auth==2.15.0
+google-cloud-bigtable==2.24.0
+google-cloud-pubsub==2.21.4
+google-cloud-bigquery==3.36.0
+google-cloud-functions==1.12.0
+google-cloud-bigquery-connection==1.12.0
+google-cloud-iam==2.12.1
+google-cloud-resource-manager==1.10.3
+google-cloud-storage==2.0.0
+grpc-google-iam-v1==0.14.2
+numpy==1.24.0
+pandas==1.5.3
+pandas-gbq==0.26.1
+pyarrow==15.0.2
+pydata-google-auth==1.8.2
+requests==2.27.1
+scikit-learn==1.2.2
+shapely==1.8.5
+tabulate==0.9
+ipywidgets==7.7.1
+humanize==4.6.0
 matplotlib==3.7.1
-psutil==5.9.5
-seaborn==0.13.1
-traitlets==5.7.1
-polars==1.21.0
+db-dtypes==1.4.2
+# For vendored ibis-framework.
+atpublic==2.3
+python-dateutil==2.8.2
+pytz==2022.7
+toolz==0.11
+typing-extensions==4.5.0
+rich==12.4.4
+# For anywidget mode
+anywidget>=0.9.18
+traitlets==5.0.0
diff --git a/tests/system/small/test_dataframe.py b/tests/system/small/test_dataframe.py
index 0f7b782b66d..fa82cce6054 100644
--- a/tests/system/small/test_dataframe.py
+++ b/tests/system/small/test_dataframe.py
@@ -5754,16 +5754,9 @@ def test_df_dot_operator_series(
     )
 
 
-# TODO(tswast): We may be able to re-enable this test after we break large
-# queries up in https://github.com/googleapis/python-bigquery-dataframes/pull/427
-@pytest.mark.skipif(
-    sys.version_info >= (3, 12),
-    # See: https://github.com/python/cpython/issues/112282
-    reason="setrecursionlimit has no effect on the Python C stack since Python 3.12.",
-)
 def test_recursion_limit(scalars_df_index):
     scalars_df_index = scalars_df_index[["int64_too", "int64_col", "float64_col"]]
-    for i in range(400):
+    for i in range(250):
         scalars_df_index = scalars_df_index + 4
     scalars_df_index.to_pandas()
 
@@ -5964,7 +5957,7 @@ def test_resample_with_column(
     scalars_df_index, scalars_pandas_df_index, on, rule, origin
 ):
     # TODO: supply a reason why this isn't compatible with pandas 1.x
-    pytest.importorskip("pandas", minversion="2.0.0")
+    pytest.importorskip("pandas", minversion="2.2.0")
     bf_result = (
         scalars_df_index.resample(rule=rule, on=on, origin=origin)[
             ["int64_col", "int64_too"]
diff --git a/tests/system/small/test_groupby.py b/tests/system/small/test_groupby.py
index 579e7cd414d..1d0e05f5ccf 100644
--- a/tests/system/small/test_groupby.py
+++ b/tests/system/small/test_groupby.py
@@ -123,7 +123,7 @@ def test_dataframe_groupby_rank(
     scalars_df_index, scalars_pandas_df_index, na_option, method, ascending, pct
 ):
     # TODO: supply a reason why this isn't compatible with pandas 1.x
-    pytest.importorskip("pandas", minversion="2.0.0")
+    pytest.importorskip("pandas", minversion="2.2.0")
     col_names = ["int64_too", "float64_col", "int64_col", "string_col"]
     bf_result = (
         scalars_df_index[col_names]
diff --git a/tests/unit/test_dataframe_polars.py b/tests/unit/test_dataframe_polars.py
index 1c73d9dc6b0..a7e40469a27 100644
--- a/tests/unit/test_dataframe_polars.py
+++ b/tests/unit/test_dataframe_polars.py
@@ -4450,3 +4450,10 @@ def test_dataframe_explode_reserve_order(session, ignore_index, ordered):
 def test_dataframe_explode_xfail(col_names):
     df = bpd.DataFrame({"A": [[0, 1, 2], [], [3, 4]]})
     df.explode(col_names)
+
+
+def test_recursion_limit_unit(scalars_df_index):
+    scalars_df_index = scalars_df_index[["int64_too", "int64_col", "float64_col"]]
+    for i in range(250):
+        scalars_df_index = scalars_df_index + 4
+    scalars_df_index.to_pandas()
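
---

A note on the `bigframes/core/blocks.py` change above: `Block.__init__` now accepts an explicit `value_columns` argument, and `_normalize_expression` receives the value columns rather than re-deriving them from `expr.column_ids`, returning the expression untouched when its columns are already in `(index, value)` order. The snippet below is a minimal sketch of that fast path, with a hypothetical `FakeArrayValue` standing in for the real `bigframes.core.ArrayValue`; only the names that appear in the diff are taken from the source.

```python
from typing import Iterable, Sequence


class FakeArrayValue:
    """Hypothetical stand-in for bigframes.core.ArrayValue (illustration only)."""

    def __init__(self, column_ids: Sequence[str]):
        self.column_ids = tuple(column_ids)

    def select_columns(self, ids: Sequence[str]) -> "FakeArrayValue":
        # The real method adds a projection node to the expression tree;
        # reordering the ids is enough to show the effect here.
        return FakeArrayValue(ids)


def normalize_expression(
    expr: FakeArrayValue,
    index_columns: Iterable[str],
    value_columns: Iterable[str],
) -> FakeArrayValue:
    """Moves index columns to the left, mirroring Block._normalize_expression."""
    # Materialize once so the incoming iterables can be reused safely.
    normalized_ids = (*index_columns, *value_columns)
    if tuple(expr.column_ids) == normalized_ids:
        # Fast path from the diff: the expression is already in
        # (index, value) order, so no projection is needed.
        return expr
    return expr.select_columns(list(normalized_ids))


# Already normalized: the very same object comes back, no projection built.
ordered = FakeArrayValue(["idx", "a", "b"])
assert normalize_expression(ordered, ["idx"], ["a", "b"]) is ordered

# Out of order: one projection restores (index, value) order.
shuffled = FakeArrayValue(["a", "idx", "b"])
reordered = normalize_expression(shuffled, ["idx"], ["a", "b"])
assert reordered.column_ids == ("idx", "a", "b")
```

Callers that already know their value columns, such as the updated `project_exprs`, can pass them straight through, so the common append-columns case skips both the membership scan over `expr.column_ids` and the unconditional `select_columns` projection.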