diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index 7914b72651e..b848262c3aa 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -15,7 +15,7 @@ jobs:
- name: Setup Python
uses: actions/setup-python@v5
with:
- python-version: "3.10"
+ python-version: "3.14"
- name: Install nox
run: |
python -m pip install --upgrade setuptools pip wheel
diff --git a/.github/workflows/unittest.yml b/.github/workflows/unittest.yml
index 518cec63125..f334f4eb431 100644
--- a/.github/workflows/unittest.yml
+++ b/.github/workflows/unittest.yml
@@ -14,7 +14,7 @@ jobs:
runs-on: ubuntu-22.04
strategy:
matrix:
- python: ['3.9', '3.10', '3.11', '3.12', '3.13']
+ python: ['3.9', '3.10', '3.11', '3.12', '3.13', '3.14']
steps:
- name: Checkout
uses: actions/checkout@v4
@@ -48,7 +48,7 @@ jobs:
- name: Setup Python
uses: actions/setup-python@v5
with:
- python-version: "3.10"
+ python-version: "3.14"
- name: Install coverage
run: |
python -m pip install --upgrade setuptools pip wheel
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index 5374e7e3770..a4ca07b0c1c 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -22,7 +22,7 @@ In order to add a feature:
documentation.
- The feature must work fully on the following CPython versions:
- 3.9, 3.10, 3.11, 3.12 and 3.13 on both UNIX and Windows.
+ 3.9, 3.10, 3.11, 3.12, 3.13 and 3.14 on both UNIX and Windows.
- The feature must not add unnecessary dependencies (where
"unnecessary" is of course subjective, but new dependencies should
@@ -72,7 +72,7 @@ We use `nox <https://nox.thea.codes/>`__ to instrument our tests.
- To run a single unit test::
- $ nox -s unit-3.13 -- -k <name of test>
+ $ nox -s unit-3.14 -- -k <name of test>
.. note::
@@ -143,12 +143,12 @@ Running System Tests
$ nox -s system
# Run a single system test
- $ nox -s system-3.13 -- -k <name of test>
+ $ nox -s system-3.14 -- -k <name of test>
.. note::
- System tests are only configured to run under Python 3.9, 3.11, 3.12 and 3.13.
+ System tests are only configured to run under Python 3.9, 3.12 and 3.14.
For expediency, we do not run them in older versions of Python 3.
This alone will not run the tests. You'll need to change some local
@@ -263,12 +263,14 @@ We support:
- `Python 3.11`_
- `Python 3.12`_
- `Python 3.13`_
+- `Python 3.14`_
.. _Python 3.9: https://docs.python.org/3.9/
.. _Python 3.10: https://docs.python.org/3.10/
.. _Python 3.11: https://docs.python.org/3.11/
.. _Python 3.12: https://docs.python.org/3.12/
.. _Python 3.13: https://docs.python.org/3.13/
+.. _Python 3.14: https://docs.python.org/3.14/
Supported versions can be found in our ``noxfile.py`` `config`_.
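Since the guide now promises CPython 3.9 through 3.14 on both UNIX and Windows, version-gated behavior in contributions is checked against `sys.version_info`. A tiny illustrative helper (the standard stdlib idiom, not code from this repo):

```python
import sys

# The contributing guide above requires features to work on 3.9 through 3.14.
if sys.version_info < (3, 9):
    raise RuntimeError("Unsupported interpreter; CPython 3.9+ is required.")

print(f"Running on CPython {sys.version_info.major}.{sys.version_info.minor}")
```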
diff --git a/GEMINI.md b/GEMINI.md
index 0d447f17a48..1c8cff33870 100644
--- a/GEMINI.md
+++ b/GEMINI.md
@@ -13,7 +13,7 @@ We use `nox` to instrument our tests.
- To run a single unit test:
```bash
- nox -r -s unit-3.13 -- -k <name of test>
+ nox -r -s unit-3.14 -- -k <name of test>
```
- Ignore this step if you lack access to Google Cloud resources. To run system
@@ -23,7 +23,7 @@ We use `nox` to instrument our tests.
$ nox -r -s system
# Run a single system test
- $ nox -r -s system-3.13 -- -k <name of test>
+ $ nox -r -s system-3.14 -- -k <name of test>
- The codebase must have better coverage than it had previously after each
change. You can test coverage via `nox -s unit system cover` (takes a long
diff --git a/bigframes/_config/auth.py b/bigframes/_config/auth.py
index 1574fc48835..ccb5fcbedb8 100644
--- a/bigframes/_config/auth.py
+++ b/bigframes/_config/auth.py
@@ -30,9 +30,9 @@
_cached_project_default: Optional[str] = None
-def get_default_credentials_with_project() -> tuple[
- google.auth.credentials.Credentials, Optional[str]
-]:
+def get_default_credentials_with_project() -> (
+ tuple[google.auth.credentials.Credentials, Optional[str]]
+):
global _AUTH_LOCK, _cached_credentials, _cached_project_default
with _AUTH_LOCK:
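The annotation rewrite above (mirrored in expression_factoring.py later in this diff) trades a subscript broken across lines for a parenthesized return annotation. That is the style newer Black releases emit for long return types, consistent with the black==23.7.0 bump in noxfile.py below; a hedged before/after sketch:

```python
from typing import Optional


# Before: black==22.3.0 split the subscript itself across lines.
def creds_before() -> tuple[
    str, Optional[str]
]:
    return ("credentials", None)


# After: black 23.x wraps the whole annotation in parentheses instead.
def creds_after() -> (
    tuple[str, Optional[str]]
):
    return ("credentials", None)
```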
diff --git a/bigframes/core/block_transforms.py b/bigframes/core/block_transforms.py
index 5c6395d1714..ac6bbcb115c 100644
--- a/bigframes/core/block_transforms.py
+++ b/bigframes/core/block_transforms.py
@@ -618,7 +618,6 @@ def skew(
skew_column_ids: typing.Sequence[str],
grouping_column_ids: typing.Sequence[str] = (),
) -> blocks.Block:
-
original_columns = skew_column_ids
column_labels = block.select_columns(original_columns).column_labels
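Most of the small hunks from here on (blocks.py, the scalar op registry, the ml/ modules, and the test files) delete one blank line directly after a function signature. This also looks like fallout from the black==23.7.0 bump: Black 23.x removes empty lines at the start of a function body that 22.3.0 tolerated. A minimal sketch of the rule, assuming that is the trigger:

```python
# Before (accepted by black==22.3.0): a blank line right after the signature.
def describe_before(values):

    return min(values), max(values)


# After (black 23.x): the body starts immediately.
def describe_after(values):
    return min(values), max(values)
```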
diff --git a/bigframes/core/blocks.py b/bigframes/core/blocks.py
index 5bac1a06f1e..83fdc239692 100644
--- a/bigframes/core/blocks.py
+++ b/bigframes/core/blocks.py
@@ -650,7 +650,6 @@ def _get_sampling_option(
sampling_method: Optional[str] = None,
random_state: Optional[int] = None,
) -> sampling_options.SamplingOptions:
-
if (sampling_method is not None) and (sampling_method not in _SAMPLING_METHODS):
raise NotImplementedError(
f"The downsampling method {sampling_method} is not implemented, "
@@ -693,7 +692,8 @@ def to_pandas_batches(
"""Download results one message at a time.
page_size and max_results determine the size and number of batches,
- see https://cloud.google.com/python/docs/reference/bigquery/latest/google.cloud.bigquery.job.QueryJob#google_cloud_bigquery_job_QueryJob_result"""
+ see https://cloud.google.com/python/docs/reference/bigquery/latest/google.cloud.bigquery.job.QueryJob#google_cloud_bigquery_job_QueryJob_result
+ """
under_10gb = (
(not allow_large_results)
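Aside from the closing-quote move, the docstring above documents the two batching knobs. A hedged usage sketch of the API it describes (the public table here is a placeholder for illustration, not taken from this diff):

```python
import bigframes.pandas as bpd

df = bpd.read_gbq("bigquery-public-data.usa_names.usa_1910_2013")

total_rows = 0
# Per the docstring above, page_size and max_results control the size and
# number of batches; each batch arrives as a pandas DataFrame.
for batch in df.to_pandas_batches(page_size=10_000, max_results=100_000):
    total_rows += len(batch)
print(total_rows)
```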
diff --git a/bigframes/core/compile/ibis_compiler/scalar_op_registry.py b/bigframes/core/compile/ibis_compiler/scalar_op_registry.py
index 519b2c94426..eec19b603cb 100644
--- a/bigframes/core/compile/ibis_compiler/scalar_op_registry.py
+++ b/bigframes/core/compile/ibis_compiler/scalar_op_registry.py
@@ -1901,7 +1901,6 @@ def struct_op_impl(
def ai_generate(
*values: ibis_types.Value, op: ops.AIGenerate
) -> ibis_types.StructValue:
-
return ai_ops.AIGenerate(
_construct_prompt(values, op.prompt_context), # type: ignore
op.connection_id, # type: ignore
@@ -1916,7 +1915,6 @@ def ai_generate(
def ai_generate_bool(
*values: ibis_types.Value, op: ops.AIGenerateBool
) -> ibis_types.StructValue:
-
return ai_ops.AIGenerateBool(
_construct_prompt(values, op.prompt_context), # type: ignore
op.connection_id, # type: ignore
@@ -1930,7 +1928,6 @@ def ai_generate_bool(
def ai_generate_int(
*values: ibis_types.Value, op: ops.AIGenerateInt
) -> ibis_types.StructValue:
-
return ai_ops.AIGenerateInt(
_construct_prompt(values, op.prompt_context), # type: ignore
op.connection_id, # type: ignore
@@ -1944,7 +1941,6 @@ def ai_generate_int(
def ai_generate_double(
*values: ibis_types.Value, op: ops.AIGenerateDouble
) -> ibis_types.StructValue:
-
return ai_ops.AIGenerateDouble(
_construct_prompt(values, op.prompt_context), # type: ignore
op.connection_id, # type: ignore
@@ -1956,7 +1952,6 @@ def ai_generate_double(
@scalar_op_compiler.register_nary_op(ops.AIIf, pass_op=True)
def ai_if(*values: ibis_types.Value, op: ops.AIIf) -> ibis_types.StructValue:
-
return ai_ops.AIIf(
_construct_prompt(values, op.prompt_context), # type: ignore
op.connection_id, # type: ignore
@@ -1967,7 +1962,6 @@ def ai_if(*values: ibis_types.Value, op: ops.AIIf) -> ibis_types.StructValue:
def ai_classify(
*values: ibis_types.Value, op: ops.AIClassify
) -> ibis_types.StructValue:
-
return ai_ops.AIClassify(
_construct_prompt(values, op.prompt_context), # type: ignore
op.categories, # type: ignore
@@ -1977,7 +1971,6 @@ def ai_classify(
@scalar_op_compiler.register_nary_op(ops.AIScore, pass_op=True)
def ai_score(*values: ibis_types.Value, op: ops.AIScore) -> ibis_types.StructValue:
-
return ai_ops.AIScore(
_construct_prompt(values, op.prompt_context), # type: ignore
op.connection_id, # type: ignore
diff --git a/bigframes/core/expression_factoring.py b/bigframes/core/expression_factoring.py
index b58330f5a45..208fc78ebdb 100644
--- a/bigframes/core/expression_factoring.py
+++ b/bigframes/core/expression_factoring.py
@@ -381,9 +381,9 @@ def graph_extract_scalar_exprs() -> Sequence[nodes.ColumnDef]:
# TODO: We can prune expressions that won't be reused here,
return tuple(nodes.ColumnDef(expr, id) for id, expr in results.items())
- def graph_extract_window_expr() -> Optional[
- Tuple[Sequence[nodes.ColumnDef], window_spec.WindowSpec]
- ]:
+ def graph_extract_window_expr() -> (
+ Optional[Tuple[Sequence[nodes.ColumnDef], window_spec.WindowSpec]]
+ ):
for id in graph.sinks:
next_def = by_id[id]
if isinstance(next_def.expression, agg_expressions.WindowExpression):
diff --git a/bigframes/core/indexes/base.py b/bigframes/core/indexes/base.py
index 383534fa4df..3a2b64f44dc 100644
--- a/bigframes/core/indexes/base.py
+++ b/bigframes/core/indexes/base.py
@@ -210,7 +210,6 @@ def is_monotonic_increasing(self) -> bool:
@property
@validations.requires_ordering()
def is_monotonic_decreasing(self) -> bool:
-
return typing.cast(
bool,
self._block.is_monotonic_decreasing(self._block.index_columns),
diff --git a/bigframes/core/window/rolling.py b/bigframes/core/window/rolling.py
index b7bb62372cc..9b69dea0551 100644
--- a/bigframes/core/window/rolling.py
+++ b/bigframes/core/window/rolling.py
@@ -218,7 +218,6 @@ def create_range_window(
grouping_keys: Sequence[str] = tuple(),
drop_null_groups: bool = True,
) -> Window:
-
if on is None:
# Rolling on index
index_dtypes = block.index.dtypes
diff --git a/bigframes/ml/cluster.py b/bigframes/ml/cluster.py
index f371be0cf38..827b9158626 100644
--- a/bigframes/ml/cluster.py
+++ b/bigframes/ml/cluster.py
@@ -44,7 +44,6 @@ class KMeans(
base.UnsupervisedTrainablePredictor,
bigframes_vendored.sklearn.cluster._kmeans.KMeans,
):
-
__doc__ = bigframes_vendored.sklearn.cluster._kmeans.KMeans.__doc__
def __init__(
diff --git a/bigframes/ml/decomposition.py b/bigframes/ml/decomposition.py
index ca5ff102b44..7497ecbcbfe 100644
--- a/bigframes/ml/decomposition.py
+++ b/bigframes/ml/decomposition.py
@@ -226,7 +226,6 @@ def __init__(
# TODO: Add support for hyperparameter tuning.
l2_reg: float = 1.0,
):
-
feedback_type = feedback_type.lower() # type: ignore
if feedback_type not in ("explicit", "implicit"):
raise ValueError("Expected feedback_type to be `explicit` or `implicit`.")
diff --git a/bigframes/ml/ensemble.py b/bigframes/ml/ensemble.py
index 7cd7079dfbd..67a51b702bb 100644
--- a/bigframes/ml/ensemble.py
+++ b/bigframes/ml/ensemble.py
@@ -213,7 +213,6 @@ class XGBClassifier(
base.SupervisedTrainableWithEvaluationPredictor,
bigframes_vendored.xgboost.sklearn.XGBClassifier,
):
-
__doc__ = bigframes_vendored.xgboost.sklearn.XGBClassifier.__doc__
def __init__(
@@ -370,7 +369,6 @@ class RandomForestRegressor(
base.SupervisedTrainableWithEvaluationPredictor,
bigframes_vendored.sklearn.ensemble._forest.RandomForestRegressor,
):
-
__doc__ = bigframes_vendored.sklearn.ensemble._forest.RandomForestRegressor.__doc__
def __init__(
@@ -536,7 +534,6 @@ class RandomForestClassifier(
base.SupervisedTrainableWithEvaluationPredictor,
bigframes_vendored.sklearn.ensemble._forest.RandomForestClassifier,
):
-
__doc__ = bigframes_vendored.sklearn.ensemble._forest.RandomForestClassifier.__doc__
def __init__(
diff --git a/bigframes/ml/imported.py b/bigframes/ml/imported.py
index 295649ed7f5..68893f80bb3 100644
--- a/bigframes/ml/imported.py
+++ b/bigframes/ml/imported.py
@@ -72,7 +72,8 @@ def predict(self, X: utils.ArrayType) -> bpd.DataFrame:
Input DataFrame. Schema is defined by the model.
Returns:
- bigframes.dataframe.DataFrame: Output DataFrame. Schema is defined by the model."""
+ bigframes.dataframe.DataFrame: Output DataFrame. Schema is defined by the model.
+ """
if not self._bqml_model:
if self.model_path is None:
@@ -151,7 +152,8 @@ def predict(self, X: utils.ArrayType) -> bpd.DataFrame:
Input DataFrame or Series. Schema is defined by the model.
Returns:
- bigframes.dataframe.DataFrame: Output DataFrame, schema is defined by the model."""
+ bigframes.dataframe.DataFrame: Output DataFrame, schema is defined by the model.
+ """
if not self._bqml_model:
if self.model_path is None:
@@ -270,7 +272,8 @@ def predict(self, X: utils.ArrayType) -> bpd.DataFrame:
Input DataFrame or Series. Schema is defined by the model.
Returns:
- bigframes.dataframe.DataFrame: Output DataFrame. Schema is defined by the model."""
+ bigframes.dataframe.DataFrame: Output DataFrame. Schema is defined by the model.
+ """
if not self._bqml_model:
if self.model_path is None:
diff --git a/bigframes/ml/impute.py b/bigframes/ml/impute.py
index b3da895201d..d9be5832612 100644
--- a/bigframes/ml/impute.py
+++ b/bigframes/ml/impute.py
@@ -33,7 +33,6 @@ class SimpleImputer(
base.Transformer,
bigframes_vendored.sklearn.impute._base.SimpleImputer,
):
-
__doc__ = bigframes_vendored.sklearn.impute._base.SimpleImputer.__doc__
def __init__(
diff --git a/bigframes/ml/model_selection.py b/bigframes/ml/model_selection.py
index 5adfb03b7f5..a5ed86557b2 100644
--- a/bigframes/ml/model_selection.py
+++ b/bigframes/ml/model_selection.py
@@ -39,7 +39,6 @@ def train_test_split(
stratify: Union[bpd.Series, None] = None,
shuffle: bool = True,
) -> List[Union[bpd.DataFrame, bpd.Series]]:
-
if test_size is None:
if train_size is None:
test_size = 0.25
diff --git a/bigframes/ml/preprocessing.py b/bigframes/ml/preprocessing.py
index 8bf89b08387..3658bbdab59 100644
--- a/bigframes/ml/preprocessing.py
+++ b/bigframes/ml/preprocessing.py
@@ -328,7 +328,6 @@ def _compile_to_sql(
]
elif self.strategy == "quantile":
-
return [
self._base_sql_generator.ml_quantile_bucketize(
column, self.n_bins, f"kbinsdiscretizer_{column}"
diff --git a/bigframes/ml/sql.py b/bigframes/ml/sql.py
index 2937368c92c..09a46b235d9 100644
--- a/bigframes/ml/sql.py
+++ b/bigframes/ml/sql.py
@@ -160,7 +160,8 @@ def ml_one_hot_encoder(
name: str,
) -> str:
"""Encode ML.ONE_HOT_ENCODER for BQML.
- https://cloud.google.com/bigquery/docs/reference/standard-sql/bigqueryml-syntax-one-hot-encoder for params."""
+ https://cloud.google.com/bigquery/docs/reference/standard-sql/bigqueryml-syntax-one-hot-encoder for params.
+ """
return f"""ML.ONE_HOT_ENCODER({sql_utils.identifier(numeric_expr_sql)}, '{drop}', {top_k}, {frequency_threshold}) OVER() AS {sql_utils.identifier(name)}"""
def ml_label_encoder(
@@ -171,14 +172,16 @@ def ml_label_encoder(
name: str,
) -> str:
"""Encode ML.LABEL_ENCODER for BQML.
- https://cloud.google.com/bigquery/docs/reference/standard-sql/bigqueryml-syntax-label-encoder for params."""
+ https://cloud.google.com/bigquery/docs/reference/standard-sql/bigqueryml-syntax-label-encoder for params.
+ """
return f"""ML.LABEL_ENCODER({sql_utils.identifier(numeric_expr_sql)}, {top_k}, {frequency_threshold}) OVER() AS {sql_utils.identifier(name)}"""
def ml_polynomial_expand(
self, columns: Iterable[str], degree: int, name: str
) -> str:
"""Encode ML.POLYNOMIAL_EXPAND.
- https://cloud.google.com/bigquery/docs/reference/standard-sql/bigqueryml-syntax-polynomial-expand"""
+ https://cloud.google.com/bigquery/docs/reference/standard-sql/bigqueryml-syntax-polynomial-expand
+ """
return f"""ML.POLYNOMIAL_EXPAND({self.struct_columns(columns)}, {degree}) AS {sql_utils.identifier(name)}"""
def ml_distance(
@@ -190,7 +193,8 @@ def ml_distance(
name: str,
) -> str:
"""Encode ML.DISTANCE for BQML.
- https://cloud.google.com/bigquery/docs/reference/standard-sql/bigqueryml-syntax-distance"""
+ https://cloud.google.com/bigquery/docs/reference/standard-sql/bigqueryml-syntax-distance
+ """
return f"""SELECT *, ML.DISTANCE({sql_utils.identifier(col_x)}, {sql_utils.identifier(col_y)}, '{type}') AS {sql_utils.identifier(name)} FROM ({source_sql})"""
def ai_forecast(
@@ -199,7 +203,8 @@ def ai_forecast(
options: Mapping[str, Union[int, float, bool, Iterable[str]]],
):
"""Encode AI.FORECAST.
- https://cloud.google.com/bigquery/docs/reference/standard-sql/bigqueryml-syntax-ai-forecast"""
+ https://cloud.google.com/bigquery/docs/reference/standard-sql/bigqueryml-syntax-ai-forecast
+ """
named_parameters_sql = self.build_named_parameters(**options)
return f"""SELECT * FROM AI.FORECAST(({source_sql}),{named_parameters_sql})"""
diff --git a/bigframes/operations/blob.py b/bigframes/operations/blob.py
index 9210addaa81..7693d7df1db 100644
--- a/bigframes/operations/blob.py
+++ b/bigframes/operations/blob.py
@@ -79,7 +79,8 @@ def metadata(self) -> bigframes.series.Series:
"""Retrieve the metadata of the Blob.
Returns:
- bigframes.series.Series: JSON metadata of the Blob. Contains fields: content_type, md5_hash, size and updated(time)."""
+ bigframes.series.Series: JSON metadata of the Blob. Contains fields: content_type, md5_hash, size and updated(time).
+ """
series_to_check = bigframes.series.Series(self._data._block)
# Check if it's a struct series from a verbose operation
if dtypes.is_struct_like(series_to_check.dtype):
diff --git a/bigframes/session/__init__.py b/bigframes/session/__init__.py
index 757bb50a940..0da06488555 100644
--- a/bigframes/session/__init__.py
+++ b/bigframes/session/__init__.py
@@ -1373,7 +1373,6 @@ def read_json(
write_engine=write_engine,
)
if engine == "bigquery":
-
if dtype is not None:
raise NotImplementedError(
"BigQuery engine does not support the dtype arguments."
@@ -2252,7 +2251,8 @@ def _create_bq_connection(
iam_role: Optional[str] = None,
) -> str:
"""Create the connection with the session settings and try to attach iam role to the connection SA.
- If any of project, location or connection isn't specified, use the session defaults. Returns fully-qualified connection name."""
+ If any of project, location or connection isn't specified, use the session defaults. Returns fully-qualified connection name.
+ """
connection = self._bq_connection if not connection else connection
connection = bigframes.clients.get_canonical_bq_connection_id(
connection_id=connection,
diff --git a/bigframes/session/bq_caching_executor.py b/bigframes/session/bq_caching_executor.py
index 2f5ec035dc6..b870666a47b 100644
--- a/bigframes/session/bq_caching_executor.py
+++ b/bigframes/session/bq_caching_executor.py
@@ -633,7 +633,6 @@ def _execute_plan_gbq(
create_table = True
if not cache_spec.cluster_cols:
-
offsets_id = bigframes.core.identifiers.ColumnId(
bigframes.core.guid.generate_guid()
)
diff --git a/notebooks/getting_started/getting_started_bq_dataframes.ipynb b/notebooks/getting_started/getting_started_bq_dataframes.ipynb
index fa88cf65bbb..f9fb950c534 100644
--- a/notebooks/getting_started/getting_started_bq_dataframes.ipynb
+++ b/notebooks/getting_started/getting_started_bq_dataframes.ipynb
@@ -1329,20 +1329,6 @@
"Running your own Python functions (or being able to bring your packages) and using them at scale is a challenge many data scientists face. BigQuery DataFrames makes it easy to deploy [remote functions](https://cloud.google.com/python/docs/reference/bigframes/latest/bigframes.pandas#bigframes_pandas_remote_function) that run scalar Python functions at BigQuery scale. These functions are persisted as [BigQuery remote functions](https://cloud.google.com/bigquery/docs/remote-functions) that you can then re-use."
]
},
- {
- "cell_type": "code",
- "execution_count": 22,
- "metadata": {},
- "outputs": [],
- "source": [
- "import sys\n",
- "\n",
- "# Python 3.13 is not yet a supported runtime for remote functions.\n",
- "# See: https://cloud.google.com/functions/docs/runtime-support#python for the supported runtimes.\n",
- "if sys.version_info >= (3, 13, 0):\n",
- " sys.exit(0)"
- ]
- },
{
"cell_type": "markdown",
"metadata": {
diff --git a/notebooks/location/regionalized.ipynb b/notebooks/location/regionalized.ipynb
index 066cd181364..23313ec0c4c 100644
--- a/notebooks/location/regionalized.ipynb
+++ b/notebooks/location/regionalized.ipynb
@@ -1339,20 +1339,6 @@
"# Using the Remote Functions"
]
},
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "import sys\n",
- "\n",
- "# Python 3.13 is not yet a supported runtime for remote functions.\n",
- "# See: https://cloud.google.com/functions/docs/runtime-support#python for the supported runtimes.\n",
- "if sys.version_info >= (3, 13, 0):\n",
- " sys.exit(0)"
- ]
- },
{
"attachments": {},
"cell_type": "markdown",
diff --git a/notebooks/remote_functions/remote_function.ipynb b/notebooks/remote_functions/remote_function.ipynb
index e2bc88ecae7..4c0524d4026 100644
--- a/notebooks/remote_functions/remote_function.ipynb
+++ b/notebooks/remote_functions/remote_function.ipynb
@@ -1,20 +1,5 @@
{
"cells": [
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "bcff4fc4",
- "metadata": {},
- "outputs": [],
- "source": [
- "import sys\n",
- "\n",
- "# Python 3.13 is not yet a supported runtime for remote functions.\n",
- "# See: https://cloud.google.com/functions/docs/runtime-support#python for the supported runtimes.\n",
- "if sys.version_info >= (3, 13, 0):\n",
- " sys.exit(0)"
- ]
- },
{
"cell_type": "code",
"execution_count": 19,
diff --git a/notebooks/remote_functions/remote_function_usecases.ipynb b/notebooks/remote_functions/remote_function_usecases.ipynb
index 03ae6520952..e3a94160ad9 100644
--- a/notebooks/remote_functions/remote_function_usecases.ipynb
+++ b/notebooks/remote_functions/remote_function_usecases.ipynb
@@ -21,20 +21,6 @@
"# limitations under the License."
]
},
- {
- "cell_type": "code",
- "execution_count": 1,
- "metadata": {},
- "outputs": [],
- "source": [
- "import sys\n",
- "\n",
- "# Python 3.13 is not yet a supported runtime for remote functions.\n",
- "# See: https://cloud.google.com/functions/docs/runtime-support#python for the supported runtimes.\n",
- "if sys.version_info >= (3, 13, 0):\n",
- " sys.exit(0)"
- ]
- },
{
"cell_type": "markdown",
"metadata": {},
diff --git a/notebooks/remote_functions/remote_function_vertex_claude_model.ipynb b/notebooks/remote_functions/remote_function_vertex_claude_model.ipynb
index 9792c90205c..087f004cb41 100644
--- a/notebooks/remote_functions/remote_function_vertex_claude_model.ipynb
+++ b/notebooks/remote_functions/remote_function_vertex_claude_model.ipynb
@@ -28,20 +28,6 @@
""
]
},
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "import sys\n",
- "\n",
- "# Python 3.13 is not yet a supported runtime for remote functions.\n",
- "# See: https://cloud.google.com/functions/docs/runtime-support#python for the supported runtimes.\n",
- "if sys.version_info >= (3, 13, 0):\n",
- " sys.exit(0)"
- ]
- },
{
"cell_type": "markdown",
"metadata": {},
diff --git a/noxfile.py b/noxfile.py
index 00ada18a469..33ff9885255 100644
--- a/noxfile.py
+++ b/noxfile.py
@@ -28,14 +28,11 @@
import nox
import nox.sessions
-BLACK_VERSION = "black==22.3.0"
+BLACK_VERSION = "black==23.7.0"
FLAKE8_VERSION = "flake8==7.1.2"
ISORT_VERSION = "isort==5.12.0"
MYPY_VERSION = "mypy==1.15.0"
-# TODO: switch to 3.13 once remote functions / cloud run adds a runtime for it (internal issue 333742751)
-LATEST_FULLY_SUPPORTED_PYTHON = "3.12"
-
# Notebook tests should match colab and BQ Studio.
# Check with import sys; sys.version_info
# on a fresh notebook runtime.
@@ -58,13 +55,9 @@
"setup.py",
]
-DEFAULT_PYTHON_VERSION = "3.10"
-
-# Cloud Run Functions supports Python versions up to 3.12
-# https://cloud.google.com/run/docs/runtimes/python
-E2E_TEST_PYTHON_VERSION = "3.12"
+DEFAULT_PYTHON_VERSION = "3.14"
-UNIT_TEST_PYTHON_VERSIONS = ["3.9", "3.10", "3.11", "3.12", "3.13"]
+UNIT_TEST_PYTHON_VERSIONS = ["3.9", "3.10", "3.11", "3.12", "3.13", "3.14"]
UNIT_TEST_STANDARD_DEPENDENCIES = [
"mock",
PYTEST_VERSION,
@@ -79,13 +72,14 @@
# Make sure we leave some versions without "extras" so we know those
# dependencies are actually optional.
"3.13": ["tests", "polars", "scikit-learn", "anywidget"],
+ "3.14": ["tests", "polars", "scikit-learn", "anywidget"],
}
# 3.11 is used by colab.
# 3.10 is needed for Windows tests as it is the only version installed in the
# bigframes-windows container image. For more information, search
# bigframes/windows-docker, internally.
-SYSTEM_TEST_PYTHON_VERSIONS = ["3.9", "3.10", "3.11", "3.12", "3.13"]
+SYSTEM_TEST_PYTHON_VERSIONS = ["3.9", "3.10", "3.11", "3.12", "3.13", "3.14"]
SYSTEM_TEST_STANDARD_DEPENDENCIES = [
"jinja2",
"mock",
@@ -107,8 +101,9 @@
# Make sure we leave some versions without "extras" so we know those
# dependencies are actually optional.
"3.10": ["tests", "scikit-learn", "anywidget"],
- LATEST_FULLY_SUPPORTED_PYTHON: ["tests", "scikit-learn", "polars", "anywidget"],
+ "3.12": ["tests", "scikit-learn", "polars", "anywidget"],
"3.13": ["tests", "polars", "anywidget"],
+ "3.14": ["tests", "polars", "anywidget"],
}
LOGGING_NAME_ENV_VAR = "BIGFRAMES_PERFORMANCE_LOG_NAME"
@@ -124,7 +119,7 @@
# from GitHub actions.
"unit_noextras",
"system-3.9", # No extras.
- f"system-{LATEST_FULLY_SUPPORTED_PYTHON}", # All extras.
+ f"system-{DEFAULT_PYTHON_VERSION}", # All extras.
"cover",
# TODO(b/401609005): remove
"cleanup",
@@ -258,7 +253,7 @@ def unit_noextras(session):
run_unit(session, install_test_extra=False)
-@nox.session(python=DEFAULT_PYTHON_VERSION)
+@nox.session(python="3.10")
def mypy(session):
"""Run type checks with mypy."""
# Editable mode is not compatible with mypy when there are multiple
@@ -401,7 +396,7 @@ def system(session: nox.sessions.Session):
)
-@nox.session(python=LATEST_FULLY_SUPPORTED_PYTHON)
+@nox.session(python=DEFAULT_PYTHON_VERSION)
def system_noextras(session: nox.sessions.Session):
"""Run the system test suite."""
run_system(
@@ -412,7 +407,7 @@ def system_noextras(session: nox.sessions.Session):
)
-@nox.session(python=LATEST_FULLY_SUPPORTED_PYTHON)
+@nox.session(python="3.12")
def doctest(session: nox.sessions.Session):
"""Run the system test suite."""
run_system(
@@ -440,7 +435,7 @@ def doctest(session: nox.sessions.Session):
)
-@nox.session(python=E2E_TEST_PYTHON_VERSION)
+@nox.session(python=DEFAULT_PYTHON_VERSION)
def e2e(session: nox.sessions.Session):
"""Run the large tests in system test suite."""
run_system(
@@ -538,7 +533,7 @@ def docs(session):
)
-@nox.session(python=DEFAULT_PYTHON_VERSION)
+@nox.session(python="3.10")
def docfx(session):
"""Build the docfx yaml files for this library."""
@@ -800,20 +795,6 @@ def notebook(session: nox.Session):
"notebooks/dataframes/anywidget_mode.ipynb",
]
- # TODO: remove exception for Python 3.13 cloud run adds a runtime for it (internal issue 333742751)
- # TODO: remove exception for Python 3.13 if nbmake adds support for
- # sys.exit(0) or pytest.skip(...).
- # See: https://github.com/treebeardtech/nbmake/issues/134
- if session.python == "3.13":
- denylist.extend(
- [
- "notebooks/getting_started/getting_started_bq_dataframes.ipynb",
- "notebooks/remote_functions/remote_function_usecases.ipynb",
- "notebooks/remote_functions/remote_function_vertex_claude_model.ipynb",
- "notebooks/remote_functions/remote_function.ipynb",
- ]
- )
-
# Convert each Path notebook object to a string using a list comprehension,
# and remove tests that we choose not to test.
notebooks = [str(nb) for nb in notebooks_list]
@@ -988,7 +969,7 @@ def benchmark(session: nox.Session):
)
-@nox.session(python="3.10")
+@nox.session(python=DEFAULT_PYTHON_VERSION)
def release_dry_run(session):
env = {}
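For context on the constant shuffling above: `@nox.session(python=...)` pins a session to one interpreter, or fans it out over a list, so DEFAULT_PYTHON_VERSION and the two version lists are the single switch behind names like `unit-3.14`. A stripped-down sketch of the mechanism, with hypothetical session bodies:

```python
import nox

DEFAULT_PYTHON_VERSION = "3.14"
UNIT_TEST_PYTHON_VERSIONS = ["3.9", "3.10", "3.11", "3.12", "3.13", "3.14"]


@nox.session(python=UNIT_TEST_PYTHON_VERSIONS)
def unit(session: nox.Session) -> None:
    # nox creates one virtualenv per listed version: unit-3.9 ... unit-3.14.
    session.install("pytest")
    session.run("pytest", "tests/unit")


@nox.session(python=DEFAULT_PYTHON_VERSION)
def lint(session: nox.Session) -> None:
    # Single-version sessions run only under the pinned interpreter.
    session.install("flake8")
    session.run("flake8", ".")
```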
diff --git a/samples/polars/noxfile.py b/samples/polars/noxfile.py
index 494639d2fa5..782da043299 100644
--- a/samples/polars/noxfile.py
+++ b/samples/polars/noxfile.py
@@ -88,7 +88,7 @@ def get_pytest_env_vars() -> Dict[str, str]:
# DO NOT EDIT - automatically generated.
# All versions used to test samples.
-ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "3.13"]
+ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "3.13", "3.14"]
# Any default versions that should be ignored.
IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"]
diff --git a/samples/snippets/noxfile.py b/samples/snippets/noxfile.py
index 494639d2fa5..782da043299 100644
--- a/samples/snippets/noxfile.py
+++ b/samples/snippets/noxfile.py
@@ -88,7 +88,7 @@ def get_pytest_env_vars() -> Dict[str, str]:
# DO NOT EDIT - automatically generated.
# All versions used to test samples.
-ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "3.13"]
+ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "3.13", "3.14"]
# Any default versions that should be ignored.
IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"]
diff --git a/setup.py b/setup.py
index 090b1035364..641a1f32497 100644
--- a/setup.py
+++ b/setup.py
@@ -138,6 +138,7 @@
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Programming Language :: Python :: 3.13",
+ "Programming Language :: Python :: 3.14",
"Operating System :: OS Independent",
"Topic :: Internet",
],
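One caveat on the classifier addition: trove classifiers only advertise support on PyPI, while `python_requires` is what pip actually enforces. A hedged sketch of the distinction (the package name and version floor are illustrative, not read from setup.py):

```python
from setuptools import setup

setup(
    name="example-package",  # hypothetical
    python_requires=">=3.9",  # enforced by pip at install time (assumed floor)
    classifiers=[
        # Informational only; adding 3.14 here gates nothing by itself.
        "Programming Language :: Python :: 3.13",
        "Programming Language :: Python :: 3.14",
    ],
)
```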
diff --git a/tests/system/large/functions/test_managed_function.py b/tests/system/large/functions/test_managed_function.py
index 732123ec847..a74ff292732 100644
--- a/tests/system/large/functions/test_managed_function.py
+++ b/tests/system/large/functions/test_managed_function.py
@@ -32,7 +32,6 @@
def test_managed_function_array_output(session, scalars_dfs, dataset_id):
try:
-
with warnings.catch_warnings(record=True) as record:
@session.udf(
@@ -85,7 +84,6 @@ def featurize(x: int) -> list[float]:
def test_managed_function_series_apply(session, dataset_id, scalars_dfs):
try:
-
# An explicit name with "def" in it is used to test the robustness of
# the user code extraction logic, which depends on that term.
bq_name = f"{prefixer.create_prefix()}_def_to_test_code_extraction"
@@ -145,7 +143,6 @@ def test_managed_function_series_apply_array_output(
scalars_dfs,
):
try:
-
with pytest.warns(bfe.PreviewWarning, match="udf is in preview."):
@session.udf(dataset=dataset_id, name=prefixer.create_prefix())
@@ -233,7 +230,6 @@ def add(x: int, y: int) -> int:
def test_managed_function_series_combine_array_output(session, dataset_id, scalars_dfs):
try:
-
# The type hints in this function's signature has conflicts. The
# `input_types` and `output_type` arguments from udf decorator take
# precedence and will be used instead.
@@ -451,7 +447,6 @@ def foo(x, y, z):
return [str(x), str(y), z]
try:
-
assert getattr(foo, "is_row_processor") is False
assert getattr(foo, "input_dtypes") == expected_dtypes
assert getattr(foo, "output_dtype") == pandas.ArrowDtype(
@@ -771,7 +766,6 @@ def analyze(row):
"\nenvironment may not precisely match your local environment."
),
):
-
analyze_mf = session.udf(
input_types=pandas.Series,
output_type=str,
@@ -1087,7 +1081,6 @@ def analyze(s: pandas.Series, x: bool, y: float) -> str:
def test_managed_function_df_where_mask(session, dataset_id, scalars_dfs):
try:
-
# The return type has to be bool type for callable where condition.
def is_sum_positive(a, b):
return a + b > 0
@@ -1154,7 +1147,6 @@ def is_sum_positive(a, b):
def test_managed_function_df_where_mask_series(session, dataset_id, scalars_dfs):
try:
-
# The return type has to be bool type for callable where condition.
def is_sum_positive_series(s):
return s["int64_col"] + s["int64_too"] > 0
@@ -1254,7 +1246,6 @@ def the_sum(s: pandas.Series) -> int:
def test_managed_function_series_where_mask_map(session, dataset_id, scalars_dfs):
try:
-
# The return type has to be bool type for callable where condition.
def _is_positive(s):
return s + 1000 > 0
@@ -1307,7 +1298,6 @@ def _is_positive(s):
def test_managed_function_series_apply_args(session, dataset_id, scalars_dfs):
try:
-
with pytest.warns(bfe.PreviewWarning, match="udf is in preview."):
@session.udf(dataset=dataset_id, name=prefixer.create_prefix())
diff --git a/tests/system/large/functions/test_remote_function.py b/tests/system/large/functions/test_remote_function.py
index 2591c0c13a2..3c274cc9551 100644
--- a/tests/system/large/functions/test_remote_function.py
+++ b/tests/system/large/functions/test_remote_function.py
@@ -842,7 +842,6 @@ def test_remote_function_with_external_package_dependencies(
session, scalars_dfs, dataset_id, bq_cf_connection
):
try:
-
# The return type hint in this function's signature has conflict. The
# `output_type` argument from remote_function decorator takes precedence
# and will be used instead.
@@ -897,7 +896,6 @@ def test_remote_function_with_explicit_name_reuse(
session, scalars_dfs, dataset_id, bq_cf_connection
):
try:
-
dirs_to_cleanup = []
# Define a user code
@@ -1251,7 +1249,6 @@ def test_remote_function_via_session_custom_sa(scalars_dfs):
rf_session = bigframes.Session(context=bigframes.BigQueryOptions(project=project))
try:
-
# TODO(shobs): Figure out why the default ingress setting
# (internal-only) does not work here
@rf_session.remote_function(
@@ -1324,7 +1321,6 @@ def test_remote_function_via_session_custom_build_sa(
rf_session = bigframes.Session(context=bigframes.BigQueryOptions(project=project))
try:
-
# TODO(shobs): Figure out why the default ingress setting
# (internal-only) does not work here
@rf_session.remote_function(
@@ -3007,7 +3003,6 @@ def foo(x: int) -> int:
@pytest.mark.flaky(retries=2, delay=120)
def test_remote_function_df_where_mask(session, dataset_id, scalars_dfs):
try:
-
# The return type has to be bool type for callable where condition.
def is_sum_positive(a, b):
return a + b > 0
@@ -3086,7 +3081,6 @@ def the_sum(a, b):
@pytest.mark.flaky(retries=2, delay=120)
def test_remote_function_df_where_mask_series(session, dataset_id, scalars_dfs):
try:
-
# The return type has to be bool type for callable where condition.
def is_sum_positive_series(s: pandas.Series) -> bool:
return s["int64_col"] + s["int64_too"] > 0
diff --git a/tests/system/small/ml/test_metrics.py b/tests/system/small/ml/test_metrics.py
index 040d4d97f64..848acb714d2 100644
--- a/tests/system/small/ml/test_metrics.py
+++ b/tests/system/small/ml/test_metrics.py
@@ -798,7 +798,6 @@ def test_precision_score_binary_default_arguments(session):
def test_precision_score_binary_invalid_input_raise_error(
session, y_true, y_pred, pos_label
):
-
bf_y_true = session.read_pandas(y_true)
bf_y_pred = session.read_pandas(y_pred)
diff --git a/tests/system/small/ml/test_model_selection.py b/tests/system/small/ml/test_model_selection.py
index ebce6e405a5..cbea1e20a0a 100644
--- a/tests/system/small/ml/test_model_selection.py
+++ b/tests/system/small/ml/test_model_selection.py
@@ -323,7 +323,13 @@ def test_train_test_split_value_error(penguins_df_default_index, train_size, tes
)
def test_train_test_split_stratify(df_fixture, request):
df = request.getfixturevalue(df_fixture)
- X = df[["species", "island", "culmen_length_mm",]].rename(
+ X = df[
+ [
+ "species",
+ "island",
+ "culmen_length_mm",
+ ]
+ ].rename(
columns={"species": "x_species"}
) # Keep "species" col just for easy checking. Rename to avoid conflicts.
y = df[["species"]]
diff --git a/tests/system/small/test_anywidget.py b/tests/system/small/test_anywidget.py
index fad8f5b2b50..b9f7b87f5ea 100644
--- a/tests/system/small/test_anywidget.py
+++ b/tests/system/small/test_anywidget.py
@@ -406,7 +406,6 @@ def test_widget_with_empty_dataframe_should_have_zero_row_count(
def test_widget_with_empty_dataframe_should_render_table_headers(
empty_bf_df: bf.dataframe.DataFrame,
):
-
"""
@@ -422,7 +421,6 @@ def test_widget_with_empty_dataframe_should_render_table_headers(
"""
with bigframes.option_context("display.repr_mode", "anywidget"):
-
from bigframes.display import TableWidget
widget = TableWidget(empty_bf_df)
diff --git a/tests/system/small/test_dataframe.py b/tests/system/small/test_dataframe.py
index 0f7b782b66d..684e39ddf78 100644
--- a/tests/system/small/test_dataframe.py
+++ b/tests/system/small/test_dataframe.py
@@ -946,7 +946,6 @@ def test_repr_w_display_options(scalars_dfs, session):
with bigframes.option_context(
"display.max_rows", 10, "display.max_columns", 5, "display.max_colwidth", 10
):
-
# When there are 10 or fewer rows, the outputs should be identical except for the extra note.
actual = scalars_df.head(10).__repr__()
executions_post = metrics.execution_count
@@ -2684,7 +2683,6 @@ def test_df_idxmax():
],
)
def test_df_align(join, axis):
-
index1: pandas.Index = pandas.Index([1, 2, 3, 4], dtype="Int64")
index2: pandas.Index = pandas.Index([1, 2, 4, 5], dtype="Int64")
@@ -5073,7 +5071,6 @@ def test_iloc_list_multiindex(scalars_dfs):
def test_iloc_empty_list(scalars_df_index, scalars_pandas_df_index):
-
index_list: List[int] = []
bf_result = scalars_df_index.iloc[index_list]
diff --git a/tests/system/small/test_dataframe_io.py b/tests/system/small/test_dataframe_io.py
index 02acb8d8f25..fe2e78385b8 100644
--- a/tests/system/small/test_dataframe_io.py
+++ b/tests/system/small/test_dataframe_io.py
@@ -257,7 +257,6 @@ def test_to_pandas_override_global_option(scalars_df_index):
# Direct call to_pandas uses global default setting (allow_large_results=True),
# table has 'bqdf' prefix.
with bigframes.option_context("compute.allow_large_results", True):
-
scalars_df_index.to_pandas()
table_id = scalars_df_index._query_job.destination.table_id
assert table_id is not None
@@ -328,7 +327,6 @@ def test_to_pandas_dry_run(session, scalars_pandas_df_multi_index):
def test_to_arrow_override_global_option(scalars_df_index):
# Direct call to_arrow uses global default setting (allow_large_results=True),
with bigframes.option_context("compute.allow_large_results", True):
-
scalars_df_index.to_arrow()
table_id = scalars_df_index._query_job.destination.table_id
assert table_id is not None
diff --git a/tests/system/small/test_index_io.py b/tests/system/small/test_index_io.py
index 306b15e67a2..b4d7c06da52 100644
--- a/tests/system/small/test_index_io.py
+++ b/tests/system/small/test_index_io.py
@@ -18,7 +18,6 @@
def test_to_pandas_override_global_option(scalars_df_index):
with bigframes.option_context("compute.allow_large_results", True):
-
bf_index = scalars_df_index.index
# Direct call to_pandas uses global default setting (allow_large_results=True),
@@ -43,7 +42,6 @@ def test_to_pandas_dry_run(scalars_df_index):
def test_to_numpy_override_global_option(scalars_df_index):
with bigframes.option_context("compute.allow_large_results", True):
-
bf_index = scalars_df_index.index
# Direct call to_numpy uses global default setting (allow_large_results=True),
diff --git a/tests/system/small/test_null_index.py b/tests/system/small/test_null_index.py
index 4aa7ba8c77c..eb9dc114dde 100644
--- a/tests/system/small/test_null_index.py
+++ b/tests/system/small/test_null_index.py
@@ -381,7 +381,6 @@ def test_null_index_df_concat(scalars_df_null_index, scalars_pandas_df_default_i
def test_null_index_map_dict_input(
scalars_df_null_index, scalars_pandas_df_default_index
):
-
local_map = dict()
# construct a local map, incomplete to cover behavior
for s in scalars_pandas_df_default_index.string_col[:-3]:
diff --git a/tests/system/small/test_pandas.py b/tests/system/small/test_pandas.py
index a1c0dc9851f..e83ba058371 100644
--- a/tests/system/small/test_pandas.py
+++ b/tests/system/small/test_pandas.py
@@ -101,7 +101,7 @@ def test_get_dummies_dataframe(scalars_dfs, kwargs):
# dtype argument above is needed for pandas v1 only
# adjust for expected dtype differences
- for (column_name, type_name) in zip(pd_result.columns, pd_result.dtypes):
+ for column_name, type_name in zip(pd_result.columns, pd_result.dtypes):
if type_name == "bool":
pd_result[column_name] = pd_result[column_name].astype("boolean")
@@ -130,7 +130,7 @@ def test_get_dummies_dataframe_duplicate_labels(scalars_dfs):
# dtype argument above is needed for pandas v1 only
# adjust for expected dtype differences
- for (column_name, type_name) in zip(pd_result.columns, pd_result.dtypes):
+ for column_name, type_name in zip(pd_result.columns, pd_result.dtypes):
if type_name == "bool":
pd_result[column_name] = pd_result[column_name].astype("boolean")
@@ -147,7 +147,7 @@ def test_get_dummies_series(scalars_dfs):
# dtype argument above is needed for pandas v1 only
# adjust for expected dtype differences
- for (column_name, type_name) in zip(pd_result.columns, pd_result.dtypes):
+ for column_name, type_name in zip(pd_result.columns, pd_result.dtypes):
if type_name == "bool": # pragma: NO COVER
pd_result[column_name] = pd_result[column_name].astype("boolean")
pd_result.columns = pd_result.columns.astype(object)
@@ -168,7 +168,7 @@ def test_get_dummies_series_nameless(scalars_dfs):
# dtype argument above is needed for pandas v1 only
# adjust for expected dtype differences
- for (column_name, type_name) in zip(pd_result.columns, pd_result.dtypes):
+ for column_name, type_name in zip(pd_result.columns, pd_result.dtypes):
if type_name == "bool": # pragma: NO COVER
pd_result[column_name] = pd_result[column_name].astype("boolean")
pd_result.columns = pd_result.columns.astype(object)
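The four zip-loop hunks above drop the parentheses around the tuple target; unpacking binds identically either way, so this is purely cosmetic and consistent with the same Black upgrade. A sketch:

```python
pairs = [("a", 1), ("b", 2)]

# Before: for (name, value) in pairs:  -- the parentheses were redundant.
for name, value in pairs:
    print(name, value)  # tuple unpacking is unchanged
```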
diff --git a/tests/system/small/test_pandas_options.py b/tests/system/small/test_pandas_options.py
index 7a750ddfd3c..a2a90f3fe52 100644
--- a/tests/system/small/test_pandas_options.py
+++ b/tests/system/small/test_pandas_options.py
@@ -50,7 +50,6 @@ def test_read_gbq_start_sets_session_location(
query_prefix,
reset_default_session_and_location,
):
-
# Form query as a table name or a SQL depending on the test scenario
query_tokyo = test_data_tables_tokyo["scalars"]
query = test_data_tables["scalars"]
diff --git a/tests/system/small/test_series.py b/tests/system/small/test_series.py
index a95c9623e52..ca98377c8b9 100644
--- a/tests/system/small/test_series.py
+++ b/tests/system/small/test_series.py
@@ -3002,7 +3002,6 @@ def test_value_counts_w_cut(scalars_dfs):
def test_iloc_nested(scalars_df_index, scalars_pandas_df_index):
-
bf_result = scalars_df_index["string_col"].iloc[1:].iloc[1:].to_pandas()
pd_result = scalars_pandas_df_index["string_col"].iloc[1:].iloc[1:]
diff --git a/tests/system/small/test_series_io.py b/tests/system/small/test_series_io.py
index 426679d37d0..2f1780812ae 100644
--- a/tests/system/small/test_series_io.py
+++ b/tests/system/small/test_series_io.py
@@ -22,7 +22,6 @@
def test_to_pandas_override_global_option(scalars_df_index):
with bigframes.option_context("compute.allow_large_results", True):
-
bf_series = scalars_df_index["int64_col"]
# Direct call to_pandas uses global default setting (allow_large_results=True)
diff --git a/tests/system/small/test_session.py b/tests/system/small/test_session.py
index 0501df3f8c9..5aa859c883f 100644
--- a/tests/system/small/test_session.py
+++ b/tests/system/small/test_session.py
@@ -871,7 +871,6 @@ def test_read_pandas(session, scalars_dfs):
def test_read_pandas_series(session):
-
idx: pd.Index = pd.Index([2, 7, 1, 2, 8], dtype=pd.Int64Dtype())
pd_series = pd.Series([3, 1, 4, 1, 5], dtype=pd.Int64Dtype(), index=idx)
bf_series = session.read_pandas(pd_series)
@@ -880,7 +879,6 @@ def test_read_pandas_series(session):
def test_read_pandas_index(session):
-
pd_idx: pd.Index = pd.Index([2, 7, 1, 2, 8], dtype=pd.Int64Dtype())
bf_idx = session.read_pandas(pd_idx)
diff --git a/tests/unit/core/compile/sqlglot/expressions/test_string_ops.py b/tests/unit/core/compile/sqlglot/expressions/test_string_ops.py
index b1fbbb0fc9b..fff2cc06df4 100644
--- a/tests/unit/core/compile/sqlglot/expressions/test_string_ops.py
+++ b/tests/unit/core/compile/sqlglot/expressions/test_string_ops.py
@@ -183,7 +183,6 @@ def test_rstrip(scalar_types_df: bpd.DataFrame, snapshot):
def test_startswith(scalar_types_df: bpd.DataFrame, snapshot):
-
col_name = "string_col"
bf_df = scalar_types_df[[col_name]]
ops_map = {
diff --git a/tests/unit/core/compile/sqlglot/test_compile_concat.py b/tests/unit/core/compile/sqlglot/test_compile_concat.py
index c176b2e1164..80cf16558b5 100644
--- a/tests/unit/core/compile/sqlglot/test_compile_concat.py
+++ b/tests/unit/core/compile/sqlglot/test_compile_concat.py
@@ -28,7 +28,6 @@ def test_compile_concat(scalar_types_df: bpd.DataFrame, snapshot):
def test_compile_concat_filter_sorted(scalar_types_df: bpd.DataFrame, snapshot):
-
scalars_array_value = scalar_types_df._block.expr
input_1 = scalars_array_value.select_columns(["float64_col", "int64_col"]).order_by(
[ordering.ascending_over("int64_col")]
diff --git a/tests/unit/functions/test_remote_function_utils.py b/tests/unit/functions/test_remote_function_utils.py
index 812d65bbad2..e200e7c12a1 100644
--- a/tests/unit/functions/test_remote_function_utils.py
+++ b/tests/unit/functions/test_remote_function_utils.py
@@ -441,7 +441,6 @@ def test_has_conflict_output_type_no_annotation():
),
)
def test_get_bigframes_metadata(metadata_options, metadata_string):
-
assert _utils.get_bigframes_metadata(**metadata_options) == metadata_string
@@ -514,7 +513,6 @@ def test_get_bigframes_metadata_array_type_not_serializable(output_type):
def test_get_python_output_type_from_bigframes_metadata(
metadata_string, python_output_type
):
-
assert (
_utils.get_python_output_type_from_bigframes_metadata(metadata_string)
== python_output_type
diff --git a/tests/unit/test_dataframe_polars.py b/tests/unit/test_dataframe_polars.py
index 1c73d9dc6b0..1125b13f958 100644
--- a/tests/unit/test_dataframe_polars.py
+++ b/tests/unit/test_dataframe_polars.py
@@ -1979,7 +1979,6 @@ def test_df_idxmax():
],
)
def test_df_align(join, axis):
-
index1: pandas.Index = pandas.Index([1, 2, 3, 4], dtype="Int64")
index2: pandas.Index = pandas.Index([1, 2, 4, 5], dtype="Int64")
@@ -3906,7 +3905,6 @@ def test_iloc_list_multiindex(scalars_dfs):
def test_iloc_empty_list(scalars_df_index, scalars_pandas_df_index):
-
index_list: List[int] = []
bf_result = scalars_df_index.iloc[index_list]
diff --git a/tests/unit/test_series_polars.py b/tests/unit/test_series_polars.py
index 516a46d4dd1..494e2499dbc 100644
--- a/tests/unit/test_series_polars.py
+++ b/tests/unit/test_series_polars.py
@@ -3025,7 +3025,6 @@ def test_value_counts_w_cut(scalars_dfs):
def test_iloc_nested(scalars_df_index, scalars_pandas_df_index):
-
bf_result = scalars_df_index["string_col"].iloc[1:].iloc[1:].to_pandas()
pd_result = scalars_pandas_df_index["string_col"].iloc[1:].iloc[1:]
diff --git a/third_party/bigframes_vendored/sklearn/preprocessing/_encoder.py b/third_party/bigframes_vendored/sklearn/preprocessing/_encoder.py
index 64a5786f17d..1301ef329ab 100644
--- a/third_party/bigframes_vendored/sklearn/preprocessing/_encoder.py
+++ b/third_party/bigframes_vendored/sklearn/preprocessing/_encoder.py
@@ -84,5 +84,6 @@ def transform(self, X):
Returns:
bigframes.dataframe.DataFrame: The result is categorized as index: number, value: number,
- where index is the position of the dict seeing the category, and value is 0 or 1."""
+ where index is the position of the dict seeing the category, and value is 0 or 1.
+ """
raise NotImplementedError(constants.ABSTRACT_METHOD_ERROR_MESSAGE)