@@ -77,6 +77,7 @@ from pandas.tseries.offsets import (
 P = ParamSpec("P")
 
 HashableT = TypeVar("HashableT", bound=Hashable)
+HashableT0 = TypeVar("HashableT0", bound=Hashable, default=Any)
 HashableT1 = TypeVar("HashableT1", bound=Hashable)
 HashableT2 = TypeVar("HashableT2", bound=Hashable)
 HashableT3 = TypeVar("HashableT3", bound=Hashable)
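For readers unfamiliar with PEP 696, here is a minimal sketch of what `default=Any` buys; `KeyT` and `Bucket` are made-up names, not part of pandas-stubs, and `typing_extensions.TypeVar` is assumed for `default=` support on Pythons before 3.13. A generic left unparametrized resolves to its default instead of an unsolved type variable.

```python
# Illustrative sketch of a PEP 696 TypeVar default; `KeyT` and `Bucket`
# are hypothetical names, not pandas-stubs symbols.
from collections.abc import Hashable
from typing import Any, Generic

from typing_extensions import TypeVar  # provides `default=` before Python 3.13

KeyT = TypeVar("KeyT", bound=Hashable, default=Any)  # mirrors HashableT0

class Bucket(Generic[KeyT]):
    def __init__(self, key: KeyT) -> None:
        self.key = key

b1: Bucket = Bucket("a")     # bare `Bucket` is understood as Bucket[Any]
b2: Bucket[int] = Bucket(1)  # explicit parametrization still works
```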
@@ -776,7 +777,7 @@ XMLParsers: TypeAlias = Literal["lxml", "etree"]
 HTMLFlavors: TypeAlias = Literal["lxml", "html5lib", "bs4"]
 
 # Interval closed type
-IntervalT = TypeVar("IntervalT", bound=Interval)
+IntervalT = TypeVar("IntervalT", bound=Interval, default=Interval)
 IntervalLeftRight: TypeAlias = Literal["left", "right"]
 IntervalClosedType: TypeAlias = IntervalLeftRight | Literal["both", "neither"]
 
@@ -874,7 +875,11 @@ ExcelWriterMergeCells: TypeAlias = bool | Literal["columns"]
 
 # read_csv: usecols
 UsecolsArgType: TypeAlias = (
-    SequenceNotStr[Hashable] | range | AnyArrayLike | Callable[[HashableT], bool] | None
+    SequenceNotStr[Hashable]
+    | range
+    | AnyArrayLike
+    | Callable[[HashableT0], bool]
+    | None
 )
 
 # maintain the sub-type of any hashable sequence
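As a usage note, the `Callable[[HashableT0], bool]` arm is the callable form of `usecols`, which receives each column label and keeps the columns for which it returns `True`. A small sketch follows; the file name and column prefix are invented.

```python
import pandas as pd

# Callable form of `usecols`: called once per column label (a Hashable),
# keeping only the columns for which it returns True.
df = pd.read_csv(
    "sales.csv",  # hypothetical file
    usecols=lambda col: str(col).startswith("price_"),  # invented prefix
)
```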
@@ -920,6 +925,7 @@ PyArrowNotStrDtypeArg: TypeAlias = (
 StrLike: TypeAlias = str | np.str_
 
 ScalarT = TypeVar("ScalarT", bound=Scalar)
+ScalarT0 = TypeVar("ScalarT0", bound=Scalar, default=Scalar)
 # Refine the definitions below in 3.9 to use the specialized type.
 np_num: TypeAlias = np.bool | np.integer | np.floating | np.complexfloating
 np_ndarray_intp: TypeAlias = npt.NDArray[np.intp]
@@ -1015,8 +1021,9 @@ SeriesDType: TypeAlias = (
     | datetime.datetime  # includes pd.Timestamp
     | datetime.timedelta  # includes pd.Timedelta
 )
+S0 = TypeVar("S0", bound=SeriesDType, default=Any)
 S1 = TypeVar("S1", bound=SeriesDType, default=Any)
-# Like S1, but without `default=Any`.
+# Like S0 and S1, but without `default=Any`.
 S2 = TypeVar("S2", bound=SeriesDType)
 S2_contra = TypeVar("S2_contra", bound=SeriesDType, contravariant=True)
 S2_NDT_contra = TypeVar(
@@ -1050,14 +1057,14 @@ IndexingInt: TypeAlias = (
 )
 
 # AxesData is used for data for Index
-AxesData: TypeAlias = Mapping[S3, Any] | Axes | KeysView[S3]
+AxesData: TypeAlias = Mapping[S0, Any] | Axes | KeysView[S0]
 
 # Any plain Python or numpy function
 Function: TypeAlias = np.ufunc | Callable[..., Any]
 # Use a distinct HashableT in shared types to avoid conflicts with
 # shared HashableT and HashableT#. This one can be used if the identical
 # type is need in a function that uses GroupByObjectNonScalar
-_HashableTa = TypeVar("_HashableTa", bound=Hashable)
+_HashableTa = TypeVar("_HashableTa", bound=Hashable, default=Any)
 if TYPE_CHECKING:  # noqa: PYI002
     ByT = TypeVar(
         "ByT",
@@ -1075,7 +1082,7 @@ if TYPE_CHECKING:  # noqa: PYI002
         | Scalar
         | Period
         | Interval[int | float | Timestamp | Timedelta]
-        | tuple,
+        | tuple[Any, ...],
     )
     # Use a distinct SeriesByT when using groupby with Series of known dtype.
    # Essentially, an intersection between Series S1 TypeVar, and ByT TypeVar
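Bare `tuple` is an implicit `tuple[Any, ...]`, and strict checker settings (for example mypy's `disallow_any_generics`) flag the implicit form, which is presumably why the constraint is now spelled out. A trivial sketch; the variable name is invented.

```python
from typing import Any

# A group-by key of arbitrary length and element types, written explicitly
# rather than as the implicit bare `tuple`.
group_key: tuple[Any, ...] = ("store", 3, None)
```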
@@ -1130,10 +1137,10 @@ StataDateFormat: TypeAlias = Literal[
 # `DataFrame.replace` also accepts mappings of these.
 ReplaceValue: TypeAlias = (
     Scalar
-    | Pattern
+    | Pattern[str]
     | NAType
-    | Sequence[Scalar | Pattern]
-    | Mapping[HashableT, ScalarT]
+    | Sequence[Scalar | Pattern[str]]
+    | Mapping[HashableT0, ScalarT0]
     | Series
     | None
 )
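`ReplaceValue` mirrors what `DataFrame.replace` accepts; a brief sketch of the `Pattern[str]` and mapping arms follows, with invented frame contents.

```python
import re

import pandas as pd

df = pd.DataFrame({"city": ["NYC ", " LA", "SF"]})  # invented data

# Pattern[str] arm: a compiled regex as `to_replace`, used with regex=True.
df = df.replace(re.compile(r"\s+"), "", regex=True)

# Mapping arm: a plain {old: new} mapping of scalars.
df = df.replace({"NYC": "New York", "LA": "Los Angeles"})
```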