@@ -1,5 +1,16 @@
+import datetime
 import os
-from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Union
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Dict,
+    Iterable,
+    List,
+    Optional,
+    Sequence,
+    Tuple,
+    Union,
+)

 import requests

@@ -32,6 +43,7 @@
     EXPORTED_ROWS,
     FRAME_RATE_KEY,
     ITEMS_KEY,
+    JOB_REQ_LIMIT,
     KEEP_HISTORY_KEY,
     MESSAGE_KEY,
     NAME_KEY,
@@ -54,6 +66,7 @@
 from .dataset_item_uploader import DatasetItemUploader
 from .deprecation_warning import deprecated
 from .errors import NotFoundError, NucleusAPIError
+from .job import CustomerJobTypes, jobs_status_overview
 from .metadata_manager import ExportMetadataType, MetadataManager
 from .payload_constructor import (
     construct_append_scenes_payload,
@@ -70,6 +83,9 @@
 )
 from .upload_response import UploadResponse

+if TYPE_CHECKING:
+    from . import NucleusClient
+
 # TODO: refactor to reduce this file to under 1000 lines.
 # pylint: disable=C0302

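The new `if TYPE_CHECKING:` guard imports `NucleusClient` for static analysis only, so the annotation added to `__init__` below can reference the client class without creating a runtime circular import between this module and the package `__init__`. A minimal sketch of the pattern, using placeholder module names (`a`, `b`) that are not part of this diff:

# b.py -- sketch of the TYPE_CHECKING pattern; assumes a.py imports b.py at runtime
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Seen only by static type checkers (mypy, pyright) and skipped at runtime,
    # so this import cannot complete a circular-import cycle.
    from a import A

class B:
    def __init__(self, helper: "A") -> None:
        # Quoted forward reference: resolved lazily by the checker,
        # so the name A never needs to exist at runtime.
        self.helper = helper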
@@ -107,7 +123,7 @@ class Dataset:
         existing_dataset = client.get_dataset("YOUR_DATASET_ID")
     """

-    def __init__(self, dataset_id, client, name=None):
+    def __init__(self, dataset_id, client: "NucleusClient", name=None):
         self.id = dataset_id
         self._client = client
         # NOTE: Optionally set name on creation such that the property access doesn't need to hit the server
@@ -144,7 +160,7 @@ def is_scene(self) -> bool:
144160 {}, f"dataset/{ self .id } /is_scene" , requests .get
145161 )[DATASET_IS_SCENE_KEY ]
146162 self ._is_scene = response
147- return self ._is_scene
163+ return self ._is_scene # type: ignore
148164
149165 @property
150166 def model_runs (self ) -> List [str ]:
@@ -153,7 +169,7 @@ def model_runs(self) -> List[str]:
         response = self._client.make_request(
             {}, f"dataset/{self.id}/model_runs", requests.get
         )
-        return response
+        return response  # type: ignore

     @property
     def slices(self) -> List[Slice]:
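Both `is_scene` and `model_runs` return lazily cached values, so mypy presumably infers the backing attribute as `Optional[...]` (initialized to `None` elsewhere in the class, outside these hunks), which is what the new `# type: ignore` comments suppress. A sketch of an alternative that narrows instead of suppressing, assuming that cache shape; `_fetch_is_scene` is a hypothetical stand-in for the API call:

from typing import Optional, cast

class Dataset:
    def __init__(self) -> None:
        # Lazy cache: starts as None, so mypy infers Optional[bool]
        self._is_scene: Optional[bool] = None

    @property
    def is_scene(self) -> bool:
        if self._is_scene is None:
            self._is_scene = self._fetch_is_scene()
        # cast() records the "filled in by now" invariant instead of ignoring the error
        return cast(bool, self._is_scene)

    def _fetch_is_scene(self) -> bool:
        return True  # hypothetical stand-in for the server request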
@@ -885,7 +901,7 @@ def build_slice(
         sample_size: int,
         sample_method: Union[str, SliceBuilderMethods],
         filters: Optional[SliceBuilderFilters] = None,
-    ) -> Union[str, Tuple[AsyncJob, str]]:
+    ) -> Union[str, Tuple[AsyncJob, str], dict]:
         """Build a slice using Nucleus' Smart Sample tool. Allowing slices to be built
         based on certain criteria, and filters.

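With `dict` added to the return union, callers of `build_slice` now need runtime narrowing before using the result. A hypothetical caller-side sketch; only the keyword arguments visible in this hunk are shown, the `"uniform"` method value is an assumption, and the meaning of each branch (sync id, async job, raw payload) is inferred rather than confirmed by the diff:

result = dataset.build_slice(
    sample_size=100,
    sample_method="uniform",  # assumed SliceBuilderMethods value
)
if isinstance(result, str):
    slice_id = result  # assumed: synchronous path returns the slice id
elif isinstance(result, tuple):
    job, slice_id = result  # assumed: async path returns (AsyncJob, slice id)
else:
    print("raw response:", result)  # dict branch, e.g. a status/error payload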
@@ -1926,3 +1942,36 @@ def delete_tracks(self, track_reference_ids: List[str]) -> None:
             route=f"dataset/{self.id}/tracks",
             requests_command=requests.delete,
         )
+
+    def jobs(
+        self,
+        job_types: Optional[List[CustomerJobTypes]] = None,
+        from_date: Optional[Union[str, datetime.datetime]] = None,
+        to_date: Optional[Union[str, datetime.datetime]] = None,
+        limit: int = JOB_REQ_LIMIT,
+        show_completed: bool = False,
+        stats_only: bool = False,
+    ):
+        """
+        Fetch jobs pertaining to this particular dataset.
+
+        Parameters:
+            job_types: Filter on a set of job types; if None, fetch all types, e.g. ['uploadDatasetItems'].
+            from_date: Beginning of the date range, as a 'YYYY-MM-DD' string or a datetime object.
+                For example: '2021-11-05', parser.parse('Nov 5 2021'), or datetime(2021, 11, 5).
+            to_date: End of the date range.
+            limit: Number of results to fetch, max 50_000.
+            show_completed: Whether to fetch jobs with Completed status.
+            stats_only: Return an overview of the jobs instead of a list of job objects.
+        """
+        job_objects = self._client.list_jobs(
+            dataset_id=self.id,
+            show_completed=show_completed,
+            from_date=from_date,
+            to_date=to_date,
+            limit=limit,
+            job_types=job_types,
+        )
+        if stats_only:
+            return jobs_status_overview(job_objects)
+        return job_objects
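A hedged usage sketch for the new method, reusing the `client.get_dataset` call from the class docstring; the dataset id, dates, and the string form of `job_types` (taken from the docstring example) are placeholders:

import datetime

dataset = client.get_dataset("YOUR_DATASET_ID")

# Incomplete upload jobs from a November 2021 window
recent = dataset.jobs(
    job_types=["uploadDatasetItems"],
    from_date=datetime.datetime(2021, 11, 1),
    to_date="2021-11-30",
    limit=100,
)

# An overview of job statuses instead of the job objects themselves
overview = dataset.jobs(show_completed=True, stats_only=True)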