diff --git a/cellpack/autopack/DBRecipeHandler.py b/cellpack/autopack/DBRecipeHandler.py
index 00fc8bb7..3eb691d5 100644
--- a/cellpack/autopack/DBRecipeHandler.py
+++ b/cellpack/autopack/DBRecipeHandler.py
@@ -529,7 +529,7 @@ def upload_config(self, config_data, source_path):
         self.db.update_doc("configs", id, config_data)
         return id
 
-    def upload_result_metadata(self, file_name, url, dedup_hash=None):
+    def upload_result_metadata(self, file_name, url):
         """
         Upload the metadata of the result file to the database.
         """
@@ -543,11 +543,8 @@ def upload_result_metadata(self, file_name, url, dedup_hash=None):
                 "user": username,
                 "timestamp": timestamp,
                 "url": url,
-                "dedup_hash": dedup_hash,
             },
         )
-        if dedup_hash:
-            self.upload_job_status(dedup_hash, "DONE", result_path=url)
 
     def upload_job_status(
         self,
@@ -644,6 +641,7 @@ def upload_packing_results_workflow(
         self.upload_job_status(
             dedup_hash,
             "DONE",
+            result_path=upload_result.get("simularium_url"),
             outputs_directory=upload_result.get("outputs_directory"),
         )
 
@@ -675,8 +673,11 @@ def upload_outputs_to_s3(self, output_folder, recipe_name, dedup_hash):
                 f"{base_url}/{file_info['s3_key']}"
                 for file_info in upload_result["uploaded_files"]
             ]
+            simularium_url = None
+            for url in public_urls:
+                if url.endswith(".simularium"):
+                    simularium_url = url
             outputs_directory = f"https://us-west-2.console.aws.amazon.com/s3/buckets/{bucket_name}/{s3_prefix}/"
-
             logging.info(
                 f"Successfully uploaded {upload_result['total_files']} files to {outputs_directory}"
             )
@@ -694,6 +695,7 @@ def upload_outputs_to_s3(self, output_folder, recipe_name, dedup_hash):
                 "total_size": upload_result["total_size"],
                 "urls": public_urls,
                 "outputs_directory": outputs_directory,
+                "simularium_url": simularium_url,
             }
         except Exception as e:
             logging.error(e)
diff --git a/cellpack/autopack/upy/simularium/simularium_helper.py b/cellpack/autopack/upy/simularium/simularium_helper.py
index 4f934e0e..08179d85 100644
--- a/cellpack/autopack/upy/simularium/simularium_helper.py
+++ b/cellpack/autopack/upy/simularium/simularium_helper.py
@@ -1387,36 +1387,28 @@ def raycast_test(self, obj, start, end, length, **kw):
 
     def post_and_open_file(self, file_name, open_results_in_browser, dedup_hash=None):
         simularium_file = Path(f"{file_name}.simularium")
-        file_name, url = simulariumHelper.store_result_file(
-            simularium_file, storage="aws", batch_job_id=dedup_hash
-        )
-        if file_name and url:
-            simulariumHelper.store_metadata(
-                file_name, url, db="firebase", dedup_hash=dedup_hash
+        if dedup_hash is None:
+            file_name, url = simulariumHelper.store_result_file(
+                simularium_file, storage="aws"
             )
-            if open_results_in_browser:
-                simulariumHelper.open_in_simularium(url)
+            if file_name and url:
+                simulariumHelper.store_metadata(
+                    file_name, url, db="firebase"
+                )
+                if open_results_in_browser:
+                    simulariumHelper.open_in_simularium(url)
 
     @staticmethod
     def store_result_file(
-        file_path, storage=None, batch_job_id=None, sub_folder="simularium"
+        file_path, storage=None, sub_folder="simularium"
     ):
         if storage == "aws":
             handler = DATABASE_IDS.handlers().get(storage)
-            # if batch_job_id is not None, then we are in a batch job and should use the temp bucket
-            # TODO: use cellpack-results bucket for batch jobs once we have the correct permissions
-            if batch_job_id:
-                initialized_handler = handler(
-                    bucket_name="cellpack-demo",
-                    sub_folder_name=sub_folder,
-                    region_name="us-west-2",
-                )
-            else:
-                initialized_handler = handler(
-                    bucket_name="cellpack-results",
-                    sub_folder_name=sub_folder,
-                    region_name="us-west-2",
-                )
+            initialized_handler = handler(
+                bucket_name="cellpack-results",
+                sub_folder_name=sub_folder,
+                region_name="us-west-2",
+            )
             file_name, url = initialized_handler.save_file_and_get_url(file_path)
             if not file_name or not url:
                 db_maintainer = DBMaintenance(initialized_handler)
@@ -1426,7 +1418,7 @@ def store_result_file(
         return file_name, url
 
     @staticmethod
-    def store_metadata(file_name, url, db=None, dedup_hash=None):
+    def store_metadata(file_name, url, db=None):
         if db == "firebase":
             handler = DATABASE_IDS.handlers().get(db)
             initialized_db = handler(
@@ -1434,7 +1426,7 @@ def store_result_file(
             )  # default to staging for metadata uploads
             if initialized_db._initialized:
                 db_uploader = DBUploader(initialized_db)
-                db_uploader.upload_result_metadata(file_name, url, dedup_hash)
+                db_uploader.upload_result_metadata(file_name, url)
             else:
                 db_maintainer = DBMaintenance(initialized_db)
                 logging.warning(