82 changes: 36 additions & 46 deletions GeniERuntimeExample/GeniERuntimeExample.ipynb
@@ -17,35 +17,25 @@
"metadata": {},
"outputs": [],
"source": [
"from pathlib import Path\n",
"from dnv.oneworkflow.utils.workunit_extension import *\n",
"from dnv.oneworkflow.utils.starter import *\n",
"from dnv.oneworkflow import OneWorkflowClient\n",
"from dnv.oneworkflow.utils import *\n",
"from pathlib import Path\n",
"import os\n",
"oneWorkflowTMPFolder = r'D:\\OneWorkflowTmp' #due to possible issues with long file paths we prefer to have this folder at the root\n",
"root_folder = os.getcwd()\n",
"workspacePath = str(Path(root_folder, 'Workspace'))\n",
"workspaceId = \"GeniERuntimeExample\"\n",
"\n",
"cloudRun = False\n",
"oneWorkflowTMPFolder = r'c:\\oneworkflowTmp' #due to possible issues with long file paths we prefer to have this folder at the root\n",
"if not os.path.exists(oneWorkflowTMPFolder):\n",
" try:\n",
" print(\"Trying to create tmp folder for one workflow local execution\")\n",
" os.mkdir(oneWorkflowTMPFolder)\n",
" print(oneWorkflowTMPFolder + \" created!\\n\")\n",
" except:\n",
" print(\"did not manage to create tmp folder for local execution. Check that you have privileges to create it or try to manually create it from the coomand line.\")\n",
"\n",
"workspaceId = \"GeniERuntimeExample\"\n",
"# local workspace, all results will be put here after local or cloud runs\n",
"# location of common files for all analysis, has to be below workspacePath and in the folder names CommonFilesr\n",
"root_folder = os.getcwd()\n",
"print(root_folder)\n",
"workspacePath = str(Path(root_folder, 'Workspace'))\n",
"cloudRun = False\n",
"#If running locally the code below will also start the local workflow host.\n",
"workflow_client = one_workflow_client(workspace_id = workspaceId, workspace_path = workspacePath, cloud_run = cloudRun,\n",
" local_workflow_runtime_temp_folder_path = oneWorkflowTMPFolder, platform=Platform.WINDOWS, max_cores=5,auto_deploy_option = AutoDeployOption.DEV)\n",
"workflow_client.start_workflow_runtime_service()\n",
"if (cloudRun):\n",
" workflow_client.login()\n",
"if not workflow_client.upload_common_files(FileOptions(max_size=\"524MB\",patterns=[\"**/*.*\"], overwrite=True)):\n",
" print(\"Upload failed\")"
"workflow_client = one_workflow_client(workspace_id = workspaceId, cloud_run = cloudRun, workspace_path = workspacePath, local_workflow_runtime_temp_folder_path = oneWorkflowTMPFolder,\n",
" local_workflow_runtime_temp_folders_cleanup=False,environment=Environment.Testing,pool_id=\"SesamWorkflow_Windows_Standard_A1_v2\")\n"
]
},
{
@@ -55,43 +45,43 @@
"outputs": [],
"source": [
"import pandas as pd\n",
"from SesamHelpers import *\n",
"import shutil\n",
"import json\n",
"from dnv.oneworkflow import PythonCommand\n",
"from dnv.sesam.genie_runtime_command import *\n",
"# we must delete existing results locally before generating new results\n",
"local__result_path = Path(workspacePath, workflow_client.results_directory)\n",
"if os.path.isdir(local__result_path):\n",
" shutil.rmtree(local__result_path)\n",
"\n",
"#parametrized values\n",
"df = pd.DataFrame({'AP': [\"0m\", \"0.5m\", \"1m\"], 'FP': [\"150m\", \"250m\", \"500m\"]})\n",
"workUnit = GeniERuntimeTaskCreator(\"ContainerHull_template.js\", df,workflow_client).get_genieruntime_work_unit(cloudRun, workspacePath)\n",
"downloadOptions = FileOptions(\n",
" max_size=\"1112MB\",\n",
" patterns=[\"**/T1.FEM\", \"**/*.csv\"])\n",
"job = workflow_client.create_job(workUnit)\n",
"\n",
"#for debugging only\n",
"#job_json = json.dumps(job, default=lambda o: o.encode(), indent=4)\n",
"#print(job_json)\n",
"await run_workflow_async(job, workflow_client, downloadOptions)\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### CLose client -must be done before a new job can be started in a different notebook\n",
"Will remove all job and blob folders."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"workflow_client.local_workflow_runtime_service.stop_service()"
"\n",
"\n",
"commands_info = []\n",
"for index, row in df.iterrows():\n",
" loadcase_folder_name = f\"Model_{index + 1}\"\n",
" genieruntime_command = GeniERuntimeCommand()\n",
" genieruntime_command.Parameters = {}\n",
" genieruntime_command.TemplateInputFile = \"ContainerHull_template.js\"\n",
" for key, value in row.items():\n",
" genieruntime_command.Parameters[key] = value\n",
" \n",
" post_processing_command = PythonCommand(\n",
" directory=workflow_client.common_directory,\n",
" filename=\"postprocessing.py\")\n",
" cmd_info = CommandInfo(commands=[genieruntime_command,post_processing_command],load_case_foldername=loadcase_folder_name)\n",
" commands_info.append(cmd_info)\n",
" \n",
"print(\"Running commands in parallel\")\n",
"await run_managed_commands_parallelly_async(\n",
" client=workflow_client,\n",
" commands_info=commands_info,\n",
" files_to_download_from_blob_to_client=FileOptions(max_size=\"11124MB\",patterns=[\"**/*.txt\", \"**/*.lis\", \"**/*.MLG\", \"**/*.MLG\",\"**/*.CSV\",\"**/*.FEM\"]),\n",
" enable_common_files_copy_to_load_cases=True,\n",
")\n"
]
},
{
52 changes: 52 additions & 0 deletions GeniERuntimeExample/GeniERuntimeExample.py
@@ -0,0 +1,52 @@

from dnv.oneworkflow.utils.workunit_extension import *
from dnv.oneworkflow.utils.starter import *
from dnv.oneworkflow import PythonCommand
from pathlib import Path
import asyncio
import os
import pandas as pd
from dnv.sesam.genie_runtime_command import *
root_folder = os.path.dirname(os.path.abspath(__file__))
workspacePath = str(Path(root_folder, 'Workspace'))
workspaceId = "GeniERuntimeExample"

cloudRun = False
oneWorkflowTMPFolder = r'd:\oneworkflowTmp' # Due to possible issues with long file paths, we prefer to keep this folder at the drive root
if not os.path.exists(oneWorkflowTMPFolder):
try:
print("Trying to create tmp folder for one workflow local execution")
os.mkdir(oneWorkflowTMPFolder)
print(oneWorkflowTMPFolder + " created!\n")
    except OSError:
        print("Could not create the tmp folder for local execution. Check that you have privileges to create it, or create it manually from the command line.")
# If running locally, the code below will also start the local workflow host.
workflow_client = one_workflow_client(workspace_id = workspaceId, cloud_run = cloudRun, workspace_path = workspacePath, local_workflow_runtime_temp_folder_path = oneWorkflowTMPFolder,
                                      local_workflow_runtime_temp_folders_cleanup=False, environment=Environment.Testing)
#parametrized values
df = pd.DataFrame({'AP': ["0m", "0.5m", "1m"], 'FP': ["150m", "250m", "500m"]})


commands_info = []
for index, row in df.iterrows():
loadcase_folder_name = f"Model_{index + 1}"
genieruntime_command = GeniERuntimeCommand()
genieruntime_command.Parameters = {}
genieruntime_command.TemplateInputFile = "ContainerHull_template.js"
for key, value in row.items():
genieruntime_command.Parameters[key] = value

post_processing_command = PythonCommand(
directory=workflow_client.common_directory,
filename="postprocessing.py")
    cmd_info = CommandInfo(commands=[genieruntime_command, post_processing_command], load_case_foldername=loadcase_folder_name)
commands_info.append(cmd_info)

print("Running commands in parallel")
asyncio.run(run_managed_commands_parallelly_async(
client=workflow_client,
commands_info=commands_info,
    files_to_download_from_blob_to_client=FileOptions(max_size="11124MB", patterns=["**/*.txt", "**/*.lis", "**/*.MLG", "**/*.CSV"]),
enable_common_files_copy_to_load_cases=True,
))
57 changes: 0 additions & 57 deletions GeniERuntimeExample/SesamHelpers.py

This file was deleted.

@@ -21,6 +21,7 @@ def write_node_element_count():
Reads the number of occurrences of a data type and the size of the established pointer table
for a datatype.
"""
print("Reading node and element count from T1.FEM file\n")
with SesamDataFactory.CreateReader(".", 'T1.FEM') as reader:
reader.CreateModel()
        all_data = []
48 changes: 48 additions & 0 deletions SE28ExampleSimaWasimSestra/SimaExample.py
@@ -0,0 +1,48 @@
from pathlib import Path
import os
import asyncio
import json
from dnv.oneworkflow.utils.workunit_extension import *
from dnv.oneworkflow.utils.starter import *
from SimaHelper import *
root_folder = os.path.dirname(os.path.abspath(__file__))
oneWorkflowTMPFolder = r'd:\oneworkflowTmp' # Due to possible issues with long file paths, we prefer to keep this folder at the drive root
if not os.path.exists(oneWorkflowTMPFolder):
try:
print("Trying to create tmp folder for one workflow local execution")
os.mkdir(oneWorkflowTMPFolder)
print(oneWorkflowTMPFolder + " created!\n")
    except OSError:
        print("Could not create the tmp folder for local execution. Check that you have privileges to create it, or create it manually from the command line.")

# Local workspace; all results will be put here after local or cloud runs
# Location of the files common to all analyses; it has to be below workspacePath
print(root_folder)
workspacePath = str(Path(root_folder, 'Workspace'))
workspaceId = "SE28"
loadcase_file = f"{workspacePath}\\test_cases_light.xlsx"
wasim_input_file = "test_cases_wasim_input.xlsx"
stask_file = "SimaTemplateV2.stask"
cloudRun = False
notebook_root_folder = os.getcwd()

workflow_client = one_workflow_client(workspace_id = workspaceId, cloud_run = cloudRun, workspace_path = workspacePath, local_workflow_runtime_temp_folder_path = oneWorkflowTMPFolder,
                                      local_workflow_runtime_temp_folders_cleanup=False, environment=Environment.Testing)

"""Tests SIMA and Python commands"""
# Upload Input Files
workflow_client.upload_input_files()

# SIMA path must be specified
sima_settings = SimaSettings(sima_exe_path=r'C:\Program Files\DNV\Sima V4.4-00')
sima_commands = SimaTaskCreator(sima_settings, workflow_client).get_sima_commands(loadcase_file, stask_file)



print("Running commands in parallel")
asyncio.run(run_managed_commands_parallelly_async(
#log_job = True,
client=workflow_client,
commands_info=sima_commands,
    files_to_download_from_blob_to_client=FileOptions(max_size="11124MB", patterns=["**/*.log", "**/*.txt", "**/*.lis", "**/*.MLG", "**/*.CSV"]),
enable_common_files_copy_to_load_cases=True,
))
30 changes: 11 additions & 19 deletions SE28ExampleSimaWasimSestra/SimaHelper.py
@@ -3,11 +3,10 @@
from typing import Any
import os
from dnv.sesam.sima_command import SimaCommand
from dnv.onecompute.flowmodel import ParallelWork
from dnv.oneworkflow import PythonCommand
from dnv.onecompute import FileSpecification
from dnv.oneworkflow.oneworkflowclient import OneWorkflowClient

from dnv.oneworkflow.utils.workunit_extension import *
from dnv.oneworkflow.utils.starter import *
class SimaSettings:
def __init__(self, sima_exe_path: str, result_files_to_keep=[
"*-sima.lis",
@@ -53,7 +52,7 @@ def get_commands_inputs(self,stask_file: str, case: dict[str, Any]) -> dict[str,



def get_sima_work_unit(self, full_path_to_load_case_file: str, stask_file: str, single_task: bool = False):
def get_sima_commands(self, full_path_to_load_case_file: str, stask_file: str, single_task: bool = False):
"""Returns a parallel processing unit based on input given in an Excel file.

Parameters:
@@ -63,30 +62,23 @@

"""
os.chdir(self.workspace.workspace_path)
load_cases_parent_folder_name = self.workspace.load_cases_parent_directory

parallel_work = ParallelWork()
parallel_work.work_items.clear()

commands_info = []
# Open environmental input file
index = 0
df_cases = pd.read_excel(full_path_to_load_case_file, index_col=0)
for loadcase_folder_name, case in df_cases.iterrows():
load_case_folder = os.path.join(
load_cases_parent_folder_name, loadcase_folder_name)
result_folder_lc = os.path.join(
self.workspace.results_directory, loadcase_folder_name)
index = index + 1
loadcase_folder_name = f"loadcase_{index}"
# Get SIMA commands and inputs
commands_inputs = self.get_commands_inputs(stask_file, case.to_dict())
# Create SimaCommand instance
sima_cmd = SimaCommand(self.sima_settings.sima_exe_path)
sima_cmd.commands = commands_inputs["commands"]
sima_cmd.input = commands_inputs["inputs"]
sima_cmd.sima_result_files = self.sima_settings.result_files_to_keep
sima_cmd.working_directory = load_case_folder
#sima_cmd.working_directory = load_case_folder

# Add work item to ParallelWork instance
parallel_work.add(sima_cmd, work_unit_id=loadcase_folder_name).output_directory(result_folder_lc,
include_files=self.sima_settings.result_files_to_keep)
if single_task:
break
return parallel_work
cmd_info = CommandInfo(commands=[sima_cmd], load_case_foldername=loadcase_folder_name)
commands_info.append(cmd_info)
return commands_info