@@ -592,6 +592,9 @@ def create_launch_model(
592592
593593 A model bundle consists of exactly {predict_fn_or_cls}, {load_predict_fn + model}, or {load_predict_fn + load_model_fn}.
594594 Pre/post-processing code can be included inside load_predict_fn/model or in predict_fn_or_cls call.
595+ Note: the exact parameters used will depend on the version of the Launch client used.
596+ i.e. if you are on Launch client version 0.x, you will use `env_params`, otherwise
597+ you will use `pytorch_image_tag` and `tensorflow_version`.
595598
596599 Parameters:
597600 model_bundle_name: Name of model bundle you want to create. This acts as a unique identifier.
@@ -608,7 +611,8 @@ def create_launch_model(
608611 ["tensorflow==2.3.0", "tensorflow-hub==0.11.0"]. If no list has been passed, will default to the currently
609612 imported list of packages.
610613 app_config: Either a Dictionary that represents a YAML file contents or a local path to a YAML file.
611- env_params: A dictionary that dictates environment information e.g.
614+            env_params: Only for Launch v0.
615+ A dictionary that dictates environment information e.g.
612616 the use of pytorch or tensorflow, which cuda/cudnn versions to use.
613617 Specifically, the dictionary should contain the following keys:
614618 "framework_type": either "tensorflow" or "pytorch".
@@ -617,6 +621,9 @@ def create_launch_model(
617621 "cudnn_version" Version of cudnn used, e.g. "cudnn8-devel".
618622 "tensorflow_version": Version of tensorflow, e.g. "2.3.0". Only applicable if framework_type is tensorflow
619623 globals_copy: Dictionary of the global symbol table. Normally provided by `globals()` built-in function.
624+            pytorch_image_tag: Only for Launch v1, and only if you want to use the pytorch framework type.
625+                The tag of the pytorch docker image you want to use, e.g. "1.11.0-cuda11.3-cudnn8-runtime".
626+            tensorflow_version: Only for Launch v1, and only if you want to use tensorflow. Version of tensorflow, e.g. "2.3.0".
620627 """
621628 from launch import LaunchClient
622629
@@ -637,13 +644,22 @@ def create_launch_model(
637644 "model_bundle_name" : name + "-nucleus-autogen" ,
638645 ** bundle_args ,
639646 }
640-
641- bundle = launch_client .create_model_bundle (** kwargs )
647+ if hasattr (launch_client , "create_model_bundle_from_callable_v2" ):
648+ # Launch client is >= 1.0.0
649+ bundle = launch_client .create_model_bundle_from_callable_v2 (
650+ ** kwargs
651+ )
652+ bundle_name = (
653+ bundle .name
654+ ) # both v0 and v1 have a .name field but are different types
655+ else :
656+ bundle = launch_client .create_model_bundle (** kwargs )
657+ bundle_name = bundle .name
642658 return self .create_model (
643659 name ,
644660 reference_id ,
645661 metadata ,
646- bundle . name ,
662+ bundle_name ,
647663 )
648664
649665 def create_launch_model_from_dir (
@@ -705,12 +721,16 @@ def create_launch_model_from_dir(
705721 as the desired inference loading function, then the `load_predict_fn_module_path` argument should be
706722 `my_module1.my_inference_file.f`.
707723
724+ Note: the exact keys for `bundle_from_dir_args` used will depend on the version of the Launch client used.
725+ i.e. if you are on Launch client version 0.x, you will use `env_params`, otherwise
726+ you will use `pytorch_image_tag` and `tensorflow_version`.
708727
709728 Keys for `bundle_from_dir_args`:
710729 model_bundle_name: Name of model bundle you want to create. This acts as a unique identifier.
711730 base_paths: The paths on the local filesystem where the bundle code lives.
712731 requirements_path: A path on the local filesystem where a requirements.txt file lives.
713- env_params: A dictionary that dictates environment information e.g.
732+            env_params: Only for Launch v0.
733+ A dictionary that dictates environment information e.g.
714734 the use of pytorch or tensorflow, which cuda/cudnn versions to use.
715735 Specifically, the dictionary should contain the following keys:
716736 "framework_type": either "tensorflow" or "pytorch".
@@ -723,6 +743,9 @@ def create_launch_model_from_dir(
723743 load_model_fn_module_path: A python module path for a function that returns a model. The output feeds into
724744 the function located at load_predict_fn_module_path.
725745 app_config: Either a Dictionary that represents a YAML file contents or a local path to a YAML file.
746+            pytorch_image_tag: Only for Launch v1, and only if you want to use the pytorch framework type.
747+                The tag of the pytorch docker image you want to use, e.g. "1.11.0-cuda11.3-cudnn8-runtime".
748+            tensorflow_version: Only for Launch v1, and only if you want to use tensorflow. Version of tensorflow, e.g. "2.3.0".
726749 """
727750 from launch import LaunchClient
728751
@@ -744,13 +767,21 @@ def create_launch_model_from_dir(
744767 ** bundle_from_dir_args ,
745768 }
746769
747- bundle = launch_client .create_model_bundle_from_dirs (** kwargs )
770+ if hasattr (launch_client , "create_model_bundle_from_dirs_v2" ):
771+ # Launch client is >= 1.0.0, use new fn
772+ bundle = launch_client .create_model_bundle_from_dirs_v2 (** kwargs )
773+ # Different code paths give different types for bundle, although both have a .name field
774+ bundle_name = bundle .name
775+ else :
776+ # Launch client is < 1.0.0
777+ bundle = launch_client .create_model_bundle_from_dirs (** kwargs )
778+ bundle_name = bundle .name
748779
749780 return self .create_model (
750781 name ,
751782 reference_id ,
752783 metadata ,
753- bundle . name ,
784+ bundle_name ,
754785 )
755786
756787 @deprecated (
0 commit comments