From f84821785ae179ec49301cb64cd9100313bfeb90 Mon Sep 17 00:00:00 2001
From: jcuzens
Subject: [PATCH] Add Hyperswap256 from FaceFusionLabs

---
 app/processors/face_swappers.py        | 23 +++++++++++++++++-
 app/processors/models_data.py          | 21 +++++++++++++++++
 app/processors/models_processor.py     |  6 +++++
 app/processors/workers/frame_worker.py | 32 ++++++++++++++++++++++++++
 app/ui/widgets/swapper_layout_data.py  | 15 +++++++++++++-
 5 files changed, 95 insertions(+), 2 deletions(-)

diff --git a/app/processors/face_swappers.py b/app/processors/face_swappers.py
index 05826ef3..47c9dce8 100644
--- a/app/processors/face_swappers.py
+++ b/app/processors/face_swappers.py
@@ -288,4 +288,25 @@ def run_swapper_ghostface(self, image, embedding, output, swapper_model='GhostFa
             torch.cuda.synchronize()
         elif self.models_processor.device != "cpu":
             self.models_processor.syncvec.cpu()
-        ghostfaceswap_model.run_with_iobinding(io_binding)
\ No newline at end of file
+        ghostfaceswap_model.run_with_iobinding(io_binding)
+
+    def calc_swapper_latent_hyperswap256(self, source_embedding, version="A"):
+        latent = source_embedding / l2norm(source_embedding)
+        latent = latent.reshape((1, -1))
+        return latent
+
+    def run_hyperswap256(self, image, embedding, output, version="A"):
+        HS_MODEL_NAME = f'Hyperswap256 Version {version}'
+        if not self.models_processor.models[HS_MODEL_NAME]:
+            self.models_processor.models[HS_MODEL_NAME] = self.models_processor.load_model(HS_MODEL_NAME)
+
+        io_binding = self.models_processor.models[HS_MODEL_NAME].io_binding()
+        io_binding.bind_input(name='target', device_type=self.models_processor.device, device_id=0, element_type=np.float32, shape=(1,3,256,256), buffer_ptr=image.data_ptr())
+        io_binding.bind_input(name='source', device_type=self.models_processor.device, device_id=0, element_type=np.float32, shape=(1,512), buffer_ptr=embedding.data_ptr())
+        io_binding.bind_output(name='output', device_type=self.models_processor.device, device_id=0, element_type=np.float32, shape=(1,3,256,256), buffer_ptr=output.data_ptr())
+
+        if self.models_processor.device == "cuda":
+            torch.cuda.synchronize()
+        elif self.models_processor.device != "cpu":
+            self.models_processor.syncvec.cpu()
+        self.models_processor.models[HS_MODEL_NAME].run_with_iobinding(io_binding)
\ No newline at end of file
diff --git a/app/processors/models_data.py b/app/processors/models_data.py
index 72a76961..c39c221d 100644
--- a/app/processors/models_data.py
+++ b/app/processors/models_data.py
@@ -43,6 +43,9 @@
     'InStyleSwapper256 Version A': 'Inswapper128ArcFace',
     'InStyleSwapper256 Version B': 'Inswapper128ArcFace',
     'InStyleSwapper256 Version C': 'Inswapper128ArcFace',
+    'Hyperswap256 Version A': 'Inswapper128ArcFace',
+    'Hyperswap256 Version B': 'Inswapper128ArcFace',
+    'Hyperswap256 Version C': 'Inswapper128ArcFace',
     'DeepFaceLive (DFM)': 'Inswapper128ArcFace',
     'SimSwap512': 'SimSwapArcFace',
     'GhostFace-v1': 'GhostArcFace',
@@ -467,5 +470,23 @@
         "hash": "a6164debbf1e851c3dcefa622111c42a78afd9bb8f1540e7d01172ddf642c3b5",
         "url": f"{assets_repo}/v0.1.0_lp/warping_spade-fix.onnx"
+    },
+    {
+        "model_name": "Hyperswap256 Version A",
+        "local_path": f"{models_dir}/hyperswap_1a_256.onnx",
+        "hash": "c0e98a8a03a238f461ed3d2570e426b49f46745ee400854a60dceeb70c246add",
+        "url": f"{assets_repo}/v0.1.0/hyperswap_1a_256.onnx"
+    },
+    {
+        "model_name": "Hyperswap256 Version B",
+        "local_path": f"{models_dir}/hyperswap_1b_256.onnx",
+        "hash": "5124031789c42f71b9558fb71954ef7aedb6da7ed9fac79293e23c61a792a73e",
+        "url": f"{assets_repo}/v0.1.0/hyperswap_1b_256.onnx"
+    },
+    {
+        "model_name": "Hyperswap256 Version C",
+        "local_path": f"{models_dir}/hyperswap_1c_256.onnx",
+        "hash": "5528c2d76fe9986c99d829278987ef9f3a630cb606db7628d02b57b330f406a5",
+        "url": f"{assets_repo}/v0.1.0/hyperswap_1c_256.onnx"
     }
 ]
\ No newline at end of file
diff --git a/app/processors/models_processor.py b/app/processors/models_processor.py
index 070c4c01..d8467935 100644
--- a/app/processors/models_processor.py
+++ b/app/processors/models_processor.py
@@ -327,6 +327,12 @@ def calc_swapper_latent_cscs(self, source_embedding):
     def run_swapper_cscs(self, image, embedding, output):
         self.face_swappers.run_swapper_cscs(image, embedding, output)
 
+    def calc_swapper_latent_hyperswap256(self, source_embedding, version="A"):
+        return self.face_swappers.calc_swapper_latent_hyperswap256(source_embedding, version)
+
+    def run_hyperswap256(self, image, embedding, output, version="A"):
+        self.face_swappers.run_hyperswap256(image, embedding, output, version)
+
     def run_enhance_frame_tile_process(self, img, enhancer_type, tile_size=256, scale=1):
         return self.frame_enhancers.run_enhance_frame_tile_process(img, enhancer_type, tile_size, scale)
 
diff --git a/app/processors/workers/frame_worker.py b/app/processors/workers/frame_worker.py
index 7049a58d..6032e7a9 100644
--- a/app/processors/workers/frame_worker.py
+++ b/app/processors/workers/frame_worker.py
@@ -442,6 +442,18 @@ def get_affined_face_dim_and_swapping_latents(self, original_faces: tuple, swapp
             latent = []
             input_face_affined = original_face_512
             dim = 4
+
+        elif swapper_model in ('Hyperswap256 Version A', 'Hyperswap256 Version B', 'Hyperswap256 Version C'):
+            version = swapper_model[-1]
+            latent = torch.from_numpy(self.models_processor.calc_swapper_latent_hyperswap256(s_e, version)).float().to(self.models_processor.device)
+            if parameters['FaceLikenessEnableToggle']:
+                factor = parameters['FaceLikenessFactorDecimalSlider']
+                dst_latent = torch.from_numpy(self.models_processor.calc_swapper_latent_hyperswap256(t_e, version)).float().to(self.models_processor.device)
+                latent = latent - (factor * dst_latent)
+
+            dim = 2
+            input_face_affined = original_face_256
+
         return input_face_affined, dfm_model, dim, latent
 
     def get_swapped_and_prev_face(self, output, input_face_affined, original_face_512, latent, itex, dim, swapper_model, dfm_model, parameters, ):
@@ -487,6 +499,26 @@
             output = torch.mul(output, 255)
             output = torch.clamp(output, 0, 255)
 
+        elif swapper_model in ('Hyperswap256 Version A', 'Hyperswap256 Version B', 'Hyperswap256 Version C'):
+            version = swapper_model[-1]  # Version name
+            with torch.no_grad():
+                for _ in range(itex):
+                    input_face_disc = input_face_affined.permute(2, 0, 1)
+                    input_face_disc = torch.unsqueeze(input_face_disc, 0).contiguous()
+
+                    swapper_output = torch.empty((1,3,256,256), dtype=torch.float32, device=self.models_processor.device).contiguous()
+                    self.models_processor.run_hyperswap256(input_face_disc, latent, swapper_output, version)
+
+                    swapper_output = torch.squeeze(swapper_output)
+                    swapper_output = swapper_output.permute(1, 2, 0)
+
+                    output = swapper_output.clone()
+                    prev_face = input_face_affined.clone()
+                    input_face_affined = output.clone()
+
+            output = torch.mul(output, 255)
+            output = torch.clamp(output, 0, 255)
+
         elif swapper_model == 'SimSwap512':
             for k in range(itex):
                 input_face_disc = input_face_affined.permute(2, 0, 1)
diff --git a/app/ui/widgets/swapper_layout_data.py b/app/ui/widgets/swapper_layout_data.py
index b9b6c52e..4bc41c45 100644
--- a/app/ui/widgets/swapper_layout_data.py
+++ b/app/ui/widgets/swapper_layout_data.py
@@ -8,7 +8,20 @@
     'SwapModelSelection': {
         'level': 1,
         'label': 'Swapper Model',
-        'options': ['Inswapper128', 'InStyleSwapper256 Version A', 'InStyleSwapper256 Version B', 'InStyleSwapper256 Version C', 'DeepFaceLive (DFM)', 'SimSwap512', 'GhostFace-v1', 'GhostFace-v2', 'GhostFace-v3', 'CSCS'], 'default': 'Inswapper128',
+        'options': [
+            'Inswapper128',
+            'InStyleSwapper256 Version A',
+            'InStyleSwapper256 Version B',
+            'InStyleSwapper256 Version C',
+            'Hyperswap256 Version A',
+            'Hyperswap256 Version B',
+            'Hyperswap256 Version C',
+            'DeepFaceLive (DFM)',
+            'SimSwap512',
+            'GhostFace-v1',
+            'GhostFace-v2',
+            'GhostFace-v3',
+            'CSCS'], 'default': 'Inswapper128',
         'help': 'Choose which swapper model to use for face swapping.'
     },
     'SwapperResSelection': {
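
Reviewer notes (supplementary, not applied by the patch):

The latent path reuses the L2 normalization the other ArcFace-based swappers
use. A minimal standalone sketch of what calc_swapper_latent_hyperswap256
computes, assuming l2norm is a plain Euclidean norm; the version argument is
accepted only to keep call sites uniform and does not affect the math:

    import numpy as np

    def l2norm(x):
        # Euclidean norm of the flattened embedding.
        return np.linalg.norm(x)

    def calc_hyperswap_latent(source_embedding, version="A"):
        # Unit-normalize the 512-D ArcFace embedding, then shape it to the
        # (1, 512) 'source' input the ONNX graph expects; version is unused.
        latent = source_embedding / l2norm(source_embedding)
        return latent.reshape((1, -1)).astype(np.float32)

    emb = np.random.randn(512).astype(np.float32)  # stand-in embedding
    print(calc_hyperswap_latent(emb).shape)        # -> (1, 512), unit norm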
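
The io_binding calls declare the model interface: a 'target' input of shape
(1, 3, 256, 256), a 'source' input of shape (1, 512), and one 'output' of
shape (1, 3, 256, 256). A hedged sketch for sanity-checking a downloaded
model with a plain CPU onnxruntime session, without the device plumbing; the
file name and tensor names come from the patch, and the model file is
assumed to be present locally:

    import numpy as np
    import onnxruntime as ort

    session = ort.InferenceSession("hyperswap_1a_256.onnx",
                                   providers=["CPUExecutionProvider"])

    target = np.zeros((1, 3, 256, 256), dtype=np.float32)  # NCHW face crop
    source = np.random.randn(1, 512).astype(np.float32)
    source /= np.linalg.norm(source)                       # normalized latent

    (output,) = session.run(None, {"target": target, "source": source})
    print(output.shape)  # expected: (1, 3, 256, 256)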
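
The frame_worker branch re-applies the swap itex times, feeding each result
back in as the next target. A condensed sketch of that feedback loop, under
the assumption that run_swap has the same contract as run_hyperswap256: a
(1, 3, 256, 256) target and a (1, 512) latent written into a preallocated
(1, 3, 256, 256) output:

    import torch

    def iterative_swap(run_swap, face_256, latent, itex=1):
        # face_256 is an HWC float tensor in 0..1; each pass swaps the
        # previous pass's result again, as the worker loop does.
        affined = face_256
        with torch.no_grad():
            for _ in range(itex):
                inp = affined.permute(2, 0, 1).unsqueeze(0).contiguous()
                out = torch.empty((1, 3, 256, 256), dtype=torch.float32)
                run_swap(inp, latent, out)
                affined = out.squeeze(0).permute(1, 2, 0)
        return torch.clamp(affined * 255, 0, 255)  # back to 0..255 range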