@@ -363,25 +363,29 @@ def __init__(self):
         self.embedding_provider = "sentence_transformers"
         self.llm_model = ""
         self.embedding_model = ""
-        self.assistant = "Cortana"
+        self.assistant = "None"
         self.random = False
-        self.rag_dir = os.path.join(folder_paths.base_path, "custom_nodes", "ComfyUI_IF_AI_tools", "IF_AI", "rag")
+
         self.comfy_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+        self.rag_dir = os.path.join(folder_paths.base_path, "custom_nodes", "ComfyUI_IF_AI_tools", "IF_AI", "rag")
         self.presets_dir = os.path.join(folder_paths.base_path, "custom_nodes", "ComfyUI_IF_AI_tools", "IF_AI", "presets")
+
         self.stop_file = os.path.join(self.presets_dir, "stop_strings.json")
         self.assistants_file = os.path.join(self.presets_dir, "assistants.json")
         self.neg_prompts_file = os.path.join(self.presets_dir, "neg_prompts.json")
         self.embellish_prompts_file = os.path.join(self.presets_dir, "embellishments.json")
         self.style_prompts_file = os.path.join(self.presets_dir, "style_prompts.json")
         self.tasks_file = os.path.join(self.presets_dir, "florence_prompts.json")
         self.agents_dir = os.path.join(self.presets_dir, "agents")
+
         self.agent_tools = self.load_agent_tools()
         self.stop_strings = self.load_presets(self.stop_file)
         self.assistants = self.load_presets(self.assistants_file)
         self.neg_prompts = self.load_presets(self.neg_prompts_file)
         self.embellish_prompts = self.load_presets(self.embellish_prompts_file)
         self.style_prompts = self.load_presets(self.style_prompts_file)
         self.florence_prompts = self.load_presets(self.tasks_file)
+
         self.keep_alive = False
         self.seed = 94687328150
         self.messages = []
@@ -394,8 +398,6 @@ def __init__(self):
         self.colpali_app = colpaliRAGapp()
         self.fix_json = True
         self.cached_colpali_model = None
-        #self.transformers_manager = TransformersModelManager()
-        #self.transformers_app = self.transformers_manager.send_transformers_request
         self.florence_app = FlorenceModule()
         self.florence_models = {}
         self.query_type = "global"
@@ -411,9 +413,8 @@ def __init__(self):
         self.top_k_search = 3

         self.placeholder_image_path = os.path.join(folder_paths.base_path, "custom_nodes", "ComfyUI_IF_AI_tools", "IF_AI", "placeholder.png")
-        # Ensure the placeholder image exists
+
         if not os.path.exists(self.placeholder_image_path):
-            # Create a proper RGB placeholder image
             placeholder = Image.new('RGB', (512, 512), color=(73, 109, 137))
             os.makedirs(os.path.dirname(self.placeholder_image_path), exist_ok=True)
             placeholder.save(self.placeholder_image_path)
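
For reference, the placeholder bootstrap above can be exercised standalone. A minimal sketch using the same Pillow calls as the hunk; the relative path here is illustrative, not the node's real location:

import os
from PIL import Image

# Create a 512x512 solid-color RGB placeholder if none exists yet,
# mirroring the guard in __init__ above (path shown is hypothetical).
placeholder_path = os.path.join("IF_AI", "placeholder.png")
if not os.path.exists(placeholder_path):
    os.makedirs(os.path.dirname(placeholder_path), exist_ok=True)
    Image.new('RGB', (512, 512), color=(73, 109, 137)).save(placeholder_path)
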
@@ -424,29 +425,34 @@ def load_presets(self, file_path):
         return presets

     def load_agent_tools(self):
+        os.makedirs(self.agents_dir, exist_ok=True)
         agent_tools = {}
-        for filename in os.listdir(self.agents_dir):
-            if filename.endswith('.json'):
-                full_path = os.path.join(self.agents_dir, filename)
-                with open(full_path, 'r') as f:
-                    try:
-                        data = json.load(f)
-                        if 'output_type' not in data:
-                            data['output_type'] = None
-                        agent_tool = AgentTool(**data)
-                        agent_tool.load()
-                        if agent_tool._class_instance is not None:
-                            if agent_tool.python_function:
-                                agent_tools[agent_tool.name] = agent_tool
+        try:
+            for filename in os.listdir(self.agents_dir):
+                if filename.endswith('.json'):
+                    full_path = os.path.join(self.agents_dir, filename)
+                    with open(full_path, 'r') as f:
+                        try:
+                            data = json.load(f)
+                            if 'output_type' not in data:
+                                data['output_type'] = None
+                            agent_tool = AgentTool(**data)
+                            agent_tool.load()
+                            if agent_tool._class_instance is not None:
+                                if agent_tool.python_function:
+                                    agent_tools[agent_tool.name] = agent_tool
+                                else:
+                                    print(f"Warning: Agent tool {agent_tool.name} in {filename} does not have a python_function defined.")
                             else:
-                                print(f"Warning: Agent tool {agent_tool.name} in {filename} does not have a python_function defined.")
-                        else:
-                            print(f"Failed to create class instance for {filename}")
-                    except json.JSONDecodeError:
-                        print(f"Error: Invalid JSON in {filename}")
-                    except Exception as e:
-                        print(f"Error loading {filename}: {str(e)}")
-        return agent_tools
+                                print(f"Failed to create class instance for {filename}")
+                        except json.JSONDecodeError:
+                            print(f"Error: Invalid JSON in {filename}")
+                        except Exception as e:
+                            print(f"Error loading {filename}: {str(e)}")
+            return agent_tools
+        except Exception as e:
+            print(f"Warning: Error accessing agent tools directory: {str(e)}")
+            return {}

     async def process_chat(
         self,
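
The point of this hunk is that a missing or unreadable agents directory no longer aborts `__init__`: the directory is created up front, and any remaining filesystem error degrades to a warning plus an empty tool map. A simplified, self-contained sketch of the same pattern (no `AgentTool`; the helper name is hypothetical):

import json
import os

def load_tools_safely(agents_dir):
    # Ensure the directory exists, then tolerate both a bad directory
    # and individual malformed JSON files, as the hunk above does.
    os.makedirs(agents_dir, exist_ok=True)
    tools = {}
    try:
        for filename in os.listdir(agents_dir):
            if not filename.endswith('.json'):
                continue
            try:
                with open(os.path.join(agents_dir, filename), 'r') as f:
                    tools[filename[:-5]] = json.load(f)
            except json.JSONDecodeError:
                print(f"Error: Invalid JSON in {filename}")
        return tools
    except OSError as e:
        print(f"Warning: Error accessing agent tools directory: {e}")
        return {}
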
@@ -648,7 +654,7 @@ async def process_chat(
             )

             generated_text = response.get("Response")
-            selected_neg_prompt_name = neg_prompt  # The name/key selected in the UI
+            selected_neg_prompt_name = neg_prompt
             omni = response.get("Tool_Output")
             retrieved_image = response.get("Retrieved_Image")
             retrieved_mask = response.get("Mask")
@@ -677,21 +683,17 @@ async def process_chat(
             # Handle negative prompts
             if selected_neg_prompt_name == "AI_Fill":
                 try:
-                    # Get the NegativePromptEngineer system message
                     neg_system_message = self.assistants.get("NegativePromptEngineer")
                     if not neg_system_message:
                         logger.error("NegativePromptEngineer not found in assistants configuration")
                         negative_prompt = "Error: NegativePromptEngineer not configured"
                     else:
-                        # Construct a clear prompt for negative generation
                         user_message = f"Generate negative prompts for the following prompt:\n{text_result}"

-                        # Ensure we have a valid system message
                         system_message_str = json.dumps(neg_system_message)

                         logger.info(f"Requesting negative prompts for prompt: {text_result[:100]}...")

-                        # Make the LLM request with proper parameter handling
                         neg_response = await send_request(
                             llm_provider=llm_provider,
                             base_ip=base_ip,
@@ -716,19 +718,16 @@ async def process_chat(
                         logger.debug(f"Received negative prompt response: {neg_response}")

                         if neg_response:
-                            # Split the AI-generated negative prompts into lines
                             negative_lines = []
                             for line in neg_response.split('\n'):
                                 line = line.strip()
                                 if line:
                                     negative_lines.append(line)

-                            # Match number of negative prompts to positive prompts
                             while len(negative_lines) < len(lines):
                                 negative_lines.append(negative_lines[-1] if negative_lines else "")
                             negative_lines = negative_lines[:len(lines)]

-                            # Create multiline string with explicit newlines
                             negative_prompt = "\n".join(negative_lines)
                         else:
                             negative_prompt = "Error: Empty response from LLM"
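
The pad-then-truncate step above exists to keep the negative prompts line-aligned with the positive prompts. A standalone sketch of that alignment (helper name is hypothetical):

def align_negatives(negative_lines, lines):
    # Pad by repeating the last negative prompt ("" if none were produced),
    # then truncate so there is exactly one negative per positive line.
    negative_lines = [line.strip() for line in negative_lines if line.strip()]
    while len(negative_lines) < len(lines):
        negative_lines.append(negative_lines[-1] if negative_lines else "")
    return "\n".join(negative_lines[:len(lines)])

# align_negatives(["blurry, low quality"], ["a cat", "a dog"])
# -> "blurry, low quality\nblurry, low quality"
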
@@ -737,7 +736,6 @@ async def process_chat(
                     negative_prompt = f"Error generating negative prompts: {str(e)}"

             elif neg_prompt != "None":
-                # Create a negative prompt for each line
                 neg_content = self.neg_prompts.get(neg_prompt, "").strip()
                 negative_lines = [neg_content for _ in range(len(lines))]
                 negative_prompt = "\n".join(negative_lines)
@@ -749,7 +747,6 @@ async def process_chat(
                 negative_prompt = ""

             try:
-                # Check if retrieved_image is already a tensor in ComfyUI format
                 if isinstance(retrieved_image, torch.Tensor):
                     # Ensure it's in the correct format (B, C, H, W)
                     if retrieved_image.dim() == 3:  # Single image (C, H, W)
@@ -778,18 +775,14 @@ async def process_chat(
                     # Process retrieved_mask if it's not a tensor
                     mask_tensor = process_mask(retrieved_mask, image_tensor)
                 else:
-                    # Process the retrieved image using process_images_for_comfy
                     image_tensor, default_mask_tensor = process_images_for_comfy(
                         retrieved_image,
                         self.placeholder_image_path
                     )
                     mask_tensor = default_mask_tensor

                 if retrieved_mask is not None:
-                    # Process retrieved_mask to ensure it's in the correct format
                     mask_tensor = process_mask(retrieved_mask, image_tensor)
-
-                # Now image_tensor and mask_tensor are ready to be used
                 return (
                     prompt,
                     combined_prompt,
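
For context on the shape checks above: the code promotes a single image to a batched tensor so downstream code can assume the `(B, C, H, W)` layout named in the diff's comments. A minimal sketch (helper name is hypothetical):

import torch

def ensure_batched(image):
    # Promote a single (C, H, W) image to (1, C, H, W); leave
    # already-batched 4-D tensors untouched.
    if image.dim() == 3:
        return image.unsqueeze(0)
    return image

# ensure_batched(torch.zeros(3, 512, 512)).shape -> torch.Size([1, 3, 512, 512])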