This commit is contained in:
2025-10-13 17:10:11 +02:00
9 changed files with 2704 additions and 39 deletions

View File

@ -57,7 +57,7 @@
},
"7": {
"inputs": {
"seed": 920668017513581,
"seed": 936152772258115,
"steps": 8,
"cfg": 1,
"sampler_name": "euler",
@ -76,8 +76,8 @@
0
],
"latent_image": [
"11",
6
"28",
0
]
},
"class_type": "KSampler",
@ -163,11 +163,11 @@
0
],
"image1": [
"23",
"30",
0
],
"image2": [
"24",
"27",
0
]
},
@ -178,20 +178,41 @@
},
"14": {
"inputs": {
"image": "f81662775bd0e7950e4794933ef4b3d973fbb9c2db397c8b46809797954d0074.png"
"image": "model_outfit_location_1760043932148.png"
},
"class_type": "LoadImage",
"_meta": {
"title": "Load Image"
"title": "load base image"
}
},
"15": {
"19": {
"inputs": {
"image": "monster_554.png"
"rgthree_comparer": {
"images": [
{
"name": "A",
"selected": true,
"url": "/api/view?filename=rgthree.compare._temp_dxzmg_00211_.png&type=temp&subfolder=&rand=0.09499077981761894"
},
"class_type": "LoadImage",
{
"name": "B",
"selected": true,
"url": "/api/view?filename=rgthree.compare._temp_dxzmg_00212_.png&type=temp&subfolder=&rand=0.21125213225471684"
}
]
},
"image_a": [
"11",
1
],
"image_b": [
"8",
0
]
},
"class_type": "Image Comparer (rgthree)",
"_meta": {
"title": "Load Image"
"title": "Image Comparer (rgthree)"
}
},
"20": {
@ -209,7 +230,7 @@
},
"21": {
"inputs": {
"value": "只提取图2中的怪物怪物站在图1的女生身后使用图1的背景并调整怪物的光线以符合图1。\n\n\n\n\n\n\n"
"value": "请将图2中的模特处理成手持图1中包包的照片。"
},
"class_type": "PrimitiveStringMultiline",
"_meta": {
@ -231,17 +252,65 @@
},
"23": {
"inputs": {
"width": [
"width": 720,
"height": 1280,
"batch_size": 1
},
"class_type": "EmptyLatentImage",
"_meta": {
"title": "Empty Latent Image"
}
},
"24": {
"inputs": {
"vae_name": "sdxl_vae.safetensors"
},
"class_type": "VAELoader",
"_meta": {
"title": "Load VAE"
}
},
"25": {
"inputs": {
"samples": [
"23",
0
],
"vae": [
"24",
0
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"26": {
"inputs": {
"images": [
"25",
0
]
},
"class_type": "PreviewImage",
"_meta": {
"title": "Preview Image"
}
},
"27": {
"inputs": {
"width": [
"31",
0
],
"height": [
"26",
"32",
0
],
"upscale_method": "nearest-exact",
"keep_proportion": "stretch",
"pad_color": "0, 0, 0",
"keep_proportion": "resize",
"pad_color": "192,192,192",
"crop_position": "center",
"divisible_by": 2,
"device": "cpu",
@ -255,24 +324,49 @@
"title": "Resize Image v2"
}
},
"24": {
"28": {
"inputs": {
"pixels": [
"27",
0
],
"vae": [
"3",
0
]
},
"class_type": "VAEEncode",
"_meta": {
"title": "VAE Encode"
}
},
"29": {
"inputs": {
"image": "handbag_1760043932148.png"
},
"class_type": "LoadImage",
"_meta": {
"title": "load reference image"
}
},
"30": {
"inputs": {
"width": [
"25",
"31",
0
],
"height": [
"26",
"32",
0
],
"upscale_method": "nearest-exact",
"keep_proportion": "stretch",
"pad_color": "0, 0, 0",
"keep_proportion": "resize",
"pad_color": "192,192,192",
"crop_position": "center",
"divisible_by": 2,
"device": "cpu",
"image": [
"15",
"29",
0
]
},
@ -281,20 +375,20 @@
"title": "Resize Image v2"
}
},
"25": {
"31": {
"inputs": {
"Number": "1280"
"Value": 720
},
"class_type": "Int",
"class_type": "DF_Integer",
"_meta": {
"title": "width"
}
},
"26": {
"32": {
"inputs": {
"Number": "720"
"Value": 1280
},
"class_type": "Int",
"class_type": "DF_Integer",
"_meta": {
"title": "height"
}

View File

@ -0,0 +1,444 @@
{
"1": {
"inputs": {
"unet_name": "qwen_image_edit_2509_fp8_e4m3fn.safetensors",
"weight_dtype": "default"
},
"class_type": "UNETLoader",
"_meta": {
"title": "Load Diffusion Model"
}
},
"2": {
"inputs": {
"clip_name": "qwen_2.5_vl_7b_fp8_scaled.safetensors",
"type": "qwen_image",
"device": "default"
},
"class_type": "CLIPLoader",
"_meta": {
"title": "Load CLIP"
}
},
"3": {
"inputs": {
"vae_name": "qwen_image_vae.safetensors"
},
"class_type": "VAELoader",
"_meta": {
"title": "Load VAE"
}
},
"4": {
"inputs": {
"lora_name": "Qwen-Image-Lightning-8steps-V2.0.safetensors",
"strength_model": 1,
"model": [
"1",
0
]
},
"class_type": "LoraLoaderModelOnly",
"_meta": {
"title": "LoraLoaderModelOnly"
}
},
"5": {
"inputs": {
"conditioning": [
"11",
0
]
},
"class_type": "ConditioningZeroOut",
"_meta": {
"title": "ConditioningZeroOut"
}
},
"7": {
"inputs": {
"seed": 38026585691397,
"steps": 8,
"cfg": 1,
"sampler_name": "euler",
"scheduler": "beta",
"denoise": 1,
"model": [
"4",
0
],
"positive": [
"11",
0
],
"negative": [
"5",
0
],
"latent_image": [
"36",
0
]
},
"class_type": "KSampler",
"_meta": {
"title": "KSampler"
}
},
"8": {
"inputs": {
"samples": [
"7",
0
],
"vae": [
"3",
0
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"9": {
"inputs": {
"font_file": "Alibaba-PuHuiTi-Heavy.ttf",
"font_size": 40,
"border": 32,
"color_theme": "light",
"reel_1": [
"10",
0
]
},
"class_type": "LayerUtility: ImageReelComposit",
"_meta": {
"title": "LayerUtility: Image Reel Composit"
}
},
"10": {
"inputs": {
"image1_text": "Original image",
"image2_text": "Reference",
"image3_text": "Result",
"image4_text": "image4",
"reel_height": 512,
"border": 32,
"image1": [
"11",
1
],
"image2": [
"11",
2
],
"image3": [
"8",
0
]
},
"class_type": "LayerUtility: ImageReel",
"_meta": {
"title": "LayerUtility: Image Reel"
}
},
"11": {
"inputs": {
"prompt": [
"21",
0
],
"enable_resize": false,
"enable_vl_resize": false,
"upscale_method": "lanczos",
"crop": "disabled",
"instruction": "<|im_start|>system\nDescribe the key features of the input image (color, shape, size, texture, objects, background), then explain how the user's text instruction should alter or modify the image. Generate a new image that meets the user's requirements while maintaining consistency with the original input where appropriate.<|im_end|>\n<|im_start|>user\n{}<|im_end|>\n<|im_start|>assistant\n",
"clip": [
"2",
0
],
"vae": [
"3",
0
],
"image1": [
"27",
0
],
"image2": [
"33",
0
]
},
"class_type": "TextEncodeQwenImageEditPlus_lrzjason",
"_meta": {
"title": "TextEncodeQwenImageEditPlus 小志Jason(xiaozhijason)"
}
},
"14": {
"inputs": {
"image": "model_1760082843769.png"
},
"class_type": "LoadImage",
"_meta": {
"title": "load base image"
}
},
"19": {
"inputs": {
"rgthree_comparer": {
"images": [
{
"name": "A",
"selected": true,
"url": "/api/view?filename=rgthree.compare._temp_uoazy_00279_.png&type=temp&subfolder=&rand=0.4405150352070387"
},
{
"name": "B",
"selected": true,
"url": "/api/view?filename=rgthree.compare._temp_uoazy_00280_.png&type=temp&subfolder=&rand=0.9388629603648289"
}
]
},
"image_a": [
"11",
1
],
"image_b": [
"8",
0
]
},
"class_type": "Image Comparer (rgthree)",
"_meta": {
"title": "Image Comparer (rgthree)"
}
},
"20": {
"inputs": {
"filename_prefix": "combined",
"images": [
"8",
0
]
},
"class_type": "SaveImage",
"_meta": {
"title": "Save Image"
}
},
"21": {
"inputs": {
"value": "以图像2为基础生成一张女性肖像照片。她穿着一件黑色薄纱长袖上衣一条光滑的皮革及膝裙和勃艮第色的尖头靴子手提一个深红色的手提包。场景改为极简主义风格的客厅摆放着中性的沙发、镜面墙饰、盆栽植物和浅色地板营造出明亮而宽敞的美感。"
},
"class_type": "PrimitiveStringMultiline",
"_meta": {
"title": "String (Multiline)"
}
},
"22": {
"inputs": {
"filename_prefix": "ComfyUI",
"images": [
"9",
0
]
},
"class_type": "SaveImage",
"_meta": {
"title": "Save Image"
}
},
"23": {
"inputs": {
"width": 720,
"height": 1280,
"batch_size": 1
},
"class_type": "EmptyLatentImage",
"_meta": {
"title": "Empty Latent Image"
}
},
"24": {
"inputs": {
"vae_name": "sdxl_vae.safetensors"
},
"class_type": "VAELoader",
"_meta": {
"title": "Load VAE"
}
},
"25": {
"inputs": {
"samples": [
"23",
0
],
"vae": [
"24",
0
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"26": {
"inputs": {
"images": [
"25",
0
]
},
"class_type": "PreviewImage",
"_meta": {
"title": "Preview Image"
}
},
"27": {
"inputs": {
"width": [
"31",
0
],
"height": [
"32",
0
],
"upscale_method": "nearest-exact",
"keep_proportion": "resize",
"pad_color": "192,192,192",
"crop_position": "center",
"divisible_by": 2,
"device": "cpu",
"image": [
"14",
0
]
},
"class_type": "ImageResizeKJv2",
"_meta": {
"title": "Resize Image v2"
}
},
"28": {
"inputs": {
"pixels": [
"27",
0
],
"vae": [
"3",
0
]
},
"class_type": "VAEEncode",
"_meta": {
"title": "VAE Encode"
}
},
"29": {
"inputs": {
"image": "pose_1760082843769.png"
},
"class_type": "LoadImage",
"_meta": {
"title": "load reference image"
}
},
"30": {
"inputs": {
"width": [
"31",
0
],
"height": [
"32",
0
],
"upscale_method": "nearest-exact",
"keep_proportion": "resize",
"pad_color": "192,192,192",
"crop_position": "center",
"divisible_by": 2,
"device": "cpu",
"image": [
"29",
0
]
},
"class_type": "ImageResizeKJv2",
"_meta": {
"title": "Resize Image v2"
}
},
"31": {
"inputs": {
"Value": 720
},
"class_type": "DF_Integer",
"_meta": {
"title": "width"
}
},
"32": {
"inputs": {
"Value": 1280
},
"class_type": "DF_Integer",
"_meta": {
"title": "height"
}
},
"33": {
"inputs": {
"detect_hand": "enable",
"detect_body": "enable",
"detect_face": "enable",
"resolution": 512,
"bbox_detector": "yolox_l.onnx",
"pose_estimator": "dw-ll_ucoco_384_bs5.torchscript.pt",
"scale_stick_for_xinsr_cn": "disable",
"image": [
"30",
0
]
},
"class_type": "DWPreprocessor",
"_meta": {
"title": "DWPose Estimator"
}
},
"35": {
"inputs": {
"images": [
"33",
0
]
},
"class_type": "PreviewImage",
"_meta": {
"title": "Preview Image"
}
},
"36": {
"inputs": {
"width": [
"31",
0
],
"height": [
"32",
0
],
"batch_size": 1
},
"class_type": "EmptyLatentImage",
"_meta": {
"title": "Empty Latent Image"
}
}
}

View File

@ -0,0 +1,396 @@
{
"1": {
"inputs": {
"unet_name": "qwen_image_edit_2509_fp8_e4m3fn.safetensors",
"weight_dtype": "default"
},
"class_type": "UNETLoader",
"_meta": {
"title": "Load Diffusion Model"
}
},
"2": {
"inputs": {
"clip_name": "qwen_2.5_vl_7b_fp8_scaled.safetensors",
"type": "qwen_image",
"device": "default"
},
"class_type": "CLIPLoader",
"_meta": {
"title": "Load CLIP"
}
},
"3": {
"inputs": {
"vae_name": "qwen_image_vae.safetensors"
},
"class_type": "VAELoader",
"_meta": {
"title": "Load VAE"
}
},
"4": {
"inputs": {
"lora_name": "Qwen-Image-Lightning-8steps-V2.0.safetensors",
"strength_model": 1,
"model": [
"1",
0
]
},
"class_type": "LoraLoaderModelOnly",
"_meta": {
"title": "LoraLoaderModelOnly"
}
},
"5": {
"inputs": {
"conditioning": [
"11",
0
]
},
"class_type": "ConditioningZeroOut",
"_meta": {
"title": "ConditioningZeroOut"
}
},
"7": {
"inputs": {
"seed": 323591075024702,
"steps": 8,
"cfg": 1,
"sampler_name": "euler",
"scheduler": "beta",
"denoise": 1,
"model": [
"4",
0
],
"positive": [
"11",
0
],
"negative": [
"5",
0
],
"latent_image": [
"28",
0
]
},
"class_type": "KSampler",
"_meta": {
"title": "KSampler"
}
},
"8": {
"inputs": {
"samples": [
"7",
0
],
"vae": [
"3",
0
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"9": {
"inputs": {
"font_file": "Alibaba-PuHuiTi-Heavy.ttf",
"font_size": 40,
"border": 32,
"color_theme": "light",
"reel_1": [
"10",
0
]
},
"class_type": "LayerUtility: ImageReelComposit",
"_meta": {
"title": "LayerUtility: Image Reel Composit"
}
},
"10": {
"inputs": {
"image1_text": "Original image",
"image2_text": "Reference",
"image3_text": "Result",
"image4_text": "image4",
"reel_height": 512,
"border": 32,
"image1": [
"11",
1
],
"image2": [
"11",
2
],
"image3": [
"8",
0
]
},
"class_type": "LayerUtility: ImageReel",
"_meta": {
"title": "LayerUtility: Image Reel"
}
},
"11": {
"inputs": {
"prompt": [
"21",
0
],
"enable_resize": true,
"enable_vl_resize": true,
"upscale_method": "lanczos",
"crop": "disabled",
"instruction": "<|im_start|>system\nDescribe the key features of the input image (color, shape, size, texture, objects, background), then explain how the user's text instruction should alter or modify the image. Generate a new image that meets the user's requirements while maintaining consistency with the original input where appropriate.<|im_end|>\n<|im_start|>user\n{}<|im_end|>\n<|im_start|>assistant\n",
"clip": [
"2",
0
],
"vae": [
"3",
0
],
"image1": [
"27",
0
],
"image2": [
"30",
0
]
},
"class_type": "TextEncodeQwenImageEditPlus_lrzjason",
"_meta": {
"title": "TextEncodeQwenImageEditPlus 小志Jason(xiaozhijason)"
}
},
"14": {
"inputs": {
"image": "model_outfit_location_handbag1_1760085003312.png"
},
"class_type": "LoadImage",
"_meta": {
"title": "Load Image"
}
},
"19": {
"inputs": {
"rgthree_comparer": {
"images": [
{
"name": "A",
"selected": true,
"url": "/api/view?filename=rgthree.compare._temp_uoazy_00305_.png&type=temp&subfolder=&rand=0.5408789951924671"
},
{
"name": "B",
"selected": true,
"url": "/api/view?filename=rgthree.compare._temp_uoazy_00306_.png&type=temp&subfolder=&rand=0.2425856190711294"
}
]
},
"image_a": [
"11",
1
],
"image_b": [
"8",
0
]
},
"class_type": "Image Comparer (rgthree)",
"_meta": {
"title": "Image Comparer (rgthree)"
}
},
"20": {
"inputs": {
"filename_prefix": "combined",
"images": [
"8",
0
]
},
"class_type": "SaveImage",
"_meta": {
"title": "Save Image"
}
},
"21": {
"inputs": {
"value": "请将图2中的女性修改成把图1的包背在肩上。"
},
"class_type": "PrimitiveStringMultiline",
"_meta": {
"title": "String (Multiline)"
}
},
"22": {
"inputs": {
"filename_prefix": "ComfyUI",
"images": [
"9",
0
]
},
"class_type": "SaveImage",
"_meta": {
"title": "Save Image"
}
},
"23": {
"inputs": {
"width": 720,
"height": 1280,
"batch_size": 1
},
"class_type": "EmptyLatentImage",
"_meta": {
"title": "Empty Latent Image"
}
},
"24": {
"inputs": {
"vae_name": "sdxl_vae.safetensors"
},
"class_type": "VAELoader",
"_meta": {
"title": "Load VAE"
}
},
"25": {
"inputs": {
"samples": [
"23",
0
],
"vae": [
"24",
0
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"26": {
"inputs": {
"images": [
"25",
0
]
},
"class_type": "PreviewImage",
"_meta": {
"title": "Preview Image"
}
},
"27": {
"inputs": {
"width": [
"31",
0
],
"height": [
"32",
0
],
"upscale_method": "nearest-exact",
"keep_proportion": "crop",
"pad_color": "192,192,192",
"crop_position": "center",
"divisible_by": 2,
"device": "cpu",
"image": [
"14",
0
]
},
"class_type": "ImageResizeKJv2",
"_meta": {
"title": "Resize Image v2"
}
},
"28": {
"inputs": {
"pixels": [
"27",
0
],
"vae": [
"3",
0
]
},
"class_type": "VAEEncode",
"_meta": {
"title": "VAE Encode"
}
},
"29": {
"inputs": {
"image": "handbag_1760085003312.png"
},
"class_type": "LoadImage",
"_meta": {
"title": "Load Image"
}
},
"30": {
"inputs": {
"width": [
"31",
0
],
"height": [
"32",
0
],
"upscale_method": "nearest-exact",
"keep_proportion": "crop",
"pad_color": "192,192,192",
"crop_position": "center",
"divisible_by": 2,
"device": "cpu",
"image": [
"29",
0
]
},
"class_type": "ImageResizeKJv2",
"_meta": {
"title": "Resize Image v2"
}
},
"31": {
"inputs": {
"Value": 720
},
"class_type": "DF_Integer",
"_meta": {
"title": "width"
}
},
"32": {
"inputs": {
"Value": 1280
},
"class_type": "DF_Integer",
"_meta": {
"title": "height"
}
}
}

View File

@ -0,0 +1,444 @@
{
"1": {
"inputs": {
"unet_name": "qwen_image_edit_2509_fp8_e4m3fn.safetensors",
"weight_dtype": "default"
},
"class_type": "UNETLoader",
"_meta": {
"title": "Load Diffusion Model"
}
},
"2": {
"inputs": {
"clip_name": "qwen_2.5_vl_7b_fp8_scaled.safetensors",
"type": "qwen_image",
"device": "default"
},
"class_type": "CLIPLoader",
"_meta": {
"title": "Load CLIP"
}
},
"3": {
"inputs": {
"vae_name": "qwen_image_vae.safetensors"
},
"class_type": "VAELoader",
"_meta": {
"title": "Load VAE"
}
},
"4": {
"inputs": {
"lora_name": "Qwen-Image-Lightning-8steps-V2.0.safetensors",
"strength_model": 1,
"model": [
"1",
0
]
},
"class_type": "LoraLoaderModelOnly",
"_meta": {
"title": "LoraLoaderModelOnly"
}
},
"5": {
"inputs": {
"conditioning": [
"11",
0
]
},
"class_type": "ConditioningZeroOut",
"_meta": {
"title": "ConditioningZeroOut"
}
},
"7": {
"inputs": {
"seed": 38026585691397,
"steps": 8,
"cfg": 1,
"sampler_name": "euler",
"scheduler": "beta",
"denoise": 1,
"model": [
"4",
0
],
"positive": [
"11",
0
],
"negative": [
"5",
0
],
"latent_image": [
"36",
0
]
},
"class_type": "KSampler",
"_meta": {
"title": "KSampler"
}
},
"8": {
"inputs": {
"samples": [
"7",
0
],
"vae": [
"3",
0
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"9": {
"inputs": {
"font_file": "Alibaba-PuHuiTi-Heavy.ttf",
"font_size": 40,
"border": 32,
"color_theme": "light",
"reel_1": [
"10",
0
]
},
"class_type": "LayerUtility: ImageReelComposit",
"_meta": {
"title": "LayerUtility: Image Reel Composit"
}
},
"10": {
"inputs": {
"image1_text": "Original image",
"image2_text": "Reference",
"image3_text": "Result",
"image4_text": "image4",
"reel_height": 512,
"border": 32,
"image1": [
"11",
1
],
"image2": [
"11",
2
],
"image3": [
"8",
0
]
},
"class_type": "LayerUtility: ImageReel",
"_meta": {
"title": "LayerUtility: Image Reel"
}
},
"11": {
"inputs": {
"prompt": [
"21",
0
],
"enable_resize": false,
"enable_vl_resize": false,
"upscale_method": "lanczos",
"crop": "disabled",
"instruction": "<|im_start|>system\nDescribe the key features of the input image (color, shape, size, texture, objects, background), then explain how the user's text instruction should alter or modify the image. Generate a new image that meets the user's requirements while maintaining consistency with the original input where appropriate.<|im_end|>\n<|im_start|>user\n{}<|im_end|>\n<|im_start|>assistant\n",
"clip": [
"2",
0
],
"vae": [
"3",
0
],
"image1": [
"27",
0
],
"image2": [
"33",
0
]
},
"class_type": "TextEncodeQwenImageEditPlus_lrzjason",
"_meta": {
"title": "TextEncodeQwenImageEditPlus 小志Jason(xiaozhijason)"
}
},
"14": {
"inputs": {
"image": "model_1760082843769.png"
},
"class_type": "LoadImage",
"_meta": {
"title": "load base image"
}
},
"19": {
"inputs": {
"rgthree_comparer": {
"images": [
{
"name": "A",
"selected": true,
"url": "/api/view?filename=rgthree.compare._temp_uoazy_00279_.png&type=temp&subfolder=&rand=0.4405150352070387"
},
{
"name": "B",
"selected": true,
"url": "/api/view?filename=rgthree.compare._temp_uoazy_00280_.png&type=temp&subfolder=&rand=0.9388629603648289"
}
]
},
"image_a": [
"11",
1
],
"image_b": [
"8",
0
]
},
"class_type": "Image Comparer (rgthree)",
"_meta": {
"title": "Image Comparer (rgthree)"
}
},
"20": {
"inputs": {
"filename_prefix": "combined",
"images": [
"8",
0
]
},
"class_type": "SaveImage",
"_meta": {
"title": "Save Image"
}
},
"21": {
"inputs": {
"value": "以图像2为基础生成一张女性肖像照片。她穿着一件黑色薄纱长袖上衣一条光滑的皮革及膝裙和勃艮第色的尖头靴子手提一个深红色的手提包。场景改为极简主义风格的客厅摆放着中性的沙发、镜面墙饰、盆栽植物和浅色地板营造出明亮而宽敞的美感。"
},
"class_type": "PrimitiveStringMultiline",
"_meta": {
"title": "String (Multiline)"
}
},
"22": {
"inputs": {
"filename_prefix": "ComfyUI",
"images": [
"9",
0
]
},
"class_type": "SaveImage",
"_meta": {
"title": "Save Image"
}
},
"23": {
"inputs": {
"width": 720,
"height": 1280,
"batch_size": 1
},
"class_type": "EmptyLatentImage",
"_meta": {
"title": "Empty Latent Image"
}
},
"24": {
"inputs": {
"vae_name": "sdxl_vae.safetensors"
},
"class_type": "VAELoader",
"_meta": {
"title": "Load VAE"
}
},
"25": {
"inputs": {
"samples": [
"23",
0
],
"vae": [
"24",
0
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"26": {
"inputs": {
"images": [
"25",
0
]
},
"class_type": "PreviewImage",
"_meta": {
"title": "Preview Image"
}
},
"27": {
"inputs": {
"width": [
"31",
0
],
"height": [
"32",
0
],
"upscale_method": "nearest-exact",
"keep_proportion": "resize",
"pad_color": "192,192,192",
"crop_position": "center",
"divisible_by": 2,
"device": "cpu",
"image": [
"14",
0
]
},
"class_type": "ImageResizeKJv2",
"_meta": {
"title": "Resize Image v2"
}
},
"28": {
"inputs": {
"pixels": [
"27",
0
],
"vae": [
"3",
0
]
},
"class_type": "VAEEncode",
"_meta": {
"title": "VAE Encode"
}
},
"29": {
"inputs": {
"image": "pose_1760082843769.png"
},
"class_type": "LoadImage",
"_meta": {
"title": "load reference image"
}
},
"30": {
"inputs": {
"width": [
"31",
0
],
"height": [
"32",
0
],
"upscale_method": "nearest-exact",
"keep_proportion": "resize",
"pad_color": "192,192,192",
"crop_position": "center",
"divisible_by": 2,
"device": "cpu",
"image": [
"29",
0
]
},
"class_type": "ImageResizeKJv2",
"_meta": {
"title": "Resize Image v2"
}
},
"31": {
"inputs": {
"Value": 720
},
"class_type": "DF_Integer",
"_meta": {
"title": "width"
}
},
"32": {
"inputs": {
"Value": 1280
},
"class_type": "DF_Integer",
"_meta": {
"title": "height"
}
},
"33": {
"inputs": {
"detect_hand": "enable",
"detect_body": "enable",
"detect_face": "enable",
"resolution": 512,
"bbox_detector": "yolox_l.onnx",
"pose_estimator": "dw-ll_ucoco_384_bs5.torchscript.pt",
"scale_stick_for_xinsr_cn": "disable",
"image": [
"30",
0
]
},
"class_type": "DWPreprocessor",
"_meta": {
"title": "DWPose Estimator"
}
},
"35": {
"inputs": {
"images": [
"33",
0
]
},
"class_type": "PreviewImage",
"_meta": {
"title": "Preview Image"
}
},
"36": {
"inputs": {
"width": [
"31",
0
],
"height": [
"32",
0
],
"batch_size": 1
},
"class_type": "EmptyLatentImage",
"_meta": {
"title": "Empty Latent Image"
}
}
}

View File

@ -0,0 +1,111 @@
{
"1": {
"inputs": {
"image": "model_outfit_location_handbag3_1760086053609.png"
},
"class_type": "LoadImage",
"_meta": {
"title": "Load Image"
}
},
"2": {
"inputs": {
"enabled": true,
"swap_model": "inswapper_128.onnx",
"facedetection": "YOLOv5l",
"face_restore_model": "GPEN-BFR-1024.onnx",
"face_restore_visibility": 0.5200000000000001,
"codeformer_weight": 0.5,
"detect_gender_input": "no",
"detect_gender_source": "no",
"input_faces_index": "0",
"source_faces_index": "0",
"console_log_level": 1,
"input_image": [
"6",
0
],
"source_image": [
"3",
0
]
},
"class_type": "ReActorFaceSwap",
"_meta": {
"title": "ReActor 🌌 Fast Face Swap"
}
},
"3": {
"inputs": {
"image": "outfit_1760086053609.png"
},
"class_type": "LoadImage",
"_meta": {
"title": "Load Image"
}
},
"4": {
"inputs": {
"images": [
"2",
0
]
},
"class_type": "PreviewImage",
"_meta": {
"title": "Preview Image"
}
},
"6": {
"inputs": {
"resize_to": "4k",
"images": [
"1",
0
],
"upscaler_trt_model": [
"8",
0
]
},
"class_type": "UpscalerTensorrt",
"_meta": {
"title": "Upscaler Tensorrt ⚡"
}
},
"7": {
"inputs": {
"images": [
"6",
0
]
},
"class_type": "PreviewImage",
"_meta": {
"title": "Preview Image"
}
},
"8": {
"inputs": {
"model": "4x-UltraSharp",
"precision": "fp16"
},
"class_type": "LoadUpscalerTensorrtModel",
"_meta": {
"title": "Load Upscale Tensorrt Model"
}
},
"9": {
"inputs": {
"filename_prefix": "upscaled",
"images": [
"2",
0
]
},
"class_type": "SaveImage",
"_meta": {
"title": "Save Image"
}
}
}

View File

@ -0,0 +1,388 @@
{
"4": {
"inputs": {
"ckpt_name": "dreamshaperXL_v21TurboDPMSDE.safetensors"
},
"class_type": "CheckpointLoaderSimple",
"_meta": {
"title": "Load Checkpoint"
}
},
"12": {
"inputs": {
"seed": 302411063911982,
"steps": 8,
"cfg": 2,
"sampler_name": "dpmpp_sde",
"scheduler": "karras",
"denoise": 1,
"model": [
"4",
0
],
"positive": [
"65",
0
],
"negative": [
"69",
0
],
"latent_image": [
"13",
0
]
},
"class_type": "KSampler",
"_meta": {
"title": "KSampler"
}
},
"13": {
"inputs": {
"width": 1216,
"height": 832,
"batch_size": 1
},
"class_type": "EmptyLatentImage",
"_meta": {
"title": "Empty Latent Image"
}
},
"16": {
"inputs": {
"samples": [
"12",
0
],
"vae": [
"4",
2
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"36": {
"inputs": {
"method": "Mixture of Diffusers",
"tile_width": 1024,
"tile_height": 1024,
"tile_overlap": 32,
"tile_batch_size": 8,
"model": [
"4",
0
]
},
"class_type": "TiledDiffusion",
"_meta": {
"title": "Tiled Diffusion"
}
},
"51": {
"inputs": {
"tile_size": 1024,
"fast": false,
"samples": [
"80",
0
],
"vae": [
"4",
2
]
},
"class_type": "VAEDecodeTiled_TiledDiffusion",
"_meta": {
"title": "Tiled VAE Decode"
}
},
"65": {
"inputs": {
"text": "photo of a high end sports car",
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"69": {
"inputs": {
"text": "text, watermark, (film grain, noise:1.2)",
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"80": {
"inputs": {
"seed": 105566927616764,
"steps": 4,
"cfg": 2,
"sampler_name": "dpmpp_sde",
"scheduler": "karras",
"denoise": 1,
"model": [
"36",
0
],
"positive": [
"141",
0
],
"negative": [
"141",
1
],
"latent_image": [
"84",
0
]
},
"class_type": "KSampler",
"_meta": {
"title": "KSampler"
}
},
"84": {
"inputs": {
"width": [
"106",
0
],
"height": [
"107",
0
],
"batch_size": 1
},
"class_type": "EmptyLatentImage",
"_meta": {
"title": "Empty Latent Image"
}
},
"105": {
"inputs": {
"image": [
"115",
0
]
},
"class_type": "GetImageSizeAndCount",
"_meta": {
"title": "Get Image Size & Count"
}
},
"106": {
"inputs": {
"value": "a*b",
"a": [
"105",
1
],
"b": [
"117",
0
]
},
"class_type": "SimpleMath+",
"_meta": {
"title": "🔧 Simple Math"
}
},
"107": {
"inputs": {
"value": "a*b",
"a": [
"105",
2
],
"b": [
"117",
0
]
},
"class_type": "SimpleMath+",
"_meta": {
"title": "🔧 Simple Math"
}
},
"111": {
"inputs": {
"image": "model_outfit_location_handbag1_1760092227085.png"
},
"class_type": "LoadImage",
"_meta": {
"title": "Load Image"
}
},
"115": {
"inputs": {
"any_01": [
"111",
0
]
},
"class_type": "Any Switch (rgthree)",
"_meta": {
"title": "Any Switch (rgthree)"
}
},
"117": {
"inputs": {
"value": 4.000000000000001
},
"class_type": "FloatConstant",
"_meta": {
"title": "Float Constant"
}
},
"133": {
"inputs": {
"rgthree_comparer": {
"images": [
{
"name": "A",
"selected": true,
"url": "/api/view?filename=rgthree.compare._temp_ybqmm_00009_.png&type=temp&subfolder=&rand=0.02707950499627365"
},
{
"name": "B",
"selected": true,
"url": "/api/view?filename=rgthree.compare._temp_ybqmm_00010_.png&type=temp&subfolder=&rand=0.18690183070180255"
}
]
},
"image_a": [
"115",
0
],
"image_b": [
"149",
0
]
},
"class_type": "Image Comparer (rgthree)",
"_meta": {
"title": "Image Comparer (rgthree)"
}
},
"141": {
"inputs": {
"strength": 0.65,
"start_percent": 0,
"end_percent": 0.9,
"positive": [
"65",
0
],
"negative": [
"69",
0
],
"control_net": [
"142",
0
],
"image": [
"115",
0
]
},
"class_type": "ACN_AdvancedControlNetApply",
"_meta": {
"title": "Apply Advanced ControlNet 🛂🅐🅒🅝"
}
},
"142": {
"inputs": {
"control_net_name": "xinsircontrolnet-tile-sdxl-1.0.safetensors"
},
"class_type": "ControlNetLoaderAdvanced",
"_meta": {
"title": "Load Advanced ControlNet Model 🛂🅐🅒🅝"
}
},
"148": {
"inputs": {
"color_space": "LAB",
"factor": 0.8,
"device": "auto",
"batch_size": 0,
"image": [
"51",
0
],
"reference": [
"115",
0
]
},
"class_type": "ImageColorMatch+",
"_meta": {
"title": "🔧 Image Color Match"
}
},
"149": {
"inputs": {
"sharpen_radius": 1,
"sigma": 1,
"alpha": 0.05,
"image": [
"148",
0
]
},
"class_type": "ImageSharpen",
"_meta": {
"title": "Image Sharpen"
}
},
"154": {
"inputs": {
"filename_prefix": "Upscaled",
"images": [
"149",
0
]
},
"class_type": "SaveImage",
"_meta": {
"title": "Save Image"
}
},
"165": {
"inputs": {
"image": "model_outfit_location_handbag1_1760092227085.png"
},
"class_type": "LoadImage",
"_meta": {
"title": "Load Image"
}
},
"166": {
"inputs": {
"filename_prefix": "upscaled",
"images": [
"149",
0
]
},
"class_type": "SaveImage",
"_meta": {
"title": "Save Image"
}
}
}

View File

@ -15,17 +15,27 @@ async function convertImage(
baseFileName: string,
comfyBaseUrl: string,
comfyOutputDir: string,
size: ImageSize = { width: 720, height: 1280 }
size: ImageSize = { width: 720, height: 1280 },
useEmpltyLatent: boolean = false
): Promise<string> {
const COMFY_BASE_URL = comfyBaseUrl.replace(/\/$/, '');
const COMFY_OUTPUT_DIR = comfyOutputDir;
let workflow;
if (useEmpltyLatent) {
workflow = JSON.parse(await fs.readFile('src/comfyworkflows/edit_image_2_qwen_empty.json', 'utf-8'));
workflow['21']['inputs']['value'] = prompt;
workflow['30']['inputs']['width'] = size.width;
workflow['31']['inputs']['height'] = size.height;
workflow['14']['inputs']['image'] = baseFileName;
} else {
workflow = JSON.parse(await fs.readFile('src/comfyworkflows/edit_image_qwen.json', 'utf-8'));
workflow['21']['inputs']['value'] = prompt;
workflow['23']['inputs']['width'] = size.width;
workflow['23']['inputs']['height'] = size.height;
workflow['14']['inputs']['image'] = baseFileName;
}
const response = await axios.post(`${COMFY_BASE_URL}/prompt`, { prompt: workflow });
const promptId = response.data.prompt_id;
@ -66,7 +76,8 @@ async function convertImage(
return newFilePath;
}
// baseFileName is connected to image2
// secondFileName is connected to image1
async function convertImageWithFile(
prompt: string,
baseFileName: string,
@ -81,10 +92,131 @@ async function convertImageWithFile(
workflow = JSON.parse(await fs.readFile('src/comfyworkflows/edit_image_2_qwen.json', 'utf-8'));
workflow['21']['inputs']['value'] = prompt;
workflow['25']['inputs']['width'] = size.width;
workflow['26']['inputs']['height'] = size.height;
workflow['31']['inputs']['Value'] = size.width;
workflow['32']['inputs']['Value'] = size.height;
workflow['14']['inputs']['image'] = baseFileName;
workflow['15']['inputs']['image'] = secondFileName;
workflow['29']['inputs']['image'] = secondFileName;
const response = await axios.post(`${COMFY_BASE_URL}/prompt`, { prompt: workflow });
const promptId = response.data.prompt_id;
let history;
do {
await new Promise(resolve => setTimeout(resolve, 1000));
const historyResponse = await axios.get(`${COMFY_BASE_URL}/history/${promptId}`);
history = historyResponse.data[promptId];
} while (!history || Object.keys(history.outputs).length === 0);
const files = await fs.readdir(COMFY_OUTPUT_DIR!);
const generatedFiles = files.filter(file => file.startsWith('combined'));
const fileStats = await Promise.all(
generatedFiles.map(async (file) => {
const stat = await fs.stat(path.join(COMFY_OUTPUT_DIR!, file));
return { file, mtime: stat.mtime };
})
);
fileStats.sort((a, b) => b.mtime.getTime() - a.mtime.getTime());
const latestFile = fileStats[0].file;
const newFilePath = path.resolve('./generated', baseFileName);
await fs.mkdir('./generated', { recursive: true });
const sourcePath = path.join(COMFY_OUTPUT_DIR!, latestFile);
try {
await fs.unlink(newFilePath);
} catch (err) {
// ignore if not exists
}
await fs.copyFile(sourcePath, newFilePath);
return newFilePath;
}
// baseFileName is connected to image1
// secondFileName is connected to image2
/**
 * Runs the "edit_image_2_qwen_handbag" ComfyUI workflow and returns the path
 * of the generated image copied into ./generated.
 *
 * @param prompt         Edit instruction text injected into workflow node 21.
 * @param baseFileName   Base image filename, wired into node 14 (image1); also
 *                       used as the destination filename under ./generated.
 * @param secondFileName Reference (handbag) image filename, wired into node 29 (image2).
 * @param comfyBaseUrl   Base URL of the ComfyUI server (trailing slash stripped).
 * @param comfyOutputDir Directory where ComfyUI writes its output images.
 * @param size           Target width/height written into the DF_Integer nodes 31/32.
 * @returns Absolute path of the copied result image.
 * @throws If no "combined*" output file is found in comfyOutputDir.
 */
async function convertImageWithFileHandbag(
  prompt: string,
  baseFileName: string,
  secondFileName: string,
  comfyBaseUrl: string,
  comfyOutputDir: string,
  size: ImageSize = { width: 720, height: 1280 }
): Promise<string> {
  const COMFY_BASE_URL = comfyBaseUrl.replace(/\/$/, '');
  const COMFY_OUTPUT_DIR = comfyOutputDir;
  // Load the workflow template and patch the runtime inputs into it.
  const workflow = JSON.parse(await fs.readFile('src/comfyworkflows/edit_image_2_qwen_handbag.json', 'utf-8'));
  workflow['21']['inputs']['value'] = prompt;         // PrimitiveStringMultiline prompt node
  workflow['31']['inputs']['Value'] = size.width;     // DF_Integer "width"
  workflow['32']['inputs']['Value'] = size.height;    // DF_Integer "height"
  workflow['14']['inputs']['image'] = baseFileName;   // LoadImage -> image1
  workflow['29']['inputs']['image'] = secondFileName; // LoadImage -> image2
  const response = await axios.post(`${COMFY_BASE_URL}/prompt`, { prompt: workflow });
  const promptId = response.data.prompt_id;
  // Poll the history endpoint once per second until the prompt has outputs.
  let history;
  do {
    await new Promise(resolve => setTimeout(resolve, 1000));
    const historyResponse = await axios.get(`${COMFY_BASE_URL}/history/${promptId}`);
    history = historyResponse.data[promptId];
  } while (!history || Object.keys(history.outputs).length === 0);
  // The SaveImage node uses the "combined" prefix; pick the newest such file.
  const files = await fs.readdir(COMFY_OUTPUT_DIR);
  const generatedFiles = files.filter(file => file.startsWith('combined'));
  if (generatedFiles.length === 0) {
    // Fail loudly instead of crashing on fileStats[0] below.
    throw new Error(`No "combined*" output file found in ${COMFY_OUTPUT_DIR}`);
  }
  const fileStats = await Promise.all(
    generatedFiles.map(async (file) => {
      const stat = await fs.stat(path.join(COMFY_OUTPUT_DIR, file));
      return { file, mtime: stat.mtime };
    })
  );
  fileStats.sort((a, b) => b.mtime.getTime() - a.mtime.getTime());
  const latestFile = fileStats[0].file;
  const newFilePath = path.resolve('./generated', baseFileName);
  await fs.mkdir('./generated', { recursive: true });
  const sourcePath = path.join(COMFY_OUTPUT_DIR, latestFile);
  try {
    // Remove any stale result so copyFile does not fail on an existing target.
    await fs.unlink(newFilePath);
  } catch (err) {
    // Ignore: destination may simply not exist yet.
  }
  await fs.copyFile(sourcePath, newFilePath);
  return newFilePath;
}
// baseFileName is connected to image1
// secondFileName is connected to image2
async function convertImageWithFileForPose(
prompt: string,
baseFileName: string,
secondFileName: string,
comfyBaseUrl: string,
comfyOutputDir: string,
size: ImageSize = { width: 720, height: 1280 }
): Promise<string> {
const COMFY_BASE_URL = comfyBaseUrl.replace(/\/$/, '');
const COMFY_OUTPUT_DIR = comfyOutputDir;
let workflow;
workflow = JSON.parse(await fs.readFile('src/comfyworkflows/edit_image_2_qwen_pose.json', 'utf-8'));
workflow['21']['inputs']['value'] = prompt;
workflow['31']['inputs']['Value'] = size.width;
workflow['32']['inputs']['Value'] = size.height;
workflow['14']['inputs']['image'] = baseFileName;
workflow['29']['inputs']['image'] = secondFileName;
const response = await axios.post(`${COMFY_BASE_URL}/prompt`, { prompt: workflow });
const promptId = response.data.prompt_id;
@ -369,4 +501,4 @@ export async function convertImageVtonPose(
return newFilePath;
}
export { convertImage, convertImageWithFile };
export { convertImage, convertImageWithFile, convertImageWithFileForPose, convertImageWithFileHandbag };

118
src/lib/image-upscaler.ts Normal file
View File

@ -0,0 +1,118 @@
import * as fs from 'fs/promises';
import * as path from 'path';
import axios from 'axios';
import dotenv from 'dotenv';
dotenv.config();
// Pixel dimensions for a generated image (width x height).
// NOTE(review): declared here but not referenced by the functions visible in
// this module — presumably kept for signature parity with image-converter; confirm.
interface ImageSize {
  width: number;
  height: number;
}
/**
 * Run the ComfyUI face-restore + upscale workflow on an image.
 *
 * Both filenames must already exist in the ComfyUI input directory. The
 * workflow writes its result (filename prefixed "upscaled") into the ComfyUI
 * output directory; the newest such file is copied to ./generated/<baseFileName>.
 *
 * @param baseFileName - Image to restore/upscale (patched into workflow node "1").
 * @param faceReferenceName - Face reference image (patched into workflow node "3").
 * @param comfyBaseUrl - Base URL of the ComfyUI server (trailing slash stripped).
 * @param comfyOutputDir - Local filesystem path of ComfyUI's output directory.
 * @returns Absolute path of the copied result image under ./generated.
 * @throws Error if no "upscaled*" file is present in the output directory.
 */
async function facerestore_upscale(
  baseFileName: string,
  faceReferenceName: string,
  comfyBaseUrl: string,
  comfyOutputDir: string,
): Promise<string> {
  const COMFY_BASE_URL = comfyBaseUrl.replace(/\/$/, '');
  const COMFY_OUTPUT_DIR = comfyOutputDir;
  // Load the workflow template and patch in the two input images.
  const workflow = JSON.parse(await fs.readFile('src/comfyworkflows/facerestore_upscale.json', 'utf-8'));
  workflow['1']['inputs']['image'] = baseFileName;
  workflow['3']['inputs']['image'] = faceReferenceName;
  const response = await axios.post(`${COMFY_BASE_URL}/prompt`, { prompt: workflow });
  const promptId = response.data.prompt_id;
  // Poll the history endpoint once per second until the prompt reports outputs.
  // NOTE(review): no timeout — a failed prompt would poll forever; confirm intended.
  let history;
  do {
    await new Promise(resolve => setTimeout(resolve, 1000));
    const historyResponse = await axios.get(`${COMFY_BASE_URL}/history/${promptId}`);
    history = historyResponse.data[promptId];
  } while (!history || Object.keys(history.outputs).length === 0);
  // Pick the most recently modified "upscaled*" file from the output directory.
  const files = await fs.readdir(COMFY_OUTPUT_DIR);
  const generatedFiles = files.filter(file => file.startsWith('upscaled'));
  if (generatedFiles.length === 0) {
    // Guard: without this, fileStats[0].file below throws an opaque TypeError.
    throw new Error(`No upscaled output file found in ${COMFY_OUTPUT_DIR}`);
  }
  const fileStats = await Promise.all(
    generatedFiles.map(async (file) => {
      const stat = await fs.stat(path.join(COMFY_OUTPUT_DIR, file));
      return { file, mtime: stat.mtime };
    })
  );
  fileStats.sort((a, b) => b.mtime.getTime() - a.mtime.getTime());
  const latestFile = fileStats[0].file;
  const newFilePath = path.resolve('./generated', baseFileName);
  await fs.mkdir('./generated', { recursive: true });
  const sourcePath = path.join(COMFY_OUTPUT_DIR, latestFile);
  try {
    await fs.unlink(newFilePath);
  } catch (err) {
    // Ignore: destination simply did not exist yet.
  }
  await fs.copyFile(sourcePath, newFilePath);
  return newFilePath;
}
/**
 * Run the ComfyUI upscale workflow on a single image.
 *
 * The file must already exist in the ComfyUI input directory. The workflow
 * writes its result (filename prefixed "upscaled") into the ComfyUI output
 * directory; the newest such file is copied to ./generated/<baseFileName>.
 *
 * @param baseFileName - Image to upscale (patched into workflow node "111").
 * @param comfyBaseUrl - Base URL of the ComfyUI server (trailing slash stripped).
 * @param comfyOutputDir - Local filesystem path of ComfyUI's output directory.
 * @returns Absolute path of the copied result image under ./generated.
 * @throws Error if no "upscaled*" file is present in the output directory.
 */
async function upscale(
  baseFileName: string,
  comfyBaseUrl: string,
  comfyOutputDir: string,
): Promise<string> {
  const COMFY_BASE_URL = comfyBaseUrl.replace(/\/$/, '');
  const COMFY_OUTPUT_DIR = comfyOutputDir;
  // Load the workflow template and patch in the input image.
  const workflow = JSON.parse(await fs.readFile('src/comfyworkflows/upscale.json', 'utf-8'));
  workflow['111']['inputs']['image'] = baseFileName;
  const response = await axios.post(`${COMFY_BASE_URL}/prompt`, { prompt: workflow });
  const promptId = response.data.prompt_id;
  // Poll the history endpoint once per second until the prompt reports outputs.
  // NOTE(review): no timeout — a failed prompt would poll forever; confirm intended.
  let history;
  do {
    await new Promise(resolve => setTimeout(resolve, 1000));
    const historyResponse = await axios.get(`${COMFY_BASE_URL}/history/${promptId}`);
    history = historyResponse.data[promptId];
  } while (!history || Object.keys(history.outputs).length === 0);
  // Pick the most recently modified "upscaled*" file from the output directory.
  const files = await fs.readdir(COMFY_OUTPUT_DIR);
  const generatedFiles = files.filter(file => file.startsWith('upscaled'));
  if (generatedFiles.length === 0) {
    // Guard: without this, fileStats[0].file below throws an opaque TypeError.
    throw new Error(`No upscaled output file found in ${COMFY_OUTPUT_DIR}`);
  }
  const fileStats = await Promise.all(
    generatedFiles.map(async (file) => {
      const stat = await fs.stat(path.join(COMFY_OUTPUT_DIR, file));
      return { file, mtime: stat.mtime };
    })
  );
  fileStats.sort((a, b) => b.mtime.getTime() - a.mtime.getTime());
  const latestFile = fileStats[0].file;
  const newFilePath = path.resolve('./generated', baseFileName);
  await fs.mkdir('./generated', { recursive: true });
  const sourcePath = path.join(COMFY_OUTPUT_DIR, latestFile);
  try {
    await fs.unlink(newFilePath);
  } catch (err) {
    // Ignore: destination simply did not exist yet.
  }
  await fs.copyFile(sourcePath, newFilePath);
  return newFilePath;
}
export { facerestore_upscale, upscale };

View File

@ -0,0 +1,538 @@
import * as fs from 'fs/promises';
import * as path from 'path';
import dotenv from 'dotenv';
import { downloadImagesFromPinterestSearch } from '../lib/pinterest';
import { convertImage, convertImageWithFile, convertImageWithFileForPose, convertImageWithFileHandbag } from '../lib/image-converter';
import { logger } from '../lib/logger';
import { callLmstudio, callLMStudioAPIWithFile } from '../lib/lmstudio';
import { upscale } from '../lib/image-upscaler';
dotenv.config();
const SERVER1_COMFY_BASE_URL = process.env.SERVER1_COMFY_BASE_URL!;
const SERVER1_COMFY_OUTPUT_DIR = process.env.SERVER1_COMFY_OUTPUT_DIR!;
const imageSize: { width: number; height: number } = { width: 1280, height: 720 };
/**
 * Upscale a previously generated image through ComfyUI and save the result
 * under a new filename in the same output directory.
 *
 * @param baseImage - Filename (within outputDir) of the image to upscale.
 * @param faceImage - Filename (within outputDir) of a face reference image.
 *   NOTE(review): it is staged into the ComfyUI input folder but upscale()
 *   never receives it — presumably intended for facerestore_upscale(); confirm.
 * @param outputFilename - Name of the upscaled copy written into outputDir.
 * @param outputDir - Directory containing the inputs and receiving the output.
 * @throws Rethrows any failure after logging it.
 */
async function upscaleAndFix(
  baseImage: string,
  faceImage: string,
  outputFilename: string,
  outputDir: string,
): Promise<void> {
  try {
    // Copy both images to ComfyUI input directory
    const inputFolderFullPath = SERVER1_COMFY_OUTPUT_DIR.replace('output', 'input');
    await fs.mkdir(inputFolderFullPath, { recursive: true });
    const baseFilePath = path.join(outputDir, baseImage);
    const referenceFilePath = path.join(outputDir, faceImage);
    const baseFileName = path.basename(baseImage);
    const referenceFileName = path.basename(faceImage);
    const inputBasePath = path.join(inputFolderFullPath, baseFileName);
    const inputReferencePath = path.join(inputFolderFullPath, referenceFileName);
    logger.info(`Copying base image to ComfyUI input: ${inputBasePath}`);
    await fs.copyFile(baseFilePath, inputBasePath);
    logger.info(`Copying reference image to ComfyUI input: ${inputReferencePath}`);
    await fs.copyFile(referenceFilePath, inputReferencePath);
    // Fixed: log previously claimed convertImageWithFile, but this calls upscale().
    logger.info(`Upscaling image with upscale()...`);
    const convertedImagePath = await upscale(
      baseFileName,
      SERVER1_COMFY_BASE_URL,
      SERVER1_COMFY_OUTPUT_DIR,
    );
    logger.info(`Converted image: ${convertedImagePath}`);
    // Copy the converted image to final destination
    const finalOutputPath = path.join(outputDir, outputFilename);
    logger.info(`Copying to final destination: ${finalOutputPath}`);
    await fs.copyFile(convertedImagePath, finalOutputPath);
    logger.info(`✓ Successfully generated: ${finalOutputPath}`);
  } catch (error) {
    // Fixed: error message previously said "processing two images".
    logger.error(`Error upscaling image:`, error);
    throw error;
  }
}
/**
 * Download one image from Pinterest and either convert it through ComfyUI
 * with a prompt or copy it verbatim into the output directory.
 *
 * @param keyword - Pinterest search keyword
 * @param prompt - Image conversion prompt (unused when shouldConvert is false)
 * @param filename - Output filename written into outputDir
 * @param outputDir - Directory to save the generated file
 * @param shouldConvert - Whether to convert the image with the prompt or just copy it
 * @throws Rethrows any download/convert/copy failure after logging it; a
 *   failed Pinterest download returns silently (logged, no throw).
 */
async function processImage(
  keyword: string,
  prompt: string,
  filename: string,
  outputDir: string,
  shouldConvert: boolean = true
): Promise<void> {
  try {
    // Fixed: banner previously contained a broken interpolation; show the target filename.
    logger.info(`\n=== Processing: ${filename} ===`);
    logger.info(`Keyword: ${keyword}`);
    logger.info(`Should convert: ${shouldConvert}`);
    // Step 1: Download image from Pinterest
    logger.info(`Step 1: Downloading image from Pinterest with keyword: "${keyword}"...`);
    const downloadedImages = await downloadImagesFromPinterestSearch(keyword, 1);
    if (downloadedImages.length === 0) {
      // Best-effort: a failed download skips this image instead of aborting the caller.
      logger.error(`Failed to download image for keyword: "${keyword}"`);
      return;
    }
    const downloadedImagePath = downloadedImages[0];
    logger.info(`Downloaded image: ${downloadedImagePath}`);
    const finalOutputPath = path.join(outputDir, filename);
    if (shouldConvert) {
      logger.info(`Prompt: ${prompt}`);
      // Step 2: Copy image to ComfyUI input directory (derived by swapping 'output' -> 'input')
      const inputFolderFullPath = SERVER1_COMFY_OUTPUT_DIR.replace('output', 'input');
      await fs.mkdir(inputFolderFullPath, { recursive: true });
      const imageFileName = path.basename(downloadedImagePath);
      const inputImagePath = path.join(inputFolderFullPath, imageFileName);
      logger.info(`Step 2: Copying image to ComfyUI input folder: ${inputImagePath}`);
      await fs.copyFile(downloadedImagePath, inputImagePath);
      // Step 3: Convert image with prompt
      logger.info(`Step 3: Converting image with prompt...`);
      const convertedImagePath = await convertImage(
        prompt,
        imageFileName,
        SERVER1_COMFY_BASE_URL,
        SERVER1_COMFY_OUTPUT_DIR,
        imageSize
      );
      logger.info(`Converted image: ${convertedImagePath}`);
      // Step 4: Copy the converted image to final destination
      logger.info(`Step 4: Copying to final destination: ${finalOutputPath}`);
      await fs.copyFile(convertedImagePath, finalOutputPath);
    } else {
      // Just copy the downloaded image directly to the output directory with the specified filename
      logger.info(`Step 2: Copying directly to final destination: ${finalOutputPath}`);
      await fs.copyFile(downloadedImagePath, finalOutputPath);
    }
    logger.info(`✓ Successfully generated: ${finalOutputPath}`);
  } catch (error) {
    logger.error(`Error processing image for keyword "${keyword}":`, error);
    throw error;
  }
}
/**
 * Convert an existing image with a prompt via ComfyUI and write the result
 * into the given output directory.
 *
 * @param prompt - Image conversion prompt
 * @param imagePath - Path to the existing source image
 * @param outputFilename - Filename for the converted result
 * @param outputDir - Directory that receives the converted file
 * @throws Rethrows any failure after logging it.
 */
async function convertImageWithPrompt(
  prompt: string,
  imagePath: string,
  outputFilename: string,
  outputDir: string
): Promise<void> {
  try {
    logger.info(`\n=== Converting Image: ${outputFilename} ===`);
    logger.info(`Source: ${imagePath}`);
    logger.info(`Prompt: ${prompt}`);
    // Stage the source image into ComfyUI's input folder.
    const comfyInputDir = SERVER1_COMFY_OUTPUT_DIR.replace('output', 'input');
    await fs.mkdir(comfyInputDir, { recursive: true });
    const sourceName = path.basename(imagePath);
    const stagedPath = path.join(comfyInputDir, sourceName);
    logger.info(`Step 1: Copying image to ComfyUI input folder: ${stagedPath}`);
    await fs.copyFile(imagePath, stagedPath);
    // Run the conversion workflow.
    logger.info(`Step 2: Converting image with prompt...`);
    const resultPath = await convertImage(
      prompt,
      sourceName,
      SERVER1_COMFY_BASE_URL,
      SERVER1_COMFY_OUTPUT_DIR,
      imageSize
    );
    logger.info(`Converted image: ${resultPath}`);
    // Deliver the converted image to its final location.
    const destination = path.join(outputDir, outputFilename);
    logger.info(`Step 3: Copying to final destination: ${destination}`);
    await fs.copyFile(resultPath, destination);
    logger.info(`✓ Successfully converted: ${destination}`);
  } catch (error) {
    logger.error(`Error converting image:`, error);
    throw error;
  }
}
/**
 * Combine a base image with a reference image using a prompt; the pose
 * variant of the workflow is used when isPose is true.
 *
 * @param prompt - Processing prompt
 * @param baseFile - Base image filename (in generated folder)
 * @param referenceFile - Reference image filename (in generated folder)
 * @param outputFilename - Output filename
 * @param outputDir - Directory to save the generated file
 * @param isPose - Selects convertImageWithFileForPose over convertImageWithFile
 * @throws Rethrows any failure after logging it.
 */
async function processTwoImages(
  prompt: string,
  baseFile: string,
  referenceFile: string,
  outputFilename: string,
  outputDir: string,
  isPose: boolean = false
): Promise<void> {
  try {
    logger.info(`\n=== Processing: ${outputFilename} ===`);
    logger.info(`Base: ${baseFile}, Reference: ${referenceFile}`);
    logger.info(`Prompt: ${prompt}`);
    // Stage both images into ComfyUI's input folder.
    const comfyInputDir = SERVER1_COMFY_OUTPUT_DIR.replace('output', 'input');
    await fs.mkdir(comfyInputDir, { recursive: true });
    const baseName = path.basename(baseFile);
    const refName = path.basename(referenceFile);
    const stagedBase = path.join(comfyInputDir, baseName);
    const stagedRef = path.join(comfyInputDir, refName);
    logger.info(`Copying base image to ComfyUI input: ${stagedBase}`);
    await fs.copyFile(path.join(outputDir, baseFile), stagedBase);
    logger.info(`Copying reference image to ComfyUI input: ${stagedRef}`);
    await fs.copyFile(path.join(outputDir, referenceFile), stagedRef);
    // Run the appropriate two-image workflow.
    logger.info(`Processing images with convertImageWithFile...`);
    let resultPath: string;
    if (isPose) {
      resultPath = await convertImageWithFileForPose(
        prompt,
        baseName,
        refName,
        SERVER1_COMFY_BASE_URL,
        SERVER1_COMFY_OUTPUT_DIR,
        imageSize
      );
    } else {
      resultPath = await convertImageWithFile(
        prompt,
        baseName,
        refName,
        SERVER1_COMFY_BASE_URL,
        SERVER1_COMFY_OUTPUT_DIR,
        imageSize
      );
    }
    logger.info(`Converted image: ${resultPath}`);
    // Deliver the result to its final location.
    const destination = path.join(outputDir, outputFilename);
    logger.info(`Copying to final destination: ${destination}`);
    await fs.copyFile(resultPath, destination);
    logger.info(`✓ Successfully generated: ${destination}`);
  } catch (error) {
    logger.error(`Error processing two images:`, error);
    throw error;
  }
}
/**
 * Combine a base image with a handbag reference image using the dedicated
 * handbag ComfyUI workflow (convertImageWithFileHandbag).
 *
 * @param prompt - Processing prompt
 * @param baseFile - Base image filename (in generated folder)
 * @param referenceFile - Reference image filename (in generated folder)
 * @param outputFilename - Output filename
 * @param outputDir - Directory to save the generated file
 * @throws Rethrows any failure after logging it.
 */
async function processTwoImagesHandbag(
  prompt: string,
  baseFile: string,
  referenceFile: string,
  outputFilename: string,
  outputDir: string,
): Promise<void> {
  try {
    logger.info(`\n=== Processing: ${outputFilename} ===`);
    logger.info(`Base: ${baseFile}, Reference: ${referenceFile}`);
    logger.info(`Prompt: ${prompt}`);
    // Copy both images to ComfyUI input directory
    const inputFolderFullPath = SERVER1_COMFY_OUTPUT_DIR.replace('output', 'input');
    await fs.mkdir(inputFolderFullPath, { recursive: true });
    const baseFilePath = path.join(outputDir, baseFile);
    const referenceFilePath = path.join(outputDir, referenceFile);
    const baseFileName = path.basename(baseFile);
    const referenceFileName = path.basename(referenceFile);
    const inputBasePath = path.join(inputFolderFullPath, baseFileName);
    const inputReferencePath = path.join(inputFolderFullPath, referenceFileName);
    logger.info(`Copying base image to ComfyUI input: ${inputBasePath}`);
    await fs.copyFile(baseFilePath, inputBasePath);
    logger.info(`Copying reference image to ComfyUI input: ${inputReferencePath}`);
    await fs.copyFile(referenceFilePath, inputReferencePath);
    // Fixed: log previously claimed convertImageWithFile, but this path uses the handbag workflow.
    logger.info(`Processing images with convertImageWithFileHandbag...`);
    const convertedImagePath = await convertImageWithFileHandbag(
      prompt,
      baseFileName,
      referenceFileName,
      SERVER1_COMFY_BASE_URL,
      SERVER1_COMFY_OUTPUT_DIR,
      imageSize
    );
    logger.info(`Converted image: ${convertedImagePath}`);
    // Copy the converted image to final destination
    const finalOutputPath = path.join(outputDir, outputFilename);
    logger.info(`Copying to final destination: ${finalOutputPath}`);
    await fs.copyFile(convertedImagePath, finalOutputPath);
    logger.info(`✓ Successfully generated: ${finalOutputPath}`);
  } catch (error) {
    logger.error(`Error processing two images:`, error);
    throw error;
  }
}
/**
 * Run one complete generation iteration.
 *
 * Phase 1 downloads five base images from Pinterest (model face, outfit,
 * room, handbag, pose) into a fresh timestamped directory under ./generated.
 * Phase 2 derives text prompts from the outfit/room images via LMStudio,
 * composes the model with outfit/location/pose, produces three handbag
 * variants, and upscales each variant.
 *
 * @param iteration - 1-based iteration counter, used only for logging.
 * @throws Rethrows any failure after logging it.
 */
async function processIteration(iteration: number): Promise<void> {
  try {
    // Timestamp doubles as the unique id for this iteration's files/directory.
    const timestamp = Date.now();
    logger.info(`\n${'='.repeat(80)}`);
    logger.info(`ITERATION ${iteration} - Starting with timestamp: ${timestamp}`);
    logger.info(`${'='.repeat(80)}`);
    // Create output directory for this iteration
    const outputDir = path.join(process.cwd(), 'generated', `vton_${timestamp}`);
    await fs.mkdir(outputDir, { recursive: true });
    logger.info(`Output directory created: ${outputDir}`);
    // === PHASE 1: Download base images ===
    logger.info(`\n--- PHASE 1: Downloading base images ---`);
    // Model face: downloaded verbatim (no conversion).
    await processImage(
      'cute girl face high resolution',
      '',
      `model_${timestamp}.png`,
      outputDir,
      false
    );
    // Outfit reference: downloaded verbatim.
    await processImage(
      'woman elegant outfit fullbody single',
      '',
      `outfit_${timestamp}.png`,
      outputDir,
      false
    );
    // Room/location reference: downloaded verbatim.
    await processImage(
      'photo elegant indoor room',
      '',
      `room_${timestamp}.png`,
      outputDir,
      false
    );
    // Handbag: the only download that is also converted (extract bag onto a
    // light-grey background, per the Chinese prompt).
    await processImage(
      'handbag single product photography',
      '请提取照片中的包,并将其正面朝向地放置在亮灰色背景上。',
      `handbag_${timestamp}.png`,
      outputDir,
      true
    );
    // Pose reference: downloaded verbatim.
    await processImage(
      'woman portrait standing',
      '',
      `pose_${timestamp}.png`,
      outputDir,
      false
    );
    // === PHASE 2: Sequential transformations ===
    logger.info(`\n--- PHASE 2: Sequential transformations ---`);
    // Step 1: Generate outfit prompt using LMStudio API
    logger.info('Step 1: Generating outfit prompt with LMStudio API...');
    const outfitImagePath = path.join(outputDir, `outfit_${timestamp}.png`);
    const outfitPromptResponse = await callLMStudioAPIWithFile(
      outfitImagePath,
      'Describe this outfit in detail about 30 words. Focus on color and cloth type. Return the result in this format: {"result":""}'
    );
    // Fall back to the raw response if the model did not return {"result": ...}.
    const outfitPrompt = outfitPromptResponse.result || outfitPromptResponse;
    logger.info(`Generated outfit prompt: ${outfitPrompt}`);
    // Step 2: Generate location prompt using LMStudio API
    logger.info('Step 2: Generating location prompt with LMStudio API...');
    const roomImagePath = path.join(outputDir, `room_${timestamp}.png`);
    const locationPromptResponse = await callLMStudioAPIWithFile(
      roomImagePath,
      'Describe this location/room in detail about 30 words. Return the result in this format: {"result":""}'
    );
    const locationPrompt = locationPromptResponse.result || locationPromptResponse;
    logger.info(`Generated location prompt: ${locationPrompt}`);
    // Step 3: Generate Chinese prompt using LMStudio API
    logger.info('Step 3: Generating Chinese prompt for model transformation...');
    // NOTE(review): the request text contains typos ("genereate a portarit");
    // left untouched because it is runtime data sent to the LLM.
    const chinesePromptRequest = `Generate a Chinese prompt for image transformation that describes:
- Prefix: genereate a portarit photo of a woman in image1
- Use outfit to: ${outfitPrompt}
- Use location to: ${locationPrompt}
Return the result in this format: {"result":""}`;
    const chinesePromptResponse = await callLmstudio(chinesePromptRequest);
    const chinesePrompt = chinesePromptResponse.result || chinesePromptResponse;
    logger.info(`Generated Chinese prompt: ${chinesePrompt}`);
    // Process model with outfit and location using the Chinese prompt
    logger.info('Step 4: Processing model with outfit and location...');
    const modelImagePath = path.join(outputDir, `model_${timestamp}.png`);
    // Copy model image to ComfyUI input directory
    const inputFolderFullPath = SERVER1_COMFY_OUTPUT_DIR.replace('output', 'input');
    await fs.mkdir(inputFolderFullPath, { recursive: true });
    const modelFileName = path.basename(modelImagePath);
    const inputModelPath = path.join(inputFolderFullPath, modelFileName);
    await fs.copyFile(modelImagePath, inputModelPath);
    // Convert image with Chinese prompt and pose (isPose=true selects the pose workflow).
    await processTwoImages(
      `请将图1中模特的姿势更改为图2的姿势。, ${chinesePrompt}`,
      modelFileName,
      `pose_${timestamp}.png`,
      `model_outfit_location_pose_${timestamp}.png`,
      outputDir,
      true
    );
    // Step 5: Add handbag to model — three prompt variants from the same inputs.
    await processTwoImagesHandbag(
      '请将图1中的女性修改成手持图2的包。',
      `model_outfit_location_pose_${timestamp}.png`,
      `handbag_${timestamp}.png`,
      `model_outfit_location_handbag1_${timestamp}.png`,
      outputDir
    );
    await processTwoImagesHandbag(
      '请让图1的女性看起来像是在手里拿着图2的包。',
      `model_outfit_location_pose_${timestamp}.png`,
      `handbag_${timestamp}.png`,
      `model_outfit_location_handbag2_${timestamp}.png`,
      outputDir
    );
    await processTwoImagesHandbag(
      '请将图1中的女性修改成双手拿着图2的包。',
      `model_outfit_location_pose_${timestamp}.png`,
      `handbag_${timestamp}.png`,
      `model_outfit_location_handbag3_${timestamp}.png`,
      outputDir
    );
    // Upscale each handbag variant, passing the model face as reference.
    await upscaleAndFix(
      `model_outfit_location_handbag1_${timestamp}.png`,
      `model_${timestamp}.png`,
      `model_outfit_location_handbag1_upscaled_${timestamp}.png`,
      outputDir
    );
    await upscaleAndFix(
      `model_outfit_location_handbag2_${timestamp}.png`,
      `model_${timestamp}.png`,
      `model_outfit_location_handbag2_upscaled_${timestamp}.png`,
      outputDir
    );
    await upscaleAndFix(
      `model_outfit_location_handbag3_${timestamp}.png`,
      `model_${timestamp}.png`,
      `model_outfit_location_handbag3_upscaled_${timestamp}.png`,
      outputDir
    );
    logger.info(`\n${'='.repeat(80)}`);
    logger.info(`ITERATION ${iteration} COMPLETED!`);
    logger.info(`Generated files are saved in: ${outputDir}`);
    logger.info(`${'='.repeat(80)}\n`);
  } catch (error) {
    logger.error(`Error in iteration ${iteration}:`, error);
    throw error;
  }
}
/**
 * Entry point: runs processIteration forever, pausing 5 seconds between
 * rounds; the first unhandled error is logged and the process exits with
 * status 1.
 */
async function main() {
  try {
    logger.info('Starting infinite processing loop...');
    logger.info('Press Ctrl+C to stop the process\n');
    // Infinite loop — terminated only by Ctrl+C or an error.
    for (let iteration = 1; ; iteration += 1) {
      await processIteration(iteration);
      logger.info('Waiting 5 seconds before next iteration...\n');
      await new Promise(resolve => setTimeout(resolve, 5000));
    }
  } catch (error) {
    logger.error('Error in main execution:', error);
    process.exit(1);
  }
}
// Execute main function if this file is run directly
if (require.main === module) {
main();
}
export { processImage, convertImageWithPrompt, processTwoImages, processIteration, main };