Commit: save changes
@@ -57,14 +57,14 @@
  },
  "7": {
    "inputs": {
-      "seed": 799784211855929,
+      "seed": 506786026379830,
      "steps": 8,
      "cfg": 1,
      "sampler_name": "euler",
      "scheduler": "beta",
      "denoise": 1,
      "model": [
-        "66",
+        "140",
        0
      ],
      "positive": [
@@ -136,7 +136,7 @@
  },
  "15": {
    "inputs": {
-      "image": "cloth_0001.png"
+      "image": "cloth_0026.png"
    },
    "class_type": "LoadImage",
    "_meta": {
@@ -145,7 +145,7 @@
  },
  "21": {
    "inputs": {
-      "value": "change clothes of image1 with image2"
+      "value": "change clothes of image1 to image2, remove the cap from head"
    },
    "class_type": "PrimitiveStringMultiline",
    "_meta": {
@@ -154,30 +154,16 @@
  },
  "64": {
    "inputs": {
-      "image": "Lauren_body.png"
+      "image": "Courtney_body.png"
    },
    "class_type": "LoadImage",
    "_meta": {
      "title": "Load model"
    }
  },
-  "66": {
-    "inputs": {
-      "lora_name": "extract-outfit_v3.safetensors",
-      "strength_model": 1,
-      "model": [
-        "4",
-        0
-      ]
-    },
-    "class_type": "LoraLoaderModelOnly",
-    "_meta": {
-      "title": "LoraLoaderModelOnly"
-    }
-  },
  "76": {
    "inputs": {
-      "number": 720
+      "number": 832
    },
    "class_type": "StaticNumberInt",
    "_meta": {
@@ -186,7 +172,7 @@
  },
  "77": {
    "inputs": {
-      "number": 1280
+      "number": 1248
    },
    "class_type": "StaticNumberInt",
    "_meta": {
@@ -259,7 +245,7 @@
        0
      ],
      "upscale_method": "nearest-exact",
-      "keep_proportion": "pad",
+      "keep_proportion": "crop",
      "pad_color": "0, 0, 0",
      "crop_position": "center",
      "divisible_by": 2,
@ -310,5 +296,59 @@
|
||||
"_meta": {
|
||||
"title": "Preview Image"
|
||||
}
|
||||
},
|
||||
"136": {
|
||||
"inputs": {
|
||||
"image": "281543721672978_1758880135639_0.png"
|
||||
},
|
||||
"class_type": "LoadImage",
|
||||
"_meta": {
|
||||
"title": "Load Image"
|
||||
}
|
||||
},
|
||||
"137": {
|
||||
"inputs": {
|
||||
"detect_hand": "enable",
|
||||
"detect_body": "enable",
|
||||
"detect_face": "enable",
|
||||
"resolution": 512,
|
||||
"bbox_detector": "yolox_l.onnx",
|
||||
"pose_estimator": "dw-ll_ucoco_384_bs5.torchscript.pt",
|
||||
"scale_stick_for_xinsr_cn": "disable",
|
||||
"image": [
|
||||
"136",
|
||||
0
|
||||
]
|
||||
},
|
||||
"class_type": "DWPreprocessor",
|
||||
"_meta": {
|
||||
"title": "DWPose Estimator"
|
||||
}
|
||||
},
|
||||
"139": {
|
||||
"inputs": {
|
||||
"images": [
|
||||
"137",
|
||||
0
|
||||
]
|
||||
},
|
||||
"class_type": "PreviewImage",
|
||||
"_meta": {
|
||||
"title": "Preview Image"
|
||||
}
|
||||
},
|
||||
"140": {
|
||||
"inputs": {
|
||||
"lora_name": "Try_On_Qwen_Edit_Lora.safetensors",
|
||||
"strength_model": 1,
|
||||
"model": [
|
||||
"4",
|
||||
0
|
||||
]
|
||||
},
|
||||
"class_type": "LoraLoaderModelOnly",
|
||||
"_meta": {
|
||||
"title": "LoraLoaderModelOnly"
|
||||
}
|
||||
}
|
||||
}
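Taken together, this file's hunks reroute node 7's model input from the outfit-extraction LoRA (node 66, deleted) to the newly added Try_On_Qwen_Edit_Lora (node 140), swap the garment and model reference images, extend the prompt to also remove the cap, retune the target size (832 x 1248), switch a resize from pad to crop, and add a DWPose estimation chain (136 → 137 → 139).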
src/comfyworkflows/vton.json (new file, 425 lines)
@@ -0,0 +1,425 @@
{
  "1": {
    "inputs": {
      "unet_name": "qwen_image_edit_2509_fp8_e4m3fn.safetensors",
      "weight_dtype": "default"
    },
    "class_type": "UNETLoader",
    "_meta": {
      "title": "Load Diffusion Model"
    }
  },
  "2": {
    "inputs": {
      "clip_name": "qwen_2.5_vl_7b_fp8_scaled.safetensors",
      "type": "qwen_image",
      "device": "default"
    },
    "class_type": "CLIPLoader",
    "_meta": {
      "title": "Load CLIP"
    }
  },
  "3": {
    "inputs": {
      "vae_name": "qwen_image_vae.safetensors"
    },
    "class_type": "VAELoader",
    "_meta": {
      "title": "Load VAE"
    }
  },
  "4": {
    "inputs": {
      "lora_name": "Qwen-Image-Lightning-8steps-V2.0.safetensors",
      "strength_model": 1,
      "model": [
        "1",
        0
      ]
    },
    "class_type": "LoraLoaderModelOnly",
    "_meta": {
      "title": "LoraLoaderModelOnly"
    }
  },
  "5": {
    "inputs": {
      "conditioning": [
        "11",
        0
      ]
    },
    "class_type": "ConditioningZeroOut",
    "_meta": {
      "title": "ConditioningZeroOut"
    }
  },
  "7": {
    "inputs": {
      "seed": 559577834683401,
      "steps": 8,
      "cfg": 1,
      "sampler_name": "euler",
      "scheduler": "beta",
      "denoise": 1,
      "model": [
        "66",
        0
      ],
      "positive": [
        "11",
        0
      ],
      "negative": [
        "5",
        0
      ],
      "latent_image": [
        "11",
        6
      ]
    },
    "class_type": "KSampler",
    "_meta": {
      "title": "KSampler"
    }
  },
  "8": {
    "inputs": {
      "samples": [
        "7",
        0
      ],
      "vae": [
        "3",
        0
      ]
    },
    "class_type": "VAEDecode",
    "_meta": {
      "title": "VAE Decode"
    }
  },
  "11": {
    "inputs": {
      "prompt": [
        "21",
        0
      ],
      "enable_resize": false,
      "enable_vl_resize": false,
      "upscale_method": "lanczos",
      "crop": "disabled",
      "instruction": "<|im_start|>system\nDescribe the key features of the input image (color, shape, size, texture, objects, background), then explain how the user's text instruction should alter or modify the image. Generate a new image that meets the user's requirements while maintaining consistency with the original input where appropriate.<|im_end|>\n<|im_start|>user\n{}<|im_end|>\n<|im_start|>assistant\n",
      "clip": [
        "2",
        0
      ],
      "vae": [
        "3",
        0
      ],
      "image1": [
        "84",
        0
      ],
      "image2": [
        "82",
        0
      ],
      "image3": [
        "81",
        0
      ]
    },
    "class_type": "TextEncodeQwenImageEditPlus_lrzjason",
    "_meta": {
      "title": "TextEncodeQwenImageEditPlus 小志Jason(xiaozhijason)"
    }
  },
  "15": {
    "inputs": {
      "image": "Allison_body (1).png"
    },
    "class_type": "LoadImage",
    "_meta": {
      "title": "Load Image"
    }
  },
  "20": {
    "inputs": {
      "filename_prefix": "qwenedit",
      "images": [
        "8",
        0
      ]
    },
    "class_type": "SaveImage",
    "_meta": {
      "title": "Save Image"
    }
  },
  "21": {
    "inputs": {
      "value": "图2中的女孩穿着图1的衣服,并以图3的姿势站立。背景保持浅灰色。"
    },
    "class_type": "PrimitiveStringMultiline",
    "_meta": {
      "title": "String (Multiline)"
    }
  },
  "64": {
    "inputs": {
      "image": "cloth_0111.png"
    },
    "class_type": "LoadImage",
    "_meta": {
      "title": "Load Image"
    }
  },
  "66": {
    "inputs": {
      "lora_name": "extract-outfit_v3.safetensors",
      "strength_model": 1,
      "model": [
        "4",
        0
      ]
    },
    "class_type": "LoraLoaderModelOnly",
    "_meta": {
      "title": "LoraLoaderModelOnly"
    }
  },
  "67": {
    "inputs": {
      "detect_hand": "enable",
      "detect_body": "enable",
      "detect_face": "enable",
      "resolution": 512,
      "bbox_detector": "yolox_l.onnx",
      "pose_estimator": "dw-ll_ucoco_384_bs5.torchscript.pt",
      "scale_stick_for_xinsr_cn": "disable",
      "image": [
        "68",
        0
      ]
    },
    "class_type": "DWPreprocessor",
    "_meta": {
      "title": "DWPose Estimator"
    }
  },
  "68": {
    "inputs": {
      "image": "633387441703331_1758877367350_1.png"
    },
    "class_type": "LoadImage",
    "_meta": {
      "title": "Load Image"
    }
  },
  "69": {
    "inputs": {
      "images": [
        "81",
        0
      ]
    },
    "class_type": "PreviewImage",
    "_meta": {
      "title": "Preview Image"
    }
  },
  "76": {
    "inputs": {
      "number": 720
    },
    "class_type": "StaticNumberInt",
    "_meta": {
      "title": "Static Number Int"
    }
  },
  "77": {
    "inputs": {
      "number": 1280
    },
    "class_type": "StaticNumberInt",
    "_meta": {
      "title": "Static Number Int"
    }
  },
  "78": {
    "inputs": {
      "width": [
        "76",
        0
      ],
      "height": [
        "77",
        0
      ],
      "batch_size": 1
    },
    "class_type": "EmptyLatentImage",
    "_meta": {
      "title": "Empty Latent Image"
    }
  },
  "81": {
    "inputs": {
      "width": 480,
      "height": 962,
      "upscale_method": "nearest-exact",
      "keep_proportion": "pad",
      "pad_color": "0, 0, 0",
      "crop_position": "center",
      "divisible_by": 2,
      "device": "cpu",
      "image": [
        "67",
        0
      ]
    },
    "class_type": "ImageResizeKJv2",
    "_meta": {
      "title": "Resize Image v2"
    }
  },
  "82": {
    "inputs": {
      "width": [
        "76",
        0
      ],
      "height": [
        "77",
        0
      ],
      "upscale_method": "nearest-exact",
      "keep_proportion": "crop",
      "pad_color": "255,255,255",
      "crop_position": "center",
      "divisible_by": 2,
      "device": "cpu",
      "image": [
        "15",
        0
      ]
    },
    "class_type": "ImageResizeKJv2",
    "_meta": {
      "title": "Resize Image v2"
    }
  },
  "83": {
    "inputs": {
      "images": [
        "82",
        0
      ]
    },
    "class_type": "PreviewImage",
    "_meta": {
      "title": "Preview Image"
    }
  },
  "84": {
    "inputs": {
      "width": [
        "76",
        0
      ],
      "height": [
        "77",
        0
      ],
      "upscale_method": "nearest-exact",
      "keep_proportion": "pad",
      "pad_color": "0, 0, 0",
      "crop_position": "center",
      "divisible_by": 2,
      "device": "cpu",
      "image": [
        "64",
        0
      ]
    },
    "class_type": "ImageResizeKJv2",
    "_meta": {
      "title": "Resize Image v2"
    }
  },
  "85": {
    "inputs": {
      "images": [
        "84",
        0
      ]
    },
    "class_type": "PreviewImage",
    "_meta": {
      "title": "Preview Image"
    }
  },
  "86": {
    "inputs": {
      "image1_text": "image1",
      "image2_text": "image2",
      "image3_text": "image3",
      "image4_text": "image4",
      "reel_height": 512,
      "border": 32,
      "image1": [
        "15",
        0
      ],
      "image2": [
        "64",
        0
      ],
      "image3": [
        "81",
        0
      ],
      "image4": [
        "8",
        0
      ]
    },
    "class_type": "LayerUtility: ImageReel",
    "_meta": {
      "title": "LayerUtility: Image Reel"
    }
  },
  "87": {
    "inputs": {
      "font_file": "Alibaba-PuHuiTi-Heavy.ttf",
      "font_size": 40,
      "border": 32,
      "color_theme": "light",
      "reel_1": [
        "86",
        0
      ]
    },
    "class_type": "LayerUtility: ImageReelComposit",
    "_meta": {
      "title": "LayerUtility: Image Reel Composit"
    }
  },
  "88": {
    "inputs": {
      "filename_prefix": "vtonresult/vton",
      "images": [
        "87",
        0
      ]
    },
    "class_type": "SaveImage",
    "_meta": {
      "title": "Save Image"
    }
  }
}
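In vton.json, node 21's Chinese prompt reads, roughly: "The girl in image 2 wears the clothes from image 1 and stands in the pose from image 3. Keep the background light gray." The three references reach the text encoder (node 11) as image1 = the resized garment (64 → 84), image2 = the resized person (15 → 82), and image3 = the DWPose skeleton of the pose shot (68 → 67 → 81). As a sketch of how a caller drives this graph, here is an illustrative helper that condenses what convertImageVtonPose does later in this commit; the name queueVton is hypothetical:

import axios from 'axios';
import { promises as fs } from 'fs';

// Illustrative only: patch the three LoadImage nodes in vton.json and queue the graph.
async function queueVton(baseUrl: string, person: string, cloth: string, pose: string) {
  const workflow = JSON.parse(await fs.readFile('src/comfyworkflows/vton.json', 'utf-8'));
  workflow['15']['inputs']['image'] = person; // person body shot
  workflow['64']['inputs']['image'] = cloth;  // garment reference
  workflow['68']['inputs']['image'] = pose;   // pose source fed to DWPose (node 67)
  const response = await axios.post(`${baseUrl}/prompt`, { prompt: workflow });
  return response.data.prompt_id; // poll `${baseUrl}/history/<prompt_id>` until outputs appear
}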
src/comfyworkflows/vton_cloth.json (new file, 357 lines)
@@ -0,0 +1,357 @@
{
  "1": {
    "inputs": {
      "unet_name": "qwen_image_edit_2509_fp8_e4m3fn.safetensors",
      "weight_dtype": "default"
    },
    "class_type": "UNETLoader",
    "_meta": {
      "title": "Load Diffusion Model"
    }
  },
  "2": {
    "inputs": {
      "clip_name": "qwen_2.5_vl_7b_fp8_scaled.safetensors",
      "type": "qwen_image",
      "device": "default"
    },
    "class_type": "CLIPLoader",
    "_meta": {
      "title": "Load CLIP"
    }
  },
  "3": {
    "inputs": {
      "vae_name": "qwen_image_vae.safetensors"
    },
    "class_type": "VAELoader",
    "_meta": {
      "title": "Load VAE"
    }
  },
  "4": {
    "inputs": {
      "lora_name": "Qwen-Image-Lightning-8steps-V2.0.safetensors",
      "strength_model": 1,
      "model": [
        "1",
        0
      ]
    },
    "class_type": "LoraLoaderModelOnly",
    "_meta": {
      "title": "LoraLoaderModelOnly"
    }
  },
  "5": {
    "inputs": {
      "conditioning": [
        "11",
        0
      ]
    },
    "class_type": "ConditioningZeroOut",
    "_meta": {
      "title": "ConditioningZeroOut"
    }
  },
  "7": {
    "inputs": {
      "seed": 559577834683401,
      "steps": 8,
      "cfg": 1,
      "sampler_name": "euler",
      "scheduler": "beta",
      "denoise": 1,
      "model": [
        "66",
        0
      ],
      "positive": [
        "11",
        0
      ],
      "negative": [
        "5",
        0
      ],
      "latent_image": [
        "11",
        6
      ]
    },
    "class_type": "KSampler",
    "_meta": {
      "title": "KSampler"
    }
  },
  "8": {
    "inputs": {
      "samples": [
        "7",
        0
      ],
      "vae": [
        "3",
        0
      ]
    },
    "class_type": "VAEDecode",
    "_meta": {
      "title": "VAE Decode"
    }
  },
  "11": {
    "inputs": {
      "prompt": [
        "21",
        0
      ],
      "enable_resize": false,
      "enable_vl_resize": false,
      "upscale_method": "lanczos",
      "crop": "disabled",
      "instruction": "<|im_start|>system\nDescribe the key features of the input image (color, shape, size, texture, objects, background), then explain how the user's text instruction should alter or modify the image. Generate a new image that meets the user's requirements while maintaining consistency with the original input where appropriate.<|im_end|>\n<|im_start|>user\n{}<|im_end|>\n<|im_start|>assistant\n",
      "clip": [
        "2",
        0
      ],
      "vae": [
        "3",
        0
      ],
      "image1": [
        "84",
        0
      ],
      "image2": [
        "82",
        0
      ]
    },
    "class_type": "TextEncodeQwenImageEditPlus_lrzjason",
    "_meta": {
      "title": "TextEncodeQwenImageEditPlus 小志Jason(xiaozhijason)"
    }
  },
  "15": {
    "inputs": {
      "image": "Allison_body (1).png"
    },
    "class_type": "LoadImage",
    "_meta": {
      "title": "Load Image"
    }
  },
  "20": {
    "inputs": {
      "filename_prefix": "qwenedit",
      "images": [
        "8",
        0
      ]
    },
    "class_type": "SaveImage",
    "_meta": {
      "title": "Save Image"
    }
  },
  "21": {
    "inputs": {
      "value": "图2中的女孩穿着图1的衣服\n\n\n\n\n\n"
    },
    "class_type": "PrimitiveStringMultiline",
    "_meta": {
      "title": "String (Multiline)"
    }
  },
  "64": {
    "inputs": {
      "image": "cloth_0111.png"
    },
    "class_type": "LoadImage",
    "_meta": {
      "title": "Load Image"
    }
  },
  "66": {
    "inputs": {
      "lora_name": "extract-outfit_v3.safetensors",
      "strength_model": 1,
      "model": [
        "4",
        0
      ]
    },
    "class_type": "LoraLoaderModelOnly",
    "_meta": {
      "title": "LoraLoaderModelOnly"
    }
  },
  "76": {
    "inputs": {
      "number": 720
    },
    "class_type": "StaticNumberInt",
    "_meta": {
      "title": "Static Number Int"
    }
  },
  "77": {
    "inputs": {
      "number": 1280
    },
    "class_type": "StaticNumberInt",
    "_meta": {
      "title": "Static Number Int"
    }
  },
  "78": {
    "inputs": {
      "width": [
        "76",
        0
      ],
      "height": [
        "77",
        0
      ],
      "batch_size": 1
    },
    "class_type": "EmptyLatentImage",
    "_meta": {
      "title": "Empty Latent Image"
    }
  },
  "82": {
    "inputs": {
      "width": [
        "76",
        0
      ],
      "height": [
        "77",
        0
      ],
      "upscale_method": "nearest-exact",
      "keep_proportion": "crop",
      "pad_color": "255,255,255",
      "crop_position": "center",
      "divisible_by": 2,
      "device": "cpu",
      "image": [
        "15",
        0
      ]
    },
    "class_type": "ImageResizeKJv2",
    "_meta": {
      "title": "Resize Image v2"
    }
  },
  "83": {
    "inputs": {
      "images": [
        "82",
        0
      ]
    },
    "class_type": "PreviewImage",
    "_meta": {
      "title": "Preview Image"
    }
  },
  "84": {
    "inputs": {
      "width": [
        "76",
        0
      ],
      "height": [
        "77",
        0
      ],
      "upscale_method": "nearest-exact",
      "keep_proportion": "pad",
      "pad_color": "0, 0, 0",
      "crop_position": "center",
      "divisible_by": 2,
      "device": "cpu",
      "image": [
        "64",
        0
      ]
    },
    "class_type": "ImageResizeKJv2",
    "_meta": {
      "title": "Resize Image v2"
    }
  },
  "85": {
    "inputs": {
      "images": [
        "84",
        0
      ]
    },
    "class_type": "PreviewImage",
    "_meta": {
      "title": "Preview Image"
    }
  },
  "86": {
    "inputs": {
      "image1_text": "image1",
      "image2_text": "image2",
      "image3_text": "image3",
      "image4_text": "image4",
      "reel_height": 512,
      "border": 32,
      "image1": [
        "15",
        0
      ],
      "image2": [
        "64",
        0
      ],
      "image3": [
        "8",
        0
      ]
    },
    "class_type": "LayerUtility: ImageReel",
    "_meta": {
      "title": "LayerUtility: Image Reel"
    }
  },
  "87": {
    "inputs": {
      "font_file": "Alibaba-PuHuiTi-Heavy.ttf",
      "font_size": 40,
      "border": 32,
      "color_theme": "light",
      "reel_1": [
        "86",
        0
      ]
    },
    "class_type": "LayerUtility: ImageReelComposit",
    "_meta": {
      "title": "LayerUtility: Image Reel Composit"
    }
  },
  "88": {
    "inputs": {
      "filename_prefix": "vtonresult/vton",
      "images": [
        "87",
        0
      ]
    },
    "class_type": "SaveImage",
    "_meta": {
      "title": "Save Image"
    }
  }
}
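vton_cloth.json is the pose-free variant of the same graph: node 21's Chinese prompt reads, roughly, "The girl in image 2 wears the clothes from image 1" (the run of \n characters is present in the committed string), node 11 receives only image1 (the garment, 64 → 84) and image2 (the person, 15 → 82), and the comparison reel (node 86) lays out person, garment, and the decoded result (node 8).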
@@ -251,7 +251,8 @@ export async function convertImageWithMultipleFile(


export async function convertImageVton(
-  srcFiles: string[],
+  personFile: string,
+  clothFile: string,
  outputFile: string,
  comfyBaseUrl: string,
  comfyOutputDir: string,
@@ -261,15 +262,73 @@ export async function convertImageVton(
  const COMFY_OUTPUT_DIR = comfyOutputDir;
  let workflow;

-  workflow = JSON.parse(await fs.readFile('src/comfyworkflows/edit_image_vton.json', 'utf-8'));
+  workflow = JSON.parse(await fs.readFile('src/comfyworkflows/vton.json', 'utf-8'));
  workflow['76']['inputs']['number'] = size.width;
  workflow['77']['inputs']['number'] = size.height;

-  if (srcFiles[0])
-    workflow['64']['inputs']['image'] = srcFiles[0];
+  workflow['15']['inputs']['image'] = personFile;
+  workflow['64']['inputs']['image'] = clothFile;

-  if (srcFiles[1])
-    workflow['15']['inputs']['image'] = srcFiles[1];
  const response = await axios.post(`${COMFY_BASE_URL}/prompt`, { prompt: workflow });
  const promptId = response.data.prompt_id;

+  let history;
+  do {
+    await new Promise(resolve => setTimeout(resolve, 1000));
+    const historyResponse = await axios.get(`${COMFY_BASE_URL}/history/${promptId}`);
+    history = historyResponse.data[promptId];
+  } while (!history || Object.keys(history.outputs).length === 0);
+
+  const files = await fs.readdir(COMFY_OUTPUT_DIR!);
+  const generatedFiles = files.filter(file => file.startsWith('qwenedit'));
+
+  const fileStats = await Promise.all(
+    generatedFiles.map(async (file) => {
+      const stat = await fs.stat(path.join(COMFY_OUTPUT_DIR!, file));
+      return { file, mtime: stat.mtime };
+    })
+  );
+
+  fileStats.sort((a, b) => b.mtime.getTime() - a.mtime.getTime());
+
+  const latestFile = fileStats[0].file;
+  const newFilePath = path.resolve('./generated', outputFile);
+
+  await fs.mkdir('./generated', { recursive: true });
+
+  const sourcePath = path.join(COMFY_OUTPUT_DIR!, latestFile);
+  try {
+    await fs.unlink(newFilePath);
+  } catch (err) {
+    // ignore if not exists
+  }
+
+  await fs.copyFile(sourcePath, newFilePath);
+
+  return newFilePath;
+}
+
+
+export async function convertImageVtonPose(
+  personFile: string,
+  clothFile: string,
+  poseFile: string,
+  outputFile: string,
+  comfyBaseUrl: string,
+  comfyOutputDir: string,
+  size: ImageSize = { width: 720, height: 1280 }
+): Promise<string> {
+  const COMFY_BASE_URL = comfyBaseUrl.replace(/\/$/, '');
+  const COMFY_OUTPUT_DIR = comfyOutputDir;
+  let workflow;
+
+  workflow = JSON.parse(await fs.readFile('src/comfyworkflows/vton.json', 'utf-8'));
+  workflow['76']['inputs']['number'] = size.width;
+  workflow['77']['inputs']['number'] = size.height;
+
+  workflow['15']['inputs']['image'] = personFile;
+  workflow['64']['inputs']['image'] = clothFile;
+  workflow['68']['inputs']['image'] = poseFile;
+
+  const response = await axios.post(`${COMFY_BASE_URL}/prompt`, { prompt: workflow });
+  const promptId = response.data.prompt_id;
+
@@ -5,45 +5,79 @@ import * as dotenv from 'dotenv';

dotenv.config();

-const clothesDir = 'C:\\Users\\ken\\Desktop\\VTON\\clothes';
-const modelPath = 'C:\\Users\\ken\\Desktop\\VTON\\models\\Jessica_body.png';
-const posesDir = 'C:\\Users\\ken\\Desktop\\VTON\\poses';
+const modelsBodyDir = 'D:\\CatsEye\\long videos\\vton-demo\\VTON\\models_body';
+const clothesDir = 'D:\\CatsEye\\long videos\\vton-demo\\VTON\\clothes';
+const posesDir = 'D:\\CatsEye\\long videos\\vton-demo\\VTON\\poses';
const outputDir = 'generated';

const comfyBaseUrl = process.env.SERVER1_COMFY_BASE_URL;
const comfyOutputDir = process.env.SERVER1_COMFY_OUTPUT_DIR;

+function getNextIndex(directory: string): number {
+  if (!fs.existsSync(directory)) {
+    fs.mkdirSync(directory, { recursive: true });
+    return 0;
+  }
+  const files = fs.readdirSync(directory);
+  const vtonFiles = files.filter(file => file.startsWith('vton_') && file.endsWith('.png'));
+  if (vtonFiles.length === 0) {
+    return 0;
+  }
+  const indices = vtonFiles.map(file => {
+    const match = file.match(/vton_(\d+)\.png/);
+    return match ? parseInt(match[1], 10) : -1;
+  });
+  return Math.max(...indices) + 1;
+}
+
+function getRandomFile(directory: string): string {
+  const files = fs.readdirSync(directory).filter(file => /\.(jpg|png|jpeg)$/i.test(file));
+  if (files.length === 0) {
+    throw new Error(`No image files found in directory: ${directory}`);
+  }
+  const randomFile = files[Math.floor(Math.random() * files.length)];
+  return path.join(directory, randomFile);
+}
+
async function generateVtonImages() {
  if (!comfyBaseUrl || !comfyOutputDir) {
    throw new Error("ComfyUI URL or Output Directory is not set in environment variables.");
  }

-  const clothesFiles = fs.readdirSync(clothesDir).filter(file => /\.(jpg|png|jpeg)$/i.test(file));
-  const poseFiles = fs.readdirSync(posesDir).filter(file => /\.(jpg|png|jpeg)$/i.test(file));
+  let index = getNextIndex(outputDir);

  if (!fs.existsSync(outputDir)) {
    fs.mkdirSync(outputDir);
  }
+  const comfyInputDir = comfyOutputDir.replace("output", "input");

-  for (let i = 0; i < clothesFiles.length; i++) {
-    const clothFile = clothesFiles[i];
-    const clothPath = path.join(clothesDir, clothFile);
+  while (true) { // Infinite loop
    try {
+      const personFilePath = getRandomFile(modelsBodyDir);
+      const clothFilePath = getRandomFile(clothesDir);
+      const poseFilePath = getRandomFile(posesDir);

-      const randomPoseFile = poseFiles[Math.floor(Math.random() * poseFiles.length)];
-      const posePath = path.join(posesDir, randomPoseFile);
+      const personFileName = path.basename(personFilePath);
+      const clothFileName = path.basename(clothFilePath);
+      const poseFileName = path.basename(poseFilePath);

-      console.log(`Processing cloth: ${clothFile} with pose: ${randomPoseFile}`);
+      fs.copyFileSync(personFilePath, path.join(comfyInputDir, personFileName));
+      fs.copyFileSync(clothFilePath, path.join(comfyInputDir, clothFileName));
+      fs.copyFileSync(poseFilePath, path.join(comfyInputDir, poseFileName));

-      const files = [modelPath, clothPath, posePath];
-      const prompt = "change clothes of image1 with image2";
-      const outputFilename = `model_${i}.png`;
+      console.log(`Processing person: ${personFileName}, cloth: ${clothFileName}, pose: ${poseFileName}`);

-      const generatedImagePath = await convertImageVton(files, outputFilename, comfyBaseUrl, comfyOutputDir, { width: 720, height: 1280 });
+      const outputFilename = `vton_${index}.png`;

+      const generatedImagePath = await convertImageVton(personFileName, clothFileName, poseFileName, outputFilename, comfyBaseUrl, comfyOutputDir, { width: 720, height: 1280 });

      if (generatedImagePath) {
        console.log(`Generated image saved to ${generatedImagePath}`);
+        index++;
      } else {
-        console.error(`Failed to generate image for ${clothFile}`);
+        console.error(`Failed to generate image for index ${index}`);
      }
    } catch (error) {
      console.error("An error occurred during image generation:", error);
      // Optional: wait for a bit before retrying to avoid spamming errors
      await new Promise(resolve => setTimeout(resolve, 5000));
    }
  }
}
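One loose end worth noting: the call above passes a pose file, which matches convertImageVtonPose's parameter list rather than the reworked convertImageVton(personFile, clothFile, outputFile, ...) signature earlier in this commit. The diff also never shows how the script is started; a minimal, hypothetical entry point would be:

// Hypothetical entry point; not part of this commit.
generateVtonImages().catch(error => {
  console.error("VTON batch generation aborted:", error);
  process.exit(1);
});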