Compare commits

..

3 Commits

5 changed files with 967 additions and 49 deletions

View File

@@ -57,14 +57,14 @@
 },
 "7": {
 "inputs": {
-"seed": 799784211855929,
+"seed": 506786026379830,
 "steps": 8,
 "cfg": 1,
 "sampler_name": "euler",
 "scheduler": "beta",
 "denoise": 1,
 "model": [
-"66",
+"140",
 0
 ],
 "positive": [
@@ -136,7 +136,7 @@
 },
 "15": {
 "inputs": {
-"image": "cloth_0001.png"
+"image": "cloth_0026.png"
 },
 "class_type": "LoadImage",
 "_meta": {
@@ -145,7 +145,7 @@
 },
 "21": {
 "inputs": {
-"value": "change tops and bottoms of image1 with image2, change hairstyle to bun"
+"value": "change clothes of image1 to image2, remove the cap from head"
 },
 "class_type": "PrimitiveStringMultiline",
 "_meta": {
@@ -154,30 +154,16 @@
 },
 "64": {
 "inputs": {
-"image": "Lauren_body.png"
+"image": "Courtney_body.png"
 },
 "class_type": "LoadImage",
 "_meta": {
 "title": "Load model"
 }
 },
-"66": {
-"inputs": {
-"lora_name": "extract-outfit_v3.safetensors",
-"strength_model": 1,
-"model": [
-"4",
-0
-]
-},
-"class_type": "LoraLoaderModelOnly",
-"_meta": {
-"title": "LoraLoaderModelOnly"
-}
-},
 "76": {
 "inputs": {
-"number": 720
+"number": 832
 },
 "class_type": "StaticNumberInt",
 "_meta": {
@@ -186,7 +172,7 @@
 },
 "77": {
 "inputs": {
-"number": 1280
+"number": 1248
 },
 "class_type": "StaticNumberInt",
 "_meta": {
@@ -259,7 +245,7 @@
 0
 ],
 "upscale_method": "nearest-exact",
-"keep_proportion": "pad",
+"keep_proportion": "crop",
 "pad_color": "0, 0, 0",
 "crop_position": "center",
 "divisible_by": 2,
@@ -310,5 +296,59 @@
 "_meta": {
 "title": "Preview Image"
 }
+},
+"136": {
+"inputs": {
+"image": "281543721672978_1758880135639_0.png"
+},
+"class_type": "LoadImage",
+"_meta": {
+"title": "Load Image"
+}
+},
+"137": {
+"inputs": {
+"detect_hand": "enable",
+"detect_body": "enable",
+"detect_face": "enable",
+"resolution": 512,
+"bbox_detector": "yolox_l.onnx",
+"pose_estimator": "dw-ll_ucoco_384_bs5.torchscript.pt",
+"scale_stick_for_xinsr_cn": "disable",
+"image": [
+"136",
+0
+]
+},
+"class_type": "DWPreprocessor",
+"_meta": {
+"title": "DWPose Estimator"
+}
+},
+"139": {
+"inputs": {
+"images": [
+"137",
+0
+]
+},
+"class_type": "PreviewImage",
+"_meta": {
+"title": "Preview Image"
+}
+},
+"140": {
+"inputs": {
+"lora_name": "Try_On_Qwen_Edit_Lora.safetensors",
+"strength_model": 1,
+"model": [
+"4",
+0
+]
+},
+"class_type": "LoraLoaderModelOnly",
+"_meta": {
+"title": "LoraLoaderModelOnly"
+}
 }
 }
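A note on the rewiring above: ComfyUI API-format workflows encode links as two-element arrays of [upstream node id, output slot], so changing the KSampler's "model" input from ["66", 0] to ["140", 0] is what swaps the extract-outfit LoRA for the new try-on LoRA. A minimal TypeScript sketch of applying this kind of patch before queueing a job (the file path and helper name are hypothetical; the node ids are taken from the diff above):

// patchWorkflow.ts (hypothetical helper; node ids come from the diff above)
import { promises as fs } from 'fs';

async function loadPatchedWorkflow(workflowPath: string, clothImage: string): Promise<Record<string, any>> {
  const workflow = JSON.parse(await fs.readFile(workflowPath, 'utf-8'));
  // Point the KSampler (node "7") at output 0 of the try-on LoRA loader (node "140")
  workflow['7']['inputs']['model'] = ['140', 0];
  // Fresh seed per run, mirroring the seed change in this commit
  workflow['7']['inputs']['seed'] = Math.floor(Math.random() * Number.MAX_SAFE_INTEGER);
  // Garment reference image loaded by node "15"
  workflow['15']['inputs']['image'] = clothImage;
  return workflow;
}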

View File

@@ -0,0 +1,425 @@
{
"1": {
"inputs": {
"unet_name": "qwen_image_edit_2509_fp8_e4m3fn.safetensors",
"weight_dtype": "default"
},
"class_type": "UNETLoader",
"_meta": {
"title": "Load Diffusion Model"
}
},
"2": {
"inputs": {
"clip_name": "qwen_2.5_vl_7b_fp8_scaled.safetensors",
"type": "qwen_image",
"device": "default"
},
"class_type": "CLIPLoader",
"_meta": {
"title": "Load CLIP"
}
},
"3": {
"inputs": {
"vae_name": "qwen_image_vae.safetensors"
},
"class_type": "VAELoader",
"_meta": {
"title": "Load VAE"
}
},
"4": {
"inputs": {
"lora_name": "Qwen-Image-Lightning-8steps-V2.0.safetensors",
"strength_model": 1,
"model": [
"1",
0
]
},
"class_type": "LoraLoaderModelOnly",
"_meta": {
"title": "LoraLoaderModelOnly"
}
},
"5": {
"inputs": {
"conditioning": [
"11",
0
]
},
"class_type": "ConditioningZeroOut",
"_meta": {
"title": "ConditioningZeroOut"
}
},
"7": {
"inputs": {
"seed": 559577834683401,
"steps": 8,
"cfg": 1,
"sampler_name": "euler",
"scheduler": "beta",
"denoise": 1,
"model": [
"66",
0
],
"positive": [
"11",
0
],
"negative": [
"5",
0
],
"latent_image": [
"11",
6
]
},
"class_type": "KSampler",
"_meta": {
"title": "KSampler"
}
},
"8": {
"inputs": {
"samples": [
"7",
0
],
"vae": [
"3",
0
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"11": {
"inputs": {
"prompt": [
"21",
0
],
"enable_resize": false,
"enable_vl_resize": false,
"upscale_method": "lanczos",
"crop": "disabled",
"instruction": "<|im_start|>system\nDescribe the key features of the input image (color, shape, size, texture, objects, background), then explain how the user's text instruction should alter or modify the image. Generate a new image that meets the user's requirements while maintaining consistency with the original input where appropriate.<|im_end|>\n<|im_start|>user\n{}<|im_end|>\n<|im_start|>assistant\n",
"clip": [
"2",
0
],
"vae": [
"3",
0
],
"image1": [
"84",
0
],
"image2": [
"82",
0
],
"image3": [
"81",
0
]
},
"class_type": "TextEncodeQwenImageEditPlus_lrzjason",
"_meta": {
"title": "TextEncodeQwenImageEditPlus 小志Jason(xiaozhijason)"
}
},
"15": {
"inputs": {
"image": "Allison_body (1).png"
},
"class_type": "LoadImage",
"_meta": {
"title": "Load Image"
}
},
"20": {
"inputs": {
"filename_prefix": "qwenedit",
"images": [
"8",
0
]
},
"class_type": "SaveImage",
"_meta": {
"title": "Save Image"
}
},
"21": {
"inputs": {
"value": "图2中的女孩穿着图1的衣服并以图3的姿势站立。背景保持浅灰色。"
},
"class_type": "PrimitiveStringMultiline",
"_meta": {
"title": "String (Multiline)"
}
},
"64": {
"inputs": {
"image": "cloth_0111.png"
},
"class_type": "LoadImage",
"_meta": {
"title": "Load Image"
}
},
"66": {
"inputs": {
"lora_name": "extract-outfit_v3.safetensors",
"strength_model": 1,
"model": [
"4",
0
]
},
"class_type": "LoraLoaderModelOnly",
"_meta": {
"title": "LoraLoaderModelOnly"
}
},
"67": {
"inputs": {
"detect_hand": "enable",
"detect_body": "enable",
"detect_face": "enable",
"resolution": 512,
"bbox_detector": "yolox_l.onnx",
"pose_estimator": "dw-ll_ucoco_384_bs5.torchscript.pt",
"scale_stick_for_xinsr_cn": "disable",
"image": [
"68",
0
]
},
"class_type": "DWPreprocessor",
"_meta": {
"title": "DWPose Estimator"
}
},
"68": {
"inputs": {
"image": "633387441703331_1758877367350_1.png"
},
"class_type": "LoadImage",
"_meta": {
"title": "Load Image"
}
},
"69": {
"inputs": {
"images": [
"81",
0
]
},
"class_type": "PreviewImage",
"_meta": {
"title": "Preview Image"
}
},
"76": {
"inputs": {
"number": 720
},
"class_type": "StaticNumberInt",
"_meta": {
"title": "Static Number Int"
}
},
"77": {
"inputs": {
"number": 1280
},
"class_type": "StaticNumberInt",
"_meta": {
"title": "Static Number Int"
}
},
"78": {
"inputs": {
"width": [
"76",
0
],
"height": [
"77",
0
],
"batch_size": 1
},
"class_type": "EmptyLatentImage",
"_meta": {
"title": "Empty Latent Image"
}
},
"81": {
"inputs": {
"width": 480,
"height": 962,
"upscale_method": "nearest-exact",
"keep_proportion": "pad",
"pad_color": "0, 0, 0",
"crop_position": "center",
"divisible_by": 2,
"device": "cpu",
"image": [
"67",
0
]
},
"class_type": "ImageResizeKJv2",
"_meta": {
"title": "Resize Image v2"
}
},
"82": {
"inputs": {
"width": [
"76",
0
],
"height": [
"77",
0
],
"upscale_method": "nearest-exact",
"keep_proportion": "crop",
"pad_color": "255,255,255",
"crop_position": "center",
"divisible_by": 2,
"device": "cpu",
"image": [
"15",
0
]
},
"class_type": "ImageResizeKJv2",
"_meta": {
"title": "Resize Image v2"
}
},
"83": {
"inputs": {
"images": [
"82",
0
]
},
"class_type": "PreviewImage",
"_meta": {
"title": "Preview Image"
}
},
"84": {
"inputs": {
"width": [
"76",
0
],
"height": [
"77",
0
],
"upscale_method": "nearest-exact",
"keep_proportion": "pad",
"pad_color": "0, 0, 0",
"crop_position": "center",
"divisible_by": 2,
"device": "cpu",
"image": [
"64",
0
]
},
"class_type": "ImageResizeKJv2",
"_meta": {
"title": "Resize Image v2"
}
},
"85": {
"inputs": {
"images": [
"84",
0
]
},
"class_type": "PreviewImage",
"_meta": {
"title": "Preview Image"
}
},
"86": {
"inputs": {
"image1_text": "image1",
"image2_text": "image2",
"image3_text": "image3",
"image4_text": "image4",
"reel_height": 512,
"border": 32,
"image1": [
"15",
0
],
"image2": [
"64",
0
],
"image3": [
"81",
0
],
"image4": [
"8",
0
]
},
"class_type": "LayerUtility: ImageReel",
"_meta": {
"title": "LayerUtility: Image Reel"
}
},
"87": {
"inputs": {
"font_file": "Alibaba-PuHuiTi-Heavy.ttf",
"font_size": 40,
"border": 32,
"color_theme": "light",
"reel_1": [
"86",
0
]
},
"class_type": "LayerUtility: ImageReelComposit",
"_meta": {
"title": "LayerUtility: Image Reel Composit"
}
},
"88": {
"inputs": {
"filename_prefix": "vtonresult/vton",
"images": [
"87",
0
]
},
"class_type": "SaveImage",
"_meta": {
"title": "Save Image"
}
}
}
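One wiring detail worth noting in the file above: the KSampler's latent_image comes from output slot 6 of the custom TextEncodeQwenImageEditPlus node "11", so the EmptyLatentImage node "78" is never consumed downstream. A small sketch of a link checker that would surface dangling nodes like that (an illustrative utility, not part of this repo):

// workflowLint.ts (illustrative utility, not part of this repo)
import { promises as fs } from 'fs';

type Workflow = Record<string, { inputs: Record<string, unknown>; class_type: string }>;

async function lintWorkflow(file: string): Promise<void> {
  const wf: Workflow = JSON.parse(await fs.readFile(file, 'utf-8'));
  const consumed = new Set<string>();
  for (const [id, node] of Object.entries(wf)) {
    for (const [key, value] of Object.entries(node.inputs)) {
      // Links are encoded as ["upstreamId", outputSlot]
      if (Array.isArray(value) && typeof value[0] === 'string') {
        if (!wf[value[0]]) console.warn(`node ${id}.${key} references missing node ${value[0]}`);
        consumed.add(value[0]);
      }
    }
  }
  // Terminal nodes (savers/previews) legitimately have no consumers
  for (const id of Object.keys(wf)) {
    if (!consumed.has(id) && !['SaveImage', 'PreviewImage'].includes(wf[id].class_type)) {
      console.warn(`node ${id} (${wf[id].class_type}) has no consumers`);
    }
  }
}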

View File

@@ -0,0 +1,357 @@
{
"1": {
"inputs": {
"unet_name": "qwen_image_edit_2509_fp8_e4m3fn.safetensors",
"weight_dtype": "default"
},
"class_type": "UNETLoader",
"_meta": {
"title": "Load Diffusion Model"
}
},
"2": {
"inputs": {
"clip_name": "qwen_2.5_vl_7b_fp8_scaled.safetensors",
"type": "qwen_image",
"device": "default"
},
"class_type": "CLIPLoader",
"_meta": {
"title": "Load CLIP"
}
},
"3": {
"inputs": {
"vae_name": "qwen_image_vae.safetensors"
},
"class_type": "VAELoader",
"_meta": {
"title": "Load VAE"
}
},
"4": {
"inputs": {
"lora_name": "Qwen-Image-Lightning-8steps-V2.0.safetensors",
"strength_model": 1,
"model": [
"1",
0
]
},
"class_type": "LoraLoaderModelOnly",
"_meta": {
"title": "LoraLoaderModelOnly"
}
},
"5": {
"inputs": {
"conditioning": [
"11",
0
]
},
"class_type": "ConditioningZeroOut",
"_meta": {
"title": "ConditioningZeroOut"
}
},
"7": {
"inputs": {
"seed": 559577834683401,
"steps": 8,
"cfg": 1,
"sampler_name": "euler",
"scheduler": "beta",
"denoise": 1,
"model": [
"66",
0
],
"positive": [
"11",
0
],
"negative": [
"5",
0
],
"latent_image": [
"11",
6
]
},
"class_type": "KSampler",
"_meta": {
"title": "KSampler"
}
},
"8": {
"inputs": {
"samples": [
"7",
0
],
"vae": [
"3",
0
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"11": {
"inputs": {
"prompt": [
"21",
0
],
"enable_resize": false,
"enable_vl_resize": false,
"upscale_method": "lanczos",
"crop": "disabled",
"instruction": "<|im_start|>system\nDescribe the key features of the input image (color, shape, size, texture, objects, background), then explain how the user's text instruction should alter or modify the image. Generate a new image that meets the user's requirements while maintaining consistency with the original input where appropriate.<|im_end|>\n<|im_start|>user\n{}<|im_end|>\n<|im_start|>assistant\n",
"clip": [
"2",
0
],
"vae": [
"3",
0
],
"image1": [
"84",
0
],
"image2": [
"82",
0
]
},
"class_type": "TextEncodeQwenImageEditPlus_lrzjason",
"_meta": {
"title": "TextEncodeQwenImageEditPlus 小志Jason(xiaozhijason)"
}
},
"15": {
"inputs": {
"image": "Allison_body (1).png"
},
"class_type": "LoadImage",
"_meta": {
"title": "Load Image"
}
},
"20": {
"inputs": {
"filename_prefix": "qwenedit",
"images": [
"8",
0
]
},
"class_type": "SaveImage",
"_meta": {
"title": "Save Image"
}
},
"21": {
"inputs": {
"value": "图2中的女孩穿着图1的衣服\n\n\n\n\n\n"
},
"class_type": "PrimitiveStringMultiline",
"_meta": {
"title": "String (Multiline)"
}
},
"64": {
"inputs": {
"image": "cloth_0111.png"
},
"class_type": "LoadImage",
"_meta": {
"title": "Load Image"
}
},
"66": {
"inputs": {
"lora_name": "extract-outfit_v3.safetensors",
"strength_model": 1,
"model": [
"4",
0
]
},
"class_type": "LoraLoaderModelOnly",
"_meta": {
"title": "LoraLoaderModelOnly"
}
},
"76": {
"inputs": {
"number": 720
},
"class_type": "StaticNumberInt",
"_meta": {
"title": "Static Number Int"
}
},
"77": {
"inputs": {
"number": 1280
},
"class_type": "StaticNumberInt",
"_meta": {
"title": "Static Number Int"
}
},
"78": {
"inputs": {
"width": [
"76",
0
],
"height": [
"77",
0
],
"batch_size": 1
},
"class_type": "EmptyLatentImage",
"_meta": {
"title": "Empty Latent Image"
}
},
"82": {
"inputs": {
"width": [
"76",
0
],
"height": [
"77",
0
],
"upscale_method": "nearest-exact",
"keep_proportion": "crop",
"pad_color": "255,255,255",
"crop_position": "center",
"divisible_by": 2,
"device": "cpu",
"image": [
"15",
0
]
},
"class_type": "ImageResizeKJv2",
"_meta": {
"title": "Resize Image v2"
}
},
"83": {
"inputs": {
"images": [
"82",
0
]
},
"class_type": "PreviewImage",
"_meta": {
"title": "Preview Image"
}
},
"84": {
"inputs": {
"width": [
"76",
0
],
"height": [
"77",
0
],
"upscale_method": "nearest-exact",
"keep_proportion": "pad",
"pad_color": "0, 0, 0",
"crop_position": "center",
"divisible_by": 2,
"device": "cpu",
"image": [
"64",
0
]
},
"class_type": "ImageResizeKJv2",
"_meta": {
"title": "Resize Image v2"
}
},
"85": {
"inputs": {
"images": [
"84",
0
]
},
"class_type": "PreviewImage",
"_meta": {
"title": "Preview Image"
}
},
"86": {
"inputs": {
"image1_text": "image1",
"image2_text": "image2",
"image3_text": "image3",
"image4_text": "image4",
"reel_height": 512,
"border": 32,
"image1": [
"15",
0
],
"image2": [
"64",
0
],
"image3": [
"8",
0
]
},
"class_type": "LayerUtility: ImageReel",
"_meta": {
"title": "LayerUtility: Image Reel"
}
},
"87": {
"inputs": {
"font_file": "Alibaba-PuHuiTi-Heavy.ttf",
"font_size": 40,
"border": 32,
"color_theme": "light",
"reel_1": [
"86",
0
]
},
"class_type": "LayerUtility: ImageReelComposit",
"_meta": {
"title": "LayerUtility: Image Reel Composit"
}
},
"88": {
"inputs": {
"filename_prefix": "vtonresult/vton",
"images": [
"87",
0
]
},
"class_type": "SaveImage",
"_meta": {
"title": "Save Image"
}
}
}

View File

@@ -251,7 +251,8 @@ export async function convertImageWithMultipleFile(
 export async function convertImageVton(
-  srcFiles: string[],
+  personFile: string,
+  clothFile: string,
   outputFile: string,
   comfyBaseUrl: string,
   comfyOutputDir: string,
@@ -261,15 +262,73 @@ export async function convertImageVton(
   const COMFY_OUTPUT_DIR = comfyOutputDir;
   let workflow;
-  workflow = JSON.parse(await fs.readFile('src/comfyworkflows/edit_image_vton.json', 'utf-8'));
+  workflow = JSON.parse(await fs.readFile('src/comfyworkflows/vton.json', 'utf-8'));
   workflow['76']['inputs']['number'] = size.width;
   workflow['77']['inputs']['number'] = size.height;
-  if (srcFiles[0])
-    workflow['64']['inputs']['image'] = srcFiles[0];
-  if (srcFiles[1])
-    workflow['15']['inputs']['image'] = srcFiles[1];
+  workflow['15']['inputs']['image'] = personFile;
+  workflow['64']['inputs']['image'] = clothFile;
+  const response = await axios.post(`${COMFY_BASE_URL}/prompt`, { prompt: workflow });
+  const promptId = response.data.prompt_id;
+  let history;
+  do {
+    await new Promise(resolve => setTimeout(resolve, 1000));
+    const historyResponse = await axios.get(`${COMFY_BASE_URL}/history/${promptId}`);
+    history = historyResponse.data[promptId];
+  } while (!history || Object.keys(history.outputs).length === 0);
+  const files = await fs.readdir(COMFY_OUTPUT_DIR!);
+  const generatedFiles = files.filter(file => file.startsWith('qwenedit'));
+  const fileStats = await Promise.all(
+    generatedFiles.map(async (file) => {
+      const stat = await fs.stat(path.join(COMFY_OUTPUT_DIR!, file));
+      return { file, mtime: stat.mtime };
+    })
+  );
+  fileStats.sort((a, b) => b.mtime.getTime() - a.mtime.getTime());
+  const latestFile = fileStats[0].file;
+  const newFilePath = path.resolve('./generated', outputFile);
+  await fs.mkdir('./generated', { recursive: true });
+  const sourcePath = path.join(COMFY_OUTPUT_DIR!, latestFile);
+  try {
+    await fs.unlink(newFilePath);
+  } catch (err) {
+    // ignore if not exists
+  }
+  await fs.copyFile(sourcePath, newFilePath);
+  return newFilePath;
+}
+export async function convertImageVtonPose(
+  personFile: string,
+  clothFile: string,
+  poseFile: string,
+  outputFile: string,
+  comfyBaseUrl: string,
+  comfyOutputDir: string,
+  size: ImageSize = { width: 720, height: 1280 }
+): Promise<string> {
+  const COMFY_BASE_URL = comfyBaseUrl.replace(/\/$/, '');
+  const COMFY_OUTPUT_DIR = comfyOutputDir;
+  let workflow;
+  workflow = JSON.parse(await fs.readFile('src/comfyworkflows/vton.json', 'utf-8'));
+  workflow['76']['inputs']['number'] = size.width;
+  workflow['77']['inputs']['number'] = size.height;
+  workflow['15']['inputs']['image'] = personFile;
+  workflow['64']['inputs']['image'] = clothFile;
+  workflow['68']['inputs']['image'] = poseFile;
   const response = await axios.post(`${COMFY_BASE_URL}/prompt`, { prompt: workflow });
   const promptId = response.data.prompt_id;
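For reference, a hedged usage sketch of the two helpers above (the import path and file names are assumed for illustration; the images must already exist in ComfyUI's input directory, and the env vars mirror the ones used elsewhere in this repo):

// usage sketch (import path and file names are illustrative)
import { convertImageVton, convertImageVtonPose } from './convertImage';

async function demo() {
  const base = process.env.SERVER2_COMFY_BASE_URL!;
  const outDir = process.env.SERVER2_COMFY_OUTPUT_DIR!;
  // Person + garment only (nodes "15" and "64" in vton.json)
  await convertImageVton('Courtney_body.png', 'cloth_0026.png', 'vton_demo.png', base, outDir);
  // Person + garment + pose reference (node "68")
  await convertImageVtonPose('Courtney_body.png', 'cloth_0026.png', 'pose_ref.png', 'vton_pose_demo.png', base, outDir, { width: 832, height: 1248 });
}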

View File

@@ -5,48 +5,85 @@ import * as dotenv from 'dotenv';
 dotenv.config();
+<<<<<<< HEAD
+const modelsBodyDir = 'D:\\CatsEye\\long videos\\vton-demo\\VTON\\models_body';
+const clothesDir = 'D:\\CatsEye\\long videos\\vton-demo\\VTON\\clothes';
+const posesDir = 'D:\\CatsEye\\long videos\\vton-demo\\VTON\\poses';
+=======
 const clothesDir = 'C:\\Users\\fm201\\Documents\\VTON\\\clothes';
 const modelPath = 'C:\\Users\\fm201\\Documents\\VTON\\models\\Jessica_body.png';
 const posesDir = 'C:\\Users\\fm201\\Documents\\VTON\\\poses';
+>>>>>>> bdca42e82102a00f771ecf58b4ff0673dbd218af
 const outputDir = 'generated';
 const comfyBaseUrl = process.env.SERVER2_COMFY_BASE_URL;
 const comfyOutputDir = process.env.SERVER2_COMFY_OUTPUT_DIR;
+function getNextIndex(directory: string): number {
+  if (!fs.existsSync(directory)) {
+    fs.mkdirSync(directory, { recursive: true });
+    return 0;
+  }
+  const files = fs.readdirSync(directory);
+  const vtonFiles = files.filter(file => file.startsWith('vton_') && file.endsWith('.png'));
+  if (vtonFiles.length === 0) {
+    return 0;
+  }
+  const indices = vtonFiles.map(file => {
+    const match = file.match(/vton_(\d+)\.png/);
+    return match ? parseInt(match[1], 10) : -1;
+  });
+  return Math.max(...indices) + 1;
+}
+function getRandomFile(directory: string): string {
+  const files = fs.readdirSync(directory).filter(file => /\.(jpg|png|jpeg)$/i.test(file));
+  if (files.length === 0) {
+    throw new Error(`No image files found in directory: ${directory}`);
+  }
+  const randomFile = files[Math.floor(Math.random() * files.length)];
+  return path.join(directory, randomFile);
+}
 async function generateVtonImages() {
   if (!comfyBaseUrl || !comfyOutputDir) {
     throw new Error("ComfyUI URL or Output Directory is not set in environment variables.");
   }
-  const clothesFiles = fs.readdirSync(clothesDir).filter(file => /\.(jpg|png|jpeg)$/i.test(file));
-  const poseFiles = fs.readdirSync(posesDir).filter(file => /\.(jpg|png|jpeg)$/i.test(file));
-  if (!fs.existsSync(outputDir)) {
-    fs.mkdirSync(outputDir);
-  }
-  for (let i = 0; i < clothesFiles.length; i++) {
-    const clothFile = clothesFiles[i];
-    const clothPath = path.join(clothesDir, clothFile);
-    const randomPoseFile = poseFiles[Math.floor(Math.random() * poseFiles.length)];
-    const posePath = path.join(posesDir, randomPoseFile);
-    console.log(`Processing cloth: ${clothFile} with pose: ${randomPoseFile}`);
-    const convertedCloth = await convertImage("Change background to gray of image1", clothPath, comfyBaseUrl, comfyOutputDir, { width: 720, height: 1280 });
-    const files = [modelPath, convertedCloth, posePath];
-    const prompt = "change clothes of image1 with image2";
-    const outputFilename = `model_${i}.png`;
-    const generatedImagePath = await convertImageVton(files, outputFilename, comfyBaseUrl, comfyOutputDir, { width: 720, height: 1280 });
-    if (generatedImagePath) {
-      console.log(`Generated image saved to ${generatedImagePath}`);
-    } else {
-      console.error(`Failed to generate image for ${clothFile}`);
+  let index = getNextIndex(outputDir);
+  const comfyInputDir = comfyOutputDir.replace("output", "input");
+  while (true) { // Infinite loop
+    try {
+      const personFilePath = getRandomFile(modelsBodyDir);
+      const clothFilePath = getRandomFile(clothesDir);
+      const poseFilePath = getRandomFile(posesDir);
+      const personFileName = path.basename(personFilePath);
+      const clothFileName = path.basename(clothFilePath);
+      const poseFileName = path.basename(poseFilePath);
+      fs.copyFileSync(personFilePath, path.join(comfyInputDir, personFileName));
+      fs.copyFileSync(clothFilePath, path.join(comfyInputDir, clothFileName));
+      fs.copyFileSync(poseFilePath, path.join(comfyInputDir, poseFileName));
+      console.log(`Processing person: ${personFileName}, cloth: ${clothFileName}, pose: ${poseFileName}`);
+      const outputFilename = `vton_${index}.png`;
+      const generatedImagePath = await convertImageVton(personFileName, clothFileName, poseFileName, outputFilename, comfyBaseUrl, comfyOutputDir, { width: 720, height: 1280 });
+      if (generatedImagePath) {
+        console.log(`Generated image saved to ${generatedImagePath}`);
+        index++;
+      } else {
+        console.error(`Failed to generate image for index ${index}`);
+      }
+    } catch (error) {
+      console.error("An error occurred during image generation:", error);
+      // Optional: wait for a bit before retrying to avoid spamming errors
+      await new Promise(resolve => setTimeout(resolve, 5000));
     }
   }
 }
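The loop above never returns on its own; the actual entry-point call sits outside the copied hunk, but it would look roughly like this (hypothetical, since the call site is not shown in this diff):

// hypothetical entry point; the real call site is outside this hunk
generateVtonImages().catch(error => {
  console.error('VTON batch run aborted:', error);
  process.exit(1);
});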