{
  "1": {
    "inputs": {
      "unet_name": "qwen_image_edit_2509_fp8_e4m3fn.safetensors",
      "weight_dtype": "default"
    },
    "class_type": "UNETLoader",
    "_meta": {
      "title": "Load Diffusion Model"
    }
  },
  "2": {
    "inputs": {
      "clip_name": "qwen_2.5_vl_7b_fp8_scaled.safetensors",
      "type": "qwen_image",
      "device": "default"
    },
    "class_type": "CLIPLoader",
    "_meta": {
      "title": "Load CLIP"
    }
  },
  "3": {
    "inputs": {
      "vae_name": "qwen_image_vae.safetensors"
    },
    "class_type": "VAELoader",
    "_meta": {
      "title": "Load VAE"
    }
  },
  "4": {
    "inputs": {
      "lora_name": "Qwen-Image-Lightning-8steps-V2.0.safetensors",
      "strength_model": 1,
      "model": [
        "1",
        0
      ]
    },
    "class_type": "LoraLoaderModelOnly",
    "_meta": {
      "title": "LoraLoaderModelOnly"
    }
  },
  "5": {
    "inputs": {
      "conditioning": [
        "11",
        0
      ]
    },
    "class_type": "ConditioningZeroOut",
    "_meta": {
      "title": "ConditioningZeroOut"
    }
  },
  "7": {
    "inputs": {
      "seed": 1088883674457465,
      "steps": 8,
      "cfg": 1,
      "sampler_name": "euler",
      "scheduler": "beta",
      "denoise": 1,
      "model": [
        "66",
        0
      ],
      "positive": [
        "11",
        0
      ],
      "negative": [
        "5",
        0
      ],
      "latent_image": [
        "11",
        6
      ]
    },
    "class_type": "KSampler",
    "_meta": {
      "title": "KSampler"
    }
  },
  "8": {
    "inputs": {
      "samples": [
        "7",
        0
      ],
      "vae": [
        "3",
        0
      ]
    },
    "class_type": "VAEDecode",
    "_meta": {
      "title": "VAE Decode"
    }
  },
  "11": {
    "inputs": {
      "prompt": [
        "21",
        0
      ],
      "enable_resize": true,
      "enable_vl_resize": true,
      "upscale_method": "lanczos",
      "crop": "disabled",
      "instruction": "<|im_start|>system\nDescribe the key features of the input image (color, shape, size, texture, objects, background), then explain how the user's text instruction should alter or modify the image. Generate a new image that meets the user's requirements while maintaining consistency with the original input where appropriate.<|im_end|>\n<|im_start|>user\n{}<|im_end|>\n<|im_start|>assistant\n",
      "clip": [
        "2",
        0
      ],
      "vae": [
        "3",
        0
      ],
      "image1": [
        "24",
        0
      ]
    },
    "class_type": "TextEncodeQwenImageEditPlus_lrzjason",
    "_meta": {
      "title": "TextEncodeQwenImageEditPlus 小志Jason(xiaozhijason)"
    }
  },
  "20": {
    "inputs": {
      "filename_prefix": "qwenedit",
      "images": [
        "8",
        0
      ]
    },
    "class_type": "SaveImage",
    "_meta": {
      "title": "Save Image"
    }
  },
  "21": {
    "inputs": {
      "value": "extract the outfit onto a white background"
    },
    "class_type": "PrimitiveStringMultiline",
    "_meta": {
      "title": "String (Multiline)"
    }
  },
  "24": {
    "inputs": {
      "measurement": "pixels",
      "width": 720,
      "height": 1280,
      "fit": "contain",
      "method": "nearest-exact",
      "image": [
        "64",
        0
      ]
    },
    "class_type": "Image Resize (rgthree)",
    "_meta": {
      "title": "Image Resize (rgthree)"
    }
  },
  "64": {
    "inputs": {
      "image": "3096293489212792_1758825204441_2.png"
    },
    "class_type": "LoadImage",
    "_meta": {
      "title": "Load Image"
    }
  },
  "65": {
    "inputs": {
      "images": [
        "24",
        0
      ]
    },
    "class_type": "PreviewImage",
    "_meta": {
      "title": "Preview Image"
    }
  },
  "66": {
    "inputs": {
      "lora_name": "extract-outfit_v3.safetensors",
      "strength_model": 1,
      "model": [
        "4",
        0
      ]
    },
    "class_type": "LoraLoaderModelOnly",
    "_meta": {
      "title": "LoraLoaderModelOnly"
    }
  }
}