Compare commits

...

13 Commits

Author SHA1 Message Date
243f107ef5 Merge branch 'master' of https://git.yasue.org/ken/RandomVideoMaker 2025-10-13 17:10:11 +02:00
9ad4e7972b continuouly image modification 2025-10-13 17:09:58 +02:00
6eec0890a7 save changes 2025-10-10 15:10:26 +02:00
8654d160b6 save changes 2025-10-06 13:12:03 +02:00
4452508dd4 save changes 2025-10-06 00:02:50 +02:00
1697523000 save changes 2025-10-05 15:01:06 +02:00
eee4e6523e save changes 2025-10-05 11:56:31 +02:00
150992aaac photo download 2025-10-02 07:37:41 +02:00
80ddafc4e6 Merge branch 'master' of https://git.yasue.org/ken/RandomVideoMaker 2025-10-02 07:35:44 +02:00
72a995f483 Merge branch 'master' of https://git.yasue.org/ken/RandomVideoMaker 2025-10-02 07:35:43 +02:00
1de8749195 save changes 2025-10-02 07:34:50 +02:00
ad8ca7b997 save changes 2025-10-02 07:34:10 +02:00
bdca42e821 save changes 2025-10-01 07:53:50 +02:00
41 changed files with 6089 additions and 4843 deletions

10
.clinerules/lib.md Normal file
View File

@ -0,0 +1,10 @@
# Library Functions
## PNG Metadata
Use this file `src/lib/util.ts` for embedding and reading JSON data from PNG files.
### Embed JSON to PNG
Use this method `embedJsonToPng(path, obj)`
### Read JSON from PNG
Use this method `readJsonToPng(path)`

View File

@ -14,4 +14,13 @@ Use this file src\lib\openai.ts
- async function callOpenAI(prompt: string): Promise<any>
for just run prompt
- async function callOpenAIWithFile(imagePath: string, prompt: string): Promise<any>
for send file to llm
for send file to llm
Please construct the prompt so it always returns JSON when calling the LLM API to generate text.
If nothing is specified, add the following instruction to the given prompt:
Return the result in this format
{"result":""}
Then extract the result param in the program you generate; don't change the original function.

618
package-lock.json generated
View File

@ -10,15 +10,27 @@
"license": "ISC",
"dependencies": {
"@types/axios": "^0.14.4",
"@types/fs-extra": "^11.0.4",
"@types/pngjs": "^6.0.5",
"@types/sharp": "^0.32.0",
"axios": "^1.11.0",
"dotenv": "^17.2.1",
"fs-extra": "^11.3.2",
"mysql2": "^3.14.3",
"open": "^10.2.0",
"png-chunk-text": "^1.0.0",
"png-chunks-encode": "^1.0.0",
"png-chunks-extract": "^1.0.0",
"pngjs": "^7.0.0",
"puppeteer": "^24.16.2",
"sharp": "^0.34.4",
"uuid": "^11.1.0"
},
"devDependencies": {
"@types/node": "^20.0.0",
"@types/node": "^20.19.19",
"@types/png-chunk-text": "^1.0.3",
"@types/png-chunks-encode": "^1.0.2",
"@types/png-chunks-extract": "^1.0.2",
"ts-node": "^10.9.2",
"typescript": "^5.0.0"
}
@ -56,6 +68,419 @@
"node": ">=12"
}
},
"node_modules/@emnapi/runtime": {
"version": "1.5.0",
"resolved": "https://registry.npmjs.org/@emnapi/runtime/-/runtime-1.5.0.tgz",
"integrity": "sha512-97/BJ3iXHww3djw6hYIfErCZFee7qCtrneuLa20UXFCOTCfBM2cvQHjWJ2EG0s0MtdNwInarqCTz35i4wWXHsQ==",
"optional": true,
"dependencies": {
"tslib": "^2.4.0"
}
},
"node_modules/@img/colour": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/@img/colour/-/colour-1.0.0.tgz",
"integrity": "sha512-A5P/LfWGFSl6nsckYtjw9da+19jB8hkJ6ACTGcDfEJ0aE+l2n2El7dsVM7UVHZQ9s2lmYMWlrS21YLy2IR1LUw==",
"engines": {
"node": ">=18"
}
},
"node_modules/@img/sharp-darwin-arm64": {
"version": "0.34.4",
"resolved": "https://registry.npmjs.org/@img/sharp-darwin-arm64/-/sharp-darwin-arm64-0.34.4.tgz",
"integrity": "sha512-sitdlPzDVyvmINUdJle3TNHl+AG9QcwiAMsXmccqsCOMZNIdW2/7S26w0LyU8euiLVzFBL3dXPwVCq/ODnf2vA==",
"cpu": [
"arm64"
],
"optional": true,
"os": [
"darwin"
],
"engines": {
"node": "^18.17.0 || ^20.3.0 || >=21.0.0"
},
"funding": {
"url": "https://opencollective.com/libvips"
},
"optionalDependencies": {
"@img/sharp-libvips-darwin-arm64": "1.2.3"
}
},
"node_modules/@img/sharp-darwin-x64": {
"version": "0.34.4",
"resolved": "https://registry.npmjs.org/@img/sharp-darwin-x64/-/sharp-darwin-x64-0.34.4.tgz",
"integrity": "sha512-rZheupWIoa3+SOdF/IcUe1ah4ZDpKBGWcsPX6MT0lYniH9micvIU7HQkYTfrx5Xi8u+YqwLtxC/3vl8TQN6rMg==",
"cpu": [
"x64"
],
"optional": true,
"os": [
"darwin"
],
"engines": {
"node": "^18.17.0 || ^20.3.0 || >=21.0.0"
},
"funding": {
"url": "https://opencollective.com/libvips"
},
"optionalDependencies": {
"@img/sharp-libvips-darwin-x64": "1.2.3"
}
},
"node_modules/@img/sharp-libvips-darwin-arm64": {
"version": "1.2.3",
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-darwin-arm64/-/sharp-libvips-darwin-arm64-1.2.3.tgz",
"integrity": "sha512-QzWAKo7kpHxbuHqUC28DZ9pIKpSi2ts2OJnoIGI26+HMgq92ZZ4vk8iJd4XsxN+tYfNJxzH6W62X5eTcsBymHw==",
"cpu": [
"arm64"
],
"optional": true,
"os": [
"darwin"
],
"funding": {
"url": "https://opencollective.com/libvips"
}
},
"node_modules/@img/sharp-libvips-darwin-x64": {
"version": "1.2.3",
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-darwin-x64/-/sharp-libvips-darwin-x64-1.2.3.tgz",
"integrity": "sha512-Ju+g2xn1E2AKO6YBhxjj+ACcsPQRHT0bhpglxcEf+3uyPY+/gL8veniKoo96335ZaPo03bdDXMv0t+BBFAbmRA==",
"cpu": [
"x64"
],
"optional": true,
"os": [
"darwin"
],
"funding": {
"url": "https://opencollective.com/libvips"
}
},
"node_modules/@img/sharp-libvips-linux-arm": {
"version": "1.2.3",
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-arm/-/sharp-libvips-linux-arm-1.2.3.tgz",
"integrity": "sha512-x1uE93lyP6wEwGvgAIV0gP6zmaL/a0tGzJs/BIDDG0zeBhMnuUPm7ptxGhUbcGs4okDJrk4nxgrmxpib9g6HpA==",
"cpu": [
"arm"
],
"optional": true,
"os": [
"linux"
],
"funding": {
"url": "https://opencollective.com/libvips"
}
},
"node_modules/@img/sharp-libvips-linux-arm64": {
"version": "1.2.3",
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-arm64/-/sharp-libvips-linux-arm64-1.2.3.tgz",
"integrity": "sha512-I4RxkXU90cpufazhGPyVujYwfIm9Nk1QDEmiIsaPwdnm013F7RIceaCc87kAH+oUB1ezqEvC6ga4m7MSlqsJvQ==",
"cpu": [
"arm64"
],
"optional": true,
"os": [
"linux"
],
"funding": {
"url": "https://opencollective.com/libvips"
}
},
"node_modules/@img/sharp-libvips-linux-ppc64": {
"version": "1.2.3",
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-ppc64/-/sharp-libvips-linux-ppc64-1.2.3.tgz",
"integrity": "sha512-Y2T7IsQvJLMCBM+pmPbM3bKT/yYJvVtLJGfCs4Sp95SjvnFIjynbjzsa7dY1fRJX45FTSfDksbTp6AGWudiyCg==",
"cpu": [
"ppc64"
],
"optional": true,
"os": [
"linux"
],
"funding": {
"url": "https://opencollective.com/libvips"
}
},
"node_modules/@img/sharp-libvips-linux-s390x": {
"version": "1.2.3",
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-s390x/-/sharp-libvips-linux-s390x-1.2.3.tgz",
"integrity": "sha512-RgWrs/gVU7f+K7P+KeHFaBAJlNkD1nIZuVXdQv6S+fNA6syCcoboNjsV2Pou7zNlVdNQoQUpQTk8SWDHUA3y/w==",
"cpu": [
"s390x"
],
"optional": true,
"os": [
"linux"
],
"funding": {
"url": "https://opencollective.com/libvips"
}
},
"node_modules/@img/sharp-libvips-linux-x64": {
"version": "1.2.3",
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-x64/-/sharp-libvips-linux-x64-1.2.3.tgz",
"integrity": "sha512-3JU7LmR85K6bBiRzSUc/Ff9JBVIFVvq6bomKE0e63UXGeRw2HPVEjoJke1Yx+iU4rL7/7kUjES4dZ/81Qjhyxg==",
"cpu": [
"x64"
],
"optional": true,
"os": [
"linux"
],
"funding": {
"url": "https://opencollective.com/libvips"
}
},
"node_modules/@img/sharp-libvips-linuxmusl-arm64": {
"version": "1.2.3",
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-linuxmusl-arm64/-/sharp-libvips-linuxmusl-arm64-1.2.3.tgz",
"integrity": "sha512-F9q83RZ8yaCwENw1GieztSfj5msz7GGykG/BA+MOUefvER69K/ubgFHNeSyUu64amHIYKGDs4sRCMzXVj8sEyw==",
"cpu": [
"arm64"
],
"optional": true,
"os": [
"linux"
],
"funding": {
"url": "https://opencollective.com/libvips"
}
},
"node_modules/@img/sharp-libvips-linuxmusl-x64": {
"version": "1.2.3",
"resolved": "https://registry.npmjs.org/@img/sharp-libvips-linuxmusl-x64/-/sharp-libvips-linuxmusl-x64-1.2.3.tgz",
"integrity": "sha512-U5PUY5jbc45ANM6tSJpsgqmBF/VsL6LnxJmIf11kB7J5DctHgqm0SkuXzVWtIY90GnJxKnC/JT251TDnk1fu/g==",
"cpu": [
"x64"
],
"optional": true,
"os": [
"linux"
],
"funding": {
"url": "https://opencollective.com/libvips"
}
},
"node_modules/@img/sharp-linux-arm": {
"version": "0.34.4",
"resolved": "https://registry.npmjs.org/@img/sharp-linux-arm/-/sharp-linux-arm-0.34.4.tgz",
"integrity": "sha512-Xyam4mlqM0KkTHYVSuc6wXRmM7LGN0P12li03jAnZ3EJWZqj83+hi8Y9UxZUbxsgsK1qOEwg7O0Bc0LjqQVtxA==",
"cpu": [
"arm"
],
"optional": true,
"os": [
"linux"
],
"engines": {
"node": "^18.17.0 || ^20.3.0 || >=21.0.0"
},
"funding": {
"url": "https://opencollective.com/libvips"
},
"optionalDependencies": {
"@img/sharp-libvips-linux-arm": "1.2.3"
}
},
"node_modules/@img/sharp-linux-arm64": {
"version": "0.34.4",
"resolved": "https://registry.npmjs.org/@img/sharp-linux-arm64/-/sharp-linux-arm64-0.34.4.tgz",
"integrity": "sha512-YXU1F/mN/Wu786tl72CyJjP/Ngl8mGHN1hST4BGl+hiW5jhCnV2uRVTNOcaYPs73NeT/H8Upm3y9582JVuZHrQ==",
"cpu": [
"arm64"
],
"optional": true,
"os": [
"linux"
],
"engines": {
"node": "^18.17.0 || ^20.3.0 || >=21.0.0"
},
"funding": {
"url": "https://opencollective.com/libvips"
},
"optionalDependencies": {
"@img/sharp-libvips-linux-arm64": "1.2.3"
}
},
"node_modules/@img/sharp-linux-ppc64": {
"version": "0.34.4",
"resolved": "https://registry.npmjs.org/@img/sharp-linux-ppc64/-/sharp-linux-ppc64-0.34.4.tgz",
"integrity": "sha512-F4PDtF4Cy8L8hXA2p3TO6s4aDt93v+LKmpcYFLAVdkkD3hSxZzee0rh6/+94FpAynsuMpLX5h+LRsSG3rIciUQ==",
"cpu": [
"ppc64"
],
"optional": true,
"os": [
"linux"
],
"engines": {
"node": "^18.17.0 || ^20.3.0 || >=21.0.0"
},
"funding": {
"url": "https://opencollective.com/libvips"
},
"optionalDependencies": {
"@img/sharp-libvips-linux-ppc64": "1.2.3"
}
},
"node_modules/@img/sharp-linux-s390x": {
"version": "0.34.4",
"resolved": "https://registry.npmjs.org/@img/sharp-linux-s390x/-/sharp-linux-s390x-0.34.4.tgz",
"integrity": "sha512-qVrZKE9Bsnzy+myf7lFKvng6bQzhNUAYcVORq2P7bDlvmF6u2sCmK2KyEQEBdYk+u3T01pVsPrkj943T1aJAsw==",
"cpu": [
"s390x"
],
"optional": true,
"os": [
"linux"
],
"engines": {
"node": "^18.17.0 || ^20.3.0 || >=21.0.0"
},
"funding": {
"url": "https://opencollective.com/libvips"
},
"optionalDependencies": {
"@img/sharp-libvips-linux-s390x": "1.2.3"
}
},
"node_modules/@img/sharp-linux-x64": {
"version": "0.34.4",
"resolved": "https://registry.npmjs.org/@img/sharp-linux-x64/-/sharp-linux-x64-0.34.4.tgz",
"integrity": "sha512-ZfGtcp2xS51iG79c6Vhw9CWqQC8l2Ot8dygxoDoIQPTat/Ov3qAa8qpxSrtAEAJW+UjTXc4yxCjNfxm4h6Xm2A==",
"cpu": [
"x64"
],
"optional": true,
"os": [
"linux"
],
"engines": {
"node": "^18.17.0 || ^20.3.0 || >=21.0.0"
},
"funding": {
"url": "https://opencollective.com/libvips"
},
"optionalDependencies": {
"@img/sharp-libvips-linux-x64": "1.2.3"
}
},
"node_modules/@img/sharp-linuxmusl-arm64": {
"version": "0.34.4",
"resolved": "https://registry.npmjs.org/@img/sharp-linuxmusl-arm64/-/sharp-linuxmusl-arm64-0.34.4.tgz",
"integrity": "sha512-8hDVvW9eu4yHWnjaOOR8kHVrew1iIX+MUgwxSuH2XyYeNRtLUe4VNioSqbNkB7ZYQJj9rUTT4PyRscyk2PXFKA==",
"cpu": [
"arm64"
],
"optional": true,
"os": [
"linux"
],
"engines": {
"node": "^18.17.0 || ^20.3.0 || >=21.0.0"
},
"funding": {
"url": "https://opencollective.com/libvips"
},
"optionalDependencies": {
"@img/sharp-libvips-linuxmusl-arm64": "1.2.3"
}
},
"node_modules/@img/sharp-linuxmusl-x64": {
"version": "0.34.4",
"resolved": "https://registry.npmjs.org/@img/sharp-linuxmusl-x64/-/sharp-linuxmusl-x64-0.34.4.tgz",
"integrity": "sha512-lU0aA5L8QTlfKjpDCEFOZsTYGn3AEiO6db8W5aQDxj0nQkVrZWmN3ZP9sYKWJdtq3PWPhUNlqehWyXpYDcI9Sg==",
"cpu": [
"x64"
],
"optional": true,
"os": [
"linux"
],
"engines": {
"node": "^18.17.0 || ^20.3.0 || >=21.0.0"
},
"funding": {
"url": "https://opencollective.com/libvips"
},
"optionalDependencies": {
"@img/sharp-libvips-linuxmusl-x64": "1.2.3"
}
},
"node_modules/@img/sharp-wasm32": {
"version": "0.34.4",
"resolved": "https://registry.npmjs.org/@img/sharp-wasm32/-/sharp-wasm32-0.34.4.tgz",
"integrity": "sha512-33QL6ZO/qpRyG7woB/HUALz28WnTMI2W1jgX3Nu2bypqLIKx/QKMILLJzJjI+SIbvXdG9fUnmrxR7vbi1sTBeA==",
"cpu": [
"wasm32"
],
"optional": true,
"dependencies": {
"@emnapi/runtime": "^1.5.0"
},
"engines": {
"node": "^18.17.0 || ^20.3.0 || >=21.0.0"
},
"funding": {
"url": "https://opencollective.com/libvips"
}
},
"node_modules/@img/sharp-win32-arm64": {
"version": "0.34.4",
"resolved": "https://registry.npmjs.org/@img/sharp-win32-arm64/-/sharp-win32-arm64-0.34.4.tgz",
"integrity": "sha512-2Q250do/5WXTwxW3zjsEuMSv5sUU4Tq9VThWKlU2EYLm4MB7ZeMwF+SFJutldYODXF6jzc6YEOC+VfX0SZQPqA==",
"cpu": [
"arm64"
],
"optional": true,
"os": [
"win32"
],
"engines": {
"node": "^18.17.0 || ^20.3.0 || >=21.0.0"
},
"funding": {
"url": "https://opencollective.com/libvips"
}
},
"node_modules/@img/sharp-win32-ia32": {
"version": "0.34.4",
"resolved": "https://registry.npmjs.org/@img/sharp-win32-ia32/-/sharp-win32-ia32-0.34.4.tgz",
"integrity": "sha512-3ZeLue5V82dT92CNL6rsal6I2weKw1cYu+rGKm8fOCCtJTR2gYeUfY3FqUnIJsMUPIH68oS5jmZ0NiJ508YpEw==",
"cpu": [
"ia32"
],
"optional": true,
"os": [
"win32"
],
"engines": {
"node": "^18.17.0 || ^20.3.0 || >=21.0.0"
},
"funding": {
"url": "https://opencollective.com/libvips"
}
},
"node_modules/@img/sharp-win32-x64": {
"version": "0.34.4",
"resolved": "https://registry.npmjs.org/@img/sharp-win32-x64/-/sharp-win32-x64-0.34.4.tgz",
"integrity": "sha512-xIyj4wpYs8J18sVN3mSQjwrw7fKUqRw+Z5rnHNCy5fYTxigBz81u5mOMPmFumwjcn8+ld1ppptMBCLic1nz6ig==",
"cpu": [
"x64"
],
"optional": true,
"os": [
"win32"
],
"engines": {
"node": "^18.17.0 || ^20.3.0 || >=21.0.0"
},
"funding": {
"url": "https://opencollective.com/libvips"
}
},
"node_modules/@jridgewell/resolve-uri": {
"version": "3.1.2",
"resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz",
@ -139,15 +564,66 @@
"axios": "*"
}
},
"node_modules/@types/fs-extra": {
"version": "11.0.4",
"resolved": "https://registry.npmjs.org/@types/fs-extra/-/fs-extra-11.0.4.tgz",
"integrity": "sha512-yTbItCNreRooED33qjunPthRcSjERP1r4MqCZc7wv0u2sUkzTFp45tgUfS5+r7FrZPdmCCNflLhVSP/o+SemsQ==",
"dependencies": {
"@types/jsonfile": "*",
"@types/node": "*"
}
},
"node_modules/@types/jsonfile": {
"version": "6.1.4",
"resolved": "https://registry.npmjs.org/@types/jsonfile/-/jsonfile-6.1.4.tgz",
"integrity": "sha512-D5qGUYwjvnNNextdU59/+fI+spnwtTFmyQP0h+PfIOSkNfpU6AOICUOkm4i0OnSk+NyjdPJrxCDro0sJsWlRpQ==",
"dependencies": {
"@types/node": "*"
}
},
"node_modules/@types/node": {
"version": "20.19.11",
"resolved": "https://registry.npmjs.org/@types/node/-/node-20.19.11.tgz",
"integrity": "sha512-uug3FEEGv0r+jrecvUUpbY8lLisvIjg6AAic6a2bSP5OEOLeJsDSnvhCDov7ipFFMXS3orMpzlmi0ZcuGkBbow==",
"devOptional": true,
"version": "20.19.19",
"resolved": "https://registry.npmjs.org/@types/node/-/node-20.19.19.tgz",
"integrity": "sha512-pb1Uqj5WJP7wrcbLU7Ru4QtA0+3kAXrkutGiD26wUKzSMgNNaPARTUDQmElUXp64kh3cWdou3Q0C7qwwxqSFmg==",
"dependencies": {
"undici-types": "~6.21.0"
}
},
"node_modules/@types/png-chunk-text": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/@types/png-chunk-text/-/png-chunk-text-1.0.3.tgz",
"integrity": "sha512-7keEFz73uNJ9Ar1XMCNnHEXT9pICJnouMQCCYgBEmHMgdkXaQzSTmSvr6tUDSqgdEgmlRAxZd97wprgliyZoCg==",
"dev": true
},
"node_modules/@types/png-chunks-encode": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/@types/png-chunks-encode/-/png-chunks-encode-1.0.2.tgz",
"integrity": "sha512-Dxn0aXEcSg1wVeHjvNlygm/+fKBDzWMCdxJYhjGUTeefFW/jYxWcrg+W7ppLBfH44iJMqeVBHtHBwtYQUeYvgw==",
"dev": true
},
"node_modules/@types/png-chunks-extract": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/@types/png-chunks-extract/-/png-chunks-extract-1.0.2.tgz",
"integrity": "sha512-z6djfFIbrrddtunoMJBOPlyZrnmeuG1kkvHUNi2QfpOb+JMMLuLliHHTmMyRi7k7LiTAut0HbdGCF6ibDtQAHQ==",
"dev": true
},
"node_modules/@types/pngjs": {
"version": "6.0.5",
"resolved": "https://registry.npmjs.org/@types/pngjs/-/pngjs-6.0.5.tgz",
"integrity": "sha512-0k5eKfrA83JOZPppLtS2C7OUtyNAl2wKNxfyYl9Q5g9lPkgBl/9hNyAu6HuEH2J4XmIv2znEpkDd0SaZVxW6iQ==",
"dependencies": {
"@types/node": "*"
}
},
"node_modules/@types/sharp": {
"version": "0.32.0",
"resolved": "https://registry.npmjs.org/@types/sharp/-/sharp-0.32.0.tgz",
"integrity": "sha512-OOi3kL+FZDnPhVzsfD37J88FNeZh6gQsGcLc95NbeURRGvmSjeXiDcyWzF2o3yh/gQAUn2uhh/e+CPCa5nwAxw==",
"deprecated": "This is a stub types definition. sharp provides its own type definitions, so you do not need this installed.",
"dependencies": {
"sharp": "*"
}
},
"node_modules/@types/yauzl": {
"version": "2.10.3",
"resolved": "https://registry.npmjs.org/@types/yauzl/-/yauzl-2.10.3.tgz",
@ -455,6 +931,14 @@
}
}
},
"node_modules/crc-32": {
"version": "0.3.0",
"resolved": "https://registry.npmjs.org/crc-32/-/crc-32-0.3.0.tgz",
"integrity": "sha512-kucVIjOmMc1f0tv53BJ/5WIX+MGLcKuoBhnGqQrgKJNqLByb/sVMWfW/Aw6hw0jgcqjJ2pi9E5y32zOIpaUlsA==",
"engines": {
"node": ">=0.8"
}
},
"node_modules/create-require": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/create-require/-/create-require-1.1.1.tgz",
@ -551,6 +1035,14 @@
"node": ">=0.10"
}
},
"node_modules/detect-libc": {
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.1.1.tgz",
"integrity": "sha512-ecqj/sy1jcK1uWrwpR67UhYrIFQ+5WlGxth34WquCbamhFA6hkkwiu37o6J5xCHdo1oixJRfVRw+ywV+Hq/0Aw==",
"engines": {
"node": ">=8"
}
},
"node_modules/devtools-protocol": {
"version": "0.0.1475386",
"resolved": "https://registry.npmjs.org/devtools-protocol/-/devtools-protocol-0.0.1475386.tgz",
@ -781,6 +1273,19 @@
"node": ">= 6"
}
},
"node_modules/fs-extra": {
"version": "11.3.2",
"resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-11.3.2.tgz",
"integrity": "sha512-Xr9F6z6up6Ws+NjzMCZc6WXg2YFRlrLP9NQDO3VQrWrfiojdhS56TzueT88ze0uBdCTwEIhQ3ptnmKeWGFAe0A==",
"dependencies": {
"graceful-fs": "^4.2.0",
"jsonfile": "^6.0.1",
"universalify": "^2.0.0"
},
"engines": {
"node": ">=14.14"
}
},
"node_modules/function-bind": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz",
@ -878,6 +1383,11 @@
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/graceful-fs": {
"version": "4.2.11",
"resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz",
"integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ=="
},
"node_modules/has-symbols": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz",
@ -1056,6 +1566,17 @@
"resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz",
"integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w=="
},
"node_modules/jsonfile": {
"version": "6.2.0",
"resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz",
"integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==",
"dependencies": {
"universalify": "^2.0.0"
},
"optionalDependencies": {
"graceful-fs": "^4.1.6"
}
},
"node_modules/lines-and-columns": {
"version": "1.2.4",
"resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz",
@ -1262,6 +1783,36 @@
"resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz",
"integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA=="
},
"node_modules/png-chunk-text": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/png-chunk-text/-/png-chunk-text-1.0.0.tgz",
"integrity": "sha512-DEROKU3SkkLGWNMzru3xPVgxyd48UGuMSZvioErCure6yhOc/pRH2ZV+SEn7nmaf7WNf3NdIpH+UTrRdKyq9Lw=="
},
"node_modules/png-chunks-encode": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/png-chunks-encode/-/png-chunks-encode-1.0.0.tgz",
"integrity": "sha512-J1jcHgbQRsIIgx5wxW9UmCymV3wwn4qCCJl6KYgEU/yHCh/L2Mwq/nMOkRPtmV79TLxRZj5w3tH69pvygFkDqA==",
"dependencies": {
"crc-32": "^0.3.0",
"sliced": "^1.0.1"
}
},
"node_modules/png-chunks-extract": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/png-chunks-extract/-/png-chunks-extract-1.0.0.tgz",
"integrity": "sha512-ZiVwF5EJ0DNZyzAqld8BP1qyJBaGOFaq9zl579qfbkcmOwWLLO4I9L8i2O4j3HkI6/35i0nKG2n+dZplxiT89Q==",
"dependencies": {
"crc-32": "^0.3.0"
}
},
"node_modules/pngjs": {
"version": "7.0.0",
"resolved": "https://registry.npmjs.org/pngjs/-/pngjs-7.0.0.tgz",
"integrity": "sha512-LKWqWJRhstyYo9pGvgor/ivk2w94eSjE3RGVuzLGlr3NmD8bf7RcYGze1mNdEHRP6TRP6rMuDHk5t44hnTRyow==",
"engines": {
"node": ">=14.19.0"
}
},
"node_modules/progress": {
"version": "2.0.3",
"resolved": "https://registry.npmjs.org/progress/-/progress-2.0.3.tgz",
@ -1386,6 +1937,52 @@
"resolved": "https://registry.npmjs.org/seq-queue/-/seq-queue-0.0.5.tgz",
"integrity": "sha512-hr3Wtp/GZIc/6DAGPDcV4/9WoZhjrkXsi5B/07QgX8tsdc6ilr7BFM6PM6rbdAX1kFSDYeZGLipIZZKyQP0O5Q=="
},
"node_modules/sharp": {
"version": "0.34.4",
"resolved": "https://registry.npmjs.org/sharp/-/sharp-0.34.4.tgz",
"integrity": "sha512-FUH39xp3SBPnxWvd5iib1X8XY7J0K0X7d93sie9CJg2PO8/7gmg89Nve6OjItK53/MlAushNNxteBYfM6DEuoA==",
"hasInstallScript": true,
"dependencies": {
"@img/colour": "^1.0.0",
"detect-libc": "^2.1.0",
"semver": "^7.7.2"
},
"engines": {
"node": "^18.17.0 || ^20.3.0 || >=21.0.0"
},
"funding": {
"url": "https://opencollective.com/libvips"
},
"optionalDependencies": {
"@img/sharp-darwin-arm64": "0.34.4",
"@img/sharp-darwin-x64": "0.34.4",
"@img/sharp-libvips-darwin-arm64": "1.2.3",
"@img/sharp-libvips-darwin-x64": "1.2.3",
"@img/sharp-libvips-linux-arm": "1.2.3",
"@img/sharp-libvips-linux-arm64": "1.2.3",
"@img/sharp-libvips-linux-ppc64": "1.2.3",
"@img/sharp-libvips-linux-s390x": "1.2.3",
"@img/sharp-libvips-linux-x64": "1.2.3",
"@img/sharp-libvips-linuxmusl-arm64": "1.2.3",
"@img/sharp-libvips-linuxmusl-x64": "1.2.3",
"@img/sharp-linux-arm": "0.34.4",
"@img/sharp-linux-arm64": "0.34.4",
"@img/sharp-linux-ppc64": "0.34.4",
"@img/sharp-linux-s390x": "0.34.4",
"@img/sharp-linux-x64": "0.34.4",
"@img/sharp-linuxmusl-arm64": "0.34.4",
"@img/sharp-linuxmusl-x64": "0.34.4",
"@img/sharp-wasm32": "0.34.4",
"@img/sharp-win32-arm64": "0.34.4",
"@img/sharp-win32-ia32": "0.34.4",
"@img/sharp-win32-x64": "0.34.4"
}
},
"node_modules/sliced": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/sliced/-/sliced-1.0.1.tgz",
"integrity": "sha512-VZBmZP8WU3sMOZm1bdgTadsQbcscK0UM8oKxKVBs4XAhUo2Xxzm/OFMGBkPusxw9xL3Uy8LrzEqGqJhclsr0yA=="
},
"node_modules/smart-buffer": {
"version": "4.2.0",
"resolved": "https://registry.npmjs.org/smart-buffer/-/smart-buffer-4.2.0.tgz",
@ -1574,8 +2171,15 @@
"node_modules/undici-types": {
"version": "6.21.0",
"resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz",
"integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==",
"devOptional": true
"integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ=="
},
"node_modules/universalify": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz",
"integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==",
"engines": {
"node": ">= 10.0.0"
}
},
"node_modules/uuid": {
"version": "11.1.0",

View File

@ -10,23 +10,36 @@
"db:schema": "ts-node src/schema.ts",
"db:test": "ts-node src/testmysql.ts",
"infinity:start": "ts-node src/infinityvideo_generator/start.ts",
"convert:pinterest-face": "ts-node src/imageconverter/pinterest_face_portrait.ts"
"convert:pinterest-face": "ts-node src/imageconverter/pinterest_face_portrait.ts",
"tool:generate-video-from-input": "ts-node src/tools/generateVideoFromInput.ts"
},
"keywords": [],
"author": "",
"license": "ISC",
"devDependencies": {
"@types/node": "^20.0.0",
"@types/node": "^20.19.19",
"@types/png-chunk-text": "^1.0.3",
"@types/png-chunks-encode": "^1.0.2",
"@types/png-chunks-extract": "^1.0.2",
"ts-node": "^10.9.2",
"typescript": "^5.0.0"
},
"dependencies": {
"@types/axios": "^0.14.4",
"@types/fs-extra": "^11.0.4",
"@types/pngjs": "^6.0.5",
"@types/sharp": "^0.32.0",
"axios": "^1.11.0",
"dotenv": "^17.2.1",
"fs-extra": "^11.3.2",
"mysql2": "^3.14.3",
"open": "^10.2.0",
"png-chunk-text": "^1.0.0",
"png-chunks-encode": "^1.0.0",
"png-chunks-extract": "^1.0.0",
"pngjs": "^7.0.0",
"puppeteer": "^24.16.2",
"sharp": "^0.34.4",
"uuid": "^11.1.0"
}
}
}

View File

@ -57,7 +57,7 @@
},
"7": {
"inputs": {
"seed": 838097333311955,
"seed": 936152772258115,
"steps": 8,
"cfg": 1,
"sampler_name": "euler",
@ -76,8 +76,8 @@
0
],
"latent_image": [
"11",
6
"28",
0
]
},
"class_type": "KSampler",
@ -101,14 +101,56 @@
"title": "VAE Decode"
}
},
"9": {
"inputs": {
"font_file": "Alibaba-PuHuiTi-Heavy.ttf",
"font_size": 40,
"border": 32,
"color_theme": "light",
"reel_1": [
"10",
0
]
},
"class_type": "LayerUtility: ImageReelComposit",
"_meta": {
"title": "LayerUtility: Image Reel Composit"
}
},
"10": {
"inputs": {
"image1_text": "Original image",
"image2_text": "Reference",
"image3_text": "Result",
"image4_text": "image4",
"reel_height": 512,
"border": 32,
"image1": [
"11",
1
],
"image2": [
"11",
2
],
"image3": [
"8",
0
]
},
"class_type": "LayerUtility: ImageReel",
"_meta": {
"title": "LayerUtility: Image Reel"
}
},
"11": {
"inputs": {
"prompt": [
"21",
0
],
"enable_resize": true,
"enable_vl_resize": true,
"enable_resize": false,
"enable_vl_resize": false,
"upscale_method": "lanczos",
"crop": "disabled",
"instruction": "<|im_start|>system\nDescribe the key features of the input image (color, shape, size, texture, objects, background), then explain how the user's text instruction should alter or modify the image. Generate a new image that meets the user's requirements while maintaining consistency with the original input where appropriate.<|im_end|>\n<|im_start|>user\n{}<|im_end|>\n<|im_start|>assistant\n",
@ -121,11 +163,11 @@
0
],
"image1": [
"24",
"30",
0
],
"image2": [
"15",
"27",
0
]
},
@ -134,18 +176,48 @@
"title": "TextEncodeQwenImageEditPlus 小志Jason(xiaozhijason)"
}
},
"15": {
"14": {
"inputs": {
"image": "ComfyUI_00067_.png"
"image": "model_outfit_location_1760043932148.png"
},
"class_type": "LoadImage",
"_meta": {
"title": "Load Image"
"title": "load base image"
}
},
"19": {
"inputs": {
"rgthree_comparer": {
"images": [
{
"name": "A",
"selected": true,
"url": "/api/view?filename=rgthree.compare._temp_dxzmg_00211_.png&type=temp&subfolder=&rand=0.09499077981761894"
},
{
"name": "B",
"selected": true,
"url": "/api/view?filename=rgthree.compare._temp_dxzmg_00212_.png&type=temp&subfolder=&rand=0.21125213225471684"
}
]
},
"image_a": [
"11",
1
],
"image_b": [
"8",
0
]
},
"class_type": "Image Comparer (rgthree)",
"_meta": {
"title": "Image Comparer (rgthree)"
}
},
"20": {
"inputs": {
"filename_prefix": "qwenedit",
"filename_prefix": "combined",
"images": [
"8",
0
@ -158,43 +230,66 @@
},
"21": {
"inputs": {
"value": "change camera angle to closeup face from image1, change background to light gray with faing gradient, change face angle to look at directry look at camera"
"value": "请将图2中的模特处理成手持图1中包包的照片。"
},
"class_type": "PrimitiveStringMultiline",
"_meta": {
"title": "String (Multiline)"
}
},
"24": {
"22": {
"inputs": {
"measurement": "pixels",
"width": 720,
"height": 1280,
"fit": "contain",
"method": "nearest-exact",
"image": [
"64",
"filename_prefix": "ComfyUI",
"images": [
"9",
0
]
},
"class_type": "Image Resize (rgthree)",
"class_type": "SaveImage",
"_meta": {
"title": "Image Resize (rgthree)"
"title": "Save Image"
}
},
"64": {
"23": {
"inputs": {
"image": "1337074888177434_1758776251440_2.png"
"width": 720,
"height": 1280,
"batch_size": 1
},
"class_type": "LoadImage",
"class_type": "EmptyLatentImage",
"_meta": {
"title": "Load Image"
"title": "Empty Latent Image"
}
},
"65": {
"24": {
"inputs": {
"vae_name": "sdxl_vae.safetensors"
},
"class_type": "VAELoader",
"_meta": {
"title": "Load VAE"
}
},
"25": {
"inputs": {
"samples": [
"23",
0
],
"vae": [
"24",
0
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"26": {
"inputs": {
"images": [
"24",
"25",
0
]
},
@ -202,5 +297,100 @@
"_meta": {
"title": "Preview Image"
}
},
"27": {
"inputs": {
"width": [
"31",
0
],
"height": [
"32",
0
],
"upscale_method": "nearest-exact",
"keep_proportion": "resize",
"pad_color": "192,192,192",
"crop_position": "center",
"divisible_by": 2,
"device": "cpu",
"image": [
"14",
0
]
},
"class_type": "ImageResizeKJv2",
"_meta": {
"title": "Resize Image v2"
}
},
"28": {
"inputs": {
"pixels": [
"27",
0
],
"vae": [
"3",
0
]
},
"class_type": "VAEEncode",
"_meta": {
"title": "VAE Encode"
}
},
"29": {
"inputs": {
"image": "handbag_1760043932148.png"
},
"class_type": "LoadImage",
"_meta": {
"title": "load reference image"
}
},
"30": {
"inputs": {
"width": [
"31",
0
],
"height": [
"32",
0
],
"upscale_method": "nearest-exact",
"keep_proportion": "resize",
"pad_color": "192,192,192",
"crop_position": "center",
"divisible_by": 2,
"device": "cpu",
"image": [
"29",
0
]
},
"class_type": "ImageResizeKJv2",
"_meta": {
"title": "Resize Image v2"
}
},
"31": {
"inputs": {
"Value": 720
},
"class_type": "DF_Integer",
"_meta": {
"title": "width"
}
},
"32": {
"inputs": {
"Value": 1280
},
"class_type": "DF_Integer",
"_meta": {
"title": "height"
}
}
}

View File

@ -0,0 +1,444 @@
{
"1": {
"inputs": {
"unet_name": "qwen_image_edit_2509_fp8_e4m3fn.safetensors",
"weight_dtype": "default"
},
"class_type": "UNETLoader",
"_meta": {
"title": "Load Diffusion Model"
}
},
"2": {
"inputs": {
"clip_name": "qwen_2.5_vl_7b_fp8_scaled.safetensors",
"type": "qwen_image",
"device": "default"
},
"class_type": "CLIPLoader",
"_meta": {
"title": "Load CLIP"
}
},
"3": {
"inputs": {
"vae_name": "qwen_image_vae.safetensors"
},
"class_type": "VAELoader",
"_meta": {
"title": "Load VAE"
}
},
"4": {
"inputs": {
"lora_name": "Qwen-Image-Lightning-8steps-V2.0.safetensors",
"strength_model": 1,
"model": [
"1",
0
]
},
"class_type": "LoraLoaderModelOnly",
"_meta": {
"title": "LoraLoaderModelOnly"
}
},
"5": {
"inputs": {
"conditioning": [
"11",
0
]
},
"class_type": "ConditioningZeroOut",
"_meta": {
"title": "ConditioningZeroOut"
}
},
"7": {
"inputs": {
"seed": 38026585691397,
"steps": 8,
"cfg": 1,
"sampler_name": "euler",
"scheduler": "beta",
"denoise": 1,
"model": [
"4",
0
],
"positive": [
"11",
0
],
"negative": [
"5",
0
],
"latent_image": [
"36",
0
]
},
"class_type": "KSampler",
"_meta": {
"title": "KSampler"
}
},
"8": {
"inputs": {
"samples": [
"7",
0
],
"vae": [
"3",
0
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"9": {
"inputs": {
"font_file": "Alibaba-PuHuiTi-Heavy.ttf",
"font_size": 40,
"border": 32,
"color_theme": "light",
"reel_1": [
"10",
0
]
},
"class_type": "LayerUtility: ImageReelComposit",
"_meta": {
"title": "LayerUtility: Image Reel Composit"
}
},
"10": {
"inputs": {
"image1_text": "Original image",
"image2_text": "Reference",
"image3_text": "Result",
"image4_text": "image4",
"reel_height": 512,
"border": 32,
"image1": [
"11",
1
],
"image2": [
"11",
2
],
"image3": [
"8",
0
]
},
"class_type": "LayerUtility: ImageReel",
"_meta": {
"title": "LayerUtility: Image Reel"
}
},
"11": {
"inputs": {
"prompt": [
"21",
0
],
"enable_resize": false,
"enable_vl_resize": false,
"upscale_method": "lanczos",
"crop": "disabled",
"instruction": "<|im_start|>system\nDescribe the key features of the input image (color, shape, size, texture, objects, background), then explain how the user's text instruction should alter or modify the image. Generate a new image that meets the user's requirements while maintaining consistency with the original input where appropriate.<|im_end|>\n<|im_start|>user\n{}<|im_end|>\n<|im_start|>assistant\n",
"clip": [
"2",
0
],
"vae": [
"3",
0
],
"image1": [
"27",
0
],
"image2": [
"33",
0
]
},
"class_type": "TextEncodeQwenImageEditPlus_lrzjason",
"_meta": {
"title": "TextEncodeQwenImageEditPlus 小志Jason(xiaozhijason)"
}
},
"14": {
"inputs": {
"image": "model_1760082843769.png"
},
"class_type": "LoadImage",
"_meta": {
"title": "load base image"
}
},
"19": {
"inputs": {
"rgthree_comparer": {
"images": [
{
"name": "A",
"selected": true,
"url": "/api/view?filename=rgthree.compare._temp_uoazy_00279_.png&type=temp&subfolder=&rand=0.4405150352070387"
},
{
"name": "B",
"selected": true,
"url": "/api/view?filename=rgthree.compare._temp_uoazy_00280_.png&type=temp&subfolder=&rand=0.9388629603648289"
}
]
},
"image_a": [
"11",
1
],
"image_b": [
"8",
0
]
},
"class_type": "Image Comparer (rgthree)",
"_meta": {
"title": "Image Comparer (rgthree)"
}
},
"20": {
"inputs": {
"filename_prefix": "combined",
"images": [
"8",
0
]
},
"class_type": "SaveImage",
"_meta": {
"title": "Save Image"
}
},
"21": {
"inputs": {
"value": "以图像2为基础生成一张女性肖像照片。她穿着一件黑色薄纱长袖上衣一条光滑的皮革及膝裙和勃艮第色的尖头靴子手提一个深红色的手提包。场景改为极简主义风格的客厅摆放着中性的沙发、镜面墙饰、盆栽植物和浅色地板营造出明亮而宽敞的美感。"
},
"class_type": "PrimitiveStringMultiline",
"_meta": {
"title": "String (Multiline)"
}
},
"22": {
"inputs": {
"filename_prefix": "ComfyUI",
"images": [
"9",
0
]
},
"class_type": "SaveImage",
"_meta": {
"title": "Save Image"
}
},
"23": {
"inputs": {
"width": 720,
"height": 1280,
"batch_size": 1
},
"class_type": "EmptyLatentImage",
"_meta": {
"title": "Empty Latent Image"
}
},
"24": {
"inputs": {
"vae_name": "sdxl_vae.safetensors"
},
"class_type": "VAELoader",
"_meta": {
"title": "Load VAE"
}
},
"25": {
"inputs": {
"samples": [
"23",
0
],
"vae": [
"24",
0
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"26": {
"inputs": {
"images": [
"25",
0
]
},
"class_type": "PreviewImage",
"_meta": {
"title": "Preview Image"
}
},
"27": {
"inputs": {
"width": [
"31",
0
],
"height": [
"32",
0
],
"upscale_method": "nearest-exact",
"keep_proportion": "resize",
"pad_color": "192,192,192",
"crop_position": "center",
"divisible_by": 2,
"device": "cpu",
"image": [
"14",
0
]
},
"class_type": "ImageResizeKJv2",
"_meta": {
"title": "Resize Image v2"
}
},
"28": {
"inputs": {
"pixels": [
"27",
0
],
"vae": [
"3",
0
]
},
"class_type": "VAEEncode",
"_meta": {
"title": "VAE Encode"
}
},
"29": {
"inputs": {
"image": "pose_1760082843769.png"
},
"class_type": "LoadImage",
"_meta": {
"title": "load reference image"
}
},
"30": {
"inputs": {
"width": [
"31",
0
],
"height": [
"32",
0
],
"upscale_method": "nearest-exact",
"keep_proportion": "resize",
"pad_color": "192,192,192",
"crop_position": "center",
"divisible_by": 2,
"device": "cpu",
"image": [
"29",
0
]
},
"class_type": "ImageResizeKJv2",
"_meta": {
"title": "Resize Image v2"
}
},
"31": {
"inputs": {
"Value": 720
},
"class_type": "DF_Integer",
"_meta": {
"title": "width"
}
},
"32": {
"inputs": {
"Value": 1280
},
"class_type": "DF_Integer",
"_meta": {
"title": "height"
}
},
"33": {
"inputs": {
"detect_hand": "enable",
"detect_body": "enable",
"detect_face": "enable",
"resolution": 512,
"bbox_detector": "yolox_l.onnx",
"pose_estimator": "dw-ll_ucoco_384_bs5.torchscript.pt",
"scale_stick_for_xinsr_cn": "disable",
"image": [
"30",
0
]
},
"class_type": "DWPreprocessor",
"_meta": {
"title": "DWPose Estimator"
}
},
"35": {
"inputs": {
"images": [
"33",
0
]
},
"class_type": "PreviewImage",
"_meta": {
"title": "Preview Image"
}
},
"36": {
"inputs": {
"width": [
"31",
0
],
"height": [
"32",
0
],
"batch_size": 1
},
"class_type": "EmptyLatentImage",
"_meta": {
"title": "Empty Latent Image"
}
}
}

View File

@ -0,0 +1,396 @@
{
"1": {
"inputs": {
"unet_name": "qwen_image_edit_2509_fp8_e4m3fn.safetensors",
"weight_dtype": "default"
},
"class_type": "UNETLoader",
"_meta": {
"title": "Load Diffusion Model"
}
},
"2": {
"inputs": {
"clip_name": "qwen_2.5_vl_7b_fp8_scaled.safetensors",
"type": "qwen_image",
"device": "default"
},
"class_type": "CLIPLoader",
"_meta": {
"title": "Load CLIP"
}
},
"3": {
"inputs": {
"vae_name": "qwen_image_vae.safetensors"
},
"class_type": "VAELoader",
"_meta": {
"title": "Load VAE"
}
},
"4": {
"inputs": {
"lora_name": "Qwen-Image-Lightning-8steps-V2.0.safetensors",
"strength_model": 1,
"model": [
"1",
0
]
},
"class_type": "LoraLoaderModelOnly",
"_meta": {
"title": "LoraLoaderModelOnly"
}
},
"5": {
"inputs": {
"conditioning": [
"11",
0
]
},
"class_type": "ConditioningZeroOut",
"_meta": {
"title": "ConditioningZeroOut"
}
},
"7": {
"inputs": {
"seed": 323591075024702,
"steps": 8,
"cfg": 1,
"sampler_name": "euler",
"scheduler": "beta",
"denoise": 1,
"model": [
"4",
0
],
"positive": [
"11",
0
],
"negative": [
"5",
0
],
"latent_image": [
"28",
0
]
},
"class_type": "KSampler",
"_meta": {
"title": "KSampler"
}
},
"8": {
"inputs": {
"samples": [
"7",
0
],
"vae": [
"3",
0
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"9": {
"inputs": {
"font_file": "Alibaba-PuHuiTi-Heavy.ttf",
"font_size": 40,
"border": 32,
"color_theme": "light",
"reel_1": [
"10",
0
]
},
"class_type": "LayerUtility: ImageReelComposit",
"_meta": {
"title": "LayerUtility: Image Reel Composit"
}
},
"10": {
"inputs": {
"image1_text": "Original image",
"image2_text": "Reference",
"image3_text": "Result",
"image4_text": "image4",
"reel_height": 512,
"border": 32,
"image1": [
"11",
1
],
"image2": [
"11",
2
],
"image3": [
"8",
0
]
},
"class_type": "LayerUtility: ImageReel",
"_meta": {
"title": "LayerUtility: Image Reel"
}
},
"11": {
"inputs": {
"prompt": [
"21",
0
],
"enable_resize": true,
"enable_vl_resize": true,
"upscale_method": "lanczos",
"crop": "disabled",
"instruction": "<|im_start|>system\nDescribe the key features of the input image (color, shape, size, texture, objects, background), then explain how the user's text instruction should alter or modify the image. Generate a new image that meets the user's requirements while maintaining consistency with the original input where appropriate.<|im_end|>\n<|im_start|>user\n{}<|im_end|>\n<|im_start|>assistant\n",
"clip": [
"2",
0
],
"vae": [
"3",
0
],
"image1": [
"27",
0
],
"image2": [
"30",
0
]
},
"class_type": "TextEncodeQwenImageEditPlus_lrzjason",
"_meta": {
"title": "TextEncodeQwenImageEditPlus 小志Jason(xiaozhijason)"
}
},
"14": {
"inputs": {
"image": "model_outfit_location_handbag1_1760085003312.png"
},
"class_type": "LoadImage",
"_meta": {
"title": "Load Image"
}
},
"19": {
"inputs": {
"rgthree_comparer": {
"images": [
{
"name": "A",
"selected": true,
"url": "/api/view?filename=rgthree.compare._temp_uoazy_00305_.png&type=temp&subfolder=&rand=0.5408789951924671"
},
{
"name": "B",
"selected": true,
"url": "/api/view?filename=rgthree.compare._temp_uoazy_00306_.png&type=temp&subfolder=&rand=0.2425856190711294"
}
]
},
"image_a": [
"11",
1
],
"image_b": [
"8",
0
]
},
"class_type": "Image Comparer (rgthree)",
"_meta": {
"title": "Image Comparer (rgthree)"
}
},
"20": {
"inputs": {
"filename_prefix": "combined",
"images": [
"8",
0
]
},
"class_type": "SaveImage",
"_meta": {
"title": "Save Image"
}
},
"21": {
"inputs": {
"value": "请将图2中的女性修改成把图1的包背在肩上。"
},
"class_type": "PrimitiveStringMultiline",
"_meta": {
"title": "String (Multiline)"
}
},
"22": {
"inputs": {
"filename_prefix": "ComfyUI",
"images": [
"9",
0
]
},
"class_type": "SaveImage",
"_meta": {
"title": "Save Image"
}
},
"23": {
"inputs": {
"width": 720,
"height": 1280,
"batch_size": 1
},
"class_type": "EmptyLatentImage",
"_meta": {
"title": "Empty Latent Image"
}
},
"24": {
"inputs": {
"vae_name": "sdxl_vae.safetensors"
},
"class_type": "VAELoader",
"_meta": {
"title": "Load VAE"
}
},
"25": {
"inputs": {
"samples": [
"23",
0
],
"vae": [
"24",
0
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"26": {
"inputs": {
"images": [
"25",
0
]
},
"class_type": "PreviewImage",
"_meta": {
"title": "Preview Image"
}
},
"27": {
"inputs": {
"width": [
"31",
0
],
"height": [
"32",
0
],
"upscale_method": "nearest-exact",
"keep_proportion": "crop",
"pad_color": "192,192,192",
"crop_position": "center",
"divisible_by": 2,
"device": "cpu",
"image": [
"14",
0
]
},
"class_type": "ImageResizeKJv2",
"_meta": {
"title": "Resize Image v2"
}
},
"28": {
"inputs": {
"pixels": [
"27",
0
],
"vae": [
"3",
0
]
},
"class_type": "VAEEncode",
"_meta": {
"title": "VAE Encode"
}
},
"29": {
"inputs": {
"image": "handbag_1760085003312.png"
},
"class_type": "LoadImage",
"_meta": {
"title": "Load Image"
}
},
"30": {
"inputs": {
"width": [
"31",
0
],
"height": [
"32",
0
],
"upscale_method": "nearest-exact",
"keep_proportion": "crop",
"pad_color": "192,192,192",
"crop_position": "center",
"divisible_by": 2,
"device": "cpu",
"image": [
"29",
0
]
},
"class_type": "ImageResizeKJv2",
"_meta": {
"title": "Resize Image v2"
}
},
"31": {
"inputs": {
"Value": 720
},
"class_type": "DF_Integer",
"_meta": {
"title": "width"
}
},
"32": {
"inputs": {
"Value": 1280
},
"class_type": "DF_Integer",
"_meta": {
"title": "height"
}
}
}

View File

@ -0,0 +1,444 @@
{
"1": {
"inputs": {
"unet_name": "qwen_image_edit_2509_fp8_e4m3fn.safetensors",
"weight_dtype": "default"
},
"class_type": "UNETLoader",
"_meta": {
"title": "Load Diffusion Model"
}
},
"2": {
"inputs": {
"clip_name": "qwen_2.5_vl_7b_fp8_scaled.safetensors",
"type": "qwen_image",
"device": "default"
},
"class_type": "CLIPLoader",
"_meta": {
"title": "Load CLIP"
}
},
"3": {
"inputs": {
"vae_name": "qwen_image_vae.safetensors"
},
"class_type": "VAELoader",
"_meta": {
"title": "Load VAE"
}
},
"4": {
"inputs": {
"lora_name": "Qwen-Image-Lightning-8steps-V2.0.safetensors",
"strength_model": 1,
"model": [
"1",
0
]
},
"class_type": "LoraLoaderModelOnly",
"_meta": {
"title": "LoraLoaderModelOnly"
}
},
"5": {
"inputs": {
"conditioning": [
"11",
0
]
},
"class_type": "ConditioningZeroOut",
"_meta": {
"title": "ConditioningZeroOut"
}
},
"7": {
"inputs": {
"seed": 38026585691397,
"steps": 8,
"cfg": 1,
"sampler_name": "euler",
"scheduler": "beta",
"denoise": 1,
"model": [
"4",
0
],
"positive": [
"11",
0
],
"negative": [
"5",
0
],
"latent_image": [
"36",
0
]
},
"class_type": "KSampler",
"_meta": {
"title": "KSampler"
}
},
"8": {
"inputs": {
"samples": [
"7",
0
],
"vae": [
"3",
0
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"9": {
"inputs": {
"font_file": "Alibaba-PuHuiTi-Heavy.ttf",
"font_size": 40,
"border": 32,
"color_theme": "light",
"reel_1": [
"10",
0
]
},
"class_type": "LayerUtility: ImageReelComposit",
"_meta": {
"title": "LayerUtility: Image Reel Composit"
}
},
"10": {
"inputs": {
"image1_text": "Original image",
"image2_text": "Reference",
"image3_text": "Result",
"image4_text": "image4",
"reel_height": 512,
"border": 32,
"image1": [
"11",
1
],
"image2": [
"11",
2
],
"image3": [
"8",
0
]
},
"class_type": "LayerUtility: ImageReel",
"_meta": {
"title": "LayerUtility: Image Reel"
}
},
"11": {
"inputs": {
"prompt": [
"21",
0
],
"enable_resize": false,
"enable_vl_resize": false,
"upscale_method": "lanczos",
"crop": "disabled",
"instruction": "<|im_start|>system\nDescribe the key features of the input image (color, shape, size, texture, objects, background), then explain how the user's text instruction should alter or modify the image. Generate a new image that meets the user's requirements while maintaining consistency with the original input where appropriate.<|im_end|>\n<|im_start|>user\n{}<|im_end|>\n<|im_start|>assistant\n",
"clip": [
"2",
0
],
"vae": [
"3",
0
],
"image1": [
"27",
0
],
"image2": [
"33",
0
]
},
"class_type": "TextEncodeQwenImageEditPlus_lrzjason",
"_meta": {
"title": "TextEncodeQwenImageEditPlus 小志Jason(xiaozhijason)"
}
},
"14": {
"inputs": {
"image": "model_1760082843769.png"
},
"class_type": "LoadImage",
"_meta": {
"title": "load base image"
}
},
"19": {
"inputs": {
"rgthree_comparer": {
"images": [
{
"name": "A",
"selected": true,
"url": "/api/view?filename=rgthree.compare._temp_uoazy_00279_.png&type=temp&subfolder=&rand=0.4405150352070387"
},
{
"name": "B",
"selected": true,
"url": "/api/view?filename=rgthree.compare._temp_uoazy_00280_.png&type=temp&subfolder=&rand=0.9388629603648289"
}
]
},
"image_a": [
"11",
1
],
"image_b": [
"8",
0
]
},
"class_type": "Image Comparer (rgthree)",
"_meta": {
"title": "Image Comparer (rgthree)"
}
},
"20": {
"inputs": {
"filename_prefix": "combined",
"images": [
"8",
0
]
},
"class_type": "SaveImage",
"_meta": {
"title": "Save Image"
}
},
"21": {
"inputs": {
"value": "以图像2为基础生成一张女性肖像照片。她穿着一件黑色薄纱长袖上衣一条光滑的皮革及膝裙和勃艮第色的尖头靴子手提一个深红色的手提包。场景改为极简主义风格的客厅摆放着中性的沙发、镜面墙饰、盆栽植物和浅色地板营造出明亮而宽敞的美感。"
},
"class_type": "PrimitiveStringMultiline",
"_meta": {
"title": "String (Multiline)"
}
},
"22": {
"inputs": {
"filename_prefix": "ComfyUI",
"images": [
"9",
0
]
},
"class_type": "SaveImage",
"_meta": {
"title": "Save Image"
}
},
"23": {
"inputs": {
"width": 720,
"height": 1280,
"batch_size": 1
},
"class_type": "EmptyLatentImage",
"_meta": {
"title": "Empty Latent Image"
}
},
"24": {
"inputs": {
"vae_name": "sdxl_vae.safetensors"
},
"class_type": "VAELoader",
"_meta": {
"title": "Load VAE"
}
},
"25": {
"inputs": {
"samples": [
"23",
0
],
"vae": [
"24",
0
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"26": {
"inputs": {
"images": [
"25",
0
]
},
"class_type": "PreviewImage",
"_meta": {
"title": "Preview Image"
}
},
"27": {
"inputs": {
"width": [
"31",
0
],
"height": [
"32",
0
],
"upscale_method": "nearest-exact",
"keep_proportion": "resize",
"pad_color": "192,192,192",
"crop_position": "center",
"divisible_by": 2,
"device": "cpu",
"image": [
"14",
0
]
},
"class_type": "ImageResizeKJv2",
"_meta": {
"title": "Resize Image v2"
}
},
"28": {
"inputs": {
"pixels": [
"27",
0
],
"vae": [
"3",
0
]
},
"class_type": "VAEEncode",
"_meta": {
"title": "VAE Encode"
}
},
"29": {
"inputs": {
"image": "pose_1760082843769.png"
},
"class_type": "LoadImage",
"_meta": {
"title": "load reference image"
}
},
"30": {
"inputs": {
"width": [
"31",
0
],
"height": [
"32",
0
],
"upscale_method": "nearest-exact",
"keep_proportion": "resize",
"pad_color": "192,192,192",
"crop_position": "center",
"divisible_by": 2,
"device": "cpu",
"image": [
"29",
0
]
},
"class_type": "ImageResizeKJv2",
"_meta": {
"title": "Resize Image v2"
}
},
"31": {
"inputs": {
"Value": 720
},
"class_type": "DF_Integer",
"_meta": {
"title": "width"
}
},
"32": {
"inputs": {
"Value": 1280
},
"class_type": "DF_Integer",
"_meta": {
"title": "height"
}
},
"33": {
"inputs": {
"detect_hand": "enable",
"detect_body": "enable",
"detect_face": "enable",
"resolution": 512,
"bbox_detector": "yolox_l.onnx",
"pose_estimator": "dw-ll_ucoco_384_bs5.torchscript.pt",
"scale_stick_for_xinsr_cn": "disable",
"image": [
"30",
0
]
},
"class_type": "DWPreprocessor",
"_meta": {
"title": "DWPose Estimator"
}
},
"35": {
"inputs": {
"images": [
"33",
0
]
},
"class_type": "PreviewImage",
"_meta": {
"title": "Preview Image"
}
},
"36": {
"inputs": {
"width": [
"31",
0
],
"height": [
"32",
0
],
"batch_size": 1
},
"class_type": "EmptyLatentImage",
"_meta": {
"title": "Empty Latent Image"
}
}
}

View File

@ -57,7 +57,7 @@
},
"7": {
"inputs": {
"seed": 838097333311955,
"seed": 639545413023960,
"steps": 8,
"cfg": 1,
"sampler_name": "euler",
@ -76,8 +76,8 @@
0
],
"latent_image": [
"11",
6
"28",
0
]
},
"class_type": "KSampler",
@ -101,6 +101,48 @@
"title": "VAE Decode"
}
},
"9": {
"inputs": {
"font_file": "Alibaba-PuHuiTi-Heavy.ttf",
"font_size": 40,
"border": 32,
"color_theme": "light",
"reel_1": [
"10",
0
]
},
"class_type": "LayerUtility: ImageReelComposit",
"_meta": {
"title": "LayerUtility: Image Reel Composit"
}
},
"10": {
"inputs": {
"image1_text": "Original image",
"image2_text": "Reference",
"image3_text": "Result",
"image4_text": "image4",
"reel_height": 512,
"border": 32,
"image1": [
"11",
1
],
"image2": [
"11",
2
],
"image3": [
"8",
0
]
},
"class_type": "LayerUtility: ImageReel",
"_meta": {
"title": "LayerUtility: Image Reel"
}
},
"11": {
"inputs": {
"prompt": [
@ -121,7 +163,7 @@
0
],
"image1": [
"24",
"27",
0
]
},
@ -130,6 +172,45 @@
"title": "TextEncodeQwenImageEditPlus 小志Jason(xiaozhijason)"
}
},
"14": {
"inputs": {
"image": "7318418139276581_1759654853736_18 - コピー.png"
},
"class_type": "LoadImage",
"_meta": {
"title": "Load Image"
}
},
"19": {
"inputs": {
"rgthree_comparer": {
"images": [
{
"name": "A",
"selected": true,
"url": "/api/view?filename=rgthree.compare._temp_niitk_00003_.png&type=temp&subfolder=&rand=0.9166876008508786"
},
{
"name": "B",
"selected": true,
"url": "/api/view?filename=rgthree.compare._temp_niitk_00004_.png&type=temp&subfolder=&rand=0.06689875639286158"
}
]
},
"image_a": [
"11",
1
],
"image_b": [
"8",
0
]
},
"class_type": "Image Comparer (rgthree)",
"_meta": {
"title": "Image Comparer (rgthree)"
}
},
"20": {
"inputs": {
"filename_prefix": "qwenedit",
@ -145,43 +226,66 @@
},
"21": {
"inputs": {
"value": "change camera angle to closeup face from image1, change background to light gray with faing gradient, change face angle to look at directry look at camera"
"value": "请从图1中提取主要主体把背景设置为浅灰色并让主体正面朝向制作成产品照片。"
},
"class_type": "PrimitiveStringMultiline",
"_meta": {
"title": "String (Multiline)"
}
},
"24": {
"22": {
"inputs": {
"measurement": "pixels",
"width": 720,
"height": 1280,
"fit": "contain",
"method": "nearest-exact",
"image": [
"64",
"filename_prefix": "ComfyUI",
"images": [
"9",
0
]
},
"class_type": "Image Resize (rgthree)",
"class_type": "SaveImage",
"_meta": {
"title": "Image Resize (rgthree)"
"title": "Save Image"
}
},
"64": {
"23": {
"inputs": {
"image": "1337074888177434_1758776251440_2.png"
"width": 720,
"height": 1280,
"batch_size": 1
},
"class_type": "LoadImage",
"class_type": "EmptyLatentImage",
"_meta": {
"title": "Load Image"
"title": "Empty Latent Image"
}
},
"65": {
"24": {
"inputs": {
"vae_name": "sdxl_vae.safetensors"
},
"class_type": "VAELoader",
"_meta": {
"title": "Load VAE"
}
},
"25": {
"inputs": {
"samples": [
"23",
0
],
"vae": [
"24",
0
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"26": {
"inputs": {
"images": [
"24",
"25",
0
]
},
@ -189,5 +293,41 @@
"_meta": {
"title": "Preview Image"
}
},
"27": {
"inputs": {
"width": 720,
"height": 1280,
"upscale_method": "nearest-exact",
"keep_proportion": "pad",
"pad_color": "192,192,192",
"crop_position": "center",
"divisible_by": 2,
"device": "cpu",
"image": [
"14",
0
]
},
"class_type": "ImageResizeKJv2",
"_meta": {
"title": "Resize Image v2"
}
},
"28": {
"inputs": {
"pixels": [
"27",
0
],
"vae": [
"3",
0
]
},
"class_type": "VAEEncode",
"_meta": {
"title": "VAE Encode"
}
}
}

View File

@ -57,14 +57,14 @@
},
"7": {
"inputs": {
"seed": 799784211855929,
"seed": 506786026379830,
"steps": 8,
"cfg": 1,
"sampler_name": "euler",
"scheduler": "beta",
"denoise": 1,
"model": [
"66",
"140",
0
],
"positive": [
@ -136,7 +136,7 @@
},
"15": {
"inputs": {
"image": "cloth_0001.png"
"image": "cloth_0026.png"
},
"class_type": "LoadImage",
"_meta": {
@ -145,7 +145,7 @@
},
"21": {
"inputs": {
"value": "change clothes of image1 with image2"
"value": "change clothes of image1 to image2, remove the cap from head"
},
"class_type": "PrimitiveStringMultiline",
"_meta": {
@ -154,30 +154,16 @@
},
"64": {
"inputs": {
"image": "Lauren_body.png"
"image": "Courtney_body.png"
},
"class_type": "LoadImage",
"_meta": {
"title": "Load model"
}
},
"66": {
"inputs": {
"lora_name": "extract-outfit_v3.safetensors",
"strength_model": 1,
"model": [
"4",
0
]
},
"class_type": "LoraLoaderModelOnly",
"_meta": {
"title": "LoraLoaderModelOnly"
}
},
"76": {
"inputs": {
"number": 720
"number": 832
},
"class_type": "StaticNumberInt",
"_meta": {
@ -186,7 +172,7 @@
},
"77": {
"inputs": {
"number": 1280
"number": 1248
},
"class_type": "StaticNumberInt",
"_meta": {
@ -259,7 +245,7 @@
0
],
"upscale_method": "nearest-exact",
"keep_proportion": "pad",
"keep_proportion": "crop",
"pad_color": "0, 0, 0",
"crop_position": "center",
"divisible_by": 2,
@ -310,5 +296,59 @@
"_meta": {
"title": "Preview Image"
}
},
"136": {
"inputs": {
"image": "281543721672978_1758880135639_0.png"
},
"class_type": "LoadImage",
"_meta": {
"title": "Load Image"
}
},
"137": {
"inputs": {
"detect_hand": "enable",
"detect_body": "enable",
"detect_face": "enable",
"resolution": 512,
"bbox_detector": "yolox_l.onnx",
"pose_estimator": "dw-ll_ucoco_384_bs5.torchscript.pt",
"scale_stick_for_xinsr_cn": "disable",
"image": [
"136",
0
]
},
"class_type": "DWPreprocessor",
"_meta": {
"title": "DWPose Estimator"
}
},
"139": {
"inputs": {
"images": [
"137",
0
]
},
"class_type": "PreviewImage",
"_meta": {
"title": "Preview Image"
}
},
"140": {
"inputs": {
"lora_name": "Try_On_Qwen_Edit_Lora.safetensors",
"strength_model": 1,
"model": [
"4",
0
]
},
"class_type": "LoraLoaderModelOnly",
"_meta": {
"title": "LoraLoaderModelOnly"
}
}
}

View File

@ -0,0 +1,111 @@
{
"1": {
"inputs": {
"image": "model_outfit_location_handbag3_1760086053609.png"
},
"class_type": "LoadImage",
"_meta": {
"title": "Load Image"
}
},
"2": {
"inputs": {
"enabled": true,
"swap_model": "inswapper_128.onnx",
"facedetection": "YOLOv5l",
"face_restore_model": "GPEN-BFR-1024.onnx",
"face_restore_visibility": 0.5200000000000001,
"codeformer_weight": 0.5,
"detect_gender_input": "no",
"detect_gender_source": "no",
"input_faces_index": "0",
"source_faces_index": "0",
"console_log_level": 1,
"input_image": [
"6",
0
],
"source_image": [
"3",
0
]
},
"class_type": "ReActorFaceSwap",
"_meta": {
"title": "ReActor 🌌 Fast Face Swap"
}
},
"3": {
"inputs": {
"image": "outfit_1760086053609.png"
},
"class_type": "LoadImage",
"_meta": {
"title": "Load Image"
}
},
"4": {
"inputs": {
"images": [
"2",
0
]
},
"class_type": "PreviewImage",
"_meta": {
"title": "Preview Image"
}
},
"6": {
"inputs": {
"resize_to": "4k",
"images": [
"1",
0
],
"upscaler_trt_model": [
"8",
0
]
},
"class_type": "UpscalerTensorrt",
"_meta": {
"title": "Upscaler Tensorrt ⚡"
}
},
"7": {
"inputs": {
"images": [
"6",
0
]
},
"class_type": "PreviewImage",
"_meta": {
"title": "Preview Image"
}
},
"8": {
"inputs": {
"model": "4x-UltraSharp",
"precision": "fp16"
},
"class_type": "LoadUpscalerTensorrtModel",
"_meta": {
"title": "Load Upscale Tensorrt Model"
}
},
"9": {
"inputs": {
"filename_prefix": "upscaled",
"images": [
"2",
0
]
},
"class_type": "SaveImage",
"_meta": {
"title": "Save Image"
}
}
}

View File

@ -0,0 +1,229 @@
{
"1": {
"inputs": {
"clip_name1": "clip_l.safetensors",
"clip_name2": "t5xxl_fp16.safetensors",
"type": "flux",
"device": "default"
},
"class_type": "DualCLIPLoader",
"_meta": {
"title": "DualCLIPLoader"
}
},
"2": {
"inputs": {
"unet_name": "flux1-krea-dev_fp8_scaled.safetensors",
"weight_dtype": "default"
},
"class_type": "UNETLoader",
"_meta": {
"title": "Load Diffusion Model"
}
},
"3": {
"inputs": {
"width": 720,
"height": 1280,
"batch_size": 1
},
"class_type": "EmptySD3LatentImage",
"_meta": {
"title": "EmptySD3LatentImage"
}
},
"4": {
"inputs": {
"seed": 445107772143446,
"steps": 20,
"cfg": 1,
"sampler_name": "euler",
"scheduler": "simple",
"denoise": 1,
"model": [
"2",
0
],
"positive": [
"11",
0
],
"negative": [
"5",
0
],
"latent_image": [
"3",
0
]
},
"class_type": "KSampler",
"_meta": {
"title": "KSampler"
}
},
"5": {
"inputs": {
"conditioning": [
"14",
0
]
},
"class_type": "ConditioningZeroOut",
"_meta": {
"title": "ConditioningZeroOut"
}
},
"6": {
"inputs": {
"samples": [
"4",
0
],
"vae": [
"12",
0
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"8": {
"inputs": {
"style_model_name": "flux1-redux-dev.safetensors"
},
"class_type": "StyleModelLoader",
"_meta": {
"title": "Load Style Model"
}
},
"9": {
"inputs": {
"crop": "center",
"clip_vision": [
"10",
0
],
"image": [
"13",
0
]
},
"class_type": "CLIPVisionEncode",
"_meta": {
"title": "CLIP Vision Encode"
}
},
"10": {
"inputs": {
"clip_name": "sigclip_vision_patch14_384.safetensors"
},
"class_type": "CLIPVisionLoader",
"_meta": {
"title": "Load CLIP Vision"
}
},
"11": {
"inputs": {
"strength": 0.30000000000000004,
"conditioning": [
"14",
0
],
"style_model": [
"8",
0
],
"clip_vision_output": [
"9",
0
]
},
"class_type": "ApplyStyleModelAdjust",
"_meta": {
"title": "Apply Style Model (Adjusted)"
}
},
"12": {
"inputs": {
"vae_name": "ae.safetensors"
},
"class_type": "VAELoader",
"_meta": {
"title": "Load VAE"
}
},
"13": {
"inputs": {
"image": "281543725739981_1759177922955_0.png"
},
"class_type": "LoadImage",
"_meta": {
"title": "Load Image 1"
}
},
"14": {
"inputs": {
"text": "realistic photo of woman, wavy long blong hair, fullbody shot,, A dynamic dance scene begins with a distorted glitch effect mirroring the images grayscale aesthetic, quickly transitioning into a vibrant, fast-paced choreography featuring dancers in similar pale makeup and unsettling expressions. The music is electronic with heavy bass and industrial elements. The camera work should be kinetic and disorienting, utilizing quick cuts and unconventional angles, emphasizing the feeling of being trapped or haunted. The dance evolves from frantic movements to controlled yet eerie poses that echo the images gesture of covering the face. The setting changes between a stark white room similar to the image's background and abstract digital landscapes.",
"clip": [
"1",
0
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"15": {
"inputs": {
"filename_prefix": "STYLEDVIDEOMAKER",
"images": [
"20",
0
]
},
"class_type": "SaveImage",
"_meta": {
"title": "Save Image"
}
},
"19": {
"inputs": {
"image": "face.png"
},
"class_type": "LoadImage",
"_meta": {
"title": "Load Image"
}
},
"20": {
"inputs": {
"enabled": true,
"swap_model": "inswapper_128.onnx",
"facedetection": "retinaface_resnet50",
"face_restore_model": "GPEN-BFR-2048.onnx",
"face_restore_visibility": 1,
"codeformer_weight": 1,
"detect_gender_input": "no",
"detect_gender_source": "no",
"input_faces_index": "0",
"source_faces_index": "0",
"console_log_level": 1,
"input_image": [
"6",
0
],
"source_image": [
"19",
0
]
},
"class_type": "ReActorFaceSwap",
"_meta": {
"title": "ReActor 🌌 Fast Face Swap"
}
}
}

View File

@ -0,0 +1,388 @@
{
"4": {
"inputs": {
"ckpt_name": "dreamshaperXL_v21TurboDPMSDE.safetensors"
},
"class_type": "CheckpointLoaderSimple",
"_meta": {
"title": "Load Checkpoint"
}
},
"12": {
"inputs": {
"seed": 302411063911982,
"steps": 8,
"cfg": 2,
"sampler_name": "dpmpp_sde",
"scheduler": "karras",
"denoise": 1,
"model": [
"4",
0
],
"positive": [
"65",
0
],
"negative": [
"69",
0
],
"latent_image": [
"13",
0
]
},
"class_type": "KSampler",
"_meta": {
"title": "KSampler"
}
},
"13": {
"inputs": {
"width": 1216,
"height": 832,
"batch_size": 1
},
"class_type": "EmptyLatentImage",
"_meta": {
"title": "Empty Latent Image"
}
},
"16": {
"inputs": {
"samples": [
"12",
0
],
"vae": [
"4",
2
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"36": {
"inputs": {
"method": "Mixture of Diffusers",
"tile_width": 1024,
"tile_height": 1024,
"tile_overlap": 32,
"tile_batch_size": 8,
"model": [
"4",
0
]
},
"class_type": "TiledDiffusion",
"_meta": {
"title": "Tiled Diffusion"
}
},
"51": {
"inputs": {
"tile_size": 1024,
"fast": false,
"samples": [
"80",
0
],
"vae": [
"4",
2
]
},
"class_type": "VAEDecodeTiled_TiledDiffusion",
"_meta": {
"title": "Tiled VAE Decode"
}
},
"65": {
"inputs": {
"text": "photo of a high end sports car",
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"69": {
"inputs": {
"text": "text, watermark, (film grain, noise:1.2)",
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"80": {
"inputs": {
"seed": 105566927616764,
"steps": 4,
"cfg": 2,
"sampler_name": "dpmpp_sde",
"scheduler": "karras",
"denoise": 1,
"model": [
"36",
0
],
"positive": [
"141",
0
],
"negative": [
"141",
1
],
"latent_image": [
"84",
0
]
},
"class_type": "KSampler",
"_meta": {
"title": "KSampler"
}
},
"84": {
"inputs": {
"width": [
"106",
0
],
"height": [
"107",
0
],
"batch_size": 1
},
"class_type": "EmptyLatentImage",
"_meta": {
"title": "Empty Latent Image"
}
},
"105": {
"inputs": {
"image": [
"115",
0
]
},
"class_type": "GetImageSizeAndCount",
"_meta": {
"title": "Get Image Size & Count"
}
},
"106": {
"inputs": {
"value": "a*b",
"a": [
"105",
1
],
"b": [
"117",
0
]
},
"class_type": "SimpleMath+",
"_meta": {
"title": "🔧 Simple Math"
}
},
"107": {
"inputs": {
"value": "a*b",
"a": [
"105",
2
],
"b": [
"117",
0
]
},
"class_type": "SimpleMath+",
"_meta": {
"title": "🔧 Simple Math"
}
},
"111": {
"inputs": {
"image": "model_outfit_location_handbag1_1760092227085.png"
},
"class_type": "LoadImage",
"_meta": {
"title": "Load Image"
}
},
"115": {
"inputs": {
"any_01": [
"111",
0
]
},
"class_type": "Any Switch (rgthree)",
"_meta": {
"title": "Any Switch (rgthree)"
}
},
"117": {
"inputs": {
"value": 4.000000000000001
},
"class_type": "FloatConstant",
"_meta": {
"title": "Float Constant"
}
},
"133": {
"inputs": {
"rgthree_comparer": {
"images": [
{
"name": "A",
"selected": true,
"url": "/api/view?filename=rgthree.compare._temp_ybqmm_00009_.png&type=temp&subfolder=&rand=0.02707950499627365"
},
{
"name": "B",
"selected": true,
"url": "/api/view?filename=rgthree.compare._temp_ybqmm_00010_.png&type=temp&subfolder=&rand=0.18690183070180255"
}
]
},
"image_a": [
"115",
0
],
"image_b": [
"149",
0
]
},
"class_type": "Image Comparer (rgthree)",
"_meta": {
"title": "Image Comparer (rgthree)"
}
},
"141": {
"inputs": {
"strength": 0.65,
"start_percent": 0,
"end_percent": 0.9,
"positive": [
"65",
0
],
"negative": [
"69",
0
],
"control_net": [
"142",
0
],
"image": [
"115",
0
]
},
"class_type": "ACN_AdvancedControlNetApply",
"_meta": {
"title": "Apply Advanced ControlNet 🛂🅐🅒🅝"
}
},
"142": {
"inputs": {
"control_net_name": "xinsircontrolnet-tile-sdxl-1.0.safetensors"
},
"class_type": "ControlNetLoaderAdvanced",
"_meta": {
"title": "Load Advanced ControlNet Model 🛂🅐🅒🅝"
}
},
"148": {
"inputs": {
"color_space": "LAB",
"factor": 0.8,
"device": "auto",
"batch_size": 0,
"image": [
"51",
0
],
"reference": [
"115",
0
]
},
"class_type": "ImageColorMatch+",
"_meta": {
"title": "🔧 Image Color Match"
}
},
"149": {
"inputs": {
"sharpen_radius": 1,
"sigma": 1,
"alpha": 0.05,
"image": [
"148",
0
]
},
"class_type": "ImageSharpen",
"_meta": {
"title": "Image Sharpen"
}
},
"154": {
"inputs": {
"filename_prefix": "Upscaled",
"images": [
"149",
0
]
},
"class_type": "SaveImage",
"_meta": {
"title": "Save Image"
}
},
"165": {
"inputs": {
"image": "model_outfit_location_handbag1_1760092227085.png"
},
"class_type": "LoadImage",
"_meta": {
"title": "Load Image"
}
},
"166": {
"inputs": {
"filename_prefix": "upscaled",
"images": [
"149",
0
]
},
"class_type": "SaveImage",
"_meta": {
"title": "Save Image"
}
}
}

View File

@ -0,0 +1,425 @@
{
"1": {
"inputs": {
"unet_name": "qwen_image_edit_2509_fp8_e4m3fn.safetensors",
"weight_dtype": "default"
},
"class_type": "UNETLoader",
"_meta": {
"title": "Load Diffusion Model"
}
},
"2": {
"inputs": {
"clip_name": "qwen_2.5_vl_7b_fp8_scaled.safetensors",
"type": "qwen_image",
"device": "default"
},
"class_type": "CLIPLoader",
"_meta": {
"title": "Load CLIP"
}
},
"3": {
"inputs": {
"vae_name": "qwen_image_vae.safetensors"
},
"class_type": "VAELoader",
"_meta": {
"title": "Load VAE"
}
},
"4": {
"inputs": {
"lora_name": "Qwen-Image-Lightning-8steps-V2.0.safetensors",
"strength_model": 1,
"model": [
"1",
0
]
},
"class_type": "LoraLoaderModelOnly",
"_meta": {
"title": "LoraLoaderModelOnly"
}
},
"5": {
"inputs": {
"conditioning": [
"11",
0
]
},
"class_type": "ConditioningZeroOut",
"_meta": {
"title": "ConditioningZeroOut"
}
},
"7": {
"inputs": {
"seed": 559577834683401,
"steps": 8,
"cfg": 1,
"sampler_name": "euler",
"scheduler": "beta",
"denoise": 1,
"model": [
"66",
0
],
"positive": [
"11",
0
],
"negative": [
"5",
0
],
"latent_image": [
"11",
6
]
},
"class_type": "KSampler",
"_meta": {
"title": "KSampler"
}
},
"8": {
"inputs": {
"samples": [
"7",
0
],
"vae": [
"3",
0
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"11": {
"inputs": {
"prompt": [
"21",
0
],
"enable_resize": false,
"enable_vl_resize": false,
"upscale_method": "lanczos",
"crop": "disabled",
"instruction": "<|im_start|>system\nDescribe the key features of the input image (color, shape, size, texture, objects, background), then explain how the user's text instruction should alter or modify the image. Generate a new image that meets the user's requirements while maintaining consistency with the original input where appropriate.<|im_end|>\n<|im_start|>user\n{}<|im_end|>\n<|im_start|>assistant\n",
"clip": [
"2",
0
],
"vae": [
"3",
0
],
"image1": [
"84",
0
],
"image2": [
"82",
0
],
"image3": [
"81",
0
]
},
"class_type": "TextEncodeQwenImageEditPlus_lrzjason",
"_meta": {
"title": "TextEncodeQwenImageEditPlus 小志Jason(xiaozhijason)"
}
},
"15": {
"inputs": {
"image": "Allison_body (1).png"
},
"class_type": "LoadImage",
"_meta": {
"title": "Load Image"
}
},
"20": {
"inputs": {
"filename_prefix": "qwenedit",
"images": [
"8",
0
]
},
"class_type": "SaveImage",
"_meta": {
"title": "Save Image"
}
},
"21": {
"inputs": {
"value": "图2中的女孩穿着图1的衣服并以图3的姿势站立。背景保持浅灰色。"
},
"class_type": "PrimitiveStringMultiline",
"_meta": {
"title": "String (Multiline)"
}
},
"64": {
"inputs": {
"image": "cloth_0111.png"
},
"class_type": "LoadImage",
"_meta": {
"title": "Load Image"
}
},
"66": {
"inputs": {
"lora_name": "extract-outfit_v3.safetensors",
"strength_model": 1,
"model": [
"4",
0
]
},
"class_type": "LoraLoaderModelOnly",
"_meta": {
"title": "LoraLoaderModelOnly"
}
},
"67": {
"inputs": {
"detect_hand": "enable",
"detect_body": "enable",
"detect_face": "enable",
"resolution": 512,
"bbox_detector": "yolox_l.onnx",
"pose_estimator": "dw-ll_ucoco_384_bs5.torchscript.pt",
"scale_stick_for_xinsr_cn": "disable",
"image": [
"68",
0
]
},
"class_type": "DWPreprocessor",
"_meta": {
"title": "DWPose Estimator"
}
},
"68": {
"inputs": {
"image": "633387441703331_1758877367350_1.png"
},
"class_type": "LoadImage",
"_meta": {
"title": "Load Image"
}
},
"69": {
"inputs": {
"images": [
"81",
0
]
},
"class_type": "PreviewImage",
"_meta": {
"title": "Preview Image"
}
},
"76": {
"inputs": {
"number": 720
},
"class_type": "StaticNumberInt",
"_meta": {
"title": "Static Number Int"
}
},
"77": {
"inputs": {
"number": 1280
},
"class_type": "StaticNumberInt",
"_meta": {
"title": "Static Number Int"
}
},
"78": {
"inputs": {
"width": [
"76",
0
],
"height": [
"77",
0
],
"batch_size": 1
},
"class_type": "EmptyLatentImage",
"_meta": {
"title": "Empty Latent Image"
}
},
"81": {
"inputs": {
"width": 480,
"height": 962,
"upscale_method": "nearest-exact",
"keep_proportion": "pad",
"pad_color": "0, 0, 0",
"crop_position": "center",
"divisible_by": 2,
"device": "cpu",
"image": [
"67",
0
]
},
"class_type": "ImageResizeKJv2",
"_meta": {
"title": "Resize Image v2"
}
},
"82": {
"inputs": {
"width": [
"76",
0
],
"height": [
"77",
0
],
"upscale_method": "nearest-exact",
"keep_proportion": "crop",
"pad_color": "255,255,255",
"crop_position": "center",
"divisible_by": 2,
"device": "cpu",
"image": [
"15",
0
]
},
"class_type": "ImageResizeKJv2",
"_meta": {
"title": "Resize Image v2"
}
},
"83": {
"inputs": {
"images": [
"82",
0
]
},
"class_type": "PreviewImage",
"_meta": {
"title": "Preview Image"
}
},
"84": {
"inputs": {
"width": [
"76",
0
],
"height": [
"77",
0
],
"upscale_method": "nearest-exact",
"keep_proportion": "pad",
"pad_color": "0, 0, 0",
"crop_position": "center",
"divisible_by": 2,
"device": "cpu",
"image": [
"64",
0
]
},
"class_type": "ImageResizeKJv2",
"_meta": {
"title": "Resize Image v2"
}
},
"85": {
"inputs": {
"images": [
"84",
0
]
},
"class_type": "PreviewImage",
"_meta": {
"title": "Preview Image"
}
},
"86": {
"inputs": {
"image1_text": "image1",
"image2_text": "image2",
"image3_text": "image3",
"image4_text": "image4",
"reel_height": 512,
"border": 32,
"image1": [
"15",
0
],
"image2": [
"64",
0
],
"image3": [
"81",
0
],
"image4": [
"8",
0
]
},
"class_type": "LayerUtility: ImageReel",
"_meta": {
"title": "LayerUtility: Image Reel"
}
},
"87": {
"inputs": {
"font_file": "Alibaba-PuHuiTi-Heavy.ttf",
"font_size": 40,
"border": 32,
"color_theme": "light",
"reel_1": [
"86",
0
]
},
"class_type": "LayerUtility: ImageReelComposit",
"_meta": {
"title": "LayerUtility: Image Reel Composit"
}
},
"88": {
"inputs": {
"filename_prefix": "vtonresult/vton",
"images": [
"87",
0
]
},
"class_type": "SaveImage",
"_meta": {
"title": "Save Image"
}
}
}

View File

@ -0,0 +1,357 @@
{
"1": {
"inputs": {
"unet_name": "qwen_image_edit_2509_fp8_e4m3fn.safetensors",
"weight_dtype": "default"
},
"class_type": "UNETLoader",
"_meta": {
"title": "Load Diffusion Model"
}
},
"2": {
"inputs": {
"clip_name": "qwen_2.5_vl_7b_fp8_scaled.safetensors",
"type": "qwen_image",
"device": "default"
},
"class_type": "CLIPLoader",
"_meta": {
"title": "Load CLIP"
}
},
"3": {
"inputs": {
"vae_name": "qwen_image_vae.safetensors"
},
"class_type": "VAELoader",
"_meta": {
"title": "Load VAE"
}
},
"4": {
"inputs": {
"lora_name": "Qwen-Image-Lightning-8steps-V2.0.safetensors",
"strength_model": 1,
"model": [
"1",
0
]
},
"class_type": "LoraLoaderModelOnly",
"_meta": {
"title": "LoraLoaderModelOnly"
}
},
"5": {
"inputs": {
"conditioning": [
"11",
0
]
},
"class_type": "ConditioningZeroOut",
"_meta": {
"title": "ConditioningZeroOut"
}
},
"7": {
"inputs": {
"seed": 559577834683401,
"steps": 8,
"cfg": 1,
"sampler_name": "euler",
"scheduler": "beta",
"denoise": 1,
"model": [
"66",
0
],
"positive": [
"11",
0
],
"negative": [
"5",
0
],
"latent_image": [
"11",
6
]
},
"class_type": "KSampler",
"_meta": {
"title": "KSampler"
}
},
"8": {
"inputs": {
"samples": [
"7",
0
],
"vae": [
"3",
0
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"11": {
"inputs": {
"prompt": [
"21",
0
],
"enable_resize": false,
"enable_vl_resize": false,
"upscale_method": "lanczos",
"crop": "disabled",
"instruction": "<|im_start|>system\nDescribe the key features of the input image (color, shape, size, texture, objects, background), then explain how the user's text instruction should alter or modify the image. Generate a new image that meets the user's requirements while maintaining consistency with the original input where appropriate.<|im_end|>\n<|im_start|>user\n{}<|im_end|>\n<|im_start|>assistant\n",
"clip": [
"2",
0
],
"vae": [
"3",
0
],
"image1": [
"84",
0
],
"image2": [
"82",
0
]
},
"class_type": "TextEncodeQwenImageEditPlus_lrzjason",
"_meta": {
"title": "TextEncodeQwenImageEditPlus 小志Jason(xiaozhijason)"
}
},
"15": {
"inputs": {
"image": "Allison_body (1).png"
},
"class_type": "LoadImage",
"_meta": {
"title": "Load Image"
}
},
"20": {
"inputs": {
"filename_prefix": "qwenedit",
"images": [
"8",
0
]
},
"class_type": "SaveImage",
"_meta": {
"title": "Save Image"
}
},
"21": {
"inputs": {
"value": "图2中的人物穿着图1的上衣、下装和配饰。"
},
"class_type": "PrimitiveStringMultiline",
"_meta": {
"title": "String (Multiline)"
}
},
"64": {
"inputs": {
"image": "cloth_0111.png"
},
"class_type": "LoadImage",
"_meta": {
"title": "Load Image"
}
},
"66": {
"inputs": {
"lora_name": "extract-outfit_v3.safetensors",
"strength_model": 1,
"model": [
"4",
0
]
},
"class_type": "LoraLoaderModelOnly",
"_meta": {
"title": "LoraLoaderModelOnly"
}
},
"76": {
"inputs": {
"number": 720
},
"class_type": "StaticNumberInt",
"_meta": {
"title": "Static Number Int"
}
},
"77": {
"inputs": {
"number": 1280
},
"class_type": "StaticNumberInt",
"_meta": {
"title": "Static Number Int"
}
},
"78": {
"inputs": {
"width": [
"76",
0
],
"height": [
"77",
0
],
"batch_size": 1
},
"class_type": "EmptyLatentImage",
"_meta": {
"title": "Empty Latent Image"
}
},
"82": {
"inputs": {
"width": [
"76",
0
],
"height": [
"77",
0
],
"upscale_method": "nearest-exact",
"keep_proportion": "crop",
"pad_color": "255,255,255",
"crop_position": "center",
"divisible_by": 2,
"device": "cpu",
"image": [
"15",
0
]
},
"class_type": "ImageResizeKJv2",
"_meta": {
"title": "Resize Image v2"
}
},
"83": {
"inputs": {
"images": [
"82",
0
]
},
"class_type": "PreviewImage",
"_meta": {
"title": "Preview Image"
}
},
"84": {
"inputs": {
"width": [
"76",
0
],
"height": [
"77",
0
],
"upscale_method": "nearest-exact",
"keep_proportion": "pad",
"pad_color": "0, 0, 0",
"crop_position": "center",
"divisible_by": 2,
"device": "cpu",
"image": [
"64",
0
]
},
"class_type": "ImageResizeKJv2",
"_meta": {
"title": "Resize Image v2"
}
},
"85": {
"inputs": {
"images": [
"84",
0
]
},
"class_type": "PreviewImage",
"_meta": {
"title": "Preview Image"
}
},
"86": {
"inputs": {
"image1_text": "image1",
"image2_text": "image2",
"image3_text": "image3",
"image4_text": "image4",
"reel_height": 512,
"border": 32,
"image1": [
"15",
0
],
"image2": [
"64",
0
],
"image3": [
"8",
0
]
},
"class_type": "LayerUtility: ImageReel",
"_meta": {
"title": "LayerUtility: Image Reel"
}
},
"87": {
"inputs": {
"font_file": "Alibaba-PuHuiTi-Heavy.ttf",
"font_size": 40,
"border": 32,
"color_theme": "light",
"reel_1": [
"86",
0
]
},
"class_type": "LayerUtility: ImageReelComposit",
"_meta": {
"title": "LayerUtility: Image Reel Composit"
}
},
"88": {
"inputs": {
"filename_prefix": "vtonresult/vton",
"images": [
"87",
0
]
},
"class_type": "SaveImage",
"_meta": {
"title": "Save Image"
}
}
}

View File

@ -15,17 +15,27 @@ async function convertImage(
baseFileName: string,
comfyBaseUrl: string,
comfyOutputDir: string,
size: ImageSize = { width: 720, height: 1280 }
size: ImageSize = { width: 720, height: 1280 },
useEmpltyLatent: boolean = false
): Promise<string> {
const COMFY_BASE_URL = comfyBaseUrl.replace(/\/$/, '');
const COMFY_OUTPUT_DIR = comfyOutputDir;
let workflow;
workflow = JSON.parse(await fs.readFile('src/comfyworkflows/edit_image_qwen.json', 'utf-8'));
workflow['21']['inputs']['value'] = prompt;
workflow['24']['inputs']['width'] = size.width;
workflow['24']['inputs']['height'] = size.height;
workflow['64']['inputs']['image'] = baseFileName;
if (useEmpltyLatent) {
workflow = JSON.parse(await fs.readFile('src/comfyworkflows/edit_image_2_qwen_empty.json', 'utf-8'));
workflow['21']['inputs']['value'] = prompt;
workflow['30']['inputs']['width'] = size.width;
workflow['31']['inputs']['height'] = size.height;
workflow['14']['inputs']['image'] = baseFileName;
} else {
workflow = JSON.parse(await fs.readFile('src/comfyworkflows/edit_image_qwen.json', 'utf-8'));
workflow['21']['inputs']['value'] = prompt;
workflow['23']['inputs']['width'] = size.width;
workflow['23']['inputs']['height'] = size.height;
workflow['14']['inputs']['image'] = baseFileName;
}
const response = await axios.post(`${COMFY_BASE_URL}/prompt`, { prompt: workflow });
const promptId = response.data.prompt_id;
@ -66,7 +76,8 @@ async function convertImage(
return newFilePath;
}
// baseFileName is connected to image2
// secondFileName is connected to image1
async function convertImageWithFile(
prompt: string,
baseFileName: string,
@ -81,10 +92,10 @@ async function convertImageWithFile(
workflow = JSON.parse(await fs.readFile('src/comfyworkflows/edit_image_2_qwen.json', 'utf-8'));
workflow['21']['inputs']['value'] = prompt;
workflow['24']['inputs']['width'] = size.width;
workflow['24']['inputs']['height'] = size.height;
workflow['64']['inputs']['image'] = baseFileName;
workflow['15']['inputs']['image'] = secondFileName;
workflow['31']['inputs']['Value'] = size.width;
workflow['32']['inputs']['Value'] = size.height;
workflow['14']['inputs']['image'] = baseFileName;
workflow['29']['inputs']['image'] = secondFileName;
const response = await axios.post(`${COMFY_BASE_URL}/prompt`, { prompt: workflow });
const promptId = response.data.prompt_id;
@ -97,7 +108,128 @@ async function convertImageWithFile(
} while (!history || Object.keys(history.outputs).length === 0);
const files = await fs.readdir(COMFY_OUTPUT_DIR!);
const generatedFiles = files.filter(file => file.startsWith('qwenedit'));
const generatedFiles = files.filter(file => file.startsWith('combined'));
const fileStats = await Promise.all(
generatedFiles.map(async (file) => {
const stat = await fs.stat(path.join(COMFY_OUTPUT_DIR!, file));
return { file, mtime: stat.mtime };
})
);
fileStats.sort((a, b) => b.mtime.getTime() - a.mtime.getTime());
const latestFile = fileStats[0].file;
const newFilePath = path.resolve('./generated', baseFileName);
await fs.mkdir('./generated', { recursive: true });
const sourcePath = path.join(COMFY_OUTPUT_DIR!, latestFile);
try {
await fs.unlink(newFilePath);
} catch (err) {
// ignore if not exists
}
await fs.copyFile(sourcePath, newFilePath);
return newFilePath;
}
// basefilename is connected to image1
// sencondfilename is connect to image2
async function convertImageWithFileHandbag(
prompt: string,
baseFileName: string,
secondFileName: string,
comfyBaseUrl: string,
comfyOutputDir: string,
size: ImageSize = { width: 720, height: 1280 }
): Promise<string> {
const COMFY_BASE_URL = comfyBaseUrl.replace(/\/$/, '');
const COMFY_OUTPUT_DIR = comfyOutputDir;
let workflow;
workflow = JSON.parse(await fs.readFile('src/comfyworkflows/edit_image_2_qwen_handbag.json', 'utf-8'));
workflow['21']['inputs']['value'] = prompt;
workflow['31']['inputs']['Value'] = size.width;
workflow['32']['inputs']['Value'] = size.height;
workflow['14']['inputs']['image'] = baseFileName;
workflow['29']['inputs']['image'] = secondFileName;
const response = await axios.post(`${COMFY_BASE_URL}/prompt`, { prompt: workflow });
const promptId = response.data.prompt_id;
let history;
do {
await new Promise(resolve => setTimeout(resolve, 1000));
const historyResponse = await axios.get(`${COMFY_BASE_URL}/history/${promptId}`);
history = historyResponse.data[promptId];
} while (!history || Object.keys(history.outputs).length === 0);
const files = await fs.readdir(COMFY_OUTPUT_DIR!);
const generatedFiles = files.filter(file => file.startsWith('combined'));
const fileStats = await Promise.all(
generatedFiles.map(async (file) => {
const stat = await fs.stat(path.join(COMFY_OUTPUT_DIR!, file));
return { file, mtime: stat.mtime };
})
);
fileStats.sort((a, b) => b.mtime.getTime() - a.mtime.getTime());
const latestFile = fileStats[0].file;
const newFilePath = path.resolve('./generated', baseFileName);
await fs.mkdir('./generated', { recursive: true });
const sourcePath = path.join(COMFY_OUTPUT_DIR!, latestFile);
try {
await fs.unlink(newFilePath);
} catch (err) {
// ignore if not exists
}
await fs.copyFile(sourcePath, newFilePath);
return newFilePath;
}
// baseFileName is connected to image1
// secondFileName is connected to image2
async function convertImageWithFileForPose(
prompt: string,
baseFileName: string,
secondFileName: string,
comfyBaseUrl: string,
comfyOutputDir: string,
size: ImageSize = { width: 720, height: 1280 }
): Promise<string> {
const COMFY_BASE_URL = comfyBaseUrl.replace(/\/$/, '');
const COMFY_OUTPUT_DIR = comfyOutputDir;
let workflow;
workflow = JSON.parse(await fs.readFile('src/comfyworkflows/edit_image_2_qwen_pose.json', 'utf-8'));
workflow['21']['inputs']['value'] = prompt;
workflow['31']['inputs']['Value'] = size.width;
workflow['32']['inputs']['Value'] = size.height;
workflow['14']['inputs']['image'] = baseFileName;
workflow['29']['inputs']['image'] = secondFileName;
const response = await axios.post(`${COMFY_BASE_URL}/prompt`, { prompt: workflow });
const promptId = response.data.prompt_id;
let history;
do {
await new Promise(resolve => setTimeout(resolve, 1000));
const historyResponse = await axios.get(`${COMFY_BASE_URL}/history/${promptId}`);
history = historyResponse.data[promptId];
} while (!history || Object.keys(history.outputs).length === 0);
const files = await fs.readdir(COMFY_OUTPUT_DIR!);
const generatedFiles = files.filter(file => file.startsWith('combined'));
const fileStats = await Promise.all(
generatedFiles.map(async (file) => {
@ -251,7 +383,8 @@ export async function convertImageWithMultipleFile(
export async function convertImageVton(
srcFiles: string[],
personFile: string,
clothFile: string,
outputFile: string,
comfyBaseUrl: string,
comfyOutputDir: string,
@ -261,15 +394,12 @@ export async function convertImageVton(
const COMFY_OUTPUT_DIR = comfyOutputDir;
let workflow;
workflow = JSON.parse(await fs.readFile('src/comfyworkflows/edit_image_vton.json', 'utf-8'));
workflow = JSON.parse(await fs.readFile('src/comfyworkflows/vton_cloth.json', 'utf-8'));
workflow['76']['inputs']['number'] = size.width;
workflow['77']['inputs']['number'] = size.height;
if (srcFiles[0])
workflow['64']['inputs']['image'] = srcFiles[0];
if (srcFiles[1])
workflow['15']['inputs']['image'] = srcFiles[1];
workflow['15']['inputs']['image'] = personFile;
workflow['64']['inputs']['image'] = clothFile;
const response = await axios.post(`${COMFY_BASE_URL}/prompt`, { prompt: workflow });
const promptId = response.data.prompt_id;
@ -310,4 +440,65 @@ export async function convertImageVton(
return newFilePath;
}
export { convertImage, convertImageWithFile };
export async function convertImageVtonPose(
personFile: string,
clothFile: string,
poseFile: string,
outputFile: string,
comfyBaseUrl: string,
comfyOutputDir: string,
size: ImageSize = { width: 720, height: 1280 }
): Promise<string> {
const COMFY_BASE_URL = comfyBaseUrl.replace(/\/$/, '');
const COMFY_OUTPUT_DIR = comfyOutputDir;
let workflow;
workflow = JSON.parse(await fs.readFile('src/comfyworkflows/vton.json', 'utf-8'));
workflow['76']['inputs']['number'] = size.width;
workflow['77']['inputs']['number'] = size.height;
workflow['15']['inputs']['image'] = personFile;
workflow['64']['inputs']['image'] = clothFile;
workflow['68']['inputs']['image'] = poseFile;
const response = await axios.post(`${COMFY_BASE_URL}/prompt`, { prompt: workflow });
const promptId = response.data.prompt_id;
let history;
do {
await new Promise(resolve => setTimeout(resolve, 1000));
const historyResponse = await axios.get(`${COMFY_BASE_URL}/history/${promptId}`);
history = historyResponse.data[promptId];
} while (!history || Object.keys(history.outputs).length === 0);
const files = await fs.readdir(COMFY_OUTPUT_DIR!);
const generatedFiles = files.filter(file => file.startsWith('qwenedit'));
const fileStats = await Promise.all(
generatedFiles.map(async (file) => {
const stat = await fs.stat(path.join(COMFY_OUTPUT_DIR!, file));
return { file, mtime: stat.mtime };
})
);
fileStats.sort((a, b) => b.mtime.getTime() - a.mtime.getTime());
const latestFile = fileStats[0].file;
const newFilePath = path.resolve('./generated', outputFile);
await fs.mkdir('./generated', { recursive: true });
const sourcePath = path.join(COMFY_OUTPUT_DIR!, latestFile);
try {
await fs.unlink(newFilePath);
} catch (err) {
// ignore if not exists
}
await fs.copyFile(sourcePath, newFilePath);
return newFilePath;
}
export { convertImage, convertImageWithFile, convertImageWithFileForPose, convertImageWithFileHandbag };

View File

@ -30,7 +30,7 @@ async function generateImage(
workflow['13']['inputs']['image'] = imageName1;
// Set image name
//workflow['16']['inputs']['image'] = imageName2;
workflow['16']['inputs']['image'] = imageName2;
workflow['3']['inputs']['width'] = size.width;
workflow['3']['inputs']['height'] = size.height;

View File

@ -0,0 +1,78 @@
import * as fs from 'fs/promises';
import * as path from 'path';
import axios from 'axios';
import dotenv from 'dotenv';
dotenv.config();
// Target output dimensions, in pixels.
interface ImageSize {
  width: number;
  height: number;
}
async function generateImage(
prompt: string,
faceImage: string,
styleImage: string,
newFileName: string,
comfyBaseUrl: string,
comfyOutputDir: string,
size: ImageSize = { width: 720, height: 1280 }
): Promise<string> {
const COMFY_BASE_URL = comfyBaseUrl.replace(/\/$/, '');
const COMFY_OUTPUT_DIR = comfyOutputDir;
let workflow;
workflow = JSON.parse(await fs.readFile('src/comfyworkflows/generate_image_style_faceswap.json', 'utf-8'));
workflow['14']['inputs']['text'] = prompt;
// Set image name
workflow['13']['inputs']['image'] = styleImage;
// Set image name
workflow['19']['inputs']['image'] = faceImage;
workflow['3']['inputs']['width'] = size.width;
workflow['3']['inputs']['height'] = size.height;
const response = await axios.post(`${COMFY_BASE_URL}/prompt`, { prompt: workflow });
const promptId = response.data.prompt_id;
let history;
do {
await new Promise(resolve => setTimeout(resolve, 1000));
const historyResponse = await axios.get(`${COMFY_BASE_URL}/history/${promptId}`);
history = historyResponse.data[promptId];
} while (!history || Object.keys(history.outputs).length === 0);
const files = await fs.readdir(COMFY_OUTPUT_DIR!);
const generatedFiles = files.filter(file => file.startsWith('STYLEDVIDEOMAKER'));
const fileStats = await Promise.all(
generatedFiles.map(async (file) => {
const stat = await fs.stat(path.join(COMFY_OUTPUT_DIR!, file));
return { file, mtime: stat.mtime };
})
);
fileStats.sort((a, b) => b.mtime.getTime() - a.mtime.getTime());
const latestFile = fileStats[0].file;
const newFilePath = path.resolve('./generated', newFileName);
await fs.mkdir('./generated', { recursive: true });
const sourcePath = path.join(COMFY_OUTPUT_DIR!, latestFile);
try {
await fs.unlink(newFilePath);
} catch (err) {
// ignore if not exists
}
await fs.copyFile(sourcePath, newFilePath);
return newFilePath;
}
export { generateImage };

118
src/lib/image-upscaler.ts Normal file
View File

@ -0,0 +1,118 @@
import * as fs from 'fs/promises';
import * as path from 'path';
import axios from 'axios';
import dotenv from 'dotenv';
dotenv.config();
// Target output dimensions, in pixels.
// NOTE(review): currently unused in this file — both upscale functions
// take no size parameter; confirm whether it can be removed.
interface ImageSize {
  width: number;
  height: number;
}
async function facerestore_upscale(
baseFileName: string,
faceReferenceName: string,
comfyBaseUrl: string,
comfyOutputDir: string,
): Promise<string> {
const COMFY_BASE_URL = comfyBaseUrl.replace(/\/$/, '');
const COMFY_OUTPUT_DIR = comfyOutputDir;
let workflow;
workflow = JSON.parse(await fs.readFile('src/comfyworkflows/facerestore_upscale.json', 'utf-8'));
workflow['1']['inputs']['image'] = baseFileName;
workflow['3']['inputs']['image'] = faceReferenceName;
const response = await axios.post(`${COMFY_BASE_URL}/prompt`, { prompt: workflow });
const promptId = response.data.prompt_id;
let history;
do {
await new Promise(resolve => setTimeout(resolve, 1000));
const historyResponse = await axios.get(`${COMFY_BASE_URL}/history/${promptId}`);
history = historyResponse.data[promptId];
} while (!history || Object.keys(history.outputs).length === 0);
const files = await fs.readdir(COMFY_OUTPUT_DIR!);
const generatedFiles = files.filter(file => file.startsWith('upscaled'));
const fileStats = await Promise.all(
generatedFiles.map(async (file) => {
const stat = await fs.stat(path.join(COMFY_OUTPUT_DIR!, file));
return { file, mtime: stat.mtime };
})
);
fileStats.sort((a, b) => b.mtime.getTime() - a.mtime.getTime());
const latestFile = fileStats[0].file;
const newFilePath = path.resolve('./generated', baseFileName);
await fs.mkdir('./generated', { recursive: true });
const sourcePath = path.join(COMFY_OUTPUT_DIR!, latestFile);
try {
await fs.unlink(newFilePath);
} catch (err) {
// ignore if not exists
}
await fs.copyFile(sourcePath, newFilePath);
return newFilePath;
}
async function upscale(
baseFileName: string,
comfyBaseUrl: string,
comfyOutputDir: string,
): Promise<string> {
const COMFY_BASE_URL = comfyBaseUrl.replace(/\/$/, '');
const COMFY_OUTPUT_DIR = comfyOutputDir;
let workflow;
workflow = JSON.parse(await fs.readFile('src/comfyworkflows/upscale.json', 'utf-8'));
workflow['111']['inputs']['image'] = baseFileName;
const response = await axios.post(`${COMFY_BASE_URL}/prompt`, { prompt: workflow });
const promptId = response.data.prompt_id;
let history;
do {
await new Promise(resolve => setTimeout(resolve, 1000));
const historyResponse = await axios.get(`${COMFY_BASE_URL}/history/${promptId}`);
history = historyResponse.data[promptId];
} while (!history || Object.keys(history.outputs).length === 0);
const files = await fs.readdir(COMFY_OUTPUT_DIR!);
const generatedFiles = files.filter(file => file.startsWith('upscaled'));
const fileStats = await Promise.all(
generatedFiles.map(async (file) => {
const stat = await fs.stat(path.join(COMFY_OUTPUT_DIR!, file));
return { file, mtime: stat.mtime };
})
);
fileStats.sort((a, b) => b.mtime.getTime() - a.mtime.getTime());
const latestFile = fileStats[0].file;
const newFilePath = path.resolve('./generated', baseFileName);
await fs.mkdir('./generated', { recursive: true });
const sourcePath = path.join(COMFY_OUTPUT_DIR!, latestFile);
try {
await fs.unlink(newFilePath);
} catch (err) {
// ignore if not exists
}
await fs.copyFile(sourcePath, newFilePath);
return newFilePath;
}
export { facerestore_upscale, upscale };

View File

@ -108,6 +108,7 @@ async function callLMStudioAPIWithFile(imagePath: string, prompt: string): Promi
return JSON.parse(arrayMatch[0]);
}
}
return content;
} else {
logger.error('Unexpected API response:', data);
}

View File

@ -36,6 +36,81 @@ export async function getPinUrlFromPinterest(keyword: string): Promise<string |
}
}
export async function downloadImagesFromPinterestSearch(keyword: string, count: number): Promise<string[]> {
const browser = await puppeteer.launch({ headless: false });
const page = await browser.newPage();
await page.setUserAgent('Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36');
await page.setViewport({ width: 1920, height: 1080 });
try {
const searchUrl = `https://www.pinterest.com/search/pins/?q=${encodeURIComponent(keyword)}`;
await page.goto(searchUrl, { waitUntil: 'networkidle2' });
logger.info(`Scrolling 3 times...`);
for (let i = 0; i < 3; i++) {
await page.evaluate('window.scrollTo(0, document.body.scrollHeight)');
await new Promise(resolve => setTimeout(resolve, Math.random() * 1000 + 1000));
}
const imageUrls = await page.$$eval('img', (imgs) => {
const urls: string[] = imgs.map(img => {
const srcset = img.getAttribute('srcset') || '';
if (!srcset) return '';
const parts = srcset.split(',').map(p => p.trim());
for (const part of parts) {
const m = part.match(/^(\S+)\s+4x$/);
if (m && m[1]) return m[1];
}
const src = img.src || '';
if (src.includes('/originals/')) return src;
return '';
}).filter(s => !!s && s.includes('pinimg'));
// Remove duplicates
return [...new Set(urls)];
});
if (imageUrls.length === 0) {
logger.warn(`No 4x image URLs found for keyword "${keyword}"`);
return [];
}
// shuffle and pick up to `count` unique images
const shuffled = imageUrls.slice().sort(() => 0.5 - Math.random());
const chosen = shuffled.slice(0, Math.min(count, shuffled.length));
const outDir = path.join(process.cwd(), 'download');
await fs.mkdir(outDir, { recursive: true });
const results: string[] = [];
for (let i = 0; i < chosen.length; i++) {
const src = chosen[i];
try {
const imgPage = await browser.newPage();
const resp = await imgPage.goto(src, { timeout: 30000, waitUntil: 'networkidle2' });
if (!resp) {
logger.warn(`Failed to fetch image ${src}`);
await imgPage.close();
continue;
}
const buffer = await resp.buffer();
const timestamp = Date.now();
const outPath = path.join(outDir, `${keyword.replace(/\s+/g, '_')}_${timestamp}_${i}.png`);
await fs.writeFile(outPath, buffer);
results.push(outPath);
await imgPage.close();
} catch (err) {
logger.error(`Failed to download image ${src}:`, err);
}
}
return results;
} catch (error) {
logger.error(`Error while downloading images for keyword "${keyword}":`, error);
return [];
} finally {
await browser.close();
}
}
// Download up to `count` images from a pin URL by opening the pin page and scrolling up to 5 times to trigger lazy loading
// Returns an array of saved image paths (may be empty)

67
src/lib/util.ts Normal file
View File

@ -0,0 +1,67 @@
// png-json-metadata.ts
import * as fs from "fs";
import extract from "png-chunks-extract";
import encodeChunks from "png-chunks-encode";
import * as textChunk from "png-chunk-text";
type PngChunk = { name: string; data: Uint8Array };
/**
 * Embeds JSON into a PNG as Base64 (tEXt chunk, keyword: "json-b64").
 * - The JSON is UTF-8 encoded and then Base64'd so the payload is pure ASCII
 *   (works around the Latin-1 restriction of tEXt chunks).
 * - Any existing "json-b64" tEXt chunk is replaced (avoids duplicates).
 *
 * The file at `path` is rewritten in place.
 *
 * @param path file-system path of the PNG to modify
 * @param obj  any JSON-serializable value to embed
 * @throws if the PNG has no IEND chunk (invalid PNG)
 */
export async function embedJsonToPng(path: string, obj: unknown): Promise<void> {
  const input = fs.readFileSync(path);
  const chunks = extract(input) as PngChunk[];
  // Drop any existing "json-b64" tEXt chunk so we never write duplicates.
  const filtered: PngChunk[] = chunks.filter((c) => {
    if (c.name !== "tEXt") return true;
    try {
      const decoded = textChunk.decode(c.data); // { keyword, text }
      return decoded.keyword !== "json-b64";
    } catch {
      // Keep chunks that fail to decode (possibly a different format).
      return true;
    }
  });
  const json = JSON.stringify(obj);
  const b64 = Buffer.from(json, "utf8").toString("base64"); // ASCII only
  // encode() returns { name: 'tEXt', data: Uint8Array }
  const newChunk = textChunk.encode("json-b64", b64) as PngChunk;
  // Insert just before IEND to keep the required PNG chunk ordering.
  const iendIndex = filtered.findIndex((c) => c.name === "IEND");
  if (iendIndex < 0) {
    throw new Error("Invalid PNG: missing IEND chunk.");
  }
  filtered.splice(iendIndex, 0, newChunk);
  const out = Buffer.from(encodeChunks(filtered));
  fs.writeFileSync(path, out);
}
/**
 * Reads back the Base64-encoded JSON (tEXt chunk, keyword: "json-b64")
 * previously embedded by embedJsonToPng.
 *
 * @param path file-system path of the PNG to read
 * @returns the parsed JSON value
 * @throws if no "json-b64" tEXt chunk is found
 */
export async function readJsonToPng(path: string): Promise<any> {
  const input = fs.readFileSync(path);
  const chunks = extract(input) as PngChunk[];
  for (const c of chunks) {
    if (c.name !== "tEXt") continue;
    try {
      const { keyword, text } = textChunk.decode(c.data);
      if (keyword === "json-b64") {
        const json = Buffer.from(text, "base64").toString("utf8");
        return JSON.parse(json);
      }
    } catch {
      // Ignore other tEXt entries and corrupted chunks.
    }
  }
  throw new Error("No base64 JSON found in PNG (tEXt keyword 'json-b64').");
}

View File

@ -0,0 +1,78 @@
import * as fs from 'fs';
import * as path from 'path';
import { convertImageWithFile } from '../../lib/image-converter';
import dotenv from 'dotenv';
dotenv.config();

// Source pools: one random girl photo is composited among the monsters of
// one random monster photo; results accumulate in generated/.
const girlDir = path.join(__dirname, '../../../input/girl');
const monsterDir = path.join(__dirname, '../../../input/monster');
const outputDir = path.join(__dirname, '../../../generated');
// Prompt (Chinese): extract the girl from image 1 and place her among the monsters of image 2.
const prompt = "只提取图1中的女生把她放在图2的怪物之间。";

if (!fs.existsSync(outputDir)) {
  fs.mkdirSync(outputDir, { recursive: true });
}

const comfyBaseUrl = process.env.SERVER2_COMFY_BASE_URL;
const comfyOutputDir = process.env.SERVER2_COMFY_OUTPUT_DIR;
if (!comfyBaseUrl || !comfyOutputDir) {
  // Fixed: this message previously named the SERVER1_* variables although the
  // code reads SERVER2_*, which sent users editing the wrong .env keys.
  console.error("Please define SERVER2_COMFY_BASE_URL and SERVER2_COMFY_OUTPUT_DIR in your .env file");
  process.exit(1);
}

// ComfyUI picks up source images from the "input" sibling of its output dir.
// NOTE(review): assumes the configured path contains the literal segment "output".
const comfyInputDir = comfyOutputDir.replace("output", "input");
if (!fs.existsSync(comfyInputDir)) {
  fs.mkdirSync(comfyInputDir, { recursive: true });
}

/**
 * Endless worker loop: pick one random image from each pool, stage both in
 * the ComfyUI input dir, run the combine workflow, and store the result as
 * combined_<timestamp>.png. Sleeps 5 s between iterations and when either
 * input pool is empty; per-iteration errors are logged and the loop goes on.
 */
async function combineImages() {
  while (true) {
    try {
      const girlImages = fs.readdirSync(girlDir).filter(file => /\.(jpg|jpeg|png)$/i.test(file));
      const monsterImages = fs.readdirSync(monsterDir).filter(file => /\.(jpg|jpeg|png)$/i.test(file));
      if (girlImages.length === 0 || monsterImages.length === 0) {
        console.log('Input directories are empty. Waiting...');
        await new Promise(resolve => setTimeout(resolve, 5000));
        continue;
      }
      const randomGirlImage = girlImages[Math.floor(Math.random() * girlImages.length)];
      const randomMonsterImage = monsterImages[Math.floor(Math.random() * monsterImages.length)];
      const image1Path = path.join(girlDir, randomGirlImage);
      const image2Path = path.join(monsterDir, randomMonsterImage);
      // Copy files to comfy input directory
      const destImage1Path = path.join(comfyInputDir, randomGirlImage);
      const destImage2Path = path.join(comfyInputDir, randomMonsterImage);
      fs.copyFileSync(image1Path, destImage1Path);
      fs.copyFileSync(image2Path, destImage2Path);
      console.log(`Combining ${randomGirlImage} and ${randomMonsterImage}`);
      const generatedFilePath = await convertImageWithFile(prompt, randomGirlImage, randomMonsterImage, comfyBaseUrl!, comfyOutputDir!);
      if (generatedFilePath && fs.existsSync(generatedFilePath)) {
        const timestamp = new Date().getTime();
        const newFileName = `combined_${timestamp}.png`;
        const newFilePath = path.join(outputDir, newFileName);
        fs.renameSync(generatedFilePath, newFilePath);
        console.log(`Renamed generated file to ${newFilePath}`);
      } else {
        console.log("Failed to generate or find the image file.");
      }
    } catch (error) {
      console.error('An error occurred:', error);
    }
    // Wait for a bit before the next iteration
    await new Promise(resolve => setTimeout(resolve, 5000));
  }
}
combineImages();

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.5 MiB

View File

@ -1,19 +1,28 @@
import fs from 'fs';
import path from 'path';
import crypto from 'crypto';
import { generateImage } from '../../lib/image-generator';
import { generateImage } from '../../lib/image-generator-style-faceswap';
import { logger } from '../../lib/logger';
import dotenv from 'dotenv';
dotenv.config();
const scenesFilePath = path.resolve(process.cwd(), 'src/musicspot_generator/v2/scenes.json');
const faceFilePath = path.resolve(process.cwd(), 'src/musicspot_generator/v2/face.png');
const GENERATED_DIR = path.resolve('generated');
const DEFAULT_SIZE = { width: 1280, height: 720 };
interface Scene {
scene: string;
imagePrompt: string;
imagePrompt: {
description: string,
style: string,
lighting: string,
outfit: string,
location: string,
poses: string,
angle: string,
};
videoPromp: string;
baseImagePath: string;
}
@ -43,11 +52,12 @@ async function generatePhotos() {
try {
await generateImage(
scene.imagePrompt,
`Scary realistic photo, ${scene.imagePrompt.location},${scene.imagePrompt.angle},${scene.imagePrompt.lighting},${scene.imagePrompt.outfit}`,
faceFilePath,
scene.baseImagePath,
imgFileName,
COMFY_BASE_URL,
COMFY_OUTPUT_DIR,
'flux',
DEFAULT_SIZE
);
logger.info(`Successfully generated photo: ${imgFileName}`);

View File

@ -4,10 +4,10 @@ import { callLMStudioAPIWithFile } from '../../lib/lmstudio';
import { logger } from '../../lib/logger';
const promptInstructions = `
Video prompt: No slowmotion, Be creative and generate dynamic dance scene.
Video prompt: No slowmotion, Be creative and generate gengle action scene.
`;
const inputDir = path.resolve(process.cwd(), 'input');
6
const inputDir = path.resolve(process.cwd(), 'input/static');
const outputFilePath = path.resolve(process.cwd(), 'src/musicspot_generator/v2/scenes.json');
interface Scene {

View File

@ -21,33 +21,39 @@ async function processScenes() {
const scenes: Scene[] = JSON.parse(scenesData).scenes;
for (const scene of scenes) {
const hash = crypto.createHash('sha256').update(scene.baseImagePath).digest('hex');
const imageFileName = `${hash}.png`;
const imagePath = path.join(generatedFolderPath, imageFileName);
if (fs.existsSync(imagePath)) {
const outputVideoFileName = `${hash}.mp4`;
const outputVideoPath = path.join(generatedFolderPath, outputVideoFileName);
try {
const hash = crypto.createHash('sha256').update(scene.baseImagePath).digest('hex');
const imageFileName = `${hash}.png`;
const imagePath = path.join(generatedFolderPath, imageFileName);
if (fs.existsSync(outputVideoPath)) {
console.log(`Video already exists for scene ${scene.scene}, skipping.`);
continue;
if (fs.existsSync(imagePath)) {
const outputVideoFileName = `${hash}.mp4`;
const outputVideoPath = path.join(generatedFolderPath, outputVideoFileName);
if (fs.existsSync(outputVideoPath)) {
console.log(`Video already exists for scene ${scene.scene}, skipping.`);
continue;
}
console.log(`Generating video for scene ${scene.scene}...`);
await generateVideo(
scene.videoPrompt,
imagePath,
outputVideoPath,
process.env.COMFY_BASE_URL!,
process.env.COMFY_OUTPUT_DIR!,
{ width: 1280, height: 720 }
);
console.log(`Video for scene ${scene.scene} saved to ${outputVideoPath}`);
} else {
console.warn(`Image not found for scene ${scene.scene}: ${imagePath}`);
}
console.log(`Generating video for scene ${scene.scene}...`);
await generateVideo(
scene.videoPrompt,
imagePath,
outputVideoPath,
process.env.COMFY_BASE_URL!,
process.env.COMFY_OUTPUT_DIR!,
{ width: 1280, height: 720 }
);
console.log(`Video for scene ${scene.scene} saved to ${outputVideoPath}`);
} else {
console.warn(`Image not found for scene ${scene.scene}: ${imagePath}`);
} catch (e) {
continue;
}
}
} catch (error) {
console.error('Error processing scenes:', error);

View File

@ -0,0 +1,68 @@
import * as fs from 'fs';
import * as path from 'path';
import * as crypto from 'crypto';
import { generateVideo } from '../../lib/video-generator';
import { callLMStudioAPIWithFile } from '../../lib/lmstudio';
import dotenv from 'dotenv';
dotenv.config();

// Source stills live under input/static; rendered videos go to generated/.
const inputFolderPath = path.join(__dirname, '..', '..', '..', 'input/static');
const generatedFolderPath = path.join(__dirname, '..', '..', '..', 'generated');

/**
 * For every image in input/static: ask the LLM for a dancing video prompt,
 * then render a video via ComfyUI. The output name is the SHA-256 of the
 * image *file name* (not contents), so re-runs skip already-rendered images.
 * Per-image errors are logged and the loop continues.
 */
async function processImages() {
  try {
    const imageFiles = fs.readdirSync(inputFolderPath);
    for (const imageFile of imageFiles) {
      const imagePath = path.join(inputFolderPath, imageFile);
      try {
        // Deterministic output name derived from the source file name.
        const hash = crypto.createHash('sha256').update(imageFile).digest('hex');
        const outputVideoFileName = `${hash}.mp4`;
        const outputVideoPath = path.join(generatedFolderPath, outputVideoFileName);
        if (fs.existsSync(outputVideoPath)) {
          console.log(`Video already exists for image ${imageFile}, skipping.`);
          continue;
        }
        console.log(`Generating video prompt for image ${imageFile}...`);
        // Prompt asks for {"result":""} JSON per the project's LLM convention.
        const promptResult = await callLMStudioAPIWithFile(
          imagePath,
          `
Generate a short, dancing video prompt for an image located at ${imagePath}.
Return the result in this format: {"result":""}
Instruction:
- Find best dancing expression based on the photo
`);
        const videoPrompt = promptResult.result;
        console.log(`Video prompt ${videoPrompt}`);
        if (!videoPrompt) {
          console.error(`Could not generate video prompt for image ${imageFile}`);
          continue;
        }
        console.log(`Generating video for image ${imageFile}...`);
        await generateVideo(
          videoPrompt,
          imagePath,
          outputVideoPath,
          process.env.COMFY_BASE_URL!,
          process.env.COMFY_OUTPUT_DIR!,
          { width: 1280, height: 720 }
        );
        console.log(`Video for image ${imageFile} saved to ${outputVideoPath}`);
      } catch (e) {
        console.error(`Error processing image ${imageFile}:`, e);
        continue;
      }
    }
  } catch (error) {
    console.error('Error processing images:', error);
  }
}
processImages();

View File

@ -13,8 +13,8 @@ const PINS_TO_COLLECT = 5;
// Hard-coded user prompt
const HARDCODED_USER_PROMPT = process.env.HARDCODED_USER_PROMPT || `
Generate 20 keywords for photos of group of people dancing together focus on street and urban style. All keywords shoudld contain \"group horizontal\" and what you create.
Example output : ["group horizontal hiphop dance","group horizontal modern dance","",... and 20 items in array]
Generate 20 keywords for perfume brand photo. List of 20 most famous perfume brands, and its popular perfume names:
Example output : ["chanel N5", "dior j'adore", "gucci bloom"....]
`;
async function getPinUrlsFromPinterest(keyword: string, scrollCount = SCROLL_SEARCH, limit = PINS_TO_COLLECT): Promise<string[]> {

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,75 @@
import { convertImage } from '../lib/image-converter';
import * as fs from 'fs-extra';
import * as path from 'path';
import dotenv from 'dotenv';
dotenv.config();

// Local folders: raw photos in input/, cleaned product shots in generated/clearned.
const inputDir = path.join(__dirname, '../../input');
const outputDir = path.join(__dirname, '../../generated/clearned');
const comfyUrl = process.env.SERVER1_COMFY_BASE_URL;
const comfyOutputDir = process.env.SERVER1_COMFY_OUTPUT_DIR;
if (!comfyUrl || !comfyOutputDir) {
  console.error("ComfyUI URL or Output Directory is not set in environment variables.");
  process.exit(1);
}
// ComfyUI reads source files from the "input" sibling of its output directory.
// NOTE(review): assumes the output path contains the literal segment "output" — confirm.
const comfyInputDir = comfyOutputDir.replace("output", "input");

/**
 * Sends every file in inputDir through ComfyUI with a fixed "product photo"
 * clean-up prompt, saves results as clearned_<n>.png in outputDir, and
 * deletes each source file only after its conversion succeeded. The staged
 * copy in the ComfyUI input dir is removed on both success and failure.
 */
async function processImages() {
  await fs.ensureDir(outputDir);
  const files = await fs.readdir(inputDir);
  let index = 1; // sequential suffix for output file names
  for (const file of files) {
    const sourceFilePath = path.join(inputDir, file);
    const stats = await fs.stat(sourceFilePath);
    if (stats.isFile()) {
      console.log(`Processing ${file}...`);
      const comfyInputPath = path.join(comfyInputDir, file);
      try {
        // 1. Copy file to ComfyUI input directory
        await fs.copy(sourceFilePath, comfyInputPath);
        console.log(`Copied ${file} to ComfyUI input.`);
        // Prompt (Chinese): extract the main subject, set a light-gray
        // background, face the subject forward, make it a product photo.
        const prompt = "请从图1中提取主要主体把背景设置为浅灰色并让主体正面朝向制作成产品照片。";
        // 2. Call convertImage with correct parameters
        const generatedFilePath = await convertImage(prompt, file, comfyUrl!, comfyOutputDir!);
        if (generatedFilePath && await fs.pathExists(generatedFilePath)) {
          const outputFilename = `clearned_${index}.png`;
          const finalOutputPath = path.join(outputDir, outputFilename);
          // 3. Move the generated file to the final destination
          await fs.move(generatedFilePath, finalOutputPath, { overwrite: true });
          console.log(`Saved cleaned image to ${finalOutputPath}`);
          index++;
          // 4. Delete the original file from the script's input directory
          await fs.unlink(sourceFilePath);
          console.log(`Deleted original file: ${file}`);
        }
        // 5. Clean up the file from ComfyUI input directory
        await fs.unlink(comfyInputPath);
        console.log(`Cleaned up ${file} from ComfyUI input.`);
      } catch (error) {
        console.error(`Failed to process ${file}:`, error);
        // If something fails, make sure to clean up the copied file if it exists
        if (await fs.pathExists(comfyInputPath)) {
          await fs.unlink(comfyInputPath);
        }
      }
    }
  }
}
processImages().catch(console.error);

View File

@ -0,0 +1,88 @@
import * as fs from 'fs/promises';
import * as path from 'path';
import dotenv from 'dotenv';
import { readJsonToPng, embedJsonToPng } from '../lib/util';
import { convertImage } from '../lib/image-converter';
dotenv.config();

// Prompt-carrying PNGs come from generated/prompts; results go to generated/image.
const inputDir = './generated/prompts';
const outputDir = './generated/image';
const COMFY_BASE_URL = process.env.SERVER1_COMFY_BASE_URL!;
const COMFY_OUTPUT_DIR = process.env.SERVER1_COMFY_OUTPUT_DIR!;

// Shape of the JSON blob embedded in each source PNG (see lib/util readJsonToPng).
interface PngMetadata {
  prompts: {
    imagePrompt: string;
    videoPrompt: string;
  }[];
}

/**
 * For every PNG in inputDir, read its embedded prompt pairs, generate one
 * image per pair via ComfyUI, and re-embed the single pair's metadata into
 * the output PNG so the later video step can pick up its videoPrompt.
 * Note: generatedImageIndex advances per prompt pair across ALL files, so
 * output names are globally sequential; existing outputs are skipped.
 */
async function main() {
  await fs.mkdir(outputDir, { recursive: true });
  const files = await fs.readdir(inputDir);
  let generatedImageIndex = 0;
  for (const file of files) {
    if (path.extname(file).toLowerCase() !== '.png') {
      continue;
    }
    const inputFile = path.join(inputDir, file);
    const metadata = await readJsonToPng(inputFile) as PngMetadata;
    if (metadata && metadata.prompts && Array.isArray(metadata.prompts)) {
      console.log(`Processing ${file} with ${metadata.prompts.length} prompt pairs.`);
      // Stage the source PNG in ComfyUI's input dir (sibling of its output dir).
      const inputfolderFullpath = COMFY_OUTPUT_DIR.replace("output", "input");
      await fs.copyFile(inputFile, path.join(inputfolderFullpath, file));
      for (const promptPair of metadata.prompts) {
        const { imagePrompt, videoPrompt } = promptPair;
        const newFileName = `cleaned_prompt_generated_${generatedImageIndex}.png`;
        generatedImageIndex++;
        const outputPath = path.join(outputDir, newFileName);
        try {
          await fs.access(outputPath);
          console.log(`File ${newFileName} already exists, skipping.`);
          continue;
        } catch (error) {
          // File does not exist, proceed with generation
        }
        console.log(`Generating image for prompt: "${imagePrompt}"`);
        try {
          const generatedFilePath = await convertImage(
            imagePrompt,
            file, // Using the same image for both inputs as per interpretation
            COMFY_BASE_URL,
            COMFY_OUTPUT_DIR
          );
          // The convertImage function saves the file in a generic location.
          // We need to move it to the correct location with the correct name.
          await fs.rename(generatedFilePath, outputPath);
          const newMetadata = {
            imagePrompt: imagePrompt,
            videoPrompt: videoPrompt
          };
          await embedJsonToPng(outputPath, newMetadata);
          console.log(`Successfully generated and saved ${newFileName} with metadata.`);
        } catch (error) {
          console.error(`Error generating image for prompt "${imagePrompt}":`, error);
        }
      }
    } else {
      console.log(`Skipping ${file}, no valid prompts metadata found.`);
    }
  }
}
main().catch(console.error);

View File

@ -0,0 +1,169 @@
import * as fs from 'fs';
import * as path from 'path';
import { callLMStudioAPIWithFile, callLmstudio } from '../lib/lmstudio';
import { embedJsonToPng } from '../lib/util';
import { downloadImagesFromPinterestSearch } from '../lib/pinterest';
import { logger } from '../lib/logger';
import sharp from 'sharp';
const INPUT_DIR = path.join(process.cwd(), 'input');
const OUTPUT_DIR = path.join(process.cwd(), 'generated', 'prompts');
if (!fs.existsSync(OUTPUT_DIR)) {
fs.mkdirSync(OUTPUT_DIR, { recursive: true });
}
/**
 * Pipeline for one input image:
 *   1. Ask the vision LLM to name the main object and propose five colors
 *      (two common, two uncommon, one surreal).
 *   2. For each color, search Pinterest for one matching product photo.
 *   3. Turn the downloaded photo into a scene-description image prompt.
 *   4. Derive a matching ~50-word video prompt from the same photo.
 *   5. Embed all collected prompt pairs as JSON metadata into a PNG copy
 *      of the input saved as cleaned_prompt_<index>.png.
 * Errors abort only this image; the caller moves on to the next one.
 */
async function generatePromptsForImage(imagePath: string, index: number) {
  const outputFilePath = path.join(OUTPUT_DIR, `cleaned_prompt_${index}.png`);
  logger.info(`Processing image: ${path.basename(imagePath)} -> ${path.basename(outputFilePath)}`);
  try {
    // Step 1: Detect main object and generate colors from the input image
    const colorGenerationPrompt = `
You are a creative assistant. Analyze the provided image.
Identify the main subject product ( not a product name).
Then, list exactly five colors related to this subject:
- Two colors that are common for this object.
- Two colors that are uncommon but plausible.
- One color that is completely crazy or surreal for this object.
Output strictly in this JSON format:
{
"result": {
"main_object": "the identified noun",
"colors": [
"color1",
"color2",
"color3",
"color4",
"color5"
]
}
}
`;
    const colorResponse = await callLMStudioAPIWithFile(imagePath, colorGenerationPrompt);
    const { main_object, colors } = colorResponse.result;
    // Validate the LLM answer before relying on it downstream.
    if (!main_object || !Array.isArray(colors) || colors.length !== 5) {
      logger.error(`Failed to get a valid main object and color list for ${imagePath}.`);
      return;
    }
    logger.info(`Main object: "${main_object}", Colors: ${colors.join(', ')}`);
    const prompts: { imagePrompt: string, videoPrompt: string }[] = [];
    // One random theme word is mixed into each Pinterest query.
    const themes = ["special", "unique", "beautiful", "crazy", "funny"];
    // Step 2: Iterate through each color
    for (const color of colors) {
      const randomTheme = themes[Math.floor(Math.random() * themes.length)];
      const pinterestQuery = `${main_object} product photo ${color} background ${randomTheme}`;
      logger.info(`Searching Pinterest for: "${pinterestQuery}"`);
      // Step 3: Get an image from Pinterest
      const downloadedImages = await downloadImagesFromPinterestSearch(pinterestQuery, 1);
      if (downloadedImages.length === 0) {
        logger.warn(`Could not find an image on Pinterest for query: "${pinterestQuery}"`);
        continue;
      }
      const pinterestImagePath = downloadedImages[0];
      logger.info(`Downloaded Pinterest image: ${pinterestImagePath}`);
      // Step 4: Generate a detailed prompt from the Pinterest image
      const imagePromptRequest = `
You are an expert in generating descriptive prompts for image generation models.
Analyze the provided image and describe it in a single, detailed paragraph.
Focus on style, mood, lighting, color palette, sub-objects, and composition.
Do not mention the main object itself. The prompt should be about the scene.
Output strictly in this JSON format:
{
"result": "your generated prompt here"
}
`;
      const imagePromptResponse = await callLMStudioAPIWithFile(pinterestImagePath, imagePromptRequest);
      const imagePrompt = imagePromptResponse.result;
      if (imagePrompt) {
        logger.info(`Generated image prompt for color ${color}: "${imagePrompt}"`);
        // Step 5: Generate a matching video prompt
        const videoPromptRequest = `
You are a creative director for a short, stylish video ad.
Based on the provided image and the following scene description, generate an attractive video prompt.
Main Subject: ${main_object}
Scene Description: ${imagePrompt}
The video prompt should:
- Be in English and approximately 50 words.
- Describe one clear action involving the main subject.
- Include one specific camera movement (e.g., slow zoom in, orbiting shot, push-in, pull-out).
- Be dynamic and visually appealing.
Output strictly in this JSON format:
{
"result": "your generated video prompt here"
}
`;
        const videoPromptResponse = await callLMStudioAPIWithFile(pinterestImagePath, videoPromptRequest);
        const videoPrompt = videoPromptResponse.result;
        if (videoPrompt) {
          logger.info(`Generated video prompt for color ${color}: "${videoPrompt}"`);
          prompts.push({ imagePrompt, videoPrompt });
        } else {
          logger.warn(`Failed to generate a video prompt for ${pinterestImagePath}`);
        }
      } else {
        logger.warn(`Failed to generate an image prompt for ${pinterestImagePath}`);
      }
    }
    if (prompts.length === 0) {
      logger.error(`No prompt pairs were generated for ${imagePath}. Aborting.`);
      return;
    }
    // Step 6: Embed all prompts into the original image and save to the new location
    const metadata = {
      prompts: prompts
    };
    // Convert original image to a valid PNG at the output path before embedding
    await sharp(imagePath)
      .toFormat('png')
      .toFile(outputFilePath);
    await embedJsonToPng(outputFilePath, metadata);
    logger.info(`Successfully generated prompts and saved metadata to ${outputFilePath}`);
  } catch (error) {
    logger.error(`An error occurred while processing ${imagePath}:`, error);
  }
}
/**
 * Entry point: walks INPUT_DIR, filters to png/jpg/jpeg files, and runs
 * generatePromptsForImage sequentially, passing the file's position as the
 * output index. All failures are caught and logged here.
 */
async function main() {
  try {
    const dirEntries = fs.readdirSync(INPUT_DIR);
    const imageNames = dirEntries.filter((name) => /\.(png|jpg|jpeg)$/i.test(name));
    if (imageNames.length === 0) {
      console.log('No images found in the input directory.');
      return;
    }
    // Sequential on purpose: each image triggers LLM + Pinterest work.
    for (const [index, imageName] of imageNames.entries()) {
      await generatePromptsForImage(path.join(INPUT_DIR, imageName), index);
    }
    console.log('All images processed.');
  } catch (error) {
    console.error('An error occurred in the main process:', error);
  }
}
main();

View File

@ -0,0 +1,74 @@
import * as fs from 'fs/promises';
import * as path from 'path';
import dotenv from 'dotenv';
import { readJsonToPng } from '../lib/util';
import { generateVideo } from '../lib/video-generator';
dotenv.config();

const inputDir = './input';
const outputDir = './generated/video';
const COMFY_BASE_URL = process.env.SERVER1_COMFY_BASE_URL!;
const COMFY_OUTPUT_DIR = process.env.SERVER1_COMFY_OUTPUT_DIR!;

// JSON blob expected inside each PNG (written by the image-generation step).
interface PngMetadata {
  imagePrompt: string;
  videoPrompt: string;
}

/**
 * For each PNG in inputDir that carries an embedded videoPrompt, render a
 * video via ComfyUI. The output name product_<i>_<promptIndex>.mp4 combines
 * the loop index with the trailing "_<n>" suffix of the source file name;
 * existing outputs are skipped. PNGs without metadata are skipped with a log.
 */
async function main() {
  await fs.mkdir(outputDir, { recursive: true });
  const files = await fs.readdir(inputDir);
  const pngFiles = files.filter(file => path.extname(file).toLowerCase() === '.png');
  for (let i = 0; i < pngFiles.length; i++) {
    const file = pngFiles[i];
    const inputFile = path.join(inputDir, file);
    const metadata = await readJsonToPng(inputFile) as PngMetadata;
    if (metadata && metadata.videoPrompt) {
      console.log(`Processing ${file} for video generation.`);
      // Recover the prompt-pair index from the "..._<n>" part of the name.
      const originalFileName = path.parse(file).name;
      const nameParts = originalFileName.split('_');
      const promptIndex = nameParts[nameParts.length - 1];
      const newFileName = `product_${i}_${promptIndex}.mp4`;
      const outputPath = path.join(outputDir, newFileName);
      try {
        await fs.access(outputPath);
        console.log(`File ${newFileName} already exists, skipping.`);
        continue;
      } catch (error) {
        // File does not exist, proceed with generation
      }
      console.log(`Generating video for prompt: "${metadata.videoPrompt}"`);
      // Stage the source PNG in ComfyUI's input dir (sibling of its output dir).
      const inputfolderFullpath = COMFY_OUTPUT_DIR.replace("output", "input");
      await fs.copyFile(inputFile, path.join(inputfolderFullpath, file));
      try {
        await generateVideo(
          metadata.videoPrompt,
          file,
          newFileName,
          COMFY_BASE_URL,
          COMFY_OUTPUT_DIR
        );
        console.log(`Successfully generated and saved ${newFileName}`);
      } catch (error) {
        console.error(`Error generating video for ${file}:`, error);
      }
    } else {
      console.log(`Skipping ${file}, no valid videoPrompt metadata found.`);
    }
  }
}
main().catch(console.error);

View File

@ -0,0 +1,64 @@
import { callLmstudio } from '../lib/lmstudio';
import { logger } from '../lib/logger';
import dotenv from 'dotenv';
import { downloadImagesFromPinterestSearch } from '../lib/pinterest';
dotenv.config();
const PHOTOS_PER_KEYWORD = 10;
// Hard-coded user prompt
const HARDCODED_USER_PROMPT = process.env.HARDCODED_USER_PROMPT || `
Generate 20 keywords for various photogeneric product. List of 20 most common photo generic product :
Example output : ["food", "perfume", "accesory", "jewelry", "shoes"...]
`;
/**
 * Re-usable helper to extract JSON embedded in free-form LLM output.
 * Tries, in order: a fenced ```json block, the outermost {...} or [...]
 * span, and finally "one keyword per line" text (returned as string[]).
 * Returns null when nothing parseable is found.
 */
function extractJsonFromText(text: string): any | null {
  if (!text || typeof text !== 'string') return null;
  // 1) Prefer a fenced code block; the json tag is optional.
  const fencedMatch = text.match(/```(?:json)?\s*([\s\S]*?)\s*```/i);
  if (fencedMatch?.[1]) {
    try {
      return JSON.parse(fencedMatch[1].trim());
    } catch (e) {
      // fall through to the next strategy
    }
  }
  // 2) Otherwise grab the widest {...} or [...] span and try to parse it.
  const braceMatch = text.match(/\{[\s\S]*\}|\[[\s\S]*\]/);
  if (braceMatch?.[0]) {
    try {
      return JSON.parse(braceMatch[0]);
    } catch (e) {
      return null;
    }
  }
  // 3) Fallback: treat multiple non-empty lines as a plain keyword list.
  const keywordLines = text
    .split(/\r?\n/)
    .map((line: string) => line.trim())
    .filter(Boolean);
  return keywordLines.length > 1 ? keywordLines : null;
}
// Entry point: downloads PHOTOS_PER_KEYWORD Pinterest images for each keyword.
(async () => {
  logger.info(`Starting photo download process with prompt: "${HARDCODED_USER_PROMPT}"`);
  // 1. Extract keywords from the hardcoded prompt
  // NOTE(review): the LLM/extractJsonFromText path is currently bypassed —
  // keywords are hard-coded below and HARDCODED_USER_PROMPT is only logged.
  const keywords = ["fullbody portrait girl", "fullbody portrait 18y girl", "fullbody portrait cute girl", "fullbody portrait blond girl", "fullbody portrait 20y girl"];
  if (!keywords || keywords.length === 0) {
    logger.error("Could not extract keywords from prompt. Exiting.");
    return;
  }
  logger.info(`Extracted keywords: ${keywords.join(', ')}`);
  // 2. Search Pinterest for each keyword and download photos directly
  let totalDownloads = 0;
  for (const keyword of keywords) {
    try {
      logger.info(`Downloading photos for keyword: "${keyword}"`);
      const downloadedPaths = await downloadImagesFromPinterestSearch(`${keyword}`, PHOTOS_PER_KEYWORD);
      if (downloadedPaths.length > 0) {
        logger.info(`Successfully downloaded ${downloadedPaths.length} images for "${keyword}"`);
        totalDownloads += downloadedPaths.length;
      } else {
        logger.warn(`No images were downloaded for "${keyword}"`);
      }
    } catch (error) {
      // One keyword failing must not stop the rest of the batch.
      logger.error(`An error occurred while processing keyword ${keyword}:`, error);
    }
  }
  logger.info(`Photo download process finished. Total images downloaded: ${totalDownloads}`);
})();

View File

@ -0,0 +1,82 @@
import * as fs from 'fs/promises';
import * as path from 'path';
import dotenv from 'dotenv';
import { callLMStudioAPIWithFile, callLmstudio } from '../lib/lmstudio';
import { convertImage } from '../lib/image-converter';
import { logger } from '../lib/logger';
dotenv.config();

const COMFY_BASE_URL = process.env.SERVER2_COMFY_BASE_URL;
const COMFY_OUTPUT_DIR = process.env.SERVER2_COMFY_OUTPUT_DIR;
const INPUT_DIR = './input';
const GENERATED_DIR = './generated';

/**
 * Two-stage LLM pipeline per input photo:
 *   1. A vision call extracts subject / background / style ideas as JSON.
 *   2. A text call turns those into one spooky "group dancing" prompt.
 * The final prompt drives a ComfyUI image conversion and the result is
 * copied into GENERATED_DIR. Per-file failures are logged and skipped.
 */
async function batchConvert() {
  if (!COMFY_BASE_URL || !COMFY_OUTPUT_DIR) {
    throw new Error('ComfyUI server details are not defined in the .env file');
  }
  try {
    await fs.mkdir(GENERATED_DIR, { recursive: true });
    const files = await fs.readdir(INPUT_DIR);
    for (const file of files) {
      try {
        const inputFile = path.join(INPUT_DIR, file);
        logger.info(`Processing ${inputFile}`);
        const firstPrompt = `
Read the file in the photo and detect the main subject. Extract the following information:
- what is the main subject in one word
- describe character in 5 words
- describe background in 5 words
- 3 ideas to make this character's look, 2 or 3 words per idea
Return the result in this format:
{ "character":"", "characterDescription": "", "background":"", "idea":"idea1, idea2, idea3"}
`;
        const imageInfo = await callLMStudioAPIWithFile(inputFile, firstPrompt);
        // NOTE(review): assumes the model answers with exactly the keys
        // requested above (character/idea/background) — confirm at runtime.
        const { character, idea, background } = imageInfo;
        const ideas = idea.split(',').map((i: string) => i.trim());
        const secondPrompt = `
Generate a prompt to convert the photo to a group dancing photo.
I need only the prompt in this format, main subject is ${character}
{"result":"
Change the camera angle to a full-body shot, place many ${character} in the scene messily,
Change lighting to silhouette lighting,
Change the style for each body using these ideas: ${ideas.join(', ')},
Change color of each body,
Change the background in horror movie style of ${background},
Overall photo should be realistic and spooky,
"} `;
        const promptResult = await callLmstudio(secondPrompt);
        const finalPrompt = promptResult.result;
        // Stage the source photo in ComfyUI's input dir (sibling of output).
        const inputFolderFullPath = COMFY_OUTPUT_DIR.replace("output", "input");
        const serverInputFile = path.join(inputFolderFullPath, file);
        await fs.copyFile(inputFile, serverInputFile);
        logger.info(`Generating image for ${file} with prompt: ${finalPrompt}`);
        const generatedFile = await convertImage(finalPrompt, file, COMFY_BASE_URL, COMFY_OUTPUT_DIR);
        const finalOutputPath = path.join(GENERATED_DIR, path.basename(generatedFile));
        await fs.copyFile(generatedFile, finalOutputPath);
        logger.info(`Saved converted image to ${finalOutputPath}`);
      } catch (e) {
        logger.error('An error occurred during batch conversion:', e);
        continue;
      }
    }
  } catch (error) {
    logger.error('An error occurred during batch conversion:', error);
  }
}
batchConvert();

View File

@ -0,0 +1,54 @@
import * as fs from 'fs';
import * as path from 'path';
import { callLMStudioAPIWithFile } from '../lib/lmstudio';
import { embedJsonToPng } from '../lib/util';

// Hard-coded source folder of handbag photos to annotate.
const imageDir = 'C:\\Users\\fm201\\Desktop\\vton\\bags';

/**
 * For every PNG in imageDir, asks the vision LLM for 10 complementary outfit
 * prompts and embeds the JSON answer into the PNG itself via embedJsonToPng.
 * Per-image failures are logged and the loop continues with the next file.
 */
async function processImages() {
  try {
    const files = fs.readdirSync(imageDir);
    const imageFiles = files.filter(file => /\.(png)$/i.test(file));
    for (const imageFile of imageFiles) {
      const imagePath = path.join(imageDir, imageFile);
      console.log(`Processing ${imagePath}...`);
      const prompt = `
Based on the handbag in the image, generate 10 outfit prompts that would complement it.
Each prompt should be a short, descriptive sentence of around 20 words.
Return the result in the following JSON format:
{"result": ["outfit prompt 1", "outfit prompt 2", ...]}
`;
      try {
        const response = await callLMStudioAPIWithFile(imagePath, prompt);
        let outfitPrompts;
        // The API may hand back either a parsed object or a raw JSON string.
        if (typeof response === 'string') {
          try {
            outfitPrompts = JSON.parse(response);
          } catch (e) {
            console.error(`Failed to parse JSON string for ${imageFile}:`, response);
            continue;
          }
        } else {
          outfitPrompts = response;
        }
        if (outfitPrompts && outfitPrompts.result) {
          await embedJsonToPng(imagePath, outfitPrompts);
          console.log(`Successfully embedded prompts into ${imageFile}`);
        } else {
          console.error(`Invalid JSON response for ${imageFile}:`, response);
        }
      } catch (error) {
        console.error(`Failed to process ${imageFile}:`, error);
      }
    }
  } catch (error) {
    console.error('Error reading image directory:', error);
  }
}
processImages();

View File

@ -0,0 +1,74 @@
import * as fs from 'fs/promises';
import * as path from 'path';
import { generateVideo } from '../lib/video-generator';
import dotenv from 'dotenv';
dotenv.config();

const inputFolder = './input';
// The same motion prompt is applied to every image.
const prompt = "a girl making a cute pose, static camera";
// Fixed: a stray bare `b` token sat on the next line and broke compilation
// (TS2304: Cannot find name 'b'); it has been removed.

/**
 * Converts every PNG in ./input into an mp4 via ComfyUI: the image is staged
 * in ComfyUI's input directory, then generateVideo renders <name>.mp4.
 * Connection failures to the server abort the whole batch; other per-file
 * errors are logged and the loop continues.
 */
async function main() {
  try {
    const files = await fs.readdir(inputFolder);
    const pngFiles = files.filter(file => path.extname(file).toLowerCase() === '.png');
    if (pngFiles.length === 0) {
      console.log('No PNG files found in the input folder.');
      return;
    }
    const comfyBaseUrl = process.env.SERVER2_COMFY_BASE_URL;
    const comfyOutputDir = process.env.SERVER2_COMFY_OUTPUT_DIR;
    if (!comfyBaseUrl || !comfyOutputDir) {
      // Fixed: the message previously named SERVER1_* variables although the
      // code reads SERVER2_*, pointing users at the wrong .env keys.
      console.error('Please define SERVER2_COMFY_BASE_URL and SERVER2_COMFY_OUTPUT_DIR in your .env file.');
      return;
    }
    // ComfyUI picks up source images from the "input" sibling of its output dir.
    const comfyInputDir = comfyOutputDir.replace('output', 'input');
    for (const file of pngFiles) {
      const inputImagePath = path.join(inputFolder, file);
      const comfyInputImagePath = path.join(comfyInputDir, file);
      console.log(`Processing ${file}...`);
      try {
        await fs.access(inputImagePath);
        // Copy file to comfy input directory
        await fs.copyFile(inputImagePath, comfyInputImagePath);
        console.log(`Copied ${file} to ComfyUI input directory.`);
      } catch (e) {
        console.error(`Error copying file ${file}:`, e);
        continue;
      }
      const newFileName = `${path.parse(file).name}.mp4`;
      try {
        const generatedVideoPath = await generateVideo(
          prompt,
          file, // Pass only the filename as per instructions
          newFileName,
          comfyBaseUrl,
          comfyOutputDir
        );
        console.log(`Successfully generated video for ${file} at: ${generatedVideoPath}`);
      } catch (e: any) {
        if (e.code === 'ECONNREFUSED' || e.code === 'ETIMEDOUT') {
          console.error(`\nError: Connection to ComfyUI server at ${comfyBaseUrl} failed.`);
          console.error('Please ensure the ComfyUI server is running and accessible.');
          break; // server is unreachable: abort the whole batch
        } else {
          console.error(`An error occurred while generating video for ${file}:`, e);
        }
      }
    }
  } catch (error) {
    console.error('An unexpected error occurred:', error);
  }
}
main();

View File

@ -3,20 +3,57 @@ import * as fs from 'fs/promises';
import * as path from 'path';
import { logger } from '../lib/logger';
import { getPinUrlFromPinterest, downloadImageFromPin } from '../lib/pinterest';
import { convertImage } from '../lib/image-converter';
import { generateImage } from '../lib/image-generator-face';
import { callLMStudioAPIWithFile } from '../lib/lmstudio';
dotenv.config();
const MODE: "keywords" | "pinIds" = "keywords";
const KEYWORDS = [
'fullbody pose cute ',
'fullbody pose model ',
'fullbody pose woman ',
'fullbody pose idol ',
'fullbody pose kawaii',
'fullbody pose japanese',
'fullbody pose kawaii sit',
'fullbody pose model sit',
'fullbody pose cute sit',];
'a girl in scary forest',
'a girl in graveyard',
''];
const pinIDs = [
"22377329393970367",
"18999629674210230",
"3166662232983784",
"291537775902572408",
"2744449769232655",
"9429480465939847",
"34058540926328062",
"1071153092617107265",
"6825836928646465",
"1407443629997072",
"333407178685095962",
"15833036184288417",
"6825836928284784",
"2181499815469509",
"199706564723106062",
"1759287348280571",
"56083957854040032",
"3025924743999802",
"2955556001576084",
"1407443627212889",
"836965911982723974",
"97460779431981493",
"282600945363725869",
"/59532026387104913",
"70437490453979",
"152489137384620437",
"50947039528553588",
"73042825197955754",
"624593042089280419",
"351912466315529",
"624030092104188250",
"21673641951379251",
"27021666506512503",
"3377768467678091",
"985231163409578",
"17240411068654164"
]
const TARGET_COUNT = Number(process.env.IMAGE_COUNT || 20);
const PROMPT =
`
@ -24,6 +61,12 @@ const PROMPT =
change background to light gray with faing gradient,
change clothes to shite sports bra and shite cotton short pants
`;
const LmStudioPrompt = `
describe the image in 50 words including, scene, lighting, describe character(s), add some beautiful accent like light, fog, starts, lamps, whatever suits with scene.
then return in this format
{"prompt":""}
`;
type ServerCfg = { baseUrl: string; outputDir: string; inputDir: string };
@ -60,37 +103,65 @@ async function collectImages(keyword: string, total: number): Promise<string[]>
// ensure local download dir exists (pinterest.ts also ensures it, but harmless here)
await ensureDir(path.join(process.cwd(), 'download'));
while (results.length < total) {
try {
const pinUrl = await getPinUrlFromPinterest(keyword);
if (!pinUrl) {
logger.warn('No pin URL found, retrying...');
await sleep(1500);
return [];
if (MODE == "keywords") {
while (results.length < total) {
try {
const pinUrl = await getPinUrlFromPinterest(keyword);
if (!pinUrl) {
logger.warn('No pin URL found, retrying...');
await sleep(1500);
return [];
}
const remaining = total - results.length;
// attempt to grab up to 5 per pin to reduce churn
const batchTarget = Math.min(5, remaining);
const imgs = await downloadImageFromPin(pinUrl, batchTarget);
if (imgs && imgs.length > 0) {
results.push(...imgs);
logger.info(`Downloaded ${imgs.length} image(s) from ${pinUrl}.Progress: ${results.length}/${total}`);
} else {
logger.warn(`Pin yielded no downloadable images: ${pinUrl}`);
}
await sleep(1000 + Math.random() * 1000);
} catch (err) {
logger.error('Error while collecting images:', err);
await sleep(2000);
}
const remaining = total - results.length;
// attempt to grab up to 5 per pin to reduce churn
const batchTarget = Math.min(5, remaining);
const imgs = await downloadImageFromPin(pinUrl, batchTarget);
if (imgs && imgs.length > 0) {
results.push(...imgs);
logger.info(`Downloaded ${imgs.length} image(s) from ${pinUrl}.Progress: ${results.length}/${total}`);
} else {
logger.warn(`Pin yielded no downloadable images: ${pinUrl}`);
}
} else if (MODE == "pinIds") {
while (results.length < total) {
const shuffledPinIds = pinIDs.slice().sort(() => 0.5 - Math.random());
const pinUrl = `https://www.pinterest.com/pin/${shuffledPinIds[0]}`;
try {
const remaining = total - results.length;
// attempt to grab up to 5 per pin to reduce churn
const batchTarget = Math.min(5, remaining);
const imgs = await downloadImageFromPin(pinUrl, batchTarget);
if (imgs && imgs.length > 0) {
results.push(...imgs);
logger.info(`Downloaded ${imgs.length} image(s) from ${pinUrl}.Progress: ${results.length}/${total}`);
} else {
logger.warn(`Pin yielded no downloadable images: ${pinUrl}`);
}
await sleep(1000 + Math.random() * 1000);
} catch (err) {
logger.error('Error while collecting images:', err);
await sleep(2000);
}
await sleep(1000 + Math.random() * 1000);
} catch (err) {
logger.error('Error while collecting images:', err);
await sleep(2000);
}
}
return results.slice(0, total);
}
let imageIndex = 0;
async function processImages(imagePaths: string[], server: ServerCfg) {
await ensureDir(server.inputDir);
for (const localImagePath of imagePaths) {
const response = await callLMStudioAPIWithFile(localImagePath, LmStudioPrompt);
const prompt = response.prompt;
const baseName = path.basename(localImagePath);
const serverInputPath = path.join(server.inputDir, baseName);
@ -100,14 +171,16 @@ async function processImages(imagePaths: string[], server: ServerCfg) {
logger.info(`Copied ${localImagePath} -> ${serverInputPath}`);
// Run conversion (sequential to avoid output race conditions)
const generatedPath = await convertImage(
PROMPT,
const generatedPath = await generateImage(
`ultra realistic photo, ${prompt}`,
baseName,
`monster_${imageIndex}.png`,
server.baseUrl,
server.outputDir,
{ width: 720, height: 1280 } // portrait
{ width: 1280, height: 720 } // portrait
);
logger.info(`Generated image: ${generatedPath}`);
imageIndex++;
} catch (err) {
logger.error(`Failed to convert ${localImagePath}:`, err);
} finally {
@ -125,6 +198,16 @@ async function processImages(imagePaths: string[], server: ServerCfg) {
async function main() {
const server = getServerConfig();
const files = await fs.readdir(path.join(process.cwd(), 'generated'));
const monsterFiles = files.filter((f) => f.startsWith('monster_'));
if (monsterFiles.length > 0) {
const latestFile = monsterFiles.sort().pop();
if (latestFile) {
const latestIndex = parseInt(latestFile.replace('monster_', '').replace('.png', ''));
imageIndex = latestIndex + 1;
}
}
// Infinite loop as requested
while (true) {

View File

@ -1,49 +1,154 @@
import * as fs from 'fs';
import * as path from 'path';
import { convertImageVton } from '../lib/image-converter';
import { convertImageVton, convertImage } from '../lib/image-converter';
import * as dotenv from 'dotenv';
import sharp from 'sharp';
dotenv.config();
const clothesDir = 'C:\\Users\\ken\\Desktop\\VTON\\clothes';
const modelPath = 'C:\\Users\\ken\\Desktop\\VTON\\models\\Jessica_body.png';
const posesDir = 'C:\\Users\\ken\\Desktop\\VTON\\poses';
const clothesDir = 'D:\\projects\\random_video_maker\\input';
const outputDir = 'generated';
const comfyBaseUrl = process.env.SERVER1_COMFY_BASE_URL;
const comfyOutputDir = process.env.SERVER1_COMFY_OUTPUT_DIR;
/**
 * Determine the next sequential index for `vton_<n>` output folders inside
 * `directory`. If the directory does not exist yet it is created and 0 is
 * returned (first run). Non-directories and names without a numeric suffix
 * are ignored.
 */
function getNextIndex(directory: string): number {
  if (!fs.existsSync(directory)) {
    fs.mkdirSync(directory, { recursive: true });
    return 0;
  }
  let highest = -1;
  for (const entry of fs.readdirSync(directory, { withFileTypes: true })) {
    if (!entry.isDirectory() || !entry.name.startsWith('vton_')) {
      continue;
    }
    const match = entry.name.match(/vton_(\d+)/);
    const value = match ? parseInt(match[1], 10) : -1;
    if (value > highest) {
      highest = value;
    }
  }
  return highest + 1;
}
/**
 * Pick a uniformly random image file (jpg / jpeg / png, case-insensitive)
 * from `directory` and return its full path.
 * @throws Error when the directory contains no image files.
 */
function getRandomFile(directory: string): string {
  const imagePattern = /\.(jpg|png|jpeg)$/i;
  const candidates = fs.readdirSync(directory).filter((name) => imagePattern.test(name));
  if (candidates.length === 0) {
    throw new Error(`No image files found in directory: ${directory}`);
  }
  const chosen = candidates[Math.floor(Math.random() * candidates.length)];
  return path.join(directory, chosen);
}
async function generateVtonImages() {
if (!comfyBaseUrl || !comfyOutputDir) {
throw new Error("ComfyUI URL or Output Directory is not set in environment variables.");
}
const clothesFiles = fs.readdirSync(clothesDir).filter(file => /\.(jpg|png|jpeg)$/i.test(file));
const poseFiles = fs.readdirSync(posesDir).filter(file => /\.(jpg|png|jpeg)$/i.test(file));
let index = getNextIndex(outputDir);
if (!fs.existsSync(outputDir)) {
fs.mkdirSync(outputDir);
}
const comfyInputDir = comfyOutputDir.replace("output", "input");
for (let i = 0; i < clothesFiles.length; i++) {
const clothFile = clothesFiles[i];
const clothPath = path.join(clothesDir, clothFile);
while (true) { // Infinite loop
const iterationDir = path.join(outputDir, `vton_${index}`);
fs.mkdirSync(iterationDir, { recursive: true });
const randomPoseFile = poseFiles[Math.floor(Math.random() * poseFiles.length)];
const posePath = path.join(posesDir, randomPoseFile);
try {
const personOrigPath = getRandomFile(clothesDir);
const clothOrigPath = getRandomFile(clothesDir);
console.log(`Processing cloth: ${clothFile} with pose: ${randomPoseFile}`);
fs.copyFileSync(personOrigPath, path.join(iterationDir, '1-personOrig.png'));
fs.copyFileSync(clothOrigPath, path.join(iterationDir, '3-clothOrig.png'));
const files = [modelPath, clothPath, posePath];
const prompt = "change clothes of image1 with image2";
const outputFilename = `model_${i}.png`;
const personOrigFileName = path.basename(personOrigPath);
const clothOrigFileName = path.basename(clothOrigPath);
const generatedImagePath = await convertImageVton(files, outputFilename, comfyBaseUrl, comfyOutputDir, { width: 720, height: 1280 });
fs.copyFileSync(personOrigPath, path.join(comfyInputDir, personOrigFileName));
fs.copyFileSync(clothOrigPath, path.join(comfyInputDir, clothOrigFileName));
if (generatedImagePath) {
console.log(`Generated image saved to ${generatedImagePath}`);
} else {
console.error(`Failed to generate image for ${clothFile}`);
console.log(`Processing person: ${personOrigPath}, cloth: ${clothOrigPath}`);
const cleanePersonImagePath = await convertImage("请把姿势改成站立的,转换成全身照片。去掉衣服,只保留白色运动文胸和白色短裤。双脚保持赤脚。背景为浅灰色。", personOrigFileName, comfyBaseUrl, comfyOutputDir, { width: 720, height: 1280 });
fs.copyFileSync(cleanePersonImagePath, path.join(iterationDir, '2-personCleaned.png'));
const cleanedPersonFileName = path.basename(cleanePersonImagePath);
fs.copyFileSync(cleanePersonImagePath, path.join(comfyInputDir, cleanedPersonFileName));
const cleanedClothImagePath = await convertImage("请将图1中的上衣、下装和配饰分别提取出来放到同一个浅灰色的背景上。", clothOrigFileName, comfyBaseUrl, comfyOutputDir, { width: 720, height: 1280 });
fs.copyFileSync(cleanedClothImagePath, path.join(iterationDir, '4-clothCleaned.png'));
const cleanedClothFileName = path.basename(cleanedClothImagePath);
fs.copyFileSync(cleanedClothImagePath, path.join(comfyInputDir, cleanedClothFileName));
const outputFilename = `vton_final_${index}.png`;
const generatedImagePath = await convertImageVton(cleanedPersonFileName, cleanedClothFileName, outputFilename, comfyBaseUrl, comfyOutputDir, { width: 720, height: 1280 });
if (generatedImagePath) {
fs.copyFileSync(generatedImagePath, path.join(iterationDir, '5-finalResult.png'));
console.log(`Generated image saved to ${generatedImagePath}`);
// --- Create composite image ---
const imagePaths = [
path.join(iterationDir, '1-personOrig.png'),
path.join(iterationDir, '3-clothOrig.png'),
path.join(iterationDir, '2-personCleaned.png'),
path.join(iterationDir, '4-clothCleaned.png'),
path.join(iterationDir, '5-finalResult.png')
];
const resizedImages = [];
let totalWidth = 10; // Initial left margin
const resizedHeight = 720;
for (const imagePath of imagePaths) {
const image = sharp(imagePath);
const metadata = await image.metadata();
if (!metadata.width || !metadata.height) {
throw new Error(`Could not get metadata for image ${imagePath}`);
}
const resizedWidth = Math.round((metadata.width / metadata.height) * resizedHeight);
const resizedImageBuffer = await image.resize(resizedWidth, resizedHeight).toBuffer();
resizedImages.push({
buffer: resizedImageBuffer,
width: resizedWidth
});
totalWidth += resizedWidth + 10; // Add image width and right margin
}
const compositeOps = [];
let currentLeft = 10; // Start with left margin
for (const img of resizedImages) {
compositeOps.push({
input: img.buffer,
top: 10, // 10px top margin
left: currentLeft
});
currentLeft += img.width + 10; // Move to the next position
}
await sharp({
create: {
width: totalWidth,
height: 740,
channels: 4,
background: { r: 255, g: 255, b: 255, alpha: 1 }
}
})
.composite(compositeOps)
.toFile(path.join(iterationDir, 'process.png'));
console.log(`Generated composite image process.png in ${iterationDir}`);
// --- End of composite image creation ---
index++;
} else {
console.error(`Failed to generate image for index ${index}`);
}
} catch (error) {
console.error("An error occurred during image generation:", error);
// Optional: wait for a bit before retrying to avoid spamming errors
await new Promise(resolve => setTimeout(resolve, 5000));
}
}
}

View File

@ -0,0 +1,538 @@
import * as fs from 'fs/promises';
import * as path from 'path';
import dotenv from 'dotenv';
import { downloadImagesFromPinterestSearch } from '../lib/pinterest';
import { convertImage, convertImageWithFile, convertImageWithFileForPose, convertImageWithFileHandbag } from '../lib/image-converter';
import { logger } from '../lib/logger';
import { callLmstudio, callLMStudioAPIWithFile } from '../lib/lmstudio';
import { upscale } from '../lib/image-upscaler';
dotenv.config();
const SERVER1_COMFY_BASE_URL = process.env.SERVER1_COMFY_BASE_URL!;
const SERVER1_COMFY_OUTPUT_DIR = process.env.SERVER1_COMFY_OUTPUT_DIR!;
const imageSize: { width: number; height: number } = { width: 1280, height: 720 };
/**
 * Upscale `baseImage` via the ComfyUI upscale workflow and copy the result to
 * `outputDir/outputFilename`.
 *
 * @param baseImage - Filename of the image to upscale (resolved against `outputDir`).
 * @param faceImage - Filename of a face reference image. It is copied into the
 *   ComfyUI input folder alongside the base image but is NOT passed to `upscale()`
 *   below — presumably the workflow picks it up by filename; TODO confirm.
 * @param outputFilename - Name of the final upscaled file.
 * @param outputDir - Directory holding the source images and receiving the output.
 * @throws Re-throws any copy/conversion error after logging it.
 */
async function upscaleAndFix(
  baseImage: string,
  faceImage: string,
  outputFilename: string,
  outputDir: string,
): Promise<void> {
  try {
    // ComfyUI reads inputs from the "input" sibling of the configured output dir.
    const inputFolderFullPath = SERVER1_COMFY_OUTPUT_DIR.replace('output', 'input');
    await fs.mkdir(inputFolderFullPath, { recursive: true });
    const baseFilePath = path.join(outputDir, baseImage);
    const referenceFilePath = path.join(outputDir, faceImage);
    const baseFileName = path.basename(baseImage);
    const referenceFileName = path.basename(faceImage);
    const inputBasePath = path.join(inputFolderFullPath, baseFileName);
    const inputReferencePath = path.join(inputFolderFullPath, referenceFileName);
    logger.info(`Copying base image to ComfyUI input: ${inputBasePath}`);
    await fs.copyFile(baseFilePath, inputBasePath);
    logger.info(`Copying reference image to ComfyUI input: ${inputReferencePath}`);
    await fs.copyFile(referenceFilePath, inputReferencePath);
    // Run the upscale workflow.
    // (Log message fixed: this calls upscale(), not convertImageWithFile.)
    logger.info(`Upscaling image with upscale workflow...`);
    const convertedImagePath = await upscale(
      baseFileName,
      SERVER1_COMFY_BASE_URL,
      SERVER1_COMFY_OUTPUT_DIR,
    )
    logger.info(`Converted image: ${convertedImagePath}`);
    // Copy the upscaled result to its final destination.
    const finalOutputPath = path.join(outputDir, outputFilename);
    logger.info(`Copying to final destination: ${finalOutputPath}`);
    await fs.copyFile(convertedImagePath, finalOutputPath);
    logger.info(`✓ Successfully generated: ${finalOutputPath}`);
  } catch (error) {
    // Error message fixed: this function upscales one image, it does not
    // combine two images.
    logger.error(`Error upscaling image:`, error);
    throw error;
  }
}
/**
 * Download one image from Pinterest and either convert it through ComfyUI with
 * `prompt` or copy it straight into `outputDir`.
 *
 * @param keyword - Pinterest search keyword.
 * @param prompt - Image conversion prompt (ignored when `shouldConvert` is false).
 * @param filename - Output filename inside `outputDir`.
 * @param outputDir - Directory to save the generated file.
 * @param shouldConvert - When true, run the ComfyUI conversion; otherwise copy as-is.
 * @throws Re-throws any download/copy/conversion error after logging it; a plain
 *   "nothing downloaded" result logs an error and returns without throwing.
 */
async function processImage(
  keyword: string,
  prompt: string,
  filename: string,
  outputDir: string,
  shouldConvert: boolean = true
): Promise<void> {
  try {
    // BUG FIX: the original logged the broken template "$(unknown)" instead of
    // interpolating the output filename.
    logger.info(`\n=== Processing: ${filename} ===`);
    logger.info(`Keyword: ${keyword}`);
    logger.info(`Should convert: ${shouldConvert}`);
    // Step 1: Download image from Pinterest
    logger.info(`Step 1: Downloading image from Pinterest with keyword: "${keyword}"...`);
    const downloadedImages = await downloadImagesFromPinterestSearch(keyword, 1);
    if (downloadedImages.length === 0) {
      logger.error(`Failed to download image for keyword: "${keyword}"`);
      return; // best-effort: caller proceeds without this image
    }
    const downloadedImagePath = downloadedImages[0];
    logger.info(`Downloaded image: ${downloadedImagePath}`);
    const finalOutputPath = path.join(outputDir, filename);
    if (shouldConvert) {
      logger.info(`Prompt: ${prompt}`);
      // Step 2: Copy image to ComfyUI input directory
      const inputFolderFullPath = SERVER1_COMFY_OUTPUT_DIR.replace('output', 'input');
      await fs.mkdir(inputFolderFullPath, { recursive: true });
      const imageFileName = path.basename(downloadedImagePath);
      const inputImagePath = path.join(inputFolderFullPath, imageFileName);
      logger.info(`Step 2: Copying image to ComfyUI input folder: ${inputImagePath}`);
      await fs.copyFile(downloadedImagePath, inputImagePath);
      // Step 3: Convert image with prompt
      logger.info(`Step 3: Converting image with prompt...`);
      const convertedImagePath = await convertImage(
        prompt,
        imageFileName,
        SERVER1_COMFY_BASE_URL,
        SERVER1_COMFY_OUTPUT_DIR,
        imageSize
      );
      logger.info(`Converted image: ${convertedImagePath}`);
      // Step 4: Copy the converted image to final destination
      logger.info(`Step 4: Copying to final destination: ${finalOutputPath}`);
      await fs.copyFile(convertedImagePath, finalOutputPath);
    } else {
      // No conversion requested: copy the downloaded image directly.
      logger.info(`Step 2: Copying directly to final destination: ${finalOutputPath}`);
      await fs.copyFile(downloadedImagePath, finalOutputPath);
    }
    logger.info(`✓ Successfully generated: ${finalOutputPath}`);
  } catch (error) {
    logger.error(`Error processing image for keyword "${keyword}":`, error);
    throw error;
  }
}
/**
 * Run an already-downloaded image through the ComfyUI conversion workflow.
 *
 * @param prompt - Image conversion prompt.
 * @param imagePath - Path to the existing source image.
 * @param outputFilename - Output filename inside `outputDir`.
 * @param outputDir - Directory to save the converted file.
 * @throws Re-throws any copy/conversion error after logging it.
 */
async function convertImageWithPrompt(
  prompt: string,
  imagePath: string,
  outputFilename: string,
  outputDir: string
): Promise<void> {
  try {
    logger.info(`\n=== Converting Image: ${outputFilename} ===`);
    logger.info(`Source: ${imagePath}`);
    logger.info(`Prompt: ${prompt}`);
    // Stage the source image inside ComfyUI's input folder (sibling of output).
    const comfyInputDir = SERVER1_COMFY_OUTPUT_DIR.replace('output', 'input');
    await fs.mkdir(comfyInputDir, { recursive: true });
    const imageFileName = path.basename(imagePath);
    const stagedPath = path.join(comfyInputDir, imageFileName);
    logger.info(`Step 1: Copying image to ComfyUI input folder: ${stagedPath}`);
    await fs.copyFile(imagePath, stagedPath);
    // Run the conversion workflow.
    logger.info(`Step 2: Converting image with prompt...`);
    const convertedImagePath = await convertImage(
      prompt,
      imageFileName,
      SERVER1_COMFY_BASE_URL,
      SERVER1_COMFY_OUTPUT_DIR,
      imageSize
    );
    logger.info(`Converted image: ${convertedImagePath}`);
    // Deliver the result to its final location.
    const finalOutputPath = path.join(outputDir, outputFilename);
    logger.info(`Step 3: Copying to final destination: ${finalOutputPath}`);
    await fs.copyFile(convertedImagePath, finalOutputPath);
    logger.info(`✓ Successfully converted: ${finalOutputPath}`);
  } catch (error) {
    logger.error(`Error converting image:`, error);
    throw error;
  }
}
/**
 * Combine a base image with a reference image through ComfyUI using `prompt`.
 *
 * @param prompt - Processing prompt.
 * @param baseFile - Base image filename (resolved against `outputDir`).
 * @param referenceFile - Reference image filename (resolved against `outputDir`).
 * @param outputFilename - Output filename inside `outputDir`.
 * @param outputDir - Directory holding the inputs and receiving the output.
 * @param isPose - When true, use the pose-specific conversion workflow.
 * @throws Re-throws any copy/conversion error after logging it.
 */
async function processTwoImages(
  prompt: string,
  baseFile: string,
  referenceFile: string,
  outputFilename: string,
  outputDir: string,
  isPose: boolean = false
): Promise<void> {
  try {
    logger.info(`\n=== Processing: ${outputFilename} ===`);
    logger.info(`Base: ${baseFile}, Reference: ${referenceFile}`);
    logger.info(`Prompt: ${prompt}`);
    // Stage both images inside ComfyUI's input folder (sibling of output).
    const comfyInputDir = SERVER1_COMFY_OUTPUT_DIR.replace('output', 'input');
    await fs.mkdir(comfyInputDir, { recursive: true });
    const baseFileName = path.basename(baseFile);
    const referenceFileName = path.basename(referenceFile);
    const stagedBasePath = path.join(comfyInputDir, baseFileName);
    const stagedReferencePath = path.join(comfyInputDir, referenceFileName);
    logger.info(`Copying base image to ComfyUI input: ${stagedBasePath}`);
    await fs.copyFile(path.join(outputDir, baseFile), stagedBasePath);
    logger.info(`Copying reference image to ComfyUI input: ${stagedReferencePath}`);
    await fs.copyFile(path.join(outputDir, referenceFile), stagedReferencePath);
    // Pick the workflow variant up front, then run it.
    logger.info(`Processing images with convertImageWithFile...`);
    const converter = isPose ? convertImageWithFileForPose : convertImageWithFile;
    const convertedImagePath = await converter(
      prompt,
      baseFileName,
      referenceFileName,
      SERVER1_COMFY_BASE_URL,
      SERVER1_COMFY_OUTPUT_DIR,
      imageSize
    );
    logger.info(`Converted image: ${convertedImagePath}`);
    // Deliver the result to its final location.
    const finalOutputPath = path.join(outputDir, outputFilename);
    logger.info(`Copying to final destination: ${finalOutputPath}`);
    await fs.copyFile(convertedImagePath, finalOutputPath);
    logger.info(`✓ Successfully generated: ${finalOutputPath}`);
  } catch (error) {
    logger.error(`Error processing two images:`, error);
    throw error;
  }
}
/**
 * Composite a handbag (reference image) onto a model (base image) through the
 * handbag-specific ComfyUI workflow.
 *
 * @param prompt - Processing prompt (describes how the bag is held).
 * @param baseFile - Model image filename (resolved against `outputDir`).
 * @param referenceFile - Handbag image filename (resolved against `outputDir`).
 * @param outputFilename - Output filename inside `outputDir`.
 * @param outputDir - Directory holding the inputs and receiving the output.
 * @throws Re-throws any copy/conversion error after logging it.
 */
async function processTwoImagesHandbag(
  prompt: string,
  baseFile: string,
  referenceFile: string,
  outputFilename: string,
  outputDir: string,
): Promise<void> {
  try {
    logger.info(`\n=== Processing: ${outputFilename} ===`);
    logger.info(`Base: ${baseFile}, Reference: ${referenceFile}`);
    logger.info(`Prompt: ${prompt}`);
    // Copy both images to ComfyUI input directory
    const inputFolderFullPath = SERVER1_COMFY_OUTPUT_DIR.replace('output', 'input');
    await fs.mkdir(inputFolderFullPath, { recursive: true });
    const baseFilePath = path.join(outputDir, baseFile);
    const referenceFilePath = path.join(outputDir, referenceFile);
    const baseFileName = path.basename(baseFile);
    const referenceFileName = path.basename(referenceFile);
    const inputBasePath = path.join(inputFolderFullPath, baseFileName);
    const inputReferencePath = path.join(inputFolderFullPath, referenceFileName);
    logger.info(`Copying base image to ComfyUI input: ${inputBasePath}`);
    await fs.copyFile(baseFilePath, inputBasePath);
    logger.info(`Copying reference image to ComfyUI input: ${inputReferencePath}`);
    await fs.copyFile(referenceFilePath, inputReferencePath);
    // Log message fixed: this path calls convertImageWithFileHandbag, not
    // convertImageWithFile (copy-paste residue from processTwoImages).
    logger.info(`Processing images with convertImageWithFileHandbag...`);
    const convertedImagePath = await convertImageWithFileHandbag(
      prompt,
      baseFileName,
      referenceFileName,
      SERVER1_COMFY_BASE_URL,
      SERVER1_COMFY_OUTPUT_DIR,
      imageSize
    )
    logger.info(`Converted image: ${convertedImagePath}`);
    // Copy the converted image to final destination
    const finalOutputPath = path.join(outputDir, outputFilename);
    logger.info(`Copying to final destination: ${finalOutputPath}`);
    await fs.copyFile(convertedImagePath, finalOutputPath);
    logger.info(`✓ Successfully generated: ${finalOutputPath}`);
  } catch (error) {
    logger.error(`Error processing two images:`, error);
    throw error;
  }
}
/**
 * Process a complete iteration: download base images and apply sequential
 * transformations.
 *
 * Pipeline (all artifacts keyed by one timestamp, saved under generated/vton_<timestamp>/):
 *   Phase 1 - download five Pinterest base images (model face, outfit, room,
 *             handbag, pose). Only the handbag is converted during download.
 *   Phase 2 - describe the outfit and room via LMStudio, build a Chinese
 *             transformation prompt, re-pose the model, compose three handbag
 *             variants, and upscale each variant.
 * The steps are strictly order-dependent: later prompts embed earlier LMStudio
 * results.
 *
 * @param iteration - 1-based loop counter, used for logging only.
 * @throws Re-throws any error after logging it (the caller decides whether to retry).
 */
async function processIteration(iteration: number): Promise<void> {
  try {
    // One timestamp names every file of this iteration, so runs never collide.
    const timestamp = Date.now();
    logger.info(`\n${'='.repeat(80)}`);
    logger.info(`ITERATION ${iteration} - Starting with timestamp: ${timestamp}`);
    logger.info(`${'='.repeat(80)}`);
    // Create output directory for this iteration
    const outputDir = path.join(process.cwd(), 'generated', `vton_${timestamp}`);
    await fs.mkdir(outputDir, { recursive: true });
    logger.info(`Output directory created: ${outputDir}`);
    // === PHASE 1: Download base images ===
    logger.info(`\n--- PHASE 1: Downloading base images ---`);
    // Face reference; downloaded as-is (shouldConvert = false).
    await processImage(
      'cute girl face high resolution',
      '',
      `model_${timestamp}.png`,
      outputDir,
      false
    );
    // Outfit reference; described later by LMStudio rather than converted here.
    await processImage(
      'woman elegant outfit fullbody single',
      '',
      `outfit_${timestamp}.png`,
      outputDir,
      false
    );
    // Background/location reference.
    await processImage(
      'photo elegant indoor room',
      '',
      `room_${timestamp}.png`,
      outputDir,
      false
    );
    // Handbag: the only Phase-1 image that IS converted (the Chinese prompt
    // extracts the bag onto a light-gray background, front-facing).
    await processImage(
      'handbag single product photography',
      '请提取照片中的包,并将其正面朝向地放置在亮灰色背景上。',
      `handbag_${timestamp}.png`,
      outputDir,
      true
    );
    // Pose reference for the re-posing step below.
    await processImage(
      'woman portrait standing',
      '',
      `pose_${timestamp}.png`,
      outputDir,
      false
    );
    // === PHASE 2: Sequential transformations ===
    logger.info(`\n--- PHASE 2: Sequential transformations ---`);
    // Step 1: Generate outfit prompt using LMStudio API
    logger.info('Step 1: Generating outfit prompt with LMStudio API...');
    const outfitImagePath = path.join(outputDir, `outfit_${timestamp}.png`);
    const outfitPromptResponse = await callLMStudioAPIWithFile(
      outfitImagePath,
      'Describe this outfit in detail about 30 words. Focus on color and cloth type. Return the result in this format: {"result":""}'
    );
    // Fall back to the raw response if the model did not return {"result": ...}.
    const outfitPrompt = outfitPromptResponse.result || outfitPromptResponse;
    logger.info(`Generated outfit prompt: ${outfitPrompt}`);
    // Step 2: Generate location prompt using LMStudio API
    logger.info('Step 2: Generating location prompt with LMStudio API...');
    const roomImagePath = path.join(outputDir, `room_${timestamp}.png`);
    const locationPromptResponse = await callLMStudioAPIWithFile(
      roomImagePath,
      'Describe this location/room in detail about 30 words. Return the result in this format: {"result":""}'
    );
    const locationPrompt = locationPromptResponse.result || locationPromptResponse;
    logger.info(`Generated location prompt: ${locationPrompt}`);
    // Step 3: Generate Chinese prompt using LMStudio API.
    // The request below embeds the Step-1/Step-2 results, so order matters.
    logger.info('Step 3: Generating Chinese prompt for model transformation...');
    const chinesePromptRequest = `Generate a Chinese prompt for image transformation that describes:
- Prefix: genereate a portarit photo of a woman in image1
- Use outfit to: ${outfitPrompt}
- Use location to: ${locationPrompt}
Return the result in this format: {"result":""}`;
    const chinesePromptResponse = await callLmstudio(chinesePromptRequest);
    const chinesePrompt = chinesePromptResponse.result || chinesePromptResponse;
    logger.info(`Generated Chinese prompt: ${chinesePrompt}`);
    // Process model with outfit and location using the Chinese prompt
    logger.info('Step 4: Processing model with outfit and location...');
    const modelImagePath = path.join(outputDir, `model_${timestamp}.png`);
    // Copy model image to ComfyUI input directory
    const inputFolderFullPath = SERVER1_COMFY_OUTPUT_DIR.replace('output', 'input');
    await fs.mkdir(inputFolderFullPath, { recursive: true });
    const modelFileName = path.basename(modelImagePath);
    const inputModelPath = path.join(inputFolderFullPath, modelFileName);
    await fs.copyFile(modelImagePath, inputModelPath);
    // Convert image with Chinese prompt and pose reference (isPose = true
    // selects the pose-specific workflow inside processTwoImages).
    await processTwoImages(
      `请将图1中模特的姿势更改为图2的姿势。, ${chinesePrompt}`,
      modelFileName,
      `pose_${timestamp}.png`,
      `model_outfit_location_pose_${timestamp}.png`,
      outputDir,
      true
    );
    // Step 5: Add handbag to model — three prompt variants of "holding the bag".
    await processTwoImagesHandbag(
      '请将图1中的女性修改成手持图2的包。',
      `model_outfit_location_pose_${timestamp}.png`,
      `handbag_${timestamp}.png`,
      `model_outfit_location_handbag1_${timestamp}.png`,
      outputDir
    );
    await processTwoImagesHandbag(
      '请让图1的女性看起来像是在手里拿着图2的包。',
      `model_outfit_location_pose_${timestamp}.png`,
      `handbag_${timestamp}.png`,
      `model_outfit_location_handbag2_${timestamp}.png`,
      outputDir
    );
    await processTwoImagesHandbag(
      '请将图1中的女性修改成双手拿着图2的包。',
      `model_outfit_location_pose_${timestamp}.png`,
      `handbag_${timestamp}.png`,
      `model_outfit_location_handbag3_${timestamp}.png`,
      outputDir
    );
    // Upscale each handbag variant; the model face image is passed as the
    // reference argument of upscaleAndFix.
    await upscaleAndFix(
      `model_outfit_location_handbag1_${timestamp}.png`,
      `model_${timestamp}.png`,
      `model_outfit_location_handbag1_upscaled_${timestamp}.png`,
      outputDir
    );
    await upscaleAndFix(
      `model_outfit_location_handbag2_${timestamp}.png`,
      `model_${timestamp}.png`,
      `model_outfit_location_handbag2_upscaled_${timestamp}.png`,
      outputDir
    );
    await upscaleAndFix(
      `model_outfit_location_handbag3_${timestamp}.png`,
      `model_${timestamp}.png`,
      `model_outfit_location_handbag3_upscaled_${timestamp}.png`,
      outputDir
    );
    logger.info(`\n${'='.repeat(80)}`);
    logger.info(`ITERATION ${iteration} COMPLETED!`);
    logger.info(`Generated files are saved in: ${outputDir}`);
    logger.info(`${'='.repeat(80)}\n`);
  } catch (error) {
    logger.error(`Error in iteration ${iteration}:`, error);
    throw error;
  }
}
/**
 * Entry point: runs processIteration forever with a 5-second pause between
 * runs. The first unhandled error terminates the process with exit code 1.
 */
async function main() {
  try {
    logger.info('Starting infinite processing loop...');
    logger.info('Press Ctrl+C to stop the process\n');
    for (let iteration = 1; ; iteration++) {
      await processIteration(iteration);
      // Small delay between iterations
      logger.info('Waiting 5 seconds before next iteration...\n');
      await new Promise(resolve => setTimeout(resolve, 5000));
    }
  } catch (error) {
    logger.error('Error in main execution:', error);
    process.exit(1);
  }
}
// Execute main function if this file is run directly (CommonJS entry-point check).
// NOTE(review): the returned promise is intentionally not awaited; main() handles
// its own failure by calling process.exit(1) in its catch block.
if (require.main === module) {
main();
}
export { processImage, convertImageWithPrompt, processTwoImages, processIteration, main };