save current changes
src/comfyworkflows/generate_video.json  (new file, 399 lines)
@@ -0,0 +1,399 @@
{
  "6": {
    "inputs": {
      "text": "Create an 8-second animated loop featuring a young man sitting on a stone ledge overlooking a nighttime cityscape. The scene should begin with a slow zoom into the boy’s face as he gazes upwards at the starry sky. Throughout the video, have shooting stars streak across the sky – some fast, some slower, creating a dynamic visual effect. Gentle wind blows his hair and clothing.",
      "clip": [
        "38",
        0
      ]
    },
    "class_type": "CLIPTextEncode",
    "_meta": {
      "title": "CLIP Text Encode (Positive Prompt)"
    }
  },
  "7": {
    "inputs": {
      "text": "色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
      "clip": [
        "38",
        0
      ]
    },
    "class_type": "CLIPTextEncode",
    "_meta": {
      "title": "CLIP Text Encode (Negative Prompt)"
    }
  },
  "8": {
    "inputs": {
      "samples": [
        "58",
        0
      ],
      "vae": [
        "39",
        0
      ]
    },
    "class_type": "VAEDecode",
    "_meta": {
      "title": "VAE Decode"
    }
  },
  "38": {
    "inputs": {
      "clip_name": "umt5_xxl_fp8_e4m3fn_scaled.safetensors",
      "type": "wan",
      "device": "cpu"
    },
    "class_type": "CLIPLoader",
    "_meta": {
      "title": "Load CLIP"
    }
  },
  "39": {
    "inputs": {
      "vae_name": "wan_2.1_vae.safetensors"
    },
    "class_type": "VAELoader",
    "_meta": {
      "title": "Load VAE"
    }
  },
  "50": {
    "inputs": {
      "width": [
        "64",
        1
      ],
      "height": [
        "64",
        2
      ],
      "length": 121,
      "batch_size": 1,
      "positive": [
        "6",
        0
      ],
      "negative": [
        "7",
        0
      ],
      "vae": [
        "39",
        0
      ],
      "start_image": [
        "64",
        0
      ]
    },
    "class_type": "WanImageToVideo",
    "_meta": {
      "title": "WanImageToVideo"
    }
  },
  "52": {
    "inputs": {
      "image": "ComfyUI_00036_.png"
    },
    "class_type": "LoadImage",
    "_meta": {
      "title": "Load Image"
    }
  },
  "54": {
    "inputs": {
      "shift": 8.000000000000002,
      "model": [
        "69",
        0
      ]
    },
    "class_type": "ModelSamplingSD3",
    "_meta": {
      "title": "ModelSamplingSD3"
    }
  },
  "55": {
    "inputs": {
      "shift": 8.000000000000002,
      "model": [
        "70",
        0
      ]
    },
    "class_type": "ModelSamplingSD3",
    "_meta": {
      "title": "ModelSamplingSD3"
    }
  },
  "57": {
    "inputs": {
      "add_noise": "enable",
      "noise_seed": 375574453154296,
      "steps": 6,
      "cfg": 1,
      "sampler_name": "euler",
      "scheduler": "simple",
      "start_at_step": 0,
      "end_at_step": 3,
      "return_with_leftover_noise": "enable",
      "model": [
        "54",
        0
      ],
      "positive": [
        "50",
        0
      ],
      "negative": [
        "50",
        1
      ],
      "latent_image": [
        "50",
        2
      ]
    },
    "class_type": "KSamplerAdvanced",
    "_meta": {
      "title": "KSampler (Advanced)"
    }
  },
  "58": {
    "inputs": {
      "add_noise": "disable",
      "noise_seed": 0,
      "steps": 6,
      "cfg": 1,
      "sampler_name": "euler",
      "scheduler": "simple",
      "start_at_step": 3,
      "end_at_step": 10000,
      "return_with_leftover_noise": "disable",
      "model": [
        "55",
        0
      ],
      "positive": [
        "50",
        0
      ],
      "negative": [
        "50",
        1
      ],
      "latent_image": [
        "57",
        0
      ]
    },
    "class_type": "KSamplerAdvanced",
    "_meta": {
      "title": "KSampler (Advanced)"
    }
  },
  "61": {
    "inputs": {
      "unet_name": "wan2.2_i2v_high_noise_14B_Q4_K_S.gguf"
    },
    "class_type": "UnetLoaderGGUF",
    "_meta": {
      "title": "Unet Loader (GGUF)"
    }
  },
  "62": {
    "inputs": {
      "unet_name": "wan2.2_i2v_low_noise_14B_Q4_K_S.gguf"
    },
    "class_type": "UnetLoaderGGUF",
    "_meta": {
      "title": "Unet Loader (GGUF)"
    }
  },
  "63": {
    "inputs": {
      "frame_rate": 32,
      "loop_count": 0,
      "filename_prefix": "RADOMVIDEOMAKERVIDEO",
      "format": "video/h264-mp4",
      "pix_fmt": "yuv420p",
      "crf": 19,
      "save_metadata": true,
      "trim_to_audio": false,
      "pingpong": false,
      "save_output": true,
      "images": [
        "71",
        0
      ]
    },
    "class_type": "VHS_VideoCombine",
    "_meta": {
      "title": "Video Combine 🎥🅥🅗🅢"
    }
  },
  "64": {
    "inputs": {
      "width": 720,
      "height": 1280,
      "upscale_method": "lanczos",
      "keep_proportion": "crop",
      "pad_color": "0, 0, 0",
      "crop_position": "center",
      "divisible_by": 16,
      "device": "cpu",
      "image": [
        "52",
        0
      ]
    },
    "class_type": "ImageResizeKJv2",
    "_meta": {
      "title": "Resize Image v2"
    }
  },
  "65": {
    "inputs": {
      "sage_attention": "sageattn_qk_int8_pv_fp8_cuda++",
      "model": [
        "61",
        0
      ]
    },
    "class_type": "PathchSageAttentionKJ",
    "_meta": {
      "title": "Patch Sage Attention KJ"
    }
  },
  "66": {
    "inputs": {
      "enable_fp16_accumulation": true,
      "model": [
        "65",
        0
      ]
    },
    "class_type": "ModelPatchTorchSettings",
    "_meta": {
      "title": "Model Patch Torch Settings"
    }
  },
  "67": {
    "inputs": {
      "sage_attention": "sageattn_qk_int8_pv_fp8_cuda++",
      "model": [
        "62",
        0
      ]
    },
    "class_type": "PathchSageAttentionKJ",
    "_meta": {
      "title": "Patch Sage Attention KJ"
    }
  },
  "68": {
    "inputs": {
      "enable_fp16_accumulation": true,
      "model": [
        "67",
        0
      ]
    },
    "class_type": "ModelPatchTorchSettings",
    "_meta": {
      "title": "Model Patch Torch Settings"
    }
  },
  "69": {
    "inputs": {
      "lora_name": "Wan21_I2V_14B_lightx2v_cfg_step_distill_lora_rank64.safetensors",
      "strength_model": 3.0000000000000004,
      "model": [
        "66",
        0
      ]
    },
    "class_type": "LoraLoaderModelOnly",
    "_meta": {
      "title": "LoraLoaderModelOnly"
    }
  },
  "70": {
    "inputs": {
      "lora_name": "Wan21_T2V_14B_lightx2v_cfg_step_distill_lora_rank64.safetensors",
      "strength_model": 1.5000000000000002,
      "model": [
        "68",
        0
      ]
    },
    "class_type": "LoraLoaderModelOnly",
    "_meta": {
      "title": "LoraLoaderModelOnly"
    }
  },
  "71": {
    "inputs": {
      "ckpt_name": "rife49.pth",
      "clear_cache_after_n_frames": 10,
      "multiplier": 2,
      "fast_mode": true,
      "ensemble": true,
      "scale_factor": 1,
      "frames": [
        "73",
        0
      ]
    },
    "class_type": "RIFE VFI",
    "_meta": {
      "title": "RIFE VFI (recommend rife47 and rife49)"
    }
  },
  "72": {
    "inputs": {
      "upscale_model": "4x-UltraSharp.pth",
      "mode": "rescale",
      "rescale_factor": 2.0000000000000004,
      "resize_width": 832,
      "resampling_method": "lanczos",
      "supersample": "true",
      "rounding_modulus": 8
    },
    "class_type": "CR Upscale Image",
    "_meta": {
      "title": "🔍 CR Upscale Image"
    }
  },
  "73": {
    "inputs": {
      "resize_to": "4k",
      "images": [
        "8",
        0
      ],
      "upscaler_trt_model": [
        "75",
        0
      ]
    },
    "class_type": "UpscalerTensorrt",
    "_meta": {
      "title": "Upscaler Tensorrt ⚡"
    }
  },
  "75": {
    "inputs": {
      "model": "4xNomos2_otf_esrgan",
      "precision": "fp16"
    },
    "class_type": "LoadUpscalerTensorrtModel",
    "_meta": {
      "title": "Load Upscale Tensorrt Model"
    }
  }
}
src/index.ts  (22 lines changed)
@@ -1,6 +1,7 @@
 import { downloadPinterestImages } from './lib/downloader';
 import { describeImage } from './lib/image-describer';
 import { generateImage } from './lib/image-generator';
+import { generateVideo } from './lib/video-generator';
 import { logger } from './lib/logger';
 
 (async () => {
@@ -25,9 +26,26 @@ import { logger } from './lib/logger';
       logger.info(`Description for ${imagePath}:`, prompt);
 
       const timestamp = new Date().getTime();
-      const newFileName = `${keyword.replace(/\s/g, '_')}_${timestamp}.png`;
-      const generatedImagePath = await generateImage(prompt, newFileName);
+      const imageFileName = `${keyword.replace(/\s/g, '_')}_${timestamp}.png`;
+      const generatedImagePath = await generateImage(prompt, imageFileName);
       logger.info(`Generated new image from prompt, saved to: ${generatedImagePath}`);
+
+      const videoPromptResponse = await describeImage(generatedImagePath,
+        `Generate a prompt for an 8-second video based on the provided image.
+        The prompt should describe a dynamic scene that evolves from the static image.
+        Output should be in this format
+        ---
+        {
+          "prompt":""
+        }
+        ---
+        `);
+      const videoPrompt = videoPromptResponse.prompt;
+      logger.info(`Generated video prompt: ${videoPrompt}`);
+
+      const videoFileName = `${keyword.replace(/\s/g, '_')}_${timestamp}.mp4`;
+      const generatedVideoPath = await generateVideo(videoPrompt, generatedImagePath, videoFileName);
+      logger.info(`Generated video from prompt, saved to: ${generatedVideoPath}`);
     } catch (error) {
       logger.error(`Failed to process ${imagePath}:`, error);
     }
src/lib/video-generator.ts  (new file, 47 lines)
@@ -0,0 +1,47 @@
import * as fs from 'fs/promises';
import * as path from 'path';
import axios from 'axios';
import dotenv from 'dotenv';

dotenv.config();

const COMFY_BASE_URL = process.env.COMFY_BASE_URL?.replace(/\/$/, '');
const COMFY_OUTPUT_DIR = process.env.COMFY_OUTPUT_DIR;

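// Queues the image-to-video ComfyUI workflow for the given prompt and source image,
// polls until the render completes, then moves the resulting .mp4 into ./generated
// under newFileName. Assumes COMFY_BASE_URL (e.g. http://127.0.0.1:8188) and
// COMFY_OUTPUT_DIR point at the ComfyUI server and its output folder via .env.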
async function generateVideo(prompt: string, imagePath: string, newFileName: string): Promise<string> {
    const workflow = JSON.parse(await fs.readFile('src/comfyworkflows/generate_video.json', 'utf-8'));
    workflow['6']['inputs']['text'] = prompt;
    workflow['52']['inputs']['image'] = imagePath;

    const response = await axios.post(`${COMFY_BASE_URL}/prompt`, { prompt: workflow });
    const promptId = response.data.prompt_id;

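    // Poll the ComfyUI /history endpoint once per second until this prompt
    // reports at least one output.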
    let history;
    do {
        await new Promise(resolve => setTimeout(resolve, 1000));
        const historyResponse = await axios.get(`${COMFY_BASE_URL}/history/${promptId}`);
        history = historyResponse.data[promptId];
    } while (!history || Object.keys(history.outputs).length === 0);

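    // ComfyUI writes the video to its own output directory; take the most
    // recently modified .mp4 as the result of this run.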
    const files = await fs.readdir(COMFY_OUTPUT_DIR!);
    const generatedFiles = files.filter(file => file.endsWith('.mp4'));

    const fileStats = await Promise.all(
        generatedFiles.map(async (file) => {
            const stat = await fs.stat(path.join(COMFY_OUTPUT_DIR!, file));
            return { file, mtime: stat.mtime };
        })
    );

    fileStats.sort((a, b) => b.mtime.getTime() - a.mtime.getTime());

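    // Move the newest render into the project's ./generated folder under the
    // caller-supplied file name.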
    const latestFile = fileStats[0].file;
    const newFilePath = path.resolve('./generated', newFileName);

    await fs.mkdir('./generated', { recursive: true });
    await fs.rename(path.join(COMFY_OUTPUT_DIR!, latestFile), newFilePath);

    return newFilePath;
}

export { generateVideo };