# Description:
# README 只有 t2v 的 LoRA 测试代码;现在已经训练完成了 i2v,
# 如何给 pipe 设置输入图片来测试 LoRA 微调的结果?
import torch
from diffsynth import ModelManager, WanVideoPipeline, save_video, VideoData
# Manages loading of all model weights; everything is kept in bfloat16 on the GPU.
model_manager = ModelManager(torch_dtype=torch.bfloat16, device="cuda")
# 这里的 load model 是这样写吗?如果不是,应该加载哪些模型?
# Load every component the Wan2.1 I2V pipeline needs:
# - the sharded DiT diffusion model — the seven shards are grouped in one
#   inner list so they are loaded together as a single model (this matches
#   DiffSynth-Studio's documented usage for sharded safetensors),
# - the UMT5 text encoder,
# - the VAE,
# - the CLIP image encoder (required for I2V image conditioning; T2V has no CLIP).
model_manager.load_models([
    [
        "models/Wan2.1-I2V-14B-480P/diffusion_pytorch_model-00001-of-00007.safetensors",
        "models/Wan2.1-I2V-14B-480P/diffusion_pytorch_model-00002-of-00007.safetensors",
        "models/Wan2.1-I2V-14B-480P/diffusion_pytorch_model-00003-of-00007.safetensors",
        "models/Wan2.1-I2V-14B-480P/diffusion_pytorch_model-00004-of-00007.safetensors",
        "models/Wan2.1-I2V-14B-480P/diffusion_pytorch_model-00005-of-00007.safetensors",
        "models/Wan2.1-I2V-14B-480P/diffusion_pytorch_model-00006-of-00007.safetensors",
        "models/Wan2.1-I2V-14B-480P/diffusion_pytorch_model-00007-of-00007.safetensors",
    ],
    "models/Wan2.1-I2V-14B-480P/models_t5_umt5-xxl-enc-bf16.pth",
    "models/Wan2.1-I2V-14B-480P/Wan2.1_VAE.pth",
    # FIX: this path was missing its directory prefix in the original snippet.
    "models/Wan2.1-I2V-14B-480P/models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth",
])
# Apply the I2V LoRA checkpoint produced by fine-tuning, then build the pipeline.
lora_checkpoint = "models/lightning_logs/version_11/checkpoints/epoch=9-step=630.ckpt"
model_manager.load_lora(lora_checkpoint, lora_alpha=1.0)

pipe = WanVideoPipeline.from_model_manager(model_manager, device="cuda")
# num_persistent_param_in_dit=None — presumably lets the manager offload all
# DiT parameters to save VRAM; confirm against DiffSynth's docs.
pipe.enable_vram_management(num_persistent_param_in_dit=None)
# 这里如何加载测试图片?(答:通过 pipe 的 input_image 参数传入)
# To test the I2V LoRA, pass the conditioning image to the pipeline via the
# `input_image` keyword — this is what distinguishes an I2V call from T2V.
# The pipeline expects a PIL image; match the resolution used during training.
from PIL import Image  # local import: only needed for the I2V test image

input_image = Image.open("input_image.jpg").convert("RGB")

video = pipe(
    prompt="...",
    negative_prompt="...",
    input_image=input_image,  # the first-frame / conditioning image for I2V
    num_inference_steps=50,
    seed=0,
    tiled=True,
)

# Write the generated frames to disk as an mp4.
save_video(video, "video.mp4", fps=30, quality=5)
# 麻烦了,谢谢!