{"status":"success","data":{"lastUpdated":"1737676800000","downloads":{"wan_v2.2-14b-fp8_shared":{"description":"Models shared by all Wan 2.2 14B fp8 workflows","files":[{"tensorFile":"wan_2.1_vae.safetensors","destinationFolder":"models/vae","tensorDownload":"https://cdn.sogni.ai/ComfyUI/models/vae/wan_2.1_vae.safetensors","bytes":253815318,"sha256":"2fc39d31359a4b0a64f55876d8ff7fa8d780956ae2cb13463b0223e15148976b"},{"tensorFile":"umt5_xxl_fp8_e4m3fn_scaled.safetensors","destinationFolder":"models/text_encoders","tensorDownload":"https://cdn.sogni.ai/ComfyUI/models/text_encoders/umt5_xxl_fp8_e4m3fn_scaled.safetensors","bytes":6735906897,"sha256":"c3355d30191f1f066b26d93fba017ae9809dce6c627dda5f6a66eaa651204f68"}]},"wan_v2.2-14b-fp8_t2v":{"description":"Text-to-Video base workflow models","files":[{"tensorFile":"wan2.2_t2v_high_noise_14B_fp8_scaled.safetensors","destinationFolder":"models/diffusion_models","tensorDownload":"https://cdn.sogni.ai/ComfyUI/models/diffusion_models/wan2.2_t2v_high_noise_14B_fp8_scaled.safetensors","bytes":14293923632,"sha256":"cad711ae211c8b23455ec68cd6a190a33a3d874234a77eb57266d73f8f0e6c9f"},{"tensorFile":"wan2.2_t2v_low_noise_14B_fp8_scaled.safetensors","destinationFolder":"models/diffusion_models","tensorDownload":"https://cdn.sogni.ai/ComfyUI/models/diffusion_models/wan2.2_t2v_low_noise_14B_fp8_scaled.safetensors","bytes":14293923632,"sha256":"e71b96d7c82e638694c5e7fb98fac4bfb0e4ddc5fbbb4b1df40da8f0f1278a97"}]},"wan_v2.2-14b-fp8_t2v_lightx2v":{"description":"Lightx2v speed LoRAs for Text-to-Video workflow","files":[{"tensorFile":"wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors","description":"Lightx2v Speed LoRA for T2V high noise model","destinationFolder":"models/loras","tensorDownload":"https://cdn.sogni.ai/ComfyUI/models/loras/wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors","bytes":1525000000,"sha256":"698321cb86bd30c4af06c9b84e656a1048c8cb54e06d50694536fb5de37fde41"},{"tensorFile":"wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors","description":"Lightx2v Speed LoRA for T2V low noise model","destinationFolder":"models/loras","tensorDownload":"https://cdn.sogni.ai/ComfyUI/models/loras/wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors","bytes":1525000000,"sha256":"ec95216e614b3c132c11bfb387b11feedf62163150ccc9068bca8a189771e75a"}]},"wan_v2.2-14b-fp8_i2v":{"description":"Image-to-Video base workflow models","files":[{"tensorFile":"wan2.2_i2v_high_noise_14B_fp8_scaled.safetensors","destinationFolder":"models/diffusion_models","tensorDownload":"https://cdn.sogni.ai/ComfyUI/models/diffusion_models/wan2.2_i2v_high_noise_14B_fp8_scaled.safetensors","bytes":14294742832,"sha256":"6122e79d55e0f235698d11d657f3b196c5273c830da00b2b013c5a048d5e6a42"},{"tensorFile":"wan2.2_i2v_low_noise_14B_fp8_scaled.safetensors","destinationFolder":"models/diffusion_models","tensorDownload":"https://cdn.sogni.ai/ComfyUI/models/diffusion_models/wan2.2_i2v_low_noise_14B_fp8_scaled.safetensors","bytes":14294742832,"sha256":"5471a457b6ac404202a5fbe6c11595a3d5641fc766b00f38763f72303fffc21e"},{"tensorFile":"clip_vision_h.safetensors","destinationFolder":"models/clip_vision","tensorDownload":"https://cdn.sogni.ai/ComfyUI/models/clip_vision/clip_vision_h.safetensors","bytes":1264219396,"sha256":"64a7ef761bfccbadbaa3da77366aac4185a6c58fa5de5f589b42a65bcc21f161"}]},"wan_v2.2-14b-fp8_i2v_lightx2v":{"description":"Lightx2v speed LoRAs for Image-to-Video 
workflow","files":[{"tensorFile":"wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors","description":"Lightx2v Speed LoRA for I2V high noise model","destinationFolder":"models/loras","tensorDownload":"https://cdn.sogni.ai/ComfyUI/models/loras/wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors","bytes":1525000000,"sha256":"d176c808d6fc461999b68e321efcb7501b20b8c3797523ed0df14f7d1deff11e"},{"tensorFile":"wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors","description":"Lightx2v Speed LoRA for I2V low noise model","destinationFolder":"models/loras","tensorDownload":"https://cdn.sogni.ai/ComfyUI/models/loras/wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors","bytes":1525000000,"sha256":"024f21de095bc8fad9809ded3e9e49a2e170dcf27075da8145ba7d60d8aab7f9"}]},"wan_v2.2-14b-fp8_s2v":{"description":"Sound-to-Video base workflow models","files":[{"tensorFile":"wan2.2_s2v_14B_fp8_scaled.safetensors","destinationFolder":"models/diffusion_models","tensorDownload":"https://cdn.sogni.ai/ComfyUI/models/diffusion_models/wan2.2_s2v_14B_fp8_scaled.safetensors","bytes":16394832474,"sha256":"140e75af5534ac3d91e710d9df756f7032addd64b341ba2c1c70e3e6da9aa216"},{"tensorFile":"wav2vec2_large_english_fp16.safetensors","description":"Audio encoder for Sound-to-Video workflow","destinationFolder":"models/audio_encoders","tensorDownload":"https://cdn.sogni.ai/ComfyUI/models/audio_encoders/wav2vec2_large_english_fp16.safetensors","bytes":661000000,"sha256":"f0017a43ea57ef6b3d4866be607844bbd8cada6d30966f7d70044ed0d63d3f9e"}]},"wan_v2.2-14b-fp8_animate":{"description":"Animation base workflow models (includes DWPose for pose detection)","files":[{"tensorFile":"Wan2_2-Animate-14B_fp8_e4m3fn_scaled_KJ.safetensors","destinationFolder":"models/diffusion_models","tensorDownload":"https://cdn.sogni.ai/ComfyUI/models/diffusion_models/Wan2_2-Animate-14B_fp8_e4m3fn_scaled_KJ.safetensors","bytes":18401760586,"sha256":"2936b31473a967e7a429a6646bba60e7862d0938e178b58b2a140f391dd5b8e6"},{"tensorFile":"yolox_l.onnx","description":"DWPose YOLOX model for pose detection","destinationFolder":"custom_nodes/comfyui_controlnet_aux/ckpts/yzd-v/DWPose","tensorDownload":"https://cdn.sogni.ai/ComfyUI/custom_nodes/comfyui_controlnet_aux/ckpts/yzd-v/DWPose/yolox_l.onnx","bytes":216746733,"sha256":"7860ae79de6c89a3c1eb72ae9a2756c0ccfbe04b7791bb5880afabd97855a411"},{"tensorFile":"dw-ll_ucoco_384_bs5.torchscript.pt","description":"DWPose TorchScript model for pose detection","destinationFolder":"custom_nodes/comfyui_controlnet_aux/ckpts/yzd-v/DWPose","tensorDownload":"https://cdn.sogni.ai/ComfyUI/custom_nodes/comfyui_controlnet_aux/ckpts/yzd-v/DWPose/dw-ll_ucoco_384_bs5.torchscript.pt","bytes":135059124,"sha256":"d86a0b2b59fddc0901a7076e9f59c9f8602602133ed72511c693fd11eea23d91"},{"tensorFile":"WanAnimate_relight_lora_fp16.safetensors","description":"Relight LoRA for Animate workflow","destinationFolder":"models/loras","tensorDownload":"https://cdn.sogni.ai/ComfyUI/models/loras/WanAnimate_relight_lora_fp16.safetensors","bytes":630000000,"sha256":"fc646c74c73f4b251f5fd9bc440ef21b03b27305f499966c68b2b3aa31498561"}]},"wan_v2.2-14b-fp8_animate_lightx2v":{"description":"Lightx2v speed LoRA for Animation workflow","files":[{"tensorFile":"lightx2v_I2V_14B_480p_cfg_step_distill_rank64_bf16.safetensors","description":"Lightx2v Speed LoRA for Animate 
workflow","destinationFolder":"models/loras","tensorDownload":"https://cdn.sogni.ai/ComfyUI/models/loras/lightx2v_I2V_14B_480p_cfg_step_distill_rank64_bf16.safetensors","bytes":1290000000,"sha256":"85c4a61c30e0497aa44b91d93a893b624708461a56fe5485183b28fa07e2dfb3"}]},"wan_v2.2-14b-fp8_animate-replace_lightx2v":{"description":"SAM2 model required only for animate-replace workflow","files":[{"tensorFile":"sam2_hiera_base_plus.safetensors","destinationFolder":"models/sam2","tensorDownload":"https://cdn.sogni.ai/ComfyUI/models/sam2/sam2_hiera_base_plus.safetensors","bytes":323407992,"sha256":"fa02d9028dcc4859c191f1d3f1ca1f7eefdb85f3b5e746c9ad738f322f3e89e2"}]},"z_image_turbo_bf16":{"description":"Z-Image Turbo text-to-image model","files":[{"tensorFile":"z_image_turbo_bf16.safetensors","description":"Z-Image Turbo Diffusion Model","destinationFolder":"models/diffusion_models","tensorDownload":"https://cdn.sogni.ai/ComfyUI/models/diffusion_models/z_image_turbo_bf16.safetensors","bytes":12309866400,"sha256":"2407613050b809ffdff18a4ac99af83ea6b95443ecebdf80e064a79c825574a6"},{"tensorFile":"qwen_3_4b.safetensors","description":"Z-Image Turbo Text Encoder (Qwen 3 4B)","destinationFolder":"models/text_encoders","tensorDownload":"https://cdn.sogni.ai/ComfyUI/models/text_encoders/qwen_3_4b.safetensors","bytes":8044982048,"sha256":"6c671498573ac2f7a5501502ccce8d2b08ea6ca2f661c458e708f36b36edfc5a"},{"tensorFile":"ae.safetensors","description":"Z-Image Turbo VAE","destinationFolder":"models/vae","tensorDownload":"https://cdn.sogni.ai/vae/ae.safetensors","bytes":335304388,"sha256":"afc8e28272cd15db3919bacdb6918ce9c1ed22e96cb12c4d5ed0fba823529e38"}]},"flux2_dev_fp8":{"description":"Flux.2 [dev] high-quality text/image-to-image model (32GB+ VRAM)","files":[{"tensorFile":"flux2_dev_fp8mixed.safetensors","description":"Flux.2 Dev Diffusion Model (FP8 Mixed)","destinationFolder":"models/diffusion_models","tensorDownload":"https://cdn.sogni.ai/ComfyUI/models/diffusion_models/flux2_dev_fp8mixed.safetensors","bytes":38117244928,"sha256":"863a82e4ff950a42a6b0e80bea824828f129eb1a8fbbdbd9e8cb29859127b486"},{"tensorFile":"mistral_3_small_flux2_fp8.safetensors","description":"Flux.2 Dev Text Encoder (Mistral 3 Small FP8)","destinationFolder":"models/text_encoders","tensorDownload":"https://cdn.sogni.ai/ComfyUI/models/text_encoders/mistral_3_small_flux2_fp8.safetensors","bytes":19327352832,"sha256":"e3467b7d912a234fb929cdf215dc08efdb011810b44bc21081c4234cc75b370e"},{"tensorFile":"flux2-vae.safetensors","description":"Flux.2 Dev VAE","destinationFolder":"models/vae","tensorDownload":"https://cdn.sogni.ai/ComfyUI/models/vae/flux2-vae.safetensors","bytes":352321536,"sha256":"d64f3a68e1cc4f9f4e29b6e0da38a0204fe9a49f2d4053f0ec1fa1ca02f9c4b5"}]},"qwen_image_edit_2511_fp8":{"description":"Qwen Image Edit 2511 image editing model","files":[{"tensorFile":"qwen_image_edit_2511_fp8mixed.safetensors","description":"Qwen Image Edit 2511 Diffusion Model (FP8)","destinationFolder":"models/diffusion_models","tensorDownload":"https://cdn.sogni.ai/ComfyUI/models/diffusion_models/qwen_image_edit_2511_fp8mixed.safetensors","bytes":20430698424,"sha256":"318568f61951ab9da21100c7b896e3c1da67f0d2efad6421545e022cfaa2b2b4"},{"tensorFile":"qwen_2.5_vl_7b_fp8_scaled.safetensors","description":"Qwen Image Edit Text Encoder (Qwen 2.5 VL 7B 
FP8)","destinationFolder":"models/text_encoders","tensorDownload":"https://cdn.sogni.ai/ComfyUI/models/text_encoders/qwen_2.5_vl_7b_fp8_scaled.safetensors","bytes":9384670680,"sha256":"cb5636d852a0ea6a9075ab1bef496c0db7aef13c02350571e388aea959c5c0b4"},{"tensorFile":"qwen_image_vae.safetensors","description":"Qwen Image VAE","destinationFolder":"models/vae","tensorDownload":"https://cdn.sogni.ai/ComfyUI/models/vae/qwen_image_vae.safetensors","bytes":253806246,"sha256":"a70580f0213e67967ee9c95f05bb400e8fb08307e017a924bf3441223e023d1f"}]},"qwen_image_edit_2511_fp8_lightning":{"description":"Qwen Image Edit 2511 Lightning 4-step speed LoRA","files":[{"tensorFile":"Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors","description":"Qwen Image Edit 2511 Lightning 4-Step LoRA","destinationFolder":"models/loras","tensorDownload":"https://cdn.sogni.ai/ComfyUI/models/loras/Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors","bytes":849608296,"sha256":"2a32ce938ec71db2b49a817b4844ae86995569518dea56ee0ddc209cbe8e1377"}]},"flux1_shared":{"description":"Models shared by all FLUX.1 workflows (VAE, text encoders)","files":[{"tensorFile":"ae.safetensors","description":"FLUX.1 VAE (shared with Z-Image Turbo)","destinationFolder":"models/vae","tensorDownload":"https://cdn.sogni.ai/vae/ae.safetensors","bytes":335304388,"sha256":"afc8e28272cd15db3919bacdb6918ce9c1ed22e96cb12c4d5ed0fba823529e38"},{"tensorFile":"clip_l.safetensors","description":"FLUX.1 CLIP-L Text Encoder","destinationFolder":"models/text_encoders","tensorDownload":"https://cdn.sogni.ai/text_encoder/clip_l.safetensors","bytes":246144152,"sha256":"660c6f5b1abae9dc498ac2d21e1347d2abdb0cf6c0c0c8576cd796491d9a6cdd"},{"tensorFile":"t5xxl_fp8_e4m3fn_scaled.safetensors","description":"FLUX.1 T5-XXL Text Encoder (FP8)","destinationFolder":"models/text_encoders","tensorDownload":"https://cdn.sogni.ai/text_encoder/t5xxl_fp8_e4m3fn_scaled.safetensors","bytes":5157348688,"sha256":"8b2b5c4e5f7a6d8e9c0b1a2d3e4f5a6b7c8d9e0f1a2b3c4d5e6f7a8b9c0d1e2f"}]},"flux1-schnell-fp8":{"description":"FLUX.1 [schnell] - Fast 4-step text-to-image model","files":[{"tensorFile":"flux1-schnell_fp8.safetensors","description":"FLUX.1 Schnell Diffusion Model (FP8)","destinationFolder":"models/diffusion_models","tensorDownload":"https://cdn.sogni.ai/unet/flux1-schnell_fp8.safetensors","bytes":11891286928,"sha256":"ece1aec579"}]},"chroma-v.46-flash_fp8":{"description":"Chroma v.46 [flash] - Fast 10-step high-quality text-to-image model","files":[{"tensorFile":"chroma-unlocked-v46-flash_float8_e4m3fn_scaled_learned.safetensors","description":"Chroma v.46 Flash Diffusion Model (FP8)","destinationFolder":"models/diffusion_models","tensorDownload":"https://cdn.sogni.ai/unet/chroma-unlocked-v46-flash_float8_e4m3fn_scaled_learned.safetensors","bytes":8902171990,"sha256":"e18c3ed3eb"}]},"chroma-v48-detail-svd_fp8":{"description":"Chroma v.48 [detail] - High-detail 20-step text-to-image model","files":[{"tensorFile":"chroma-unlocked-v48-detail-calibrated_float8_e4m3fn_scaled_learned_svd.safetensors","description":"Chroma v.48 Detail Diffusion Model (FP8)","destinationFolder":"models/diffusion_models","tensorDownload":"https://cdn.sogni.ai/unet/chroma-unlocked-v48-detail-calibrated_float8_e4m3fn_scaled_learned_svd.safetensors","bytes":8902171990,"sha256":"081baf829c"}]},"flux1-krea-dev_fp8_scaled":{"description":"FLUX.1 Krea [dev] - Premium high-quality text-to-image model","files":[{"tensorFile":"flux1-krea-dev_fp8_scaled.safetensors","description":"FLUX.1 Krea Dev 
Diffusion Model (FP8)","destinationFolder":"models/diffusion_models","tensorDownload":"https://cdn.sogni.ai/unet/flux1-krea-dev_fp8_scaled.safetensors","bytes":11904639672,"sha256":"b17a8c2170"}]},"ltx2-19b-fp8_shared":{"description":"Models shared by all LTX-2 19B workflows","files":[{"tensorFile":"gemma_3_12B_it_fp4_mixed.safetensors","description":"LTX-2 Text Encoder (Gemma 3 12B FP4)","destinationFolder":"models/text_encoders","tensorDownload":"https://cdn.sogni.ai/ComfyUI/models/text_encoders/gemma_3_12B_it_fp4_mixed.safetensors","bytes":6735906897},{"tensorFile":"ltx-2-19b-embeddings_connector_dev_bf16.safetensors","description":"LTX-2 Embeddings Connector (Dev) - Required for text encoder","destinationFolder":"models/text_encoders","tensorDownload":"https://cdn.sogni.ai/ComfyUI/models/text_encoders/ltx-2-19b-embeddings_connector_dev_bf16.safetensors","bytes":3070000000},{"tensorFile":"ltx-2-19b-embeddings_connector_distill_bf16.safetensors","description":"LTX-2 Embeddings Connector (Distilled) - Required for distilled workflows","destinationFolder":"models/text_encoders","tensorDownload":"https://cdn.sogni.ai/ComfyUI/models/text_encoders/ltx-2-19b-embeddings_connector_distill_bf16.safetensors","bytes":3070000000},{"tensorFile":"ltx-2-vae.safetensors","description":"LTX-2 Video VAE (CORRECT - from Kijai/LTXV2_comfy)","destinationFolder":"models/vae","tensorDownload":"https://cdn.sogni.ai/ComfyUI/models/vae/ltx-2-vae.safetensors","bytes":2450000000,"cleanup_files":["ltx-video-2b-v0.9.1.safetensors"]},{"tensorFile":"LTX2_audio_vae_bf16.safetensors","description":"LTX-2 Audio VAE - Enables synchronized audio generation","destinationFolder":"models/checkpoints","tensorDownload":"https://cdn.sogni.ai/ComfyUI/models/checkpoints/LTX2_audio_vae_bf16.safetensors","bytes":228000000},{"tensorFile":"ltx-2-spatial-upscaler-x2-1.0.safetensors","description":"LTX-2 Spatial Upscaler (2x) - For 2-stage generation pipeline","destinationFolder":"models/latent_upscale_models","tensorDownload":"https://cdn.sogni.ai/ComfyUI/models/latent_upscale_models/ltx-2-spatial-upscaler-x2-1.0.safetensors","bytes":1044000000}]},"ltx2-19b-fp8_t2v":{"description":"LTX-2 19B Text-to-Video base model","files":[{"tensorFile":"ltx-2-19b-dev-fp8.safetensors","description":"LTX-2 19B Diffusion Model (FP8)","destinationFolder":"models/diffusion_models","tensorDownload":"https://cdn.sogni.ai/ComfyUI/models/diffusion_models/ltx-2-19b-dev-fp8.safetensors","bytes":19327352832}]},"ltx2-19b-fp8_distilled":{"description":"LTX-2 19B distilled LoRA for fast generation","files":[{"tensorFile":"ltx-2-19b-distilled-lora-384.safetensors","description":"LTX-2 19B Distilled LoRA (8-step)","destinationFolder":"models/loras","tensorDownload":"https://cdn.sogni.ai/ComfyUI/models/loras/ltx-2-19b-distilled-lora-384.safetensors","bytes":1525000000}]},"ltx2-19b-fp8_canny-control":{"description":"LTX-2 19B Canny Control IC-LoRA for video-to-video","files":[{"tensorFile":"ltx-2-19b-ic-lora-canny-control.safetensors","description":"LTX-2 19B Canny Control IC-LoRA","destinationFolder":"models/loras","tensorDownload":"https://cdn.sogni.ai/ComfyUI/models/loras/ltx-2-19b-ic-lora-canny-control.safetensors","bytes":686000000}]},"ltx2-19b-fp8_pose-control":{"description":"LTX-2 19B Pose Control IC-LoRA for video-to-video","files":[{"tensorFile":"ltx-2-19b-ic-lora-pose-control.safetensors","description":"LTX-2 19B Pose Control 
IC-LoRA","destinationFolder":"models/loras","tensorDownload":"https://cdn.sogni.ai/ComfyUI/models/loras/ltx-2-19b-ic-lora-pose-control.safetensors","bytes":686000000}]},"ltx2-19b-fp8_depth-control":{"description":"LTX-2 19B Depth Control IC-LoRA for video-to-video","files":[{"tensorFile":"ltx-2-19b-ic-lora-depth-control.safetensors","description":"LTX-2 19B Depth Control IC-LoRA","destinationFolder":"models/loras","tensorDownload":"https://cdn.sogni.ai/ComfyUI/models/loras/ltx-2-19b-ic-lora-depth-control.safetensors","bytes":686000000}]},"ltx2-19b-fp8_detailer":{"description":"LTX-2 19B Detailer IC-LoRA for video quality enhancement","files":[{"tensorFile":"ltx-2-19b-ic-lora-detailer.safetensors","description":"LTX-2 19B Detailer IC-LoRA","destinationFolder":"models/loras","tensorDownload":"https://cdn.sogni.ai/ComfyUI/models/loras/ltx-2-19b-ic-lora-detailer.safetensors","bytes":686000000}]},"ltx2-19b-fp8_v2v-preprocessors":{"description":"Preprocessor models for LTX-2 v2v IC-Control workflows (DWPose + DepthAnythingV2)","files":[{"tensorFile":"yolox_l.onnx","description":"DWPose YOLOX model for pose detection","destinationFolder":"custom_nodes/comfyui_controlnet_aux/ckpts/yzd-v/DWPose","tensorDownload":"https://cdn.sogni.ai/ComfyUI/custom_nodes/comfyui_controlnet_aux/ckpts/yzd-v/DWPose/yolox_l.onnx","bytes":216746733,"sha256":"7860ae79de6c89a3c1eb72ae9a2756c0ccfbe04b7791bb5880afabd97855a411"},{"tensorFile":"dw-ll_ucoco_384_bs5.torchscript.pt","description":"DWPose TorchScript model for pose detection","destinationFolder":"custom_nodes/comfyui_controlnet_aux/ckpts/yzd-v/DWPose","tensorDownload":"https://cdn.sogni.ai/ComfyUI/custom_nodes/comfyui_controlnet_aux/ckpts/yzd-v/DWPose/dw-ll_ucoco_384_bs5.torchscript.pt","bytes":135059124,"sha256":"d86a0b2b59fddc0901a7076e9f59c9f8602602133ed72511c693fd11eea23d91"}]},"ltx2-19b-fp8_camera-control-static":{"description":"LTX-2 19B Camera Control Static LoRA for audio-to-video workflows","files":[{"tensorFile":"ltx-2-19b-lora-camera-control-static.safetensors","description":"LTX-2 19B Camera Control Static LoRA","destinationFolder":"models/loras","tensorDownload":"https://cdn.sogni.ai/ComfyUI/models/loras/ltx-2-19b-lora-camera-control-static.safetensors","bytes":2373000000}]},"ltx23-22b-fp8_shared":{"description":"Models shared by all LTX-2.3 22B workflows","files":[{"tensorFile":"gemma_3_12B_it_fp4_mixed.safetensors","description":"LTX-2.3 Text Encoder (Gemma 3 12B FP4)","destinationFolder":"models/text_encoders","tensorDownload":"https://cdn.sogni.ai/ComfyUI/models/text_encoders/gemma_3_12B_it_fp4_mixed.safetensors","bytes":6735906897},{"tensorFile":"ltx-2.3_text_projection_bf16.safetensors","description":"LTX-2.3 Text Projection Model","destinationFolder":"models/text_encoders","tensorDownload":"https://cdn.sogni.ai/ComfyUI/models/text_encoders/ltx-2.3_text_projection_bf16.safetensors","bytes":2480000000,"sha256":""},{"tensorFile":"LTX23_video_vae_bf16.safetensors","description":"LTX-2.3 Video VAE","destinationFolder":"models/vae","tensorDownload":"https://cdn.sogni.ai/ComfyUI/models/vae/LTX23_video_vae_bf16.safetensors","bytes":1557000000,"sha256":""},{"tensorFile":"LTX23_audio_vae_bf16.safetensors","description":"LTX-2.3 Audio VAE","destinationFolder":"models/checkpoints","tensorDownload":"https://cdn.sogni.ai/ComfyUI/models/checkpoints/LTX23_audio_vae_bf16.safetensors","bytes":383000000,"sha256":""}]},"ltx23-22b-fp8":{"description":"LTX-2.3 22B distilled diffusion 
model","files":[{"tensorFile":"ltx-2.3-22b-distilled_transformer_only_fp8_scaled.safetensors","description":"LTX-2.3 22B Distilled Diffusion Model (FP8)","destinationFolder":"models/diffusion_models","tensorDownload":"https://cdn.sogni.ai/ComfyUI/models/diffusion_models/ltx-2.3-22b-distilled_transformer_only_fp8_scaled.safetensors","bytes":25200000000,"sha256":""}]},"ltx23-22b-fp8_upscaler":{"description":"LTX-2.3 Spatial Upscaler (2x)","files":[{"tensorFile":"ltx-2.3-spatial-upscaler-x2-1.0.safetensors","description":"LTX-2.3 Spatial Upscaler (2x) - For 2-stage generation pipeline","destinationFolder":"models/latent_upscale_models","tensorDownload":"https://cdn.sogni.ai/ComfyUI/models/latent_upscale_models/ltx-2.3-spatial-upscaler-x2-1.0.safetensors","bytes":1044000000,"sha256":""}]},"qwen_image_2512_fp8_shared":{"description":"Models shared by all Qwen Image 2512 workflows","files":[{"tensorFile":"qwen_2.5_vl_7b_fp8_scaled.safetensors","description":"Qwen 2.5 VL Text Encoder (FP8)","destinationFolder":"models/text_encoders","tensorDownload":"https://cdn.sogni.ai/ComfyUI/models/text_encoders/qwen_2.5_vl_7b_fp8_scaled.safetensors","bytes":9384670680,"sha256":"cb5636d852a0ea6a9075ab1bef496c0db7aef13c02350571e388aea959c5c0b4"},{"tensorFile":"qwen_image_vae.safetensors","description":"Qwen Image VAE","destinationFolder":"models/vae","tensorDownload":"https://cdn.sogni.ai/ComfyUI/models/vae/qwen_image_vae.safetensors","bytes":253806246,"sha256":"a70580f0213e67967ee9c95f05bb400e8fb08307e017a924bf3441223e023d1f"}]},"qwen_image_2512_fp8":{"description":"Qwen Image 2512 diffusion model (FP8)","files":[{"tensorFile":"qwen_image_2512_fp8_e4m3fn.safetensors","description":"Qwen Image 2512 Diffusion Model (FP8)","destinationFolder":"models/diffusion_models","tensorDownload":"https://cdn.sogni.ai/ComfyUI/models/diffusion_models/qwen_image_2512_fp8_e4m3fn.safetensors","bytes":20400000000}]},"qwen_image_2512_lightning":{"description":"Qwen Image Lightning LoRA for fast 4-step generation","files":[{"tensorFile":"Qwen-Image-Lightning-4steps-V1.0.safetensors","description":"Qwen Image Lightning LoRA (4-step)","destinationFolder":"models/loras","tensorDownload":"https://cdn.sogni.ai/ComfyUI/models/loras/Qwen-Image-Lightning-4steps-V1.0.safetensors","bytes":1350000000}]},"z_image_bf16_shared":{"description":"Models shared by Z-Image turbo and non-turbo (text encoder + VAE)","files":[{"tensorFile":"qwen_3_4b.safetensors","description":"Z-Image Text Encoder (Qwen 3 4B)","destinationFolder":"models/text_encoders","tensorDownload":"https://pub-5bc58981af9f42659ff8ada57bfea92c.r2.dev/ComfyUI/models/text_encoders/qwen_3_4b.safetensors","bytes":8044982048,"sha256":"6c671498573ac2f7a5501502ccce8d2b08ea6ca2f661c458e708f36b36edfc5a"},{"tensorFile":"ae.safetensors","description":"Z-Image VAE","destinationFolder":"models/vae","tensorDownload":"https://pub-5bc58981af9f42659ff8ada57bfea92c.r2.dev/vae/ae.safetensors","bytes":335304388,"sha256":"afc8e28272cd15db3919bacdb6918ce9c1ed22e96cb12c4d5ed0fba823529e38"}]},"z_image_bf16":{"description":"Z-Image text-to-image model (non-turbo, higher quality)","files":[{"tensorFile":"z_image_bf16.safetensors","description":"Z-Image Diffusion Model","destinationFolder":"models/diffusion_models","tensorDownload":"https://pub-5bc58981af9f42659ff8ada57bfea92c.r2.dev/ComfyUI/models/diffusion_models/z_image_bf16.safetensors","bytes":13206016000,"sha256":"996a67d3ff666946b1c25cbc16d1b1918b6cc0ac166309e23fe3b3d830263dee"}]},"ace_step_1.5_shared":{"description":"ACE-Step 1.5 shared models (0.6B 
caption encoder + 4B LM + VAE) - used by both SFT and Turbo","files":[{"tensorFile":"qwen_0.6b_ace15.safetensors","description":"ACE-Step 1.5 Caption Encoder (0.6B - Qwen3, DualCLIPLoader clip_name1)","destinationFolder":"models/clip","tensorDownload":"https://pub-5bc58981af9f42659ff8ada57bfea92c.r2.dev/ComfyUI/models/clip/qwen_0.6b_ace15.safetensors","bytes":1191588248},{"tensorFile":"qwen_4b_ace15.safetensors","description":"ACE-Step 1.5 Language Model (4B - Qwen3, DualCLIPLoader clip_name2)","destinationFolder":"models/clip","tensorDownload":"https://pub-5bc58981af9f42659ff8ada57bfea92c.r2.dev/ComfyUI/models/clip/qwen_4b_ace15.safetensors","bytes":8379154232},{"tensorFile":"ace_1.5_vae.safetensors","description":"ACE-Step 1.5 VAE (Audio Decoder)","destinationFolder":"models/vae","tensorDownload":"https://pub-5bc58981af9f42659ff8ada57bfea92c.r2.dev/ComfyUI/models/vae/ace_1.5_vae.safetensors","bytes":337431732}]},"ace_step_1.5_sft":{"description":"ACE-Step 1.5 SFT DiT - high-quality music generation with CFG guidance","files":[{"tensorFile":"acestep_v1.5_sft.safetensors","description":"ACE-Step 1.5 SFT DiT (Diffusion Model)","destinationFolder":"models/diffusion_models","tensorDownload":"https://pub-5bc58981af9f42659ff8ada57bfea92c.r2.dev/ComfyUI/models/diffusion_models/acestep_v1.5_sft.safetensors","bytes":4787825604,"sha256":"d4dd3a93870f06720027965b90771f529ab02094b3d29e2518f1d5e097e1af7e"}]},"ace_step_1.5_turbo":{"description":"ACE-Step 1.5 Turbo DiT - fast music generation in 4-16 steps, no CFG support","files":[{"tensorFile":"acestep_v1.5_turbo.safetensors","description":"ACE-Step 1.5 Turbo DiT (Diffusion Model)","destinationFolder":"models/diffusion_models","tensorDownload":"https://pub-5bc58981af9f42659ff8ada57bfea92c.r2.dev/ComfyUI/models/diffusion_models/acestep_v1.5_turbo.safetensors","bytes":4787825604}]}},"workflowDependencies":{"wan_v2.2-14b-fp8_t2v":["wan_v2.2-14b-fp8_shared","wan_v2.2-14b-fp8_t2v"],"wan_v2.2-14b-fp8_t2v_lightx2v":["wan_v2.2-14b-fp8_shared","wan_v2.2-14b-fp8_t2v","wan_v2.2-14b-fp8_t2v_lightx2v"],"wan_v2.2-14b-fp8_i2v":["wan_v2.2-14b-fp8_shared","wan_v2.2-14b-fp8_i2v"],"wan_v2.2-14b-fp8_i2v_lightx2v":["wan_v2.2-14b-fp8_shared","wan_v2.2-14b-fp8_i2v","wan_v2.2-14b-fp8_i2v_lightx2v"],"wan_v2.2-14b-fp8_s2v":["wan_v2.2-14b-fp8_shared","wan_v2.2-14b-fp8_s2v","wan_v2.2-14b-fp8_i2v"],"wan_v2.2-14b-fp8_s2v_lightx2v":["wan_v2.2-14b-fp8_shared","wan_v2.2-14b-fp8_s2v","wan_v2.2-14b-fp8_i2v","wan_v2.2-14b-fp8_i2v_lightx2v"],"wan_v2.2-14b-fp8_animate-move_lightx2v":["wan_v2.2-14b-fp8_shared","wan_v2.2-14b-fp8_animate","wan_v2.2-14b-fp8_animate_lightx2v","wan_v2.2-14b-fp8_i2v"],"wan_v2.2-14b-fp8_animate-replace_lightx2v":["wan_v2.2-14b-fp8_shared","wan_v2.2-14b-fp8_animate","wan_v2.2-14b-fp8_animate_lightx2v","wan_v2.2-14b-fp8_animate-replace_lightx2v","wan_v2.2-14b-fp8_i2v"],"z_image_turbo_bf16":["z_image_turbo_bf16"],"flux2_dev_fp8":["flux2_dev_fp8"],"qwen_image_edit_2511_fp8":["qwen_image_edit_2511_fp8"],"qwen_image_edit_2511_fp8_lightning":["qwen_image_edit_2511_fp8","qwen_image_edit_2511_fp8_lightning"],"flux1-schnell-fp8":["flux1_shared","flux1-schnell-fp8"],"chroma-v.46-flash_fp8":["flux1_shared","chroma-v.46-flash_fp8"],"chroma-v48-detail-svd_fp8":["flux1_shared","chroma-v48-detail-svd_fp8"],"flux1-krea-dev_fp8_scaled":["flux1_shared","flux1-krea-dev_fp8_scaled"],"ltx2-19b-fp8_t2v":["ltx2-19b-fp8_shared","ltx2-19b-fp8_t2v","ltx2-19b-fp8_distilled"],"ltx2-19b-fp8_t2v_distilled":["ltx2-19b-fp8_shared","ltx2-19b-fp8_t2v","ltx2-19b-fp8_distilled"],"ltx2-19b-fp8_i2v":["
ltx2-19b-fp8_shared","ltx2-19b-fp8_t2v","ltx2-19b-fp8_distilled"],"ltx2-19b-fp8_i2v_distilled":["ltx2-19b-fp8_shared","ltx2-19b-fp8_t2v","ltx2-19b-fp8_distilled"],"ltx2-19b-fp8_ia2v_distilled":["ltx2-19b-fp8_shared","ltx2-19b-fp8_t2v","ltx2-19b-fp8_distilled","ltx2-19b-fp8_camera-control-static","ltx2-19b-fp8_detailer"],"ltx2-19b-fp8_a2v_distilled":["ltx2-19b-fp8_shared","ltx2-19b-fp8_t2v","ltx2-19b-fp8_distilled","ltx2-19b-fp8_camera-control-static","ltx2-19b-fp8_detailer"],"ltx2-19b-fp8_v2v":["ltx2-19b-fp8_shared","ltx2-19b-fp8_t2v","ltx2-19b-fp8_distilled","ltx2-19b-fp8_v2v-preprocessors","ltx2-19b-fp8_canny-control","ltx2-19b-fp8_pose-control","ltx2-19b-fp8_depth-control","ltx2-19b-fp8_detailer"],"ltx2-19b-fp8_v2v_distilled":["ltx2-19b-fp8_shared","ltx2-19b-fp8_t2v","ltx2-19b-fp8_distilled","ltx2-19b-fp8_v2v-preprocessors","ltx2-19b-fp8_canny-control","ltx2-19b-fp8_pose-control","ltx2-19b-fp8_depth-control","ltx2-19b-fp8_detailer"],"ltx23-22b-fp8_t2v_distilled":["ltx23-22b-fp8_shared","ltx23-22b-fp8","ltx23-22b-fp8_upscaler"],"ltx23-22b-fp8_i2v_distilled":["ltx23-22b-fp8_shared","ltx23-22b-fp8","ltx23-22b-fp8_upscaler"],"ltx23-22b-fp8_a2v_distilled":["ltx23-22b-fp8_shared","ltx23-22b-fp8","ltx23-22b-fp8_upscaler"],"ltx23-22b-fp8_ia2v_distilled":["ltx23-22b-fp8_shared","ltx23-22b-fp8","ltx23-22b-fp8_upscaler"],"qwen_image_2512_fp8":["qwen_image_2512_fp8_shared","qwen_image_2512_fp8"],"qwen_image_2512_fp8_lightning":["qwen_image_2512_fp8_shared","qwen_image_2512_fp8","qwen_image_2512_lightning"],"z_image_bf16":["z_image_bf16_shared","z_image_bf16"],"ace_step_1.5_sft":["ace_step_1.5_shared","ace_step_1.5_sft"],"ace_step_1.5_turbo":["ace_step_1.5_shared","ace_step_1.5_turbo"]}}}
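The manifest above has two parts: `downloads` groups files by download-group key (each file carries `tensorFile`, `destinationFolder`, `tensorDownload`, `bytes`, and usually `sha256`), and `workflowDependencies` maps a workflow key to the ordered list of download groups it needs. Below is a minimal consumer sketch, not an official installer: it assumes this JSON is saved locally as `downloads.json`, that models install under a local `ComfyUI` directory, and it uses `wan_v2.2-14b-fp8_t2v_lightx2v` purely as an example workflow key; the file and directory names are assumptions, only the manifest fields come from the document.

```python
"""Minimal sketch of a manifest consumer.

Assumptions (not part of the manifest): the JSON above is saved as
`downloads.json` and models live under a local `ComfyUI` root.
"""
import hashlib
import json
import urllib.request
from pathlib import Path

COMFY_ROOT = Path("ComfyUI")             # assumed install root
MANIFEST_PATH = Path("downloads.json")   # assumed local copy of this document


def resolve_files(manifest: dict, workflow: str) -> list[dict]:
    """Expand a workflow key into its full file list via workflowDependencies."""
    data = manifest["data"]
    groups = data["workflowDependencies"].get(workflow, [workflow])
    files: list[dict] = []
    for group in groups:
        files.extend(data["downloads"][group]["files"])
    return files


def sha256_of(path: Path) -> str:
    """Stream a file through SHA-256 in 1 MiB chunks."""
    h = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()


def ensure_file(entry: dict) -> None:
    """Download one manifest entry if missing, then check size and checksum."""
    dest = COMFY_ROOT / entry["destinationFolder"] / entry["tensorFile"]
    dest.parent.mkdir(parents=True, exist_ok=True)
    if not dest.exists():
        print(f"downloading {entry['tensorFile']} ...")
        urllib.request.urlretrieve(entry["tensorDownload"], dest)
    if dest.stat().st_size != entry["bytes"]:
        raise RuntimeError(f"size mismatch for {dest}")
    expected = entry.get("sha256", "")
    # Some entries carry empty or truncated hashes; only verify full 64-hex digests.
    if len(expected) == 64 and sha256_of(dest) != expected:
        raise RuntimeError(f"sha256 mismatch for {dest}")


if __name__ == "__main__":
    manifest = json.loads(MANIFEST_PATH.read_text())
    files = resolve_files(manifest, "wan_v2.2-14b-fp8_t2v_lightx2v")
    total_gb = sum(f["bytes"] for f in files) / 1e9
    print(f"{len(files)} files, {total_gb:.1f} GB total")
    for f in files:
        ensure_file(f)
```

Note that download groups can overlap across workflows (for example, the DWPose files appear in both the animate and v2v-preprocessor groups), so a real installer would likely dedupe by destination path; the `bytes` check above also assumes the CDN serves the file sizes recorded in the manifest.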