From 83d2b81c9c257d8d4b775e9f6d40765794dd65e9 Mon Sep 17 00:00:00 2001
From: Miguel Martin
Date: Thu, 4 Dec 2025 01:54:41 +0000
Subject: [PATCH 01/22] cosmos predict2.5 base: convert checkpoint & pipeline

- New scheduler: scheduling_flow_unipc_multistep.py
- Changes to TransformerCosmos for text embeddings via crossattn_proj
---
 scripts/convert_cosmos_to_diffusers.py        | 128 ++-
 src/diffusers/__init__.py                     |   2 +
 .../models/transformers/transformer_cosmos.py |  14 +
 .../cosmos/pipeline_cosmos25_predict.py       | 788 ++++++++++++++++++
 .../schedulers/.nfs42f9905b28788d3400000055   | 770 +++++++++++++++++
 src/diffusers/schedulers/__init__.py          |   2 +
 .../scheduling_flow_unipc_multistep.py        | 770 +++++++++++++++++
 7 files changed, 2466 insertions(+), 8 deletions(-)
 create mode 100644 src/diffusers/pipelines/cosmos/pipeline_cosmos25_predict.py
 create mode 100644 src/diffusers/schedulers/.nfs42f9905b28788d3400000055
 create mode 100644 src/diffusers/schedulers/scheduling_flow_unipc_multistep.py

diff --git a/scripts/convert_cosmos_to_diffusers.py b/scripts/convert_cosmos_to_diffusers.py
index 6f6563ad641b..0f56370d4a85 100644
--- a/scripts/convert_cosmos_to_diffusers.py
+++ b/scripts/convert_cosmos_to_diffusers.py
@@ -1,11 +1,55 @@
+"""
+# Cosmos Predict2
+
+Download checkpoint
+```bash
+hf download nvidia/Cosmos-Predict2-2B-Text2Image
+```
+
+Convert checkpoint
+```bash
+transformer_ckpt_path=~/.cache/huggingface/hub/models--nvidia--Cosmos-Predict2-2B-Text2Image/snapshots/acdb5fde992a73ef0355f287977d002cbfd127e0/model.pt
+
+python scripts/convert_cosmos_to_diffusers.py \
+    --transformer_ckpt_path $transformer_ckpt_path \
+    --transformer_type Cosmos-2.0-Diffusion-2B-Text2Image \
+    --text_encoder_path google-t5/t5-11b \
+    --tokenizer_path google-t5/t5-11b \
+    --vae_type wan2.1 \
+    --output_path converted/cosmos-p2-t2i-2b \
+    --save_pipeline
+```
+
+# Cosmos Predict2.5
+
+Download checkpoint
+```bash
+hf download nvidia/Cosmos-Predict2.5-2B
+```
+
+Convert checkpoint
+```bash
+transformer_ckpt_path=~/.cache/huggingface/hub/models--nvidia--Cosmos-Predict2.5-2B/snapshots/865baf084d4c9e850eac59a021277d5a9b9e8b63/base/pre-trained/d20b7120-df3e-4911-919d-db6e08bad31c_ema_bf16.pt
+
+python scripts/convert_cosmos_to_diffusers.py \
+    --transformer_type Cosmos-2.5-Predict-Base-2B \
+    --transformer_ckpt_path $transformer_ckpt_path \
+    --vae_type wan2.1 \
+    --output_path converted/cosmos-p2.5-base-2b \
+    --save_pipeline
+```
+
+"""
+
 import argparse
 import pathlib
+import sys
 from typing import Any, Dict
 
 import torch
 from accelerate import init_empty_weights
 from huggingface_hub import snapshot_download
-from transformers import T5EncoderModel, T5TokenizerFast
+from transformers import AutoTokenizer, Qwen2_5_VLForConditionalGeneration, T5EncoderModel, T5TokenizerFast
 
 from diffusers import (
     AutoencoderKLCosmos,
@@ -18,6 +62,8 @@
     EDMEulerScheduler,
     FlowMatchEulerDiscreteScheduler,
+    FlowUniPCMultistepScheduler,
 )
+from diffusers.pipelines.cosmos.pipeline_cosmos25_predict import Cosmos25PredictBase
 
 
 def remove_keys_(key: str, state_dict: Dict[str, Any]):
@@ -233,6 +278,25 @@ def rename_transformer_blocks_(key: str, state_dict: Dict[str, Any]):
         "concat_padding_mask": True,
         "extra_pos_embed_type": None,
     },
+    "Cosmos-2.5-Predict-Base-2B": {
+        "in_channels": 16 + 1,
+        "out_channels": 16,
+        "num_attention_heads": 16,
+        "attention_head_dim": 128,
+        "num_layers": 28,
+        "mlp_ratio": 4.0,
+        "text_embed_dim": 1024,
+        "adaln_lora_dim": 256,
+        "max_size": (128, 240, 240),
+        "patch_size": (1, 2, 2),
+        "rope_scale": (1.0, 3.0, 3.0),
+        "concat_padding_mask": True,
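+        # NOTE: crossattn_proj_in_channels is sized for the concatenation of the
+        # per-layer hidden states produced by the pipeline's text encoder; assuming
+        # the default Qwen2.5-VL-7B-based encoder (28 hidden layers x 3584 hidden
+        # size), 28 * 3584 = 100352.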
+        # NOTE: source config has pos_emb_learnable: 'True', but the corresponding
+        # params are missing from the checkpoint
+        "extra_pos_embed_type": None,
+        "use_crossattn_projection": True,
+        "crossattn_proj_in_channels": 100352,
+        "encoder_hidden_states_channels": 1024,
+    },
 }
 
 VAE_KEYS_RENAME_DICT = {
@@ -334,6 +398,9 @@ def convert_transformer(transformer_type: str, ckpt_path: str, weights_only: boo
     elif "Cosmos-2.0" in transformer_type:
         TRANSFORMER_KEYS_RENAME_DICT = TRANSFORMER_KEYS_RENAME_DICT_COSMOS_2_0
         TRANSFORMER_SPECIAL_KEYS_REMAP = TRANSFORMER_SPECIAL_KEYS_REMAP_COSMOS_2_0
+    elif "Cosmos-2.5" in transformer_type:
+        TRANSFORMER_KEYS_RENAME_DICT = TRANSFORMER_KEYS_RENAME_DICT_COSMOS_2_0
+        TRANSFORMER_SPECIAL_KEYS_REMAP = TRANSFORMER_SPECIAL_KEYS_REMAP_COSMOS_2_0
     else:
         assert False
 
@@ -347,6 +414,7 @@ def convert_transformer(transformer_type: str, ckpt_path: str, weights_only: boo
         new_key = new_key.removeprefix(PREFIX_KEY)
         for replace_key, rename_key in TRANSFORMER_KEYS_RENAME_DICT.items():
             new_key = new_key.replace(replace_key, rename_key)
+        print(key, "->", new_key, flush=True)
         update_state_dict_(original_state_dict, key, new_key)
 
     for key in list(original_state_dict.keys()):
@@ -355,6 +423,21 @@ def convert_transformer(transformer_type: str, ckpt_path: str, weights_only: boo
             continue
         handler_fn_inplace(key, original_state_dict)
 
+    expected_keys = set(transformer.state_dict().keys())
+    mapped_keys = set(original_state_dict.keys())
+    missing_keys = expected_keys - mapped_keys
+    unexpected_keys = mapped_keys - expected_keys
+    if missing_keys:
+        print(f"ERROR: missing keys ({len(missing_keys)}) in converted state_dict:", flush=True, file=sys.stderr)
+        for k in missing_keys:
+            print(k, file=sys.stderr)
+        sys.exit(1)
+    if unexpected_keys:
+        print(f"ERROR: unexpected keys ({len(unexpected_keys)}) in converted state_dict:", flush=True, file=sys.stderr)
+        for k in unexpected_keys:
+            print(k, file=sys.stderr)
+        sys.exit(2)
+
     transformer.load_state_dict(original_state_dict, strict=True, assign=True)
     return transformer
 
@@ -444,6 +527,28 @@ def save_pipeline_cosmos_2_0(args, transformer, vae):
     pipe.save_pretrained(args.output_path, safe_serialization=True, max_shard_size="5GB")
 
 
+def save_pipeline_cosmos_2_5(args, transformer, vae):
+    text_encoder_path = args.text_encoder_path or "nvidia/Cosmos-Reason1-7B"
+    tokenizer_path = args.tokenizer_path or "Qwen/Qwen2.5-VL-7B-Instruct"
+
+    text_encoder = Qwen2_5_VLForConditionalGeneration.from_pretrained(
+        text_encoder_path, torch_dtype="auto", device_map="cpu"
+    )
+    tokenizer = AutoTokenizer.from_pretrained(tokenizer_path)
+
+    scheduler = FlowUniPCMultistepScheduler(use_karras_sigmas=True)
+
+    pipe = Cosmos25PredictBase(
+        text_encoder=text_encoder,
+        tokenizer=tokenizer,
+        transformer=transformer,
+        vae=vae,
+        scheduler=scheduler,
+        safety_checker=lambda *args, **kwargs: None,
+    )
+    pipe.save_pretrained(args.output_path, safe_serialization=True, max_shard_size="5GB")
+
+
 def get_args():
     parser = argparse.ArgumentParser()
     parser.add_argument("--transformer_type", type=str, default=None, choices=list(TRANSFORMER_CONFIGS.keys()))
@@ -451,10 +556,10 @@ def get_args():
         "--transformer_ckpt_path", type=str, default=None, help="Path to original transformer checkpoint"
     )
     parser.add_argument(
-        "--vae_type", type=str, default=None, choices=["none", *list(VAE_CONFIGS.keys())], help="Type of VAE"
+        "--vae_type", type=str, default="wan2.1", choices=["wan2.1", *list(VAE_CONFIGS.keys())], help="Type of VAE"
     )
-    parser.add_argument("--text_encoder_path", type=str, default="google-t5/t5-11b")
-    parser.add_argument("--tokenizer_path", type=str,
default="google-t5/t5-11b") + parser.add_argument("--text_encoder_path", type=str, default=None) + parser.add_argument("--tokenizer_path", type=str, default=None) parser.add_argument("--save_pipeline", action="store_true") parser.add_argument("--output_path", type=str, required=True, help="Path where converted model should be saved") parser.add_argument("--dtype", default="bf16", help="Torch dtype to save the transformer in.") @@ -477,8 +582,6 @@ def get_args(): if args.save_pipeline: assert args.transformer_ckpt_path is not None assert args.vae_type is not None - assert args.text_encoder_path is not None - assert args.tokenizer_path is not None if args.transformer_ckpt_path is not None: weights_only = "Cosmos-1.0" in args.transformer_type @@ -490,17 +593,26 @@ def get_args(): if args.vae_type is not None: if "Cosmos-1.0" in args.transformer_type: vae = convert_vae(args.vae_type) - else: + elif "Cosmos-2.0" in args.transformer_type or "Cosmos-2.5" in args.transformer_type: vae = AutoencoderKLWan.from_pretrained( "Wan-AI/Wan2.1-T2V-1.3B-Diffusers", subfolder="vae", torch_dtype=torch.float32 ) + else: + raise AssertionError(f"{args.transformer_type} not supported") + if not args.save_pipeline: vae.save_pretrained(args.output_path, safe_serialization=True, max_shard_size="5GB") if args.save_pipeline: if "Cosmos-1.0" in args.transformer_type: + assert args.text_encoder_path is not None + assert args.tokenizer_path is not None save_pipeline_cosmos_1_0(args, transformer, vae) elif "Cosmos-2.0" in args.transformer_type: + assert args.text_encoder_path is not None + assert args.tokenizer_path is not None save_pipeline_cosmos_2_0(args, transformer, vae) + elif "Cosmos-2.5" in args.transformer_type: + save_pipeline_cosmos_2_5(args, transformer, vae) else: - assert False + raise AssertionError(f"{args.transformer_type} not supported") diff --git a/src/diffusers/__init__.py b/src/diffusers/__init__.py index 03ecaf6bc14d..a64a20c3c55c 100644 --- a/src/diffusers/__init__.py +++ b/src/diffusers/__init__.py @@ -347,6 +347,7 @@ "FlowMatchEulerDiscreteScheduler", "FlowMatchHeunDiscreteScheduler", "FlowMatchLCMScheduler", + "FlowUniPCMultistepScheduler", "HeunDiscreteScheduler", "IPNDMScheduler", "KarrasVeScheduler", @@ -1080,6 +1081,7 @@ FlowMatchEulerDiscreteScheduler, FlowMatchHeunDiscreteScheduler, FlowMatchLCMScheduler, + FlowUniPCMultistepScheduler, HeunDiscreteScheduler, IPNDMScheduler, KarrasVeScheduler, diff --git a/src/diffusers/models/transformers/transformer_cosmos.py b/src/diffusers/models/transformers/transformer_cosmos.py index 373b470ae37b..2bd3a121427a 100644 --- a/src/diffusers/models/transformers/transformer_cosmos.py +++ b/src/diffusers/models/transformers/transformer_cosmos.py @@ -439,6 +439,9 @@ def __init__( rope_scale: Tuple[float, float, float] = (2.0, 1.0, 1.0), concat_padding_mask: bool = True, extra_pos_embed_type: Optional[str] = "learnable", + use_crossattn_projection: bool = False, + crossattn_proj_in_channels: int = 1024, + encoder_hidden_states_channels: int = 1024, ) -> None: super().__init__() hidden_size = num_attention_heads * attention_head_dim @@ -485,6 +488,13 @@ def __init__( hidden_size, patch_size[0] * patch_size[1] * patch_size[2] * out_channels, bias=False ) + self.use_crossattn_projection = use_crossattn_projection + if self.use_crossattn_projection: + self.crossattn_proj = nn.Sequential( + nn.Linear(crossattn_proj_in_channels, encoder_hidden_states_channels, bias=True), + nn.GELU(), + ) + self.gradient_checkpointing = False def forward( @@ -524,6 +534,7 @@ def 
forward( post_patch_num_frames = num_frames // p_t post_patch_height = height // p_h post_patch_width = width // p_w + hidden_states = self.patch_embed(hidden_states) hidden_states = hidden_states.flatten(1, 3) # [B, T, H, W, C] -> [B, THW, C] @@ -546,6 +557,9 @@ def forward( else: assert False + if self.use_crossattn_projection: + encoder_hidden_states = self.crossattn_proj(encoder_hidden_states) + # 5. Transformer blocks for block in self.transformer_blocks: if torch.is_grad_enabled() and self.gradient_checkpointing: diff --git a/src/diffusers/pipelines/cosmos/pipeline_cosmos25_predict.py b/src/diffusers/pipelines/cosmos/pipeline_cosmos25_predict.py new file mode 100644 index 000000000000..3c2f0f221145 --- /dev/null +++ b/src/diffusers/pipelines/cosmos/pipeline_cosmos25_predict.py @@ -0,0 +1,788 @@ +# Copyright 2025 The NVIDIA Team and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Callable, Dict, List, Optional, Union + +import numpy as np +import torch +import torchvision +import torchvision.transforms +import torchvision.transforms.functional +from transformers import AutoTokenizer, Qwen2_5_VLForConditionalGeneration + +from ...callbacks import MultiPipelineCallbacks, PipelineCallback +from ...image_processor import PipelineImageInput +from ...models import AutoencoderKLWan, CosmosTransformer3DModel +from ...schedulers import FlowUniPCMultistepScheduler +from ...utils import is_cosmos_guardrail_available, is_torch_xla_available, logging, replace_example_docstring +from ...utils.torch_utils import randn_tensor +from ...video_processor import VideoProcessor +from ..pipeline_utils import DiffusionPipeline +from .pipeline_output import CosmosPipelineOutput + + +if is_cosmos_guardrail_available(): + from cosmos_guardrail import CosmosSafetyChecker +else: + + class CosmosSafetyChecker: + def __init__(self, *args, **kwargs): + raise ImportError( + "`cosmos_guardrail` is not installed. Please install it to use the safety checker for Cosmos: `pip install cosmos_guardrail`." 
+ ) + + +if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + + XLA_AVAILABLE = True +else: + XLA_AVAILABLE = False + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + +EXAMPLE_DOC_STRING = """ + Examples: + ```python + >>> import torch + >>> from diffusers import Cosmos25PredictBase + >>> from diffusers.utils import export_to_video, load_image, load_video + + >>> model_id = "nvidia/Cosmos-Predict2.5-Base-2B" + >>> pipe = Cosmos25PredictBase.from_pretrained(model_id, torch_dtype=torch.bfloat16) + >>> pipe.to("cuda") + + >>> prompt = "A close-up shot captures a vibrant yellow scrubber vigorously working on a grimy plate, its bristles moving in circular motions to lift stubborn grease and food residue. The dish, once covered in remnants of a hearty meal, gradually reveals its original glossy surface. Suds form and bubble around the scrubber, creating a satisfying visual of cleanliness in progress. The sound of scrubbing fills the air, accompanied by the gentle clinking of the dish against the sink. As the scrubber continues its task, the dish transforms, gleaming under the bright kitchen lights, symbolizing the triumph of cleanliness over mess." + >>> negative_prompt = "The video captures a series of frames showing ugly scenes, static with no motion, motion blur, over-saturation, shaky footage, low resolution, grainy texture, pixelated images, poorly lit areas, underexposed and overexposed scenes, poor color balance, washed out colors, choppy sequences, jerky movements, low frame rate, artifacting, color banding, unnatural transitions, outdated special effects, fake elements, unconvincing visuals, poorly edited content, jump cuts, visual noise, and flickering. Overall, the video is of poor quality." + + >>> # Text2World: generate a 93-frame world video from text only. + >>> video = pipe( + ... image=None, + ... video=None, + ... prompt=prompt, + ... negative_prompt=negative_prompt, + ... num_frames=93, + ... generator=torch.Generator().manual_seed(1), + ... ).frames[0] + >>> export_to_video(video, "text2world.mp4", fps=16) + + >>> # Image2World: condition on a single image and generate a 93-frame world video. + >>> image = load_image( + ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/yellow-scrubber.png" + ... ) + >>> video = pipe( + ... image=image, + ... video=None, + ... prompt=prompt, + ... negative_prompt=negative_prompt, + ... num_frames=93, + ... generator=torch.Generator().manual_seed(1), + ... ).frames[0] + >>> export_to_video(video, "image2world.mp4", fps=16) + + >>> # Video2World: condition on an input clip and predict a 93-frame world video. + >>> input_video = load_video("path/to/input.mp4") + >>> video = pipe( + ... image=None, + ... video=input_video, + ... prompt=prompt, + ... negative_prompt=negative_prompt, + ... num_frames=93, + ... 
generator=torch.Generator().manual_seed(1), + ... ).frames[0] + >>> export_to_video(video, "video2world.mp4", fps=16) + + >>> # To produce a single-frame image instead of a world clip, set num_frames=1 and + >>> # save the first frame: pipe(..., num_frames=1).frames[0][0]. + ``` +""" + + +class Cosmos25PredictBase(DiffusionPipeline): + r""" + Pipeline for [Cosmos Predict2.5](https://github.com/nvidia-cosmos/cosmos-predict2.5) base model. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Args: + text_encoder ([`Qwen2_5_VLForConditionalGeneration`]): + Frozen text-encoder. Cosmos Predict2.5 uses the [Qwen2.5 + VL](https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct) encoder. + tokenizer (`AutoTokenizer`): + Tokenizer associated with the Qwen2.5 VL encoder. + transformer ([`CosmosTransformer3DModel`]): + Conditional Transformer to denoise the encoded image latents. + scheduler ([`FlowUniPCMultistepScheduler`]): + A scheduler to be used in combination with `transformer` to denoise the encoded image latents. + vae ([`AutoencoderKLWan`]): + Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations. + """ + + model_cpu_offload_seq = "text_encoder->transformer->vae" + _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] + # We mark safety_checker as optional here to get around some test failures, but it is not really optional + _optional_components = ["safety_checker"] + + def __init__( + self, + text_encoder: Qwen2_5_VLForConditionalGeneration, + tokenizer: AutoTokenizer, + transformer: CosmosTransformer3DModel, + vae: AutoencoderKLWan, + scheduler: FlowUniPCMultistepScheduler, + safety_checker: CosmosSafetyChecker = None, + ): + super().__init__() + + if safety_checker is None: + safety_checker = CosmosSafetyChecker() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + transformer=transformer, + scheduler=scheduler, + safety_checker=safety_checker, + ) + + self.vae_scale_factor_temporal = 2 ** sum(self.vae.temperal_downsample) if getattr(self, "vae", None) else 4 + self.vae_scale_factor_spatial = 2 ** len(self.vae.temperal_downsample) if getattr(self, "vae", None) else 8 + self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor_spatial) + latents_mean = ( + torch.tensor(self.vae.config.latents_mean).view(1, self.vae.config.z_dim, 1, 1, 1).float() + if getattr(self.vae.config, "latents_mean", None) is not None + else None + ) + latents_std = ( + torch.tensor(self.vae.config.latents_std).view(1, self.vae.config.z_dim, 1, 1, 1).float() + if getattr(self.vae.config, "latents_std", None) is not None + else None + ) + self.latents_mean = latents_mean + self.latents_std = latents_std + + # Copied from diffusers.pipelines.cosmos.pipeline_cosmos_text2world.CosmosTextToWorldPipeline._get_prompt_embeds + def _get_prompt_embeds( + self, + prompt: Union[str, List[str]] = None, + max_sequence_length: int = 512, + device: Optional[torch.device] = None, + dtype: Optional[torch.dtype] = None, + ): + device = device or self._execution_device + dtype = dtype or self.text_encoder.dtype + prompt = [prompt] if isinstance(prompt, str) else prompt + + # Tokenize prompts + input_ids_batch = [] + + for sample_idx in range(len(prompt)): + conversations = [ + { + "role": "system", + "content": [ + { + "type": "text", + "text": "You are a 
helpful assistant who will provide prompts to an image generator.", + } + ], + }, + { + "role": "user", + "content": [ + { + "type": "text", + "text": prompt[sample_idx], + } + ], + }, + ] + input_ids = self.tokenizer.apply_chat_template( + conversations, + tokenize=True, + add_generation_prompt=False, + add_vision_id=False, + max_length=max_sequence_length, + truncation=True, + padding="max_length", + ) + input_ids = torch.LongTensor(input_ids) + input_ids_batch.append(input_ids) + + input_ids_batch = torch.stack(input_ids_batch, dim=0) + + outputs = self.text_encoder( + input_ids_batch.to(device), + output_hidden_states=True, + ) + hidden_states = outputs.hidden_states + + normalized_hidden_states = [] + for layer_idx in range(1, len(hidden_states)): + normalized_state = (hidden_states[layer_idx] - hidden_states[layer_idx].mean(dim=-1, keepdim=True)) / ( + hidden_states[layer_idx].std(dim=-1, keepdim=True) + 1e-8 + ) + normalized_hidden_states.append(normalized_state) + + prompt_embeds = torch.cat(normalized_hidden_states, dim=-1) + prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) + + return prompt_embeds + + # Copied from diffusers.pipelines.cosmos.pipeline_cosmos_text2world.CosmosTextToWorldPipeline.encode_prompt with num_videos_per_prompt->num_videos_per_prompt + def encode_prompt( + self, + prompt: Union[str, List[str]], + negative_prompt: Optional[Union[str, List[str]]] = None, + do_classifier_free_guidance: bool = True, + num_videos_per_prompt: int = 1, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + max_sequence_length: int = 512, + device: Optional[torch.device] = None, + dtype: Optional[torch.dtype] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + do_classifier_free_guidance (`bool`, *optional*, defaults to `True`): + Whether to use classifier free guidance or not. + num_videos_per_prompt (`int`, *optional*, defaults to 1): + Number of videos that should be generated per prompt. torch device to place the resulting embeddings on + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. 
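+            max_sequence_length (`int`, *optional*, defaults to 512):
+                Maximum number of tokens the prompt is tokenized to; longer prompts are truncated and shorter ones
+                are padded to this length.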
+ device: (`torch.device`, *optional*): + torch device + dtype: (`torch.dtype`, *optional*): + torch dtype + """ + device = device or self._execution_device + + prompt = [prompt] if isinstance(prompt, str) else prompt + if prompt is not None: + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + prompt_embeds = self._get_prompt_embeds( + prompt=prompt, max_sequence_length=max_sequence_length, device=device, dtype=dtype + ) + + # duplicate text embeddings for each generation per prompt, using mps friendly method + _, seq_len, _ = prompt_embeds.shape + prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1) + prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1) + + if do_classifier_free_guidance and negative_prompt_embeds is None: + negative_prompt = negative_prompt or "" + negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt + + if prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + + negative_prompt_embeds = self._get_prompt_embeds( + prompt=negative_prompt, max_sequence_length=max_sequence_length, device=device, dtype=dtype + ) + + # duplicate text embeddings for each generation per prompt, using mps friendly method + _, seq_len, _ = negative_prompt_embeds.shape + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_videos_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1) + + return prompt_embeds, negative_prompt_embeds + + def prepare_latents( + self, + video: Optional[torch.Tensor], + batch_size: int, + num_channels_latents: int = 16, + height: int = 704, + width: int = 1280, + num_frames_in: int = 93, + num_frames_out: int = 93, + do_classifier_free_guidance: bool = True, + dtype: Optional[torch.dtype] = None, + device: Optional[torch.device] = None, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + ) -> torch.Tensor: + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + B = batch_size + C = num_channels_latents + T = (num_frames_out - 1) // self.vae_scale_factor_temporal + 1 + H = height // self.vae_scale_factor_spatial + W = width // self.vae_scale_factor_spatial + shape = (B, C, T, H, W) + + if num_frames_in == 0: + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + + cond_mask = torch.zeros((B, 1, T, H, W), dtype=latents.dtype, device=latents.device) + cond_indicator = torch.zeros((B, 1, T, 1, 1), dtype=latents.dtype, device=latents.device) + + cond_latents = torch.zeros_like(latents) + + return ( + latents, + cond_latents, + cond_mask, + cond_indicator, + ) + else: + if video is None: + raise ValueError("`video` must be provided when `num_frames_in` is greater than 0.") + needs_preprocessing = not (isinstance(video, torch.Tensor) and video.ndim == 5 and video.shape[1] == 3) + if needs_preprocessing: + video = self.video_processor.preprocess_video(video, height, width) + video = video.to(device=device, dtype=self.vae.dtype) + if isinstance(generator, list): + cond_latents = [ + retrieve_latents(self.vae.encode(video[i].unsqueeze(0)), generator=generator[i]) + for i in range(batch_size) + ] + else: + cond_latents = [retrieve_latents(self.vae.encode(vid.unsqueeze(0)), generator) for vid in video] + + cond_latents = torch.cat(cond_latents, dim=0).to(dtype) + + if self.latents_mean is None or self.latents_std is None: + raise ValueError("VAE configuration must define `latents_mean` and `latents_std`.") + latents_mean = self.latents_mean.to(device=device, dtype=dtype) + latents_std = self.latents_std.to(device=device, dtype=dtype) + cond_latents = (cond_latents - latents_mean) / latents_std + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device=device, dtype=dtype) + + padding_shape = (B, 1, T, H, W) + ones_padding = latents.new_ones(padding_shape) + zeros_padding = latents.new_zeros(padding_shape) + + num_cond_latent_frames = (num_frames_in - 1) // self.vae_scale_factor_temporal + 1 + cond_indicator = latents.new_zeros(1, 1, latents.size(2), 1, 1) + cond_indicator[:, :, 0:num_cond_latent_frames] = 1.0 + cond_mask = cond_indicator * ones_padding + (1 - cond_indicator) * zeros_padding + + return ( + latents, + cond_latents, + cond_mask, + cond_indicator, + ) + + # Copied from diffusers.pipelines.cosmos.pipeline_cosmos_text2world.CosmosTextToWorldPipeline.check_inputs + def check_inputs( + self, + prompt, + height, + width, + prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if height % 16 != 0 or width % 16 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 16 but are {height} and {width}.") + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." 
+ ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1.0 + + @property + def num_timesteps(self): + return self._num_timesteps + + @property + def current_timestep(self): + return self._current_timestep + + @property + def interrupt(self): + return self._interrupt + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + image: PipelineImageInput | None = None, + video: List[PipelineImageInput] | None = None, + prompt: Union[str, List[str]] | None = None, + negative_prompt: Optional[Union[str, List[str]]] = None, + height: int = 704, + width: int = 1280, + num_frames: int = 93, + num_inference_steps: int = 35, + guidance_scale: float = 7.0, + fps: int = 16, + num_videos_per_prompt: Optional[int] = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback_on_step_end: Optional[ + Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] + ] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + max_sequence_length: int = 512, + shift: float = 5.0, + conditional_frame_timestep: float = 0.1, + ): + r""" + The call function to the pipeline for generation. Supports three modes: + + - **Text2World**: `image=None`, `video=None`, `prompt` provided. Generates a world clip. + - **Image2World**: `image` provided, `video=None`, `prompt` provided. Conditions on a single frame. + - **Video2World**: `video` provided, `image=None`, `prompt` provided. Conditions on an input clip. + + Set `num_frames=93` (default) to produce a world video, or `num_frames=1` to produce a single image frame. + Outputs follow `output_type` (e.g., `"pil"` returns a list of `num_frames` PIL images per prompt). + + Args: + image (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, *optional*): + Optional single image for Image2World conditioning. Must be `None` when `video` is provided. + video (`List[PIL.Image.Image]`, `np.ndarray`, `torch.Tensor`, *optional*): + Optional input video for Video2World conditioning. Must be `None` when `image` is provided. + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide generation. Required unless `prompt_embeds` is supplied. + height (`int`, defaults to `704`): + The height in pixels of the generated image. + width (`int`, defaults to `1280`): + The width in pixels of the generated image. + num_frames (`int`, defaults to `93`): + Number of output frames. Use `93` for world (video) generation; set to `1` to return a single frame. + num_inference_steps (`int`, defaults to `35`): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, defaults to `7.0`): + Guidance scale as defined in [Classifier-Free Diffusion + Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. + of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting + `guidance_scale > 1`. 
+            fps (`int`, defaults to `16`):
+                The frames per second of the generated video.
+            num_videos_per_prompt (`int`, *optional*, defaults to 1):
+                The number of videos to generate per prompt.
+            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
+                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
+                generation deterministic.
+            latents (`torch.Tensor`, *optional*):
+                Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
+                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
+                tensor is generated by sampling using the supplied random `generator`.
+            prompt_embeds (`torch.Tensor`, *optional*):
+                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
+                provided, text embeddings will be generated from `prompt` input argument.
+            negative_prompt_embeds (`torch.Tensor`, *optional*):
+                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+                weighting. If not provided, negative_prompt_embeds will be generated from the `negative_prompt` input
+                argument.
+            output_type (`str`, *optional*, defaults to `"pil"`):
+                The output format of the generated image. Choose between `PIL.Image` or `np.array`.
+            return_dict (`bool`, *optional*, defaults to `True`):
+                Whether or not to return a [`CosmosPipelineOutput`] instead of a plain tuple.
+            callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*):
+                A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of
+                each denoising step during inference, with the following arguments: `callback_on_step_end(self:
+                DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a
+                list of all tensors as specified by `callback_on_step_end_tensor_inputs`.
+            callback_on_step_end_tensor_inputs (`List`, *optional*):
+                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
+                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
+                `._callback_tensor_inputs` attribute of your pipeline class.
+            max_sequence_length (`int`, defaults to `512`):
+                The maximum number of tokens in the prompt. If the prompt exceeds this length, it will be truncated. If
+                the prompt is shorter than this length, it will be padded.
+            shift (`float`, defaults to `5.0`):
+                The timestep shift passed to the scheduler's `set_timesteps`; larger values shift the schedule toward
+                higher noise levels.
+            conditional_frame_timestep (`float`, defaults to `0.1`):
+                The fixed (small) timestep assigned to the conditioning frames in Image2World and Video2World modes.
+
+        Examples:
+
+        Returns:
+            [`~CosmosPipelineOutput`] or `tuple`:
+                If `return_dict` is `True`, [`CosmosPipelineOutput`] is returned, otherwise a `tuple` is returned where
+                the first element is a list with the generated frames.
+        """
+        if self.safety_checker is None:
+            raise ValueError(
+                f"You have disabled the safety checker for {self.__class__}. This is in violation of the "
+                "[NVIDIA Open Model License Agreement](https://www.nvidia.com/en-us/agreements/enterprise-software/nvidia-open-model-license). "
+                f"Please ensure that you are compliant with the license agreement."
+            )
+
+        if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
+            callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs
+
+        # 1. Check inputs.
Raise error if not correct + self.check_inputs(prompt, height, width, prompt_embeds, callback_on_step_end_tensor_inputs) + + self._guidance_scale = guidance_scale + self._current_timestep = None + self._interrupt = False + + device = self._execution_device + + if self.safety_checker is not None: + self.safety_checker.to(device) + if prompt is not None: + prompt_list = [prompt] if isinstance(prompt, str) else prompt + for p in prompt_list: + if not self.safety_checker.check_text_safety(p): + raise ValueError( + f"Cosmos Guardrail detected unsafe text in the prompt: {p}. Please ensure that the " + f"prompt abides by the NVIDIA Open Model License Agreement." + ) + self.safety_checker.to("cpu") + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # 3. Encode input prompt + ( + prompt_embeds, + negative_prompt_embeds, + ) = self.encode_prompt( + prompt=prompt, + negative_prompt=negative_prompt, + do_classifier_free_guidance=self.do_classifier_free_guidance, + num_videos_per_prompt=num_videos_per_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + device=device, + max_sequence_length=max_sequence_length, + ) + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, shift=shift, device=device) + timesteps = self.scheduler.timesteps + + # 5. Prepare latent variables + vae_dtype = self.vae.dtype + transformer_dtype = self.transformer.dtype + + num_frames_in = None + if image is not None: + # TODO: handle batch_size > 1 + assert batch_size == 1, "batch_size must be 1 for image input" + image = torchvision.transforms.functional.to_tensor(image).unsqueeze(0) + video = torch.cat([image, torch.zeros_like(image).repeat(num_frames - 1, 1, 1, 1)], dim=0) + video = video.unsqueeze(0) + num_frames_in = 1 + elif video is None: + video = torch.zeros(batch_size, num_frames, 3, height, width, dtype=torch.uint8) + num_frames_in = 0 + else: + num_frames_in = len(video) + + assert video is not None + video = self.video_processor.preprocess_video(video, height, width) + + # pad with last frame (for video2world) + if video.shape[2] < num_frames: + assert batch_size == 1, "batch_size must be 1 for padding frames" + n_pad_frames = num_frames - num_frames_in + last_frame = video[0, :, -1:, :, :] # [C, T==1, H, W] + pad_frames = last_frame.repeat(1, 1, n_pad_frames, 1, 1) # [B, C, T, H, W] + video = torch.cat((video, pad_frames), dim=2) + + video = video.to(device=device, dtype=vae_dtype) + + num_channels_latents = self.transformer.config.in_channels - 1 + latents, cond_latent, cond_mask, cond_indicator = self.prepare_latents( + video=video, + batch_size=batch_size * num_videos_per_prompt, + num_channels_latents=num_channels_latents, + height=height, + width=width, + num_frames_in=num_frames_in, + num_frames_out=num_frames, + do_classifier_free_guidance=self.do_classifier_free_guidance, + dtype=torch.float32, + device=device, + generator=generator, + latents=latents, + ) + cond_timestep = torch.ones_like(cond_indicator) * conditional_frame_timestep + cond_mask = cond_mask.to(transformer_dtype) + + padding_mask = latents.new_zeros(1, 1, height, width, dtype=transformer_dtype) + + # 6. 
Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + self._num_timesteps = len(timesteps) + + gt_velocity = latents - cond_latent + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + self._current_timestep = t + + timestep = torch.stack([t]).to(torch.float32) + # TODO: make scheduler scale this instead + timestep *= 0.001 # NOTE: timestep scale + timestep = timestep.to(transformer_dtype) + + in_latents = cond_mask * cond_latent + (1 - cond_mask) * latents # TODO: could use cond_indicator + in_latents = in_latents.to(transformer_dtype) + in_timestep = cond_indicator * cond_timestep + (1 - cond_indicator) * timestep + noise_pred = self.transformer( + hidden_states=in_latents, + condition_mask=cond_mask, + timestep=in_timestep, + encoder_hidden_states=prompt_embeds, + padding_mask=padding_mask, + return_dict=False, + )[0] + # NOTE: force input video latents for noise_pred by correcting velocity + noise_pred = gt_velocity * cond_mask + noise_pred * (1 - cond_mask) + + if self.do_classifier_free_guidance: + noise_pred_neg = self.transformer( + hidden_states=in_latents, + condition_mask=cond_mask, + timestep=in_timestep, + encoder_hidden_states=negative_prompt_embeds, + padding_mask=padding_mask, + return_dict=False, + )[0] + # NOTE: force input video latents for noise_pred by correcting velocity + noise_pred_neg = gt_velocity * cond_mask + noise_pred_neg * (1 - cond_mask) + noise_pred = noise_pred + self.guidance_scale * (noise_pred - noise_pred_neg) + + latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + if XLA_AVAILABLE: + xm.mark_step() + + self._current_timestep = None + + if not output_type == "latent": + assert self.latents_mean is not None and self.latents_std is not None, ( + "VAE configuration must define `latents_mean` and `latents_std`." 
+ ) + latents_mean = self.latents_mean.to(latents.device, latents.dtype) + latents_std = self.latents_std.to(latents.device, latents.dtype) + latents = latents * latents_std + latents_mean + video = self.vae.decode(latents.to(self.vae.dtype), return_dict=False)[0] + + assert self.safety_checker is not None + self.safety_checker.to(device) + video = self.video_processor.postprocess_video(video, output_type="np") + video = (video * 255).astype(np.uint8) + video_batch = [] + for vid in video: + vid = self.safety_checker.check_video_safety(vid) + video_batch.append(vid) + video = np.stack(video_batch).astype(np.float32) / 255.0 * 2 - 1 + video = torch.from_numpy(video).permute(0, 4, 1, 2, 3) + video = self.video_processor.postprocess_video(video, output_type=output_type) + self.safety_checker.to("cpu") + else: + video = latents + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (video,) + + return CosmosPipelineOutput(frames=video) diff --git a/src/diffusers/schedulers/.nfs42f9905b28788d3400000055 b/src/diffusers/schedulers/.nfs42f9905b28788d3400000055 new file mode 100644 index 000000000000..fb5f210f0ba8 --- /dev/null +++ b/src/diffusers/schedulers/.nfs42f9905b28788d3400000055 @@ -0,0 +1,770 @@ +# TODO(migmartin): reduce LOC by using inheritance from UniPCMultistepScheduler +# Copied from https://github.com/huggingface/diffusers/blob/v0.31.0/src/diffusers/schedulers/scheduling_unipc_multistep.py +# Convert unipc for flow matching +# Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved. + +import math +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch + +from diffusers.configuration_utils import ConfigMixin, register_to_config +from diffusers.schedulers.scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput +from diffusers.utils import deprecate + + +class FlowUniPCMultistepScheduler(SchedulerMixin, ConfigMixin): + """ + `FlowUniPCMultistepScheduler` is a training-free framework designed for the fast sampling of diffusion models. + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. + solver_order (`int`, default `2`): + The UniPC order which can be any positive integer. The effective order of accuracy is `solver_order + 1` + due to the UniC. It is recommended to use `solver_order=2` for guided sampling, and `solver_order=3` for + unconditional sampling. + prediction_type (`str`, defaults to "flow_prediction"): + Prediction type of the scheduler function; must be `flow_prediction` for this scheduler, which predicts the + flow of the diffusion process. + thresholding (`bool`, defaults to `False`): + Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such + as Stable Diffusion. + dynamic_thresholding_ratio (`float`, defaults to 0.995): + The ratio for the dynamic thresholding method. Valid only when `thresholding=True`. + sample_max_value (`float`, defaults to 1.0): + The threshold value for dynamic thresholding. Valid only when `thresholding=True` and `predict_x0=True`. + predict_x0 (`bool`, defaults to `True`): + Whether to use the updating algorithm on the predicted x0. + solver_type (`str`, default `bh2`): + Solver type for UniPC. 
It is recommended to use `bh1` for unconditional sampling when steps < 10, and `bh2`
+            otherwise.
+        lower_order_final (`bool`, default `True`):
+            Whether to use lower-order solvers in the final steps. Only valid for < 15 inference steps. This can
+            stabilize the sampling of DPMSolver for steps < 15, especially for steps <= 10.
+        disable_corrector (`list`, default `[]`):
+            The steps at which to disable the corrector, to mitigate the misalignment between `epsilon_theta(x_t, c)`
+            and `epsilon_theta(x_t^c, c)` which can influence convergence for a large guidance scale. The corrector is
+            usually disabled during the first few steps.
+        solver_p (`SchedulerMixin`, default `None`):
+            Any other scheduler that, if specified, turns the algorithm into `solver_p + UniC`.
+        use_karras_sigmas (`bool`, *optional*, defaults to `False`):
+            Whether to use Karras sigmas for step sizes in the noise schedule during the sampling process. If `True`,
+            the sigmas are determined according to a sequence of noise levels {σi}.
+        timestep_spacing (`str`, defaults to `"linspace"`):
+            The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and
+            Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information.
+        steps_offset (`int`, defaults to 0):
+            An offset added to the inference steps, as required by some model families.
+        final_sigmas_type (`str`, defaults to `"zero"`):
+            The final `sigma` value for the noise schedule during the sampling process. If `"sigma_min"`, the final
+            sigma is the same as the last sigma in the training schedule. If `"zero"`, the final sigma is set to 0.
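+        shift (`float`, defaults to 1.0):
+            Static shift applied to the sigma schedule: `shift * sigma / (1 + (shift - 1) * sigma)`. Ignored when
+            `use_dynamic_shifting` is `True`.
+        use_dynamic_shifting (`bool`, defaults to `False`):
+            Whether to apply timestep shifting on the fly (based on the `mu` value passed to `set_timesteps`) instead
+            of the static `shift`.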
+ """ + + _compatibles = [e.name for e in KarrasDiffusionSchedulers] + order = 1 + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + solver_order: int = 2, + prediction_type: str = "flow_prediction", + shift: Optional[float] = 1.0, + use_dynamic_shifting=False, + thresholding: bool = False, + dynamic_thresholding_ratio: float = 0.995, + sample_max_value: float = 1.0, + predict_x0: bool = True, + solver_type: str = "bh2", + lower_order_final: bool = True, + disable_corrector: List[int] = [], + solver_p: SchedulerMixin = None, + timestep_spacing: str = "linspace", + steps_offset: int = 0, + final_sigmas_type: Optional[str] = "zero", # "zero", "sigma_min" + use_karras_sigmas: bool = False, + ): + if solver_type not in ["bh1", "bh2"]: + if solver_type in ["midpoint", "heun", "logrho"]: + self.register_to_config(solver_type="bh2") + else: + raise NotImplementedError(f"{solver_type} is not implemented for {self.__class__}") + + self.predict_x0 = predict_x0 + # setable values + self.num_inference_steps = None + alphas = np.linspace(1, 1 / num_train_timesteps, num_train_timesteps)[::-1].copy() + sigmas = 1.0 - alphas + sigmas = torch.from_numpy(sigmas).to(dtype=torch.float32) + + if not use_dynamic_shifting: + # when use_dynamic_shifting is True, we apply the timestep shifting on the fly based on the image resolution + sigmas = shift * sigmas / (1 + (shift - 1) * sigmas) # pyright: ignore + + self.sigmas = sigmas + self.timesteps = sigmas * num_train_timesteps + + self.model_outputs = [None] * solver_order + self.timestep_list = [None] * solver_order + self.lower_order_nums = 0 + self.disable_corrector = disable_corrector + self.solver_p = solver_p + self.last_sample = None + self._step_index = None + self._begin_index = None + + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + self.sigma_min = self.sigmas[-1].item() + self.sigma_max = self.sigmas[0].item() + + @property + def step_index(self): + """ + The index counter for current timestep. It will increase 1 after each scheduler step. + """ + return self._step_index + + @property + def begin_index(self): + """ + The index for the first timestep. It should be set from pipeline with `set_begin_index` method. + """ + return self._begin_index + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index + def set_begin_index(self, begin_index: int = 0): + """ + Sets the begin index for the scheduler. This function should be run from pipeline before the inference. + + Args: + begin_index (`int`): + The begin index for the scheduler. + """ + self._begin_index = begin_index + + # Modified from diffusers.schedulers.scheduling_flow_match_euler_discrete.FlowMatchEulerDiscreteScheduler.set_timesteps + def set_timesteps( + self, + num_inference_steps: Union[int, None] = None, + device: Union[str, torch.device] = None, + sigmas: Optional[List[float]] = None, + mu: Optional[Union[float, None]] = None, + shift: Optional[Union[float, None]] = None, + ): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + Total number of the spacing of the time steps. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. 
+ """ + if self.config.use_dynamic_shifting and mu is None: + raise ValueError(" you have to pass a value for `mu` when `use_dynamic_shifting` is set to be `True`") + + if self.config.use_karras_sigmas: + # force to use the exact sigma used in edm sampler + sigma_max = 200 + sigma_min = 0.01 + rho = 7 + sigmas = np.arange(num_inference_steps + 1) / num_inference_steps + min_inv_rho = sigma_min ** (1 / rho) + max_inv_rho = sigma_max ** (1 / rho) + sigmas = (max_inv_rho + sigmas * (min_inv_rho - max_inv_rho)) ** rho + sigmas = sigmas / (1 + sigmas) + else: + if sigmas is None: + sigmas = np.linspace(self.sigma_max, self.sigma_min, num_inference_steps + 1).copy()[:-1] # pyright: ignore + + if self.config.use_dynamic_shifting: + sigmas = self.time_shift(mu, 1.0, sigmas) # pyright: ignore + else: + if shift is None: + shift = self.config.shift + sigmas = shift * sigmas / (1 + (shift - 1) * sigmas) # pyright: ignore + + if self.config.final_sigmas_type == "sigma_min": + # TODO(migmartin): this raises an error, rewrite this class + sigma_last = ((1 - self.alphas_cumprod[0]) / self.alphas_cumprod[0]) ** 0.5 + elif self.config.final_sigmas_type == "zero": + sigma_last = 0 + else: + raise ValueError( + f"`final_sigmas_type` must be one of 'zero', or 'sigma_min', but got {self.config.final_sigmas_type}" + ) + + timesteps = sigmas * self.config.num_train_timesteps + sigmas = np.concatenate([sigmas, [sigma_last]]).astype(np.float32) # pyright: ignore + + self.sigmas = torch.from_numpy(sigmas) + self.timesteps = torch.from_numpy(timesteps).to(device=device, dtype=torch.int64) + + self.num_inference_steps = len(timesteps) + + self.model_outputs = [ + None, + ] * self.config.solver_order + self.lower_order_nums = 0 + self.last_sample = None + if self.solver_p: + self.solver_p.set_timesteps(self.num_inference_steps, device=device) + + # add an index counter for schedulers that allow duplicated timesteps + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample + def _threshold_sample(self, sample: torch.Tensor) -> torch.Tensor: + """ + "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the + prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by + s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing + pixels from saturation at each step. We find that dynamic thresholding results in significantly better + photorealism as well as better image-text alignment, especially when using very large guidance weights." 
+
+        https://arxiv.org/abs/2205.11487
+        """
+        dtype = sample.dtype
+        batch_size, channels, *remaining_dims = sample.shape
+
+        if dtype not in (torch.float32, torch.float64):
+            sample = sample.float()  # upcast for quantile calculation, and clamp not implemented for cpu half
+
+        # Flatten sample for doing quantile calculation along each image
+        sample = sample.reshape(batch_size, channels * np.prod(remaining_dims))
+
+        abs_sample = sample.abs()  # "a certain percentile absolute pixel value"
+
+        s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1)
+        s = torch.clamp(
+            s, min=1, max=self.config.sample_max_value
+        )  # When clamped to min=1, equivalent to standard clipping to [-1, 1]
+        s = s.unsqueeze(1)  # (batch_size, 1) because clamp will broadcast along dim=0
+        sample = torch.clamp(sample, -s, s) / s  # "we threshold xt0 to the range [-s, s] and then divide by s"
+
+        sample = sample.reshape(batch_size, channels, *remaining_dims)
+        sample = sample.to(dtype)
+
+        return sample
+
+    # Copied from diffusers.schedulers.scheduling_flow_match_euler_discrete.FlowMatchEulerDiscreteScheduler._sigma_to_t
+    def _sigma_to_t(self, sigma):
+        return sigma * self.config.num_train_timesteps
+
+    def _sigma_to_alpha_sigma_t(self, sigma):
+        return 1 - sigma, sigma
+
+    # Copied from diffusers.schedulers.scheduling_flow_match_euler_discrete.FlowMatchEulerDiscreteScheduler.time_shift
+    def time_shift(self, mu: float, sigma: float, t: torch.Tensor):
+        return math.exp(mu) / (math.exp(mu) + (1 / t - 1) ** sigma)
+
+    def convert_model_output(
+        self,
+        model_output: torch.Tensor,
+        *args,
+        sample: torch.Tensor = None,
+        **kwargs,
+    ) -> torch.Tensor:
+        r"""
+        Convert the model output to the corresponding type the UniPC algorithm needs.
+
+        Args:
+            model_output (`torch.Tensor`):
+                The direct output from the learned diffusion model.
+            timestep (`int`):
+                The current discrete timestep in the diffusion chain.
+            sample (`torch.Tensor`):
+                A current instance of a sample created by the diffusion process.
+
+        Returns:
+            `torch.Tensor`:
+                The converted model output.
+        """
+        timestep = args[0] if len(args) > 0 else kwargs.pop("timestep", None)
+        if sample is None:
+            if len(args) > 1:
+                sample = args[1]
+            else:
+                raise ValueError("missing `sample` as a required keyword argument")
+        if timestep is not None:
+            deprecate(
+                "timesteps",
+                "1.0.0",
+                "Passing `timesteps` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`",
+            )
+
+        sigma = self.sigmas[self.step_index]
+        alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma)
+
+        if self.predict_x0:
+            if self.config.prediction_type == "flow_prediction":
+                sigma_t = self.sigmas[self.step_index]
+                x0_pred = sample - sigma_t * model_output
+            else:
+                raise ValueError(
+                    f"prediction_type given as {self.config.prediction_type} must be `flow_prediction` for the"
+                    " FlowUniPCMultistepScheduler."
+                )
+
+            if self.config.thresholding:
+                x0_pred = self._threshold_sample(x0_pred)
+            return x0_pred
+        else:
+            if self.config.prediction_type == "flow_prediction":
+                sigma_t = self.sigmas[self.step_index]
+                epsilon = sample - (1 - sigma_t) * model_output
+            else:
+                raise ValueError(
+                    f"prediction_type given as {self.config.prediction_type} must be `flow_prediction` for the"
+                    " FlowUniPCMultistepScheduler."
+                )
+
+            if self.config.thresholding:
+                sigma_t = self.sigmas[self.step_index]
+                x0_pred = sample - sigma_t * model_output
+                x0_pred = self._threshold_sample(x0_pred)
+                epsilon = model_output + x0_pred
+
+            return epsilon
+
+    def multistep_uni_p_bh_update(
+        self,
+        model_output: torch.Tensor,
+        *args,
+        sample: torch.Tensor = None,
+        order: int = None,  # pyright: ignore
+        **kwargs,
+    ) -> torch.Tensor:
+        """
+        One step for the UniP (B(h) version). Alternatively, `self.solver_p` is used if it is specified.
+
+        Args:
+            model_output (`torch.Tensor`):
+                The direct output from the learned diffusion model at the current timestep.
+            prev_timestep (`int`):
+                The previous discrete timestep in the diffusion chain.
+            sample (`torch.Tensor`):
+                A current instance of a sample created by the diffusion process.
+            order (`int`):
+                The order of UniP at this timestep (corresponds to the *p* in UniPC-p).
+
+        Returns:
+            `torch.Tensor`:
+                The sample tensor at the previous timestep.
+        """
+        prev_timestep = args[0] if len(args) > 0 else kwargs.pop("prev_timestep", None)
+        if sample is None:
+            if len(args) > 1:
+                sample = args[1]
+            else:
+                raise ValueError("missing `sample` as a required keyword argument")
+        if order is None:
+            if len(args) > 2:
+                order = args[2]
+            else:
+                raise ValueError("missing `order` as a required keyword argument")
+        if prev_timestep is not None:
+            deprecate(
+                "prev_timestep",
+                "1.0.0",
+                "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`",
+            )
+        model_output_list = self.model_outputs
+
+        s0 = self.timestep_list[-1]
+        m0 = model_output_list[-1]
+        x = sample
+
+        if self.solver_p:
+            x_t = self.solver_p.step(model_output, s0, x).prev_sample
+            return x_t
+
+        sigma_t, sigma_s0 = self.sigmas[self.step_index + 1], self.sigmas[self.step_index]  # pyright: ignore
+        alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t)
+        alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0)
+
+        lambda_t = torch.log(alpha_t) - torch.log(sigma_t)
+        lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0)
+
+        h = lambda_t - lambda_s0
+        device = sample.device
+
+        rks = []
+        D1s = []
+        for i in range(1, order):
+            si = self.step_index - i  # pyright: ignore
+            mi = model_output_list[-(i + 1)]
+            alpha_si, sigma_si = self._sigma_to_alpha_sigma_t(self.sigmas[si])
+            lambda_si = torch.log(alpha_si) - torch.log(sigma_si)
+            rk = (lambda_si - lambda_s0) / h
+            rks.append(rk)
+            D1s.append((mi - m0) / rk)  # pyright: ignore
+
+        rks.append(1.0)
+        rks = torch.tensor(rks, device=device)
+
+        R = []
+        b = []
+
+        hh = -h if self.predict_x0 else h
+        h_phi_1 = torch.expm1(hh)  # h\phi_1(h) = e^h - 1
+        h_phi_k = h_phi_1 / hh - 1
+
+        factorial_i = 1
+
+        if self.config.solver_type == "bh1":
+            B_h = hh
+        elif self.config.solver_type == "bh2":
+            B_h = torch.expm1(hh)
+        else:
+            raise NotImplementedError()
+
+        for i in range(1, order + 1):
+            R.append(torch.pow(rks, i - 1))
+            b.append(h_phi_k * factorial_i / B_h)
+            factorial_i *= i + 1
+            h_phi_k = h_phi_k / hh - 1 / factorial_i
+
+        R = torch.stack(R)
+        b = torch.tensor(b, device=device)
+
+        if len(D1s) > 0:
+            D1s = torch.stack(D1s, dim=1)  # (B, K)
+            # for order 2, we use a simplified version
+            if order == 2:
+                rhos_p = torch.tensor([0.5], dtype=x.dtype, device=device)
+            else:
+                rhos_p = torch.linalg.solve(R[:-1, :-1], b[:-1]).to(device).to(x.dtype)
+        else:
+            D1s = None
+
+        if self.predict_x0:
+            x_t_ = sigma_t / sigma_s0 * x - alpha_t * h_phi_1 * m0
+            if D1s is not None:
+                pred_res = torch.einsum("k,bkc...->bc...", rhos_p, D1s)  # pyright: ignore
+            else:
+                pred_res = 0
+            x_t = x_t_ - alpha_t * B_h * pred_res
+        else:
+            x_t_ = alpha_t / alpha_s0 * x - sigma_t * h_phi_1 * m0
+            if D1s is not None:
+                pred_res = torch.einsum("k,bkc...->bc...", rhos_p, D1s)  # pyright: ignore
+            else:
+                pred_res = 0
+            x_t = x_t_ - sigma_t * B_h * pred_res
+
+        x_t = x_t.to(x.dtype)
+        return x_t
+
+    def multistep_uni_c_bh_update(
+        self,
+        this_model_output: torch.Tensor,
+        *args,
+        last_sample: torch.Tensor = None,
+        this_sample: torch.Tensor = None,
+        order: int = None,  # pyright: ignore
+        **kwargs,
+    ) -> torch.Tensor:
+        """
+        One step for the UniC (B(h) version).
+
+        Args:
+            this_model_output (`torch.Tensor`):
+                The model outputs at `x_t`.
+            this_timestep (`int`):
+                The current timestep `t`.
+            last_sample (`torch.Tensor`):
+                The generated sample before the last predictor `x_{t-1}`.
+            this_sample (`torch.Tensor`):
+                The generated sample after the last predictor `x_{t}`.
+            order (`int`):
+                The `p` of UniC-p at this step. The effective order of accuracy should be `order + 1`.
+
+        Returns:
+            `torch.Tensor`:
+                The corrected sample tensor at the current timestep.
+        """
+        this_timestep = args[0] if len(args) > 0 else kwargs.pop("this_timestep", None)
+        if last_sample is None:
+            if len(args) > 1:
+                last_sample = args[1]
+            else:
+                raise ValueError("missing `last_sample` as a required keyword argument")
+        if this_sample is None:
+            if len(args) > 2:
+                this_sample = args[2]
+            else:
+                raise ValueError("missing `this_sample` as a required keyword argument")
+        if order is None:
+            if len(args) > 3:
+                order = args[3]
+            else:
+                raise ValueError("missing `order` as a required keyword argument")
+        if this_timestep is not None:
+            deprecate(
+                "this_timestep",
+                "1.0.0",
+                "Passing `this_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`",
+            )
+
+        model_output_list = self.model_outputs
+
+        m0 = model_output_list[-1]
+        x = last_sample
+        x_t = this_sample
+        model_t = this_model_output
+
+        sigma_t, sigma_s0 = self.sigmas[self.step_index], self.sigmas[self.step_index - 1]  # pyright: ignore
+        alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t)
+        alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0)
+
+        lambda_t = torch.log(alpha_t) - torch.log(sigma_t)
+        lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0)
+
+        h = lambda_t - lambda_s0
+        device = this_sample.device
+
+        rks = []
+        D1s = []
+        for i in range(1, order):
+            si = self.step_index - (i + 1)  # pyright: ignore
+            mi = model_output_list[-(i + 1)]
+            alpha_si, sigma_si = self._sigma_to_alpha_sigma_t(self.sigmas[si])
+            lambda_si = torch.log(alpha_si) - torch.log(sigma_si)
+            rk = (lambda_si - lambda_s0) / h
+            rks.append(rk)
+            D1s.append((mi - m0) / rk)  # pyright: ignore
+
+        rks.append(1.0)
+        rks = torch.tensor(rks, device=device)
+
+        R = []
+        b = []
+
+        hh = -h if self.predict_x0 else h
+        h_phi_1 = torch.expm1(hh)  # h\phi_1(h) = e^h - 1
+        h_phi_k = h_phi_1 / hh - 1
+
+        factorial_i = 1
+
+        if self.config.solver_type == "bh1":
+            B_h = hh
+        elif self.config.solver_type == "bh2":
+            B_h = torch.expm1(hh)
+        else:
+            raise NotImplementedError()
+
+        for i in range(1, order + 1):
+            R.append(torch.pow(rks, i - 1))
+            b.append(h_phi_k * factorial_i / B_h)
+            factorial_i *= i + 1
+            h_phi_k = h_phi_k / hh - 1 / factorial_i
+
+        R = torch.stack(R)
+        b = torch.tensor(b, device=device)
+
+        if len(D1s) > 0:
+            D1s = torch.stack(D1s, dim=1)
+        else:
+            D1s = None
+
+        # for order 1, we use a simplified version
+        if order == 1:
+            rhos_c = torch.tensor([0.5], dtype=x.dtype, device=device)
+        else:
+            rhos_c = torch.linalg.solve(R, b).to(device).to(x.dtype)
+
+        if self.predict_x0:
+            x_t_ = sigma_t / sigma_s0 * x - alpha_t * h_phi_1 * m0
+            if D1s is not None:
+                corr_res = torch.einsum("k,bkc...->bc...", rhos_c[:-1], D1s)
+            else:
+                corr_res = 0
+            D1_t = model_t - m0
+            x_t = x_t_ - alpha_t * B_h * (corr_res + rhos_c[-1] * D1_t)
+        else:
+            x_t_ = alpha_t / alpha_s0 * x - sigma_t * h_phi_1 * m0
+            if D1s is not None:
+                corr_res = torch.einsum("k,bkc...->bc...", rhos_c[:-1], D1s)
+            else:
+                corr_res = 0
+            D1_t = model_t - m0
+            x_t = x_t_ - sigma_t * B_h * (corr_res + rhos_c[-1] * D1_t)
+        x_t = x_t.to(x.dtype)
+        return x_t
+
+    def index_for_timestep(self, timestep, schedule_timesteps=None):
+        if schedule_timesteps is None:
+            schedule_timesteps = self.timesteps
+
+        indices = (schedule_timesteps == timestep).nonzero()
+
+        # The sigma index that is taken for the **very** first `step`
+        # is always the second index (or the last index if there is only 1)
+        # This way we can ensure we don't accidentally skip a sigma in
+        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
+        pos = 1 if len(indices) > 1 else 0
+
+        return indices[pos].item()
+
+    # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler._init_step_index
+    def _init_step_index(self, timestep):
+        """
+        Initialize the step_index counter for the scheduler.
+        """
+
+        if self.begin_index is None:
+            if isinstance(timestep, torch.Tensor):
+                timestep = timestep.to(self.timesteps.device)
+            self._step_index = self.index_for_timestep(timestep)
+        else:
+            self._step_index = self._begin_index
+
+    def step(
+        self,
+        model_output: torch.Tensor,
+        timestep: Union[int, torch.Tensor],
+        sample: torch.Tensor,
+        return_dict: bool = True,
+        generator=None,
+    ) -> Union[SchedulerOutput, Tuple]:
+        """
+        Predict the sample from the previous timestep by reversing the SDE. This function propagates the sample with
+        the multistep UniPC.
+
+        Args:
+            model_output (`torch.Tensor`):
+                The direct output from learned diffusion model.
+            timestep (`int`):
+                The current discrete timestep in the diffusion chain.
+            sample (`torch.Tensor`):
+                A current instance of a sample created by the diffusion process.
+            return_dict (`bool`):
+                Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`.
+
+        Returns:
+            [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`:
+                If return_dict is `True`, [`~schedulers.scheduling_utils.SchedulerOutput`] is returned, otherwise a
+                tuple is returned where the first element is the sample tensor.
+ + """ + if self.num_inference_steps is None: + raise ValueError( + "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" + ) + + if self.step_index is None: + self._init_step_index(timestep) + + # print("self.step_index ==> ", self.step_index) + + use_corrector = ( + self.step_index > 0 and self.step_index - 1 not in self.disable_corrector and self.last_sample is not None # pyright: ignore + ) + + model_output_convert = self.convert_model_output(model_output, sample=sample) + + if use_corrector: + sample = self.multistep_uni_c_bh_update( + this_model_output=model_output_convert, + last_sample=self.last_sample, + this_sample=sample, + order=self.this_order, + ) + + for i in range(self.config.solver_order - 1): + self.model_outputs[i] = self.model_outputs[i + 1] + self.timestep_list[i] = self.timestep_list[i + 1] + + self.model_outputs[-1] = model_output_convert + self.timestep_list[-1] = timestep # pyright: ignore + + if self.config.lower_order_final: + this_order = min(self.config.solver_order, len(self.timesteps) - self.step_index) # pyright: ignore + else: + this_order = self.config.solver_order + + self.this_order = min(this_order, self.lower_order_nums + 1) # warmup for multistep + assert self.this_order > 0 + + self.last_sample = sample + prev_sample = self.multistep_uni_p_bh_update( + model_output=model_output, # pass the original non-converted model output, in case solver-p is used + sample=sample, + order=self.this_order, + ) + + if self.lower_order_nums < self.config.solver_order: + self.lower_order_nums += 1 + + # upon completion increase step index by one + self._step_index += 1 # pyright: ignore + + if not return_dict: + return (prev_sample, model_output_convert) + + return SchedulerOutput(prev_sample=prev_sample) + + def scale_model_input(self, sample: torch.Tensor, *args, **kwargs) -> torch.Tensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.Tensor`): + The input sample. + + Returns: + `torch.Tensor`: + A scaled input sample. 
+ """ + return sample + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.add_noise + def add_noise( + self, + original_samples: torch.Tensor, + noise: torch.Tensor, + timesteps: torch.IntTensor, + ) -> torch.Tensor: + # Make sure sigmas and timesteps have the same device and dtype as original_samples + sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) + if original_samples.device.type == "mps" and torch.is_floating_point(timesteps): + # mps does not support float64 + schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) + timesteps = timesteps.to(original_samples.device, dtype=torch.float32) + else: + schedule_timesteps = self.timesteps.to(original_samples.device) + timesteps = timesteps.to(original_samples.device) + + # begin_index is None when the scheduler is used for training or pipeline does not implement set_begin_index + if self.begin_index is None: + step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] + elif self.step_index is not None: + # add_noise is called after first denoising step (for inpainting) + step_indices = [self.step_index] * timesteps.shape[0] + else: + # add noise is called before first denoising step to create initial latent(img2img) + step_indices = [self.begin_index] * timesteps.shape[0] + + sigma = sigmas[step_indices].flatten() + while len(sigma.shape) < len(original_samples.shape): + sigma = sigma.unsqueeze(-1) + + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) + noisy_samples = alpha_t * original_samples + sigma_t * noise + return noisy_samples + + def __len__(self): + return self.config.num_train_timesteps diff --git a/src/diffusers/schedulers/__init__.py b/src/diffusers/schedulers/__init__.py index 29052c1ba0cb..eb6dcda0188a 100644 --- a/src/diffusers/schedulers/__init__.py +++ b/src/diffusers/schedulers/__init__.py @@ -61,6 +61,7 @@ _import_structure["scheduling_flow_match_euler_discrete"] = ["FlowMatchEulerDiscreteScheduler"] _import_structure["scheduling_flow_match_heun_discrete"] = ["FlowMatchHeunDiscreteScheduler"] _import_structure["scheduling_flow_match_lcm"] = ["FlowMatchLCMScheduler"] + _import_structure["scheduling_flow_unipc_multistep"] = ["FlowUniPCMultistepScheduler"] _import_structure["scheduling_heun_discrete"] = ["HeunDiscreteScheduler"] _import_structure["scheduling_ipndm"] = ["IPNDMScheduler"] _import_structure["scheduling_k_dpm_2_ancestral_discrete"] = ["KDPM2AncestralDiscreteScheduler"] @@ -163,6 +164,7 @@ from .scheduling_flow_match_euler_discrete import FlowMatchEulerDiscreteScheduler from .scheduling_flow_match_heun_discrete import FlowMatchHeunDiscreteScheduler from .scheduling_flow_match_lcm import FlowMatchLCMScheduler + from .scheduling_flow_unipc_multistep import FlowUniPCMultistepScheduler from .scheduling_heun_discrete import HeunDiscreteScheduler from .scheduling_ipndm import IPNDMScheduler from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler diff --git a/src/diffusers/schedulers/scheduling_flow_unipc_multistep.py b/src/diffusers/schedulers/scheduling_flow_unipc_multistep.py new file mode 100644 index 000000000000..fb5f210f0ba8 --- /dev/null +++ b/src/diffusers/schedulers/scheduling_flow_unipc_multistep.py @@ -0,0 +1,770 @@ +# TODO(migmartin): reduce LOC by using inheritance from UniPCMultistepScheduler +# Copied from https://github.com/huggingface/diffusers/blob/v0.31.0/src/diffusers/schedulers/scheduling_unipc_multistep.py +# Convert unipc for 
diff --git a/src/diffusers/schedulers/scheduling_flow_unipc_multistep.py b/src/diffusers/schedulers/scheduling_flow_unipc_multistep.py
new file mode 100644
index 000000000000..fb5f210f0ba8
--- /dev/null
+++ b/src/diffusers/schedulers/scheduling_flow_unipc_multistep.py
@@ -0,0 +1,770 @@
+# TODO(migmartin): reduce LOC by using inheritance from UniPCMultistepScheduler
+# Copied from https://github.com/huggingface/diffusers/blob/v0.31.0/src/diffusers/schedulers/scheduling_unipc_multistep.py
+# Convert unipc for flow matching
+# Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved.
+
+import math
+from typing import List, Optional, Tuple, Union
+
+import numpy as np
+import torch
+
+from diffusers.configuration_utils import ConfigMixin, register_to_config
+from diffusers.schedulers.scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
+from diffusers.utils import deprecate
+
+
+class FlowUniPCMultistepScheduler(SchedulerMixin, ConfigMixin):
+    """
+    `FlowUniPCMultistepScheduler` is a training-free framework designed for the fast sampling of diffusion models.
+
+    This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic
+    methods the library implements for all schedulers such as loading and saving.
+
+    Args:
+        num_train_timesteps (`int`, defaults to 1000):
+            The number of diffusion steps to train the model.
+        solver_order (`int`, default `2`):
+            The UniPC order which can be any positive integer. The effective order of accuracy is `solver_order + 1`
+            due to the UniC. It is recommended to use `solver_order=2` for guided sampling, and `solver_order=3` for
+            unconditional sampling.
+        prediction_type (`str`, defaults to "flow_prediction"):
+            Prediction type of the scheduler function; must be `flow_prediction` for this scheduler, which predicts the
+            flow of the diffusion process.
+        thresholding (`bool`, defaults to `False`):
+            Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such
+            as Stable Diffusion.
+        dynamic_thresholding_ratio (`float`, defaults to 0.995):
+            The ratio for the dynamic thresholding method. Valid only when `thresholding=True`.
+        sample_max_value (`float`, defaults to 1.0):
+            The threshold value for dynamic thresholding. Valid only when `thresholding=True` and `predict_x0=True`.
+        predict_x0 (`bool`, defaults to `True`):
+            Whether to use the updating algorithm on the predicted x0.
+        solver_type (`str`, default `bh2`):
+            Solver type for UniPC. It is recommended to use `bh1` for unconditional sampling when steps < 10, and `bh2`
+            otherwise.
+        lower_order_final (`bool`, default `True`):
+            Whether to use lower-order solvers in the final steps. Only valid for < 15 inference steps. This can
+            stabilize the sampling of DPMSolver for steps < 15, especially for steps <= 10.
+        disable_corrector (`list`, default `[]`):
+            Decides at which steps to disable the corrector, to mitigate the misalignment between `epsilon_theta(x_t, c)`
+            and `epsilon_theta(x_t^c, c)`, which can influence convergence for a large guidance scale. The corrector is
+            usually disabled during the first few steps.
+        solver_p (`SchedulerMixin`, default `None`):
+            Any other scheduler; if specified, the algorithm becomes `solver_p + UniC`.
+        use_karras_sigmas (`bool`, *optional*, defaults to `False`):
+            Whether to use Karras sigmas for step sizes in the noise schedule during the sampling process. If `True`,
+            the sigmas are determined according to a sequence of noise levels {σi}.
+        use_exponential_sigmas (`bool`, *optional*, defaults to `False`):
+            Whether to use exponential sigmas for step sizes in the noise schedule during the sampling process.
+        timestep_spacing (`str`, defaults to `"linspace"`):
+            The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and
+            Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information.
+ steps_offset (`int`, defaults to 0): + An offset added to the inference steps, as required by some model families. + final_sigmas_type (`str`, defaults to `"zero"`): + The final `sigma` value for the noise schedule during the sampling process. If `"sigma_min"`, the final + sigma is the same as the last sigma in the training schedule. If `zero`, the final sigma is set to 0. + """ + + _compatibles = [e.name for e in KarrasDiffusionSchedulers] + order = 1 + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + solver_order: int = 2, + prediction_type: str = "flow_prediction", + shift: Optional[float] = 1.0, + use_dynamic_shifting=False, + thresholding: bool = False, + dynamic_thresholding_ratio: float = 0.995, + sample_max_value: float = 1.0, + predict_x0: bool = True, + solver_type: str = "bh2", + lower_order_final: bool = True, + disable_corrector: List[int] = [], + solver_p: SchedulerMixin = None, + timestep_spacing: str = "linspace", + steps_offset: int = 0, + final_sigmas_type: Optional[str] = "zero", # "zero", "sigma_min" + use_karras_sigmas: bool = False, + ): + if solver_type not in ["bh1", "bh2"]: + if solver_type in ["midpoint", "heun", "logrho"]: + self.register_to_config(solver_type="bh2") + else: + raise NotImplementedError(f"{solver_type} is not implemented for {self.__class__}") + + self.predict_x0 = predict_x0 + # setable values + self.num_inference_steps = None + alphas = np.linspace(1, 1 / num_train_timesteps, num_train_timesteps)[::-1].copy() + sigmas = 1.0 - alphas + sigmas = torch.from_numpy(sigmas).to(dtype=torch.float32) + + if not use_dynamic_shifting: + # when use_dynamic_shifting is True, we apply the timestep shifting on the fly based on the image resolution + sigmas = shift * sigmas / (1 + (shift - 1) * sigmas) # pyright: ignore + + self.sigmas = sigmas + self.timesteps = sigmas * num_train_timesteps + + self.model_outputs = [None] * solver_order + self.timestep_list = [None] * solver_order + self.lower_order_nums = 0 + self.disable_corrector = disable_corrector + self.solver_p = solver_p + self.last_sample = None + self._step_index = None + self._begin_index = None + + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + self.sigma_min = self.sigmas[-1].item() + self.sigma_max = self.sigmas[0].item() + + @property + def step_index(self): + """ + The index counter for current timestep. It will increase 1 after each scheduler step. + """ + return self._step_index + + @property + def begin_index(self): + """ + The index for the first timestep. It should be set from pipeline with `set_begin_index` method. + """ + return self._begin_index + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index + def set_begin_index(self, begin_index: int = 0): + """ + Sets the begin index for the scheduler. This function should be run from pipeline before the inference. + + Args: + begin_index (`int`): + The begin index for the scheduler. + """ + self._begin_index = begin_index + + # Modified from diffusers.schedulers.scheduling_flow_match_euler_discrete.FlowMatchEulerDiscreteScheduler.set_timesteps + def set_timesteps( + self, + num_inference_steps: Union[int, None] = None, + device: Union[str, torch.device] = None, + sigmas: Optional[List[float]] = None, + mu: Optional[Union[float, None]] = None, + shift: Optional[Union[float, None]] = None, + ): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). 
+
+        Args:
+            num_inference_steps (`int`):
+                The number of diffusion steps used when generating samples with a pre-trained model.
+            device (`str` or `torch.device`, *optional*):
+                The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
+        """
+        if self.config.use_dynamic_shifting and mu is None:
+            raise ValueError("You have to pass a value for `mu` when `use_dynamic_shifting` is set to `True`")
+
+        if self.config.use_karras_sigmas:
+            # force to use the exact sigma used in edm sampler
+            sigma_max = 200
+            sigma_min = 0.01
+            rho = 7
+            sigmas = np.arange(num_inference_steps + 1) / num_inference_steps
+            min_inv_rho = sigma_min ** (1 / rho)
+            max_inv_rho = sigma_max ** (1 / rho)
+            sigmas = (max_inv_rho + sigmas * (min_inv_rho - max_inv_rho)) ** rho
+            sigmas = sigmas / (1 + sigmas)
+        else:
+            if sigmas is None:
+                sigmas = np.linspace(self.sigma_max, self.sigma_min, num_inference_steps + 1).copy()[:-1]  # pyright: ignore
+
+            if self.config.use_dynamic_shifting:
+                sigmas = self.time_shift(mu, 1.0, sigmas)  # pyright: ignore
+            else:
+                if shift is None:
+                    shift = self.config.shift
+                sigmas = shift * sigmas / (1 + (shift - 1) * sigmas)  # pyright: ignore
+
+        if self.config.final_sigmas_type == "sigma_min":
+            # TODO(migmartin): this raises an error, rewrite this class
+            sigma_last = ((1 - self.alphas_cumprod[0]) / self.alphas_cumprod[0]) ** 0.5
+        elif self.config.final_sigmas_type == "zero":
+            sigma_last = 0
+        else:
+            raise ValueError(
+                f"`final_sigmas_type` must be one of 'zero', or 'sigma_min', but got {self.config.final_sigmas_type}"
+            )
+
+        timesteps = sigmas * self.config.num_train_timesteps
+        sigmas = np.concatenate([sigmas, [sigma_last]]).astype(np.float32)  # pyright: ignore
+
+        self.sigmas = torch.from_numpy(sigmas)
+        self.timesteps = torch.from_numpy(timesteps).to(device=device, dtype=torch.int64)
+
+        self.num_inference_steps = len(timesteps)
+
+        self.model_outputs = [
+            None,
+        ] * self.config.solver_order
+        self.lower_order_nums = 0
+        self.last_sample = None
+        if self.solver_p:
+            self.solver_p.set_timesteps(self.num_inference_steps, device=device)
+
+        # add an index counter for schedulers that allow duplicated timesteps
+        self._step_index = None
+        self._begin_index = None
+        self.sigmas = self.sigmas.to("cpu")  # to avoid too much CPU/GPU communication
+
+    # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample
+    def _threshold_sample(self, sample: torch.Tensor) -> torch.Tensor:
+        """
+        "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the
+        prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by
+        s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing
+        pixels from saturation at each step. We find that dynamic thresholding results in significantly better
+        photorealism as well as better image-text alignment, especially when using very large guidance weights."
+
+        https://arxiv.org/abs/2205.11487
+        """
+        dtype = sample.dtype
+        batch_size, channels, *remaining_dims = sample.shape
+
+        if dtype not in (torch.float32, torch.float64):
+            sample = sample.float()  # upcast for quantile calculation, and clamp not implemented for cpu half
+
+        # Flatten sample for doing quantile calculation along each image
+        sample = sample.reshape(batch_size, channels * np.prod(remaining_dims))
+
+        abs_sample = sample.abs()  # "a certain percentile absolute pixel value"
+
+        s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1)
+        s = torch.clamp(
+            s, min=1, max=self.config.sample_max_value
+        )  # When clamped to min=1, equivalent to standard clipping to [-1, 1]
+        s = s.unsqueeze(1)  # (batch_size, 1) because clamp will broadcast along dim=0
+        sample = torch.clamp(sample, -s, s) / s  # "we threshold xt0 to the range [-s, s] and then divide by s"
+
+        sample = sample.reshape(batch_size, channels, *remaining_dims)
+        sample = sample.to(dtype)
+
+        return sample
+
+    # Copied from diffusers.schedulers.scheduling_flow_match_euler_discrete.FlowMatchEulerDiscreteScheduler._sigma_to_t
+    def _sigma_to_t(self, sigma):
+        return sigma * self.config.num_train_timesteps
+
+    def _sigma_to_alpha_sigma_t(self, sigma):
+        return 1 - sigma, sigma
+
+    # Copied from diffusers.schedulers.scheduling_flow_match_euler_discrete.set_timesteps
+    def time_shift(self, mu: float, sigma: float, t: torch.Tensor):
+        return math.exp(mu) / (math.exp(mu) + (1 / t - 1) ** sigma)
+
+    def convert_model_output(
+        self,
+        model_output: torch.Tensor,
+        *args,
+        sample: torch.Tensor = None,
+        **kwargs,
+    ) -> torch.Tensor:
+        r"""
+        Convert the model output to the corresponding type the UniPC algorithm needs.
+
+        Args:
+            model_output (`torch.Tensor`):
+                The direct output from the learned diffusion model.
+            timestep (`int`):
+                The current discrete timestep in the diffusion chain.
+            sample (`torch.Tensor`):
+                A current instance of a sample created by the diffusion process.
+
+        Returns:
+            `torch.Tensor`:
+                The converted model output.
+        """
+        timestep = args[0] if len(args) > 0 else kwargs.pop("timestep", None)
+        if sample is None:
+            if len(args) > 1:
+                sample = args[1]
+            else:
+                raise ValueError("missing `sample` as a required keyword argument")
+        if timestep is not None:
+            deprecate(
+                "timesteps",
+                "1.0.0",
+                "Passing `timesteps` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`",
+            )
+
+        sigma = self.sigmas[self.step_index]
+        alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma)
+
+        # print("sigma_t ==>", self.step_index, sigma, sigma_t, alpha_t, sample.shape, model_output.shape)
+        if self.predict_x0:
+            if self.config.prediction_type == "flow_prediction":
+                sigma_t = self.sigmas[self.step_index]
+                x0_pred = sample - sigma_t * model_output
+            else:
+                raise ValueError(
+                    f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`,"
+                    " `v_prediction` or `flow_prediction` for the UniPCMultistepScheduler."
+                )
+
+            if self.config.thresholding:
+                x0_pred = self._threshold_sample(x0_pred)
+                # print("self.config.thresholding", self.config.thresholding)
+            return x0_pred
+        else:
+            if self.config.prediction_type == "flow_prediction":
+                sigma_t = self.sigmas[self.step_index]
+                epsilon = sample - (1 - sigma_t) * model_output
+            else:
+                raise ValueError(
+                    f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`,"
+                    " `v_prediction` or `flow_prediction` for the UniPCMultistepScheduler."
+                )
+
+            if self.config.thresholding:
+                sigma_t = self.sigmas[self.step_index]
+                x0_pred = sample - sigma_t * model_output
+                x0_pred = self._threshold_sample(x0_pred)
+                epsilon = model_output + x0_pred
+
+            return epsilon
+
+    def multistep_uni_p_bh_update(
+        self,
+        model_output: torch.Tensor,
+        *args,
+        sample: torch.Tensor = None,
+        order: int = None,  # pyright: ignore
+        **kwargs,
+    ) -> torch.Tensor:
+        """
+        One step for the UniP (B(h) version). Alternatively, `self.solver_p` is used if it is specified.
+
+        Args:
+            model_output (`torch.Tensor`):
+                The direct output from the learned diffusion model at the current timestep.
+            prev_timestep (`int`):
+                The previous discrete timestep in the diffusion chain.
+            sample (`torch.Tensor`):
+                A current instance of a sample created by the diffusion process.
+            order (`int`):
+                The order of UniP at this timestep (corresponds to the *p* in UniPC-p).
+
+        Returns:
+            `torch.Tensor`:
+                The sample tensor at the previous timestep.
+        """
+        prev_timestep = args[0] if len(args) > 0 else kwargs.pop("prev_timestep", None)
+        if sample is None:
+            if len(args) > 1:
+                sample = args[1]
+            else:
+                raise ValueError("missing `sample` as a required keyword argument")
+        if order is None:
+            if len(args) > 2:
+                order = args[2]
+            else:
+                raise ValueError("missing `order` as a required keyword argument")
+        if prev_timestep is not None:
+            deprecate(
+                "prev_timestep",
+                "1.0.0",
+                "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`",
+            )
+        model_output_list = self.model_outputs
+
+        s0 = self.timestep_list[-1]
+        m0 = model_output_list[-1]
+        x = sample
+
+        if self.solver_p:
+            x_t = self.solver_p.step(model_output, s0, x).prev_sample
+            return x_t
+
+        sigma_t, sigma_s0 = self.sigmas[self.step_index + 1], self.sigmas[self.step_index]  # pyright: ignore
+        alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t)
+        alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0)
+
+        lambda_t = torch.log(alpha_t) - torch.log(sigma_t)
+        lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0)
+
+        h = lambda_t - lambda_s0
+        device = sample.device
+
+        rks = []
+        D1s = []
+        for i in range(1, order):
+            si = self.step_index - i  # pyright: ignore
+            mi = model_output_list[-(i + 1)]
+            alpha_si, sigma_si = self._sigma_to_alpha_sigma_t(self.sigmas[si])
+            lambda_si = torch.log(alpha_si) - torch.log(sigma_si)
+            rk = (lambda_si - lambda_s0) / h
+            rks.append(rk)
+            D1s.append((mi - m0) / rk)  # pyright: ignore
+
+        rks.append(1.0)
+        rks = torch.tensor(rks, device=device)
+
+        R = []
+        b = []
+
+        hh = -h if self.predict_x0 else h
+        h_phi_1 = torch.expm1(hh)  # h\phi_1(h) = e^h - 1
+        h_phi_k = h_phi_1 / hh - 1
+
+        factorial_i = 1
+
+        if self.config.solver_type == "bh1":
+            B_h = hh
+        elif self.config.solver_type == "bh2":
+            B_h = torch.expm1(hh)
+        else:
+            raise NotImplementedError()
+
+        for i in range(1, order + 1):
+            R.append(torch.pow(rks, i - 1))
+            b.append(h_phi_k * factorial_i / B_h)
+            factorial_i *= i + 1
+            h_phi_k = h_phi_k / hh - 1 / factorial_i
+
+        R = torch.stack(R)
+        b = torch.tensor(b, device=device)
+
+        if len(D1s) > 0:
+            D1s = torch.stack(D1s, dim=1)  # (B, K)
+            # for order 2, we use a simplified version
+            if order == 2:
+                rhos_p = torch.tensor([0.5], dtype=x.dtype, device=device)
+            else:
+                rhos_p = torch.linalg.solve(R[:-1, :-1], b[:-1]).to(device).to(x.dtype)
+        else:
+            D1s = None
+
+        if self.predict_x0:
+            x_t_ = sigma_t / sigma_s0 * x - alpha_t * h_phi_1 * m0
+            if D1s is not None:
+                pred_res = torch.einsum("k,bkc...->bc...", rhos_p, D1s)  # pyright: ignore
+            else:
+                pred_res = 0
+            x_t = x_t_ - alpha_t * B_h * pred_res
+        else:
+            x_t_ = alpha_t / alpha_s0 * x - sigma_t * h_phi_1 * m0
+            if D1s is not None:
+                pred_res = torch.einsum("k,bkc...->bc...", rhos_p, D1s)  # pyright: ignore
+            else:
+                pred_res = 0
+            x_t = x_t_ - sigma_t * B_h * pred_res
+
+        x_t = x_t.to(x.dtype)
+        return x_t
+
+    def multistep_uni_c_bh_update(
+        self,
+        this_model_output: torch.Tensor,
+        *args,
+        last_sample: torch.Tensor = None,
+        this_sample: torch.Tensor = None,
+        order: int = None,  # pyright: ignore
+        **kwargs,
+    ) -> torch.Tensor:
+        """
+        One step for the UniC (B(h) version).
+
+        Args:
+            this_model_output (`torch.Tensor`):
+                The model outputs at `x_t`.
+            this_timestep (`int`):
+                The current timestep `t`.
+            last_sample (`torch.Tensor`):
+                The generated sample before the last predictor `x_{t-1}`.
+            this_sample (`torch.Tensor`):
+                The generated sample after the last predictor `x_{t}`.
+            order (`int`):
+                The `p` of UniC-p at this step. The effective order of accuracy should be `order + 1`.
+
+        Returns:
+            `torch.Tensor`:
+                The corrected sample tensor at the current timestep.
+        """
+        this_timestep = args[0] if len(args) > 0 else kwargs.pop("this_timestep", None)
+        if last_sample is None:
+            if len(args) > 1:
+                last_sample = args[1]
+            else:
+                raise ValueError("missing `last_sample` as a required keyword argument")
+        if this_sample is None:
+            if len(args) > 2:
+                this_sample = args[2]
+            else:
+                raise ValueError("missing `this_sample` as a required keyword argument")
+        if order is None:
+            if len(args) > 3:
+                order = args[3]
+            else:
+                raise ValueError("missing `order` as a required keyword argument")
+        if this_timestep is not None:
+            deprecate(
+                "this_timestep",
+                "1.0.0",
+                "Passing `this_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`",
+            )
+
+        model_output_list = self.model_outputs
+
+        m0 = model_output_list[-1]
+        x = last_sample
+        x_t = this_sample
+        model_t = this_model_output
+
+        sigma_t, sigma_s0 = self.sigmas[self.step_index], self.sigmas[self.step_index - 1]  # pyright: ignore
+        alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t)
+        alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0)
+
+        lambda_t = torch.log(alpha_t) - torch.log(sigma_t)
+        lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0)
+
+        h = lambda_t - lambda_s0
+        device = this_sample.device
+
+        rks = []
+        D1s = []
+        for i in range(1, order):
+            si = self.step_index - (i + 1)  # pyright: ignore
+            mi = model_output_list[-(i + 1)]
+            alpha_si, sigma_si = self._sigma_to_alpha_sigma_t(self.sigmas[si])
+            lambda_si = torch.log(alpha_si) - torch.log(sigma_si)
+            rk = (lambda_si - lambda_s0) / h
+            rks.append(rk)
+            D1s.append((mi - m0) / rk)  # pyright: ignore
+
+        rks.append(1.0)
+        rks = torch.tensor(rks, device=device)
+
+        R = []
+        b = []
+
+        hh = -h if self.predict_x0 else h
+        h_phi_1 = torch.expm1(hh)  # h\phi_1(h) = e^h - 1
+        h_phi_k = h_phi_1 / hh - 1
+
+        factorial_i = 1
+
+        if 
self.config.solver_type == "bh1": + B_h = hh + elif self.config.solver_type == "bh2": + B_h = torch.expm1(hh) + else: + raise NotImplementedError() + + for i in range(1, order + 1): + R.append(torch.pow(rks, i - 1)) + b.append(h_phi_k * factorial_i / B_h) + factorial_i *= i + 1 + h_phi_k = h_phi_k / hh - 1 / factorial_i + + R = torch.stack(R) + b = torch.tensor(b, device=device) + + if len(D1s) > 0: + D1s = torch.stack(D1s, dim=1) + else: + D1s = None + + # for order 1, we use a simplified version + if order == 1: + rhos_c = torch.tensor([0.5], dtype=x.dtype, device=device) + else: + rhos_c = torch.linalg.solve(R, b).to(device).to(x.dtype) + + if self.predict_x0: + x_t_ = sigma_t / sigma_s0 * x - alpha_t * h_phi_1 * m0 + if D1s is not None: + corr_res = torch.einsum("k,bkc...->bc...", rhos_c[:-1], D1s) + else: + corr_res = 0 + D1_t = model_t - m0 + x_t = x_t_ - alpha_t * B_h * (corr_res + rhos_c[-1] * D1_t) + else: + x_t_ = alpha_t / alpha_s0 * x - sigma_t * h_phi_1 * m0 + if D1s is not None: + corr_res = torch.einsum("k,bkc...->bc...", rhos_c[:-1], D1s) + else: + corr_res = 0 + D1_t = model_t - m0 + x_t = x_t_ - sigma_t * B_h * (corr_res + rhos_c[-1] * D1_t) + x_t = x_t.to(x.dtype) + return x_t + + def index_for_timestep(self, timestep, schedule_timesteps=None): + if schedule_timesteps is None: + schedule_timesteps = self.timesteps + + indices = (schedule_timesteps == timestep).nonzero() + + # The sigma index that is taken for the **very** first `step` + # is always the second index (or the last index if there is only 1) + # This way we can ensure we don't accidentally skip a sigma in + # case we start in the middle of the denoising schedule (e.g. for image-to-image) + pos = 1 if len(indices) > 1 else 0 + + return indices[pos].item() + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler._init_step_index + def _init_step_index(self, timestep): + """ + Initialize the step_index counter for the scheduler. + """ + + if self.begin_index is None: + if isinstance(timestep, torch.Tensor): + timestep = timestep.to(self.timesteps.device) + self._step_index = self.index_for_timestep(timestep) + else: + self._step_index = self._begin_index + + def step( + self, + model_output: torch.Tensor, + timestep: Union[int, torch.Tensor], + sample: torch.Tensor, + return_dict: bool = True, + generator=None, + ) -> Union[SchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the sample with + the multistep UniPC. + + Args: + model_output (`torch.Tensor`): + The direct output from learned diffusion model. + timestep (`int`): + The current discrete timestep in the diffusion chain. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + return_dict (`bool`): + Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`. + + Returns: + [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_utils.SchedulerOutput`] is returned, otherwise a + tuple is returned where the first element is the sample tensor. 
+
+        """
+        if self.num_inference_steps is None:
+            raise ValueError(
+                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
+            )
+
+        if self.step_index is None:
+            self._init_step_index(timestep)
+
+        use_corrector = (
+            self.step_index > 0 and self.step_index - 1 not in self.disable_corrector and self.last_sample is not None  # pyright: ignore
+        )
+
+        model_output_convert = self.convert_model_output(model_output, sample=sample)
+
+        if use_corrector:
+            sample = self.multistep_uni_c_bh_update(
+                this_model_output=model_output_convert,
+                last_sample=self.last_sample,
+                this_sample=sample,
+                order=self.this_order,
+            )
+
+        for i in range(self.config.solver_order - 1):
+            self.model_outputs[i] = self.model_outputs[i + 1]
+            self.timestep_list[i] = self.timestep_list[i + 1]
+
+        self.model_outputs[-1] = model_output_convert
+        self.timestep_list[-1] = timestep  # pyright: ignore
+
+        if self.config.lower_order_final:
+            this_order = min(self.config.solver_order, len(self.timesteps) - self.step_index)  # pyright: ignore
+        else:
+            this_order = self.config.solver_order
+
+        self.this_order = min(this_order, self.lower_order_nums + 1)  # warmup for multistep
+        assert self.this_order > 0
+
+        self.last_sample = sample
+        prev_sample = self.multistep_uni_p_bh_update(
+            model_output=model_output,  # pass the original non-converted model output, in case solver-p is used
+            sample=sample,
+            order=self.this_order,
+        )
+
+        if self.lower_order_nums < self.config.solver_order:
+            self.lower_order_nums += 1
+
+        # upon completion increase step index by one
+        self._step_index += 1  # pyright: ignore
+
+        if not return_dict:
+            return (prev_sample, model_output_convert)
+
+        return SchedulerOutput(prev_sample=prev_sample)
+
+    def scale_model_input(self, sample: torch.Tensor, *args, **kwargs) -> torch.Tensor:
+        """
+        Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
+        current timestep.
+
+        Args:
+            sample (`torch.Tensor`):
+                The input sample.
+
+        Returns:
+            `torch.Tensor`:
+                A scaled input sample.
+        """
+        return sample
+
+    # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.add_noise
+    def add_noise(
+        self,
+        original_samples: torch.Tensor,
+        noise: torch.Tensor,
+        timesteps: torch.IntTensor,
+    ) -> torch.Tensor:
+        # Make sure sigmas and timesteps have the same device and dtype as original_samples
+        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
+        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
+            # mps does not support float64
+            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
+            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
+        else:
+            schedule_timesteps = self.timesteps.to(original_samples.device)
+            timesteps = timesteps.to(original_samples.device)
+
+        # begin_index is None when the scheduler is used for training or pipeline does not implement set_begin_index
+        if self.begin_index is None:
+            step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]
+        elif self.step_index is not None:
+            # add_noise is called after first denoising step (for inpainting)
+            step_indices = [self.step_index] * timesteps.shape[0]
+        else:
+            # add noise is called before first denoising step to create initial latent(img2img)
+            step_indices = [self.begin_index] * timesteps.shape[0]
+
+        sigma = sigmas[step_indices].flatten()
+        while len(sigma.shape) < len(original_samples.shape):
+            sigma = sigma.unsqueeze(-1)
+
+        alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma)
+        noisy_samples = alpha_t * original_samples + sigma_t * noise
+        return noisy_samples
+
+    def __len__(self):
+        return self.config.num_train_timesteps
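At this point in the series, the scheduler can already be driven through the standard diffusers predictor-corrector loop. A minimal smoke test with a stand-in velocity model (shapes and step count are arbitrary, and `use_karras_sigmas` still exists in this PATCH 01 state):

```python
# Minimal predictor-corrector smoke test with a stand-in velocity "model".
# Illustrative only; assumes a diffusers build that contains this patch.
import torch

from diffusers.schedulers import FlowUniPCMultistepScheduler

scheduler = FlowUniPCMultistepScheduler(use_karras_sigmas=True, solver_order=2)
scheduler.set_timesteps(num_inference_steps=10, device="cpu")

sample = torch.randn(1, 16, 8, 8)
for t in scheduler.timesteps:
    # A real model would predict the flow (velocity) at timestep t; random
    # values of the right shape are enough to exercise the UniP/UniC updates.
    velocity = torch.randn_like(sample)
    sample = scheduler.step(velocity, t, sample).prev_sample

print(sample.shape)
```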
+ """ + return sample + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.add_noise + def add_noise( + self, + original_samples: torch.Tensor, + noise: torch.Tensor, + timesteps: torch.IntTensor, + ) -> torch.Tensor: + # Make sure sigmas and timesteps have the same device and dtype as original_samples + sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) + if original_samples.device.type == "mps" and torch.is_floating_point(timesteps): + # mps does not support float64 + schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) + timesteps = timesteps.to(original_samples.device, dtype=torch.float32) + else: + schedule_timesteps = self.timesteps.to(original_samples.device) + timesteps = timesteps.to(original_samples.device) + + # begin_index is None when the scheduler is used for training or pipeline does not implement set_begin_index + if self.begin_index is None: + step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] + elif self.step_index is not None: + # add_noise is called after first denoising step (for inpainting) + step_indices = [self.step_index] * timesteps.shape[0] + else: + # add noise is called before first denoising step to create initial latent(img2img) + step_indices = [self.begin_index] * timesteps.shape[0] + + sigma = sigmas[step_indices].flatten() + while len(sigma.shape) < len(original_samples.shape): + sigma = sigma.unsqueeze(-1) + + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) + noisy_samples = alpha_t * original_samples + sigma_t * noise + return noisy_samples + + def __len__(self): + return self.config.num_train_timesteps From 4395869d04d47ef2919787daa0534ffaf22ad501 Mon Sep 17 00:00:00 2001 From: Miguel Martin Date: Thu, 11 Dec 2025 08:27:36 +0000 Subject: [PATCH 02/22] scheduler cleanup --- .../cosmos/pipeline_cosmos25_predict.py | 18 +++++++----------- .../scheduling_flow_unipc_multistep.py | 9 ++++----- 2 files changed, 11 insertions(+), 16 deletions(-) diff --git a/src/diffusers/pipelines/cosmos/pipeline_cosmos25_predict.py b/src/diffusers/pipelines/cosmos/pipeline_cosmos25_predict.py index 3c2f0f221145..316449207c83 100644 --- a/src/diffusers/pipelines/cosmos/pipeline_cosmos25_predict.py +++ b/src/diffusers/pipelines/cosmos/pipeline_cosmos25_predict.py @@ -506,6 +506,7 @@ def __call__( callback_on_step_end_tensor_inputs: List[str] = ["latents"], max_sequence_length: int = 512, shift: float = 5.0, + timestep_scale: float = 0.001, conditional_frame_timestep: float = 0.1, ): r""" @@ -635,14 +636,13 @@ def __call__( max_sequence_length=max_sequence_length, ) - # 4. Prepare timesteps - self.scheduler.set_timesteps(num_inference_steps, shift=shift, device=device) - timesteps = self.scheduler.timesteps - - # 5. Prepare latent variables vae_dtype = self.vae.dtype transformer_dtype = self.transformer.dtype + # 4. 
Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, shift=shift, device=device, scale=timestep_scale) + timesteps = torch.tensor(self.scheduler.timesteps).to(transformer_dtype) + num_frames_in = None if image is not None: # TODO: handle batch_size > 1 @@ -700,12 +700,8 @@ def __call__( if self.interrupt: continue - self._current_timestep = t - - timestep = torch.stack([t]).to(torch.float32) - # TODO: make scheduler scale this instead - timestep *= 0.001 # NOTE: timestep scale - timestep = timestep.to(transformer_dtype) + self._current_timestep = t.cpu().item() + timestep = t.unsqueeze(0) in_latents = cond_mask * cond_latent + (1 - cond_mask) * latents # TODO: could use cond_indicator in_latents = in_latents.to(transformer_dtype) diff --git a/src/diffusers/schedulers/scheduling_flow_unipc_multistep.py b/src/diffusers/schedulers/scheduling_flow_unipc_multistep.py index fb5f210f0ba8..ecdd2adbc808 100644 --- a/src/diffusers/schedulers/scheduling_flow_unipc_multistep.py +++ b/src/diffusers/schedulers/scheduling_flow_unipc_multistep.py @@ -157,6 +157,7 @@ def set_timesteps( sigmas: Optional[List[float]] = None, mu: Optional[Union[float, None]] = None, shift: Optional[Union[float, None]] = None, + scale: float = 0.001, ): """ Sets the discrete timesteps used for the diffusion chain (to be run before inference). @@ -191,10 +192,7 @@ def set_timesteps( shift = self.config.shift sigmas = shift * sigmas / (1 + (shift - 1) * sigmas) # pyright: ignore - if self.config.final_sigmas_type == "sigma_min": - # TODO(migmartin): this raises an error, rewrite this class - sigma_last = ((1 - self.alphas_cumprod[0]) / self.alphas_cumprod[0]) ** 0.5 - elif self.config.final_sigmas_type == "zero": + if self.config.final_sigmas_type == "zero": sigma_last = 0 else: raise ValueError( @@ -205,7 +203,8 @@ def set_timesteps( sigmas = np.concatenate([sigmas, [sigma_last]]).astype(np.float32) # pyright: ignore self.sigmas = torch.from_numpy(sigmas) - self.timesteps = torch.from_numpy(timesteps).to(device=device, dtype=torch.int64) + self.timesteps = torch.from_numpy(timesteps).to(device=device, dtype=torch.float32) + self.timesteps *= scale self.num_inference_steps = len(timesteps) From e6e278e658f7033069e95d7ceeff202f59a0a40e Mon Sep 17 00:00:00 2001 From: Miguel Martin Date: Sat, 13 Dec 2025 07:50:38 +0000 Subject: [PATCH 03/22] simplify inference pipeline --- .../cosmos/pipeline_cosmos25_predict.py | 33 +++++++++---------- .../scheduling_flow_unipc_multistep.py | 11 +++---- 2 files changed, 21 insertions(+), 23 deletions(-) diff --git a/src/diffusers/pipelines/cosmos/pipeline_cosmos25_predict.py b/src/diffusers/pipelines/cosmos/pipeline_cosmos25_predict.py index 316449207c83..005d428f8001 100644 --- a/src/diffusers/pipelines/cosmos/pipeline_cosmos25_predict.py +++ b/src/diffusers/pipelines/cosmos/pipeline_cosmos25_predict.py @@ -506,7 +506,6 @@ def __call__( callback_on_step_end_tensor_inputs: List[str] = ["latents"], max_sequence_length: int = 512, shift: float = 5.0, - timestep_scale: float = 0.001, conditional_frame_timestep: float = 0.1, ): r""" @@ -592,7 +591,7 @@ def __call__( if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs - # 1. Check inputs. Raise error if not correct + # Check inputs. 
Raise error if not correct self.check_inputs(prompt, height, width, prompt_embeds, callback_on_step_end_tensor_inputs) self._guidance_scale = guidance_scale @@ -613,7 +612,7 @@ def __call__( ) self.safety_checker.to("cpu") - # 2. Define call parameters + # Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): @@ -621,7 +620,7 @@ def __call__( else: batch_size = prompt_embeds.shape[0] - # 3. Encode input prompt + # Encode input prompt ( prompt_embeds, negative_prompt_embeds, @@ -639,10 +638,6 @@ def __call__( vae_dtype = self.vae.dtype transformer_dtype = self.transformer.dtype - # 4. Prepare timesteps - self.scheduler.set_timesteps(num_inference_steps, shift=shift, device=device, scale=timestep_scale) - timesteps = torch.tensor(self.scheduler.timesteps).to(transformer_dtype) - num_frames_in = None if image is not None: # TODO: handle batch_size > 1 @@ -690,22 +685,26 @@ def __call__( padding_mask = latents.new_zeros(1, 1, height, width, dtype=transformer_dtype) - # 6. Denoising loop - num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + # Denoising loop + self.scheduler.set_timesteps(num_inference_steps, shift=shift, device=device) + timesteps = self.scheduler.timesteps self._num_timesteps = len(timesteps) + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order - gt_velocity = latents - cond_latent + gt_velocity = (latents - cond_latent) * cond_mask with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): if self.interrupt: continue self._current_timestep = t.cpu().item() - timestep = t.unsqueeze(0) + + # NOTE: equivalent to t / 1000 for FlowUniPCMultistepScheduler (sigmas are in [0, 1], num_train_timesteps=1000) + sigma_t = torch.tensor(self.scheduler.sigmas[i]).unsqueeze(0).to(device=device, dtype=transformer_dtype) in_latents = cond_mask * cond_latent + (1 - cond_mask) * latents # TODO: could use cond_indicator in_latents = in_latents.to(transformer_dtype) - in_timestep = cond_indicator * cond_timestep + (1 - cond_indicator) * timestep + in_timestep = cond_indicator * cond_timestep + (1 - cond_indicator) * sigma_t noise_pred = self.transformer( hidden_states=in_latents, condition_mask=cond_mask, @@ -714,8 +713,8 @@ def __call__( padding_mask=padding_mask, return_dict=False, )[0] - # NOTE: force input video latents for noise_pred by correcting velocity - noise_pred = gt_velocity * cond_mask + noise_pred * (1 - cond_mask) + # NOTE: replace velocity (noise_pred) with gt_velocity for conditioning inputs only + noise_pred = gt_velocity + noise_pred * (1 - cond_mask) if self.do_classifier_free_guidance: noise_pred_neg = self.transformer( @@ -726,8 +725,8 @@ def __call__( padding_mask=padding_mask, return_dict=False, )[0] - # NOTE: force input video latents for noise_pred by correcting velocity - noise_pred_neg = gt_velocity * cond_mask + noise_pred_neg * (1 - cond_mask) + # NOTE: replace velocity (noise_pred) with gt_velocity for conditioning inputs only + noise_pred_neg = gt_velocity + noise_pred_neg * (1 - cond_mask) noise_pred = noise_pred + self.guidance_scale * (noise_pred - noise_pred_neg) latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] diff --git a/src/diffusers/schedulers/scheduling_flow_unipc_multistep.py b/src/diffusers/schedulers/scheduling_flow_unipc_multistep.py index ecdd2adbc808..2cafd095209d 100644 --- a/src/diffusers/schedulers/scheduling_flow_unipc_multistep.py +++ 
b/src/diffusers/schedulers/scheduling_flow_unipc_multistep.py
@@ -157,7 +157,6 @@ def set_timesteps(
         sigmas: Optional[List[float]] = None,
         mu: Optional[Union[float, None]] = None,
         shift: Optional[Union[float, None]] = None,
-        scale: float = 0.001,
     ):
         """
         Sets the discrete timesteps used for the diffusion chain (to be run before inference).
@@ -191,7 +192,10 @@ def set_timesteps(
                 shift = self.config.shift
             sigmas = shift * sigmas / (1 + (shift - 1) * sigmas)  # pyright: ignore
 
-        if self.config.final_sigmas_type == "zero":
+        if self.config.final_sigmas_type == "sigma_min":
+            # TODO(migmartin): this raises an error, rewrite this class
+            sigma_last = ((1 - self.alphas_cumprod[0]) / self.alphas_cumprod[0]) ** 0.5
+        elif self.config.final_sigmas_type == "zero":
             sigma_last = 0
         else:
             raise ValueError(
@@ -203,8 +205,7 @@ def set_timesteps(
         sigmas = np.concatenate([sigmas, [sigma_last]]).astype(np.float32)  # pyright: ignore
 
         self.sigmas = torch.from_numpy(sigmas)
-        self.timesteps = torch.from_numpy(timesteps).to(device=device, dtype=torch.float32)
-        self.timesteps *= scale
+        self.timesteps = torch.from_numpy(timesteps).to(device=device, dtype=torch.int64)
 
         self.num_inference_steps = len(timesteps)
 
@@ -304,7 +305,6 @@ def convert_model_output(
 
         sigma = self.sigmas[self.step_index]
         alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma)
 
-        # print("sigma_t ==>", self.step_index, sigma, sigma_t, alpha_t, sample.shape, model_output.shape)
         if self.predict_x0:
             if self.config.prediction_type == "flow_prediction":
                 sigma_t = self.sigmas[self.step_index]
@@ -317,7 +317,6 @@ def convert_model_output(
 
         if self.config.thresholding:
             x0_pred = self._threshold_sample(x0_pred)
-            # print("self.config.thresholding", self.config.thresholding)
             return x0_pred
         else:
             if self.config.prediction_type == "flow_prediction":
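The conditioning-mask algebra simplified in PATCH 03 above is easy to sanity-check outside the pipeline. A small sketch with stand-in tensors (names and shapes here are illustrative, not the pipeline's actual variables):

```python
# Standalone sketch of the conditioning-mask algebra used in the denoising loop.
# All tensors are stand-ins; shapes are arbitrary (batch, channels, frames, h, w).
import torch

latents = torch.randn(1, 16, 4, 8, 8)       # current noisy latents
cond_latent = torch.randn(1, 16, 4, 8, 8)   # clean latents for conditioning frames
cond_mask = torch.zeros(1, 1, 4, 1, 1)
cond_mask[:, :, 0] = 1.0                     # mark the first frame as conditioning

# Conditioning frames are fed to the model with their clean latents.
in_latents = cond_mask * cond_latent + (1 - cond_mask) * latents

# On conditioning frames the predicted velocity is overwritten with the
# "ground-truth" velocity toward the conditioning latents, so the scheduler
# update is anchored there instead of following the model's prediction.
gt_velocity = (latents - cond_latent) * cond_mask
noise_pred = torch.randn_like(latents)       # stand-in for the transformer output
noise_pred = gt_velocity + noise_pred * (1 - cond_mask)
```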
From dd6f5406964a6985130314d7d7ac084a02233581 Mon Sep 17 00:00:00 2001
From: Miguel Martin
Date: Mon, 15 Dec 2025 21:16:47 +0000
Subject: [PATCH 04/22] cleanup scheduler + tests

---
 .../cosmos/pipeline_cosmos25_predict.py       |   2 +-
 .../scheduling_flow_unipc_multistep.py        | 218 +++++++-----------
 tests/schedulers/test_scheduler_flow_unipc.py | 132 +++++++++++
 3 files changed, 214 insertions(+), 138 deletions(-)
 create mode 100644 tests/schedulers/test_scheduler_flow_unipc.py

diff --git a/src/diffusers/pipelines/cosmos/pipeline_cosmos25_predict.py b/src/diffusers/pipelines/cosmos/pipeline_cosmos25_predict.py
index 005d428f8001..88391cf2104d 100644
--- a/src/diffusers/pipelines/cosmos/pipeline_cosmos25_predict.py
+++ b/src/diffusers/pipelines/cosmos/pipeline_cosmos25_predict.py
@@ -699,7 +699,7 @@ def __call__(
 
             self._current_timestep = t.cpu().item()
 
-            # NOTE: equivalent to t / 1000 for FlowUniPCMultistepScheduler (sigmas are in [0, 1], num_train_timesteps=1000)
+            # NOTE: sigmas are in [0, 1] in FlowUniPCMultistepScheduler
             sigma_t = torch.tensor(self.scheduler.sigmas[i]).unsqueeze(0).to(device=device, dtype=transformer_dtype)
 
             in_latents = cond_mask * cond_latent + (1 - cond_mask) * latents  # TODO: could use cond_indicator
diff --git a/src/diffusers/schedulers/scheduling_flow_unipc_multistep.py b/src/diffusers/schedulers/scheduling_flow_unipc_multistep.py
index 2cafd095209d..dd3dc6ec0177 100644
--- a/src/diffusers/schedulers/scheduling_flow_unipc_multistep.py
+++ b/src/diffusers/schedulers/scheduling_flow_unipc_multistep.py
@@ -1,7 +1,4 @@
-# TODO(migmartin): reduce LOC by using inheritance from UniPCMultistepScheduler
 # Copied from https://github.com/huggingface/diffusers/blob/v0.31.0/src/diffusers/schedulers/scheduling_unipc_multistep.py
-# Convert unipc for flow matching
-# Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved.
 
 import math
 from typing import List, Optional, Tuple, Union
 
 import numpy as np
 import torch
 
 from diffusers.configuration_utils import ConfigMixin, register_to_config
 from diffusers.schedulers.scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
 from diffusers.utils import deprecate
 
 
+def _get_karras_sigmas(self, num_steps: int, sigma_max: float, sigma_min: float, rho: int, final_sigmas_type: str):
+    sigmas = np.arange(num_steps + 1, dtype=np.float32) / num_steps
+    min_inv_rho = sigma_min ** (1 / rho)
+    max_inv_rho = sigma_max ** (1 / rho)
+    sigmas = (max_inv_rho + sigmas * (min_inv_rho - max_inv_rho)) ** rho
+    sigmas = sigmas / (1 + sigmas)
+
+    if final_sigmas_type == "zero":
+        sigma_last = 0
+    else:
+        raise ValueError(
+            f"`final_sigmas_type` must be 'zero' but got {final_sigmas_type}"
+        )
+
+    timesteps = torch.from_numpy(sigmas * self.config.num_train_timesteps).to(torch.int64)
+    sigmas = np.concatenate([sigmas, [sigma_last]]).astype(np.float32)  # pyright: ignore
+    sigmas = torch.from_numpy(sigmas).to(dtype=torch.float32)
+    return sigmas, timesteps
+
+
 class FlowUniPCMultistepScheduler(SchedulerMixin, ConfigMixin):
     """
-    `FlowUniPCMultistepScheduler` is a training-free framework designed for the fast sampling of diffusion models.
+    `FlowUniPCMultistepScheduler` is the UniPC algorithm [1] for flow matching [2], but strictly uses the Karras sigmas [3].
+
+    Note this is a simplified version of `UniPCMultistepScheduler`, as:
+    1. it does not have variance preserving sigmas
+    2. it does not store betas and other variables used by `UniPCMultistepScheduler`
+    3. it assumes prediction_type == "flow_prediction" (this variable is removed from `FlowUniPCMultistepScheduler`)
+
+    References:
+    [1] Zhao, Wenliang, et al. "UniPC: A Unified Predictor-Corrector Framework for Fast Sampling of Diffusion Models" https://arxiv.org/abs/2302.04867
+    [2] Lipman, Yaron, et al. "Flow matching for generative modeling." https://arxiv.org/abs/2210.02747
+    [3] Karras, Tero, et al. "Elucidating the Design Space of Diffusion-Based Generative Models." https://huggingface.co/papers/2206.00364
 
     This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic
     methods the library implements for all schedulers such as loading and saving.
@@ -28,9 +55,6 @@ class FlowUniPCMultistepScheduler(SchedulerMixin, ConfigMixin):
             The UniPC order which can be any positive integer. The effective order of accuracy is `solver_order + 1`
             due to the UniC. It is recommended to use `solver_order=2` for guided sampling, and `solver_order=3` for
             unconditional sampling.
-        prediction_type (`str`, defaults to "flow_prediction"):
-            Prediction type of the scheduler function; must be `flow_prediction` for this scheduler, which predicts the
-            flow of the diffusion process.
         thresholding (`bool`, defaults to `False`):
             Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such
             as Stable Diffusion.
@@ -52,16 +76,6 @@ class FlowUniPCMultistepScheduler(SchedulerMixin, ConfigMixin):
             usually disabled during the first few steps.
         solver_p (`SchedulerMixin`, default `None`):
             Any other scheduler; if specified, the algorithm becomes `solver_p + UniC`.
-        use_karras_sigmas (`bool`, *optional*, defaults to `False`):
-            Whether to use Karras sigmas for step sizes in the noise schedule during the sampling process. If `True`,
-            the sigmas are determined according to a sequence of noise levels {σi}.
- use_exponential_sigmas (`bool`, *optional*, defaults to `False`): - Whether to use exponential sigmas for step sizes in the noise schedule during the sampling process. - timestep_spacing (`str`, defaults to `"linspace"`): - The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and - Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. - steps_offset (`int`, defaults to 0): - An offset added to the inference steps, as required by some model families. final_sigmas_type (`str`, defaults to `"zero"`): The final `sigma` value for the noise schedule during the sampling process. If `"sigma_min"`, the final sigma is the same as the last sigma in the training schedule. If `zero`, the final sigma is set to 0. @@ -75,9 +89,6 @@ def __init__( self, num_train_timesteps: int = 1000, solver_order: int = 2, - prediction_type: str = "flow_prediction", - shift: Optional[float] = 1.0, - use_dynamic_shifting=False, thresholding: bool = False, dynamic_thresholding_ratio: float = 0.995, sample_max_value: float = 1.0, @@ -86,10 +97,10 @@ def __init__( lower_order_final: bool = True, disable_corrector: List[int] = [], solver_p: SchedulerMixin = None, - timestep_spacing: str = "linspace", - steps_offset: int = 0, final_sigmas_type: Optional[str] = "zero", # "zero", "sigma_min" - use_karras_sigmas: bool = False, + rho: int = 7, + sigma_max: float = 200.0, + sigma_min: float = 0.01, ): if solver_type not in ["bh1", "bh2"]: if solver_type in ["midpoint", "heun", "logrho"]: @@ -98,31 +109,21 @@ def __init__( raise NotImplementedError(f"{solver_type} is not implemented for {self.__class__}") self.predict_x0 = predict_x0 - # setable values self.num_inference_steps = None - alphas = np.linspace(1, 1 / num_train_timesteps, num_train_timesteps)[::-1].copy() - sigmas = 1.0 - alphas - sigmas = torch.from_numpy(sigmas).to(dtype=torch.float32) - - if not use_dynamic_shifting: - # when use_dynamic_shifting is True, we apply the timestep shifting on the fly based on the image resolution - sigmas = shift * sigmas / (1 + (shift - 1) * sigmas) # pyright: ignore + self.disable_corrector = disable_corrector - self.sigmas = sigmas - self.timesteps = sigmas * num_train_timesteps + self.sigmas, self.timesteps = _get_karras_sigmas(self, num_train_timesteps, sigma_max, sigma_min, rho, final_sigmas_type) + self.sigma_min = self.sigmas[-1].item() + self.sigma_max = self.sigmas[0].item() - self.model_outputs = [None] * solver_order - self.timestep_list = [None] * solver_order - self.lower_order_nums = 0 - self.disable_corrector = disable_corrector - self.solver_p = solver_p self.last_sample = None self._step_index = None self._begin_index = None + self.model_outputs = [None] * self.config.solver_order + self.timestep_list = [None] * self.config.solver_order + self.lower_order_nums = 0 + self.solver_p = self.config.solver_p - self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication - self.sigma_min = self.sigmas[-1].item() - self.sigma_max = self.sigmas[0].item() @property def step_index(self): @@ -149,14 +150,13 @@ def set_begin_index(self, begin_index: int = 0): """ self._begin_index = begin_index + # Modified from diffusers.schedulers.scheduling_flow_match_euler_discrete.FlowMatchEulerDiscreteScheduler.set_timesteps def set_timesteps( self, num_inference_steps: Union[int, None] = None, device: Union[str, torch.device] = None, sigmas: Optional[List[float]] = None, - mu: Optional[Union[float, None]] = None, - shift: Optional[Union[float, 
None]] = None, ): """ Sets the discrete timesteps used for the diffusion chain (to be run before inference). @@ -167,60 +167,24 @@ def set_timesteps( device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. """ - if self.config.use_dynamic_shifting and mu is None: - raise ValueError(" you have to pass a value for `mu` when `use_dynamic_shifting` is set to be `True`") - - if self.config.use_karras_sigmas: - # force to use the exact sigma used in edm sampler - sigma_max = 200 - sigma_min = 0.01 - rho = 7 - sigmas = np.arange(num_inference_steps + 1) / num_inference_steps - min_inv_rho = sigma_min ** (1 / rho) - max_inv_rho = sigma_max ** (1 / rho) - sigmas = (max_inv_rho + sigmas * (min_inv_rho - max_inv_rho)) ** rho - sigmas = sigmas / (1 + sigmas) - else: - if sigmas is None: - sigmas = np.linspace(self.sigma_max, self.sigma_min, num_inference_steps + 1).copy()[:-1] # pyright: ignore - - if self.config.use_dynamic_shifting: - sigmas = self.time_shift(mu, 1.0, sigmas) # pyright: ignore - else: - if shift is None: - shift = self.config.shift - sigmas = shift * sigmas / (1 + (shift - 1) * sigmas) # pyright: ignore - - if self.config.final_sigmas_type == "sigma_min": - # TODO(migmartin): this raises an error, rewrite this class - sigma_last = ((1 - self.alphas_cumprod[0]) / self.alphas_cumprod[0]) ** 0.5 - elif self.config.final_sigmas_type == "zero": - sigma_last = 0 - else: - raise ValueError( - f"`final_sigmas_type` must be one of 'zero', or 'sigma_min', but got {self.config.final_sigmas_type}" - ) - - timesteps = sigmas * self.config.num_train_timesteps - sigmas = np.concatenate([sigmas, [sigma_last]]).astype(np.float32) # pyright: ignore + assert sigmas is None, "sigmas are not supported for FlowUniPCMultistepScheduler" - self.sigmas = torch.from_numpy(sigmas) - self.timesteps = torch.from_numpy(timesteps).to(device=device, dtype=torch.int64) + self.sigmas, self.timesteps = _get_karras_sigmas(self, num_inference_steps, self.config.sigma_max, self.config.sigma_min, self.config.rho, self.config.final_sigmas_type) + self.num_inference_steps = len(self.timesteps) - self.num_inference_steps = len(timesteps) + self.sigma_min = self.sigmas[-1].item() + self.sigma_max = self.sigmas[0].item() - self.model_outputs = [ - None, - ] * self.config.solver_order - self.lower_order_nums = 0 self.last_sample = None - if self.solver_p: - self.solver_p.set_timesteps(self.num_inference_steps, device=device) - - # add an index counter for schedulers that allow duplicated timesteps self._step_index = None self._begin_index = None - self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + self.model_outputs = [None] * self.config.solver_order + self.timestep_list = [None] * self.config.solver_order + self.lower_order_nums = 0 + self.solver_p = self.config.solver_p + + self.sigmas = self.sigmas.to(device) + self.timesteps = self.timesteps.to(device) # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample def _threshold_sample(self, sample: torch.Tensor) -> torch.Tensor: @@ -256,17 +220,11 @@ def _threshold_sample(self, sample: torch.Tensor) -> torch.Tensor: return sample - # Copied from diffusers.schedulers.scheduling_flow_match_euler_discrete.FlowMatchEulerDiscreteScheduler._sigma_to_t - def _sigma_to_t(self, sigma): - return sigma * self.config.num_train_timesteps - + # Copied from diffusers.schedulers.scheduling_unipc_multistep.UniPCMultistepScheduler._sigma_to_alpha_sigma_t def 
_sigma_to_alpha_sigma_t(self, sigma): return 1 - sigma, sigma - # Copied from diffusers.schedulers.scheduling_flow_match_euler_discrete.set_timesteps - def time_shift(self, mu: float, sigma: float, t: torch.Tensor): - return math.exp(mu) / (math.exp(mu) + (1 / t - 1) ** sigma) - + # Modified from diffusers.schedulers.scheduling_unipc_multistep.UniPCMultistepScheduler.convert_model_output def convert_model_output( self, model_output: torch.Tensor, @@ -303,30 +261,18 @@ def convert_model_output( ) sigma = self.sigmas[self.step_index] - alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) + _, sigma_t = self._sigma_to_alpha_sigma_t(sigma) if self.predict_x0: - if self.config.prediction_type == "flow_prediction": - sigma_t = self.sigmas[self.step_index] - x0_pred = sample - sigma_t * model_output - else: - raise ValueError( - f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`," - " `v_prediction` or `flow_prediction` for the UniPCMultistepScheduler." - ) + sigma_t = self.sigmas[self.step_index] + x0_pred = sample - sigma_t * model_output if self.config.thresholding: x0_pred = self._threshold_sample(x0_pred) return x0_pred else: - if self.config.prediction_type == "flow_prediction": - sigma_t = self.sigmas[self.step_index] - epsilon = sample - (1 - sigma_t) * model_output - else: - raise ValueError( - f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`," - " `v_prediction` or `flow_prediction` for the UniPCMultistepScheduler." - ) + sigma_t = self.sigmas[self.step_index] + epsilon = sample - (1 - sigma_t) * model_output if self.config.thresholding: sigma_t = self.sigmas[self.step_index] @@ -336,12 +282,13 @@ def convert_model_output( return epsilon + # Copied from diffusers.schedulers.scheduling_unipc_multistep.UniPCMultistepScheduler.multistep_uni_p_bh_update def multistep_uni_p_bh_update( self, model_output: torch.Tensor, *args, sample: torch.Tensor = None, - order: int = None, # pyright: ignore + order: int = None, **kwargs, ) -> torch.Tensor: """ @@ -350,8 +297,6 @@ def multistep_uni_p_bh_update( Args: model_output (`torch.Tensor`): The direct output from the learned diffusion model at the current timestep. - prev_timestep (`int`): - The previous discrete timestep in the diffusion chain. sample (`torch.Tensor`): A current instance of a sample created by the diffusion process. 
order (`int`):
@@ -388,7 +333,7 @@ def multistep_uni_p_bh_update(
             x_t = self.solver_p.step(model_output, s0, x).prev_sample
             return x_t
 
-        sigma_t, sigma_s0 = self.sigmas[self.step_index + 1], self.sigmas[self.step_index]  # pyright: ignore
+        sigma_t, sigma_s0 = self.sigmas[self.step_index + 1], self.sigmas[self.step_index]
         alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t)
         alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0)
 
@@ -401,13 +346,13 @@
         rks = []
         D1s = []
         for i in range(1, order):
-            si = self.step_index - i  # pyright: ignore
+            si = self.step_index - i
             mi = model_output_list[-(i + 1)]
             alpha_si, sigma_si = self._sigma_to_alpha_sigma_t(self.sigmas[si])
             lambda_si = torch.log(alpha_si) - torch.log(sigma_si)
             rk = (lambda_si - lambda_s0) / h
             rks.append(rk)
-            D1s.append((mi - m0) / rk)  # pyright: ignore
+            D1s.append((mi - m0) / rk)
 
         rks.append(1.0)
         rks = torch.tensor(rks, device=device)
@@ -450,14 +395,14 @@
         if self.predict_x0:
             x_t_ = sigma_t / sigma_s0 * x - alpha_t * h_phi_1 * m0
             if D1s is not None:
-                pred_res = torch.einsum("k,bkc...->bc...", rhos_p, D1s)  # pyright: ignore
+                pred_res = torch.einsum("k,bkc...->bc...", rhos_p, D1s)
             else:
                 pred_res = 0
             x_t = x_t_ - alpha_t * B_h * pred_res
         else:
             x_t_ = alpha_t / alpha_s0 * x - sigma_t * h_phi_1 * m0
             if D1s is not None:
-                pred_res = torch.einsum("k,bkc...->bc...", rhos_p, D1s)  # pyright: ignore
+                pred_res = torch.einsum("k,bkc...->bc...", rhos_p, D1s)
             else:
                 pred_res = 0
             x_t = x_t_ - sigma_t * B_h * pred_res
 
         x_t = x_t.to(x.dtype)
         return x_t
 
+    # Copied from diffusers.schedulers.scheduling_unipc_multistep.UniPCMultistepScheduler.multistep_uni_c_bh_update
     def multistep_uni_c_bh_update(
         self,
         this_model_output: torch.Tensor,
         *args,
         last_sample: torch.Tensor = None,
         this_sample: torch.Tensor = None,
-        order: int = None,  # pyright: ignore
+        order: int = None,
         **kwargs,
     ) -> torch.Tensor:
         """
@@ -498,17 +444,17 @@
         if len(args) > 1:
             last_sample = args[1]
         else:
-            raise ValueError(" missing`last_sample` as a required keyward argument")
+            raise ValueError("missing `last_sample` as a required keyword argument")
         if this_sample is None:
             if len(args) > 2:
                 this_sample = args[2]
             else:
-                raise ValueError(" missing`this_sample` as a required keyward argument")
+                raise ValueError("missing `this_sample` as a required keyword argument")
         if order is None:
             if len(args) > 3:
                 order = args[3]
             else:
-                raise ValueError(" missing`order` as a required keyward argument")
+                raise ValueError("missing `order` as a required keyword argument")
         if this_timestep is not None:
             deprecate(
                 "this_timestep",
@@ -523,7 +469,7 @@
         x_t = this_sample
         model_t = this_model_output
 
-        sigma_t, sigma_s0 = self.sigmas[self.step_index], self.sigmas[self.step_index - 1]  # pyright: ignore
+        sigma_t, sigma_s0 = self.sigmas[self.step_index], self.sigmas[self.step_index - 1]
         alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t)
         alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0)
 
@@ -536,13 +482,13 @@
         rks = []
         D1s = []
         for i in range(1, order):
-            si = self.step_index - (i + 1)  # pyright: ignore
+            si = self.step_index - (i + 1)
             mi = model_output_list[-(i + 1)]
             alpha_si, sigma_si = self._sigma_to_alpha_sigma_t(self.sigmas[si])
             lambda_si = torch.log(alpha_si) - torch.log(sigma_si)
             rk = (lambda_si - lambda_s0) / h
             rks.append(rk)
-            D1s.append((mi - m0) / rk)  # pyright: ignore
+
D1s.append((mi - m0) / rk) rks.append(1.0) rks = torch.tensor(rks, device=device) @@ -665,10 +611,8 @@ def step( if self.step_index is None: self._init_step_index(timestep) - # print("self.step_index ==> ", self.step_index) - use_corrector = ( - self.step_index > 0 and self.step_index - 1 not in self.disable_corrector and self.last_sample is not None # pyright: ignore + self.step_index > 0 and self.step_index - 1 not in self.disable_corrector and self.last_sample is not None ) model_output_convert = self.convert_model_output(model_output, sample=sample) @@ -686,10 +630,10 @@ def step( self.timestep_list[i] = self.timestep_list[i + 1] self.model_outputs[-1] = model_output_convert - self.timestep_list[-1] = timestep # pyright: ignore + self.timestep_list[-1] = timestep if self.config.lower_order_final: - this_order = min(self.config.solver_order, len(self.timesteps) - self.step_index) # pyright: ignore + this_order = min(self.config.solver_order, len(self.timesteps) - self.step_index) else: this_order = self.config.solver_order @@ -707,7 +651,7 @@ def step( self.lower_order_nums += 1 # upon completion increase step index by one - self._step_index += 1 # pyright: ignore + self._step_index += 1 if not return_dict: return (prev_sample, model_output_convert) diff --git a/tests/schedulers/test_scheduler_flow_unipc.py b/tests/schedulers/test_scheduler_flow_unipc.py new file mode 100644 index 000000000000..c609129beeb5 --- /dev/null +++ b/tests/schedulers/test_scheduler_flow_unipc.py @@ -0,0 +1,132 @@ +import tempfile +import unittest + +import numpy as np +import torch + +from diffusers import FlowUniPCMultistepScheduler + + +class FlowUniPCMultistepSchedulerKarrasTest(unittest.TestCase): + def test_set_timesteps_with_karras_sigmas(self): + num_inference_steps = 4 + scheduler = FlowUniPCMultistepScheduler( + num_train_timesteps=1000, + solver_order=2, + ) + + scheduler.set_timesteps(num_inference_steps=num_inference_steps) + + # TODO: use constants for sigmas and timesteps + sigma_max, sigma_min, rho = ( + scheduler.config.sigma_max, + scheduler.config.sigma_min, + scheduler.config.rho, + ) + ramp = np.arange(num_inference_steps + 1, dtype=np.float32) / num_inference_steps + min_inv_rho = sigma_min ** (1 / rho) + max_inv_rho = sigma_max ** (1 / rho) + expected_sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho + expected_sigmas = expected_sigmas / (1 + expected_sigmas) + expected_sigmas = torch.from_numpy(expected_sigmas.astype(np.float32)) + + # FlowUniPCMultistepScheduler appends a terminal sigma of zero after conversion to torch.Tensor. 
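+        # Worked example (deterministic, matches the defaults sigma_max=200.0, sigma_min=0.01, rho=7):
+        # with num_inference_steps=4 the schedule above evaluates to roughly
+        # [0.9950, 0.9787, 0.8775, 0.3605, 0.0099], followed by the appended terminal 0.0.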
+ self.assertEqual(scheduler.sigmas.shape[0], expected_sigmas.shape[0] + 1) + self.assertTrue(torch.allclose(scheduler.sigmas[:-1], expected_sigmas, atol=1e-6)) + + expected_timesteps = torch.from_numpy( + (expected_sigmas.numpy() * scheduler.config.num_train_timesteps).astype(np.int64) + ) + self.assertTrue(torch.equal(scheduler.timesteps, expected_timesteps)) + self.assertEqual(scheduler.sigmas[-1].item(), 0.0) + + def test_set_timesteps_with_custom_karras_sigmas(self): + num_inference_steps = 3 + sigma_max, sigma_min, rho = 50.0, 0.005, 5.0 + scheduler = FlowUniPCMultistepScheduler( + num_train_timesteps=1000, + solver_order=2, + sigma_max=sigma_max, + sigma_min=sigma_min, + rho=rho, + ) + + scheduler.set_timesteps(num_inference_steps=num_inference_steps) + + # TODO: use constants for sigmas and timesteps + ramp = np.arange(num_inference_steps + 1, dtype=np.float32) / num_inference_steps + min_inv_rho = sigma_min ** (1 / rho) + max_inv_rho = sigma_max ** (1 / rho) + expected_sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho + expected_sigmas = expected_sigmas / (1 + expected_sigmas) + expected_sigmas = torch.from_numpy(expected_sigmas.astype(np.float32)) + + self.assertEqual(scheduler.sigmas.shape[0], expected_sigmas.shape[0] + 1) + self.assertTrue(torch.allclose(scheduler.sigmas[:-1], expected_sigmas, atol=1e-6)) + expected_timesteps = torch.from_numpy( + (expected_sigmas.numpy() * scheduler.config.num_train_timesteps).astype(np.int64) + ) + self.assertTrue(torch.equal(scheduler.timesteps, expected_timesteps)) + self.assertEqual(scheduler.sigmas[-1].item(), 0.0) + + # TODO: add test + # def test_timesteps_respected_when_steps_match_train(self): + # scheduler = FlowUniPCMultistepScheduler( + # num_train_timesteps=8, + # solver_order=2, + # ) + # before_sigmas = scheduler.sigmas.clone() + # scheduler.set_timesteps(num_inference_steps=scheduler.config.num_train_timesteps) + # self.assertTrue(torch.allclose(scheduler.sigmas[:-1], before_sigmas)) + # self.assertEqual(scheduler.sigmas[-1].item(), 0.0) + + def test_step_preserves_dtype_and_device(self): + scheduler = FlowUniPCMultistepScheduler( + num_train_timesteps=10, + solver_order=2, + ) + scheduler.set_timesteps(num_inference_steps=4, device="cpu") + + sample = torch.randn(2, 3, 4, dtype=torch.float16) + residual = torch.randn_like(sample) + timestep = scheduler.timesteps[0] + + output = scheduler.step(residual, timestep, sample).prev_sample + self.assertEqual(output.dtype, sample.dtype) + self.assertEqual(output.device, sample.device) + + def test_save_and_load_round_trip(self): + scheduler = FlowUniPCMultistepScheduler( + num_train_timesteps=12, + solver_order=2, + ) + scheduler.set_timesteps(num_inference_steps=6) + + with tempfile.TemporaryDirectory() as tmpdir: + scheduler.save_config(tmpdir) + loaded = FlowUniPCMultistepScheduler.from_pretrained(tmpdir) + + loaded.set_timesteps(num_inference_steps=6) + self.assertTrue(torch.equal(scheduler.timesteps, loaded.timesteps)) + self.assertTrue(torch.allclose(scheduler.sigmas, loaded.sigmas)) + + def test_full_loop_no_nan(self): + torch.manual_seed(0) + scheduler = FlowUniPCMultistepScheduler( + num_train_timesteps=16, + solver_order=2, + sigma_max=1.0, + sigma_min=0.01, + ) + scheduler.set_timesteps(num_inference_steps=6) + + def model(sample, t): + return 0.05 * torch.tanh(sample) + + sample = torch.ones(2, 3, 4) + for t in scheduler.timesteps: + residual = model(sample, t) + sample = scheduler.step(residual, t, sample).prev_sample + + 
self.assertFalse(torch.isnan(sample).any()) + self.assertEqual(sample.shape, (2, 3, 4)) \ No newline at end of file From 828788ec2639efa29ad820291ad7fa4e3feb0410 Mon Sep 17 00:00:00 2001 From: Miguel Martin Date: Tue, 16 Dec 2025 18:56:06 +0000 Subject: [PATCH 05/22] Basic tests for flow unipc --- .../cosmos/pipeline_cosmos25_predict.py | 23 +- .../scheduling_flow_unipc_multistep.py | 72 ++-- .../pipelines/cosmos/test_cosmos25_predict.py | 361 ++++++++++++++++++ tests/schedulers/test_scheduler_flow_unipc.py | 107 +++--- 4 files changed, 454 insertions(+), 109 deletions(-) create mode 100644 tests/pipelines/cosmos/test_cosmos25_predict.py diff --git a/src/diffusers/pipelines/cosmos/pipeline_cosmos25_predict.py b/src/diffusers/pipelines/cosmos/pipeline_cosmos25_predict.py index 88391cf2104d..a1fa13c6b9d6 100644 --- a/src/diffusers/pipelines/cosmos/pipeline_cosmos25_predict.py +++ b/src/diffusers/pipelines/cosmos/pipeline_cosmos25_predict.py @@ -118,7 +118,7 @@ def retrieve_latents( ... ).frames[0] >>> export_to_video(video, "video2world.mp4", fps=16) - >>> # To produce a single-frame image instead of a world clip, set num_frames=1 and + >>> # To produce a single-frame image instead of a world (video) clip, set num_frames=1 and >>> # save the first frame: pipe(..., num_frames=1).frames[0][0]. ``` """ @@ -201,7 +201,6 @@ def _get_prompt_embeds( dtype = dtype or self.text_encoder.dtype prompt = [prompt] if isinstance(prompt, str) else prompt - # Tokenize prompts input_ids_batch = [] for sample_idx in range(len(prompt)): @@ -257,7 +256,7 @@ def _get_prompt_embeds( return prompt_embeds - # Copied from diffusers.pipelines.cosmos.pipeline_cosmos_text2world.CosmosTextToWorldPipeline.encode_prompt with num_videos_per_prompt->num_videos_per_prompt + # Modified from diffusers.pipelines.cosmos.pipeline_cosmos_text2world.CosmosTextToWorldPipeline.encode_prompt def encode_prompt( self, prompt: Union[str, List[str]], @@ -505,7 +504,6 @@ def __call__( ] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], max_sequence_length: int = 512, - shift: float = 5.0, conditional_frame_timestep: float = 0.1, ): r""" @@ -656,13 +654,16 @@ def __call__( video = self.video_processor.preprocess_video(video, height, width) # pad with last frame (for video2world) - if video.shape[2] < num_frames: + num_frames_out = num_frames + if video.shape[2] < num_frames_out: assert batch_size == 1, "batch_size must be 1 for padding frames" - n_pad_frames = num_frames - num_frames_in + n_pad_frames = num_frames_out - num_frames_in last_frame = video[0, :, -1:, :, :] # [C, T==1, H, W] pad_frames = last_frame.repeat(1, 1, n_pad_frames, 1, 1) # [B, C, T, H, W] video = torch.cat((video, pad_frames), dim=2) + assert num_frames_in <= num_frames_out, f"expected ({num_frames_in=}) <= ({num_frames_out=})" + video = video.to(device=device, dtype=vae_dtype) num_channels_latents = self.transformer.config.in_channels - 1 @@ -686,7 +687,7 @@ def __call__( padding_mask = latents.new_zeros(1, 1, height, width, dtype=transformer_dtype) # Denoising loop - self.scheduler.set_timesteps(num_inference_steps, shift=shift, device=device) + self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps self._num_timesteps = len(timesteps) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order @@ -699,10 +700,10 @@ def __call__( self._current_timestep = t.cpu().item() - # NOTE: sigmas are in [0, 1] in FlowUniPCMultistepScheduler - sigma_t = 
torch.tensor(self.scheduler.sigmas[i]).unsqueeze(0).to(device=device, dtype=transformer_dtype)
+                # NOTE: assumes sigma(t) \in [0, 1]
+                sigma_t = torch.tensor(self.scheduler.sigmas[i].item()).unsqueeze(0).to(device=device, dtype=transformer_dtype)
 
-                in_latents = cond_mask * cond_latent + (1 - cond_mask) * latents  # TODO: could use cond_indicator
+                in_latents = cond_mask * cond_latent + (1 - cond_mask) * latents
                 in_latents = in_latents.to(transformer_dtype)
                 in_timestep = cond_indicator * cond_timestep + (1 - cond_indicator) * sigma_t
                 noise_pred = self.transformer(
@@ -725,7 +726,7 @@ def __call__(
                     padding_mask=padding_mask,
                     return_dict=False,
                 )[0]
-                # NOTE: replace velocity (noise_pred) with gt_velocity for conditioning inputs only
+                # NOTE: replace velocity (noise_pred_neg) with gt_velocity for conditioning inputs only
                 noise_pred_neg = gt_velocity + noise_pred_neg * (1 - cond_mask)
                 noise_pred = noise_pred + self.guidance_scale * (noise_pred - noise_pred_neg)
 
diff --git a/src/diffusers/schedulers/scheduling_flow_unipc_multistep.py b/src/diffusers/schedulers/scheduling_flow_unipc_multistep.py
index dd3dc6ec0177..e2f2508abd95 100644
--- a/src/diffusers/schedulers/scheduling_flow_unipc_multistep.py
+++ b/src/diffusers/schedulers/scheduling_flow_unipc_multistep.py
@@ -1,7 +1,4 @@
-# Copied from https://github.com/huggingface/diffusers/blob/v0.31.0/src/diffusers/schedulers/scheduling_unipc_multistep.py
-
-import math
-from typing import List, Optional, Tuple, Union
+from typing import List, Literal, Optional, Tuple, Union
 
 import numpy as np
 import torch
@@ -11,35 +8,38 @@
 from diffusers.utils import deprecate
 
 
-def _get_karras_sigmas(self, num_steps: int, sigma_max: float, sigma_min: float, rho: int, final_sigmas_type: str):
+def _get_karras_sigmas(num_train_steps: int, num_steps: int, sigma_max: float, sigma_min: float, rho: int, final_sigmas_type: str):
     sigmas = np.arange(num_steps + 1, dtype=np.float32) / num_steps
     min_inv_rho = sigma_min ** (1 / rho)
     max_inv_rho = sigma_max ** (1 / rho)
     sigmas = (max_inv_rho + sigmas * (min_inv_rho - max_inv_rho)) ** rho
     sigmas = sigmas / (1 + sigmas)
 
-    if self.config.final_sigmas_type == "zero":
+    if final_sigmas_type == "zero":
         sigma_last = 0
+    elif final_sigmas_type == "sigma_min":
+        sigma_last = sigmas[-1]
     else:
         raise ValueError(
-            f"`final_sigmas_type` must be 'zero' but got {self.config.final_sigmas_type}"
+            f"`final_sigmas_type` must be 'zero' or 'sigma_min' but got {final_sigmas_type}"
        )
 
-    timesteps = torch.from_numpy(sigmas * self.config.num_train_timesteps).to(torch.int64)
-    sigmas = np.concatenate([sigmas, [sigma_last]]).astype(np.float32)  # pyright: ignore
+    timesteps = torch.from_numpy(sigmas * num_train_steps).to(torch.int64)
+    sigmas = np.concatenate([sigmas, [sigma_last]]).astype(np.float32)
     sigmas = torch.from_numpy(sigmas).to(dtype=torch.float32)
     return sigmas, timesteps
 
 
 class FlowUniPCMultistepScheduler(SchedulerMixin, ConfigMixin):
     """
-    `FlowUniPCMultistepScheduler` is the UniPC algorithm [1] for flow matching [2], but strictly uses the Karras sigmas [3].
+    `FlowUniPCMultistepScheduler` is the UniPC algorithm [1] for flow
+    matching [2], but strictly uses the Karras sigmas [3] (i.e. it follows the EDMEulerScheduler).
+
+    Note this is a simplified version of `UniPCMultistepScheduler`, as it:
+    1. Does not have variance preserving sigmas
+    2. Does not store betas and other variables used by `UniPCMultistepScheduler`
+    3.
Assumes prediction_type == "flow_prediction" (this parameter is removed)
 
-    Note this is a simplified version of `UniPCMultistepScheduler`, as:
-    1. it does not have variance preserving sigmas
-    2. it does not store betas and other variables used by `UniPCMultistepScheduler`
-    3. it assumes prediction_type == "flow_prediction" (this variable is removed from `FlowUniPCMultistepScheduler`)
-
     References:
     [1] Zhao, Wenliang, et al. "UniPC: A Unified Predictor-Corrector Framework for Fast Sampling of Diffusion Models" https://arxiv.org/abs/2302.04867
     [2] Lipman, Yaron, et al. "Flow matching for generative modeling." https://arxiv.org/abs/2210.02747
@@ -97,7 +97,7 @@ def __init__(
         lower_order_final: bool = True,
         disable_corrector: List[int] = [],
         solver_p: SchedulerMixin = None,
-        final_sigmas_type: Optional[str] = "zero",  # "zero", "sigma_min"
+        final_sigmas_type: Literal["zero", "sigma_min"] = "zero",
         rho: int = 7,
         sigma_max: float = 200.0,
         sigma_min: float = 0.01,
@@ -109,21 +109,15 @@
             raise NotImplementedError(f"{solver_type} is not implemented for {self.__class__}")
 
         self.predict_x0 = predict_x0
-        self.num_inference_steps = None
         self.disable_corrector = disable_corrector
+        self.solver_p = solver_p
+        self.num_inference_steps = None
 
-        self.sigmas, self.timesteps = _get_karras_sigmas(self, num_train_timesteps, sigma_max, sigma_min, rho, final_sigmas_type)
+        self.sigmas, self.timesteps = _get_karras_sigmas(num_train_timesteps, num_train_timesteps, sigma_max, sigma_min, rho, final_sigmas_type)
         self.sigma_min = self.sigmas[-1].item()
         self.sigma_max = self.sigmas[0].item()
 
-        self.last_sample = None
-        self._step_index = None
-        self._begin_index = None
-        self.model_outputs = [None] * self.config.solver_order
-        self.timestep_list = [None] * self.config.solver_order
-        self.lower_order_nums = 0
-        self.solver_p = self.config.solver_p
-
+        self._reset_state(solver_order)
 
     @property
     def step_index(self):
@@ -169,22 +163,29 @@
         """
         assert sigmas is None, "sigmas are not supported for FlowUniPCMultistepScheduler"
 
-        self.sigmas, self.timesteps = _get_karras_sigmas(self, num_inference_steps, self.config.sigma_max, self.config.sigma_min, self.config.rho, self.config.final_sigmas_type)
+        self.sigmas, self.timesteps = _get_karras_sigmas(self.config.num_train_timesteps, num_inference_steps, self.config.sigma_max, self.config.sigma_min, self.config.rho, self.config.final_sigmas_type)
         self.num_inference_steps = len(self.timesteps)
 
         self.sigma_min = self.sigmas[-1].item()
         self.sigma_max = self.sigmas[0].item()
 
+        self.sigmas = self.sigmas.to(device)
+        self.timesteps = self.timesteps.to(device)
+        self._reset_state(device=device)
+
+    def _reset_state(self, solver_order: Optional[int] = None, device: Union[str, torch.device, None] = None):
+        """
+        Resets the solver state variables (and, if set, the `solver_p` schedule).
+        """
+        solver_order = solver_order or self.config.solver_order
+        self.model_outputs = [None] * solver_order
+        self.timestep_list = [None] * solver_order
+        self.lower_order_nums = 0
         self.last_sample = None
         self._step_index = None
         self._begin_index = None
-        self.model_outputs = [None] * self.config.solver_order
-        self.timestep_list = [None] * self.config.solver_order
-        self.lower_order_nums = 0
-        self.solver_p = self.config.solver_p
-
-        self.sigmas = self.sigmas.to(device)
-        self.timesteps = self.timesteps.to(device)
+        # solver_p is only re-initialized once a schedule exists (i.e. after set_timesteps)
+        if self.solver_p and self.num_inference_steps is not None:
+            self.solver_p.set_timesteps(self.num_inference_steps, device=device)
 
     # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample
     def _threshold_sample(self, sample: torch.Tensor) -> torch.Tensor:
@@ -575,6
+576,7 @@ def _init_step_index(self, timestep): else: self._step_index = self._begin_index + # Modified from diffusers.schedulers.scheduling_unipc_multistep.UniPCMultistepScheduler.step def step( self, model_output: torch.Tensor, @@ -638,7 +640,7 @@ def step( this_order = self.config.solver_order self.this_order = min(this_order, self.lower_order_nums + 1) # warmup for multistep - assert self.this_order > 0 + assert self.this_order > 0, "expected this_order > 0, this could be due to duplicate timesteps" self.last_sample = sample prev_sample = self.multistep_uni_p_bh_update( diff --git a/tests/pipelines/cosmos/test_cosmos25_predict.py b/tests/pipelines/cosmos/test_cosmos25_predict.py new file mode 100644 index 000000000000..c005c72ef434 --- /dev/null +++ b/tests/pipelines/cosmos/test_cosmos25_predict.py @@ -0,0 +1,361 @@ +# Copyright 2025 The HuggingFace Team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import json +import os +import tempfile +import unittest +from types import SimpleNamespace +from typing import List + +import numpy as np +import torch + +from diffusers import AutoencoderKLWan, Cosmos25PredictBase, CosmosTransformer3DModel, FlowUniPCMultistepScheduler + +from ...testing_utils import enable_full_determinism, torch_device +from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import PipelineTesterMixin, to_np +from .cosmos_guardrail import DummyCosmosSafetyChecker + + +enable_full_determinism() + + +class DummyPredictTokenizer: + model_input_names = ["input_ids"] + + def __init__(self, vocab_size: int = 128): + self.vocab_size = vocab_size + + @classmethod + def from_pretrained(cls, *args, **kwargs): + return cls() + + def apply_chat_template( + self, + conversations: List[dict], + tokenize: bool = True, + add_generation_prompt: bool = False, + add_vision_id: bool = False, + max_length: int = 16, + truncation: bool = True, + padding: str = "max_length", + ): + return list(range(max_length)) + + def save_pretrained(self, save_directory: str): + os.makedirs(save_directory, exist_ok=True) + with open(os.path.join(save_directory, "tokenizer_config.json"), "w") as f: + json.dump({"vocab_size": self.vocab_size}, f) + + +class DummyPredictTextEncoder(torch.nn.Module): + config_name = "config.json" + + def __init__(self, vocab_size: int = 128, hidden_size: int = 16): + super().__init__() + self.emb = torch.nn.Embedding(vocab_size, hidden_size) + self.proj = torch.nn.Linear(hidden_size, hidden_size) + self.config = SimpleNamespace(hidden_size=hidden_size) + + @property + def dtype(self): + return next(self.parameters()).dtype + + @classmethod + def from_pretrained(cls, save_directory: str, **kwargs): + return cls() + + def save_pretrained(self, save_directory: str, safe_serialization: bool = False): + os.makedirs(save_directory, exist_ok=True) + torch.save(self.state_dict(), os.path.join(save_directory, "pytorch_model.bin")) + with open(os.path.join(save_directory, self.config_name), 
"w") as f: + json.dump({"vocab_size": self.emb.num_embeddings, "hidden_size": self.emb.embedding_dim}, f) + + def forward(self, input_ids: torch.LongTensor, output_hidden_states: bool = False, **kwargs): + hidden = self.emb(input_ids) + hidden = self.proj(hidden) + hidden_states = ( + hidden, + hidden * 0.5, + hidden * 0.25, + ) + return SimpleNamespace(hidden_states=hidden_states) + + +class Cosmos25PredictBaseWrapper(Cosmos25PredictBase): + @staticmethod + def from_pretrained(*args, **kwargs): + kwargs["safety_checker"] = DummyCosmosSafetyChecker() + return Cosmos25PredictBase.from_pretrained(*args, **kwargs) + + +class Cosmos25PredictPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = Cosmos25PredictBaseWrapper + params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + supports_dduf = False + test_xformers_attention = False + test_layerwise_casting = True + test_group_offloading = True + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = CosmosTransformer3DModel( + in_channels=16 + 1, + out_channels=16, + num_attention_heads=2, + attention_head_dim=16, + num_layers=2, + mlp_ratio=2, + text_embed_dim=32, + adaln_lora_dim=4, + max_size=(4, 32, 32), + patch_size=(1, 2, 2), + rope_scale=(2.0, 1.0, 1.0), + concat_padding_mask=True, + extra_pos_embed_type="learnable", + ) + + torch.manual_seed(0) + vae = AutoencoderKLWan( + base_dim=3, + z_dim=16, + dim_mult=[1, 1, 1, 1], + num_res_blocks=1, + temperal_downsample=[False, True, True], + ) + + torch.manual_seed(0) + scheduler = FlowUniPCMultistepScheduler() + + text_encoder = DummyPredictTextEncoder(hidden_size=16) + tokenizer = DummyPredictTokenizer() + + components = { + "transformer": transformer, + "vae": vae, + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "safety_checker": DummyCosmosSafetyChecker(), + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + inputs = { + "prompt": "dance monkey", + "negative_prompt": "bad quality", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 3.0, + "height": 32, + "width": 32, + "num_frames": 3, + "max_sequence_length": 16, + "output_type": "pt", + } + + return inputs + + def test_components_function(self): + init_components = self.get_dummy_components() + init_components = {k: v for k, v in init_components.items() if not isinstance(v, (str, int, float))} + pipe = self.pipeline_class(**init_components) + self.assertTrue(hasattr(pipe, "components")) + self.assertTrue(set(pipe.components.keys()) == set(init_components.keys())) + + def test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + video = pipe(**inputs).frames + generated_video = video[0] + self.assertEqual(generated_video.shape, (3, 3, 32, 32)) + self.assertTrue(torch.isfinite(generated_video).all()) + + def test_callback_inputs(self): + sig = 
inspect.signature(self.pipeline_class.__call__) + has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters + has_callback_step_end = "callback_on_step_end" in sig.parameters + + if not (has_callback_tensor_inputs and has_callback_step_end): + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + self.assertTrue( + hasattr(pipe, "_callback_tensor_inputs"), + f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", + ) + + def callback_inputs_subset(pipe, i, t, callback_kwargs): + for tensor_name in callback_kwargs.keys(): + assert tensor_name in pipe._callback_tensor_inputs + return callback_kwargs + + def callback_inputs_all(pipe, i, t, callback_kwargs): + for tensor_name in pipe._callback_tensor_inputs: + assert tensor_name in callback_kwargs + for tensor_name in callback_kwargs.keys(): + assert tensor_name in pipe._callback_tensor_inputs + return callback_kwargs + + inputs = self.get_dummy_inputs(torch_device) + + inputs["callback_on_step_end"] = callback_inputs_subset + inputs["callback_on_step_end_tensor_inputs"] = ["latents"] + _ = pipe(**inputs)[0] + + inputs["callback_on_step_end"] = callback_inputs_all + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + _ = pipe(**inputs)[0] + + def callback_inputs_change_tensor(pipe, i, t, callback_kwargs): + is_last = i == (pipe.num_timesteps - 1) + if is_last: + callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"]) + return callback_kwargs + + inputs["callback_on_step_end"] = callback_inputs_change_tensor + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + assert output.abs().sum() < 1e10 + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=1e-2) + + def test_attention_slicing_forward_pass( + self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3 + ): + if not getattr(self, "test_attention_slicing", True): + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + output_without_slicing = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=1) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing1 = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=2) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing2 = pipe(**inputs)[0] + + if test_max_difference: + max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max() + max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max() + self.assertLess( + max(max_diff1, max_diff2), + expected_max_diff, + "Attention slicing should not affect the inference results", + ) + + def test_save_load_optional_components(self, expected_max_difference=1e-4): + self.pipeline_class._optional_components.remove("safety_checker") + super().test_save_load_optional_components(expected_max_difference=expected_max_difference) + 
self.pipeline_class._optional_components.append("safety_checker") + + def test_serialization_with_variants(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + model_components = [ + component_name + for component_name, component in pipe.components.items() + if isinstance(component, torch.nn.Module) + ] + model_components.remove("safety_checker") + variant = "fp16" + + with tempfile.TemporaryDirectory() as tmpdir: + pipe.save_pretrained(tmpdir, variant=variant, safe_serialization=False) + + with open(f"{tmpdir}/model_index.json", "r") as f: + config = json.load(f) + + for subfolder in os.listdir(tmpdir): + if not os.path.isfile(subfolder) and subfolder in model_components: + folder_path = os.path.join(tmpdir, subfolder) + is_folder = os.path.isdir(folder_path) and subfolder in config + assert is_folder and any(p.split(".")[1].startswith(variant) for p in os.listdir(folder_path)) + + def test_torch_dtype_dict(self): + components = self.get_dummy_components() + if not components: + self.skipTest("No dummy components defined.") + + pipe = self.pipeline_class(**components) + + specified_key = next(iter(components.keys())) + + with tempfile.TemporaryDirectory(ignore_cleanup_errors=True) as tmpdirname: + pipe.save_pretrained(tmpdirname, safe_serialization=False) + torch_dtype_dict = {specified_key: torch.bfloat16, "default": torch.float16} + loaded_pipe = self.pipeline_class.from_pretrained( + tmpdirname, safety_checker=DummyCosmosSafetyChecker(), torch_dtype=torch_dtype_dict + ) + + for name, component in loaded_pipe.components.items(): + if name == "safety_checker": + continue + if isinstance(component, torch.nn.Module) and hasattr(component, "dtype"): + expected_dtype = torch_dtype_dict.get(name, torch_dtype_dict.get("default", torch.float32)) + self.assertEqual( + component.dtype, + expected_dtype, + f"Component '{name}' has dtype {component.dtype} but expected {expected_dtype}", + ) + + @unittest.skip( + "The pipeline should not be runnable without a safety checker. The test creates a pipeline without passing in " + "a safety checker, which makes the pipeline default to the actual Cosmos Guardrail. The Cosmos Guardrail is " + "too large and slow to run on CI." 
+ ) + def test_encode_prompt_works_in_isolation(self): + pass diff --git a/tests/schedulers/test_scheduler_flow_unipc.py b/tests/schedulers/test_scheduler_flow_unipc.py index c609129beeb5..343507bef21e 100644 --- a/tests/schedulers/test_scheduler_flow_unipc.py +++ b/tests/schedulers/test_scheduler_flow_unipc.py @@ -1,86 +1,62 @@ import tempfile import unittest -import numpy as np import torch from diffusers import FlowUniPCMultistepScheduler class FlowUniPCMultistepSchedulerKarrasTest(unittest.TestCase): - def test_set_timesteps_with_karras_sigmas(self): + def test_set_timesteps(self): num_inference_steps = 4 + num_train_timesteps = 1000 scheduler = FlowUniPCMultistepScheduler( - num_train_timesteps=1000, + num_train_timesteps=num_train_timesteps, solver_order=2, ) - scheduler.set_timesteps(num_inference_steps=num_inference_steps) - # TODO: use constants for sigmas and timesteps - sigma_max, sigma_min, rho = ( - scheduler.config.sigma_max, - scheduler.config.sigma_min, - scheduler.config.rho, - ) - ramp = np.arange(num_inference_steps + 1, dtype=np.float32) / num_inference_steps - min_inv_rho = sigma_min ** (1 / rho) - max_inv_rho = sigma_max ** (1 / rho) - expected_sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho - expected_sigmas = expected_sigmas / (1 + expected_sigmas) - expected_sigmas = torch.from_numpy(expected_sigmas.astype(np.float32)) - - # FlowUniPCMultistepScheduler appends a terminal sigma of zero after conversion to torch.Tensor. - self.assertEqual(scheduler.sigmas.shape[0], expected_sigmas.shape[0] + 1) - self.assertTrue(torch.allclose(scheduler.sigmas[:-1], expected_sigmas, atol=1e-6)) - - expected_timesteps = torch.from_numpy( - (expected_sigmas.numpy() * scheduler.config.num_train_timesteps).astype(np.int64) - ) - self.assertTrue(torch.equal(scheduler.timesteps, expected_timesteps)) - self.assertEqual(scheduler.sigmas[-1].item(), 0.0) + # 0 appended to end for sigmas + expected_sigmas = [0.9950248599052429, 0.9787454605102539, 0.8774884343147278, 0.3604971766471863, 0.009900986216962337, 0.0] + expected_sigmas = torch.tensor(expected_sigmas) + expected_timesteps = (expected_sigmas * num_train_timesteps).to(torch.int64) + expected_timesteps = expected_timesteps[0:-1] + self.assertTrue(torch.allclose(scheduler.sigmas, expected_sigmas)) + self.assertTrue(torch.all(expected_timesteps == scheduler.timesteps)) - def test_set_timesteps_with_custom_karras_sigmas(self): - num_inference_steps = 3 - sigma_max, sigma_min, rho = 50.0, 0.005, 5.0 + + def test_inference_train_same_schedule(self): + num_inference_steps = 4 + num_train_timesteps = num_inference_steps scheduler = FlowUniPCMultistepScheduler( - num_train_timesteps=1000, + num_train_timesteps=num_train_timesteps, solver_order=2, - sigma_max=sigma_max, - sigma_min=sigma_min, - rho=rho, ) - + before_sigmas = scheduler.sigmas.clone() scheduler.set_timesteps(num_inference_steps=num_inference_steps) + after_sigmas = scheduler.sigmas - # TODO: use constants for sigmas and timesteps - ramp = np.arange(num_inference_steps + 1, dtype=np.float32) / num_inference_steps - min_inv_rho = sigma_min ** (1 / rho) - max_inv_rho = sigma_max ** (1 / rho) - expected_sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho - expected_sigmas = expected_sigmas / (1 + expected_sigmas) - expected_sigmas = torch.from_numpy(expected_sigmas.astype(np.float32)) - - self.assertEqual(scheduler.sigmas.shape[0], expected_sigmas.shape[0] + 1) - self.assertTrue(torch.allclose(scheduler.sigmas[:-1], expected_sigmas, atol=1e-6)) - 
expected_timesteps = torch.from_numpy( - (expected_sigmas.numpy() * scheduler.config.num_train_timesteps).astype(np.int64) + self.assertTrue(torch.allclose(before_sigmas, after_sigmas)) + + def test_set_timesteps_with_nondefault_args(self): + num_inference_steps = 4 + scheduler = FlowUniPCMultistepScheduler( + sigma_max=50.0, + sigma_min=0.005, + rho=5.0, + final_sigmas_type="sigma_min", ) - self.assertTrue(torch.equal(scheduler.timesteps, expected_timesteps)) - self.assertEqual(scheduler.sigmas[-1].item(), 0.0) - - # TODO: add test - # def test_timesteps_respected_when_steps_match_train(self): - # scheduler = FlowUniPCMultistepScheduler( - # num_train_timesteps=8, - # solver_order=2, - # ) - # before_sigmas = scheduler.sigmas.clone() - # scheduler.set_timesteps(num_inference_steps=scheduler.config.num_train_timesteps) - # self.assertTrue(torch.allclose(scheduler.sigmas[:-1], before_sigmas)) - # self.assertEqual(scheduler.sigmas[-1].item(), 0.0) - - def test_step_preserves_dtype_and_device(self): + + scheduler.set_timesteps(num_inference_steps=num_inference_steps) + expected_sigmas = torch.tensor([0.9803921580314636, + 0.9388325214385986, + 0.7652841210365295, + 0.2545345723628998, + 0.004975131247192621, + 0.004975131247192621]) + self.assertTrue(torch.allclose(scheduler.sigmas, expected_sigmas)) + + def test_step(self): scheduler = FlowUniPCMultistepScheduler( num_train_timesteps=10, solver_order=2, @@ -92,6 +68,7 @@ def test_step_preserves_dtype_and_device(self): timestep = scheduler.timesteps[0] output = scheduler.step(residual, timestep, sample).prev_sample + self.assertEqual(output.shape, (2, 3, 4)) self.assertEqual(output.dtype, sample.dtype) self.assertEqual(output.device, sample.device) @@ -99,6 +76,10 @@ def test_save_and_load_round_trip(self): scheduler = FlowUniPCMultistepScheduler( num_train_timesteps=12, solver_order=2, + sigma_max=50.0, + sigma_min=0.005, + rho=5.0, + final_sigmas_type="sigma_min", ) scheduler.set_timesteps(num_inference_steps=6) @@ -127,6 +108,6 @@ def model(sample, t): for t in scheduler.timesteps: residual = model(sample, t) sample = scheduler.step(residual, t, sample).prev_sample + self.assertFalse(torch.isnan(sample).any()) - self.assertFalse(torch.isnan(sample).any()) - self.assertEqual(sample.shape, (2, 3, 4)) \ No newline at end of file + self.assertEqual(sample.shape, (2, 3, 4)) From 899be86b8b5ce34a1035350ed0c4e02c75590d11 Mon Sep 17 00:00:00 2001 From: Miguel Martin Date: Tue, 16 Dec 2025 22:57:44 +0000 Subject: [PATCH 06/22] working b2b inference --- src/diffusers/pipelines/cosmos/pipeline_cosmos25_predict.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/diffusers/pipelines/cosmos/pipeline_cosmos25_predict.py b/src/diffusers/pipelines/cosmos/pipeline_cosmos25_predict.py index a1fa13c6b9d6..8fc7b937e94f 100644 --- a/src/diffusers/pipelines/cosmos/pipeline_cosmos25_predict.py +++ b/src/diffusers/pipelines/cosmos/pipeline_cosmos25_predict.py @@ -608,7 +608,6 @@ def __call__( f"Cosmos Guardrail detected unsafe text in the prompt: {p}. Please ensure that the " f"prompt abides by the NVIDIA Open Model License Agreement." 
) - self.safety_checker.to("cpu") # Define call parameters if prompt is not None and isinstance(prompt, str): @@ -771,7 +770,6 @@ def __call__( video = np.stack(video_batch).astype(np.float32) / 255.0 * 2 - 1 video = torch.from_numpy(video).permute(0, 4, 1, 2, 3) video = self.video_processor.postprocess_video(video, output_type=output_type) - self.safety_checker.to("cpu") else: video = latents From 2cc2b564f90adea94cb944f23fac993835c940ae Mon Sep 17 00:00:00 2001 From: Miguel Martin Date: Tue, 16 Dec 2025 23:14:52 +0000 Subject: [PATCH 07/22] Rename everything --- scripts/convert_cosmos_to_diffusers.py | 4 +- scripts/plot_flow_unipc_sigmas.ipynb | 248 ++++++++++++++++++ src/diffusers/__init__.py | 2 + src/diffusers/pipelines/__init__.py | 2 + src/diffusers/pipelines/cosmos/__init__.py | 2 + ...edict.py => pipeline_cosmos2_5_predict.py} | 6 +- ...5_predict.py => test_cosmos2_5_predict.py} | 10 +- 7 files changed, 264 insertions(+), 10 deletions(-) create mode 100644 scripts/plot_flow_unipc_sigmas.ipynb rename src/diffusers/pipelines/cosmos/{pipeline_cosmos25_predict.py => pipeline_cosmos2_5_predict.py} (99%) rename tests/pipelines/cosmos/{test_cosmos25_predict.py => test_cosmos2_5_predict.py} (97%) diff --git a/scripts/convert_cosmos_to_diffusers.py b/scripts/convert_cosmos_to_diffusers.py index 0f56370d4a85..dbd4949739a3 100644 --- a/scripts/convert_cosmos_to_diffusers.py +++ b/scripts/convert_cosmos_to_diffusers.py @@ -62,7 +62,7 @@ EDMEulerScheduler, FlowMatchEulerDiscreteScheduler, ) -from diffusers.pipelines.cosmos.pipeline_cosmos25_predict import Cosmos25PredictBase +from diffusers.pipelines.cosmos.pipeline_cosmos2_5_predict import Cosmos_2_5_PredictBase def remove_keys_(key: str, state_dict: Dict[str, Any]): @@ -538,7 +538,7 @@ def save_pipeline_cosmos_2_5(args, transformer, vae): scheduler = FlowMatchEulerDiscreteScheduler(use_karras_sigmas=True) - pipe = Cosmos25PredictBase( + pipe = Cosmos_2_5_PredictBase( text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, diff --git a/scripts/plot_flow_unipc_sigmas.ipynb b/scripts/plot_flow_unipc_sigmas.ipynb new file mode 100644 index 000000000000..4065b94430a6 --- /dev/null +++ b/scripts/plot_flow_unipc_sigmas.ipynb @@ -0,0 +1,248 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# FlowUniPCMultistepScheduler sigma visualization\n", + "\n", + "This notebook instantiates the local `FlowUniPCMultistepScheduler` implementation and plots the sigma schedule it produces for a configurable number of inference steps.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "import sys\n", + "from pathlib import Path\n", + "\n", + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "\n", + "\n", + "try:\n", + " repo_root = Path(__file__).resolve().parents[1]\n", + "except NameError:\n", + " repo_root = Path.cwd()\n", + "\n", + "src_path = repo_root / \"src\"\n", + "if str(src_path) not in sys.path:\n", + " sys.path.insert(0, str(src_path))\n", + "\n", + "from diffusers.schedulers import FlowUniPCMultistepScheduler\n" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "NUM_TRAIN_TIMESTEPS = 1_000\n", + "NUM_INFERENCE_STEPS = 35\n", + "SHIFT = 1.0\n", + "USE_KARRAS_SIGMAS = True\n", + "\n", + "\n", + "def generate_sigmas(num_inference_steps=NUM_INFERENCE_STEPS, shift=SHIFT, use_karras_sigmas=USE_KARRAS_SIGMAS):\n", + " \"\"\"Instantiate the scheduler and return the sigma 
and timestep buffers.\"\"\"\n", + " scheduler = FlowUniPCMultistepScheduler(\n", + " num_train_timesteps=NUM_TRAIN_TIMESTEPS,\n", + " shift=shift,\n", + " use_karras_sigmas=use_karras_sigmas,\n", + " )\n", + " scheduler.set_timesteps(num_inference_steps=num_inference_steps)\n", + "\n", + " sigma_values = scheduler.sigmas.detach().cpu().numpy()\n", + " timestep_values = scheduler.timesteps.detach().cpu().numpy()\n", + " return sigma_values, timestep_values\n" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ + "sigma_values, timestep_values = generate_sigmas()\n" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "False" + ] + }, + "execution_count": 21, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "np.allclose(sigma_values[:-1], timestep_values*0.001)" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([995, 994, 993, 992, 990, 988, 986, 984, 981, 977, 973, 967, 960,\n", + " 951, 941, 927, 910, 889, 863, 831, 791, 742, 684, 618, 543, 462,\n", + " 380, 301, 228, 166, 116, 77, 49, 30, 17, 9])" + ] + }, + "execution_count": 23, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "timestep_values" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([0.99502486, 0.9942067 , 0.993232 , 0.9920665 , 0.9906676 ,\n", + " 0.988982 , 0.98694307, 0.9844666 , 0.9814467 , 0.9777488 ,\n", + " 0.9732026 , 0.9675912 , 0.9606394 , 0.9519976 , 0.94122404,\n", + " 0.9277644 , 0.9109316 , 0.8898901 , 0.86365426, 0.8311152 ,\n", + " 0.79111844, 0.7426199 , 0.6849421 , 0.6181248 , 0.5433001 ,\n", + " 0.46294296, 0.38079414, 0.30132923, 0.22886677, 0.1666508 ,\n", + " 0.11629516, 0.07776967, 0.04981578, 0.03052405, 0.01784122,\n", + " 0.00990099, 0. 
], dtype=float32)" + ] + }, + "execution_count": 22, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "sigma_values" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAA90AAAHqCAYAAAAZLi26AAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjgsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvwVt1zgAAAAlwSFlzAAAPYQAAD2EBqD+naQAAm+tJREFUeJzs3XdYU2cbBvD7JAHCDBsiIiBORMWFWmcdBbe2Wkedn1brqla7bKs4WrV1a622tnVbbW2rddRRq9Uq7r0VGQ42sg0jOd8fSGpkBQUC4f5dVy4557zn5Dl5E+TJuwRRFEUQERERERERUYmTGDoAIiIiIiIiImPFpJuIiIiIiIiolDDpJiIiIiIiIiolTLqJiIiIiIiISgmTbiIiIiIiIqJSwqSbiIiIiIiIqJQw6SYiIiIiIiIqJUy6iYiIiIiIiEoJk24iIiIiIiKiUsKkm4jISHl6emL48OGGDqNEtG/fHu3btzd0GCVi+PDh8PT01KvszJkzIQhC6QZUjqxbtw6CIODs2bOl/lzFqYfnlbd6MabPBxGRMWLSTURUweQmJvk9Pv74Y4PFNXz4cFhZWRV43MrKqsS+BAgLC9O5b6lUimrVqqFPnz64ePFinvIqlQpLlixB8+bNoVAoIJfLUatWLUyYMAG3b9/WlstNpiQSCe7fv5/nOsnJyTA3N4cgCJgwYUKJ3Et6ejpmzpyJI0eOlMj1Hj16hJkzZ+b7OpSGzMxMLFu2DI0aNYKNjQ1sbW1Rr149jB49Gjdv3iyTGIiIiMozmaEDICKiFzN79mx4eXnp7PP19TVQNKXrwIED+e4fOHAgunbtCrVajRs3bmDVqlX4888/cfLkSfj5+QEA4uLiEBgYiHPnzqF79+4YNGgQrKyscOvWLWzduhXfffcdMjMzda5rZmaGn376CR9++KHO/t9++63E7y09PR2zZs0CgDytlZ999lmxv0h59OgRZs2aBU9PT+1rUJreeOMN/Pnnnxg4cCDefvttZGVl4ebNm9i9ezdeeeUV1KlTp9RjqOwK+nwQEVH5wKSbiKiC6tKlC5o2bWroMMqEqalpvvsbN26MwYMHa7dbtWqFnj17YtWqVfj2228B5LTAX7hwAdu3b8cbb7yhc/6cOXPw6aef5rlu165d8026t2zZgm7duuHXX3992VvSi0wmg0xWfv+rPnPmDHbv3o0vvvgCn3zyic6xr7/+GomJiYYJrIJJT0+HhYXFC59f0OeDiIjKB3YvJyKqRO7du4d+/frB3t4eFhYWaNGiBfbs2aM9LooiHB0dMWXKFO0+jUYDW1tbSKVSnSTqyy+/hEwmQ2pq6gvFkttN/vjx45gyZQqcnJxgaWmJPn36IDY2VqesvmNWO3ToAAAIDQ0FAJw6dQp79uzByJEj8yTcQE6L9sKFC/PsHzRoEC5evKjTPToqKgp///03Bg0aVOC9hIWF6ew/cuQIBEEosOt4WFgYnJycAACzZs3SdpefOXMmgPzHDh88eBCtW7eGra0trKysULt2bW3Ce+TIETRr1gwAMGLECO311q1bpz3/1KlTCAwMhEKhgIWFBdq1a4fjx4/rPEfu8968eRNvvvkmbGxs4ODggEmTJkGlUmnLhYSEAMj5suN5UqkUDg4OOvsePnyIkSNHokqVKjAzM4OXlxfGjh2bp6dBRkZGke8JAPjzzz/Rpk0bWFpawtraGt26dcO1a9fylNuxYwd8fX0hl8vh6+uL33//PU+ZguoqdyjDs69hQTZt2oQmTZrA3Nwc9vb2GDBgQJ5hCu3bt4evry/OnTuHtm3bwsLCIs8XFs+KiorCiBEjULVqVZiZmUGpVKJXr14677X8Ph/h4eHo2bMnLC0t4ezsjPfeew/79+/Pc4+58Vy+fBnt2rWDhYUFatSoge3btwMA/vnnHzRv3hzm5uaoXbs2/vrrrzzPM27cONSuXRvm5uZwcHBAv3798nwWsrKyMGvWLNSsWRNyuRwODg5o3bo1Dh48WOTrSkRU0ZXfr8+JiKhQSUlJiIuL09nn6OhYYPno6Gi88sorSE9Px7vvvgsHBwesX78ePXv2xPbt29GnTx8IgoBWrVrh6NGj2vMuX76MpKQkSCQSHD9+HN26dQMAHDt2DI0aNSp0HLc+Jk6cCDs7OwQFBSEsLAxLly7FhAkTsG3btmJfKzcJzE32/vjjDwDAkCFDinWdtm3bomrVqtiyZQtmz54NANi2bRusrKy0918SnJycsGrVKowdOxZ9+vTB66+/DgBo0KBBvuWvXbuG7t27o0GDBpg9ezbMzMxw9+5dbdJct25dzJ49GzNmzMDo0aPRpk0bAMArr7wCAPj777/RpUsXNGnSBEFBQZBIJFi7di06dOiAY8eOwd/fX+f53nzzTXh6emLevHk4efIkli9fjsePH2PDhg0AAA8PDwDA5s2b0apVq0Jb5R89egR/f38kJiZi9OjRqFOnDh4+fIjt27cjPT1dp7VWn/fExo0bMWzYMAQEBODLL79Eeno6Vq1ahdatW+PChQvaSdIOHDiAN954Az4+Ppg3bx7i4+O1SWxJ+uKLLzB9+nS8+eabGDVqFGJjY7FixQq0bdsWFy5cgK2trbZsfHw8unTpggEDBmDw4MFwcXEp8LpvvPEGrl27hokTJ8LT0xMxMTE4ePAgIiIiCpwILi0tDR06dEBkZCQmTZoEV1dXbNmyBYcPH863/OPHj9G9e3cMGDAA/fr1w6pVqzBgwABs3rwZkydPxjvvvINBgwZhwYIF6Nu3L+7fvw9ra2sAOb0dTpw4gQEDBqBq1aoICwvDqlWr0L59e1y/fl3bgj9z5kzMmzcPo0aNgr+/P5KTk3H27FmcP38enTt3frEXnYioohCJiKhCWbt2rQgg38ezPDw8xGHDhmm3J0+eLAIQjx07pt2XkpIienl5iZ6enqJarRZFURQXLFggSqVSMTk5WRRFUVy+fLno4eEh+vv7ix999JEoiqKoVqtFW1tb8b333tNea9iwYaKlpWWBcVtaWurEk3sfnTp1EjUajXb/e++9J0qlUjExMVG7r127dmK7du2026GhoSIAcdasWWJsbKwYFRUlHjlyRGzUqJEIQPz1119FURTFPn36iADEx48fF/Gq5ggKChIBiLGxseL7778v1qhRQ3usWbNm4ogRI0RRFEUA4vjx4/PcS2hoqM71Dh8+LAIQDx8+rN03bNgw0cPDQ7sdGxsrAhCDgoIKjCfXkiVLtPEV5MyZMyIAce3atTr7NRqNWLNmTTEgIEDn9U5PTxe9vLzEzp
[... base64 PNG data omitted: rendered "FlowUniPCMultistepScheduler sigmas" line plot (x-axis: inference step, y-axis: sigma value; 36 steps plus the appended final sigma highlighted in red) ...]",
+      "text/plain": [
+       "<Figure size 1000x500 with 1 Axes>
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Plotted 36 inference steps + final sigma (total tensor length=37).\n" + ] + } + ], + "source": [ + "\n", + "fig, ax = plt.subplots(figsize=(10, 5))\n", + "step_axis = np.arange(len(sigma_values) - 1)\n", + "ax.plot(step_axis, sigma_values[:-1], marker=\"o\", linewidth=2, label=\"sigma schedule\")\n", + "ax.scatter(len(sigma_values) - 1, sigma_values[-1], color=\"tab:red\", zorder=5, label=\"final sigma (appended)\")\n", + "ax.set_xlabel(\"Inference step\")\n", + "ax.set_ylabel(\"Sigma value\")\n", + "ax.set_title(\"FlowUniPCMultistepScheduler sigmas\")\n", + "ax.grid(alpha=0.3)\n", + "ax.legend()\n", + "fig.tight_layout()\n", + "plt.show()\n", + "\n", + "print(\n", + " f\"Plotted {len(step_axis)} inference steps + final sigma (total tensor length={len(sigma_values)}).\"\n", + ")\n" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAA90AAAHqCAYAAAAZLi26AAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjgsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvwVt1zgAAAAlwSFlzAAAPYQAAD2EBqD+naQAAiPpJREFUeJzs3Xd8U2X///F3kjYddFJKW6BQQJAlQxREGTKUoSBDRVFBbpFbBVQQByIgOABRhoqieDsRwXWLFkUQKUuEWwQB2XvTQWkppSs5vz/4NV9LW5pA27Tp6/l49EFz5Ton7+RKSj8917mOyTAMQwAAAAAAoNiZ3R0AAAAAAABPRdENAAAAAEAJoegGAAAAAKCEUHQDAAAAAFBCKLoBAAAAACghFN0AAAAAAJQQim4AAAAAAEoIRTcAAAAAACWEohsAAAAAgBJC0Q0A5UhMTIwefPBBd8coFjfffLNuvvlmd8coFg8++KBiYmKc6vviiy/KZDKVbKAy5OOPP5bJZNIff/xR4o/lyjhcrKyMi8lk0osvvujuGACAYkTRDQBlQG5hUtDXc88957ZcDz74oAICAgq9PyAgoNj+CHDw4ME8z9tisahmzZrq06ePNm/enK9/RkaGZsyYodatWys4OFi+vr6qX7++hg8frt27dzv65RZTZrNZR44cybef1NRU+fn5yWQyafjw4cXyXNLT0/Xiiy8qLi6uWPZ3/PhxvfjiiwW+DiUhKytLs2bNUosWLRQUFKSQkBA1btxYQ4cO1c6dO0slgyf78ccfy2VhXdrvQwDwFF7uDgAA+D+TJk1S7dq187Q1adLETWlK1tKlSwtsv/fee9WjRw/ZbDbt2LFD7777rn766Sf9/vvvat68uSQpMTFR3bp108aNG3X77bdrwIABCggI0K5du7RgwQK9//77ysrKyrNfHx8fffHFF3rmmWfytH/77bfF/tzS09M1ceJEScp3NP+FF15w+Q8px48f18SJExUTE+N4DUpSv3799NNPP+nee+/Vww8/rOzsbO3cuVOxsbG68cYb1aBBgxLP4Ml+/PFHzZ49u8DC+/z58/LyKpu/npX2+xAAPEXZ/KkOABVU9+7ddd1117k7RqmwWq0Ftl977bW6//77Hbdvuukm9erVS++++67ee+89SReOwG/atElff/21+vXrl2f7l156SWPHjs233x49ehRYdM+fP1+33Xabvvnmmyt9Sk7x8vIqs0WVJP3vf/9TbGysXnnlFT3//PN57nv77bd15swZ9wQrZ9LT0+Xv7+/ydr6+viWQBgDgTkwvB4Bybv/+/brrrrtUuXJl+fv764YbbtDixYsd9xuGoSpVqmjUqFGONrvdrpCQEFksljxF1NSpU+Xl5aW0tLTLypI7TX7t2rUaNWqUwsPDValSJfXp00cJCQl5+jp7TnenTp0kSQcOHJAkrV+/XosXL9ZDDz2Ur+CWLhzRfv311/O1DxgwQJs3b84zPfrkyZP69ddfNWDAgEKfy8GDB/O0x8XFyWQyFTp1/ODBgwoPD5ckTZw40TFdPveoZkHnDi9btkxt27ZVSEiIAgICdPXVVzsK3ri4OF1//fWSpMGDBzv29/HHHzu2X79+vbp166bg4GD5+/urQ4cOWrt2bZ7HyH3cnTt36u6771ZQUJDCwsL0xBNPKCMjw9Fv3759ki78seNiFotFYWFhedqOHTumhx56SNWqVZOPj49q166tRx99NN9Mg8zMzCLfE5L0008/qV27dqpUqZICAwN122236e+//87X77vvvlOTJk3k6+urJk2a6L///W++PoWNVe6pDP98DQszb948tWzZUn5+fqpcubLuueeefKcp3HzzzWrSpIk2btyo9u3by9/fP98fLHI9+OCDmj17tiTlOZ0i18XndOeO2+7du3X//fcrODhY4eHhGjdunAzD0JEjR3THHXcoKChIkZGReuONN/I9ZmZmpiZMmKCrrrpKPj4+io6O1jPPPKPMzMw8/crS+7CoPABQnpTdP7UDQAWUkpKixMTEPG1VqlQptP+pU6d04403Kj09XY8//rjCwsL0ySefqFevXvr666/Vp08fmUwm3XTTTVq1apVjuy1btiglJUVms1lr167VbbfdJklavXq1WrRoccnzuJ0xYsQIhYaGasKECTp48KBmzpyp4cOHa+HChS7vK7cIzC32vv/+e0nSAw884NJ+2rdvrxo1amj+/PmaNGmSJGnhwoUKCAhwPP/iEB4ernfffVePPvqo+vTpo759+0qSmjZtWmD/v//+W7fffruaNm2qSZMmycfHR3v37nUUKw0bNtSkSZM0fvx4DR06VO3atZMk3XjjjZKkX3/9Vd27d1fLli01YcIEmc1mffTRR+rUqZNWr16tVq1a5Xm8u+++WzExMZo8ebJ+//13vfnmm0pOTtann34qSapVq5Yk6fPPP9dNN910yaPyx48fV6tWrXTmzBkNHTpUDRo00LFjx/T1118rPT09z2wGZ94Tn332
mQYNGqSuXbtq6tSpSk9P17vvvqu2bdtq06ZNjkXSli5dqn79+qlRo0aaPHmykpKSNHjwYNWoUcPpcXLGK6+8onHjxunuu+/WkCFDlJCQoLfeekvt27fXpk2bFBIS4uiblJSk7t2765577tH999+viIiIAvf573//W8ePH9eyZcv02WefOZ2lf//+atiwoaZMmaLFixfr5ZdfVuXKlfXee++pU6dOmjp1qj7//HONHj1a119/vdq3by/pwh/YevXqpTVr1mjo0KFq2LChtm7dqhkzZmj37t367rvvJJW992FReQCgXDEAAG730UcfGZIK/PqnWrVqGYMGDXLcfvLJJw1JxurVqx1tZ8+eNWrXrm3ExMQYNpvNMAzDmDZtmmGxWIzU1FTDMAzjzTffNGrVqmW0atXKePbZZw3DMAybzWaEhIQYI0eOdOxr0KBBRqVKlQrNXalSpTx5cp9Hly5dDLvd7mgfOXKkYbFYjDNnzjjaOnToYHTo0MFx+8CBA4YkY+LEiUZCQoJx8uRJIy4uzmjRooUhyfjmm28MwzCMPn36GJKM5OTkIl7VCyZMmGBIMhISEozRo0cbV111leO+66+/3hg8eLBhGIYhyRg2bFi+53LgwIE8+1uxYoUhyVixYoWjbdCgQUatWrUctxMSEgxJxoQJEwrNk2vGjBmOfIX53//+Z0gyPvroozztdrvdqFevntG1a9c8r3d6erpRu3Zt45Zbbsn3uL169cqzj8cee8yQZPz111+OfXbo0MGQZERERBj33nuvMXv2bOPQoUP5cg0cONAwm83G//73v3z35eZx9j1x9uxZIyQkxHj44Yfz7OfkyZNGcHBwnvbmzZsbUVFRed5PS5cuNSTlGYeCxsow/u+99s/X8+JxOXjwoGGxWIxXXnklz7Zbt241vLy88rTnvl5z5szJ9zoUZNiwYfk+27kuft/k5ho6dKijLScnx6hRo4ZhMpmMKVOmONqTk5MNPz+/PJ/Jzz77zDCbzXl+RhiGYcyZM8eQZKxdu9YwjLL3PnQmDwCUF0wvB4AyZPbs2Vq2bFmer0v58ccf1apVK7Vt29bRFhAQoKFDh+rgwYPavn27JKldu3ay2Wz67bffJF04ot2uXTu1a9dOq1evliRt27ZNZ86ccRzBuhJDhw7NM2U29/EPHTpU5LYTJkxQeHi4IiMjdfPNN2vfvn2aOnWq44hxamqqJCkwMNDlXAMGDNDevXv1v//9z/FvQVPLS1Pu0dJFixbJbre7tO3mzZu1Z88eDRgwQElJSUpMTFRiYqLOnTunzp07a9WqVfn2OWzYsDy3R4wYIenCe0m6ML35559/1ssvv6zQ0FB98cUXGjZsmGrVqqX+/fs7Tkew2+367rvv1LNnzwLXIbh4Cn1R74lly5bpzJkzuvfeex3PIzExURaLRa1bt9aKFSskSSdOnNDmzZs1aNAgBQcHO/Z3yy23qFGjRi69fpfy7bffym636+67786TJzIyUvXq1XPkyeXj46PBgwcX2+NfbMiQIY7vLRaLrrvuOhmGoYceesjRHhISoquvvlr79+93tH311Vdq2LChGjRokOd55J62kfs8ytr78EryAEBZw/RyAChDWrVq5dJCaocOHVLr1q3ztTds2NBxf5MmTXTttdfK399fq1evVteuXbV69WpNnDhRkZGReuutt5SRkeEovv9ZwDujoGsb16xZM8/t0NBQSVJycnKR+xs6dKjuuusumc1mx6WqfHx8HPcHBQVJks6ePZtneq8zWrRooQYNGmj+/PkKCQlRZGSko/hwl/79++uDDz7QkCFD9Nxzz6lz587q27ev7rzzTpnNl/7b+J49eyRJgwYNKrRPSkqK4/WXpHr16uW5v27dujKbzXnOXffx8dHYsWM1duxYnThxQitXrtSsWbP05ZdfytvbW/PmzVNCQoJSU1OdXl2/qPdE7nMpbDxyxz23SL/4eUjS1VdfrT///NOpPEXZs2ePDMMo8HEkydvbO8/t6tWrF7o4YHG4+PXLvUzexaefBAcHKykpyXF7z5492rFjh2OdgYvFx8dLKnvvwyvJAwBlDUU3AFQA3t7eat26tVatWqW9e/fq5MmTateunSIiIpSdna3169dr9erVatCgQZ5fzn19fZWZmSnDMPIV14ZhKCMjo8DVli0WS4E5DMMoMmu9evXUpUuXQu/PvVzV1q1bL+uo/IABA/Tuu+8qMDBQ/fv3L/QX+IL+mCBJNpvN5ce8FD8/P61atUorVqzQ4sWLtWTJEi1cuFCdOnXS0qVLC30tJTmOAE6bNq3QSzgVdX5+Yc8zV1RUlO655x7169dPjRs31pdffunUAmQXK+o9kftcPvvsM0VGRubrdzkrvl/JGNrtdplMJv30008FZr/4dfXz83M5nysKyuDM58xut+uaa67R9OnTC+wbHR0tqey9D68kDwCUNRTdAFCO1apVS7t27crXnrtCd+6iWNKF6bxTp07VL7/8oipVqqhBgwYymUxq3LixVq9erdWrV+v222/Pt/+cnBzt27dPV111VZ779u7dK5vNlucxSkPPnj01efJkzZs377KL7vHjx+vEiROXXMgq96jcxZfIcmaKfFGF7MXMZrM6d+6szp07a/r06Xr11Vc1duxYrVixQl26dCl0f3Xr1pV04Sjwpf5Q8U979uzJcy34vXv3ym63OxYpK4y3t7eaNm2qPXv2KDExUVWrVlVQUJC2bdvm3JMsQu5zqVq16iWfS+77Lffo6j9d/Fm4kjGsW7euDMNQ7dq1Vb9+/SL7u8LV98eVqFu3rv766y917ty5yMcta+/DovIAQHnB/BwAKMd69OihDRs2aN26dY62c+fO6f3331dMTEyec1zbtWunzMxMzZw5U23btnX8At2uXTt99tlnOn78eL4itnv37pIuXJ/5YrmXPcrtU1ratGmjbt266YMPPnCsvPxPWVlZGj16dKHb161bVzNnztTkyZPzrah8cT9JeVZ9t9lsev/994vMmHt9ZmeuaX369Ol8bblHC3Mv6VSpUqUC99eyZUvVrVtXr7/+eoGXeSvokly545brrbfekvR/47hnzx4dPnw433ZnzpzRunXrFBoaqvDwcJnNZvXu3Vs//PCD/vjjj3z9nZnV8E9du3ZVUFCQXn31VWVnZxf6XKKiotS8eXN98sknSklJcdy/bNkyxxoGuWrVqiWLxZJnDCXpnXfeKTJP3759ZbFYNHHixHzPxTCMPFO4XVXYeJaEu+++W8eOHdPcuXPz3Xf+/HmdO3dOUtl7HzqTBwDKC450A0A59txzz+mLL75Q9+7d9fjjj6ty5cr65JNPdODAAX3zzTd5pk63adNGXl5e2rVrl4YOHepob9++vd59911Jyld0N2/eXEOGDNGsWbO0Z88e3XLLLZIuFDg//vijhgwZombNmpXCM83r008/1a233qq+ffuqZ8+e6ty5sypVqqQ9e/ZowYIFOnHiRIHX6s71xBNPFPkYjRs31g033KAxY8bo9OnTqly5shYsWKCcnJwit/Xz81OjRo20cOFC1a9fX5UrV1a
<... base64-encoded PNG data omitted: rendered 'FlowUniPCMultistepScheduler timesteps' plot ...>",
+      "text/plain": [
+       "<Figure size 1000x500 with 1 Axes>
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "fig, ax = plt.subplots(figsize=(10, 5))\n", + "timestep_axis = np.arange(len(timestep_values))\n", + "ax.plot(timestep_axis, timestep_values * 0.001, marker=\"s\", linewidth=2, color=\"tab:green\", label=\"timestep values\")\n", + "ax.set_xlabel(\"Inference step\")\n", + "ax.set_ylabel(\"Timestep (float index)\")\n", + "ax.set_title(\"FlowUniPCMultistepScheduler timesteps\")\n", + "ax.grid(alpha=0.3)\n", + "ax.legend()\n", + "fig.tight_layout()\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.15" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/src/diffusers/__init__.py b/src/diffusers/__init__.py index a64a20c3c55c..2d5a3d1204f0 100644 --- a/src/diffusers/__init__.py +++ b/src/diffusers/__init__.py @@ -464,6 +464,7 @@ "CogView4ControlPipeline", "CogView4Pipeline", "ConsisIDPipeline", + "Cosmos_2_5_PredictBase", "Cosmos2TextToImagePipeline", "Cosmos2VideoToWorldPipeline", "CosmosTextToWorldPipeline", @@ -1177,6 +1178,7 @@ CogView4ControlPipeline, CogView4Pipeline, ConsisIDPipeline, + Cosmos_2_5_PredictBase, Cosmos2TextToImagePipeline, Cosmos2VideoToWorldPipeline, CosmosTextToWorldPipeline, diff --git a/src/diffusers/pipelines/__init__.py b/src/diffusers/pipelines/__init__.py index 04ec6b5cd8d3..15b68a303799 100644 --- a/src/diffusers/pipelines/__init__.py +++ b/src/diffusers/pipelines/__init__.py @@ -165,6 +165,7 @@ _import_structure["cogview4"] = ["CogView4Pipeline", "CogView4ControlPipeline"] _import_structure["consisid"] = ["ConsisIDPipeline"] _import_structure["cosmos"] = [ + "Cosmos_2_5_PredictBase", "Cosmos2TextToImagePipeline", "CosmosTextToWorldPipeline", "CosmosVideoToWorldPipeline", @@ -622,6 +623,7 @@ StableDiffusionXLControlNetXSPipeline, ) from .cosmos import ( + Cosmos_2_5_PredictBase, Cosmos2TextToImagePipeline, Cosmos2VideoToWorldPipeline, CosmosTextToWorldPipeline, diff --git a/src/diffusers/pipelines/cosmos/__init__.py b/src/diffusers/pipelines/cosmos/__init__.py index 2833c89abd5e..edd8163b86a0 100644 --- a/src/diffusers/pipelines/cosmos/__init__.py +++ b/src/diffusers/pipelines/cosmos/__init__.py @@ -22,6 +22,7 @@ _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: + _import_structure["pipeline_cosmos2_5_predict"] = ["Cosmos_2_5_PredictBase", "retrieve_latents"] _import_structure["pipeline_cosmos2_text2image"] = ["Cosmos2TextToImagePipeline"] _import_structure["pipeline_cosmos2_video2world"] = ["Cosmos2VideoToWorldPipeline"] _import_structure["pipeline_cosmos_text2world"] = ["CosmosTextToWorldPipeline"] @@ -35,6 +36,7 @@ except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: + from .pipeline_cosmos2_5_predict import Cosmos_2_5_PredictBase, retrieve_latents from .pipeline_cosmos2_text2image import Cosmos2TextToImagePipeline from .pipeline_cosmos2_video2world import Cosmos2VideoToWorldPipeline from .pipeline_cosmos_text2world import CosmosTextToWorldPipeline diff --git a/src/diffusers/pipelines/cosmos/pipeline_cosmos25_predict.py 
b/src/diffusers/pipelines/cosmos/pipeline_cosmos2_5_predict.py similarity index 99% rename from src/diffusers/pipelines/cosmos/pipeline_cosmos25_predict.py rename to src/diffusers/pipelines/cosmos/pipeline_cosmos2_5_predict.py index 8fc7b937e94f..f2a2d9bec2b2 100644 --- a/src/diffusers/pipelines/cosmos/pipeline_cosmos25_predict.py +++ b/src/diffusers/pipelines/cosmos/pipeline_cosmos2_5_predict.py @@ -71,11 +71,11 @@ def retrieve_latents( Examples: ```python >>> import torch - >>> from diffusers import Cosmos25PredictBase + >>> from diffusers import Cosmos_2_5_PredictBase >>> from diffusers.utils import export_to_video, load_image, load_video >>> model_id = "nvidia/Cosmos-Predict2.5-Base-2B" - >>> pipe = Cosmos25PredictBase.from_pretrained(model_id, torch_dtype=torch.bfloat16) + >>> pipe = Cosmos_2_5_PredictBase.from_pretrained(model_id, torch_dtype=torch.bfloat16) >>> pipe.to("cuda") >>> prompt = "A close-up shot captures a vibrant yellow scrubber vigorously working on a grimy plate, its bristles moving in circular motions to lift stubborn grease and food residue. The dish, once covered in remnants of a hearty meal, gradually reveals its original glossy surface. Suds form and bubble around the scrubber, creating a satisfying visual of cleanliness in progress. The sound of scrubbing fills the air, accompanied by the gentle clinking of the dish against the sink. As the scrubber continues its task, the dish transforms, gleaming under the bright kitchen lights, symbolizing the triumph of cleanliness over mess." @@ -124,7 +124,7 @@ def retrieve_latents( """ -class Cosmos25PredictBase(DiffusionPipeline): +class Cosmos_2_5_PredictBase(DiffusionPipeline): r""" Pipeline for [Cosmos Predict2.5](https://github.com/nvidia-cosmos/cosmos-predict2.5) base model. diff --git a/tests/pipelines/cosmos/test_cosmos25_predict.py b/tests/pipelines/cosmos/test_cosmos2_5_predict.py similarity index 97% rename from tests/pipelines/cosmos/test_cosmos25_predict.py rename to tests/pipelines/cosmos/test_cosmos2_5_predict.py index c005c72ef434..e68fde325a6d 100644 --- a/tests/pipelines/cosmos/test_cosmos25_predict.py +++ b/tests/pipelines/cosmos/test_cosmos2_5_predict.py @@ -23,7 +23,7 @@ import numpy as np import torch -from diffusers import AutoencoderKLWan, Cosmos25PredictBase, CosmosTransformer3DModel, FlowUniPCMultistepScheduler +from diffusers import AutoencoderKLWan, Cosmos_2_5_PredictBase, CosmosTransformer3DModel, FlowUniPCMultistepScheduler from ...testing_utils import enable_full_determinism, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS @@ -96,15 +96,15 @@ def forward(self, input_ids: torch.LongTensor, output_hidden_states: bool = Fals return SimpleNamespace(hidden_states=hidden_states) -class Cosmos25PredictBaseWrapper(Cosmos25PredictBase): +class Cosmos_2_5_PredictBaseWrapper(Cosmos_2_5_PredictBase): @staticmethod def from_pretrained(*args, **kwargs): kwargs["safety_checker"] = DummyCosmosSafetyChecker() - return Cosmos25PredictBase.from_pretrained(*args, **kwargs) + return Cosmos_2_5_PredictBase.from_pretrained(*args, **kwargs) -class Cosmos25PredictPipelineFastTests(PipelineTesterMixin, unittest.TestCase): - pipeline_class = Cosmos25PredictBaseWrapper +class Cosmos_2_5_PredictPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = Cosmos_2_5_PredictBaseWrapper params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS From 
04f23e8849c8215c0ac1761a8310c9c429e53aef Mon Sep 17 00:00:00 2001 From: Miguel Martin Date: Tue, 16 Dec 2025 23:56:11 +0000 Subject: [PATCH 08/22] Tests for pipeline present, but not working (predict2 also not working) --- scripts/plot_flow_unipc_sigmas.ipynb | 248 ------------------ src/diffusers/__init__.py | 2 +- src/diffusers/pipelines/__init__.py | 2 +- .../cosmos/test_cosmos2_5_predict.py | 72 +---- 4 files changed, 8 insertions(+), 316 deletions(-) delete mode 100644 scripts/plot_flow_unipc_sigmas.ipynb diff --git a/scripts/plot_flow_unipc_sigmas.ipynb b/scripts/plot_flow_unipc_sigmas.ipynb deleted file mode 100644 index 4065b94430a6..000000000000 --- a/scripts/plot_flow_unipc_sigmas.ipynb +++ /dev/null @@ -1,248 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# FlowUniPCMultistepScheduler sigma visualization\n", - "\n", - "This notebook instantiates the local `FlowUniPCMultistepScheduler` implementation and plots the sigma schedule it produces for a configurable number of inference steps.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "import sys\n", - "from pathlib import Path\n", - "\n", - "import matplotlib.pyplot as plt\n", - "import numpy as np\n", - "\n", - "\n", - "try:\n", - " repo_root = Path(__file__).resolve().parents[1]\n", - "except NameError:\n", - " repo_root = Path.cwd()\n", - "\n", - "src_path = repo_root / \"src\"\n", - "if str(src_path) not in sys.path:\n", - " sys.path.insert(0, str(src_path))\n", - "\n", - "from diffusers.schedulers import FlowUniPCMultistepScheduler\n" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [], - "source": [ - "NUM_TRAIN_TIMESTEPS = 1_000\n", - "NUM_INFERENCE_STEPS = 35\n", - "SHIFT = 1.0\n", - "USE_KARRAS_SIGMAS = True\n", - "\n", - "\n", - "def generate_sigmas(num_inference_steps=NUM_INFERENCE_STEPS, shift=SHIFT, use_karras_sigmas=USE_KARRAS_SIGMAS):\n", - " \"\"\"Instantiate the scheduler and return the sigma and timestep buffers.\"\"\"\n", - " scheduler = FlowUniPCMultistepScheduler(\n", - " num_train_timesteps=NUM_TRAIN_TIMESTEPS,\n", - " shift=shift,\n", - " use_karras_sigmas=use_karras_sigmas,\n", - " )\n", - " scheduler.set_timesteps(num_inference_steps=num_inference_steps)\n", - "\n", - " sigma_values = scheduler.sigmas.detach().cpu().numpy()\n", - " timestep_values = scheduler.timesteps.detach().cpu().numpy()\n", - " return sigma_values, timestep_values\n" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": {}, - "outputs": [], - "source": [ - "sigma_values, timestep_values = generate_sigmas()\n" - ] - }, - { - "cell_type": "code", - "execution_count": 18, - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": 21, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "False" - ] - }, - "execution_count": 21, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "np.allclose(sigma_values[:-1], timestep_values*0.001)" - ] - }, - { - "cell_type": "code", - "execution_count": 23, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "array([995, 994, 993, 992, 990, 988, 986, 984, 981, 977, 973, 967, 960,\n", - " 951, 941, 927, 910, 889, 863, 831, 791, 742, 684, 618, 543, 462,\n", - " 380, 301, 228, 166, 116, 77, 49, 30, 17, 9])" - ] - }, - "execution_count": 23, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "timestep_values" - 
]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 22,
-   "metadata": {},
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "array([0.99502486, 0.9942067 , 0.993232  , 0.9920665 , 0.9906676 ,\n",
-       "       0.988982  , 0.98694307, 0.9844666 , 0.9814467 , 0.9777488 ,\n",
-       "       0.9732026 , 0.9675912 , 0.9606394 , 0.9519976 , 0.94122404,\n",
-       "       0.9277644 , 0.9109316 , 0.8898901 , 0.86365426, 0.8311152 ,\n",
-       "       0.79111844, 0.7426199 , 0.6849421 , 0.6181248 , 0.5433001 ,\n",
-       "       0.46294296, 0.38079414, 0.30132923, 0.22886677, 0.1666508 ,\n",
-       "       0.11629516, 0.07776967, 0.04981578, 0.03052405, 0.01784122,\n",
-       "       0.00990099, 0.        ], dtype=float32)"
-      ]
-     },
-     "execution_count": 22,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
-   "source": [
-    "sigma_values"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [
-    {
-     "data": {
-      "image/png": "iVBORw0KGgoAAAANSUhEUg<... base64-encoded PNG data omitted: rendered 'FlowUniPCMultistepScheduler timesteps' plot ...>
tGQEifjwdVei1p/12GWmpyTB5em3tQxAgk0BnO+fnnH0QgRk7bxZ67Vl/XEVDR+GFW44P332Mabvv5dkflazCuC0XMK97dbxaw67cXTv3+kuO3EdMau4SA2FwtjLBe+3dX+q6udQaERcfpiI+LQsOlibwc7NiC30Z0Gg0SEpKgiiKkEgq1HeXlA/Wp3FhfRoX1ueLycrKgkajQXZ2NrKzs4t9vlojYtauawX+fScAmL37Gl6t5VCsvztEUYRarQaQ/4zQkZGRGDRoEObNm4devXohJSUFx48fR1ZWFrKzs7VJTu49vffeezh+/Dh+++03ODs7Y9asWTh//jwaNGigc99Lly7FnDlz8PHHH2P58uUYOnQoWrZsiWHDhmHu3Ln45JNPMHToUFy6dAmCICA1NRV+fn6YMmUKbGxs8Oeff2Lo0KHw9PREs2bN8r238ePHIzMzE4cOHYKlpSVu3LgBc3NzZGdn4+HDh2jXrh3atm2LAwcOwNraGsHBwVCpVNr7Onz4MFxcXHDgwAGEhITgrbfeQoMGDTBy5Ejt9W/cuIFNmzZBqVRi586d6NKlC86fP4+aNWvi9OnTGDVqFD7//HP07NkTBw4cwOzZswFA+1o8//rl1okoitp9z28X9rw1atQotD6NRW4dxcfHw8TEROdYSkqKXteoUEm3q6sroqOjdfZFR0fDxsZGu3D586ZNm4YpU6Zot5OTk+Hu7g4nJyfY2NiUarzFcfJe/DOJWf5U2RrsuhZfKs+fpFLjgz9CSuXa0alZCPj2EsxNZZBJhJyHVJKT4EsFyCQSmEiFp18mPP356X6pABy+HVvo9ecejEBitgxmMilkEgEmMglMpZKcn6WSp9tPn0eWc31TqQQCgMVHHhR4XQHA8mOP0LdFrRdKZPddjcInu+/l+Q8rNjULn+y+h5WDGiHQ17XY1332+rN330BU8n9fHrnayDGje92Xum4utUbEmbAExKRkwNnaDM082eU+l0ajgSAIcHJy4h+BRoD1aVxYn8aF9fliVCoVUlJSIJPJIJP99+d+z6//RWxKZpHnZ2Sr8Ti94L9LRQCRSRlo+eURmMmkRV7PydoUf0xord1+PnHJFRsbi+zsbPTt2xceHh4AgEaNGmmPSyQSSCQSyGQypKSkYOPGjdi8eTNee+01ADkTh7m5uUEQBJ377tq1K8aOHQsACAoKwrfffotmzZphwIABAICPP/4Yr7zyCuLj4+Hq6goPDw98+OGH2vNr1aqFv/76C7/++itatmyZb+z379/H66+/ro23Vq1a2mPffvstFAoFtm3bpr13Hx8fnfuys7PDypUrIZVK4evri27duuHIkSMYM2YMIiIisH79eoSHh6NKlSoAgA8//BAHDx7Exo0bMXfuXKxcuRKBgYH4+OOPtdc/deoU9u3bp30tnn39cgmCoPN6Pbutz/MWVp/GQiaTQSKRwMHBAXK5XOfY89sFXqM0AistLVu2xN69e3X2HTx4sMA3PwCYmZnBzMwsz/7cN115EZta9C/Aiiw1Q43UDHWpXDslIxtf7rtd4tfN+Q9FhSaf/wVLMxlMpBKYPk3oTWU5DzOZJGf/M/tMZTkJ/6/nHhTaA+DTHVdhaSaDhZkMZjIJzGRSyE1y/jWTSSA3yflXkk+iu+9qJMZvuZDn+tHJKozfcgGrBjd+qbFWFX2MflkQBKHc/R6hF8f6NC6sT+PC+iw+iUSiTZ6ebYGMTcnU+bL+ZeUk5oU3GuUSBAGiKGrjya9l1M/PDx07dkSDBg0QEBCA1157DX379oWdnV2ea4WGhiIrKwvNmzfXXsvW1ha1a9fOc98NGjTQbru6uha4LzY2FkqlEmq1GnPnzsXPP/+Mhw8fIjMzExkZGbCwsCiwRffdd9/F2LFjcfDgQXTq1AlvvPEGGjRoAAC4dOkS2rRpA1NT0wJfn3r16ukkw0qlEleuXIEgCLh69SrUajVq166tc05GRgYcHBwgCAJu3LiBPn366MTXsmVL7Nu3L0/Mz27nVx+5r19Rz1vQ+cYm9/XI7/eQvr+XDJp0p6am4u7du9rt0NBQXLx4Efb29qhWrRqmTZuGhw8fYsOGDQCAd955B19//TU+/PBD/O9//8Pff/+Nn3/+GXv27DHULZQYZ2v9viX58o368HVTQKMBsjUaaEQR6tyfNYBaFKHWaKDW5CQ1ao2Im1HJWPH33SKv/WaTqlDamkOtEZGtyblO9tNrZGtEqNV598ekZOBc+OMir+1iYwaZRIJsjQbZahFZ6pxrZGtEZKs10JTjiSiTVdlIVhW/a1ZRHqdnYdjaM0WWM5EKkMukMHuakJvKBNxPeFJoQv/+L5dwKyoFlmYyyE2kMDeRwtw051+5iRQWprrbuT9LJQL2Xc0Zw/X89XPHcL1sQg+UflJf0RN6IiKi0uBknbchKj9FtXTnsrMw0bOlW7/nlUqlOHjwIE6cOIEDBw5gxYoV+PTTT3Hq1Cl4eXnpdY38PNsSm5sc5rcvd3zuggULsGzZMixduhT169eHpaUlJk+ejMzMghvJRo0ahYCAAOzZswcHDhzAvHnzsGjRIkycOLHAHrkFxZgbU248qampkEqlOHfuHKRS3dfbysqqyGvnkkgkOmPIgZyhCAUpqeclAyfdZ8+exauvvqrdzu0GPmzYMKxbtw6RkZGIiIjQHvfy8sKePXvw3nvvYdmyZahatSq+//57o1guzN/LHkqFHFFJqnyTKQGAq0KOvk3ci508BPq6Yvu5B0Vee94bDYp9bbVGROsv/y7y2v9+1KHQa2s0IrI0Gqg1IrLUOYl49tPEaeJPF4qM471ONeHpaIms3IRerUHm05+zsjXI0vz3c7ZGRKZag4eP0/HP7bgir+1kbQqJICAzW4PMbA2y1Dnnl5Wce8pGSob+56RmqLHkrzvFfi4TiYBsjVjk3ALnIh7D2swElmYyWJpKYWEmg5WZFBamMliaymBpJs1pxTeVwtJUptNaX9pJfXmbaZWIiKi82DWxddGFUHJ/370IQRDQqlUrtGrVCjNmzICHhwd+//13neGiAFC9enWYmJjgzJkzqFatGgAgKSkJt2/fRtu2bV8qhuPHj6NXr14YPHgwgJxk/Pbt2zpdwvPj7u6Od955B++88w6mTZuGNWvWYOLEiWjQoAHWr1+PrKysF+qK3ahRI6jVasTExKBNmzb5lqlbty5OnTqls+/kyZM6205OTrh69arOvosXLxYYU1HP+3wCTwUzaNKdO1NeQdatW5fvORcuFJ2EVTRSiYCgHj4Yu+k8BEDnF1zur7KgHj4v9IutIlxbIhFgJsn7TWnX+krM3XujyF/6EzrULNMvDEQxJ/HOTcRzf85Sa5CRrcH58MeYvvNakTH0aVQFjlZmyMjWICNLA1W2GhlZGmRkq6F6+m9GtgaqrJx/M7I1SFFlQZVVOkl/lh5dDtIz1VhzNLRY1zU3kcLSLKc1PbKA1zt330e/XoFGI0JhYQpruQw2chNYy2WwlpvAVFZ4F56yaKVXa0ScuhePuw8SUCNViubVHdmKTkRERqU0/3YszKlTp3Do0C
G89tprcHZ2xqlTpxAbG4u6devmKWttbY1hw4bhgw8+gL29PZydnREUFKTtWv8yatasie3bt+PEiROws7PD4sWLER0dXWjSPXnyZHTp0gW1atXC48ePcfjwYW3cEyZMwIoVKzBgwABMmzYNCoUCJ0+ehL+/f56u2/mpVasW3nrrLQwdOhSLFi1Co0aNEBsbi0OHDqFBgwbo1q0b3n33XbRq1QoLFy5Er169sH//fuzbt0/nOh06dMCCBQuwYcMGtGzZEps2bcLVq1d1xs0X53m7du1ajFe1cqtQY7qNXaCvEqsGN87TSudaAq10FfXa5fULA0EQno69zr9LVR1XG3xzJKTIhH5hP79ixx4cEo+Ba04WWe6DgFrwcLDEk0w1VFlqpGeq8SQr56HS/qzRHn+SpUZUkgoPE/Nffu9l5D6vPpKeZGHclvy/WDOTSWAtN4GNXKZNxK2f/mxpJsMvZwseRy8AmLXrOjr7uL7wHwl5W9FD2YpORERGqTT/viuIjY0Njh49iqVLlyI5ORkeHh5YtGgRunTpkm/5xYsX45133tEuxfXhhx/i/v37ek9uVZDPPvsM9+7dQ0BAACwsLDB69Gj07t0bSUlJBZ6jVqsxfvx4PHjwADY2NggMDMSSJUsAAA4ODvj777/xwQcfoF27dpBKpfDz80OrVq30jmnt2rX4/PPPMXXqVDx8+BCOjo5o0aIFunfvDgBo0aIF1qxZg6CgIMyYMQOdOnXCZ599hjlz5mivERAQgOnTp+PDDz+ESqXC//73PwwdOhRXrlx54ecl/QhiJesXkJycDIVCgaSkpHI1e/mzclrS4nD3QSxqVHUq0Za00hzrWprXLs0uw6V17dxWVyD/hP5FW11Ls8uXvgn99G514eloibRMNdIzspGakY30TDXSMrORlpGN9Izcn3P+zd1+nJaJtMzSmVBPX/aWJnC2lsPOwhS2FiawffqvXe7P5iaws8z5N/eYiVRSYCv6y9bnszgW3TA0Gg1iYmLg7OzMiZqMAOvTuLA+X4xKpUJoaCi8vLxeOgEtyf+bcpeikslkpTLxVlpaGtzc3LBo0SLtUltUekq7PsuLwj5P+uaWbOkuh6QSAS2qO6C6lRrOzg75zl79Mtdu6e1QYtcrq2sH+irR2ce1VBKS0rp2aX1DXJqt//rOLTC8ldcLXV/fpH5AM3fYWZoiRZWFFFX208d/PyerspCakY0X+cowIS0LCWn6zbSay9JUiidZ6iJno6/tYgMXhRksTIv/q5Vj0YmIqDwqzb/vXtaFCxdw8+ZN+Pv7IykpSbsuda9evQwcGZEuJt1UYVTELwxyE/qS7rlQERN6QP+k/os+9Yt8Do1GRGrmfwn5yZB4zNx1vcgYbM1NkJ6pLtZkePq0zsenZuLVRUcAABamUjhamcHRyjTnX2szOFqa5vxrZfbfMWszWJvJsP9aVJmMRWcrOhERGZuFCxfi1q1bMDU1RZMmTXDs2DE4OjoaOiwiHUy6iUpZafVcqGgt9EDJJvUSiQAbuQls5CYAzFHT2RrfHr2nV7d7iZAzzvxxehYS0zORmJ6FxPQsPE7PRNKTLDxOy0Tik/+ORSSkI6YY08enZ6oRkZCOiIT0IsuaSAWoi5gxPuiPa+hU1wUy6Yt1rWQrOhERGaNGjRrh3Llzhg6DqEhMuokqsNJuoS+t7vzloZXewlQGC1MZ3GyLXjtT327xLarbQxSBuNQMxKflJOxFyVIX3Uc+OjkDdafvg5udOVwVcigV5lAq5FAq5HB95md7S9M8Y6rKYkZ3IiIiIioYk24iyldFHKNfWgm9vt3iN49qoXMPmdkaJKRlIi41A7GpGYhLyUBcas527uNeTBoik1X5XFVXlkZEWHw6wuILbj03lUlyEnGbnCTcRSHHT6ciSnVGdyIiIiIqHJNuIjKIitRK/6Ld4k1lErgq5HBVFDxzrL6t6G62ciQ/yUZKRnaBZTKzNQiPT0d4IYn5s0QAkUkqfPvPXQT4KlHVzrzAZfCKwjHjRERERPlj0k1ERqc0EnpDt6If/TBnCbgUVRaiklSITFIhKkmFR0lPtNuRSU8QmaRCiqrgxDw/X+2/ja/234YgAFUU5qhmbwFPRwtUs7eEh4MFqtlbwMPBAtZyk3zP55hxIiIiooIx6SYi0lNpzEZf3FZ0a7kJrOUmqOliXeA1UzOyEZWkwt83ozF37029YxFF4GHiEzxMfILge/F5jjtYmqKagwU87C3g4ZCTkEcmPcGC/bfzlOWYcSIiIqIcTLqJiIqhNGajL+lWdCszGWo4W8HL0RJrj4cV2IoOAApzE7zVvJp2tvXw+HQkPcl/Arj4tEzEp2XiQkRikTHkPh/HjBMREVFlx6SbiKgcMNRY9C/fqJ8nqU9Mz8wZG56Qjoj4NO048fCENEQn6790GpAzZrzz4n/QqJodartaoZaLNWq7WsPVRp5npvWCcLw4EVHlIYoixowZg+3bt+Px48e4cOECJk+eDD8/PyxdurTEnmfmzJnYsWMHLl68WOxzw8LC4OXlhQsXLsDPz6/EYipJt27dQrt27XDnzh1YWxfcO64iad++/Uu/D9atW4fJkycjMTERALB69Wrs2bMHu3btKpkgC8Ckm4ionCgvY9FtLUxha2GKhu62eY49yVTj/uOcJHzP5UfYcfFRkTHci0vDvbg0nX3Wchlqu1ijlqt1zr9Pk3F7S1OdchwvTkRUuezbtw/r1q3DkSNHUL16dTg6OuK3336DiUn+84oYgru7OyIjI+Ho6GjoUAo0bdo0TJw40WgS7tLyv//9D3PmzMGxY8fQpk2bUnseJt1EREauJFvRzU2lqPU0SbYyk+mVdOcnRZWNs+GPcTb8sc5+RyszbYt4ZrYGm09F5DmX48WJiMqOqFYj/ew5ZMfGQubkBIumTSBIX2ylC32EhIRAqVTilVde0e6zt7cvted7EVKpFK6uroYOo0ARERHYvXs3VqxYYehQyj1TU1MMGjQIy5cvL9WkW1JqVyYionIjtxW9l58bWno7lEj37NyZ1wu6koCcVumrMwOwe2JrLOrXEGPaVkf72k6oUsAyanGpGTh+Nx5rj4flm3ADuuPF1ZqCRqsTEdHLSj5wAHc7dkLEsGF49P77iBg2DHc7dkLygQOl8nzDhw/HxIkTERERAUEQ4OnpCSCnW/HkyZO15Tw9PTF37lz873//g7W1NapVq4bvvvtO51offfQRatWqBQsLC1SvXh3Tp09HVlb+c5bk5/Hjx3jrrbfg5OQEc3Nz1KxZE2vXrgWQ071cEASdrul//PEHatasCblcjldffRXr16+HIAjabszr1q2Dra0tdu/ejdq1a8PCwgJ9+/ZFeno61q9fD09PT9jZ2eHdd9+FWq3WXnfjxo1o2rQprK2t4erqikGDBiEmJqbQ2H/++Wc0bNgQbm5u2n3x8fEYOHAg3NzcYGFhgfr16+Onn37SOa99+/aYMGECJkyYAIVCAUdHR0yfPh2i+N//tZ6enpgzZw4GDhwIKysreHp6YuXKlTrXSUxMxKhRo+Dk5AQbGxt06NABly5d0
h6fOXMm/Pz8sHHjRnh6ekKhUGDAgAFISUnRlklLS8PQoUNhZWUFpVKJRYsW5bnPjIwMvP/++3Bzc4OlpSWaN2+OI0eO6JRZt24dqlWrBgsLC/Tp0wfx8Xkniu3Rowf++OMPPHnypNDX9WUw6SYioheSO2YcQJ7E+9mZ163kMvi6KfBGk6qY1rUu1o3wx4lpHXF55mv4dWxLzO1TH8Nf8UTL6g55upcXJHeN8RHrTmPTyXBcfZiEbLWmxO6NiKiySz5wAA8nTUZ2VJTO/uzoaDycNLlUEu9ly5Zh9uzZqFq1KiIjI3HmzJkCyy5atAhNmzbFhQsXMG7cOIwdOxa3bt3SHre2tsa6detw/fp1LFu2DGvWrMGSJUv0jmX69Om4fv06/vzzT9y4cQOrVq0qsDt5aGgo+vbti969e+PSpUsYM2YMPv300zzl0tPTsXz5cmzduhX79u3DkSNH0KdPH+zduxd79+7Fxo0b8e2332L79u3ac7KysjBnzhxcunQJO3bsQFhYGIYPH15o7MeOHUPTpk119qlUKjRp0gR79uzB1atXMXr0aAwZMgSnT5/WKbd+/XrIZDKcPn0ay5Ytw+LFi/H999/rlFmwYAEaNmyI8+fP44MPPsDkyZNx8OBB7fF+/fohJiYGf/75J86dO4fGjRujY8eOSEhI0JYJCQnBjh07sHv3buzevRv//PMP5s+frz3+wQcf4J9//sHOnTtx4MABHDlyBOfPn9eJY8KECQgODsbWrVtx+fJl9OvXD4GBgbhz5w4A4NSpUxg5ciQmTJiAixcv4tVXX8Xnn3+e5/Vq2rQpsrOzcerUqUJf15fB7uVERPTCXmbmdRu5CZp42KOJh263wbjUDKw7HoqvD4cU+fxHb8fh6O04AIDcRIL6bgr4udvCz90OftVsUUVR+IRtao2IU/ficfdBAmqkSl96CTgiImMgqtWInjsvZy3JPAdFQBAQPXcerDt2LNGu5gqFAtbW1np13+7atSvGjRsHIKdVe8mSJTh8+DBq164NAPjss8+0ZT09PfH+++9j69at+PDDD/WKJSIiAo0aNdImr7mt7vn59ttvUbt2bSxYsAAAULt2bVy9ehVffPGFTrmsrCysWrUK3t7eAIC+ffti48aNiI6OhpWVFXx8fPDqq6/i8OHD6N+/P4CcMce5qlevjuXLl6NZs2ZITU2FlZVVvvGEh4fnSbrd3Nzw/vvva7cnTpyI/fv34+eff4a/v792v7u7O5YsWQJBEFC7dm1cuXIFS5Yswdtvv60t06pVK3z88ccQRRHVq1fHyZMnsWTJEnTu3Bn//vsvTp8+jZiYGJiZmQEAFi5ciB07dmD79u0YPXo0AECj0WDdunXaMedDhgzBoUOH8MUXXyA1NRU//PADNm3ahI4dOwLI+TKgatWqOvWzdu1aREREoEqVKgCA999/H/v27cPatWsxd+5cLFu2DIGBgdo6r1WrFk6cOIF9+/bpvDYWFhZQKBQIDw/P9/UsCUy6iYjopZT0zOuOVmZoVcNJr6T7WaosDc6EPcaZsMcAQgEATtZmT5PwnEeDqgpYy3Mm48k7SVsoJ2kjIgJyxnA/18KtQxSRHRWF9LPnYNncv+BypahBgwbanwVBgKurq063623btmH58uUICQlBamoqsrOzYWNjo/f1x44dizfeeAPnz5/Ha6+9ht69e+uMM3/WrVu30KxZM519zyayuSwsLLQJNwC4uLjA09NTJ3l2cXHRuY9z585h5syZuHTpEh4/fgyNJqdXV0REBHx8fPKN58mTJ5DLdYdxqdVqzJ07Fz///DMePnyIzMxMZGRkwMLCQqdcixYtdL6sbtmyJRYtWgS1Wg3p0y9YWrZsmeecZcuWAQAuXbqE1NRUODjoTgz75MkThIT89/+6p6enziRvSqVSe98hISHIzMxE8+bNtcft7e21X6gAwJUrV6BWq1GrVi2d58nIyNA+940bN9CnTx+d4y1btsyTdAOAubk50tPT8+wvKUy6iYjopZX0zOu548ULWmNcAOCikGP5AD9cfpCEi/cTcfF+Ih481h2PFZuSgYPXo3HwenTOeQJQw8kKjlZmCL6Xd1wXJ2kjIgKyY2NLtFxpeH42c0EQtAlpcHAw3nrrLcyaNQsBAQFQKBTYunVrvuOCC9KlSxeEh4dj7969OHjwIDp27Ijx48dj4cKFJRpzYfeRlpaGgIAABAQEYPPmzXByckJERAQCAgKQmZlZ4PM4Ojri8WPdiUoXLFiAZcuWYenSpahfvz4sLS0xefLkQq/zIlJTU6FUKvOMrQYAW1tb7c+F3be+zyOVSnHu3DntlwG5CuoBUJiEhAQ4OTkV+zx9MekmIqJyR581xmf28IG/lwP8vf5L9uNSM3DpaQKe+0hRZWuPiyJwJyYVd2JS831e8en1Z+26js4+ruxqTkSVkkzP5EPfcmXtxIkT8PDw0BlX/SJdh52cnDBs2DAMGzYMbdq0wQcffJBv0l27dm3s3btXZ19h49H1dfPmTcTHx2P+/Plwd3cHAJw9e7bI8xo1aoTr16/r7Dt+/Dh69eqFwYMHA8jp3n379u08reXPj2s+efIkatasqZPYnjx5Ms85devWBQA0btwYUVFRkMlkhXbJL4y3tzdMTExw6tQpVKtWDUDOxHa3b99Gu3bttPeoVqsRExNT4KzjdevWzfd+nhcSEgKVSoVGjRq9ULz64ERqRERULuWOF3d9bqZzV4W8wJZoRyszdKzrgqmv1cbGkc1xacZr+GtKOyzq1xBDWnigvpsCReXRuZO0fbHnOkLj0nRmbSUiqgwsmjaBzNU1p3tQfgQBMldXWDRtUraB6almzZqIiIjA1q1bERISguXLl+P3338v1jVmzJiBnTt34u7du7h27Rp2796tTSyfN2bMGNy8eRMfffQRbt++jZ9//hnr1q0DgELnFSlKtWrVYGpqihUrVuDevXv4448/MGfOnCLPCwgIQHBwsM4s6DVr1sTBgwdx4sQJ3LhxA2PGjEF0dHSecyMiIjBlyhTcunULP/30E1asWIFJkybplDl+/Di++uor3L59G6tWrcIvv/yiLdOpUye0bNkSvXv3xoEDBxAWFoYTJ07g008/1esLAyCnpXrkyJH44IMP8Pfff+Pq1asYPnw4JJL/UtdatWrhrbfewtChQ/Hbb78hNDQUp0+fxrx587Bnzx4AwLvvvot9+/Zh4cKFuHPnDr7++ut8u5YfO3YM1atX1+n6X9KYdBMRUbkV6KvEvx91wE9vt8CyAX746e0W+PejDnp3/ZZIBNRwtsIbTapiTm9f7JrYGl/1bVD0iQB+PB6GVxceQesvD+PjXy9j16VHSEgr2W54RETlkSCVwuWTaU83nksan267fDKtVNfrfhk9e/bEe++9hwkTJsDPzw8nTpzA9OnTi3UNU1NTTJs2DQ0aNEDbtm0hlUqxdevWfMt6eXlh+/bt+O2339CgQQOsWrVK28qeO5nYi3BycsK6devwyy+/wMfHB/Pnz9ere3uXLl0gk8nw119/afd99tlnaNy4MQICAtC+fXu4urqi
d+/eec4dOnQonjx5An9/f4wfPx6TJk3STn6Wa+rUqTh79iwaN26MefPmYdGiRQgICACQ8yXD3r170bZtW4wYMQK1atXCgAEDEB4eDhcXF73vfcGCBWjTpg169OiBTp06oXXr1mjSRPdLnrVr12Lo0KGYOnUqateujd69e+PMmTPa1vEWLVpgzZo1WLZsGRo2bIgDBw7oTLCX66efftKZKK40CGIl+wo/OTkZCoUCSUlJxZpMoaxpNBrExMTA2dlZ51sdqphYn8aF9VmxBYfEY+CavN3L9FWvig1a13REmxpOaOppB7lJ/n90qjViiU0uR/rj59O4sD5fjEqlQmhoKLy8vPJMqFUcyQcOIHruPJ1J1WSurnD5ZBpsXnut2NcTRRHZ2dmQyWQv1QJcEXzxxRdYvXo17t+/b5DnX7lyJf744w/s379f73Pat28PPz8/LF26tMAynp6emDx5MiZPnmwU9Xnt2jV06NABt2/fhkKhyLdMYZ8nfXNLjukmIqJKRZ9J2hysTDGilRdOhMThTNhjZGb/N7nLtUfJuPYoGd/+cw9mMgmaedqjdU1HtK7hCB+lDSQSIZ+Z0cGZ0YmowrF57TVYd+yYM5t5bCxkTk6waNqk3LZwG9I333yDZs2awcHBAcePH8eCBQswYcIEg8UzZswYJCYmIiUlRWeWcNIVGRmJDRs2FJhwlxQm3UREVKnoM0nb5719EeirxPhXa0CVpcaZsAT8eycO/96Nw7VHydryGdka/Hs3Zz8A2FmYwMvJEufDE/M8L2dGJ6KKSJBKDbYsWEVy584dfP7550hISEC1atUwdepUTJs2zWDxyGQynYnkKH+dOnUqk+dh9/Jyit2pjAvr07iwPo3Di7ZGx6dm4ERIvDYJf5j4pMCyzxOQMxHcvx91YFfzUsLPp3Fhfb6YkupeXtKMoTsy/aey1Ce7lxMREb2gQF8lOvu44tS9ONx9EIsaVZ3QvLpjkcmwg5UZejSsgh4Nq0AURYTFp+PfO7E4dicOx+7E4klWweuM5s6MfvJePFrVcCzhOyIiIqLyiEk3ERFVWlKJgBbVHVDdSg1nZwdIitn6LAgCvBwt4eVoiSEtPfH7+Qd47+dLRZ73zsaz6NXIDd3qV+EEa0REREaOSTcREVEJcVWY61UuJUONTScjsOlkBBytzNC1viu61leimScTcCIqORpNwT1viEg/JfE5YtJNRERUQoqaGR0A5DIJNKKITHVOibjUDGwIDseG4HA4WZuhq68rujWogqYedsVueSciAnLWmJZIJHj06BGcnJxgampaLsbcVpYxwJWFsdenKIrIzMxEbGwsJBIJTE1NX/haTLqJiIhKiD4zoy8d4IfWNZ3w980Y7Ln8CIdvxWqXJItNycD64HCsDw6Hs7UZutZXolsDJZpU003AuQY4ERVGIpHAy8sLkZGRePTokaHD0RJFERqNBhKJxCiTtMqmstSnhYUFqlWr9lKTOTLpJiIiKkGBvkqsGtw4z8zors/NjN6zYRX0bFgFqRnZOHQjGrsvR+Kf2/8l4DEpGVh3IgzrToTBxeZpAl5fiZjkDMzZwzXAiahwpqamqFatGrKzs6FWqw0dDoCcbrrx8fFwcHDgbPRGoDLUp1QqLZGWfCbdREREJSx3ZnR9WqOtzGTo5eeGXn5uSFFl4dCNGOy+HImjt2ORqc5JwKOTM7D2eBjWHg/L9/m4BjgR5UcQBJiYmMDExMTQoQDISdJMTEwgl8uNNkmrTFif+mPSTUREVAqkEgEtvR2KdY613AS9G7mhdyM3JKuycOhGNPY8bQHPUhc0SjynG7sAYNau6+js48qu5kREROUIv5IgIiIqh2zkJujTqCq+H9YMZz/rjLHtvAstn7sG+OnQ+LIJkIiIiPTCpJuIiKicU5iboI7SWq+y7227iA3BYUhRZZVyVERERKQPJt1EREQVgLO1XK9yUckZmLHzGlrMPYTpO67iTnRKKUdGREREhWHSTUREVAHkrgFe2GhtU+l/R9My1dh4MhydlxzFgO+CsfdKJLKeTsxGREREZYdJNxERUQWQuwY4gDyJt/D0sXxgI+x9tw0G+leDuYlUe/zkvQSM23werb/8G8v+uoOYZBWIiIiobDDpJiIiqiBy1wB3Veh2NXdVyLXLhflUscG81+vj5CcdMaO7D6o7WmrLRSdnYMlft/HK/L8xYct5nA5NgCjmnRVdrRERHBKPnRcfIjgkHmpNwTOnExERUeG4ZBgREVEFou8a4ApzE/yvtReGv+KJ4yFx2BAcjkM3oqERgWyNiN2XI7H7ciTquFpjSEsP9PZzg6WZDPuuRmLWruuITPqvNVypkCOohw/XACciInoBTLqJiIgqmOKsAS6RCGhT0wltajrhYeITbD4Zjm1n7iM+LRMAcDMqBZ/+fhXz995EU087HL4Vm+caUUkqjN10XtuaTkRERPpj93IiIqJKws3WHB8G1sGJaR2wtL8fGlez1R5LycjON+EGctYAB4BZu66zqzkREVExMekmIiKqZMxkUvRu5IbfxrXC7omt0b+pO0ykhc2LnpN4RyapcDo0oWyCJCIiMhJMuomIiCoxXzcFvuzbALN71dOrfEwKZz4nIiIqDibdREREBE8HK73KRSap8p3xnIiIiPLHpJuIiIjg72UPpUKeZw3w583/8yb6f3sSp+7Fl0lcREREFR2TbiIiIoJUIiCohw8AFJl4nw5LQP/vTmLID6dw6X5iqcdGRERUkTHpJiIiIgA5a4CvGtwYrgq5zn6lQo5vBjXG14MaobqTpXb/sTtx6LXyON7ecBY3o5LLOlwiIqIKget0ExERkVagrxKdfVxxOjQBMSkqOFvL4e9lD6kkp/07sJ4rdlx8hKV/3caDx08AAAevR+OvG9Ho3qAK3utUE9Wd9BsfTkREVBkw6SYiIiIdUomAlt4O+R6TSSXo26Qqejasgm1n7+Prv+8gOjkDogjsuvQIe69E4o3Gbni3Y01UtbMo48iJiIjKH3YvJyIiomIzlUkwpIUH/vngVXzWrS7sLU0BAGqNiJ/PPsCrC49gxs6riEnWXWJMrRERHBKPnRcfIjgkHmoNZ0InIiLjxpZuIiIiemFyEylGtamOAf7VsO54KL49eg8pqmxkqUVsCA7HtjP3MewVT7zTzhunQ+Mxa9d1RCb9l4grFXIE9fBBoK/SgHdBRERUetjSTURERC/NykyGCR1q4t8PO2DCqzVgYSoFAGRka/Dd0Xt4Zd4hvLPpvE7CDQBRSSqM3XQe+65GGiJsIiKiUsekm4iIiEqMwsIE7wfUxtEPX8Wo1l4wleX8qaHK1uRbPrdz+axd19nVnIiIjBKTbiIiIipxjlZm+Ky7D45+8Co61XUutKwIIDJJhdOhCWUTHBERURli0k1ERESlxlUhR4+GVfQqG5OiKroQERFRBcOkm4iIiEqVs7Vcr3JOVmalHAkREVHZY9JNREREpcrfyx5KhRxCEeUWH7yFuzEpZRITERFRWWHSTURERKVKKhEQ1MMHAApNvM+GJ6Lrsn+x9K/byMhWl01wREREpYxJNxE
REZW6QF8lVg1uDFeFbldzpUKOyR1rwsPBAgCQqdZg6V930G35vzgbxonViIio4pMZOgAiIiKqHAJ9lejs44rToQmISVHB2VoOfy97SCUC3mnvjWWH7uC7o/eg1oi4G5OKvquDMbhFNXwYWAc2chNDh09ERPRC2NJNREREZUYqEdDS2wG9/NzQ0tsBUklOh3O5iRQfBdbBrgmt0bCqQlt+08kIdF78D/ZfizJUyERERC/F4En3ypUr4enpCblcjubNm+P06dOFll+6dClq164Nc3NzuLu747333oNKxSVGiIiIjIFPFRv8Nq4Vpnf3gYWpFAAQnZyBMRvP4Z2N5xCdzP/ziYioYjFo0r1t2zZMmTIFQUFBOH/+PBo2bIiAgADExMTkW37Lli34+OOPERQUhBs3buCHH37Atm3b8Mknn5Rx5ERERFRapBIBI1t74cB7bdG+tpN2/75rUei0+B9sPhUOjUY0YIRERET6M2jSvXjxYrz99tsYMWIEfHx8sHr1alhYWODHH3/Mt/yJEyfQqlUrDBo0CJ6ennjttdcwcODAIlvHiYiIqOKpameBtcObYdkAPzhYmgIAUlTZ+PT3qxjw3UncjUk1cIRERERFM1jSnZmZiXPnzqFTp07/BSORoFOnTggODs73nFdeeQXnzp3TJtn37t3D3r170bVr1zKJmYiIiMqWIAjo5eeGv6a0Q98mVbX7T4cloOuyY1h+6A4yszUAALVGxMl78ThwMwEn78VDzdZwIiIqBww2e3lcXBzUajVcXFx09ru4uODmzZv5njNo0CDExcWhdevWEEUR2dnZeOeddwrtXp6RkYGMjAztdnJyMgBAo9FAo9GUwJ2UDo1GA1EUy3WMpD/Wp3FhfRoX1mfFoDCX4as36qNXQyU+23EN4QnpyFRrsPjgbey+9Ag9/apg08kIRGnHfIfC1UaOGd3rItDX1aCx04vj59O4sD6NC+sTet97hVoy7MiRI5g7dy6++eYbNG/eHHfv3sWkSZMwZ84cTJ8+Pd9z5s2bh1mzZuXZHxsbW64nYNNoNEhKSoIoipBIDD7fHb0k1qdxYX0aF9ZnxVLTBlg/sDZ+OPUIW85FQy0Ct2NSsfDA7Txlo5JVGLflAuZ1r45Xa9gZIFp6Wfx8GhfWp3FhfQIpKSl6lRNEUTRI36vMzExYWFhg+/bt6N27t3b/sGHDkJiYiJ07d+Y5p02bNmjRogUWLFig3bdp0yaMHj0aqamp+VZ2fi3d7u7uePz4MWxsbEr2pkqQRqNBbGwsnJycKu2b2JiwPo0L69O4sD4rruuPkvHRb5dx7VHBf/QIAFwVchz9oL12eTKqOPj5NC6sT+PC+szJLe3s7JCUlFRobmmwlm5TU1M0adIEhw4d0ibdGo0Ghw4dwoQJE/I9Jz09PU+FSqU5y4kU9N2BmZkZzMzM8uyXSCTl/s0hCEKFiJP0w/o0LqxP48L6rJh8q9piWpe6GPxDwROqigAik1Q4G56Ilt4OZRcclRh+Po0L69O4VPb61Pe+Ddq9fMqUKRg2bBiaNm0Kf39/LF26FGlpaRgxYgQAYOjQoXBzc8O8efMAAD169MDixYvRqFEjbffy6dOno0ePHtrkm4iIiCqP+LRMvcrFpJTfIWVERGTcDJp09+/fH7GxsZgxYwaioqLg5+eHffv2aSdXi4iI0Pn24LPPPoMgCPjss8/w8OFDODk5oUePHvjiiy8MdQtERERkQM7W8hItR0REVNIMPpHahAkTCuxOfuTIEZ1tmUyGoKAgBAUFlUFkREREVN75e9lDqZAjKkmFwiapOR0aj+Ze9pBwXDcREZWxytn5noiIiIyCVCIgqIcPgJxJ0wqy5K87GLn+DBLT9euOTkREVFKYdBMREVGFFuirxKrBjeGq0O1CrlTI0cuvCnIbtw/fikW35f/iyoMkA0RJRESVlcG7lxMRERG9rEBfJTr7uOLUvTjcfRCLGlWd0Ly6I6QSAf2auOPdrReQkJaJh4lP8MaqE5jZsx4G+rtDENjdnIiIShdbuomIiMgoSCUCWlR3wGt17NGiuoN2Xe7WNR2x593WaFTNFgCQqdbgk9+v4P1fLuNJptqAERMRUWXApJuIiIiMnlJhjm2jW2L4K57afb+ef4A+3xxHWFya4QIjIiKjx6SbiIiIKgVTmQQze9bD8oGNYGEqBQDcjEpBjxX/Yv+1KANHR0RExopJNxEREVUqPRtWwc7xreDtZAkASMnIxpiN5zDvzxvIVmsMHB0RERkbJt1ERERU6dR0scbOCa3RvYFSu+/bf+7hre9PISZFZcDIiIjI2DDpJiIiokrJykyGFQMbIaiHD2RPJ107FZqA7sv/xenQBANHR0RExoJJNxEREVVagiBgRCsvbBvTAq42Oet8x6RkYOCak/j+2D2IomjgCImIqKJj0k1ERESVXhMPe+x+tzVe8XYAAKg1Ij7fcwPjNp9HiioLao2I4JB47Lz4EMEh8VBrmIwTEZF+ZIYOgIiIiKg8cLQyw8aRzbH44C2sPBwCAPjzahTOhT+GRhQRl5qpLatUyBHUwweBvsqCLkdERASALd1EREREWlKJgA8C6uD7oU1hI89pm4hJydBJuAEgKkmFsZvOY9/VSEOESUREFQiTbiIiIqLndPJxwc7xrbUTrD0vt3P5rF3X2dWciIgKxaSbiIiIKB9RySpkF5JQiwAik1Sc6ZyIiArFpJuIiIgoH/qu1811vYmIqDBMuomIiIjy4WwtL9FyRERUOTHpJiIiIsqHv5c9lAo58h/VnUMiAHYWJmUWExERVTxMuomIiIjyIZUICOrhAwAFJt4aERiw5iQuRDwuu8CIiKhCYdJNREREVIBAXyVWDW4MV4VuF3IXGzO425sDABLTs/DW96dw9HasIUIkIqJyTmboAIiIiIjKs0BfJTr7uOJ0aAJiUlRwtpbD38se6ZnZGL3hHILvxSM9U42R689g8Zt+6NGwiqFDJiKicoQt3URERERFkEoEtPR2QC8/N7T0doBUIsBaboK1I5ohsJ4rACBLLeLdrRewMTjMsMESEVG5wqSbiIiI6AXJTaRY+VZjDGjmDgAQRWD6zmtY+tdtiGLBa3wTEVHlwaSbiIiI6CVIJQLmvV4f49p7a/ct/esOZv5xDRoNE28iosqOSTcRERHRSxIEAR8G1sFn3epq960PDsekbReRma0xYGRERGRoTLqJiIiISsioNtWxqF9DSCU5i4ztuvQIozacRXpmtoEjIyIiQ2HSTURERFSC3mhSFd8ObgIzWc6fWUdvx+Kt708hMT3TwJEREZEhMOkmIiIiKmGdfFywcWRzWMtzVme9EJGIfquDEZn0xMCRERFRWWPSTURERFQK/L3ssW10SzhZmwEA7sSkou+qYITEpho4MiIiKktMuomIiIhKiU8VG/z6ziuoZm8BAHiY+AT9VgfjyoMkA0dGRERlhUk3ERERUSmq5mCB7WNboq7SBgCQkJaJAd8F48TdOANHRkREZYFJNxEREVEpc7aWY+voFvD3tAcApGWqMXztGfx5JRJqjYjgkHjsvPgQwS
HxUHNtbyIioyIzdABERERElYHC3AQbRvpjwpbz+OtGDDLVGozbfB425iZIepKlLadUyBHUwweBvkoDRktERCWFLd1EREREZURuIsXqwU3wRuOqAAAR0Em4ASAqSYWxm85j39VIA0RIREQljUk3ERERURmSSSWY/3p9WJpK8z2e27l81q7r7GpORGQEmHQTERERlbGz4Y+Rlqku8LgIIDJJhdOhCWUXFBERlQom3URERERlLCZFVaLliIio/GLSTURERFTGnK3lJVqOiIjKLybdRERERGXM38seSoUcQiFlbOQy+HvZl1lMRERUOph0ExEREZUxqURAUA8fACgw8U5WZePXcw/KLigiIioVTLqJiIiIDCDQV4lVgxvDVaHbhdzKTKb9+ePfLmPvFS4dRkRUkcmKLkJEREREpSHQV4nOPq44HZqAmBQVnK3laOZphy/23sDa42HQiMCkrRdgYSpF+9rOhg6XiIheAJNuIiIiIgOSSgS09HbQ2Te9mw9SVdn45dwDZKlFvLPpHDb8rznHeBMRVUDsXk5ERERUzkgkAua9Xh9dfF0BAKosDUauO4OrD5MMHBkRERUXk24iIiKickgmlWDpAD+0reUEAEjJyMbQH0/jbkyKgSMjIqLiYNJNREREVE6ZyaRYPbgxmnrYAQAS0jIx+PvTuJ+QbuDIiIhIX0y6iYiIiMoxC1MZfhzRDPWq2AAAopJVGPzDKcQkqwwcGRER6YNJNxEREVE5ZyM3wYb/+cPbyRIAEB6fjiE/nEZieqaBIyMioqIw6SYiIiKqAByszLBpVHO42ZoDAG5Fp2DY2jNIzcg2cGRERFQYJt1EREREFYRSYY7No5rD0coMAHDpfiLeXn8Wqiy1gSMjIqKCMOkmIiIiqkA8HS2xaZQ/FOYmAIDge/GYsOU8stQaA0dGRET5YdJNREREVMHUcbXBuhHNYGEqBQD8dSMG7/9yCRqNaODIiIjoeUy6iYiIiCqgRtXs8P2wpjCV5fw5t/PiI0zfeRWiyMSbiKg8YdJNREREVEG94u2IlYMaQyoRAACbT0Xgy323DBwVERE9i0k3ERERUQXW2ccFi99sCCEn78bqf0LwzZG7hg2KiIi0mHQTERERVXC9/Nwwp5evdvurfbewMTjMcAEREZGWzNABEBEREdHLG9zCAymqbHy57yYAYPrOa7CSy9CzoRtOhyYgJkUFZ2s5/L3std3RiYio9DHpJiIiIjISY9t7I0WVhW+OhAAApv58CbN3Xcfj9CxtGaVCjqAePgj0VRoqTCKiSoXdy4mIiIiMyAcBtTGkhQcAQCNCJ+EGgKgkFcZuOo99VyMNER4RUaXDpJuIiIjIiAiCgBndfSA3yf/PvNwFxWbtug411/UmIip1TLqJiIiIjMzZ8MdQZWkKPC4CiExS4XRoQtkFRURUSTHpJiIiIjIyMSmqEi1HREQvjkk3ERERkZFxtpaXaDkiInpxBk+6V65cCU9PT8jlcjRv3hynT58utHxiYiLGjx8PpVIJMzMz1KpVC3v37i2jaImIiIjKP38veygVchS2MJhSkbN8GBERla4XTrrv3r2L/fv348mTJwAAUSz+RBzbtm3DlClTEBQUhPPnz6Nhw4YICAhATExMvuUzMzPRuXNnhIWFYfv27bh16xbWrFkDNze3F70NIiIiIqMjlQgI6uEDAAUm3u1qOXG9biKiMlDspDs+Ph6dOnVCrVq10LVrV0RG5iw3MXLkSEydOrVY11q8eDHefvttjBgxAj4+Pli9ejUsLCzw448/5lv+xx9/REJCAnbs2IFWrVrB09MT7dq1Q8OGDYt7G0RERERGLdBXiVWDG8NVkX8X8l/OPcDR27FlHBURUeVT7KT7vffeg0wmQ0REBCwsLLT7+/fvj3379ul9nczMTJw7dw6dOnX6LxiJBJ06dUJwcHC+5/zxxx9o2bIlxo8fDxcXF/j6+mLu3LlQq9XFvQ0iIiIioxfoq8S/H3XAT2+3wLIBfvjp7RYY3bY6AECtETF+83ncjUkxcJRERMZNVtwTDhw4gP3796Nq1ao6+2vWrInw8HC9rxMXFwe1Wg0XFxed/S4uLrh582a+59y7dw9///033nrrLezduxd3797FuHHjkJWVhaCgoHzPycjIQEZGhnY7OTkZAKDRaKDRFLyUhqFpNBqIoliuYyT9sT6NC+vTuLA+jQvrMy8BQHMvO+12Mw9bhMWl4cD1aKRkZON/687gt7GvwN7S1HBBFoD1aVxYn8aF9Qm9773YSXdaWppOC3euhIQEmJmZFfdyxaLRaODs7IzvvvsOUqkUTZo0wcOHD7FgwYICk+558+Zh1qxZefbHxsZCpSq/y2RoNBokJSVBFEVIJAaf745eEuvTuLA+jQvr07iwPvXzcXslwmKTcTv2CSISnuDtdaew/PWaMJGWr9eM9WlcWJ/GhfUJpKTo11Oo2El3mzZtsGHDBsyZMwcAIAgCNBoNvvrqK7z66qt6X8fR0RFSqRTR0dE6+6Ojo+Hq6prvOUqlEiYmJpBKpdp9devWRVRUFDIzM2Fqmvcb2mnTpmHKlCna7eTkZLi7u8PJyQk2NjZ6x1vWNBoNBEGAk5NTpX0TGxPWp3FhfRoX1qdxYX3q78cRtuizKhixKRm48DAVy0/EYv7rvhCE8jO5GuvTuLA+jQvrE5DL9Vt2sdhJ91dffYWOHTvi7NmzyMzMxIcffohr164hISEBx48f1/s6pqamaNKkCQ4dOoTevXsDyKm4Q4cOYcKECfme06pVK2zZsgUajUZbsbdv34ZSqcw34QYAMzOzfFvgJRJJuX9zCIJQIeIk/bA+jQvr07iwPo0L61M/Ve0tsWZoU/T/NhgZ2Rr8cu4BarpYYXRbb0OHpoP1aVxYn8alstenvvdd7FfH19cXt2/fRuvWrdGrVy+kpaXh9ddfx4ULF+DtXbxf0lOmTMGaNWuwfv163LhxA2PHjkVaWhpGjBgBABg6dCimTZumLT927FgkJCRg0qRJuH37Nvbs2YO5c+di/Pjxxb0NIiIiokrPz90WC/v9twrMvD9v4uD16ELOICKi4ip2SzcAKBQKfPrppy/95P3790dsbCxmzJiBqKgo+Pn5Yd++fdrJ1SIiInS+PXB3d8f+/fvx3nvvoUGDBnBzc8OkSZPw0UcfvXQsRERERJVRj4ZVEBKbiqV/3YEoApO2XsD2d16BT5XyOwyPiKgiKXbSffTo0UKPt23btljXmzBhQoHdyY8cOZJnX8uWLXHy5MliPQcRERERFWxSx5oIiU3DrkuPkJ6pxqj1Z7BjQis4W+s3XpGIiApW7KS7ffv2efY9O+EG18wmIiIiqlgEQcCCvg0QkZCOS/cT8ShJhTEbz+Gnt1tAbiIt+gJERFSgYo/pfvz4sc4jJiYG+/btQ7NmzXDgwIHSiJGIiIiISpncRIo1Q5pAqchp3b4QkYgPt1+GKIoGjoyIqGIrdku3QqHIs69z584wNTXFlClTcO7cuRIJjIiIiIjKlrONHN8Pa4p+q4ORnqnGH5ceoYazFd7tWNPQoRERVVglNre7i
4sLbt26VVKXIyIiIiIDqFdFgaX9/ZA7enDxwdvYffmRYYMiIqrAit3SffnyZZ1tURQRGRmJ+fPnw8/Pr6TiIiIiIiIDea2eKz4KrIP5f94EAEz9+RLc7SzQ0N3WsIEREVVAxU66/fz8IAhCnvE9LVq0wI8//lhigRERERGR4YxpWx13olPx6/kHyMjW4O0NZ7FzQisoFeaGDo2IqEIpdtIdGhqqsy2RSODk5AS5nEtKEBERERkLQRAw93Vf3E9Ix+mwBMSkZGDU+rP45Z2WsDAt9p+QRESVVrHHdHt4eOg83N3dmXATERERGSEzmRSrhzSBu31O6/a1R8mYsu0SNBrOaE5EpC+9vqZcvny53hd89913XzgYIiIiIipf7C1N8eOwZnj9mxNIycjGvmtRWHjgFj4MrGPo0IiIKgS9ku4lS5bodTFBEJh0ExERERmZmi7WWDGoEf637gw0IvDNkRB4O1nhjSZVDR0aEVG5p1fS/fw4biIiIiKqXNrXdsb07j6Ytes6AGDab1dQ1c4cGhGISVHB2VoOfy97SCWCgSMlIipfOAsGEREREell+CueuBuTis2nIpCp1mDgmpN4dni3UiFHUA8fBPoqDRckEVE580JJ94MHD/DHH38gIiICmZmZOscWL15cIoERERERUfkiCAJm9qyHc+EJuBmViufnU4tKUmHspvNYNbgxE28ioqeKnXQfOnQIPXv2RPXq1XHz5k34+voiLCwMoiiicePGpREjEREREZUTEkHA4/SsfI+JAAQAs3ZdR2cfV3Y1JyLCCywZNm3aNLz//vu4cuUK5HI5fv31V9y/fx/t2rVDv379SiNGIiIiIionTocmIDo5o8DjIoDIJBVOhyaUXVBEROVYsZPuGzduYOjQoQAAmUyGJ0+ewMrKCrNnz8aXX35Z4gESERERUfkRk6Iq0XJERMau2Em3paWldhy3UqlESEiI9lhcXFzJRUZERERE5Y6ztbxEyxERGbtij+lu0aIF/v33X9StWxddu3bF1KlTceXKFfz2229o0aJFacRIREREROWEv5c9lAo5opJUEAso42qTs3wYERG9QEv34sWL0bx5cwDArFmz0LFjR2zbtg2enp744YcfSjxAIiIiIio/pBIBQT18AORMmpafag4W4BxqREQ5it3SXb16de3PlpaWWL16dYkGRERERETlW6CvEqsGN8asXdcRmfTf2G0BOROpnQ5NwMaT4Rja0tNQIRIRlRvFTrpHjRqFwYMHo3379qUQDhERERFVBIG+SnT2ccXp0ATEpKjgbC1HfGoGJvx0AQAwe9d11KtigyYe7GZORJVbsbuXx8bGIjAwEO7u7vjggw9w6dKl0oiLiIiIiMo5qURAS28H9PJzQ0tvB3RvWAWj2+b0iszWiBi3+TxiUwpeXoyIqDIodtK9c+dOREZGYvr06Thz5gwaN26MevXqYe7cuQgLCyuFEImIiIioovgwoDZaVM9p3Y5OzsCELeeRrdYYOCoiIsMpdtINAHZ2dhg9ejSOHDmC8PBwDB8+HBs3bkSNGjVKOj4iIiIiqkBkUglWDGwMFxszAMCp0AR8tf+WgaMiIjKcF0q6c2VlZeHs2bM4deoUwsLC4OLiUlJxEREREVEF5WRthm/eagITac4U5t8dvYc9lyMNHBURkWG8UNJ9+PBhvP3223BxccHw4cNhY2OD3bt348GDByUdHxERERFVQE087DC9u492+4Ptl3A3JsWAERERGUaxk243Nzd07doVcXFx+O677xAdHY0ff/wRHTt2hCBwQUYiIiIiyjGkhQf6NHIDAKRnqjF64zmkqLIMHBURUdkqdtI9c+ZMREZG4vfff0ffvn1hZmZWGnERERERUQUnCALm9qmPOq7WAIB7sWn4cPtliKJo4MiIiMpOsZPut99+G7a2tqUQChEREREZG3NTKb4d0gTWchkA4M+rUVhz7J6BoyIiKjsvNZEaEREREVFRPBwssbS/n3Z7/p83cSIkznABERGVISbdRERERFTqOtZ1wbsdcpaX1YjAxC0XEJn0xMBRERGVPibdRERERFQmJnWqhba1nAAA8WmZGLf5PDKzNQaOioiodDHpJiIiIqIyIZUIWNbfD2625gCACxGJ+HzPdQNHRURUumQveuL169cRERGBzMxMnf09e/Z86aCIiIiIyDjZWZpi9eAmeGP1CWRma7AhOBx+7rZ4vXFVQ4dGRFQqip1037t3D3369MGVK1cgCIJ2yYfcNbrVanXJRkhERERERqV+VQU+7+WLD3+9DAD45PcrqONqA58qNgaOjIio5BW7e/mkSZPg5eWFmJgYWFhY4Nq1azh69CiaNm2KI0eOlEKIRERERGRs3mzmjoH+7gAAVZYG72w6h6T0LANHRURU8oqddAcHB2P27NlwdHSERCKBRCJB69atMW/ePLz77rulESMRERERGaGgHvXQoKoCABCRkI4pP1+ERiMaOCoiopJV7KRbrVbD2toaAODo6IhHjx4BADw8PHDr1q2SjY6IiIiIjJbcRIpv3moMOwsTAMChmzFYefiugaMiIipZxU66fX19cenSJQBA8+bN8dVXX+H48eOYPXs2qlevXuIBEhEREZHxqmpngeUDG+Hp9EBY/Ndt/HM71rBBERGVoGIn3Z999hk0mpz1FGfPno3Q0FC0adMGe/fuxfLly0s8QCIiIiIybm1qOuH912oDAEQRmLT1AsLi0nDyXjwO3EzAyXvxULPbORFVUMWevTwgIED7c40aNXDz5k0kJCTAzs5OO4M5EREREVFxjG3njQsRifjrRjQS07PQafE/yNYm2qFQKuQI6uGDQF+lQeMkIiquYrd058fe3p4JNxERERG9MIlEwOL+DeFkZQoAzyTcOaKSVBi76Tz2XY00RHhERC+s2C3dKpUKK1aswOHDhxETE6Ptap7r/PnzJRYcEREREVUelqYyFNSJXAQgAJi16zo6+7hCKmGDDxFVDMVOukeOHIkDBw6gb9++8Pf3Zws3EREREZWI06EJiEvNLPC4CCAySYXToQlo6e1QdoEREb2EYifdu3fvxt69e9GqVavSiIeIiIiIKqmYFFWJliMiKg+KPabbzc1Nu043EREREVFJcbaWl2g5IqLyoNhJ96JFi/DRRx8hPDy8NOIhIiIiokrK38seSoUchQ1eVCrk8PeyL7OYiIheVrGT7qZNm0KlUqF69eqwtraGvb29zoOIiIiI6EVIJQKCevgAQIGJ94Bm1TiJGhFVKMUe0z1w4EA8fPgQc+fOhYuLCydSIyIiIqISE+irxKrBjTFr13VEJuUdu73xZDgG+rvD2YZdzImoYih20n3ixAkEBwejYcOGpREPEREREVVygb5KdPZxxal7cbj7IBbVqzhi9dF7+PduPOJSMzDxpwvYPKo5ZNJid9okIipzxf5NVadOHTx58qQ0YiEiIiIiApDT1bxFdQe8Vscer9RwxLIBjeD6tHX7VGgCFh+8beAIiYj0U+yke/78+Zg6dSqOHDmC+Ph4JCcn6zyIiIiIiEqag5UZVr7VCLKn47m/ORKCQzei
DRwVEVHRip10BwYGIjg4GB07doSzszPs7OxgZ2cHW1tb2NnZlUaMRERERERo4mGPj7vU0W6/t+0i7iekGzAiIqKiFXtM9+HDh0sjDiIiIiKiIo1s7YWzYY+x71oUklXZGL/lPH55pyXMZFJDh0ZElK9iJ93t2rUrjTiIiIiIiIokCAK+6tcAN6OSERafjssPkvD57huY09vX0KEREeWr2En35cuX890vCALkcjmqVasGMzOzlw6MiIiIiCg/NnITrHyrMV7/5gQysjXYeDIcTT3t0MvPzdChERHlUeyk28/Pr9C1uU1MTNC/f398++23kMu5fiIRERERlbx6VRSY08sXH/6a0yA07bcrqFfFBjWcrQ0cGRGRrmJPpPb777+jZs2a+O6773Dx4kVcvHgR3333HWrXro0tW7bghx9+wN9//43PPvusNOIlIiIiIgIAvNnMHX2bVAUApGeq8c6m80jLyDZwVEREuord0v3FF19g2bJlCAgI0O6rX78+qlatiunTp+P06dOwtLTE1KlTsXDhwhINloiIiIjoWXN6+eLqwyTcjErB3ZhUfPr7FSzpX3jPTCKislTslu4rV67Aw8Mjz34PDw9cuXIFQE4X9MjIyJePjoiIiIioEOamUnzzVmNYmeW0Je24+AibT0UYOCoiov8UO+muU6cO5s+fj8zMTO2+rKwszJ8/H3Xq5Kyb+PDhQ7i4uJRclEREREREBajuZIWv+jbQbs/edR1XHiQZMCIiov8Uu3v5ypUr0bNnT1StWhUNGuT8crty5QrUajV2794NALh37x7GjRtXspESERERERWga30lRrTyxNrjYchUazB28znsmdgGCgsTQ4dGRJVcsZPuV155BaGhodi8eTNu374NAOjXrx8GDRoEa+uc2SKHDBlSslESERERERVhWpe6uHg/ERciEvHg8RNM+fki1gxtComE47uJyHCK3b0cAKytrfHOO+9g8eLFWLx4McaMGaNNuF/EypUr4enpCblcjubNm+P06dN6nbd161YIgoDevXu/8HMTERERkXEwlUmwclBj2D1t3T50MwbfHr1n4KiIqLLTq6X7jz/+QJcuXWBiYoI//vij0LI9e/YsVgDbtm3DlClTsHr1ajRv3hxLly5FQEAAbt26BWdn5wLPCwsLw/vvv482bdoU6/mIiIiIyHhVsTXHkv5+GLHuDEQRWHjgFhpVs0WL6g6GDo2IKim9ku7evXsjKioKzs7OhbYqC4IAtVpdrAAWL16Mt99+GyNGjAAArF69Gnv27MGPP/6Ijz/+ON9z1Go13nrrLcyaNQvHjh1DYmJisZ6TiIiIiIxX+9rOmNihJpYfugO1RsTEny5gz7ut4WwtN3RoRFQJ6dW9XKPRaFudNRpNgY/iJtyZmZk4d+4cOnXq9F9AEgk6deqE4ODgAs+bPXs2nJ2dMXLkyGI9HxERERFVDpM61kTrGo4AgNiUDLz70wVkqzUGjoqIKqNiT6RWkuLi4qBWq/MsL+bi4oKbN2/me86///6LH374ARcvXtTrOTIyMpCRkaHdTk5OBvDflwfllUajgSiK5TpG0h/r07iwPo0L69O4sD6Ny8vUpwBg8ZsN0OPr44hOzsDJewlYfPA23n+tVskHSnrh59O4sD6h973rnXQHBwcjPj4e3bt31+7bsGEDgoKCkJaWht69e2PFihUwMzMrfrR6SklJwZAhQ7BmzRo4Ojrqdc68efMwa9asPPtjY2OhUqlKOsQSo9FokJSUBFEUIZG80Hx3VI6wPo0L69O4sD6NC+vTuJREfc4O9MS4X25BLQLfHAmBt0JAKy9FCUdK+uDn07iwPnPyU33onXTPnj0b7du31ybdV65cwciRIzF8+HDUrVsXCxYsQJUqVTBz5ky9g3R0dIRUKkV0dLTO/ujoaLi6uuYpHxISgrCwMPTo0UO7L/fbBZlMhlu3bsHb21vnnGnTpmHKlCna7eTkZLi7u8PJyQk2NjZ6x1rWNBoNBEGAk5NTpX0TGxPWp3FhfRoX1qdxYX0al5Koz87OzvgoRcDcP3N6Uc4+EI5dE15BVTuLkgyV9MDPp3FhfQJyuX7zROiddF+8eBFz5szRbm/duhXNmzfHmjVrAADu7u4ICgoqVtJtamqKJk2a4NChQ9oJ2jQaDQ4dOoQJEybkKV+nTh1cuXJFZ99nn32GlJQULFu2DO7u7nnOMTMzy7f1XSKRlPs3hyAIFSJO0g/r07iwPo0L69O4sD6NS0nU59ttq+Ns+GMcuB6NpCdZmLDlAt4PqI3H6VlwtpbD38seUq7lXSb4+TQulb0+9b1vvZPux48f64y9/ueff9ClSxftdrNmzXD//v1ihJhjypQpGDZsGJo2bQp/f38sXboUaWlp2tnMhw4dCjc3N8ybNw9yuRy+vr4659va2gJAnv1EREREREBOYrCgX0Pc+vpfhMen4/LDZAz98Yz2uFIhR1APHwT6Kg0YJREZK72/knBxcUFoaCiAnFnHz58/jxYtWmiPp6SkwMTEpNgB9O/fHwsXLsSMGTPg5+eHixcvYt++fdoEPyIiApGRkcW+LhERERFRLoW5Cd5qXi3fY1FJKozddB77rvJvTiIqeXq3dHft2hUff/wxvvzyS+zYsQMWFhZo06aN9vjly5fzjKfW14QJE/LtTg4AR44cKfTcdevWvdBzEhEREVHlodaIWHs8LN9jInJmO5+16zo6+7iyqzkRlSi9W7rnzJkDmUyGdu3aYc2aNVizZg1MTU21x3/88Ue89tprpRIkEREREdHLOB2agMikgleuEQFEJqlwOjSh7IIiokpB75ZuR0dHHD16FElJSbCysoJUKtU5/ssvv8DKyqrEAyQiIiIielkxKfotFatvOSIifemddOdSKPJf19De3v6lgyEiIiIiKg3O1vot7aNvOSIifVXOud2JiIiIqFLx97KHUiFHYaO17S1N4e/FhiQiKllMuomIiIjI6EklAoJ6+ABAgYm3KkuNR4lPyi4oIqoUmHQTERERUaUQ6KvEqsGN4arQ7UJuJsv5kzg9U40xG8/hSabaEOERkZEq9phuIiIiIqKKKtBXic4+rjgdmoCYFBWcreWoo7TG69+cQGhcGq5HJuPT369g0ZsNIQhcOoyIXh5buomIiIioUpFKBLT0dkAvPze09HaAnYUpvh3SBBamOavz/HbhITYEhxs4SiIyFky6iYiIiKjSq+VijQV9G2q35+y+jjNhXLObiF4ek24iIiIiIgDdGigxpm11AEC2RsS4zecRncx1u4no5TDpJiIiIiJ66oOA2mhVwwEAEJuSgXGbzyMzW2PgqIioImPSTURERET0lEwqwfIBjeBmaw4AOBf+GHN2XzdwVERUkTHpJiIiIiJ6hoOVGVYNbgzTp0uJbTwZjl/O3jdwVERUUTHpJiIiIiJ6ToOqtvi8t692+9MdV3H1YZIBIyKiiopJNxERERFRPt5s6o7BLaoBADKzNRiz8RwS0jINHBURVTRMuomIiIiICjCjez00qmYLAHiY+AQTfzqPbDUnViMi/THpJiIiIiIqgKlMgtW
Dm8DRygwAcPxuPBYeuG3gqIioImHSTURERERUCBcbOb55qzFkEgEAsPqfEPx5JdLAURFRRcGkm4iIiIioCP5e9visW13t9vu/XMKd6BQDRkREFQWTbiIiIiIiPQx7xRN9GrkBANIy1Riz8RySVVkGjoqIyjsm3UREREREehAEAXP71IeP0gYAcC8uDVN/vgSNRjRwZERUnjHpJiIiIiLSk7mpFN8OaQKFuQkA4OD1aHxz5K6BoyKi8oxJNxERERFRMbjbW2D5wEYQcuZVw6KDt3H4VoxhgyKicotJNxERERFRMbWr5YT3X6sNABBFYNJPFxAen2bgqIioPGLSTURERET0Asa190ZAPRcAQLIqG2M2nsOTTLWBoyKi8oZJNxERERHRCxAEAQv7NUR1J0sAwM2oFHz06yUEh8Rh58WHCA6Jh5qTrBFVejJDB0BEREREVFFZy03w3ZAm6PX1caRlqvHHpUj8cSlSe1ypkCOohw8CfZUGjJKIDIkt3UREREREL6GGszUGt/DI91hUkgpjN53HvquR+R4nIuPHpJuIiIiI6CWoNSL+uPQo32O5nctn7brOruZElRSTbiIiIiKil3A6NAGRSaoCj4sAIpNUOB2aUHZBEVG5waSbiIiIiOglxKQUnHC/SDkiMi5MuomIiIiIXoKztbxEyxGRcWHSTURERET0Evy97KFUyCEUUsZGLoO/l32ZxURE5QeTbiIiIiKilyCVCAjq4QMABSbeyapsHLoRXXZBEVG5waSbiIiIiOglBfoqsWpwY7gqdLuQW5nJtD+/u/UCLt1PLOPIiMjQZEUXISIiIiKiogT6KtHZxxWnQxMQk6KCs7UczTzt8MH2y/j9wkOosjQYuf4Mfh/XCu72FoYOl4jKCFu6iYiIiIhKiFQioKW3A3r5uaGltwNkUgnmv1EfzZ+O545LzcSIdWeQlJ5l4EiJqKww6SYiIiIiKkVmMim+G9IU3k6WAIC7MakYs+ksMrM1Bo6MiMoCk24iIiIiolKmsDDBuhH+cLQyBQCcvJeAj3+9DFEUDRwZEZU2Jt1ERERERGXA3d4Ca4Y2hZks50/w3y48xNK/7hg4KiIqbUy6iYiIiIjKSKNqdlg2wA/C07XFlh26g+3nHhg2KCIqVUy6iYiIiIjKUKCvEp92ravd/vjXyzhxN86AERFRaWLSTURERERUxka29sKwlh4AgGyNiDGbzuFOdIqBoyKi0sCkm4iIiIiojAmCgBk96qFTXWcAQIoqG8PXnkFMisrAkRFRSWPSTURERERkAFKJgOUDG6G+mwIA8DDxCUatP4v0zGwDR0ZEJYlJNxERERGRgViYyvDDsKZwszUHAFx+kIR3f7oItYZLiREZCybdREREREQG5Gwjx4/Dm8HaTAYA+OtGNObsvm7gqIiopDDpJiIiIiIysNqu1lg1uAlkkpy1xNadCMOP/4YaOCoiKglMuomIiIiIyoHWNR0x9/X62u05e65j/7UoA0ZERCWBSTcRERERUTnxZlN3vNuhBgBAFIFJWy/g4v1EwwZFRC+FSTcRERERUTnyXuda6O1XBQCgytJg1PozuJ+QbuCoiOhFMekmIiIiIipHBEHAl30bwN/LHgAQl5qJEevOICk9y8CREdGLYNJNRERERFTOmMmk+G5IE1R3sgQA3I1JxZhNZ/EkU43gkHjsvPgQwSHxXFqMqAKQGToAIiIiIiLKy9bCFOuG+6PPN8cRn5aJk/cS0HjOATzJ0mjLKBVyBPXwQaCv0oCRElFh2NJNRERERFROVXOwwPfDmmqXEns24QaAqCQVxm46j31XIw0RHhHpgUk3EREREVE51qCqLazk+XdQze1cPmvXdXY1JyqnmHQTEREREZVjp0MTkFjIJGoigMgkFU6HJpRdUESkNybdRERERETlWEyKqkTLEVHZYtJNRERERFSOOVvLS7QcEZUtJt1EREREROWYv5c9lAo5hELK2JqbaNf1JqLyhUk3EREREVE5JpUICOrhAwAFJt6JT7Lw6/kHZRcUEemNSTcRERERUTkX6KvEqsGN4arQ7UJuYSrV/vzRr5ex7UxEWYdGREXIf+0BIiIiIiIqVwJ9lejs44rToQmISVHB2VqOZp52+GLvDaw9HgZRBD769Qo0IjDQv5qhwyWip8pFS/fKlSvh6ekJuVyO5s2b4/Tp0wWWXbNmDdq0aQM7OzvY2dmhU6dOhZYnIiIiIjIWUomAlt4O6OXnhpbeDpBJJZjR3QcjW3tpy0z77Qo2nwo3YJRE9CyDJ93btm3DlClTEBQUhPPnz6Nhw4YICAhATExMvuWPHDmCgQMH4vDhwwgODoa7uztee+01PHz4sIwjJyIiIiIyPEEQ8Fm3uhjdtrp236e/X8XGk0y8icoDgyfdixcvxttvv40RI0bAx8cHq1evhoWFBX788cd8y2/evBnjxo2Dn58f6tSpg++//x4ajQaHDh0q48iJiIiIiMoHQRAwrUsdjGn3X+I9fcdVbAgOM1xQRATAwGO6MzMzce7cOUybNk27TyKRoFOnTggODtbrGunp6cjKyoK9ff5LJGRkZCAjI0O7nZycDADQaDTQaDQvEX3p0mg0EEWxXMdI+mN9GhfWp3FhfRoX1qdxYX0W34ev1YIAYPU/9wAAM3Zeg1qtwbBXPA0aF8D6NDasT+h97wZNuuPi4qBWq+Hi4qKz38XFBTdv3tTrGh999BGqVKmCTp065Xt83rx5mDVrVp79sbGxUKlUxQ+6jGg0GiQlJUEURUgkBu+QQC+J9WlcWJ/GhfVpXFifxoX1+WKG+dlC9cQV605HAQBm7b6BpOQUDGjsUsSZpYv1aVxYn0BKSope5Sr07OXz58/H1q1bceTIEcjl8nzLTJs2DVOmTNFuJycnw93dHU5OTrCxsSmrUItNo9FAEAQ4OTlV2jexMWF9GhfWp3FhfRoX1qdxYX2+uOm9nGFteQcrDocAAJYefQALKyuMembCtbLG+jQurE8UmIM+z6BJt6OjI6RSKaKjo3X2R0dHw9XVtdBzFy5ciPnz5+Ovv/5CgwYNCixnZmYGMzOzPPslEkm5f3MIglAh4iT9sD6NC+vTuLA+jQvr07iwPl/c1IA6kEgkWHboDgBg7t6cnqSj23obLCbWp3Gp7PWp730b9NUxNTVFkyZNdCZBy50UrWXLlgWe99VXX2HOnDnYt28fmjZtWhahEhERERFVOO91roX3OtXSbs/dexOr/wkxYERElY/Bu5dPmTIFw4YNQ9OmTeHv74+lS5ciLS0NI0aMAAAMHToUbm5umDdvHgDgyy+/xIwZM7BlyxZ4enoiKipnrIqVlRWsrKwMdh9EREREROXRpE41IRGARQdvAwDm/3kTao2I8a/WMHBkRJWDwZPu/v37IzY2FjNmzEBUVBT8/Pywb98+7eRqEREROs32q1atQmZmJvr27atznaCgIMycObMsQyciIiIiqhAmdqwJiUTAgv23AAAL9t+CKIqY0KGmgSMjMn4GT7oBYMKECZgwYUK+x44cOaKzHRYWVvoBEREREREZmfGv1oBEEPDlvp
yx3QsP3IZGBN7tyMSbqDRVzhHvRERERESV0Nj23pjWpY52e/HB21j6120DRkRk/MpFSzcREREREZWNMe28IREEfLH3BgBg6V93clq8O9TAmbDHiElRwdlaDn8ve0glgoGjJar4mHQTEREREVUyb7etDkEAPt+Tk3gvP3QHP/57D6kZam0ZpUKOoB4+CPRVGipMIqPA7uVERERERJXQqDbVEdTDR7v9bMINAFFJKozddB77rkaWdWhERoVJNxERERFRJTW0pSds5Pl3fhWf/jtr13WoNWK+ZYioaEy6iYiIiIgqqdOhCUhWZRd4XAQQmaTC6dCEsguKyMgw6SYiIiIiqqRiUlQlWo6I8mLSTURERERUSTlby0u0HBHlxaSbiIiIiKiS8veyh1IhR1ELg50NT4Aoclw30Ytg0k1EREREVElJJYJ2BvPCEu9FB25j/JbzSMsoePw3EeWPSTcRERERUSUW6KvEqsGN4arQ7UKuVMjRs4ESwtNsfO+VKLz+zQmEx6cZIEqiiiv/9QGIiIiIiKjSCPRVorOPK06HJiAmRQVnazn8vewhlQjodSMak7deREpGNm5Fp6DHin+xYlBjtKvlZOiwiSoEtnQTERERERGkEgEtvR3Qy88NLb0dIJXkNHF3rOuCHRNawdvJEgCQrMrGiLWnsepICMd5E+mBSTcRERERERXK28kKO8a3Qqe6LgAAjQh8ue8mJvx0AemZHOdNVBgm3UREREREVCRruQm+G9IEkzvV1O7bczkSr39zAhHx6QaMjKh8Y9JNRERERER6kUgETO5UC2uGNoWVWc70UDejUtDj639x7E6sgaMjKp+YdBMRERERUbF09nHBjvGtUP3pOO+kJ1kY9uNpfHeU47yJnsekm4iIiIiIiq2Gc8447451nAHkjPOeu/cmJm29iCeZagNHR1R+MOkmIiIiIqIXYiM3wZqhTfFux//Gef9x6RFeX3UC9xM4zpsIYNJNREREREQvQSIRMKVzLXw7pAksTaUAgBuRyejx9b84fjcOAKDWiDh5Lx4Hbibg5L14qDXsgk6Vh8zQARARERERUcUXUM8VO8a3wuiN5xAal4bE9CwM+eEUXm/khn9D4hGVpHpaMhRKhRxBPXwQ6Ks0aMxEZYEt3UREREREVCJqulhjx/hW6PDMOO/t5x8+k3DniEpSYeym89h3NdIQYRKVKSbdRERERERUYhTmJvh+aFOMf9W7wDK5nctn7brOruZk9Jh0ExERERFRiZJIBLSu4VRoGRFAZJIKp0MTyiYoIgNh0k1ERERERCUuJkVVdKFilCOqqJh0ExERERFRiXO2lutVztbcpJQjITIsJt1ERERERFTi/L3soVTIIRRR7pPfr+DQjegyiYnIEJh0ExERERFRiZNKBAT18AGAQhPvh4kqjFx/FqM3nMXDxCdlExxRGWLSTUREREREpSLQV4lVgxvDVaHb1Tx3ne6W1R20+w5cj0bnxf/gu6MhyFJryjpUolIjM3QARERERERkvAJ9lejs44pT9+Jw90EsalR1QvPqjpBKBAx/xRM7Lz7C53uuIy41E+mZaszdexO/nnuIL/r4oqmnvaHDJ3ppbOkmIiIiIqJSJZUIaFHdAa/VsUeL6g6QSnI6nAuCgN6N3HBoansMaeEB4Wk/9FvRKei7Ohgfbr+EhLRMA0ZO9PKYdBMRERERkUEpzE0wp7cvdoxrBV83G+3+n88+QIdFR7DtTAQ0GtGAERK9OCbdRERERERULjR0t8XO8a0xs4cPrM1yRsImpmfho1+voN+3wbgZlWzgCImKj0k3ERERERGVG1KJgOGtvHBoajv0bFhFu/9c+GN0W/4vvthzHWkZ2QaMkKh4mHQTEREREVG542wjx/KBjbBpZHN4OVoCANQaEWuOhaLT4n+w72okRFGEWiMiOCQeOy8+RHBIPNTshk7lDGcvJyIiIiKicqt1TUf8OakNvv3nHlYeuYvMbA0ik1R4Z9N5+FaxQXRKBmJTMrTlc5cjC/RVGjBqov+wpZuIiIiIiMo1uYkUkzrVxIHJbdG2lpN2/9VHyToJNwBEJakwdtN57LsaWdZhEuWLSTcREREREVUIno6WWD+iGVYMaISnq47lkdu5fNau6+xqTuUCk24iIiIiIqowBEGAo7UZCsunRQCRSSqcDk0os7iICsKkm4iIiIiIKpSYFJVe5b45chdhcWmlHA1R4Zh0ExERERFRheJsLder3LE7cXh10RGM3nAWp0MTIIrsbk5lj7OXExERERFRheLvZQ+lQo6oJBWKSqNFEThwPRoHrkejQVUFRrb2Qtf6SphI2f5IZYPvNCIiIiIiqlCkEgFBPXwAAM/PpyY8fSzq1wAfBtaGi42Z9tjlB0mYtPUi2n51GN/+E4KkJ1llFjNVXky6iYiIiIiowgn0VWLV4MZwVeh2NXdVyLFqcGO80cQd49rXwLEPO2BJ/4aoV8VGWyYySYV5f95Ey3mHMPOPa4iITy/r8KkSYfdyIiIiIiKqkAJ9lejs44rToQmISVHB2VoOfy97SJ9ZT8xUJkGfRlXR288Np0IT8P2xUBy6GQ1RBNIz1Vh3IgwbgsPwmo8rRrXxQhMPOwhCzvlqjVjotYn0waSbiIiIiIgqLKlEQEtvhyLLCYKAFtUd0KK6A+7Fpv6/vXuPqrrK/z/+OtxBAUUuB1QQlRQGoRGVcPLyDQNsxoVj37LyuwZNnTUOThcb9UeTotMUZk2TNk61csYuE9U4pZWVaZZmpZaoeUMLktQEvAOigHI+vz+IU0cBj5fDgePzsRYLzuez9+e8j2+2a73Zn8/eWvJZiZYWHFDNWYsshrRyV5lW7ipTYvdOmnRjtNxM0l/eLVRpxY8rpYcH+ih3VJwy4sMd+ZHgYri9HAAAAMA1pWdIRz08Ol4b/l+qpqf3Uaj/j899f3XgpP7w6lZl52+1KbglqayiRlP+vUUrd5a2dshoxyi6AQAAAFyTOnfwUvb/9NanM2/Sk7cnKi48oMX2jSulz31nt+otbD8G+1B0AwAAALimeXm4aUz/bnr3nhv10C9jW2xrqGEhts+LjrZOcGj3eKYbAAAAANTw3HfIT241b8mklzZrRFyY0uLCNLxPqAJ9PR0cHdorim4AAAAA+EGov8/FG0mqPWfRu9tL9e72Unm4mTQoOkg3x4VpRGyYugf5OThKtCcU3QAAAADwg0HRQQoP9FFZRY2ae2rb19Ndnu4mVdackySdsxj6vPiYPi8+prnv7FZfs7/S4sJ0c5xZ8V0DrFuQnY8tya4NFN0AAAAA8AN3N5NyR8Vpyr+3yCTZFN6N5fDfxiZqRGyYviw5oQ8Ly7V6d7n2Hz9tbbenrEp7yqq08KMimQN8NCIuVDfHmXVDzyB5e7hLklbuLNXcd3azJdk1gKIbAAAAAH4iIz5cz/xf/wuKYvN5RXFKry5K6dVFD/0yVl+Xn9KHheVatbtcXx04ae1TVlmjf2/cr39v3K+O3h4adl2IQgO89cJnJRfMpDduSfbM//Wn8HYhFN0AAAAAcJ6M+HDdHGe26/Zvk8mkPmZ/9
TH7K/t/equ8skZrCg9r9e4yfVZ8THXnLJKkU7Xn9O6O5vf4NtQwmz73nd26Oc7MreYugqIbAAAAAJrg7mZSSq8ul9wvLMBHdyVH6q7kSFXXntMnXx/R6sJyfbTnsE6ePtti38Ytyd4oOKgx/bvKw51dnts7im4AAAAAcJAO3h4a2S9cI/uF61y9RQvWfKOnPyq6aL8Zb2xX7tu7FBcRoH5dA5XQreErOrijXTPgLNLWdlB0AwAAAEAr8HB30+BewXYV3ZJ05my9Cr47oYLvTliP+Xm5Kz4iUP26Bapf14bv0V06yO0nBTWLtLUtFN0AAAAA0Ers2ZLM38dDQ3oHa8ehCh04fsbm3Om6en1RclxflBy3Huvo7aH4rg0z4hZD+uen+y64Jou0OQ9FNwAAAAC0Enu2JHv8fxOshfGJ6jrtPFSh7QcrtPP7hu/fn7QtxE/VntPGb49r47fH1ZzG98l9e9cVL9JWbzG06dtjKjp4XL1PuSu5ZzC3rreAohsAAAAAWpG9W5JJUucOXhoSE6IhMSHWY8dO1WrH9z8W4Tu+r7C5TkvKK2sVn7tS0cEd1a2zr7p19lP3INvvHb2bLxMvvHV9H7euXwRFNwAAAAC0skvZkux8XTp6a3ifUA3vE2o9dqSqVovXf6vnPvn2ov3PnLVod2mldpdWNnm+k5+nunf2+6Eo91X3oIafS46e1sMrdrO/+CWi6AYAAAAAJ7jcLcmaEuLfUIjbU3SH+nvreHWdzlmafqr85OmzOnm6YQbdHldzf3Gjvl6nNxfo3JEj8ggJkd+AJJnc3S/7em1Bmyi6Fy1apMcff1xlZWVKTEzU008/rUGDBjXbfunSpZo1a5ZKSkoUExOjxx57TLfccksrRgwAAAAAbcvFFmkzqeEW9k9n3iTDMFReVauDx0/rwIkzOnjitA6eOKMDxxu+l1acUTM1eZMa9xf/Yt/xy/5DQuWqVSp/NE/nysqsxzzMZoU9mKOAtLTLumZb4PSi+/XXX9e0adP07LPPKjk5WU899ZTS09O1d+9ehYaGXtD+888/15133qm8vDz96le/Un5+vkaPHq0tW7YoPj7eCZ8AAAAAAJzPnkXackfF/TATbVLXTr7q2slXyU1c62y9RWUVNTpw4rQOHj+jNYXl+mB3+UVjOFxl37Pl56tctUrf33ufZNhW+ufKyxuOL3iq3Rbebs4O4Mknn9TkyZM1YcIExcXF6dlnn5Wfn5/+9a9/Ndl+wYIFysjI0PTp0xUbG6uHH35Y/fv319///vdWjhwAAAAA2pbGRdrMgT42x82BPpf0zLWnu5u6B/lpcK9g3T6wu8b/ItqufqH+PhdvdB6jvl7lj+ZdUHA3nGw4Vv5onoz6+ku+dlvg1Jnuuro6FRQUKCcnx3rMzc1NI0aM0IYNG5rss2HDBk2bNs3mWHp6upYvX95k+9raWtXW1lpfV1Y2LBZgsVhksViu8BM4jsVikWEYbTpG2I98uhby6VrIp2shn66FfLoW8tl60uLClNo3VF+WHNfhqlqF+ntrYI+GRdou999/QFQnmQN8VF7Z8q3rA6I6XfJ7nP7yS5tbyi9gGDpXVqbqL7+UXwuPIbc2ez+nU4vuo0ePqr6+XmFhYTbHw8LCtGfPnib7lJWVNdm+rJkk5eXlae7cuRccP3LkiGpqLu/Wh9ZgsVhUUVEhwzDk5ub0GxJwhcinayGfroV8uhby6VrIp2shn62vZ0epZ0cPSfU6dvTIFV/v3qERylnR9EJthqR7hkRc1vvUFRfb1e54cbFO9ehxydd3lKqqKrvaOf2ZbkfLycmxmRmvrKxU9+7dFRISooCAACdG1jKLxSKTyaSQkBD+U3IB5NO1kE/XQj5dC/l0LeTTtZDP9m9saKgCAwL15xWFKqv8cQIzPNBHs34Zq4x482Vd93SvXqq2o11Qr17ya2LdL2fx8bHvVnqnFt3BwcFyd3dXebntA/nl5eUym5tOmNlsvqT23t7e8vb2vuC4m5tbmx/sJpOpXcQJ+5BP10I+XQv5dC3k07WQT9dCPtu/WxIilB4frk3fHlXRwSPq3S1EyT2Dr2ibsA4DB8rDbNa58vKmn+s2meQRFqYOAwfK1IZ+d+z9PXZqxF5eXkpKStKaNWusxywWi9asWaOUlJQm+6SkpNi0l6TVq1c32x4AAAAAcPW4u5l0Q88uSusbpBt6drmigluSTO7uCnvwh3W+TOdd64fXYQ/mtNv9up3+Z4Jp06bp+eef14svvqjCwkJNmTJF1dXVmjBhgiTpN7/5jc1Ca/fee69Wrlypv/71r9qzZ4/mzJmjzZs3a+rUqc76CAAAAACAKxCQlqauC56Sx3nrd3mEhalrO94uTGoDz3SPHTtWR44c0ezZs1VWVqbrr79eK1eutC6Wtn//fptp+8GDBys/P18PPfSQHnzwQcXExGj58uXs0Q0AAAAA7VhAWpr8U1N1enOBzh05Io+QEPkNSGq3M9yNnF50S9LUqVObnaleu3btBcduu+023XbbbQ6OCgAAAADQmkzu7uqQ3Ha2BbsanH57OQAAAAAAroqiGwAAAAAAB6HoBgAAAADAQSi6AQAAAABwEIpuAAAAAAAchKIbAAAAAAAHoegGAAAAAMBBKLoBAAAAAHAQim4AAAAAAByEohsAAAAAAAeh6AYAAAAAwEE8nB1AazMMQ5JUWVnp5EhaZrFYVFVVJR8fH7m58beR9o58uhby6VrIp2shn66FfLoW8ulayOePNWVjjdmca67orqqqkiR1797dyZEAAAAAANq7qqoqBQYGNnveZFysLHcxFotFhw4dkr+/v0wmk7PDaVZlZaW6d++uAwcOKCAgwNnh4AqRT9dCPl0L+XQt5NO1kE/XQj5dC/lsmOGuqqpSREREi7P919xMt5ubm7p16+bsMOwWEBBwzf4SuyLy6VrIp2shn66FfLoW8ulayKdrudbz2dIMd6Nr8+Z7AAAAAABaAUU3AAAAAAAOQtHdRnl7eys3N1fe3t7ODgVXAfl0LeTTtZBP10I+XQv5dC3k07WQT/tdcwupAQAAAADQWpjpBgAAAADAQSi6AQAAAABwEIpuAAAAAAAchKK7jVq0aJF69OghHx8fJScn64svvnB2SLgMc+bMkclksvnq27evs8OCnT755BONGjVKERERMplMWr58uc15wzA0e/ZshYeHy9fXVyNGjNA333zjnGBxURfL5/jx4y8YrxkZGc4JFi3Ky8vTwIED5e/vr9DQUI0ePVp79+61aVNTU6Ps7Gx16dJFHTt21K233qry8nInRYyW2JPP4cOHXzA+f/e73zkpYrTkmWeeUUJCgnXv5pSUFL3//vvW84zN9uVi+WRs2oeiuw16/fXXNW3aNOXm5mrLli1KTExUenq6Dh8+7OzQcBl+9rOfqbS01Pr16aefOjsk2Km6ulqJiYlatGhRk+fnz5+vhQsX6tlnn9WmTZvUoUMHpaenq6amppUjhT0ulk9JysjIsBmvr776aitGCHutW7dO2dnZ2rhxo1avXq2zZ88qLS1N1dXV1jb333+/3nnnHS1d
ulTr1q3ToUOHNGbMGCdGjebYk09Jmjx5ss34nD9/vpMiRku6deumefPmqaCgQJs3b9ZNN92kzMxM7dq1SxJjs725WD4lxqZdDLQ5gwYNMrKzs62v6+vrjYiICCMvL8+JUeFy5ObmGomJic4OA1eBJGPZsmXW1xaLxTCbzcbjjz9uPXby5EnD29vbePXVV50QIS7F+fk0DMPIysoyMjMznRIPrszhw4cNSca6desMw2gYi56ensbSpUutbQoLCw1JxoYNG5wVJux0fj4NwzCGDRtm3Hvvvc4LClekc+fOxuLFixmbLqIxn4bB2LQXM91tTF1dnQoKCjRixAjrMTc3N40YMUIbNmxwYmS4XN98840iIiLUs2dPjRs3Tvv373d2SLgK9u3bp7KyMpuxGhgYqOTkZMZqO7Z27VqFhoaqT58+mjJlio4dO+bskGCHiooKSVJQUJAkqaCgQGfPnrUZn3379lVkZCTjsx04P5+NXnnlFQUHBys+Pl45OTk6ffq0M8LDJaivr9drr72m6upqpaSkMDbbufPz2YixeXEezg4Ato4ePar6+nqFhYXZHA8LC9OePXucFBUuV3Jysl544QX16dNHpaWlmjt3roYMGaKdO3fK39/f2eHhCpSVlUlSk2O18Rzal4yMDI0ZM0bR0dEqLi7Wgw8+qJEjR2rDhg1yd3d3dnhohsVi0X333adf/OIXio+Pl9QwPr28vNSpUyebtozPtq+pfErSXXfdpaioKEVERGj79u2aOXOm9u7dqzfffNOJ0aI5O3bsUEpKimpqatSxY0ctW7ZMcXFx2rZtG2OzHWounxJj014U3YADjRw50vpzQkKCkpOTFRUVpf/85z+aOHGiEyMDcL477rjD+nO/fv2UkJCgXr16ae3atUpNTXViZGhJdna2du7cyXoZLqK5fP72t7+1/tyvXz+Fh4crNTVVxcXF6tWrV2uHiYvo06ePtm3bpoqKCv33v/9VVlaW1q1b5+ywcJmay2dcXBxj007cXt7GBAcHy93d/YJVHMvLy2U2m50UFa6WTp066brrrlNRUZGzQ8EVahyPjFXX1bNnTwUHBzNe27CpU6dqxYoV+vjjj9WtWzfrcbPZrLq6Op08edKmPeOzbWsun01JTk6WJMZnG+Xl5aXevXsrKSlJeXl5SkxM1IIFCxib7VRz+WwKY7NpFN1tjJeXl5KSkrRmzRrrMYvFojVr1tg8O4H26dSpUyouLlZ4eLizQ8EVio6OltlsthmrlZWV2rRpE2PVRRw8eFDHjh1jvLZBhmFo6tSpWrZsmT766CNFR0fbnE9KSpKnp6fN+Ny7d6/279/P+GyDLpbPpmzbtk2SGJ/thMViUW1tLWPTRTTmsymMzaZxe3kbNG3aNGVlZWnAgAEaNGiQnnrqKVVXV2vChAnODg2X6I9//KNGjRqlqKgoHTp0SLm5uXJ3d9edd97p7NBgh1OnTtn8pXbfvn3atm2bgoKCFBkZqfvuu09/+ctfFBMTo+joaM2aNUsREREaPXq084JGs1rKZ1BQkObOnatbb71VZrNZxcXFmjFjhnr37q309HQnRo2mZGdnKz8/X2+99Zb8/f2tz4IGBgbK19dXgYGBmjhxoqZNm6agoCAFBAToD3/4g1JSUnTDDTc4OXqc72L5LC4uVn5+vm655RZ16dJF27dv1/3336+hQ4cqISHBydHjfDk5ORo5cqQiIyNVVVWl/Px8rV27Vh988AFjsx1qKZ+MzUvg7OXT0bSnn37aiIyMNLy8vIxBgwYZGzdudHZIuAxjx441wsPDDS8vL6Nr167G2LFjjaKiImeHBTt9/PHHhqQLvrKysgzDaNg2bNasWUZYWJjh7e1tpKamGnv37nVu0GhWS/k8ffq0kZaWZoSEhBienp5GVFSUMXnyZKOsrMzZYaMJTeVRkrFkyRJrmzNnzhi///3vjc6dOxt+fn7Gr3/9a6O0tNR5QaNZF8vn/v37jaFDhxpBQUGGt7e30bt3b2P69OlGRUWFcwNHk+6++24jKirK8PLyMkJCQozU1FRj1apV1vOMzfalpXwyNu1nMgzDaM0iHwAAAACAawXPdAMAAAAA4CAU3QAAAAAAOAhFNwAAAAAADkLRDQAAAACAg1B0AwAAAADgIBTdAAAAAAA4CEU3AAAAAAAOQtENAAAAAICDUHQDANBGlJWV6eabb1aHDh3UqVMnZ4cDAACuAopuAAAcYPz48Ro9evQl9fnb3/6m0tJSbdu2TV9//bVjAmujSkpKZDKZtG3bNmeHAgDAVeXh7AAAAECD4uJiJSUlKSYm5rKvUVdXJy8vr6sYFQAAuBLMdAMA0AqGDx+ue+65RzNmzFBQUJDMZrPmzJljPd+jRw+98cYbeumll2QymTR+/HhJ0smTJzVp0iSFhIQoICBAN910k7766itrvzlz5uj666/X4sWLFR0dLR8fn0vq9/LLL6tHjx4KDAzUHXfcoaqqKmsbi8Wi+fPnq3fv3vL29lZkZKQeeeQR6/kDBw7o9ttvV6dOnRQUFKTMzEyVlJQ0+29w4sQJjRs3TiEhIfL19VVMTIyWLFkiSYqOjpYk/fznP5fJZNLw4cOt/RYvXqzY2Fj5+Piob9+++sc//mE91zhD/tprr2nw4MHy8fFRfHy81q1bZ39yAABwIIpuAABayYsvvqgOHTpo06ZNmj9/vv785z9r9erVkqQvv/xSGRkZuv3221VaWqoFCxZIkm677TYdPnxY77//vgoKCtS/f3+lpqbq+PHj1usWFRXpjTfe0Jtvvmm9PduefsXFxVq+fLlWrFihFStWaN26dZo3b571fE5OjubNm6dZs2Zp9+7dys/PV1hYmCTp7NmzSk9Pl7+/v9avX6/PPvtMHTt2VEZGhurq6pr8/I3Xef/991VYWKhnnnlGwcHBkqQvvvhCkvThhx+qtLRUb775piTplVde0ezZs/XII4+osLBQjz76qGbNmqUXX3zR5trTp0/XAw88oK1btyolJUWjRo3SsWPHLjtXAABcNQYAALjqsrKyjMzMTOvrYcOGGTfeeKNNm4EDBxozZ860vs7MzDSysrKsr9evX28EBAQYNTU1Nv169eplPPfcc4ZhGEZubq7h6elpHD58+JL7+fn5GZWVldbz06dPN5KTkw3DMIzKykrD29vbeP7555v8fC+//LLRp08fw2KxWI/V1tYavr6+xgcffNBkn1GjRhkTJkxo8ty+ffsMScbWrVsviDk/P9/m2MMPP2ykpKTY9Js3b571/NmzZ41u3boZjz32WJPvBQBAa+KZbgAAWklCQoLN6/DwcB0+fLjZ9l999ZVOnTqlLl262Bw/c+aMiouLra+joqIUEhJyyf169Oghf3//JuMpLCxUbW2tUlNTm42tqKjIpr8k1dTU2LzHT02ZMkW33nqrtmzZorS0NI0ePVqDBw9u9vNXV1eruLhYEydO1OTJk63Hz507p8DAQJu2KSkp1p89PDw0YMAAFRYWNnttAABaC0U3AACtxNPT0+a1yWSSxWJptv2pU6cUHh6utWvXXnDup1uKdejQ4bL
6tRSPr69vs3E1vkdSUpJeeeWVC8799A8APzVy5Eh99913eu+997R69WqlpqYqOztbTzzxRLPvIUnPP/+8kpOTbc65u7u3GB8AAG0FRTcAAG1U//79VVZWJg8PD/Xo0cPh/X4qJiZGvr6+WrNmjSZNmtTke7z++usKDQ1VQECA3dcNCQlRVlaWsrKyNGTIEE2fPl1PPPGEdcX1+vp6a9uwsDBFRETo22+/1bhx41q87saNGzV06FBJDTPhBQUFmjp1qt1xAQDgKCykBgBAGzVixAilpKRo9OjRWrVqlUpKSvT555/rT3/6kzZv3nzV+/2Uj4+PZs6cqRkzZuill15ScXGxNm7cqH/+85+SpHHjxik4OFiZmZlav3699u3bp7Vr1+qee+7RwYMHm7zm7Nmz9dZbb6moqEi7du3SihUrFBsbK0kKDQ2Vr6+vVq5cqfLyclVUVEiS5s6dq7y8PC1cuFBff/21duzYoSVLlujJJ5+0ufaiRYu0bNky7dmzR9nZ2Tpx4oTuvvtuuz4rAACORNENAEAbZTKZ9N5772no0KGaMGGCrrvuOt1xxx367rvvrKuIX81+55s1a5YeeOABzZ49W7GxsRo7dqz1mW8/Pz998sknioyM1JgxYxQbG6uJEyeqpqam2ZlvLy8v5eTkKCEhQUOHDpW7u7tee+01SQ3PYS9cuFDPPfecIiIilJmZKUmaNGmSFi9erCVLlqhfv34aNmyYXnjhBesWY43mzZunefPmKTExUZ9++qnefvtt68roAAA4k8kwDMPZQQAAAFyOkpISRUdHa+vWrbr++uudHQ4AABdgphsAAAAAAAeh6AYAAAAAwEG4vRwAAAAAAAdhphsAAAAAAAeh6AYAAAAAwEEougEAAAAAcBCKbgAAAAAAHISiGwAAAAAAB6HoBgAAAADAQSi6AQAAAABwEIpuAAAAAAAchKIbAAAAAAAH+f/0BmBmjz9l9QAAAABJRU5ErkJggg==", - "text/plain": [ - "
" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Plotted 36 inference steps + final sigma (total tensor length=37).\n" - ] - } - ], - "source": [ - "\n", - "fig, ax = plt.subplots(figsize=(10, 5))\n", - "step_axis = np.arange(len(sigma_values) - 1)\n", - "ax.plot(step_axis, sigma_values[:-1], marker=\"o\", linewidth=2, label=\"sigma schedule\")\n", - "ax.scatter(len(sigma_values) - 1, sigma_values[-1], color=\"tab:red\", zorder=5, label=\"final sigma (appended)\")\n", - "ax.set_xlabel(\"Inference step\")\n", - "ax.set_ylabel(\"Sigma value\")\n", - "ax.set_title(\"FlowUniPCMultistepScheduler sigmas\")\n", - "ax.grid(alpha=0.3)\n", - "ax.legend()\n", - "fig.tight_layout()\n", - "plt.show()\n", - "\n", - "print(\n", - " f\"Plotted {len(step_axis)} inference steps + final sigma (total tensor length={len(sigma_values)}).\"\n", - ")\n" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [ - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAA90AAAHqCAYAAAAZLi26AAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjgsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvwVt1zgAAAAlwSFlzAAAPYQAAD2EBqD+naQAAiPpJREFUeJzs3Xd8U2X///F3kjYddFJKW6BQQJAlQxREGTKUoSBDRVFBbpFbBVQQByIgOABRhoqieDsRwXWLFkUQKUuEWwQB2XvTQWkppSs5vz/4NV9LW5pA27Tp6/l49EFz5Ton7+RKSj8917mOyTAMQwAAAAAAoNiZ3R0AAAAAAABPRdENAAAAAEAJoegGAAAAAKCEUHQDAAAAAFBCKLoBAAAAACghFN0AAAAAAJQQim4AAAAAAEoIRTcAAAAAACWEohsAAAAAgBJC0Q0A5UhMTIwefPBBd8coFjfffLNuvvlmd8coFg8++KBiYmKc6vviiy/KZDKVbKAy5OOPP5bJZNIff/xR4o/lyjhcrKyMi8lk0osvvujuGACAYkTRDQBlQG5hUtDXc88957ZcDz74oAICAgq9PyAgoNj+CHDw4ME8z9tisahmzZrq06ePNm/enK9/RkaGZsyYodatWys4OFi+vr6qX7++hg8frt27dzv65RZTZrNZR44cybef1NRU+fn5yWQyafjw4cXyXNLT0/Xiiy8qLi6uWPZ3/PhxvfjiiwW+DiUhKytLs2bNUosWLRQUFKSQkBA1btxYQ4cO1c6dO0slgyf78ccfy2VhXdrvQwDwFF7uDgAA+D+TJk1S7dq187Q1adLETWlK1tKlSwtsv/fee9WjRw/ZbDbt2LFD7777rn766Sf9/vvvat68uSQpMTFR3bp108aNG3X77bdrwIABCggI0K5du7RgwQK9//77ysrKyrNfHx8fffHFF3rmmWfytH/77bfF/tzS09M1ceJEScp3NP+FF15w+Q8px48f18SJExUTE+N4DUpSv3799NNPP+nee+/Vww8/rOzsbO3cuVOxsbG68cYb1aBBgxLP4Ml+/PFHzZ49u8DC+/z58/LyKpu/npX2+xAAPEXZ/KkOABVU9+7ddd1117k7RqmwWq0Ftl977bW6//77Hbdvuukm9erVS++++67ee+89SReOwG/atElff/21+vXrl2f7l156SWPHjs233x49ehRYdM+fP1+33Xabvvnmmyt9Sk7x8vIqs0WVJP3vf/9TbGysXnnlFT3//PN57nv77bd15swZ9wQrZ9LT0+Xv7+/ydr6+viWQBgDgTkwvB4Bybv/+/brrrrtUuXJl+fv764YbbtDixYsd9xuGoSpVqmjUqFGONrvdrpCQEFksljxF1NSpU+Xl5aW0tLTLypI7TX7t2rUaNWqUwsPDValSJfXp00cJCQl5+jp7TnenTp0kSQcOHJAkrV+/XosXL9ZDDz2Ur+CWLhzRfv311/O1DxgwQJs3b84zPfrkyZP69ddfNWDAgEKfy8GDB/O0x8XFyWQyFTp1/ODBgwoPD5ckTZw40TFdPveoZkHnDi9btkxt27ZVSEiIAgICdPXVVzsK3ri4OF1//fWSpMGDBzv29/HHHzu2X79+vbp166bg4GD5+/urQ4cOWrt2bZ7HyH3cnTt36u6771ZQUJDCwsL0xBNPKCMjw9Fv3759ki78seNiFotFYWFhedqOHTumhx56SNWqVZOPj49q166tRx99NN9Mg8zMzCLfE5L0008/qV27dqpUqZICAwN122236e+//87X77vvvlOTJk3k6+urJk2a6L///W++PoWNVe6pDP98DQszb948tWzZUn5+fqpcubLuueeefKcp3HzzzWrSpIk2btyo9u3by9/fP98fLHI9+OCDmj17tiTlOZ0i18XndOeO2+7du3X//fcrODhY4eHhGjdunAzD0JEjR3THHXcoKChIkZGReuONN/I9ZmZmpiZMmKCrrrpKPj4+io6O1jPPPKPMzMw8/crS+7CoPABQnpTdP7UDQAWUkpKixMTEPG1VqlQptP+pU6d04403Kj09XY8//rjCwsL0ySefqFevXvr666/Vp08fmUwm3XTTTVq1apVjuy1btiglJUVms1lr167VbbfdJklavXq1WrRoccnzuJ0xYsQIhYaGasKECTp48KBmzpyp4cOHa+HChS7vK7cIzC32vv/+e0nSAw884NJ+2rdvrxo1amj+/PmaNGmSJGnhwoUKCAhwPP/iEB4ernfffVePPvqo+vTpo759+0qSmjZtWmD/v//+W7fffruaNm2qSZMmycfHR3v37nUUKw0bNtSkSZM0fvx4DR06VO3atZMk3XjjjZKkX3/9Vd27d1fLli01YcIEmc1mffTRR+rUqZNWr16tVq1a5Xm8u+++WzExMZo8ebJ+//13vfnmm0pOTtann34qSapVq5Yk6fPPP9dNN910yaPyx48fV6tWrXTmzBkNHTpUDRo00LFjx/T1118rPT09z2wGZ94Tn332
mQYNGqSuXbtq6tSpSk9P17vvvqu2bdtq06ZNjkXSli5dqn79+qlRo0aaPHmykpKSNHjwYNWoUcPpcXLGK6+8onHjxunuu+/WkCFDlJCQoLfeekvt27fXpk2bFBIS4uiblJSk7t2765577tH999+viIiIAvf573//W8ePH9eyZcv02WefOZ2lf//+atiwoaZMmaLFixfr5ZdfVuXKlfXee++pU6dOmjp1qj7//HONHj1a119/vdq3by/pwh/YevXqpTVr1mjo0KFq2LChtm7dqhkzZmj37t367rvvJJW992FReQCgXDEAAG730UcfGZIK/PqnWrVqGYMGDXLcfvLJJw1JxurVqx1tZ8+eNWrXrm3ExMQYNpvNMAzDmDZtmmGxWIzU1FTDMAzjzTffNGrVqmW0atXKePbZZw3DMAybzWaEhIQYI0eOdOxr0KBBRqVKlQrNXalSpTx5cp9Hly5dDLvd7mgfOXKkYbFYjDNnzjjaOnToYHTo0MFx+8CBA4YkY+LEiUZCQoJx8uRJIy4uzmjRooUhyfjmm28MwzCMPn36GJKM5OTkIl7VCyZMmGBIMhISEozRo0cbV111leO+66+/3hg8eLBhGIYhyRg2bFi+53LgwIE8+1uxYoUhyVixYoWjbdCgQUatWrUctxMSEgxJxoQJEwrNk2vGjBmOfIX53//+Z0gyPvroozztdrvdqFevntG1a9c8r3d6erpRu3Zt45Zbbsn3uL169cqzj8cee8yQZPz111+OfXbo0MGQZERERBj33nuvMXv2bOPQoUP5cg0cONAwm83G//73v3z35eZx9j1x9uxZIyQkxHj44Yfz7OfkyZNGcHBwnvbmzZsbUVFRed5PS5cuNSTlGYeCxsow/u+99s/X8+JxOXjwoGGxWIxXXnklz7Zbt241vLy88rTnvl5z5szJ9zoUZNiwYfk+27kuft/k5ho6dKijLScnx6hRo4ZhMpmMKVOmONqTk5MNPz+/PJ/Jzz77zDCbzXl+RhiGYcyZM8eQZKxdu9YwjLL3PnQmDwCUF0wvB4AyZPbs2Vq2bFmer0v58ccf1apVK7Vt29bRFhAQoKFDh+rgwYPavn27JKldu3ay2Wz67bffJF04ot2uXTu1a9dOq1evliRt27ZNZ86ccRzBuhJDhw7NM2U29/EPHTpU5LYTJkxQeHi4IiMjdfPNN2vfvn2aOnWq44hxamqqJCkwMNDlXAMGDNDevXv1v//9z/FvQVPLS1Pu0dJFixbJbre7tO3mzZu1Z88eDRgwQElJSUpMTFRiYqLOnTunzp07a9WqVfn2OWzYsDy3R4wYIenCe0m6ML35559/1ssvv6zQ0FB98cUXGjZsmGrVqqX+/fs7Tkew2+367rvv1LNnzwLXIbh4Cn1R74lly5bpzJkzuvfeex3PIzExURaLRa1bt9aKFSskSSdOnNDmzZs1aNAgBQcHO/Z3yy23qFGjRi69fpfy7bffym636+67786TJzIyUvXq1XPkyeXj46PBgwcX2+NfbMiQIY7vLRaLrrvuOhmGoYceesjRHhISoquvvlr79+93tH311Vdq2LChGjRokOd55J62kfs8ytr78EryAEBZw/RyAChDWrVq5dJCaocOHVLr1q3ztTds2NBxf5MmTXTttdfK399fq1evVteuXbV69WpNnDhRkZGReuutt5SRkeEovv9ZwDujoGsb16xZM8/t0NBQSVJycnKR+xs6dKjuuusumc1mx6WqfHx8HPcHBQVJks6ePZtneq8zWrRooQYNGmj+/PkKCQlRZGSko/hwl/79++uDDz7QkCFD9Nxzz6lz587q27ev7rzzTpnNl/7b+J49eyRJgwYNKrRPSkqK4/WXpHr16uW5v27dujKbzXnOXffx8dHYsWM1duxYnThxQitXrtSsWbP05ZdfytvbW/PmzVNCQoJSU1OdXl2/qPdE7nMpbDxyxz23SL/4eUjS1VdfrT///NOpPEXZs2ePDMMo8HEkydvbO8/t6tWrF7o4YHG4+PXLvUzexaefBAcHKykpyXF7z5492rFjh2OdgYvFx8dLKnvvwyvJAwBlDUU3AFQA3t7eat26tVatWqW9e/fq5MmTateunSIiIpSdna3169dr9erVatCgQZ5fzn19fZWZmSnDMPIV14ZhKCMjo8DVli0WS4E5DMMoMmu9evXUpUuXQu/PvVzV1q1bL+uo/IABA/Tuu+8qMDBQ/fv3L/QX+IL+mCBJNpvN5ce8FD8/P61atUorVqzQ4sWLtWTJEi1cuFCdOnXS0qVLC30tJTmOAE6bNq3QSzgVdX5+Yc8zV1RUlO655x7169dPjRs31pdffunUAmQXK+o9kftcPvvsM0VGRubrdzkrvl/JGNrtdplMJv30008FZr/4dfXz83M5nysKyuDM58xut+uaa67R9OnTC+wbHR0tqey9D68kDwCUNRTdAFCO1apVS7t27crXnrtCd+6iWNKF6bxTp07VL7/8oipVqqhBgwYymUxq3LixVq9erdWrV+v222/Pt/+cnBzt27dPV111VZ779u7dK5vNlucxSkPPnj01efJkzZs377KL7vHjx+vEiROXXMgq96jcxZfIcmaKfFGF7MXMZrM6d+6szp07a/r06Xr11Vc1duxYrVixQl26dCl0f3Xr1pV04Sjwpf5Q8U979uzJcy34vXv3ym63OxYpK4y3t7eaNm2qPXv2KDExUVWrVlVQUJC2bdvm3JMsQu5zqVq16iWfS+77Lffo6j9d/Fm4kjGsW7euDMNQ7dq1Vb9+/SL7u8LV98eVqFu3rv766y917ty5yMcta+/DovIAQHnB/BwAKMd69OihDRs2aN26dY62c+fO6f3331dMTEyec1zbtWunzMxMzZw5U23btnX8At2uXTt99tlnOn78eL4itnv37pIuXJ/5YrmXPcrtU1ratGmjbt266YMPPnCsvPxPWVlZGj16dKHb161bVzNnztTkyZPzrah8cT9JeVZ9t9lsev/994vMmHt9ZmeuaX369Ol8bblHC3Mv6VSpUqUC99eyZUvVrVtXr7/+eoGXeSvokly545brrbfekvR/47hnzx4dPnw433ZnzpzRunXrFBoaqvDwcJnNZvXu3Vs//PCD/vjjj3z9nZnV8E9du3ZVUFCQXn31VWVnZxf6XKKiotS8eXN98sknSklJcdy/bNkyxxoGuWrVqiWLxZJnDCXpnXfeKTJP3759ZbFYNHHixHzPxTCMPFO4XVXYeJaEu+++W8eOHdPcuXPz3Xf+/HmdO3dOUtl7HzqTBwDKC450A0A59txzz+mLL75Q9+7d9fjjj6ty5cr65JNPdODAAX3zzTd5pk63adNGXl5e2rVrl4YOHepob9++vd59911Jyld0N2/eXEOGDNGsWbO0Z88e3XLLLZIuFDg//vijhgwZombNmpXCM83r008/1a233qq+ffuqZ8+e6ty5sypVqqQ9e/ZowYIFOnHiRIHX6s71xBNPFPkYjRs31g033KAxY8bo9OnTqly5shYsWKCcnJwit/Xz81OjRo20cOFC1a9fX5UrV1a
TJk0KPP950qRJWrVqlW677TbVqlVL8fHxeuedd1SjRg3H+fV169ZVSEiI5syZo8DAQFWqVEmtW7dW7dq19cEHH6h79+5q3LixBg8erOrVq+vYsWNasWKFgoKC9MMPP+R5vAMHDqhXr17q1q2b1q1bp3nz5mnAgAGOcfzrr780YMAAde/eXe3atVPlypV17NgxffLJJzp+/LhmzpzpmNr76quvaunSperQoYPjclQnTpzQV199pTVr1rh0zn1QUJDeffddPfDAA7r22mt1zz33KDw8XIcPH9bixYt10003Of74M3nyZN12221q27at/vWvf+n06dN666231Lhx4zxFX3BwsO666y699dZbMplMqlu3rmJjYx3nMV9K3bp19fLLL2vMmDE6ePCgevfurcDAQB04cED//e9/NXTo0Ev+cedSWrZsKUl6/PHH1bVrV1ksFt1zzz2Xta+iPPDAA/ryyy/1yCOPaMWKFbrppptks9m0c+dOffnll/r555913XXXlbn3oTN5AKDccNOq6QCAf8i9rFJBl176p4svGWYYhrFv3z7jzjvvNEJCQgxfX1+jVatWRmxsbIHbX3/99YYkY/369Y62o0ePGpKM6OjoArex2WzGrFmzjGbNmhm+vr6Gr6+v0axZM+PNN990XJKsqOdR0KWbCrtk2LRp0y75GuRKT083Xn/9deP66683AgICDKvVatSrV88YMWKEsXfvXke/f14y7FJ00SXDDOPCa9ulSxfDx8fHiIiIMJ5//nlj2bJlRV4yzDAM47fffjNatmxpWK3WPJeBuvjSVMuXLzfuuOMOo1q1aobVajWqVatm3Hvvvcbu3bvz7G/RokVGo0aNDC8vr3yXbdq0aZPRt29fIywszPDx8TFq1apl3H333cby5cvzvQ7bt2837rzzTiMwMNAIDQ01hg8fbpw/f97R79SpU8aUKVOMDh06GFFRUYaXl5cRGhpqdOrUyfj666/zvW6HDh0yBg4caISHhxs+Pj5GnTp1jGHDhhmZmZmGYbj2nsht79q1qxEcHGz4+voadevWNR588EHjjz/+yNPvm2++MRo2bGj4+PgYjRo1Mr799tsCxyEhIcHo16+f4e/vb4SGhhr//ve/jW3bthV5ybB/Pk7btm2NSpUqGZUqVTIaNGhgDBs2zNi1a5ejT4cOHYzGjRvn27YwOTk5xogRI4zw8HDDZDLledx/vlf+mevi929hl/MrKEtWVpYxdepUo3HjxoaPj48RGhpqtGzZ0pg4caKRkpJiGEbZex86mwcAygOTYbg4/wsAAJQ7L774oiZOnKiEhIR8K14DpYX3IYCKiHO6AQAAAAAoIRTdAAAAAACUEIpuAAAAAABKCOd0AwAAAABQQjjSDQAAAABACaHoBgAAAACghHi5O0Bps9vtOn78uAIDA2UymdwdBwAAAABQDhmGobNnz6patWoymws/nl3hiu7jx48rOjra3TEAAAAAAB7gyJEjqlGjRqH3V7iiOzAwUNKFFyYoKMjNaQpnt9uVkJCg8PDwS/7VBOUPY+vZGF/Pxdh6LsbWszG+noux9VzlZWxTU1MVHR3tqDELU+GK7twp5UFBQWW+6M7IyFBQUFCZfqPBdYytZ2N8PRdj67kYW8/G+HouxtZzlbexLeq05bL/DAAAAAAAKKcougEAAAAAKCEU3QAAAAAAlJAKd043AAAAgPLHZrMpOzvbcdtutys7O1sZGRnl4rxfOK+sjK23t7csFssV74eiGwAAAECZZRiGTp48qTNnzuRrt9vtOnv2bJELWaF8KUtjGxISosjIyCvKQdENAAAAoMzKLbirVq0qf39/R/FjGIZycnLk5eXl9sIMxassjK1hGEpPT1d8fLwkKSoq6rL3RdENAAAAoEyy2WyOgjssLCzPfWWhMEPJKCtj6+fnJ0mKj49X1apVL3uqOSc/AAAAACiTcs/h9vf3d3MSVFS5771/rifgKopuAAAAAGUaR7LhLsXx3nNr0b1q1Sr17NlT1apVk8lk0nfffVfkNnFxcbr22mvl4+Ojq666Sh9//HGJ5wQAAAAA4HK4teg+d+6cmjVrptmzZzvV/8CBA7rtttvUsWNHbd68WU8++aSGDBmin3/+uYSTAgAAAMCViYuLk8lkyrcSOwp38OBBmUwmbd682d1RLptbF1Lr3r27unfv7nT/OXPmqHbt2nrjjTckSQ0bNtSaNWs0Y8YMde3ataRilpoTaSeUnJksSTLshk6nnlaSJUkm84UpDaE+oYoKuPxV8/65/4KU9f0DAAAArnLX76g333yzmjdvrpkzZzrabrzxRp04cULBwcHF/njOiouLU8eOHZWcnKyQkBC35ahIytXq5evWrVOXLl3ytHXt2lVPPvmkewIVoxNpJ3T7d7cry5ZVaB+rxarY3rGX9UOhvO8/9zEo6gEAAOCs0vgd1RVWq1WRkZEl/jgoW8pV0X3y5ElFRETkaYuIiFBqaqrOnz/vWNL9nzIzM5WZmem4nZqaKkmy2+2y2+0lG9gFp8+fvuQPA0nKsmXp9PnTivCPuGQ/T9z/iXMn1Ou7XsqyX+IHptmq73t/r6hKl/FHg3MndCbjTKH3h/iGXNZ+C9q/3bArOSVZCeYEmU3mYtk/yg673S7DMMrUzxcUD8bWczG2no3xLd9yxy/362K5bQXdJ0mnM5z8HTXjtCIrFV8xPHjwYK1cuVIrV67UrFmzJEn79+/XwYMH1alTJ50+fVohISH6+OOPNXLkSH322WcaPXq0jhw5oh49euiTTz7RV199pRdffFEpKSm6//77NWPGDMclqzIzMzV27FgtWLBAZ86cUZMmTTRlyhTdfPPNkqRDhw5pxIgRWrNmjbKyshQTE6PXXntNjRo1UseOHSVJoaGhkqRBgwbpo48+kt1u19SpUzV37lydPHlS9evX1wsvvKA777xT0oUj5J06ddIPP/yg559/Xrt371bz5s01d+5cNWnSpMDX4b777pPNZtOCBQscbdnZ2apWrZreeOMNDRw4UEuWLNErr7yibdu2yWKxqE2bNpo5c6Zq1qwpKe8YG4bheM2Sk//vYNx3332nvn375vmcL1q0SJMmTdL27dtVrVo1DRw4UGPHjpWXl5cMw9DEiRP10Ucf6dSpUwoLC1O/fv305ptv5nsOuY9bUP3o7M+VclV0X47Jkydr4sSJ+doTEhKUkZHhhkQFO5162ql+szfOVmWfyrKYLPm+zCbzhe/N+e9LzEh0av8bD29U0umkQvdT4OOZLEo6m+Tc80w+rXhbvFN9/2l/6v5LFtySlGXP0v4T+2UJcu36efHn4/XgmgeVbS/8MgDeZm993PZjVfWr6tK+S2P/uY+Rkp1S6P3B3sGXvW+4xm63KyUlRYZhyGzmAhGehLH1XIytZ2N8y7fs7GzZ7Xbl5OQoJycnz32GYchms0kqfIXp3PuLYrPZ8u3/Srz++uvatWuXGjdurAkTJkiSwsPDtW/fPklyPB+73a709HS9+eab+uyzz5SWlqa7775bffr0UXBwsBYtWqQDBw6of//+uuGGG3T33XdLko
[... base64-encoded PNG data omitted: notebook display_data output of the "FlowUniPCMultistepScheduler timesteps" plot (x-axis "Inference step", y-axis "Timestep (float index)") ...]", - "text/plain": [ - "
" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "fig, ax = plt.subplots(figsize=(10, 5))\n", - "timestep_axis = np.arange(len(timestep_values))\n", - "ax.plot(timestep_axis, timestep_values * 0.001, marker=\"s\", linewidth=2, color=\"tab:green\", label=\"timestep values\")\n", - "ax.set_xlabel(\"Inference step\")\n", - "ax.set_ylabel(\"Timestep (float index)\")\n", - "ax.set_title(\"FlowUniPCMultistepScheduler timesteps\")\n", - "ax.grid(alpha=0.3)\n", - "ax.legend()\n", - "fig.tight_layout()\n", - "plt.show()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.15" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/src/diffusers/__init__.py b/src/diffusers/__init__.py index 2d5a3d1204f0..bc169a451ebd 100644 --- a/src/diffusers/__init__.py +++ b/src/diffusers/__init__.py @@ -1178,9 +1178,9 @@ CogView4ControlPipeline, CogView4Pipeline, ConsisIDPipeline, - Cosmos_2_5_PredictBase, Cosmos2TextToImagePipeline, Cosmos2VideoToWorldPipeline, + Cosmos_2_5_PredictBase, CosmosTextToWorldPipeline, CosmosVideoToWorldPipeline, CycleDiffusionPipeline, diff --git a/src/diffusers/pipelines/__init__.py b/src/diffusers/pipelines/__init__.py index 15b68a303799..fe9920055d64 100644 --- a/src/diffusers/pipelines/__init__.py +++ b/src/diffusers/pipelines/__init__.py @@ -623,9 +623,9 @@ StableDiffusionXLControlNetXSPipeline, ) from .cosmos import ( - Cosmos_2_5_PredictBase, Cosmos2TextToImagePipeline, Cosmos2VideoToWorldPipeline, + Cosmos_2_5_PredictBase, CosmosTextToWorldPipeline, CosmosVideoToWorldPipeline, ) diff --git a/tests/pipelines/cosmos/test_cosmos2_5_predict.py b/tests/pipelines/cosmos/test_cosmos2_5_predict.py index e68fde325a6d..87ebd629585a 100644 --- a/tests/pipelines/cosmos/test_cosmos2_5_predict.py +++ b/tests/pipelines/cosmos/test_cosmos2_5_predict.py @@ -17,11 +17,10 @@ import os import tempfile import unittest -from types import SimpleNamespace -from typing import List import numpy as np import torch +from transformers import AutoTokenizer, Qwen2VLForConditionalGeneration from diffusers import AutoencoderKLWan, Cosmos_2_5_PredictBase, CosmosTransformer3DModel, FlowUniPCMultistepScheduler @@ -34,68 +33,6 @@ enable_full_determinism() -class DummyPredictTokenizer: - model_input_names = ["input_ids"] - - def __init__(self, vocab_size: int = 128): - self.vocab_size = vocab_size - - @classmethod - def from_pretrained(cls, *args, **kwargs): - return cls() - - def apply_chat_template( - self, - conversations: List[dict], - tokenize: bool = True, - add_generation_prompt: bool = False, - add_vision_id: bool = False, - max_length: int = 16, - truncation: bool = True, - padding: str = "max_length", - ): - return list(range(max_length)) - - def save_pretrained(self, save_directory: str): - os.makedirs(save_directory, exist_ok=True) - with open(os.path.join(save_directory, "tokenizer_config.json"), "w") as f: - json.dump({"vocab_size": self.vocab_size}, f) - - -class DummyPredictTextEncoder(torch.nn.Module): - config_name = "config.json" - - def __init__(self, vocab_size: int = 128, hidden_size: int = 16): - super().__init__() - self.emb 
= torch.nn.Embedding(vocab_size, hidden_size) - self.proj = torch.nn.Linear(hidden_size, hidden_size) - self.config = SimpleNamespace(hidden_size=hidden_size) - - @property - def dtype(self): - return next(self.parameters()).dtype - - @classmethod - def from_pretrained(cls, save_directory: str, **kwargs): - return cls() - - def save_pretrained(self, save_directory: str, safe_serialization: bool = False): - os.makedirs(save_directory, exist_ok=True) - torch.save(self.state_dict(), os.path.join(save_directory, "pytorch_model.bin")) - with open(os.path.join(save_directory, self.config_name), "w") as f: - json.dump({"vocab_size": self.emb.num_embeddings, "hidden_size": self.emb.embedding_dim}, f) - - def forward(self, input_ids: torch.LongTensor, output_hidden_states: bool = False, **kwargs): - hidden = self.emb(input_ids) - hidden = self.proj(hidden) - hidden_states = ( - hidden, - hidden * 0.5, - hidden * 0.25, - ) - return SimpleNamespace(hidden_states=hidden_states) - - class Cosmos_2_5_PredictBaseWrapper(Cosmos_2_5_PredictBase): @staticmethod def from_pretrained(*args, **kwargs): @@ -154,8 +91,11 @@ def get_dummy_components(self): torch.manual_seed(0) scheduler = FlowUniPCMultistepScheduler() - text_encoder = DummyPredictTextEncoder(hidden_size=16) - tokenizer = DummyPredictTokenizer() + # NOTE: using Qwen2 VL instead for tests (reason1 is based on 2.5) + text_encoder = Qwen2VLForConditionalGeneration.from_pretrained( + "hf-internal-testing/tiny-random-Qwen2VLForConditionalGeneration", + ) + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-Qwen2VLForConditionalGeneration") components = { "transformer": transformer, From 824fffa190a73799d8f20748beb5983d50a75fda Mon Sep 17 00:00:00 2001 From: Miguel Martin Date: Wed, 17 Dec 2025 00:00:21 +0000 Subject: [PATCH 09/22] docstring update --- .../cosmos/pipeline_cosmos2_5_predict.py | 53 ++++++++++++++++--- 1 file changed, 46 insertions(+), 7 deletions(-) diff --git a/src/diffusers/pipelines/cosmos/pipeline_cosmos2_5_predict.py b/src/diffusers/pipelines/cosmos/pipeline_cosmos2_5_predict.py index f2a2d9bec2b2..dde6c3d3ed13 100644 --- a/src/diffusers/pipelines/cosmos/pipeline_cosmos2_5_predict.py +++ b/src/diffusers/pipelines/cosmos/pipeline_cosmos2_5_predict.py @@ -76,12 +76,27 @@ def retrieve_latents( >>> model_id = "nvidia/Cosmos-Predict2.5-Base-2B" >>> pipe = Cosmos_2_5_PredictBase.from_pretrained(model_id, torch_dtype=torch.bfloat16) - >>> pipe.to("cuda") - - >>> prompt = "A close-up shot captures a vibrant yellow scrubber vigorously working on a grimy plate, its bristles moving in circular motions to lift stubborn grease and food residue. The dish, once covered in remnants of a hearty meal, gradually reveals its original glossy surface. Suds form and bubble around the scrubber, creating a satisfying visual of cleanliness in progress. The sound of scrubbing fills the air, accompanied by the gentle clinking of the dish against the sink. As the scrubber continues its task, the dish transforms, gleaming under the bright kitchen lights, symbolizing the triumph of cleanliness over mess." 
- >>> negative_prompt = "The video captures a series of frames showing ugly scenes, static with no motion, motion blur, over-saturation, shaky footage, low resolution, grainy texture, pixelated images, poorly lit areas, underexposed and overexposed scenes, poor color balance, washed out colors, choppy sequences, jerky movements, low frame rate, artifacting, color banding, unnatural transitions, outdated special effects, fake elements, unconvincing visuals, poorly edited content, jump cuts, visual noise, and flickering. Overall, the video is of poor quality." + >>> pipe = pipe.to("cuda") + + >>> # Common negative prompt reused across modes. + >>> negative_prompt = ( + ... "The video captures a series of frames showing ugly scenes, static with no motion, motion blur, " + ... "over-saturation, shaky footage, low resolution, grainy texture, pixelated images, poorly lit areas, " + ... "underexposed and overexposed scenes, poor color balance, washed out colors, choppy sequences, jerky " + ... "movements, low frame rate, artifacting, color banding, unnatural transitions, outdated special effects, " + ... "fake elements, unconvincing visuals, poorly edited content, jump cuts, visual noise, and flickering. " + ... "Overall, the video is of poor quality." + ... ) >>> # Text2World: generate a 93-frame world video from text only. + >>> prompt = ( + ... "As the red light shifts to green, the red bus at the intersection begins to move forward, its headlights " + ... "cutting through the falling snow. The snowy tire tracks deepen as the vehicle inches ahead, casting fresh " + ... "lines onto the slushy road. Around it, streetlights glow warmer, illuminating the drifting flakes and wet " + ... "reflections on the asphalt. Other cars behind start to edge forward, their beams joining the scene. " + ... "The stillness of the urban street transitions into motion as the quiet snowfall is punctuated by the slow " + ... "advance of traffic through the frosty city corridor." + ... ) >>> video = pipe( ... image=None, ... video=None, @@ -93,8 +108,20 @@ def retrieve_latents( >>> export_to_video(video, "text2world.mp4", fps=16) >>> # Image2World: condition on a single image and generate a 93-frame world video. + >>> prompt = ( + ... "A high-definition video captures the precision of robotic welding in an industrial setting. " + ... "The first frame showcases a robotic arm, equipped with a welding torch, positioned over a large metal structure. " + ... "The welding process is in full swing, with bright sparks and intense light illuminating the scene, creating a vivid " + ... "display of blue and white hues. A significant amount of smoke billows around the welding area, partially obscuring " + ... "the view but emphasizing the heat and activity. The background reveals parts of the workshop environment, including a " + ... "ventilation system and various pieces of machinery, indicating a busy and functional industrial workspace. As the video " + ... "progresses, the robotic arm maintains its steady position, continuing the welding process and moving to its left. " + ... "The welding torch consistently emits sparks and light, and the smoke continues to rise, diffusing slightly as it moves upward. " + ... "The metal surface beneath the torch shows ongoing signs of heating and melting. The scene retains its industrial ambiance, with " + ... "the welding sparks and smoke dominating the visual field, underscoring the ongoing nature of the welding operation." + ... ) >>> image = load_image( - ... 
"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/yellow-scrubber.png" + ... "https://media.githubusercontent.com/media/nvidia-cosmos/cosmos-predict2.5/refs/heads/main/assets/base/robot_welding.jpg" ... ) >>> video = pipe( ... image=image, @@ -104,10 +131,22 @@ def retrieve_latents( ... num_frames=93, ... generator=torch.Generator().manual_seed(1), ... ).frames[0] - >>> export_to_video(video, "image2world.mp4", fps=16) + >>> # export_to_video(video, "image2world.mp4", fps=16) >>> # Video2World: condition on an input clip and predict a 93-frame world video. - >>> input_video = load_video("path/to/input.mp4") + >>> prompt = ( + ... "The video opens with an aerial view of a large-scale sand mining construction operation, showcasing extensive piles " + ... "of brown sand meticulously arranged in parallel rows. A central water channel, fed by a water pipe, flows through the " + ... "middle of these sand heaps, creating ripples and movement as it cascades down. The surrounding area features dense green " + ... "vegetation on the left, contrasting with the sandy terrain, while a body of water is visible in the background on the right. " + ... "As the video progresses, a piece of heavy machinery, likely a bulldozer, enters the frame from the right, moving slowly along " + ... "the edge of the sand piles. This machinery's presence indicates ongoing construction work in the operation. The final frame " + ... "captures the same scene, with the water continuing its flow and the bulldozer still in motion, maintaining the dynamic yet " + ... "steady pace of the construction activity." + ... ) + >>> input_video = load_video( + ... "https://github.com/nvidia-cosmos/cosmos-predict2.5/raw/refs/heads/main/assets/base/sand_mining.mp4" + ... ) >>> video = pipe( ... image=None, ... 
video=input_video, From bae477a1f806cf07e01115986b09b295a9ba0e7f Mon Sep 17 00:00:00 2001 From: Miguel Martin Date: Wed, 17 Dec 2025 00:22:54 +0000 Subject: [PATCH 10/22] wrapper pipelines + make style --- scripts/convert_cosmos_to_diffusers.py | 8 +- src/diffusers/__init__.py | 10 +- src/diffusers/pipelines/__init__.py | 10 +- src/diffusers/pipelines/cosmos/__init__.py | 14 +- .../cosmos/pipeline_cosmos2_5_predict.py | 484 +++++++++++++++++- .../scheduling_flow_unipc_multistep.py | 37 +- .../cosmos/test_cosmos2_5_predict.py | 10 +- tests/schedulers/test_scheduler_flow_unipc.py | 26 +- 8 files changed, 557 insertions(+), 42 deletions(-) diff --git a/scripts/convert_cosmos_to_diffusers.py b/scripts/convert_cosmos_to_diffusers.py index dbd4949739a3..f4a584a1b091 100644 --- a/scripts/convert_cosmos_to_diffusers.py +++ b/scripts/convert_cosmos_to_diffusers.py @@ -62,7 +62,7 @@ EDMEulerScheduler, FlowMatchEulerDiscreteScheduler, ) -from diffusers.pipelines.cosmos.pipeline_cosmos2_5_predict import Cosmos_2_5_PredictBase +from diffusers.pipelines.cosmos.pipeline_cosmos2_5_predict import Cosmos2_5_PredictBase def remove_keys_(key: str, state_dict: Dict[str, Any]): @@ -527,7 +527,7 @@ def save_pipeline_cosmos_2_0(args, transformer, vae): pipe.save_pretrained(args.output_path, safe_serialization=True, max_shard_size="5GB") -def save_pipeline_cosmos_2_5(args, transformer, vae): +def save_pipeline_cosmos2_5(args, transformer, vae): text_encoder_path = args.text_encoder_path or "nvidia/Cosmos-Reason1-7B" tokenizer_path = args.tokenizer_path or "Qwen/Qwen2.5-VL-7B-Instruct" @@ -538,7 +538,7 @@ def save_pipeline_cosmos_2_5(args, transformer, vae): scheduler = FlowMatchEulerDiscreteScheduler(use_karras_sigmas=True) - pipe = Cosmos_2_5_PredictBase( + pipe = Cosmos2_5_PredictBase( text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, @@ -613,6 +613,6 @@ def get_args(): assert args.tokenizer_path is not None save_pipeline_cosmos_2_0(args, transformer, vae) elif "Cosmos-2.5" in args.transformer_type: - save_pipeline_cosmos_2_5(args, transformer, vae) + save_pipeline_cosmos2_5(args, transformer, vae) else: raise AssertionError(f"{args.transformer_type} not supported") diff --git a/src/diffusers/__init__.py b/src/diffusers/__init__.py index bc169a451ebd..8623aa61178f 100644 --- a/src/diffusers/__init__.py +++ b/src/diffusers/__init__.py @@ -464,7 +464,10 @@ "CogView4ControlPipeline", "CogView4Pipeline", "ConsisIDPipeline", - "Cosmos_2_5_PredictBase", + "Cosmos2_5_PredictBase", + "Cosmos2_5_PredictImage2World", + "Cosmos2_5_PredictText2World", + "Cosmos2_5_PredictVideo2World", "Cosmos2TextToImagePipeline", "Cosmos2VideoToWorldPipeline", "CosmosTextToWorldPipeline", @@ -1178,9 +1181,12 @@ CogView4ControlPipeline, CogView4Pipeline, ConsisIDPipeline, + Cosmos2_5_PredictBase, + Cosmos2_5_PredictImage2World, + Cosmos2_5_PredictText2World, + Cosmos2_5_PredictVideo2World, Cosmos2TextToImagePipeline, Cosmos2VideoToWorldPipeline, - Cosmos_2_5_PredictBase, CosmosTextToWorldPipeline, CosmosVideoToWorldPipeline, CycleDiffusionPipeline, diff --git a/src/diffusers/pipelines/__init__.py b/src/diffusers/pipelines/__init__.py index fe9920055d64..8b98cda38c4b 100644 --- a/src/diffusers/pipelines/__init__.py +++ b/src/diffusers/pipelines/__init__.py @@ -165,7 +165,10 @@ _import_structure["cogview4"] = ["CogView4Pipeline", "CogView4ControlPipeline"] _import_structure["consisid"] = ["ConsisIDPipeline"] _import_structure["cosmos"] = [ - "Cosmos_2_5_PredictBase", + "Cosmos2_5_PredictBase", + 
"Cosmos2_5_PredictImage2World", + "Cosmos2_5_PredictText2World", + "Cosmos2_5_PredictVideo2World", "Cosmos2TextToImagePipeline", "CosmosTextToWorldPipeline", "CosmosVideoToWorldPipeline", @@ -623,9 +626,12 @@ StableDiffusionXLControlNetXSPipeline, ) from .cosmos import ( + Cosmos2_5_PredictBase, + Cosmos2_5_PredictImage2World, + Cosmos2_5_PredictText2World, + Cosmos2_5_PredictVideo2World, Cosmos2TextToImagePipeline, Cosmos2VideoToWorldPipeline, - Cosmos_2_5_PredictBase, CosmosTextToWorldPipeline, CosmosVideoToWorldPipeline, ) diff --git a/src/diffusers/pipelines/cosmos/__init__.py b/src/diffusers/pipelines/cosmos/__init__.py index edd8163b86a0..745921149833 100644 --- a/src/diffusers/pipelines/cosmos/__init__.py +++ b/src/diffusers/pipelines/cosmos/__init__.py @@ -22,7 +22,12 @@ _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: - _import_structure["pipeline_cosmos2_5_predict"] = ["Cosmos_2_5_PredictBase", "retrieve_latents"] + _import_structure["pipeline_cosmos2_5_predict"] = [ + "Cosmos2_5_PredictBase", + "Cosmos2_5_PredictImage2World", + "Cosmos2_5_PredictText2World", + "Cosmos2_5_PredictVideo2World", + ] _import_structure["pipeline_cosmos2_text2image"] = ["Cosmos2TextToImagePipeline"] _import_structure["pipeline_cosmos2_video2world"] = ["Cosmos2VideoToWorldPipeline"] _import_structure["pipeline_cosmos_text2world"] = ["CosmosTextToWorldPipeline"] @@ -36,7 +41,12 @@ except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: - from .pipeline_cosmos2_5_predict import Cosmos_2_5_PredictBase, retrieve_latents + from .pipeline_cosmos2_5_predict import ( + Cosmos2_5_PredictBase, + Cosmos2_5_PredictImage2World, + Cosmos2_5_PredictText2World, + Cosmos2_5_PredictVideo2World, + ) from .pipeline_cosmos2_text2image import Cosmos2TextToImagePipeline from .pipeline_cosmos2_video2world import Cosmos2VideoToWorldPipeline from .pipeline_cosmos_text2world import CosmosTextToWorldPipeline diff --git a/src/diffusers/pipelines/cosmos/pipeline_cosmos2_5_predict.py b/src/diffusers/pipelines/cosmos/pipeline_cosmos2_5_predict.py index dde6c3d3ed13..67a94defb091 100644 --- a/src/diffusers/pipelines/cosmos/pipeline_cosmos2_5_predict.py +++ b/src/diffusers/pipelines/cosmos/pipeline_cosmos2_5_predict.py @@ -71,11 +71,11 @@ def retrieve_latents( Examples: ```python >>> import torch - >>> from diffusers import Cosmos_2_5_PredictBase + >>> from diffusers import Cosmos2_5_PredictBase >>> from diffusers.utils import export_to_video, load_image, load_video >>> model_id = "nvidia/Cosmos-Predict2.5-Base-2B" - >>> pipe = Cosmos_2_5_PredictBase.from_pretrained(model_id, torch_dtype=torch.bfloat16) + >>> pipe = Cosmos2_5_PredictBase.from_pretrained(model_id, torch_dtype=torch.bfloat16) >>> pipe = pipe.to("cuda") >>> # Common negative prompt reused across modes. @@ -163,7 +163,7 @@ def retrieve_latents( """ -class Cosmos_2_5_PredictBase(DiffusionPipeline): +class Cosmos2_5_PredictBase(DiffusionPipeline): r""" Pipeline for [Cosmos Predict2.5](https://github.com/nvidia-cosmos/cosmos-predict2.5) base model. @@ -552,7 +552,9 @@ def __call__( - **Image2World**: `image` provided, `video=None`, `prompt` provided. Conditions on a single frame. - **Video2World**: `video` provided, `image=None`, `prompt` provided. Conditions on an input clip. - Set `num_frames=93` (default) to produce a world video, or `num_frames=1` to produce a single image frame. 
+ Set `num_frames=93` (default) to produce a world video, or `num_frames=1` to produce a single image frame (the + modes above then act in "*2Image" mode). + Outputs follow `output_type` (e.g., `"pil"` returns a list of `num_frames` PIL images per prompt). Args: @@ -739,7 +741,11 @@ def __call__( self._current_timestep = t.cpu().item() # NOTE: assumes sigma(t) \in [0, 1] - sigma_t = torch.tensor(self.scheduler.sigmas[i].item()).unsqueeze(0).to(device=device, dtype=transformer_dtype) + sigma_t = ( + torch.tensor(self.scheduler.sigmas[i].item()) + .unsqueeze(0) + .to(device=device, dtype=transformer_dtype) + ) in_latents = cond_mask * cond_latent + (1 - cond_mask) * latents in_latents = in_latents.to(transformer_dtype) @@ -819,3 +825,471 @@ def __call__( return (video,) return CosmosPipelineOutput(frames=video) + + +class Cosmos2_5_PredictText2World(Cosmos2_5_PredictBase): + r""" + Pipeline for [Cosmos Predict2.5](https://github.com/nvidia-cosmos/cosmos-predict2.5) Text2World. + + This pipeline is a specialized version of [`Cosmos2_5_PredictBase`], please refer to the superclass for advanced + options. + + Args: + text_encoder ([`Qwen2_5_VLForConditionalGeneration`]): + Frozen text-encoder. Cosmos Predict2.5 uses the [Qwen2.5 + VL](https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct) encoder. + tokenizer (`AutoTokenizer`): + Tokenizer associated with the Qwen2.5 VL encoder. + transformer ([`CosmosTransformer3DModel`]): + Conditional Transformer to denoise the encoded image latents. + scheduler ([`FlowUniPCMultistepScheduler`]): + A scheduler to be used in combination with `transformer` to denoise the encoded image latents. + vae ([`AutoencoderKLWan`]): + Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations. + """ + + @torch.no_grad() + def __call__( + self, + prompt: Optional[Union[str, List[str]]] = None, + negative_prompt: Optional[Union[str, List[str]]] = None, + height: int = 704, + width: int = 1280, + num_inference_steps: int = 35, + guidance_scale: float = 7.0, + fps: int = 16, + num_videos_per_prompt: Optional[int] = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback_on_step_end: Optional[ + Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] + ] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + max_sequence_length: int = 512, + conditional_frame_timestep: float = 0.1, + ): + r""" + Text2World: text-conditioned world generation. This is a wrapper around the base pipeline. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide generation. Required unless `prompt_embeds` is supplied. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide generation. Ignored when not using guidance (`guidance_scale <= 1`). + height (`int`, defaults to `704`): + The height in pixels of the generated image. + width (`int`, defaults to `1280`): + The width in pixels of the generated image.
+ num_inference_steps (`int`, defaults to `35`): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, defaults to `7.0`): + Guidance scale as defined in [Classifier-Free Diffusion + Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2 + of the [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting + `guidance_scale > 1`. + fps (`int`, defaults to `16`): + The frames per second of the generated video. + num_videos_per_prompt (`int`, *optional*, defaults to 1): + The number of videos to generate per prompt. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. If not provided, negative_prompt_embeds will be generated from + the `negative_prompt` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`CosmosPipelineOutput`] instead of a plain tuple. + callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): + A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of + each denoising step during inference with the following arguments: `callback_on_step_end(self: + DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a + list of all tensors as specified by `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + max_sequence_length (`int`, defaults to `512`): + The maximum number of tokens in the prompt. If the prompt exceeds this length, it will be truncated. If + the prompt is shorter than this length, it will be padded. + + Returns: + [`~CosmosPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`CosmosPipelineOutput`] is returned, otherwise a `tuple` is returned where + the first element is a list with the generated frames.
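+ + Note: + This wrapper forwards to [`Cosmos2_5_PredictBase.__call__`] with `image=None`, `video=None`, and + `num_frames=93` fixed; all other arguments are passed through unchanged.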
+
+        Examples:
+        ```python
+        >>> import torch
+        >>> from diffusers import Cosmos2_5_PredictText2World
+        >>> from diffusers.utils import export_to_video
+
+        >>> pipe = Cosmos2_5_PredictText2World.from_pretrained(
+        ...     "nvidia/Cosmos-Predict2.5-Base-2B", torch_dtype=torch.bfloat16
+        ... )
+        >>> pipe = pipe.to("cuda")
+        >>> video = pipe(
+        ...     prompt="A snow scene with cars moving through an intersection.",
+        ...     negative_prompt="low quality, blurry",
+        ...     generator=torch.Generator(device="cuda").manual_seed(1),
+        ... ).frames[0]
+        >>> export_to_video(video, "text2world_wrapper.mp4", fps=16)
+        ```
+        """
+        return super().__call__(
+            image=None,
+            video=None,
+            prompt=prompt,
+            negative_prompt=negative_prompt,
+            height=height,
+            width=width,
+            num_frames=93,
+            num_inference_steps=num_inference_steps,
+            guidance_scale=guidance_scale,
+            fps=fps,
+            num_videos_per_prompt=num_videos_per_prompt,
+            generator=generator,
+            latents=latents,
+            prompt_embeds=prompt_embeds,
+            negative_prompt_embeds=negative_prompt_embeds,
+            output_type=output_type,
+            return_dict=return_dict,
+            callback_on_step_end=callback_on_step_end,
+            callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
+            max_sequence_length=max_sequence_length,
+            conditional_frame_timestep=conditional_frame_timestep,
+        )
+
+
+class Cosmos2_5_PredictImage2World(Cosmos2_5_PredictBase):
+    r"""
+    Pipeline for [Cosmos Predict2.5](https://github.com/nvidia-cosmos/cosmos-predict2.5) Image2World.
+
+    This pipeline is a specialized version of [`Cosmos2_5_PredictBase`]; please refer to the superclass for advanced
+    options.
+
+    Args:
+        text_encoder ([`Qwen2_5_VLForConditionalGeneration`]):
+            Frozen text-encoder. Cosmos Predict2.5 uses the [Qwen2.5
+            VL](https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct) encoder.
+        tokenizer (`AutoTokenizer`):
+            Tokenizer associated with the Qwen2.5 VL encoder.
+        transformer ([`CosmosTransformer3DModel`]):
+            Conditional Transformer to denoise the encoded image latents.
+        scheduler ([`FlowUniPCMultistepScheduler`]):
+            A scheduler to be used in combination with `transformer` to denoise the encoded image latents.
+        vae ([`AutoencoderKLWan`]):
+            Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations.
+    """
+
+    @torch.no_grad()
+    def __call__(
+        self,
+        prompt: Optional[Union[str, List[str]]] = None,
+        image: Optional[PipelineImageInput] = None,
+        negative_prompt: Optional[Union[str, List[str]]] = None,
+        height: int = 704,
+        width: int = 1280,
+        num_inference_steps: int = 35,
+        guidance_scale: float = 7.0,
+        fps: int = 16,
+        num_videos_per_prompt: Optional[int] = 1,
+        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+        latents: Optional[torch.Tensor] = None,
+        prompt_embeds: Optional[torch.Tensor] = None,
+        negative_prompt_embeds: Optional[torch.Tensor] = None,
+        output_type: Optional[str] = "pil",
+        return_dict: bool = True,
+        callback_on_step_end: Optional[
+            Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]
+        ] = None,
+        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
+        max_sequence_length: int = 512,
+        conditional_frame_timestep: float = 0.1,
+    ):
+        r"""
+        Image2World: image-conditioned world generation. This is a thin wrapper around the base pipeline that fixes
+        `num_frames=93` and conditions on a single input image.
+
+        Args:
+            image (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`):
+                Single conditioning image for Image2World generation. Required; a `ValueError` is raised when it is
+                missing.
+            prompt (`str` or `List[str]`, *optional*):
+                The prompt or prompts to guide generation. Required unless `prompt_embeds` is supplied.
+            negative_prompt (`str` or `List[str]`, *optional*):
+                The prompt or prompts to steer generation away from. Ignored when `guidance_scale <= 1`.
+            height (`int`, defaults to `704`):
+                The height in pixels of the generated video.
+            width (`int`, defaults to `1280`):
+                The width in pixels of the generated video.
+            num_inference_steps (`int`, defaults to `35`):
+                The number of denoising steps. More denoising steps usually lead to a higher quality video at the
+                expense of slower inference.
+            guidance_scale (`float`, defaults to `7.0`):
+                Guidance scale as defined in [Classifier-Free Diffusion
+                Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2
+                of the [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting
+                `guidance_scale > 1`.
+            fps (`int`, defaults to `16`):
+                The frames per second of the generated video.
+            num_videos_per_prompt (`int`, *optional*, defaults to 1):
+                The number of videos to generate per prompt.
+            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
+                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
+                generation deterministic.
+            latents (`torch.Tensor`, *optional*):
+                Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video
+                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
+                tensor is generated by sampling using the supplied random `generator`.
+            prompt_embeds (`torch.Tensor`, *optional*):
+                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
+                provided, text embeddings will be generated from the `prompt` input argument.
+            negative_prompt_embeds (`torch.Tensor`, *optional*):
+                Pre-generated negative text embeddings. If not provided, `negative_prompt_embeds` will be generated
+                from the `negative_prompt` input argument.
+            output_type (`str`, *optional*, defaults to `"pil"`):
+                The output format of the generated frames. Choose between `PIL.Image` or `np.array`.
+            return_dict (`bool`, *optional*, defaults to `True`):
+                Whether or not to return a [`CosmosPipelineOutput`] instead of a plain tuple.
+            callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*):
+                A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of
+                each denoising step during inference, with the following arguments: `callback_on_step_end(self:
+                DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a
+                list of all tensors as specified by `callback_on_step_end_tensor_inputs`.
+            callback_on_step_end_tensor_inputs (`List`, *optional*):
+                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
+                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
+                `._callback_tensor_inputs` attribute of your pipeline class.
+            max_sequence_length (`int`, defaults to `512`):
+                The maximum number of tokens in the prompt. If the prompt exceeds this length, it will be truncated. If
+                the prompt is shorter than this length, it will be padded.
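+            conditional_frame_timestep (`float`, defaults to `0.1`):
+                Timestep assigned to the conditioning latent frames; forwarded unchanged to
+                [`Cosmos2_5_PredictBase.__call__`].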
+
+        Returns:
+            [`~CosmosPipelineOutput`] or `tuple`:
+                If `return_dict` is `True`, [`CosmosPipelineOutput`] is returned, otherwise a `tuple` is returned
+                whose first element is a list with the generated video frames.
+
+        Examples:
+        ```python
+        >>> import torch
+        >>> from diffusers import Cosmos2_5_PredictImage2World
+        >>> from diffusers.utils import export_to_video, load_image
+
+        >>> pipe = Cosmos2_5_PredictImage2World.from_pretrained(
+        ...     "nvidia/Cosmos-Predict2.5-Base-2B", torch_dtype=torch.bfloat16
+        ... )
+        >>> pipe = pipe.to("cuda")
+        >>> image = load_image(
+        ...     "https://media.githubusercontent.com/media/nvidia-cosmos/cosmos-predict2.5/refs/heads/main/assets/base/robot_welding.jpg"
+        ... )
+        >>> video = pipe(
+        ...     prompt="A robotic welding arm continues its work.",
+        ...     image=image,
+        ...     negative_prompt="low quality, blurry",
+        ...     generator=torch.Generator(device="cuda").manual_seed(2),
+        ... ).frames[0]
+        >>> export_to_video(video, "image2world_wrapper.mp4", fps=16)
+        ```
+        """
+        if image is None:
+            raise ValueError("`image` must be provided for Image2World generation.")
+
+        return super().__call__(
+            image=image,
+            video=None,
+            prompt=prompt,
+            negative_prompt=negative_prompt,
+            height=height,
+            width=width,
+            num_frames=93,
+            num_inference_steps=num_inference_steps,
+            guidance_scale=guidance_scale,
+            fps=fps,
+            num_videos_per_prompt=num_videos_per_prompt,
+            generator=generator,
+            latents=latents,
+            prompt_embeds=prompt_embeds,
+            negative_prompt_embeds=negative_prompt_embeds,
+            output_type=output_type,
+            return_dict=return_dict,
+            callback_on_step_end=callback_on_step_end,
+            callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
+            max_sequence_length=max_sequence_length,
+            conditional_frame_timestep=conditional_frame_timestep,
+        )
+
+
+class Cosmos2_5_PredictVideo2World(Cosmos2_5_PredictBase):
+    r"""
+    Pipeline for [Cosmos Predict2.5](https://github.com/nvidia-cosmos/cosmos-predict2.5) Video2World.
+
+    This pipeline is a specialized version of [`Cosmos2_5_PredictBase`]; please refer to the superclass for advanced
+    options.
+
+    Args:
+        text_encoder ([`Qwen2_5_VLForConditionalGeneration`]):
+            Frozen text-encoder. Cosmos Predict2.5 uses the [Qwen2.5
+            VL](https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct) encoder.
+        tokenizer (`AutoTokenizer`):
+            Tokenizer associated with the Qwen2.5 VL encoder.
+        transformer ([`CosmosTransformer3DModel`]):
+            Conditional Transformer to denoise the encoded image latents.
+        scheduler ([`FlowUniPCMultistepScheduler`]):
+            A scheduler to be used in combination with `transformer` to denoise the encoded image latents.
+        vae ([`AutoencoderKLWan`]):
+            Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations.
+ """ + + @torch.no_grad() + def __call__( + self, + prompt: Union[str, List[str]] | None = None, + video: List[PipelineImageInput] | None = None, + negative_prompt: Optional[Union[str, List[str]]] = None, + height: int = 704, + width: int = 1280, + num_inference_steps: int = 35, + guidance_scale: float = 7.0, + fps: int = 16, + num_videos_per_prompt: Optional[int] = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback_on_step_end: Optional[ + Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] + ] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + max_sequence_length: int = 512, + conditional_frame_timestep: float = 0.1, + ): + r""" + Video2World: video-conditioned world generation. This is a wrapper around the base pipeline. + + Args: + video (`List[PIL.Image.Image]`, `np.ndarray`, `torch.Tensor`, *optional*): + Optional input video for Video2World conditioning. Must be `None` when `image` is provided. + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide generation. Required unless `prompt_embeds` is supplied. + height (`int`, defaults to `704`): + The height in pixels of the generated image. + width (`int`, defaults to `1280`): + The width in pixels of the generated image. + num_frames (`int`, defaults to `93`): + Number of output frames. Use `93` for world (video) generation; set to `1` to return a single frame. + num_inference_steps (`int`, defaults to `35`): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, defaults to `7.0`): + Guidance scale as defined in [Classifier-Free Diffusion + Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. + of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting + `guidance_scale > 1`. + fps (`int`, defaults to `16`): + The frames per second of the generated video. + num_videos_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. For PixArt-Sigma this negative prompt should be "". If not + provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. 
+            return_dict (`bool`, *optional*, defaults to `True`):
+                Whether or not to return a [`CosmosPipelineOutput`] instead of a plain tuple.
+            callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*):
+                A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of
+                each denoising step during inference, with the following arguments: `callback_on_step_end(self:
+                DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a
+                list of all tensors as specified by `callback_on_step_end_tensor_inputs`.
+            callback_on_step_end_tensor_inputs (`List`, *optional*):
+                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
+                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
+                `._callback_tensor_inputs` attribute of your pipeline class.
+            max_sequence_length (`int`, defaults to `512`):
+                The maximum number of tokens in the prompt. If the prompt exceeds this length, it will be truncated. If
+                the prompt is shorter than this length, it will be padded.
+            conditional_frame_timestep (`float`, defaults to `0.1`):
+                Timestep assigned to the conditioning latent frames; forwarded unchanged to
+                [`Cosmos2_5_PredictBase.__call__`].
+
+        Returns:
+            [`~CosmosPipelineOutput`] or `tuple`:
+                If `return_dict` is `True`, [`CosmosPipelineOutput`] is returned, otherwise a `tuple` is returned
+                whose first element is a list with the generated video frames.
+
+        Examples:
+        ```python
+        >>> import torch
+        >>> from diffusers import Cosmos2_5_PredictVideo2World
+        >>> from diffusers.utils import export_to_video, load_video
+
+        >>> pipe = Cosmos2_5_PredictVideo2World.from_pretrained(
+        ...     "nvidia/Cosmos-Predict2.5-Base-2B", torch_dtype=torch.bfloat16
+        ... )
+        >>> pipe = pipe.to("cuda")
+        >>> input_video = load_video(
+        ...     "https://github.com/nvidia-cosmos/cosmos-predict2.5/raw/refs/heads/main/assets/base/sand_mining.mp4"
+        ... )
+        >>> video = pipe(
+        ...     prompt="Aerial view of sand mining continues.",
+        ...     video=input_video,
+        ...     negative_prompt="low quality, blurry",
+        ...     generator=torch.Generator(device="cuda").manual_seed(3),
+        ... 
).frames[0] + >>> export_to_video(video, "video2world_wrapper.mp4", fps=16) + ``` + """ + if video is None: + raise ValueError("`video` must be provided for Video2World generation.") + + return super().__call__( + image=None, + video=video, + prompt=prompt, + negative_prompt=negative_prompt, + height=height, + width=width, + num_frames=93, + num_inference_steps=num_inference_steps, + guidance_scale=guidance_scale, + fps=fps, + num_videos_per_prompt=num_videos_per_prompt, + generator=generator, + latents=latents, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + output_type=output_type, + return_dict=return_dict, + callback_on_step_end=callback_on_step_end, + callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, + max_sequence_length=max_sequence_length, + conditional_frame_timestep=conditional_frame_timestep, + ) diff --git a/src/diffusers/schedulers/scheduling_flow_unipc_multistep.py b/src/diffusers/schedulers/scheduling_flow_unipc_multistep.py index e2f2508abd95..e727f3bff279 100644 --- a/src/diffusers/schedulers/scheduling_flow_unipc_multistep.py +++ b/src/diffusers/schedulers/scheduling_flow_unipc_multistep.py @@ -8,7 +8,9 @@ from diffusers.utils import deprecate -def _get_karras_sigmas(num_train_steps: int, num_steps: int, sigma_max: float, sigma_min: float, rho: int, final_sigmas_type: str): +def _get_karras_sigmas( + num_train_steps: int, num_steps: int, sigma_max: float, sigma_min: float, rho: int, final_sigmas_type: str +): sigmas = np.arange(num_steps + 1, dtype=np.float32) / num_steps min_inv_rho = sigma_min ** (1 / rho) max_inv_rho = sigma_max ** (1 / rho) @@ -20,9 +22,7 @@ def _get_karras_sigmas(num_train_steps: int, num_steps: int, sigma_max: float, s elif final_sigmas_type == "sigma_min": sigma_last = sigmas[-1] else: - raise ValueError( - f"`final_sigmas_type` must be 'zero' or 'sigma_min' but got {final_sigmas_type}" - ) + raise ValueError(f"`final_sigmas_type` must be 'zero' or 'sigma_min' but got {final_sigmas_type}") timesteps = torch.from_numpy(sigmas * num_train_steps).to(torch.int64) sigmas = np.concatenate([sigmas, [sigma_last]]).astype(np.float32) @@ -32,8 +32,8 @@ def _get_karras_sigmas(num_train_steps: int, num_steps: int, sigma_max: float, s class FlowUniPCMultistepScheduler(SchedulerMixin, ConfigMixin): """ - `FlowUniPCMultistepScheduler` is the UniPC algorithm[1] for flow - matching[2], but strictly uses the Karras sigmas [3] (i.e. it follows the EDMEulerScheduler). + `FlowUniPCMultistepScheduler` is the UniPC algorithm[1] for flow matching[2], but strictly uses the Karras sigmas + [3] (i.e. it follows the EDMEulerScheduler). Note this a simplified version of `UniPCMultistepScheduler`, as it: 1. Does not have variance preserving sigmas @@ -41,9 +41,10 @@ class FlowUniPCMultistepScheduler(SchedulerMixin, ConfigMixin): 3. Assumes prediction_type == "flow_prediction" (this parameter is removed) References: - [1] Wang, Chong, et al. "UniPC: A Unified Predictor-Corrector Framework for Fast Sampling of Diffusion Models" https://arxiv.org/abs/2302.04867 - [2] Lipman, Chen, et al. "Flow matching for generative modeling." https://arxiv.org/abs/2210.02747 - [3] Karras, Tero, et al. "Elucidating the Design Space of Diffusion-Based Generative Models." https://huggingface.co/papers/2206.00364 + [1] Wang, Chong, et al. "UniPC: A Unified Predictor-Corrector Framework for Fast Sampling of Diffusion Models" + https://arxiv.org/abs/2302.04867 [2] Lipman, Chen, et al. "Flow matching for generative modeling." 
+ https://arxiv.org/abs/2210.02747 [3] Karras, Tero, et al. "Elucidating the Design Space of Diffusion-Based + Generative Models." https://huggingface.co/papers/2206.00364 This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic methods the library implements for all schedulers such as loading and saving. @@ -113,7 +114,9 @@ def __init__( self.solver_p = solver_p self.num_inference_steps = None - self.sigmas, self.timesteps = _get_karras_sigmas(num_train_timesteps, num_train_timesteps, sigma_max, sigma_min, rho, final_sigmas_type) + self.sigmas, self.timesteps = _get_karras_sigmas( + num_train_timesteps, num_train_timesteps, sigma_max, sigma_min, rho, final_sigmas_type + ) self.sigma_min = self.sigmas[-1].item() self.sigma_max = self.sigmas[0].item() @@ -144,7 +147,6 @@ def set_begin_index(self, begin_index: int = 0): """ self._begin_index = begin_index - # Modified from diffusers.schedulers.scheduling_flow_match_euler_discrete.FlowMatchEulerDiscreteScheduler.set_timesteps def set_timesteps( self, @@ -163,7 +165,14 @@ def set_timesteps( """ assert sigmas is None, "sigmas are not supported for FlowUniPCMultistepScheduler" - self.sigmas, self.timesteps = _get_karras_sigmas(self.config.num_train_timesteps, num_inference_steps, self.config.sigma_max, self.config.sigma_min, self.config.rho, self.config.final_sigmas_type) + self.sigmas, self.timesteps = _get_karras_sigmas( + self.config.num_train_timesteps, + num_inference_steps, + self.config.sigma_max, + self.config.sigma_min, + self.config.rho, + self.config.final_sigmas_type, + ) self.num_inference_steps = len(self.timesteps) self.sigma_min = self.sigmas[-1].item() @@ -171,9 +180,9 @@ def set_timesteps( self.sigmas = self.sigmas.to(device) self.timesteps = self.timesteps.to(device) - self._reset_state() + self._reset_state(device=device) - def _reset_state(self, solver_order: Optional[int] = None): + def _reset_state(self, solver_order: Optional[int] = None, device=None): """ Resets the noise schedule & solver state variables """ diff --git a/tests/pipelines/cosmos/test_cosmos2_5_predict.py b/tests/pipelines/cosmos/test_cosmos2_5_predict.py index 87ebd629585a..706893aae781 100644 --- a/tests/pipelines/cosmos/test_cosmos2_5_predict.py +++ b/tests/pipelines/cosmos/test_cosmos2_5_predict.py @@ -22,7 +22,7 @@ import torch from transformers import AutoTokenizer, Qwen2VLForConditionalGeneration -from diffusers import AutoencoderKLWan, Cosmos_2_5_PredictBase, CosmosTransformer3DModel, FlowUniPCMultistepScheduler +from diffusers import AutoencoderKLWan, Cosmos2_5_PredictBase, CosmosTransformer3DModel, FlowUniPCMultistepScheduler from ...testing_utils import enable_full_determinism, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS @@ -33,15 +33,15 @@ enable_full_determinism() -class Cosmos_2_5_PredictBaseWrapper(Cosmos_2_5_PredictBase): +class Cosmos2_5_PredictBaseWrapper(Cosmos2_5_PredictBase): @staticmethod def from_pretrained(*args, **kwargs): kwargs["safety_checker"] = DummyCosmosSafetyChecker() - return Cosmos_2_5_PredictBase.from_pretrained(*args, **kwargs) + return Cosmos2_5_PredictBase.from_pretrained(*args, **kwargs) -class Cosmos_2_5_PredictPipelineFastTests(PipelineTesterMixin, unittest.TestCase): - pipeline_class = Cosmos_2_5_PredictBaseWrapper +class Cosmos2_5_PredictPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = Cosmos2_5_PredictBaseWrapper params = TEXT_TO_IMAGE_PARAMS - 
{"cross_attention_kwargs"} batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS diff --git a/tests/schedulers/test_scheduler_flow_unipc.py b/tests/schedulers/test_scheduler_flow_unipc.py index 343507bef21e..3cd7ab1694d0 100644 --- a/tests/schedulers/test_scheduler_flow_unipc.py +++ b/tests/schedulers/test_scheduler_flow_unipc.py @@ -17,14 +17,20 @@ def test_set_timesteps(self): scheduler.set_timesteps(num_inference_steps=num_inference_steps) # 0 appended to end for sigmas - expected_sigmas = [0.9950248599052429, 0.9787454605102539, 0.8774884343147278, 0.3604971766471863, 0.009900986216962337, 0.0] + expected_sigmas = [ + 0.9950248599052429, + 0.9787454605102539, + 0.8774884343147278, + 0.3604971766471863, + 0.009900986216962337, + 0.0, + ] expected_sigmas = torch.tensor(expected_sigmas) expected_timesteps = (expected_sigmas * num_train_timesteps).to(torch.int64) expected_timesteps = expected_timesteps[0:-1] self.assertTrue(torch.allclose(scheduler.sigmas, expected_sigmas)) self.assertTrue(torch.all(expected_timesteps == scheduler.timesteps)) - def test_inference_train_same_schedule(self): num_inference_steps = 4 num_train_timesteps = num_inference_steps @@ -48,12 +54,16 @@ def test_set_timesteps_with_nondefault_args(self): ) scheduler.set_timesteps(num_inference_steps=num_inference_steps) - expected_sigmas = torch.tensor([0.9803921580314636, - 0.9388325214385986, - 0.7652841210365295, - 0.2545345723628998, - 0.004975131247192621, - 0.004975131247192621]) + expected_sigmas = torch.tensor( + [ + 0.9803921580314636, + 0.9388325214385986, + 0.7652841210365295, + 0.2545345723628998, + 0.004975131247192621, + 0.004975131247192621, + ] + ) self.assertTrue(torch.allclose(scheduler.sigmas, expected_sigmas)) def test_step(self): From 232a8168ff4abc6603454eeb14ec1edb6b80423f Mon Sep 17 00:00:00 2001 From: Miguel Martin Date: Wed, 17 Dec 2025 00:35:59 +0000 Subject: [PATCH 11/22] remove unnecessary files --- .../schedulers/.nfs42f9905b28788d3400000055 | 770 ------------------ 1 file changed, 770 deletions(-) delete mode 100644 src/diffusers/schedulers/.nfs42f9905b28788d3400000055 diff --git a/src/diffusers/schedulers/.nfs42f9905b28788d3400000055 b/src/diffusers/schedulers/.nfs42f9905b28788d3400000055 deleted file mode 100644 index fb5f210f0ba8..000000000000 --- a/src/diffusers/schedulers/.nfs42f9905b28788d3400000055 +++ /dev/null @@ -1,770 +0,0 @@ -# TODO(migmartin): reduce LOC by using inheritance from UniPCMultistepScheduler -# Copied from https://github.com/huggingface/diffusers/blob/v0.31.0/src/diffusers/schedulers/scheduling_unipc_multistep.py -# Convert unipc for flow matching -# Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved. - -import math -from typing import List, Optional, Tuple, Union - -import numpy as np -import torch - -from diffusers.configuration_utils import ConfigMixin, register_to_config -from diffusers.schedulers.scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput -from diffusers.utils import deprecate - - -class FlowUniPCMultistepScheduler(SchedulerMixin, ConfigMixin): - """ - `FlowUniPCMultistepScheduler` is a training-free framework designed for the fast sampling of diffusion models. - - This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic - methods the library implements for all schedulers such as loading and saving. - - Args: - num_train_timesteps (`int`, defaults to 1000): - The number of diffusion steps to train the model. 
- solver_order (`int`, default `2`): - The UniPC order which can be any positive integer. The effective order of accuracy is `solver_order + 1` - due to the UniC. It is recommended to use `solver_order=2` for guided sampling, and `solver_order=3` for - unconditional sampling. - prediction_type (`str`, defaults to "flow_prediction"): - Prediction type of the scheduler function; must be `flow_prediction` for this scheduler, which predicts the - flow of the diffusion process. - thresholding (`bool`, defaults to `False`): - Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such - as Stable Diffusion. - dynamic_thresholding_ratio (`float`, defaults to 0.995): - The ratio for the dynamic thresholding method. Valid only when `thresholding=True`. - sample_max_value (`float`, defaults to 1.0): - The threshold value for dynamic thresholding. Valid only when `thresholding=True` and `predict_x0=True`. - predict_x0 (`bool`, defaults to `True`): - Whether to use the updating algorithm on the predicted x0. - solver_type (`str`, default `bh2`): - Solver type for UniPC. It is recommended to use `bh1` for unconditional sampling when steps < 10, and `bh2` - otherwise. - lower_order_final (`bool`, default `True`): - Whether to use lower-order solvers in the final steps. Only valid for < 15 inference steps. This can - stabilize the sampling of DPMSolver for steps < 15, especially for steps <= 10. - disable_corrector (`list`, default `[]`): - Decides which step to disable the corrector to mitigate the misalignment between `epsilon_theta(x_t, c)` - and `epsilon_theta(x_t^c, c)` which can influence convergence for a large guidance scale. Corrector is - usually disabled during the first few steps. - solver_p (`SchedulerMixin`, default `None`): - Any other scheduler that if specified, the algorithm becomes `solver_p + UniC`. - use_karras_sigmas (`bool`, *optional*, defaults to `False`): - Whether to use Karras sigmas for step sizes in the noise schedule during the sampling process. If `True`, - the sigmas are determined according to a sequence of noise levels {σi}. - use_exponential_sigmas (`bool`, *optional*, defaults to `False`): - Whether to use exponential sigmas for step sizes in the noise schedule during the sampling process. - timestep_spacing (`str`, defaults to `"linspace"`): - The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and - Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. - steps_offset (`int`, defaults to 0): - An offset added to the inference steps, as required by some model families. - final_sigmas_type (`str`, defaults to `"zero"`): - The final `sigma` value for the noise schedule during the sampling process. If `"sigma_min"`, the final - sigma is the same as the last sigma in the training schedule. If `zero`, the final sigma is set to 0. 
- """ - - _compatibles = [e.name for e in KarrasDiffusionSchedulers] - order = 1 - - @register_to_config - def __init__( - self, - num_train_timesteps: int = 1000, - solver_order: int = 2, - prediction_type: str = "flow_prediction", - shift: Optional[float] = 1.0, - use_dynamic_shifting=False, - thresholding: bool = False, - dynamic_thresholding_ratio: float = 0.995, - sample_max_value: float = 1.0, - predict_x0: bool = True, - solver_type: str = "bh2", - lower_order_final: bool = True, - disable_corrector: List[int] = [], - solver_p: SchedulerMixin = None, - timestep_spacing: str = "linspace", - steps_offset: int = 0, - final_sigmas_type: Optional[str] = "zero", # "zero", "sigma_min" - use_karras_sigmas: bool = False, - ): - if solver_type not in ["bh1", "bh2"]: - if solver_type in ["midpoint", "heun", "logrho"]: - self.register_to_config(solver_type="bh2") - else: - raise NotImplementedError(f"{solver_type} is not implemented for {self.__class__}") - - self.predict_x0 = predict_x0 - # setable values - self.num_inference_steps = None - alphas = np.linspace(1, 1 / num_train_timesteps, num_train_timesteps)[::-1].copy() - sigmas = 1.0 - alphas - sigmas = torch.from_numpy(sigmas).to(dtype=torch.float32) - - if not use_dynamic_shifting: - # when use_dynamic_shifting is True, we apply the timestep shifting on the fly based on the image resolution - sigmas = shift * sigmas / (1 + (shift - 1) * sigmas) # pyright: ignore - - self.sigmas = sigmas - self.timesteps = sigmas * num_train_timesteps - - self.model_outputs = [None] * solver_order - self.timestep_list = [None] * solver_order - self.lower_order_nums = 0 - self.disable_corrector = disable_corrector - self.solver_p = solver_p - self.last_sample = None - self._step_index = None - self._begin_index = None - - self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication - self.sigma_min = self.sigmas[-1].item() - self.sigma_max = self.sigmas[0].item() - - @property - def step_index(self): - """ - The index counter for current timestep. It will increase 1 after each scheduler step. - """ - return self._step_index - - @property - def begin_index(self): - """ - The index for the first timestep. It should be set from pipeline with `set_begin_index` method. - """ - return self._begin_index - - # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index - def set_begin_index(self, begin_index: int = 0): - """ - Sets the begin index for the scheduler. This function should be run from pipeline before the inference. - - Args: - begin_index (`int`): - The begin index for the scheduler. - """ - self._begin_index = begin_index - - # Modified from diffusers.schedulers.scheduling_flow_match_euler_discrete.FlowMatchEulerDiscreteScheduler.set_timesteps - def set_timesteps( - self, - num_inference_steps: Union[int, None] = None, - device: Union[str, torch.device] = None, - sigmas: Optional[List[float]] = None, - mu: Optional[Union[float, None]] = None, - shift: Optional[Union[float, None]] = None, - ): - """ - Sets the discrete timesteps used for the diffusion chain (to be run before inference). - - Args: - num_inference_steps (`int`): - Total number of the spacing of the time steps. - device (`str` or `torch.device`, *optional*): - The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. 
- """ - if self.config.use_dynamic_shifting and mu is None: - raise ValueError(" you have to pass a value for `mu` when `use_dynamic_shifting` is set to be `True`") - - if self.config.use_karras_sigmas: - # force to use the exact sigma used in edm sampler - sigma_max = 200 - sigma_min = 0.01 - rho = 7 - sigmas = np.arange(num_inference_steps + 1) / num_inference_steps - min_inv_rho = sigma_min ** (1 / rho) - max_inv_rho = sigma_max ** (1 / rho) - sigmas = (max_inv_rho + sigmas * (min_inv_rho - max_inv_rho)) ** rho - sigmas = sigmas / (1 + sigmas) - else: - if sigmas is None: - sigmas = np.linspace(self.sigma_max, self.sigma_min, num_inference_steps + 1).copy()[:-1] # pyright: ignore - - if self.config.use_dynamic_shifting: - sigmas = self.time_shift(mu, 1.0, sigmas) # pyright: ignore - else: - if shift is None: - shift = self.config.shift - sigmas = shift * sigmas / (1 + (shift - 1) * sigmas) # pyright: ignore - - if self.config.final_sigmas_type == "sigma_min": - # TODO(migmartin): this raises an error, rewrite this class - sigma_last = ((1 - self.alphas_cumprod[0]) / self.alphas_cumprod[0]) ** 0.5 - elif self.config.final_sigmas_type == "zero": - sigma_last = 0 - else: - raise ValueError( - f"`final_sigmas_type` must be one of 'zero', or 'sigma_min', but got {self.config.final_sigmas_type}" - ) - - timesteps = sigmas * self.config.num_train_timesteps - sigmas = np.concatenate([sigmas, [sigma_last]]).astype(np.float32) # pyright: ignore - - self.sigmas = torch.from_numpy(sigmas) - self.timesteps = torch.from_numpy(timesteps).to(device=device, dtype=torch.int64) - - self.num_inference_steps = len(timesteps) - - self.model_outputs = [ - None, - ] * self.config.solver_order - self.lower_order_nums = 0 - self.last_sample = None - if self.solver_p: - self.solver_p.set_timesteps(self.num_inference_steps, device=device) - - # add an index counter for schedulers that allow duplicated timesteps - self._step_index = None - self._begin_index = None - self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication - - # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample - def _threshold_sample(self, sample: torch.Tensor) -> torch.Tensor: - """ - "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the - prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by - s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing - pixels from saturation at each step. We find that dynamic thresholding results in significantly better - photorealism as well as better image-text alignment, especially when using very large guidance weights." 
- - https://arxiv.org/abs/2205.11487 - """ - dtype = sample.dtype - batch_size, channels, *remaining_dims = sample.shape - - if dtype not in (torch.float32, torch.float64): - sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half - - # Flatten sample for doing quantile calculation along each image - sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) - - abs_sample = sample.abs() # "a certain percentile absolute pixel value" - - s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) - s = torch.clamp( - s, min=1, max=self.config.sample_max_value - ) # When clamped to min=1, equivalent to standard clipping to [-1, 1] - s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0 - sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s" - - sample = sample.reshape(batch_size, channels, *remaining_dims) - sample = sample.to(dtype) - - return sample - - # Copied from diffusers.schedulers.scheduling_flow_match_euler_discrete.FlowMatchEulerDiscreteScheduler._sigma_to_t - def _sigma_to_t(self, sigma): - return sigma * self.config.num_train_timesteps - - def _sigma_to_alpha_sigma_t(self, sigma): - return 1 - sigma, sigma - - # Copied from diffusers.schedulers.scheduling_flow_match_euler_discrete.set_timesteps - def time_shift(self, mu: float, sigma: float, t: torch.Tensor): - return math.exp(mu) / (math.exp(mu) + (1 / t - 1) ** sigma) - - def convert_model_output( - self, - model_output: torch.Tensor, - *args, - sample: torch.Tensor = None, - **kwargs, - ) -> torch.Tensor: - r""" - Convert the model output to the corresponding type the UniPC algorithm needs. - - Args: - model_output (`torch.Tensor`): - The direct output from the learned diffusion model. - timestep (`int`): - The current discrete timestep in the diffusion chain. - sample (`torch.Tensor`): - A current instance of a sample created by the diffusion process. - - Returns: - `torch.Tensor`: - The converted model output. - """ - timestep = args[0] if len(args) > 0 else kwargs.pop("timestep", None) - if sample is None: - if len(args) > 1: - sample = args[1] - else: - raise ValueError("missing `sample` as a required keyward argument") - if timestep is not None: - deprecate( - "timesteps", - "1.0.0", - "Passing `timesteps` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", - ) - - sigma = self.sigmas[self.step_index] - alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) - - # print("sigma_t ==>", self.step_index, sigma, sigma_t, alpha_t, sample.shape, model_output.shape) - if self.predict_x0: - if self.config.prediction_type == "flow_prediction": - sigma_t = self.sigmas[self.step_index] - x0_pred = sample - sigma_t * model_output - else: - raise ValueError( - f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`," - " `v_prediction` or `flow_prediction` for the UniPCMultistepScheduler." 
- ) - - if self.config.thresholding: - x0_pred = self._threshold_sample(x0_pred) - # print("self.config.thresholding", self.config.thresholding) - return x0_pred - else: - if self.config.prediction_type == "flow_prediction": - sigma_t = self.sigmas[self.step_index] - epsilon = sample - (1 - sigma_t) * model_output - else: - raise ValueError( - f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`," - " `v_prediction` or `flow_prediction` for the UniPCMultistepScheduler." - ) - - if self.config.thresholding: - sigma_t = self.sigmas[self.step_index] - x0_pred = sample - sigma_t * model_output - x0_pred = self._threshold_sample(x0_pred) - epsilon = model_output + x0_pred - - return epsilon - - def multistep_uni_p_bh_update( - self, - model_output: torch.Tensor, - *args, - sample: torch.Tensor = None, - order: int = None, # pyright: ignore - **kwargs, - ) -> torch.Tensor: - """ - One step for the UniP (B(h) version). Alternatively, `self.solver_p` is used if is specified. - - Args: - model_output (`torch.Tensor`): - The direct output from the learned diffusion model at the current timestep. - prev_timestep (`int`): - The previous discrete timestep in the diffusion chain. - sample (`torch.Tensor`): - A current instance of a sample created by the diffusion process. - order (`int`): - The order of UniP at this timestep (corresponds to the *p* in UniPC-p). - - Returns: - `torch.Tensor`: - The sample tensor at the previous timestep. - """ - prev_timestep = args[0] if len(args) > 0 else kwargs.pop("prev_timestep", None) - if sample is None: - if len(args) > 1: - sample = args[1] - else: - raise ValueError(" missing `sample` as a required keyward argument") - if order is None: - if len(args) > 2: - order = args[2] - else: - raise ValueError(" missing `order` as a required keyward argument") - if prev_timestep is not None: - deprecate( - "prev_timestep", - "1.0.0", - "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", - ) - model_output_list = self.model_outputs - - s0 = self.timestep_list[-1] - m0 = model_output_list[-1] - x = sample - - if self.solver_p: - x_t = self.solver_p.step(model_output, s0, x).prev_sample - return x_t - - sigma_t, sigma_s0 = self.sigmas[self.step_index + 1], self.sigmas[self.step_index] # pyright: ignore - alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) - alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0) - - lambda_t = torch.log(alpha_t) - torch.log(sigma_t) - lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) - - h = lambda_t - lambda_s0 - device = sample.device - - rks = [] - D1s = [] - for i in range(1, order): - si = self.step_index - i # pyright: ignore - mi = model_output_list[-(i + 1)] - alpha_si, sigma_si = self._sigma_to_alpha_sigma_t(self.sigmas[si]) - lambda_si = torch.log(alpha_si) - torch.log(sigma_si) - rk = (lambda_si - lambda_s0) / h - rks.append(rk) - D1s.append((mi - m0) / rk) # pyright: ignore - - rks.append(1.0) - rks = torch.tensor(rks, device=device) - - R = [] - b = [] - - hh = -h if self.predict_x0 else h - h_phi_1 = torch.expm1(hh) # h\phi_1(h) = e^h - 1 - h_phi_k = h_phi_1 / hh - 1 - - factorial_i = 1 - - if self.config.solver_type == "bh1": - B_h = hh - elif self.config.solver_type == "bh2": - B_h = torch.expm1(hh) - else: - raise NotImplementedError() - - for i in range(1, order + 1): - R.append(torch.pow(rks, i - 1)) - b.append(h_phi_k * factorial_i / B_h) - factorial_i *= i + 1 - h_phi_k = h_phi_k 
/ hh - 1 / factorial_i - - R = torch.stack(R) - b = torch.tensor(b, device=device) - - if len(D1s) > 0: - D1s = torch.stack(D1s, dim=1) # (B, K) - # for order 2, we use a simplified version - if order == 2: - rhos_p = torch.tensor([0.5], dtype=x.dtype, device=device) - else: - rhos_p = torch.linalg.solve(R[:-1, :-1], b[:-1]).to(device).to(x.dtype) - else: - D1s = None - - if self.predict_x0: - x_t_ = sigma_t / sigma_s0 * x - alpha_t * h_phi_1 * m0 - if D1s is not None: - pred_res = torch.einsum("k,bkc...->bc...", rhos_p, D1s) # pyright: ignore - else: - pred_res = 0 - x_t = x_t_ - alpha_t * B_h * pred_res - else: - x_t_ = alpha_t / alpha_s0 * x - sigma_t * h_phi_1 * m0 - if D1s is not None: - pred_res = torch.einsum("k,bkc...->bc...", rhos_p, D1s) # pyright: ignore - else: - pred_res = 0 - x_t = x_t_ - sigma_t * B_h * pred_res - - x_t = x_t.to(x.dtype) - return x_t - - def multistep_uni_c_bh_update( - self, - this_model_output: torch.Tensor, - *args, - last_sample: torch.Tensor = None, - this_sample: torch.Tensor = None, - order: int = None, # pyright: ignore - **kwargs, - ) -> torch.Tensor: - """ - One step for the UniC (B(h) version). - - Args: - this_model_output (`torch.Tensor`): - The model outputs at `x_t`. - this_timestep (`int`): - The current timestep `t`. - last_sample (`torch.Tensor`): - The generated sample before the last predictor `x_{t-1}`. - this_sample (`torch.Tensor`): - The generated sample after the last predictor `x_{t}`. - order (`int`): - The `p` of UniC-p at this step. The effective order of accuracy should be `order + 1`. - - Returns: - `torch.Tensor`: - The corrected sample tensor at the current timestep. - """ - this_timestep = args[0] if len(args) > 0 else kwargs.pop("this_timestep", None) - if last_sample is None: - if len(args) > 1: - last_sample = args[1] - else: - raise ValueError(" missing`last_sample` as a required keyward argument") - if this_sample is None: - if len(args) > 2: - this_sample = args[2] - else: - raise ValueError(" missing`this_sample` as a required keyward argument") - if order is None: - if len(args) > 3: - order = args[3] - else: - raise ValueError(" missing`order` as a required keyward argument") - if this_timestep is not None: - deprecate( - "this_timestep", - "1.0.0", - "Passing `this_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", - ) - - model_output_list = self.model_outputs - - m0 = model_output_list[-1] - x = last_sample - x_t = this_sample - model_t = this_model_output - - sigma_t, sigma_s0 = self.sigmas[self.step_index], self.sigmas[self.step_index - 1] # pyright: ignore - alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) - alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0) - - lambda_t = torch.log(alpha_t) - torch.log(sigma_t) - lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) - - h = lambda_t - lambda_s0 - device = this_sample.device - - rks = [] - D1s = [] - for i in range(1, order): - si = self.step_index - (i + 1) # pyright: ignore - mi = model_output_list[-(i + 1)] - alpha_si, sigma_si = self._sigma_to_alpha_sigma_t(self.sigmas[si]) - lambda_si = torch.log(alpha_si) - torch.log(sigma_si) - rk = (lambda_si - lambda_s0) / h - rks.append(rk) - D1s.append((mi - m0) / rk) # pyright: ignore - - rks.append(1.0) - rks = torch.tensor(rks, device=device) - - R = [] - b = [] - - hh = -h if self.predict_x0 else h - h_phi_1 = torch.expm1(hh) # h\phi_1(h) = e^h - 1 - h_phi_k = h_phi_1 / hh - 1 - - factorial_i = 1 - - if 
self.config.solver_type == "bh1": - B_h = hh - elif self.config.solver_type == "bh2": - B_h = torch.expm1(hh) - else: - raise NotImplementedError() - - for i in range(1, order + 1): - R.append(torch.pow(rks, i - 1)) - b.append(h_phi_k * factorial_i / B_h) - factorial_i *= i + 1 - h_phi_k = h_phi_k / hh - 1 / factorial_i - - R = torch.stack(R) - b = torch.tensor(b, device=device) - - if len(D1s) > 0: - D1s = torch.stack(D1s, dim=1) - else: - D1s = None - - # for order 1, we use a simplified version - if order == 1: - rhos_c = torch.tensor([0.5], dtype=x.dtype, device=device) - else: - rhos_c = torch.linalg.solve(R, b).to(device).to(x.dtype) - - if self.predict_x0: - x_t_ = sigma_t / sigma_s0 * x - alpha_t * h_phi_1 * m0 - if D1s is not None: - corr_res = torch.einsum("k,bkc...->bc...", rhos_c[:-1], D1s) - else: - corr_res = 0 - D1_t = model_t - m0 - x_t = x_t_ - alpha_t * B_h * (corr_res + rhos_c[-1] * D1_t) - else: - x_t_ = alpha_t / alpha_s0 * x - sigma_t * h_phi_1 * m0 - if D1s is not None: - corr_res = torch.einsum("k,bkc...->bc...", rhos_c[:-1], D1s) - else: - corr_res = 0 - D1_t = model_t - m0 - x_t = x_t_ - sigma_t * B_h * (corr_res + rhos_c[-1] * D1_t) - x_t = x_t.to(x.dtype) - return x_t - - def index_for_timestep(self, timestep, schedule_timesteps=None): - if schedule_timesteps is None: - schedule_timesteps = self.timesteps - - indices = (schedule_timesteps == timestep).nonzero() - - # The sigma index that is taken for the **very** first `step` - # is always the second index (or the last index if there is only 1) - # This way we can ensure we don't accidentally skip a sigma in - # case we start in the middle of the denoising schedule (e.g. for image-to-image) - pos = 1 if len(indices) > 1 else 0 - - return indices[pos].item() - - # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler._init_step_index - def _init_step_index(self, timestep): - """ - Initialize the step_index counter for the scheduler. - """ - - if self.begin_index is None: - if isinstance(timestep, torch.Tensor): - timestep = timestep.to(self.timesteps.device) - self._step_index = self.index_for_timestep(timestep) - else: - self._step_index = self._begin_index - - def step( - self, - model_output: torch.Tensor, - timestep: Union[int, torch.Tensor], - sample: torch.Tensor, - return_dict: bool = True, - generator=None, - ) -> Union[SchedulerOutput, Tuple]: - """ - Predict the sample from the previous timestep by reversing the SDE. This function propagates the sample with - the multistep UniPC. - - Args: - model_output (`torch.Tensor`): - The direct output from learned diffusion model. - timestep (`int`): - The current discrete timestep in the diffusion chain. - sample (`torch.Tensor`): - A current instance of a sample created by the diffusion process. - return_dict (`bool`): - Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`. - - Returns: - [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`: - If return_dict is `True`, [`~schedulers.scheduling_utils.SchedulerOutput`] is returned, otherwise a - tuple is returned where the first element is the sample tensor. 
- - """ - if self.num_inference_steps is None: - raise ValueError( - "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" - ) - - if self.step_index is None: - self._init_step_index(timestep) - - # print("self.step_index ==> ", self.step_index) - - use_corrector = ( - self.step_index > 0 and self.step_index - 1 not in self.disable_corrector and self.last_sample is not None # pyright: ignore - ) - - model_output_convert = self.convert_model_output(model_output, sample=sample) - - if use_corrector: - sample = self.multistep_uni_c_bh_update( - this_model_output=model_output_convert, - last_sample=self.last_sample, - this_sample=sample, - order=self.this_order, - ) - - for i in range(self.config.solver_order - 1): - self.model_outputs[i] = self.model_outputs[i + 1] - self.timestep_list[i] = self.timestep_list[i + 1] - - self.model_outputs[-1] = model_output_convert - self.timestep_list[-1] = timestep # pyright: ignore - - if self.config.lower_order_final: - this_order = min(self.config.solver_order, len(self.timesteps) - self.step_index) # pyright: ignore - else: - this_order = self.config.solver_order - - self.this_order = min(this_order, self.lower_order_nums + 1) # warmup for multistep - assert self.this_order > 0 - - self.last_sample = sample - prev_sample = self.multistep_uni_p_bh_update( - model_output=model_output, # pass the original non-converted model output, in case solver-p is used - sample=sample, - order=self.this_order, - ) - - if self.lower_order_nums < self.config.solver_order: - self.lower_order_nums += 1 - - # upon completion increase step index by one - self._step_index += 1 # pyright: ignore - - if not return_dict: - return (prev_sample, model_output_convert) - - return SchedulerOutput(prev_sample=prev_sample) - - def scale_model_input(self, sample: torch.Tensor, *args, **kwargs) -> torch.Tensor: - """ - Ensures interchangeability with schedulers that need to scale the denoising model input depending on the - current timestep. - - Args: - sample (`torch.Tensor`): - The input sample. - - Returns: - `torch.Tensor`: - A scaled input sample. 
- """ - return sample - - # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.add_noise - def add_noise( - self, - original_samples: torch.Tensor, - noise: torch.Tensor, - timesteps: torch.IntTensor, - ) -> torch.Tensor: - # Make sure sigmas and timesteps have the same device and dtype as original_samples - sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) - if original_samples.device.type == "mps" and torch.is_floating_point(timesteps): - # mps does not support float64 - schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) - timesteps = timesteps.to(original_samples.device, dtype=torch.float32) - else: - schedule_timesteps = self.timesteps.to(original_samples.device) - timesteps = timesteps.to(original_samples.device) - - # begin_index is None when the scheduler is used for training or pipeline does not implement set_begin_index - if self.begin_index is None: - step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] - elif self.step_index is not None: - # add_noise is called after first denoising step (for inpainting) - step_indices = [self.step_index] * timesteps.shape[0] - else: - # add noise is called before first denoising step to create initial latent(img2img) - step_indices = [self.begin_index] * timesteps.shape[0] - - sigma = sigmas[step_indices].flatten() - while len(sigma.shape) < len(original_samples.shape): - sigma = sigma.unsqueeze(-1) - - alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) - noisy_samples = alpha_t * original_samples + sigma_t * noise - return noisy_samples - - def __len__(self): - return self.config.num_train_timesteps From 1a132f2bcfdc6ae0e5abce5ff238bbae86faa395 Mon Sep 17 00:00:00 2001 From: Miguel Martin Date: Wed, 17 Dec 2025 20:13:34 +0000 Subject: [PATCH 12/22] UniPCMultistep: support use_karras_sigmas=True and use_flow_sigmas=True --- src/diffusers/schedulers/scheduling_unipc_multistep.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/src/diffusers/schedulers/scheduling_unipc_multistep.py b/src/diffusers/schedulers/scheduling_unipc_multistep.py index 689c6a06350b..5ea56b300be2 100644 --- a/src/diffusers/schedulers/scheduling_unipc_multistep.py +++ b/src/diffusers/schedulers/scheduling_unipc_multistep.py @@ -217,6 +217,8 @@ def __init__( rescale_betas_zero_snr: bool = False, use_dynamic_shifting: bool = False, time_shift_type: Literal["exponential"] = "exponential", + sigma_min: Optional[float] = None, + sigma_max: Optional[float] = None, ) -> None: if self.config.use_beta_sigmas and not is_scipy_available(): raise ImportError("Make sure to install scipy if you want to use beta sigmas.") @@ -350,7 +352,12 @@ def set_timesteps( log_sigmas = np.log(sigmas) sigmas = np.flip(sigmas).copy() sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=num_inference_steps) - timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round() + if self.config.use_flow_sigmas: + sigmas = sigmas / (sigmas + 1) + timesteps = (sigmas * self.config.num_train_timesteps).copy() + else: + timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round() + if self.config.final_sigmas_type == "sigma_min": sigma_last = sigmas[-1] elif self.config.final_sigmas_type == "zero": From 980822041e9928e50bcbd4b8ae916c9daba5e4db Mon Sep 17 00:00:00 2001 From: Miguel Martin Date: Thu, 18 Dec 2025 01:43:46 +0000 Subject: [PATCH 13/22] use UniPCMultistepScheduler + 
fix tests for pipeline --- scripts/convert_cosmos_to_diffusers.py | 9 +- .../cosmos/pipeline_cosmos2_5_predict.py | 505 ++---------------- tests/pipelines/cosmos/cosmos_guardrail.py | 11 +- .../cosmos/test_cosmos2_5_predict.py | 7 +- 4 files changed, 55 insertions(+), 477 deletions(-) diff --git a/scripts/convert_cosmos_to_diffusers.py b/scripts/convert_cosmos_to_diffusers.py index f4a584a1b091..271d1b7b4ad8 100644 --- a/scripts/convert_cosmos_to_diffusers.py +++ b/scripts/convert_cosmos_to_diffusers.py @@ -61,6 +61,7 @@ CosmosVideoToWorldPipeline, EDMEulerScheduler, FlowMatchEulerDiscreteScheduler, + UniPCMultistepScheduler, ) from diffusers.pipelines.cosmos.pipeline_cosmos2_5_predict import Cosmos2_5_PredictBase @@ -536,7 +537,13 @@ def save_pipeline_cosmos2_5(args, transformer, vae): ) tokenizer = AutoTokenizer.from_pretrained(tokenizer_path) - scheduler = FlowMatchEulerDiscreteScheduler(use_karras_sigmas=True) + scheduler = UniPCMultistepScheduler( + use_karras_sigmas=True, + use_flow_sigmas=True, + prediction_type="flow_prediction", + sigma_max=200.0, + sigma_min=0.01, + ) pipe = Cosmos2_5_PredictBase( text_encoder=text_encoder, diff --git a/src/diffusers/pipelines/cosmos/pipeline_cosmos2_5_predict.py b/src/diffusers/pipelines/cosmos/pipeline_cosmos2_5_predict.py index 67a94defb091..3fe7a5fde876 100644 --- a/src/diffusers/pipelines/cosmos/pipeline_cosmos2_5_predict.py +++ b/src/diffusers/pipelines/cosmos/pipeline_cosmos2_5_predict.py @@ -24,7 +24,7 @@ from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...image_processor import PipelineImageInput from ...models import AutoencoderKLWan, CosmosTransformer3DModel -from ...schedulers import FlowUniPCMultistepScheduler +from ...schedulers import UniPCMultistepScheduler from ...utils import is_cosmos_guardrail_available, is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ...video_processor import VideoProcessor @@ -178,7 +178,7 @@ class Cosmos2_5_PredictBase(DiffusionPipeline): Tokenizer associated with the Qwen2.5 VL encoder. transformer ([`CosmosTransformer3DModel`]): Conditional Transformer to denoise the encoded image latents. - scheduler ([`FlowUniPCMultistepScheduler`]): + scheduler ([`UniPCMultistepScheduler`]): A scheduler to be used in combination with `transformer` to denoise the encoded image latents. vae ([`AutoencoderKLWan`]): Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations. 
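For context, a minimal usage sketch of the converted pipeline with this scheduler configuration. This is a sketch only: it assumes the `converted/cosmos-p2.5-base-2b` output path from the conversion example above and a checkpoint saved with a loadable safety checker.

```python
import torch

from diffusers import UniPCMultistepScheduler
from diffusers.pipelines.cosmos.pipeline_cosmos2_5_predict import Cosmos2_5_PredictBase

# Load the pipeline saved by scripts/convert_cosmos_to_diffusers.py
# (hypothetical local path from the conversion example above).
pipe = Cosmos2_5_PredictBase.from_pretrained(
    "converted/cosmos-p2.5-base-2b", torch_dtype=torch.bfloat16
).to("cuda")

# The saved scheduler config already matches this; shown explicitly for clarity:
# Karras-spaced sigmas in [0.01, 200] mapped to flow-matching sigmas, with the
# transformer output interpreted as a flow ("flow_prediction").
pipe.scheduler = UniPCMultistepScheduler(
    use_karras_sigmas=True,
    use_flow_sigmas=True,
    prediction_type="flow_prediction",
    sigma_max=200.0,
    sigma_min=0.01,
)
```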
@@ -188,6 +188,7 @@ class Cosmos2_5_PredictBase(DiffusionPipeline): _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] # We mark safety_checker as optional here to get around some test failures, but it is not really optional _optional_components = ["safety_checker"] + _exclude_from_cpu_offload = ["safety_checker"] def __init__( self, @@ -195,7 +196,7 @@ def __init__( tokenizer: AutoTokenizer, transformer: CosmosTransformer3DModel, vae: AutoencoderKLWan, - scheduler: FlowUniPCMultistepScheduler, + scheduler: UniPCMultistepScheduler, safety_checker: CosmosSafetyChecker = None, ): super().__init__() @@ -215,6 +216,7 @@ def __init__( self.vae_scale_factor_temporal = 2 ** sum(self.vae.temperal_downsample) if getattr(self, "vae", None) else 4 self.vae_scale_factor_spatial = 2 ** len(self.vae.temperal_downsample) if getattr(self, "vae", None) else 8 self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor_spatial) + latents_mean = ( torch.tensor(self.vae.config.latents_mean).view(1, self.vae.config.z_dim, 1, 1, 1).float() if getattr(self.vae.config, "latents_mean", None) is not None @@ -228,6 +230,23 @@ def __init__( self.latents_mean = latents_mean self.latents_std = latents_std + if self.latents_mean is None or self.latents_std is None: + raise ValueError("VAE configuration must define both `latents_mean` and `latents_std`.") + + + @property + def _execution_device(self): + device = super()._execution_device + if isinstance(device, torch.device) and device.type == "cpu": + for module_name in ("transformer", "text_encoder", "vae"): + module = getattr(self, module_name, None) + if module is None or not isinstance(module, torch.nn.Module): + continue + module_device = getattr(module, "device", None) + if isinstance(module_device, torch.device) and module_device.type != "cpu": + return module_device + return device + # Copied from diffusers.pipelines.cosmos.pipeline_cosmos_text2world.CosmosTextToWorldPipeline._get_prompt_embeds def _get_prompt_embeds( self, @@ -796,13 +815,11 @@ def __call__( self._current_timestep = None if not output_type == "latent": - assert self.latents_mean is not None and self.latents_std is not None, ( - "VAE configuration must define `latents_mean` and `latents_std`." - ) latents_mean = self.latents_mean.to(latents.device, latents.dtype) latents_std = self.latents_std.to(latents.device, latents.dtype) latents = latents * latents_std + latents_mean video = self.vae.decode(latents.to(self.vae.dtype), return_dict=False)[0] + video = self._match_num_frames(video, num_frames) assert self.safety_checker is not None self.safety_checker.to(device) @@ -826,470 +843,18 @@ def __call__( return CosmosPipelineOutput(frames=video) + def _match_num_frames(self, video: torch.Tensor, target_num_frames: int) -> torch.Tensor: + if target_num_frames <= 0 or video.shape[2] == target_num_frames: + return video -class Cosmos2_5_PredictText2World(Cosmos2_5_PredictBase): - r""" - Pipeline for [Cosmos Predict2.5](https://github.com/nvidia-cosmos/cosmos-predict2.5) Text2World. - - This pipeline is a specialized version of [`Cosmos2_5_PredictBase`], please refer to the superclass for advanced - options. + frames_per_latent = max(self.vae_scale_factor_temporal, 1) + video = torch.repeat_interleave(video, repeats=frames_per_latent, dim=2) - Args: - text_encoder ([`Qwen2_5_VLForConditionalGeneration`]): - Frozen text-encoder. Cosmos Predict2.5 uses the [Qwen2.5 - VL](https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct) encoder. 
- tokenizer (`AutoTokenizer`): - Tokenizer associated with the Qwen2.5 VL encoder. - transformer ([`CosmosTransformer3DModel`]): - Conditional Transformer to denoise the encoded image latents. - scheduler ([`FlowUniPCMultistepScheduler`]): - A scheduler to be used in combination with `transformer` to denoise the encoded image latents. - vae ([`AutoencoderKLWan`]): - Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations. - """ - - @torch.no_grad() - def __call__( - self, - prompt: Union[str, List[str]] | None = None, - negative_prompt: Optional[Union[str, List[str]]] = None, - height: int = 704, - width: int = 1280, - num_inference_steps: int = 35, - guidance_scale: float = 7.0, - fps: int = 16, - num_videos_per_prompt: Optional[int] = 1, - generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, - latents: Optional[torch.Tensor] = None, - prompt_embeds: Optional[torch.Tensor] = None, - negative_prompt_embeds: Optional[torch.Tensor] = None, - output_type: Optional[str] = "pil", - return_dict: bool = True, - callback_on_step_end: Optional[ - Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] - ] = None, - callback_on_step_end_tensor_inputs: List[str] = ["latents"], - max_sequence_length: int = 512, - conditional_frame_timestep: float = 0.1, - ): - r""" - Text2World: text-conditioned world generation. This is a wrapper around the base pipeline. + current_frames = video.shape[2] + if current_frames < target_num_frames: + pad = video[:, :, -1:, :, :].repeat(1, 1, target_num_frames - current_frames, 1, 1) + video = torch.cat([video, pad], dim=2) + elif current_frames > target_num_frames: + video = video[:, :, :target_num_frames] - Args: - image (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, *optional*): - Optional single image for Image2World conditioning. Must be `None` when `video` is provided. - video (`List[PIL.Image.Image]`, `np.ndarray`, `torch.Tensor`, *optional*): - Optional input video for Video2World conditioning. Must be `None` when `image` is provided. - prompt (`str` or `List[str]`, *optional*): - The prompt or prompts to guide generation. Required unless `prompt_embeds` is supplied. - height (`int`, defaults to `704`): - The height in pixels of the generated image. - width (`int`, defaults to `1280`): - The width in pixels of the generated image. - num_frames (`int`, defaults to `93`): - Number of output frames. Use `93` for world (video) generation; set to `1` to return a single frame. - num_inference_steps (`int`, defaults to `35`): - The number of denoising steps. More denoising steps usually lead to a higher quality image at the - expense of slower inference. - guidance_scale (`float`, defaults to `7.0`): - Guidance scale as defined in [Classifier-Free Diffusion - Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. - of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting - `guidance_scale > 1`. - fps (`int`, defaults to `16`): - The frames per second of the generated video. - num_videos_per_prompt (`int`, *optional*, defaults to 1): - The number of images to generate per prompt. - generator (`torch.Generator` or `List[torch.Generator]`, *optional*): - A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make - generation deterministic. 
- latents (`torch.Tensor`, *optional*): - Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image - generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor is generated by sampling using the supplied random `generator`. - prompt_embeds (`torch.Tensor`, *optional*): - Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not - provided, text embeddings will be generated from `prompt` input argument. - negative_prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated negative text embeddings. For PixArt-Sigma this negative prompt should be "". If not - provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. - output_type (`str`, *optional*, defaults to `"pil"`): - The output format of the generated image. Choose between `PIL.Image` or `np.array`. - return_dict (`bool`, *optional*, defaults to `True`): - Whether or not to return a [`CosmosPipelineOutput`] instead of a plain tuple. - callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): - A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of - each denoising step during the inference. with the following arguments: `callback_on_step_end(self: - DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a - list of all tensors as specified by `callback_on_step_end_tensor_inputs`. - callback_on_step_end_tensor_inputs (`List`, *optional*): - The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list - will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the - `._callback_tensor_inputs` attribute of your pipeline class. - max_sequence_length (`int`, defaults to `512`): - The maximum number of tokens in the prompt. If the prompt exceeds this length, it will be truncated. If - the prompt is shorter than this length, it will be padded. - - Returns: - [`~CosmosPipelineOutput`] or `tuple`: - If `return_dict` is `True`, [`CosmosPipelineOutput`] is returned, otherwise a `tuple` is returned where - the first element is a list with the generated images and the second element is a list of `bool`s - indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content. - - Examples: - ```python - >>> import torch - >>> from diffusers import Cosmos2_5_PredictText2World - >>> from diffusers.utils import export_to_video - - >>> pipe = Cosmos2_5_PredictText2World.from_pretrained( - ... "nvidia/Cosmos-Predict2.5-Base-2B", torch_dtype=torch.bfloat16 - ... ) - >>> pipe = pipe.to("cuda") - >>> video = pipe( - ... prompt="A snow scene with cars moving through an intersection.", - ... negative_prompt="low quality, blurry", - ... generator=torch.Generator(device="cuda").manual_seed(1), - ... 
).frames[0] - >>> export_to_video(video, "text2world_wrapper.mp4", fps=16) - ``` - """ - return super().__call__( - image=None, - video=None, - prompt=prompt, - negative_prompt=negative_prompt, - height=height, - width=width, - num_frames=93, - num_inference_steps=num_inference_steps, - guidance_scale=guidance_scale, - fps=fps, - num_videos_per_prompt=num_videos_per_prompt, - generator=generator, - latents=latents, - prompt_embeds=prompt_embeds, - negative_prompt_embeds=negative_prompt_embeds, - output_type=output_type, - return_dict=return_dict, - callback_on_step_end=callback_on_step_end, - callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, - max_sequence_length=max_sequence_length, - conditional_frame_timestep=conditional_frame_timestep, - ) - - -class Cosmos2_5_PredictImage2World(Cosmos2_5_PredictBase): - r""" - Pipeline for [Cosmos Predict2.5](https://github.com/nvidia-cosmos/cosmos-predict2.5) Image2World. - - This pipeline is a specialized version of [`Cosmos2_5_PredictBase`], please refer to the superclass for advanced - options. - - Args: - text_encoder ([`Qwen2_5_VLForConditionalGeneration`]): - Frozen text-encoder. Cosmos Predict2.5 uses the [Qwen2.5 - VL](https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct) encoder. - tokenizer (`AutoTokenizer`): - Tokenizer associated with the Qwen2.5 VL encoder. - transformer ([`CosmosTransformer3DModel`]): - Conditional Transformer to denoise the encoded image latents. - scheduler ([`FlowUniPCMultistepScheduler`]): - A scheduler to be used in combination with `transformer` to denoise the encoded image latents. - vae ([`AutoencoderKLWan`]): - Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations. - """ - - @torch.no_grad() - def __call__( - self, - prompt: Union[str, List[str]] | None = None, - image: PipelineImageInput | None = None, - negative_prompt: Optional[Union[str, List[str]]] = None, - height: int = 704, - width: int = 1280, - num_inference_steps: int = 35, - guidance_scale: float = 7.0, - fps: int = 16, - num_videos_per_prompt: Optional[int] = 1, - generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, - latents: Optional[torch.Tensor] = None, - prompt_embeds: Optional[torch.Tensor] = None, - negative_prompt_embeds: Optional[torch.Tensor] = None, - output_type: Optional[str] = "pil", - return_dict: bool = True, - callback_on_step_end: Optional[ - Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] - ] = None, - callback_on_step_end_tensor_inputs: List[str] = ["latents"], - max_sequence_length: int = 512, - conditional_frame_timestep: float = 0.1, - ): - r""" - Image2World: image-conditioned world generation. This is a wrapper around the base pipeline. - - Args: - image (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, *optional*): - Optional single image for Image2World conditioning. Must be `None` when `video` is provided. - prompt (`str` or `List[str]`, *optional*): - The prompt or prompts to guide generation. Required unless `prompt_embeds` is supplied. - height (`int`, defaults to `704`): - The height in pixels of the generated image. - width (`int`, defaults to `1280`): - The width in pixels of the generated image. - num_frames (`int`, defaults to `93`): - Number of output frames. Use `93` for world (video) generation; set to `1` to return a single frame. - num_inference_steps (`int`, defaults to `35`): - The number of denoising steps. 
More denoising steps usually lead to a higher quality image at the - expense of slower inference. - guidance_scale (`float`, defaults to `7.0`): - Guidance scale as defined in [Classifier-Free Diffusion - Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. - of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting - `guidance_scale > 1`. - fps (`int`, defaults to `16`): - The frames per second of the generated video. - num_videos_per_prompt (`int`, *optional*, defaults to 1): - The number of images to generate per prompt. - generator (`torch.Generator` or `List[torch.Generator]`, *optional*): - A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make - generation deterministic. - latents (`torch.Tensor`, *optional*): - Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image - generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor is generated by sampling using the supplied random `generator`. - prompt_embeds (`torch.Tensor`, *optional*): - Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not - provided, text embeddings will be generated from `prompt` input argument. - negative_prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated negative text embeddings. For PixArt-Sigma this negative prompt should be "". If not - provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. - output_type (`str`, *optional*, defaults to `"pil"`): - The output format of the generated image. Choose between `PIL.Image` or `np.array`. - return_dict (`bool`, *optional*, defaults to `True`): - Whether or not to return a [`CosmosPipelineOutput`] instead of a plain tuple. - callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): - A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of - each denoising step during the inference. with the following arguments: `callback_on_step_end(self: - DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a - list of all tensors as specified by `callback_on_step_end_tensor_inputs`. - callback_on_step_end_tensor_inputs (`List`, *optional*): - The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list - will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the - `._callback_tensor_inputs` attribute of your pipeline class. - max_sequence_length (`int`, defaults to `512`): - The maximum number of tokens in the prompt. If the prompt exceeds this length, it will be truncated. If - the prompt is shorter than this length, it will be padded. - - Returns: - [`~CosmosPipelineOutput`] or `tuple`: - If `return_dict` is `True`, [`CosmosPipelineOutput`] is returned, otherwise a `tuple` is returned where - the first element is a list with the generated images and the second element is a list of `bool`s - indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content. - - Examples: - ```python - >>> import torch - >>> from diffusers import Cosmos2_5_PredictImage2World - >>> from diffusers.utils import export_to_video, load_image - - >>> pipe = Cosmos2_5_PredictImage2World.from_pretrained( - ... 
"nvidia/Cosmos-Predict2.5-Base-2B", torch_dtype=torch.bfloat16 - ... ) - >>> pipe = pipe.to("cuda") - >>> image = load_image( - ... "https://media.githubusercontent.com/media/nvidia-cosmos/cosmos-predict2.5/refs/heads/main/assets/base/robot_welding.jpg" - ... ) - >>> video = pipe( - ... prompt="A robotic welding arm continues its work.", - ... image=image, - ... negative_prompt="low quality, blurry", - ... generator=torch.Generator(device="cuda").manual_seed(2), - ... ).frames[0] - >>> export_to_video(video, "image2world_wrapper.mp4", fps=16) - ``` - """ - if image is None: - raise ValueError("`image` must be provided for Image2World generation.") - - return super().__call__( - image=image, - video=None, - prompt=prompt, - negative_prompt=negative_prompt, - height=height, - width=width, - num_frames=93, - num_inference_steps=num_inference_steps, - guidance_scale=guidance_scale, - fps=fps, - num_videos_per_prompt=num_videos_per_prompt, - generator=generator, - latents=latents, - prompt_embeds=prompt_embeds, - negative_prompt_embeds=negative_prompt_embeds, - output_type=output_type, - return_dict=return_dict, - callback_on_step_end=callback_on_step_end, - callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, - max_sequence_length=max_sequence_length, - conditional_frame_timestep=conditional_frame_timestep, - ) - - -class Cosmos2_5_PredictVideo2World(Cosmos2_5_PredictBase): - r""" - Pipeline for [Cosmos Predict2.5](https://github.com/nvidia-cosmos/cosmos-predict2.5) Video2World. - - This pipeline is a specialized version of [`Cosmos2_5_PredictBase`], please refer to the superclass for advanced - options. - - Args: - text_encoder ([`Qwen2_5_VLForConditionalGeneration`]): - Frozen text-encoder. Cosmos Predict2.5 uses the [Qwen2.5 - VL](https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct) encoder. - tokenizer (`AutoTokenizer`): - Tokenizer associated with the Qwen2.5 VL encoder. - transformer ([`CosmosTransformer3DModel`]): - Conditional Transformer to denoise the encoded image latents. - scheduler ([`FlowUniPCMultistepScheduler`]): - A scheduler to be used in combination with `transformer` to denoise the encoded image latents. - vae ([`AutoencoderKLWan`]): - Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations. - """ - - @torch.no_grad() - def __call__( - self, - prompt: Union[str, List[str]] | None = None, - video: List[PipelineImageInput] | None = None, - negative_prompt: Optional[Union[str, List[str]]] = None, - height: int = 704, - width: int = 1280, - num_inference_steps: int = 35, - guidance_scale: float = 7.0, - fps: int = 16, - num_videos_per_prompt: Optional[int] = 1, - generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, - latents: Optional[torch.Tensor] = None, - prompt_embeds: Optional[torch.Tensor] = None, - negative_prompt_embeds: Optional[torch.Tensor] = None, - output_type: Optional[str] = "pil", - return_dict: bool = True, - callback_on_step_end: Optional[ - Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] - ] = None, - callback_on_step_end_tensor_inputs: List[str] = ["latents"], - max_sequence_length: int = 512, - conditional_frame_timestep: float = 0.1, - ): - r""" - Video2World: video-conditioned world generation. This is a wrapper around the base pipeline. - - Args: - video (`List[PIL.Image.Image]`, `np.ndarray`, `torch.Tensor`, *optional*): - Optional input video for Video2World conditioning. Must be `None` when `image` is provided. 
- prompt (`str` or `List[str]`, *optional*): - The prompt or prompts to guide generation. Required unless `prompt_embeds` is supplied. - height (`int`, defaults to `704`): - The height in pixels of the generated image. - width (`int`, defaults to `1280`): - The width in pixels of the generated image. - num_frames (`int`, defaults to `93`): - Number of output frames. Use `93` for world (video) generation; set to `1` to return a single frame. - num_inference_steps (`int`, defaults to `35`): - The number of denoising steps. More denoising steps usually lead to a higher quality image at the - expense of slower inference. - guidance_scale (`float`, defaults to `7.0`): - Guidance scale as defined in [Classifier-Free Diffusion - Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. - of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting - `guidance_scale > 1`. - fps (`int`, defaults to `16`): - The frames per second of the generated video. - num_videos_per_prompt (`int`, *optional*, defaults to 1): - The number of images to generate per prompt. - generator (`torch.Generator` or `List[torch.Generator]`, *optional*): - A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make - generation deterministic. - latents (`torch.Tensor`, *optional*): - Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image - generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor is generated by sampling using the supplied random `generator`. - prompt_embeds (`torch.Tensor`, *optional*): - Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not - provided, text embeddings will be generated from `prompt` input argument. - negative_prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated negative text embeddings. For PixArt-Sigma this negative prompt should be "". If not - provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. - output_type (`str`, *optional*, defaults to `"pil"`): - The output format of the generated image. Choose between `PIL.Image` or `np.array`. - return_dict (`bool`, *optional*, defaults to `True`): - Whether or not to return a [`CosmosPipelineOutput`] instead of a plain tuple. - callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): - A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of - each denoising step during the inference. with the following arguments: `callback_on_step_end(self: - DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a - list of all tensors as specified by `callback_on_step_end_tensor_inputs`. - callback_on_step_end_tensor_inputs (`List`, *optional*): - The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list - will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the - `._callback_tensor_inputs` attribute of your pipeline class. - max_sequence_length (`int`, defaults to `512`): - The maximum number of tokens in the prompt. If the prompt exceeds this length, it will be truncated. If - the prompt is shorter than this length, it will be padded. 
- - Returns: - [`~CosmosPipelineOutput`] or `tuple`: - If `return_dict` is `True`, [`CosmosPipelineOutput`] is returned, otherwise a `tuple` is returned where - the first element is a list with the generated images and the second element is a list of `bool`s - indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content. - - Examples: - ```python - >>> import torch - >>> from diffusers import Cosmos2_5_PredictVideo2World - >>> from diffusers.utils import export_to_video, load_video - - >>> pipe = Cosmos2_5_PredictVideo2World.from_pretrained( - ... "nvidia/Cosmos-Predict2.5-Base-2B", torch_dtype=torch.bfloat16 - ... ) - >>> pipe = pipe.to("cuda") - >>> input_video = load_video( - ... "https://github.com/nvidia-cosmos/cosmos-predict2.5/raw/refs/heads/main/assets/base/sand_mining.mp4" - ... ) - >>> video = pipe( - ... prompt="Aerial view of sand mining continues.", - ... video=input_video, - ... negative_prompt="low quality, blurry", - ... generator=torch.Generator(device="cuda").manual_seed(3), - ... ).frames[0] - >>> export_to_video(video, "video2world_wrapper.mp4", fps=16) - ``` - """ - if video is None: - raise ValueError("`video` must be provided for Video2World generation.") - - return super().__call__( - image=None, - video=video, - prompt=prompt, - negative_prompt=negative_prompt, - height=height, - width=width, - num_frames=93, - num_inference_steps=num_inference_steps, - guidance_scale=guidance_scale, - fps=fps, - num_videos_per_prompt=num_videos_per_prompt, - generator=generator, - latents=latents, - prompt_embeds=prompt_embeds, - negative_prompt_embeds=negative_prompt_embeds, - output_type=output_type, - return_dict=return_dict, - callback_on_step_end=callback_on_step_end, - callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, - max_sequence_length=max_sequence_length, - conditional_frame_timestep=conditional_frame_timestep, - ) + return video diff --git a/tests/pipelines/cosmos/cosmos_guardrail.py b/tests/pipelines/cosmos/cosmos_guardrail.py index 4de14fbaaf9d..c9ef597fdb36 100644 --- a/tests/pipelines/cosmos/cosmos_guardrail.py +++ b/tests/pipelines/cosmos/cosmos_guardrail.py @@ -27,7 +27,7 @@ class DummyCosmosSafetyChecker(ModelMixin, ConfigMixin): def __init__(self) -> None: super().__init__() - self._dtype = torch.float32 + self.register_buffer("_device_tracker", torch.zeros(1, dtype=torch.float32), persistent=False) def check_text_safety(self, prompt: str) -> bool: return True @@ -35,13 +35,14 @@ def check_text_safety(self, prompt: str) -> bool: def check_video_safety(self, frames: np.ndarray) -> np.ndarray: return frames - def to(self, device: Union[str, torch.device] = None, dtype: torch.dtype = None) -> None: - self._dtype = dtype + def to(self, device: Union[str, torch.device] = None, dtype: torch.dtype = None): + module = super().to(device=device, dtype=dtype) + return module @property def device(self) -> torch.device: - return None + return self._device_tracker.device @property def dtype(self) -> torch.dtype: - return self._dtype + return self._device_tracker.dtype diff --git a/tests/pipelines/cosmos/test_cosmos2_5_predict.py b/tests/pipelines/cosmos/test_cosmos2_5_predict.py index 706893aae781..4c1c1413768e 100644 --- a/tests/pipelines/cosmos/test_cosmos2_5_predict.py +++ b/tests/pipelines/cosmos/test_cosmos2_5_predict.py @@ -36,7 +36,12 @@ class Cosmos2_5_PredictBaseWrapper(Cosmos2_5_PredictBase): @staticmethod def from_pretrained(*args, **kwargs): - kwargs["safety_checker"] = DummyCosmosSafetyChecker() + if 
"safety_checker" not in kwargs or kwargs["safety_checker"] is None: + safety_checker = DummyCosmosSafetyChecker() + torch_dtype = kwargs.get("torch_dtype") + if isinstance(torch_dtype, torch.dtype): + safety_checker = safety_checker.to(dtype=torch_dtype) + kwargs["safety_checker"] = safety_checker return Cosmos2_5_PredictBase.from_pretrained(*args, **kwargs) From abba01c9dc62054219e06f1b5a87c71320f4ce7b Mon Sep 17 00:00:00 2001 From: Miguel Martin Date: Thu, 18 Dec 2025 01:50:20 +0000 Subject: [PATCH 14/22] Remove FlowUniPCMultistepScheduler --- src/diffusers/__init__.py | 2 - src/diffusers/schedulers/__init__.py | 2 - .../scheduling_flow_unipc_multistep.py | 723 ------------------ .../cosmos/test_cosmos2_5_predict.py | 4 +- tests/schedulers/test_scheduler_flow_unipc.py | 123 --- 5 files changed, 2 insertions(+), 852 deletions(-) delete mode 100644 src/diffusers/schedulers/scheduling_flow_unipc_multistep.py delete mode 100644 tests/schedulers/test_scheduler_flow_unipc.py diff --git a/src/diffusers/__init__.py b/src/diffusers/__init__.py index 8623aa61178f..c7674c3c51d3 100644 --- a/src/diffusers/__init__.py +++ b/src/diffusers/__init__.py @@ -347,7 +347,6 @@ "FlowMatchEulerDiscreteScheduler", "FlowMatchHeunDiscreteScheduler", "FlowMatchLCMScheduler", - "FlowUniPCMultistepScheduler", "HeunDiscreteScheduler", "IPNDMScheduler", "KarrasVeScheduler", @@ -1085,7 +1084,6 @@ FlowMatchEulerDiscreteScheduler, FlowMatchHeunDiscreteScheduler, FlowMatchLCMScheduler, - FlowUniPCMultistepScheduler, HeunDiscreteScheduler, IPNDMScheduler, KarrasVeScheduler, diff --git a/src/diffusers/schedulers/__init__.py b/src/diffusers/schedulers/__init__.py index eb6dcda0188a..29052c1ba0cb 100644 --- a/src/diffusers/schedulers/__init__.py +++ b/src/diffusers/schedulers/__init__.py @@ -61,7 +61,6 @@ _import_structure["scheduling_flow_match_euler_discrete"] = ["FlowMatchEulerDiscreteScheduler"] _import_structure["scheduling_flow_match_heun_discrete"] = ["FlowMatchHeunDiscreteScheduler"] _import_structure["scheduling_flow_match_lcm"] = ["FlowMatchLCMScheduler"] - _import_structure["scheduling_flow_unipc_multistep"] = ["FlowUniPCMultistepScheduler"] _import_structure["scheduling_heun_discrete"] = ["HeunDiscreteScheduler"] _import_structure["scheduling_ipndm"] = ["IPNDMScheduler"] _import_structure["scheduling_k_dpm_2_ancestral_discrete"] = ["KDPM2AncestralDiscreteScheduler"] @@ -164,7 +163,6 @@ from .scheduling_flow_match_euler_discrete import FlowMatchEulerDiscreteScheduler from .scheduling_flow_match_heun_discrete import FlowMatchHeunDiscreteScheduler from .scheduling_flow_match_lcm import FlowMatchLCMScheduler - from .scheduling_flow_unipc_multistep import FlowUniPCMultistepScheduler from .scheduling_heun_discrete import HeunDiscreteScheduler from .scheduling_ipndm import IPNDMScheduler from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler diff --git a/src/diffusers/schedulers/scheduling_flow_unipc_multistep.py b/src/diffusers/schedulers/scheduling_flow_unipc_multistep.py deleted file mode 100644 index e727f3bff279..000000000000 --- a/src/diffusers/schedulers/scheduling_flow_unipc_multistep.py +++ /dev/null @@ -1,723 +0,0 @@ -from typing import List, Literal, Optional, Tuple, Union - -import numpy as np -import torch - -from diffusers.configuration_utils import ConfigMixin, register_to_config -from diffusers.schedulers.scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput -from diffusers.utils import deprecate - - -def _get_karras_sigmas( - num_train_steps: int, 
num_steps: int, sigma_max: float, sigma_min: float, rho: int, final_sigmas_type: str -): - sigmas = np.arange(num_steps + 1, dtype=np.float32) / num_steps - min_inv_rho = sigma_min ** (1 / rho) - max_inv_rho = sigma_max ** (1 / rho) - sigmas = (max_inv_rho + sigmas * (min_inv_rho - max_inv_rho)) ** rho - sigmas = sigmas / (1 + sigmas) - - if final_sigmas_type == "zero": - sigma_last = 0 - elif final_sigmas_type == "sigma_min": - sigma_last = sigmas[-1] - else: - raise ValueError(f"`final_sigmas_type` must be 'zero' or 'sigma_min' but got {final_sigmas_type}") - - timesteps = torch.from_numpy(sigmas * num_train_steps).to(torch.int64) - sigmas = np.concatenate([sigmas, [sigma_last]]).astype(np.float32) - sigmas = torch.from_numpy(sigmas).to(dtype=torch.float32) - return sigmas, timesteps - - -class FlowUniPCMultistepScheduler(SchedulerMixin, ConfigMixin): - """ - `FlowUniPCMultistepScheduler` is the UniPC algorithm[1] for flow matching[2], but strictly uses the Karras sigmas - [3] (i.e. it follows the EDMEulerScheduler). - - Note this a simplified version of `UniPCMultistepScheduler`, as it: - 1. Does not have variance preserving sigmas - 2. Does not store betas and other variables used by `UniPCMultistepScheduler` - 3. Assumes prediction_type == "flow_prediction" (this parameter is removed) - - References: - [1] Wang, Chong, et al. "UniPC: A Unified Predictor-Corrector Framework for Fast Sampling of Diffusion Models" - https://arxiv.org/abs/2302.04867 [2] Lipman, Chen, et al. "Flow matching for generative modeling." - https://arxiv.org/abs/2210.02747 [3] Karras, Tero, et al. "Elucidating the Design Space of Diffusion-Based - Generative Models." https://huggingface.co/papers/2206.00364 - - This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic - methods the library implements for all schedulers such as loading and saving. - - Args: - num_train_timesteps (`int`, defaults to 1000): - The number of diffusion steps to train the model. - solver_order (`int`, default `2`): - The UniPC order which can be any positive integer. The effective order of accuracy is `solver_order + 1` - due to the UniC. It is recommended to use `solver_order=2` for guided sampling, and `solver_order=3` for - unconditional sampling. - thresholding (`bool`, defaults to `False`): - Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such - as Stable Diffusion. - dynamic_thresholding_ratio (`float`, defaults to 0.995): - The ratio for the dynamic thresholding method. Valid only when `thresholding=True`. - sample_max_value (`float`, defaults to 1.0): - The threshold value for dynamic thresholding. Valid only when `thresholding=True` and `predict_x0=True`. - predict_x0 (`bool`, defaults to `True`): - Whether to use the updating algorithm on the predicted x0. - solver_type (`str`, default `bh2`): - Solver type for UniPC. It is recommended to use `bh1` for unconditional sampling when steps < 10, and `bh2` - otherwise. - lower_order_final (`bool`, default `True`): - Whether to use lower-order solvers in the final steps. Only valid for < 15 inference steps. This can - stabilize the sampling of DPMSolver for steps < 15, especially for steps <= 10. - disable_corrector (`list`, default `[]`): - Decides which step to disable the corrector to mitigate the misalignment between `epsilon_theta(x_t, c)` - and `epsilon_theta(x_t^c, c)` which can influence convergence for a large guidance scale. 
Corrector is - usually disabled during the first few steps. - solver_p (`SchedulerMixin`, default `None`): - Any other scheduler that if specified, the algorithm becomes `solver_p + UniC`. - final_sigmas_type (`str`, defaults to `"zero"`): - The final `sigma` value for the noise schedule during the sampling process. If `"sigma_min"`, the final - sigma is the same as the last sigma in the training schedule. If `zero`, the final sigma is set to 0. - """ - - _compatibles = [e.name for e in KarrasDiffusionSchedulers] - order = 1 - - @register_to_config - def __init__( - self, - num_train_timesteps: int = 1000, - solver_order: int = 2, - thresholding: bool = False, - dynamic_thresholding_ratio: float = 0.995, - sample_max_value: float = 1.0, - predict_x0: bool = True, - solver_type: str = "bh2", - lower_order_final: bool = True, - disable_corrector: List[int] = [], - solver_p: SchedulerMixin = None, - final_sigmas_type: Literal["zero", "sigma_min"] = "zero", - rho: int = 7, - sigma_max: float = 200.0, - sigma_min: float = 0.01, - ): - if solver_type not in ["bh1", "bh2"]: - if solver_type in ["midpoint", "heun", "logrho"]: - self.register_to_config(solver_type="bh2") - else: - raise NotImplementedError(f"{solver_type} is not implemented for {self.__class__}") - - self.predict_x0 = predict_x0 - self.disable_corrector = disable_corrector - self.solver_p = solver_p - self.num_inference_steps = None - - self.sigmas, self.timesteps = _get_karras_sigmas( - num_train_timesteps, num_train_timesteps, sigma_max, sigma_min, rho, final_sigmas_type - ) - self.sigma_min = self.sigmas[-1].item() - self.sigma_max = self.sigmas[0].item() - - self._reset_state(solver_order) - - @property - def step_index(self): - """ - The index counter for current timestep. It will increase 1 after each scheduler step. - """ - return self._step_index - - @property - def begin_index(self): - """ - The index for the first timestep. It should be set from pipeline with `set_begin_index` method. - """ - return self._begin_index - - # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index - def set_begin_index(self, begin_index: int = 0): - """ - Sets the begin index for the scheduler. This function should be run from pipeline before the inference. - - Args: - begin_index (`int`): - The begin index for the scheduler. - """ - self._begin_index = begin_index - - # Modified from diffusers.schedulers.scheduling_flow_match_euler_discrete.FlowMatchEulerDiscreteScheduler.set_timesteps - def set_timesteps( - self, - num_inference_steps: Union[int, None] = None, - device: Union[str, torch.device] = None, - sigmas: Optional[List[float]] = None, - ): - """ - Sets the discrete timesteps used for the diffusion chain (to be run before inference). - - Args: - num_inference_steps (`int`): - Total number of the spacing of the time steps. - device (`str` or `torch.device`, *optional*): - The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. 
- """ - assert sigmas is None, "sigmas are not supported for FlowUniPCMultistepScheduler" - - self.sigmas, self.timesteps = _get_karras_sigmas( - self.config.num_train_timesteps, - num_inference_steps, - self.config.sigma_max, - self.config.sigma_min, - self.config.rho, - self.config.final_sigmas_type, - ) - self.num_inference_steps = len(self.timesteps) - - self.sigma_min = self.sigmas[-1].item() - self.sigma_max = self.sigmas[0].item() - - self.sigmas = self.sigmas.to(device) - self.timesteps = self.timesteps.to(device) - self._reset_state(device=device) - - def _reset_state(self, solver_order: Optional[int] = None, device=None): - """ - Resets the noise schedule & solver state variables - """ - solver_order = solver_order or self.config.solver_order - self.model_outputs = [None] * solver_order - self.timestep_list = [None] * solver_order - self.lower_order_nums = 0 - self.last_sample = None - self._step_index = None - self._begin_index = None - if self.solver_p: - self.solver_p.set_timesteps(self.num_inference_steps, device=device) - - # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample - def _threshold_sample(self, sample: torch.Tensor) -> torch.Tensor: - """ - "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the - prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by - s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing - pixels from saturation at each step. We find that dynamic thresholding results in significantly better - photorealism as well as better image-text alignment, especially when using very large guidance weights." - - https://arxiv.org/abs/2205.11487 - """ - dtype = sample.dtype - batch_size, channels, *remaining_dims = sample.shape - - if dtype not in (torch.float32, torch.float64): - sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half - - # Flatten sample for doing quantile calculation along each image - sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) - - abs_sample = sample.abs() # "a certain percentile absolute pixel value" - - s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) - s = torch.clamp( - s, min=1, max=self.config.sample_max_value - ) # When clamped to min=1, equivalent to standard clipping to [-1, 1] - s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0 - sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s" - - sample = sample.reshape(batch_size, channels, *remaining_dims) - sample = sample.to(dtype) - - return sample - - # Copied from diffusers.schedulers.scheduling_unipc_multistep.UniPCMultistepScheduler._sigma_to_alpha_sigma_t - def _sigma_to_alpha_sigma_t(self, sigma): - return 1 - sigma, sigma - - # Modified from diffusers.schedulers.scheduling_unipc_multistep.UniPCMultistepScheduler.convert_model_output - def convert_model_output( - self, - model_output: torch.Tensor, - *args, - sample: torch.Tensor = None, - **kwargs, - ) -> torch.Tensor: - r""" - Convert the model output to the corresponding type the UniPC algorithm needs. - - Args: - model_output (`torch.Tensor`): - The direct output from the learned diffusion model. - timestep (`int`): - The current discrete timestep in the diffusion chain. 
- sample (`torch.Tensor`): - A current instance of a sample created by the diffusion process. - - Returns: - `torch.Tensor`: - The converted model output. - """ - timestep = args[0] if len(args) > 0 else kwargs.pop("timestep", None) - if sample is None: - if len(args) > 1: - sample = args[1] - else: - raise ValueError("missing `sample` as a required keyward argument") - if timestep is not None: - deprecate( - "timesteps", - "1.0.0", - "Passing `timesteps` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", - ) - - sigma = self.sigmas[self.step_index] - _, sigma_t = self._sigma_to_alpha_sigma_t(sigma) - - if self.predict_x0: - sigma_t = self.sigmas[self.step_index] - x0_pred = sample - sigma_t * model_output - - if self.config.thresholding: - x0_pred = self._threshold_sample(x0_pred) - return x0_pred - else: - sigma_t = self.sigmas[self.step_index] - epsilon = sample - (1 - sigma_t) * model_output - - if self.config.thresholding: - sigma_t = self.sigmas[self.step_index] - x0_pred = sample - sigma_t * model_output - x0_pred = self._threshold_sample(x0_pred) - epsilon = model_output + x0_pred - - return epsilon - - # Copied from diffusers.schedulers.scheduling_unipc_multistep.UniPCMultistepScheduler.multistep_uni_p_bh_update - def multistep_uni_p_bh_update( - self, - model_output: torch.Tensor, - *args, - sample: torch.Tensor = None, - order: int = None, - **kwargs, - ) -> torch.Tensor: - """ - One step for the UniP (B(h) version). Alternatively, `self.solver_p` is used if is specified. - - Args: - model_output (`torch.Tensor`): - The direct output from the learned diffusion model at the current timestep. - sample (`torch.Tensor`): - A current instance of a sample created by the diffusion process. - order (`int`): - The order of UniP at this timestep (corresponds to the *p* in UniPC-p). - - Returns: - `torch.Tensor`: - The sample tensor at the previous timestep. 
- """ - prev_timestep = args[0] if len(args) > 0 else kwargs.pop("prev_timestep", None) - if sample is None: - if len(args) > 1: - sample = args[1] - else: - raise ValueError(" missing `sample` as a required keyward argument") - if order is None: - if len(args) > 2: - order = args[2] - else: - raise ValueError(" missing `order` as a required keyward argument") - if prev_timestep is not None: - deprecate( - "prev_timestep", - "1.0.0", - "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", - ) - model_output_list = self.model_outputs - - s0 = self.timestep_list[-1] - m0 = model_output_list[-1] - x = sample - - if self.solver_p: - x_t = self.solver_p.step(model_output, s0, x).prev_sample - return x_t - - sigma_t, sigma_s0 = self.sigmas[self.step_index + 1], self.sigmas[self.step_index] - alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) - alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0) - - lambda_t = torch.log(alpha_t) - torch.log(sigma_t) - lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) - - h = lambda_t - lambda_s0 - device = sample.device - - rks = [] - D1s = [] - for i in range(1, order): - si = self.step_index - i - mi = model_output_list[-(i + 1)] - alpha_si, sigma_si = self._sigma_to_alpha_sigma_t(self.sigmas[si]) - lambda_si = torch.log(alpha_si) - torch.log(sigma_si) - rk = (lambda_si - lambda_s0) / h - rks.append(rk) - D1s.append((mi - m0) / rk) - - rks.append(1.0) - rks = torch.tensor(rks, device=device) - - R = [] - b = [] - - hh = -h if self.predict_x0 else h - h_phi_1 = torch.expm1(hh) # h\phi_1(h) = e^h - 1 - h_phi_k = h_phi_1 / hh - 1 - - factorial_i = 1 - - if self.config.solver_type == "bh1": - B_h = hh - elif self.config.solver_type == "bh2": - B_h = torch.expm1(hh) - else: - raise NotImplementedError() - - for i in range(1, order + 1): - R.append(torch.pow(rks, i - 1)) - b.append(h_phi_k * factorial_i / B_h) - factorial_i *= i + 1 - h_phi_k = h_phi_k / hh - 1 / factorial_i - - R = torch.stack(R) - b = torch.tensor(b, device=device) - - if len(D1s) > 0: - D1s = torch.stack(D1s, dim=1) # (B, K) - # for order 2, we use a simplified version - if order == 2: - rhos_p = torch.tensor([0.5], dtype=x.dtype, device=device) - else: - rhos_p = torch.linalg.solve(R[:-1, :-1], b[:-1]).to(device).to(x.dtype) - else: - D1s = None - - if self.predict_x0: - x_t_ = sigma_t / sigma_s0 * x - alpha_t * h_phi_1 * m0 - if D1s is not None: - pred_res = torch.einsum("k,bkc...->bc...", rhos_p, D1s) - else: - pred_res = 0 - x_t = x_t_ - alpha_t * B_h * pred_res - else: - x_t_ = alpha_t / alpha_s0 * x - sigma_t * h_phi_1 * m0 - if D1s is not None: - pred_res = torch.einsum("k,bkc...->bc...", rhos_p, D1s) - else: - pred_res = 0 - x_t = x_t_ - sigma_t * B_h * pred_res - - x_t = x_t.to(x.dtype) - return x_t - - # Copied from diffusers.schedulers.scheduling_unipc_multistep.UniPCMultistepScheduler.multistep_uni_c_bh_update - def multistep_uni_c_bh_update( - self, - this_model_output: torch.Tensor, - *args, - last_sample: torch.Tensor = None, - this_sample: torch.Tensor = None, - order: int = None, - **kwargs, - ) -> torch.Tensor: - """ - One step for the UniC (B(h) version). - - Args: - this_model_output (`torch.Tensor`): - The model outputs at `x_t`. - this_timestep (`int`): - The current timestep `t`. - last_sample (`torch.Tensor`): - The generated sample before the last predictor `x_{t-1}`. - this_sample (`torch.Tensor`): - The generated sample after the last predictor `x_{t}`. 
- order (`int`): - The `p` of UniC-p at this step. The effective order of accuracy should be `order + 1`. - - Returns: - `torch.Tensor`: - The corrected sample tensor at the current timestep. - """ - this_timestep = args[0] if len(args) > 0 else kwargs.pop("this_timestep", None) - if last_sample is None: - if len(args) > 1: - last_sample = args[1] - else: - raise ValueError("missing `last_sample` as a required keyward argument") - if this_sample is None: - if len(args) > 2: - this_sample = args[2] - else: - raise ValueError("missing `this_sample` as a required keyward argument") - if order is None: - if len(args) > 3: - order = args[3] - else: - raise ValueError("missing `order` as a required keyward argument") - if this_timestep is not None: - deprecate( - "this_timestep", - "1.0.0", - "Passing `this_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", - ) - - model_output_list = self.model_outputs - - m0 = model_output_list[-1] - x = last_sample - x_t = this_sample - model_t = this_model_output - - sigma_t, sigma_s0 = self.sigmas[self.step_index], self.sigmas[self.step_index - 1] - alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) - alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0) - - lambda_t = torch.log(alpha_t) - torch.log(sigma_t) - lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) - - h = lambda_t - lambda_s0 - device = this_sample.device - - rks = [] - D1s = [] - for i in range(1, order): - si = self.step_index - (i + 1) - mi = model_output_list[-(i + 1)] - alpha_si, sigma_si = self._sigma_to_alpha_sigma_t(self.sigmas[si]) - lambda_si = torch.log(alpha_si) - torch.log(sigma_si) - rk = (lambda_si - lambda_s0) / h - rks.append(rk) - D1s.append((mi - m0) / rk) - - rks.append(1.0) - rks = torch.tensor(rks, device=device) - - R = [] - b = [] - - hh = -h if self.predict_x0 else h - h_phi_1 = torch.expm1(hh) # h\phi_1(h) = e^h - 1 - h_phi_k = h_phi_1 / hh - 1 - - factorial_i = 1 - - if self.config.solver_type == "bh1": - B_h = hh - elif self.config.solver_type == "bh2": - B_h = torch.expm1(hh) - else: - raise NotImplementedError() - - for i in range(1, order + 1): - R.append(torch.pow(rks, i - 1)) - b.append(h_phi_k * factorial_i / B_h) - factorial_i *= i + 1 - h_phi_k = h_phi_k / hh - 1 / factorial_i - - R = torch.stack(R) - b = torch.tensor(b, device=device) - - if len(D1s) > 0: - D1s = torch.stack(D1s, dim=1) - else: - D1s = None - - # for order 1, we use a simplified version - if order == 1: - rhos_c = torch.tensor([0.5], dtype=x.dtype, device=device) - else: - rhos_c = torch.linalg.solve(R, b).to(device).to(x.dtype) - - if self.predict_x0: - x_t_ = sigma_t / sigma_s0 * x - alpha_t * h_phi_1 * m0 - if D1s is not None: - corr_res = torch.einsum("k,bkc...->bc...", rhos_c[:-1], D1s) - else: - corr_res = 0 - D1_t = model_t - m0 - x_t = x_t_ - alpha_t * B_h * (corr_res + rhos_c[-1] * D1_t) - else: - x_t_ = alpha_t / alpha_s0 * x - sigma_t * h_phi_1 * m0 - if D1s is not None: - corr_res = torch.einsum("k,bkc...->bc...", rhos_c[:-1], D1s) - else: - corr_res = 0 - D1_t = model_t - m0 - x_t = x_t_ - sigma_t * B_h * (corr_res + rhos_c[-1] * D1_t) - x_t = x_t.to(x.dtype) - return x_t - - def index_for_timestep(self, timestep, schedule_timesteps=None): - if schedule_timesteps is None: - schedule_timesteps = self.timesteps - - indices = (schedule_timesteps == timestep).nonzero() - - # The sigma index that is taken for the **very** first `step` - # is always the second index (or the last index if there 
is only 1) - # This way we can ensure we don't accidentally skip a sigma in - # case we start in the middle of the denoising schedule (e.g. for image-to-image) - pos = 1 if len(indices) > 1 else 0 - - return indices[pos].item() - - # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler._init_step_index - def _init_step_index(self, timestep): - """ - Initialize the step_index counter for the scheduler. - """ - - if self.begin_index is None: - if isinstance(timestep, torch.Tensor): - timestep = timestep.to(self.timesteps.device) - self._step_index = self.index_for_timestep(timestep) - else: - self._step_index = self._begin_index - - # Modified from diffusers.schedulers.scheduling_unipc_multistep.UniPCMultistepScheduler.step - def step( - self, - model_output: torch.Tensor, - timestep: Union[int, torch.Tensor], - sample: torch.Tensor, - return_dict: bool = True, - generator=None, - ) -> Union[SchedulerOutput, Tuple]: - """ - Predict the sample from the previous timestep by reversing the SDE. This function propagates the sample with - the multistep UniPC. - - Args: - model_output (`torch.Tensor`): - The direct output from learned diffusion model. - timestep (`int`): - The current discrete timestep in the diffusion chain. - sample (`torch.Tensor`): - A current instance of a sample created by the diffusion process. - return_dict (`bool`): - Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`. - - Returns: - [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`: - If return_dict is `True`, [`~schedulers.scheduling_utils.SchedulerOutput`] is returned, otherwise a - tuple is returned where the first element is the sample tensor. - - """ - if self.num_inference_steps is None: - raise ValueError( - "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" - ) - - if self.step_index is None: - self._init_step_index(timestep) - - use_corrector = ( - self.step_index > 0 and self.step_index - 1 not in self.disable_corrector and self.last_sample is not None - ) - - model_output_convert = self.convert_model_output(model_output, sample=sample) - - if use_corrector: - sample = self.multistep_uni_c_bh_update( - this_model_output=model_output_convert, - last_sample=self.last_sample, - this_sample=sample, - order=self.this_order, - ) - - for i in range(self.config.solver_order - 1): - self.model_outputs[i] = self.model_outputs[i + 1] - self.timestep_list[i] = self.timestep_list[i + 1] - - self.model_outputs[-1] = model_output_convert - self.timestep_list[-1] = timestep - - if self.config.lower_order_final: - this_order = min(self.config.solver_order, len(self.timesteps) - self.step_index) - else: - this_order = self.config.solver_order - - self.this_order = min(this_order, self.lower_order_nums + 1) # warmup for multistep - assert self.this_order > 0, "expected this_order > 0, this could be due to duplicate timesteps" - - self.last_sample = sample - prev_sample = self.multistep_uni_p_bh_update( - model_output=model_output, # pass the original non-converted model output, in case solver-p is used - sample=sample, - order=self.this_order, - ) - - if self.lower_order_nums < self.config.solver_order: - self.lower_order_nums += 1 - - # upon completion increase step index by one - self._step_index += 1 - - if not return_dict: - return (prev_sample, model_output_convert) - - return SchedulerOutput(prev_sample=prev_sample) - - def scale_model_input(self, sample: torch.Tensor, *args, **kwargs) -> 
torch.Tensor: - """ - Ensures interchangeability with schedulers that need to scale the denoising model input depending on the - current timestep. - - Args: - sample (`torch.Tensor`): - The input sample. - - Returns: - `torch.Tensor`: - A scaled input sample. - """ - return sample - - # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.add_noise - def add_noise( - self, - original_samples: torch.Tensor, - noise: torch.Tensor, - timesteps: torch.IntTensor, - ) -> torch.Tensor: - # Make sure sigmas and timesteps have the same device and dtype as original_samples - sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) - if original_samples.device.type == "mps" and torch.is_floating_point(timesteps): - # mps does not support float64 - schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) - timesteps = timesteps.to(original_samples.device, dtype=torch.float32) - else: - schedule_timesteps = self.timesteps.to(original_samples.device) - timesteps = timesteps.to(original_samples.device) - - # begin_index is None when the scheduler is used for training or pipeline does not implement set_begin_index - if self.begin_index is None: - step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] - elif self.step_index is not None: - # add_noise is called after first denoising step (for inpainting) - step_indices = [self.step_index] * timesteps.shape[0] - else: - # add noise is called before first denoising step to create initial latent(img2img) - step_indices = [self.begin_index] * timesteps.shape[0] - - sigma = sigmas[step_indices].flatten() - while len(sigma.shape) < len(original_samples.shape): - sigma = sigma.unsqueeze(-1) - - alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) - noisy_samples = alpha_t * original_samples + sigma_t * noise - return noisy_samples - - def __len__(self): - return self.config.num_train_timesteps diff --git a/tests/pipelines/cosmos/test_cosmos2_5_predict.py b/tests/pipelines/cosmos/test_cosmos2_5_predict.py index 4c1c1413768e..d3e92b9806a2 100644 --- a/tests/pipelines/cosmos/test_cosmos2_5_predict.py +++ b/tests/pipelines/cosmos/test_cosmos2_5_predict.py @@ -22,7 +22,7 @@ import torch from transformers import AutoTokenizer, Qwen2VLForConditionalGeneration -from diffusers import AutoencoderKLWan, Cosmos2_5_PredictBase, CosmosTransformer3DModel, FlowUniPCMultistepScheduler +from diffusers import AutoencoderKLWan, Cosmos2_5_PredictBase, CosmosTransformer3DModel, UniPCMultistepScheduler from ...testing_utils import enable_full_determinism, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS @@ -94,7 +94,7 @@ def get_dummy_components(self): ) torch.manual_seed(0) - scheduler = FlowUniPCMultistepScheduler() + scheduler = UniPCMultistepScheduler() # NOTE: using Qwen2 VL instead for tests (reason1 is based on 2.5) text_encoder = Qwen2VLForConditionalGeneration.from_pretrained( diff --git a/tests/schedulers/test_scheduler_flow_unipc.py b/tests/schedulers/test_scheduler_flow_unipc.py deleted file mode 100644 index 3cd7ab1694d0..000000000000 --- a/tests/schedulers/test_scheduler_flow_unipc.py +++ /dev/null @@ -1,123 +0,0 @@ -import tempfile -import unittest - -import torch - -from diffusers import FlowUniPCMultistepScheduler - - -class FlowUniPCMultistepSchedulerKarrasTest(unittest.TestCase): - def test_set_timesteps(self): - num_inference_steps = 4 - num_train_timesteps = 1000 - 
scheduler = FlowUniPCMultistepScheduler( - num_train_timesteps=num_train_timesteps, - solver_order=2, - ) - scheduler.set_timesteps(num_inference_steps=num_inference_steps) - - # 0 appended to end for sigmas - expected_sigmas = [ - 0.9950248599052429, - 0.9787454605102539, - 0.8774884343147278, - 0.3604971766471863, - 0.009900986216962337, - 0.0, - ] - expected_sigmas = torch.tensor(expected_sigmas) - expected_timesteps = (expected_sigmas * num_train_timesteps).to(torch.int64) - expected_timesteps = expected_timesteps[0:-1] - self.assertTrue(torch.allclose(scheduler.sigmas, expected_sigmas)) - self.assertTrue(torch.all(expected_timesteps == scheduler.timesteps)) - - def test_inference_train_same_schedule(self): - num_inference_steps = 4 - num_train_timesteps = num_inference_steps - scheduler = FlowUniPCMultistepScheduler( - num_train_timesteps=num_train_timesteps, - solver_order=2, - ) - before_sigmas = scheduler.sigmas.clone() - scheduler.set_timesteps(num_inference_steps=num_inference_steps) - after_sigmas = scheduler.sigmas - - self.assertTrue(torch.allclose(before_sigmas, after_sigmas)) - - def test_set_timesteps_with_nondefault_args(self): - num_inference_steps = 4 - scheduler = FlowUniPCMultistepScheduler( - sigma_max=50.0, - sigma_min=0.005, - rho=5.0, - final_sigmas_type="sigma_min", - ) - - scheduler.set_timesteps(num_inference_steps=num_inference_steps) - expected_sigmas = torch.tensor( - [ - 0.9803921580314636, - 0.9388325214385986, - 0.7652841210365295, - 0.2545345723628998, - 0.004975131247192621, - 0.004975131247192621, - ] - ) - self.assertTrue(torch.allclose(scheduler.sigmas, expected_sigmas)) - - def test_step(self): - scheduler = FlowUniPCMultistepScheduler( - num_train_timesteps=10, - solver_order=2, - ) - scheduler.set_timesteps(num_inference_steps=4, device="cpu") - - sample = torch.randn(2, 3, 4, dtype=torch.float16) - residual = torch.randn_like(sample) - timestep = scheduler.timesteps[0] - - output = scheduler.step(residual, timestep, sample).prev_sample - self.assertEqual(output.shape, (2, 3, 4)) - self.assertEqual(output.dtype, sample.dtype) - self.assertEqual(output.device, sample.device) - - def test_save_and_load_round_trip(self): - scheduler = FlowUniPCMultistepScheduler( - num_train_timesteps=12, - solver_order=2, - sigma_max=50.0, - sigma_min=0.005, - rho=5.0, - final_sigmas_type="sigma_min", - ) - scheduler.set_timesteps(num_inference_steps=6) - - with tempfile.TemporaryDirectory() as tmpdir: - scheduler.save_config(tmpdir) - loaded = FlowUniPCMultistepScheduler.from_pretrained(tmpdir) - - loaded.set_timesteps(num_inference_steps=6) - self.assertTrue(torch.equal(scheduler.timesteps, loaded.timesteps)) - self.assertTrue(torch.allclose(scheduler.sigmas, loaded.sigmas)) - - def test_full_loop_no_nan(self): - torch.manual_seed(0) - scheduler = FlowUniPCMultistepScheduler( - num_train_timesteps=16, - solver_order=2, - sigma_max=1.0, - sigma_min=0.01, - ) - scheduler.set_timesteps(num_inference_steps=6) - - def model(sample, t): - return 0.05 * torch.tanh(sample) - - sample = torch.ones(2, 3, 4) - for t in scheduler.timesteps: - residual = model(sample, t) - sample = scheduler.step(residual, t, sample).prev_sample - self.assertFalse(torch.isnan(sample).any()) - - self.assertEqual(sample.shape, (2, 3, 4)) From b9a35f5f99184c3998e5c7f1a779aaa9f373197b Mon Sep 17 00:00:00 2001 From: Miguel Martin Date: Thu, 18 Dec 2025 02:29:22 +0000 Subject: [PATCH 15/22] UniPCMultistepScheduler for use_flow_sigmas=True & use_karras_sigmas=True --- 
tests/schedulers/test_scheduler_unipc.py | 29 ++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/tests/schedulers/test_scheduler_unipc.py b/tests/schedulers/test_scheduler_unipc.py index 197c831cb015..90cca92fe8dd 100644 --- a/tests/schedulers/test_scheduler_unipc.py +++ b/tests/schedulers/test_scheduler_unipc.py @@ -399,3 +399,32 @@ def test_beta_sigmas(self): def test_exponential_sigmas(self): self.check_over_configs(use_exponential_sigmas=True) + + def test_flow_and_karras_sigmas(self): + self.check_over_configs(use_flow_sigmas=True, use_karras_sigmas=True) + + def test_flow_and_karras_sigmas_values(self): + num_train_timesteps = 1000 + num_inference_steps = 5 + scheduler = UniPCMultistepScheduler( + sigma_min=0.01, + sigma_max=200.0, + use_flow_sigmas=True, + use_karras_sigmas=True, + num_train_timesteps=num_train_timesteps, + ) + scheduler.set_timesteps(num_inference_steps=num_inference_steps) + + expected_sigmas = [ + 0.9950248599052429, + 0.9787454605102539, + 0.8774884343147278, + 0.3604971766471863, + 0.009900986216962337, + 0.0, # 0 appended as default + ] + expected_sigmas = torch.tensor(expected_sigmas) + expected_timesteps = (expected_sigmas * num_train_timesteps).to(torch.int64) + expected_timesteps = expected_timesteps[0:-1] + self.assertTrue(torch.allclose(scheduler.sigmas, expected_sigmas)) + self.assertTrue(torch.all(expected_timesteps == scheduler.timesteps)) From dd429ef3ca4e67d3ca42cb8f01c6d9a6e82fa13b Mon Sep 17 00:00:00 2001 From: Miguel Martin Date: Thu, 18 Dec 2025 02:31:17 +0000 Subject: [PATCH 16/22] num_inference_steps=36 due to bug in scheduler used by predict2.5 --- src/diffusers/pipelines/cosmos/pipeline_cosmos2_5_predict.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/diffusers/pipelines/cosmos/pipeline_cosmos2_5_predict.py b/src/diffusers/pipelines/cosmos/pipeline_cosmos2_5_predict.py index 3fe7a5fde876..334ed5630644 100644 --- a/src/diffusers/pipelines/cosmos/pipeline_cosmos2_5_predict.py +++ b/src/diffusers/pipelines/cosmos/pipeline_cosmos2_5_predict.py @@ -547,7 +547,7 @@ def __call__( height: int = 704, width: int = 1280, num_frames: int = 93, - num_inference_steps: int = 35, + num_inference_steps: int = 36, guidance_scale: float = 7.0, fps: int = 16, num_videos_per_prompt: Optional[int] = 1, From b76f9f20ff694f7d18c5aa5e929315e0798494ec Mon Sep 17 00:00:00 2001 From: Miguel Martin Date: Thu, 18 Dec 2025 02:42:21 +0000 Subject: [PATCH 17/22] Address comments --- docs/source/en/api/pipelines/cosmos.md | 6 +++++ scripts/convert_cosmos_to_diffusers.py | 4 ++-- src/diffusers/__init__.py | 4 ++-- .../models/transformers/transformer_cosmos.py | 5 ++-- src/diffusers/pipelines/__init__.py | 4 ++-- src/diffusers/pipelines/cosmos/__init__.py | 4 ++-- .../cosmos/pipeline_cosmos2_5_predict.py | 24 ++++--------------- .../cosmos/test_cosmos2_5_predict.py | 11 ++++++--- 8 files changed, 29 insertions(+), 33 deletions(-) diff --git a/docs/source/en/api/pipelines/cosmos.md b/docs/source/en/api/pipelines/cosmos.md index fb9453480e74..60ecce660303 100644 --- a/docs/source/en/api/pipelines/cosmos.md +++ b/docs/source/en/api/pipelines/cosmos.md @@ -70,6 +70,12 @@ output.save("output.png") - all - __call__ +## Cosmos2_5_PredictBasePipeline + +[[autodoc]] Cosmos2_5_PredictBasePipeline + - all + - __call__ + ## CosmosPipelineOutput [[autodoc]] pipelines.cosmos.pipeline_output.CosmosPipelineOutput diff --git a/scripts/convert_cosmos_to_diffusers.py b/scripts/convert_cosmos_to_diffusers.py index 271d1b7b4ad8..6e70f8cc055d 100644 --- 
a/scripts/convert_cosmos_to_diffusers.py +++ b/scripts/convert_cosmos_to_diffusers.py @@ -63,7 +63,7 @@ FlowMatchEulerDiscreteScheduler, UniPCMultistepScheduler, ) -from diffusers.pipelines.cosmos.pipeline_cosmos2_5_predict import Cosmos2_5_PredictBase +from diffusers.pipelines.cosmos.pipeline_cosmos2_5_predict import Cosmos2_5_PredictBasePipeline def remove_keys_(key: str, state_dict: Dict[str, Any]): @@ -545,7 +545,7 @@ def save_pipeline_cosmos2_5(args, transformer, vae): sigma_min=0.01, ) - pipe = Cosmos2_5_PredictBase( + pipe = Cosmos2_5_PredictBasePipeline( text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, diff --git a/src/diffusers/__init__.py b/src/diffusers/__init__.py index c7674c3c51d3..83b3ade05646 100644 --- a/src/diffusers/__init__.py +++ b/src/diffusers/__init__.py @@ -463,7 +463,7 @@ "CogView4ControlPipeline", "CogView4Pipeline", "ConsisIDPipeline", - "Cosmos2_5_PredictBase", + "Cosmos2_5_PredictBasePipeline", "Cosmos2_5_PredictImage2World", "Cosmos2_5_PredictText2World", "Cosmos2_5_PredictVideo2World", @@ -1179,7 +1179,7 @@ CogView4ControlPipeline, CogView4Pipeline, ConsisIDPipeline, - Cosmos2_5_PredictBase, + Cosmos2_5_PredictBasePipeline, Cosmos2_5_PredictImage2World, Cosmos2_5_PredictText2World, Cosmos2_5_PredictVideo2World, diff --git a/src/diffusers/models/transformers/transformer_cosmos.py b/src/diffusers/models/transformers/transformer_cosmos.py index 2bd3a121427a..2b0c2667072b 100644 --- a/src/diffusers/models/transformers/transformer_cosmos.py +++ b/src/diffusers/models/transformers/transformer_cosmos.py @@ -488,8 +488,7 @@ def __init__( hidden_size, patch_size[0] * patch_size[1] * patch_size[2] * out_channels, bias=False ) - self.use_crossattn_projection = use_crossattn_projection - if self.use_crossattn_projection: + if self.config.use_crossattn_projection: self.crossattn_proj = nn.Sequential( nn.Linear(crossattn_proj_in_channels, encoder_hidden_states_channels, bias=True), nn.GELU(), @@ -557,7 +556,7 @@ def forward( else: assert False - if self.use_crossattn_projection: + if self.config.use_crossattn_projection: encoder_hidden_states = self.crossattn_proj(encoder_hidden_states) # 5. 
Transformer blocks diff --git a/src/diffusers/pipelines/__init__.py b/src/diffusers/pipelines/__init__.py index 8b98cda38c4b..95d871125a23 100644 --- a/src/diffusers/pipelines/__init__.py +++ b/src/diffusers/pipelines/__init__.py @@ -165,7 +165,7 @@ _import_structure["cogview4"] = ["CogView4Pipeline", "CogView4ControlPipeline"] _import_structure["consisid"] = ["ConsisIDPipeline"] _import_structure["cosmos"] = [ - "Cosmos2_5_PredictBase", + "Cosmos2_5_PredictBasePipeline", "Cosmos2_5_PredictImage2World", "Cosmos2_5_PredictText2World", "Cosmos2_5_PredictVideo2World", @@ -626,7 +626,7 @@ StableDiffusionXLControlNetXSPipeline, ) from .cosmos import ( - Cosmos2_5_PredictBase, + Cosmos2_5_PredictBasePipeline, Cosmos2_5_PredictImage2World, Cosmos2_5_PredictText2World, Cosmos2_5_PredictVideo2World, diff --git a/src/diffusers/pipelines/cosmos/__init__.py b/src/diffusers/pipelines/cosmos/__init__.py index 745921149833..bf8b25ea2919 100644 --- a/src/diffusers/pipelines/cosmos/__init__.py +++ b/src/diffusers/pipelines/cosmos/__init__.py @@ -23,7 +23,7 @@ _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure["pipeline_cosmos2_5_predict"] = [ - "Cosmos2_5_PredictBase", + "Cosmos2_5_PredictBasePipeline", "Cosmos2_5_PredictImage2World", "Cosmos2_5_PredictText2World", "Cosmos2_5_PredictVideo2World", @@ -42,7 +42,7 @@ from ...utils.dummy_torch_and_transformers_objects import * else: from .pipeline_cosmos2_5_predict import ( - Cosmos2_5_PredictBase, + Cosmos2_5_PredictBasePipeline, Cosmos2_5_PredictImage2World, Cosmos2_5_PredictText2World, Cosmos2_5_PredictVideo2World, diff --git a/src/diffusers/pipelines/cosmos/pipeline_cosmos2_5_predict.py b/src/diffusers/pipelines/cosmos/pipeline_cosmos2_5_predict.py index 334ed5630644..eb30883b93f3 100644 --- a/src/diffusers/pipelines/cosmos/pipeline_cosmos2_5_predict.py +++ b/src/diffusers/pipelines/cosmos/pipeline_cosmos2_5_predict.py @@ -71,11 +71,11 @@ def retrieve_latents( Examples: ```python >>> import torch - >>> from diffusers import Cosmos2_5_PredictBase + >>> from diffusers import Cosmos2_5_PredictBasePipeline >>> from diffusers.utils import export_to_video, load_image, load_video >>> model_id = "nvidia/Cosmos-Predict2.5-Base-2B" - >>> pipe = Cosmos2_5_PredictBase.from_pretrained(model_id, torch_dtype=torch.bfloat16) + >>> pipe = Cosmos2_5_PredictBasePipeline.from_pretrained(model_id, torch_dtype=torch.bfloat16) >>> pipe = pipe.to("cuda") >>> # Common negative prompt reused across modes. @@ -163,7 +163,7 @@ def retrieve_latents( """ -class Cosmos2_5_PredictBase(DiffusionPipeline): +class Cosmos2_5_PredictBasePipeline(DiffusionPipeline): r""" Pipeline for [Cosmos Predict2.5](https://github.com/nvidia-cosmos/cosmos-predict2.5) base model. 
@@ -233,20 +233,6 @@ def __init__( if self.latents_mean is None or self.latents_std is None: raise ValueError("VAE configuration must define both `latents_mean` and `latents_std`.") - - @property - def _execution_device(self): - device = super()._execution_device - if isinstance(device, torch.device) and device.type == "cpu": - for module_name in ("transformer", "text_encoder", "vae"): - module = getattr(self, module_name, None) - if module is None or not isinstance(module, torch.nn.Module): - continue - module_device = getattr(module, "device", None) - if isinstance(module_device, torch.device) and module_device.type != "cpu": - return module_device - return device - - # Copied from diffusers.pipelines.cosmos.pipeline_cosmos_text2world.CosmosTextToWorldPipeline._get_prompt_embeds def _get_prompt_embeds( self, @@ -398,6 +384,8 @@ def encode_prompt( return prompt_embeds, negative_prompt_embeds + # Modified from diffusers.pipelines.cosmos.pipeline_cosmos2_video2world.Cosmos2VideoToWorldPipeline.prepare_latents and + # diffusers.pipelines.cosmos.pipeline_cosmos2_text2image.Cosmos2TextToImagePipeline.prepare_latents def prepare_latents( self, video: Optional[torch.Tensor], @@ -458,8 +446,6 @@ def prepare_latents( cond_latents = torch.cat(cond_latents, dim=0).to(dtype) - if self.latents_mean is None or self.latents_std is None: - raise ValueError("VAE configuration must define `latents_mean` and `latents_std`.") latents_mean = self.latents_mean.to(device=device, dtype=dtype) latents_std = self.latents_std.to(device=device, dtype=dtype) cond_latents = (cond_latents - latents_mean) / latents_std diff --git a/tests/pipelines/cosmos/test_cosmos2_5_predict.py b/tests/pipelines/cosmos/test_cosmos2_5_predict.py index d3e92b9806a2..68c77cab3696 100644 --- a/tests/pipelines/cosmos/test_cosmos2_5_predict.py +++ b/tests/pipelines/cosmos/test_cosmos2_5_predict.py @@ -22,7 +22,12 @@ import torch from transformers import AutoTokenizer, Qwen2VLForConditionalGeneration -from diffusers import AutoencoderKLWan, Cosmos2_5_PredictBase, CosmosTransformer3DModel, UniPCMultistepScheduler +from diffusers import ( + AutoencoderKLWan, + Cosmos2_5_PredictBasePipeline, + CosmosTransformer3DModel, + UniPCMultistepScheduler, +) from ...testing_utils import enable_full_determinism, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS @@ -33,7 +38,7 @@ enable_full_determinism() -class Cosmos2_5_PredictBaseWrapper(Cosmos2_5_PredictBase): +class Cosmos2_5_PredictBaseWrapper(Cosmos2_5_PredictBasePipeline): @staticmethod def from_pretrained(*args, **kwargs): if "safety_checker" not in kwargs or kwargs["safety_checker"] is None: @@ -42,7 +47,7 @@ def from_pretrained(*args, **kwargs): if isinstance(torch_dtype, torch.dtype): safety_checker = safety_checker.to(dtype=torch_dtype) kwargs["safety_checker"] = safety_checker - return Cosmos2_5_PredictBase.from_pretrained(*args, **kwargs) + return Cosmos2_5_PredictBasePipeline.from_pretrained(*args, **kwargs) class Cosmos2_5_PredictPipelineFastTests(PipelineTesterMixin, unittest.TestCase): From 735fb0e94c54b63a8fdbfde9296b3a16f7d326a4 Mon Sep 17 00:00:00 2001 From: Miguel Martin Date: Thu, 18 Dec 2025 04:03:05 +0000 Subject: [PATCH 18/22] make style + make fix-copies --- .../cosmos/pipeline_cosmos2_5_predict.py | 3 +- .../dummy_torch_and_transformers_objects.py | 60 +++++++++++++++++++ tests/schedulers/test_scheduler_unipc.py | 2 +- 3 files changed, 62 insertions(+), 3 deletions(-) diff --git 
a/src/diffusers/pipelines/cosmos/pipeline_cosmos2_5_predict.py b/src/diffusers/pipelines/cosmos/pipeline_cosmos2_5_predict.py index eb30883b93f3..7d5e426a8197 100644 --- a/src/diffusers/pipelines/cosmos/pipeline_cosmos2_5_predict.py +++ b/src/diffusers/pipelines/cosmos/pipeline_cosmos2_5_predict.py @@ -233,7 +233,6 @@ def __init__( if self.latents_mean is None or self.latents_std is None: raise ValueError("VAE configuration must define both `latents_mean` and `latents_std`.") - # Copied from diffusers.pipelines.cosmos.pipeline_cosmos_text2world.CosmosTextToWorldPipeline._get_prompt_embeds def _get_prompt_embeds( self, prompt: Union[str, List[str]] = None, @@ -384,7 +383,7 @@ def encode_prompt( return prompt_embeds, negative_prompt_embeds - # Modified from diffusers.pipelines.cosmos.pipeline_cosmos2_video2world.Cosmos2VideoToWorldPipeline.prepare_latents and + # Modified from diffusers.pipelines.cosmos.pipeline_cosmos2_video2world.Cosmos2VideoToWorldPipeline.prepare_latents and # diffusers.pipelines.cosmos.pipeline_cosmos2_text2image.Cosmos2TextToImagePipeline.prepare_latents def prepare_latents( self, diff --git a/src/diffusers/utils/dummy_torch_and_transformers_objects.py b/src/diffusers/utils/dummy_torch_and_transformers_objects.py index 74a4146bfd33..5d36166f2757 100644 --- a/src/diffusers/utils/dummy_torch_and_transformers_objects.py +++ b/src/diffusers/utils/dummy_torch_and_transformers_objects.py @@ -767,6 +767,66 @@ def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) +class Cosmos2_5_PredictBasePipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class Cosmos2_5_PredictImage2World(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class Cosmos2_5_PredictText2World(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class Cosmos2_5_PredictVideo2World(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + class Cosmos2TextToImagePipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] diff --git a/tests/schedulers/test_scheduler_unipc.py b/tests/schedulers/test_scheduler_unipc.py index 90cca92fe8dd..ac7e1d3f88b4 100644 --- a/tests/schedulers/test_scheduler_unipc.py +++ b/tests/schedulers/test_scheduler_unipc.py 
@@ -414,7 +414,7 @@ def test_flow_and_karras_sigmas_values(self): num_train_timesteps=num_train_timesteps, ) scheduler.set_timesteps(num_inference_steps=num_inference_steps) - + expected_sigmas = [ 0.9950248599052429, 0.9787454605102539, From 46f7916635487b28403b66cf682d43c045830d25 Mon Sep 17 00:00:00 2001 From: Miguel Martin Date: Thu, 18 Dec 2025 18:17:48 +0000 Subject: [PATCH 19/22] fix tests + remove references to old pipelines --- src/diffusers/__init__.py | 6 --- src/diffusers/pipelines/__init__.py | 6 --- src/diffusers/pipelines/cosmos/__init__.py | 6 --- .../dummy_torch_and_transformers_objects.py | 45 ------------------- .../cosmos/test_cosmos2_5_predict.py | 40 ++++++++++++++--- 5 files changed, 33 insertions(+), 70 deletions(-) diff --git a/src/diffusers/__init__.py b/src/diffusers/__init__.py index 83b3ade05646..6aac3feffd0e 100644 --- a/src/diffusers/__init__.py +++ b/src/diffusers/__init__.py @@ -464,9 +464,6 @@ "CogView4Pipeline", "ConsisIDPipeline", "Cosmos2_5_PredictBasePipeline", - "Cosmos2_5_PredictImage2World", - "Cosmos2_5_PredictText2World", - "Cosmos2_5_PredictVideo2World", "Cosmos2TextToImagePipeline", "Cosmos2VideoToWorldPipeline", "CosmosTextToWorldPipeline", @@ -1180,9 +1177,6 @@ CogView4Pipeline, ConsisIDPipeline, Cosmos2_5_PredictBasePipeline, - Cosmos2_5_PredictImage2World, - Cosmos2_5_PredictText2World, - Cosmos2_5_PredictVideo2World, Cosmos2TextToImagePipeline, Cosmos2VideoToWorldPipeline, CosmosTextToWorldPipeline, diff --git a/src/diffusers/pipelines/__init__.py b/src/diffusers/pipelines/__init__.py index 95d871125a23..e8faf868e741 100644 --- a/src/diffusers/pipelines/__init__.py +++ b/src/diffusers/pipelines/__init__.py @@ -166,9 +166,6 @@ _import_structure["consisid"] = ["ConsisIDPipeline"] _import_structure["cosmos"] = [ "Cosmos2_5_PredictBasePipeline", - "Cosmos2_5_PredictImage2World", - "Cosmos2_5_PredictText2World", - "Cosmos2_5_PredictVideo2World", "Cosmos2TextToImagePipeline", "CosmosTextToWorldPipeline", "CosmosVideoToWorldPipeline", @@ -627,9 +624,6 @@ ) from .cosmos import ( Cosmos2_5_PredictBasePipeline, - Cosmos2_5_PredictImage2World, - Cosmos2_5_PredictText2World, - Cosmos2_5_PredictVideo2World, Cosmos2TextToImagePipeline, Cosmos2VideoToWorldPipeline, CosmosTextToWorldPipeline, diff --git a/src/diffusers/pipelines/cosmos/__init__.py b/src/diffusers/pipelines/cosmos/__init__.py index bf8b25ea2919..944f16553173 100644 --- a/src/diffusers/pipelines/cosmos/__init__.py +++ b/src/diffusers/pipelines/cosmos/__init__.py @@ -24,9 +24,6 @@ else: _import_structure["pipeline_cosmos2_5_predict"] = [ "Cosmos2_5_PredictBasePipeline", - "Cosmos2_5_PredictImage2World", - "Cosmos2_5_PredictText2World", - "Cosmos2_5_PredictVideo2World", ] _import_structure["pipeline_cosmos2_text2image"] = ["Cosmos2TextToImagePipeline"] _import_structure["pipeline_cosmos2_video2world"] = ["Cosmos2VideoToWorldPipeline"] @@ -43,9 +40,6 @@ else: from .pipeline_cosmos2_5_predict import ( Cosmos2_5_PredictBasePipeline, - Cosmos2_5_PredictImage2World, - Cosmos2_5_PredictText2World, - Cosmos2_5_PredictVideo2World, ) from .pipeline_cosmos2_text2image import Cosmos2TextToImagePipeline from .pipeline_cosmos2_video2world import Cosmos2VideoToWorldPipeline diff --git a/src/diffusers/utils/dummy_torch_and_transformers_objects.py b/src/diffusers/utils/dummy_torch_and_transformers_objects.py index 5d36166f2757..4e1eae211c6f 100644 --- a/src/diffusers/utils/dummy_torch_and_transformers_objects.py +++ b/src/diffusers/utils/dummy_torch_and_transformers_objects.py @@ -782,51 +782,6 @@ def 
from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers"]) -class Cosmos2_5_PredictImage2World(metaclass=DummyObject): - _backends = ["torch", "transformers"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch", "transformers"]) - - @classmethod - def from_config(cls, *args, **kwargs): - requires_backends(cls, ["torch", "transformers"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["torch", "transformers"]) - - -class Cosmos2_5_PredictText2World(metaclass=DummyObject): - _backends = ["torch", "transformers"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch", "transformers"]) - - @classmethod - def from_config(cls, *args, **kwargs): - requires_backends(cls, ["torch", "transformers"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["torch", "transformers"]) - - -class Cosmos2_5_PredictVideo2World(metaclass=DummyObject): - _backends = ["torch", "transformers"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch", "transformers"]) - - @classmethod - def from_config(cls, *args, **kwargs): - requires_backends(cls, ["torch", "transformers"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["torch", "transformers"]) - - class Cosmos2TextToImagePipeline(metaclass=DummyObject): _backends = ["torch", "transformers"] diff --git a/tests/pipelines/cosmos/test_cosmos2_5_predict.py b/tests/pipelines/cosmos/test_cosmos2_5_predict.py index 68c77cab3696..54d4edb485fe 100644 --- a/tests/pipelines/cosmos/test_cosmos2_5_predict.py +++ b/tests/pipelines/cosmos/test_cosmos2_5_predict.py @@ -20,7 +20,7 @@ import numpy as np import torch -from transformers import AutoTokenizer, Qwen2VLForConditionalGeneration +from transformers import Qwen2_5_VLConfig, Qwen2_5_VLForConditionalGeneration, Qwen2Tokenizer from diffusers import ( AutoencoderKLWan, @@ -43,9 +43,10 @@ class Cosmos2_5_PredictBaseWrapper(Cosmos2_5_PredictBasePipeline): def from_pretrained(*args, **kwargs): if "safety_checker" not in kwargs or kwargs["safety_checker"] is None: safety_checker = DummyCosmosSafetyChecker() + device_map = kwargs.get("device_map", "cpu") torch_dtype = kwargs.get("torch_dtype") - if isinstance(torch_dtype, torch.dtype): - safety_checker = safety_checker.to(dtype=torch_dtype) + if device_map is not None or torch_dtype is not None: + safety_checker = safety_checker.to(device_map, dtype=torch_dtype) kwargs["safety_checker"] = safety_checker return Cosmos2_5_PredictBasePipeline.from_pretrained(*args, **kwargs) @@ -101,11 +102,36 @@ def get_dummy_components(self): torch.manual_seed(0) scheduler = UniPCMultistepScheduler() - # NOTE: using Qwen2 VL instead for tests (reason1 is based on 2.5) - text_encoder = Qwen2VLForConditionalGeneration.from_pretrained( - "hf-internal-testing/tiny-random-Qwen2VLForConditionalGeneration", + torch.manual_seed(0) + config = Qwen2_5_VLConfig( + text_config={ + "hidden_size": 16, + "intermediate_size": 16, + "num_hidden_layers": 2, + "num_attention_heads": 2, + "num_key_value_heads": 2, + "rope_scaling": { + "mrope_section": [1, 1, 2], + "rope_type": "default", + "type": "default", + }, + "rope_theta": 1000000.0, + }, + vision_config={ + "depth": 2, + "hidden_size": 16, + "intermediate_size": 16, + "num_heads": 2, + "out_hidden_size": 16, + }, + hidden_size=16, + vocab_size=152064, + vision_end_token_id=151653, + vision_start_token_id=151652, + vision_token_id=151654, ) - tokenizer = 
AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-Qwen2VLForConditionalGeneration") + text_encoder = Qwen2_5_VLForConditionalGeneration(config) + tokenizer = Qwen2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-Qwen2VLForConditionalGeneration") components = { "transformer": transformer, From 5c28b087a820b4680eb287be5c141045720f70e6 Mon Sep 17 00:00:00 2001 From: Miguel Martin Date: Thu, 18 Dec 2025 19:26:02 +0000 Subject: [PATCH 20/22] address comments --- .../cosmos/pipeline_cosmos2_5_predict.py | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/src/diffusers/pipelines/cosmos/pipeline_cosmos2_5_predict.py b/src/diffusers/pipelines/cosmos/pipeline_cosmos2_5_predict.py index 7d5e426a8197..dca6a81576f0 100644 --- a/src/diffusers/pipelines/cosmos/pipeline_cosmos2_5_predict.py +++ b/src/diffusers/pipelines/cosmos/pipeline_cosmos2_5_predict.py @@ -74,8 +74,8 @@ def retrieve_latents( >>> from diffusers import Cosmos2_5_PredictBasePipeline >>> from diffusers.utils import export_to_video, load_image, load_video - >>> model_id = "nvidia/Cosmos-Predict2.5-Base-2B" - >>> pipe = Cosmos2_5_PredictBasePipeline.from_pretrained(model_id, torch_dtype=torch.bfloat16) + >>> model_id = "nvidia/Cosmos-Predict2.5-2B" + >>> pipe = Cosmos2_5_PredictBasePipeline.from_pretrained(model_id, revision="base/pre-trained/diffusers", torch_dtype=torch.bfloat16) >>> pipe = pipe.to("cuda") >>> # Common negative prompt reused across modes. @@ -157,7 +157,7 @@ def retrieve_latents( ... ).frames[0] >>> export_to_video(video, "video2world.mp4", fps=16) - >>> # To produce a single-frame image instead of a world (video) clip, set num_frames=1 and + >>> # To produce an image instead of a world (video) clip, set num_frames=1 and >>> # save the first frame: pipe(..., num_frames=1).frames[0][0]. ``` """ @@ -534,7 +534,6 @@ def __call__( num_frames: int = 93, num_inference_steps: int = 36, guidance_scale: float = 7.0, - fps: int = 16, num_videos_per_prompt: Optional[int] = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, @@ -582,8 +581,6 @@ def __call__( Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale > 1`. - fps (`int`, defaults to `16`): - The frames per second of the generated video. num_videos_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. 
generator (`torch.Generator` or `List[torch.Generator]`, *optional*): @@ -662,6 +659,9 @@ def __call__( else: batch_size = prompt_embeds.shape[0] + if batch_size != 1: + raise ValueError("batch_size must be 1") + # Encode input prompt ( prompt_embeds, @@ -682,8 +682,6 @@ def __call__( num_frames_in = None if image is not None: - # TODO: handle batch_size > 1 - assert batch_size == 1, "batch_size must be 1 for image input" image = torchvision.transforms.functional.to_tensor(image).unsqueeze(0) video = torch.cat([image, torch.zeros_like(image).repeat(num_frames - 1, 1, 1, 1)], dim=0) video = video.unsqueeze(0) @@ -700,7 +698,6 @@ def __call__( # pad with last frame (for video2world) num_frames_out = num_frames if video.shape[2] < num_frames_out: - assert batch_size == 1, "batch_size must be 1 for padding frames" n_pad_frames = num_frames_out - num_frames_in last_frame = video[0, :, -1:, :, :] # [C, T==1, H, W] pad_frames = last_frame.repeat(1, 1, n_pad_frames, 1, 1) # [B, C, T, H, W] From 8081797362fe562223459a209c34ab892e748e8b Mon Sep 17 00:00:00 2001 From: Miguel Martin Date: Thu, 18 Dec 2025 21:56:23 +0000 Subject: [PATCH 21/22] add revision in from_pretrained call --- src/diffusers/pipelines/cosmos/pipeline_cosmos2_5_predict.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/diffusers/pipelines/cosmos/pipeline_cosmos2_5_predict.py b/src/diffusers/pipelines/cosmos/pipeline_cosmos2_5_predict.py index dca6a81576f0..7f7ed943f38f 100644 --- a/src/diffusers/pipelines/cosmos/pipeline_cosmos2_5_predict.py +++ b/src/diffusers/pipelines/cosmos/pipeline_cosmos2_5_predict.py @@ -75,7 +75,9 @@ def retrieve_latents( >>> from diffusers.utils import export_to_video, load_image, load_video >>> model_id = "nvidia/Cosmos-Predict2.5-2B" - >>> pipe = Cosmos2_5_PredictBasePipeline.from_pretrained(model_id, revision="base/pre-trained/diffusers", torch_dtype=torch.bfloat16) + >>> pipe = Cosmos2_5_PredictBasePipeline.from_pretrained( + ... model_id, revision="diffusers/base/pre-trained", torch_dtype=torch.bfloat16 + ... ) >>> pipe = pipe.to("cuda") >>> # Common negative prompt reused across modes. From d1dab59d4723ca88ed8f86199fa7e1d9150c3190 Mon Sep 17 00:00:00 2001 From: Miguel Martin Date: Thu, 18 Dec 2025 22:03:54 +0000 Subject: [PATCH 22/22] fix tests --- .../pipelines/cosmos/pipeline_cosmos2_5_predict.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/diffusers/pipelines/cosmos/pipeline_cosmos2_5_predict.py b/src/diffusers/pipelines/cosmos/pipeline_cosmos2_5_predict.py index 7f7ed943f38f..6564b5937386 100644 --- a/src/diffusers/pipelines/cosmos/pipeline_cosmos2_5_predict.py +++ b/src/diffusers/pipelines/cosmos/pipeline_cosmos2_5_predict.py @@ -661,9 +661,6 @@ def __call__( else: batch_size = prompt_embeds.shape[0] - if batch_size != 1: - raise ValueError("batch_size must be 1") - # Encode input prompt ( prompt_embeds, @@ -684,6 +681,9 @@ def __call__( num_frames_in = None if image is not None: + if batch_size != 1: + raise ValueError(f"batch_size must be 1 for image input (given {batch_size})") + image = torchvision.transforms.functional.to_tensor(image).unsqueeze(0) video = torch.cat([image, torch.zeros_like(image).repeat(num_frames - 1, 1, 1, 1)], dim=0) video = video.unsqueeze(0) @@ -694,6 +694,9 @@ def __call__( else: num_frames_in = len(video) + if batch_size != 1: + raise ValueError(f"batch_size must be 1 for video input (given {batch_size})") + assert video is not None video = self.video_processor.preprocess_video(video, height, width)