From 549a055bb8877cae22a32f26ba5fcf769f17863e Mon Sep 17 00:00:00 2001 From: Valentin Pratz Date: Sun, 13 Apr 2025 12:58:58 +0000 Subject: [PATCH 01/52] Add diffusion model implementation, EDM variant Preliminary implementation, to be extended with other variants as well. --- bayesflow/experimental/__init__.py | 1 + bayesflow/experimental/diffusion_model.py | 346 ++++++++++++++++++++++ tests/test_networks/conftest.py | 23 +- 3 files changed, 368 insertions(+), 2 deletions(-) create mode 100644 bayesflow/experimental/diffusion_model.py diff --git a/bayesflow/experimental/__init__.py b/bayesflow/experimental/__init__.py index 4c6f80848..1eadd1802 100644 --- a/bayesflow/experimental/__init__.py +++ b/bayesflow/experimental/__init__.py @@ -4,6 +4,7 @@ from .cif import CIF from .continuous_time_consistency_model import ContinuousTimeConsistencyModel +from .diffusion_model import DiffusionModel from .free_form_flow import FreeFormFlow from ..utils._docs import _add_imports_to_all diff --git a/bayesflow/experimental/diffusion_model.py b/bayesflow/experimental/diffusion_model.py new file mode 100644 index 000000000..ccaedf22e --- /dev/null +++ b/bayesflow/experimental/diffusion_model.py @@ -0,0 +1,346 @@ +from collections.abc import Sequence +import keras +from keras import ops +from keras.saving import register_keras_serializable as serializable + +from bayesflow.types import Tensor, Shape +import bayesflow as bf +from bayesflow.networks import InferenceNetwork + +from bayesflow.utils import ( + expand_right_as, + find_network, + jacobian_trace, + keras_kwargs, + serialize_value_or_type, + deserialize_value_or_type, + weighted_mean, + integrate, +) + + +@serializable(package="bayesflow.networks") +class DiffusionModel(InferenceNetwork): + """Diffusion Model as described as Elucidated Diffusion Model in [1]. 
+ + [1] Elucidating the Design Space of Diffusion-Based Generative Models: arXiv:2206.00364 + """ + + MLP_DEFAULT_CONFIG = { + "widths": (256, 256, 256, 256, 256), + "activation": "mish", + "kernel_initializer": "he_normal", + "residual": True, + "dropout": 0.0, + "spectral_normalization": False, + } + + INTEGRATE_DEFAULT_CONFIG = { + "method": "euler", + "steps": 100, + } + + def __init__( + self, + subnet: str | type = "mlp", + integrate_kwargs: dict[str, any] = None, + subnet_kwargs: dict[str, any] = None, + sigma_data=1.0, + **kwargs, + ): + """ + Initializes a diffusion model with configurable subnet architecture. + + This model learns a transformation from a Gaussian latent distribution to a target distribution using a + specified subnet type, which can be an MLP or a custom network. + + The integration steps can be customized with additional parameters available in the respective + configuration dictionary. + + Parameters + ---------- + subnet : str or type, optional + The architecture used for the transformation network. Can be "mlp" or a custom + callable network. Default is "mlp". + integrate_kwargs : dict[str, any], optional + Additional keyword arguments for the integration process. Default is None. + subnet_kwargs : dict[str, any], optional + Keyword arguments passed to the subnet constructor or used to update the default MLP settings. + sigma_data : float, optional + Averaged standard deviation of the target distribution. Default is 1.0. + **kwargs + Additional keyword arguments passed to the subnet and other components. 
+ """ + + super().__init__(base_distribution=None, **keras_kwargs(kwargs)) + + # internal tunable parameters not intended to be modified by the average user + self.max_sigma = kwargs.get("max_sigma", 80.0) + self.min_sigma = kwargs.get("min_sigma", 1e-4) + self.rho = kwargs.get("rho", 7) + # hyper-parameters for sampling the noise level + self.p_mean = kwargs.get("p_mean", -1.2) + self.p_std = kwargs.get("p_std", 1.2) + + # latent distribution (not configurable) + self.base_distribution = bf.distributions.DiagonalNormal(mean=0.0, std=self.max_sigma) + self.integrate_kwargs = self.INTEGRATE_DEFAULT_CONFIG | (integrate_kwargs or {}) + + self.sigma_data = sigma_data + + self.seed_generator = keras.random.SeedGenerator() + + subnet_kwargs = subnet_kwargs or {} + if subnet == "mlp": + subnet_kwargs = self.MLP_DEFAULT_CONFIG | subnet_kwargs + + self.subnet = find_network(subnet, **subnet_kwargs) + self.output_projector = keras.layers.Dense(units=None, bias_initializer="zeros") + + # serialization: store all parameters necessary to call __init__ + self.config = { + "integrate_kwargs": self.integrate_kwargs, + "subnet_kwargs": subnet_kwargs, + "sigma_data": sigma_data, + **kwargs, + } + self.config = serialize_value_or_type(self.config, "subnet", subnet) + + def build(self, xz_shape: Shape, conditions_shape: Shape = None) -> None: + super().build(xz_shape, conditions_shape=conditions_shape) + + self.output_projector.units = xz_shape[-1] + input_shape = list(xz_shape) + + # construct time vector + input_shape[-1] += 1 + if conditions_shape is not None: + input_shape[-1] += conditions_shape[-1] + + input_shape = tuple(input_shape) + + self.subnet.build(input_shape) + out_shape = self.subnet.compute_output_shape(input_shape) + self.output_projector.build(out_shape) + + def get_config(self): + base_config = super().get_config() + return base_config | self.config + + @classmethod + def from_config(cls, config): + config = deserialize_value_or_type(config, "subnet") + return 
cls(**config) + + def _c_skip_fn(self, sigma): + return self.sigma_data**2 / (sigma**2 + self.sigma_data**2) + + def _c_out_fn(self, sigma): + return sigma * self.sigma_data / ops.sqrt(self.sigma_data**2 + sigma**2) + + def _c_in_fn(self, sigma): + return 1.0 / ops.sqrt(sigma**2 + self.sigma_data**2) + + def _c_noise_fn(self, sigma): + return 0.25 * ops.log(sigma) + + def _denoiser_fn( + self, + xz: Tensor, + sigma: Tensor, + conditions: Tensor = None, + training: bool = False, + ): + # calculate output of the network + c_in = self._c_in_fn(sigma) + c_noise = self._c_noise_fn(sigma) + xz_pre = c_in * xz + if conditions is None: + xtc = keras.ops.concatenate([xz_pre, c_noise], axis=-1) + else: + xtc = keras.ops.concatenate([xz_pre, c_noise, conditions], axis=-1) + out = self.output_projector(self.subnet(xtc, training=training), training=training) + return self._c_skip_fn(sigma) * xz + self._c_out_fn(sigma) * out + + def velocity( + self, + xz: Tensor, + sigma: float | Tensor, + conditions: Tensor = None, + training: bool = False, + ) -> Tensor: + # transform sigma vector into correct shape + sigma = keras.ops.convert_to_tensor(sigma, dtype=keras.ops.dtype(xz)) + sigma = expand_right_as(sigma, xz) + sigma = keras.ops.broadcast_to(sigma, keras.ops.shape(xz)[:-1] + (1,)) + + d = self._denoiser_fn(xz, sigma, conditions, training=training) + return (xz - d) / sigma + + def _velocity_trace( + self, + xz: Tensor, + sigma: Tensor, + conditions: Tensor = None, + max_steps: int = None, + training: bool = False, + ) -> (Tensor, Tensor): + def f(x): + return self.velocity(x, sigma=sigma, conditions=conditions, training=training) + + v, trace = jacobian_trace(f, xz, max_steps=max_steps, seed=self.seed_generator, return_output=True) + + return v, keras.ops.expand_dims(trace, axis=-1) + + def _forward( + self, + x: Tensor, + conditions: Tensor = None, + density: bool = False, + training: bool = False, + **kwargs, + ) -> Tensor | tuple[Tensor, Tensor]: + integrate_kwargs = 
self.integrate_kwargs | kwargs + if isinstance(integrate_kwargs["steps"], int): + # set schedule for specified number of steps + integrate_kwargs["steps"] = self._integration_schedule(integrate_kwargs["steps"], dtype=ops.dtype(x)) + if density: + + def deltas(time, xz): + v, trace = self._velocity_trace(xz, sigma=time, conditions=conditions, training=training) + return {"xz": v, "trace": trace} + + state = { + "xz": x, + "trace": keras.ops.zeros(keras.ops.shape(x)[:-1] + (1,), dtype=keras.ops.dtype(x)), + } + state = integrate( + deltas, + state, + **integrate_kwargs, + ) + + z = state["xz"] + log_density = self.base_distribution.log_prob(z) + keras.ops.squeeze(state["trace"], axis=-1) + + return z, log_density + + def deltas(time, xz): + return {"xz": self.velocity(xz, sigma=time, conditions=conditions, training=training)} + + state = {"xz": x} + state = integrate( + deltas, + state, + **integrate_kwargs, + ) + + z = state["xz"] + + return z + + def _inverse( + self, + z: Tensor, + conditions: Tensor = None, + density: bool = False, + training: bool = False, + **kwargs, + ) -> Tensor | tuple[Tensor, Tensor]: + integrate_kwargs = self.integrate_kwargs | kwargs + if isinstance(integrate_kwargs["steps"], int): + # set schedule for specified number of steps + integrate_kwargs["steps"] = self._integration_schedule( + integrate_kwargs["steps"], inverse=True, dtype=ops.dtype(z) + ) + if density: + + def deltas(time, xz): + v, trace = self._velocity_trace(xz, sigma=time, conditions=conditions, training=training) + return {"xz": v, "trace": trace} + + state = { + "xz": z, + "trace": keras.ops.zeros(keras.ops.shape(z)[:-1] + (1,), dtype=keras.ops.dtype(z)), + } + state = integrate(deltas, state, **integrate_kwargs) + + x = state["xz"] + log_density = self.base_distribution.log_prob(z) - keras.ops.squeeze(state["trace"], axis=-1) + + return x, log_density + + def deltas(time, xz): + return {"xz": self.velocity(xz, sigma=time, conditions=conditions, training=training)} + + 
state = {"xz": z} + state = integrate( + deltas, + state, + **integrate_kwargs, + ) + + x = state["xz"] + + return x + + def compute_metrics( + self, + x: Tensor | Sequence[Tensor, ...], + conditions: Tensor = None, + sample_weight: Tensor = None, + stage: str = "training", + ) -> dict[str, Tensor]: + training = stage == "training" + if not self.built: + xz_shape = keras.ops.shape(x) + conditions_shape = None if conditions is None else keras.ops.shape(conditions) + self.build(xz_shape, conditions_shape) + + # sample log-noise level + log_sigma = self.p_mean + self.p_std * keras.random.normal( + ops.shape(x)[:1], dtype=ops.dtype(x), seed=self.seed_generator + ) + # noise level with shape (batch_size, 1) + sigma = ops.exp(log_sigma)[:, None] + + # generate noise vector + z = sigma * keras.random.normal(ops.shape(x), dtype=ops.dtype(x), seed=self.seed_generator) + + # calculate preconditioning + c_skip = self._c_skip_fn(sigma) + c_out = self._c_out_fn(sigma) + c_in = self._c_in_fn(sigma) + c_noise = self._c_noise_fn(sigma) + xz_pre = c_in * (x + z) + + # calculate output of the network + if conditions is None: + xtc = keras.ops.concatenate([xz_pre, c_noise], axis=-1) + else: + xtc = keras.ops.concatenate([xz_pre, c_noise, conditions], axis=-1) + + out = self.output_projector(self.subnet(xtc, training=training), training=training) + + # Calculate loss: + lam = 1 / c_out[:, 0] ** 2 + effective_weight = lam * c_out[:, 0] ** 2 + unweighted_loss = ops.mean((out - 1 / c_out * (x - c_skip * (x + z))) ** 2, axis=-1) + loss = effective_weight * unweighted_loss + loss = weighted_mean(loss, sample_weight) + + base_metrics = super().compute_metrics(x, conditions, sample_weight, stage) + return base_metrics | {"loss": loss} + + def _integration_schedule(self, steps, inverse=False, dtype=None): + def sigma_i(i, steps): + N = steps + 1 + return ( + self.max_sigma ** (1 / self.rho) + + (i / (N - 1)) * (self.min_sigma ** (1 / self.rho) - self.max_sigma ** (1 / self.rho)) + ) ** 
self.rho + + steps = sigma_i(ops.arange(steps + 1, dtype=dtype), steps) + if not inverse: + steps = ops.flip(steps) + return steps diff --git a/tests/test_networks/conftest.py b/tests/test_networks/conftest.py index 955b88164..c38d74170 100644 --- a/tests/test_networks/conftest.py +++ b/tests/test_networks/conftest.py @@ -17,6 +17,23 @@ def subnet(request): return MLP +@pytest.fixture() +def diffusion_model(): + from bayesflow.experimental import DiffusionModel + + return DiffusionModel( + subnet_kwargs={"widths": [64, 64]}, + integrate_kwargs={"method": "rk45", "steps": 100}, + ) + + +@pytest.fixture() +def diffusion_model_subnet(subnet): + from bayesflow.experimental import DiffusionModel + + return DiffusionModel(subnet=subnet) + + @pytest.fixture() def flow_matching(): from bayesflow.networks import FlowMatching @@ -94,7 +111,8 @@ def typical_point_inference_network_subnet(subnet): @pytest.fixture( - params=["typical_point_inference_network", "coupling_flow", "flow_matching", "free_form_flow"], scope="function" + params=["typical_point_inference_network", "coupling_flow", "flow_matching", "diffusion_model", "free_form_flow"], + scope="function", ) def inference_network(request): return request.getfixturevalue(request.param) @@ -105,6 +123,7 @@ def inference_network(request): "typical_point_inference_network_subnet", "coupling_flow_subnet", "flow_matching_subnet", + "diffusion_model_subnet", "free_form_flow_subnet", ], scope="function", @@ -113,7 +132,7 @@ def inference_network_subnet(request): return request.getfixturevalue(request.param) -@pytest.fixture(params=["coupling_flow", "flow_matching", "free_form_flow"], scope="function") +@pytest.fixture(params=["coupling_flow", "flow_matching", "diffusion_model", "free_form_flow"], scope="function") def generative_inference_network(request): return request.getfixturevalue(request.param) From 630a8238727c02ce3b4566edfbc719757522f25f Mon Sep 17 00:00:00 2001 From: arrjon Date: Wed, 16 Apr 2025 11:31:25 +0200 Subject: 
[PATCH 02/52] adding more noise schedules --- bayesflow/experimental/diffusion_model.py | 271 +++++++++++++++++----- 1 file changed, 217 insertions(+), 54 deletions(-) diff --git a/bayesflow/experimental/diffusion_model.py b/bayesflow/experimental/diffusion_model.py index ccaedf22e..088724409 100644 --- a/bayesflow/experimental/diffusion_model.py +++ b/bayesflow/experimental/diffusion_model.py @@ -6,6 +6,7 @@ from bayesflow.types import Tensor, Shape import bayesflow as bf from bayesflow.networks import InferenceNetwork +import math from bayesflow.utils import ( expand_right_as, @@ -21,9 +22,13 @@ @serializable(package="bayesflow.networks") class DiffusionModel(InferenceNetwork): - """Diffusion Model as described as Elucidated Diffusion Model in [1]. + """Diffusion Model as described in this overview paper [1]. + + [1] Variational Diffusion Models 2.0: Understanding Diffusion Model Objectives as the ELBO with Simple Data + Augmentation: Kingma et al. (2023) + [2] Score-Based Generative Modeling through Stochastic Differential Equations: Song et al. 
(2021) + [3] Elucidating the Design Space of Diffusion-Based Generative Models: arXiv:2206.00364 - [1] Elucidating the Design Space of Diffusion-Based Generative Models: arXiv:2206.00364 """ MLP_DEFAULT_CONFIG = { @@ -74,6 +79,8 @@ def __init__( super().__init__(base_distribution=None, **keras_kwargs(kwargs)) + # todo: clean up these configurations + # EDM hyper-parameters # internal tunable parameters not intended to be modified by the average user self.max_sigma = kwargs.get("max_sigma", 80.0) self.min_sigma = kwargs.get("min_sigma", 1e-4) @@ -81,9 +88,25 @@ def __init__( # hyper-parameters for sampling the noise level self.p_mean = kwargs.get("p_mean", -1.2) self.p_std = kwargs.get("p_std", 1.2) + self._noise_schedule = kwargs.get("noise_schedule", "EDM") + + # general hyper-parameters + self._train_time = kwargs.get("train_time", "continuous") + self._timesteps = kwargs.get("timesteps", None) + if self._train_time == "discrete": + if not isinstance(self._timesteps, int): + raise ValueError('timesteps must be defined, if "discrete" training time is set') + self._loss_type = kwargs.get("loss_type", "eps") + self._weighting_function = kwargs.get("weighting_function", None) + self._log_snr_min = kwargs.get("log_snr_min", -15) + self._log_snr_max = kwargs.get("log_snr_max", 15) + self._t_min = self._get_t_from_log_snr(log_snr_t=self._log_snr_max) + self._t_max = self._get_t_from_log_snr(log_snr_t=self._log_snr_min) + self._s_shift_cosine = kwargs.get("s_shift_cosine", 0.0) # latent distribution (not configurable) self.base_distribution = bf.distributions.DiagonalNormal(mean=0.0, std=self.max_sigma) + self.integrate_kwargs = self.INTEGRATE_DEFAULT_CONFIG | (integrate_kwargs or {}) self.sigma_data = sigma_data @@ -142,51 +165,62 @@ def _c_in_fn(self, sigma): return 1.0 / ops.sqrt(sigma**2 + self.sigma_data**2) def _c_noise_fn(self, sigma): - return 0.25 * ops.log(sigma) - - def _denoiser_fn( - self, - xz: Tensor, - sigma: Tensor, - conditions: Tensor = None, - 
training: bool = False, - ): - # calculate output of the network - c_in = self._c_in_fn(sigma) - c_noise = self._c_noise_fn(sigma) - xz_pre = c_in * xz - if conditions is None: - xtc = keras.ops.concatenate([xz_pre, c_noise], axis=-1) - else: - xtc = keras.ops.concatenate([xz_pre, c_noise, conditions], axis=-1) - out = self.output_projector(self.subnet(xtc, training=training), training=training) - return self._c_skip_fn(sigma) * xz + self._c_out_fn(sigma) * out + return 0.25 * ops.log(sigma) # this is the snr times a constant def velocity( self, xz: Tensor, - sigma: float | Tensor, + time: float | Tensor, conditions: Tensor = None, training: bool = False, + clip_x: bool = True, ) -> Tensor: - # transform sigma vector into correct shape - sigma = keras.ops.convert_to_tensor(sigma, dtype=keras.ops.dtype(xz)) - sigma = expand_right_as(sigma, xz) - sigma = keras.ops.broadcast_to(sigma, keras.ops.shape(xz)[:-1] + (1,)) + # calculate the current noise level and transform into correct shape + log_snr_t = expand_right_as(self._get_log_snr(t=time), xz) + alpha_t, sigma_t = self._get_alpha_sigma(log_snr_t=log_snr_t) - d = self._denoiser_fn(xz, sigma, conditions, training=training) - return (xz - d) / sigma + if self._noise_schedule == "EDM": + # scale the input + xz = alpha_t * xz + + if conditions is None: + xtc = keras.ops.concatenate([xz, log_snr_t], axis=-1) + else: + xtc = keras.ops.concatenate([xz, log_snr_t, conditions], axis=-1) + pred = self.output_projector(self.subnet(xtc, training=training), training=training) + + if self._noise_schedule == "EDM": + # scale the output + s = ops.exp(-1 / 2 * log_snr_t) + pred_scaled = self._c_skip_fn(s) * xz + self._c_out_fn(s) * pred + out = (xz - pred_scaled) / s + else: + # first convert prediction to x-prediction + if self._loss_type == "eps": + x_pred = (xz - sigma_t * pred) / alpha_t + else: # self._loss_type == 'v': + x_pred = alpha_t * xz - sigma_t * pred + + # clip x if necessary + if clip_x: + x_pred = ops.clip(x_pred, 
-5, 5) + # convert x to score + score = (alpha_t * x_pred - xz) / ops.square(sigma_t) + # compute velocity for the ODE depending on the noise schedule + f, g = self._get_drift_diffusion(log_snr_t=log_snr_t, x=xz) + out = f - 0.5 * ops.square(g) * score + return out def _velocity_trace( self, xz: Tensor, - sigma: Tensor, + time: Tensor, conditions: Tensor = None, max_steps: int = None, training: bool = False, ) -> (Tensor, Tensor): def f(x): - return self.velocity(x, sigma=sigma, conditions=conditions, training=training) + return self.velocity(x, time=time, conditions=conditions, training=training) v, trace = jacobian_trace(f, xz, max_steps=max_steps, seed=self.seed_generator, return_output=True) @@ -207,7 +241,7 @@ def _forward( if density: def deltas(time, xz): - v, trace = self._velocity_trace(xz, sigma=time, conditions=conditions, training=training) + v, trace = self._velocity_trace(xz, time=time, conditions=conditions, training=training) return {"xz": v, "trace": trace} state = { @@ -226,7 +260,7 @@ def deltas(time, xz): return z, log_density def deltas(time, xz): - return {"xz": self.velocity(xz, sigma=time, conditions=conditions, training=training)} + return {"xz": self.velocity(xz, time=time, conditions=conditions, training=training)} state = {"xz": x} state = integrate( @@ -256,7 +290,7 @@ def _inverse( if density: def deltas(time, xz): - v, trace = self._velocity_trace(xz, sigma=time, conditions=conditions, training=training) + v, trace = self._velocity_trace(xz, time=time, conditions=conditions, training=training) return {"xz": v, "trace": trace} state = { @@ -271,7 +305,7 @@ def deltas(time, xz): return x, log_density def deltas(time, xz): - return {"xz": self.velocity(xz, sigma=time, conditions=conditions, training=training)} + return {"xz": self.velocity(xz, time=time, conditions=conditions, training=training)} state = {"xz": z} state = integrate( @@ -284,6 +318,120 @@ def deltas(time, xz): return x + def _get_drift_diffusion(self, log_snr_t, x=None): 
# t is not truncated + """ + Compute d/dt log(1 + e^(-snr(t))) for the truncated schedules. + """ + t = self._get_t_from_log_snr(log_snr_t=log_snr_t) + # Compute the truncated time t_trunc + t_trunc = self._t_min + (self._t_max - self._t_min) * t + + # Compute d/dx snr(x) based on the noise schedule + if self._noise_schedule == "linear": + # d/dx snr(x) = - 2*x*exp(x^2) / (exp(x^2) - 1) + dsnr_dx = -(2 * t_trunc * ops.exp(t_trunc**2)) / (ops.exp(t_trunc**2) - 1) + elif self._noise_schedule == "cosine": + # d/dx snr(x) = -2*pi/sin(pi*x) + dsnr_dx = -(2 * math.pi) / ops.sin(math.pi * t_trunc) + elif self._noise_schedule == "flow_matching": + # d/dx snr(x) = -2/(x*(1-x)) + dsnr_dx = -2 / (t_trunc * (1 - t_trunc)) + else: + raise ValueError("Invalid 'noise_schedule'.") + + # Chain rule: d/dt snr(t) = d/dx snr(x) * (t_max - t_min) + dsnr_dt = dsnr_dx * (self._t_max - self._t_min) + + # Using the chain rule on f(t) = log(1 + e^(-snr(t))): + # f'(t) = - (e^{-snr(t)} / (1 + e^{-snr(t)})) * dsnr_dt + factor = ops.exp(-log_snr_t) / (1 + ops.exp(-log_snr_t)) + + beta_t = -factor * dsnr_dt + g = ops.sqrt(beta_t) # diffusion term + if x is None: + return g + f = -0.5 * beta_t * x # drift term + return f, g + + def _get_log_snr(self, t: Tensor) -> Tensor: + """get the log signal-to-noise ratio (lambda) for a given diffusion time""" + if self._noise_schedule == "EDM": + # EDM defines tilde sigma ~ N(p_mean, p_std^2) + # tilde sigma^2 = exp(-lambda), hence lambda = -2 * log(sigma) + # sample noise + log_sigma_tilde = self.p_mean + self.p_std * keras.random.normal( + ops.shape(t), dtype=ops.dtype(t), seed=self.seed_generator + ) + # calculate the log signal-to-noise ratio + log_snr_t = -2 * log_sigma_tilde + return log_snr_t + + t_trunc = self._t_min + (self._t_max - self._t_min) * t + if self._noise_schedule == "linear": + log_snr_t = -ops.log(ops.exp(ops.square(t_trunc)) - 1) + elif self._noise_schedule == "cosine": # this is usually used with variance_preserving + log_snr_t = -2 
* ops.log(ops.tan(math.pi * t_trunc / 2)) + 2 * self._s_shift_cosine + elif self._noise_schedule == "flow_matching": # this usually used with sub_variance_preserving + log_snr_t = 2 * ops.log((1 - t_trunc) / t_trunc) + else: + raise ValueError("Unknown noise schedule: {}".format(self._noise_schedule)) + return log_snr_t + + def _get_t_from_log_snr(self, log_snr_t) -> Tensor: + # Invert the noise scheduling to recover t (not truncated) + if self._noise_schedule == "linear": + # SNR = -log(exp(t^2) - 1) + # => t = sqrt(log(1 + exp(-snr))) + t = ops.sqrt(ops.log(1 + ops.exp(-log_snr_t))) + elif self._noise_schedule == "cosine": + # SNR = -2 * log(tan(pi*t/2)) + # => t = 2/pi * arctan(exp(-snr/2)) + t = 2 / math.pi * ops.arctan(ops.exp((2 * self._s_shift_cosine - log_snr_t) / 2)) + elif self._noise_schedule == "flow_matching": + # SNR = 2 * log((1-t)/t) + # => t = 1 / (1 + exp(snr/2)) + t = 1 / (1 + ops.exp(log_snr_t / 2)) + elif self._noise_schedule == "EDM": + raise NotImplementedError + else: + raise ValueError("Unknown noise schedule: {}".format(self._noise_schedule)) + return t + + def _get_alpha_sigma(self, log_snr_t: Tensor) -> tuple[Tensor, Tensor]: + if self._noise_schedule == "EDM": + # EDM: noisy_x = c_in * (x + s * e) = c_in * x + c_in * s * e + # s^2 = exp(-lambda) + s = ops.exp(-1 / 2 * log_snr_t) + c_in = self._c_in_fn(s) + + # alpha = c_in(s), sigma = c_in * s + alpha_t = c_in + sigma_t = c_in * s + else: + # variance preserving noise schedules + alpha_t = keras.ops.sqrt(keras.ops.sigmoid(log_snr_t)) + sigma_t = keras.ops.sqrt(keras.ops.sigmoid(-log_snr_t)) + return alpha_t, sigma_t + + def _get_weights_for_snr(self, log_snr_t: Tensor) -> Tensor: + if self._noise_schedule == "EDM": + # EDM: weights are constructed elsewhere + weights = ops.ones_like(log_snr_t) + return weights + + if self._weighting_function == "likelihood_weighting": # based on Song et al. 
(2021) + g_t = self._get_drift_diffusion(log_snr_t=log_snr_t) + sigma_t = self._get_alpha_sigma(log_snr_t=log_snr_t)[1] + weights = ops.square(g_t / sigma_t) + elif self._weighting_function == "sigmoid": # based on Kingma et al. (2023) + weights = ops.sigmoid(-log_snr_t / 2) + elif self._weighting_function == "min-snr": # based on Hang et al. (2023) + gamma = 5 + weights = 1 / ops.cosh(log_snr_t / 2) * ops.minimum(ops.ones_like(log_snr_t), gamma * ops.exp(-log_snr_t)) + else: + weights = ops.ones_like(log_snr_t) + return weights + def compute_metrics( self, x: Tensor | Sequence[Tensor, ...], @@ -297,36 +445,51 @@ def compute_metrics( conditions_shape = None if conditions is None else keras.ops.shape(conditions) self.build(xz_shape, conditions_shape) - # sample log-noise level - log_sigma = self.p_mean + self.p_std * keras.random.normal( - ops.shape(x)[:1], dtype=ops.dtype(x), seed=self.seed_generator - ) - # noise level with shape (batch_size, 1) - sigma = ops.exp(log_sigma)[:, None] + # sample training diffusion time + if self._train_time == "continuous": + t = keras.random.uniform((keras.ops.shape(x)[0],)) + elif self._train_time == "discrete": + i = keras.random.randint((keras.ops.shape(x)[0],), minval=0, maxval=self._timesteps) + t = keras.ops.cast(i, keras.ops.dtype(x)) / keras.ops.cast(self._timesteps, keras.ops.dtype(x)) + else: + raise NotImplementedError(f"Training time {self._train_time} not implemented") + + # calculate the noise level + log_snr_t = expand_right_as(self._get_log_snr(t), x) + alpha_t, sigma_t = self._get_alpha_sigma(log_snr_t=log_snr_t) # generate noise vector - z = sigma * keras.random.normal(ops.shape(x), dtype=ops.dtype(x), seed=self.seed_generator) + eps_t = keras.random.normal(ops.shape(x), dtype=ops.dtype(x), seed=self.seed_generator) - # calculate preconditioning - c_skip = self._c_skip_fn(sigma) - c_out = self._c_out_fn(sigma) - c_in = self._c_in_fn(sigma) - c_noise = self._c_noise_fn(sigma) - xz_pre = c_in * (x + z) + # diffuse x 
+ diffused_x = alpha_t * x + sigma_t * eps_t # calculate output of the network if conditions is None: - xtc = keras.ops.concatenate([xz_pre, c_noise], axis=-1) + xtc = keras.ops.concatenate([diffused_x, log_snr_t], axis=-1) else: - xtc = keras.ops.concatenate([xz_pre, c_noise, conditions], axis=-1) + xtc = keras.ops.concatenate([diffused_x, log_snr_t, conditions], axis=-1) out = self.output_projector(self.subnet(xtc, training=training), training=training) - # Calculate loss: - lam = 1 / c_out[:, 0] ** 2 - effective_weight = lam * c_out[:, 0] ** 2 - unweighted_loss = ops.mean((out - 1 / c_out * (x - c_skip * (x + z))) ** 2, axis=-1) - loss = effective_weight * unweighted_loss + # Calculate loss + weights_for_snr = self._get_weights_for_snr(log_snr_t=log_snr_t) + if self._loss_type == "eps": + loss = weights_for_snr * ops.mean((out - eps_t) ** 2, axis=-1) + elif self._loss_type == "v": + v_t = alpha_t * eps_t - sigma_t * x + loss = weights_for_snr * ops.mean((out - v_t) ** 2, axis=-1) + elif self._loss_type == "EDM": + s = ops.exp(-1 / 2 * log_snr_t) + c_skip = self._c_skip_fn(s) + c_out = self._c_out_fn(s) + lam = 1 / c_out[:, 0] ** 2 + effective_weight = lam * c_out[:, 0] ** 2 + unweighted_loss = ops.mean((out - 1 / c_out * (x - c_skip * (x + s + eps_t))) ** 2, axis=-1) + loss = effective_weight * unweighted_loss + else: + raise ValueError(f"Unknown loss type: {self._loss_type}") + loss = weighted_mean(loss, sample_weight) base_metrics = super().compute_metrics(x, conditions, sample_weight, stage) From c1cb183c1db6cdf31dabbe3fd8003195d426dbbd Mon Sep 17 00:00:00 2001 From: arrjon Date: Wed, 23 Apr 2025 22:16:53 +0200 Subject: [PATCH 03/52] adding noise scheduler class --- bayesflow/experimental/diffusion_model.py | 638 ++++++++++++++-------- 1 file changed, 400 insertions(+), 238 deletions(-) diff --git a/bayesflow/experimental/diffusion_model.py b/bayesflow/experimental/diffusion_model.py index 088724409..95a0d3584 100644 --- 
a/bayesflow/experimental/diffusion_model.py +++ b/bayesflow/experimental/diffusion_model.py @@ -1,4 +1,5 @@ from collections.abc import Sequence +from abc import ABC, abstractmethod import keras from keras import ops from keras.saving import register_keras_serializable as serializable @@ -20,6 +21,321 @@ ) +match keras.backend.backend(): + case "jax": + from jax.scipy.special import erf, erfinv + + def cdf_gaussian(x, loc, scale): + return 0.5 * (1 + erf((x - loc) / (scale * math.sqrt(2.0)))) + + def icdf_gaussian(x, loc, scale): + return loc + scale * erfinv(2 * x - 1) * math.sqrt(2) + case "numpy": + from scipy.special import erf, erfinv + + def cdf_gaussian(x, loc, scale): + return 0.5 * (1 + erf((x - loc) / (scale * math.sqrt(2.0)))) + + def icdf_gaussian(x, loc, scale): + return loc + scale * erfinv(2 * x - 1) * math.sqrt(2.0) + case "tensorflow": + from tensorflow.math import erf, erfinv + + def cdf_gaussian(x, loc, scale): + return 0.5 * (1 + erf((x - loc) / (scale * math.sqrt(2.0)))) + + def icdf_gaussian(x, loc, scale): + return loc + scale * erfinv(2 * x - 1) * math.sqrt(2.0) + case "torch": + from torch import erf, erfinv + + def cdf_gaussian(x, loc, scale): + return 0.5 * (1 + erf((x - loc) / (scale * math.sqrt(2.0)))) + + def icdf_gaussian(x, loc, scale): + return loc + scale * erfinv(2 * x - 1) * math.sqrt(2.0) + case other: + raise ValueError(f"Backend '{other}' is not supported.") + + +class NoiseSchedule(ABC): + """Noise schedule for diffusion models. We follow the notation from [1]. + + The diffusion process is defined by a noise schedule, which determines how the noise level changes over time. + We define the noise schedule as a function of the log signal-to-noise ratio (lambda), which can be + interchangeably used with the diffusion time (t). + + The noise process is defined as: z = alpha(t) * x + sigma(t) * e, where e ~ N(0, I). + The schedule is defined as: \lambda(t) = \log \sigma^2(t) - \log \alpha^2(t). 
+ + We can also define a weighting function for each noise level for the loss function. Often the noise schedule is + the same for the forward and reverse process, but this is not necessary and can be changed via the training flag. + + [1] Variational Diffusion Models 2.0: Understanding Diffusion Model Objectives as the ELBO with Simple Data + Augmentation: Kingma et al. (2023) + """ + + def __init__(self, name: str): + self.name = name + + # for variance preserving schedules + self.scale_base_distribution = 1.0 + + @abstractmethod + def get_log_snr(self, t: Tensor, training: bool) -> Tensor: + """Get the log signal-to-noise ratio (lambda) for a given diffusion time.""" + pass + + @abstractmethod + def get_t_from_log_snr(self, log_snr_t: Tensor, training: bool) -> Tensor: + """Get the diffusion time (t) from the log signal-to-noise ratio (lambda).""" + pass + + @abstractmethod + def derivative_log_snr(self, log_snr_t: Tensor, training: bool) -> Tensor: + """Compute \beta(t) = d/dt log(1 + e^(-snr(t))). This is usually used for the reverse SDE.""" + pass + + def get_drift_diffusion(self, log_snr_t: Tensor, x: Tensor = None, training: bool = True) -> tuple[Tensor, Tensor]: + """Compute the drift and optionally the diffusion term for the reverse SDE. + Usually it can be derived from the derivative of the schedule: + \beta(t) = d/dt log(1 + e^(-snr(t))) + f(z, t) = -0.5 * \beta(t) * z + g(t)^2 = \beta(t) + + SDE: d(z) = [ f(z, t) - g(t)^2 * score(z, lambda) ] dt + g(t) dW + ODE: dz = [ f(z, t) - 0.5 * g(t)^2 * score(z, lambda) ] dt + + For a variance exploding schedule, one should set f(z, t) = 0. 
+ """ + # Default implementation is to return the diffusion term only + beta = self.derivative_log_snr(log_snr_t=log_snr_t, training=training) + if x is None: # return g only + return ops.sqrt(beta) + f = -0.5 * beta * x + return f, ops.sqrt(beta) + + def get_alpha_sigma(self, log_snr_t: Tensor, training: bool) -> tuple[Tensor, Tensor]: + """Get alpha and sigma for a given log signal-to-noise ratio (lambda). + + Default is a variance preserving schedule. + For a variance exploding schedule, one should set alpha^2 = 1 and sigma^2 = exp(-lambda) + """ + alpha_t = keras.ops.sqrt(keras.ops.sigmoid(log_snr_t)) + sigma_t = keras.ops.sqrt(keras.ops.sigmoid(-log_snr_t)) + return alpha_t, sigma_t + + def get_weights_for_snr(self, log_snr_t: Tensor) -> Tensor: + """Get weights for the signal-to-noise ratio (snr) for a given log signal-to-noise ratio (lambda). Default is 1. + Generally, weighting functions should be defined for a noise prediction loss. + """ + # sigmoid: ops.sigmoid(-log_snr_t / 2), based on Kingma et al. (2023) + # min-snr with gamma = 5, based on Hang et al. (2023) + # 1 / ops.cosh(log_snr_t / 2) * ops.minimum(ops.ones_like(log_snr_t), gamma * ops.exp(-log_snr_t)) + return ops.ones_like(log_snr_t) + + +class LinearNoiseSchedule(NoiseSchedule): + """Linear noise schedule for diffusion models. + + The linear noise schedule with likelihood weighting is based on [1]. + + [1] Maximum Likelihood Training of Score-Based Diffusion Models: Song et al. 
(2021) + """ + + def __init__(self, min_log_snr: float = -15, max_log_snr: float = 15): + super().__init__(name="linear_noise_schedule") + self._log_snr_min = ops.convert_to_tensor(min_log_snr) + self._log_snr_max = ops.convert_to_tensor(max_log_snr) + + self._t_min = self.get_t_from_log_snr(log_snr_t=self._log_snr_max, training=True) + self._t_max = self.get_t_from_log_snr(log_snr_t=self._log_snr_max, training=True) + + def get_log_snr(self, t: Tensor, training: bool) -> Tensor: + """Get the log signal-to-noise ratio (lambda) for a given diffusion time.""" + t_trunc = self._t_min + (self._t_max - self._t_min) * t + # SNR = -log(exp(t^2) - 1) + return -ops.log(ops.exp(ops.square(t_trunc)) - 1) + + def get_t_from_log_snr(self, log_snr_t: Tensor, training: bool) -> Tensor: + """Get the diffusion time (t) from the log signal-to-noise ratio (lambda).""" + # SNR = -log(exp(t^2) - 1) => t = sqrt(log(1 + exp(-snr))) + return ops.sqrt(ops.log(1 + ops.exp(-log_snr_t))) + + def derivative_log_snr(self, log_snr_t: Tensor, training: bool) -> Tensor: + """Compute d/dt log(1 + e^(-snr(t))), which is used for the reverse SDE.""" + t = self.get_t_from_log_snr(log_snr_t=log_snr_t, training=training) + + # Compute the truncated time t_trunc + t_trunc = self._t_min + (self._t_max - self._t_min) * t + dsnr_dx = -(2 * t_trunc * ops.exp(t_trunc**2)) / (ops.exp(t_trunc**2) - 1) + + # Using the chain rule on f(t) = log(1 + e^(-snr(t))): + # f'(t) = - (e^{-snr(t)} / (1 + e^{-snr(t)})) * dsnr_dt + dsnr_dt = dsnr_dx * (self._t_max - self._t_min) + factor = ops.exp(-log_snr_t) / (1 + ops.exp(-log_snr_t)) + return -factor * dsnr_dt + + def get_weights_for_snr(self, log_snr_t: Tensor) -> Tensor: + """Get weights for the signal-to-noise ratio (snr) for a given log signal-to-noise ratio (lambda). + Default is the likelihood weighting based on Song et al. (2021). 
+ """ + g = self.get_drift_diffusion(log_snr_t=log_snr_t) + sigma_t = self.get_alpha_sigma(log_snr_t=log_snr_t, training=True)[1] + return ops.square(g / sigma_t) + + +class CosineNoiseSchedule(NoiseSchedule): + """Cosine noise schedule for diffusion models. This schedule is based on the cosine schedule from [1]. + For images, use s_shift_cosine = log(base_resolution / d), where d is the used resolution of the image. + + [1] Diffusion models beat gans on image synthesis: Dhariwal and Nichol (2022) + """ + + def __init__(self, min_log_snr: float = -15, max_log_snr: float = 15, s_shift_cosine: float = 0.0): + super().__init__(name="cosine_noise_schedule") + self._log_snr_min = ops.convert_to_tensor(min_log_snr) + self._log_snr_max = ops.convert_to_tensor(max_log_snr) + self._s_shift_cosine = ops.convert_to_tensor(s_shift_cosine) + + self._t_min = self.get_t_from_log_snr(log_snr_t=self._log_snr_max, training=True) + self._t_max = self.get_t_from_log_snr(log_snr_t=self._log_snr_max, training=True) + + def get_log_snr(self, t: Tensor, training: bool) -> Tensor: + """Get the log signal-to-noise ratio (lambda) for a given diffusion time.""" + t_trunc = self._t_min + (self._t_max - self._t_min) * t + # SNR = -2 * log(tan(pi*t/2)) + return -2 * ops.log(ops.tan(math.pi * t_trunc / 2)) + 2 * self._s_shift_cosine + + def get_t_from_log_snr(self, log_snr_t: Tensor, training: bool) -> Tensor: + """Get the diffusion time (t) from the log signal-to-noise ratio (lambda).""" + # SNR = -2 * log(tan(pi*t/2)) => t = 2/pi * arctan(exp(-snr/2)) + return 2 / math.pi * ops.arctan(ops.exp((2 * self._s_shift_cosine - log_snr_t) / 2)) + + def derivative_log_snr(self, log_snr_t: Tensor, training: bool) -> Tensor: + """Compute d/dt log(1 + e^(-snr(t))), which is used for the reverse SDE.""" + t = self.get_t_from_log_snr(log_snr_t=log_snr_t, training=training) + + # Compute the truncated time t_trunc + t_trunc = self._t_min + (self._t_max - self._t_min) * t + dsnr_dx = -(2 * math.pi) / 
ops.sin(math.pi * t_trunc) + + # Using the chain rule on f(t) = log(1 + e^(-snr(t))): + # f'(t) = - (e^{-snr(t)} / (1 + e^{-snr(t)})) * dsnr_dt + dsnr_dt = dsnr_dx * (self._t_max - self._t_min) + factor = ops.exp(-log_snr_t) / (1 + ops.exp(-log_snr_t)) + return -factor * dsnr_dt + + def get_weights_for_snr(self, log_snr_t: Tensor) -> Tensor: + """Get weights for the signal-to-noise ratio (snr) for a given log signal-to-noise ratio (lambda). + Default is the sigmoid weighting based on Kingma et al. (2023). + """ + return ops.sigmoid(-log_snr_t / 2) + + +class EDMNoiseSchedule(NoiseSchedule): + """EDM noise schedule for diffusion models. This schedule is based on the EDM paper [1]. + + [1] Elucidating the Design Space of Diffusion-Based Generative Models: Karras et al. (2022) + """ + + def __init__(self, sigma_data: float = 0.5, sigma_min: float = 0.002, sigma_max: float = 80): + super().__init__(name="edm_noise_schedule") + self.sigma_data = ops.convert_to_tensor(sigma_data) + self.sigma_max = ops.convert_to_tensor(sigma_max) + self.sigma_min = ops.convert_to_tensor(sigma_min) + self.p_mean = ops.convert_to_tensor(-1.2) + self.p_std = ops.convert_to_tensor(1.2) + self.rho = ops.convert_to_tensor(7) + + # convert EDM parameters to signal-to-noise ratio formulation + self._log_snr_min = -2 * ops.log(sigma_max) + self._log_snr_max = -2 * ops.log(sigma_min) + self._t_min = self.get_t_from_log_snr(log_snr_t=self._log_snr_max, training=True) + self._t_max = self.get_t_from_log_snr(log_snr_t=self._log_snr_max, training=True) + + # EDM is a variance exploding schedule + self.scale_base_distribution = ops.exp(-self._log_snr_min) + + def get_log_snr(self, t: Tensor, training: bool) -> Tensor: + """Get the log signal-to-noise ratio (lambda) for a given diffusion time.""" + t_trunc = self._t_min + (self._t_max - self._t_min) * t + if training: + snr = -icdf_gaussian(x=t_trunc, loc=-2 * self.p_mean, scale=2 * self.p_std) + snr = keras.ops.clip(snr, x_min=self._log_snr_min, 
x_max=self._log_snr_max) + else: # sampling + snr = ( + -2 + * self.rho + * ops.log( + self.sigma_max ** (1 / self.rho) + + (1 - t_trunc) * (self.sigma_min ** (1 / self.rho) - self.sigma_max ** (1 / self.rho)) + ) + ) + return snr + + def get_t_from_log_snr(self, log_snr_t: Tensor, training: bool) -> Tensor: + """Get the diffusion time (t) from the log signal-to-noise ratio (lambda).""" + if training: + # SNR = -dist.icdf(t_trunc) => t = dist.cdf(-snr) + t = cdf_gaussian(x=-log_snr_t, loc=-2 * self.p_mean, scale=2 * self.p_std) + else: # sampling + # SNR = -2 * rho * log(sigma_max ** (1/rho) + (1 - t) * (sigma_min ** (1/rho) - sigma_max ** (1/rho))) + # => t = 1 - ((exp(-snr/(2*rho)) - sigma_max ** (1/rho)) / (sigma_min ** (1/rho) - sigma_max ** (1/rho))) + t = 1 - ( + (ops.exp(-log_snr_t / (2 * self.rho)) - self.sigma_max ** (1 / self.rho)) + / (self.sigma_min ** (1 / self.rho) - self.sigma_max ** (1 / self.rho)) + ) + return t + + def derivative_log_snr(self, log_snr_t: Tensor, training: bool) -> Tensor: + """Compute d/dt log(1 + e^(-snr(t))), which is used for the reverse SDE.""" + if training: + raise NotImplementedError("Derivative of log SNR is not implemented for training mode.") + # sampling mode + t = self.get_t_from_log_snr(log_snr_t=log_snr_t, training=training) + t_trunc = self._t_min + (self._t_max - self._t_min) * t + + # SNR = -2*rho*log(s_max + (1 - x)*(s_min - s_max)) + s_max = self.sigma_max ** (1 / self.rho) + s_min = self.sigma_min ** (1 / self.rho) + u = s_max + (1 - t_trunc) * (s_min - s_max) + # d/dx snr = 2*rho*(s_min - s_max) / u + dsnr_dx = 2 * self.rho * (s_min - s_max) / u + + # Using the chain rule on f(t) = log(1 + e^(-snr(t))): + # f'(t) = - (e^{-snr(t)} / (1 + e^{-snr(t)})) * dsnr_dt + dsnr_dt = dsnr_dx * (self._t_max - self._t_min) + factor = ops.exp(-log_snr_t) / (1 + ops.exp(-log_snr_t)) + return -factor * dsnr_dt + + def get_drift_diffusion(self, log_snr_t: Tensor, x: Tensor = None, training: bool = True) -> tuple[Tensor, 
Tensor]: + """Compute the drift and optionally the diffusion term for the variance exploding reverse SDE. + \beta(t) = d/dt log(1 + e^(-snr(t))) + f(z, t) = 0 + g(t)^2 = \beta(t) + + SDE: d(z) = [ f(z, t) - g(t)^2 * score(z, lambda) ] dt + g(t) dW + ODE: dz = [ f(z, t) - 0.5 * g(t)^2 * score(z, lambda) ] dt + """ + # Default implementation is to return the diffusion term only + beta = self.derivative_log_snr(log_snr_t=log_snr_t, training=training) + if x is None: # return g only + return ops.sqrt(beta) + f = ops.zeros_like(beta) # variance exploding schedule + return f, ops.sqrt(beta) + + def get_alpha_sigma(self, log_snr_t: Tensor, training: bool) -> tuple[Tensor, Tensor]: + """Get alpha and sigma for a given log signal-to-noise ratio (lambda) for a variance exploding schedule.""" + alpha_t = ops.ones_like(log_snr_t) + sigma_t = ops.sqrt(ops.exp(-log_snr_t)) + return alpha_t, sigma_t + + def get_weights_for_snr(self, log_snr_t: Tensor) -> Tensor: + """Get weights for the signal-to-noise ratio (snr) for a given log signal-to-noise ratio (lambda).""" + return ops.exp(-log_snr_t) + 0.5**2 + + @serializable(package="bayesflow.networks") class DiffusionModel(InferenceNetwork): """Diffusion Model as described in this overview paper [1]. @@ -27,8 +343,6 @@ class DiffusionModel(InferenceNetwork): [1] Variational Diffusion Models 2.0: Understanding Diffusion Model Objectives as the ELBO with Simple Data Augmentation: Kingma et al. (2023) [2] Score-Based Generative Modeling through Stochastic Differential Equations: Song et al. 
(2021) - [3] Elucidating the Design Space of Diffusion-Based Generative Models: arXiv:2206.00364 - """ MLP_DEFAULT_CONFIG = { @@ -50,7 +364,8 @@ def __init__( subnet: str | type = "mlp", integrate_kwargs: dict[str, any] = None, subnet_kwargs: dict[str, any] = None, - sigma_data=1.0, + noise_schedule: str = "cosine", + prediction_type: str = "v", **kwargs, ): """ @@ -71,46 +386,43 @@ def __init__( Additional keyword arguments for the integration process. Default is None. subnet_kwargs : dict[str, any], optional Keyword arguments passed to the subnet constructor or used to update the default MLP settings. - sigma_data : float, optional - Averaged standard deviation of the target distribution. Default is 1.0. + noise_schedule : str, optional + The noise schedule used for the diffusion process. Can be "linear", "cosine", or "edm". + Default is "cosine". + prediction_type: str, optional + The type of prediction used in the diffusion model. Can be "eps", "v" or "F" (EDM). Default is "v". **kwargs Additional keyword arguments passed to the subnet and other components. 
""" super().__init__(base_distribution=None, **keras_kwargs(kwargs)) - # todo: clean up these configurations - # EDM hyper-parameters - # internal tunable parameters not intended to be modified by the average user - self.max_sigma = kwargs.get("max_sigma", 80.0) - self.min_sigma = kwargs.get("min_sigma", 1e-4) - self.rho = kwargs.get("rho", 7) - # hyper-parameters for sampling the noise level - self.p_mean = kwargs.get("p_mean", -1.2) - self.p_std = kwargs.get("p_std", 1.2) - self._noise_schedule = kwargs.get("noise_schedule", "EDM") - - # general hyper-parameters - self._train_time = kwargs.get("train_time", "continuous") - self._timesteps = kwargs.get("timesteps", None) - if self._train_time == "discrete": - if not isinstance(self._timesteps, int): - raise ValueError('timesteps must be defined, if "discrete" training time is set') - self._loss_type = kwargs.get("loss_type", "eps") - self._weighting_function = kwargs.get("weighting_function", None) - self._log_snr_min = kwargs.get("log_snr_min", -15) - self._log_snr_max = kwargs.get("log_snr_max", 15) - self._t_min = self._get_t_from_log_snr(log_snr_t=self._log_snr_max) - self._t_max = self._get_t_from_log_snr(log_snr_t=self._log_snr_min) - self._s_shift_cosine = kwargs.get("s_shift_cosine", 0.0) + if isinstance(noise_schedule, str): + if noise_schedule == "linear": + noise_schedule = LinearNoiseSchedule() + elif noise_schedule == "cosine": + noise_schedule = CosineNoiseSchedule() + elif noise_schedule == "edm": + noise_schedule = EDMNoiseSchedule() + else: + raise ValueError(f"Unknown noise schedule: {noise_schedule}") + elif not isinstance(noise_schedule, NoiseSchedule): + raise ValueError(f"Unknown noise schedule: {noise_schedule}") + self.noise_schedule = noise_schedule + + if prediction_type not in ["eps", "v", "F"]: # F is EDM + raise ValueError(f"Unknown prediction type: {prediction_type}") + self.prediction_type = prediction_type + + # clipping of prediction (after it was transformed to x-prediction) + 
self._clip_min = -5.0 + self._clip_max = 5.0 # latent distribution (not configurable) - self.base_distribution = bf.distributions.DiagonalNormal(mean=0.0, std=self.max_sigma) - + self.base_distribution = bf.distributions.DiagonalNormal( + mean=0.0, std=self.noise_schedule.scale_base_distribution + ) self.integrate_kwargs = self.INTEGRATE_DEFAULT_CONFIG | (integrate_kwargs or {}) - - self.sigma_data = sigma_data - self.seed_generator = keras.random.SeedGenerator() subnet_kwargs = subnet_kwargs or {} @@ -124,7 +436,8 @@ def __init__( self.config = { "integrate_kwargs": self.integrate_kwargs, "subnet_kwargs": subnet_kwargs, - "sigma_data": sigma_data, + "noise_schedule": self.noise_schedule, + "prediction_type": self.prediction_type, **kwargs, } self.config = serialize_value_or_type(self.config, "subnet", subnet) @@ -155,17 +468,29 @@ def from_config(cls, config): config = deserialize_value_or_type(config, "subnet") return cls(**config) - def _c_skip_fn(self, sigma): - return self.sigma_data**2 / (sigma**2 + self.sigma_data**2) - - def _c_out_fn(self, sigma): - return sigma * self.sigma_data / ops.sqrt(self.sigma_data**2 + sigma**2) - - def _c_in_fn(self, sigma): - return 1.0 / ops.sqrt(sigma**2 + self.sigma_data**2) - - def _c_noise_fn(self, sigma): - return 0.25 * ops.log(sigma) # this is the snr times a constant + def convert_prediction_to_x( + self, pred: Tensor, z: Tensor, alpha_t: Tensor, sigma_t: Tensor, log_snr_t: Tensor, clip_x: bool + ) -> Tensor: + """Convert the prediction of the neural network to the x space.""" + if self.prediction_type == "v": + # convert v into x + x = alpha_t * z - sigma_t * pred + elif self.prediction_type == "e": + # convert noise prediction into x + x = (z - sigma_t * pred) / alpha_t + elif self.prediction_type == "x": + x = pred + elif self.prediction_type == "score": + x = (z + sigma_t**2 * pred) / alpha_t + else: # self.prediction_type == 'F': # EDM + sigma_data = self.noise_schedule.sigma_data + x1 = (sigma_data**2 * alpha_t) / 
(ops.exp(-log_snr_t) + sigma_data**2) + x2 = ops.exp(-log_snr_t / 2) * sigma_data / ops.sqrt(ops.exp(-log_snr_t) + sigma_data**2) + x = x1 * z + x2 * pred + + if clip_x: + x = keras.ops.clip(x, self._clip_min, self._clip_max) + return x def velocity( self, @@ -176,12 +501,8 @@ def velocity( clip_x: bool = True, ) -> Tensor: # calculate the current noise level and transform into correct shape - log_snr_t = expand_right_as(self._get_log_snr(t=time), xz) - alpha_t, sigma_t = self._get_alpha_sigma(log_snr_t=log_snr_t) - - if self._noise_schedule == "EDM": - # scale the input - xz = alpha_t * xz + log_snr_t = expand_right_as(self.noise_schedule.get_log_snr(t=time, training=training), xz) + alpha_t, sigma_t = self.noise_schedule.get_alpha_sigma(log_snr_t=log_snr_t, training=training) if conditions is None: xtc = keras.ops.concatenate([xz, log_snr_t], axis=-1) @@ -189,26 +510,17 @@ def velocity( xtc = keras.ops.concatenate([xz, log_snr_t, conditions], axis=-1) pred = self.output_projector(self.subnet(xtc, training=training), training=training) - if self._noise_schedule == "EDM": - # scale the output - s = ops.exp(-1 / 2 * log_snr_t) - pred_scaled = self._c_skip_fn(s) * xz + self._c_out_fn(s) * pred - out = (xz - pred_scaled) / s - else: - # first convert prediction to x-prediction - if self._loss_type == "eps": - x_pred = (xz - sigma_t * pred) / alpha_t - else: # self._loss_type == 'v': - x_pred = alpha_t * xz - sigma_t * pred - - # clip x if necessary - if clip_x: - x_pred = ops.clip(x_pred, -5, 5) - # convert x to score - score = (alpha_t * x_pred - xz) / ops.square(sigma_t) - # compute velocity for the ODE depending on the noise schedule - f, g = self._get_drift_diffusion(log_snr_t=log_snr_t, x=xz) - out = f - 0.5 * ops.square(g) * score + x_pred = self.convert_prediction_to_x( + pred=pred, z=xz, alpha_t=alpha_t, sigma_t=sigma_t, log_snr_t=log_snr_t, clip_x=clip_x + ) + # convert x to score + score = (alpha_t * x_pred - xz) / ops.square(sigma_t) + + # compute velocity 
for the ODE depending on the noise schedule + f, g = self.noise_schedule.get_drift_diffusion(log_snr_t=log_snr_t, x=xz) + out = f - 0.5 * ops.square(g) * score + + # todo: for the SDE: d(z) = [ f(z, t) - g(t)^2 * score(z, lambda) ] dt + g(t) dW return out def _velocity_trace( @@ -235,9 +547,6 @@ def _forward( **kwargs, ) -> Tensor | tuple[Tensor, Tensor]: integrate_kwargs = self.integrate_kwargs | kwargs - if isinstance(integrate_kwargs["steps"], int): - # set schedule for specified number of steps - integrate_kwargs["steps"] = self._integration_schedule(integrate_kwargs["steps"], dtype=ops.dtype(x)) if density: def deltas(time, xz): @@ -268,9 +577,7 @@ def deltas(time, xz): state, **integrate_kwargs, ) - z = state["xz"] - return z def _inverse( @@ -282,11 +589,6 @@ def _inverse( **kwargs, ) -> Tensor | tuple[Tensor, Tensor]: integrate_kwargs = self.integrate_kwargs | kwargs - if isinstance(integrate_kwargs["steps"], int): - # set schedule for specified number of steps - integrate_kwargs["steps"] = self._integration_schedule( - integrate_kwargs["steps"], inverse=True, dtype=ops.dtype(z) - ) if density: def deltas(time, xz): @@ -315,123 +617,8 @@ def deltas(time, xz): ) x = state["xz"] - return x - def _get_drift_diffusion(self, log_snr_t, x=None): # t is not truncated - """ - Compute d/dt log(1 + e^(-snr(t))) for the truncated schedules. 
- """ - t = self._get_t_from_log_snr(log_snr_t=log_snr_t) - # Compute the truncated time t_trunc - t_trunc = self._t_min + (self._t_max - self._t_min) * t - - # Compute d/dx snr(x) based on the noise schedule - if self._noise_schedule == "linear": - # d/dx snr(x) = - 2*x*exp(x^2) / (exp(x^2) - 1) - dsnr_dx = -(2 * t_trunc * ops.exp(t_trunc**2)) / (ops.exp(t_trunc**2) - 1) - elif self._noise_schedule == "cosine": - # d/dx snr(x) = -2*pi/sin(pi*x) - dsnr_dx = -(2 * math.pi) / ops.sin(math.pi * t_trunc) - elif self._noise_schedule == "flow_matching": - # d/dx snr(x) = -2/(x*(1-x)) - dsnr_dx = -2 / (t_trunc * (1 - t_trunc)) - else: - raise ValueError("Invalid 'noise_schedule'.") - - # Chain rule: d/dt snr(t) = d/dx snr(x) * (t_max - t_min) - dsnr_dt = dsnr_dx * (self._t_max - self._t_min) - - # Using the chain rule on f(t) = log(1 + e^(-snr(t))): - # f'(t) = - (e^{-snr(t)} / (1 + e^{-snr(t)})) * dsnr_dt - factor = ops.exp(-log_snr_t) / (1 + ops.exp(-log_snr_t)) - - beta_t = -factor * dsnr_dt - g = ops.sqrt(beta_t) # diffusion term - if x is None: - return g - f = -0.5 * beta_t * x # drift term - return f, g - - def _get_log_snr(self, t: Tensor) -> Tensor: - """get the log signal-to-noise ratio (lambda) for a given diffusion time""" - if self._noise_schedule == "EDM": - # EDM defines tilde sigma ~ N(p_mean, p_std^2) - # tilde sigma^2 = exp(-lambda), hence lambda = -2 * log(sigma) - # sample noise - log_sigma_tilde = self.p_mean + self.p_std * keras.random.normal( - ops.shape(t), dtype=ops.dtype(t), seed=self.seed_generator - ) - # calculate the log signal-to-noise ratio - log_snr_t = -2 * log_sigma_tilde - return log_snr_t - - t_trunc = self._t_min + (self._t_max - self._t_min) * t - if self._noise_schedule == "linear": - log_snr_t = -ops.log(ops.exp(ops.square(t_trunc)) - 1) - elif self._noise_schedule == "cosine": # this is usually used with variance_preserving - log_snr_t = -2 * ops.log(ops.tan(math.pi * t_trunc / 2)) + 2 * self._s_shift_cosine - elif 
self._noise_schedule == "flow_matching": # this usually used with sub_variance_preserving - log_snr_t = 2 * ops.log((1 - t_trunc) / t_trunc) - else: - raise ValueError("Unknown noise schedule: {}".format(self._noise_schedule)) - return log_snr_t - - def _get_t_from_log_snr(self, log_snr_t) -> Tensor: - # Invert the noise scheduling to recover t (not truncated) - if self._noise_schedule == "linear": - # SNR = -log(exp(t^2) - 1) - # => t = sqrt(log(1 + exp(-snr))) - t = ops.sqrt(ops.log(1 + ops.exp(-log_snr_t))) - elif self._noise_schedule == "cosine": - # SNR = -2 * log(tan(pi*t/2)) - # => t = 2/pi * arctan(exp(-snr/2)) - t = 2 / math.pi * ops.arctan(ops.exp((2 * self._s_shift_cosine - log_snr_t) / 2)) - elif self._noise_schedule == "flow_matching": - # SNR = 2 * log((1-t)/t) - # => t = 1 / (1 + exp(snr/2)) - t = 1 / (1 + ops.exp(log_snr_t / 2)) - elif self._noise_schedule == "EDM": - raise NotImplementedError - else: - raise ValueError("Unknown noise schedule: {}".format(self._noise_schedule)) - return t - - def _get_alpha_sigma(self, log_snr_t: Tensor) -> tuple[Tensor, Tensor]: - if self._noise_schedule == "EDM": - # EDM: noisy_x = c_in * (x + s * e) = c_in * x + c_in * s * e - # s^2 = exp(-lambda) - s = ops.exp(-1 / 2 * log_snr_t) - c_in = self._c_in_fn(s) - - # alpha = c_in(s), sigma = c_in * s - alpha_t = c_in - sigma_t = c_in * s - else: - # variance preserving noise schedules - alpha_t = keras.ops.sqrt(keras.ops.sigmoid(log_snr_t)) - sigma_t = keras.ops.sqrt(keras.ops.sigmoid(-log_snr_t)) - return alpha_t, sigma_t - - def _get_weights_for_snr(self, log_snr_t: Tensor) -> Tensor: - if self._noise_schedule == "EDM": - # EDM: weights are constructed elsewhere - weights = ops.ones_like(log_snr_t) - return weights - - if self._weighting_function == "likelihood_weighting": # based on Song et al. 
(2021) - g_t = self._get_drift_diffusion(log_snr_t=log_snr_t) - sigma_t = self._get_alpha_sigma(log_snr_t=log_snr_t)[1] - weights = ops.square(g_t / sigma_t) - elif self._weighting_function == "sigmoid": # based on Kingma et al. (2023) - weights = ops.sigmoid(-log_snr_t / 2) - elif self._weighting_function == "min-snr": # based on Hang et al. (2023) - gamma = 5 - weights = 1 / ops.cosh(log_snr_t / 2) * ops.minimum(ops.ones_like(log_snr_t), gamma * ops.exp(-log_snr_t)) - else: - weights = ops.ones_like(log_snr_t) - return weights - def compute_metrics( self, x: Tensor | Sequence[Tensor, ...], @@ -446,17 +633,13 @@ def compute_metrics( self.build(xz_shape, conditions_shape) # sample training diffusion time - if self._train_time == "continuous": - t = keras.random.uniform((keras.ops.shape(x)[0],)) - elif self._train_time == "discrete": - i = keras.random.randint((keras.ops.shape(x)[0],), minval=0, maxval=self._timesteps) - t = keras.ops.cast(i, keras.ops.dtype(x)) / keras.ops.cast(self._timesteps, keras.ops.dtype(x)) - else: - raise NotImplementedError(f"Training time {self._train_time} not implemented") + t = keras.random.uniform((keras.ops.shape(x)[0],)) + # i = keras.random.randint((keras.ops.shape(x)[0],), minval=0, maxval=self._timesteps) + # t = keras.ops.cast(i, keras.ops.dtype(x)) / keras.ops.cast(self._timesteps, keras.ops.dtype(x)) # calculate the noise level - log_snr_t = expand_right_as(self._get_log_snr(t), x) - alpha_t, sigma_t = self._get_alpha_sigma(log_snr_t=log_snr_t) + log_snr_t = expand_right_as(self.noise_schedule.get_log_snr(t, training=training), x) + alpha_t, sigma_t = self.noise_schedule.get_alpha_sigma(log_snr_t=log_snr_t, training=training) # generate noise vector eps_t = keras.random.normal(ops.shape(x), dtype=ops.dtype(x), seed=self.seed_generator) @@ -469,41 +652,20 @@ def compute_metrics( xtc = keras.ops.concatenate([diffused_x, log_snr_t], axis=-1) else: xtc = keras.ops.concatenate([diffused_x, log_snr_t, conditions], axis=-1) + pred = 
self.output_projector(self.subnet(xtc, training=training), training=training) - out = self.output_projector(self.subnet(xtc, training=training), training=training) - - # Calculate loss - weights_for_snr = self._get_weights_for_snr(log_snr_t=log_snr_t) - if self._loss_type == "eps": - loss = weights_for_snr * ops.mean((out - eps_t) ** 2, axis=-1) - elif self._loss_type == "v": - v_t = alpha_t * eps_t - sigma_t * x - loss = weights_for_snr * ops.mean((out - v_t) ** 2, axis=-1) - elif self._loss_type == "EDM": - s = ops.exp(-1 / 2 * log_snr_t) - c_skip = self._c_skip_fn(s) - c_out = self._c_out_fn(s) - lam = 1 / c_out[:, 0] ** 2 - effective_weight = lam * c_out[:, 0] ** 2 - unweighted_loss = ops.mean((out - 1 / c_out * (x - c_skip * (x + s + eps_t))) ** 2, axis=-1) - loss = effective_weight * unweighted_loss - else: - raise ValueError(f"Unknown loss type: {self._loss_type}") + x_pred = self.convert_prediction_to_x( + pred=pred, z=diffused_x, alpha_t=alpha_t, sigma_t=sigma_t, log_snr_t=log_snr_t, clip_x=True + ) + # convert x to epsilon prediction + out = (alpha_t * diffused_x - x_pred) / sigma_t + # Calculate loss based on noise prediction + weights_for_snr = self.noise_schedule.get_weights_for_snr(log_snr_t=log_snr_t) + loss = weights_for_snr * ops.mean((out - eps_t) ** 2, axis=-1) + + # apply sample weight loss = weighted_mean(loss, sample_weight) base_metrics = super().compute_metrics(x, conditions, sample_weight, stage) return base_metrics | {"loss": loss} - - def _integration_schedule(self, steps, inverse=False, dtype=None): - def sigma_i(i, steps): - N = steps + 1 - return ( - self.max_sigma ** (1 / self.rho) - + (i / (N - 1)) * (self.min_sigma ** (1 / self.rho) - self.max_sigma ** (1 / self.rho)) - ) ** self.rho - - steps = sigma_i(ops.arange(steps + 1, dtype=dtype), steps) - if not inverse: - steps = ops.flip(steps) - return steps From 49c0cb782406b93ce87813f721f6b51ae330d102 Mon Sep 17 00:00:00 2001 From: arrjon Date: Wed, 23 Apr 2025 22:23:46 +0200 Subject: 
[PATCH 04/52] adding noise scheduler class --- tests/test_networks/conftest.py | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/tests/test_networks/conftest.py b/tests/test_networks/conftest.py index c38d74170..187d76340 100644 --- a/tests/test_networks/conftest.py +++ b/tests/test_networks/conftest.py @@ -111,7 +111,15 @@ def typical_point_inference_network_subnet(subnet): @pytest.fixture( - params=["typical_point_inference_network", "coupling_flow", "flow_matching", "diffusion_model", "free_form_flow"], + params=[ + "typical_point_inference_network", + "affine_coupling_flow", + "spline_coupling_flow", + "flow_matching", + "diffusion_model", + "free_form_flow", + "consistency_model", + ], scope="function", ) def inference_network(request): @@ -132,7 +140,10 @@ def inference_network_subnet(request): return request.getfixturevalue(request.param) -@pytest.fixture(params=["coupling_flow", "flow_matching", "diffusion_model", "free_form_flow"], scope="function") +@pytest.fixture( + params=["coupling_flow", "flow_matching", "diffusion_model", "free_form_flow", "consistency_model"], + scope="function", +) def generative_inference_network(request): return request.getfixturevalue(request.param) From e84004650cfe524ac1c2cbdfd3f609f696c3cfc3 Mon Sep 17 00:00:00 2001 From: arrjon Date: Thu, 24 Apr 2025 09:41:40 +0200 Subject: [PATCH 05/52] fix backend --- bayesflow/experimental/diffusion_model.py | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/bayesflow/experimental/diffusion_model.py b/bayesflow/experimental/diffusion_model.py index 95a0d3584..8321bdcd7 100644 --- a/bayesflow/experimental/diffusion_model.py +++ b/bayesflow/experimental/diffusion_model.py @@ -261,7 +261,11 @@ def get_log_snr(self, t: Tensor, training: bool) -> Tensor: """Get the log signal-to-noise ratio (lambda) for a given diffusion time.""" t_trunc = self._t_min + (self._t_max - self._t_min) * t if training: - snr = 
-icdf_gaussian(x=t_trunc, loc=-2 * self.p_mean, scale=2 * self.p_std) + # SNR = -dist.icdf(t_trunc) + loc = -2 * self.p_mean + scale = 2 * self.p_std + x = t_trunc + snr = -(loc + scale * ops.erfinv(2 * x - 1) * math.sqrt(2)) snr = keras.ops.clip(snr, x_min=self._log_snr_min, x_max=self._log_snr_max) else: # sampling snr = ( @@ -278,7 +282,10 @@ def get_t_from_log_snr(self, log_snr_t: Tensor, training: bool) -> Tensor: """Get the diffusion time (t) from the log signal-to-noise ratio (lambda).""" if training: # SNR = -dist.icdf(t_trunc) => t = dist.cdf(-snr) - t = cdf_gaussian(x=-log_snr_t, loc=-2 * self.p_mean, scale=2 * self.p_std) + loc = -2 * self.p_mean + scale = 2 * self.p_std + x = -log_snr_t + t = 0.5 * (1 + ops.erf((x - loc) / (scale * math.sqrt(2.0)))) else: # sampling # SNR = -2 * rho * log(sigma_max ** (1/rho) + (1 - t) * (sigma_min ** (1/rho) - sigma_max ** (1/rho))) # => t = 1 - ((exp(-snr/(2*rho)) - sigma_max ** (1/rho)) / (sigma_min ** (1/rho) - sigma_max ** (1/rho))) @@ -632,8 +639,11 @@ def compute_metrics( conditions_shape = None if conditions is None else keras.ops.shape(conditions) self.build(xz_shape, conditions_shape) - # sample training diffusion time - t = keras.random.uniform((keras.ops.shape(x)[0],)) + # sample training diffusion time as low discrepancy sequence to decrease variance + # t_i = \mod (u_0 + i/k, 1) + u0 = keras.random.uniform(shape=(1,)) + i = ops.arange(0, keras.ops.shape(x)[0]) # tensor of indices + t = (u0 + i / keras.ops.shape(x)[0]) % 1 # i = keras.random.randint((keras.ops.shape(x)[0],), minval=0, maxval=self._timesteps) # t = keras.ops.cast(i, keras.ops.dtype(x)) / keras.ops.cast(self._timesteps, keras.ops.dtype(x)) From f2d7de4401c14fc6fb71f44ab841c50b79b8c700 Mon Sep 17 00:00:00 2001 From: arrjon Date: Thu, 24 Apr 2025 09:47:17 +0200 Subject: [PATCH 06/52] fix backend --- bayesflow/experimental/diffusion_model.py | 41 +---------------------- 1 file changed, 1 insertion(+), 40 deletions(-) diff --git 
a/bayesflow/experimental/diffusion_model.py b/bayesflow/experimental/diffusion_model.py index 8321bdcd7..5aa8b1861 100644 --- a/bayesflow/experimental/diffusion_model.py +++ b/bayesflow/experimental/diffusion_model.py @@ -13,7 +13,6 @@ expand_right_as, find_network, jacobian_trace, - keras_kwargs, serialize_value_or_type, deserialize_value_or_type, weighted_mean, @@ -21,43 +20,6 @@ ) -match keras.backend.backend(): - case "jax": - from jax.scipy.special import erf, erfinv - - def cdf_gaussian(x, loc, scale): - return 0.5 * (1 + erf((x - loc) / (scale * math.sqrt(2.0)))) - - def icdf_gaussian(x, loc, scale): - return loc + scale * erfinv(2 * x - 1) * math.sqrt(2) - case "numpy": - from scipy.special import erf, erfinv - - def cdf_gaussian(x, loc, scale): - return 0.5 * (1 + erf((x - loc) / (scale * math.sqrt(2.0)))) - - def icdf_gaussian(x, loc, scale): - return loc + scale * erfinv(2 * x - 1) * math.sqrt(2.0) - case "tensorflow": - from tensorflow.math import erf, erfinv - - def cdf_gaussian(x, loc, scale): - return 0.5 * (1 + erf((x - loc) / (scale * math.sqrt(2.0)))) - - def icdf_gaussian(x, loc, scale): - return loc + scale * erfinv(2 * x - 1) * math.sqrt(2.0) - case "torch": - from torch import erf, erfinv - - def cdf_gaussian(x, loc, scale): - return 0.5 * (1 + erf((x - loc) / (scale * math.sqrt(2.0)))) - - def icdf_gaussian(x, loc, scale): - return loc + scale * erfinv(2 * x - 1) * math.sqrt(2.0) - case other: - raise ValueError(f"Backend '{other}' is not supported.") - - class NoiseSchedule(ABC): """Noise schedule for diffusion models. We follow the notation from [1]. @@ -401,8 +363,7 @@ def __init__( **kwargs Additional keyword arguments passed to the subnet and other components. 
""" - - super().__init__(base_distribution=None, **keras_kwargs(kwargs)) + super().__init__(base_distribution="normal", **kwargs) if isinstance(noise_schedule, str): if noise_schedule == "linear": From d5dc2ba3667f8c3f41b98927a1bb246898c5c36e Mon Sep 17 00:00:00 2001 From: Valentin Pratz Date: Thu, 24 Apr 2025 07:56:52 +0000 Subject: [PATCH 07/52] wip: adapt network to layer paradigm --- bayesflow/experimental/diffusion_model.py | 85 +++++++++++++---------- 1 file changed, 49 insertions(+), 36 deletions(-) diff --git a/bayesflow/experimental/diffusion_model.py b/bayesflow/experimental/diffusion_model.py index 95a0d3584..6ed22595f 100644 --- a/bayesflow/experimental/diffusion_model.py +++ b/bayesflow/experimental/diffusion_model.py @@ -2,8 +2,8 @@ from abc import ABC, abstractmethod import keras from keras import ops -from keras.saving import register_keras_serializable as serializable +from bayesflow.utils.serialization import serialize, deserialize, serializable from bayesflow.types import Tensor, Shape import bayesflow as bf from bayesflow.networks import InferenceNetwork @@ -13,9 +13,7 @@ expand_right_as, find_network, jacobian_trace, - keras_kwargs, - serialize_value_or_type, - deserialize_value_or_type, + layer_kwargs, weighted_mean, integrate, ) @@ -145,8 +143,8 @@ class LinearNoiseSchedule(NoiseSchedule): def __init__(self, min_log_snr: float = -15, max_log_snr: float = 15): super().__init__(name="linear_noise_schedule") - self._log_snr_min = ops.convert_to_tensor(min_log_snr) - self._log_snr_max = ops.convert_to_tensor(max_log_snr) + self._log_snr_min = min_log_snr + self._log_snr_max = max_log_snr self._t_min = self.get_t_from_log_snr(log_snr_t=self._log_snr_max, training=True) self._t_max = self.get_t_from_log_snr(log_snr_t=self._log_snr_max, training=True) @@ -192,11 +190,11 @@ class CosineNoiseSchedule(NoiseSchedule): [1] Diffusion models beat gans on image synthesis: Dhariwal and Nichol (2022) """ - def __init__(self, min_log_snr: float = -15, 
max_log_snr: float = 15, s_shift_cosine: float = 0.0):
+    def __init__(self, min_log_snr: float = -15.0, max_log_snr: float = 15.0, s_shift_cosine: float = 0.0):
         super().__init__(name="cosine_noise_schedule")
-        self._log_snr_min = ops.convert_to_tensor(min_log_snr)
-        self._log_snr_max = ops.convert_to_tensor(max_log_snr)
-        self._s_shift_cosine = ops.convert_to_tensor(s_shift_cosine)
+        self._log_snr_min = min_log_snr
+        self._log_snr_max = max_log_snr
+        self._s_shift_cosine = s_shift_cosine
 
         self._t_min = self.get_t_from_log_snr(log_snr_t=self._log_snr_max, training=True)
         self._t_max = self.get_t_from_log_snr(log_snr_t=self._log_snr_max, training=True)
@@ -210,7 +208,7 @@ def get_log_snr(self, t: Tensor, training: bool) -> Tensor:
     def get_t_from_log_snr(self, log_snr_t: Tensor, training: bool) -> Tensor:
         """Get the diffusion time (t) from the log signal-to-noise ratio (lambda)."""
         # SNR = -2 * log(tan(pi*t/2)) => t = 2/pi * arctan(exp(-snr/2))
-        return 2 / math.pi * ops.arctan(ops.exp((2 * self._s_shift_cosine - log_snr_t) / 2))
+        return 2.0 / math.pi * ops.arctan(ops.exp((2.0 * self._s_shift_cosine - log_snr_t) / 2.0))
 
     def derivative_log_snr(self, log_snr_t: Tensor, training: bool) -> Tensor:
         """Compute d/dt log(1 + e^(-snr(t))), which is used for the reverse SDE."""
@@ -241,12 +240,12 @@ class EDMNoiseSchedule(NoiseSchedule):
 
     def __init__(self, sigma_data: float = 0.5, sigma_min: float = 0.002, sigma_max: float = 80):
         super().__init__(name="edm_noise_schedule")
-        self.sigma_data = ops.convert_to_tensor(sigma_data)
-        self.sigma_max = ops.convert_to_tensor(sigma_max)
-        self.sigma_min = ops.convert_to_tensor(sigma_min)
-        self.p_mean = ops.convert_to_tensor(-1.2)
-        self.p_std = ops.convert_to_tensor(1.2)
-        self.rho = ops.convert_to_tensor(7)
+        self.sigma_data = sigma_data
+        self.sigma_max = sigma_max
+        self.sigma_min = sigma_min
+        self.p_mean = -1.2
+        self.p_std = 1.2
+        self.rho = 7
 
         # convert EDM parameters to signal-to-noise ratio formulation 
self._log_snr_min = -2 * ops.log(sigma_max) @@ -336,7 +335,7 @@ def get_weights_for_snr(self, log_snr_t: Tensor) -> Tensor: return ops.exp(-log_snr_t) + 0.5**2 -@serializable(package="bayesflow.networks") +@serializable class DiffusionModel(InferenceNetwork): """Diffusion Model as described in this overview paper [1]. @@ -395,7 +394,7 @@ def __init__( Additional keyword arguments passed to the subnet and other components. """ - super().__init__(base_distribution=None, **keras_kwargs(kwargs)) + super().__init__(base_distribution=None, **kwargs) if isinstance(noise_schedule, str): if noise_schedule == "linear": @@ -432,18 +431,11 @@ def __init__( self.subnet = find_network(subnet, **subnet_kwargs) self.output_projector = keras.layers.Dense(units=None, bias_initializer="zeros") - # serialization: store all parameters necessary to call __init__ - self.config = { - "integrate_kwargs": self.integrate_kwargs, - "subnet_kwargs": subnet_kwargs, - "noise_schedule": self.noise_schedule, - "prediction_type": self.prediction_type, - **kwargs, - } - self.config = serialize_value_or_type(self.config, "subnet", subnet) - def build(self, xz_shape: Shape, conditions_shape: Shape = None) -> None: - super().build(xz_shape, conditions_shape=conditions_shape) + if self.built: + return + + self.base_distribution.build(xz_shape) self.output_projector.units = xz_shape[-1] input_shape = list(xz_shape) @@ -461,12 +453,19 @@ def build(self, xz_shape: Shape, conditions_shape: Shape = None) -> None: def get_config(self): base_config = super().get_config() - return base_config | self.config + base_config = layer_kwargs(base_config) + + config = { + "subnet": self.subnet, + "noise_schedule": self.noise_schedule, + "integrate_kwargs": self.integrate_kwargs, + "prediction_type": self.prediction_type, + } + return base_config | serialize(config) @classmethod - def from_config(cls, config): - config = deserialize_value_or_type(config, "subnet") - return cls(**config) + def from_config(cls, config, 
custom_objects=None): + return cls(**deserialize(config, custom_objects=custom_objects)) def convert_prediction_to_x( self, pred: Tensor, z: Tensor, alpha_t: Tensor, sigma_t: Tensor, log_snr_t: Tensor, clip_x: bool @@ -546,7 +545,14 @@ def _forward( training: bool = False, **kwargs, ) -> Tensor | tuple[Tensor, Tensor]: - integrate_kwargs = self.integrate_kwargs | kwargs + integrate_kwargs = ( + { + "start_time": self.noise_schedule._t_min, + "stop_time": self.noise_schedule._t_max, + } + | self.integrate_kwargs + | kwargs + ) if density: def deltas(time, xz): @@ -588,7 +594,14 @@ def _inverse( training: bool = False, **kwargs, ) -> Tensor | tuple[Tensor, Tensor]: - integrate_kwargs = self.integrate_kwargs | kwargs + integrate_kwargs = ( + { + "start_time": self.noise_schedule._t_max, + "stop_time": self.noise_schedule._t_min, + } + | self.integrate_kwargs + | kwargs + ) if density: def deltas(time, xz): From 739491a05acf829858cb523f1e2610161f7c0094 Mon Sep 17 00:00:00 2001 From: arrjon Date: Thu, 24 Apr 2025 10:02:39 +0200 Subject: [PATCH 08/52] improve schedules --- bayesflow/experimental/diffusion_model.py | 73 ++++++++++++----------- 1 file changed, 37 insertions(+), 36 deletions(-) diff --git a/bayesflow/experimental/diffusion_model.py b/bayesflow/experimental/diffusion_model.py index 5aa8b1861..e5d7af529 100644 --- a/bayesflow/experimental/diffusion_model.py +++ b/bayesflow/experimental/diffusion_model.py @@ -37,11 +37,22 @@ class NoiseSchedule(ABC): Augmentation: Kingma et al. 
(2023) """ - def __init__(self, name: str): + def __init__(self, name: str, variance_type: str): self.name = name - - # for variance preserving schedules - self.scale_base_distribution = 1.0 + self.variance_type = variance_type # 'exploding' or 'preserving' + self._log_snr_min = ops.convert_to_tensor(-15) # should be set in the subclasses + self._log_snr_max = ops.convert_to_tensor(15) # should be set in the subclasses + + @property + def scale_base_distribution(self): + """Get the scale of the base distribution.""" + if self.variance_type == "preserving": + return 1.0 + elif self.variance_type == "exploding": + # e.g., EDM is a variance exploding schedule + return ops.exp(-self._log_snr_min) + else: + raise ValueError(f"Unknown variance type: {self.variance_type}") @abstractmethod def get_log_snr(self, t: Tensor, training: bool) -> Tensor: @@ -74,17 +85,32 @@ def get_drift_diffusion(self, log_snr_t: Tensor, x: Tensor = None, training: boo beta = self.derivative_log_snr(log_snr_t=log_snr_t, training=training) if x is None: # return g only return ops.sqrt(beta) - f = -0.5 * beta * x + if self.variance_type == "preserving": + f = -0.5 * beta * x + elif self.variance_type == "exploding": + f = ops.zeros_like(beta) + else: + raise ValueError(f"Unknown variance type: {self.variance_type}") return f, ops.sqrt(beta) def get_alpha_sigma(self, log_snr_t: Tensor, training: bool) -> tuple[Tensor, Tensor]: """Get alpha and sigma for a given log signal-to-noise ratio (lambda). - Default is a variance preserving schedule. 
+ Default is a variance preserving schedule: + alpha(t) = sqrt(sigmoid(log_snr_t)) + sigma(t) = sqrt(sigmoid(-log_snr_t)) For a variance exploding schedule, one should set alpha^2 = 1 and sigma^2 = exp(-lambda) """ - alpha_t = keras.ops.sqrt(keras.ops.sigmoid(log_snr_t)) - sigma_t = keras.ops.sqrt(keras.ops.sigmoid(-log_snr_t)) + if self.variance_type == "preserving": + # variance preserving schedule + alpha_t = keras.ops.sqrt(keras.ops.sigmoid(log_snr_t)) + sigma_t = keras.ops.sqrt(keras.ops.sigmoid(-log_snr_t)) + elif self.variance_type == "exploding": + # variance exploding schedule + alpha_t = ops.ones_like(log_snr_t) + sigma_t = ops.sqrt(ops.exp(-log_snr_t)) + else: + raise ValueError(f"Unknown variance type: {self.variance_type}") return alpha_t, sigma_t def get_weights_for_snr(self, log_snr_t: Tensor) -> Tensor: @@ -106,7 +132,7 @@ class LinearNoiseSchedule(NoiseSchedule): """ def __init__(self, min_log_snr: float = -15, max_log_snr: float = 15): - super().__init__(name="linear_noise_schedule") + super().__init__(name="linear_noise_schedule", variance_type="preserving") self._log_snr_min = ops.convert_to_tensor(min_log_snr) self._log_snr_max = ops.convert_to_tensor(max_log_snr) @@ -155,7 +181,7 @@ class CosineNoiseSchedule(NoiseSchedule): """ def __init__(self, min_log_snr: float = -15, max_log_snr: float = 15, s_shift_cosine: float = 0.0): - super().__init__(name="cosine_noise_schedule") + super().__init__(name="cosine_noise_schedule", variance_type="preserving") self._log_snr_min = ops.convert_to_tensor(min_log_snr) self._log_snr_max = ops.convert_to_tensor(max_log_snr) self._s_shift_cosine = ops.convert_to_tensor(s_shift_cosine) @@ -202,7 +228,7 @@ class EDMNoiseSchedule(NoiseSchedule): """ def __init__(self, sigma_data: float = 0.5, sigma_min: float = 0.002, sigma_max: float = 80): - super().__init__(name="edm_noise_schedule") + super().__init__(name="edm_noise_schedule", variance_type="exploding") self.sigma_data = ops.convert_to_tensor(sigma_data) 
self.sigma_max = ops.convert_to_tensor(sigma_max) self.sigma_min = ops.convert_to_tensor(sigma_min) @@ -216,9 +242,6 @@ def __init__(self, sigma_data: float = 0.5, sigma_min: float = 0.002, sigma_max: self._t_min = self.get_t_from_log_snr(log_snr_t=self._log_snr_max, training=True) self._t_max = self.get_t_from_log_snr(log_snr_t=self._log_snr_max, training=True) - # EDM is a variance exploding schedule - self.scale_base_distribution = ops.exp(-self._log_snr_min) - def get_log_snr(self, t: Tensor, training: bool) -> Tensor: """Get the log signal-to-noise ratio (lambda) for a given diffusion time.""" t_trunc = self._t_min + (self._t_max - self._t_min) * t @@ -278,28 +301,6 @@ def derivative_log_snr(self, log_snr_t: Tensor, training: bool) -> Tensor: factor = ops.exp(-log_snr_t) / (1 + ops.exp(-log_snr_t)) return -factor * dsnr_dt - def get_drift_diffusion(self, log_snr_t: Tensor, x: Tensor = None, training: bool = True) -> tuple[Tensor, Tensor]: - """Compute the drift and optionally the diffusion term for the variance exploding reverse SDE. 
- \beta(t) = d/dt log(1 + e^(-snr(t))) - f(z, t) = 0 - g(t)^2 = \beta(t) - - SDE: d(z) = [ f(z, t) - g(t)^2 * score(z, lambda) ] dt + g(t) dW - ODE: dz = [ f(z, t) - 0.5 * g(t)^2 * score(z, lambda) ] dt - """ - # Default implementation is to return the diffusion term only - beta = self.derivative_log_snr(log_snr_t=log_snr_t, training=training) - if x is None: # return g only - return ops.sqrt(beta) - f = ops.zeros_like(beta) # variance exploding schedule - return f, ops.sqrt(beta) - - def get_alpha_sigma(self, log_snr_t: Tensor, training: bool) -> tuple[Tensor, Tensor]: - """Get alpha and sigma for a given log signal-to-noise ratio (lambda) for a variance exploding schedule.""" - alpha_t = ops.ones_like(log_snr_t) - sigma_t = ops.sqrt(ops.exp(-log_snr_t)) - return alpha_t, sigma_t - def get_weights_for_snr(self, log_snr_t: Tensor) -> Tensor: """Get weights for the signal-to-noise ratio (snr) for a given log signal-to-noise ratio (lambda).""" return ops.exp(-log_snr_t) + 0.5**2 From 92131d7f8c029e4ee8f0bdb810a7a1cc735541a3 Mon Sep 17 00:00:00 2001 From: Valentin Pratz Date: Thu, 24 Apr 2025 08:40:11 +0000 Subject: [PATCH 09/52] add serialization, remove unnecessary tensor conversions --- bayesflow/experimental/diffusion_model.py | 38 +++++++++++++++++++---- 1 file changed, 32 insertions(+), 6 deletions(-) diff --git a/bayesflow/experimental/diffusion_model.py b/bayesflow/experimental/diffusion_model.py index 29c54fed3..ce2c193a2 100644 --- a/bayesflow/experimental/diffusion_model.py +++ b/bayesflow/experimental/diffusion_model.py @@ -19,8 +19,9 @@ ) +@serializable class NoiseSchedule(ABC): - """Noise schedule for diffusion models. We follow the notation from [1]. + r"""Noise schedule for diffusion models. We follow the notation from [1]. The diffusion process is defined by a noise schedule, which determines how the noise level changes over time. 
We define the noise schedule as a function of the log signal-to-noise ratio (lambda), which can be @@ -39,8 +40,8 @@ class NoiseSchedule(ABC): def __init__(self, name: str, variance_type: str): self.name = name self.variance_type = variance_type # 'exploding' or 'preserving' - self._log_snr_min = ops.convert_to_tensor(-15) # should be set in the subclasses - self._log_snr_max = ops.convert_to_tensor(15) # should be set in the subclasses + self._log_snr_min = -15 # should be set in the subclasses + self._log_snr_max = 15 # should be set in the subclasses @property def scale_base_distribution(self): @@ -65,11 +66,11 @@ def get_t_from_log_snr(self, log_snr_t: Tensor, training: bool) -> Tensor: @abstractmethod def derivative_log_snr(self, log_snr_t: Tensor, training: bool) -> Tensor: - """Compute \beta(t) = d/dt log(1 + e^(-snr(t))). This is usually used for the reverse SDE.""" + r"""Compute \beta(t) = d/dt log(1 + e^(-snr(t))). This is usually used for the reverse SDE.""" pass def get_drift_diffusion(self, log_snr_t: Tensor, x: Tensor = None, training: bool = True) -> tuple[Tensor, Tensor]: - """Compute the drift and optionally the diffusion term for the reverse SDE. + r"""Compute the drift and optionally the diffusion term for the reverse SDE. Usually it can be derived from the derivative of the schedule: \beta(t) = d/dt log(1 + e^(-snr(t))) f(z, t) = -0.5 * \beta(t) * z @@ -121,7 +122,15 @@ def get_weights_for_snr(self, log_snr_t: Tensor) -> Tensor: # 1 / ops.cosh(log_snr_t / 2) * ops.minimum(ops.ones_like(log_snr_t), gamma * ops.exp(-log_snr_t)) return ops.ones_like(log_snr_t) + def get_config(self): + return dict(name=self.name, variance_type=self.variance_type) + + @classmethod + def from_config(cls, config, custom_objects=None): + return cls(**deserialize(config, custom_objects=custom_objects)) + +@serializable class LinearNoiseSchedule(NoiseSchedule): """Linear noise schedule for diffusion models. 
@@ -171,7 +180,15 @@ def get_weights_for_snr(self, log_snr_t: Tensor) -> Tensor: sigma_t = self.get_alpha_sigma(log_snr_t=log_snr_t, training=True)[1] return ops.square(g / sigma_t) + def get_config(self): + return dict(min_log_snr=self._log_snr_min, max_log_snr=self._log_snr_max) + @classmethod + def from_config(cls, config, custom_objects=None): + return cls(**deserialize(config, custom_objects=custom_objects)) + + +@serializable class CosineNoiseSchedule(NoiseSchedule): """Cosine noise schedule for diffusion models. This schedule is based on the cosine schedule from [1]. For images, use s_shift_cosine = log(base_resolution / d), where d is the used resolution of the image. @@ -181,7 +198,7 @@ class CosineNoiseSchedule(NoiseSchedule): def __init__(self, min_log_snr: float = -15, max_log_snr: float = 15, s_shift_cosine: float = 0.0): super().__init__(name="cosine_noise_schedule", variance_type="preserving") - self._s_shift_cosine = ops.convert_to_tensor(s_shift_cosine) + self._s_shift_cosine = s_shift_cosine self._log_snr_min = min_log_snr self._log_snr_max = max_log_snr self._s_shift_cosine = s_shift_cosine @@ -220,7 +237,15 @@ def get_weights_for_snr(self, log_snr_t: Tensor) -> Tensor: """ return ops.sigmoid(-log_snr_t / 2) + def get_config(self): + return dict(min_log_snr=self._log_snr_min, max_log_snr=self._log_snr_max, s_shift_cosine=self._s_shift_cosine) + @classmethod + def from_config(cls, config, custom_objects=None): + return cls(**deserialize(config, custom_objects=custom_objects)) + + +@serializable class EDMNoiseSchedule(NoiseSchedule): """EDM noise schedule for diffusion models. This schedule is based on the EDM paper [1]. 
@@ -472,6 +497,7 @@ def velocity( ) -> Tensor: # calculate the current noise level and transform into correct shape log_snr_t = expand_right_as(self.noise_schedule.get_log_snr(t=time, training=training), xz) + log_snr_t = keras.ops.broadcast_to(log_snr_t, keras.ops.shape(xz)[:-1] + (1,)) alpha_t, sigma_t = self.noise_schedule.get_alpha_sigma(log_snr_t=log_snr_t, training=training) if conditions is None: From bd564b514cc85932c96e01aa59dbb1fc921891d4 Mon Sep 17 00:00:00 2001 From: Valentin Pratz Date: Thu, 24 Apr 2025 08:45:09 +0000 Subject: [PATCH 10/52] format inference network conftest.py --- tests/test_networks/conftest.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/tests/test_networks/conftest.py b/tests/test_networks/conftest.py index 354f90ff5..97dd1c065 100644 --- a/tests/test_networks/conftest.py +++ b/tests/test_networks/conftest.py @@ -126,7 +126,14 @@ def inference_network_subnet(request): @pytest.fixture( - params=["affine_coupling_flow", "spline_coupling_flow", "flow_matching", "diffusion_model", "free_form_flow", "consistency_model"], + params=[ + "affine_coupling_flow", + "spline_coupling_flow", + "flow_matching", + "diffusion_model", + "free_form_flow", + "consistency_model", + ], scope="function", ) def generative_inference_network(request): From 0f7b3f565b7cfe18f3273a376e269329ca0839d7 Mon Sep 17 00:00:00 2001 From: Valentin Pratz Date: Thu, 24 Apr 2025 09:04:08 +0000 Subject: [PATCH 11/52] add dtypes and type casts in compute_metrics --- bayesflow/experimental/diffusion_model.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bayesflow/experimental/diffusion_model.py b/bayesflow/experimental/diffusion_model.py index ce2c193a2..6a9deb583 100644 --- a/bayesflow/experimental/diffusion_model.py +++ b/bayesflow/experimental/diffusion_model.py @@ -644,9 +644,9 @@ def compute_metrics( # sample training diffusion time as low discrepancy sequence to decrease variance # t_i = \mod (u_0 + i/k, 1) - u0 = 
keras.random.uniform(shape=(1,)) - i = ops.arange(0, keras.ops.shape(x)[0]) # tensor of indices - t = (u0 + i / keras.ops.shape(x)[0]) % 1 + u0 = keras.random.uniform(shape=(1,), dtype=ops.dtype(x)) + i = ops.arange(0, keras.ops.shape(x)[0], dtype=ops.dtype(x)) # tensor of indices + t = (u0 + i / ops.cast(keras.ops.shape(x)[0], dtype=ops.dtype(x))) % 1 # i = keras.random.randint((keras.ops.shape(x)[0],), minval=0, maxval=self._timesteps) # t = keras.ops.cast(i, keras.ops.dtype(x)) / keras.ops.cast(self._timesteps, keras.ops.dtype(x)) From 2ce74f07e4bcceee37c465c5650217524af591fd Mon Sep 17 00:00:00 2001 From: Valentin Pratz Date: Thu, 24 Apr 2025 10:20:08 +0000 Subject: [PATCH 12/52] disable clip on x by default --- bayesflow/experimental/diffusion_model.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bayesflow/experimental/diffusion_model.py b/bayesflow/experimental/diffusion_model.py index 6a9deb583..6bad95630 100644 --- a/bayesflow/experimental/diffusion_model.py +++ b/bayesflow/experimental/diffusion_model.py @@ -493,7 +493,7 @@ def velocity( time: float | Tensor, conditions: Tensor = None, training: bool = False, - clip_x: bool = True, + clip_x: bool = False, ) -> Tensor: # calculate the current noise level and transform into correct shape log_snr_t = expand_right_as(self.noise_schedule.get_log_snr(t=time, training=training), xz) @@ -668,7 +668,7 @@ def compute_metrics( pred = self.output_projector(self.subnet(xtc, training=training), training=training) x_pred = self.convert_prediction_to_x( - pred=pred, z=diffused_x, alpha_t=alpha_t, sigma_t=sigma_t, log_snr_t=log_snr_t, clip_x=True + pred=pred, z=diffused_x, alpha_t=alpha_t, sigma_t=sigma_t, log_snr_t=log_snr_t, clip_x=False ) # convert x to epsilon prediction out = (alpha_t * diffused_x - x_pred) / sigma_t From 01b33dcede5525116b947e115436e63fba1f6a51 Mon Sep 17 00:00:00 2001 From: Valentin Pratz Date: Thu, 24 Apr 2025 10:20:58 +0000 Subject: [PATCH 13/52] fixes: use squared g, 
correct typo in _min_t --- bayesflow/experimental/diffusion_model.py | 25 +++++++++++------------ 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/bayesflow/experimental/diffusion_model.py b/bayesflow/experimental/diffusion_model.py index 6bad95630..f0e7915ae 100644 --- a/bayesflow/experimental/diffusion_model.py +++ b/bayesflow/experimental/diffusion_model.py @@ -69,8 +69,8 @@ def derivative_log_snr(self, log_snr_t: Tensor, training: bool) -> Tensor: r"""Compute \beta(t) = d/dt log(1 + e^(-snr(t))). This is usually used for the reverse SDE.""" pass - def get_drift_diffusion(self, log_snr_t: Tensor, x: Tensor = None, training: bool = True) -> tuple[Tensor, Tensor]: - r"""Compute the drift and optionally the diffusion term for the reverse SDE. + def get_drift_diffusion(self, log_snr_t: Tensor, x: Tensor = None, training: bool = False) -> tuple[Tensor, Tensor]: + r"""Compute the drift and optionally the squared diffusion term for the reverse SDE. Usually it can be derived from the derivative of the schedule: \beta(t) = d/dt log(1 + e^(-snr(t))) f(z, t) = -0.5 * \beta(t) * z @@ -84,14 +84,14 @@ def get_drift_diffusion(self, log_snr_t: Tensor, x: Tensor = None, training: boo # Default implementation is to return the diffusion term only beta = self.derivative_log_snr(log_snr_t=log_snr_t, training=training) if x is None: # return g only - return ops.sqrt(beta) + return beta if self.variance_type == "preserving": f = -0.5 * beta * x elif self.variance_type == "exploding": f = ops.zeros_like(beta) else: raise ValueError(f"Unknown variance type: {self.variance_type}") - return f, ops.sqrt(beta) + return f, beta def get_alpha_sigma(self, log_snr_t: Tensor, training: bool) -> tuple[Tensor, Tensor]: """Get alpha and sigma for a given log signal-to-noise ratio (lambda). 
@@ -144,7 +144,7 @@ def __init__(self, min_log_snr: float = -15, max_log_snr: float = 15): self._log_snr_min = min_log_snr self._log_snr_max = max_log_snr - self._t_min = self.get_t_from_log_snr(log_snr_t=self._log_snr_max, training=True) + self._t_min = self.get_t_from_log_snr(log_snr_t=self._log_snr_min, training=True) self._t_max = self.get_t_from_log_snr(log_snr_t=self._log_snr_max, training=True) def get_log_snr(self, t: Tensor, training: bool) -> Tensor: @@ -176,9 +176,9 @@ def get_weights_for_snr(self, log_snr_t: Tensor) -> Tensor: """Get weights for the signal-to-noise ratio (snr) for a given log signal-to-noise ratio (lambda). Default is the likelihood weighting based on Song et al. (2021). """ - g = self.get_drift_diffusion(log_snr_t=log_snr_t) + g_squared = self.get_drift_diffusion(log_snr_t=log_snr_t) sigma_t = self.get_alpha_sigma(log_snr_t=log_snr_t, training=True)[1] - return ops.square(g / sigma_t) + return g_squared / ops.square(sigma_t) def get_config(self): return dict(min_log_snr=self._log_snr_min, max_log_snr=self._log_snr_max) @@ -203,7 +203,7 @@ def __init__(self, min_log_snr: float = -15, max_log_snr: float = 15, s_shift_co self._log_snr_max = max_log_snr self._s_shift_cosine = s_shift_cosine - self._t_min = self.get_t_from_log_snr(log_snr_t=self._log_snr_max, training=True) + self._t_min = self.get_t_from_log_snr(log_snr_t=self._log_snr_min, training=True) self._t_max = self.get_t_from_log_snr(log_snr_t=self._log_snr_max, training=True) def get_log_snr(self, t: Tensor, training: bool) -> Tensor: @@ -254,7 +254,6 @@ class EDMNoiseSchedule(NoiseSchedule): def __init__(self, sigma_data: float = 0.5, sigma_min: float = 0.002, sigma_max: float = 80): super().__init__(name="edm_noise_schedule", variance_type="exploding") - super().__init__(name="edm_noise_schedule") self.sigma_data = sigma_data self.sigma_max = sigma_max self.sigma_min = sigma_min @@ -265,7 +264,7 @@ def __init__(self, sigma_data: float = 0.5, sigma_min: float = 0.002, sigma_max: 
# convert EDM parameters to signal-to-noise ratio formulation self._log_snr_min = -2 * ops.log(sigma_max) self._log_snr_max = -2 * ops.log(sigma_min) - self._t_min = self.get_t_from_log_snr(log_snr_t=self._log_snr_max, training=True) + self._t_min = self.get_t_from_log_snr(log_snr_t=self._log_snr_min, training=True) self._t_max = self.get_t_from_log_snr(log_snr_t=self._log_snr_max, training=True) def get_log_snr(self, t: Tensor, training: bool) -> Tensor: @@ -513,8 +512,8 @@ def velocity( score = (alpha_t * x_pred - xz) / ops.square(sigma_t) # compute velocity for the ODE depending on the noise schedule - f, g = self.noise_schedule.get_drift_diffusion(log_snr_t=log_snr_t, x=xz) - out = f - 0.5 * ops.square(g) * score + f, g_squared = self.noise_schedule.get_drift_diffusion(log_snr_t=log_snr_t, x=xz) + out = f - 0.5 * g_squared * score # todo: for the SDE: d(z) = [ f(z, t) - g(t)^2 * score(z, lambda) ] dt + g(t) dW return out @@ -680,5 +679,5 @@ def compute_metrics( # apply sample weight loss = weighted_mean(loss, sample_weight) - base_metrics = super().compute_metrics(x, conditions, sample_weight, stage) + base_metrics = super().compute_metrics(x, conditions=conditions, sample_weight=sample_weight, stage=stage) return base_metrics | {"loss": loss} From 6031212339e98a26f98bb0a2eebfbc36421088ec Mon Sep 17 00:00:00 2001 From: arrjon Date: Thu, 24 Apr 2025 12:39:16 +0200 Subject: [PATCH 14/52] integration should be from 1 to 0 --- bayesflow/experimental/diffusion_model.py | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/bayesflow/experimental/diffusion_model.py b/bayesflow/experimental/diffusion_model.py index f0e7915ae..a1b9a3206 100644 --- a/bayesflow/experimental/diffusion_model.py +++ b/bayesflow/experimental/diffusion_model.py @@ -1,5 +1,6 @@ from collections.abc import Sequence from abc import ABC, abstractmethod +from typing import Union import keras from keras import ops @@ -60,7 +61,7 @@ def get_log_snr(self, t: Tensor, 
training: bool) -> Tensor: pass @abstractmethod - def get_t_from_log_snr(self, log_snr_t: Tensor, training: bool) -> Tensor: + def get_t_from_log_snr(self, log_snr_t: Union[float, Tensor], training: bool) -> Tensor: """Get the diffusion time (t) from the log signal-to-noise ratio (lambda).""" pass @@ -140,7 +141,7 @@ class LinearNoiseSchedule(NoiseSchedule): """ def __init__(self, min_log_snr: float = -15, max_log_snr: float = 15): - super().__init__(name="linear_noise_schedule") + super().__init__(name="linear_noise_schedule", variance_type="preserving") self._log_snr_min = min_log_snr self._log_snr_max = max_log_snr @@ -153,7 +154,7 @@ def get_log_snr(self, t: Tensor, training: bool) -> Tensor: # SNR = -log(exp(t^2) - 1) return -ops.log(ops.exp(ops.square(t_trunc)) - 1) - def get_t_from_log_snr(self, log_snr_t: Tensor, training: bool) -> Tensor: + def get_t_from_log_snr(self, log_snr_t: Union[float, Tensor], training: bool) -> Tensor: """Get the diffusion time (t) from the log signal-to-noise ratio (lambda).""" # SNR = -log(exp(t^2) - 1) => t = sqrt(log(1 + exp(-snr))) return ops.sqrt(ops.log(1 + ops.exp(-log_snr_t))) @@ -212,7 +213,7 @@ def get_log_snr(self, t: Tensor, training: bool) -> Tensor: # SNR = -2 * log(tan(pi*t/2)) return -2 * ops.log(ops.tan(math.pi * t_trunc / 2)) + 2 * self._s_shift_cosine - def get_t_from_log_snr(self, log_snr_t: Tensor, training: bool) -> Tensor: + def get_t_from_log_snr(self, log_snr_t: Union[Tensor, float], training: bool) -> Tensor: """Get the diffusion time (t) from the log signal-to-noise ratio (lambda).""" # SNR = -2 * log(tan(pi*t/2)) => t = 2/pi * arctan(exp(-snr/2)) return 2 / math.pi * ops.arctan(ops.exp((2 * self._s_shift_cosine - log_snr_t) / 2)) @@ -288,7 +289,7 @@ def get_log_snr(self, t: Tensor, training: bool) -> Tensor: ) return snr - def get_t_from_log_snr(self, log_snr_t: Tensor, training: bool) -> Tensor: + def get_t_from_log_snr(self, log_snr_t: Union[float, Tensor], training: bool) -> Tensor: """Get the 
diffusion time (t) from the log signal-to-noise ratio (lambda).""" if training: # SNR = -dist.icdf(t_trunc) => t = dist.cdf(-snr) @@ -543,8 +544,8 @@ def _forward( ) -> Tensor | tuple[Tensor, Tensor]: integrate_kwargs = ( { - "start_time": self.noise_schedule._t_min, - "stop_time": self.noise_schedule._t_max, + "start_time": 1.0, + "stop_time": 0.0, } | self.integrate_kwargs | kwargs @@ -592,8 +593,8 @@ def _inverse( ) -> Tensor | tuple[Tensor, Tensor]: integrate_kwargs = ( { - "start_time": self.noise_schedule._t_max, - "stop_time": self.noise_schedule._t_min, + "start_time": 1.0, + "stop_time": 0.0, } | self.integrate_kwargs | kwargs From d82e2bf4d0a871d666f8ca0ebc3207ae6d6c903b Mon Sep 17 00:00:00 2001 From: Valentin Pratz Date: Thu, 24 Apr 2025 10:45:00 +0000 Subject: [PATCH 15/52] add missing seed_generator param --- bayesflow/experimental/diffusion_model.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/bayesflow/experimental/diffusion_model.py b/bayesflow/experimental/diffusion_model.py index f0e7915ae..c9253a8fe 100644 --- a/bayesflow/experimental/diffusion_model.py +++ b/bayesflow/experimental/diffusion_model.py @@ -2,6 +2,7 @@ from abc import ABC, abstractmethod import keras from keras import ops +import warnings from bayesflow.utils.serialization import serialize, deserialize, serializable from bayesflow.types import Tensor, Shape @@ -389,7 +390,7 @@ def __init__( **kwargs Additional keyword arguments passed to the subnet and other components. """ - super().__init__(base_distribution="normal", **kwargs) + super().__init__(base_distribution=None, **kwargs) if isinstance(noise_schedule, str): if noise_schedule == "linear": @@ -419,6 +420,13 @@ def __init__( self.integrate_kwargs = self.INTEGRATE_DEFAULT_CONFIG | (integrate_kwargs or {}) self.seed_generator = keras.random.SeedGenerator() + if subnet_kwargs: + warnings.warn( + "Using `subnet_kwargs` is deprecated." 
+ "Instead, instantiate the network yourself and pass the arguments directly.", + DeprecationWarning, + ) + subnet_kwargs = subnet_kwargs or {} if subnet == "mlp": subnet_kwargs = self.MLP_DEFAULT_CONFIG | subnet_kwargs @@ -643,7 +651,7 @@ def compute_metrics( # sample training diffusion time as low discrepancy sequence to decrease variance # t_i = \mod (u_0 + i/k, 1) - u0 = keras.random.uniform(shape=(1,), dtype=ops.dtype(x)) + u0 = keras.random.uniform(shape=(1,), dtype=ops.dtype(x), seed=self.seed_generator) i = ops.arange(0, keras.ops.shape(x)[0], dtype=ops.dtype(x)) # tensor of indices t = (u0 + i / ops.cast(keras.ops.shape(x)[0], dtype=ops.dtype(x))) % 1 # i = keras.random.randint((keras.ops.shape(x)[0],), minval=0, maxval=self._timesteps) From bdb27e8687d7a70280267768267464f9734cde69 Mon Sep 17 00:00:00 2001 From: Valentin Pratz Date: Thu, 24 Apr 2025 10:47:06 +0000 Subject: [PATCH 16/52] correct integration times for forward direction --- bayesflow/experimental/diffusion_model.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bayesflow/experimental/diffusion_model.py b/bayesflow/experimental/diffusion_model.py index 6f9bd50e8..74910fbc6 100644 --- a/bayesflow/experimental/diffusion_model.py +++ b/bayesflow/experimental/diffusion_model.py @@ -552,8 +552,8 @@ def _forward( ) -> Tensor | tuple[Tensor, Tensor]: integrate_kwargs = ( { - "start_time": 1.0, - "stop_time": 0.0, + "start_time": 0.0, + "stop_time": 1.0, } | self.integrate_kwargs | kwargs From ca52fc0b6acb6eb38a66e7a22c8edf5a9fcf3222 Mon Sep 17 00:00:00 2001 From: Valentin Pratz Date: Thu, 24 Apr 2025 10:54:29 +0000 Subject: [PATCH 17/52] flip integration times for correct direction of integration --- bayesflow/experimental/diffusion_model.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/bayesflow/experimental/diffusion_model.py b/bayesflow/experimental/diffusion_model.py index 74910fbc6..2568f341f 100644 --- 
a/bayesflow/experimental/diffusion_model.py +++ b/bayesflow/experimental/diffusion_model.py @@ -552,8 +552,8 @@ def _forward( ) -> Tensor | tuple[Tensor, Tensor]: integrate_kwargs = ( { - "start_time": 0.0, - "stop_time": 1.0, + "start_time": 1.0, + "stop_time": 0.0, } | self.integrate_kwargs | kwargs @@ -601,8 +601,8 @@ def _inverse( ) -> Tensor | tuple[Tensor, Tensor]: integrate_kwargs = ( { - "start_time": 1.0, - "stop_time": 0.0, + "start_time": 0.0, + "stop_time": 1.0, } | self.integrate_kwargs | kwargs From cbd3568bc0d696471a79a1e3a2da5a526dc492f5 Mon Sep 17 00:00:00 2001 From: Valentin Pratz Date: Thu, 24 Apr 2025 12:14:31 +0000 Subject: [PATCH 18/52] swap mapping log_snr_min/max to t_min/max --- bayesflow/experimental/diffusion_model.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/bayesflow/experimental/diffusion_model.py b/bayesflow/experimental/diffusion_model.py index 2568f341f..aaaa2ace2 100644 --- a/bayesflow/experimental/diffusion_model.py +++ b/bayesflow/experimental/diffusion_model.py @@ -146,8 +146,8 @@ def __init__(self, min_log_snr: float = -15, max_log_snr: float = 15): self._log_snr_min = min_log_snr self._log_snr_max = max_log_snr - self._t_min = self.get_t_from_log_snr(log_snr_t=self._log_snr_min, training=True) - self._t_max = self.get_t_from_log_snr(log_snr_t=self._log_snr_max, training=True) + self._t_min = self.get_t_from_log_snr(log_snr_t=self._log_snr_max, training=True) + self._t_max = self.get_t_from_log_snr(log_snr_t=self._log_snr_min, training=True) def get_log_snr(self, t: Tensor, training: bool) -> Tensor: """Get the log signal-to-noise ratio (lambda) for a given diffusion time.""" @@ -205,8 +205,8 @@ def __init__(self, min_log_snr: float = -15, max_log_snr: float = 15, s_shift_co self._log_snr_max = max_log_snr self._s_shift_cosine = s_shift_cosine - self._t_min = self.get_t_from_log_snr(log_snr_t=self._log_snr_min, training=True) - self._t_max = 
self.get_t_from_log_snr(log_snr_t=self._log_snr_max, training=True) + self._t_min = self.get_t_from_log_snr(log_snr_t=self._log_snr_max, training=True) + self._t_max = self.get_t_from_log_snr(log_snr_t=self._log_snr_min, training=True) def get_log_snr(self, t: Tensor, training: bool) -> Tensor: """Get the log signal-to-noise ratio (lambda) for a given diffusion time.""" @@ -266,8 +266,8 @@ def __init__(self, sigma_data: float = 0.5, sigma_min: float = 0.002, sigma_max: # convert EDM parameters to signal-to-noise ratio formulation self._log_snr_min = -2 * ops.log(sigma_max) self._log_snr_max = -2 * ops.log(sigma_min) - self._t_min = self.get_t_from_log_snr(log_snr_t=self._log_snr_min, training=True) - self._t_max = self.get_t_from_log_snr(log_snr_t=self._log_snr_max, training=True) + self._t_min = self.get_t_from_log_snr(log_snr_t=self._log_snr_max, training=True) + self._t_max = self.get_t_from_log_snr(log_snr_t=self._log_snr_min, training=True) def get_log_snr(self, t: Tensor, training: bool) -> Tensor: """Get the log signal-to-noise ratio (lambda) for a given diffusion time.""" @@ -478,7 +478,7 @@ def convert_prediction_to_x( if self.prediction_type == "v": # convert v into x x = alpha_t * z - sigma_t * pred - elif self.prediction_type == "e": + elif self.prediction_type == "eps": # convert noise prediction into x x = (z - sigma_t * pred) / alpha_t elif self.prediction_type == "x": @@ -552,8 +552,8 @@ def _forward( ) -> Tensor | tuple[Tensor, Tensor]: integrate_kwargs = ( { - "start_time": 1.0, - "stop_time": 0.0, + "start_time": 0.0, + "stop_time": 1.0, } | self.integrate_kwargs | kwargs @@ -601,8 +601,8 @@ def _inverse( ) -> Tensor | tuple[Tensor, Tensor]: integrate_kwargs = ( { - "start_time": 0.0, - "stop_time": 1.0, + "start_time": 1.0, + "stop_time": 0.0, } | self.integrate_kwargs | kwargs From 9b520bc2246bc8a79ecbfb7c4166da5416a00f99 Mon Sep 17 00:00:00 2001 From: arrjon Date: Thu, 24 Apr 2025 14:15:29 +0200 Subject: [PATCH 19/52] fix mapping min/max snr 
to t_min/max --- bayesflow/experimental/diffusion_model.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/bayesflow/experimental/diffusion_model.py b/bayesflow/experimental/diffusion_model.py index 2568f341f..71a5729f4 100644 --- a/bayesflow/experimental/diffusion_model.py +++ b/bayesflow/experimental/diffusion_model.py @@ -146,14 +146,15 @@ def __init__(self, min_log_snr: float = -15, max_log_snr: float = 15): self._log_snr_min = min_log_snr self._log_snr_max = max_log_snr - self._t_min = self.get_t_from_log_snr(log_snr_t=self._log_snr_min, training=True) - self._t_max = self.get_t_from_log_snr(log_snr_t=self._log_snr_max, training=True) + self._t_min = self.get_t_from_log_snr(log_snr_t=self._log_snr_max, training=True) + self._t_max = self.get_t_from_log_snr(log_snr_t=self._log_snr_min, training=True) def get_log_snr(self, t: Tensor, training: bool) -> Tensor: """Get the log signal-to-noise ratio (lambda) for a given diffusion time.""" t_trunc = self._t_min + (self._t_max - self._t_min) * t # SNR = -log(exp(t^2) - 1) - return -ops.log(ops.exp(ops.square(t_trunc)) - 1) + # equivalent, but more stable: -t^2 - log(1 - exp(-t^2)) + return -ops.square(t_trunc) - ops.log(1 - ops.exp(-ops.square(t_trunc))) def get_t_from_log_snr(self, log_snr_t: Union[float, Tensor], training: bool) -> Tensor: """Get the diffusion time (t) from the log signal-to-noise ratio (lambda).""" @@ -205,8 +206,8 @@ def __init__(self, min_log_snr: float = -15, max_log_snr: float = 15, s_shift_co self._log_snr_max = max_log_snr self._s_shift_cosine = s_shift_cosine - self._t_min = self.get_t_from_log_snr(log_snr_t=self._log_snr_min, training=True) - self._t_max = self.get_t_from_log_snr(log_snr_t=self._log_snr_max, training=True) + self._t_min = self.get_t_from_log_snr(log_snr_t=self._log_snr_max, training=True) + self._t_max = self.get_t_from_log_snr(log_snr_t=self._log_snr_min, training=True) def get_log_snr(self, t: Tensor, training: bool) -> Tensor: """Get 
the log signal-to-noise ratio (lambda) for a given diffusion time.""" @@ -266,8 +267,8 @@ def __init__(self, sigma_data: float = 0.5, sigma_min: float = 0.002, sigma_max: # convert EDM parameters to signal-to-noise ratio formulation self._log_snr_min = -2 * ops.log(sigma_max) self._log_snr_max = -2 * ops.log(sigma_min) - self._t_min = self.get_t_from_log_snr(log_snr_t=self._log_snr_min, training=True) - self._t_max = self.get_t_from_log_snr(log_snr_t=self._log_snr_max, training=True) + self._t_min = self.get_t_from_log_snr(log_snr_t=self._log_snr_max, training=True) + self._t_max = self.get_t_from_log_snr(log_snr_t=self._log_snr_min, training=True) def get_log_snr(self, t: Tensor, training: bool) -> Tensor: """Get the log signal-to-noise ratio (lambda) for a given diffusion time.""" From e32e8ad0c42c6b2cb6e0a1a04fef39032e34dff1 Mon Sep 17 00:00:00 2001 From: arrjon Date: Thu, 24 Apr 2025 15:03:29 +0200 Subject: [PATCH 20/52] fix linear schedule --- bayesflow/experimental/diffusion_model.py | 28 +++++++++++++++++++---- 1 file changed, 24 insertions(+), 4 deletions(-) diff --git a/bayesflow/experimental/diffusion_model.py b/bayesflow/experimental/diffusion_model.py index 43605c725..ba98cef86 100644 --- a/bayesflow/experimental/diffusion_model.py +++ b/bayesflow/experimental/diffusion_model.py @@ -67,7 +67,7 @@ def get_t_from_log_snr(self, log_snr_t: Union[float, Tensor], training: bool) -> pass @abstractmethod - def derivative_log_snr(self, log_snr_t: Tensor, training: bool) -> Tensor: + def derivative_log_snr(self, log_snr_t: Union[float, Tensor], training: bool) -> Tensor: r"""Compute \beta(t) = d/dt log(1 + e^(-snr(t))). 
This is usually used for the reverse SDE.""" pass @@ -131,6 +131,24 @@ def get_config(self): def from_config(cls, config, custom_objects=None): return cls(**deserialize(config, custom_objects=custom_objects)) + def validate(self): + """Validate the noise schedule.""" + if self._log_snr_min >= self._log_snr_max: + raise ValueError("min_log_snr must be less than max_log_snr.") + for training in [True, False]: + if not ops.isfinite(self.get_log_snr(ops.convert_to_tensor(0), training=training)): + raise ValueError("log_snr(0) must be finite.") + if not ops.isfinite(self.get_log_snr(ops.convert_to_tensor(1), training=training)): + raise ValueError("log_snr(1) must be finite.") + if not ops.isfinite(self.get_t_from_log_snr(self._log_snr_max, training=training)): + raise ValueError("t(0) must be finite.") + if not ops.isfinite(self.get_t_from_log_snr(self._log_snr_min, training=training)): + raise ValueError("t(1) must be finite.") + if not ops.isfinite(self.derivative_log_snr(self._log_snr_max, training=training)): + raise ValueError("dt/t log_snr(0) must be finite.") + if not ops.isfinite(self.derivative_log_snr(self._log_snr_min, training=training)): + raise ValueError("dt/t log_snr(1) must be finite.") + @serializable class LinearNoiseSchedule(NoiseSchedule): @@ -167,7 +185,7 @@ def derivative_log_snr(self, log_snr_t: Tensor, training: bool) -> Tensor: # Compute the truncated time t_trunc t_trunc = self._t_min + (self._t_max - self._t_min) * t - dsnr_dx = -(2 * t_trunc * ops.exp(t_trunc**2)) / (ops.exp(t_trunc**2) - 1) + dsnr_dx = -2 * t_trunc / (1 - ops.exp(-(t_trunc**2))) # Using the chain rule on f(t) = log(1 + e^(-snr(t))): # f'(t) = - (e^{-snr(t)} / (1 + e^{-snr(t)})) * dsnr_dt @@ -362,7 +380,7 @@ def __init__( subnet: str | type = "mlp", integrate_kwargs: dict[str, any] = None, subnet_kwargs: dict[str, any] = None, - noise_schedule: str = "cosine", + noise_schedule: str | NoiseSchedule = "cosine", prediction_type: str = "v", **kwargs, ): @@ -384,7 +402,7 @@ def 
__init__( Additional keyword arguments for the integration process. Default is None. subnet_kwargs : dict[str, any], optional Keyword arguments passed to the subnet constructor or used to update the default MLP settings. - noise_schedule : str, optional + noise_schedule : str or NoiseSchedule, optional The noise schedule used for the diffusion process. Can be "linear", "cosine", or "edm". Default is "cosine". prediction_type: str, optional @@ -406,6 +424,8 @@ def __init__( elif not isinstance(noise_schedule, NoiseSchedule): raise ValueError(f"Unknown noise schedule: {noise_schedule}") self.noise_schedule = noise_schedule + # validate noise model + self.noise_schedule.validate() if prediction_type not in ["eps", "v", "F"]: # F is EDM raise ValueError(f"Unknown prediction type: {prediction_type}") From 3455ce1eb7773eb5d50061d4216bcb66a1958762 Mon Sep 17 00:00:00 2001 From: arrjon Date: Thu, 24 Apr 2025 15:30:44 +0200 Subject: [PATCH 21/52] rename prediction type --- bayesflow/experimental/diffusion_model.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/bayesflow/experimental/diffusion_model.py b/bayesflow/experimental/diffusion_model.py index ba98cef86..64795bfc2 100644 --- a/bayesflow/experimental/diffusion_model.py +++ b/bayesflow/experimental/diffusion_model.py @@ -44,6 +44,7 @@ def __init__(self, name: str, variance_type: str): self.variance_type = variance_type # 'exploding' or 'preserving' self._log_snr_min = -15 # should be set in the subclasses self._log_snr_max = 15 # should be set in the subclasses + self.sigma_data = 1.0 @property def scale_base_distribution(self): @@ -381,7 +382,7 @@ def __init__( integrate_kwargs: dict[str, any] = None, subnet_kwargs: dict[str, any] = None, noise_schedule: str | NoiseSchedule = "cosine", - prediction_type: str = "v", + prediction_type: str = "velocity", **kwargs, ): """ @@ -406,7 +407,8 @@ def __init__( The noise schedule used for the diffusion process. 
Can be "linear", "cosine", or "edm". Default is "cosine". prediction_type: str, optional - The type of prediction used in the diffusion model. Can be "eps", "v" or "F" (EDM). Default is "v". + The type of prediction used in the diffusion model. Can be "velocity", "noise" or "F" (EDM). + Default is "velocity". **kwargs Additional keyword arguments passed to the subnet and other components. """ @@ -427,7 +429,7 @@ def __init__( # validate noise model self.noise_schedule.validate() - if prediction_type not in ["eps", "v", "F"]: # F is EDM + if prediction_type not in ["velocity", "noise", "F"]: # F is EDM raise ValueError(f"Unknown prediction type: {prediction_type}") self.prediction_type = prediction_type @@ -496,10 +498,10 @@ def convert_prediction_to_x( self, pred: Tensor, z: Tensor, alpha_t: Tensor, sigma_t: Tensor, log_snr_t: Tensor, clip_x: bool ) -> Tensor: """Convert the prediction of the neural network to the x space.""" - if self.prediction_type == "v": + if self.prediction_type == "velocity": # convert v into x x = alpha_t * z - sigma_t * pred - elif self.prediction_type == "eps": + elif self.prediction_type == "noise": # convert noise prediction into x x = (z - sigma_t * pred) / alpha_t elif self.prediction_type == "x": @@ -700,11 +702,11 @@ def compute_metrics( pred=pred, z=diffused_x, alpha_t=alpha_t, sigma_t=sigma_t, log_snr_t=log_snr_t, clip_x=False ) # convert x to epsilon prediction - out = (alpha_t * diffused_x - x_pred) / sigma_t + noise_pred = (alpha_t * diffused_x - x_pred) / sigma_t # Calculate loss based on noise prediction weights_for_snr = self.noise_schedule.get_weights_for_snr(log_snr_t=log_snr_t) - loss = weights_for_snr * ops.mean((out - eps_t) ** 2, axis=-1) + loss = weights_for_snr * ops.mean((noise_pred - eps_t) ** 2, axis=-1) # apply sample weight loss = weighted_mean(loss, sample_weight) From 95ca12693d5f644394394d84c70b5556cb6e955d Mon Sep 17 00:00:00 2001 From: Valentin Pratz Date: Thu, 24 Apr 2025 13:44:44 +0000 Subject: [PATCH 
22/52] fix: remove unnecessary covert_to_tensor call --- bayesflow/experimental/diffusion_model.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bayesflow/experimental/diffusion_model.py b/bayesflow/experimental/diffusion_model.py index 64795bfc2..9d1e371f4 100644 --- a/bayesflow/experimental/diffusion_model.py +++ b/bayesflow/experimental/diffusion_model.py @@ -137,9 +137,9 @@ def validate(self): if self._log_snr_min >= self._log_snr_max: raise ValueError("min_log_snr must be less than max_log_snr.") for training in [True, False]: - if not ops.isfinite(self.get_log_snr(ops.convert_to_tensor(0), training=training)): + if not ops.isfinite(self.get_log_snr(0.0, training=training)): raise ValueError("log_snr(0) must be finite.") - if not ops.isfinite(self.get_log_snr(ops.convert_to_tensor(1), training=training)): + if not ops.isfinite(self.get_log_snr(1.0, training=training)): raise ValueError("log_snr(1) must be finite.") if not ops.isfinite(self.get_t_from_log_snr(self._log_snr_max, training=training)): raise ValueError("t(0) must be finite.") From 495ed29b4bc250c7e2cee9a414966b74678c197f Mon Sep 17 00:00:00 2001 From: arrjon Date: Thu, 24 Apr 2025 16:27:14 +0200 Subject: [PATCH 23/52] fix validate noise schedule for training --- bayesflow/experimental/diffusion_model.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/bayesflow/experimental/diffusion_model.py b/bayesflow/experimental/diffusion_model.py index 9d1e371f4..1cac894b2 100644 --- a/bayesflow/experimental/diffusion_model.py +++ b/bayesflow/experimental/diffusion_model.py @@ -58,7 +58,7 @@ def scale_base_distribution(self): raise ValueError(f"Unknown variance type: {self.variance_type}") @abstractmethod - def get_log_snr(self, t: Tensor, training: bool) -> Tensor: + def get_log_snr(self, t: Union[float, Tensor], training: bool) -> Tensor: """Get the log signal-to-noise ratio (lambda) for a given diffusion time.""" pass @@ -145,10 +145,10 @@ def 
validate(self): raise ValueError("t(0) must be finite.") if not ops.isfinite(self.get_t_from_log_snr(self._log_snr_min, training=training)): raise ValueError("t(1) must be finite.") - if not ops.isfinite(self.derivative_log_snr(self._log_snr_max, training=training)): - raise ValueError("dt/t log_snr(0) must be finite.") - if not ops.isfinite(self.derivative_log_snr(self._log_snr_min, training=training)): - raise ValueError("dt/t log_snr(1) must be finite.") + if not ops.isfinite(self.derivative_log_snr(self._log_snr_max, training=False)): + raise ValueError("dt/t log_snr(0) must be finite.") + if not ops.isfinite(self.derivative_log_snr(self._log_snr_min, training=False)): + raise ValueError("dt/t log_snr(1) must be finite.") @serializable @@ -168,7 +168,7 @@ def __init__(self, min_log_snr: float = -15, max_log_snr: float = 15): self._t_min = self.get_t_from_log_snr(log_snr_t=self._log_snr_max, training=True) self._t_max = self.get_t_from_log_snr(log_snr_t=self._log_snr_min, training=True) - def get_log_snr(self, t: Tensor, training: bool) -> Tensor: + def get_log_snr(self, t: Union[float, Tensor], training: bool) -> Tensor: """Get the log signal-to-noise ratio (lambda) for a given diffusion time.""" t_trunc = self._t_min + (self._t_max - self._t_min) * t # SNR = -log(exp(t^2) - 1) @@ -228,7 +228,7 @@ def __init__(self, min_log_snr: float = -15, max_log_snr: float = 15, s_shift_co self._t_min = self.get_t_from_log_snr(log_snr_t=self._log_snr_max, training=True) self._t_max = self.get_t_from_log_snr(log_snr_t=self._log_snr_min, training=True) - def get_log_snr(self, t: Tensor, training: bool) -> Tensor: + def get_log_snr(self, t: Union[float, Tensor], training: bool) -> Tensor: """Get the log signal-to-noise ratio (lambda) for a given diffusion time.""" t_trunc = self._t_min + (self._t_max - self._t_min) * t # SNR = -2 * log(tan(pi*t/2)) @@ -289,7 +289,7 @@ def __init__(self, sigma_data: float = 0.5, sigma_min: float = 0.002, sigma_max: self._t_min = 
self.get_t_from_log_snr(log_snr_t=self._log_snr_max, training=True) self._t_max = self.get_t_from_log_snr(log_snr_t=self._log_snr_min, training=True) - def get_log_snr(self, t: Tensor, training: bool) -> Tensor: + def get_log_snr(self, t: Union[float, Tensor], training: bool) -> Tensor: """Get the log signal-to-noise ratio (lambda) for a given diffusion time.""" t_trunc = self._t_min + (self._t_max - self._t_min) * t if training: From 59a349bb3d3fd82fce04d6480e26e24f79710141 Mon Sep 17 00:00:00 2001 From: arrjon Date: Thu, 24 Apr 2025 16:51:31 +0200 Subject: [PATCH 24/52] minor change in diffusion weightings --- bayesflow/experimental/diffusion_model.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/bayesflow/experimental/diffusion_model.py b/bayesflow/experimental/diffusion_model.py index 1cac894b2..fc147dbdc 100644 --- a/bayesflow/experimental/diffusion_model.py +++ b/bayesflow/experimental/diffusion_model.py @@ -257,7 +257,7 @@ def get_weights_for_snr(self, log_snr_t: Tensor) -> Tensor: """Get weights for the signal-to-noise ratio (snr) for a given log signal-to-noise ratio (lambda). Default is the sigmoid weighting based on Kingma et al. (2023). """ - return ops.sigmoid(-log_snr_t / 2) + return ops.sigmoid(-log_snr_t + 2) def get_config(self): return dict(min_log_snr=self._log_snr_min, max_log_snr=self._log_snr_max, s_shift_cosine=self._s_shift_cosine) @@ -270,6 +270,7 @@ def from_config(cls, config, custom_objects=None): @serializable class EDMNoiseSchedule(NoiseSchedule): """EDM noise schedule for diffusion models. This schedule is based on the EDM paper [1]. + This should be used with the F-prediction type in the diffusion model. [1] Elucidating the Design Space of Diffusion-Based Generative Models: Karras et al. 
(2022) """ @@ -350,7 +351,7 @@ def derivative_log_snr(self, log_snr_t: Tensor, training: bool) -> Tensor: def get_weights_for_snr(self, log_snr_t: Tensor) -> Tensor: """Get weights for the signal-to-noise ratio (snr) for a given log signal-to-noise ratio (lambda).""" - return ops.exp(-log_snr_t) + 0.5**2 + return (ops.exp(-log_snr_t) + ops.square(self.sigma_data)) / ops.square(self.sigma_data) @serializable @@ -432,6 +433,10 @@ def __init__( if prediction_type not in ["velocity", "noise", "F"]: # F is EDM raise ValueError(f"Unknown prediction type: {prediction_type}") self.prediction_type = prediction_type + if noise_schedule.name == "edm_noise_schedule" and prediction_type != "F": + warnings.warn( + "EDM noise schedule is build for F-prediction. Consider using F-prediction instead.", + ) # clipping of prediction (after it was transformed to x-prediction) self._clip_min = -5.0 From 612b17bc541169ef185891434f5726b931adfb38 Mon Sep 17 00:00:00 2001 From: arrjon Date: Thu, 24 Apr 2025 22:57:39 +0200 Subject: [PATCH 25/52] add euler_maruyama sampler --- bayesflow/experimental/diffusion_model.py | 47 +++++- bayesflow/utils/__init__.py | 4 +- bayesflow/utils/integrate.py | 172 +++++++++++++++++++++- 3 files changed, 217 insertions(+), 6 deletions(-) diff --git a/bayesflow/experimental/diffusion_model.py b/bayesflow/experimental/diffusion_model.py index fc147dbdc..3a769b57f 100644 --- a/bayesflow/experimental/diffusion_model.py +++ b/bayesflow/experimental/diffusion_model.py @@ -18,6 +18,7 @@ layer_kwargs, weighted_mean, integrate, + integrate_stochastic, ) @@ -550,11 +551,44 @@ def velocity( # compute velocity for the ODE depending on the noise schedule f, g_squared = self.noise_schedule.get_drift_diffusion(log_snr_t=log_snr_t, x=xz) - out = f - 0.5 * g_squared * score + # out = f - 0.5 * g_squared * score + out = f - g_squared * score # todo: for the SDE: d(z) = [ f(z, t) - g(t)^2 * score(z, lambda) ] dt + g(t) dW return out + def velocity2( + self, + xz: Tensor, + 
time: float | Tensor, + conditions: Tensor = None, + training: bool = False, + clip_x: bool = False, + ) -> Tensor: + # calculate the current noise level and transform into correct shape + log_snr_t = expand_right_as(self.noise_schedule.get_log_snr(t=time, training=training), xz) + log_snr_t = keras.ops.broadcast_to(log_snr_t, keras.ops.shape(xz)[:-1] + (1,)) + # alpha_t, sigma_t = self.noise_schedule.get_alpha_sigma(log_snr_t=log_snr_t, training=training) + + # if conditions is None: + # xtc = keras.ops.concatenate([xz, log_snr_t], axis=-1) + # else: + # xtc = keras.ops.concatenate([xz, log_snr_t, conditions], axis=-1) + # pred = self.output_projector(self.subnet(xtc, training=training), training=training) + + # x_pred = self.convert_prediction_to_x( + # pred=pred, z=xz, alpha_t=alpha_t, sigma_t=sigma_t, log_snr_t=log_snr_t, clip_x=clip_x + # ) + # convert x to score + # score = (alpha_t * x_pred - xz) / ops.square(sigma_t) + + # compute velocity for the ODE depending on the noise schedule + f, g_squared = self.noise_schedule.get_drift_diffusion(log_snr_t=log_snr_t, x=xz) + # out = f - 0.5 * g_squared * score + # out = f - g_squared * score + + return ops.sqrt(g_squared) + def _velocity_trace( self, xz: Tensor, @@ -655,9 +689,18 @@ def deltas(time, xz): def deltas(time, xz): return {"xz": self.velocity(xz, time=time, conditions=conditions, training=training)} + def diffusion(time, xz): + return {"xz": self.velocity2(xz, time=time, conditions=conditions, training=training)} + state = {"xz": z} - state = integrate( + # state = integrate( + # deltas, + # state, + # **integrate_kwargs, + # ) + state = integrate_stochastic( deltas, + diffusion, state, **integrate_kwargs, ) diff --git a/bayesflow/utils/__init__.py b/bayesflow/utils/__init__.py index 73ba7fd8b..049144826 100644 --- a/bayesflow/utils/__init__.py +++ b/bayesflow/utils/__init__.py @@ -29,9 +29,7 @@ repo_url, ) from .hparam_utils import find_batch_size, find_memory_budget -from .integrate import ( - 
integrate, -) +from .integrate import integrate, integrate_stochastic from .io import ( pickle_load, format_bytes, diff --git a/bayesflow/utils/integrate.py b/bayesflow/utils/integrate.py index 5e3b407ec..b4af98689 100644 --- a/bayesflow/utils/integrate.py +++ b/bayesflow/utils/integrate.py @@ -4,7 +4,7 @@ import keras import numpy as np -from typing import Literal +from typing import Literal, Union, List from bayesflow.types import Tensor from bayesflow.utils import filter_kwargs @@ -293,3 +293,173 @@ def integrate( return integrate_scheduled(fn, state, steps, method, **kwargs) else: raise RuntimeError(f"Type or value of `steps` not understood (steps={steps})") + + +def euler_maruyama_step( + drift_fn: Callable, + diffusion_fn: Callable, + state: dict[str, ArrayLike], + time: ArrayLike, + step_size: ArrayLike, + noise: dict[str, ArrayLike] = None, + tolerance: ArrayLike = 1e-6, + min_step_size: ArrayLike = -float("inf"), + max_step_size: ArrayLike = float("inf"), + use_adaptive_step_size: bool = False, +) -> (dict[str, ArrayLike], ArrayLike, ArrayLike): + """ + Performs a single Euler-Maruyama step for stochastic differential equations. + + Args: + drift_fn: Function that computes the drift term. + diffusion_fn: Function that computes the diffusion term. + state: Dictionary containing the current state. + time: Current time. + step_size: Size of the integration step. + noise: Dictionary of noise terms for each state variable. + tolerance: Error tolerance for adaptive step size. + min_step_size: Minimum allowed step size. + max_step_size: Maximum allowed step size. + use_adaptive_step_size: Whether to use adaptive step sizing. + + Returns: + Tuple of (new_state, new_time, new_step_size). 
+ """ + # Compute drift term + drift = drift_fn(time, **filter_kwargs(state, drift_fn)) + + # Compute diffusion term + diffusion = diffusion_fn(time, **filter_kwargs(state, diffusion_fn)) + + # Generate noise if not provided + if noise is None: + noise = {} + for key in diffusion.keys(): + shape = keras.ops.shape(diffusion[key]) + noise[key] = keras.random.normal(shape) * keras.ops.sqrt(step_size) + + # Check if diffusion and noise have the same keys + if set(diffusion.keys()) != set(noise.keys()): + raise ValueError("Keys of diffusion terms and noise do not match.") + + if use_adaptive_step_size: + # Perform a half-step to estimate error + intermediate_state = state.copy() + for key in drift.keys(): + intermediate_state[key] = state[key] + (step_size * drift[key]) + (diffusion[key] * noise[key]) + + # Compute drift and diffusion at intermediate state + intermediate_drift = drift_fn(time + step_size, **filter_kwargs(intermediate_state, drift_fn)) + + # Compute error estimate + error_terms = [] + for key in drift.keys(): + error = keras.ops.norm(intermediate_drift[key] - drift[key], ord=2, axis=-1) + error_terms.append(error) + + intermediate_error = keras.ops.stack(error_terms) + new_step_size = step_size * tolerance / (intermediate_error + 1e-9) + + # Apply constraints to step size + new_step_size = keras.ops.clip(new_step_size, min_step_size, max_step_size) + + # Consolidate step size + new_step_size = keras.ops.take(new_step_size, keras.ops.argmin(keras.ops.abs(new_step_size))) + else: + new_step_size = step_size + + # Apply updates using Euler-Maruyama formula: dx = f(x)dt + g(x)dW + new_state = state.copy() + for key in drift.keys(): + if key in diffusion: + new_state[key] = state[key] + (step_size * drift[key]) + (diffusion[key] * noise[key]) + else: + # If no diffusion term for this variable, apply deterministic update + new_state[key] = state[key] + step_size * drift[key] + + new_time = time + step_size + + return new_state, new_time, new_step_size + + +def 
integrate_stochastic( + drift_fn: Callable, + diffusion_fn: Callable, + state: dict[str, ArrayLike], + start_time: ArrayLike, + stop_time: ArrayLike, + steps: int, + method: str = "euler_maruyama", + seed: int = None, + return_noise: bool = False, + **kwargs, +) -> Union[dict[str, ArrayLike], tuple[dict[str, ArrayLike], dict[str, List[ArrayLike]]]]: + """ + Integrates a stochastic differential equation from start_time to stop_time. + + Args: + drift_fn: Function that computes the drift term. + diffusion_fn: Function that computes the diffusion term. + state: Dictionary containing the initial state. + start_time: Starting time for integration. + stop_time: Ending time for integration. + steps: Number of integration steps. + method: Integration method to use ('euler_maruyama'). + seed: Random seed for noise generation. + return_noise: Whether to return the generated noise terms. + **kwargs: Additional arguments to pass to the step function. + + Returns: + If return_noise is False, returns the final state dictionary. + If return_noise is True, returns a tuple of (final_state, noise_history). 
+ """ + if steps <= 0: + raise ValueError("Number of steps must be positive.") + + # Set random seed if provided + if seed is not None: + keras.random.set_seed(seed) + + # Select step function based on method + match method: + case "euler_maruyama": + step_fn = euler_maruyama_step + case str() as name: + raise ValueError(f"Unknown integration method name: {name!r}") + case other: + raise TypeError(f"Invalid integration method: {other!r}") + + # Prepare step function with partial application + step_fn = partial(step_fn, drift_fn, diffusion_fn, **kwargs) + step_size = (stop_time - start_time) / steps + + time = start_time + + # Store noise history if requested + noise_history = {key: [] for key in state.keys()} if return_noise else None + + def body(_loop_var, _loop_state): + _state, _time = _loop_state + + # Generate noise for this step + _noise = {} + for key in _state.keys(): + shape = keras.ops.shape(_state[key]) + _noise[key] = keras.random.normal(shape) * keras.ops.sqrt(step_size) + + # Store noise if requested + if return_noise: + for key in _noise: + noise_history[key].append(_noise[key]) + + # Perform integration step + _state, _time, _ = step_fn(_state, _time, step_size, noise=_noise) + + return _state, _time + + state, time = keras.ops.fori_loop(0, steps, body, (state, time)) + + if return_noise: + return state, noise_history + else: + return state From de532c752aa10f8515bde30438fb038ad20536fd Mon Sep 17 00:00:00 2001 From: arrjon Date: Thu, 24 Apr 2025 23:21:36 +0200 Subject: [PATCH 26/52] abs step size --- bayesflow/utils/integrate.py | 20 +++----------------- 1 file changed, 3 insertions(+), 17 deletions(-) diff --git a/bayesflow/utils/integrate.py b/bayesflow/utils/integrate.py index b4af98689..e9b77520b 100644 --- a/bayesflow/utils/integrate.py +++ b/bayesflow/utils/integrate.py @@ -336,7 +336,7 @@ def euler_maruyama_step( noise = {} for key in diffusion.keys(): shape = keras.ops.shape(diffusion[key]) - noise[key] = keras.random.normal(shape) * 
keras.ops.sqrt(step_size) + noise[key] = keras.random.normal(shape) * keras.ops.sqrt(keras.ops.abs(step_size)) # Check if diffusion and noise have the same keys if set(diffusion.keys()) != set(noise.keys()): @@ -391,7 +391,6 @@ def integrate_stochastic( steps: int, method: str = "euler_maruyama", seed: int = None, - return_noise: bool = False, **kwargs, ) -> Union[dict[str, ArrayLike], tuple[dict[str, ArrayLike], dict[str, List[ArrayLike]]]]: """ @@ -406,7 +405,6 @@ def integrate_stochastic( steps: Number of integration steps. method: Integration method to use ('euler_maruyama'). seed: Random seed for noise generation. - return_noise: Whether to return the generated noise terms. **kwargs: Additional arguments to pass to the step function. Returns: @@ -435,9 +433,6 @@ def integrate_stochastic( time = start_time - # Store noise history if requested - noise_history = {key: [] for key in state.keys()} if return_noise else None - def body(_loop_var, _loop_state): _state, _time = _loop_state @@ -445,12 +440,7 @@ def body(_loop_var, _loop_state): _noise = {} for key in _state.keys(): shape = keras.ops.shape(_state[key]) - _noise[key] = keras.random.normal(shape) * keras.ops.sqrt(step_size) - - # Store noise if requested - if return_noise: - for key in _noise: - noise_history[key].append(_noise[key]) + _noise[key] = keras.random.normal(shape) * keras.ops.sqrt(keras.ops.abs(step_size)) # Perform integration step _state, _time, _ = step_fn(_state, _time, step_size, noise=_noise) @@ -458,8 +448,4 @@ def body(_loop_var, _loop_state): return _state, _time state, time = keras.ops.fori_loop(0, steps, body, (state, time)) - - if return_noise: - return state, noise_history - else: - return state + return state From 9ed482defe3e1c430ac3a0f7062f439c85933a9d Mon Sep 17 00:00:00 2001 From: arrjon Date: Thu, 24 Apr 2025 23:55:03 +0200 Subject: [PATCH 27/52] stochastic sampler --- bayesflow/experimental/diffusion_model.py | 75 ++++++++++------------- 1 file changed, 34 insertions(+), 41 
deletions(-) diff --git a/bayesflow/experimental/diffusion_model.py b/bayesflow/experimental/diffusion_model.py index 3a769b57f..eb028896e 100644 --- a/bayesflow/experimental/diffusion_model.py +++ b/bayesflow/experimental/diffusion_model.py @@ -374,7 +374,7 @@ class DiffusionModel(InferenceNetwork): } INTEGRATE_DEFAULT_CONFIG = { - "method": "euler", + "method": "euler", # or euler_maruyama "steps": 100, } @@ -530,6 +530,7 @@ def velocity( time: float | Tensor, conditions: Tensor = None, training: bool = False, + stochastic_solver: bool = False, clip_x: bool = False, ) -> Tensor: # calculate the current noise level and transform into correct shape @@ -549,44 +550,28 @@ def velocity( # convert x to score score = (alpha_t * x_pred - xz) / ops.square(sigma_t) - # compute velocity for the ODE depending on the noise schedule + # compute velocity f, g of the SDE or ODE f, g_squared = self.noise_schedule.get_drift_diffusion(log_snr_t=log_snr_t, x=xz) - # out = f - 0.5 * g_squared * score - out = f - g_squared * score - # todo: for the SDE: d(z) = [ f(z, t) - g(t)^2 * score(z, lambda) ] dt + g(t) dW + if stochastic_solver: + # for the SDE: d(z) = [f(z, t) - g(t) ^ 2 * score(z, lambda )] dt + g(t) dW + out = f - g_squared * score + else: + # for the ODE: d(z) = [f(z, t) - 0.5 * g(t) ^ 2 * score(z, lambda )] dt + out = f - 0.5 * g_squared * score + return out - def velocity2( + def compute_diffusion_term( self, xz: Tensor, time: float | Tensor, - conditions: Tensor = None, training: bool = False, - clip_x: bool = False, ) -> Tensor: # calculate the current noise level and transform into correct shape log_snr_t = expand_right_as(self.noise_schedule.get_log_snr(t=time, training=training), xz) log_snr_t = keras.ops.broadcast_to(log_snr_t, keras.ops.shape(xz)[:-1] + (1,)) - # alpha_t, sigma_t = self.noise_schedule.get_alpha_sigma(log_snr_t=log_snr_t, training=training) - - # if conditions is None: - # xtc = keras.ops.concatenate([xz, log_snr_t], axis=-1) - # else: - # xtc = 
keras.ops.concatenate([xz, log_snr_t, conditions], axis=-1) - # pred = self.output_projector(self.subnet(xtc, training=training), training=training) - - # x_pred = self.convert_prediction_to_x( - # pred=pred, z=xz, alpha_t=alpha_t, sigma_t=sigma_t, log_snr_t=log_snr_t, clip_x=clip_x - # ) - # convert x to score - # score = (alpha_t * x_pred - xz) / ops.square(sigma_t) - - # compute velocity for the ODE depending on the noise schedule - f, g_squared = self.noise_schedule.get_drift_diffusion(log_snr_t=log_snr_t, x=xz) - # out = f - 0.5 * g_squared * score - # out = f - g_squared * score - + g_squared = self.noise_schedule.get_drift_diffusion(log_snr_t=log_snr_t) return ops.sqrt(g_squared) def _velocity_trace( @@ -620,6 +605,9 @@ def _forward( | self.integrate_kwargs | kwargs ) + if integrate_kwargs["method"] == "euler_maruyama": + raise ValueError("Stoachastic methods are not supported for forward integration.") + if density: def deltas(time, xz): @@ -670,6 +658,8 @@ def _inverse( | kwargs ) if density: + if integrate_kwargs["method"] == "euler_maruyama": + raise ValueError("Stoachastic methods are not supported for density computation.") def deltas(time, xz): v, trace = self._velocity_trace(xz, time=time, conditions=conditions, training=training) @@ -689,21 +679,24 @@ def deltas(time, xz): def deltas(time, xz): return {"xz": self.velocity(xz, time=time, conditions=conditions, training=training)} - def diffusion(time, xz): - return {"xz": self.velocity2(xz, time=time, conditions=conditions, training=training)} - state = {"xz": z} - # state = integrate( - # deltas, - # state, - # **integrate_kwargs, - # ) - state = integrate_stochastic( - deltas, - diffusion, - state, - **integrate_kwargs, - ) + if integrate_kwargs["method"] == "euler_maruyama": + + def diffusion(time, xz): + return {"xz": self.compute_diffusion_term(xz, time=time, training=training)} + + state = integrate_stochastic( + deltas, + diffusion, + state, + **integrate_kwargs, + ) + else: + state = 
integrate( + deltas, + state, + **integrate_kwargs, + ) x = state["xz"] return x From 548f51bbdf46f611138622e0d7138c2eeafa7615 Mon Sep 17 00:00:00 2001 From: arrjon Date: Fri, 25 Apr 2025 09:58:26 +0200 Subject: [PATCH 28/52] stochastic sampler fix --- bayesflow/experimental/diffusion_model.py | 31 +++++++++++++++++------ 1 file changed, 23 insertions(+), 8 deletions(-) diff --git a/bayesflow/experimental/diffusion_model.py b/bayesflow/experimental/diffusion_model.py index eb028896e..e8e142a46 100644 --- a/bayesflow/experimental/diffusion_model.py +++ b/bayesflow/experimental/diffusion_model.py @@ -528,9 +528,9 @@ def velocity( self, xz: Tensor, time: float | Tensor, + stochastic_solver: bool, conditions: Tensor = None, training: bool = False, - stochastic_solver: bool = False, clip_x: bool = False, ) -> Tensor: # calculate the current noise level and transform into correct shape @@ -583,7 +583,7 @@ def _velocity_trace( training: bool = False, ) -> (Tensor, Tensor): def f(x): - return self.velocity(x, time=time, conditions=conditions, training=training) + return self.velocity(x, time=time, stochastic_solver=False, conditions=conditions, training=training) v, trace = jacobian_trace(f, xz, max_steps=max_steps, seed=self.seed_generator, return_output=True) @@ -630,7 +630,9 @@ def deltas(time, xz): return z, log_density def deltas(time, xz): - return {"xz": self.velocity(xz, time=time, conditions=conditions, training=training)} + return { + "xz": self.velocity(xz, time=time, stochastic_solver=False, conditions=conditions, training=training) + } state = {"xz": x} state = integrate( @@ -676,12 +678,14 @@ def deltas(time, xz): return x, log_density - def deltas(time, xz): - return {"xz": self.velocity(xz, time=time, conditions=conditions, training=training)} - state = {"xz": z} if integrate_kwargs["method"] == "euler_maruyama": + def deltas(time, xz): + return { + "xz": self.velocity(xz, time=time, stochastic_solver=True, conditions=conditions, training=training) + } + 
def diffusion(time, xz): return {"xz": self.compute_diffusion_term(xz, time=time, training=training)} @@ -692,6 +696,14 @@ def diffusion(time, xz): **integrate_kwargs, ) else: + + def deltas(time, xz): + return { + "xz": self.velocity( + xz, time=time, stochastic_solver=False, conditions=conditions, training=training + ) + } + state = integrate( deltas, state, @@ -709,6 +721,7 @@ def compute_metrics( stage: str = "training", ) -> dict[str, Tensor]: training = stage == "training" + noise_schedule_training_stage = stage == "training" or stage == "validation" if not self.built: xz_shape = keras.ops.shape(x) conditions_shape = None if conditions is None else keras.ops.shape(conditions) @@ -723,8 +736,10 @@ def compute_metrics( # t = keras.ops.cast(i, keras.ops.dtype(x)) / keras.ops.cast(self._timesteps, keras.ops.dtype(x)) # calculate the noise level - log_snr_t = expand_right_as(self.noise_schedule.get_log_snr(t, training=training), x) - alpha_t, sigma_t = self.noise_schedule.get_alpha_sigma(log_snr_t=log_snr_t, training=training) + log_snr_t = expand_right_as(self.noise_schedule.get_log_snr(t, training=noise_schedule_training_stage), x) + alpha_t, sigma_t = self.noise_schedule.get_alpha_sigma( + log_snr_t=log_snr_t, training=noise_schedule_training_stage + ) # generate noise vector eps_t = keras.random.normal(ops.shape(x), dtype=ops.dtype(x), seed=self.seed_generator) From 194a5037030ac32e5095815ad35db38c1b272103 Mon Sep 17 00:00:00 2001 From: arrjon Date: Fri, 25 Apr 2025 10:58:58 +0200 Subject: [PATCH 29/52] fix scale base dist --- bayesflow/experimental/diffusion_model.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/bayesflow/experimental/diffusion_model.py b/bayesflow/experimental/diffusion_model.py index e8e142a46..fcd08ae46 100644 --- a/bayesflow/experimental/diffusion_model.py +++ b/bayesflow/experimental/diffusion_model.py @@ -54,7 +54,7 @@ def scale_base_distribution(self): return 1.0 elif self.variance_type == "exploding": 
# e.g., EDM is a variance exploding schedule - return ops.exp(-self._log_snr_min) + return ops.sqrt(ops.exp(-self._log_snr_min)) else: raise ValueError(f"Unknown variance type: {self.variance_type}") @@ -279,17 +279,20 @@ class EDMNoiseSchedule(NoiseSchedule): def __init__(self, sigma_data: float = 0.5, sigma_min: float = 0.002, sigma_max: float = 80): super().__init__(name="edm_noise_schedule", variance_type="exploding") self.sigma_data = sigma_data - self.sigma_max = sigma_max - self.sigma_min = sigma_min + # training settings self.p_mean = -1.2 self.p_std = 1.2 + # sampling settings + self.sigma_max = sigma_max + self.sigma_min = sigma_min self.rho = 7 # convert EDM parameters to signal-to-noise ratio formulation self._log_snr_min = -2 * ops.log(sigma_max) self._log_snr_max = -2 * ops.log(sigma_min) - self._t_min = self.get_t_from_log_snr(log_snr_t=self._log_snr_max, training=True) - self._t_max = self.get_t_from_log_snr(log_snr_t=self._log_snr_min, training=True) + # t is not truncated for EDM by definition of the sampling schedule + self._t_min = self.get_t_from_log_snr(log_snr_t=self._log_snr_max, training=False) + self._t_max = self.get_t_from_log_snr(log_snr_t=self._log_snr_min, training=False) def get_log_snr(self, t: Union[float, Tensor], training: bool) -> Tensor: """Get the log signal-to-noise ratio (lambda) for a given diffusion time.""" From 196683c7f87e35dedc4976a6a6e81804a0355cc5 Mon Sep 17 00:00:00 2001 From: arrjon Date: Fri, 25 Apr 2025 11:04:40 +0200 Subject: [PATCH 30/52] EDM training bounds --- bayesflow/experimental/diffusion_model.py | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/bayesflow/experimental/diffusion_model.py b/bayesflow/experimental/diffusion_model.py index fcd08ae46..a80b78c96 100644 --- a/bayesflow/experimental/diffusion_model.py +++ b/bayesflow/experimental/diffusion_model.py @@ -276,7 +276,7 @@ class EDMNoiseSchedule(NoiseSchedule): [1] Elucidating the Design Space of Diffusion-Based 
Generative Models: Karras et al. (2022) """ - def __init__(self, sigma_data: float = 0.5, sigma_min: float = 0.002, sigma_max: float = 80): + def __init__(self, sigma_data: float = 0.5, sigma_min: float = 0.002, sigma_max: float = 80.0): super().__init__(name="edm_noise_schedule", variance_type="exploding") self.sigma_data = sigma_data # training settings @@ -291,26 +291,25 @@ def __init__(self, sigma_data: float = 0.5, sigma_min: float = 0.002, sigma_max: self._log_snr_min = -2 * ops.log(sigma_max) self._log_snr_max = -2 * ops.log(sigma_min) # t is not truncated for EDM by definition of the sampling schedule - self._t_min = self.get_t_from_log_snr(log_snr_t=self._log_snr_max, training=False) - self._t_max = self.get_t_from_log_snr(log_snr_t=self._log_snr_min, training=False) + # training bounds are not so important, but should be set to avoid numerical issues + self._log_snr_min_training = self._log_snr_min * 2 # one is never sampler during training + self._log_snr_max_training = self._log_snr_max * 2 # 0 is almost surely never sampled during training def get_log_snr(self, t: Union[float, Tensor], training: bool) -> Tensor: """Get the log signal-to-noise ratio (lambda) for a given diffusion time.""" - t_trunc = self._t_min + (self._t_max - self._t_min) * t if training: # SNR = -dist.icdf(t_trunc) loc = -2 * self.p_mean scale = 2 * self.p_std - x = t_trunc - snr = -(loc + scale * ops.erfinv(2 * x - 1) * math.sqrt(2)) - snr = keras.ops.clip(snr, x_min=self._log_snr_min, x_max=self._log_snr_max) + snr = -(loc + scale * ops.erfinv(2 * t - 1) * math.sqrt(2)) + snr = keras.ops.clip(snr, x_min=self._log_snr_min_training, x_max=self._log_snr_max_training) else: # sampling snr = ( -2 * self.rho * ops.log( self.sigma_max ** (1 / self.rho) - + (1 - t_trunc) * (self.sigma_min ** (1 / self.rho) - self.sigma_max ** (1 / self.rho)) + + (1 - t) * (self.sigma_min ** (1 / self.rho) - self.sigma_max ** (1 / self.rho)) ) ) return snr @@ -338,20 +337,18 @@ def derivative_log_snr(self, 
log_snr_t: Tensor, training: bool) -> Tensor: raise NotImplementedError("Derivative of log SNR is not implemented for training mode.") # sampling mode t = self.get_t_from_log_snr(log_snr_t=log_snr_t, training=training) - t_trunc = self._t_min + (self._t_max - self._t_min) * t # SNR = -2*rho*log(s_max + (1 - x)*(s_min - s_max)) s_max = self.sigma_max ** (1 / self.rho) s_min = self.sigma_min ** (1 / self.rho) - u = s_max + (1 - t_trunc) * (s_min - s_max) + u = s_max + (1 - t) * (s_min - s_max) # d/dx snr = 2*rho*(s_min - s_max) / u dsnr_dx = 2 * self.rho * (s_min - s_max) / u # Using the chain rule on f(t) = log(1 + e^(-snr(t))): # f'(t) = - (e^{-snr(t)} / (1 + e^{-snr(t)})) * dsnr_dt - dsnr_dt = dsnr_dx * (self._t_max - self._t_min) factor = ops.exp(-log_snr_t) / (1 + ops.exp(-log_snr_t)) - return -factor * dsnr_dt + return -factor * dsnr_dx def get_weights_for_snr(self, log_snr_t: Tensor) -> Tensor: """Get weights for the signal-to-noise ratio (snr) for a given log signal-to-noise ratio (lambda).""" From 5b524997433e916b94a963bf9f5d8dcf35ba31c0 Mon Sep 17 00:00:00 2001 From: arrjon Date: Fri, 25 Apr 2025 12:49:31 +0200 Subject: [PATCH 31/52] minor changes --- bayesflow/experimental/diffusion_model.py | 52 +++++++++++------------ 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/bayesflow/experimental/diffusion_model.py b/bayesflow/experimental/diffusion_model.py index a80b78c96..1b8c8f5c1 100644 --- a/bayesflow/experimental/diffusion_model.py +++ b/bayesflow/experimental/diffusion_model.py @@ -75,7 +75,7 @@ def derivative_log_snr(self, log_snr_t: Union[float, Tensor], training: bool) -> def get_drift_diffusion(self, log_snr_t: Tensor, x: Tensor = None, training: bool = False) -> tuple[Tensor, Tensor]: r"""Compute the drift and optionally the squared diffusion term for the reverse SDE. 
- Usually it can be derived from the derivative of the schedule: + It can be derived from the derivative of the schedule: \beta(t) = d/dt log(1 + e^(-snr(t))) f(z, t) = -0.5 * \beta(t) * z g(t)^2 = \beta(t) @@ -85,9 +85,8 @@ def get_drift_diffusion(self, log_snr_t: Tensor, x: Tensor = None, training: boo For a variance exploding schedule, one should set f(z, t) = 0. """ - # Default implementation is to return the diffusion term only beta = self.derivative_log_snr(log_snr_t=log_snr_t, training=training) - if x is None: # return g only + if x is None: # return g^2 only return beta if self.variance_type == "preserving": f = -0.5 * beta * x @@ -121,7 +120,7 @@ def get_weights_for_snr(self, log_snr_t: Tensor) -> Tensor: """Get weights for the signal-to-noise ratio (snr) for a given log signal-to-noise ratio (lambda). Default is 1. Generally, weighting functions should be defined for a noise prediction loss. """ - # sigmoid: ops.sigmoid(-log_snr_t / 2), based on Kingma et al. (2023) + # sigmoid: ops.sigmoid(-log_snr_t + 2), based on Kingma et al. (2023) # min-snr with gamma = 5, based on Hang et al. 
(2023) # 1 / ops.cosh(log_snr_t / 2) * ops.minimum(ops.ones_like(log_snr_t), gamma * ops.exp(-log_snr_t)) return ops.ones_like(log_snr_t) @@ -291,9 +290,9 @@ def __init__(self, sigma_data: float = 0.5, sigma_min: float = 0.002, sigma_max: self._log_snr_min = -2 * ops.log(sigma_max) self._log_snr_max = -2 * ops.log(sigma_min) # t is not truncated for EDM by definition of the sampling schedule - # training bounds are not so important, but should be set to avoid numerical issues - self._log_snr_min_training = self._log_snr_min * 2 # one is never sampler during training - self._log_snr_max_training = self._log_snr_max * 2 # 0 is almost surely never sampled during training + # training bounds should be set to avoid numerical issues + self._log_snr_min_training = self._log_snr_min - 1 # one is never sampler during training + self._log_snr_max_training = self._log_snr_max + 1 # 0 is almost surely never sampled during training def get_log_snr(self, t: Union[float, Tensor], training: bool) -> Tensor: """Get the log signal-to-noise ratio (lambda) for a given diffusion time.""" @@ -304,14 +303,9 @@ def get_log_snr(self, t: Union[float, Tensor], training: bool) -> Tensor: snr = -(loc + scale * ops.erfinv(2 * t - 1) * math.sqrt(2)) snr = keras.ops.clip(snr, x_min=self._log_snr_min_training, x_max=self._log_snr_max_training) else: # sampling - snr = ( - -2 - * self.rho - * ops.log( - self.sigma_max ** (1 / self.rho) - + (1 - t) * (self.sigma_min ** (1 / self.rho) - self.sigma_max ** (1 / self.rho)) - ) - ) + sigma_min_rho = self.sigma_min ** (1 / self.rho) + sigma_max_rho = self.sigma_max ** (1 / self.rho) + snr = -2 * self.rho * ops.log(sigma_max_rho + (1 - t) * (sigma_min_rho - sigma_max_rho)) return snr def get_t_from_log_snr(self, log_snr_t: Union[float, Tensor], training: bool) -> Tensor: @@ -325,10 +319,9 @@ def get_t_from_log_snr(self, log_snr_t: Union[float, Tensor], training: bool) -> else: # sampling # SNR = -2 * rho * log(sigma_max ** (1/rho) + (1 - t) * (sigma_min ** 
(1/rho) - sigma_max ** (1/rho))) # => t = 1 - ((exp(-snr/(2*rho)) - sigma_max ** (1/rho)) / (sigma_min ** (1/rho) - sigma_max ** (1/rho))) - t = 1 - ( - (ops.exp(-log_snr_t / (2 * self.rho)) - self.sigma_max ** (1 / self.rho)) - / (self.sigma_min ** (1 / self.rho) - self.sigma_max ** (1 / self.rho)) - ) + sigma_min_rho = self.sigma_min ** (1 / self.rho) + sigma_max_rho = self.sigma_max ** (1 / self.rho) + t = 1 - ((ops.exp(-log_snr_t / (2 * self.rho)) - sigma_max_rho) / (sigma_min_rho - sigma_max_rho)) return t def derivative_log_snr(self, log_snr_t: Tensor, training: bool) -> Tensor: @@ -354,6 +347,13 @@ def get_weights_for_snr(self, log_snr_t: Tensor) -> Tensor: """Get weights for the signal-to-noise ratio (snr) for a given log signal-to-noise ratio (lambda).""" return (ops.exp(-log_snr_t) + ops.square(self.sigma_data)) / ops.square(self.sigma_data) + def get_config(self): + return dict(sigma_data=self.sigma_data, sigma_min=self.sigma_min, sigma_max=self.sigma_max) + + @classmethod + def from_config(cls, config, custom_objects=None): + return cls(**deserialize(config, custom_objects=custom_objects)) + @serializable class DiffusionModel(InferenceNetwork): @@ -510,15 +510,15 @@ def convert_prediction_to_x( elif self.prediction_type == "noise": # convert noise prediction into x x = (z - sigma_t * pred) / alpha_t - elif self.prediction_type == "x": - x = pred - elif self.prediction_type == "score": - x = (z + sigma_t**2 * pred) / alpha_t - else: # self.prediction_type == 'F': # EDM + elif self.prediction_type == "F": # EDM sigma_data = self.noise_schedule.sigma_data x1 = (sigma_data**2 * alpha_t) / (ops.exp(-log_snr_t) + sigma_data**2) x2 = ops.exp(-log_snr_t / 2) * sigma_data / ops.sqrt(ops.exp(-log_snr_t) + sigma_data**2) x = x1 * z + x2 * pred + elif self.prediction_type == "x": + x = pred + else: # "score" + x = (z + sigma_t**2 * pred) / alpha_t if clip_x: x = keras.ops.clip(x, self._clip_min, self._clip_max) @@ -606,7 +606,7 @@ def _forward( | kwargs ) if 
integrate_kwargs["method"] == "euler_maruyama": - raise ValueError("Stoachastic methods are not supported for forward integration.") + raise ValueError("Stochastic methods are not supported for forward integration.") if density: @@ -661,7 +661,7 @@ def _inverse( ) if density: if integrate_kwargs["method"] == "euler_maruyama": - raise ValueError("Stoachastic methods are not supported for density computation.") + raise ValueError("Stochastic methods are not supported for density computation.") def deltas(time, xz): v, trace = self._velocity_trace(xz, time=time, conditions=conditions, training=training) From eb96620ec7044eb595a8ceb5566374d0603c36c6 Mon Sep 17 00:00:00 2001 From: arrjon Date: Fri, 25 Apr 2025 13:36:26 +0200 Subject: [PATCH 32/52] fix base distribution --- bayesflow/experimental/diffusion_model.py | 61 +++++++++-------------- 1 file changed, 24 insertions(+), 37 deletions(-) diff --git a/bayesflow/experimental/diffusion_model.py b/bayesflow/experimental/diffusion_model.py index 1b8c8f5c1..66a4cd792 100644 --- a/bayesflow/experimental/diffusion_model.py +++ b/bayesflow/experimental/diffusion_model.py @@ -45,18 +45,6 @@ def __init__(self, name: str, variance_type: str): self.variance_type = variance_type # 'exploding' or 'preserving' self._log_snr_min = -15 # should be set in the subclasses self._log_snr_max = 15 # should be set in the subclasses - self.sigma_data = 1.0 - - @property - def scale_base_distribution(self): - """Get the scale of the base distribution.""" - if self.variance_type == "preserving": - return 1.0 - elif self.variance_type == "exploding": - # e.g., EDM is a variance exploding schedule - return ops.sqrt(ops.exp(-self._log_snr_min)) - else: - raise ValueError(f"Unknown variance type: {self.variance_type}") @abstractmethod def get_log_snr(self, t: Union[float, Tensor], training: bool) -> Tensor: @@ -106,8 +94,8 @@ def get_alpha_sigma(self, log_snr_t: Tensor, training: bool) -> tuple[Tensor, Te """ if self.variance_type == "preserving": 
# variance preserving schedule - alpha_t = keras.ops.sqrt(keras.ops.sigmoid(log_snr_t)) - sigma_t = keras.ops.sqrt(keras.ops.sigmoid(-log_snr_t)) + alpha_t = ops.sqrt(ops.sigmoid(log_snr_t)) + sigma_t = ops.sqrt(ops.sigmoid(-log_snr_t)) elif self.variance_type == "exploding": # variance exploding schedule alpha_t = ops.ones_like(log_snr_t) @@ -271,6 +259,7 @@ def from_config(cls, config, custom_objects=None): class EDMNoiseSchedule(NoiseSchedule): """EDM noise schedule for diffusion models. This schedule is based on the EDM paper [1]. This should be used with the F-prediction type in the diffusion model. + Since the schedule is variance exploding, the base distribution is a Gaussian with scale 'sigma_max'. [1] Elucidating the Design Space of Diffusion-Based Generative Models: Karras et al. (2022) """ @@ -301,7 +290,7 @@ def get_log_snr(self, t: Union[float, Tensor], training: bool) -> Tensor: loc = -2 * self.p_mean scale = 2 * self.p_std snr = -(loc + scale * ops.erfinv(2 * t - 1) * math.sqrt(2)) - snr = keras.ops.clip(snr, x_min=self._log_snr_min_training, x_max=self._log_snr_max_training) + snr = ops.clip(snr, x_min=self._log_snr_min_training, x_max=self._log_snr_max_training) else: # sampling sigma_min_rho = self.sigma_min ** (1 / self.rho) sigma_max_rho = self.sigma_max ** (1 / self.rho) @@ -375,7 +364,7 @@ class DiffusionModel(InferenceNetwork): INTEGRATE_DEFAULT_CONFIG = { "method": "euler", # or euler_maruyama - "steps": 100, + "steps": 250, } def __init__( @@ -444,9 +433,7 @@ def __init__( self._clip_max = 5.0 # latent distribution (not configurable) - self.base_distribution = bf.distributions.DiagonalNormal( - mean=0.0, std=self.noise_schedule.scale_base_distribution - ) + self.base_distribution = bf.distributions.DiagonalNormal() self.integrate_kwargs = self.INTEGRATE_DEFAULT_CONFIG | (integrate_kwargs or {}) self.seed_generator = keras.random.SeedGenerator() @@ -521,7 +508,7 @@ def convert_prediction_to_x( x = (z + sigma_t**2 * pred) / alpha_t if clip_x: 
- x = keras.ops.clip(x, self._clip_min, self._clip_max) + x = ops.clip(x, self._clip_min, self._clip_max) return x def velocity( @@ -535,13 +522,13 @@ def velocity( ) -> Tensor: # calculate the current noise level and transform into correct shape log_snr_t = expand_right_as(self.noise_schedule.get_log_snr(t=time, training=training), xz) - log_snr_t = keras.ops.broadcast_to(log_snr_t, keras.ops.shape(xz)[:-1] + (1,)) + log_snr_t = ops.broadcast_to(log_snr_t, ops.shape(xz)[:-1] + (1,)) alpha_t, sigma_t = self.noise_schedule.get_alpha_sigma(log_snr_t=log_snr_t, training=training) if conditions is None: - xtc = keras.ops.concatenate([xz, log_snr_t], axis=-1) + xtc = ops.concatenate([xz, log_snr_t], axis=-1) else: - xtc = keras.ops.concatenate([xz, log_snr_t, conditions], axis=-1) + xtc = ops.concatenate([xz, log_snr_t, conditions], axis=-1) pred = self.output_projector(self.subnet(xtc, training=training), training=training) x_pred = self.convert_prediction_to_x( @@ -570,7 +557,7 @@ def compute_diffusion_term( ) -> Tensor: # calculate the current noise level and transform into correct shape log_snr_t = expand_right_as(self.noise_schedule.get_log_snr(t=time, training=training), xz) - log_snr_t = keras.ops.broadcast_to(log_snr_t, keras.ops.shape(xz)[:-1] + (1,)) + log_snr_t = ops.broadcast_to(log_snr_t, ops.shape(xz)[:-1] + (1,)) g_squared = self.noise_schedule.get_drift_diffusion(log_snr_t=log_snr_t) return ops.sqrt(g_squared) @@ -587,7 +574,7 @@ def f(x): v, trace = jacobian_trace(f, xz, max_steps=max_steps, seed=self.seed_generator, return_output=True) - return v, keras.ops.expand_dims(trace, axis=-1) + return v, ops.expand_dims(trace, axis=-1) def _forward( self, @@ -616,7 +603,7 @@ def deltas(time, xz): state = { "xz": x, - "trace": keras.ops.zeros(keras.ops.shape(x)[:-1] + (1,), dtype=keras.ops.dtype(x)), + "trace": ops.zeros(ops.shape(x)[:-1] + (1,), dtype=ops.dtype(x)), } state = integrate( deltas, @@ -625,7 +612,7 @@ def deltas(time, xz): ) z = state["xz"] - 
log_density = self.base_distribution.log_prob(z) + keras.ops.squeeze(state["trace"], axis=-1) + log_density = self.base_distribution.log_prob(z) + ops.squeeze(state["trace"], axis=-1) return z, log_density @@ -669,12 +656,12 @@ def deltas(time, xz): state = { "xz": z, - "trace": keras.ops.zeros(keras.ops.shape(z)[:-1] + (1,), dtype=keras.ops.dtype(z)), + "trace": ops.zeros(ops.shape(z)[:-1] + (1,), dtype=ops.dtype(z)), } state = integrate(deltas, state, **integrate_kwargs) x = state["xz"] - log_density = self.base_distribution.log_prob(z) - keras.ops.squeeze(state["trace"], axis=-1) + log_density = self.base_distribution.log_prob(z) - ops.squeeze(state["trace"], axis=-1) return x, log_density @@ -723,17 +710,17 @@ def compute_metrics( training = stage == "training" noise_schedule_training_stage = stage == "training" or stage == "validation" if not self.built: - xz_shape = keras.ops.shape(x) - conditions_shape = None if conditions is None else keras.ops.shape(conditions) + xz_shape = ops.shape(x) + conditions_shape = None if conditions is None else ops.shape(conditions) self.build(xz_shape, conditions_shape) # sample training diffusion time as low discrepancy sequence to decrease variance # t_i = \mod (u_0 + i/k, 1) u0 = keras.random.uniform(shape=(1,), dtype=ops.dtype(x), seed=self.seed_generator) - i = ops.arange(0, keras.ops.shape(x)[0], dtype=ops.dtype(x)) # tensor of indices - t = (u0 + i / ops.cast(keras.ops.shape(x)[0], dtype=ops.dtype(x))) % 1 - # i = keras.random.randint((keras.ops.shape(x)[0],), minval=0, maxval=self._timesteps) - # t = keras.ops.cast(i, keras.ops.dtype(x)) / keras.ops.cast(self._timesteps, keras.ops.dtype(x)) + i = ops.arange(0, ops.shape(x)[0], dtype=ops.dtype(x)) # tensor of indices + t = (u0 + i / ops.cast(ops.shape(x)[0], dtype=ops.dtype(x))) % 1 + # i = keras.random.randint((ops.shape(x)[0],), minval=0, maxval=self._timesteps) + # t = ops.cast(i, ops.dtype(x)) / ops.cast(self._timesteps, ops.dtype(x)) # calculate the noise level 
log_snr_t = expand_right_as(self.noise_schedule.get_log_snr(t, training=noise_schedule_training_stage), x) @@ -749,9 +736,9 @@ def compute_metrics( # calculate output of the network if conditions is None: - xtc = keras.ops.concatenate([diffused_x, log_snr_t], axis=-1) + xtc = ops.concatenate([diffused_x, log_snr_t], axis=-1) else: - xtc = keras.ops.concatenate([diffused_x, log_snr_t, conditions], axis=-1) + xtc = ops.concatenate([diffused_x, log_snr_t, conditions], axis=-1) pred = self.output_projector(self.subnet(xtc, training=training), training=training) x_pred = self.convert_prediction_to_x( From 668f6fc8c6358b234d40fadd122641cad535bb63 Mon Sep 17 00:00:00 2001 From: arrjon Date: Fri, 25 Apr 2025 13:46:00 +0200 Subject: [PATCH 33/52] seed in stochastic sampler --- bayesflow/experimental/diffusion_model.py | 7 ++++--- bayesflow/utils/integrate.py | 15 ++------------- 2 files changed, 6 insertions(+), 16 deletions(-) diff --git a/bayesflow/experimental/diffusion_model.py b/bayesflow/experimental/diffusion_model.py index 66a4cd792..6da2319b0 100644 --- a/bayesflow/experimental/diffusion_model.py +++ b/bayesflow/experimental/diffusion_model.py @@ -677,9 +677,10 @@ def diffusion(time, xz): return {"xz": self.compute_diffusion_term(xz, time=time, training=training)} state = integrate_stochastic( - deltas, - diffusion, - state, + drift_fn=deltas, + diffusion_fn=diffusion, + state=state, + seed=self.seed_generator, **integrate_kwargs, ) else: diff --git a/bayesflow/utils/integrate.py b/bayesflow/utils/integrate.py index e9b77520b..1abaab274 100644 --- a/bayesflow/utils/integrate.py +++ b/bayesflow/utils/integrate.py @@ -301,7 +301,7 @@ def euler_maruyama_step( state: dict[str, ArrayLike], time: ArrayLike, step_size: ArrayLike, - noise: dict[str, ArrayLike] = None, + noise: dict[str, ArrayLike], tolerance: ArrayLike = 1e-6, min_step_size: ArrayLike = -float("inf"), max_step_size: ArrayLike = float("inf"), @@ -331,13 +331,6 @@ def euler_maruyama_step( # Compute diffusion 
term diffusion = diffusion_fn(time, **filter_kwargs(state, diffusion_fn)) - # Generate noise if not provided - if noise is None: - noise = {} - for key in diffusion.keys(): - shape = keras.ops.shape(diffusion[key]) - noise[key] = keras.random.normal(shape) * keras.ops.sqrt(keras.ops.abs(step_size)) - # Check if diffusion and noise have the same keys if set(diffusion.keys()) != set(noise.keys()): raise ValueError("Keys of diffusion terms and noise do not match.") @@ -414,10 +407,6 @@ def integrate_stochastic( if steps <= 0: raise ValueError("Number of steps must be positive.") - # Set random seed if provided - if seed is not None: - keras.random.set_seed(seed) - # Select step function based on method match method: case "euler_maruyama": @@ -440,7 +429,7 @@ def body(_loop_var, _loop_state): _noise = {} for key in _state.keys(): shape = keras.ops.shape(_state[key]) - _noise[key] = keras.random.normal(shape) * keras.ops.sqrt(keras.ops.abs(step_size)) + _noise[key] = keras.random.normal(shape, seed=seed) * keras.ops.sqrt(keras.ops.abs(step_size)) # Perform integration step _state, _time, _ = step_fn(_state, _time, step_size, noise=_noise) From 1a970c282e2710a6c5cffec35e0950c2d3bbfebd Mon Sep 17 00:00:00 2001 From: arrjon Date: Fri, 25 Apr 2025 13:55:49 +0200 Subject: [PATCH 34/52] seed in stochastic sampler --- bayesflow/utils/integrate.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bayesflow/utils/integrate.py b/bayesflow/utils/integrate.py index 1abaab274..8a0bdfe64 100644 --- a/bayesflow/utils/integrate.py +++ b/bayesflow/utils/integrate.py @@ -383,7 +383,7 @@ def integrate_stochastic( stop_time: ArrayLike, steps: int, method: str = "euler_maruyama", - seed: int = None, + seed: int | keras.random.SeedGenerator = None, **kwargs, ) -> Union[dict[str, ArrayLike], tuple[dict[str, ArrayLike], dict[str, List[ArrayLike]]]]: """ From ebafc5e85e7701edefb26e5dd02a14d4ae011dbc Mon Sep 17 00:00:00 2001 From: arrjon Date: Fri, 25 Apr 2025 14:16:36 +0200 
Subject: [PATCH 35/52] seed in stochastic sampler --- bayesflow/experimental/diffusion_model.py | 6 ++---- bayesflow/utils/integrate.py | 8 ++++---- 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/bayesflow/experimental/diffusion_model.py b/bayesflow/experimental/diffusion_model.py index 6da2319b0..c4fc52fb4 100644 --- a/bayesflow/experimental/diffusion_model.py +++ b/bayesflow/experimental/diffusion_model.py @@ -7,7 +7,6 @@ from bayesflow.utils.serialization import serialize, deserialize, serializable from bayesflow.types import Tensor, Shape -import bayesflow as bf from bayesflow.networks import InferenceNetwork import math @@ -334,7 +333,7 @@ def derivative_log_snr(self, log_snr_t: Tensor, training: bool) -> Tensor: def get_weights_for_snr(self, log_snr_t: Tensor) -> Tensor: """Get weights for the signal-to-noise ratio (snr) for a given log signal-to-noise ratio (lambda).""" - return (ops.exp(-log_snr_t) + ops.square(self.sigma_data)) / ops.square(self.sigma_data) + return ops.exp(-log_snr_t) + ops.square(self.sigma_data) # / ops.square(self.sigma_data) def get_config(self): return dict(sigma_data=self.sigma_data, sigma_min=self.sigma_min, sigma_max=self.sigma_max) @@ -403,7 +402,7 @@ def __init__( **kwargs Additional keyword arguments passed to the subnet and other components. 
""" - super().__init__(base_distribution=None, **kwargs) + super().__init__(base_distribution="normal", **kwargs) if isinstance(noise_schedule, str): if noise_schedule == "linear": @@ -433,7 +432,6 @@ def __init__( self._clip_max = 5.0 # latent distribution (not configurable) - self.base_distribution = bf.distributions.DiagonalNormal() self.integrate_kwargs = self.INTEGRATE_DEFAULT_CONFIG | (integrate_kwargs or {}) self.seed_generator = keras.random.SeedGenerator() diff --git a/bayesflow/utils/integrate.py b/bayesflow/utils/integrate.py index 8a0bdfe64..027df35cf 100644 --- a/bayesflow/utils/integrate.py +++ b/bayesflow/utils/integrate.py @@ -423,18 +423,18 @@ def integrate_stochastic( time = start_time def body(_loop_var, _loop_state): - _state, _time = _loop_state + _state, _time, _seed = _loop_state # Generate noise for this step _noise = {} for key in _state.keys(): shape = keras.ops.shape(_state[key]) - _noise[key] = keras.random.normal(shape, seed=seed) * keras.ops.sqrt(keras.ops.abs(step_size)) + _noise[key] = keras.random.normal(shape, seed=_seed) * keras.ops.sqrt(keras.ops.abs(step_size)) # Perform integration step _state, _time, _ = step_fn(_state, _time, step_size, noise=_noise) - return _state, _time + return _state, _time, _seed - state, time = keras.ops.fori_loop(0, steps, body, (state, time)) + state, time = keras.ops.fori_loop(0, steps, body, (state, time, seed)) return state From 9941fa33b11992930a2acefc5cf94b44e25d7f1c Mon Sep 17 00:00:00 2001 From: arrjon Date: Fri, 25 Apr 2025 14:24:41 +0200 Subject: [PATCH 36/52] seed in stochastic sampler --- bayesflow/utils/integrate.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bayesflow/utils/integrate.py b/bayesflow/utils/integrate.py index 027df35cf..addb7e101 100644 --- a/bayesflow/utils/integrate.py +++ b/bayesflow/utils/integrate.py @@ -383,7 +383,7 @@ def integrate_stochastic( stop_time: ArrayLike, steps: int, method: str = "euler_maruyama", - seed: int | 
keras.random.SeedGenerator = None, + seed: keras.random.SeedGenerator = None, **kwargs, ) -> Union[dict[str, ArrayLike], tuple[dict[str, ArrayLike], dict[str, List[ArrayLike]]]]: """ @@ -428,8 +428,8 @@ def body(_loop_var, _loop_state): # Generate noise for this step _noise = {} for key in _state.keys(): - shape = keras.ops.shape(_state[key]) - _noise[key] = keras.random.normal(shape, seed=_seed) * keras.ops.sqrt(keras.ops.abs(step_size)) + _eps = keras.random.normal(keras.ops.shape(_state[key]), dtype=keras.ops.dtype(_state[key]), seed=_seed) + _noise[key] = _eps * keras.ops.sqrt(keras.ops.abs(step_size)) # Perform integration step _state, _time, _ = step_fn(_state, _time, step_size, noise=_noise) From afaebef2eb6462288e00c1b518530e61acc94817 Mon Sep 17 00:00:00 2001 From: arrjon Date: Fri, 25 Apr 2025 14:28:45 +0200 Subject: [PATCH 37/52] seed in stochastic sampler --- bayesflow/utils/integrate.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bayesflow/utils/integrate.py b/bayesflow/utils/integrate.py index addb7e101..f07f154b7 100644 --- a/bayesflow/utils/integrate.py +++ b/bayesflow/utils/integrate.py @@ -423,18 +423,18 @@ def integrate_stochastic( time = start_time def body(_loop_var, _loop_state): - _state, _time, _seed = _loop_state + _state, _time = _loop_state # Generate noise for this step _noise = {} for key in _state.keys(): - _eps = keras.random.normal(keras.ops.shape(_state[key]), dtype=keras.ops.dtype(_state[key]), seed=_seed) + _eps = keras.random.normal(keras.ops.shape(_state[key]), dtype=keras.ops.dtype(_state[key]), seed=seed) _noise[key] = _eps * keras.ops.sqrt(keras.ops.abs(step_size)) # Perform integration step _state, _time, _ = step_fn(_state, _time, step_size, noise=_noise) - return _state, _time, _seed + return _state, _time state, time = keras.ops.fori_loop(0, steps, body, (state, time, seed)) return state From c1558c5ceb592147776bc06549362ca35c05b88a Mon Sep 17 00:00:00 2001 From: arrjon Date: Fri, 25 Apr 2025 
14:30:16 +0200 Subject: [PATCH 38/52] seed in stochastic sampler --- bayesflow/utils/integrate.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bayesflow/utils/integrate.py b/bayesflow/utils/integrate.py index f07f154b7..fd37f6fb9 100644 --- a/bayesflow/utils/integrate.py +++ b/bayesflow/utils/integrate.py @@ -436,5 +436,5 @@ def body(_loop_var, _loop_state): return _state, _time - state, time = keras.ops.fori_loop(0, steps, body, (state, time, seed)) + state, time = keras.ops.fori_loop(0, steps, body, (state, time)) return state From 1efd88fc551f4c7f32b87d0916080c2744a48ae5 Mon Sep 17 00:00:00 2001 From: LarsKue Date: Fri, 25 Apr 2025 17:21:13 -0400 Subject: [PATCH 39/52] fix is_symbolic_tensor --- bayesflow/utils/tensor_utils.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/bayesflow/utils/tensor_utils.py b/bayesflow/utils/tensor_utils.py index 4d89249b7..72d83076c 100644 --- a/bayesflow/utils/tensor_utils.py +++ b/bayesflow/utils/tensor_utils.py @@ -97,9 +97,6 @@ def is_symbolic_tensor(x: Tensor) -> bool: if keras.utils.is_keras_tensor(x): return True - if not keras.ops.is_tensor(x): - return False - match keras.backend.backend(): case "jax": import jax From 7456cdb5097539c0a6dadbe7a28634c262cb3c9c Mon Sep 17 00:00:00 2001 From: LarsKue Date: Fri, 25 Apr 2025 17:22:16 -0400 Subject: [PATCH 40/52] [skip ci] skip step_fn for tracing (dangerous, subject to removal) --- bayesflow/utils/integrate.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/bayesflow/utils/integrate.py b/bayesflow/utils/integrate.py index fd37f6fb9..5e8768645 100644 --- a/bayesflow/utils/integrate.py +++ b/bayesflow/utils/integrate.py @@ -8,6 +8,8 @@ from bayesflow.types import Tensor from bayesflow.utils import filter_kwargs + +from .tensor_utils import is_symbolic_tensor from . 
import logging ArrayLike = int | float | Tensor @@ -425,6 +427,9 @@ def integrate_stochastic( def body(_loop_var, _loop_state): _state, _time = _loop_state + if any(is_symbolic_tensor(v) for v in _state.values()): + return _state, _time + # Generate noise for this step _noise = {} for key in _state.keys(): From a722729a6db24541924475c9e28209bcf0cfcc3a Mon Sep 17 00:00:00 2001 From: arrjon Date: Sat, 26 Apr 2025 11:56:57 +0200 Subject: [PATCH 41/52] seed in stochastic sampler --- bayesflow/utils/integrate.py | 66 +++++++----------------------------- 1 file changed, 13 insertions(+), 53 deletions(-) diff --git a/bayesflow/utils/integrate.py b/bayesflow/utils/integrate.py index 5e8768645..008698489 100644 --- a/bayesflow/utils/integrate.py +++ b/bayesflow/utils/integrate.py @@ -9,7 +9,6 @@ from bayesflow.types import Tensor from bayesflow.utils import filter_kwargs -from .tensor_utils import is_symbolic_tensor from . import logging ArrayLike = int | float | Tensor @@ -303,11 +302,7 @@ def euler_maruyama_step( state: dict[str, ArrayLike], time: ArrayLike, step_size: ArrayLike, - noise: dict[str, ArrayLike], - tolerance: ArrayLike = 1e-6, - min_step_size: ArrayLike = -float("inf"), - max_step_size: ArrayLike = float("inf"), - use_adaptive_step_size: bool = False, + seed: keras.random.SeedGenerator, ) -> (dict[str, ArrayLike], ArrayLike, ArrayLike): """ Performs a single Euler-Maruyama step for stochastic differential equations. @@ -318,11 +313,7 @@ def euler_maruyama_step( state: Dictionary containing the current state. time: Current time. step_size: Size of the integration step. - noise: Dictionary of noise terms for each state variable. - tolerance: Error tolerance for adaptive step size. - min_step_size: Minimum allowed step size. - max_step_size: Maximum allowed step size. - use_adaptive_step_size: Whether to use adaptive step sizing. + seed: Random seed for noise generation. Returns: Tuple of (new_state, new_time, new_step_size). 
@@ -333,36 +324,16 @@ def euler_maruyama_step( # Compute diffusion term diffusion = diffusion_fn(time, **filter_kwargs(state, diffusion_fn)) + # Generate noise for this step + noise = {} + for key in state.keys(): + eps = keras.random.normal(keras.ops.shape(state[key]), dtype=keras.ops.dtype(state[key]), seed=seed) + noise[key] = eps * keras.ops.sqrt(keras.ops.abs(step_size)) + # Check if diffusion and noise have the same keys if set(diffusion.keys()) != set(noise.keys()): raise ValueError("Keys of diffusion terms and noise do not match.") - if use_adaptive_step_size: - # Perform a half-step to estimate error - intermediate_state = state.copy() - for key in drift.keys(): - intermediate_state[key] = state[key] + (step_size * drift[key]) + (diffusion[key] * noise[key]) - - # Compute drift and diffusion at intermediate state - intermediate_drift = drift_fn(time + step_size, **filter_kwargs(intermediate_state, drift_fn)) - - # Compute error estimate - error_terms = [] - for key in drift.keys(): - error = keras.ops.norm(intermediate_drift[key] - drift[key], ord=2, axis=-1) - error_terms.append(error) - - intermediate_error = keras.ops.stack(error_terms) - new_step_size = step_size * tolerance / (intermediate_error + 1e-9) - - # Apply constraints to step size - new_step_size = keras.ops.clip(new_step_size, min_step_size, max_step_size) - - # Consolidate step size - new_step_size = keras.ops.take(new_step_size, keras.ops.argmin(keras.ops.abs(new_step_size))) - else: - new_step_size = step_size - # Apply updates using Euler-Maruyama formula: dx = f(x)dt + g(x)dW new_state = state.copy() for key in drift.keys(): @@ -374,7 +345,7 @@ def euler_maruyama_step( new_time = time + step_size - return new_state, new_time, new_step_size + return new_state, new_time def integrate_stochastic( @@ -384,8 +355,8 @@ def integrate_stochastic( start_time: ArrayLike, stop_time: ArrayLike, steps: int, + seed: keras.random.SeedGenerator, method: str = "euler_maruyama", - seed: 
keras.random.SeedGenerator = None, **kwargs, ) -> Union[dict[str, ArrayLike], tuple[dict[str, ArrayLike], dict[str, List[ArrayLike]]]]: """ @@ -398,8 +369,8 @@ def integrate_stochastic( start_time: Starting time for integration. stop_time: Ending time for integration. steps: Number of integration steps. - method: Integration method to use ('euler_maruyama'). seed: Random seed for noise generation. + method: Integration method to use ('euler_maruyama'). **kwargs: Additional arguments to pass to the step function. Returns: @@ -419,25 +390,14 @@ def integrate_stochastic( raise TypeError(f"Invalid integration method: {other!r}") # Prepare step function with partial application - step_fn = partial(step_fn, drift_fn, diffusion_fn, **kwargs) + step_fn = partial(step_fn, drift_fn=drift_fn, diffusion_fn=diffusion_fn, seed=seed, **kwargs) step_size = (stop_time - start_time) / steps time = start_time def body(_loop_var, _loop_state): _state, _time = _loop_state - - if any(is_symbolic_tensor(v) for v in _state.values()): - return _state, _time - - # Generate noise for this step - _noise = {} - for key in _state.keys(): - _eps = keras.random.normal(keras.ops.shape(_state[key]), dtype=keras.ops.dtype(_state[key]), seed=seed) - _noise[key] = _eps * keras.ops.sqrt(keras.ops.abs(step_size)) - - # Perform integration step - _state, _time, _ = step_fn(_state, _time, step_size, noise=_noise) + _state, _time = step_fn(_state, _time, step_size) return _state, _time From ee0c87b007c0f2c070909604fa284145c6b71068 Mon Sep 17 00:00:00 2001 From: arrjon Date: Sat, 26 Apr 2025 11:57:58 +0200 Subject: [PATCH 42/52] seed in stochastic sampler --- bayesflow/utils/integrate.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bayesflow/utils/integrate.py b/bayesflow/utils/integrate.py index 008698489..6af03fdeb 100644 --- a/bayesflow/utils/integrate.py +++ b/bayesflow/utils/integrate.py @@ -397,7 +397,7 @@ def integrate_stochastic( def body(_loop_var, _loop_state): _state, _time = 
_loop_state - _state, _time = step_fn(_state, _time, step_size) + _state, _time = step_fn(state=_state, time=_time, step_size=step_size) return _state, _time From f2cbde654e786c939094a1cb23915780add82e66 Mon Sep 17 00:00:00 2001 From: arrjon Date: Mon, 28 Apr 2025 18:44:41 +0200 Subject: [PATCH 43/52] fix loss --- bayesflow/experimental/diffusion_model.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/bayesflow/experimental/diffusion_model.py b/bayesflow/experimental/diffusion_model.py index c4fc52fb4..e1840566a 100644 --- a/bayesflow/experimental/diffusion_model.py +++ b/bayesflow/experimental/diffusion_model.py @@ -373,6 +373,7 @@ def __init__( subnet_kwargs: dict[str, any] = None, noise_schedule: str | NoiseSchedule = "cosine", prediction_type: str = "velocity", + loss_type: str = "noise", **kwargs, ): """ @@ -726,6 +727,7 @@ def compute_metrics( alpha_t, sigma_t = self.noise_schedule.get_alpha_sigma( log_snr_t=log_snr_t, training=noise_schedule_training_stage ) + weights_for_snr = self.noise_schedule.get_weights_for_snr(log_snr_t=log_snr_t) # generate noise vector eps_t = keras.random.normal(ops.shape(x), dtype=ops.dtype(x), seed=self.seed_generator) @@ -743,11 +745,10 @@ def compute_metrics( x_pred = self.convert_prediction_to_x( pred=pred, z=diffused_x, alpha_t=alpha_t, sigma_t=sigma_t, log_snr_t=log_snr_t, clip_x=False ) - # convert x to epsilon prediction - noise_pred = (alpha_t * diffused_x - x_pred) / sigma_t - # Calculate loss based on noise prediction - weights_for_snr = self.noise_schedule.get_weights_for_snr(log_snr_t=log_snr_t) + # convert x to epsilon prediction + noise_pred = (diffused_x - alpha_t * x_pred) / sigma_t + # Calculate loss loss = weights_for_snr * ops.mean((noise_pred - eps_t) ** 2, axis=-1) # apply sample weight From 7b7b15a27953007ac6da628f7a798bfb8cd8a981 Mon Sep 17 00:00:00 2001 From: arrjon Date: Mon, 28 Apr 2025 18:50:38 +0200 Subject: [PATCH 44/52] fix loss --- 
bayesflow/experimental/diffusion_model.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/bayesflow/experimental/diffusion_model.py b/bayesflow/experimental/diffusion_model.py index e1840566a..eaeb8acf5 100644 --- a/bayesflow/experimental/diffusion_model.py +++ b/bayesflow/experimental/diffusion_model.py @@ -333,7 +333,7 @@ def derivative_log_snr(self, log_snr_t: Tensor, training: bool) -> Tensor: def get_weights_for_snr(self, log_snr_t: Tensor) -> Tensor: """Get weights for the signal-to-noise ratio (snr) for a given log signal-to-noise ratio (lambda).""" - return ops.exp(-log_snr_t) + ops.square(self.sigma_data) # / ops.square(self.sigma_data) + return ops.exp(-log_snr_t) / ops.square(self.sigma_data) + 1 def get_config(self): return dict(sigma_data=self.sigma_data, sigma_min=self.sigma_min, sigma_max=self.sigma_max) @@ -373,7 +373,6 @@ def __init__( subnet_kwargs: dict[str, any] = None, noise_schedule: str | NoiseSchedule = "cosine", prediction_type: str = "velocity", - loss_type: str = "noise", **kwargs, ): """ From 1811038b680f6c0513955e0dde2ae6e28ca2ddc0 Mon Sep 17 00:00:00 2001 From: arrjon Date: Mon, 28 Apr 2025 22:25:03 +0200 Subject: [PATCH 45/52] improve schedules --- bayesflow/experimental/diffusion_model.py | 50 ++++++++++++++++------- 1 file changed, 36 insertions(+), 14 deletions(-) diff --git a/bayesflow/experimental/diffusion_model.py b/bayesflow/experimental/diffusion_model.py index eaeb8acf5..fd4b30bdc 100644 --- a/bayesflow/experimental/diffusion_model.py +++ b/bayesflow/experimental/diffusion_model.py @@ -4,6 +4,7 @@ import keras from keras import ops import warnings +from enum import Enum from bayesflow.utils.serialization import serialize, deserialize, serializable from bayesflow.types import Tensor, Shape @@ -21,6 +22,11 @@ ) +class VarianceType(Enum): + PRESERVING = "preserving" + EXPLODING = "exploding" + + @serializable class NoiseSchedule(ABC): r"""Noise schedule for diffusion models. We follow the notation from [1]. 
@@ -39,7 +45,7 @@ class NoiseSchedule(ABC): Augmentation: Kingma et al. (2023) """ - def __init__(self, name: str, variance_type: str): + def __init__(self, name: str, variance_type: VarianceType): self.name = name self.variance_type = variance_type # 'exploding' or 'preserving' self._log_snr_min = -15 # should be set in the subclasses @@ -75,9 +81,9 @@ def get_drift_diffusion(self, log_snr_t: Tensor, x: Tensor = None, training: boo beta = self.derivative_log_snr(log_snr_t=log_snr_t, training=training) if x is None: # return g^2 only return beta - if self.variance_type == "preserving": + if self.variance_type == VarianceType.PRESERVING: f = -0.5 * beta * x - elif self.variance_type == "exploding": + elif self.variance_type == VarianceType.EXPLODING: f = ops.zeros_like(beta) else: raise ValueError(f"Unknown variance type: {self.variance_type}") @@ -91,11 +97,11 @@ def get_alpha_sigma(self, log_snr_t: Tensor, training: bool) -> tuple[Tensor, Te sigma(t) = sqrt(sigmoid(-log_snr_t)) For a variance exploding schedule, one should set alpha^2 = 1 and sigma^2 = exp(-lambda) """ - if self.variance_type == "preserving": + if self.variance_type == VarianceType.PRESERVING: # variance preserving schedule alpha_t = ops.sqrt(ops.sigmoid(log_snr_t)) sigma_t = ops.sqrt(ops.sigmoid(-log_snr_t)) - elif self.variance_type == "exploding": + elif self.variance_type == VarianceType.EXPLODING: # variance exploding schedule alpha_t = ops.ones_like(log_snr_t) sigma_t = ops.sqrt(ops.exp(-log_snr_t)) @@ -132,6 +138,16 @@ def validate(self): raise ValueError("t(0) must be finite.") if not ops.isfinite(self.get_t_from_log_snr(self._log_snr_min, training=training)): raise ValueError("t(1) must be finite.") + if ( + not self.get_log_snr(self.get_t_from_log_snr(self._log_snr_max, training=training), training=training) + == self._log_snr_max + ): + raise ValueError("RoundTrip snr_max -> t -> snr_max failed.") + if ( + not self.get_log_snr(self.get_t_from_log_snr(self._log_snr_min, 
training=training), training=training) + == self._log_snr_min + ): + raise ValueError("RoundTrip snr_min -> t -> snr_min failed.") if not ops.isfinite(self.derivative_log_snr(self._log_snr_max, training=False)): raise ValueError("dt/t log_snr(0) must be finite.") if not ops.isfinite(self.derivative_log_snr(self._log_snr_min, training=False)): @@ -148,16 +164,19 @@ class LinearNoiseSchedule(NoiseSchedule): """ def __init__(self, min_log_snr: float = -15, max_log_snr: float = 15): - super().__init__(name="linear_noise_schedule", variance_type="preserving") + super().__init__(name="linear_noise_schedule", variance_type=VarianceType.PRESERVING) self._log_snr_min = min_log_snr self._log_snr_max = max_log_snr self._t_min = self.get_t_from_log_snr(log_snr_t=self._log_snr_max, training=True) self._t_max = self.get_t_from_log_snr(log_snr_t=self._log_snr_min, training=True) + def _truncated_t(self, t: Tensor) -> Tensor: + return self._t_min + (self._t_max - self._t_min) * t + def get_log_snr(self, t: Union[float, Tensor], training: bool) -> Tensor: """Get the log signal-to-noise ratio (lambda) for a given diffusion time.""" - t_trunc = self._t_min + (self._t_max - self._t_min) * t + t_trunc = self._truncated_t(t) # SNR = -log(exp(t^2) - 1) # equivalent, but more stable: -t^2 - log(1 - exp(-t^2)) return -ops.square(t_trunc) - ops.log(1 - ops.exp(-ops.square(t_trunc))) @@ -165,14 +184,14 @@ def get_log_snr(self, t: Union[float, Tensor], training: bool) -> Tensor: def get_t_from_log_snr(self, log_snr_t: Union[float, Tensor], training: bool) -> Tensor: """Get the diffusion time (t) from the log signal-to-noise ratio (lambda).""" # SNR = -log(exp(t^2) - 1) => t = sqrt(log(1 + exp(-snr))) - return ops.sqrt(ops.log(1 + ops.exp(-log_snr_t))) + return ops.sqrt(ops.softplus(-log_snr_t)) def derivative_log_snr(self, log_snr_t: Tensor, training: bool) -> Tensor: """Compute d/dt log(1 + e^(-snr(t))), which is used for the reverse SDE.""" t = self.get_t_from_log_snr(log_snr_t=log_snr_t, 
training=training) # Compute the truncated time t_trunc - t_trunc = self._t_min + (self._t_max - self._t_min) * t + t_trunc = self._truncated_t(t) dsnr_dx = -2 * t_trunc / (1 - ops.exp(-(t_trunc**2))) # Using the chain rule on f(t) = log(1 + e^(-snr(t))): @@ -206,7 +225,7 @@ class CosineNoiseSchedule(NoiseSchedule): """ def __init__(self, min_log_snr: float = -15, max_log_snr: float = 15, s_shift_cosine: float = 0.0): - super().__init__(name="cosine_noise_schedule", variance_type="preserving") + super().__init__(name="cosine_noise_schedule", variance_type=VarianceType.PRESERVING) self._s_shift_cosine = s_shift_cosine self._log_snr_min = min_log_snr self._log_snr_max = max_log_snr @@ -215,9 +234,12 @@ def __init__(self, min_log_snr: float = -15, max_log_snr: float = 15, s_shift_co self._t_min = self.get_t_from_log_snr(log_snr_t=self._log_snr_max, training=True) self._t_max = self.get_t_from_log_snr(log_snr_t=self._log_snr_min, training=True) + def _truncated_t(self, t: Tensor) -> Tensor: + return self._t_min + (self._t_max - self._t_min) * t + def get_log_snr(self, t: Union[float, Tensor], training: bool) -> Tensor: """Get the log signal-to-noise ratio (lambda) for a given diffusion time.""" - t_trunc = self._t_min + (self._t_max - self._t_min) * t + t_trunc = self._truncated_t(t) # SNR = -2 * log(tan(pi*t/2)) return -2 * ops.log(ops.tan(math.pi * t_trunc / 2)) + 2 * self._s_shift_cosine @@ -231,7 +253,7 @@ def derivative_log_snr(self, log_snr_t: Tensor, training: bool) -> Tensor: t = self.get_t_from_log_snr(log_snr_t=log_snr_t, training=training) # Compute the truncated time t_trunc - t_trunc = self._t_min + (self._t_max - self._t_min) * t + t_trunc = self._truncated_t(t) dsnr_dx = -(2 * math.pi) / ops.sin(math.pi * t_trunc) # Using the chain rule on f(t) = log(1 + e^(-snr(t))): @@ -263,8 +285,8 @@ class EDMNoiseSchedule(NoiseSchedule): [1] Elucidating the Design Space of Diffusion-Based Generative Models: Karras et al. 
(2022) """ - def __init__(self, sigma_data: float = 0.5, sigma_min: float = 0.002, sigma_max: float = 80.0): - super().__init__(name="edm_noise_schedule", variance_type="exploding") + def __init__(self, sigma_data: float = 1.0, sigma_min: float = 0.002, sigma_max: float = 80.0): + super().__init__(name="edm_noise_schedule", variance_type=VarianceType.EXPLODING) self.sigma_data = sigma_data # training settings self.p_mean = -1.2 From 9d132646805b8dbc0672af54e1d0ef7210f63d9b Mon Sep 17 00:00:00 2001 From: arrjon Date: Mon, 28 Apr 2025 22:30:04 +0200 Subject: [PATCH 46/52] improve schedules --- bayesflow/experimental/diffusion_model.py | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/bayesflow/experimental/diffusion_model.py b/bayesflow/experimental/diffusion_model.py index fd4b30bdc..74ae07d1b 100644 --- a/bayesflow/experimental/diffusion_model.py +++ b/bayesflow/experimental/diffusion_model.py @@ -138,16 +138,6 @@ def validate(self): raise ValueError("t(0) must be finite.") if not ops.isfinite(self.get_t_from_log_snr(self._log_snr_min, training=training)): raise ValueError("t(1) must be finite.") - if ( - not self.get_log_snr(self.get_t_from_log_snr(self._log_snr_max, training=training), training=training) - == self._log_snr_max - ): - raise ValueError("RoundTrip snr_max -> t -> snr_max failed.") - if ( - not self.get_log_snr(self.get_t_from_log_snr(self._log_snr_min, training=training), training=training) - == self._log_snr_min - ): - raise ValueError("RoundTrip snr_min -> t -> snr_min failed.") if not ops.isfinite(self.derivative_log_snr(self._log_snr_max, training=False)): raise ValueError("dt/t log_snr(0) must be finite.") if not ops.isfinite(self.derivative_log_snr(self._log_snr_min, training=False)): From 4e0b7f82e9e3c72e1b54a59e7e0162dc54db9c8f Mon Sep 17 00:00:00 2001 From: arrjon Date: Mon, 28 Apr 2025 23:38:48 +0200 Subject: [PATCH 47/52] improve edm --- bayesflow/experimental/diffusion_model.py | 12 ++++++------ 1 file changed, 6 
insertions(+), 6 deletions(-) diff --git a/bayesflow/experimental/diffusion_model.py b/bayesflow/experimental/diffusion_model.py index 74ae07d1b..bb38a9d9e 100644 --- a/bayesflow/experimental/diffusion_model.py +++ b/bayesflow/experimental/diffusion_model.py @@ -275,8 +275,8 @@ class EDMNoiseSchedule(NoiseSchedule): [1] Elucidating the Design Space of Diffusion-Based Generative Models: Karras et al. (2022) """ - def __init__(self, sigma_data: float = 1.0, sigma_min: float = 0.002, sigma_max: float = 80.0): - super().__init__(name="edm_noise_schedule", variance_type=VarianceType.EXPLODING) + def __init__(self, sigma_data: float = 1.0, sigma_min: float = 1e-4, sigma_max: float = 80.0): + super().__init__(name="edm_noise_schedule", variance_type=VarianceType.PRESERVING) self.sigma_data = sigma_data # training settings self.p_mean = -1.2 @@ -297,10 +297,10 @@ def __init__(self, sigma_data: float = 1.0, sigma_min: float = 0.002, sigma_max: def get_log_snr(self, t: Union[float, Tensor], training: bool) -> Tensor: """Get the log signal-to-noise ratio (lambda) for a given diffusion time.""" if training: - # SNR = -dist.icdf(t_trunc) + # SNR = -dist.icdf(t_trunc) # negative seems to be wrong in the paper in the Kingma paper loc = -2 * self.p_mean scale = 2 * self.p_std - snr = -(loc + scale * ops.erfinv(2 * t - 1) * math.sqrt(2)) + snr = loc + scale * ops.erfinv(2 * t - 1) * math.sqrt(2) snr = ops.clip(snr, x_min=self._log_snr_min_training, x_max=self._log_snr_max_training) else: # sampling sigma_min_rho = self.sigma_min ** (1 / self.rho) @@ -311,10 +311,10 @@ def get_log_snr(self, t: Union[float, Tensor], training: bool) -> Tensor: def get_t_from_log_snr(self, log_snr_t: Union[float, Tensor], training: bool) -> Tensor: """Get the diffusion time (t) from the log signal-to-noise ratio (lambda).""" if training: - # SNR = -dist.icdf(t_trunc) => t = dist.cdf(-snr) + # SNR = -dist.icdf(t_trunc) => t = dist.cdf(-snr) # negative seems to be wrong in the Kingma paper loc = -2 * 
self.p_mean scale = 2 * self.p_std - x = -log_snr_t + x = log_snr_t t = 0.5 * (1 + ops.erf((x - loc) / (scale * math.sqrt(2.0)))) else: # sampling # SNR = -2 * rho * log(sigma_max ** (1/rho) + (1 - t) * (sigma_min ** (1/rho) - sigma_max ** (1/rho))) From a028e8a20e0a3ce7aaa7a17eec23fc6d016ca9fd Mon Sep 17 00:00:00 2001 From: Valentin Pratz Date: Tue, 29 Apr 2025 08:53:15 +0000 Subject: [PATCH 48/52] temporary: add notebook to compare implementations --- .../Two_Moons_Diffusion_Comparison.ipynb | 1370 +++++++++++++++++ 1 file changed, 1370 insertions(+) create mode 100644 examples/experimental/Two_Moons_Diffusion_Comparison.ipynb diff --git a/examples/experimental/Two_Moons_Diffusion_Comparison.ipynb b/examples/experimental/Two_Moons_Diffusion_Comparison.ipynb new file mode 100644 index 000000000..244c32613 --- /dev/null +++ b/examples/experimental/Two_Moons_Diffusion_Comparison.ipynb @@ -0,0 +1,1370 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "009b6adf", + "metadata": {}, + "source": [ + "# Two Moons: Tackling Bimodal Posteriors\n", + "\n", + "_Authors: Lars Kühmichel, Marvin Schmitt, Valentin Pratz, Stefan T. 
Radev_" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "1b50a364-4043-42cf-a7d4-267dd72a1345", + "metadata": {}, + "outputs": [], + "source": [ + "%load_ext autoreload\n", + "%autoreload 2" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "d5f88a59", + "metadata": { + "ExecuteTime": { + "end_time": "2025-04-11T19:54:01.403328Z", + "start_time": "2025-04-11T19:53:24.823026Z" + } + }, + "outputs": [], + "source": [ + "import os\n", + "# Set to your favorite backend\n", + "if \"KERAS_BACKEND\" not in os.environ:\n", + " # set this to \"torch\", \"tensorflow\", or \"jax\"\n", + " os.environ[\"KERAS_BACKEND\"] = \"tensorflow\"\n", + "else:\n", + " print(f\"Using '{os.environ['KERAS_BACKEND']}' backend\")" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "0551e46f", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2025-04-29 08:45:08.361053: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:477] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n", + "WARNING: All log messages before absl::InitializeLog() is called are written to STDERR\n", + "E0000 00:00:1745916308.373731 71278 cuda_dnn.cc:8310] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n", + "E0000 00:00:1745916308.378030 71278 cuda_blas.cc:1418] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n", + "2025-04-29 08:45:08.393220: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n", + "To enable the following instructions: AVX2 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n", + "2025-04-29 08:45:09.884415: E 
external/local_xla/xla/stream_executor/cuda/cuda_driver.cc:152] failed call to cuInit: INTERNAL: CUDA error: Failed call to cuInit: UNKNOWN ERROR (303)\n" + ] + } + ], + "source": [ + "import numpy as np\n", + "\n", + "import matplotlib.pyplot as plt\n", + "import seaborn as sns\n", + "\n", + "import bayesflow as bf" + ] + }, + { + "cell_type": "markdown", + "id": "c63b26ba", + "metadata": {}, + "source": [ + "## Simulator" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "4b89c861527c13b8", + "metadata": { + "ExecuteTime": { + "end_time": "2024-10-24T08:36:22.305265Z", + "start_time": "2024-10-24T08:36:22.301546Z" + } + }, + "outputs": [], + "source": [ + "from bayesflow.simulators.benchmark_simulators import TwoMoons\n", + "\n", + "simulator = TwoMoons()" + ] + }, + { + "cell_type": "markdown", + "id": "f6e1eb5777c59eba", + "metadata": {}, + "source": [ + "Let's generate some data to see what the simulator does:" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "e6218e61d529e357", + "metadata": { + "ExecuteTime": { + "end_time": "2024-10-24T08:36:22.350483Z", + "start_time": "2024-10-24T08:36:22.345161Z" + } + }, + "outputs": [], + "source": [ + "# generate 3 random draws from the joint distribution p(r, alpha, theta, x)\n", + "sample_data = simulator.sample(3)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "46174ccb0167026c", + "metadata": { + "ExecuteTime": { + "end_time": "2024-10-24T08:36:22.470435Z", + "start_time": "2024-10-24T08:36:22.464836Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Type of sample_data:\n", + "\t \n", + "Keys of sample_data:\n", + "\t dict_keys(['parameters', 'observables'])\n", + "Types of sample_data values:\n", + "\t {'parameters': , 'observables': }\n", + "Shapes of sample_data values:\n", + "\t {'parameters': (3, 2), 'observables': (3, 2)}\n" + ] + } + ], + "source": [ + "print(\"Type of sample_data:\\n\\t\", 
type(sample_data))\n", + "print(\"Keys of sample_data:\\n\\t\", sample_data.keys())\n", + "print(\"Types of sample_data values:\\n\\t\", {k: type(v) for k, v in sample_data.items()})\n", + "print(\"Shapes of sample_data values:\\n\\t\", {k: v.shape for k, v in sample_data.items()})" + ] + }, + { + "cell_type": "markdown", + "id": "17f158bd2d7abf75", + "metadata": {}, + "source": [ + "BayesFlow also provides this simulator and a collection of others in the `bayesflow.benchmarks` module." + ] + }, + { + "cell_type": "markdown", + "id": "f714c3a178b5a375", + "metadata": {}, + "source": [ + "## Adapter" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "5c9c2dc70f53d103", + "metadata": { + "ExecuteTime": { + "end_time": "2024-10-24T08:36:26.618926Z", + "start_time": "2024-10-24T08:36:26.614443Z" + } + }, + "outputs": [ + { + "data": { + "text/plain": [ + "Adapter([0: ToArray -> 1: ConvertDType -> 2: Concatenate(['parameters'] -> 'inference_variables') -> 3: Standardize(exclude=['inference_variables']) -> 4: Rename('observables' -> 'inference_conditions')])" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "adapter = (\n", + " bf.adapters.Adapter.create_default(inference_variables=[\"parameters\"])\n", + " # standardize data variables to zero mean and unit variance\n", + " .standardize(exclude=\"inference_variables\")\n", + " # rename the variables to match the required approximator inputs\n", + " .rename(\"observables\", \"inference_conditions\")\n", + ")\n", + "adapter" + ] + }, + { + "cell_type": "markdown", + "id": "254e287b2bccdad", + "metadata": {}, + "source": [ + "## Dataset\n", + "\n", + "For this example, we will sample our training data ahead of time and use offline training with a very small number of epochs. In actual applications, you usually want to train much longer in order to max our performance." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "39cb5a1c9824246f", + "metadata": { + "ExecuteTime": { + "end_time": "2024-09-23T14:39:46.950573Z", + "start_time": "2024-09-23T14:39:46.948624Z" + } + }, + "outputs": [], + "source": [ + "num_batches_per_epoch = 512\n", + "num_validation_sets = 300\n", + "batch_size = 64\n", + "epochs = 50" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "9dee7252ef99affa", + "metadata": { + "ExecuteTime": { + "end_time": "2024-09-23T14:39:53.268860Z", + "start_time": "2024-09-23T14:39:46.994697Z" + } + }, + "outputs": [], + "source": [ + "validation_data = simulator.sample(num_validation_sets)" + ] + }, + { + "cell_type": "markdown", + "id": "2d4c6eb0", + "metadata": {}, + "source": [ + "## Training a neural network to approximate all posteriors" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "09206e6f", + "metadata": { + "ExecuteTime": { + "end_time": "2024-09-23T14:39:53.339590Z", + "start_time": "2024-09-23T14:39:53.319852Z" + } + }, + "outputs": [], + "source": [ + "diffusion_model = bf.experimental.DiffusionModel(\n", + " subnet=\"mlp\",\n", + " subnet_kwargs={\"dropout\": 0.0, \"widths\": (256,)*6}, # override default dropout = 0.05 and widths = (256,)*5\n", + " noise_schedule=\"edm\",\n", + " prediction_type=\"F\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "76722c33", + "metadata": {}, + "source": [ + "### Basic Workflow\n", + "We can hide many of the traditional deep learning steps (e.g., specifying a learning rate and an optimizer) within a `Workflow` object. This object just wraps everything together and includes some nice utility functions for training and *in silico* validation." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "96ca6ffa", + "metadata": { + "ExecuteTime": { + "end_time": "2024-09-23T14:39:53.371691Z", + "start_time": "2024-09-23T14:39:53.369375Z" + } + }, + "outputs": [], + "source": [ + "diffusion_model_workflow = bf.BasicWorkflow(\n", + " simulator=simulator,\n", + " adapter=adapter,\n", + " inference_network=diffusion_model,\n", + " initial_learning_rate=1e-3,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "708b1303", + "metadata": {}, + "source": [ + "### Training\n", + "\n", + "We are ready to train our deep posterior approximator on the two moons example. We use the utility function `fit_online`, which wraps the approximator's super flexible `fit` method." + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "0f496bda", + "metadata": { + "ExecuteTime": { + "end_time": "2024-09-23T14:42:36.067393Z", + "start_time": "2024-09-23T14:39:53.513436Z" + } + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:bayesflow:Fitting on dataset instance of OnlineDataset.\n", + "INFO:bayesflow:Building on a test batch.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Epoch 1/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m4s\u001b[0m 5ms/step - loss: 3.5213 - loss/inference_loss: 3.5213 - val_loss: 1.9474 - val_loss/inference_loss: 1.9474\n", + "Epoch 2/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 1.4044 - loss/inference_loss: 1.4044 - val_loss: 0.9011 - val_loss/inference_loss: 0.9011\n", + "Epoch 3/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.8857 - loss/inference_loss: 0.8857 - val_loss: 0.5213 - val_loss/inference_loss: 0.5213\n", + "Epoch 4/50\n", + "\u001b[1m512/512\u001b[0m
\u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.7341 - loss/inference_loss: 0.7341 - val_loss: 0.7266 - val_loss/inference_loss: 0.7266\n", + "Epoch 5/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.7106 - loss/inference_loss: 0.7106 - val_loss: 0.5299 - val_loss/inference_loss: 0.5299\n", + "Epoch 6/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.6149 - loss/inference_loss: 0.6149 - val_loss: 0.3700 - val_loss/inference_loss: 0.3700\n", + "Epoch 7/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.6752 - loss/inference_loss: 0.6752 - val_loss: 0.4283 - val_loss/inference_loss: 0.4283\n", + "Epoch 8/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.6572 - loss/inference_loss: 0.6572 - val_loss: 1.1328 - val_loss/inference_loss: 1.1328\n", + "Epoch 9/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.5929 - loss/inference_loss: 0.5929 - val_loss: 1.1387 - val_loss/inference_loss: 1.1387\n", + "Epoch 10/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.6393 - loss/inference_loss: 0.6393 - val_loss: 0.4131 - val_loss/inference_loss: 0.4131\n", + "Epoch 11/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.6065 - loss/inference_loss: 0.6065 - val_loss: 1.1639 - val_loss/inference_loss: 1.1639\n", + "Epoch 12/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - 
loss: 0.6273 - loss/inference_loss: 0.6273 - val_loss: 0.4163 - val_loss/inference_loss: 0.4163\n", + "Epoch 13/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.5711 - loss/inference_loss: 0.5711 - val_loss: 0.3509 - val_loss/inference_loss: 0.3509\n", + "Epoch 14/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.6108 - loss/inference_loss: 0.6108 - val_loss: 0.6391 - val_loss/inference_loss: 0.6391\n", + "Epoch 15/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.5303 - loss/inference_loss: 0.5303 - val_loss: 0.4730 - val_loss/inference_loss: 0.4730\n", + "Epoch 16/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.5640 - loss/inference_loss: 0.5640 - val_loss: 0.5148 - val_loss/inference_loss: 0.5148\n", + "Epoch 17/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.5579 - loss/inference_loss: 0.5579 - val_loss: 0.9192 - val_loss/inference_loss: 0.9192\n", + "Epoch 18/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.5347 - loss/inference_loss: 0.5347 - val_loss: 0.4404 - val_loss/inference_loss: 0.4404\n", + "Epoch 19/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.5394 - loss/inference_loss: 0.5394 - val_loss: 0.7056 - val_loss/inference_loss: 0.7056\n", + "Epoch 20/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.5330 - loss/inference_loss: 0.5330 - val_loss: 0.6121 - 
val_loss/inference_loss: 0.6121\n", + "Epoch 21/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 5ms/step - loss: 0.5522 - loss/inference_loss: 0.5522 - val_loss: 0.7118 - val_loss/inference_loss: 0.7118\n", + "Epoch 22/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 5ms/step - loss: 0.5340 - loss/inference_loss: 0.5340 - val_loss: 0.1866 - val_loss/inference_loss: 0.1866\n", + "Epoch 23/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 5ms/step - loss: 0.5288 - loss/inference_loss: 0.5288 - val_loss: 0.4453 - val_loss/inference_loss: 0.4453\n", + "Epoch 24/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 5ms/step - loss: 0.5489 - loss/inference_loss: 0.5489 - val_loss: 0.8552 - val_loss/inference_loss: 0.8552\n", + "Epoch 25/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 5ms/step - loss: 0.5237 - loss/inference_loss: 0.5237 - val_loss: 0.3817 - val_loss/inference_loss: 0.3817\n", + "Epoch 26/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 5ms/step - loss: 0.5354 - loss/inference_loss: 0.5354 - val_loss: 0.4136 - val_loss/inference_loss: 0.4136\n", + "Epoch 27/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 5ms/step - loss: 0.4888 - loss/inference_loss: 0.4888 - val_loss: 1.0347 - val_loss/inference_loss: 1.0347\n", + "Epoch 28/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.5256 - loss/inference_loss: 0.5256 - val_loss: 1.0939 - val_loss/inference_loss: 1.0939\n", + "Epoch 29/50\n", + "\u001b[1m512/512\u001b[0m 
\u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.4705 - loss/inference_loss: 0.4705 - val_loss: 0.3689 - val_loss/inference_loss: 0.3689\n", + "Epoch 30/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.5880 - loss/inference_loss: 0.5880 - val_loss: 0.2554 - val_loss/inference_loss: 0.2554\n", + "Epoch 31/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 5ms/step - loss: 0.4782 - loss/inference_loss: 0.4782 - val_loss: 0.2805 - val_loss/inference_loss: 0.2805\n", + "Epoch 32/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.4871 - loss/inference_loss: 0.4871 - val_loss: 0.3951 - val_loss/inference_loss: 0.3951\n", + "Epoch 33/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 5ms/step - loss: 0.5094 - loss/inference_loss: 0.5094 - val_loss: 0.6404 - val_loss/inference_loss: 0.6404\n", + "Epoch 34/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.4886 - loss/inference_loss: 0.4886 - val_loss: 0.3277 - val_loss/inference_loss: 0.3277\n", + "Epoch 35/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 5ms/step - loss: 0.4680 - loss/inference_loss: 0.4680 - val_loss: 0.3643 - val_loss/inference_loss: 0.3643\n", + "Epoch 36/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 5ms/step - loss: 0.4695 - loss/inference_loss: 0.4695 - val_loss: 0.4899 - val_loss/inference_loss: 0.4899\n", + "Epoch 37/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 
5ms/step - loss: 0.4700 - loss/inference_loss: 0.4700 - val_loss: 0.2931 - val_loss/inference_loss: 0.2931\n", + "Epoch 38/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 5ms/step - loss: 0.4630 - loss/inference_loss: 0.4630 - val_loss: 1.4956 - val_loss/inference_loss: 1.4956\n", + "Epoch 39/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 5ms/step - loss: 0.4722 - loss/inference_loss: 0.4722 - val_loss: 0.4394 - val_loss/inference_loss: 0.4394\n", + "Epoch 40/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 5ms/step - loss: 0.4929 - loss/inference_loss: 0.4929 - val_loss: 0.3670 - val_loss/inference_loss: 0.3670\n", + "Epoch 41/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 5ms/step - loss: 0.5650 - loss/inference_loss: 0.5650 - val_loss: 0.3733 - val_loss/inference_loss: 0.3733\n", + "Epoch 42/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 5ms/step - loss: 0.5021 - loss/inference_loss: 0.5021 - val_loss: 0.3183 - val_loss/inference_loss: 0.3183\n", + "Epoch 43/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 5ms/step - loss: 0.4778 - loss/inference_loss: 0.4778 - val_loss: 0.4093 - val_loss/inference_loss: 0.4093\n", + "Epoch 44/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 5ms/step - loss: 0.5932 - loss/inference_loss: 0.5932 - val_loss: 0.3301 - val_loss/inference_loss: 0.3301\n", + "Epoch 45/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 5ms/step - loss: 0.4518 - loss/inference_loss: 0.4518 - val_loss: 0.4177 - 
val_loss/inference_loss: 0.4177\n", + "Epoch 46/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 5ms/step - loss: 0.4791 - loss/inference_loss: 0.4791 - val_loss: 0.2887 - val_loss/inference_loss: 0.2887\n", + "Epoch 47/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 5ms/step - loss: 0.4427 - loss/inference_loss: 0.4427 - val_loss: 0.4038 - val_loss/inference_loss: 0.4038\n", + "Epoch 48/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 5ms/step - loss: 0.4609 - loss/inference_loss: 0.4609 - val_loss: 0.3336 - val_loss/inference_loss: 0.3336\n", + "Epoch 49/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 5ms/step - loss: 0.4462 - loss/inference_loss: 0.4462 - val_loss: 0.4002 - val_loss/inference_loss: 0.4002\n", + "Epoch 50/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 5ms/step - loss: 0.4557 - loss/inference_loss: 0.4557 - val_loss: 0.4553 - val_loss/inference_loss: 0.4553\n" + ] + } + ], + "source": [ + "history = diffusion_model_workflow.fit_online(\n", + " epochs=epochs,\n", + " num_batches_per_epoch=num_batches_per_epoch,\n", + " batch_size=batch_size, \n", + " validation_data=validation_data,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "e2fbe42f-b6e8-45f3-a53a-4015fb84e78f", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "(-0.5, 0.5)" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAbEAAAGdCAYAAACcvk38AAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjAsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvlHJYcgAAAAlwSFlzAAAPYQAAD2EBqD+naQAAUpxJREFUeJzt3Xmc1PWd7/vXr9beq2l6obEbaBAQWUeMQOYqjhESiRpzkqNeA2ZyzmFijiYmmcxET86Ny33cBzdz75wTx6NOwmSSxwwxQxITszEmRKN4A6igCIS9aXuBXqq32ru23+/+UV1F73RDLxT9fj4e/cD+1e9X9a0KqTff7+/z/X4Ny7IsREREspBtqhsgIiJyqRRiIiKStRRiIiKStRRiIiKStRRiIiKStRRiIiKStRRiIiKStRRiIiKStRxT3YDxZpom58+fp7CwEMMwpro5IiIyRpZlEQgEmD17NjbbyH2tqy7Ezp8/T3V19VQ3Q0RELlNjYyNVVVUjnnPVhVhhYSGQevNFRUVT3BoRERkrv99PdXV15vt8JFddiKWHEIuKihRiIiJZbDS3hFTYISIiWUshJiIiWUshJiIiWUshJiIiWUshJiIiWUshJiIiWUshJiIiWUshJiIiWUshJiIiWUshJiIiWUshJiIiWUshJiIiWUshJiIiWUshJiIiWUshJiIiWUshJiIiWUshJiIiWUshJiIiWUshJiIiWUshJiIiWUshJiIiWUshJiIiWUshJiIiWUshJiIiWUshJiIiWUshJiIiWUshJiIiWUshJiIiWUshJiIiWWtSQuz555+npqaGnJwcVq9ezZtvvjmq6/74xz/icDhYtWrVxDZQRESy0oSH2M6dO/nyl7/MN77xDd577z1uvvlm7rjjDhoaGka8zufz8eCDD/KRj3xkopsoIiJZyrAsy5rIF1izZg033HADL7zwQubYkiVLuOeee9i2bduw191///0sXLgQu93Oyy+/zKFDh0b1en6/H4/Hg8/no6io6HKbLyIik2ws3+MT2hOLxWIcPHiQjRs39ju+ceNG9u7dO+x13//+96mtreWJJ56YyOaJiEiWc0zkk7e3t5NMJqmoqOh3vKKigpaWliGvOX36NI899hhvvvkmDsfFmxeNRolGo5nf/X7/5TVaRESyxqQUdhiG0e93y7IGHQNIJpM88MADPPXUUyxatGhUz71t2zY8Hk/mp7q6elzaLCIiV74JDbHS0lLsdvugXldbW9ug3hlAIBDgwIEDPPLIIzgcDhwOB08//TTvv/8+DoeD1157bdA1jz/+OD6fL/PT2Ng4Ye9HRESuLBM6nOhyuVi9ejW7d+/mk5/8ZOb47t27+cQnPjHo/KKiIo4cOdLv2PPPP89rr73GT3/6U2pqagZd43a7cbvd4994ERG54k1oiAF89atfZcuWLdx4442sW7eO7373uzQ0NPDQQw8BqZ7UuXPn+Jd/+RdsNhvLli3rd315eTk5OTmDjouIiEx4iN133310dHTw9NNP09zczLJly9i1axdz584FoLm5+aJzxkRERIYy4fPEJpvmiYmIZLcrZp6YiIjIRFKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIicgVo9Yb5NlXT1PrDU51UyRLKMRE5Iqx63AzOw80sutw85CPX0rIKRivbgoxEZlSfUNmeZWHuSV5LK/yDHnuxUJuvK6R7OGY6gaIyPSWDpm0+s4wR5p8AGzfc5att8zn1sXlAGxaUdnvz9G4lGtqvUF2HW5m04pKFpQVjPo6mXwKMRGZNEOFw1Ahs2lFJd98+Sj76zo
AMiG2oKyATSsqxy1ghgurvsH6xY8svKzXkImlEBORSXOxcFhQVpA5vvWW+YRjSSqKcqj1BjMhs2NfPS+920RnKMbmdXMzIZR+/oH/nX7NznCMkjxXv8Aa7rGx9N7Ua5taCjERmTRDhcPAIFle5eGNU1584TgYsLe2g5rS/AuhZ6T+8EXifPPlo9R6Q5nn6jss+cO3Gth/toOtt8wHoDMUGxSg6XbUtYd46WATneEYT9y1tF+YXox6bVNLISYikyYdDulijk0rKtm0opLOUIy3znZQ3xFmUUUhtd4gPXGTZNLEMuDZP5yiIxRlZr6b9YvKKMlzUdce4kB9J3abjX217eS7HRTlODjc1E1hjpOSfCeHGrt545SXJ+5aSq03SEm+a1DvqjMU40SLn6RpgTX293Qp99xk/CjEROSyDTekdrF7Tp2hGL5InDdPe+kKxcAAf0+ca2bk0hmM0RqIggWxBPxwfwMuh41Fswr5i8Vl/PvRZqIJC0jy1gddOGypLtrptiBYFk6HnXgyyStHWli/qIxbF5dnhheXV3k40uSjMxzjpXebMC2LVdXFbF43d8zvfSy9Nhl/CjERuWzDDakNPJ4OtXQJfV17iF+8fz7VCwJyHDYau8IkTch1DJwBZBGJJznU0M3Rcz7iydQ1TpuB3W6wpKKQ0kI3e892EIomiceS2G0GrYEe/u6VE2zfc5Y8t523znayoKwAbzDKTfNKWFBWQE1pPg/fdu2o72npPtiVQyEmIpdt4JBa37Cqaw+x62gzde0hPLlOdh9vpTMUoyTfRSAaT01WNSBpQU/cxGk3SJoWPQmT3oewgLh54fXSAQaQtCzicYuz7SHy3A4iseSFx0yLPJedaMJkf10HRTkOQtEk57sjzJ6RC4A3GOW268pZUFYw6nDSfbArh0JMRC7bwCG1XYeb+eFbDVSeyKHF10Ozv4ez3hAPrJnDhiUVvFWXuv81pyQPh90gYUKyN5jiSQun3QDLwm4zMr204aQfDkYTvFPfidNuI5q4kHiRWJKKQjfxpEmgJ0HSsmgNRPH1xGnp7mHdgpksr/Lw1K/+xP7aDhq7wqnAzXOCBZvXzR0UaLoPduVQiInIJXv9ZNugCcmQ+nLff7aDQ43dWEBlUQ7LrinKPN7QGSaaSHLGGySRtAbVUyR6A23wI8NLWpBMWICF02YQN9PPAYeaujEwiJsXwi2WMGn293C8xY/nlJMX32ogaaaCs649RK03iGnBqdYAT9+zDKBfL009sCuDQkxELtkzvz/NocZuwrEk1SV57Nhfn+m9PH3PMnbsqwcDNq+dmxmCmzMjl0gsSSqnhg6pSygS7CcxoPcWjpnYjNQ9t1jvs6dP6QrF8IXjWJbFnJI81i8uY1FFAT9+pwlvIMrbH3Ty3Gtn8OQ6M/PTnrh76WW2UMaLQkxELllNaT7Hmv3UlOazY389L77VgNNuoyTfxRc/spAn7k6Vtu/YX09TZ5iiHAcHG7pJXm5KXcRQT29aEO69sWYD7HaDeNKiPRjj7Q86MS0Lo/cG3Dt1XdR6g7gdNhJJi/ebuvFH4kRiCXyReKYHeufKStoDMRV4TCGFmIiMylBFDw/fdi2eXCcY4AvHsRmQ60zN23rtZBuPfmQhR5p87HynMVNwcTn55bAZmV5WuuAjzWZc6F1djAnMyHEQ6EkSS5o0d0ewgLPeELXeENW9RR9VxbnEkib+cAxvKI4B7Kvt4ESLn5OtARq7wphWaq5ZukFD3UOTiaNV7EVkVIZaDX5BWQEl+S52H2sFoKzQTSiWmrd1qKGb7XvOsmlFJZ4cJ6m7VRc4jEtrh9NmYDcgzzng68uCQVX5I+gMxakocuPJcZDvdnBNcW7mMV8kjtth43hrAH8kQVckAYBhQLO/h0BPgrU1M/nCrQu478ZqMOClg0289G6TVsufZOqJicio9K3Iq/UGM/e71i8qA1JzvjqCMebNzOOa4lzaQzHWzC/hiy++i4WFy2EQS1yIscQldMn
63usKxS+U4APkuuyE+pTXQ+pf6SaDGaR6bi2+CL21ICwoK6C6JI/DTd3kuey0+KMAuB02LMvEbjPId9nx9yQIRhNUFOVwqjXI5rV9JkhbqlicbAoxERlR32HEdEXes6+e5qV3mwAyC+d+7SfvA3DdrCI8eU6qZuTxh5NejjUHgFSgDBcqaQYXhgmddoPYgJtn6SFDe29yJXtXiirLd4FBvxAzgJXVxRw+5yNpWhS4HVQUuWnoCFNa4CJhWuQ47bQGenDZ7ays9vDaiTbcDht5Lgc2I0qB28Ga+SVgQXswxspqDz89eI5AJM6vD58nx2mnJK/3/t9dKvaYCgoxERnRUBN70+sd+iJxOsMxduyrp7EzTFGOk7217bT6o7gcBh9fPptoPElzdw++nvhF71mlhxxtBph9Tk4HW0meE6fdzixPDqfbAgSjqdDKcdmpLsmlO9yVKa3PddnpiSexGWCzG2y4vpwP2sMsrCjgdGuAuAllBS6WVnpo9vXwm/eb8YZi5DgM8tyOTFC+cdLLteUFtPmjnG4L0BNLggELygtYO3/mkD0vregxeXRPTERGtGlFJffdWN3vy3pBWQFP3L2UmtL81P0wAxZXFNIdidHij2IBpgmBaJw1NTP53P82D5fDwADynXZc9gtfPXlOG7lOGwvK8inKcWAABW4HLocdd+95DpuB025QXpRDdyTGoaZuovFk6v6YrXei8wddmeFGm5EaBqzrCGFaFi6HjYP1XRxq7KahM8LMAjcAHaEYpQUu7lg+i4SVujaasDjfFSFugr8nQSxp0eyLEI4nCUZTUwPmzsxn7fyZrF9Uxq7DzdR6g/0+M+0mPXnUExOREY00sXfgyhVf+/H7/Om8D8tK3b/afawNuw1Kcl30xHsnHxvgdhg4bHZiySRup5140uKWhWWZAonbl1RQ1x7ieIufAocDp93g+soitt4yn8dfOkKLvwe3y47DZsPCIhiNE0+melYOu0Gey0Fde4g8l52bF5ZTVZzHjHwnz/2hllC0t0iD1NDkkXM+TrYGiMWTOO0GZjIVej1xk+I8JzkOO75InFgimbmu1d/Dr94/z6mWAPWd4cxnkF5qqzMUY8P1Fbo/NgnUExORcbGgrIBHb1/Ih+aVcOt1ZZnjpglFeU5sRioAwrEkNpvBn187E7vNRk88ycoqD+sXl+ELxzNVgjWl+bjsNjZeX8F/+vManr5nGbcuLmfbp5azqrqYdfNnUlOaj9Nuw+1I/Xu8akYe+x6/nf/jzuupKMwhljA52uRn/eIyDAwK3HaKch0YBplV79uDMRo7Ixg2G06bgWEzcNptlBa4+NuPLWbbp5azeFYhZYVuDMBuh2A0iYHF1lvms+H6CjpDMXbsr2fngUa+9e8nePHtBpo6w0P20mR8KcRE5JLtOtzMjrfqezenDHKkyUd9Z5iq4jxuv76cGblO5pfl819uruGzH57H7UvKKStwEUuYFOY4eWDNHO5YVsmiWYW8cdLL74+3crY9xO+Pt+LJc7L15vk8fNu1/QpKqkvyuO26co63BFhVXcxdK2ez4poiKotyuPdDVQDcuricjy2fRdKCFn8P2/ecZXmVh+tmFfHYHdex4hoPuS47bocNAwubzWDd/BL+/NpSDCM1jNgVivEPvz/D//nrY5xqDdCTMLGAZG9lSo7LQXVJHocauvnxwUZ84Tj33ViNYaSWtDp63q8hxUmg4UQRuWTpNRJrvaFMIUP6OMA3Xz7KGW+Q9kAsU7331K/+xEsHm/DkOXnirqWZ329fUsGnbqjCF4njyXOyee3cofcgC8cA2LCkgs3r5rJjXz0HG7px2AzaA6nHar1BmjrDFLjs2GwGd66s5EiTjxMtfs62h1g2u4gF5QU0doapnpHHqjnFbF47l2++fDSzbmPSSs0JMwCH3aAwJ/V1ma436Ykl2LGvnuPNfqJJk0BPnC9+ZCEWFue6I3x0aQUz890sr/JkNgBVkcf4U4iJyCVbUFbA0/csG3J
h3GdfPU2tN8S1ZQX97g1tXjs3U5Zf6w1yqKGbpGXhyXWyed3cYav6Nq2opDMc41BDN82+Hu5YPotdh5vxReI47QYLywszr7PrcDN7TrcTTZi4HbbM0lAvvt1As6+H9mCUzWvmclvvRpnp19p6y3y6wjHOekOZrWAs4NryAgzIzB1z2gwC0SS/P95K0rKwekvwa71BfvR2A/5Igvcbffz84T/n2VdPa9uWCaQQE5FLMtT8sb6PdYZj3LFs1qBlmNJBV+sN8s2Xj9LYFWZVVXEmwIb7wl9QVkBJnovzvgjXlhWABTsPNLJhSQWfv2VBJoxqvUE6QzGuryziZEuAxRUF1LWH2LGvnpqZebT5o1QU5QAXeoxP/fJPmYWK19TM5HTrhftYNiCaMDGA6hm5FOY4KM51sr+uk8auCJCqprz3Q1V88cV3afalgq49GKXWG9S2LRNMISYil2SkwNl1uJndx1q578bqYYfQdh1u5ow3yOKKQp6+ZxkL+vTYhvvCT89PS68UUpLvGtRr23W4md3HW7EZEE0m6QjHqD2eWhZrQXkB+W47ZQVudvceO9Ua4P0mH2BxqiVARVEObqcNp5XaiyzP7aC+I0Syd17Zf/2LBfyP353KvJ4BVBS5OdUS5EzbhfBr9few63AzX/zIQvXAJpBCTEQuSTpohrrnM5reR99z0tddbJ+u9FqNOw80ZlbKqPUG+71++nktLH56sIlPr66iKxzHF44DsKq6mPWLynjjpJdDTd00doRZWF5ANJHk3YYu5s3M5z/eWM2iigJ+/X4zd66s5H/87hTeYAxvMJb5b5sBOU4bsYRJQ0eYprIw15YX4I8kMGxQ4HJQWujS/bAJpupEEbkk6cA50uQbcmHgL35k4Yhf3Bc7Jx1OA0vUN62ozJS1p4c0+75++nkNDEwLDAyeuCs1MfvtDzopyXNx6+JySvJdNHSEyHPbefT2hayZP5OkaXGmLcihxm4qPbmsnT+Tm2pm8tWNi0ivN+zvSVCa78RuGCSSJqYFSdPiwAdd1HeE2bC0gntXV9MZjvHC67X88K0GVShOIPXERKaRiVgOaaLu+ezYVz/kJpTpe2M7DzRmhhOHev2BxwcuYFzXHsJltxOOJjnS5GPz2rnsr+3gZGuAk81+tu85m5nI3BmKkV78KpowSZoWCdPCZbfhMEySFvgjcSxSy1Stqi5mdnEudd4QuS57Zqdo9cbGn0JMZBoZ6T7WpbrYEOAlMwb82cfAocihXn/g8fRQ467DzXSGY/z+eCumZbGyqjjzPEsqizjjDTKvNJ+tt8znSJOPTSsqee61M1h9NpKxGRZ2m4FpmZQVuumOxClwO+gKx/igI8QHHSFWVnmwsOgIRfnNkVRPzJPn1J5j40whJjKNXOmVcn17in1L8QcaTXAO1etMh/iG3jlp6YrEdFVjqndm45riXLbvOcvWW+ZnrrWsC3lqt9mI9W4F0+yPUpzroDuSuueWnkf2p/N+4kkLh93AskxOtPhp6q1mTO98LZdPISYyjUxYr2mcDOwpXmpb0+X7td5Q5rlg6GKSvq/d7OthVXUx7cEYh8910xWOsX3PWSwr1Q+z2aC80E0kZhJLWiR7E6u7d9NMSC0+nOeyYxgGLodFT9zEMAyum1XEdbOKqGsPsbzKc0nvSwZTiInIFWO8eorp8v2BE637DikODLK+r93YGeaZ35+mqSvMiZYAM/NdOGwGFUU5FOY4WDY7l1AsyZ/O+UiYFrFEkrjZu4dZlYfSAje13hDd4RgJ02LJrCIevu1adh1u5u0POnnjpDczVKlhxcszKdWJzz//PDU1NeTk5LB69WrefPPNYc/92c9+xoYNGygrK6OoqIh169bx29/+djKaKSJTbDRVjaOxaUUlm9fMzcw/62u4bVIaO8PsP9tBY2eYWxeXc9t15cSTJnkuBzlOe2pl+1iS480B9p3t4N2GLgLRBOFYKsAgtbrH0fN+/nimnQ86QnSG4zhsNh69fWEmQO+
7sRoMtK7iOJnwENu5cydf/vKX+cY3vsF7773HzTffzB133EFDQ8OQ5+/Zs4cNGzawa9cuDh48yF/8xV9w11138d577010U0XkKjFSGA61PxrA9j1n2Xe2g2/+4mhmpY2lsz2ARUcwRqHbSY7LjmFATyyJrbeEf+A+n/GkRSRukuuw9+5EbXGkydevXZvXzh2yDTJ2hmVZF9lr9fKsWbOGG264gRdeeCFzbMmSJdxzzz1s27ZtVM+xdOlS7rvvPr75zW9e9Fy/34/H48Hn81FUVHTJ7RaRq9dQRR8/erue//vfT2CasKLKw9P3LAPgudfO8OYpL12RGHabgd1mIxpLkp/rIM9pp7V3E1C7LbXtTN8v1AVl+dyyqKxf8Yh2fL64sXyPT2hPLBaLcfDgQTZu3Njv+MaNG9m7d++onsM0TQKBACUlJRPRRBG5Cgw3MXo4uw4388O3GjJbyAC0B2IYGMSSJkfO+VLrOnaGafX3EIolsRkGJfluTMsiCfgjCVwOG+sWlGC3Gay4xjNoNoDLYeOJu5YOqo7UMOL4mdAQa29vJ5lMUlFR0e94RUUFLS0to3qOv//7vycUCnHvvfcO+Xg0GsXv9/f7EZHpZazhkOoJ5XOmt2eUPlaU6yCWMDEMqPWG2L7nLLXeEItnFfKZtXPZ9h+Ws6SyCLcjtYFmq7+HrlCcHIeNk61Bem+NUeC247IbLJlV1C9ghxvKlEs3KdWJhtH/3yeWZQ06NpQf/ehHPPnkk/ziF7+gvLx8yHO2bdvGU089NS7tFJHsNNaqxoFbyGSOfWIZ2/ec5c6VlZxqDeILx6koysGT62T9ojKONPm498Yq/uH3Z2gN9JBIQI7TTlGuk9bevcfSm4D++EATgZ44X/vJ+zR0hNh/toOn71l2RU9xyEYTek8sFouRl5fHT37yEz75yU9mjj/66KMcOnSIN954Y9hrd+7cyec+9zl+8pOf8PGPf3zY86LRKNFoNPO73++nurpa98RE5LKk9wGbW5JHfWeYQreDWm8IT66dYDRJpLck0W6kNtA0SP3MK8tP9cxaAphW6vGywhws4DNr5ijERuGKuSfmcrlYvXo1u3fv7nd89+7dfPjDHx72uh/96Ef85V/+JS+++OKIAQbgdrspKirq9yMikjbS/bKRHksP/W29ZT4bllTQ6u8hljTpDCUocDuYVeTG0RtgkJrkjAH17SFONAdI9lYumsCMfCd3LJuVWfF/tPfu5OImfDjxq1/9Klu2bOHGG29k3bp1fPe736WhoYGHHnoIgMcff5xz587xL//yL0AqwB588EGeeeYZ1q5dm7l3lpubi8ejWe4iV7orrQLvYvuepR8bOAm67+omR5p8ROJJ7AYU5Tjw9S7227smMJBazSNpgs1mEE9eGODy5DrpDKWWpNq+5yxnegNMPbLxMeEhdt9999HR0cHTTz9Nc3Mzy5YtY9euXcydOxeA5ubmfnPGvvOd75BIJHj44Yd5+OGHM8c/+9nP8oMf/GCimysil2kiFhm+HCPdL+v72Ejt3rSikrr2EHUdIUrzXew720n1jFwwyAwbxpOpc82khUFvz8yCGXku1i8uA1LFIgNXEZHLM+HzxCab5omJTK0rrSc2WrXeIDv21w+7ynz6HtmG6yvAAl8kTos/wtt1nSTM/s9lAIaR+jPHZeehWxYMu9yVDDaW73GtnSgi4+pKXmR4pIAduE/ZUL0xSO1k/XevnOBEbw9sKDYjFWIuh52V13ioaw/x3Gtn8OQ5aewMD2rDxQJUhqcQE5FpYbiV7ftKDxv++9Fm6tpDPHzbtZlASYfzU7/6E8ebA4OWmwJ6l5lKFXvYgXAsyaHGbqzGbmJJi1ynjVMtgcxmm+k27DrczEsHmwBt0zJWCjERmRaGW9m+rwVlBbT6ezjeHKDWG6KmNJ8vfmRhpge3vMrDoYbuYV+jb88sXdsRjpvMyHOSSCaoLsnrt9lm2qYVlXSGY2BduXu9XakUYiIyLYy0lxh
cGGq8c2Ul4XiSmpn5mWvSRR+vnWjjVFuA+WX5dIaiBHqSFOY46ArH+z2XDeh7m6zA7SAcS7JkVhG3Li7n1sXlmfL+dHueuGvpRL31q5pCTESmhYvdq0sH1X03VvPz//rn/R5Lh1lde4hab5CVVcXUdYQ4dt5PNG5S4E5NgLYZqfUS7UA0aWYKPiwL3I7UtNx0cF1pVZzZSiEmItNerTdIZzjGhiUV/Ybzar1BduyrxxeJ48lzcveq2XjynOyv7aCuPUg8aRHtXZUjz2UnaZrYDIOEafWrWDRssPXm+XSGY/3mpfX9Uy6NQkxEpr1dh5vZfayV+26s7jfUuOtwMy+920Q0YeKwGRxq7KYnluRUWwCzT0glLYjEkszMd9IVTmCzWThsqTADcNps1LWHONHi56Z5JZkhRPXALp9CTESmveF6RZtWVNIZiuGLxKnrCHG6NUAsYWUCrG+FomFAZzie2ijTTC10DqmKRU+ug1+8fx7TtJiR58rsLaay+sunEBORaeFic8SG6hUtKCvgibuXZq7fsb8eXzjO+03dNHSEyXPZCcaSOG0GZYVuGrsiwIXJzkkrVbHY1BXBZTcoLnCz9Zb5gMrqx4tCTESmhfEopCjJc7F+URnHm/0kLQt/TwKAmGURjCYySymaFhTnOegKp44tLC9g3YLSfgGqsvrxoRATkWnhUgop+vbedh1u5gd760iYFuFYst+cMMuCrnAcpw3iZirIDAwcRqrUfpYnd8gFhlVWf/kUYiIyLVxKIcXAVe5/crCRxs4Iw23pmzAvLGzfFY7jdtowLYt9tR0A7K3tyGyOqXtg42NC9xMTEclm6T3F0r2npz+xjOqSXOz2C+ekN8OEVHhZff47ljBx2e20Bno43uzH5TA40eJn1+HmSX0fVzOFmIjIMBaUFbC8ysM3Xz7K6yfbuHVxOU9/YhnFOS5sgMPWP7gGMgyoKHKzsqqYJZVFxBIWc0ry6QzHtDHmONFwoohIr6EqGLfvOcv+utRw4K2Ly/nlofN0hGOY0G+u2EAGUJTjpNnXwy0Ly9i8bi41pfl0hmLsPtZKSZ4qEseDemIiMi2l1y7s2yNK3wPrO9x358pKrinOZc38Ep599TTHm/3DbsHSlwUEeuJEE0l8kTgL+iw8vOH6ClUkjhP1xERkWhqq5H6oCsb2QAzTgrfOdlLrDVGS78ST48DXW14/HAPIcznoSW/53Puau48PXhlELp1CTESmpaECa6gKxr6bYT7z+9OcagswqyiHQDQxYo/MMFKL/iYtC0+uc9jXlMujEBORaWmsJffVJXmsmlPM6bYAnaEY1kWGFD25TuJJi1VVxWxeN/eSXlMuTiEmIjKE9DJThxq6afb1ALB+URmvHG2hrff3kUTjSf5szgzNCZtgKuwQERnCjv31/HB/PUfPdVNZnMPyKg9/98oJWnw9JBm+rD6tsjg3E2BDFZHI+FCIiYgMxQLTskhaUJrv4plXT3OiJTAovAzAaR98uctuy/TAhqp6lPGh4UQRkQFqvUEwYFFFIQ2dYdpDMU42+zP3wWxGar3EPJedeNKkuiSPc90RkkmLpGVhWlA1IzfzfCromDgKMRGRAdKbZG5YUsGa+TPxheP4I3Hq2kMAmapEu80gHLNo6AxT4HaQtFskkiZx06Iwx5l5PhV0TBwNJ4qIDJBeM3HzurmU5Ll4+4NOinKdGIaR6Y3ZbRCJJ7GAeNLCF4lTnOfEAmyGgSfPOdJLyDhRT0xEZATpIcCOUJTjzX7cdgO3006gJ048eeEOWaHbwRduXcA7dV3UdYRYv6gMGHkzTrl8CjERkQEGruaxaUUl/+H5P9ITN7EZsLiyiNJ8F+e6I/TETRo6wwSiCZ5/vTY1nJi0eOOUlyNNvtRaicdbM88l40shJiIywMBCjF2HmzEMgwK3gxl5Tk62BOgpyWNJZREAXaEYXZF4Zq+xWR43hxq6Oe+LsGn5he1cZPwpxEREBhhYiLFpRSWdoRgY4AvH+dX
h85xsDXDGGyTXaadqRi7BWIJZRTmUFripKc1nb20H15YVsH5RGUeafFP4bq5uCjERkYtYUFYABrx0sIk180soznXSGY7htBnEkyZuu43Na+eyee3czOTmmtJ8Nq2oHHKhYRk/CjERkdHoreFoD8QI9i7+G4mbWMDh835uW1KRKdzo25PTHLGJpRATERmFzevmUpLvyqxmf7IlQEm+i2giyarq4mFDSnPEJpZCTERkFPqGUXVJnsrmrxAKMRGRMVLv6sqhFTtERPrQivPZRSEmItLHpa44r/CbGhpOFBHp41KrCVVKPzUUYiIifVzq/S6V0k8NDSeKiIzCxYYL0+GnasXJpRATERkF7c58ZdJwoojIKGi48MqkEBMRGQXNDbsyaThRRESylkJMRESylkJMRESylkJMRESylkJMRESylkJMRESylkJMRESylkJMRESylkJMRESylkJMRESylkJMRESylkJMRESylkJMRESylkJMRESylkJMRESylkJMRESylkJMRESylkJMRESylkJMRESylkJMRESylkJMRESy1qSE2PPPP09NTQ05OTmsXr2aN998c8Tz33jjDVavXk1OTg7z58/nH//xHyejmSIikmUmPMR27tzJl7/8Zb7xjW/w3nvvcfPNN3PHHXfQ0NAw5Pl1dXVs2rSJm2++mffee4//9t/+G1/60pd46aWXJrqpIiKSZQzLsqyJfIE1a9Zwww038MILL2SOLVmyhHvuuYdt27YNOv/rX/86v/zlLzl+/Hjm2EMPPcT777/Pvn37Lvp6fr8fj8eDz+ejqKhofN6EiIhMmrF8j09oTywWi3Hw4EE2btzY7/jGjRvZu3fvkNfs27dv0Pkf/ehHOXDgAPF4fND50WgUv9/f70dERKaHCQ2x9vZ2kskkFRUV/Y5XVFTQ0tIy5DUtLS1Dnp9IJGhvbx90/rZt2/B4PJmf6urq8XsDIiJyRZuUwg7DMPr9blnWoGMXO3+o4wCPP/44Pp8v89PY2DgOLRYRkWzgmMgnLy0txW63D+p1tbW1Deptpc2aNWvI8x0OBzNnzhx0vtvtxu12j1+jRUQka0xoT8zlcrF69Wp2797d7/ju3bv58Ic/POQ169atG3T+7373O2688UacTueEtVVERLLPhA8nfvWrX+Wf/umf+Od//meOHz/OV77yFRoaGnjooYeA1HDggw8+mDn/oYceor6+nq9+9ascP36cf/7nf+Z73/seX/va1ya6qSIikmUmdDgR4L777qOjo4Onn36a5uZmli1bxq5du5g7dy4Azc3N/eaM1dTUsGvXLr7yla/w3HPPMXv2bP7hH/6BT33qUxPdVBERyTITPk9ssmmemIhIdrti5omJiIhMJIWYTLpab5BnXz1NrTc41U0RkSynEJNJt+twMzsPNLLrcPNUN0VEstyEF3aIDLRpRWW/P0VELpVCTCbdgrICvviRhVPdDBG5Cmg4UUREspZCTEREspZCTEREspZCTCaNSutFZLwpxGTSqLReRMabqhNl0qi0XkTGm0JMJo1K60VkvGk4UUREspZCTEREspZCTEREspZCTEREspZCTEREspZCTEREspZCTEREspZCTEREspZCTC6Z1kIUkammEJNLprUQRWSqadkpuWRaC1FEppp6YnLJ0mshLigrADS8KCKTTyEm40bDiyIy2TScKONGw4siMtkUYjJutNWKiEw2DSfKmPS976V7YCIy1dQTkyHVeoPsOtzMphWVmcINuHDfqzMU41RrgDO9AaYemIhMBfXEZEjDFWlsWlHJfTdWgwG13hDXlhWwvMqjHpmITAn1xGRIwxVppO97vX6yjVMtAbbeMp8jTT52Hmikrj1Eq7+HrbfM59bF5VPRbBGZZhRiMqS+RRoDhxZrvUG27zlLrTfEG6e8AGxYUsGhpm4ON3UDKMREZFIoxGREtd4g33z5KLXeUKanVVGUwxlvkGvLCsCC3cdbue/GalZWeTh23s/CioKLP7G
IyDjQPTEZ0a7DzZzxBllQls/xZj97azt4v6mba8sKWDO/hD2nvfgjcX58sJF/e6eRaMLkt0dbp7rZIjJNqCcmg6SHD5dXeahrDzEz30Wey06L38RpN3DZbZzxBjl63ocvkgDA35PABhgGLLumaGrfgIhMGwox6Sc9fHiyJUCe205nKEY8aVHrDeGwGSy9xkPNzHzqOkJ80B7C7bARTZg4bAaVnhw+NK+Eh2+7dqrfhohMEwox6WfHvnoONXVTlOMgHE2ysLyQmtJ8ADx5Tpo6w/zq8HlmFeVQ6cmh2ddDNGGSNC06QjFa/T1T/A5EZDpRiAmQ6oHt2F/P/rMdGMCyazyEo0nuXFlJeyCWqUz85PN/JJ60aOqKYAAmYPQ+R47DxqHGbnbsr+eJu5ZO3ZsRkWlDhR0CpAo4XjrYRENXmJVVxVQV51HrDfHC67X88K2GzKTne2+sosDtwGaA1Xtt+s8Z+S4SpoUvHJ+S9yAi0496YtNc3yKOT62uAgvWLy7jjZNeKotzaOgIMackn7r2EF/deYjjzX5M02JeaT7+SJyOUAzLguI8J/PL8vEGonjynFP9tkRkmlCITXPp5aUAnrhrKbXeIF/78fucagvw0etnsaqqmD2nvfz8vXMYveOGdsMgHE1iWhYrq4oBOO+LUFWcR+ESJ4caunn9ZJsmPIvIhNNw4jSXXgtx04rKTGXin5p9hGNJWnwR9pz2UusNYQGmlfqxAH8kjmnBqdYAp9sC5LsdrF9cRqu/h/ebuvnmL45qLUURmXAKsWkuvbzUgrKCzMRmG2BZ8E59J7Xe0KBrEqaFBfgicUKxJMFokvr2EEeafGy9ZT5VM3IJRRPa4VlEJpxCTDI2rahk0/JKXA47APFk6rjNGHyuw25wTXFu5vfZxblsWlHJrYvL+f7nbuIvP1yjHZ5FZMIpxAS4UODR1BnG35NahSPXaaOswMXampJB5wd6EsSTJjlOGw4DPjSvpN++YyIik0GFHdJvkd+kaWaOR+MmPfEYwWg3dsMgaVk47QbxZGo4scUfBS7ME0vrWyyizTJFZCIpxKaxdO+rMxzjjDfI7OJcvL3BZDcuFHFE4qlgsxlgWVbmenvvXDG7YeDJvVBWP9xeZCIi403DidNYpsdkweY1c1lVVUxrIAKAzXZhEnOaaUGit6PmsBkYBhTnOrm2vID1i8sy5/UtFhERmUjqiU1jy6s87D/bkQmgv3vlBJZlAFamqGMoDgOSZirigtEE0c4w2/ecBeCNk14wYPPauQoxEZlwCrFp7EiTj/rOMEeafOw/28Hx5gAGqe1UbKR6Yg4bxAYEWqK3i+awGVQU5VDgdlDrDbF9z1mOnPMBUJLn0v0wEZlwCrFpbNOKSjpDMTrDMe5cWUk4lqSpK0x7KIbNMIib1qAASzNIzRc71x3hnlXXUFOaz/IqT6YnpvthIjIZFGLT2IKyAkryXZlKwtuuK6e00MULr9fS2Jm6N5YaXBzMYUuFnAHUtYf4UM0Mtu85y9Zb5mu5KRGZNAqxaabvgr9Hmnwsr/LQGYrx1tkOznpDuBw27MYQs5sHiJsWdgNK8l2cbA3wf/3mBOFYan6ZQkxEJotCbBrpOx+s8mQOtW1Bbl9SQau/hw86wkSTJj0Js981Q/XCAOw2gzXzZpDvdvDaSS92G1TNyGXrLfMzQZneg0xEZKIoxKaR9NqI15YVUFGUCrG69hDnfRHmzcwj0JOgsSsyqudKmhbvNXaT47RjMywcNjuLKwqpLsnrN9l504pKBZqITBiF2DTQdwhxM3MzRRfpYowjTT46QzF+3Bs8acPdD0uLxE0icRMDiCeT7DndzorewIILAabVO0RkoijEpoHhgiT937cuLqfWG+RQYzfHW/yYJsSSJjYbJPuPLuKyGxiGQbTPsKMF5LnszJ2Zx/IqT2ayM2j1DhGZWAqxaWC0QbJqTjGrqouZke/kuT/U9guqNMMwcNkNinJcuB12DANcDhuWBQ2dYX556Dx
HmnyZ1+o7lKh7ZSIy3hRi00DfntFwdh1uZvexVu67sZq69hCmaeJyGMQS/QcUowmTaAIcNhvdkR7cDhsrq4p5r7GbaCLJiRY/b3/QmTm/bw9QQ4siMt4UYtNYrTfIjn31+CJxADYsqWB5lYcX32ogbkJZnhO3w05T9+Bij65IHAOYU5LD1lvm88zvT3OqLcB1s4rw5DrpDMdYvyi1nFV61+jOcIwNSyoyv+/YV68lqkTksijEprFdh5t56d0mogkTp91gZVUxvkgcXyRGnstOKJqgrNCNw3dhqam+LKAo18mti8szVYl9izn6Lj317KunMz29BWUFPPvqaV56twnQElUicukUYtNYetkpXyROXXuIM94g4VgSCzAsSPZuu1JZnJspvbcBJqnKxVkeNzUz86n1BvsNWaYXFl5e5cm81sBj6dfWElUX6J6hyNgpxKaxBWUFPHH30tTQ3v56VlnFrF9cxjd/cZSmrgh2m8Hp1iAFORf+mqQDzOUwCMWS7DrazIkWP2tqZrJ5XWpYsO/CwukeWmcoljl26+LyzGvLBbpnKDJ2E7qfWFdXF1u2bMHj8eDxeNiyZQvd3d3Dnh+Px/n617/O8uXLyc/PZ/bs2Tz44IOcP39+Ips57aWLOkryXdy6uJynP7GMqhm5mKZFvHfLlbICJ7be1agsIJqw8EcSROMmJ1oC/PhAI7sONwOpntV9N1b3nydmkDkmQ+v7uQ1U6w3y7KunqfUGp6BlIlcuw+q7Ve84u+OOO2hqauK73/0uAH/1V3/FvHnz+NWvfjXk+T6fj09/+tNs3bqVlStX0tXVxZe//GUSiQQHDhwY1Wv6/X48Hg8+n4+ioqJxey9Xs6GGsWq9QZ577Qx/ONlGoCfBwrJ8zrSHiCf7/3WxG4CRuq/1//zHlYPWTRztEJmG0kb27Kun2XmgkfturFYvTa56Y/ken7AQO378ONdffz379+9nzZo1AOzfv59169Zx4sQJFi9ePKrneeedd7jpppuor69nzpw5Fz1fIXbp+gYJpHpoLx86R6031G/1DkefSdB2G1gY5DhsPLR+wSV/wepLemQKeZlOxvI9PmH3xPbt24fH48kEGMDatWvxeDzs3bt31CHm8/kwDIPi4uIhH49Go0Sj0czvfr//sto9XQz1pZge+usMxTjU1M2plgCFvffD+v5Lx2YzSPQOM9ptNhaU5VOc62TXkWbq2kM8fNu1Y/6i1coeIxvNXD+R6WjC7om1tLRQXj54S47y8nJaWlpG9Rw9PT089thjPPDAA8Om8bZt2zL33DweD9XV1ZfV7ukiHVjp+1hw4Z4MBhxv9hOOJekIRgdd23cCdJ7LTlsgysGGbo63BPjNkeZ+z5l2sXs66S9p9TJEZCzGHGJPPvkkhmGM+JO+f2UMsS+VZVlDHh8oHo9z//33Y5omzz///LDnPf744/h8vsxPY2PjsOfKBUMVEaSDZP2iMmyGgQUMsfJUhsNm8IlVs8lz2YklTPKcdqpm5FLXHhoUVkOFpojI5RrzcOIjjzzC/fffP+I58+bN4/Dhw7S2tg56zOv1UlFRMeL18Xice++9l7q6Ol577bURx0Tdbjdut3t0jRdg8FDiwHthz/z+NNF4EpsB82bmM6vIzYmWAKZl4e9JYFqpMvuEaXG6NcinV1fxvf+vjvml+RxrDtDqj1JTmt9v+EvDhSIyEcYcYqWlpZSWll70vHXr1uHz+Xj77be56aabAHjrrbfw+Xx8+MMfHva6dICdPn2aP/zhD8ycOXOsTZSLGDgfKXMvLBzjVEuA4y1+LKA418mCsnwONXTTGU4tM2UY4Oy9J+Z22Fgzv4Qfvd2IL5KgqTOCw26wsLygX1il56FhQWNnWAUKIjJuJqywY8mSJXzsYx9j69atfOc73wFSJfZ33nlnv6KO6667jm3btvHJT36SRCLBpz/9ad59911+/etfk0wmM/fPSkpKcLlcE9XcaWVgryj9Z2coxhlvkBl5TtqDMQI
9cV476U0NAZMaPoybFrlOG6U5TkKxBN/dc5ZIb6+tMM/JnQtns35xWWb/siNNPjrDMV46mFpi6lRrgPrOMDB5E3pV2Sdy9ZrQFTt++MMf8qUvfYmNGzcCcPfdd/O//tf/6nfOyZMn8fl8ADQ1NfHLX/4SgFWrVvU77w9/+AO33nrrRDZ32hhY6Zb+/fWTbRxq7Ka00EWh28n7Td184A1hGKkeGL23MqMJk3ULZvLqiVaCPUkMUr22lu4eDjV2A7D7eCv7z3ZQ3xlmw5IKPrW6CixYv7is31YtcPkhc7HrJ2slDIWlyOSb0BArKSlhx44dI57Td5ravHnzmMC518LwE5t3HW6mMxyj1hvkVKvFqupi5pfmc7Y9ROZ/EvNCj+ydDzoJRxPYjNSEZ8OASDzJkXM+euJJNlxfwfpFFwKr75f6wAnRlxsyF7t+su7HadkokcmntROnmaG+aNPHNiyp4FM3VHGoqZtab4hKTw5uu42EaWGaFvluB7GESU/CpKl3QWALcDtsdEfiGAaYlkVDV5i11kzeOOXtP8FsGKMJmZF6ORe7frLmWKl4RWTyKcSmmaG+aPseW1BWwOsn29i+5yxr5pfQ4o/gDaRWm++JJ0n2TnI2DCh0OwhEE9gMA9OCHIeNmxeVUlWcBwaZ+2Al+YO3Whm4n9hoNu0crpdzpUwEvlLaITKdKMSmmaG+aAceS69CD+ALJzAtC5fDxg1zZvB+YzehWBK7zWBmoZuEaVGS7yIcj4BBJsB84Ti3L6noLQ5po7TQRXsgxvIqD2+c9KZWBGkNYDeMEfcTS/fA+m7hIiKSphCTQdJBsbzKwzO/P837Td30xE26QrFMTyyetPigPUSey071jFyafRHiCYtDDV3UtocA+NQNVRyo76SpK0JHMIppwf6zHRw558O0LBaVF7JqTvGIwbRjXz0vvdvEp26o0tYtIjKIQkwG6dszqy7J44svvsvp1iDtwShmn8Ib04JgNElrIIrVu4nmkfN+FlYUUFWcy6GmbvyRBBVFOSyuKKSqJI/1i8p446Q3M4x40So+A5Kmxf66Dp761Z9Gd42ITBsTup+YZLf0UN7ffuw6bqopwbJg6WwPC8rycdgM3A4bdgP8kTi23r9JlmVxrivCue4IR5q6MYxU+f2Rc35K8lxUl+RRku8aNowGrrG4ee1cVlUX09gZ4aWDTVq2SkT6UU9MhtW3mOLpe5Zl7k39cH89rf4ohgEVRW7CsSTXlhem5ovlOYnETJp9ERImdIfjBHsSlBa6WF7lGVSgMbDqcODjC8oKePqeZZkikE0rKvutALJ53dxMWzU/S2T6mdBNMaeC9hMbP0OVtT/1qz/xL/vqMU2LWUU51JTmcbotSFGuk1Z/D5GYid1mkeN04O9JYLcZGJaFBZQX5vCl26/lVEsQXySOJ88JVmpi9IYlFfgicY43+1lSWTTidi7Pvnqa7W+eBWDrzfMzK4J8anUVT9yl+2Yi2e6K2E9Mslff8ErfG0sf84XjuOwGxQVu1i2YyW+ONBNNmHiDsczGmckkYKSWonLaDWbkuWn199Ds7+HHB5pYVVXMb44047AZ3Puhau67sZrOcIxfHz5PLGmR47SP2KPatKKSznAMrNR/79hXn3pgnP85NtQmoertiVxZFGIyyFBDft98+ShnvEH+fEEp180qorQwtY7l6jnFvFPfRTxpYRhkVveIJSxm5Dpx2A1mFboJRRMEowlK813QG27VJXlAKojerutI9dqSFqUFI6+RuaCsgCfuWpoJmfWLyyjJd2XCZqzLPw0Mqx376/GFU73Cxq4InaEYJfmuzGeSCc7RFqeIyIRRiMkgfUvsn331NHXtId5v8lFdktor7HRbkGPNJgb0VitaVBblsPSaIl493oZFammqcDxJNGLSHoxhM1LHqkry2Lw2dR/rUEM3/36khZI8F/vPdtATNzEMaA+mlr8auE3MwN+HmwDdd1X+kjzXsGGWWW4rFGP38QvbBr10sIl
owsS0LGyGgS8SxxeJU1bgZnmVhx376nnx7Qac9pHnuInIxFOIySDpEvtnXz3NzgONlBW4sRmQ47Bz3hehekYuhgEftIfp6d01c0a+k45gDEgtQ5U0TaKJ1FqLhTkOIrEk15YXsH5RWWZY8lRrgEWzCiktdNHi76E4z0m+28HpttRqHusXl/HNXxwlHEtS1x7iRIufFl8PPQmTznAsE4YD55n1XZW/b+9pYO8sPQft9iUV/TYIrWsPcaipm3jCpCDHQaAnzp7T7TjtBm+cSk3UthmwsKIwE/QXC0oNQ4pMDIWYDKtvj+xIk6/f1iq/ev88bqeNhGUST1jUtYf4364tpbokl9VzZ/CbI81gWpTmu0j0jjFeN6uII00+dh5opNDtINH7+Auv19LYGcHlsDGvNB9/JA4GbN9zlsbOCEW5Do43+zneEgBSS175wvFM2NZ6gzz1qz/hC6eKRdLLWNV6g5lhxqF6Z+lV+Vt8EfadbefwuW4eu2MJNaX5/O5YK+FYAofNRjSe6pVVFOVyqKGbxs4wN8yZkanYHGnRXy0KLDKxFGIyrL6TntMrz9+6uJzXT7bxs3eb8EcSzPLkEOiJs7B3MvN7Dd38+v1m4r0re3SEY/T+J2+e9vKhmhncNK+EN097sRnQHorRFYrjsBuUFbq598Yq3qnrYn9tBzlOG/luOzbDYEllEbGkSUNnmGTSIhCNZ9qZHt5LmiZ2my2z5cszr56mZmY+cCGQDzd1s2N/PS8fOsfKqmJuX1LB7461EowmaAu0seKa1Aoiu440c7I1gGVZ5DhtuB02wrEkvkicxbMKefqeZSwou7D553CrjmhRYJGJpRCTUeu7ZYvTbqO6JJdPr67irbOdbL1lPtUlebx6vJX2UGpYMXXP7ML13mCMH7/TRDSRxBuMkeeyc++NVfzDq2cI+BKc747wT2/Wca47TE88ve0LOO02jrf4WVlVTENnGBOobQvxme372XrLfHyROJZlUZTjpCdhcqixm0NN3Rxq6OZP5/zUtYdYNacYy7J47UQbSQvOekPUd4QpzHEQiiZw2OCmmpmZYb+1C2bS0Blm0axCHv3IQrbvOcufzvspynWw9Zb5maHBiy36q0WBRSaWQkxGre+WLbcsLON4s59XjrbQEYpxpMlHdUkei2cVEogkiJsmoWgyU3afVlrootabWlsxHEvy47cb8YXjmcBLPwbgsBuYpkVXOE5XOE6O085dK2ZT1x4imkiyt7aDA/VdrJ5TjNNho6Iohxxn6r7dims8lBa4SJgWJ1r81HqDRBMmyd7G2GwGpmWlApBUVeWsopxM1eGiioLU/bp8F2+c9LL1lvls33OWM94gR5p8g/ZEE5GpoRCTUan1BukMxbhpXgkYUNcR4kRrAKfNYOlsD3XtIb724/c52eInblo4bbYhp22FehLkOC6sdnb0fOr8HKcNm2EQjiUzj80qyiEUTdAZjpPrtPHoRxZy6+Jyar1BnnvtDKfbUsF0uHdJq1OtAdbUlLB5zVw6QzH21nYQS5rkOuwYgNtuI540cTtsXFdZRGm+i7PtIZq7ewjHk7zzQSedoRgJ0+L6yiJqvUGOJVLnl+S7MvfAxnNXahG5PAoxGdJQy0HtPt7K3JI83v6gMxVmFpktVn53rAXTApvNhplIELWSuBwGOQ47C0rzOdkWJBxL8m5jquovPUw4s8DNua4ILruNaDwVYOneWyJpEu2tfjQtaPZFePbV03SGY7z9QSe3Li5jX20HCdPEG4iStOBAQxdd4ThVJbnMKcnjTFuQQDSRCVSnzeC6WUXUzMynrj1EMJrILGrc1BWhoshNRyiGLxJnzfwSmroiLJlVlPkcBg4NqnBDZGopxGRIA7+cB1Yqpr/Un331NDveqmdOSR5d4Rj+SGqTzIRpYVkWSZvFqjkzKC10s+9sJ5FYIjWkZ0HCNGnpjmAY0JNIZopB0oHT7I9i660gjCZMnvzlMeJJk5XXeCgrdFPodrLx+ln8+9F
mzN5r4gmL4y0BTrQGWFWVGmZMxFKrh5gW5Lrs+CJxfnX4PJYFM3Kd2HpfxALaAlHshtG79mMqRGtK84ftZV1q4YZ6cCLjQyEmQxr45TxUpWLfxzvDMf79SAsOu0EoZqWr15k7M7VJ5t6zHcQTJuWF7lTFogkJ0yJhgc2AmXluOkNRoskLg5CpuWk2YkmThEmmV3a8NUAiaXKooRunw8CGQTRh4rIbYIBpWiRMOHreR7z3+dIriYRiCYIdiUzBiTcUywQlpIIuFXgWLf4els72DBtQlxNE6sGJjA+FmAxptFV16fNeP9nGqZYAa+aX8IeTXnzhOG2BKNfNSi3eOasoh8bOMImkRTyRWqJqQVk+lpWa9zW/NJ8D9V1EwxdK500LwvFUcBlAvttOOJYk2RtSkFreClKhGUtaqSDrPT+eHHxXzjQHL7HocqTmglm91yV6n99mwKqq4kEBlV5F/1BDN82+HmDsQaTSe5HxoRCTcfHGSS9HzvlYVFHIz//rn/crx999rJWyAjc5TjtVJbl0R+KYlsX80nzagzGOnuumoSOM0dsjMoCasnzO9qlUTAeMaYE5RDilj8SGeGyo8+yA02mjONeJv7dCse/jBlBTms/6xWWDVuTYdbiZlw42kTQtVlWPvDP1cFR6LzI+FGIyPowLf/bd72vRrAJOtQRYWFFARyiKvydBwrSw2QzOdUc43Rrs7VVZmQBx2KHN3zPoJQLR5KBjY1HothOJJclzO0iaqd6bLxInEjcz57jsqeFLm81gZVUx2/eczZT9p0On7yr6m9dpAWCRqaQQk3Gxee1cSvJSG19+8+WjvNfQRdKC62cX4Q1EaewK09QVYWa+iyWVhVxTnEtTVwSwsNvAYbfR0xsmiSTEkyMHlt2Ai3S6BkmHoL8n0e+422Ejz2nnxpoZ3HZdOf+6rz5zD+2MN8i1fVbmSBtpYWERmTy2i58icnHp4bEjTT5qvSHy3Q5MyyLXYaOswE1VcS4VRTnETYsZeS4Kc5zUd4QpcDuZOzOf0nx35i+jlXnOfDZcX47LkermuR02Niwp58+qi3HZbZnhx0vhsKWGFO0GfHx5Je89sZHtD36Im2pmMiPPRVc4jifXyeY1c3n6nmVAajPO9DDpzgON7DrcfOkNEJFxoZ6YjMnFKvL6rgT/++OtRBImtd4gAJ+6oYpTrQHOeIN0hmNEkybRZBJ/NI7Lnlpxo8XfgwW47Ab/5eYafv1+M1a69N6ywIAWfw/02bsMUoF0scHGvquHFOY4cTvsLCjL5+Hbrs2cs+twc6b31XeoML2if9/3qKIMkamnEJMxGak0fODmkp48J75wnJqZ+XhynaxfXAbAolmF+MJxGjvD5DrshOJJFlcU8ujtC/lPP3gHy0oVaPz4nSZOtQWYMzMfi1S5/aH6bryhGPbe/ckSvfe2DBtw4dYWLrsxqMij76adswrdlBS4uXNl/y1aBgZUuqij73EVZYhcORRiMiYj9ULS+3N1hmI8cfdSSvJc7D7Wyn03Vmf2J9t9PPV7ei+w95u6KbW7efT21JJSD66by4tvNbCgrAB/T5x4wmRlVTGeXCcvvdvEstlF+Bu6iSZMyvKctIdjWBbYDBsOwyTRG1KWZTEjz0lXOI7dliqtNy2wG6k1E9tDMc75eugMxzjXu3vzE3cv7RdQ6d7XxTbXFJGpoxCTMRmxF9I7SfhQUze13uCgwBvYm6lrD1HrDeG0G5lFdZ+8exlP3r2MZ189zT++UQukhiZrSlNbqszy5PLx5bmcaPFzTXEukFp/MRhNYFk2gtFU0YbTbucvFpfzmyPN2G0Gc0vyyHHZ+YvFZbxytIWmzgiReJJgT4JowqSpKzyolH6ozTXVAxO5sqiwQ8bN5rVzWVlVzPnuCLsON2cCb+C2Jenfa0rzcdmNIav/Nq2o5N4bq1lQVsCpttRmmFtvng/A74+30hM32XO6ncIcJzv+yxquLS8gGk9ipBbtYPGsAk60+CkrdDNvZh5
fv+M6/t//uBIDA7fDTiCawMSiMMeBw2Zw9JyfHW/V9yvWSLd387q5/XZ+FpErh3piMm4WlBUMudL7cB6+7VpqSvOHHKZbUFbAE3cv5alf/YnGA2FOtPgBONHix7QsAj1xYgmTuo4QC8oKWFVVzKnWANfOyGPtgpnsr+3geEsAmwHdLjvb95xlUUUhu4+3phYvJhWiH6qZwQuv1xLoSXB9ZdGQ7dY9MJErl0JMxlX6C7/WGxw0PDfcuQOlC0SWV3nAgoUVhZxqCVDrDeGwGayqLubOlZX8+v1mtt6S6p1tXjeXkvwL962+uvMQtd4gVSV5eHKdnPEGWTSrMNOj6lt1GE2YXF9ZlNmtWUSyh0JMJsRoqxiHCo30tfvPdlDfGWbDkgpWVRfjC8fx5DnZvDZV+v6/3zQ3c83AQOzby0s/51CvN/A+3VC04rzIlUshJhNipCrGi63gPty2L2MxMNSGGw4czVChVpwXuXIZlmWNcfGeK5vf78fj8eDz+SgqKprq5sgQxqtnM1k9JPXERCbXWL7H1ROTSTdehRKT1UNSYYfIlUshJllLyz+JiEJMspZ6SCKiyc4iIpK1FGJyVUrPU0uvoC8iVyeFmFyVtOeXyPSge2JyVVLRh8j0oBCTq5KKPkSmBw0niohI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1prQEOvq6mLLli14PB48Hg9btmyhu7t71Nd//vOfxzAMvv3tb09YG0VEJHtNaIg98MADHDp0iFdeeYVXXnmFQ4cOsWXLllFd+/LLL/PWW28xe/bsiWyiiIhkMcdEPfHx48d55ZVX2L9/P2vWrAFg+/btrFu3jpMnT7J48eJhrz137hyPPPIIv/3tb/n4xz8+UU0UEZEsN2E9sX379uHxeDIBBrB27Vo8Hg979+4d9jrTNNmyZQt/8zd/w9KlSy/6OtFoFL/f3+9HRESmhwkLsZaWFsrLywcdLy8vp6WlZdjrvvWtb+FwOPjSl740qtfZtm1b5p6bx+Ohurr6ktssIiLZZcwh9uSTT2IYxog/Bw4cAMAwjEHXW5Y15HGAgwcP8swzz/CDH/xg2HMGevzxx/H5fJmfxsbGsb4lERHJUmO+J/bII49w//33j3jOvHnzOHz4MK2trYMe83q9VFRUDHndm2++SVtbG3PmzMkcSyaT/PVf/zXf/va3+eCDDwZd43a7cbvdY3sTIiJyVRhziJWWllJaWnrR89atW4fP5+Ptt9/mpptuAuCtt97C5/Px4Q9/eMhrtmzZwu23397v2Ec/+lG2bNnC5z73ubE2VURErnITVp24ZMkSPvaxj7F161a+853vAPBXf/VX3Hnnnf0qE6+77jq2bdvGJz/5SWbOnMnMmTP7PY/T6WTWrFkjVjOKiMj0NKHzxH74wx+yfPlyNm7cyMaNG1mxYgX/+q//2u+ckydP4vP5JrIZIiJylTIsy7KmuhHjye/34/F48Pl8FBUVTXVzRERkjMbyPa61E0VEJGspxEREJGspxEREJGspxEREJGspxEREJGspxEREJGspxEREJGspxEREJGspxEREJGspxEREJGspxEREJGspxEREJGspxEREJGspxEREJGspxEREJGspxEREJGspxEREJGspxEREJGspxEREJGspxEREJGspxEREJGspxEREJGspxEREJGspxEREJGspxEREJGspxEREJGspxEREJGspxEREJGspxEREJGs5proB482yLAD8fv8Ut0RERC5F+vs7/X0+kqsuxAKBAADV1dVT3BIREbkcgUAAj8cz4jmGNZqoyyKmaXL+/HkKCwsxDGOqmwOk/lVRXV1NY2MjRUVFU92cK5Y+p9HR5zQ6+pxG50r8nCzLIhAIMHv2bGy
2ke96XXU9MZvNRlVV1VQ3Y0hFRUVXzF+SK5k+p9HR5zQ6+pxG50r7nC7WA0tTYYeIiGQthZiIiGQthdgkcLvdPPHEE7jd7qluyhVNn9Po6HMaHX1Oo5Ptn9NVV9ghIiLTh3piIiKStRRiIiKStRRiIiKStRRiIiKStRRiE6Srq4stW7bg8XjweDxs2bKF7u7uUV//+c9/HsMw+Pa3vz1hbbwSjPVzisfjfP3rX2f58uXk5+cze/ZsHnzwQc6fPz95jZ4Ezz//PDU1NeTk5LB69WrefPPNEc9/4403WL16NTk5OcyfP59//Md/nKSWTq2xfE4/+9nP2LBhA2VlZRQVFbFu3Tp++9vfTmJrp85Y/z6l/fGPf8ThcLBq1aqJbeDlsGRCfOxjH7OWLVtm7d2719q7d6+1bNky68477xzVtT//+c+tlStXWrNnz7b+5//8nxPb0Ck21s+pu7vbuv32262dO3daJ06csPbt22etWbPGWr169SS2emL927/9m+V0Oq3t27dbx44dsx599FErPz/fqq+vH/L8s2fPWnl5edajjz5qHTt2zNq+fbvldDqtn/70p5Pc8sk11s/p0Ucftb71rW9Zb7/9tnXq1Cnr8ccft5xOp/Xuu+9Ocssn11g/p7Tu7m5r/vz51saNG62VK1dOTmMvgUJsAhw7dswCrP3792eO7du3zwKsEydOjHhtU1OTdc0111hHjx615s6de1WH2OV8Tn29/fbbFnDR/1Nmi5tuusl66KGH+h277rrrrMcee2zI8//2b//Wuu666/od+/znP2+tXbt2wtp4JRjr5zSU66+/3nrqqafGu2lXlEv9nO677z7rv//3/2498cQTV3SIaThxAuzbtw+Px8OaNWsyx9auXYvH42Hv3r3DXmeaJlu2bOFv/uZvWLp06WQ0dUpd6uc0kM/nwzAMiouLJ6CVkysWi3Hw4EE2btzY7/jGjRuH/Uz27ds36PyPfvSjHDhwgHg8PmFtnUqX8jkNZJomgUCAkpKSiWjiFeFSP6fvf//71NbW8sQTT0x0Ey/bVbcA8JWgpaWF8vLyQcfLy8tpaWkZ9rpvfetbOBwOvvSlL01k864Yl/o59dXT08Njjz3GAw88cEUtXnqp2tvbSSaTVFRU9DteUVEx7GfS0tIy5PmJRIL29nYqKysnrL1T5VI+p4H+/u//nlAoxL333jsRTbwiXMrndPr0aR577DHefPNNHI4rPyLUExuDJ598EsMwRvw5cOAAwJDbwFiWNez2MAcPHuSZZ57hBz/4wRWzhcylmsjPqa94PM7999+PaZo8//zz4/4+ptLA93+xz2So84c6frUZ6+eU9qMf/Ygnn3ySnTt3DvkPqavNaD+nZDLJAw88wFNPPcWiRYsmq3mX5cqP2SvII488wv333z/iOfPmzePw4cO0trYOeszr9Q76F1Ham2++SVtbG3PmzMkcSyaT/PVf/zXf/va3+eCDDy6r7ZNpIj+ntHg8zr333ktdXR2vvfbaVdELAygtLcVutw/6V3JbW9uwn8msWbOGPN/hcDBz5swJa+tUupTPKW3nzp385//8n/nJT37C7bffPpHNnHJj/ZwCgQAHDhzgvffe45FHHgFSw66WZeFwOPjd737HbbfdNiltH7UpvB931UoXLLz11luZY/v37x+xYKG9vd06cuRIv5/Zs2dbX//618dU5JBNLuVzsizLisVi1j333GMtXbrUamtrm4ymTqqbbrrJ+sIXvtDv2JIlS0Ys7FiyZEm/Yw899NC0KOwYy+dkWZb14osvWjk5OdbPf/7zCW7dlWMsn1MymRz0PfSFL3zBWrx4sXXkyBErGAxOVrNHTSE2QT72sY9ZK1assPbt22ft27fPWr58+aDS8cWLF1s/+9nPhn2Oq7060bLG/jnF43Hr7rvvtqqqqqxDhw5Zzc3NmZ9oNDoVb2HcpUuiv/e971nHjh2zvvzlL1v5+fnWBx98YFmWZT322GPWli1bMuenS+y
/8pWvWMeOHbO+973vTasS+9F+Ti+++KLlcDis5557rt/fm+7u7ql6C5NirJ/TQFd6daJCbIJ0dHRYn/nMZ6zCwkKrsLDQ+sxnPmN1dXX1Owewvv/97w/7HNMhxMb6OdXV1VnAkD9/+MMfJr39E+W5556z5s6da7lcLuuGG26w3njjjcxjn/3sZ63169f3O//111+3/uzP/sxyuVzWvHnzrBdeeGGSWzw1xvI5rV+/fsi/N5/97Gcnv+GTbKx/n/q60kNMW7GIiEjWUnWiiIhkLYWYiIhkLYWYiIhkLYWYiIhkLYWYiIhkLYWYiIhkLYWYiIhkLYWYiIhkLYWYiIhkLYWYiIhkLYWYiIhkLYWYiIhkrf8fe8T29a4alk4AAAAASUVORK5CYII=", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "samples = diffusion_model_workflow.sample(num_samples=3000, conditions={\"observables\":np.array([[0.0, 0.0]], dtype=np.float32)}, steps=1000)\n", + "plt.scatter(samples[\"parameters\"][0, :, 0], samples[\"parameters\"][0, :, 1], alpha=0.75, s=0.5)\n", + "plt.gca().set_aspect(\"equal\", adjustable=\"box\")\n", + "plt.xlim([-0.5, 0.5])\n", + "plt.ylim([-0.5, 0.5])" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "d2e23898-84de-4adf-bffb-95eb547e63de", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "(-0.5, 0.5)" + ] + }, + "execution_count": 14, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAbEAAAGdCAYAAACcvk38AAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjAsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvlHJYcgAAAAlwSFlzAAAPYQAAD2EBqD+naQAAUpxJREFUeJzt3Xmc1PWd7/vXr9beq2l6obEbaBAQWUeMQOYqjhESiRpzkqNeA2ZyzmFijiYmmcxET86Ny33cBzdz75wTx6NOwmSSxwwxQxITszEmRKN4A6igCIS9aXuBXqq32ru23+/+UV1F73RDLxT9fj4e/cD+1e9X9a0KqTff7+/z/X4Ny7IsREREspBtqhsgIiJyqRRiIiKStRRiIiKStRRiIiKStRRiIiKStRRiIiKStRRiIiKStRRiIiKStRxT3YDxZpom58+fp7CwEMMwpro5IiIyRpZlEQgEmD17NjbbyH2tqy7Ezp8/T3V19VQ3Q0RELlNjYyNVVVUjnnPVhVhhYSGQevNFRUVT3BoRERkrv99PdXV15vt8JFddiKWHEIuKihRiIiJZbDS3hFTYISIiWUshJiIiWUshJiIiWUshJiIiWUshJiIiWUshJiIiWUshJiIiWUshJiIiWUshJiIiWUshJiIiWUshJiIiWUshJiIiWUshJiIiWUshJiIiWUshJiIiWUshJiIiWUshJiIiWUshJiIiWUshJiIiWUshJiIiWUshJiIiWUshJiIiWUshJiIiWUshJiIiWUshJiIiWUshJiIiWUshJiIiWUshJiIiWUshJiIiWWtSQuz555+npqaGnJwcVq9ezZtvvjmq6/74xz/icDhYtWrVxDZQRESy0oSH2M6dO/nyl7/MN77xDd577z1uvvlm7rjjDhoaGka8zufz8eCDD/KRj3xkopsoIiJZyrAsy5rIF1izZg033HADL7zwQubYkiVLuOeee9i2bduw191///0sXLgQu93Oyy+/zKFDh0b1en6/H4/Hg8/no6io6HKbLyIik2ws3+MT2hOLxWIcPHiQjRs39ju+ceNG9u7dO+x13//+96mtreWJJ56YyOaJiEiWc0zkk7e3t5NMJqmoqOh3vKKigpaWliGvOX36NI899hhvvvkmDsfFmxeNRolGo5nf/X7/5TVaRESyxqQUdhiG0e93y7IGHQNIJpM88MADPPXUUyxatGhUz71t2zY8Hk/mp7q6elzaLCIiV74
JDbHS0lLsdvugXldbW9ug3hlAIBDgwIEDPPLIIzgcDhwOB08//TTvv/8+DoeD1157bdA1jz/+OD6fL/PT2Ng4Ye9HRESuLBM6nOhyuVi9ejW7d+/mk5/8ZOb47t27+cQnPjHo/KKiIo4cOdLv2PPPP89rr73GT3/6U2pqagZd43a7cbvd4994ERG54k1oiAF89atfZcuWLdx4442sW7eO7373uzQ0NPDQQw8BqZ7UuXPn+Jd/+RdsNhvLli3rd315eTk5OTmDjouIiEx4iN133310dHTw9NNP09zczLJly9i1axdz584FoLm5+aJzxkRERIYy4fPEJpvmiYmIZLcrZp6YiIjIRFKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIicgVo9Yb5NlXT1PrDU51UyRLKMRE5Iqx63AzOw80sutw85CPX0rIKRivbgoxEZlSfUNmeZWHuSV5LK/yDHnuxUJuvK6R7OGY6gaIyPSWDpm0+s4wR5p8AGzfc5att8zn1sXlAGxaUdnvz9G4lGtqvUF2HW5m04pKFpQVjPo6mXwKMRGZNEOFw1Ahs2lFJd98+Sj76zoAMiG2oKyATSsqxy1ghgurvsH6xY8svKzXkImlEBORSXOxcFhQVpA5vvWW+YRjSSqKcqj1BjMhs2NfPS+920RnKMbmdXMzIZR+/oH/nX7NznCMkjxXv8Aa7rGx9N7Ua5taCjERmTRDhcPAIFle5eGNU1584TgYsLe2g5rS/AuhZ6T+8EXifPPlo9R6Q5nn6jss+cO3Gth/toOtt8wHoDMUGxSg6XbUtYd46WATneEYT9y1tF+YXox6bVNLISYikyYdDulijk0rKtm0opLOUIy3znZQ3xFmUUUhtd4gPXGTZNLEMuDZP5yiIxRlZr6b9YvKKMlzUdce4kB9J3abjX217eS7HRTlODjc1E1hjpOSfCeHGrt545SXJ+5aSq03SEm+a1DvqjMU40SLn6RpgTX293Qp99xk/CjEROSyDTekdrF7Tp2hGL5InDdPe+kKxcAAf0+ca2bk0hmM0RqIggWxBPxwfwMuh41Fswr5i8Vl/PvRZqIJC0jy1gddOGypLtrptiBYFk6HnXgyyStHWli/qIxbF5dnhheXV3k40uSjMxzjpXebMC2LVdXFbF43d8zvfSy9Nhl/CjERuWzDDakNPJ4OtXQJfV17iF+8fz7VCwJyHDYau8IkTch1DJwBZBGJJznU0M3Rcz7iydQ1TpuB3W6wpKKQ0kI3e892EIomiceS2G0GrYEe/u6VE2zfc5Y8t523znayoKwAbzDKTfNKWFBWQE1pPg/fdu2o72npPtiVQyEmIpdt4JBa37Cqaw+x62gzde0hPLlOdh9vpTMUoyTfRSAaT01WNSBpQU/cxGk3SJoWPQmT3oewgLh54fXSAQaQtCzicYuz7SHy3A4iseSFx0yLPJedaMJkf10HRTkOQtEk57sjzJ6RC4A3GOW268pZUFYw6nDSfbArh0JMRC7bwCG1XYeb+eFbDVSeyKHF10Ozv4ez3hAPrJnDhiUVvFWXuv81pyQPh90gYUKyN5jiSQun3QDLwm4zMr204aQfDkYTvFPfidNuI5q4kHiRWJKKQjfxpEmgJ0HSsmgNRPH1xGnp7mHdgpksr/Lw1K/+xP7aDhq7wqnAzXOCBZvXzR0UaLoPduVQiInIJXv9ZNugCcmQ+nLff7aDQ43dWEBlUQ7LrinKPN7QGSaaSHLGGySRtAbVUyR6A23wI8NLWpBMWICF02YQN9PPAYeaujEwiJsXwi2WMGn293C8xY/nlJMX32ogaaaCs64
9RK03iGnBqdYAT9+zDKBfL009sCuDQkxELtkzvz/NocZuwrEk1SV57Nhfn+m9PH3PMnbsqwcDNq+dmxmCmzMjl0gsSSqnhg6pSygS7CcxoPcWjpnYjNQ9t1jvs6dP6QrF8IXjWJbFnJI81i8uY1FFAT9+pwlvIMrbH3Ty3Gtn8OQ6M/PTnrh76WW2UMaLQkxELllNaT7Hmv3UlOazY389L77VgNNuoyTfxRc/spAn7k6Vtu/YX09TZ5iiHAcHG7pJXm5KXcRQT29aEO69sWYD7HaDeNKiPRjj7Q86MS0Lo/cG3Dt1XdR6g7gdNhJJi/ebuvFH4kRiCXyReKYHeufKStoDMRV4TCGFmIiMylBFDw/fdi2eXCcY4AvHsRmQ60zN23rtZBuPfmQhR5p87HynMVNwcTn55bAZmV5WuuAjzWZc6F1djAnMyHEQ6EkSS5o0d0ewgLPeELXeENW9RR9VxbnEkib+cAxvKI4B7Kvt4ESLn5OtARq7wphWaq5ZukFD3UOTiaNV7EVkVIZaDX5BWQEl+S52H2sFoKzQTSiWmrd1qKGb7XvOsmlFJZ4cJ6m7VRc4jEtrh9NmYDcgzzng68uCQVX5I+gMxakocuPJcZDvdnBNcW7mMV8kjtth43hrAH8kQVckAYBhQLO/h0BPgrU1M/nCrQu478ZqMOClg0289G6TVsufZOqJicio9K3Iq/UGM/e71i8qA1JzvjqCMebNzOOa4lzaQzHWzC/hiy++i4WFy2EQS1yIscQldMn63usKxS+U4APkuuyE+pTXQ+pf6SaDGaR6bi2+CL21ICwoK6C6JI/DTd3kuey0+KMAuB02LMvEbjPId9nx9yQIRhNUFOVwqjXI5rV9JkhbqlicbAoxERlR32HEdEXes6+e5qV3mwAyC+d+7SfvA3DdrCI8eU6qZuTxh5NejjUHgFSgDBcqaQYXhgmddoPYgJtn6SFDe29yJXtXiirLd4FBvxAzgJXVxRw+5yNpWhS4HVQUuWnoCFNa4CJhWuQ47bQGenDZ7ays9vDaiTbcDht5Lgc2I0qB28Ga+SVgQXswxspqDz89eI5AJM6vD58nx2mnJK/3/t9dKvaYCgoxERnRUBN70+sd+iJxOsMxduyrp7EzTFGOk7217bT6o7gcBh9fPptoPElzdw++nvhF71mlhxxtBph9Tk4HW0meE6fdzixPDqfbAgSjqdDKcdmpLsmlO9yVKa3PddnpiSexGWCzG2y4vpwP2sMsrCjgdGuAuAllBS6WVnpo9vXwm/eb8YZi5DgM8tyOTFC+cdLLteUFtPmjnG4L0BNLggELygtYO3/mkD0vregxeXRPTERGtGlFJffdWN3vy3pBWQFP3L2UmtL81P0wAxZXFNIdidHij2IBpgmBaJw1NTP53P82D5fDwADynXZc9gtfPXlOG7lOGwvK8inKcWAABW4HLocdd+95DpuB025QXpRDdyTGoaZuovFk6v6YrXei8wddmeFGm5EaBqzrCGFaFi6HjYP1XRxq7KahM8LMAjcAHaEYpQUu7lg+i4SVujaasDjfFSFugr8nQSxp0eyLEI4nCUZTUwPmzsxn7fyZrF9Uxq7DzdR6g/0+M+0mPXnUExOREY00sXfgyhVf+/H7/Om8D8tK3b/afawNuw1Kcl30xHsnHxvgdhg4bHZiySRup5140uKWhWWZAonbl1RQ1x7ieIufAocDp93g+soitt4yn8dfOkKLvwe3y47DZsPCIhiNE0+melYOu0Gey0Fde4g8l52bF5ZTVZzHjHwnz/2hllC0t0iD1NDkkXM+TrYGiMWTOO0GZjIVej1xk+I8JzkOO75InFgimbmu1d/Dr94/z6mWAPWd4cxnkF5qqzMUY8P1Fbo/NgnUExORcbGgrIBHb1/Ih+aVcOt1ZZnjpglFeU5sRioAwrEkNpvBn187E7vNRk88ycoqD+sXl+ELxzNVgjWl+bjsNjZeX8F/+vManr5nGbcuLmfbp5azqrqYdfN
nUlOaj9Nuw+1I/Xu8akYe+x6/nf/jzuupKMwhljA52uRn/eIyDAwK3HaKch0YBplV79uDMRo7Ixg2G06bgWEzcNptlBa4+NuPLWbbp5azeFYhZYVuDMBuh2A0iYHF1lvms+H6CjpDMXbsr2fngUa+9e8nePHtBpo6w0P20mR8KcRE5JLtOtzMjrfqezenDHKkyUd9Z5iq4jxuv76cGblO5pfl819uruGzH57H7UvKKStwEUuYFOY4eWDNHO5YVsmiWYW8cdLL74+3crY9xO+Pt+LJc7L15vk8fNu1/QpKqkvyuO26co63BFhVXcxdK2ez4poiKotyuPdDVQDcuricjy2fRdKCFn8P2/ecZXmVh+tmFfHYHdex4hoPuS47bocNAwubzWDd/BL+/NpSDCM1jNgVivEPvz/D//nrY5xqDdCTMLGAZG9lSo7LQXVJHocauvnxwUZ84Tj33ViNYaSWtDp63q8hxUmg4UQRuWTpNRJrvaFMIUP6OMA3Xz7KGW+Q9kAsU7331K/+xEsHm/DkOXnirqWZ329fUsGnbqjCF4njyXOyee3cofcgC8cA2LCkgs3r5rJjXz0HG7px2AzaA6nHar1BmjrDFLjs2GwGd66s5EiTjxMtfs62h1g2u4gF5QU0doapnpHHqjnFbF47l2++fDSzbmPSSs0JMwCH3aAwJ/V1ma436Ykl2LGvnuPNfqJJk0BPnC9+ZCEWFue6I3x0aQUz890sr/JkNgBVkcf4U4iJyCVbUFbA0/csG3Jh3GdfPU2tN8S1ZQX97g1tXjs3U5Zf6w1yqKGbpGXhyXWyed3cYav6Nq2opDMc41BDN82+Hu5YPotdh5vxReI47QYLywszr7PrcDN7TrcTTZi4HbbM0lAvvt1As6+H9mCUzWvmclvvRpnp19p6y3y6wjHOekOZrWAs4NryAgzIzB1z2gwC0SS/P95K0rKwekvwa71BfvR2A/5Igvcbffz84T/n2VdPa9uWCaQQE5FLMtT8sb6PdYZj3LFs1qBlmNJBV+sN8s2Xj9LYFWZVVXEmwIb7wl9QVkBJnovzvgjXlhWABTsPNLJhSQWfv2VBJoxqvUE6QzGuryziZEuAxRUF1LWH2LGvnpqZebT5o1QU5QAXeoxP/fJPmYWK19TM5HTrhftYNiCaMDGA6hm5FOY4KM51sr+uk8auCJCqprz3Q1V88cV3afalgq49GKXWG9S2LRNMISYil2SkwNl1uJndx1q578bqYYfQdh1u5ow3yOKKQp6+ZxkL+vTYhvvCT89PS68UUpLvGtRr23W4md3HW7EZEE0m6QjHqD2eWhZrQXkB+W47ZQVudvceO9Ua4P0mH2BxqiVARVEObqcNp5XaiyzP7aC+I0Syd17Zf/2LBfyP353KvJ4BVBS5OdUS5EzbhfBr9few63AzX/zIQvXAJpBCTEQuSTpohrrnM5reR99z0tddbJ+u9FqNOw80ZlbKqPUG+71++nktLH56sIlPr66iKxzHF44DsKq6mPWLynjjpJdDTd00doRZWF5ANJHk3YYu5s3M5z/eWM2iigJ+/X4zd66s5H/87hTeYAxvMJb5b5sBOU4bsYRJQ0eYprIw15YX4I8kMGxQ4HJQWujS/bAJpupEEbkk6cA50uQbcmHgL35k4Yhf3Bc7Jx1OA0vUN62ozJS1p4c0+75++nkNDEwLDAyeuCs1MfvtDzopyXNx6+JySvJdNHSEyHPbefT2hayZP5OkaXGmLcihxm4qPbmsnT+Tm2pm8tWNi0ivN+zvSVCa78RuGCSSJqYFSdPiwAdd1HeE2bC0gntXV9MZjvHC67X88K0GVShOIPXERKaRiVgOaaLu+ezYVz/kJpTpe2M7DzRmhhOHev2BxwcuYFzXHsJltxOOJjnS5GPz2rnsr+3gZGuAk81+tu85m5nI3BmKkV78KpowSZoWCdPCZbfhMEySFvgjcSxSy1Stqi5mdnEudd4QuS57Zqdo9cbGn0JMZBoZ6T7Wpbr
YEOAlMwb82cfAocihXn/g8fRQ467DzXSGY/z+eCumZbGyqjjzPEsqizjjDTKvNJ+tt8znSJOPTSsqee61M1h9NpKxGRZ2m4FpmZQVuumOxClwO+gKx/igI8QHHSFWVnmwsOgIRfnNkVRPzJPn1J5j40whJjKNXOmVcn17in1L8QcaTXAO1etMh/iG3jlp6YrEdFVjqndm45riXLbvOcvWW+ZnrrWsC3lqt9mI9W4F0+yPUpzroDuSuueWnkf2p/N+4kkLh93AskxOtPhp6q1mTO98LZdPISYyjUxYr2mcDOwpXmpb0+X7td5Q5rlg6GKSvq/d7OthVXUx7cEYh8910xWOsX3PWSwr1Q+z2aC80E0kZhJLWiR7E6u7d9NMSC0+nOeyYxgGLodFT9zEMAyum1XEdbOKqGsPsbzKc0nvSwZTiInIFWO8eorp8v2BE637DikODLK+r93YGeaZ35+mqSvMiZYAM/NdOGwGFUU5FOY4WDY7l1AsyZ/O+UiYFrFEkrjZu4dZlYfSAje13hDd4RgJ02LJrCIevu1adh1u5u0POnnjpDczVKlhxcszKdWJzz//PDU1NeTk5LB69WrefPPNYc/92c9+xoYNGygrK6OoqIh169bx29/+djKaKSJTbDRVjaOxaUUlm9fMzcw/62u4bVIaO8PsP9tBY2eYWxeXc9t15cSTJnkuBzlOe2pl+1iS480B9p3t4N2GLgLRBOFYKsAgtbrH0fN+/nimnQ86QnSG4zhsNh69fWEmQO+7sRoMtK7iOJnwENu5cydf/vKX+cY3vsF7773HzTffzB133EFDQ8OQ5+/Zs4cNGzawa9cuDh48yF/8xV9w11138d577010U0XkKjFSGA61PxrA9j1n2Xe2g2/+4mhmpY2lsz2ARUcwRqHbSY7LjmFATyyJrbeEf+A+n/GkRSRukuuw9+5EbXGkydevXZvXzh2yDTJ2hmVZF9lr9fKsWbOGG264gRdeeCFzbMmSJdxzzz1s27ZtVM+xdOlS7rvvPr75zW9e9Fy/34/H48Hn81FUVHTJ7RaRq9dQRR8/erue//vfT2CasKLKw9P3LAPgudfO8OYpL12RGHabgd1mIxpLkp/rIM9pp7V3E1C7LbXtTN8v1AVl+dyyqKxf8Yh2fL64sXyPT2hPLBaLcfDgQTZu3Njv+MaNG9m7d++onsM0TQKBACUlJRPRRBG5Cgw3MXo4uw4388O3GjJbyAC0B2IYGMSSJkfO+VLrOnaGafX3EIolsRkGJfluTMsiCfgjCVwOG+sWlGC3Gay4xjNoNoDLYeOJu5YOqo7UMOL4mdAQa29vJ5lMUlFR0e94RUUFLS0to3qOv//7vycUCnHvvfcO+Xg0GsXv9/f7EZHpZazhkOoJ5XOmt2eUPlaU6yCWMDEMqPWG2L7nLLXeEItnFfKZtXPZ9h+Ws6SyCLcjtYFmq7+HrlCcHIeNk61Bem+NUeC247IbLJlV1C9ghxvKlEs3KdWJhtH/3yeWZQ06NpQf/ehHPPnkk/ziF7+gvLx8yHO2bdvGU089NS7tFJHsNNaqxoFbyGSOfWIZ2/ec5c6VlZxqDeILx6koysGT62T9ojKONPm498Yq/uH3Z2gN9JBIQI7TTlGuk9bevcfSm4D++EATgZ44X/vJ+zR0hNh/toOn71l2RU9xyEYTek8sFouRl5fHT37yEz75yU9mjj/66KMcOnSIN954Y9hrd+7cyec+9zl+8pOf8PGPf3zY86LRKNFoNPO73++nurpa98RE5LKk9wGbW5JHfWeYQreDWm8IT66dYDRJpLck0W6kNtA0SP3MK8tP9cxaAphW6vGywhws4DNr5ijERuGKuSfmcrlYvXo1u3fv7nd89+7dfPjDHx72uh/96Ef85V/+JS+++OKIAQbgdrspKirq9yMikjbS/bKRHksP/W29ZT4bllTQ6u8hljTpDCUocDuYVeTG0RtgkJrkjAH17SFONAdI9lYumsCMfCd3LJuVWfF
/tPfu5OImfDjxq1/9Klu2bOHGG29k3bp1fPe736WhoYGHHnoIgMcff5xz587xL//yL0AqwB588EGeeeYZ1q5dm7l3lpubi8ejWe4iV7orrQLvYvuepR8bOAm67+omR5p8ROJJ7AYU5Tjw9S7227smMJBazSNpgs1mEE9eGODy5DrpDKWWpNq+5yxnegNMPbLxMeEhdt9999HR0cHTTz9Nc3Mzy5YtY9euXcydOxeA5ubmfnPGvvOd75BIJHj44Yd5+OGHM8c/+9nP8oMf/GCimysil2kiFhm+HCPdL+v72Ejt3rSikrr2EHUdIUrzXew720n1jFwwyAwbxpOpc82khUFvz8yCGXku1i8uA1LFIgNXEZHLM+HzxCab5omJTK0rrSc2WrXeIDv21w+7ynz6HtmG6yvAAl8kTos/wtt1nSTM/s9lAIaR+jPHZeehWxYMu9yVDDaW73GtnSgi4+pKXmR4pIAduE/ZUL0xSO1k/XevnOBEbw9sKDYjFWIuh52V13ioaw/x3Gtn8OQ5aewMD2rDxQJUhqcQE5FpYbiV7ftKDxv++9Fm6tpDPHzbtZlASYfzU7/6E8ebA4OWmwJ6l5lKFXvYgXAsyaHGbqzGbmJJi1ynjVMtgcxmm+k27DrczEsHmwBt0zJWCjERmRaGW9m+rwVlBbT6ezjeHKDWG6KmNJ8vfmRhpge3vMrDoYbuYV+jb88sXdsRjpvMyHOSSCaoLsnrt9lm2qYVlXSGY2BduXu9XakUYiIyLYy0lxhcGGq8c2Ul4XiSmpn5mWvSRR+vnWjjVFuA+WX5dIaiBHqSFOY46ArH+z2XDeh7m6zA7SAcS7JkVhG3Li7n1sXlmfL+dHueuGvpRL31q5pCTESmhYvdq0sH1X03VvPz//rn/R5Lh1lde4hab5CVVcXUdYQ4dt5PNG5S4E5NgLYZqfUS7UA0aWYKPiwL3I7UtNx0cF1pVZzZSiEmItNerTdIZzjGhiUV/Ybzar1BduyrxxeJ48lzcveq2XjynOyv7aCuPUg8aRHtXZUjz2UnaZrYDIOEafWrWDRssPXm+XSGY/3mpfX9Uy6NQkxEpr1dh5vZfayV+26s7jfUuOtwMy+920Q0YeKwGRxq7KYnluRUWwCzT0glLYjEkszMd9IVTmCzWThsqTADcNps1LWHONHi56Z5JZkhRPXALp9CTESmveF6RZtWVNIZiuGLxKnrCHG6NUAsYWUCrG+FomFAZzie2ijTTC10DqmKRU+ug1+8fx7TtJiR58rsLaay+sunEBORaeFic8SG6hUtKCvgibuXZq7fsb8eXzjO+03dNHSEyXPZCcaSOG0GZYVuGrsiwIXJzkkrVbHY1BXBZTcoLnCz9Zb5gMrqx4tCTESmhfEopCjJc7F+URnHm/0kLQt/TwKAmGURjCYySymaFhTnOegKp44tLC9g3YLSfgGqsvrxoRATkWnhUgop+vbedh1u5gd760iYFuFYst+cMMuCrnAcpw3iZirIDAwcRqrUfpYnd8gFhlVWf/kUYiIyLVxKIcXAVe5/crCRxs4Iw23pmzAvLGzfFY7jdtowLYt9tR0A7K3tyGyOqXtg42NC9xMTEclm6T3F0r2npz+xjOqSXOz2C+ekN8OEVHhZff47ljBx2e20Bno43uzH5TA40eJn1+HmSX0fVzOFmIjIMBaUFbC8ysM3Xz7K6yfbuHVxOU9/YhnFOS5sgMPWP7gGMgyoKHKzsqqYJZVFxBIWc0ry6QzHtDHmONFwoohIr6EqGLfvOcv+utRw4K2Ly/nlofN0hGOY0G+u2EAGUJTjpNnXwy0Ly9i8bi41pfl0hmLsPtZKSZ4qEseDemIiMi2l1y7s2yNK3wPrO9x358pKrinOZc38Ep599TTHm/3DbsHSlwUEeuJEE0l8kTgL+iw8vOH6ClUkjhP1xERkWhqq5H6oCsb2QAzTgrfOdlLrDVGS78ST48DXW14/HAPIcznoSW/53Puau48PXhlELp1CTES
mpaECa6gKxr6bYT7z+9OcagswqyiHQDQxYo/MMFKL/iYtC0+uc9jXlMujEBORaWmsJffVJXmsmlPM6bYAnaEY1kWGFD25TuJJi1VVxWxeN/eSXlMuTiEmIjKE9DJThxq6afb1ALB+URmvHG2hrff3kUTjSf5szgzNCZtgKuwQERnCjv31/HB/PUfPdVNZnMPyKg9/98oJWnw9JBm+rD6tsjg3E2BDFZHI+FCIiYgMxQLTskhaUJrv4plXT3OiJTAovAzAaR98uctuy/TAhqp6lPGh4UQRkQFqvUEwYFFFIQ2dYdpDMU42+zP3wWxGar3EPJedeNKkuiSPc90RkkmLpGVhWlA1IzfzfCromDgKMRGRAdKbZG5YUsGa+TPxheP4I3Hq2kMAmapEu80gHLNo6AxT4HaQtFskkiZx06Iwx5l5PhV0TBwNJ4qIDJBeM3HzurmU5Ll4+4NOinKdGIaR6Y3ZbRCJJ7GAeNLCF4lTnOfEAmyGgSfPOdJLyDhRT0xEZATpIcCOUJTjzX7cdgO3006gJ048eeEOWaHbwRduXcA7dV3UdYRYv6gMGHkzTrl8CjERkQEGruaxaUUl/+H5P9ITN7EZsLiyiNJ8F+e6I/TETRo6wwSiCZ5/vTY1nJi0eOOUlyNNvtRaicdbM88l40shJiIywMBCjF2HmzEMgwK3gxl5Tk62BOgpyWNJZREAXaEYXZF4Zq+xWR43hxq6Oe+LsGn5he1cZPwpxEREBhhYiLFpRSWdoRgY4AvH+dXh85xsDXDGGyTXaadqRi7BWIJZRTmUFripKc1nb20H15YVsH5RGUeafFP4bq5uCjERkYtYUFYABrx0sIk180soznXSGY7htBnEkyZuu43Na+eyee3czOTmmtJ8Nq2oHHKhYRk/CjERkdHoreFoD8QI9i7+G4mbWMDh835uW1KRKdzo25PTHLGJpRATERmFzevmUpLvyqxmf7IlQEm+i2giyarq4mFDSnPEJpZCTERkFPqGUXVJnsrmrxAKMRGRMVLv6sqhFTtERPrQivPZRSEmItLHpa44r/CbGhpOFBHp41KrCVVKPzUUYiIifVzq/S6V0k8NDSeKiIzCxYYL0+GnasXJpRATERkF7c58ZdJwoojIKGi48MqkEBMRGQXNDbsyaThRRESylkJMRESylkJMRESylkJMRESylkJMRESylkJMRESylkJMRESylkJMRESylkJMRESylkJMRESylkJMRESylkJMRESylkJMRESylkJMRESylkJMRESylkJMRESylkJMRESylkJMRESylkJMRESylkJMRESylkJMRESy1qSE2PPPP09NTQ05OTmsXr2aN998c8Tz33jjDVavXk1OTg7z58/nH//xHyejmSIikmUmPMR27tzJl7/8Zb7xjW/w3nvvcfPNN3PHHXfQ0NAw5Pl1dXVs2rSJm2++mffee4//9t/+G1/60pd46aWXJrqpIiKSZQzLsqyJfIE1a9Zwww038MILL2SOLVmyhHvuuYdt27YNOv/rX/86v/zlLzl+/Hjm2EMPPcT777/Pvn37Lvp6fr8fj8eDz+ejqKhofN6EiIhMmrF8j09oTywWi3Hw4EE2btzY7/jGjRvZu3fvkNfs27dv0Pkf/ehHOXDgAPF4fND50WgUv9/f70dERKaHCQ2x9vZ2kskkFRUV/Y5XVFTQ0tIy5DUtLS1Dnp9IJGhvbx90/rZt2/B4PJmf6urq8XsDIiJyRZuUwg7DMPr9blnWoGMXO3+o4wCPP/44Pp8v89PY2DgOLRYRkWzgmMgnLy0txW63D+p1tbW1Deptpc2aNWvI8x0OBzNnzhx0vtvtxu12j1+jRUQka0xoT8zlcrF69Wp2797d7/ju3bv58Ic/POQ169atG3T+7373O2688UacTueEtVVERLLPhA8nfvWrX+Wf/umf+Od//meOHz/OV77yFRoaGnjooYeA1HDggw8+mDn/oYceor6+nq9+9ascP36cf/7nf+Z
73/seX/va1ya6qSIikmUmdDgR4L777qOjo4Onn36a5uZmli1bxq5du5g7dy4Azc3N/eaM1dTUsGvXLr7yla/w3HPPMXv2bP7hH/6BT33qUxPdVBERyTITPk9ssmmemIhIdrti5omJiIhMJIWYTLpab5BnXz1NrTc41U0RkSynEJNJt+twMzsPNLLrcPNUN0VEstyEF3aIDLRpRWW/P0VELpVCTCbdgrICvviRhVPdDBG5Cmg4UUREspZCTEREspZCTEREspZCTCaNSutFZLwpxGTSqLReRMabqhNl0qi0XkTGm0JMJo1K60VkvGk4UUREspZCTEREspZCTEREspZCTEREspZCTEREspZCTEREspZCTEREspZCTEREspZCTC6Z1kIUkammEJNLprUQRWSqadkpuWRaC1FEppp6YnLJ0mshLigrADS8KCKTTyEm40bDiyIy2TScKONGw4siMtkUYjJutNWKiEw2DSfKmPS976V7YCIy1dQTkyHVeoPsOtzMphWVmcINuHDfqzMU41RrgDO9AaYemIhMBfXEZEjDFWlsWlHJfTdWgwG13hDXlhWwvMqjHpmITAn1xGRIwxVppO97vX6yjVMtAbbeMp8jTT52Hmikrj1Eq7+HrbfM59bF5VPRbBGZZhRiMqS+RRoDhxZrvUG27zlLrTfEG6e8AGxYUsGhpm4ON3UDKMREZFIoxGREtd4g33z5KLXeUKanVVGUwxlvkGvLCsCC3cdbue/GalZWeTh23s/CioKLP7GIyDjQPTEZ0a7DzZzxBllQls/xZj97azt4v6mba8sKWDO/hD2nvfgjcX58sJF/e6eRaMLkt0dbp7rZIjJNqCcmg6SHD5dXeahrDzEz30Wey06L38RpN3DZbZzxBjl63ocvkgDA35PABhgGLLumaGrfgIhMGwox6Sc9fHiyJUCe205nKEY8aVHrDeGwGSy9xkPNzHzqOkJ80B7C7bARTZg4bAaVnhw+NK+Eh2+7dqrfhohMEwox6WfHvnoONXVTlOMgHE2ysLyQmtJ8ADx5Tpo6w/zq8HlmFeVQ6cmh2ddDNGGSNC06QjFa/T1T/A5EZDpRiAmQ6oHt2F/P/rMdGMCyazyEo0nuXFlJeyCWqUz85PN/JJ60aOqKYAAmYPQ+R47DxqHGbnbsr+eJu5ZO3ZsRkWlDhR0CpAo4XjrYRENXmJVVxVQV51HrDfHC67X88K2GzKTne2+sosDtwGaA1Xtt+s8Z+S4SpoUvHJ+S9yAi0496YtNc3yKOT62uAgvWLy7jjZNeKotzaOgIMackn7r2EF/deYjjzX5M02JeaT7+SJyOUAzLguI8J/PL8vEGonjynFP9tkRkmlCITXPp5aUAnrhrKbXeIF/78fucagvw0etnsaqqmD2nvfz8vXMYveOGdsMgHE1iWhYrq4oBOO+LUFWcR+ESJ4caunn9ZJsmPIvIhNNw4jSXXgtx04rKTGXin5p9hGNJWnwR9pz2UusNYQGmlfqxAH8kjmnBqdYAp9sC5LsdrF9cRqu/h/ebuvnmL45qLUURmXAKsWkuvbzUgrKCzMRmG2BZ8E59J7Xe0KBrEqaFBfgicUKxJMFokvr2EEeafGy9ZT5VM3IJRRPa4VlEJpxCTDI2rahk0/JKXA47APFk6rjNGHyuw25wTXFu5vfZxblsWlHJrYvL+f7nbuIvP1yjHZ5FZMIpxAS4UODR1BnG35NahSPXaaOswMXampJB5wd6EsSTJjlOGw4DPjSvpN++YyIik0GFHdJvkd+kaWaOR+MmPfEYwWg3dsMgaVk47QbxZGo4scUfBS7ME0vrWyyizTJFZCIpxKaxdO+rMxzjjDfI7OJcvL3BZDcuFHFE4qlgsxlgWVbmenvvXDG7YeDJvVBWP9xeZCIi403DidNYpsdkweY1c1lVVUxrIAKAzXZhEnOaaUGit6PmsBkYBhTnOrm2vID1i8sy5/UtFhERmUjqiU1jy6s87D/bkQmgv3vlBJZlAFa
mqGMoDgOSZirigtEE0c4w2/ecBeCNk14wYPPauQoxEZlwCrFp7EiTj/rOMEeafOw/28Hx5gAGqe1UbKR6Yg4bxAYEWqK3i+awGVQU5VDgdlDrDbF9z1mOnPMBUJLn0v0wEZlwCrFpbNOKSjpDMTrDMe5cWUk4lqSpK0x7KIbNMIib1qAASzNIzRc71x3hnlXXUFOaz/IqT6YnpvthIjIZFGLT2IKyAkryXZlKwtuuK6e00MULr9fS2Jm6N5YaXBzMYUuFnAHUtYf4UM0Mtu85y9Zb5mu5KRGZNAqxaabvgr9Hmnwsr/LQGYrx1tkOznpDuBw27MYQs5sHiJsWdgNK8l2cbA3wf/3mBOFYan6ZQkxEJotCbBrpOx+s8mQOtW1Bbl9SQau/hw86wkSTJj0Js981Q/XCAOw2gzXzZpDvdvDaSS92G1TNyGXrLfMzQZneg0xEZKIoxKaR9NqI15YVUFGUCrG69hDnfRHmzcwj0JOgsSsyqudKmhbvNXaT47RjMywcNjuLKwqpLsnrN9l504pKBZqITBiF2DTQdwhxM3MzRRfpYowjTT46QzF+3Bs8acPdD0uLxE0icRMDiCeT7DndzorewIILAabVO0RkoijEpoHhgiT937cuLqfWG+RQYzfHW/yYJsSSJjYbJPuPLuKyGxiGQbTPsKMF5LnszJ2Zx/IqT2ayM2j1DhGZWAqxaWC0QbJqTjGrqouZke/kuT/U9guqNMMwcNkNinJcuB12DANcDhuWBQ2dYX556DxHmnyZ1+o7lKh7ZSIy3hRi00DfntFwdh1uZvexVu67sZq69hCmaeJyGMQS/QcUowmTaAIcNhvdkR7cDhsrq4p5r7GbaCLJiRY/b3/QmTm/bw9QQ4siMt4UYtNYrTfIjn31+CJxADYsqWB5lYcX32ogbkJZnhO3w05T9+Bij65IHAOYU5LD1lvm88zvT3OqLcB1s4rw5DrpDMdYvyi1nFV61+jOcIwNSyoyv+/YV68lqkTksijEprFdh5t56d0mogkTp91gZVUxvkgcXyRGnstOKJqgrNCNw3dhqam+LKAo18mti8szVYl9izn6Lj317KunMz29BWUFPPvqaV56twnQElUicukUYtNYetkpXyROXXuIM94g4VgSCzAsSPZuu1JZnJspvbcBJqnKxVkeNzUz86n1BvsNWaYXFl5e5cm81sBj6dfWElUX6J6hyNgpxKaxBWUFPHH30tTQ3v56VlnFrF9cxjd/cZSmrgh2m8Hp1iAFORf+mqQDzOUwCMWS7DrazIkWP2tqZrJ5XWpYsO/CwukeWmcoljl26+LyzGvLBbpnKDJ2E7qfWFdXF1u2bMHj8eDxeNiyZQvd3d3Dnh+Px/n617/O8uXLyc/PZ/bs2Tz44IOcP39+Ips57aWLOkryXdy6uJynP7GMqhm5mKZFvHfLlbICJ7be1agsIJqw8EcSROMmJ1oC/PhAI7sONwOpntV9N1b3nydmkDkmQ+v7uQ1U6w3y7KunqfUGp6BlIlcuw+q7Ve84u+OOO2hqauK73/0uAH/1V3/FvHnz+NWvfjXk+T6fj09/+tNs3bqVlStX0tXVxZe//GUSiQQHDhwY1Wv6/X48Hg8+n4+ioqJxey9Xs6GGsWq9QZ577Qx/ONlGoCfBwrJ8zrSHiCf7/3WxG4CRuq/1//zHlYPWTRztEJmG0kb27Kun2XmgkfturFYvTa56Y/ken7AQO378ONdffz379+9nzZo1AOzfv59169Zx4sQJFi9ePKrneeedd7jpppuor69nzpw5Fz1fIXbp+gYJpHpoLx86R6031G/1DkefSdB2G1gY5DhsPLR+wSV/wepLemQKeZlOxvI9PmH3xPbt24fH48kEGMDatWvxeDzs3bt31CHm8/kwDIPi4uIhH49Go0Sj0czvfr//sto9XQz1pZge+usMxTjU1M2plgCFvffD+v5Lx2YzSPQOM9ptNhaU5VOc62TXkWbq2kM8fNu1Y/6i1coeIxv
NXD+R6WjC7om1tLRQXj54S47y8nJaWlpG9Rw9PT089thjPPDAA8Om8bZt2zL33DweD9XV1ZfV7ukiHVjp+1hw4Z4MBhxv9hOOJekIRgdd23cCdJ7LTlsgysGGbo63BPjNkeZ+z5l2sXs66S9p9TJEZCzGHGJPPvkkhmGM+JO+f2UMsS+VZVlDHh8oHo9z//33Y5omzz///LDnPf744/h8vsxPY2PjsOfKBUMVEaSDZP2iMmyGgQUMsfJUhsNm8IlVs8lz2YklTPKcdqpm5FLXHhoUVkOFpojI5RrzcOIjjzzC/fffP+I58+bN4/Dhw7S2tg56zOv1UlFRMeL18Xice++9l7q6Ol577bURx0Tdbjdut3t0jRdg8FDiwHthz/z+NNF4EpsB82bmM6vIzYmWAKZl4e9JYFqpMvuEaXG6NcinV1fxvf+vjvml+RxrDtDqj1JTmt9v+EvDhSIyEcYcYqWlpZSWll70vHXr1uHz+Xj77be56aabAHjrrbfw+Xx8+MMfHva6dICdPn2aP/zhD8ycOXOsTZSLGDgfKXMvLBzjVEuA4y1+LKA418mCsnwONXTTGU4tM2UY4Oy9J+Z22Fgzv4Qfvd2IL5KgqTOCw26wsLygX1il56FhQWNnWAUKIjJuJqywY8mSJXzsYx9j69atfOc73wFSJfZ33nlnv6KO6667jm3btvHJT36SRCLBpz/9ad59911+/etfk0wmM/fPSkpKcLlcE9XcaWVgryj9Z2coxhlvkBl5TtqDMQI9cV476U0NAZMaPoybFrlOG6U5TkKxBN/dc5ZIb6+tMM/JnQtns35xWWb/siNNPjrDMV46mFpi6lRrgPrOMDB5E3pV2Sdy9ZrQFTt++MMf8qUvfYmNGzcCcPfdd/O//tf/6nfOyZMn8fl8ADQ1NfHLX/4SgFWrVvU77w9/+AO33nrrRDZ32hhY6Zb+/fWTbRxq7Ka00EWh28n7Td184A1hGKkeGL23MqMJk3ULZvLqiVaCPUkMUr22lu4eDjV2A7D7eCv7z3ZQ3xlmw5IKPrW6CixYv7is31YtcPkhc7HrJ2slDIWlyOSb0BArKSlhx44dI57Td5ravHnzmMC518LwE5t3HW6mMxyj1hvkVKvFqupi5pfmc7Y9ROZ/EvNCj+ydDzoJRxPYjNSEZ8OASDzJkXM+euJJNlxfwfpFFwKr75f6wAnRlxsyF7t+su7HadkokcmntROnmaG+aNPHNiyp4FM3VHGoqZtab4hKTw5uu42EaWGaFvluB7GESU/CpKl3QWALcDtsdEfiGAaYlkVDV5i11kzeOOXtP8FsGKMJmZF6ORe7frLmWKl4RWTyKcSmmaG+aPseW1BWwOsn29i+5yxr5pfQ4o/gDaRWm++JJ0n2TnI2DCh0OwhEE9gMA9OCHIeNmxeVUlWcBwaZ+2Al+YO3Whm4n9hoNu0crpdzpUwEvlLaITKdKMSmmaG+aAceS69CD+ALJzAtC5fDxg1zZvB+YzehWBK7zWBmoZuEaVGS7yIcj4BBJsB84Ti3L6noLQ5po7TQRXsgxvIqD2+c9KZWBGkNYDeMEfcTS/fA+m7hIiKSphCTQdJBsbzKwzO/P837Td30xE26QrFMTyyetPigPUSey071jFyafRHiCYtDDV3UtocA+NQNVRyo76SpK0JHMIppwf6zHRw558O0LBaVF7JqTvGIwbRjXz0vvdvEp26o0tYtIjKIQkwG6dszqy7J44svvsvp1iDtwShmn8Ib04JgNElrIIrVu4nmkfN+FlYUUFWcy6GmbvyRBBVFOSyuKKSqJI/1i8p446Q3M4x40So+A5Kmxf66Dp761Z9Gd42ITBsTup+YZLf0UN7ffuw6bqopwbJg6WwPC8rycdgM3A4bdgP8kTi23r9JlmVxrivCue4IR5q6MYxU+f2Rc35K8lxUl+RRku8aNowGrrG4ee1cVlUX09gZ4aWDTVq2SkT6UU9MhtW3mOLpe5Zl7k39cH89rf4ohgE
VRW7CsSTXlhem5ovlOYnETJp9ERImdIfjBHsSlBa6WF7lGVSgMbDqcODjC8oKePqeZZkikE0rKvutALJ53dxMWzU/S2T6mdBNMaeC9hMbP0OVtT/1qz/xL/vqMU2LWUU51JTmcbotSFGuk1Z/D5GYid1mkeN04O9JYLcZGJaFBZQX5vCl26/lVEsQXySOJ88JVmpi9IYlFfgicY43+1lSWTTidi7Pvnqa7W+eBWDrzfMzK4J8anUVT9yl+2Yi2e6K2E9Mslff8ErfG0sf84XjuOwGxQVu1i2YyW+ONBNNmHiDsczGmckkYKSWonLaDWbkuWn199Ds7+HHB5pYVVXMb44047AZ3Puhau67sZrOcIxfHz5PLGmR47SP2KPatKKSznAMrNR/79hXn3pgnP85NtQmoertiVxZFGIyyFBDft98+ShnvEH+fEEp180qorQwtY7l6jnFvFPfRTxpYRhkVveIJSxm5Dpx2A1mFboJRRMEowlK813QG27VJXlAKojerutI9dqSFqUFI6+RuaCsgCfuWpoJmfWLyyjJd2XCZqzLPw0Mqx376/GFU73Cxq4InaEYJfmuzGeSCc7RFqeIyIRRiMkgfUvsn331NHXtId5v8lFdktor7HRbkGPNJgb0VitaVBblsPSaIl493oZFammqcDxJNGLSHoxhM1LHqkry2Lw2dR/rUEM3/36khZI8F/vPdtATNzEMaA+mlr8auE3MwN+HmwDdd1X+kjzXsGGWWW4rFGP38QvbBr10sIlowsS0LGyGgS8SxxeJU1bgZnmVhx376nnx7Qac9pHnuInIxFOIySDpEvtnXz3NzgONlBW4sRmQ47Bz3hehekYuhgEftIfp6d01c0a+k45gDEgtQ5U0TaKJ1FqLhTkOIrEk15YXsH5RWWZY8lRrgEWzCiktdNHi76E4z0m+28HpttRqHusXl/HNXxwlHEtS1x7iRIufFl8PPQmTznAsE4YD55n1XZW/b+9pYO8sPQft9iUV/TYIrWsPcaipm3jCpCDHQaAnzp7T7TjtBm+cSk3UthmwsKIwE/QXC0oNQ4pMDIWYDKtvj+xIk6/f1iq/ev88bqeNhGUST1jUtYf4364tpbokl9VzZ/CbI81gWpTmu0j0jjFeN6uII00+dh5opNDtINH7+Auv19LYGcHlsDGvNB9/JA4GbN9zlsbOCEW5Do43+zneEgBSS175wvFM2NZ6gzz1qz/hC6eKRdLLWNV6g5lhxqF6Z+lV+Vt8EfadbefwuW4eu2MJNaX5/O5YK+FYAofNRjSe6pVVFOVyqKGbxs4wN8yZkanYHGnRXy0KLDKxFGIyrL6TntMrz9+6uJzXT7bxs3eb8EcSzPLkEOiJs7B3MvN7Dd38+v1m4r0re3SEY/T+J2+e9vKhmhncNK+EN097sRnQHorRFYrjsBuUFbq598Yq3qnrYn9tBzlOG/luOzbDYEllEbGkSUNnmGTSIhCNZ9qZHt5LmiZ2my2z5cszr56mZmY+cCGQDzd1s2N/PS8fOsfKqmJuX1LB7461EowmaAu0seKa1Aoiu440c7I1gGVZ5DhtuB02wrEkvkicxbMKefqeZSwou7D553CrjmhRYJGJpRCTUeu7ZYvTbqO6JJdPr67irbOdbL1lPtUlebx6vJX2UGpYMXXP7ML13mCMH7/TRDSRxBuMkeeyc++NVfzDq2cI+BKc747wT2/Wca47TE88ve0LOO02jrf4WVlVTENnGBOobQvxme372XrLfHyROJZlUZTjpCdhcqixm0NN3Rxq6OZP5/zUtYdYNacYy7J47UQbSQvOekPUd4QpzHEQiiZw2OCmmpmZYb+1C2bS0Blm0axCHv3IQrbvOcufzvspynWw9Zb5maHBiy36q0WBRSaWQkxGre+WLbcsLON4s59XjrbQEYpxpMlHdUkei2cVEogkiJsmoWgyU3afVlrootabWlsxHEvy47cb8YXjmcBLPwbgsBuYpkVXOE5XOE6O085dK2Z
T1x4imkiyt7aDA/VdrJ5TjNNho6Iohxxn6r7dims8lBa4SJgWJ1r81HqDRBMmyd7G2GwGpmWlApBUVeWsopxM1eGiioLU/bp8F2+c9LL1lvls33OWM94gR5p8g/ZEE5GpoRCTUan1BukMxbhpXgkYUNcR4kRrAKfNYOlsD3XtIb724/c52eInblo4bbYhp22FehLkOC6sdnb0fOr8HKcNm2EQjiUzj80qyiEUTdAZjpPrtPHoRxZy6+Jyar1BnnvtDKfbUsF0uHdJq1OtAdbUlLB5zVw6QzH21nYQS5rkOuwYgNtuI540cTtsXFdZRGm+i7PtIZq7ewjHk7zzQSedoRgJ0+L6yiJqvUGOJVLnl+S7MvfAxnNXahG5PAoxGdJQy0HtPt7K3JI83v6gMxVmFpktVn53rAXTApvNhplIELWSuBwGOQ47C0rzOdkWJBxL8m5jquovPUw4s8DNua4ILruNaDwVYOneWyJpEu2tfjQtaPZFePbV03SGY7z9QSe3Li5jX20HCdPEG4iStOBAQxdd4ThVJbnMKcnjTFuQQDSRCVSnzeC6WUXUzMynrj1EMJrILGrc1BWhoshNRyiGLxJnzfwSmroiLJlVlPkcBg4NqnBDZGopxGRIA7+cB1Yqpr/Un331NDveqmdOSR5d4Rj+SGqTzIRpYVkWSZvFqjkzKC10s+9sJ5FYIjWkZ0HCNGnpjmAY0JNIZopB0oHT7I9i660gjCZMnvzlMeJJk5XXeCgrdFPodrLx+ln8+9FmzN5r4gmL4y0BTrQGWFWVGmZMxFKrh5gW5Lrs+CJxfnX4PJYFM3Kd2HpfxALaAlHshtG79mMqRGtK84ftZV1q4YZ6cCLjQyEmQxr45TxUpWLfxzvDMf79SAsOu0EoZqWr15k7M7VJ5t6zHcQTJuWF7lTFogkJ0yJhgc2AmXluOkNRoskLg5CpuWk2YkmThEmmV3a8NUAiaXKooRunw8CGQTRh4rIbYIBpWiRMOHreR7z3+dIriYRiCYIdiUzBiTcUywQlpIIuFXgWLf4els72DBtQlxNE6sGJjA+FmAxptFV16fNeP9nGqZYAa+aX8IeTXnzhOG2BKNfNSi3eOasoh8bOMImkRTyRWqJqQVk+lpWa9zW/NJ8D9V1EwxdK500LwvFUcBlAvttOOJYk2RtSkFreClKhGUtaqSDrPT+eHHxXzjQHL7HocqTmglm91yV6n99mwKqq4kEBlV5F/1BDN82+HmDsQaTSe5HxoRCTcfHGSS9HzvlYVFHIz//rn/crx999rJWyAjc5TjtVJbl0R+KYlsX80nzagzGOnuumoSOM0dsjMoCasnzO9qlUTAeMaYE5RDilj8SGeGyo8+yA02mjONeJv7dCse/jBlBTms/6xWWDVuTYdbiZlw42kTQtVlWPvDP1cFR6LzI+FGIyPowLf/bd72vRrAJOtQRYWFFARyiKvydBwrSw2QzOdUc43Rrs7VVZmQBx2KHN3zPoJQLR5KBjY1HothOJJclzO0iaqd6bLxInEjcz57jsqeFLm81gZVUx2/eczZT9p0On7yr6m9dpAWCRqaQQk3Gxee1cSvJSG19+8+WjvNfQRdKC62cX4Q1EaewK09QVYWa+iyWVhVxTnEtTVwSwsNvAYbfR0xsmiSTEkyMHlt2Ai3S6BkmHoL8n0e+422Ejz2nnxpoZ3HZdOf+6rz5zD+2MN8i1fVbmSBtpYWERmTy2i58icnHp4bEjTT5qvSHy3Q5MyyLXYaOswE1VcS4VRTnETYsZeS4Kc5zUd4QpcDuZOzOf0nx35i+jlXnOfDZcX47LkermuR02Niwp58+qi3HZbZnhx0vhsKWGFO0GfHx5Je89sZHtD36Im2pmMiPPRVc4jifXyeY1c3n6nmVAajPO9DDpzgON7DrcfOkNEJFxoZ6YjMnFKvL6rgT/++OtRBImtd4gAJ+6oYpTrQHOeIN0hmNEkybRZBJ/NI7Lnlpxo8XfgwW47Ab/5eYafv1
+M1a69N6ywIAWfw/02bsMUoF0scHGvquHFOY4cTvsLCjL5+Hbrs2cs+twc6b31XeoML2if9/3qKIMkamnEJMxGak0fODmkp48J75wnJqZ+XhynaxfXAbAolmF+MJxGjvD5DrshOJJFlcU8ujtC/lPP3gHy0oVaPz4nSZOtQWYMzMfi1S5/aH6bryhGPbe/ckSvfe2DBtw4dYWLrsxqMij76adswrdlBS4uXNl/y1aBgZUuqij73EVZYhcORRiMiYj9ULS+3N1hmI8cfdSSvJc7D7Wyn03Vmf2J9t9PPV7ei+w95u6KbW7efT21JJSD66by4tvNbCgrAB/T5x4wmRlVTGeXCcvvdvEstlF+Bu6iSZMyvKctIdjWBbYDBsOwyTRG1KWZTEjz0lXOI7dliqtNy2wG6k1E9tDMc75eugMxzjXu3vzE3cv7RdQ6d7XxTbXFJGpoxCTMRmxF9I7SfhQUze13uCgwBvYm6lrD1HrDeG0G5lFdZ+8exlP3r2MZ189zT++UQukhiZrSlNbqszy5PLx5bmcaPFzTXEukFp/MRhNYFk2gtFU0YbTbucvFpfzmyPN2G0Gc0vyyHHZ+YvFZbxytIWmzgiReJJgT4JowqSpKzyolH6ozTXVAxO5sqiwQ8bN5rVzWVlVzPnuCLsON2cCb+C2Jenfa0rzcdmNIav/Nq2o5N4bq1lQVsCpttRmmFtvng/A74+30hM32XO6ncIcJzv+yxquLS8gGk9ipBbtYPGsAk60+CkrdDNvZh5fv+M6/t//uBIDA7fDTiCawMSiMMeBw2Zw9JyfHW/V9yvWSLd387q5/XZ+FpErh3piMm4WlBUMudL7cB6+7VpqSvOHHKZbUFbAE3cv5alf/YnGA2FOtPgBONHix7QsAj1xYgmTuo4QC8oKWFVVzKnWANfOyGPtgpnsr+3geEsAmwHdLjvb95xlUUUhu4+3phYvJhWiH6qZwQuv1xLoSXB9ZdGQ7dY9MJErl0JMxlX6C7/WGxw0PDfcuQOlC0SWV3nAgoUVhZxqCVDrDeGwGayqLubOlZX8+v1mtt6S6p1tXjeXkvwL962+uvMQtd4gVSV5eHKdnPEGWTSrMNOj6lt1GE2YXF9ZlNmtWUSyh0JMJsRoqxiHCo30tfvPdlDfGWbDkgpWVRfjC8fx5DnZvDZV+v6/3zQ3c83AQOzby0s/51CvN/A+3VC04rzIlUshJhNipCrGi63gPty2L2MxMNSGGw4czVChVpwXuXIZlmWNcfGeK5vf78fj8eDz+SgqKprq5sgQxqtnM1k9JPXERCbXWL7H1ROTSTdehRKT1UNSYYfIlUshJllLyz+JiEJMspZ6SCKiyc4iIpK1FGJyVUrPU0uvoC8iVyeFmFyVtOeXyPSge2JyVVLRh8j0oBCTq5KKPkSmBw0niohI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1lKIiYhI1prQEOvq6mLLli14PB48Hg9btmyhu7t71Nd//vOfxzAMvv3tb09YG0VEJHtNaIg98MADHDp0iFdeeYVXXnmFQ4cOsWXLllFd+/LLL/PWW28xe/bsiWyiiIhkMcdEPfHx48d55ZVX2L9/P2vWrAFg+/btrFu3jpMnT7J48eJhrz137hyPPPIIv/3tb/n4xz8+UU0UEZEsN2E9sX379uHxeDIBBrB27Vo8Hg979+4d9jrTNNmyZQt/8zd/w9KlSy/6OtFoFL/f3+9HRESmhwkLsZaWFsrLywcdLy8vp6WlZdjrvvWtb+FwOPjSl740qtfZtm1b5p6bx+Ohurr6ktssIiLZZcwh9uSTT2IYxog/Bw4cAMAwjEHXW5Y15HGAgwcP8swzz/CDH/xg2HMGevzxx/H5fJmfxsbGsb4lERHJUmO
+J/bII49w//33j3jOvHnzOHz4MK2trYMe83q9VFRUDHndm2++SVtbG3PmzMkcSyaT/PVf/zXf/va3+eCDDwZd43a7cbvdY3sTIiJyVRhziJWWllJaWnrR89atW4fP5+Ptt9/mpptuAuCtt97C5/Px4Q9/eMhrtmzZwu23397v2Ec/+lG2bNnC5z73ubE2VURErnITVp24ZMkSPvaxj7F161a+853vAPBXf/VX3Hnnnf0qE6+77jq2bdvGJz/5SWbOnMnMmTP7PY/T6WTWrFkjVjOKiMj0NKHzxH74wx+yfPlyNm7cyMaNG1mxYgX/+q//2u+ckydP4vP5JrIZIiJylTIsy7KmuhHjye/34/F48Pl8FBUVTXVzRERkjMbyPa61E0VEJGspxEREJGspxEREJGspxEREJGspxEREJGspxEREJGspxEREJGspxEREJGspxEREJGspxEREJGspxEREJGspxEREJGspxEREJGspxEREJGspxEREJGspxEREJGspxEREJGspxEREJGspxEREJGspxEREJGspxEREJGspxEREJGspxEREJGspxEREJGspxEREJGspxEREJGspxEREJGspxEREJGspxEREJGs5proB482yLAD8fv8Ut0RERC5F+vs7/X0+kqsuxAKBAADV1dVT3BIREbkcgUAAj8cz4jmGNZqoyyKmaXL+/HkKCwsxDGOqmwOk/lVRXV1NY2MjRUVFU92cK5Y+p9HR5zQ6+pxG50r8nCzLIhAIMHv2bGy2ke96XXU9MZvNRlVV1VQ3Y0hFRUVXzF+SK5k+p9HR5zQ6+pxG50r7nC7WA0tTYYeIiGQthZiIiGQthdgkcLvdPPHEE7jd7qluyhVNn9Po6HMaHX1Oo5Ptn9NVV9ghIiLTh3piIiKStRRiIiKStRRiIiKStRRiIiKStRRiE6Srq4stW7bg8XjweDxs2bKF7u7uUV//+c9/HsMw+Pa3vz1hbbwSjPVzisfjfP3rX2f58uXk5+cze/ZsHnzwQc6fPz95jZ4Ezz//PDU1NeTk5LB69WrefPPNEc9/4403WL16NTk5OcyfP59//Md/nKSWTq2xfE4/+9nP2LBhA2VlZRQVFbFu3Tp++9vfTmJrp85Y/z6l/fGPf8ThcLBq1aqJbeDlsGRCfOxjH7OWLVtm7d2719q7d6+1bNky68477xzVtT//+c+tlStXWrNnz7b+5//8nxPb0Ck21s+pu7vbuv32262dO3daJ06csPbt22etWbPGWr169SS2emL927/9m+V0Oq3t27dbx44dsx599FErPz/fqq+vH/L8s2fPWnl5edajjz5qHTt2zNq+fbvldDqtn/70p5Pc8sk11s/p0Ucftb71rW9Zb7/9tnXq1Cnr8ccft5xOp/Xuu+9Ocssn11g/p7Tu7m5r/vz51saNG62VK1dOTmMvgUJsAhw7dswCrP3792eO7du3zwKsEydOjHhtU1OTdc0111hHjx615s6de1WH2OV8Tn29/fbbFnDR/1Nmi5tuusl66KGH+h277rrrrMcee2zI8//2b//Wuu666/od+/znP2+tXbt2wtp4JRjr5zSU66+/3nrqqafGu2lXlEv9nO677z7rv//3/2498cQTV3SIaThxAuzbtw+Px8OaNWsyx9auXYvH42Hv3r3DXmeaJlu2bOFv/uZvWLp06WQ0dUpd6uc0kM/nwzAMiouLJ6CVkysWi3Hw4EE2btzY7/jGjRuH/Uz27ds36PyPfvSjHDhwgHg8PmFtnUqX8jkNZJomgUCAkpKSiWjiFeFSP6fvf//71NbW8sQTT0x0Ey/bVbcA8JWgpaWF8vLyQcfLy8tpaWkZ9rpvfetbOBwOvvSlL01k864Yl/o59dXT08Njjz3GAw88cEUtXnqp2tvbSSaTVFRU9DteUVEx7GfS0tIy5PmJRIL29nYqKysnrL1T5VI+p4H+/u//nlAoxL333jsRTbwiXMrndPr0aR577DHefPNNHI4rPyL
UExuDJ598EsMwRvw5cOAAwJDbwFiWNez2MAcPHuSZZ57hBz/4wRWzhcylmsjPqa94PM7999+PaZo8//zz4/4+ptLA93+xz2So84c6frUZ6+eU9qMf/Ygnn3ySnTt3DvkPqavNaD+nZDLJAw88wFNPPcWiRYsmq3mX5cqP2SvII488wv333z/iOfPmzePw4cO0trYOeszr9Q76F1Ham2++SVtbG3PmzMkcSyaT/PVf/zXf/va3+eCDDy6r7ZNpIj+ntHg8zr333ktdXR2vvfbaVdELAygtLcVutw/6V3JbW9uwn8msWbOGPN/hcDBz5swJa+tUupTPKW3nzp385//8n/nJT37C7bffPpHNnHJj/ZwCgQAHDhzgvffe45FHHgFSw66WZeFwOPjd737HbbfdNiltH7UpvB931UoXLLz11luZY/v37x+xYKG9vd06cuRIv5/Zs2dbX//618dU5JBNLuVzsizLisVi1j333GMtXbrUamtrm4ymTqqbbrrJ+sIXvtDv2JIlS0Ys7FiyZEm/Yw899NC0KOwYy+dkWZb14osvWjk5OdbPf/7zCW7dlWMsn1MymRz0PfSFL3zBWrx4sXXkyBErGAxOVrNHTSE2QT72sY9ZK1assPbt22ft27fPWr58+aDS8cWLF1s/+9nPhn2Oq7060bLG/jnF43Hr7rvvtqqqqqxDhw5Zzc3NmZ9oNDoVb2HcpUuiv/e971nHjh2zvvzlL1v5+fnWBx98YFmWZT322GPWli1bMuenS+y/8pWvWMeOHbO+973vTasS+9F+Ti+++KLlcDis5557rt/fm+7u7ql6C5NirJ/TQFd6daJCbIJ0dHRYn/nMZ6zCwkKrsLDQ+sxnPmN1dXX1Owewvv/97w/7HNMhxMb6OdXV1VnAkD9/+MMfJr39E+W5556z5s6da7lcLuuGG26w3njjjcxjn/3sZ63169f3O//111+3/uzP/sxyuVzWvHnzrBdeeGGSWzw1xvI5rV+/fsi/N5/97Gcnv+GTbKx/n/q60kNMW7GIiEjWUnWiiIhkLYWYiIhkLYWYiIhkLYWYiIhkLYWYiIhkLYWYiIhkLYWYiIhkLYWYiIhkLYWYiIhkLYWYiIhkLYWYiIhkLYWYiIhkrf8fe8T29a4alk4AAAAASUVORK5CYII=", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "plt.scatter(samples[\"parameters\"][0, :, 0], samples[\"parameters\"][0, :, 1], alpha=0.75, s=0.5)\n", + "plt.gca().set_aspect(\"equal\", adjustable=\"box\")\n", + "plt.xlim([-0.5, 0.5])\n", + "plt.ylim([-0.5, 0.5])" + ] + }, + { + "cell_type": "markdown", + "id": "18d81a7e-3916-4822-b036-67f9f53ca856", + "metadata": {}, + "source": [ + "## EDM only" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "a3dbcade-9beb-41ef-bdf3-aec3503eba50", + "metadata": {}, + "outputs": [], + "source": [ + "from collections.abc import Sequence\n", + "import keras\n", + "from keras import ops\n", + "\n", + "from bayesflow.types import Tensor, Shape\n", + "import bayesflow as bf\n", + "from bayesflow.networks import InferenceNetwork\n", + "from bayesflow.utils.serialization import serialize, deserialize, serializable\n", + "\n", + "from bayesflow.utils import (\n", + " expand_right_as,\n", + " find_network,\n", + " jacobian_trace,\n", + " weighted_mean,\n", + " integrate,\n", + ")\n", + "\n", + "\n", + "@serializable\n", + "class EDM(InferenceNetwork):\n", + " \"\"\"Diffusion Model as described as Elucidated Diffusion Model in [1].\n", + "\n", + " [1] Elucidating the Design Space of Diffusion-Based Generative Models: arXiv:2206.00364\n", + " \"\"\"\n", + "\n", + " MLP_DEFAULT_CONFIG = {\n", + " \"widths\": (256, 256, 256, 256, 256),\n", + " \"activation\": \"mish\",\n", + " \"kernel_initializer\": \"he_normal\",\n", + " \"residual\": True,\n", + " \"dropout\": 0.0,\n", + " \"spectral_normalization\": False,\n", + " }\n", + "\n", + " INTEGRATE_DEFAULT_CONFIG = {\n", + " \"method\": \"euler\",\n", + " \"steps\": 100,\n", + " }\n", + "\n", + " def __init__(\n", + " self,\n", + " subnet: str | type = \"mlp\",\n", + " integrate_kwargs: dict[str, any] = None,\n", + " subnet_kwargs: dict[str, any] = None,\n", + " sigma_data=1.0,\n", + " **kwargs,\n", + " ):\n", + " \"\"\"\n", + " 
Initializes a diffusion model with configurable subnet architecture.\n", + "\n", + " This model learns a transformation from a Gaussian latent distribution to a target distribution using a\n", + " specified subnet type, which can be an MLP or a custom network.\n", + "\n", + " The integration steps can be customized with additional parameters available in the respective\n", + " configuration dictionary.\n", + "\n", + " Parameters\n", + " ----------\n", + " subnet : str or type, optional\n", + " The architecture used for the transformation network. Can be \"mlp\" or a custom\n", + " callable network. Default is \"mlp\".\n", + " integrate_kwargs : dict[str, any], optional\n", + " Additional keyword arguments for the integration process. Default is None.\n", + " subnet_kwargs : dict[str, any], optional\n", + " Keyword arguments passed to the subnet constructor or used to update the default MLP settings.\n", + " sigma_data : float, optional\n", + " Averaged standard deviation of the target distribution. 
Default is 1.0.\n", + " **kwargs\n", + " Additional keyword arguments passed to the subnet and other components.\n", + " \"\"\"\n", + "\n", + " super().__init__(base_distribution=None, **kwargs)\n", + "\n", + " # tunable parameters not intended to be modified by the average user\n", + " self.max_sigma = kwargs.get(\"max_sigma\", 80.0)\n", + " self.min_sigma = kwargs.get(\"min_sigma\", 1e-4)\n", + " self.rho = kwargs.get(\"rho\", 7)\n", + "\n", + " # latent distribution (not configurable)\n", + " self.base_distribution = bf.distributions.DiagonalNormal(\n", + " mean=0.0, std=self.max_sigma\n", + " )\n", + " self.integrate_kwargs = self.INTEGRATE_DEFAULT_CONFIG | (integrate_kwargs or {})\n", + "\n", + " self.sigma_data = sigma_data\n", + "\n", + " self.seed_generator = keras.random.SeedGenerator()\n", + "\n", + " subnet_kwargs = subnet_kwargs or {}\n", + " if subnet == \"mlp\":\n", + " subnet_kwargs = self.MLP_DEFAULT_CONFIG | subnet_kwargs\n", + "\n", + " self.subnet = find_network(subnet, **subnet_kwargs)\n", + " self.output_projector = keras.layers.Dense(units=None, bias_initializer=\"zeros\")\n", + "\n", + " def build(self, xz_shape: Shape, conditions_shape: Shape = None) -> None:\n", + " self.base_distribution.build(xz_shape)\n", + " self.output_projector.units = xz_shape[-1]\n", + " input_shape = list(xz_shape)\n", + "\n", + " # construct time vector\n", + " input_shape[-1] += 1\n", + " if conditions_shape is not None:\n", + " input_shape[-1] += conditions_shape[-1]\n", + "\n", + " input_shape = tuple(input_shape)\n", + "\n", + " self.subnet.build(input_shape)\n", + " out_shape = self.subnet.compute_output_shape(input_shape)\n", + " self.output_projector.build(out_shape)\n", + "\n", + " def get_config(self):\n", + " base_config = super().get_config()\n", + " config = {\n", + " \"integrate_kwargs\": self.integrate_kwargs,\n", + " \"subnet\": self.subnet,\n", + " \"sigma_data\": self.sigma_data,\n", + " }\n", + " return base_config | serialize(config)\n", + "\n", 
+ " @classmethod\n", + " def from_config(cls, config, custom_objects=None):\n", + " return cls(**deserialize(config, custom_objects=custom_objects))\n", + "\n", + " def _c_skip_fn(self, sigma):\n", + " return self.sigma_data**2 / (sigma**2 + self.sigma_data**2)\n", + "\n", + " def _c_out_fn(self, sigma):\n", + " return sigma * self.sigma_data / ops.sqrt(self.sigma_data**2 + sigma**2)\n", + "\n", + " def _c_in_fn(self, sigma):\n", + " return 1.0 / ops.sqrt(sigma**2 + self.sigma_data**2)\n", + "\n", + " def _c_noise_fn(self, sigma):\n", + " return 0.25 * ops.log(sigma)\n", + "\n", + " def _denoiser_fn(\n", + " self,\n", + " xz: Tensor,\n", + " sigma: Tensor,\n", + " conditions: Tensor = None,\n", + " training: bool = False,\n", + " ):\n", + " # calculate output of the network\n", + " c_in = self._c_in_fn(sigma)\n", + " c_noise = self._c_noise_fn(sigma)\n", + " xz_pre = c_in * xz\n", + " if conditions is None:\n", + " xtc = keras.ops.concatenate([xz_pre, c_noise], axis=-1)\n", + " else:\n", + " xtc = keras.ops.concatenate([xz_pre, c_noise, conditions], axis=-1)\n", + " out = self.output_projector(\n", + " self.subnet(xtc, training=training), training=training\n", + " )\n", + " return self._c_skip_fn(sigma) * xz + self._c_out_fn(sigma) * out\n", + "\n", + " def velocity(\n", + " self,\n", + " xz: Tensor,\n", + " sigma: float | Tensor,\n", + " conditions: Tensor = None,\n", + " training: bool = False,\n", + " ) -> Tensor:\n", + " # transform sigma vector into correct shape\n", + " sigma = keras.ops.convert_to_tensor(sigma, dtype=keras.ops.dtype(xz))\n", + " sigma = expand_right_as(sigma, xz)\n", + " sigma = keras.ops.broadcast_to(sigma, keras.ops.shape(xz)[:-1] + (1,))\n", + "\n", + " d = self._denoiser_fn(xz, sigma, conditions, training=training)\n", + " return (xz - d) / sigma\n", + "\n", + " def _velocity_trace(\n", + " self,\n", + " xz: Tensor,\n", + " sigma: Tensor,\n", + " conditions: Tensor = None,\n", + " max_steps: int = None,\n", + " training: bool = 
False,\n", + " ) -> (Tensor, Tensor):\n", + " def f(x):\n", + " return self.velocity(\n", + " x, sigma=sigma, conditions=conditions, training=training\n", + " )\n", + "\n", + " v, trace = jacobian_trace(\n", + " f, xz, max_steps=max_steps, seed=self.seed_generator, return_output=True\n", + " )\n", + "\n", + " return v, keras.ops.expand_dims(trace, axis=-1)\n", + "\n", + " def _forward(\n", + " self,\n", + " x: Tensor,\n", + " conditions: Tensor = None,\n", + " density: bool = False,\n", + " training: bool = False,\n", + " **kwargs,\n", + " ) -> Tensor | tuple[Tensor, Tensor]:\n", + " integrate_kwargs = self.integrate_kwargs | kwargs\n", + " if isinstance(integrate_kwargs[\"steps\"], int):\n", + " # set schedule for specified number of steps\n", + " integrate_kwargs[\"steps\"] = self._integration_schedule(\n", + " integrate_kwargs[\"steps\"], dtype=ops.dtype(x)\n", + " )\n", + " if density:\n", + "\n", + " def deltas(time, xz):\n", + " v, trace = self._velocity_trace(\n", + " xz, sigma=time, conditions=conditions, training=training\n", + " )\n", + " return {\"xz\": v, \"trace\": trace}\n", + "\n", + " state = {\n", + " \"xz\": x,\n", + " \"trace\": keras.ops.zeros(\n", + " keras.ops.shape(x)[:-1] + (1,), dtype=keras.ops.dtype(x)\n", + " ),\n", + " }\n", + " state = integrate(\n", + " deltas,\n", + " state,\n", + " **integrate_kwargs,\n", + " )\n", + "\n", + " z = state[\"xz\"]\n", + " log_density = self.base_distribution.log_prob(z) + keras.ops.squeeze(\n", + " state[\"trace\"], axis=-1\n", + " )\n", + "\n", + " return z, log_density\n", + "\n", + " def deltas(time, xz):\n", + " return {\n", + " \"xz\": self.velocity(\n", + " xz, sigma=time, conditions=conditions, training=training\n", + " )\n", + " }\n", + "\n", + " state = {\"xz\": x}\n", + " state = integrate(\n", + " deltas,\n", + " state,\n", + " **integrate_kwargs,\n", + " )\n", + "\n", + " z = state[\"xz\"]\n", + "\n", + " return z\n", + "\n", + " def _inverse(\n", + " self,\n", + " z: Tensor,\n", + " 
conditions: Tensor = None,\n", + " density: bool = False,\n", + " training: bool = False,\n", + " **kwargs,\n", + " ) -> Tensor | tuple[Tensor, Tensor]:\n", + " integrate_kwargs = self.integrate_kwargs | kwargs\n", + " if isinstance(integrate_kwargs[\"steps\"], int):\n", + " # set schedule for specified number of steps\n", + " integrate_kwargs[\"steps\"] = self._integration_schedule(\n", + " integrate_kwargs[\"steps\"], inverse=True, dtype=ops.dtype(z)\n", + " )\n", + " if density:\n", + "\n", + " def deltas(time, xz):\n", + " v, trace = self._velocity_trace(\n", + " xz, sigma=time, conditions=conditions, training=training\n", + " )\n", + " return {\"xz\": v, \"trace\": trace}\n", + "\n", + " state = {\n", + " \"xz\": z,\n", + " \"trace\": keras.ops.zeros(\n", + " keras.ops.shape(z)[:-1] + (1,), dtype=keras.ops.dtype(z)\n", + " ),\n", + " }\n", + " state = integrate(deltas, state, **integrate_kwargs)\n", + "\n", + " x = state[\"xz\"]\n", + " log_density = self.base_distribution.log_prob(z) - keras.ops.squeeze(\n", + " state[\"trace\"], axis=-1\n", + " )\n", + "\n", + " return x, log_density\n", + "\n", + " def deltas(time, xz):\n", + " return {\n", + " \"xz\": self.velocity(\n", + " xz, sigma=time, conditions=conditions, training=training\n", + " )\n", + " }\n", + "\n", + " state = {\"xz\": z}\n", + " state = integrate(\n", + " deltas,\n", + " state,\n", + " **integrate_kwargs,\n", + " )\n", + "\n", + " x = state[\"xz\"]\n", + "\n", + " return x\n", + "\n", + " def compute_metrics(\n", + " self,\n", + " x: Tensor | Sequence[Tensor, ...],\n", + " conditions: Tensor = None,\n", + " sample_weight: Tensor = None,\n", + " stage: str = \"training\",\n", + " ) -> dict[str, Tensor]:\n", + " training = stage == \"training\"\n", + " if not self.built:\n", + " xz_shape = keras.ops.shape(x)\n", + " conditions_shape = (\n", + " None if conditions is None else keras.ops.shape(conditions)\n", + " )\n", + " self.build(xz_shape, conditions_shape)\n", + " # hyper-parameters for 
sampling the noise level\n", + " p_mean = -1.2\n", + " p_std = 1.2\n", + "\n", + " # sample log-noise level\n", + " log_sigma = p_mean + p_std * keras.random.normal(\n", + " ops.shape(x)[:1], dtype=ops.dtype(x), seed=self.seed_generator\n", + " )\n", + " # noise level with shape (batch_size, 1)\n", + " sigma = ops.exp(log_sigma)[:, None]\n", + "\n", + " # generate noise vector\n", + " z = sigma * keras.random.normal(\n", + " ops.shape(x), dtype=ops.dtype(x), seed=self.seed_generator\n", + " )\n", + "\n", + " # calculate preconditioning\n", + " c_skip = self._c_skip_fn(sigma)\n", + " c_out = self._c_out_fn(sigma)\n", + " c_in = self._c_in_fn(sigma)\n", + " c_noise = self._c_noise_fn(sigma)\n", + " xz_pre = c_in * (x + z)\n", + "\n", + " # calculate output of the network\n", + " if conditions is None:\n", + " xtc = keras.ops.concatenate([xz_pre, c_noise], axis=-1)\n", + " else:\n", + " xtc = keras.ops.concatenate([xz_pre, c_noise, conditions], axis=-1)\n", + "\n", + " out = self.output_projector(\n", + " self.subnet(xtc, training=training), training=training\n", + " )\n", + "\n", + " # Calculate loss:\n", + " lam = 1 / c_out[:, 0] ** 2\n", + " effective_weight = lam * c_out[:, 0] ** 2\n", + " unweighted_loss = ops.mean(\n", + " (out - 1 / c_out * (x - c_skip * (x + z))) ** 2, axis=-1\n", + " )\n", + " loss = effective_weight * unweighted_loss\n", + " loss = weighted_mean(loss, sample_weight)\n", + "\n", + " base_metrics = super().compute_metrics(x, conditions, sample_weight, stage)\n", + " return base_metrics | {\"loss\": loss}\n", + "\n", + " def _integration_schedule(self, steps, inverse=False, dtype=None):\n", + " def sigma_i(i, steps):\n", + " N = steps + 1\n", + " return (\n", + " self.max_sigma ** (1 / self.rho)\n", + " + (i / (N - 1))\n", + " * (self.min_sigma ** (1 / self.rho) - self.max_sigma ** (1 / self.rho))\n", + " ) ** self.rho\n", + "\n", + " steps = sigma_i(ops.arange(steps + 1, dtype=dtype), steps)\n", + " if not inverse:\n", + " steps = 
ops.flip(steps)\n", + " return steps" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "fb1d3911-4c67-4b13-9a3b-43980ab1226f", + "metadata": { + "ExecuteTime": { + "end_time": "2024-10-24T08:36:26.618926Z", + "start_time": "2024-10-24T08:36:26.614443Z" + } + }, + "outputs": [ + { + "data": { + "text/plain": [ + "Adapter([0: ToArray -> 1: ConvertDType -> 2: Concatenate(['parameters'] -> 'inference_variables') -> 3: Standardize(exclude=['inference_variables']) -> 4: Rename('observables' -> 'inference_conditions')])" + ] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "adapter_edm = (\n", + " bf.adapters.Adapter.create_default(inference_variables=[\"parameters\"])\n", + " # standardize data variables to zero mean and unit variance\n", + " .standardize(exclude=\"inference_variables\")\n", + " # rename the variables to match the required approximator inputs\n", + " .rename(\"observables\", \"inference_conditions\")\n", + ")\n", + "adapter_edm" + ] + }, + { + "cell_type": "markdown", + "id": "ba1a7152-06b6-4f8c-a410-3cc6bb52dde5", + "metadata": {}, + "source": [ + "## Dataset\n", + "\n", + "For this example, we will sample our training data ahead of time and use offline training with a very small number of epochs. In actual applications, you usually want to train much longer in order to max our performance." 
+ ] + }, + { + "cell_type": "markdown", + "id": "246e59b9-10f5-45a5-9144-302032c64546", + "metadata": { + "ExecuteTime": { + "end_time": "2024-09-23T14:39:46.950573Z", + "start_time": "2024-09-23T14:39:46.948624Z" + } + }, + "source": [ + "num_training_batches = 512\n", + "num_validation_sets = 300\n", + "batch_size = 64\n", + "epochs = 50" + ] + }, + { + "cell_type": "markdown", + "id": "7eede657-15eb-4af4-8ea2-01e98a1cb785", + "metadata": { + "ExecuteTime": { + "end_time": "2024-09-23T14:39:53.268860Z", + "start_time": "2024-09-23T14:39:46.994697Z" + } + }, + "source": [ + "training_data = simulator.sample(num_training_batches * batch_size)\n", + "validation_data = simulator.sample(num_validation_sets)" + ] + }, + { + "cell_type": "markdown", + "id": "45d81023-b167-4710-9f46-91d7a1f34a4d", + "metadata": {}, + "source": [ + "## Training a neural network to approximate all posteriors\n", + "\n", + "The next step is to set up the neural network that will approximate the posterior $p(\\theta\\,|\\,x)$.\n", + "\n", + "We choose **Flow Matching** [1, 2] as the backbone architecture for this example, as it can deal well with the multimodal nature of the posteriors that some observables imply.\n", + "\n", + "* [1] Lipman, Y., Chen, R. T., Ben-Hamu, H., Nickel, M., & Le, M. Flow Matching for Generative Modeling. In *The Eleventh International Conference on Learning Representations*.\n", + "\n", + "* [2] Wildberger, J. B., Dax, M., Buchholz, S., Green, S. R., Macke, J. H., & Schölkopf, B. Flow Matching for Scalable Simulation-Based Inference. In *Thirty-seventh Conference on Neural Information Processing Systems*." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "8838e4d9-eeb0-4d26-8cda-48df9dadaa85", + "metadata": { + "ExecuteTime": { + "end_time": "2024-09-23T14:39:53.339590Z", + "start_time": "2024-09-23T14:39:53.319852Z" + } + }, + "outputs": [], + "source": [ + "edm = EDM(\n", + " subnet=\"mlp\", \n", + " subnet_kwargs={\"dropout\": 0.0, \"widths\": (256,)*6}, # override default dropout = 0.05 and widths = (256,)*5\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "b93b2378-8b06-42d3-bca0-9787f3a81205", + "metadata": {}, + "source": [ + "This inference network is just a general Flow Matching backbone, not yet adapted to the specific inference task at hand (i.e., posterior appproximation). To achieve this adaptation, we combine the network with our data adapter, which together form an `approximator`. In this case, we need a `ContinuousApproximator` since the target we want to approximate is the posterior of the *continuous* parameter vector $\\theta$." + ] + }, + { + "cell_type": "markdown", + "id": "e436903a-a025-4269-b02f-cd84e8ce6902", + "metadata": {}, + "source": [ + "### Basic Workflow\n", + "We can hide many of the traditional deep learning steps (e.g., specifying a learning rate and an optimizer) within a `Workflow` object. This object just wraps everything together and includes some nice utility functions for training and *in silico* validation." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "c93d17aa-fa3d-455d-8c7a-30a02aea207c", + "metadata": { + "ExecuteTime": { + "end_time": "2024-09-23T14:39:53.371691Z", + "start_time": "2024-09-23T14:39:53.369375Z" + } + }, + "outputs": [], + "source": [ + "edm_workflow = bf.BasicWorkflow(\n", + " simulator=simulator,\n", + " adapter=adapter_edm,\n", + " inference_network=edm,\n", + " initial_learning_rate=1e-3\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "36707fe1-15b1-42df-a14c-7449645b0955", + "metadata": {}, + "source": [ + "### Training\n", + "\n", + "We are ready to train our deep posterior approximator on the two moons example. We use the utility function `fit_offline`, which wraps the approximator's super flexible `fit` method." + ] + }, + { + "cell_type": "markdown", + "id": "bc331f9f-04b0-45d3-bbf7-0099a1b966a3", + "metadata": {}, + "source": [ + "diffusion_model_workflow.approximator.build_from_data(diffusion_model_workflow.adapter(validation_data))" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "id": "e23cd1d5-92a5-41fe-b273-96d7420760af", + "metadata": { + "ExecuteTime": { + "end_time": "2024-09-23T14:42:36.067393Z", + "start_time": "2024-09-23T14:39:53.513436Z" + } + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:bayesflow:Fitting on dataset instance of OnlineDataset.\n", + "INFO:bayesflow:Building on a test batch.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Epoch 1/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m4s\u001b[0m 4ms/step - loss: 0.8323 - loss/inference_loss: 0.8323 - val_loss: 0.6503 - val_loss/inference_loss: 0.6503\n", + "Epoch 2/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.4613 - loss/inference_loss: 0.4613 - val_loss: 0.4215 - val_loss/inference_loss: 0.4215\n", + 
"Epoch 3/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.3344 - loss/inference_loss: 0.3344 - val_loss: 0.3557 - val_loss/inference_loss: 0.3557\n", + "Epoch 4/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.3013 - loss/inference_loss: 0.3013 - val_loss: 0.2316 - val_loss/inference_loss: 0.2316\n", + "Epoch 5/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.2833 - loss/inference_loss: 0.2833 - val_loss: 0.1944 - val_loss/inference_loss: 0.1944\n", + "Epoch 6/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.2795 - loss/inference_loss: 0.2795 - val_loss: 0.2124 - val_loss/inference_loss: 0.2124\n", + "Epoch 7/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.2705 - loss/inference_loss: 0.2705 - val_loss: 0.2438 - val_loss/inference_loss: 0.2438\n", + "Epoch 8/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.2699 - loss/inference_loss: 0.2699 - val_loss: 0.3187 - val_loss/inference_loss: 0.3187\n", + "Epoch 9/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.2581 - loss/inference_loss: 0.2581 - val_loss: 0.3320 - val_loss/inference_loss: 0.3320\n", + "Epoch 10/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.2620 - loss/inference_loss: 0.2620 - val_loss: 0.1860 - val_loss/inference_loss: 0.1860\n", + "Epoch 11/50\n", + "\u001b[1m512/512\u001b[0m 
\u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.2571 - loss/inference_loss: 0.2571 - val_loss: 0.2292 - val_loss/inference_loss: 0.2292\n", + "Epoch 12/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.2563 - loss/inference_loss: 0.2563 - val_loss: 0.3294 - val_loss/inference_loss: 0.3294\n", + "Epoch 13/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.2517 - loss/inference_loss: 0.2517 - val_loss: 0.1884 - val_loss/inference_loss: 0.1884\n", + "Epoch 14/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.2446 - loss/inference_loss: 0.2446 - val_loss: 0.2823 - val_loss/inference_loss: 0.2823\n", + "Epoch 15/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.2497 - loss/inference_loss: 0.2497 - val_loss: 0.2973 - val_loss/inference_loss: 0.2973\n", + "Epoch 16/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.2450 - loss/inference_loss: 0.2450 - val_loss: 0.1761 - val_loss/inference_loss: 0.1761\n", + "Epoch 17/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.2426 - loss/inference_loss: 0.2426 - val_loss: 0.1549 - val_loss/inference_loss: 0.1549\n", + "Epoch 18/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.2471 - loss/inference_loss: 0.2471 - val_loss: 0.1838 - val_loss/inference_loss: 0.1838\n", + "Epoch 19/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 
4ms/step - loss: 0.2437 - loss/inference_loss: 0.2437 - val_loss: 0.2577 - val_loss/inference_loss: 0.2577\n", + "Epoch 20/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.2390 - loss/inference_loss: 0.2390 - val_loss: 0.3699 - val_loss/inference_loss: 0.3699\n", + "Epoch 21/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.2408 - loss/inference_loss: 0.2408 - val_loss: 0.2597 - val_loss/inference_loss: 0.2597\n", + "Epoch 22/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.2388 - loss/inference_loss: 0.2388 - val_loss: 0.2999 - val_loss/inference_loss: 0.2999\n", + "Epoch 23/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.2347 - loss/inference_loss: 0.2347 - val_loss: 0.2340 - val_loss/inference_loss: 0.2340\n", + "Epoch 24/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.2344 - loss/inference_loss: 0.2344 - val_loss: 0.3118 - val_loss/inference_loss: 0.3118\n", + "Epoch 25/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.2306 - loss/inference_loss: 0.2306 - val_loss: 0.1503 - val_loss/inference_loss: 0.1503\n", + "Epoch 26/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.2313 - loss/inference_loss: 0.2313 - val_loss: 0.1783 - val_loss/inference_loss: 0.1783\n", + "Epoch 27/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.2334 - loss/inference_loss: 0.2334 - val_loss: 0.2589 - 
val_loss/inference_loss: 0.2589\n", + "Epoch 28/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.2282 - loss/inference_loss: 0.2282 - val_loss: 0.1757 - val_loss/inference_loss: 0.1757\n", + "Epoch 29/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.2249 - loss/inference_loss: 0.2249 - val_loss: 0.2094 - val_loss/inference_loss: 0.2094\n", + "Epoch 30/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.2289 - loss/inference_loss: 0.2289 - val_loss: 0.3566 - val_loss/inference_loss: 0.3566\n", + "Epoch 31/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.2237 - loss/inference_loss: 0.2237 - val_loss: 0.1404 - val_loss/inference_loss: 0.1404\n", + "Epoch 32/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.2242 - loss/inference_loss: 0.2242 - val_loss: 0.3467 - val_loss/inference_loss: 0.3467\n", + "Epoch 33/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 5ms/step - loss: 0.2170 - loss/inference_loss: 0.2170 - val_loss: 0.1119 - val_loss/inference_loss: 0.1119\n", + "Epoch 34/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 5ms/step - loss: 0.2232 - loss/inference_loss: 0.2232 - val_loss: 0.1707 - val_loss/inference_loss: 0.1707\n", + "Epoch 35/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 5ms/step - loss: 0.2196 - loss/inference_loss: 0.2196 - val_loss: 0.2344 - val_loss/inference_loss: 0.2344\n", + "Epoch 36/50\n", + "\u001b[1m512/512\u001b[0m 
\u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 5ms/step - loss: 0.2152 - loss/inference_loss: 0.2152 - val_loss: 0.1856 - val_loss/inference_loss: 0.1856\n", + "Epoch 37/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3s\u001b[0m 5ms/step - loss: 0.2215 - loss/inference_loss: 0.2215 - val_loss: 0.1247 - val_loss/inference_loss: 0.1247\n", + "Epoch 38/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.2206 - loss/inference_loss: 0.2206 - val_loss: 0.2515 - val_loss/inference_loss: 0.2515\n", + "Epoch 39/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 5ms/step - loss: 0.2180 - loss/inference_loss: 0.2180 - val_loss: 0.1320 - val_loss/inference_loss: 0.1320\n", + "Epoch 40/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 5ms/step - loss: 0.2189 - loss/inference_loss: 0.2189 - val_loss: 0.2047 - val_loss/inference_loss: 0.2047\n", + "Epoch 41/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 5ms/step - loss: 0.2145 - loss/inference_loss: 0.2145 - val_loss: 0.2467 - val_loss/inference_loss: 0.2467\n", + "Epoch 42/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.2121 - loss/inference_loss: 0.2121 - val_loss: 0.2131 - val_loss/inference_loss: 0.2131\n", + "Epoch 43/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.2146 - loss/inference_loss: 0.2146 - val_loss: 0.1652 - val_loss/inference_loss: 0.1652\n", + "Epoch 44/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 
4ms/step - loss: 0.2163 - loss/inference_loss: 0.2163 - val_loss: 0.1934 - val_loss/inference_loss: 0.1934\n", + "Epoch 45/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.2174 - loss/inference_loss: 0.2174 - val_loss: 0.1204 - val_loss/inference_loss: 0.1204\n", + "Epoch 46/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.2149 - loss/inference_loss: 0.2149 - val_loss: 0.2139 - val_loss/inference_loss: 0.2139\n", + "Epoch 47/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.2139 - loss/inference_loss: 0.2139 - val_loss: 0.1210 - val_loss/inference_loss: 0.1210\n", + "Epoch 48/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.2165 - loss/inference_loss: 0.2165 - val_loss: 0.2491 - val_loss/inference_loss: 0.2491\n", + "Epoch 49/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.2145 - loss/inference_loss: 0.2145 - val_loss: 0.1370 - val_loss/inference_loss: 0.1370\n", + "Epoch 50/50\n", + "\u001b[1m512/512\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m2s\u001b[0m 4ms/step - loss: 0.2150 - loss/inference_loss: 0.2150 - val_loss: 0.1586 - val_loss/inference_loss: 0.1586\n" + ] + } + ], + "source": [ + "edm_history = edm_workflow.fit_online(\n", + " epochs=epochs,\n", + " num_batches_per_epoch=num_batches_per_epoch,\n", + " batch_size=batch_size, \n", + " validation_data=validation_data,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "id": "b16e87a6-cc08-477e-8135-9bca4ea53991", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "(-0.5, 0.5)" + ] + }, + "execution_count": 20, + 
"metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAbEAAAGdCAYAAACcvk38AAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjAsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvlHJYcgAAAAlwSFlzAAAPYQAAD2EBqD+naQAARmFJREFUeJzt3Wl0XNWd7/3vqUmzypY1WEbCyMbYxmNjgm2eiyEBHHAIISt9MZdg8nTnukNWCCR00oFON4P7BU/6Pt0XLhdI2p1OVmJCO3No2pfEwQx+YsvExhPxJCRblmwNpamkqpJqOud5UaqySpMto5J07N9nLS2jo3NKu84y9fPe+7/3MSzLshAREbEhx2Q3QERE5GIpxERExLYUYiIiYlsKMRERsS2FmIiI2JZCTEREbEshJiIitqUQExER23JNdgPGm2manD17loKCAgzDmOzmiIjIGFmWRU9PD7NmzcLhGL2vdcmF2NmzZ6msrJzsZoiIyEfU0NBARUXFqOdcciFWUFAAJN58YWHhJLdGRETGqru7m8rKytTn+WguuRBLDiEWFhYqxEREbOxCpoRU2CEiIralEBMREdtSiImIiG0pxERExLYUYiIiYlsKMRERsS2FmIiI2JZCTEREbEshJiIitqUQExER21KIiYiIbSnERETEthRiIiJiWwoxERGxLYWYiIjYlkJMRERsSyEmIiK2pRATERHbUoiJiIhtKcRERMS2FGIiImJbCjEREbEthZiIiNiWQkxERGxLISYiIralEBMREdtSiImIiG0pxERExLYUYiIiYlsTEmIvvfQSVVVVZGdns2LFCnbu3HlB1/3hD3/A5XKxfPnyzDZQRERsKeMhtnXrVr72ta/x7W9/m/3793PTTTdx5513cvr06VGv8/v9PPjgg9x6662ZbqKIiNiUYVmWlclfsHLlSq677jpefvnl1LGFCxdyzz338Oyzz4543X333ce8efNwOp38+te/5sCBAxf0+7q7u/F6vfj9fgoLCz9q80VEZIKN5XM8oz2xSCTCvn37WLt2bdrxtWvXsmvXrhGv+8EPfkBtbS1PPfVUJpsnIiI258rki7e1tRGPxykrK0s7XlZWRnNz87DX1NTU8Pjjj7Nz505crvM3LxwOEw6HU993d3d/tEaLiIhtTEhhh2EYad9bljXkGEA8Huf+++/nmWee4Zprrrmg13722Wfxer2pr8rKynFps4iITH0ZDbHi4mKcTueQXldra+uQ3hlAT08Pe/fu5eGHH8blcuFyudi0aRMHDx7E5XKxY8eOIdc88cQT+P3+1FdDQ0PG3o+IiEwtGR1O9Hg8rFixgu3bt/PZz342dXz79u185jOfGXJ+YWEhhw8fTjv20ksvsWPHDn7+859TVVU15JqsrCyysrLGv/EiIjLlZTTEAB577DE2bNjA9ddfz+rVq/mXf/kXTp8+zUMPPQQkelJnzpzhRz/6EQ6Hg8WLF6ddX1paSnZ29pDjIiIiGQ+x9evX097ezqZNm2hqamLx4sVs27aN2bNnA9DU1HTeNWMiIiLDyfg6sYmmdWIiIvY2ZdaJiYiIZJJCTEREbEshJiIitqUQExER21KIiYiIbSnERETEthRiIiJiWwoxERGxLYWYiIjYlkJMRERsSyEmIiK2pRATERHbUoiJiIhtKcRERMS2FGIiImJbCjEREbEthZiIiNiWQkxERGxLISYiIralEBMREdtSiImIiG0pxERExLYUYiIiYlsKMRERsS2FmIiI2JZCTEREbEshJiKXpFpfgBferKHWF5jspkgGKcRE5JK07VATW/c2sO1Q07i9poJx6nFNdgNE5PJS6wuw7VAT65aWM7ckP2O/Z93S8rQ/x0MyGAG+euu8cXtduXjqiYnIhLrYHtLgXtD5ekVzS/JTQTNev
ad1S8tZf33luAajfDQKMRGZUEsqvMwuymVJhXdM1w0Ov5HC8NX36lnzjzt49b36Uc+7GMlgzGQPUsZGISYiE+pwo58PfQE2v1s3au+o1hfgmdf+xDP/8SdqfQHWLS3n9oVldIQi1PoCqTAsLvCk9bRefruW0x29vPx2LXDxoSn2oDkxEZlQ65aWU13XTq0vyLZDTSPOLW071MTWPzYQjsfxh6L88/rlYMAv9jUmTrDg8Bk/oUgcXyAMJOapvnzLXF5+u5Yv3zKX2lRYBjnc6OeW+aVDfk9yjm5JhZfDjf4R5+omai5PxkYhJiITam5JPpvuWZwKhJGsW1rOT96rp8kfZ2eNj2de+xP+3iimBQdOd1Fc4CEcMyku8PCJBaUsqfCmzrl1QRnl3hy+8bOD/OmsH4/TSXGBZ9jfkxxu3HGslVpfgI5ghKfuXjTieaCijqlEISYiE25g0cVgtb4AW6rrwYJHbp3H/3jjOL5AhB9X1/PxBSUUZLk41tzNrL4cTMuisbOXx+9cyLZDTfzi/UbCMZMsl4MTLT3UtPQQjVtEYjFeP9jEf7th9pDflwzSk21Balp7ONDYRa0vMKS3lYlqR/noDMuyrMluxHjq7u7G6/Xi9/spLCyc7OaIyACDh+TePt7K87+voTjfQ8X0XG6eX8Lzv6/hg7N+LMvimrIC6nwB+mKJj6mFMws42RYgHLPI9TjpjcbJdju5Y9FMTrYFwbKo7wgxLdfDskov24+0Ujktm4iZuH5ZxTTuXj6Ld477wIAHViVCbcvueho7Q3xwppveWJxF5YVsumexhg0nyVg+xxViIjIh3j7eypO/+YBQJM6aeSW0dPfR1N1HnS+I04C8LBdzS/L54KyfuGlhWTDww8lpwGeWX8HOGh++QASDxM9dDoMst4NQOI7H5SAcM3EY4HE56IuaFGa7MC0IhGMYQHG+h96oCVjMKysAC2paA0TjJi6nQWGWm+6+KPdeX8lTdy/SXNgkGMvnuKoTRWRCbH63jsbOXtwOB7tr29ld105PbxSXwyAvy0VJQRYNHUGi8aEBZgDXzCzg7uWzKC7IwmmA02EAEDMtguE4AHHTBMC0IBZL/Hd3X4xAOAYkXrMtEKGsMIuCLDdHm7r54KyfaTlu8jxOinI95GY5MYz+X0pmdv6Q8aM5MRHJqGRP5q5libmkssJsth9pwWkYYMH0HDc94Rgn24L0j/qlB5gBLgdUTMvhsa0H6AhFcToMYqZFrttBKGoOuCbRPzOSFw4z0GSR6KW1B8LETYt43KK5u4+YadHVH6oAx5u6U6X81XXtKtGfohRiIpJR/8+2o7x5rJV8j4uVc4sAyHY76AnH8AUjALgdRirABrMsiJvwzgkfkXjipHj/yZG4mXZurP94VUke0ZhJa08f4djQF+4IRAiG40T7zzfNRPDlup2EonFMC/ac6mTboSY6QhEONnbxj28c450TPh5YNVvDilOIQkxEPpK3j7ey+d06Nq6ZQ2VRbmrN1TvHffh7o+z8sA3Tgu5wjO1HWgHIchlprxEdKcH6mRapABsoZg491zAgy+ngdFuQYfILgJaecNr3WS4HV0zPob6/N+h0kFpIveNYK+GYSU1rgPr2ECeae1T0MYUoxETkI9n8bh3VJ9sBWDVnBlv3NlBd187hM376onFi8UQvx+k4FzrD9Y7Gi8OC050hPG4nsUh8xPOSMWoBkZhJNGbicjqIx0xy3E4aO3v5xzeOE46ZXFmUi8fpwDBIW6Stoo/Jp8IOEflINq6Zw6qqGWxcMye1xdNdy8r53HUVVBblYhgwLddNrmfkfzNnD+qZXSjXoE+wPI+TpZVeCrNd3Ht9RWLebQQ5Hmdq7s0CGjp7CcdMDMCBQSRu0hmK0huJ09jRS01rD1lOBx6XQXGBh1pfgCd//QFb9tSz7VDTmDcolvGhnpiIfCS3zC9Nbef0wps1fOgL8NM/NpLjcXKmqxfTIrXTxkj6RumZ5Xqc9EbiqcBJltZDo
mc38PtgJM7R5h7CUZNf7T/DxxeUsONoKyaJebfKGbmc9AUTr+t2pF7XaRhgWCSn2HKznWAkKhstINz/gwONfgB++sdG2noiHG/pIdfjZEmFd8iOHtrhY2IoxERk3CT3Rdx/uotQ9NxQ3lhWozoMUoHnNMC0LCqm59DS3UckbuFxGUOGI93ORPGHaUFfNLFjR09fjBMtPZQWZjO3JI+Na+bw2oGz9PRG6QpF6QhFU+E3uzgXg8RQoQW09YSZnuse0rbk+Y2dIdqDYfqiJj19UV47cBZvjpvbry0bsrOHdvjILIWYiIyrssJs8rIc9EbjjGXmKznwNyPXQ1swgmGAw2FgWXDbwjL8vVF+c/Assdi5OTbLAodhMK80n56+GGe6eqkqzuO/31SV6A26HfTGTDaumcPhRj+/P9pC3LJYfIWXJn8vzd2JAo+TviBZA8YmnQ6DbLcLiKaOufvL+gF8gQg//WNjKqh31vhwOAweWHmucnG0rbVk/CjERGTcbNldz38ebsLRvwNHcpHxaAwSYeV0QCRm0dkbSezEYRgsnuVleeU0Hlg9my3V9eS4HeS4nfh7o7icDuaXFVBVnMfJtiAtkT5Wz5nBpnsWA/D6wSb2N3QRN61UaXxHKAIWPLB6Nt/42cFUiFlAX3/ViQFE4xZnunpxOoxUOb/TmQixZDDneRx4c920BcL0hGOUFmRpLdkkUIiJyPjp30nDm+3mk4vL+NHu+iFzYQPnsCAxfOhxGpR7c6jvCOFwQEmum5KCLB69bV5qvu2BVbMpyvUkyvdP+FJhtO1QE+/W+MjLcrFxzRzmluSn5ua8OS66+2JgJXpGT306sTt9rS9A1Yw8wtE4V0zLYV99Jx2hKEW5bqblejjV3r/wesA4aDRmprU7Erf4H/cu4ZXqenbXtdMZjLD53Toqi3JVqTiBFGIiMm4eWDWbE809fOgLUNMSoDDbhb83lvbhb5EIriTTgmm5HpZXTmN55TR6+qJ8cKablu6+tGeADRyeG/hcsOQ83LHm7lSIJOehBj4jLKnWF+AbPz3IidYePnntTKqK87hieg4/3duIx+UgGjdT2TVwfm7wMrVQJM47J3wcb+khFIlTkO067zPSZPwpxERk3MwtyeeuZeW8/HYtK+cUsWrODH60+xS+QCTtPNNK9MhWzynCMAzKCrN571QH66+vpLqunZaePgqyXRc0PJd8PtmTv/4gLUSGCzxI7IVY0xogFrc42RbkvVMdOIxEKIUi8bSAdRgG2S6DvpiZCrPEzh4OLMPgnWOtnO7oJdvt4Iv/pQoDQ4UcE0whJiIf2cBFv68fbOJMVy976jp4ZeMqigs8/PPvTtDdF02rKjQMmOnN4Z/XL+ft4620dPexpMLLkgovDZ0hQuH4iE9jHuxCH7QJiZ5bRzCCvzdRtLG8chrT89z8eFc92R4nC2YWEAzHONbcQ8y0CIRj/QUkiV7kgrJ8TrQEiVsmde0hAMJRkz11HdrJYxJosbOIfGQDd3pPLn6+a1k5L7xZww1VM/jj393O/Stnk5/l5NryAhbOLCDH48SbkyhjP9zop74jlAqtH/zFDXzhxqvG1KtJDjeeL0TmluTz1N2LqCrO471THWDAnroOTCyuLMrl86tm0xszCYRj9Ebj0N9rNKzEFNmx5gDxQWsGXE6DPXXtfPUn71PrC2ih8wRST0xEPrKBa6LmluRTWZSbGt6DxGLfZGHGuqXlNHSE2PxuHdfMTBRhJIcNk69zIeXpY9nyabhzk7+rIxih1hfE43Sy/3QnT/ziMF39FZJXl+QTjpnnCj1I9MaSxSluJ4BBtH/C7EhTDy/u+JCq4jy27m1IVE1297FxzZwL6lHK2CnEROQjGxw62w418aEvwNUl+cMG07ZDTdR3hHj9YOJPGPuuFhe6I0Zye6gP+3tFyXPn9rdtS3U9dy6eib83yuuHmmgLhJlXls/Kqhmp0v6z7/USiZ0r+CgrzGJuST5Hm7rpCEXTft/JtiBf+
cTVAOw41srBxi4aOkP84C9u0FBjBmg4UUTG3ZIKL1eX5KdK3gdbt7Sc9ddXsnHNHNZfX3lRxRDJ1zjftYl9DYNpgTrwZ9uPtIAB3hw3c0vy8LgdLJhZSFGeB0hUXJYWZGH2z4s5gem5HuaV5dPdey7AnA6Dimk5PHrbvFRgP3rbPCqm5xCKxPVQzQwxLGssG8JMfWN5rLWIZMYLb9awdW8D66+vnPRy89GGHZM/6whG2H60hduvLaMo13Pu+4VlFOV5aA+GefW9BtxOI7XY2jSttEe9uBwGS67wsvzKaWnPHNNO92M3ls9xhZiIjDu7fXAPbm8q3EIRth9pwWFAY2cvFdNzyPO4ONbcQ86Ap0pDYp9Hp8PAMAzuv+FKnrp70SS+I3sby+e45sREZNyNVpgxFQNuYHsHtg+gKNeDhcWr751m/swCllzhpa0nnLbBMSQWQ1umhdtpcKCxi1pfYMq8v0uZ5sREZEINLMefirZU17N5Zx1bqutT4WZgEAzH2VPXwZ66DrrDMYIDHriZXB9t9pfhH2/qZkt1/eS8gcuMemIiMqGm0iNKkr2utO2pBj4ps/+cjmCE2xaWAdATjnLFtMSjYUKROBXTc2jtCRPu30A4FjexSKyMnoq9zkuNQkxEJtRUeETJ4Dmv6rr2VKn/zfNLONHSw83zS9LK8x9YORuAzTvriMbNRMk94BsQYAAmMCPHzTUz84ct7ZfxpeFEEZnSBu5+MV47YaTWmFkMKfUfuHvIwPL84gIPO461Mrckj5hppTYEHhhgDiPxlOhgJM7Lb9dy+Iyfvmic4gLPR2qvjEw9MRGZ0gYuagYuaIEzDD+UN3D4EEj7WXJHjeGGO9ctLefJX3/AoTNd5Ge5iMUtHCR6XR6XQSSWeM6YZUFZoYeYadHdG8MwoKcvxusHm/hvN8wej9shg0xIT+yll16iqqqK7OxsVqxYwc6dO0c895e//CW33347JSUlFBYWsnr1an77299ORDNFZAoauKj5Qhc4w/AFJMljhxv9I+6zOHAPxoH/ndwT8ov/pYrlV07D40qUc4RjVto0WlN3GH9vlByPk0XlhSyrnMbGNXO0n2KGZDzEtm7dyte+9jW+/e1vs3//fm666SbuvPNOTp8+Pez57777Lrfffjvbtm1j3759fPzjH+fTn/40+/fvz3RTRWQKGilUzme4wBtLCA4OncqiXK4pK+BUWwh/b2JHfocB03NcuB3nKhQh0SNr6+njQIOfXLeTyqLcKV+VaVcZX+y8cuVKrrvuOl5++eXUsYULF3LPPffw7LPPXtBrLFq0iPXr1/Pkk0+e91wtdhaR8TBw15HkcOLBRj/RuEk0fu75YteWFzA918OJlh58gQhOAzASQTYtx004ZnLv9ZWpp1CrUvH8psxi50gkwr59+3j88cfTjq9du5Zdu3Zd0GuYpklPTw9FRUWZaKKIyLAGzo0lNzSeV5oo8Nh7qpPu3ihFeR6m5br546kOst1OXA6DXLcz8QwyINfjIhhJDC9OharMS1FGhxPb2tqIx+OUlZWlHS8rK6O5ufmCXuOf/umfCAaD3HvvvcP+PBwO093dnfYlIjLYWOekkrvcJwtBHlg5m0dvm0conHj686o5M/gf/3UZR8/2EIlbdPfFKCnwEDMT1YoW0N0Xxe008Oa6M/jOLm8TUthhGEba95ZlDTk2nFdffZWnn36arVu3Ulo6/LN4nn32Wbxeb+qrsrJyXNosIpeWsc5JJdeIvbLndKoQ5J0TPvY3dOFxOtm4Zg7vnPDh7zu3k70/FCXb7STbnfhoDYZjLKtIbAgsmZHRECsuLsbpdA7pdbW2tg7pnQ22detWvvjFL/LTn/6U2267bcTznnjiCfx+f+qroaFhxHNF5PI1WlHHcL205BDi3JK8c9dYEDctuvuivHPCxzsnfKm5MYBQ1KQjFCXcv4jMm+Nm0z2LaegI8fnN1bx9vDWj7/FylNE5MY/Hw4oVK9i+fTuf/exnU8e3b
9/OZz7zmRGve/XVV/nLv/xLXn31VT71qU+N+juysrLIysoatzaLyKVptDmp4R6wOfhp1cmA+9SSxPE9de2cagumXiP5tOcEC6fD4Jb5pcwtyefhV97naHMPHcGInvA8zjK+2Pmxxx5jw4YNXH/99axevZp/+Zd/4fTp0zz00ENAoid15swZfvSjHwGJAHvwwQd5/vnnWbVqVaoXl5OTg9frzXRzReQyNNwC5+GeVp18xtiJlh5OtQVxGvQvfjbp7X8siwFku5zcOHcGdy+fxTP/8SdOtSfCrrM3MnFv6jKR8Tmx9evX89xzz7Fp0yaWL1/Ou+++y7Zt25g9OzFG3NTUlLZm7Hvf+x6xWIyvfOUrlJeXp74effTRTDdVRC5TF7L+bN3Scm6/tozqunb2N3RRmOsmy+3kv99UhcNhpC14DkbiHG/p4Z0TPn6xr5G+/oCLxy0teh5neiimiMgFeuHNGr77bi2xeOK5YYFwnCuLcsjzODnanB5KeR4n915fCQYcqO+kxhfkr9ZUYWBMmadeT1VTZp2YiMilZEmFl2tKC6gqzuOq4lxefe80eVkuapp7hpwbjpn4e6N85RNXU5Tr4Wx3PXvqOti4Zg4wNR5FcylQiImIDDDaM8DeOeGj1hdg+ZXTKCnIIhSJ0+zvYbjhrJhp8bsjzbR093HXsnKyXA6Ot/SkyvVlfCjERET6DXx+GAyzU74F0bjJG4ebef2Aib83xkgrXj1Ogxy3kz+d7aahM0QoHGf+zAL1wMaZnicmItJv4PPDhgubB1bPZlqOh+buPrI9TkryPYy0b0PMtOjpi2Fh0d0bZf7MAjbds1j7Jo4z9cRERPoNXhs2UHKYcfEVhXSEInzsqiJOtgXxBUYum8/xOAnHTOaXKcAyRT0xEZF+o5XaJxdEV0zP5eGPX83dy2fR7O8b8bUMwGUYDDthJuNGISYicgGS21Y9sHo2Syq8fPNnB2nqHjnEHA6DYDSON8fNWX/viHs2at3YR6MQExE5j8EVi8//vmbUYUQAwwCXw6CqJJcsl4PiAs+IezTqYZkXT3NiIiLnMXBvxXVLy2nq7k39LH3PxHMqpuXizXFT0xKgPRjh9YNNtPVERt2jUcZOISYich6DH5DZ2h1O/WxggGW7DPpiiSONnb2JOTMDygqyyfU4OdkW5PZry0bdo1HGRsOJIiLnMTBoTrYFyXE7h5zjcRrE4hYOI9E7i8RN8rKc3LFoJjO92eyu6+D3R1soyvWoSnEcKcREREYxcB7rxR0f8qv9ZwhG4kPOi8QtYhaYViLEDCDb7eRYczfHmrupLMrhcysqWFLhVSHHOFKIiYiMYmDhxcm24AVVzCd7ao2dvZxo7sG0YFXVDJ769CION/pVyDGONCcmIjLIwGrE5PzVkgovh8504XEZRGKjR1k4bmIAJuBxJ4YUH1idePyUCjnGl3piIiKDDOx9JefDDjf62V3bTix+/r5YzLSYke+hvDCbXLeTquK81DzYhTy7TC6cemIiIgPU+gJ0BCNpVYS1vgCHGrswLXA5DUzTImaO/Boep8FN80q4e/ks3jnuoyMUodYXUHBlgHpiIiIDbDvUxPYBVYTJne3fPuEjFInjzXKPuHN9kmlZvFvj43Cjn6I8D9uPtGgOLEPUExMR4dw8WHGBh9lFuSyp8AKwpbqeg41dzCzMJmZa9EXiREfphQHEzER14sB5L82BZYZCTESEc/Ngs4tyqe8IcbjRzy3zS/tXMxv0RWP0hOP0nSfBslwOwjGTwhw3W6rr8YeiAGzZXc8Dq2drSHGcKcRERCCtCvFwoz/1/QOrZ3OipYd99Z30jTYRBkzPcdMbjVM5PYdlFdP4xb5GeiNxYqZFjttJUZ5Hu3OMM4WYiAjpu3LcMr8UODfEuHHNHDreOMbRpp4Rr892OQiEY0RNi9aeMHcvn4U3183rB87SFozgzXVpSDEDFGIiIsNIFnTU+oLcGZp53ueC9cVMHAY4DQOPy8HhRj9Pf
XoRN19TwuZ369i4Zo6GEjNAISYiMoxth5r40Bfg6pJ8sKC+PQQMv2u9wwDLSnwtqygEw+BkW5BaX4Bb5pemenYy/hRiIiLDGDhH9tqBs7icBo4oeNyOIcUdppV4dljMtKjxBQGLWl+AquI8zYFlmNaJiYgMIzlH9tqBs/z6wBm6+2KYkAowtyOxqDkpblo4HQaWZTGvrIDPXVehObAJoJ6YiMgoTrYFMYeZD4uaYAwYWHT1P4rF5TR49NZ5GkKcIOqJiYgM4+3jrXx+czXLKr04R9iiw+VIzJFluQxy3E4Kc1y4HAaHG/0T2tbLmXpiIiLDeP73NRxo6OJgo59cj5NwzCISPzcX5jAgP9tNZyhKPG4xrdDNl2+ZS1tPRMOIE0ghJiIyjKriPA6d8RMzTVZcOZ27lpXzrztP0hGMsLC8gPllhUzPc/PqngZys5wEwjHaeiIq5JhgGk4UERnGx6qmU1rg4aZ5xWy6ZzEnWgL4esLc82dX8A/3LKEoz0NnMIrTabDmmhIeWDlbPbBJoBATERmg1hfghTdr+OkfG2ntCRMKx5lbko8/FCUcM/GHoql9FjFg/fWV3HxNyWQ3+7Kl4UQRkQGSAXXDVUXkepxsXDOHWl+Ak21B3E4H3hw365aW0xGKgJVYT5YKNdBw4gRTiImIDJAcEly3tDy1TdQLb9ZwuiNIUZ6bm+eXMLckn6JcD1v3NlCU50m7RiaWhhNFRAZILnJu6Ajx+c3VvH28lXVLy1kws5BIzEqVz69bWs766ytTYffVW+dpb8RJoJ6YiMgwNr9bR/XJdgBe2biKTfcsZtuhJvW2phiFmIjIMDaumZP258BHtQCaB5siFGIiIsOoLMpl1ZwZVBblDvtzzYNNDZoTExEZRrKnte1Q07A/1zzY1KCemIjIMNTTsgeFmIjIMAbPgcnUpOFEERGxLYWYiIjYlkJMRERsSyEmIiK2pRATERHbUoiJiIhtKcRERMS2FGIiImJbCjEREbEthZiIiNiWQkxERGxLISYiIralEBMREdtSiImIiG0pxERExLYUYiIiYlsKMRERsS2FmIiI2JZCTEREbEshJiIitqUQExER21KIiYiIbSnERETEtiYkxF566SWqqqrIzs5mxYoV7Ny5c9Tz33nnHVasWEF2djZz5szhu9/97kQ0U0REbCbjIbZ161a+9rWv8e1vf5v9+/dz0003ceedd3L69Olhzz958iTr1q3jpptuYv/+/fzt3/4tjzzyCL/4xS8y3VQREbEZw7IsK5O/YOXKlVx33XW8/PLLqWMLFy7knnvu4dlnnx1y/re+9S1ee+01jh49mjr20EMPcfDgQXbv3n3e39fd3Y3X68Xv91NYWDg+b0JERCbMWD7HM9oTi0Qi7Nu3j7Vr16YdX7t2Lbt27Rr2mt27dw85/5Of/CR79+4lGo0OOT8cDtPd3Z32JSIil4eMhlhbWxvxeJyysrK042VlZTQ3Nw97TXNz87Dnx2Ix2trahpz/7LPP4vV6U1+VlZXj9wZERGRKm5DCDsMw0r63LGvIsfOdP9xxgCeeeAK/35/6amhoGIcWi4iIHbgy+eLFxcU4nc4hva7W1tYhva2kmTNnDnu+y+VixowZQ87PysoiKytr/BotIiK2kdGemMfjYcWKFWzfvj3t+Pbt27nxxhuHvWb16tVDzv/d737H9ddfj9vtzlhbRUTEfjI+nPjYY4/xr//6r/zbv/0bR48e5etf/zqnT5/moYceAhLDgQ8++GDq/Iceeoj6+noee+wxjh49yr/927/x/e9/n2984xuZbqqIiNhMRocTAdavX097ezubNm2iqamJxYsXs23bNmbPng1AU1NT2pqxqqoqtm3bxte//nVefPFFZs2axf/6X/+Lz33uc5luqoiI2EzG14lNNK0TExGxtymzTkxERCSTFGIiImJbCjEREbEthZiIiNiWQkxERGxLISYiIralEBMREdtSiImIiG0pxERExLYUYiIiYlsKM
RERsS2FmIiI2JZCTEREbEshJiIitqUQExER21KIiYiIbSnERETEthRiIiJiWwoxERGxLYWYiIjYlkJMRERsSyEmIiK2pRATERHbUoiJiIhtKcRERMS2FGJy0Wp9AV54s4ZaX2CymyIilymFmFy0bYea2Lq3gW2Hmia7KSJymXJNdgPEPmp9AbYdamLd0nLmluSzbmk5QOpPEZGJpp6YjGrgkOHgntfckny+eus85pbkT3IrReRypZ6YjCoZXJDocXUEI3SEItT6AgovEZl0CjEZ1cAhw7kl+RTleRKhZkFRnid1HIYON4qIZJpCTEaVnPvadqiJJRVeOkIRbriqiAMNXTT5+4BEwG2prufA6XPHvnrrvMlstohcJhRicl7JIcXqunZqfUEMoKs3woKZhakA+8me0zgMuO7K6Sr0EJEJo8IOSTPc2q91S8tZf30lG9fMYW5JHp29ESJxk6riPACqa9sxLYtZ03IoK8xmy+56rR0TkQmhnpikGVjI8dVb51HrC7Clup7GjhA7jrWyrNLLwcYuYnHYecLHH0910NjZi8tpUJjt5vdHW4ibFidaeth0z2LNjYlIRinEJM3gtV/bDjXxs70NBCNxsODQGT+maeEwDHzBCAQT1zkdBsX5HqqKyzjW3M3Bxi62VNfz1KcXTdZbEZHLgIYTJU1y7RfAC2/WsKTCS1lhNpYFBhA3LdxOB3HLSrsuHDXZVddOVXEeK6tm4DAMsIb5BSIi40g9MRlWclixIxShIxhJHDQg3+NiWo6bM129aRllAdNzPZxsS3TNPnddBQ+snj3h7RaRy4tCTID0NV4AHcEIt19bhj8Uxd8bBcCyIBSJEQjHhn2NM529/MZ/hhy3iy+tmaP5MBHJOIWYAOkFHQDbj7aw/vpKAJwOMBNTYlijDBFaQNyEgmwnSyq8mW2wiAgKMek3uKCjIxRJDQ06HQ6icfO8r+F2GMz0ZhOJmxxu9HPL/NLMNVhEBIWY9BtY0AGABf9x6CyWZY1an2EkTmVmYRZ3Li7n5vklHG70a8GziEwIhZikSc6NNXaFME0Lk5GHEB0GlBVm0dMXJzfLxU/eO42/N8o/r18+kU0WkcuYQkxSan0Bnvz1Bxxr7sbjcuBwgBUfuVLe6TBYPMtLRVEue+raicTM1BCkiMhE0DoxSdl2qIlaXxCPy4G/N4bDcDDaTFg0bvH2cR8HTnexYfVsbpw7g0dv08a/IjJx1BOTlOQ81u66Nvac7MS4gNXKMdOiprWHEy0BVs2ZQWVRbqabKSKSohATAN4+3srmd+vYuGYO/+eDJuKmRY7bQcxM9MWcBsQHZJrbCVhw5Yw8llVM48DpLs76exMLo/urPR5YPVtrxUQkoxRil7lkIceOY60cbOyioTPE/LICan1BpuV46Iv2JdZ/DeqUGRgYDoNlFdOoKs5jV207V5fk4++N8p+Hm3A5DIryPHqumIhklObELnPJRc5VxXlUTM8hFI5TkO3m+tnTMUcpr3c5DSIxk4ONXSyp8PL5lVey6Z7FeHPduJ0G15QVqMxeRDJOPbHL3Lql5akhwD9fUcGPd9fz1vFWnI5ESA3H7YBQJPGzho4Qhxv9qR7XA6tmU5TrYd3Scg0likjGKcQuc8mg+cW+RvI8LnyBxGa/5YXZRAbVJua4HfRGTaL9h10OqCzKTdtiasiiaRGRDNJwoiQKMYDFVxQytySP6blucrOc9EbTN/rtjQ7umRmc7gjxznHfxLRTRGQQ9cQkNQS4pMLL5nfrONvVS317iNG2S8x2GcRMiMUtGjtDE9dYEZEB1BOT1BDg4UZ/oiox1zNqUQfAtFwPJQVZWEBb//PGan0BXnizhrePt/LCmzXU+gIT0n4RuXypJ3YZG/gMsbkl+alqwiUVXv7+Nx/Q0NE74rVx0+KxtfN4/WATG9fMAc5VOlbXtVPfkeidaX5MRDJJIXYZG/gMsa/eOi+tKMMaZSjR7TQwLYu2ngib7lnMtkNNVBblpoWgd
rIXkYmgELuMDQydF96sSYXPkgov7cHwiNeZpsXdy65g3dLyIUGYDEE9S0xEJoJC7DI2tySfJRVenvzNBwTDMRbMLORYcze90fgwlYjnxC3w90bThiDV6xKRyaDCjsvc5nfraOzsJS/LxV3LyomZVmohs6u/9N4Y5rqdJ3zU+gKpIUgtbBaRyaAQu8xtXDOH1XNmsOkzi/njyU6C4RhZLgOnAYYjEV/Z7qF/TTp7o3zjpwdVgSgik0rDiZe5W+aXcsv8Ump9Ad463krMhHJvNgXZLo409QDpi5wLs12YlkUgHOdoczdbquu1zZSITBr1xARIVCp290UB6AxFONU2/ALmxbMKCffvqTg9xwMWbN3bwLZDTRPWVhGRJIWYAInCjPLCHAAC4TihaHzY8/54qpOygmyKct1UFefi741y+8IyFXaIyKTIaIh1dnayYcMGvF4vXq+XDRs20NXVNeL50WiUb33rWyxZsoS8vDxmzZrFgw8+yNmzZzPZTCFRqViYkxhddjuHK+VIiJoWLT19BCNx9p3u4vdHWyjK82goUUQmRUZD7P777+fAgQO88cYbvPHGGxw4cIANGzaMeH4oFOL999/n7//+73n//ff55S9/yYkTJ7j77rsz2Uzpt2BmIR6nQa7bicthUJjtIts19K/IzMJsPrWknE8tKedz11UM6YUlt59S0YeIZFrGCjuOHj3KG2+8QXV1NStXrgRg8+bNrF69muPHjzN//vwh13i9XrZv35527IUXXuCGG27g9OnTXHnllZlqrgBf+cTVtHT38f7pTpwOg5VVRTR29nK8uSftoSztgQh3L5814oLmwQugRUQyJWMhtnv3brxebyrAAFatWoXX62XXrl3Dhthw/H4/hmEwbdq0YX8eDocJh8/tLtHd3f2R2n25Gbx/4qZ7FrNldz3+3ihvHW+lMxTF5QBzQIqFonH+/jcfEItZLL6ikMfXLWRuSX7qtZZUeOkIRegIRlJryUREMiFjIdbc3Exp6dB/qZeWltLc3HxBr9HX18fjjz/O/fffT2Fh4bDnPPvsszzzzDMfqa2Xs22HmvjhrpP8bF8Dmz6zmMqiXIryPPh7o/h7E9WK5jDb2Sc3B27u7iMUibNxzRw2v1tHrS/IncGZnGjp4UNfgKI8j3pjIpIxY54Te/rppzEMY9SvvXv3AmAYQwsELMsa9vhg0WiU++67D9M0eemll0Y874knnsDv96e+GhoaxvqWLmvrlpaTl+WisbOXze/WsaW6nu+9W8vOGh/JZ7EkQ2yY6TE8Lge1viCb363jQ1+AuSV5YECtL8jVA7alEhHJhDH3xB5++GHuu+++Uc+56qqrOHToEC0tLUN+5vP5KCsrG/X6aDTKvffey8mTJ9mxY8eIvTCArKwssrKyLqzxMsTcknw2fWYxz79ZQ1lhNv5QlHDMpC8Sx+EwMAd0w5wOA8uycDkNIrHE88ZKC7Iozs/irmXltPVEUqE13ALowUOXIiIf1ZhDrLi4mOLi4vOet3r1avx+P++99x433HADAHv27MHv93PjjTeOeF0ywGpqanjrrbeYMWPGWJsoY3TL/FION/rZureB268t4+rSfE4096QCLNvlIBwzicQS32e5nFRMT/zDoaEjRGcoSltPJDVsOLgqMRleHcEI248m/mGjIUYRGQ8ZK7FfuHAhd9xxBxs3bqS6uprq6mo2btzIXXfdlVbUsWDBAn71q18BEIvF+PM//3P27t3LK6+8Qjwep7m5mebmZiKRSKaaKiSGFW9fmOghL5xZSLbbSY7HCZDaocPq/+rui9HY2YtlQcy0KC3IYt3S8lRp/Zbq+rRdPFLVigasv75SQ4wiMm4yunfiK6+8wiOPPMLatWsBuPvuu/nf//t/p51z/Phx/H4/AI2Njbz22msALF++PO28t956i1tuuSWTzb2szS3JpyjPw5Y99cyalsO911cyPc/Nq3sasLBo7k5UgBrAtFw3H59fysHGLkwLDANe3PEhu
+vaicZMPr1sVlpYDfxzqg0jaohTxN4yGmJFRUVs2bJl1HMs69ycy1VXXZX2vUysdUvLqa5rp9YXZHklvHXMR2cojMPhwOBcT6ynL8ZJXwDLAqcDznT1UucLAlBZlMMDq2enBcLAJ0aPVaZDRmvaROxNeydKSnKd2OdXXgkWHGvupi9mEYrE0x4qFjMt9jf6Od0RwsCgL2piAcV5HjZ9ZjHAiDt2jHU3j2TIZGqD4XVLyy9qiFO7kohMDXoUiwzREYrQ2BkiP8tFOBbBtGC4DnJsQOWiA1h+5TSef7MGfyhKa0+YjmCEp+5elHbNWHs+mX5y9MX2EtWDE5kaFGKSUusL8I2fHeSDRj+mZZHlclBWkI2/L8qMPA8t3X1E4laqUzYw16pK8mgLRDjQ0AX0byI8zHLAsYbSRxmKHG8DhzYzHa4icmE0nCgp2w41caK5h3h/t8ub4+GR267myzfP5WNXFQHgchgU57uZnutOXecyYM01Jdz7sQrysly4nbDoCi8PrJoNpA+9DQwluw3HDRzanNu/kHvboSZbvQeRS416YpKybmk5HaEI/lCUY83dnGoL8uPd9SyYWci7J1qJxC1y3A56oxZLryikpiWAy2lgAb/Zf4a8LBemabLoimn8v/91WaoQY7ihNzsOxw3ufdnxPYhcahRikjK3JJ8HVs1OFVHU+oJ82BqgpjVANJ7onYVjJoZhcaSpm67eWNrQYmcoisflYHnFNCDR0xpp6G0qD8eNVBE5eGhzKr8HkcuFYV1iNe3d3d14vV78fv+o21XJ8B7beoD/PNzEmnnFFGS7OdbcTThmcrItiGkl5rpi/YGW/IuT7TKImYnnjMVNi08uLmPHsVZCkThfWH3VBfdSpsqarRferGHLnnqu7q/WTLZlqrRP5FI3ls9xzYlJmqNNidA6cLoLgMbOXuYU57GsYhpzS/JwGFCc76Eg+1wnvi9mETMtmrt7ae7u4+f7ztDY2Uuux5nqpVxISXqmy+kv1Lql5Vxdkk+tL5jWli3V9WzemdgkWUSmBg0nSpqF5YWcaO0hGIlzsi2xgLktEOGsvxfTtIibFuGYSXdfbMi1cTOxe0d+lpOCbCd/vqIi9Xyx5GNaYOT5o4HDc5PZ60k9V626Pv2ZaP1dT38omhoqBdQ7E5lECjFJ85VPXI031w0W3Dy/hMON/lQI7TvdSdxk2ACDxGe8y2HQ1RvFAL7//53E7XAwf2YBH/oC5300y+DKxYstmhj4cM7Djf4xBczA8AT4xfuNYMBTn17EA6tnU5TnoSMUSbUNUHGHyCRSiEmauSX5PPXpxALlWl+Aw41+Koty2bhmDnW/PIyvJ5y2yNntgOiApz5H4xZOh0F+lou2YIQct5Mmfx//19xi7l4+Ky0gRuvBDC6aGNwzG62nlhyWrK5rp74jBJC2w/7gNiyp8PJKdT0fnOkmJ8vJybYgO4610heNE+iL8ZM9p3nnuI8nP30tX711HrW+QOpRM4PbKyITSyEmI9p2qIkte+qprmvnmrICTMtiSYWX2tYeuvviAMRMUvsqJoWjJnEzgmVBKBKnri2IN8edetwLQEcwwk/3NrDjWCuP3jZvSI9pcCXg4HL2Lbvr+cX7jcPuCpIMlIE9sVpfgC276znQ2EVTV1/q3C176jF3W/gC6U9J2N/QhcthYJKoyKxrSzz485b5pUPaph6YyORRiMmIBm4IfM3MAh5YOZslFV7+8Y1jHG/qwXAYab2yJAuIxtOP5bgdLKnwUl3XTnGBhx3HW4nETP501s/zv6/BF0jskn8h82XAud1AhtkVZGDI3DK/lLePt/Lkbz6gtTvRiywp8FBc4OFES4CiPA/Hm3pS17oMiCWfaG1aFGa7iJkWpmmyck7Rhd04EZkwCjEZUbLAYeCw3Qtv1tDS3YfT6SASN4f0wkbS0hNm87t1fOgL8NM/NnKitYf8LBeBSAx/X5TbF5Zd0JBcQ0eIbYeauPmakiFDegMNHDb8zhvHON3RS
5bLQdy0aPKHeentWjqDEQLh9LR1uRzEoiYOA3JcTsKxOG6ng764xZ66DrhVpfYiU4lCTEY1eOhsSYWXvCwXPeFEcUcywFyOxNDiYG4nxOJwqi3I6fYQ03LcdPdFicTi9Jlx4hbU+YJ0BiP4e6N8rGo6rx9sYuWcIvbUdXDXsnJ2HG1ld107LoeDRbMKU/NcyW2fBodJsufV3Ruluq6dSOqhnlaqvWc7e4kPk759/RN8pgWhaBzDSFxZkO3irmXaqUNkqlGIyZgcbvQTiVksmuWlOM/DnlMddPfGhgSC0d9Fi5uJoItbELcs2oIROnujGBipPRohsdvHfx5uYm99B2e6etl/upNQ1GRffSfhWOJRL94cg41r5qTmubYdauKVPaeprmtPO/78mzU0dPTichrsO91JQVbir3kkdu73DRdgSU4jEWIOB8wvKyDL5eSsv5e2nsS8mXbqEJk6FGIyJoOf0vzZF//A/oauIY9qSX3fv8tHdMAuHzHTItftIDpgPs1pJMrz89xOSvOzsQyLkD9MX8zEAWS5DQpz3Lx24Cwfq5rOk7/+gLuWlZOb5WR3XTt1vgC+QIQX3/qQHLezf17OIhq36IumF22cjzfHTTASoygvi7+5YwGVRbls2V1PRygyZBNjEZlcCjEZk8Ef4I/eNo9/eP0IHcEIMwuzae7uI25a9PTFsEj0yOLDdHtC0fSxx7gFwUicoy2JHT0qpuWk5tvys13MLc3nwOkuznad5e3jrXSEouw/3YXbaWBa0NSdKAyJmRZ9w41rMrSKcji5Hid9MZOivCwsK9HzvGV+KUV5HrbubaAo16MAE5lCFGJy0ZLryJZVTON3R1po6wkTDMeBxFox07IYpnjxgjR29ab+u6cvxrIKL40dITqCEQL983GhaBwj+tHeQ5bLQWGWC18wgmHAjDwPrT1hFs8qZGnFtCFDhxpCFJlaFGJy0ZIFDlkuB8FIjP4qeVwjlN5fLAv4cXU9bqcjNUyY7FUN91tG6nENdyweNynMdYMByyunUZDt5vdHW6iYnpvW49IQosjUpBCTi5bslfyfD5oSc1/94VU+LZv2QITeSKJ8fWB4uBwGLkdi0+DzcToShSGQ+DNuXlhJ/4XGZ0m+h8IcNw0dITwuR6rnVVWcN6THpbJ6kalJu9jLRUv2Tv7mjgXcOHcGX731am6cO4N/+Mxi1l9fSbbbQXG+myxX4q+Z22GQ7XYQHX7Kaqhh0mikgHIMs+h5NIXZLv79S6tZM68Et9PBvNKCVEB99dZ5I25lNdk77ItIOvXE5KIM7JncMr+UW+aXJn5wa+KPyqJcTrT0cLylh1yPk2jcZKY3m49dVcTOmlZ8gfNPZhXmuPH3Rs87r+Z2GszI89DcX9wBiVAbeN3gHtxtC8t472Q7rx88y0xvNvd+rGLUfR01JyYyNaknJhflfD2T5G4f88sKcBhQMT2HSNzEm+vmpnmlXFteQMX0nOF2jUrpDA0fYAaQ7T53ZTRu0dwdxuDcLlTJ61wOqJyeg3PA3/Tywmy+8omrefntWnzBCKfag7x+8Nz7Ge69jdRDE5HJpZ6YXJQL6ZkM3LZqSYWX1w6c5Y3DzUTicf7vG6tYt7Q8sSlvQyeHGv2pBcgjzXsle1cW0Bc9d8bA8x1AfMDxa8oKWDCzkN8daaasMJvCHDeP9hdozC8roDccZ/nsaXx+5ezUYunB71FEpi7DsgYvU7W3sTzWWiZOrS/AX/zgPRo7e6mYnsOmzyxOPavsyd98wOmOcyX15YXZrJ47g501PtoCESxgZmEWc4rz2He6k/CgopBst4M5xXlcMS2HvfWddIaiFGa7yPO4iJoma+aVcKy5Gwv41h0LuGV+aep5Zeuvr1TVocgUM5bPcfXEJONqfQGe/PUHdPfF0gIs+cyvnr4YuW4HDsOgL2ayeu4M/nn9cmp9Ab76k/f5sDXAjXOLaenuIxq3mJ7rxiDRMwtFTTxOByurZlCU5yEYjrG7roNo3KQ3G
mfRrEK8OW5qfUEiMZPn36zhcKOf4gIPs4tyWVLhnezbIyIfgUJMMm7boSY+9AVYVF7IpnsWM7ckn8qiXACKCzw0dIZwGHD38ivSdqafW5LPyqoZNHb24s1xc/fyWQA0d/dR1xZkecU0Hr1tHpvfrcPfG2X70RZK8rNwOAz6oiYlBVlsXDOHd074WDOvmLZghKoZeWzd28DsolzqO0KpHTlExJ4UYpJxg/dbhHOFEs/8x5/oCEaYV1rAA6tmDymceGD1bIryPKlrb5lfymNbD9DY2UtVcR6HG/3Ud4QoK8xmdlEudy1LrPM62R7k0VsTD9vcfqQlNWxY6wtQ1T9HN3gOTETsRyEmGTfqbhcWOAyD5ZXThq38G+7ar3zi6iELkjuCEd471cGJ5gBVxXl85RNXp/X4BvbuBj4wU0TsTYUdMqnGayeM5Ot0hCJpPS8RsZ+xfI4rxOSSou2hROxP1Yly2dJGvSKXF+3YIbZU6wvwwps11PoCk90UEZlECjGxJW3IKyKg4USxKW3IKyKgEBOb0tyXiICGE0VExMYUYiIiYlsKMRERsS2FmIiI2JZCTEREbEshJiIitqUQExER21KIiYiIbSnERETEthRiIiJiWwoxERGxLYWYiIjYlkJMRERsSyEmIiK2pRATERHbUoiJiIhtKcRERMS2FGIiImJbCjEREbEthZiIiNiWQkxERGxLISYiIralEBMREdtSiImIiG0pxERExLYUYiIiYlsZDbHOzk42bNiA1+vF6/WyYcMGurq6Lvj6L33pSxiGwXPPPZexNoqIiH1lNMTuv/9+Dhw4wBtvvMEbb7zBgQMH2LBhwwVd++tf/5o9e/Ywa9asTDZRRERszJWpFz569ChvvPEG1dXVrFy5EoDNmzezevVqjh8/zvz580e89syZMzz88MP89re/5VOf+lSmmigiIjaXsZ7Y7t278Xq9qQADWLVqFV6vl127do14nWmabNiwgW9+85ssWrTovL8nHA7T3d2d9iUiIpeHjIVYc3MzpaWlQ46XlpbS3Nw84nXf+c53cLlcPPLIIxf0e5599tnUnJvX66WysvKi2ywiIvYy5hB7+umnMQxj1K+9e/cCYBjGkOstyxr2OMC+fft4/vnn+eEPfzjiOYM98cQT+P3+1FdDQ8NY35KIiNjUmOfEHn74Ye67775Rz7nqqqs4dOgQLS0tQ37m8/koKysb9rqdO3fS2trKlVdemToWj8f567/+a5577jlOnTo15JqsrCyysrLG9iZEROSSMOYQKy4upri4+LznrV69Gr/fz3vvvccNN9wAwJ49e/D7/dx4443DXrNhwwZuu+22tGOf/OQn2bBhA3/xF38x1qaKiMglLmPViQsXLuSOO+5g48aNfO973wPgr/7qr7jrrrvSKhMXLFjAs88+y2c/+1lmzJjBjBkz0l7H7XYzc+bMUasZRUTk8pTRdWKvvPIKS5YsYe3ataxdu5alS5fy4x//OO2c48eP4/f7M9kMERG5RBmWZVmT3Yjx1N3djdfrxe/3U1hYONnNERGRMRrL57j2ThQREdtSiImIiG0pxERExLYUYiIiYlsKMRERsS2FmIiI2JZCTEREbEshJiIitqUQExER21KIiYiIbSnERETEthRiIiJiWwoxERGxLYWYiIjYlkJMRERsSyEmIiK2pRATERHbUoiJiIhtKcRERMS2FGIiImJbCjEREbEthZiIiNiWQkxERGxLISYiIralEBMREdtSiImIiG0pxERExLYUYiIiYlsKMRERsS3XZDdgvFmWBUB3d/ckt0RERC5G8vM7+Xk+mksuxHp6egCorKyc5JaIiMhH0dPTg9frHfUcw7qQqLMR0zQ5e/YsBQUFGIYx2c0BEv+qqKyspKGhgcLCwsluzpSl+3RhdJ8ujO7ThZmK98myLHp6epg1axYOx+izXpdcT8zhcFBRUTHZzRhWYWHhlPlLMpXpPl0Y3acLo/t0YabafTpfDyxJhR0iImJbC
jEREbEthdgEyMrK4qmnniIrK2uymzKl6T5dGN2nC6P7dGHsfp8uucIOERG5fKgnJiIitqUQExER21KIiYiIbSnERETEthRiGdLZ2cmGDRvwer14vV42bNhAV1fXBV//pS99CcMweO655zLWxqlgrPcpGo3yrW99iyVLlpCXl8esWbN48MEHOXv27MQ1egK89NJLVFVVkZ2dzYoVK9i5c+eo57/zzjusWLGC7Oxs5syZw3e/+90JaunkGst9+uUvf8ntt99OSUkJhYWFrF69mt/+9rcT2NrJM9a/T0l/+MMfcLlcLF++PLMN/CgsyYg77rjDWrx4sbVr1y5r165d1uLFi6277rrrgq791a9+ZS1btsyaNWuW9T//5//MbEMn2VjvU1dXl3XbbbdZW7dutY4dO2bt3r3bWrlypbVixYoJbHVm/fu//7vldrutzZs3W0eOHLEeffRRKy8vz6qvrx/2/Lq6Ois3N9d69NFHrSNHjlibN2+23G639fOf/3yCWz6xxnqfHn30Ues73/mO9d5771knTpywnnjiCcvtdlvvv//+BLd8Yo31PiV1dXVZc+bMsdauXWstW7ZsYhp7ERRiGXDkyBELsKqrq1PHdu/ebQHWsWPHRr22sbHRuuKKK6wPPvjAmj179iUdYh/lPg303nvvWcB5/6e0ixtuuMF66KGH0o4tWLDAevzxx4c9/2/+5m+sBQsWpB370pe+ZK1atSpjbZwKxnqfhnPttddazzzzzHg3bUq52Pu0fv166+/+7u+sp556akqHmIYTM2D37t14vV5WrlyZOrZq1Sq8Xi+7du0a8TrTNNmwYQPf/OY3WbRo0UQ0dVJd7H0azO/3YxgG06ZNy0ArJ1YkEmHfvn2sXbs27fjatWtHvCe7d+8ecv4nP/lJ9u7dSzQazVhbJ9PF3KfBTNOkp6eHoqKiTDRxSrjY+/SDH/yA2tpannrqqUw38SO75DYAngqam5spLS0dcry0tJTm5uYRr/vOd76Dy+XikUceyWTzpoyLvU8D9fX18fjjj3P//fdPqc1LL1ZbWxvxeJyysrK042VlZSPek+bm5mHPj8VitLW1UV5enrH2TpaLuU+D/dM//RPBYJB77703E02cEi7mPtXU1PD444+zc+dOXK6pHxHqiY3B008/jWEYo37t3bsXYNjHwFiWNeLjYfbt28fzzz/PD3/4wynzCJmLlcn7NFA0GuW+++7DNE1eeumlcX8fk2nw+z/fPRnu/OGOX2rGep+SXn31VZ5++mm2bt067D+kLjUXep/i8Tj3338/zzzzDNdcc81ENe8jmfoxO4U8/PDD3HfffaOec9VVV3Ho0CFaWlqG/Mzn8w35F1HSzp07aW1t5corr0wdi8fj/PVf/zXPPfccp06d+khtn0iZvE9J0WiUe++9l5MnT7Jjx45LohcGUFxcjNPpHPKv5NbW1hHvycyZM4c93+VyMWPGjIy1dTJdzH1K2rp1K1/84hf52c9+xm233ZbJZk66sd6nnp4e9u7dy/79+3n44YeBxLCrZVm4XC5+97vf8YlPfGJC2n7BJnE+7pKVLFjYs2dP6lh1dfWoBQttbW3W4cOH075mzZplfetb3xpTkYOdXMx9sizLikQi1j333GMtWrTIam1tnYimTqgbbrjB+vKXv5x2bOHChaMWdixcuDDt2EMPPXRZFHaM5T5ZlmX95Cc/sbKzs61f/epXGW7d1DGW+xSPx4d8Dn35y1+25s+fbx0+fNgKBAIT1ewLphDLkDvuuMNaunSptXv3bmv37t3WkiVLhpSOz58/3/rlL3854mtc6tWJljX2+xSNRq27777bqqiosA4cOGA1NTWlvsLh8GS8hXGXLIn+/ve/bx05csT62te+ZuXl5VmnTp2yLMuyHn/8cWvDhg2p85Ml9l//+tetI0eOWN///vcvqxL7C71PP/nJTyyXy2W9+OKLaX9vurq6JustTIix3qfBpnp1okIsQ9rb263Pf/7zVkFBg
VVQUGB9/vOftzo7O9POAawf/OAHI77G5RBiY71PJ0+etIBhv956660Jb3+mvPjii9bs2bMtj8djXXfdddY777yT+tkXvvAF6+abb047/+2337b+7M/+zPJ4PNZVV11lvfzyyxPc4skxlvt08803D/v35gtf+MLEN3yCjfXv00BTPcT0KBYREbEtVSeKiIhtKcRERMS2FGIiImJbCjEREbEthZiIiNiWQkxERGxLISYiIralEBMREdtSiImIiG0pxERExLYUYiIiYlsKMRERsa3/H8Avo1yHbGxSAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "edm_samples = edm_workflow.sample(num_samples=3000, conditions={\"observables\":np.array([[0.0, 0.0]], dtype=np.float32)})\n", + "plt.scatter(edm_samples[\"parameters\"][0, :, 0], edm_samples[\"parameters\"][0, :, 1], alpha=0.75, s=0.5)\n", + "plt.gca().set_aspect(\"equal\", adjustable=\"box\")\n", + "plt.xlim([-0.5, 0.5])\n", + "plt.ylim([-0.5, 0.5])" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "id": "2d5e05b7-9159-41e3-ad9c-7629ebff17e7", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "(-0.5, 0.5)" + ] + }, + "execution_count": 21, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAbEAAAGdCAYAAACcvk38AAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjAsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvlHJYcgAAAAlwSFlzAAAPYQAAD2EBqD+naQAARspJREFUeJzt3Wl4XNWd7/vvrlFzybIGy0iWZWMLx2Mw8UAOgWYw4BBCTtKYAza5fXLpkA4BQnc6cNIP0xvf3Js+JzQNGXzSye02oZ2EhAytJjE4gG88EBts2dh4kIUt2aVZqpKqJNW074tSFSqpJEtGJWvj3+d5FOytXVVL9Sj181rrv9YyTNM0ERERsSDbxW6AiIjIhVKIiYiIZSnERETEshRiIiJiWQoxERGxLIWYiIhYlkJMREQsSyEmIiKW5bjYDZhssViMc+fOkZ+fj2EYF7s5IiIyQaZp0tPTw+zZs7HZxu5rfeRC7Ny5c1RWVl7sZoiIyIfU2NhIRUXFmPd85EIsPz8fiP/wBQUFF7k1IiIyUX6/n8rKyuTn+Vg+ciGWGEIsKChQiImIWNh4poRU2CEiIpalEBMREctSiImIiGUpxERExLIUYiIiYlkKMRERsSyFmIiIWJZCTERELEshJiIilqUQExERy1KIiYiIZSnERETEshRiIiJiWQoxERGxLIWYiIhYlkJMREQsSyEmIiKWpRATERHLUoiJiIhlKcRERMSyFGIiImJZCjEREbEshZiIiFiWQkxERCxLISYiIpalEBMREctSiImIiGUpxERExLIUYiIiYllTEmLPP/881dXVZGVlsXLlSnbu3Dmux/3pT3/C4XCwYsWKzDZQREQsKeMhtm3bNh5++GG+9a1v8c4773DNNddw6623cubMmTEf5/P5uPfee7nhhhsy3UQREbEowzRNM5MvsHr1aq688kq+973vJa8tWrSIO+64g82bN4/6uLvuuosFCxZgt9t5+eWXOXDgwLhez+/34/F48Pl8FBQUfNjmi4jIFJvI53hGe2KhUIj9+/ezbt26lOvr1q1j165doz7uxz/+MfX19TzxxBOZbJ6IiFicI5NP3t7eTjQapaysLOV6WVkZzc3NaR9z4sQJHn30UXbu3InDcf7mDQwMMDAwkPy73+//cI0WERHLmJLCDsMwUv5umuaIawDRaJS7776bp556ioULF47ruTdv3ozH40l+VVZWTkqbRURk+stoiBUXF2O320f0ulp
bW0f0zgB6enrYt28fDzzwAA6HA4fDwdNPP83BgwdxOBzs2LFjxGMee+wxfD5f8quxsTFjP4+IiEwvGR1OdLlcrFy5ku3bt/O5z30ueX379u189rOfHXF/QUEBhw4dSrn2/PPPs2PHDn7xi19QXV094jFutxu32z35jRcRkWkvoyEG8Mgjj7Bp0yauuuoq1q5dyw9/+EPOnDnD/fffD8R7UmfPnuVf//VfsdlsLFmyJOXxpaWlZGVljbguIiKS8RDbsGEDHR0dPP3003i9XpYsWUJtbS1VVVUAeL3e864ZExERSSfj68SmmtaJiYhY27RZJyYiIpJJCjEREbEshZiIiFiWQkxERCxLISYiIpalEBMREctSiImIiGUpxERExLIUYiIiYlkKMRERsSyFmIiIWJZCTERELEshJiIilqUQExERy1KIiYiIZSnERETEshRiIiJiWQoxERGxLIWYiIhYlkJMREQsSyEmIiKWpRATERHLUoiJiIhlKcRERMSyFGIiImJZCjEREbEshZiIiFiWQkxEpr36tl6efe0E9W29F+XxMn0pxETkgkxlMNTWedm2r5HaOu+E2pH43tY9p0d9vFib42I3QESsKREsAF+7YUFGX2v9svLkf+vbeqmt87J+WTnzS/LYuuc0L+1voqE9QHVxbvL60DbetKiMDVdVJp9HPjoUYiJyQYYGy1QaGp5LKzz87sA5+sJRjjb7eev9TuCDUB3axkSwnc/wkJTpTSEmIhdkfknepPXAhgfH8L/X1nl5Ye8Z9pzq4L5PzQPiwfR3PztIWyCEzYBFswqoLs6lON/FPVv2cN+n5nFdTemE27h192leeruJzkCIJ25fPCk/n2SOQkxELrqhvav1y8p5/OXDnGzrpaE9QIu/n9XzijAMePtMF28cb+OJz8TDpbo4l3fP+agsygGgrqmbP9V3EAxFOdUeYOv/uTptKI7JGPZfmdYUYiIypYYGCsQDrDjfRVVRDksrPNTWealvC3B5SR5Hm/2819zD8ZYefH1hwADzg+f66vWXU12cS2cgxEtvNxEIRYnG4je0+vu56we7uWZBCZ5sJ9uPtgDxocaxQm3jmqr4H8x4WzWkOL0pxERkytS39fL4y4c51tzDjvdaGYhEOdMZpKwgi6auPnL2nKaiKIdbl8xi49oqnttxklNtAQqynQTDURaW5rNwVl5yuLCyKIeG9gAHm7opyXcz3+2gqbuPLKeds919tPWG+PXBs3x2+WVsuKqSpRUenn3tBHVnu3nzeDsN7QG+ev3lIwLteHMPJ9t6Kcp1pR2O1LzZ9KEQE5GMS3zoN7QHONDUTY7TzrvnfJimic1mozMQYiASY9/pLnbVd7CwLJ+Na6u4fcVsDjZ10xkIsbAkj/bAAP/wq8NETegKhlg9bya/PniOaMzEZoDLbiMUjeG02yjJcxOKxugLRXmv2Q/Ajvda8fr6iUZjDERi1B72crTZT1cgDMR7aUN7gqMVrUxlZaaMTSEmIhmX+NAvyHIQiZoUFDgJhqLMKcphUXkBB5u6CQxEyHM7ONvXx6GzPh544W2yXHYa2gPETPD1+YgNGUp8vz1AW88ANsPEZjMIx0z6IzEABiIxWnsG+NzHL+NgUzdHvT0cb+nF7bRRVZSD19ePAfSHY5xs6WVVddGIasvRikyG3yMXl0JMRDJuaYWHHe+14usL47AbLK8oZHkFNHQEAOgOhvBkO+kdiGCaEDFNjjb3YAC2wQKLoQEGEAzHCIZD2G3gtBtgQDj6wU0m0Ozvx9vdjwlEYibFbgf+vghdwXDyPk+2g4Wz8tO2OzH8Wd8Wb2ei1zWZlZny4SjERCTjDjX5ONbSw0A4RtXMHDw5Tg6c6abubDcAc2bmcuBMN8NyChOIDr84TDQGphnDSFNN+NapDiJDHt/iH8DtTN2oqCsY5md/buTn+xpZO28mgVCEvac6+dm+RvKzHDS0B7hiVkFKr0tzYtOHQkxEMibxYb+0wkOuy04wFKX
Z18dP/vQ+nmwHRTkuinNdtPcMjAiwiYiZkO4JIsOuGUAkGsMA8rMc9PRHiJoQCEUB2H60FZsRf77Grj4cNgO7zWDFnMKUsNKc2PShEBORjBn6YX/NghL+45CX/kgME+jui2A34M0T7Wl7URNlMyDHaad3MJDSiQEOm0EkZtIXiqYNzqHDltGYCaZJdzDEU799F18wjCfHybULSwDNiU0HCjERmXT1bb08t+Mku+vb8fVFMDGTa7qef/0kfeF4AUbUBDMawzaBFLNBfLmY+UHny2mDvCwnfeEoxXlOuoMRYjGT2LDHGoDdMACT8JC0ctoMivNceP0DKfebxHtzvz14jhyXg4FIDLfDRlFO+tJ7mXoKMRH50NJtE/XbunPJQotnXzvJoSYfTV19yQBLiJkQM8c3mGg3DKKD6VU5I5umrj5MIByLz205bBAMxYs4hksME4aiJlkOG9kue7LAoyDbkfyz22EQipjDemkGJfluQoO9yP88HB8iva6mdMz3QTJPR7GIyIc2/KiUpRUecpz25PfDMZPtR1s52txz3ucarVOW57aT47KR5bBhM8DfFx5xz4wcFy77Bx9rBdkOZuQ4yXHZWV7hwW4ziMZMBiIxMME+WPrYGQgPKc9PDTAbYGDyfkeQZl8/Z7v6OOLt4bFf1o04/mWsI2MkMxRiIvKhrV9WnnLUyW8OnMPfH7mg5zLND8rqbcTL5//rxy/j1w/8F76wspLLZmRjAIGBCLZ4ZT0OG5QXZLGishBfX/x1DeCywmxCkRg1s/IpznNjM8AxuHNV70CE7MGgzXHZk685XAxwOewYZnw9WiLgWvwD/N3PDvL6sVaefe0Erx9rpTMY4qZFZZorm0IaThSRCzJ86CwxR1Tf1svuUx0XXG3osBk8eMPlvPjWGToDYYpyndy+YjbzS/IoynXR7OvHZjO4vCSPGTlOTrT2sqKykHvWVPHMaydwOgwcho1PXj4TgFNtAapn5tLQHiASNZlXkktBtpPqmbl8onoGvzvoJcdl54/HWpPzbM7BxdMJA5EoMSAy2FszBocmD5/z8cxrJ2jrGWDHsVZOtPSwoCyfjVR9iHdWJkIhJiIXZHiZeSLUOgMhevsjxMsn4ob+2WGAYTNSFiYPtaq6CAODtfOK+Y9DXvz9EQ41+YD4tlFXXz6TisIcNq6t4vGXD9MRCLH/dBe7T3VgAJ+oKuLpO5YwvySPp377Lm5HfMCpujgXDHjohgXJuazXj7XGy/79/YCBYcTnywYiqfN2ZrxIMSnbaWcgHK+CLM51keO0k+OycyRqcszr5/GXDyfbIJml4UQRuSDDhxC37j7N99+sZ09DB3lZ9pSe2NA/x4A8d+q/nx2Du27MyHbSHQyxde9pPNlO7l49h5s/NovOYIhnXo1v3BsciPLE7YuZX5LHfZ+aR1lBFp3BML0DUWw2Ixke9W29YMLnV1bgyXby1vudXF9TmlKMseXNUxxo6uZ0R4AsZ3yuLRyNYTcMnEPGF6ODPTS3w8b8klzcDhsF2U7y3A5OtQc41tJDeyDEp5eWUzOrgJODgS6Zp56YiFyQEVsvGRCJmjR29rF2fhFdgfZkscRQMZNkJWCih1aQ5WRReQEHmro50xlkeUUhG9dWMb8kj2dfO8G2fY2smlsEQFlBVvKIlOtqSrll8Sx++tZpnHYbX/ov1cnF1VvePMXJtl42rq5i/bJyinJdrF9WnjIMetvyco63+OO7fgALSvMpznfR3hNieaWHF/aeJjxk2dlAJMa84lwKspwcaIzvMOIPhnE4bJxs6SXHaeehGxdwqMmnebEpohATkUkx9BwuAE+Ok3m5LkzgbFffiEIPl91GRWEWrb0h/o9PzuXTy2azdfdpMODahSXJoFm/rJzOYAhMWDGnkO1HWqiu8yYDdOPaqmRAJYY495zqSNmJfmjgPvvaCbbuPc2eU/Hd8gciJuFojJJ8dzKAtu1r5ERLLwY2GLbabN/pLv6ippTDZ31EYiY2u0E
oEt/2as+pDkxM1s4rzuRbLUMYpjnOBRoW4ff78Xg8+Hw+CgoKLnZzRC5Jww++/L9qj7LvdBcAgVCEsvwsPjG3iFcHD6q875p5Kb26Z187wQt7zzC/JJen71iSDKebFpXFbzDioTl8zmnoNleJ3lC6exKb+t66dBa+YJjdpzoIR2N8ce1cllZ4eOa1ExTnuuL3twcwgHPd8TVuBlCc58JuN/AHI9hsUJTrorc/QmcwjM2AgiwHi2d7NC92gSbyOa45MRGZdImeT2Lh896GTqIxk/wsB9GYSXGeG0+OkxsXlfH5lRUjht7i4ZObnFtKzL8lel3bj7SknXNKvO51NaXJ1093z9N3LOGe1XPYuKaK6uJcYqZJTVk+65eVc6jJx4mWHt480U5FUQ6fW3EZvQOR5HZUJtDWG6LZN0AwHCUcNfnKdfP5xi01uOw2YiZEYlDfFtC82BTQcKKIXLDx7FCxflk5nYEQGLCwLI/fHfRSVpDF9iMtbLiqMu32TYmgGV7CX9/WS2cgxE0f+3BrsYYOLy6t8HD5qXiRyPySPJZWeMh3O+kIDPC7g+dYUJpHOBIbUbEI8Tm9gUiM//uVY8ydmcv84hzea+1lQUku12u92JRQiInIBRvPbu7zS/J44vbFyb//t1VV1Lf1Uj1kuHG0xw1/zto6L9uPxsNvsobpDjX5ON0Z5FCTj+tqSjnU5COGSbbLTntviI5A54izzCB12UBXMExXsDu+kNqMD0HyXitLKzwaTswwzYmJyAWbyr0C69t62brnNJgkKxcn+vh0bR06j/bG8bb4TvXZThbOyuNnf27C3x/mbHcfkWiMoZ0x++CCZ5fDYGDImS9Dw+1j5fmsrp456hyepKc5MRGZEkPnvjKtts7L9iMtFOW6Luj1RtvXMPEzHGry8dL+Jv5wpIXjLfE9HnNcdj67YjaLygtYNbeI7CEHaibWjg0MO7QssfejQbxX9tLbTby0v0nzYxmi4UQRsYTE0OOFzjON9fjEXNuNi8poaA/wXrOfw+d89PRHaOwK0hkIMRCJUZafRU9/mN6BSMqJ0/H9G+PnlGFCttNGuSebe6+u4nhzLxg6eyxTNJwoIpe8xILqxA4kj798mHe9fgqyHHxhZQX/tvs07b0hHPb4LviJOTKn3SDPbcdhs9HWG0p5TqfdYMlsD9+5c7mGESdIw4kiIoPq23p59rUTI45NGWroFlqJysgvfbKaH//VKrqCYXx9Yew2g8tL86guzk3ueG+a0NMfoWNYgBlAOGpS19TN3/3s4JivLR+OQkxELGk84QTjO+Nr+Nze/MGdPmrrvDR1BgGDWQVZGECB28HMXBcfrywkx2knEovv6WEAOU4bDpvB4J7DRE04fNYX34lEMkJzYiIyLZ2v8nE85f1w/rmw0V4j8fwleW6ynDaipslRb7zgwwQcNhvBcOpWWsHwyLVk4ZhJU3dw7B9WLphCTESmpfOF1HgLPdKtNxvPaySetzjfxc/+3ES2y0ZhjpOevgitPQMsuayAs91Ojnp7MIlXJY5WYdDU1TdmG+XCKcREZFo6X0iNFU4f5jWG9s6+dsOC5JBlzIQFpXksqo4XGniynVw2I5tjzT1ETdIuiIb4KdWLZqUvTpjKdXYfVVMyJ/b8889TXV1NVlYWK1euZOfOnaPe+8tf/pKbbrqJkpISCgoKWLt2Lb///e+nopkiMo2MtQZtvPNh57s33WsMn0Nbv6ycz6+sYEFpHsdbenj1aAsNHQFeeruJX+w/yyhne+KwxQ8ArSnL56vXX572nvHM18nYMh5i27Zt4+GHH+Zb3/oW77zzDtdccw233norZ86cSXv/m2++yU033URtbS379+/nL/7iL/jMZz7DO++8k+mmiohFTOTDf6JBsbTCQ1VRDsX5Lp597QQAT3xmMd+5czk3L57FZTOy8feFicZMll1WQI5r5MdoYqGzCVTMyKa2zsvrx1pHhOnwg0Vl4jK+Tmz16tVceeWVfO9730teW7R
oEXfccQebN28e13MsXryYDRs28Pjjj5/3Xq0TE/nom8gw3ESH7BJrxqqKcjjdGUxuUpw4wuVAUzfRmInbYeP6K0r5XZ2X8LDuWJbTRiRqEjNNspx2clx2KotyqG/t5fMrK3jiM4tHeXWBiX2OZ3ROLBQKsX//fh599NGU6+vWrWPXrl3jeo5YLEZPTw9FRUWZaKKIWNBE5sMmOneW6BUNPZMM4j26Y8095DjtBAai9PRHqD3UnBJgNgMMwyAai++zaADBUJTiPBfFuS6ORGL4Bk+1lsmR0eHE9vZ2otEoZWVlKdfLyspobm4e13P84z/+I4FAgDvvvDPt9wcGBvD7/SlfIiLjkW6+bLQzydYvK6dmVj594SgQHyb8b6sqcdrjK58NIMtpJxozGbwFmwEleS5m5ro50NiN3YifeD3W68vETElhh5HYEXOQaZojrqXz4osv8uSTT7Jt2zZKS0vT3rN582Y8Hk/yq7KyclLaLCIffROZL5tfEj9zbEFpPp+8fCY3LCpj09q5fGbZbFx2gyvK81lR4Ul5jNthIxiKUtfUTVtvCJvNxrULSy7o9SW9jA4nFhcXY7fbR/S6WltbR/TOhtu2bRtf+tKX+PnPf86NN9446n2PPfYYjzzySPLvfr9fQSYi4zLetWaJebXOQIi23gFyXHZ+c+Asrxxqprokh8qiHFr9/ZxoSe1R9YdjLK8sxN8fpqE9QCwWS55bNpHXl9FlNMRcLhcrV65k+/btfO5zn0te3759O5/97GdHfdyLL77If//v/50XX3yRT3/602O+htvtxu12T1qbReTSMd75skSPadXcIqqKclg9r4jD53x0BPrx+vtHXehsAv7+MJ9dMZs/vtdGdXFuSmBNxlq3S13GFzs/8sgjbNq0iauuuoq1a9fywx/+kDNnznD//fcD8Z7U2bNn+dd//VcgHmD33nsvzzzzDGvWrEn24rKzs/F4PKO+johIpiSCpzMY4q33OwEwMLANHozJsABz2iESjV+ubwvwi/1NxEy4/orS5BybFjpPjoyH2IYNG+jo6ODpp5/G6/WyZMkSamtrqaqqAsDr9aasGfvBD35AJBLhq1/9Kl/96leT17/4xS/yk5/8JNPNFRFJMTRsEo55/URiJtkOO4YNAgPRlMe4HXbC0ejgn218YWUFBkbKc4x370cZ25RsO/U3f/M3/M3f/E3a7w0Pptdffz3zDRIRGafhYVOU42L/mW5CkRiFOU4G0mz62zsk1MLRGO+3B6kuzk25R/Nhk0OHYoqIjKG+rZete06DCRvXxkeQnttxkvea/bzfESQYimK3ASYjtqAyAJfDRlGuk3DUpKYsn6fvWKLhw/PQoZgiIpNkfkkeRTkuth9tobbOy/ySPP7nhhXcuqQcu2FgtxmYsXiADV85ZBIPso7eEP3hKMeae1ROP8m0i72IyHkM3cXj2ddOsLTCQ2cwRGmBO146P3hfunGtgUgMwwBbzCTHbWdphQrUJpNCTERkDEMLO2rrvGzde5rY7hhdwTB2m23UI1gSTCDXZadyRg6dwVDKOjH58BRiIiJpJBc4B0NsP9JCZzAEwGxPNofO+ojEIBIbWdSRTu9AlEXlBXiynXQGQ9S39WpebJJoTkxEJI1kVaIJG66qBBO2H2lhxZxCFpR+EEC2MXbQc9o/uKehPYCvL8xL+5vYuvt0hlt/6VBPTEQkjaEl8PNL8qhv66Uo18X6ZeVcu7CEb//ne7T4++kctit9ItRcdhsxM4bNgCyHncauYLzKAz74r3xoCjERkTSGbwk19O+1dV56BiJUFefSeaY75XGJwzAjMZP8LCf+vjARM0aOy8WdV1XQ3hPS2rBJpOFEEZFhXj/Wyj1b9vD6sdbktaHHpiyt8FCQ5cAXDFOQldoXSNR5RGImeW4HNsPAYdgIDERo7wmlHO8iH556YiIiw2x58xR7GjoAkpWEW3ef5qW3m+gMhGjqDnLE23Pe5+noDQ0OHZrMmZlLZ0BFHZN
NISYiMsx9n5qX8l8ADIiZJnsbOsZ9iGUwHKXc4+aWJeX4gmFeersJDHjiM4sz0exLkkJMRGSY62pKR6zl2rimigON3Rw+6yM8fH+pUbgdNjb/12VcV1PKU795N37xI7XR38WnOTERkfEywWEzRsyDpZPltPHk7R/juppS6tt68fWFmV+Sx7U1Jed9rIyfemIiIuNQW+fF6+vnilkFeP19+PsjY95/WWE2vzvopdyTzaEmH68ebQHQjh2TTCEmIjIOSys8lB/Loj8UpdU/MOa9DptBW88A9W0BgqEo37lzOZ2BeJGHyusnl0JMRGQcDjX5qG/tpT8SxRhxkrNBNGomNwKOxkx6ByLYDaguzmV+SR5P3K5ijkzQnJiIyDisX1bOjYvKCEdMosO+Fx4SYDB4BIthsKyikK9ef/kUtvLSoxATETmPxGbAnmznuIsLS/NdfOfO5VoTlmEKMRGR80huBmxAluP8H5t2Azb/12UAyV0+JDMUYiIi57F+WTmr5hbx6tEWQtHzH79SVZzLoSYfW3efZtu+xuRpzkO3rpLJoRATERlDYijxvWY/jZ19aQ/BdBhQkuvi45UePl5ZyIqKwmTPbcNVlcmKxESPLhFq8uGpOlFEZAyJ05xDkdF7YBETfP1hbpszmyc+s5j6tl6qB0+DThzj8uxrJ1ha4QFUZj+Z1BMTERnD+mXlzMx14esLj3lfzDTxBcM8+9oJgJTd6hM9sENNPu1iP8nUExMRGcP8kjxMk7TDiEOVe7IB2LLzFJ3BUMomv0MP2JTJpZ6YiMgY6tt6MYwPTmweTUcgxFGvn5hJskeWKOBIHKipHtjkU4iJiIyivq2Xx18+TGNn36jrwxaV5eHJdhCORjnZ1suC0jw82U4VcEwRDSeKiAyTqEiMH2IZwGYDc0iKLZqVT7O/n65gmFDM5IYryth5oo3AQIQVcwrZuKaKolyXhg+ngHpiIiJDJHpfW/eeBgNuXTqLVXOLKC9wk+W0cfW8Ir556xXMKsjCZbfhstv4/bvN+PrC1MwqYOOaKg0fTiGFmIjIELV1XurbAswuzE4eYFl31geGgcNmUFNewKEmH53BEJ+YO4N7r66iIMuJw2ZQXZw7+HgtZp4qGk4UERkiMQTYGQix/WgLq+YW4XbYaO0ZiA8pmqnVhrV1Xkzg43NmJOfCIF5iL5mnEBMRGSIxFFjf1ktRrouG9gBtPQPkux1cs6CEjWvjw4WJAFta4eEe5iSDTXNhU0shJiKSRiLMnvrtu8RM6AtHk2eDATy34yT/ccjLp5eW8z83rEg+Tj2wqaUQExEZw8Y1VYMHhKUuVm5oDxCKxGhoD1y8xolCTEQknUSZ/fpl5WlPZX7oxgVsefMU931q3kVonSQoxERE0kieIUb6IcLrakq5rqZ0qpslwyjERETS0H6H1qAQExFJI1HYIdObFjuLiIhlKcRERMSyFGIiImJZCjEREbEshZiIiFiWQkxERCxLISYiIpalEBMRGYf6tl6efe2EzgqbZhRiIiLjkNiGqrbOe7GbIkNoxw4RkXHQNlTTk0JMRGQctA3V9KThRBERsSyFmIiIWJZCTERELEshJiIilqUQExERy1KIiYiIZSnERETEshRiIiJiWQoxERGxLIWYiIhYlkJMREQsSyEmIiKWpRATERHLUoiJiIhlKcRERMSypiTEnn/+eaqrq8nKymLlypXs3LlzzPvfeOMNVq5cSVZWFvPmzeP73//+VDRTREQsJuMhtm3bNh5++GG+9a1v8c4773DNNddw6623cubMmbT3NzQ0sH79eq655hreeecd/sf/+B88+OCDvPTSS5luqoiIWIxhmqaZyRdYvXo1V155Jd/73veS1xYtWsQdd9zB5s2bR9z/zW9+k9/85jccPXo0ee3+++/n4MGD7N69+7yv5/f78Xg8+Hw+CgoKJueHEBGRKTORz/GM9sRCoRD79+9n3bp1KdfXrVvHrl270j5m9+7dI+6/+eab2bdvH+F
weMT9AwMD+P3+lC8REbk0ZDTE2tvbiUajlJWVpVwvKyujubk57WOam5vT3h+JRGhvbx9x/+bNm/F4PMmvysrKyfsBRERkWpuSwg7DMFL+bprmiGvnuz/ddYDHHnsMn8+X/GpsbJyEFouIiBU4MvnkxcXF2O32Eb2u1tbWEb2thFmzZqW93+FwMHPmzBH3u91u3G735DVaREQsI6M9MZfLxcqVK9m+fXvK9e3bt3P11VenfczatWtH3P+HP/yBq666CqfTmbG2ioiI9WR8OPGRRx7hf//v/82//Mu/cPToUb7+9a9z5swZ7r//fiA+HHjvvfcm77///vs5ffo0jzzyCEePHuVf/uVf+NGPfsTf/d3fZbqpIiJiMRkdTgTYsGEDHR0dPP3003i9XpYsWUJtbS1VVVUAeL3elDVj1dXV1NbW8vWvf53nnnuO2bNn80//9E98/vOfz3RTRUTEYjK+TmyqaZ2YiIi1TZt1YiIiIpmkEBMREctSiImIiGUpxERExLIUYiIiYlkKMRERsSyFmIhkXH1bL8++doL6tt6L3RT5iFGIiUjG1dZ52bavkdo678VuinzEZHzHDhGR9cvKU/4rMlkUYiKScfNL8vjaDQsudjPkI0jDiSIiYlkKMRERsSyFmIiIWJZCTERELEshJiIilqUQExERy1KIiYiIZSnERETEshRiIiJiWQoxERGxLIWYiIhYlkJMREQsSyEmIiKWpRATERHLUoiJiIhlKcTkgunIeRG52HQoplywxJHzncEQRTku1i8rZ35J3sVulohcQhRicsESR813BkJs29cIoNN7RWRKKcTkgtS39VJb500GWVGuK/lnEZGpohCTtIaHVOLPieHCxFAixHtf6oGJyMWgEJMR6tt6efzlw5wcLNjoDIb4+b5GdrzXynfuXM78kjzWLyunMxiiMxBKFnaMFXoiIpmgEJMRauu81LcFuHwwrLbuPk04anK8pYfaOi9fu2EB80vyKMpxsW1fI0W5LoBkz2zon9VDE5FMUojJCIleFmb879fWlHCgqZvqmbkU57u4Z8se7vvUvPh9gRAN7QF6+sOU5LlZWuGhsign+TwiIpmkEJMR0vWyvN395Djt/OzPTRxs6uZUW4BblszC1xfmd3XniMZMsl12DjX5uK6mVD0wEZkSCjFJK9GLWlrh4Y1jbRTlOnnnTDezCrPIdzto9vfzwlunmZnrwjRNDCDf7WRphefiNlxELinasUNSJHbhgPh81qEmH9uPtuB22omaJt7uPoKhKCZgxqAvFKUwx0WO20Fbbz+/OXDu4v4AInJJUU9MUiRK5xvaA7T4+1k9r4iqohxWzytiIBzF6+ujOxwBIBwziZnQ3RciHI0//qjXz7OvnVBloohMCYWYpEiWyB/y8l5zD6faAtjtBgBnOoPJXliCvz+CI/5tbAZUFGWrMlFEpoyGEyXF/JI8vnbDAmbkOjEBX18Iw4DTHUFshkHMBIP4V4Jhi//N5bBRUZjDhqsqVZkoIlNCPTFJq9k3AEAwHCPY2ZfyPXPYveGoSY7Lzi2LZ7FxbRWgxc4iMjXUE5O0spwf/GrYjDFuHBQzTaqLc5lfkpecV6ut82awhSIiCjEZxd/fcgUfK89nRrYTc3jXawjDiP8S5bsdFOe7ePa1Eyyt8GhIUUSmhIYTL3FDN/pNDP3Vt/VyqMnH6nkz+dm+xhHDhxCfEzMB0wSbzSAYivK7g15OdwYBFXWIyNRQT+wSl27oL3HNFwyzsDSfHOfIX5NEsNkNWFiWx82LZ3Hfp+apByYiU0o9sUtcInCGBk/izw3tAerbesly2gmGY2kfHzPjW1KtX1LOdTWlXFdTmvlGi4gMUk/sEpcoqR9eRdgZDPHW+530DkSIxEafFDOBnoEIxfmu5G4fiaNZREQyTT2xS1C6ebCh1xPnhwUG4gub/f2RUZ/LbhjEYia/O+ilvSekhc4iMqUUYpeg4acyJ2zdfZqf72+ickY2M3Jd9A70jfY
UGIDdBn9RU0IwFOW+T83TESwiMuUUYpegdPNgABgQjsZoaA8QHWMIEeJrx/6ippT8LCcAz7x6goduXKAemIhMKYXYJSgxDzbcxjVV+IJh/nCkmf5I+kKOhKgJ+0534esLgwkx4JnXTqiwQ0SmlAo7BIjPh23dc5qDTd30haLYx7FNR1cwTMwEmw0cNoPqmbnJ51KBh4hMBfXEhPq2Xr7207d5r7mH5Cji4DYdNgPGGll02W3kuOwsnl3AV6+/HBh9zk1EZLIpxITaOi8nWnuJmfHFy9EhoTVagBXlOJnlyeKywmwqZuSwcW1VstJx1Dk3EZFJphC7xNW39dIZDHHdwhLae0Nku2zsOdWJaYLdZqRdI5bjsvONW2qSJfXLKgpTSvVHm3MTEZlsCrFLXG2dl98eOEeO287Tn12C19dHXZOfSDRK9cxcjg320BISZ4m194TU4xKRi06FHZe49cvKyXHbaerq45lXT/BPr56kdyCCzbBx1t8fH2Ic/C2xASsqC7l58az4jh4NHew51UHj4Ka/CSrsEJGpop7YJW5+SR5Pf3YJW948RVlBFsea/QAEw1EIx+8xB6vtE0X31cW5vLD3DL98O0LP4G4eQ0vrVdghIlNFISbJjXtfP9bK0WY/J1t7CQ+p7ogRnx+LxkzaAwM0tAco92RxsrWH/CwHty0vT9nKSsOMIjJVFGKXsKHBA7DlzVM0dval3a0jGjMxgMbOPrzd57h9+WxyXHbq2wK094RG9L7UAxORqaAQu4Rt3X2abX9u5Kd7z7B2/kxOtvVSOSOb3lCEFl8/kcHeWGIYMRFtkZiJJ8fJV6+/PCUE4YPe12ibDIuITCaF2CXM1xemLxIl6I9y1OtndmE2A+EoHb0hIjGToRtPJU5yBvBkffBrM7THNfTPmhcTkamgELuEebKdZDvs5LrsGAYcb+4BINdlpz8UTbnXNvg/TpuN4nw3/3momaIc16gBpXkxEZkKCrFL2Ma1VRTluugMhvjPQ80snJXPiopCmrqCvHG8jXDUTPa+7Pb4wudQJMaZjgBLKgrHDCgteBaRqZDRdWJdXV1s2rQJj8eDx+Nh06ZNdHd3j3p/OBzmm9/8JkuXLiU3N5fZs2dz7733cu7cuUw285KVCJqNa6q4Z/UcHrphAUW5LvKznGQ57VxWmAXE908MRU1ig7vVR2Mkxxa1JkxELibDNM2xD476EG699Vaampr44Q9/CMBf//VfM3fuXH7729+mvd/n8/GFL3yB++67j+XLl9PV1cXDDz9MJBJh375943pNv9+Px+PB5/NRUFAwaT+L1Z2v0KK+rZfHXz7Me81+SvOzyHLZGQhHOertwRi2CbDNgDy3g6WXeSgryOLVoy18/soKNq6tUjGHiHxoE/kcz9hw4tGjR3nllVfYs2cPq1evBmDLli2sXbuWY8eOUVNTM+IxHo+H7du3p1x79tlnWbVqFWfOnGHOnDmZau5H3vkKLRLHsGQ77Zwc7FV9Yu4MivNc9IWiBELR5NBizITAQJR3z/kJhqPETJMDTd2wG7YfbRn1NUREJlvGhhN3796Nx+NJBhjAmjVr8Hg87Nq1a9zP4/P5MAyDwsLCtN8fGBjA7/enfMlI65eVs+GqyrTzWPVtvRw4041pQnGeO16JaJoc9fbQ3hsiGP4gwBKipkm2y85DNyxgeUUh57r7wGDU1xARyYSM9cSam5spLR15ym9paSnNzc3jeo7+/n4effRR7r777lG7lJs3b+app576UG29FIxVaFFb56WxK0hBtpOKGdksKi+goT1A3dnueHiZ4HbYGIjEMIx4uT3A2nkzqSzKYWFZPgtn5bNxTZWGEUVkSk24J/bkk09iGMaYX4n5K8MYeTqwaZpprw8XDoe56667iMViPP/886Pe99hjj+Hz+ZJfjY2NE/2RLnnrl5VTU5aPvy/MrvoOWvz93PmJCpy2+K+HYcBAJL5qzDRhZq6LO1ZchifHydbdp9l+tIWiHJcCTES
m3IR7Yg888AB33XXXmPfMnTuXuro6WlpaRnyvra2NsrKyMR8fDoe58847aWhoYMeOHWNO7Lndbtxu9/gaL2nNL8nj6TuWsHXPaQ6c6aa+LcDvDnqTXa7hu1DZbQYNHQHO1ffxyfnFlOS5aWgPUN/WqyATkSk14RArLi6muLj4vPetXbsWn8/HW2+9xapVqwDYu3cvPp+Pq6++etTHJQLsxIkT/PGPf2TmzJkTbaJcKBOyXTYGIlHq23px2W30h2MjbmvxD+Dvj/DxykIAjnj9HGvpocXfz9N3LFGQiciUyVhhx6JFi7jlllu477772LNnD3v27OG+++7jtttuS6lMvOKKK/jVr34FQCQS4Qtf+AL79u3jhRdeIBqN0tzcTHNzM6FQKFNNFeLzYi+93cTehk66gmGa/QPJY1aGczsMambl8/QdS/BkO3HYDHJddg40drN1z+kpbrmIXMoyumPHCy+8wIMPPsi6desAuP322/nnf/7nlHuOHTuGz+cDoKmpid/85jcArFixIuW+P/7xj1x33XWZbO4lbf2ycjoDIZq6gxw910NHYIBQNN4LM4jv1hH/Mywq91A9MxeI7/oBsLehg9MdQUaUMUqSNkUWmXwZDbGioiK2bt065j1D11rPnTuXDK69llEkPlw3ro1XFz772gl+9P+dwm6zUZrv5v2OQMr9Bxq7edfro6EjwIqKQjCgtWeAgmwHvr6w5sZGoU2RRSZfRredkukvsVPH1r2nqa3zAvFeWUG2k2AoQl84in3Ib4k5+BWLmRxq6mbr3tP4guF4dWN/hFePtiSfR1KNtVZPRC6MQuwSV1vnpb4twOUleckP1/kleTz92SXMzHXR1jOAbciSiMQf40OMEI6a9PSHefqOJdy5spIbF5XRGQil7KU42v6Kl9q+i4m1euqlikwehdglbv2ycu5ZPWdEVeF1NaUUZDuJxEzC0Q+GeBOjveEhdff7TncB8MTti6kuzmX7sN5YbZ2XrXtP8/jLh1MCKzG8pp6biFwoHcVyiRtrJ49EYA1fJ5bgsEEkBt3BMM/tOEl1cS5LKzxA6gnPncEQsz3Z1LcFqK3zJl9v+Jlj07HwYTq2SUQ+oJ6YjDqst6KyMLnFlB2wG/Ed7BOyHHYM4td2n+rg/931PlvePJUMpWdfO8HWPafZfqSFFZWF3LN6DksrPMnXGj68Nh17ZunadKkNg4pMZ+qJSUrV3Ppl5cmex+0rZvPHY610BcNEYUT5fG8oit2A0vwsevvDhKIm73r9yZ0/jrf2cPPHZqUUM8SHFOPVjsN7gNPxNOh0bVKVocj0oRCTlA/qoR/QnYEQvQPpFzwn5LodzMh1EghFGIhECUViHDjdxYm2AAPhKLtPdXD7itnJ0v2Tbb0pRSRDTcfToBNtSvS+1i8rn5ZhK3Kp0nDiJSzxwQwkh/WGloH7+sJAfAf70fSFoxzx9uAf3N3DBI619vKXKysoyc+ixd/PljdPJefG1i8pTykiSTc0N95rk2n489e39fLUb97lqd++m5wXSwwrqspQZPpQT+wSlm5YbGhvyJPjxGm3YTMgEoXosOFEl8MgFBlZ9VE1I5uNa6vw9YU52NRNZyDEcztO8tb7nWy4qjLlwz9dG8Z7bTINf/7ENlwx0+R4cw/3fWoeoN6XyHSjELuEjTYsluh5LCzLw2k38PXFe1kGqdNioYiJ3QbRYXsEn/X189yOk7x6tAW3w0Z7byh5YGaisCNR7ZeuDeO5lq5q8MNUEg59/vq2XjoDIW5cVEZDR4B3vX4e//Vhnv6sNjcWmW4M8yO2z5Pf78fj8eDz+cY8wkVG9+xrJ9i2r5Gqohze9foJRWLETDO5o73diIeZ3QbhaPrnWDQrn7PdfRTnuTnTGWBGjov/5y+Xc6jJx9a9p7m8JI/7PjWPQ00+llZ4ONTkm1D4JNq44arKZM/sqd+8y0tvN/H5Kyt44vbFALx+rJUtb57ivk/N47qakYe0Qjz8tu4+nRw+bWg
P0NgZpGZWPrctL+f/+s/38PVFmFOUzY//apWCTCTDJvI5rp6YpEj0Qm76WBnXLiwZnM8KcPX8mRxs6qaxK0g4YpLtsoNpEh7eDSNect8biuC225hV4KanP0x7b4hnXj3Bd+5czp5THdS3BZLP7dpvEBiIsOdUB0/fsQTgvD2qRM9paM8OA2KmyYHG7mQJ/5Y3T7H7VAen2gOsnRc/1qfZ10dDe5AHb7ycck82j//6MK3+AcKxGHbDGNyVxOBdrx8Ap82GJ9uBvz/C4y8f1nEzItOIQkxS1NZ52X60hQ1XVXJdTSmVRTnJQBlaHh8MRclypi/4ME0419VH1ITu010UZjlx2A2K81zU1nlTemBb3jzFseYect2O5GJoIO381/Be1dduWJDskXUGQvj6wuRnOTjTGWDrnviejs3+fmbmuugKhnj5wFngg8XbT/z6XSqLcmjs7MNuAEa82jIaM/H3R8h12SkryGJhWT7X1nwQ6IkF21oILXLxKcQkxfC5p8S8VW2dl9XzininsZv+cJTCbCdVRTkca+khOOzgTJN4EUhZgZuZuS68vn7KPVkEQlH+6bUT5Gc5uK6mlPXL4pWKtXVeivNd/O6gl6UVHiqLcpJtGBoUW948xZ6GDoLhKG8cawMDFpblUVWUQ1N3kDePt2M3DGrK8zlwppvDZ32EYyZFOU7yXQ46wqHUOb2omQzlyODOxt3BMFnO+Ipuf1+YHcdaWVxewMa1Vcm2Jt4brRcTufg0JyZjSuxyX98WYH5JbnKdV1lBFv9xyIvDblBVlMPJ1t54YPkHko/Nc9uJmSZOmw3DgEjMpHcgPonmtBksuczDijmFbFxTxXM7TvIfh7x8emk5/3PDiuRrP/DC25xs6+Uzy2bzieoZfO/1emrK8tnb0AnEQ7a+rZfLZmTT2NnHgtI8VlQWUnvYSyxm0t4bDy67AdkuO70DUbIcNkLR2KjbadltBtHBb+a4bORnOfnk/GJa/P0pc2vqiYlkhubEZNLU1nl5r9lPrtvBbcvLae8JsbTCwzOvnojPHZlwqi1AWUEW7b0DKY9NBBbEcNkN7DYDmxEfznM5bBxv6eF4Sw9vHm/jTEeQcMxk5/E2Htl2AE+2Eww42dZLOGryXrOf6uJcQhGTs919lOS7cTtsZDtt9Idj9PRHKCtw0x+JYmJiw2B+aS49A930h2NETRiIxNvTHxk5j5dgtxkYmMlKzHJPNnesuIwdx1qpa+oGSIbYdFycLXKpUYjJmNYvK2fHsVZOtPRwvKWXJz6zmGdfO8E5Xx+l+W4aO/swgY5ACJfdRl84cRo0I4bubDEz2fsZiMSDzZPt4lRbAJN4QUhnMMSv3jmLDSjIdmK3GYSjJmbMpO5sN/6+MK3+fgwD3E47uS474WiMpq6+5Gsdb+klGjPx+vvJddqT10erpBzKaTeonplHxYxs2gMhHrphAdfVlCaDu6wgK7kgWr0wkYtPO3bImOaX5LGiojB+pthgAC2t8HB5SR5fWFlBWYEbp82gpjSPniFbVKUbqYuZH2wgHImZBMMxvP7+5L1uhy156GYU6OoLJ8v6j7f28uqRVoLhKDHic24x06QzEBrxWtEh44SBNMlljLgypI0xk1A0lhJgEO99XX9FKW+930ltnXdablYscilST0zOa+PaKopyXcmChkNNPk629dLYFaRnIILLYQObwZrqIhrag8zyZHGgsXtwLVl8/DDG4InQY8zA9oVjZDts9KUZ7hu+WwhAKBLDNOP/Eht9gHCkdE3Idtr4eGUh7zX3JIs9Hv/1YX78V6uAeK9r+DEzw/8sIlNPISbnNXzuZ/2ycvac6uBYSw9zZuTQ3RemsTPIZ5bPpqa8AF8wjL8/zKm2ANGYicNmYDNN7Pb021QNlS7AhksMVY4ViAmJObjz+S+XF7OsopA/vx8/4NNpj69dG6vkX/NhIhefQkzGZXglXqLcvDMYovaQl5qyfDDhZ/saicRMVs4p5Gx3H/3hGJFYvFC
iNMdFs39gcDFxPFwSxSETKZF12GBoVb8JOGwGkTRpVZrvptk/MOI6pPbg8rOc8aUEh72cbO3luoUlLKsoVK9LZJpTiMm4JOaAOgOh5NBiYsFvUc4HQ40Hmro50dJDQ0cwOZ8F8d5TJGqS5bRRnOem2d+HGYV8tyO5A/54DVuWljyYMx2nfeS0b2G2g+riPIrzXLxxvA2T+GbH80vy+Oe7r0wOHR5q8gHpj2NRMYfI9KAQk3FJhFRnMJQytDZ8qPE7f7mc2jovJiYv7m3ExKQjEKIw20kwFCVmgq8vnKwUdNsN8tx2AgPRCfXGIH7adJR4TyyUZtLMIL5gOTH8aAAOO3zu4/G9FevbeqkoygETNq6pAj4IrMROIImfE7S4WWQ6UojJuAztjQzteY2mKxDGbje4adEsinJdLK3w8NC/v0MgFCUSjSXnqtoDYWy2DxYYTyTI0lXMJ543a3D9mK8/dQf+ck/8mJjEz/TEZxanfe7x7q4vIheXduyQSZXowdz0sbJk2CWG3l586zRP/PoIoWgsXuxhI6XQY6JVhgkOG8Ri4LDbmF+cw4m2Xhw2W3JRc47LzifnzxxRNi8i09NEPse1TkwmVeJk6GsXltAZDLF19+nk4uD/tqqK25aV47QbVM3MwWGk/vrFiPeYPFmOlNOkbUa8cMNhMxjcp5c8tx0bML8kl9uXX4ZhMwhHY2S5HZTkZTEQiVGQ5aAkz8WKCg8VRTl85y+XpwTY8NObRcR6NJwok2ronNJL+5sAKMp1JYciPTlONq6uAgN+9fZZMGBmrovKGdkcbe6htz/C7MJs3u8IYDdgbnEu84pzOdvdx2WF2eRnOfFkO9nb0MFRbw8FWU5uXzGbg03d+INh7ryqgj83dPEfh7zcuKgMT7aTn751hrqzPopyXClzWYnTm4ER3xMRa1CISUasX1ZOZzAEJim7vm8/0sJNHysDEz535WVgwvajLaydX0zNrAJ+tq8Rw4iv20oM/x1q8rG3oZOmrj7uu2YeX7thAa8fa+WZ105QPTOX3xw4R1NXHw67QXtPiK9efzmeHCcMFpE4bAYLyvJHzGWtX1ZOZyB+6rTmuUSsSSEmGZGuaCJZ4RgIJc8sW7+sPFn48cbxNqpm5nCmM4jbaaetZyB54vPwQLyuppRDTT627WukJM+N025jQWkeSys88QXKg+F406Iy7r92ftqy+PkleckToEXEmlTYIVMu3REmiYKQkjw39W293LiojOri3DHXZCWeJ7GmK3Hu2WiFJSJiDRP5HFeIybSQLpAuJHx0xpeI9SnEFGIiIpalEnsREbkkKMRERMSyFGIiImJZCjEREbEshZiIiFiWQkxERCxLISYiIpalEBMREctSiImIiGUpxERExLIUYiIiYlkKMRERsSyFmIiIWJZCTERELEshJiIilqUQExERy1KIiYiIZSnERETEshRiIiJiWQoxERGxLIWYiIhYlkJMREQsSyEmIiKWpRATERHLUoiJiIhlKcRERMSyFGIiImJZCjEREbEshZiIiFiWQkxERCxLISYiIpaV0RDr6upi06ZNeDwePB4PmzZtoru7e9yP//KXv4xhGHz3u9/NWBtFRMS6Mhpid999NwcOHOCVV17hlVde4cCBA2zatGlcj3355ZfZu3cvs2fPzmQTRUTEwhyZeuKjR4/yyiuvsGfPHlavXg3Ali1bWLt2LceOHaOmpmbUx549e5YHHniA3//+93z605/OVBNFRMTiMtYT2717Nx6PJxlgAGvWrMHj8bBr165RHxeLxdi0aRPf+MY3WLx48XlfZ2BgAL/fn/IlIiKXhoyFWHNzM6WlpSOul5aW0tzcPOrjvv3tb+NwOHjwwQfH9TqbN29Ozrl5PB4qKysvuM0iImItEw6xJ598EsMwxvzat28fAIZhjHi8aZpprwPs37+fZ555hp/85Cej3jPcY489hs/nS341NjZO9EcSERGLmvCc2AMPPMBdd9015j1z586lrq6Olpa
WEd9ra2ujrKws7eN27txJa2src+bMSV6LRqP87d/+Ld/97nd5//33RzzG7Xbjdrsn9kOIiMhHwoRDrLi4mOLi4vPet3btWnw+H2+99RarVq0CYO/evfh8Pq6++uq0j9m0aRM33nhjyrWbb76ZTZs28Vd/9VcTbaqIiHzEZaw6cdGiRdxyyy3cd999/OAHPwDgr//6r7nttttSKhOvuOIKNm/ezOc+9zlmzpzJzJkzU57H6XQya9asMasZRUTk0pTRdWIvvPACS5cuZd26daxbt45ly5bxb//2byn3HDt2DJ/Pl8lmiIjIR5RhmqZ5sRsxmfx+Px6PB5/PR0FBwcVujoiITNBEPse1d6KIiFiWQkxERCxLISYiIpalEBMREctSiImIiGUpxERExLIUYiIiYlkKMRERsSyFmIiIWJZCTERELEshJiIilqUQExERy1KIiYiIZSnERETEshRiIiJiWQoxERGxLIWYiIhYlkJMREQsSyEmIiKWpRATERHLUoiJiIhlKcRERMSyFGIiImJZCjEREbEshZiIiFiWQkxERCxLISYiIpalEBMREctSiImIiGU5LnYDJptpmgD4/f6L3BIREbkQic/vxOf5WD5yIdbT0wNAZWXlRW6JiIh8GD09PXg8njHvMczxRJ2FxGIxzp07R35+PoZhXOzmAPF/VVRWVtLY2EhBQcHFbs60pfdpfPQ+jY/ep/GZju+TaZr09PQwe/ZsbLaxZ70+cj0xm81GRUXFxW5GWgUFBdPml2Q60/s0Pnqfxkfv0/hMt/fpfD2wBBV2iIiIZSnERETEshRiU8DtdvPEE0/gdrsvdlOmNb1P46P3aXz0Po2P1d+nj1xhh4iIXDrUExMREctSiImIiGUpxERExLIUYiIiYlkKsQzp6upi06ZNeDwePB4PmzZtoru7e9yP//KXv4xhGHz3u9/NWBung4m+T+FwmG9+85ssXbqU3NxcZs+ezb333su5c+emrtFT4Pnnn6e6upqsrCxWrlzJzp07x7z/jTfeYOXKlWRlZTFv3jy+//3vT1FLL66JvE+//OUvuemmmygpKaGgoIC1a9fy+9//fgpbe/FM9Pcp4U9/+hMOh4MVK1ZktoEfhikZccstt5hLliwxd+3aZe7atctcsmSJedttt43rsb/61a/M5cuXm7Nnzzb/1//6X5lt6EU20fepu7vbvPHGG81t27aZ7733nrl7925z9erV5sqVK6ew1Zn17//+76bT6TS3bNliHjlyxHzooYfM3Nxc8/Tp02nvP3XqlJmTk2M+9NBD5pEjR8wtW7aYTqfT/MUvfjHFLZ9aE32fHnroIfPb3/62+dZbb5nHjx83H3vsMdPpdJpvv/32FLd8ak30fUro7u42582bZ65bt85cvnz51DT2AijEMuDIkSMmYO7Zsyd5bffu3SZgvvfee2M+tqmpybzsssvMw4cPm1VVVR/pEPsw79NQb731lgmc9/+UVrFq1Srz/vvvT7l2xRVXmI8++mja+//+7//evOKKK1KuffnLXzbXrFmTsTZOBxN9n9L52Mc+Zj711FOT3bRp5ULfpw0bNpj/8A//YD7xxBPTOsQ0nJgBu3fvxuPxsHr16uS1NWvW4PF42LVr16iPi8VibNq0iW984xssXrx4Kpp6UV3o+zScz+fDMAwKCwsz0MqpFQqF2L9/P+vWrUu5vm7dulHfk927d4+4/+abb2bfvn2Ew+GMtfViupD3abhYLEZPTw9FRUWZaOK0cKHv049//GPq6+t54oknMt3ED+0jtwHwdNDc3ExpaemI66WlpTQ3N4/6uG9/+9s4HA4efPDBTDZv2rjQ92mo/v5+Hn30Ue6+++5ptXnphWpvbycajVJWVpZyvaysbNT3pLm5Oe39kUiE9vZ2ysvLM9bei+VC3qfh/vEf/5FAIMCdd96ZiSZOCxfyPp04cYJHH32UnTt34nBM/4hQT2wCnnzySQzDGPNr3759AGmPgTFNc9TjYfbv388
zzzzDT37yk2lzhMyFyuT7NFQ4HOauu+4iFovx/PPPT/rPcTEN//nP956kuz/d9Y+aib5PCS+++CJPPvkk27ZtS/sPqY+a8b5P0WiUu+++m6eeeoqFCxdOVfM+lOkfs9PIAw88wF133TXmPXPnzqWuro6WlpYR32traxvxL6KEnTt30traypw5c5LXotEof/u3f8t3v/td3n///Q/V9qmUyfcpIRwOc+edd9LQ0MCOHTs+Er0wgOLiYux2+4h/Jbe2to76nsyaNSvt/Q6Hg5kzZ2asrRfThbxPCdu2beNLX/oSP//5z7nxxhsz2cyLbqLvU09PD/v27eOdd97hgQceAOLDrqZp4nA4+MMf/sD1118/JW0ft4s4H/eRlShY2Lt3b/Lanj17xixYaG9vNw8dOpTyNXv2bPOb3/zmhIocrORC3ifTNM1QKGTecccd5uLFi83W1tapaOqUWrVqlfmVr3wl5dqiRYvGLOxYtGhRyrX777//kijsmMj7ZJqm+dOf/tTMysoyf/WrX2W4ddPHRN6naDQ64nPoK1/5illTU2MeOnTI7O3tnapmj5tCLENuueUWc9myZebu3bvN3bt3m0uXLh1ROl5TU2P+8pe/HPU5PurViaY58fcpHA6bt99+u1lRUWEeOHDA9Hq9ya+BgYGL8SNMukRJ9I9+9CPzyJEj5sMPP2zm5uaa77//vmmapvnoo4+amzZtSt6fKLH/+te/bh45csT80Y9+dEmV2I/3ffrpT39qOhwO87nnnkv5venu7r5YP8KUmOj7NNx0r05UiGVIR0eHec8995j5+flmfn6+ec8995hdXV0p9wDmj3/841Gf41IIsYm+Tw0NDSaQ9uuPf/zjlLc/U5577jmzqqrKdLlc5pVXXmm+8cYbye998YtfNK+99tqU+19//XXz4x//uOlyucy5c+ea3/ve96a4xRfHRN6na6+9Nu3vzRe/+MWpb/gUm+jv01DTPcR0FIuIiFiWqhNFRMSyFGIiImJZCjEREbEshZiIiFiWQkxERCxLISYiIpalEBMREctSiImIiGUpxERExLIUYiIiYlkKMRERsSyFmIiIWNb/D6YzuJN4VrOcAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "edm_samples = edm_workflow.sample(num_samples=3000, conditions={\"observables\":np.array([[0.0, 0.0]], dtype=np.float32)}, steps=1000)\n", + "plt.scatter(edm_samples[\"parameters\"][0, :, 0], edm_samples[\"parameters\"][0, :, 1], alpha=0.75, s=0.5)\n", + "plt.gca().set_aspect(\"equal\", adjustable=\"box\")\n", + "plt.xlim([-0.5, 0.5])\n", + "plt.ylim([-0.5, 0.5])" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "id": "5dfa0d57-c31e-44fe-98ae-36d1669e21d5", + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAA0YAAAGLCAYAAADjzYJbAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjAsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvlHJYcgAAAAlwSFlzAAAPYQAAD2EBqD+naQAAwodJREFUeJzs3Xl8nWWd///XvZx9TU6Sk71Jm650oYsgm4ACFUeQGR0YF0RFEEVZ/LqMv4oijqIzKC4z4DaCOo7D6LgCM4AbFNlrgZbubdI0e3KSs5/7nHMvvz/ShKZNF6Bp2ubzfDzO4zT3uZfrHMK58r6v6/7ciuM4DkIIIYQQQggxg6nT3QAhhBBCCCGEmG4SjIQQQgghhBAzngQjIYQQQgghxIwnwUgIIYQQQggx40kwEkIIIYQQQsx4EoyEEEIIIYQQM54EIyGEEEIIIcSMp093A44227bp6ekhFAqhKMp0N0cIIWYUx3HIZDLU19ejqnLubYz0TUIIMT1eSb900gWjnp4empqaprsZQggxo+3Zs4fGxsbpbsZxQ/omIYSYXkfSL510wSgUCgGjbz4cDk9za4QQYmZJp9M0NTWNfxeLUdI3CSHE9Hgl/dJJF4zGpiiEw2HpfIQQYprIdLGJpG8SQojpdST9kkwAF0IIIYQQQsx4EoyEEEIIIYQQM54EIyGEEEIIIcSMd9JdYyTEicqyLMrl8nQ3Q4hDcrlcaJo23c0QQhwDtm1TKpWmuxlCHNbR6pskGAkxzRzHoa+vj2QyOd1NEeKIRKNRamtrpcCCECexUqlEe3s7tm1Pd1OEOCJHo2+SYCTENBsLRTU1Nfj9fvljUxy3HMchn88zMDAAQF1d3TS3SAgxFRzHobe3F03TaGpqkps1i+Pa0eybJBgJMY0syxoPRbFYbLqbI8Rh+Xw+AAYGBqipqZFpdUKchEzTJJ/PU19fj9/vn+7mCHFYR6tvklMAQkyjsWuKpOMRJ5Kx31e5Jk6Ik5NlWQC43e5pbokQR+5o9E0SjIQ4Dsj0OXEikd9XIWYG+X9dnEiOxu+rBCMhhBBCCCHEjCfBSAghhBBCiCmwdetWamtryWQy090UAP785z+jKMorqoR73nnncdNNN43/3NLSwje+8Y2j3rYxHR0dKIrC888/D8CGDRtobGwkl8tN2THHSDASQsxoU/0FL4QQ4th43/veh6IoBzze/OY3j6/T0tIyvtzn89HS0sLll1/OH//4xwn7GvvjXNd1uru7J7zW29uLrusoikJHR8ch27RmzRquv/56QqHQUXufr8WZZ55Jb28vkUjkVe/j2Wef5dprrz2KrTq0JUuWcNppp3HnnXdO+bEkGAkhXrW+vj5uvPFG2tra8Hq9
xONxzj77bL7zne+Qz+enu3lCCCFmmDe/+c309vZOePzsZz+bsM5tt91Gb28vW7du5cc//jHRaJQLLriAL33pSwfsr76+nh//+McTlv3oRz+ioaHhsG3p6urit7/9Le9///tf25s6itxu92u+1091dfUxLxr1/ve/n7vvvnu8MMhUkWAkhHhVdu3axfLly3n44Yf58pe/zPr16/n973/PzTffzO9+9zt+//vfT1vbHMfBNM1pO74QQojp4fF4qK2tnfCoqKiYsE4oFKK2tpbm5mbe8IY38L3vfY9bbrmFz33uc2zdunXCuldddRX33HPPhGX33nsvV1111WHb8t///d8sW7aMxsbG8WW7d+/mkksuoaKigkAgwCmnnMKDDz4IvDzN7YEHHmDZsmV4vV5OP/10NmzYMGG/TzzxBG94wxvw+Xw0NTVxww03TJhmViwW+dSnPkVTUxMej4e5c+fy7//+7xOOMTaVLpFI8M53vpPGxkb8fj9Lliw5IEjub9+ZFvfee++ko3S33nrr+Pr33HMPCxcuxOv1smDBAu66664J+3vmmWdYvnw5Xq+XVatWsX79+gOOuXr1ahKJBI8++uihP/TXSIKREOJV+chHPoKu6zz33HNcfvnlLFy4kCVLlvD2t7+dBx54gEsuuQSAVCrFtddeS01NDeFwmDe+8Y288MIL4/u59dZbOfXUU/nJT35CS0sLkUiEf/iHf5gwH9txHP75n/+Z2bNn4/P5WLZsGb/4xS/GXx/7on/ooYdYtWoVHo+HtWvXsnPnTt72trcRj8cJBoO87nWvm9bAJoQQ4vh044034jgOv/nNbyYsv/TSSxkZGeHxxx8H4PHHH2d4eHi8jzuUxx57jFWrVk1Ydv3111MsFnnsscfYsGEDX/3qVwkGgxPW+eQnP8kdd9zBs88+S01NDZdeeul4CeoNGzawevVq/u7v/o4XX3yR++67j8cff5yPfvSj49u/973v5b/+67/41re+xebNm/nOd75zwDHGGIbBypUruf/++9m4cSPXXnstV155JU8//fThPzTgiiuuOGB0Ttd1zjrrLAC+//3vs2bNGr70pS+xefNmvvzlL3PLLbfwox/9CIBcLsdb3/pW5s+fz7p167j11lv5xCc+ccBx3G43y5YtY+3atUfUrldLbvAqhHjFEonE+EhRIBCYdB1FUXAch7/5m7+hsrKSBx98kEgkwne/+13e9KY3sW3bNiorKwHYuXMnv/71r7n//vsZGRnh8ssv5ytf+cr4tIbPfvaz/PKXv+Tuu+9m7ty5PPbYY7znPe+hurqac889d/yYn/rUp7jjjjuYPXs20WiUrq4u3vKWt/BP//RPeL1efvSjH3HJJZewdetWmpubp/6DEkIIcUzdf//9B4SAT3/609xyyy2H3K6yspKampoDrhlyuVy85z3v4Yc//CFnn302P/zhD3nPe96Dy+U6bFs6OjpYuXLlhGWdnZ28/e1vZ8mSJQDMnj37gO0+//nPc+GFFwKj0/YaGxv51a9+xeWXX86//Mu/8K53vWu8GMLcuXP51re+xbnnnsvdd99NZ2cn//3f/80jjzzCBRdccNBjjGloaJgQRD72sY/xf//3f/z85z/n9NNPP+x79Pl84zdX3blzJx/96Ef58pe/PN7+L37xi3zta1/j7/7u7wBobW1l06ZNfPe73+Wqq67ipz/9KZZl8cMf/hC/388pp5xCV1cXH/7whydt6+Gu6XqtJBgJcZJIZIv0pgzqIl5iQc+UHmvHjh04jsP8+fMnLK+qqsIwDGD0rNjq1avZsGEDAwMDeDyjbbrjjjv49a9/zS9+8Yvxizdt2+bee+8dvzj1yiuv5A9/+ANf+tKXyOVyfP3rX+ePf/wjZ5xxBjD6Jf/444/z3e9+d0Iwuu2228a/jAFisRjLli0b//mf/umf+NWvfsVvf/vbCWfXhBBCTI1j2TcBnH/++dx9990Tlo2dhDscx3Emvfbm6quv5owzzuDL
X/4yP//5z3nyySePaLp2oVDA6/VOWHbDDTfw4Q9/mIcffpgLLriAt7/97SxdunTCOmN93Vjb58+fz+bNmwFYt24dO3bs4Kc//emEdtu2TXt7Oxs2bEDTtAl946FYlsVXvvIV7rvvPrq7uykWixSLxYOe9DyYVCrFW9/6Vi6++GI++clPAjA4OMiePXu4+uqrueaaa8bXNU1zvPjD5s2bWbZs2YRrlvZ9//vy+XxTfv2yBCMhThK9KYPt/VmAY9L5wIE3U3vmmWewbZt3v/vdFItF1q1bRzabJRaLTVivUCiwc+fO8Z9bWlomVOypq6tjYGAAgE2bNmEYxoTAA1AqlVi+fPmEZftPWcjlcnzhC1/g/vvvp6enB9M0KRQKdHZ2vvo3LYQQ4ogd674pEAjQ1tb2irdLJBIMDg7S2tp6wGuLFy9mwYIFvPOd72ThwoUsXrx4vJT0oVRVVTEyMjJh2Qc/+EFWr17NAw88wMMPP8ztt9/O1772NT72sY8dcl9j/a1t23zoQx/ihhtuOGCd5uZmduzYcdh27etrX/sad955J9/4xjdYsmQJgUCAm266iVKpdMT7sCyLK664gnA4zPe///3x5bZtA6PT6fYffdI0DRgNdUdqeHiYOXPmHPH6r4YEIyFOEnUR74TnqdTW1oaiKGzZsmXC8rHh+rFhddu2qaur489//vMB+4hGo+P/3n9KgqIo41+oY88PPPDAAVWAxkahxux/huuTn/wkDz30EHfccQdtbW34fD7e8Y53vKIvfCGEEK/eseybXotvfvObqKrKZZddNunrH/jAB/jIRz5ywGjUoSxfvpxNmzYdsLypqYnrrruO6667js985jN8//vfnxCMnnrqqfHp3iMjI2zbto0FCxYAsGLFCl566aWDhr8lS5Zg2zaPPvro+FS6Q1m7di1ve9vbeM973gOM9rnbt29n4cKFR/w+b775ZjZs2MCzzz47YYQsHo/T0NDArl27ePe73z3ptosWLeInP/kJhUJh/G+Hp556atJ1N27cyDve8Y4jbterIcFIiJNELOg5ZiNFsViMCy+8kH/913/lYx/72EGH3FesWEFfXx+6rtPS0vKqjrVo0SI8Hg+dnZ1HPDVgzNq1a3nf+97H3/7t3wKQzWanfH6yEEKIlx3LvglGK7L19fVNWKbrOlVVVeM/ZzIZ+vr6KJfLtLe38x//8R/84Ac/4Pbbbz9o4Ljmmmv4+7//+wkn9Q5n9erVfPCDH8SyrPERkptuuomLL76YefPmMTIywh//+McDQshtt91GLBYjHo+zZs0aqqqqxgPbpz/9aV7/+tdz/fXXc8011xAIBNi8eTOPPPII3/72t2lpaeGqq67iAx/4AN/61rdYtmwZu3fvZmBggMsvv/yANra1tfE///M/PPHEE1RUVPD1r3+dvr6+Iw5G99xzD3fddRe/+tWvUFV1/LMPBoMEg0FuvfVWbrjhBsLhMBdffDHFYpHnnnuOkZERPv7xj/Oud72LNWvWcPXVV/PZz36Wjo4O7rjjjgOO09HRQXd39xGFvddCqtIJIV6Vu+66C9M0WbVqFffddx+bN29m69at/Md//AdbtmxB0zQuuOACzjjjDC677DIeeughOjo6eOKJJ/jsZz/Lc889d0THCYVCfOITn+Dmm2/mRz/6ETt37mT9+vX827/923hVm4Npa2vjl7/8Jc8//zwvvPAC73rXu8ZHoIQQQpx8/u///o+6uroJj7PPPnvCOp/73Oeoq6ujra2NK6+8klQqxR/+8Ac+/elPH3S/Y+FK1498TOEtb3kLLpdrQjVUy7K4/vrrWbhwIW9+85uZP3/+AeWrv/KVr3DjjTeycuVKent7+e1vf4vb7QZg6dKlPProo2zfvp1zzjmH5cuXc8stt1BXVze+/d1338073vEOPvKRj7BgwQKuueaaCeW893XLLbewYsUKVq9ezXnnnUdtbe1BR80m8+ijj2JZFpdeeumEz3ws3Hzwgx/kBz/4
Affeey9Llizh3HPP5d577x2fshgMBvnd737Hpk2bWL58OWvWrOGrX/3qAcf52c9+xkUXXcSsWbOOuG2vinOSSaVSDuCkUqnpbooQh1UoFJxNmzY5hUJhupvyqvT09Dgf/ehHndbWVsflcjnBYNA57bTTnH/5l39xcrmc4ziOk06nnY997GNOfX2943K5nKamJufd736309nZ6TiO43z+8593li1bNmG/d955pzNr1qzxn23bdr75zW868+fPd1wul1NdXe2sXr3aefTRRx3HcZw//elPDuCMjIxM2E97e7tz/vnnOz6fz2lqanL+9V//1Tn33HOdG2+8cXydWbNmOXfeeefR/mhOaof6vZXv4MnJ5yJOJCd633Q8+bd/+zfnoosuOqJ1D9aXzXSGYThNTU3O448/fsj1DvZ7+0q+fxXHeQVXPZ0A0uk0kUiEVCpFOBye7uYIcUiGYdDe3k5ra+sBlWuEOF4d6vdWvoMnJ5+LOJFI33T0mKbJV7/6VW644YYJRYYm8+c//5nzzz+fkZGRVzRl72S3bds2/vSnP/GhD33okOsd7Pf2lXz/yjVGQgghhBBCTAFd11mzZs10N+OENm/ePObNm3dMjiXBSAghhBBCiGl23nnnvaLy1eLok+ILQgghhBBCiBlPgpEQQgghhBBixpNgJIQQQgghhJjxJBgJIYQQQgghZrwpD0Z33XXXeNm8lStXsnbt2iPa7i9/+Qu6rnPqqadObQOFEELMONI3CSGE2N+UBqP77ruPm266iTVr1rB+/XrOOeccLr74Yjo7Ow+5XSqV4r3vfS9vetObprJ5QgghZiDpm4QQQkxmSoPR17/+da6++mo++MEPsnDhQr7xjW/Q1NTE3XfffcjtPvShD/Gud72LM84447DHKBaLpNPpCQ8hxMyzdetWamtryWQy090UYPRGfYqikEwmj3ib8847j5tuumn855aWFr7xjW8c9baN6ejoQFEUnn/+eQA2bNhAY2MjuVxuyo55PJC+SQhxvJvq738xuSkLRqVSiXXr1nHRRRdNWH7RRRfxxBNPHHS7e+65h507d/L5z3/+iI5z++23E4lExh9NTU2vqd1CiMN73/veh6IoBzze/OY3j6/T0tIyvtzn89HS0sLll1/OH//4xwn7GvvjXNd1uru7J7zW29uLrusoikJHR8ch27RmzRquv/76w95Z/Fg588wz6e3tJRKJvOp9PPvss1x77bVHsVWHtmTJEk477TTuvPPOY3bMY036JiFOfn19fdx44420tbXh9XqJx+OcffbZfOc73yGfz09388RxbMqC0dDQEJZlEY/HJyyPx+P09fVNus327dv5x3/8R37605+i60d279nPfOYzpFKp8ceePXtec9uFEIf35je/md7e3gmPn/3sZxPWue222+jt7WXr1q38+Mc/JhqNcsEFF/ClL33pgP3V19fz4x//eMKyH/3oRzQ0NBy2LV1dXfz2t7/l/e9//2t7U0eR2+2mtrYWRVFe9T6qq6vx+/1HsVWH9/73v5+7774by7KO6XGPFembhDi57dq1i+XLl/Pwww/z5S9/mfXr1/P73/+em2++md/97nf8/ve/n7a2OY6DaZrTdnxxeFNefGH/Pwocx5n0DwXLsnjXu97FF77wBebNm3fE+/d4PITD4QkPIcTU83g81NbWTnhUVFRMWCcUClFbW0tzczNveMMb+N73vsctt9zC5z73ObZu3Tph3auuuop77rlnwrJ7772Xq6666rBt+e///m+WLVtGY2Pj+LLdu3dzySWXUFFRQSAQ4JRTTuHBBx8EXp7m9sADD7Bs2TK8Xi+nn346GzZsmLDfJ554gje84Q34fD6ampq44YYbJkwzKxaLfOpTn6KpqQmPx8PcuXP593//9wnHGJtKl0gkeOc730ljYyN+v58lS5YcECT3t+9UinvvvXfSUbpbb711fP177rmHhQsX4vV6WbBgAXfdddeE/T3zzDMsX74cr9fLqlWr
WL9+/QHHXL16NYlEgkcfffTQH/oJTvomIU5OH/nIR9B1neeee47LL7+chQsXsmTJEt7+9rfzwAMPcMkllwCj1wxee+211NTUEA6HeeMb38gLL7wwvp9bb72VU089lZ/85Ce0tLQQiUT4h3/4hwnTtR3H4Z//+Z+ZPXs2Pp+PZcuW8Ytf/GL89bF+4KGHHmLVqlV4PB7Wrl3Lzp07edvb3kY8HicYDPK6171uWgObeNmUBaOqqio0TTvgDNzAwMABZ+oAMpkMzz33HB/96EfRdR1d17ntttt44YUX0HX9gOk3QogT04033ojjOPzmN7+ZsPzSSy9lZGSExx9/HIDHH3+c4eHh8U7sUB577DFWrVo1Ydn1119PsVjkscceY8OGDXz1q18lGAxOWOeTn/wkd9xxB88++yw1NTVceumllMtlYPR6m9WrV/N3f/d3vPjii9x33308/vjjfPSjHx3f/r3vfS//9V//xbe+9S02b97Md77znQOOMcYwDFauXMn999/Pxo0bufbaa7nyyit5+umnD/+hAVdcccUBo3O6rnPWWWcB8P3vf581a9bwpS99ic2bN/PlL3+ZW265hR/96EcA5HI53vrWtzJ//nzWrVvHrbfeyic+8YkDjuN2u1m2bNkRV2k70UjfJMTJK5FI8PDDD3P99dcTCAQmXUdRFBzH4W/+5m/o6+vjwQcfZN26daxYsYI3velNDA8Pj6+7c+dOfv3rX3P//fdz//338+ijj/KVr3xl/PXPfvaz3HPPPdx999289NJL3HzzzbznPe854MTSpz71KW6//XY2b97M0qVLyWazvOUtb+H3v/8969evZ/Xq1VxyySWHLQAjjgFnCp122mnOhz/84QnLFi5c6PzjP/7jAetaluVs2LBhwuPDH/6wM3/+fGfDhg1ONps9omOmUikHcFKp1FF5D0JMpUKh4GzatMkpFArT3ZRX5KqrrnI0TXMCgcCEx2233Ta+zqxZs5w777xz0u3j8fj4d0N7e7sDOOvXr3duuukm5/3vf7/jOI7z/ve/37n55pud9evXO4DT3t5+0PYsW7ZswrEdx3GWLFni3HrrrZOu/6c//ckBnP/6r/8aX5ZIJByfz+fcd999juM4zpVXXulce+21E7Zbu3ato6qqUygUnK1btzqA88gjjxzyGCMjIwdt91ve8hbn//2//zf+87nnnuvceOON4z8f7DPcsWOHE4vFnH/+538eX9bU1OT853/+54T1vvjFLzpnnHGG4ziO893vfteprKx0crnc+Ot33333+Ge/r7/927913ve+9x203Yf6vT0RvoOlbxLi0E7Uvumpp55yAOeXv/zlhOWxWGy8n/rUpz7l/OEPf3DC4bBjGMaE9ebMmeN897vfdRzHcT7/+c87fr/fSafT469/8pOfdE4//XTHcRwnm806Xq/XeeKJJybs4+qrr3be+c53Oo7zcj/w61//+rBtX7RokfPtb397/OdD9aFicgf7vX0l379HNln6Vfr4xz/OlVdeyapVqzjjjDP43ve+R2dnJ9dddx0wOge7u7ubH//4x6iqyuLFiydsX1NTg9frPWC5EGISuSFIdUOkAQJVU364888//4AqXpWVlUe0rXOQaUtXX301Z5xxBl/+8pf5+c9/zpNPPnlE87ELhQJer3fCshtuuIEPf/jDPPzww1xwwQW8/e1vZ+nSpRPW2be6WGVlJfPnz2fz5s0ArFu3jh07dvDTn/50Qrtt26a9vZ0NGzagaRrnnnvuEb1ny7L4yle+wn333Ud3dzfFYpFisXjQs5oHk0qleOtb38rFF1/MJz/5SQAGBwfZs2cPV199Nddcc834uqZpjhd/2Lx5M8uWLZtwzdLBqqv5fL6T+gJl6ZuEOIaOcd8EB06VfeaZZ7Btm3e/+90Ui0XWrVtHNpslFotNWK9QKLBz587xn1taWiYU9Kmrq2NgYACATZs2YRgGF1544YR9lEolli9fPmHZ/jMacrkcX/jCF7j/
/vvp6enBNE0KhYKMGB0HpjQYXXHFFSQSifELsBcvXsyDDz7IrFmzgNGKU/JLIMRRkuqGwdE/6o9F5xMIBGhra3vF2yUSCQYHB2ltbT3gtcWLF7NgwQLe+c53snDhQhYvXjxeSvpQqqqqGBkZmbDsgx/8IKtXr+aBBx7g4Ycf5vbbb+drX/saH/vYxw65r7EO1bZtPvShD3HDDTccsE5zczM7duw4bLv29bWvfY0777yTb3zjGyxZsoRAIMBNN91EqVQ64n1YlsUVV1xBOBzm+9///vhy27aB0el0p59++oRtNE0DRkPdkRoeHmbOnDlHvP6JRvomIY6hY9g3tbW1oSgKW7ZsmbB89uzZwOhJHxj9zqyrq+PPf/7zAfuIRqPj/3a5XBNeUxRl/Pt27PmBBx44oEiQx+OZ8PP+J8A++clP8tBDD3HHHXfQ1taGz+fjHe94xyvqD8TUmNJgBKMXwX3kIx+Z9LV77733kNveeuutEy4sFkIcQqRh4vNx6pvf/CaqqnLZZZdN+voHPvABPvKRjxz2njL7Wr58OZs2bTpgeVNTE9dddx3XXXcdn/nMZ/j+978/IRg99dRTNDc3AzAyMsK2bdtYsGABACtWrOCll146aPhbsmQJtm3z6KOPcsEFFxy2jWvXruVtb3sb73nPe4DRTnX79u0sXLjwiN/nzTffzIYNG3j22WcnjJDF43EaGhrYtWsX7373uyfddtGiRfzkJz+hUCiM/3Hw1FNPTbruxo0becc73nHE7ToRSd8kxDFyDPumWCzGhRdeyL/+67/ysY997KAj8itWrKCvrw9d12lpaXlVx1q0aBEej4fOzs4jnjkwZu3atbzvfe/jb//2bwHIZrOHvSWFODamPBgJIY6RQNUxm6YAoxXZ9r+AXdd1qqpebkMmk6Gvr49yuUx7ezv/8R//wQ9+8ANuv/32gwaOa665hr//+7+fcNbucFavXs0HP/hBLMsaHyG56aabuPjii5k3bx4jIyP88Y9/PCCE3HbbbcRiMeLxOGvWrKGqqmo8sH3605/m9a9/Pddffz3XXHMNgUCAzZs388gjj/Dtb3+blpYWrrrqKj7wgQ/wrW99i2XLlrF7924GBga4/PLLD2hjW1sb//M//8MTTzxBRUUFX//61+nr6zviYHTPPfdw11138atf/QpVVcc/+2AwSDAY5NZbb+WGG24gHA5z8cUXUywWee655xgZGeHjH/8473rXu1izZg1XX301n/3sZ+no6OCOO+444DgdHR10d3cfUdgTQojDOsZ901133cVZZ53FqlWruPXWW1m6dCmqqvLss8+yZcsWVq5cyQUXXMAZZ5zBZZddxle/+lXmz59PT08PDz74IJdddtkBU98mEwqF+MQnPsHNN9+MbducffbZpNNpnnjiCYLB4CErqra1tfHLX/6SSy65BEVRuOWWW8ZHoMT0mvJy3UKIk9P//d//UVdXN+Fx9tlnT1jnc5/7HHV1dbS1tXHllVeSSqX4wx/+wKc//emD7ncsXB3p/WIA3vKWt+ByuSaUO7Usi+uvv56FCxfy5je/mfnz5x9QvvorX/kKN954IytXrqS3t5ff/va3uN1uAJYuXcqjjz7K9u3bOeecc1i+fDm33HILdXV149vffffdvOMd7+AjH/kICxYs4JprrplQzntft9xyCytWrGD16tWcd9551NbWHnTUbDKPPvoolmVx6aWXTvjMx8LNBz/4QX7wgx9w7733smTJEs4991zuvffe8SmLwWCQ3/3ud2zatInly5ezZs0avvrVrx5wnJ/97GdcdNFF49PKhBDiRDJnzhzWr1/PBRdcwGc+8xmWLVvGqlWr+Pa3v80nPvEJvvjFL6IoCg8++CBveMMb+MAHPsC8efP4h3/4Bzo6OiatTnkwX/ziF/nc5z7H7bffzsKFC1m9ejW/+93vJp0qvq8777yTiooKzjzzTC655BJWr17NihUrXutbF0eB4rySiecngHQ6TSQSIZVK
yX0jxHHPMAza29tpbW09oHiAeGXuuusufvOb3/DQQw8ddt0///nPnH/++YyMjLyikamTXbFYZO7cufzsZz8bLwM+mUP93sp38OTkcxEnEumbxInoYL+3r+T7V6bSCSFOCtdeey0jIyNkMpkJVYTEkdu9ezdr1qw5ZCgSQgghTlYSjIQQJwVd11mzZs10N+OENm/ePObNmzfdzRBCCCGmhQQjIcSMc955572i8tVCCCGEOPlJ8QUhhBBCCCHEjCfBSIjjgIxeiBOJ/L4KMTPI/+viRHI0fl8lGAkxjcbuqp3P56e5JUIcubHf1/3vCi+EODmM3Q+uVCpNc0uEOHJHo2+Sa4yEmEaaphGNRhkYGADA7/ejKMo0t0qIyTmOQz6fZ2BggGg0Ov7HkxDi5KLrOn6/n8HBQVwuF6oq59HF8eto9k0SjISYZrW1tQDj4UiI4100Gh3/vRVCnHwURaGuro729nZ279493c0R4ogcjb5JgpEQ02ysA6qpqaFcLk93c4Q4JJfLJSNFQswAbrebuXPnynQ6cUI4Wn2TBCMhjhOapskfnEIIIY4bqqri9XqnuxlCHDMyaVQIIYQQQggx40kwEkIIIYQQQsx4EoyEEEIIIYQQM54EIyGEEEIIIcSMJ8FICCGEEEIIMeNJMBJCCCGEEELMeBKMhBBCCCGEEDOeBCMhhBBCCCHEjCfBSAghhBBCCDHjSTASQgghhBBCzHgSjIQQQgghhBAzngQjIcSxlxuCnhdGn6djeyGEEGI/iWyRjd0pEtniMd1WHD/06W6AEOI4khuCVDdEGiBQdVR2mcgW6U0Z1EW8xIKe0YWpbhjcPPrv/Y9zsDbsu/xQ2wshhDipTNqPvFaT9DW9KYPt/VmAA45zsDaMLU8VSgykS5NuK04cEoyEEC+bgsAxaUcTaQBgWK+ipztFvStLpTkELi/0vAiZLvCEobINak8ZbcvetiULJXqdGHXBOUT37kcIIcTJ61CB5VWbpL+ri3gBqHdloadjtI8qB/HqKk+3J+hKFljaEKGpMjAekHpTBrs7O2lQEyyINVMTkZN1JzIJRkKIl40FjdcYOPY9s+bVVRTFwasfOHO3P11kexq8WgeV7IFiHoY2jz57/ZBPjK7oi46GpuqF9OZDbEl7sOLziAYih27IFIyACSGEOLbGAsvY86uyf3/g8gLa3ueJ1EwPZHcyTBPbrRbypTJP7UiQL1tU+FwMZcrs8GmcM7d6tJ/zp4kbXYT8FXAEwW1KRsDEUSHBSAjxskDVUQkQY2fQvP405UAdLiNHtmMHTw/WAQ4L838lXOqnwR2HytdTGW4BMwh9L0K6H3xhiC2E2BwoJqHraWg8HeacS022iOUzJu0gD+hs+l6CrqdJVS1nT/Q06YSEEOIEFAt6Xvt39z6zDrq8LhqNNJpRJLF5PZ4tT5OpWMSgdxa7BnN0Kh7OrJ5DZaieueUgm3qS5MsmfrdGU2WARLbI1r401SEvZ7VVEWubD6ngwU8q7hfKtvRl2LR9F6+rzBNbvFhO3B1HJBgJIV6TRLbIQF83dUqCaG0rBKr2OYPWjhV049EGUbqeZr01D7eZoc16DrwaYXU74WCAHc4beazfYaVdRVNVK3hjEKyCVCfseZZCeoBUMovPKKEGa/Em0yiuFgju7YT2djoD+RDb06OdZyzoAcUBRyGRLbO9eJSnYQghhDhuDQ90M9zbQWVdC5U1DeOhpTcfYnt/Fi0co0gjQ+1P0lTewe5gmid8HgzTwqvrxIIhTle2U+ko5Hxx5tWFiXpdZAolXupO0psuouaGiCYd1GgTHj2CvWMrlXXF0ePx8sm6RqOdaHbnaMMCVYBDsNiPL9UHqQoJRscRCUZCiNdkoK+b9EuPEPaViPrckE/gal+HZnpwdJWoy0Ir7WHEzFMRdOHLZHDlh8ECdB2yCTYWU6zdPsRgqJLVTasx80lC7evQBzfhz/dh2iZWOouV3EYhtoIRK0TWKNOfLo4GMiUHmT7q
gnMYDreQKpRJZIvE4osBiBXKLPAUDzv3W6Y3CCHEyaGzfTv9O18ga5SprGlgR9bDtk6FmNmOR68mpzXQUQyjqmGs6CxKZZ2B3m4GCTGr0o8nNwRDz0I5yyJvPdUVTYzolTy1pYuBpJ+BUoDwyE6e6OslOmsZsaCbisQLWP1b6J99PjW1DePXRmnhGNFQDgpJyA2xoDaMbsylkAsxrFdRebg3I9PCjxkJRkKI16ROSRD2lQj73JDsgJ7nMfs6yRkeTCtDKlRJ1l3JbrOZfr0FVbGpNDdga2XKaoRA94ucY22kpJ1BwmriLxu3E0rvpMrswaOFiZkjqJ4YimNDZhB3ZY6KlrNIaFUkOncSVLuIegxI7ECtsegb6OfFTIj04rlcPNsNmUEixSSRiprJ537v0+H0plxH/wJfIYQQx1wxUMewL4NRjtKxvov+VAFj29MouefQK2exfuRNxAs7mafspEr3oTDA2XqWzU4rzY7CU3s8PF+IcW6Vh3D/ZszEWlxqkKWWRlSppeBxsd2oYE85ijo8yILGRUTUGkrZYbo6dzJMiO6RAh2JLLXFEfp6HyakFQkoDtScQW85SMZqRS8HJw1GE07UpaUS67EiwUgIcVCHHEHZGyiiLotovJZ0LsvQi4+gYpJ1V1FO9aAldzGYzdFfUUO/HmRHf4ZCdx81xiBZ3SDg6idY3k7ILrMqvIXn9cVUpTdRaQ3jdynodhHNMbENP7nIHDKOhj+XpS2zjhp/PX01LWCYJF76T0KpLVgDHTilxZTMNoZzsyA1OHqNkic6eqZtsrNu+1Qmqoss2Pv8Gi7wFUIIMXUOM3oy1m9pgSp8LQF6ezpJbHmchqoIteXtVOe2opt7cGkqSqCKiDWM09NHdaqHN5om9Z5T2JOo5s+FVgaVSqKDW2kzt1Ms5DC0ECXdT7XVh6Y45L0rKflizDZeoqPdw3BwIXWeESI1TWSTA7y07nl2laLUeTfRUNpAKRgg4Cj0pgwyhknIp1MX8U7a106oxBc9OoWRxOFJMBJCHNTBSqQmskVSm54hnnyegA7khshZIZIlDynPLHpKfkKFXgyngWG7gcZcN2cZfyBCG2kjRcQcwE2JsJkHClg4qMl2ZjOAjonir8Gn5/HlkqiWgW0qWEaWTtccWrKPYezJ4A7W4V78ITIlE/fwbkwzj8eTYVk4j7umgUWtMQgGgdNe7kx2/Gk0KHHayx3qPpX4YoGjcIGvEEKIqXOY++DteWkjj/W5GVFCeN0u6kuDxNjDfHoIabtxSOI3kwSyL7Ahv5DHUz5eXx6gstSPgkldXmWAMsvNDKaicUr6WYLk2UOcbe75pEwv/vIgEUZoLP2VsOFQpSbZM5TmJ5abhbUhrg71MdDTQzi7A3epjs2eFupCZRbNnU1F7SnUOV5orhg/Cbd2+yAZw4TmivE+aEIlvkBERoqOEQlGQoiDOliJ1C19Gfp3j3BauRe12EmpbKM2nYOvdSGJPbtJ9LajFw10p8zK4tNU5oaIMEIF2yjgQ9UNVBu8qkEGN4NUU0Kjjn5MVDrzlfiqYoSK/ViWg+2UCToDVBcdVAxKxSxOoczAM7/A0b3UGsNYShmrkKUhlmNudYJhJc229l7qkusI+QNglqCYgnDjeBgaLRyRpk4pET1M5W8hhBDHgUPdVqLvJSLdf8IZamSnUc8if4ZZCxtQq5ZT6HqKqlwXilmgaOp0DmZJWB3Eyj10uXyUqCTKMH5SrChvRHfZKGio2BTRcFPm1PKLPKctpocojeZuFisd6KUSJj7qUTij/BcW9A8ReSbFVk5hT6maKrMPr1Fie+P5ZH1NvCFXpK/zJZ4c9DJ71iwyRZO+ZJHaqOflvjY3hNbfjubEZJToGJNgJIQ4qH1LpCayRbb0pQGF7uEcL5Xracqr+EsFcrYLZXAH+uAOIsNDNBoAJoutzURJ42ChYOHFwo2B5pRxLNAtsFFwKFFFDp+ep2Tq1NKBNqRjYaBRRMUEU6NCH8HCRQ4/
um0RzuzAAcpKEUexUcqD2Hueo5DYSof6J/KZJHFzIyXNIRc7BVfDqQQXLJ1wl/P+seuUfG45IyeEEMe7/W8rkRuC/o3gKJDqJG71Md8pMmgaNBsDpDZvo09roDKTIla08GBTpoRWHmGBNUiMFCXTja1ZuE0TLyUUMqhlB8ulYOAha3pp0rvwlG2C5T56zEoCTg7HMfEqBcqqjYnGPL2bFdZLBBMF6tUidSylxh6glOqhfZNF79a/4vLtoZQZ4vniQtZuH+CceBkt3MDi+kUvz1hIdZPZs5EeuxHLF5OZDMeQBCMhxAEONt/5r7uT5IsmPnOYSHmQflcD9d4S3nQ7ns7fk3O8lJU4QVcUn5EgSBK9XMJUFHK6Fx0b1bRQUACwUACbBobwUsI2FXJ40TEpodNNlBpM3CjkcKGVTYKuAkU8OLjxkkfFJOl4cTsO1STQMxnIabSY2zBw4yGNrbpImx2QzWEHayj7Whjo6yZs9OGuqSDki8pZOSGEON4d7DrRzmfJZ1OkyxZhx2FOqEyXN0B6wCQ89Cytikoq2MJOpZnZjFBdHiJOghQBDDz49SIRO4OCRQkNLyYKoJUd/BgEMXAYLaYaJM8cvUje1Cg6bqrUNF6yVJqDdBNlRI8QoESrvYV6OhkhyPpyG7lSglqtByPZTwE3djlFg/0M3mIaNdpEoilCW7CKZF87AwUFb9V86r211Mg1r8eUBCMhxASJbHF8vnOqOkjEZ+DVVXIjfZxZegJtcDNabgDN6yNfdxqDladR+ZfbCJhJ3LhQcWObCsOOizbTQgcUx6EImKaK6ljY6JTRMdFJ48NLGQ0TBxUVB1AZcKIMKDVkcdPAIEXHRYQ0atnE43JQcMjjpmjpjNg+Iq4CFlBSAyiKG4+awe0U0dAwFZVgoRul0El2vYZZ0khmbIJBmDdvBdQvm94PXQghxKHlhl6+TrQwb/w+dSN6Jdus2ezqfBEKedqa5zJ/QStvLPv5c/dzLClvJuLKkclsp4CXaobxYwLgw6CoeQhbGbx2CQtwuSxUQAUsCxxn9N+ODaaq48ZGpYxL1yihoWCjWdDmdNFLjOesedRoI8RJYpMjQoqC5mXQ1Uq4bLLFaWGr1YitQmtpF55yD4oxQGJnA38cjFGZ20XCP4f4/NNY3CBzvI81CUZCiAm29GXY2puhsdKHkeyjc2M7rsoGKswEy1LrCOU2YJZyaJ5aditL6Nn8KI3p7ahlCx8WdfQTI0EOLxYa1t4JcVYZ/BT3Bh8HBwUTDRuNHsIMKxW4nCJRMmhYhOw8mt2Pg42lj35VJQlTooCfMi4gi59tWhNhrUAtGYro7LCaweWhTd+DNxChVEzh5NP4yWMDhVwf6ku/wGVHsZsX0d2xmRc6FWriDXjdutzDSAghjkd9L8HARjK+ehI9PdQMPYU/GCVRcxG/S89jQ1YlWhqkoTCHS0eS2Lv/RHNxI9WkcJUdPK6B0ZNvpr13rsJoCKqw0njs0REiHcBxcEYnNWBrkCJKmBSK42CP9kj4KAAuTPyYaOiahV8r0Wr3MUANHkwMfGQIolMiQpbL3M9hqmUeLS8mEKjEVRiipjyE6li4rDx29wvs7PAz4LYoV0Zp9DwKRgTCtVA25B5Gx4gEIyFmuAOnzTn4PTqVATfFnj24h7fiaAr9gVrW5SpZ6a6myhskkxyEvh+xsDxAgCyaMnoCL0weRYEqJ43DaOeTw4dnbwwqo2PgIY2PAj4M3UtOiZDATTO9eMoWDg6G7WaEAF4MFMUhh5ddNKBiM4thPJToNUOE9Cxhnxet5EG3sjQ4e1BLGgpFUmUHRVEIYmMRIOeLM6LXkh7KUtLBu2cjnt5NGMo21jetpireSKpQppLu0RvH1rZKRySEENNh/2lze0eIknmT3qKBvwz+UpFQ/zO8IV3AjsxlVy5OpOsPFNufw3EMvKZBGRUNC6cMRXTAxoeJo4IbC822KOLBSwk0BxSFEl50yoCKrnsomH78Vg50BXXv3AYN
hyw+fBRQsNCAejXFm3ieMGnShOhwz6NUKhGnD1+uBwMPy1jHysIL9BBD1cpk1CAxv5eW7F+ZW0oxpMfZXrTpzxi4+/3EGucTJgeDUYbrzqSnHJQTeFNIgpEQM9z+JbkX1IaJ+NykCiW6vXHyYYM0lQz3Z1g51IHJLnCVIZukwc6gOg6OCWUXKIBjAdroFATs0SkIZVzYqLg1i4QT4QW7jW5XPfXuIjgqZVeI6tIeTFMnQYgUQXa42rBUL4vsLbiwcbkUhsth+qmi10pTZY+QdPzUlZPkKdERWEhrcSu6U6aIGywTl5NDcTTAwUDFMMpkzDxJ241d1oiM7KFKz3JacJgyUcpujXwiS1/PDiyjA2XpuUROuXD6/uMIIcRMtX9Z7vhiyAwSTQ9RjsTxlRfB0PNYnZsJJsv4nCI+p4EVxpPMs3egYGLgIYePAjYqFiXcuCnjxkK3HVQNFBcMlz3gUghQoqwESXubMM3i6CiR5iNoGti6CweVghbDUUqUTBM3JgX8+DDQgSAlPCTQMfFToN7pIx2oY7cxD5eVx0WeVrpwY5HDT1GFOEnC+RJhZQiPWkIzbUpZi03GAp4t1dMWquU0bxcM7qCva4Tn1FNZNHc2Z7VJMJoKEoyEmOH2Lcm9oz/DtvZ2Fgez1NfMIuKbw57KOjL9WU63n2Oh1kU410e+YFPEgw8NHRMLKJoaJd2L6th4ykWSBNGxcGsmu6x6nrYXca5rG1m8PGcvYru6hOVqF16jl1A5j98boUttYAidnLeBSLyR4MB6dhSa2ONUMaJEGXLFKONCt0aoc6UZcGp4TmmlVR8i5VvKQO0b8I9sQy2lmV3cRsjqxdY8FMsKPpL4nAxquUyFFsTtFNBNAw0FX2mAQu9TVIU84Nh0ZXdgF3Oke7YRaVkuo0ZCCHGsjRXEcXlJvfQIiWyZitoFVFTbhCIN0Bcgl9rDDtXHk2WNPSWdVfpThLQctm3jxcZNjjIuHFXBZ5sE9SK2olJw3GgK+Chio1B2BTFVE1XJ4NHdRJwUjpkn44qiqCXcbj+GEybnr6XsieHJd+LPdhMhh6H4KTkRVLKYqKQI4FPB9oTwOirRQImaWIT+lE0i72ZL2Ycbm5I7TEtpG6pZIO0E6XOFUPFQNh2iDBEyexiwgmzvr8PVdioBI4uR6cdy76FjqIYFtSEZNZoCEoyEmKH2nUI3doHn2u2DdO3YTnV4gLDPRcppwV1McE5oiLCVJKAa2I5DGYV+KgjgJaYMYblUUoQolF1EyeDHwUFlF/V4VYWN7tPp8S7mebWByvQmkoTYpTRS5eQ4hw2UcPGSsoC0p4J6cwurXBsIjayjvWDT5VTxqH0+saCbS/0vkcob9BXryflcROrO5onhelIkCFQ2Ew4W0Qc2ELeGsF0eRvQGFE8ANd1DwB5BB9yBMJongsewSJe95BQvjuXFKiiUiirNeoamqhDZgodQuW+0DOzs86b1v5UQQswY+06hq18GPS9Q2PUk+WwJ23sxFW3zSXWsZySRIO+Zz0gow0i6RK25m+VsoYoCGVeEUrmAotm4VQiQxW1bKDbYmo2l+Ejp1US1IoruJmiW8VglwIVtl3EcDVVT8Sg26C40B7IE+Kt7OTWpbdSUe0dLCGk65dg8esoBqjMvYWk+uiOvoz4epzq1AXephKm5yRtpKjAY8dWR1SCth1Bsg0zJT4JKuqlCVwMYFfNxGykWF56i3h4hrmwgZ1VQEz+L9SMr8dJP2R2ja6TAlr40Z7VVT/d/rZOOBCMhZqj9p9ABLK6P4C7Oxa9X8cSgh2cG+mkaeZZQ6a/sGBpgXjGFT1HQHY0qrYBqFbDRyeFlmDBBcpQYrdQzrIbZqbWS0OqwFBcr1J0MF0ySZTdVepYGd5Y5epJGzaZHryJbczadQzla7XW05TfgUi26mM9GZS66y8/ccJFow1I6BizMkoW3Zg6GU403EmePUUHQcPE650UWsouANUhO8bBLacSnhqjVh7BLGjY2nuIw
hmPTR4wdrmZQdWZ5stQGfUTMXlA0gv4gwXgtuaFOdg3lidQU5cycEEIcC/tPoYs04Jt9BsFEggo9D30vkXrp9yT7O/GoDiuMXnSjiofsU1jrLGW2NkB9uZs6yqBqaBRxY4ILyqaKiY5b1wgqBpamQ6AWT2oXLjNLSXNjKj46PG2EggFCoTBBO4M9kEUvZEmNpDBtD/NtFbemY/vCZKLzKQbnktljo9hlmuvqqZs1F3Yn8I500pMrsNuooOiqRrWLzLN2kyVMyglgK27Sepim8hCGXaA7W8WCwACtWpkhy8MmGhnxz8WdLWL5YiTdFcTDXlJDfQQS/VC3WGY0HGUSjISYofadQjemLR6iLb6Ujd0pEp0j+N15fC6NVP8wajFBHhdFTyUulxtf2UC1SqgYRLBwY5JzeSgQRFGDWGo188JuBhSF/pKG5Q2zW2um0vERdxmsrCigKI0kCiPscC0mqUQJx0IMlhfQa/Vjqh6edp2Fr6yxUNlMlemh3VpJX2w2NWqWjfleNgwUqHVvwavVMJQqk/fnCFTUoDmV7Bjx8kK5kQpc7FI8LERlNnvwmiO4zSRZaujzLqY56KLB6SA8MoRZ2E0+1oq/shGsIv3RU9lsNTErZUgwEkKIY2FsCt3Yc6CKyCkXEul5YTQwKbUU/fU4pW2EjXb8qkENKtWeMr3qPHrMWVxACjdF+qxqZmlJPOQx8LBNn0VIsfG6PLg1Ba/XTbR2DppWpDRsk3VX4fhjOP75KOQIFzvxBiooVLZSzpm0FjN47SIoFUCOrOMll8lQHS5QUT+L0uBOvINPkrMSpLUaokofQXsAvzvG9vBpzClupqq8kWrdxHQS6FYGxbRR9RzYSbR8mYAxghpRGXI18lRuNpmOYfL5tcTqWsEVo2zZvK4yzxy6IFUhwegok2AkxAwVC3oO+sd+XcQLzRWcoccoZ7xsdikMbX+OlDXIZlOh0uMmqg1SRZkQoGOjqB4yrlZ6XI1E1CIj0cXMbqzDF6wnN2BiKBotySwVLacT9ypUmDqNxnYwqtEMP16XSkPUR4+1il9k6xlQqjglmOHC8nOoZp6XylHSViVnza8i7Kvn6fYKXL1PUW10oalp4sYgHquHgXCQnLuSB/SlFKwsKxlgU+RsIgbMyqcomQYWDkGyLDM3UGYuBbxolo5TyGNlcxTyW3HK60n4FuOd3zwhPAohhJhCgarJ/9jfJzDFgnNRFFAHghj5BLFclg9Y/8uIu4FOO0atNYxiOViORtrXyJAWIeGKk6ICX0DFjlShaRbULmQ4lyBmm2iNZ2D5mvAW+ik6DaQTG6myHLyeEL7q+VSlhwim+iCbZcSzmLQCdrmER7OJ+DX8tW/CH66FvvUM55JkjDRqYRh/OUfEkydu9hCzB4l5bEzdIqnEsBSNhNWCWcxTaSfQsIAy6bxGNuCi3pXEttP4RjQ0PU/e9FMwDWYHB3DmtsqNyaeABCMhxARj1x55dRXDtKmrbcD1hsv4X+9sHt2+lURRY5mnwDxvN31lPyFzmIiSJRtqw5p9Pkr3JrIKeFpPI9Oykq39GVwNKqG+F6izt9Lqq6f61Dcz3NtBaMgmY/g5XVFJBG1sr5+S3Uj3cAVO0aKppZZzG2vZM5JnfU+YnpwPpyvJmxbEWVgboYrFKJkKigP9+IwCQ6YfNVsgrxbxaSW8oTBhew91dh+xBeegJR0S6TSlZD9hYw+V5gCppEo61kBFdYhMyWJ3MUq81Ek0s41KpwMApflaNqakRKoQQky7fIJK06ByyekwUk86nyHw/M/wpAv4PQZGMI7m9KPlh5mlZvBGqvE1vR6lu5u4AnWz5tEYi4JZYqdWRf+ePWRLPuoXvoGmluXs2LGV/v4S4bDFIFUQryOsO/g9AfyUMRSTfP3rKM19K+rAJqqzmwlm2sHKQfNpEIqjmwrl9q3Y2XYc3U3I5dBU3EqkPICp+VA0LyXc5CKLsKrfSNG0yO/5M3qun6CZRytDrLSHeOVcCrZG
3NiFXdBwZcpEsrtQXANsM3O0RWYRrUVGjY4iCUZCiAnGrj1SFAdn7C53QDTWSIunkgbLYZWni8ru3aiRuRRdUfoTfQx6W5kdqKBx7qn0lzwktCq27x6mc9igNuIhHm6g0ummwVsiaA5R2TafHUBPXx/B4gC2WYG3spmQz0XZMolZDq3N9dAaw58tEjN7eW5jH92pAooCc2sirFg0j7rIUv7015fYvDWEWkhS6RlAdYVpjc+DVA+uzm4ayhke73gjL856P9XmX5k3/HN85AiSJFRKU05l0YsORVcDu5RmKuhFcQfwlsrEix0M93aw3WoBkGAkhBDTYfzaIw2wxp/DNXPg9PfCnucYdiLsspagB06llkHcic14fBopTz1aU5yo0UNUK0JiO7iCVPu9pHUY0NrAM4dIoIqKOSFO4TmsfhgqxHCXIByfA8UUZBN4PSEa56+CWBDcDdCXgD2PQXIPuP0QbSTcuJDGSCPlF0ZwlUdwBZpRMw6mGkYNQKl3I970Tor+EmdU/ZVg0Ed/MEcy1Y9Stiih4MoP4tI7Uc0yca2XnLuKv3paKBhFqjFwFcpk9mwk6nNLMDqKJBgJISaod2Xxah2okQbyroqXp5HNilIXqQXg6Y0mA9HltNUEWTp7Ds/v6iW9p5uolWD+4tPY1h9jffsIs2IKr2utoDHqw+uOU++KEzSHxm/YV1lXpJAaIq/XEKxppq02xNrtg3SPFIn53Xjdo19RvSmD/rQBOFR43bTEgsyNB+lPFfjTln4W1dUz211JYrCXOn83NUE/g4EKXtyWxKVreIwc9QN/ojfVxXxfN3VOH37yqIBGinzJg1Uq4wl4aNG2EFdTBKM1pF3NJFxxvLbO3HgQr66ysTslI0dCCHGMDetVDNNELBqmwmWDywtlY7Q/mXMuRJoJ7/wLC71u0vXnURPyonasRe1+miHLT319LXNGBqGchsgsqJpNOFRHa2wuvU6MmtrRaWmxoAelroXudIKQliJcXQu1p8COP0GyE4JVoPtGg1rXMzC0A1z+0X22ng3lIvRtIJIfhuZFgE3SiTFUM4vKShcBdYjhkWGMrEmk0Iln83ay3hqGlTr6CeAngYYJZoFCLoWuQs506MlYpNw1lCNu8HnxBQMoofjo59Dzwss3whWviQQjIcQEleYQlewBdxDqW8aXjwWBjd0pEk6YUOOZROZWQ9BDk68FT6ybuJJgWK/Coyssqg9xemuMioB7vCx4ZTAC7J0TnRtC73gUc7CbweBiWipGQ5dH1zi1MUxzbHTqWiJbJFUokymUsVFpqw3wpoVxAO79yy5e6smQL5l88Jw59KYqaDR0otmdVPoypEIKw744Vj5Jvd1PQ3GQWk3BMi0URm9ICwpFW6dgq1j5LA28gMceouiNMuRodCkKVd51vGHeHDYmXQdU8hNCCDH1espBtlstzNWDVNRHDlxBcQi4XSxpjMDsvWWsg6eRrKqm4MSIuXJQqgT/fKiZPxqq/DGi1fOJ7rObRLbI07sSBPq7aQubRHxNoyGoomk0gPmrR0NIPgHpHkh2gT8KC98Mza+HTb+BF/8bFBUWXgqNK0nmQ2TSHtIVQYiWyXd240r/mkCmGx2DLivGC656ApSw0PFQBgcqyRJ2crjsIsrITmJmlpXKNur8BqY9j23pUwim00SzO0cbL8HoNZNgJIQYl8gWGciHqAvOITrJRZ2jIaXE7OrghJvLxYIeYm2zSWQbWLt9kIxRZnlzBW3xEBu7U5OHiVQ3mWSCtBPE8MVJFcqkCmWMss1Zc2vG7620sTvFQLpI2Oci5NGoCnrZ0pemp3sP8dw2Mt4I1cEGtvRlSBdK5JQQdVozRkEhkNxCoTDAVu8pUGomTpqSamDqJfzlDDolFMWLZZfwkEMt5/BRxkWJVE6hAhM3fZSGHDJ/1WiqnI0WnkNNRDofIYQ4ZnJDNBrtaOHYgd+/Y/c9CtXBvIsmFiQIVBGdU0U0NwQ71oFjQ8Ws0VC0b0nwffSm
DOxUN2ElS9gfhqFdgAWNp8Hrrn55xVQ32A643BCoA90Lu/5MfttaTKOIXtmCv3YRFJI0pLfjKftRM7U835WmnMlSr5Vx6yqmE0ANV7PQGSZj6zxjLyFkpWnQhpltbcFNmUEq6LF9LLA2cYprC3pOYZsVYXu4TELVmOutpVmvonJqPv0ZRYKREGJcb8pge9qDFZ9HNHDgGbnelMFAusTceHDSEZPelEGmYBHy6QeUAz+gslukgcCc11PjxNAJMZAuUhN2UxP2kCqUSGRH7x00tl3ZMknkypiWzV93JwkO7+aM0BAXLqql1+/nf1/sxbBsZlX6qQzUU+p6gVMHX2CJa5Aq1WSnt56UaxZeEhh4CAZDVNrDlLODRK0ULkoUCVDUAii2TkGJELCzeDDZnRmmY9cWZhvDLDylBmS0SAghjp1UN9HsTqLV7gO/f8euPapeOHpD2INsTzEJnujE4DTJCcC6iBdtznzqlCpCSg6Gto9ut/+UtUgDNC4fHRnyhmBkD4x0kC2V6Q+fhmfR25hXzsHm3xAuG4QrWuke2oCyZxu9Sj1Fz2JKeFA1HU/1IqrSPYSxGfTNxs72ErS6cZlFUk6AdlcjBTWCpvaRJoKPEt7iEN7kdrKUeTZYj94YlGB0FEgwEkKM2zfEjFWn2/d6moOGnH23nxWdsM1By4KPnckDdvRnGMwUiYe8GKbN9v4sEZ8xvm0sOBqW+tMlmir9hH0ujMhcglYFza1zyaV1Qj6dZr+LZTGT8shu/py2SJcXcVaom3nKIM3FzahVZUYG9pAp9PNiqZY6T4C4lQLAVmrIuipQ7DJ1ygBBrYjj8YHiI+VpJGvFCHlbCEh5VCGEOLb2vbfR2AjRvgFl33UOuv1pE6/DOci0s7EZEDAbBrdCZhDqlx44yhSogmgLZAdGg1P9UnB58GlhPBUribUsgYEnwR2G6CxSFYsoJp+l1uqh0p1ld+Xr+XNqDknLT9DVwCzjflrTL3Km2k5B8eHFpqh7yROjyV2g3txInx3gAfVC5ugD+JQi1VqGRl8fVEblthJHiQQjIcS4fUPMZFPgDnXvoyN5fUd/ho09KRbXR2iLh8aXG6aN4yij5cEPEr4W1IaJ+NzjoWtjt5vt/RXo5SALar2AQrpQwpPbxiy62OgN8dv0afRqFu9t7CFkpwgFfSjJ3eSsEhnLImgbVKsucnaQnvCp5IwCc8xNKBRwKSY2IdwuldneNFogQEXlQe6vIYQQYurse2+jsRu97rv8cN/Lh1onNwR9L4HiQHzxxPXKBmC9XOQBJhlx2idwlQ1C5TyhStfoyJayGAppyA/S71Swu+JslufaacrtJMZm9NaLGDYDKLqX4h4Nl53HZxuMKE2kXLOo0xI0uRUo9IOdxKSB3cocdrraKJk2dYaXiKuKU1rnUikzGY4KCUZCnCQmG+F5LQ43OvRqbOxJ8Wz7CMCEYLTvsQ4WrsaX54agZwv1riqIB8e3HcwYbO3P0OX4Ob9+LnXNfuqKJeJ1YTLxenoH9jDbSePgwfaGcAyHZ83ZpPQwESVJTW4rcWuQMhZJQhSsIHoJYmaSSOE5Ms6ppAsmG3cMkS6UCPvcE66zEkIIsZ/9R3eOhiMZIXolUt3Q9TQ4CngrJrZz32NNFq72Lktki/R2p6h3VVFZvXDiyFZ+EPo2UBv3UWr7G9yunbArhT/WysJ4hPRgF8l8mRElg6N6UO0i9e4sBX8ETybDsOHQTZhGClRjcKn+JBlD40WrmdCwjekdIVnhp1LJQqZ39H3UniIn8V4lCUZCnCTG7j8ER6di2uFGf47UvoFt8d5KQov3qyi0f+iZrBMd20+j0T5ada56IU6kit6UQapQopQaZInSR6xxNhUtLVT1ZTjVGGFFSwXzlXYyahcVuX5SmoOiqDRoGbCg265imx3jb3iMCMOYuOlXY3jsHFhgWwZuCugjO1nXneOhzR3omsK8mjARn0uCkRBCHMz4vYc4en+oH8kI0ZEYCy4uLzSe
PjpitH/YClSRcEL0Jg3qnOKB3/d79zGQD7E97YF4kMqGvaFox5+gmCStV5AMLycUXzJaVMiYDeVV0Hg6PU4TPTYkCkmqHJW0uwa77CJgGwTzXXjMNGWK5KxKdmiN1JPGUxxGQaHFcfCaDpGBPYQ3bofSmWCWRt+HLyrB6FWSYCTESWIqRniOhn0D2+JombZ4AoJeIHTgygfpRBPZ4t5qdyZadWz0AtxIA73J0X3XhD2sqMgTNxKEKutJAOCwoqUCj6bwRL+HmN7MLjNG2R9kWwTihe2cUt6KVurlGbuFdhqoYQQXNiF7mAB5NEwM/DgohMxh9O7nyLoaaaj0UhFw4dXVqf74hBDixHW0R3eOpn2KNiTirx89ged4ie232iFPOvZvhM5nqa8+FSt+2sv97z7FHroDK9iCZ7TMeG5oNLg0ng4uD007fkW4YFKobMAsRAmUbLqMNnKpQWrsQRq1DEErzyJtNzY6ChYp00O/E8WlOziYKEaBQtIGlNEy5Nmh0bAnXhUJRkKcJI7WCM/RNiGwpToOffbwIJ3ovtXuamqrx6sS1TnF8X3HlPmQCo4HprHqeTsGMqzd7ZAvRajWNBa7woTdSXR3PV4rjWqkIe0nXQ6xi0aa6SFAFjdlFEBDJe2uxkbBxuG02ZW01YQwyjaGaY82cCqmiwghxInuaI3uTIV9+puxk2xwYPg55ElHRwHFIeJ1E2nYZybEPtce1TghLJ/xch+Y6RutoDe4jWD7I7hKeYbrzkOpa2N4ZIhMOUciUoPmylE7nIGCTYQcOmUytpcdTpw+YlQqZWrIYGgqw4E5NM1fPXotVDm/99qooz/FfiaQYCSEmFITAptykLOH+waLScqtTqh2p2Renm6376jTPh2wN5tBURz85RFWuHoYDGv0W0EaCnuoT21EL/Rj2Q6Jcpmy6iegOzTYIziWzojtw+PkQAMbSLlr6PIvorscIld/Lu86fRYwGta8usrG7tT49L7xdgghhDi+7dNn7HuSbV+HDRa1p4xOW4s0TFxXeXmVCX1gwQtojJRVBrVWGmOLKaT6SGRLaJmdtGddONkhlqrdaHodhrsCy7BQnCwhMtiqimm58FOkWilQ47MwvVWET38XVM8f7UthvLT4+BQ/5KbkR2rK54HcddddtLa24vV6WblyJWvXrj3our/85S+58MILqa6uJhwOc8YZZ/DQQw9NdROFEMdKoGo0+OwfHsamNKS6J90sFvSwuCEy+sW+z7pjUxx6U8aE9fv3FmLYsnUzlcMv8A/B53lbZCexigib9AU8qy5hqzaHvFHCXRyhqHh50lmKiUWVnQIHymjk8JMqKYQzu3CAGp9CTMmMt2estHivExs9A3g8ThcRk5K+SQgxZkIfs4+D9THj9unTJqx7sD4t0wuDGxna8Ece3WPzZOP76Wn9e3yhMA1mJ0utzbTqg1SaCYz0IGXFTYW7SBADxbTRrRILlA5Od22iwenCVRxB1T2UFA+JbPHl9uwtLV6nJJi7T5EicXhTGozuu+8+brrpJtasWcP69es555xzuPjii+ns7Jx0/ccee4wLL7yQBx98kHXr1nH++edzySWXsH79+qlsphBiukUajjxY7LNuXcQ76Zd+ulCmYzDHxkyAYcdPyOihOvkCxUKeF11L2aIuxlI9lNxhLFtBLefotiuJkyCi5/BpRYyym55yBSk8BFSDlcFhVmo7YfcTpJ/5T7ZtfA6vrjI3HqSmtmHywCeOS9I3CSGOxMH6mMOue7A+zUjBUDve9HaCxX76zAADph+/rhBWLcLmIG5ssp5aCoofdymF2yqgYlFwvFi2QqOeoJYEAfLsYhbbo2eyNR/mmfYEDz71Ip0bnxgdMapeSLS2ddLAJw5OcRzHmaqdn3766axYsYK77757fNnChQu57LLLuP32249oH6eccgpXXHEFn/vc545o/XQ6TSQSIZVKEQ6HX1W7hRDT65XOi05ki2zpSwMKC2pDbOnL8LvnuzFtmzc2
qYTS2/C7dP5arGdnzkNNbhunmuvxp9rRsr2Yis6IE2GxvYkKBrFw029H2WXVEnEViIcCzGmoh9hc8EZJ7H6BHrUe/bT3s7Bt9tR/ICeQE+E7WPomIcSr8kquJ9236t3YfZD6XoINvyBfyDBSsRS7aiElxUV8+DmCqR309+2hpxwgo0ZRMwNUKsOEzRQZR6PbjBLUDebQjQ+DNEEeDvwdHfM+wOmzY3Qm8gzueJbzognOPvOcSaelz1Sv5Pt3yq4xKpVKrFu3jn/8x3+csPyiiy7iiSeeOKJ92LZNJpOhsrLyoOsUi0WKxeL4z+l0+tU1WAhx3Hilpcd7UwZ/3Z3EWxohniuzsK6Fv1b46Bgq8ESfguMsoDbiIRp1s6QC2oJLaR4xSG4bIWMZ9FGD4/JRLAww5EQwLRMTh2Z1gCAWAasEaSiUyqR8TXhUjWo/eJUEMFuKL5xApG8SQrxqr6T8+Pi6GhmjSP9glsq6NiorZuHPPYM/+Tx4LahogUgMIjUkKlx0D+RwlxK40NBshxEzRG/ORdZfRVAfIVkoYzgjZBgdbRro62J7agspTzV5dw3Eq8ZHqqT4wis3ZcFoaGgIy7KIx+MTlsfjcfr6+o5oH1/72tfI5XJcfvnlB13n9ttv5wtf+MJraqsQ4vjySkuP10W8rJgVJZDoJm50ETKDXLx4Nht7UoQ8On1pA5emEPK6AAXTG8DbvJxYro+dxSj/V1hMrZ0kqI9gOqAqBku0bbjNEhY6ecVDX7aIWhykPJKk6I1Q2bySUG3raAOm4l4dYkpI3ySEeNVeSfnxsXVcXvp7htiaDzOrHKRy0SXgqwTdDd4w4ICrBhyFhoCPQjRIeqCLKruPhnyOvAaYJj69HVtx8YyyGNMpESPLKeoemvK/x8xYlJwYVuPZ1M47DQKjhYmO9v0NZ4Ipr0qnKMqEnx3HOWDZZH72s59x66238pvf/IaampqDrveZz3yGj3/84+M/p9NpmpqaXn2DhRDT7pWWHo8FPZzVVg11iyFVAZEG2gIh2uKjncPG7hTb+7MUTZvBvh6skT10qllmG8NYuQRLi8/gUmGP5aZJ6SduJdDI43VyGHjI5or0u5vxe6up0lNYuBl0NREaC0HH8706xKSkbxJCvGKvpPz4PutW+FqYtXfkhuD80QpyAD0vjI8q5fq3U0p00pAboUwjZn6YvJHCsR2qrTKFokKfVk/GdlGyNBq1PuYUO1H9TQx64riLKiMMv3wbCY7f+xsez6YsGFVVVaFp2gFn4AYGBg44U7e/++67j6uvvpqf//znXHDBBYdc1+Px4PFIChZiJjjUtIDR11zURRYQC0x+HwqvrtKX3cLOjm2051QKukal5uYNro0oRoYdVoxWtYtGunEAEw0FmxAKpZKHneqpqLF55PQoltZMJLv3TujH8706xATSNwkhjrpDTafODRFLdxOLNkAgMvG1sZNpZoGRgX7s5ADR3E7iWood3sVkc1kai9uIkmTIibHZiLJA2Uq1k8TrGHh0m3ShSLjGx/yAhubbTYPZCSwBjt/7Gx7PpqwqndvtZuXKlTzyyCMTlj/yyCOceeaZB93uZz/7Ge973/v4z//8T/7mb/5mqponhHgFEtkiG7tTo+VAp9GhSqfu/9q+bR4rxdoWDxGrm00hMg89EMXWAyS1ShQ9gMsp41FtNEoo2KjYDFPFduZQRqeKYerymympPnqVWjYlPaPHyg2NnvUbu3+EOK5J3yTESeR4+f491C0n9n1t//aOldfWfUTCUVz1iyn567AVhQp7iIS3gQJeyrhRbItG+vFhoLt0XLpKGRe7CgG2Zz24VagzdlKR3AIcP/32iWZKp9J9/OMf58orr2TVqlWcccYZfO9736Ozs5PrrrsOGJ1q0N3dzY9//GNgtON573vfyze/+U1e//rXj5/R8/l8RCKRgx5HCDG1jpd5yoeaFrD/a5O2OTc0el+HeQsYyRcJjGyl1LsJFDf5QBN2QSdlB6jdu08b
BZ+uo5gaJdWDUtFK4/wV6FTS7K14+U7mcn3RCUX6JiFOEsfL9Z2Hmk6972sHae+wXkVSqSTWuAylaSnpLRvJJQdw+cKkzRbCuRw1WpIaO0lGD2AQpUAAC8joFejxFXhm1+G2BqB+KXD89NsnmikNRldccQWJRILbbruN3t5eFi9ezIMPPsisWXvvHN/bO+G+Ed/97ncxTZPrr7+e66+/fnz5VVddxb333juVTRVCHMLxMk/5UNMC9n/Nq6soioNX32dgPNVNNLuTWa4m0sEW6jxpqjOPYSdLJLCpsfsJO2mwNRTbIuYapNYcwEBnkGYizYsJ1swi1ttBZThEZbAKFLm+6EQjfZMQJ4nj5frOQ02n3v81YwQKydFRo73Le8pB+rMuXGo/TfNWkMhW0558GvQAzfwVLxkoWziohM3RsJN33BRdPgK6jauimlJkNrYSAX8MOH767RPNlN7HaDrIvSKEmDkOdc3RWMGFmrCbiM89uo6Sgf6NpAplejxzKJZNOp78FdbgDnxGP3OsHSiYBNUCul3GrxZwA3ncDLlmo9cvIalWolGG2eejzD53wrF39GfY2JNicX1kvPDDTCPfwZOTz0WIGeJwt28YK7gQqgXvaLGgHVkP29rbWRzM0hyP0b/uNyR3PAuORSyxHg9pCraLETVKhDyZsot+KjFVN12BJeSj82jxGVQHXcxecCqRipqJxx/cCj0vjo4mjRV+mEGOi/sYCSHEVEpki6zdPkjGMKG54oBgVBfxkiqU2NiVIpkvc/a8as5qqwJvBZHMZiIVNXx3W4DnBiqpL8epcRT86jA6JpDEa+fRcLBVmwJBXHYeq/uveDU/WqwFw3EOmKawsSfFs+0jADM2GAkhxIyVG4Idf4JiEjht8mAUaYBkB+x8DLwhmPNGDKeFojtGumIWZNYT73+SUGE75VIJMCnixlI9uLHxUETVyuRsP3lNw2v0ExrJ4csUSGqrSGTLRMz9puv1vAidfxn99wwMRq+EBCMhxAmpN2WQKViEfPqkUwViQQ8Rn5tkoUSmaAIO5IZIjQwwkgIju5uOTj+m6qFCL7PQ3kOTM4SKRYQMumNQtDz0qZWkbD9uFApaBWU1Qm0wTuOs2Sh6cMKxF9dHJjwLIYSYQVLdo6HIEz349L5AFZRLkO0BdxtEGvBmVTylBErvDroG/0p1IYWt6ZQcixSVDBGghgE8dg6XVUJ1qdSqSfz0UUuaPa5lBNw6PVoE4ovAnZp4/L3XHY0/i4OSYCSEOCHVRbwwK3qI0t0GXl3l7Lk1gMOC2jCktpAe7KI/VSKZ66M163CGtpkmaxc1zi48GJTQcFNGczlolDApgmWjY1F2V9AfnEvM5aciuYWKtjoSjpeN3SnqIl7a4iEZKRJCiJkq0gCcdvBpdGPT7CqaYPabRoNKoAojmcKT7yef3YGS7EFzQuRNG+wRsigErAwuxaRku0gTIlP24HeVcVEmQJqiFqTSY6OrGi+O6ETmLiBGZnTaXqRhdJRIRoqOiAQjIcQJ6WCFGMan2BUsls+Kjk6fyw1BagsjZZWErxWvu0BbsJc6rYuKvt24ct3olHFwcFAo4UZFw8KFC5uAq4yOhWInGfSHcfuD5LpeoL8YoLfyNAbSxfE2CSGEmKEOVYRh32l2jafBsr8nkS0ysGMXYaMPd00F5fAiBv0NdA/Noj+7lSX2X3BTQrMtTFWlS69jxA4RtLOEyKMBaUKkjTI1VXV43Sq9O55lh2sRsUrj+KjYd4KRYCSEOKn0pgwyhjlxit3eEqm7jVoeSsRZonWwQLNJuOvI+uYTKtgEiv34nDR+DFyUsXCTIYiKgoLFMGH69Vl4ozWUfF4Gk8N0jRTQKh3mxoNS+UcIIcTBTTLNrjdl0N+5kwXmFuZFA+xyanEchVpvCdVdwioEwc5h6BaqaeHYNsOuGEG1hE6REgF8WIR9Okb9SmKZTuzCAJ5cBbQuHj3udFfsO8FIMBJCnFTqIl5orpg4xW5vx1Ac
9pDpKbPRDtJuRHkpHSTohJmtB/CbvZxmPouPHGBi4aKIGxudMgoGXvyaQ3JgJ89k5tPWejbVdXNwhbwYpj19b1gIIcTxb5JpdnURL1rzHELDgzC0ntriCwQIEElvZZY2RN6TxSrkKSo2Dg61DFFLkiIBPJTxYGDgwaWpeLueIKZmiNadSqB17vS+1xOYBCMhxEnlgCl2+5RObQuHuNiboWMoRF+qGr+rQENyD42eAq5ynrBZQMfCwo2JRg19WGi8RBu66iFQTjIynGCksoyropmFbbPZ2J1ifecIO7w658ytlul0QgghDrTfNLvx203UNhCuuwheMvCnd+OvbCPbp5PKb0T16BimTqIMwwRooZdKO03RymK7XLgxyRJEyw8SMTsIVMaIzF4MNQ2j1xd1PQODUWg7X6bTHSEJRkKIE9Kh7mE0Qap7vHOItZ3PWW1VNJidDAw8g9W0hJSRpDqxCYqp0fsT4aKMjomLIFlUFKpI8TynErYMXH4XF9SkafKlgdEzfju8OpmCRW/KkGAkhBAz2eHuY7TXlr40f92dZMWsKGe1VUPrWfRu0Xkp20zAV4FutuMqjBCxy2joDBPF47JRTQcFG7us4nE5RJUMJTXHYOgUlNmnEak9ZfQAkYbRUFRMjrZHgtERkWAkhDgh9aaMA+4jNKm9nUM2NUTfjq1UzAlR7FyPd/ejpHc/R11qG3FzF3q5hNtlYuDDQcVHmTw+NBy8QJWaQ/P4eF3cpNbVCy5r/NjnzK0eD2lCCCFmsL3XtAKHCSMKOKAbw9DTA4UknT299A3spN7qpC75IiFS+DHwmQq2bpM3fSSIECFLn6uWFCXqSGL442xufifb9BrMrIe2wN5jt53/ckgTR0SCkRDihDQWQg4bRvZ2Dn07trI1H2ZWyqAYW0RS+Sux1EtUmt34KOLRS2iATREHBS8lyrjIEqKgh0n4W1EqWjA93SSSg7j6dhFufj1w8Ap5QgghZpixEHKYMLKgNkTE56LR2AaDOyFUizsYJT6wm0CmAy85MC3KjoqOQ505yAhRLBy8Wok4Sfo9s8n7K6mpqKDG7Gbnzj1sU+bSFt97v6JDVckTk5JgJIQ4Ib2iMBKoomJOiFl7R3UUVzOJ3nmM7Eqyc1hjDjupUExgtIhCvuxG1Ww01aGAi78oq/ir9wIqVDceQ8VnhiinazkrW5RAJIQQ4mVHGEbG+7BcK6TcEGkgorbQng9gG3lqcr1oukVfOYyKTUzNUKsMUrLcaEqZKAZVLg+humUoGLQl/0KDaVKT74NcvQSiV0md7gYIIcSxEAt6qIt46U0ZqJke5tb4idQvQPMEcGOj4GDtLc5dRidhhRghxAt2GwN6nCXuLpa6u2j2ZUnbPrYkXazdPkgiW5zutyaEEOIkkHdVkGs8m/bwKvqpYpgABm5UwKOa+NQiPlceVVHQsFBsi6yvlkTFUvr1RgLkCKd2jd4vKTc03W/nhCQjRkKIk9b+BRrGLnY9M2qyUlFJDe6horgHDwYqDrYJNg5eTCyXTrcVo9+p5CzjMbRsHd7WtzIv1ERksJ+gnmXYMKXgghBCiCO3f3GGfQoE+SOvQ1Wg2dhKAz14ygY12JholB0NS/Gg4eDBwkZnuOhgDQzQ+PqzSYfi5Idq8XiG8EvBhVdNgpEQ4qQwWZW6Aws0jF7sWsjn6B/cQSDbgYccOmU0wAT6nQqieh4PBmElT7PdS9TJklLqIX4K4dYqwqlutFKEF0d0vLoMvAshhDiIyYLQvsUZxqrHpbvQk3kS6blUFNO4MfBiYOLCVBx0VcEqq5Rd4MJCxQIbXJhEfC5OnzWfgb4goeJO8Lmk4MKrJMFICHFSmKxK3f4FGuIhD00xH0Wrlp6MSWNxN14SqNgogKKD1y7jsw3cmomuDpFQo/SqdXS7F7MiUA2BGASqyHencJys3NxVCCHEwU0WhODl50AV1C+F5G4qzQHqCgpbCxU0lyNUAyU00FVclgGo5Mt+Si6HIGV0XxBX
sBL00ROCMX8GcgnwLpTRoldJgpEQ4qQwWZW6/Qs0GKaNyxghpiYo6wF0y8CNiQWUUXBQCKoGAJoNHrVMBUX+qC+jl0XUJAusOsTxhBBCiAkmC0L7h5ayQVYJMKz7CVFglrOTAl6yFNGw8ZWLFBUPedy4MMk5HvxeL9V+cDMMI3ug+fVHXBFPHJwEIyHESeFIqtR5dRWf0Uut3sNw7RwGOpupLxawsQEb3bFwFIW07Uexchiqm13eBSSDC1kaybG0wnxFxxNCCDHDHUmVOpeXYcfPVlrxBHvxUMBHCQMPMUbwUqaoK4Qsi4BtkFdMLHcr3uq5JAOzMKILqDjSY4lDkmAkhDjpJbJFtvSl6RjKETR1ymaOQHaIslXEREXDQsMGR6WgeMgrPkxc9DjV9LhaWRQqcHZVhtnu1OgOj/DO5kIIIcRB5YagfyMM7cJTzlO2cvQMDLOADODgoGDipqiAjYqumaCBH5Ok7iFZ8Xp2hE5jlt6MPcl1tuKVk2AkhDjpbelL81/PdGJlBnlLcAt2eSO+3X+i2hnGxsFGxcEhr/rpoImkEmBAqySjVBBVYVFjmHhLM8N6FT3dKRqNdqLZnaM7l2AkhBDi1ejfCOt+jJHsJavVM1xMUtn/FyLlFBYKQQq4sCjoAdyouDBQ0FA0jbIeJFl/FrMq6qh3ZRneuZHd+TA0N0sweg0kGAkhZgCFvGFRWerHb/RTSPUScvJo2NhoWHiAMjYaUUqMKFVs8L6OHncrp0UNiC8mOmc+m3fsor9zE+6aCqLVC2UetxBCiFfPUaCUpZgbwbQVWkpDBO1eDHTK6PiUEjlFp5caVC3AXL0fn15AQcfbtIq2llZiQQ/Jnduw+jfREJlLXWTedL+rE5oEIyHESW30BqwOq5fUohZ02pLdFNMWGfyomJRwUySEjww+itQzgOIJMOhOYIcWsUtvZLETAqBOSRBUuwj5olC/bFrflxBCiBNYbggUBxZeylCgk929w1Tl16GpFgo2qqrS61RjoZEjTFaJomhB5ip7CLpV6itDMHZrCidGj95CfUxGi14rCUZCiJNab8pgIF1iUX2Eukic4mOP4C3141BEdWx8ShEdlTJedExUypRdIeZFbaLKS+zyvY6wzwVAtLaVqM8tI0VCCCFem1Q3ZPqgdglK5dmUu75NylSpLNvUkGFY1XjJns8cpZu40kfEMtD8zQwwm5xLJeJrJLJ3VzW1DVi+GDVSJfU1k2AkhDipeXUVRXHw6ioDfd1YIyPUOmXcFNAVmyI6NipDhHDw48cmrwQJuBRalCTxUD+nKF6GB0r0lIPURRYQC8gZOSGEEK+Bywto4PKS3tNB3oIE1VTRg5sSBby4NZVZDBAmi0GRhF1HPraAPVoNZnkup2eLxJQMsXQ3sWgDBCKHPaw4NAlGQoiTmmHaOI5COTNIw/BTlJQkjqNj4qKAiz53K0E7h8cssVmZg6l60N016IU0xUAj8aCbzJ6NJHw52l1zAWSqghBCiNembAAWyXSa7UaY3e55VCkJ0HTSapBh/zxqi3nKlo+SoqIoKgVLoVop4HhsutMJRnY+RyxoQrZ3dJ9SDOg1k2AkhDipNZidhDN/JUqAcKmfvAv6fHXkzCDdvjlYLecyt///8GT34MfLbrMSJ+eQKdikXNXktBY8RR/BQC1zK4JyQ1chhBCvSSJbZMewB49RS1ELk3A0Ah6NWr/FUKkFIxQhHJ3NlqECZjnIQr0Hf6GPSmMPlckEDa4UNZ4IFYYGoUaQYkBHjQQjIcRJrSK5hYrhJ6FUD9EGMq4acm6DXKwBo+HNZCwXO9UWtPAsUraLWZmtFNQoQ+46KuNN1MQb6EpWEgn4pvutCCGEOAn0pgye6dfIl2podDwolPHqKsFAGKOiiZqaEFq2F9Pn4I9UomSHMNQgGV89Pq8HT6iR5uoK8EQgVLt39EkcDRKMhBAnt/qlkNwNqgaBKgKBQcKaQdAs4dET/G9pAdtZihJp4BR9DwG7mzrN
4JRajYr5UbrcOo6j0JUs4DgKIFPphBBCvHp1ES8rZkVpH8xh26P9So9vDgEzSbDUTz41RIuewuc3SftbydFC2dvEnsCp7Ag0cXqFRrCchuis0VA0uHl0xzKV7jWTYCSEOLlVz4dVsdEKQCO7CZYTlCIVdCl17NLm0FsMQnghYZ+LvyRKJIPncnazm9aWJqhtxXJGp855dRXDtGUqnRBCiNckFvRwVls1C2rD9KYMNvWk6Ol2MDyVNAUsqvV+fIaBLxDBxwgFPY3ZsJRS4xsxvVVEw0UwhyZOn5OpdEeFBCMhxMkvUDX6MEbAsim7owxGz2BnqR6TAudVDEPvizyWradz1irip88fvz9EDBkhEkIIcfTFgh5iQQ+pQolBEkTtEWpaTqHKUCh095MwdPTyCFE7hTdWRXzxgn223icIyUjRUSPBSAgxc8QXg6PgNUrUe+YQ0SJ0xQu8fvhpdOUZYhXLiJ56kQQhIYQQx8yC2jD6ssV4cjGirXNBmcMg1Tza70EpDXB63GFuy9nT3cwZQYKREGLmCFTBnHOJwPiN8VYBvNQIhQYuaF0MrbHpa58QQogZJxb0EFs8H5g/viywsgV1Yy9dwwUG5lUzt1pGhY4FCUZCiJNfbmj0GqNIw+RTDlrOhIpZMkdbCCHEMZPIFulNGdRFvAfMVIgFPbx5cd346+LYkGAkhDhpjXU6jUY70ezO0YWTBaOxa5A4dEclhBBCvGZ7T9YN5ENsT++9nnWS/mbsGqR9tznoCT5xVEgwEkKctHpTBtv7s2jhGNFq9xGNCI1tA1J0QQghxBRIdcPgZuqCc7Di845sRGjvNoAEoykkwUgIcdIa62xqIlXjVeaOdBuZuiCEEGJK7D1JF400EA1EDrPyxG1kyvfUkmAkhDhpTZiGsI/DzeuWkSIhhBBTZp/p2xMcarrcwbYRR5UEIyHEjCPT5YQQQhx3ZLrctJNgJISYcWS6nBBCiOOOTJebdhKMhBAzjkyXE0IIcdyR6XLTTp3uBgghhBBCCCHEdJNgJIQQQgghhJjxJBgJIWae3BD0vDD6LIQQQhwHEtkiG7tTJLLF6W7KjCXXGAkhZh6p/COEEOI4IxVTp58EIyHEzCOVf4QQQhxnpGLq9JNgJISYeaTyjxBCiOOMVEydfnKNkRBCCCGEEGLGk2AkhBBCCCGEmPEkGAkhhBBCCCFmPAlGQgghhBBCiBlPgpEQQgghhBBixpNgJIQQQgghhJjxJBgJIYQQQgghZjwJRkIIIYQQQogZT4KREEIIIYQQYsaTYCSEEEIIIYSY8SQYCSGEEEIIIWY8CUZCCCGEEEKIGU+CkRBCCCGEEGLGk2AkhBBCCCGEmPEkGAkhhBBCCCFmvCkPRnfddRetra14vV5WrlzJ2rVrD7n+o48+ysqVK/F6vcyePZvvfOc7U91EIYQQM4z0TUIIIfY3pcHovvvu46abbmLNmjWsX7+ec845h4svvpjOzs5J129vb+ctb3kL55xzDuvXr+f/+//+P2644Qb+53/+ZyqbKYQQYgaRvkkIIcRkFMdxnKna+emnn86KFSu4++67x5ctXLiQyy67jNtvv/2A9T/96U/z29/+ls2bN48vu+6663jhhRd48sknJz1GsVikWCyO/5xOp2lqaiKVShEOh4/iuxFCCHE46XSaSCRyXH8HS98khBAzxyvpl6ZsxKhUKrFu3TouuuiiCcsvuuginnjiiUm3efLJJw9Yf/Xq1Tz33HOUy+VJt7n99tuJRCLjj6ampqPzBoQQQpx0pG8SQghxMFMWjIaGhrAsi3g8PmF5PB6nr69v0m36+vomXd80TYaGhibd5jOf+QypVGr8sWfPnqPzBoQQQpx0pG8SQghxMPpUH0BRlAk/O45zwLLDrT/Z8jEejwePx/MaWymEEGImkb5JCCHE/qZsxKiqqgpN0w44AzcwMHDAmbcxtbW1k66v6zqxWGyqmiqEEGKGkL5JCCHEwUxZMHK73axcuZJHHnlk
wvJHHnmEM888c9JtzjjjjAPWf/jhh1m1ahUul2uqmiqEEGKGkL5JCCHEwUxpue6Pf/zj/OAHP+CHP/whmzdv5uabb6azs5PrrrsOGJ2D/d73vnd8/euuu47du3fz8Y9/nM2bN/PDH/6Qf//3f+cTn/jEVDZTCCHEDCJ9kxBCiMlM6TVGV1xxBYlEgttuu43e3l4WL17Mgw8+yKxZswDo7e2dcN+I1tZWHnzwQW6++Wb+7d/+jfr6er71rW/x9re/fSqbKYQQYgaRvkkIIcRkpvQ+RtPhRLiHxkySyBbpTRnURbzEgnIhshAnO/kOnpx8LseZ3BCkuiHSAIGq6W6NEGIKvZLv3ymvSidmtt6Uwfb+LIAEIyGEEMeHVDcM7r1hrwQjIcReEozElKqLeCc8CyGEENMu0jDxWQghkGAkplgs6JGRIiGEEMeXQJWMFAkhDjClVemEEEIIIYQQ4kQgwUgI8erkhqDnhdFnIYQQ4jiQyBbZ2J0ikS1Od1PECUim0gkhXh25eFkIIcRxRoo+iddCgpGYElKmewaQi5eFECcaKdN90pOiT+K1kGAkpoScsZkB5OJlIcSJRka6T3pS9Em8FhKMxJSQMzZCCCGOOzLSLYQ4BAlGYkrIGRshhBDHHRnpFkIcglSlE0IIIYQQQsx4EoyEEEIIIYQQM54EIyGEEEIIIcSMJ8FICCGEEEIIMeNJMBJCCCGEEELMeBKMhBBCCCGEEDOeBCMhhBBCCCHEjCfBSAghhBBCCDHjSTASRySRLbKxO0UiW5zupgghhBCjckPQ88LosxBCvEb6dDdAnBh6Uwbb+7MAxIKeaW6NEEIIAaS6YXDz6L8DVdPbFiHECU+CkTgidRHvhGcYHUXqTRnURbwSloQQQhx7kYaJz7mh0bAUaZCgJIR4xSQYiSMSC3oOCD8yiiSEEGJaBaomBiAZQRJCvAYSjMSR2+9M3GSjSEIIIcSxNGH2wv4jSEII8QpIMBJHLtUNXc/AYBTazicWrJKRIiGEENOqN2WwvnOEHV6dc+ZWE6tfNt1NEkKcoCQYiUMaOxPn1VXK+RANBAgXk6MhSaYpCCGEmA5jMxhcXhqNNJ2Kh0QhQm/KkBN2QohXTYKRAA5eSGHsOiIz04853E2+eTYr63wyTUEIIcTUO1gxhb0zGLKpBDknwOL6laQrZsnUbiHEayLBSAAw0NdNf+dOtOY5xOrC4x1RXSQEQDK3iVxhF5oZIhFeTm/SoM4pypk5IYQQUybZ105mz0ZCTSWseGjitUSDUTKlIbpMNxW4Wax0gNIASL8khHh1JBgJAOqLO6nIPYmvCPS7YNefIDyL2CmXEmuooiMTY2DEQzQSZqCvm82bNvOkq4rzVyyiLViU8qhCCCGOqkS2yNODHmyjjupCiB0be+lKGpzdFuOstmpoOx+zHKSUyOIvDpDZM0xi52YqPTbh1pUkfC1ySwkhxCsiwUgAEPG5iIR8pIANXSkq+waIFi3Kfe10eV2YqTSK45BMpYE03tRWEqUM/+uu4MqWFNHsztEdSTASQghxFPSmDBJOmFC8EsihtD9GrGSiN54FOQVS3ZQtB68xhJk3GbEtsl0biZQ7AIPehlq5pYQQ4hWRYCRGheqgooV+p4Lnkw51VjNes4aXnthKq/V7ZtVG0Srq2dE7hK/vOYJuDbenjW39WdZFfLypfqFcdySE+P/bu9MYudL7vvffs1WdWk9tvS/s5tockjOjGQ1ncWTJtjxWYl0hvvFVBjJoIzewZQOC5MBwJAEJJL8S9CKIjMRKDMGwLxLb0U2uldjO9XiJRrpjzSbORnK4k91k791VXfty6izPfdFsDpfuHnKGTXLY/w/Q4LC6quupBz3nz995nvM/QtwxA45NtScBaAx0LpPRz9EyAtKzXS6+/hZ1ksRHHyUdi1BfukS0egGFjWZaUDzLUN9l6BuV646EELdMgpFY5XWAgD6tzEFjktDqcHGlSriyzIj/Krk6zKY/Sqxap1B9
BxVNE6RGKJu9LPg5GBy7159ACCHEAySfjOLEIpxbbGBG8qjETtqVRYKLb5CvHKOhCrzTzrJTn8csX8T2SwSpHfh5BxoLZCunyT5y6F5/DCHEh4gEo+2uWaSyMMlSW8M2RqmuLOPWKxitGuPeHH1RnajvEpSXSTU7dFWaEJ12YJFuTrN7aB9Pjufv9acQQgjxACk1XJYWZslUT/OID2WVpdHpoNwqNU/D1DNUVJLu4jmaLJPTqoS6wgkqdIMMjd4JkoMP3+uPIYT4kJFgtM1VFiZ5561XOe4OUMvtx/YgZYT0JBx2NV8nq8oYfpNI6NLt1gmMJPFolN5wnqhmcTi9g93JQ0DqXn8UIYQQD4jS1HH8Y/8Dm2WGU3Hq2X3U0j710GS+bbBk9NFqtGmj00JjWO/SsrJUvBjVTh4/9Y94MjaGnLYTQtwOCUbb3Jl2ilcbvYTVafYWf0Rk8BCDo+Pkog6RS3N0ZooESkcjJBK2qIc6eW+WnF4lqgXMVma5PHmW0YPSdEEIIcQd0CwyOP93eN5FWl7AiUod2Ik9+hjlFFzqnMVoHyPBFDkqDLFENlzBa3ucZwxXd+ktnmJpIUl+9857/WmEEB8iEoy2sVLD5XzDpuY8xDOtHzNRe4lw+jgaT5Bz0iTdGRbdJZbDCAl0YtTYwSRdIjSUTSc1wvm2w1Ijyei9/jBCCCE+/JpFOP8CSd2HsY/w1mST1tIpLrWLJLtFZsIMJ939PKYuMsEyIQG2aqJok8MnR4FIwmWPOoOt7QQkGAkhbp0Eo21svtohVLCnP8mwlya1soLn12hPdghtcPUQ1amTC9skaGKgSOFykiFOFZ5kZN/jGOZO9o6P3+uPIoQQ4kFQnQW3AoV9MPgw2uXvsexFaYRN2pNvcikYwtV7ybWm8dBYUSmagY5leqT1kIF8ntzeA/T3D0O/1CYhxO2RYLTNlBru1RverbVCPTFTpbl4EUtXmISE7go0S3RRWESI4mIR4BKnqWdJRmJEsxlK8X2MpixYOMaKNkauV9p1CyGEeB+axdVQZNkwfHj1z8kfYZbPMEuBpSDH44232OW/jKM3KIZZfqQm8DHYb85ylhz9mSR7DzxDKm5RazaYna/R25+SexgJIW6ZBKNtZr7auXrDuwHHZrnucnK+Rjl4hGTGxw18jOJpBpTCVl2iuo+ORkAM10wRUSHZsEqzPUekdY7u3AJz7SqXV5o8Ei9IARJCCHH7qrOwfAp6rtwT7/wLcPkVJrRLdDImdGbY1X6DFHUIFAvk+PvwEAkrQNdmiOGy4vdS8XS0eo25udMsJJtYzYfI794nNx8XQtwSCUbbzIBjY7RLDHTmmG/nqbcN2l2f/xU8ynH9SfZUv8/Pc562FkXTIE4HD4sKKep+nABFJYxTVAP0+AGWV6eiJ6hpPcxXOxKMhBBC3LYVs8AKI+TMArm17XRek4Jf4mN6nYZfBxrUzATKUyTp8E8jf0fD7Gd3cIEYLg23jR3soaxStLFxwip9nUmoJiUYCSFuiQSjbSafjJKP12H5AiThIzv2Mj0zhdc5i/Kj5LQ6KVxSuETpYAIGHkkq+CgW6SEZUVgpg759TxBWB+l1hmhZWbm7uBBCiPdlzktyLhhjj5ckl4lCZw+tyVcgUHjdNmEQ0iLFUW83Fh4HmCThu2gsgKYI0aiZGUK9h959h3H7auQzaVJWuLoCJYQQt0CC0XZ0pUiEZgE8cLxldqoZ8qrLwVSNWGwYY2X+6i+HBqToEMXHJc6yOcBg7wA7x8YoNQauXrMkq0VCCCHej7UTa4NWA6pFaNfoNGuEWoKKnaOExkpbR+EzRJEGMQx8PN9mhgyZiIZlRekkh8juOES2cOWaJWdIVouEELdMgtF2lChAosDcbJW/P7fM29U4vjZMQZ3BrFzipFngI9hE6BICOqAAhSJCi5PdQZaMh9jdLFK+cIZLrTSMjkowEkII8b7kk9HVGjI3tXqtUekicbeIH9ExwpCMt4JDiwAfHwOd
gB5qOLS4RD+2atOrL9HpXODEbJXhziSZxoXVHy7BSAhxiyQYbQfNd8+clVSK0wt1QDFgNrGLx6HTYtws8lDnOE44h9O+SNxoXX25j06LGF1sQi1Ob6SDE49AdXZ1/3Z8nKxsoxNCCHEb1rqkDloNcn5xtRNdu0LVt3BLyyT8Lp1Ap9mo0u/PYODSIoaLxdXTdZaBQYRmJEHdTFBXDucWGxjpPJmeiGyjE0LcFglG28Fatx9gXo3xxlQZNPj51DkOVF7AaNUZUEuMhFPE/RYGAQYBIRohABpd4rTtQcJED6O5AXrtGjh7SQEpZwgSslokhBDi1q11SbWNKXJMAwYQsFBq0l5YoL9apaps4sEKBh6eb9DBwsbFJcYxdhOQIJIZop3soZ3KEs3sYk9fkl6nALKLQQhxmyQYbQdrZ8ycIeyGTi5pkUtEKC+7FItLGD506RKhQ9TsEsFFAfUghqX5WLrCJc5i/gmSVkiqMUVQm6ekHud0awhaiol+V7bSCSGEuGVr1xXlrDGo++BWoVMlUr2I1ZjCUG2SQQOUohokCJVBjBYJvY2hh1gqgGiSbN8IK12L+XKDseoCBwfi0Lah1pFrjIQQt0WC0XZw5ZoigE6lSjxiMZJL8PLMINPqYWJaiYLewgo0LAIiAB6EuADoAQSWR3N5imEukdA7sDTMfPWTV1efnFhEgpEQQohbdvW6Ihzwi9CYh/oiA+4U1bhNp5akgoXSurQwGaJEmjbRMCRKi4PGBYrdNu5CQK2TohjE0ZKzPJ53WVt9AiQYCSFumQSjbWbAsam2PZrleR6LzmGnOjjNFQY6i3SwUMTRqaHzbtOFwFSkaDDQvUhXUxgRm6KXJGnqPDaWxewUGe6chea4FCAhhBC3zxmCdoVas81y4gClegLNvIwdLJCiTdJUWF6XAA0fUCF0MQkNj0Rtkn69QNMYo1dvQvIQZTNHqVJbvS/Svf5sQogPDQlG20w+GSXHLI23/5T+6lsMOb203UvE1QxN36ZtxjAIAR9DVyhDQ6ETEqGi5Zmz9+BEQxba/eypd/iJ3T0wN7t6X6RqRIKREEKI25coUCHOsYvznCzDefdxHnK7/BQXyFGjhENoRSDsUtdjKDRcYlRxyEZ0+mMGSW1l9Vol7UlmzVHOBY3V+yLd688mhPjQkGC0Dax1/rFNnY4folWmsUrnaLdLzAd5Uu2ABBodzcRGp66ihERJhS2UYTFPD4sMMp19ghXnUbpuk1rYS1/bX32Da65hEkIIId7TWrdUywZv9VqgpbZGZ2WGfY1ZHg6aJCkSwSfEpEKSIerEdB8Lny42Bk3qsShB/6PkrCpZt4odlqFdY6B39folufG4EOJ2SDB6gK0Fomq7y1KtS6vr0Sov0d+eoWw8RMvYQakKT3bOgcpSt1LsZJFI0EJDEaBTDWOc18d4x36cTw6moS/BXy7uoOP6gLryHhYDzgT5RPTqe8oNX4UQQqyrWYTzL4BbgWgG3Ar1iyFatcN4vE3KWybZuIxOixY2bZJkqBPxfNBAN0HDJ4JJLpPjwuAnyHCJ4XAemiugQV6rk9dmQRuCZl1u9iqEuCUSjB5ga61Qe9NR9vQlmV5poc8v4rQu09Z93EQvE923cGiyaPQwzwgxXBx0bFx8LEqBQxuTdFjjpaUc6Vicjq8oNTzqHf/qe8DqNr0b/y6EEEJcpzr7bigafBjmjtGaeZ1quU2g0jj53bTaZaK+S0gEyzKxvQq6FeKioWFgEqJpAWGnxt6575GJAnYKNFYvjr3mNhXAu/8twUgIsQkJRg+oUsOl2u7Sm44y0Z8in4wy4NgsxQ6QmS8zNn8cs7tItbWM6oZ08dnBDKmgSVPZeJZFgMHbwQRlvY+e7jIsLTBXK1OOP0wQzeMFIYPWlXtQWGOAc3XbgmxfEEIIcZNmETplyO+F/gOrQSWexzYzXD65yNlWmob2DvloCdOPE8cl5rnoBBgousRokMAxAgxdI16/RKR6Gj8So1MYx86N
QyzNillghZHV5gtr99mT7d5CiPcgwegBNV/tsFTrsqcveXXlJp+Mkt+9EwbSVKcGqC5M4oUpap5Fzp2hV80SDV1qJPF9C2VGyDhZ3oz/NH3lv2SPd5qC12a520/LyuB6AXp9md1Mg58Ehq62Xy01XE7MVmVLnRBCiHdVZ6G+AD373129SRRwDvwsDxfq2JOTDCxexGsPU/ZNIu4FQlwMLwArQOGzTI5uMs9QNka4cB6/08T3FLgutmFBp8Zi6HI6GFttvpBw3n2vtWubZFudEGIdEoweUJuu3CQKzEV3UWtcpCedJlD7SMxcJu55hFZI1zOZ1/K0wxjLDZdezrOcf4J8kECFkI6k6AQ15s9Mc0wfZjC5elZOXXN9kWypE0IIcZNNmvXs7ktRaLhMznosO4eY6vRTd10Sqsa4PodDHRONvVxE1RZQ/T+NueMfUF2cAk0Ri5i0K3M0jn+fzGCFiYF/QJ8FzE29G4Su3WInwUgIcQMJRg+od2+cd73Lb32f2qkXiOcGGY75ZHyXpF7EjSQI9RwN16drRch5dXwa7Ajmma6f4ofWZ3kr8igfy5f4pd29vDa5QroxR72R4XjsYSZqLtb8US610jA6el0wk4YMQgghgOtuOH6t42++yuypV7Fyo7QTh5j0UixqJwm0JVwrhuP9gBRtDLroAWDU6U6+SnxwP2SzzET3EU1HsRpzFDsmTrvKgFaiON/F6kySWnvvtUBm2TD3tqwcCSGuI8FoO2kW8Y/+X6SXjrNcfYLdH/tFEiuKmblplukljEax9TKRdgPTcongEaVLlyiNtk/ZylJOpPmZ/QcYGHVZme9Fd4aY96O4pXNku1PsS+8BU78uCJ2YrfLmpQrnYwYf29Mj4UgIIcRV5xfrvPLyD9hZeQW9sYeeZ/5PVubr7NBO0WvPcTkscF7fQ29YxvB16ipOxFBk/SaqeJ54z16coT3E9v0ken0Os62R1MqUistMdpKQGSflDF2/jW5t5ahdgVhGApIQApBgtL1UZ8kmY1RqOcqRfi6cfpsxNUM7tIkEi9BaBNXCwmSJXpK00Al5mwliloGVjFJKjVJSKfK9BXK9q2feWrNVLoV5kmmL3bv3caISXreNbsCxOR8zrnaxk2AkhBBizYm5KpesXTj2HLsjAY0z36fXN+nzp4m486QJmQp7mWaAvFmmiEOv3sa3PLqRPLFEL2PRNiSi0PsEGYC5t9EbxwkzDtldH1393tzb726jW1s56pRla50Q4ioJRtuJM0T28HPobY+ZmSrm/I8o00RFs2RUHVvVSNEgwMIDfHRK5PAxGOyc5sJKkqMqTTYe4aGhzNXtctW2R6ann2z/XkhEsRt1NE1hmzqwGo4+tqfn6irSdiVbCoUQ4mYHBx346FOkggmWzr6AVi/Rm4oR0z2M0KWfGaa8JFUsClbATmYxlU5bZXFDC3PhJOBDsgCZHauhx7JJ2VFSgwVYO95aNmCs/rm2pa9ZBDu7vTvWSUMKIa6SYLSdJAqw8xM4wFBiilkgHunQ25qj5e2hNtMgSheFgY5GnA5QYQLFOS9kqga1sMFrUyauD+zIALBUc9nTlwTgxGyVattDKY2OH159642uedpONmtIIaFJCLFd7e5LsbsvxcqSxmx7lGZ3AJUZpmhmifr/k2TjEvuMaabDXvLUV+uUCml0dXLhNHEbqNow+yY0FoHDV35yAF7n3X/4tyvvPrZmg2uetpWNGlJIYBLbkL5VP7hcLnPkyBEcx8FxHI4cOUKlUtnw+Z7n8eUvf5lDhw6RSCQYHBzkl3/5l5mbm9uqIT74msXVrQPN4k3fallZaul9RPSAIb3CUN6h1PM4M4wwo4+hReIYgE5AlyiGYTIerTJhzrNz4a+wymcZtBoMd84ykXZv6ESn2NOX3HB1aK2Vd6nhbu3nv88MOPaG87I2d/PVzjqvFELcKVKb7r2NakDOLzIWqdPS0/y41sM7zseZf+QLBOlR+vQGjhkyZe4nxCBEJ0aN
MBIlyIxRNrNMn3+bSwvLnGqlWDELqy3Br72eSFPvPraeTWrmA80ZWn9e1uatOntvxiXEPbBlK0af+9znmJmZ4fnnnwfg137t1zhy5Ah/8Rd/se7zW60Wb7zxBv/6X/9rHnnkEcrlMr/5m7/JZz7zGY4ePbpVw3ywbdKWdMCx8fUSbmOFVquE3i7Rk+xnpbsTuzlH0O1g4BILFFljBTcw6I+49HCKPc03oGGR83uhcYFMT+TdrQpXfvZmqx7btZX3ZqtmcmNcIe4OqU333oY1wBlicblBvebiNN9h2s9SUgbJvp/AbrbI+hUy/gw2HSy6eNh0u0nmcocJly/SqcwxHVV0alGCWJLc0A3/0H+vlY/t2sp7o1WzTVqrC/Gg2pJgdOrUKZ5//nleeeUVnnzySQC+853v8PTTT3PmzBn27dt302scx+Fv//Zvr3vs3/27f8fhw4e5fPkyo6OjWzHUB9sNB7W17VqDVoN84xz17gLzyiFp9RNxfdzQJk+Ftt/EQxGiEzU69NGlpUwutyJkCnlMrcCKluataozk2p3FufXtchICbiZbDYXYelKb7g831YArW7ZWzALn9HHM8G0ORebpeAG1lk/JV6RDm4c4j49BnTiaHyNutkj4ZWIz/x8r2UcoenGGrSqpxoukhz8GOKs//1a3y0kQuJ5sMxTb0JYEo5dffhnHca4WHoCnnnoKx3F46aWX1i0+66lWq2iaRiaT2fA5ruviuu8ux9dqtfc97gfODQe1tbN0tjFFrvxjeisLJNwucafAO9oTtE//DQfbp/FIcjYYoc+oUKBChIBlslQ8nYqyOZr4GKerowxO+eztu3Jn8Wve9r2ul5EQIIS4F6Q23R9uqgFXVmpWGOHYfJ7Wis2S1YtjBYy2XqKxPE1/eIk0dbokqespLLOKTxQbj1h7gXQ8T8o2yHjz2NUA/P3ADQHnva6ZkSAgxLa3JdcYLSws0Nvbe9Pjvb29LCws3NLP6HQ6fOUrX+Fzn/sc6XR6w+d94xvfuLpX3HEcRkZG3ve4H3Rr17jkBsZg9AniiTQ9ep2EAW/PtdHbi4R4aECfUSNPhYUgy6scohTbyU9GzjISztI1HbqxHHHLYKbcpNP1r3sfuV5GCHE/ktp0n7pyjUtuYIzhjE3TzHCGnaSrZ5movsih8ARJWnSJUSSBHdbI0sbULFYSe6hoadqtNrYeYGdHoLALZo/C8pnr30eumRFCvIfbCkZf//rX0TRt06+1Pdeapt30eqXUuo/fyPM8nnvuOcIw5Nvf/vamz/3qV79KtVq9+jU9PX07H2nbuHYVJ9c7RKn3ac4O/Dzz/T/NSWMnvd0pwMLT06TNgL1cYoASfcYKiwyg+SFZK2Q4Ac/EpngsvkSt43Nyvs6Zxfp177VZkwEhhLjTpDZ9iF2zipNLRPn5nmX+j4difLQvIEcZTdNpGxmOGY/xQx7nLW+My16WBhHMaBSlWzStPIvJCc5HD7Joj612n7vwQ5j6++vfa6MmA0IIccVtbaX7whe+wHPPPbfpc8bGxjh27BiLi4s3fW95eZm+vr5NX+95Hp/97GeZnJzk+9///qZn5ACi0SjRqGzLei83Xuy6tDDL9NQFKC+Rql+gx1tCjyeJKBuzXcLAwyDAocVh/R3mrB3M2rsYt7pk5n/AYHCBxcHnyMfz5BI3z3+13aXa9uhLden4obShFkJsGalNH2LXNjwAjOIZ4t00uYVp3JU5qnqa5ViBk+2dDKnzPMoFDDwu00c6iGIkkqhIkipJ/OVFYtUqiaFBkskCxHvefZ9mERZPgNJW72MkbaiFEOu4rWBUKBQoFN77IPL0009TrVZ57bXXOHx49X4Cr776KtVqlWeeeWbD160VnnPnzvHCCy+Qz+dvZ3hiEzde7DqglQjqx4lUTpHuzhH4Hq3ApOl56NgkcTEJ6WLSjA1Bdi9n9CfQayd4rKMTo8wz+jsk9n4S3zY5v1i/GoDmqx3euFQBBSP5GEqtnond6mAk
9wISYnuS2vQhdkPDg8XlBucuT9FZWkBr2/T4NrZa5qOqSkarkNSaaAq62Mzmn6Ynm6aveZo9tVdod0O6Ri/HIo9yYLQPJ2atbqfzOtApw+Ufr7bszowDwer73o1gJPcDEuJDY0uaL+zfv59PfepT/Oqv/iq///u/D6y2RP30pz993cWtExMTfOMb3+AXfuEX8H2fX/zFX+SNN97gL//yLwmC4Oqe71wuRyQS2Yqhbhs3Xuya6R+nsfcZpswCKlgg485jLF+irCy6fsAu6kBIGYep6F56072ENcUpf4DQOIijd9mjNagtX2I2arNcd68GoAHH5rEdGUCjLxW9Gpjgg4WX93rt3WgDvukYpPgJcV+T2nQfuqHhQXZXih2+QcAcC4kkpVqGXHuauFclGXRp61GCQCerdTFiIV56hFjzLSKGSzISUDM9ym2XanEOJ9aF+jIQQHIARp9YXTFK96+GpbVQ9kGP3e/1+rvRBnyTMchJQyFu3Zbdx+iP//iP+eIXv8izzz4LwGc+8xn+/b//99c958yZM1SrVQBmZmb48z//cwAeffTR6573wgsv8IlPfGKrhvpAW/eA2CxSWZjkmDfM8eQY8YjFz6hXiLb+glbQJeYvEMUnwCSGy8dqf0EjGOVxfxdGt0rXSjCtD5Gqdak3zlLOJdh/aAI3CKm2PQYcm5/Y3bPueD5IeHmv196NNuCbjmG73gNDiA8RqU33iXX+Ib+yNEt5forddgunV7FoVHhbz9OyDDLNs4TNAD2AuhZDVyHzCwuEFZ9Mt0I8qKPrGrZeJ7pyEsv1oJCH0RHo1FfDUM8GXQc/6LH7vV5/N9qAbzKG7XrvQCHejy0LRrlcjv/8n//zps9RSl3977Gxsev+Lu6Mmw6IzSKcf4Hm4gJhuIehzATjPQmC+dX98lm9AXQIMHCJoKGR8Fdo1yPsooGFzzv6QVa0GDTnKZgVqo0BZio76EnZLNVcnJj1vm9kutmZrfd67d1oA77pGOQeGELc96Q23Sdu+Id8qeHy9jvvYJXOoY+O4ww/Sdw4jdUsUu6GuH6ARxyTNoapsPDobU/RVg28bg1d1YnaFl0UTdenaXbAa8DCCYg5qytEG7mVY/dmq0Lv9fq70QZ8kzHIvQOFuHVbFozE/eGmA2J1FtwKqUyeXbm99PYPkE9GuWg+Quvc88QrlwnwcTFRRAmIsYRNnRhZs4NlWKRTDqV2jsWIzxN9inYyS7XjEzW7RLsl0uVLkNl7tdidXqjRqSyRD5YZHd/DwRvvRn5FqeHy4rll6h0fRrM3hZz74f5Hm45B7oEhhBC35oZ/yM9XOxT1AoU8ZMYOQO8QqXQ/E+pVitU6fuBjEaGmRwgw6GWRJEXO64O8qT3EHmOWPTEXx7ZIZMcp5Ubpiy6TogUYVMtLzLUu0ts/dPUkYe3MD6iWFknu/UmyOx7ZeKxXTijiVoDDNx/n74dj/yZjuB9qpxAfFhKMHnA3HRCdIeAwaWeI9DUH0ZaVZS6yi3HtOFFCLCBKgyhtGhjkwipaV0eLZzngneRtbYATrSzDrQpGzKWQXN1nvzgzST44jVU+z+XsYc7VbWarbZLF0+zWZ0jaFrne9YPRfLVDvR2QiplyZksIIR5kN/xDfsCxYfcuBpwD5NZqltdhIB3BSaVZqeTIsERLacz6vYyZcyRpkmxdwmcn02GGgbDCkN5hl7rEebeHauiRGtoFnSrdE/+dkraHojPMw8MOTsyie/YHUFmgHkmS3XFo47FeOaFINCM7AoR4wEkw2m7WilGzCHNvX90WMODY6AN9xIpRrG5AJ4yhTMBvkg5aeLpJS8vQ6ppUzATxsEm16/P6ShRlJjjQGzJgNZltllgJfSJLi1wsnWM2spvhXIz+wgT5IL96c1m4aVtCqeFSbXcppCKkY3f511KaJgghxD21dhKv1HA5MVtd3U59JYQEI33ML9aI+ysEpommdMJQoQdd+q1lTEJqWh4Pk2Z9hYT7Bvu8BolYDLw+
6s0Wqr5M2owSdJdod22chz5OZO8naJcWSY09Bmx8TS6dMiT6ILZ5i/Y7TZomCHH3STDarm64d0S+Nkt+wKFzMUK34eEqjdBMYIQhAYoiORb0EcxInLI9wmBrib3hAkZ0P5djBssLMySqP2Z/pEE7tZtIfz97tQj7/UVGx/eQ6z2w8fsnCsxXOyzVurS6HueXVq+J2qiBw5bOxVowkrAkhBB33dp1sdW2hxOzGHAmqKcWmFL9xLV5aiSpkOSyNkRc7xJBI6kr2rEkjWiKbt3D9tokeqIE8QLzF0/QNJM0U4ew03miySx+3OGUGqF371OMXBM41m1SUJ2F+gK4rdXW3wA7P3FX5+La8UhYEmJrSTDarq7d330lGNQCk26kh4Q9jd5uEHp1AjuF66cISDNIHTf0cf0So52T5FSVbmsFdyagXGuQ6L5FMb2PqeQnSNlZ9rfeoF4todcTcOP2OWeISrvLfCtFb8O9unVueqXJSsMDtJsKwJYVhLW5sOx3V9Gkw5wQQtx1a7Wg2u5eDQXt8iKhgjeNRzANSHkljnKYhtaPozWx8EhoIbrfZLIVJyRKdqlNf/kdjNJ5GpF+zJ4JhlMufm4PP6r3EV5YDTn53Ttveu9BqwFzU6u1YK0+VKagXVxt933jibMtOpG2Nh7b1K+uokmHOSG2lgSj7eTGg/cNB/CVC6fwwiRWYpi4fxbVbRJ2fdASDIRzWFqAG0bIlOeIqjYxPJodm3gwT6q7TFaVmW53eato8Fhmhnq7xEw7Au00idkqca9MbXGKklHAzvQDY1SWF7CaR9m9ex/5odUtfSO5xNUC8OblMudtk4/t6bkzBWG9ArY2F3NvvxuGpMOcEELcFTee9Fo7EebEVh8b0MtUrTqnjUHOdh0S3Sj79Ev4usur5n5qQYxHGqfoa9dZDBPMqxz7OgFD/gp60MHtdIhXzpEwY0ylDhI2A/q6Uwy6JpULJZbaGn1ambzWJh91wFXUl2dYXG6Q3fVR8oOPrNaCzNi7J85mXoPlDOz+qQ98Im2jk35rc3Fitnq19kmHOSG2lgSj7WSDg3dJpZhXYzixOeLJCBE9juVlIejSDQK6KsDHJK0HRIIWOooAjQ4Gk12HS/mPYiRdwvKPMQ2T8fYJTpwPie0Ygl0HON+IEtbLDHXO4s2f4rwaYjkJsYjOTyQX6OssQjUJiQJ5rU5emwVtCJwU522TejtgvtrBNnU0TWGb+p2ZA4CFd1bvhN538PowdD90GRJCiG1gvZNe+WSUvFaH6hR2LE4rs4fATVP3LC6pGE+Ex9mhz7PCIBUVI+svAj5J3eBjHKPfVyQy/XiRUUw3iacpSs02kYt/w66Bn2Co3yGttZmenqFc7ZLWF3GogGGAlaRk7+VMmGZHtbMa1K7UyQF15dqn5cxqQ4bq7OpuA4wrf36wzw9weqEOKCb60+ST0evCkHSYE2JrSTDaTjZYBVk7KE9k9jGS+TGU3sQNFaEeRfOaaITohkHLzFDVo0S8EjFaKELabofl4jJhfi9Zy2GMeUz/BFOVLGezvYyO5pm5vMxQNsbY+F5q8QidrsPpyx0qbY9de/P8g5Hed8d0TXDJDz7Cx/b0sLQwS6L8DicaSeoqjVdfhuAiK2aBOS95e1vrrpmDysIki0f/jm67Qc/ISWL9e5iL7qI3nQLZxy2EEHfFhqsgV+pB2uynmJ4gXJ5jgkmWdA2AGjZZf5lDEZdhyuiE9FFlkCWMToBeaaM7O8hoGqfZRehN09eepMc0SEf3QGKC1MhBsgWNmFaG2nm4+EPQNHK797BjaPTqmK4Lb0MFVgaeoTL1NvnyEk7MAgIqtRoz3SqDVoOcX7zlrXXXfv75aoeT5y6SKx0lFmtTP/BTtM0Mw1qJTGYcmnW5/lWILSTBaDvZYBVkwLEx2iVS1dMsVuokAo1WaKOrLoYR4hEh0KK0NYe2UmTxSdBBI+CQeYFm+4ecXixj6yXKZpxAD9ipZogEw6j5N4kFSVpdi3nf
Yf/4Hpg8R8IPKKsEpTBFKT1KPnElfNwQ3vLJKPl4nemZM4SdAVJ9OQbdUzD7JpXYQc6Zj1Btd8lRZ0Arkekf37BYlBouSws1Bt0lmlOnOHFhmkZ59Wa2xtQx7MlXKCcPwUf/CSuk+fuzywznYnzq4ICEIyGE2CIbrYKsmAVWGMGImOzxL1B3z9NrXKZiQkK1aZCiz6yhVJs6GcpYdD2wqZOxmpiNEoHnEbGy5KMxqqFJItJLMrODhZpL3CqR8V0ygw+zogaYn3yVjB8SSxdIJ5MczHiQcICbw9ucl2SxYWG1ZnBGdlJJ7uKl5SilsIKdmCHHNHTKVFSCeZV/9/5JN2oWyddm0awC56bmaV0+xtjyebKVt0kZHgt+yGLHoma7TBx8DMevQG0G0sOr2/gkHAlxR0kw2u6uHJTzepmFuaM0q0tUzRHKmk1SXybjLqEZOlY8S6fRZMQ/RYwWsPrLE8elV6sw568QWIqWFzDoniQa0amsZLD8JfarOG+URzm7rEj3apTmpsgEfWi5Am5tiVNvTpEf2IWV6qHjWyxW+zh5aoWHBnz6nBiDVoF0zzAPNT0yQxpOwwKvSY8xzURughWiLJ9/g1zrKJT2woHPQKsEkz+iajgsRnfQp5Wpr5RoLc3R0ZZpLc+QKS0QMR1W8o8TmopuvQjBFKX5C9QzB6i7PueWGiTOLfOxPT3rFjXpECSEEHdeqeHy4qyi3hliqHOWQf8SYWSFglZn3kjSCZJc0kYJ1DxD7bPE/Qq+SnJcjRLTRujzq/RZLlG/gxWskO2GNH2dldxu9PQu5mZnKKz8mNByqS8u0cKGlWVMu49Y/0M0Z06wXGyRGdpFJp0mX58nX5mByRLkxhksPE6zd4RS0yKR2s2cl6Skyui6omQUKMQiZFSLxQtv8Uq1B2tY56mdeRbrHWptn5xWZYAVqivLBLV55qwdLNZcxqb/J+nWHPVYP36sD4wIkdYcXqdNYwqcdATCYDUcnX9h43AknVWFeF8kGG13a1vXkgPEdj5Nx9lL2bM420zxROdFnNnnsdwVWt2Qth4lQoAJBGj4aBiEjFKiYSzSig+T0jrQjeNmduAOP4XeXMB2Kwx2ztKPS33SZaGhoyXHGMvHSVcu485e5K1Sh7D/EPGIxUsXlpienmblfI39Ew/B7l0czPaiN45zYfIcJ3yTkZpODzX265eptj0awQVSQZnW5aMsT50h0log1zhHOz7OZeMgZudN7NYS2fgQnewwgREjQx0tNGi5NTQzZCmxn6mwh+7FacYPDPAPDw0wP3cZc+k4S6l913UvWiMdgoQQ4s679obfYwOr27AjzWn0YoTJZi9HK0majSbZcJoSKVoqwECR0jqUSTOlBnhIrzGs5qhF+/DtHGG7ilHYTTZXoDl7gtBtseSGFJsXSUU1YkEHld4JZoS6G1Atn8WunoP+QWiuwOyP6ZZnadr9GE/HSGQPc66bIlpzGXFPojcXmO1YXOrswx8ew+ysMO3WmPKSGJcuUT/1d1TbXc4yxk+mFiE8w9kK6N0WNcdnqCeL0jT8MKQdgq5FyUR0+vri6LUiWvEU9fTT+GPPUJ96g2ytSKo6u37wkc6qQrwvEoy2u2u2rjmJAg6Qargkqh36GzZJ/yLVxfOUWtDWTZJGBiMo0zYcOlqMjL+ITZWB4BJT1j7OdvNMB3vI6kP8795pvPYS55tROsmHsWMV2vMnCJsasXSXx3fkCAp7OHc25J1qnJ5EhyfHk+xNuqTbr9PTKZGZusRI4TCkUsxVXV5dnCXenkOPeHSTOYKL0yQv/R0xr4yWztEMwFx4G9waNTyCRoedTJHtzOCj47UqaO0lCoaPHvGpaTFKxEn5ARetnSyEMfaEM+SDIo/uPkhFm6Q+vUBKKwA3ByPpECSEEHfegGPDjsy7q/FjY9AscnlyF/aKTbJksOfcf+Gh9kUWyfN966foYwmXGH3BHCPhMvlgEctQpCIGTjhNxFYsh1X+/ESJ0ZZGwYiB
pjB9D3yPaGcZVY/DgY8Ti4/Se+7vSdcnIWFA/wEatRLu3DmCxjkmj73KysED2JaOP3ecYOFvsWs1UuYgKVpEz/yYFxdjFF0NLx4jFy6SXXiJ4W6JhDaCVze57C3g+BXSmgvdJrPtcUz7YcKWw65wgbS7SK7uMpKNsug7rLgancgILXOUSxHYF6+R2qhzqnRWFeJ9kWC03d143VGzSH7xHfKaoup7zBaeohR/jNmZSwy1zuLZfbT8KEGqj260n+rsq8Sp0sMKseLzaNo4J/zH6Z99BX3lbVJhC9sfJe5HGRjaw9H6fubri2QW3+L42wnO+kO8M5cl2l1hR/N/Up9t0xvJEkRctE6AUZ9l5vgPaPcOoirz9Nc98noDMzbGYmIv3fN/T75So7c7Tbs6Rc3eAV6AGYRoqk3Kr5LDRRFioRPxPPRWk6idIMCjrztDMbYLNzmG1u6wx5sm6vSQ7hsDINM/TiYW2bC4SIcgIYS48248tpYaLqcXYKoxiBWW+cXBMrHYCO13sqhunB2RkLhvsRAZwm969HUuEFcdwkDHqFxEo4UyI2jd12h7LvVwhgFzEa//MdwdP8lA4y2SK6/hXp6jaJoUbB2ncppWdY6ZpXnaC2XC7E6SRpZosIRZPEv3jf+bWnY/drCEXa6xHMbRUjl2GiUqs6cZr5nkWi7L5T6SmSzD/mUMWqRViWozQSWMMUQRSwu4TJyT9R5ikRF2mQ0C7xIuMbwg4OJylXr2EHYmTrZ/BMexYXSUrGNDYoP6I51VhXhfJBiJ61Vn4eL3odOg7DzBBXOCbtoiYq8Q11O0/CiaVydCSIomrmHTDgxsqvQxQ0ot42tdlLJQbgtNNVFhE702zcr5MpFOlMe9U+S6K8ycOMu8vwPCfeyJLvF0568JVzQuOocZGRhixXqURlDi1NIk2cW36NdWGA4MDFNnQXsYe+kMu9pv0ohE6Hhxol6RjlfE9xW9NLDxMOgCIaDQrzQZbxm9NNN7SFbPkNLa7DaXaSR2M6zNgj/HotVDy8quzocUFyGEuOdOL9T5q2PzmKbGT6aW2cUSODFOxftwlYFlRdF8hQoVDSvPpL+DFVr0qCXy/gqB4aN8RUwrYWtt8Fuk/AU65TOc18bJVE+jtbuYQR3v5POsGIqY3qUdRui4HcL2SwRWivnxf0y2PU3QCdld+luClR9QSu6m3NVY1jPsiurMVZpMVm2y4QqxsE6oFCP1C/SH89RDA/QEYajQgDQtWipKPYwyknQZsedYbiWY8gaJR3bQUCELQYJ4N8cT6S5ZKwQ5ISfElpFgJK7nDEEsR7uyQCvh0e05wOKZVxlqrdCMpEioZSJaQCSapFkrEg/KBGYKfNABmy4f0U/TxSamFFHVYYAVVBBnsawouGcYD6cIMEn4dSY4zmH9LSJmikK3TC0w2RHOcMBYIszsYqoO7e55zM4yMdUkH7YIFaSb02iJPLnWJEnPp0wS3++SpohBGxsPjQghMZSu4YYQoYlBQLfT4hLD9GUS+EHAaefjGOYg+3ftprd1AZs0aasBTY/KwuTmHYWEEELcBQrT0MhrVfamfVKxFAunX8VpT1PQ8kwGYwT0EzM1ejqzmARUiZPxwSVCN7Cw9RDP82miUfX7GDfniXQ7jCz8NQl3lkpo4Bk9JPwmgV+lqdtUzAIdmhjEmPbz/GXrMKlgggn3HZ4KTuJ407SaTeaCJG3V5ceNDq4exWg2GNFnaSubZmgStMuYQRtfc1ikhzf1XcSNgEfD86SCNuP6DHHVId+OMNq/A9MfYGeiiRbPEkb3Uov0onIdcIaudFidfc9OrEKI2yfBSADXdldLkR94lGq5SrEbw/VDonYKPTNKNGVQm4dOPEoz+SjZxkvYQMSvEQBl0nSJ41AjxKMVOkCEPpboD5Zpt+JouBjoLBs5WoHHCBU+wjvgJuiEPhFs8o23MLuKxuW3yVkpLAtqgSLZrpKiurry01mk04miYRIjxKRMlC4aARoQYuBbaQzV
Rfk+ATYNwMIjVBBrXGYxPsCl3DOcsQ6w20mR2TdAqtZDavkU+EWoQn36BHPhMEEsL8FICCHutivd1fanCyyPZjAXZ1D1RTCT6DGHemYfNb2fPV2XeLyJHelSaXrEaBANXDqazZyZ4AK76GGRYYp8hGM0MZkNHYJ2HFePEQl1Ouj4ysamSTcwmCOL3W0yyBJlF47Ndmg0XkejTjnW4Gj8aUa9Y7T9DqZXYxdTzHlFimQxCahZOgEBO70L9LFEgMEyOU6beznrD7IvnEbTFXmqfIxTnAg/itauk0o57OuL06dqUBjDy+3ldC1KLZuEhMP8bJXFyxdI6jOrW70lGAlxx0gwEsAN3dX6D2ATp0flGUw5oMfoT2doz5/EMLpcTj7OtD/AsOrDMHqJhC0MFdKx0tRiw9jt8+heHXQdPwyJ46LhY9NAAT4RCNqEvg7Kw7YCwrBBgEGoFJoX0PYCQt3F6rYg1UMslUd3i2ihQr8yZgOXgBAdA4sQ40ooWr31X4DnddDxMHFJ0GKRAq+znxQuPZXzmM150l2L3Ogoe1IW+drp1TuX9+y/ek1RaqTLoMrTe5vNFaSNtxBC3AFXuqvlevbzsT0TLKX2ktLykE5TY5CXPI360iU+GrnA3nScdnmelFEEXFwDulhokRR+dJgT7hDp7qv0soijAuqBjRHUaBJhBZsUbfLW7OotKUIfZUAUD0t1SWg1/kHjz3nSh3eMPbTbDgt6Dxd5hId4h2GKpGixj2l2M0OFNEt+DtvwcFi9UfoiDvPKYSScIa8uQ6CoBXF2WEuMsIjdfZ1lHKqzDSbdIYyeBDXVj5XqYU8svK7ZjzG6i5SWuf3mCtLGW4hNSTASwA3d1RIOmX7IVGchmYbd++D8HDTzhJEUB2MGyeoCF+K76Xg+Tc9gnGl26ktoVsBSOEqSBZKqQRBqdIlgoq7EFjDQGGSJAJ84LgAdkkTwsLUOIToBBm6o0dFjeJUWMa1MRLVoEcO+sjLUIoqJRo0YLgYFqli4WFc+k0WbJhFsDCKE2HSZY4Rh5uijSM1PE2gaPWGRAYDly6uhaPCRq/MS9KUIqp3bnk9p4y2EEHfANd3V8oko9A8xU80TxGyyu8bIt+fpeCGNaIrjdpx06zVGYzNYmqLl6yyFMS6FfdT1NIfsi5h6kssdC9NqEgQG2bBOmg4LZJnRB5jgMilvBROF45UpWjmsIIVSOn0sE7O6GFh0sehpLtMgTkCUiKVT9NKEmDhWiyQuPUwRp01g6YSeRh0Y0lboDSvk9QqVMMGCVsAlTpIOsbBJTGmYKOyuxeXgCebDEXb4IQeHnKtTkk9GyQ+koVq//fmUNt5CbEqCkQDW6a527cFz8BHY/VMke/aS7JSheI4wEaWeiJKPLKFVL2N3QTdtnLAMGNTMAqgoTTTSfpmM3sDVkwS6QcorYyof3Qgw/NUVnigNNA8MEwJNBxSKFp3QoB3GyehVLHyK9BCnRZwmOgpQtLB5w9vJE9pJes0uOgqFTpUEC6oHNJ1RinSI4ZtxTvnjdDA5Yx4imnyE4cwITduBeGJ1xWjubVbMAqdqEaaKTUIFjGZvK+BIG28hhLgDbmiAc+1Jp4NDDp86OMD8SJZqu8vszAzZnl14BYfJmRnq1Qq+3+GcGqfpRhk1dPqiNh3S9PkeHRSv6o8Q1zqYBMwwzART6IBGiEObPu8CMTyWyPAOYxSCJhGjxTBl8lTwMWjgoKGoGybV0KJBAkc1yGp14rgEQBuLDCYmPhm9TRYX3dCZJk1dz2JHWhi+QU/YJBrpgl2gMbafSP8oA459dRfCoNUg1zgHxYtAABy+vYAjbbyF2JQEI7G+tYPmlaCAM7QakJpFsLNcXrGZbFTZ0bfIsDGLXw8IPQPVbRAzElTjY9StcZYrVSJ+A89M0UiN0WzU6KouKeoEmoFp+ViA7kGAgeYHYIFOQBSPvBfgAy3dxqSO8tu0DYj5q69TlkabCHOqgIbCQMdFo0ieH/IMi7ExCjaU
uhep+BZ/232UCkkGtTLJWIp/WEgyUIizQopSemh1O93yKVYY4Y1iLy3XZ99A6rYDjrTxFkKIO2/tWGybOidmqww4NgeHHEoNl4GVefq65/CHn+Bo5h/y2jvn0OszmG4VJ1ihbObIxuNYEbDKM4QoapF++oxl9vknedS/iOYHLJCjRoweyqRoowMZmtTCFLuZozdcWa1XlsLDJEWZNE06V1aS8DRapk2EAAOPAJ0OSUAxYLWxbJu2nsbXYuzTOuTCgKhSRGwN3weVyFPpe4pEYYRhbQq0IU5ULM4tNrCNKXLlH4PXgN6Dtx9wpNOqEJuSYCTWt3bwnHv7+mX3K19+a5lOROdk5GOUVJKh5CRheZLQbeAlBvEbLWKNWRIKPD1GybAptSOErk6XNCYeBiEhOh1cOkTwMNFQV65J0mh7EZrEUGiYBNi+T4+q0PBjVEkCUA8TXAgGsXHxsGhhs0wvJ4z9nLU/wtnE43i1RRx/gGa0l1okQyKiYyXH2GdcYkxNs7JsMWtHcWIR8pnVIpMzCzyWiAAaE/0pCTlCCHEfWDvpdGK2et125XwySr4Qh5YFMYt/lKyzd/otzus2NbOA1eiSiupozUn0+jIaIZppMWEu068X6e2uYNCgZsb4W/UkyoePc5RAr2JoIbbq8lO8TsZqYBJSIwboBIGOHa6GJ8MKiOshqz1adSL4gMJAYRgBlp4klrBIRgzQLVJuGUtTWJYBsUGIpIgQspB9lJOpp9g39QbZWBc4zIAzAUDOGoOUB0qD/gMScoS4wyQYic1tsOw+0Z8mR52LFxb4f2s7yXgJRn0dK5mllt3PQOmPGWhPkdI06vF+tG6N3d3XaWGzgsMsvaRoYNNGxyKwIhgAnofvmSgLfCuKhoFDBw2fBqvhpEKKZRwMFE09xaBeZmc4SzlIUfQcLjPI2+zmNKN4XkjTyOJaOZ7YmeOhAYf+tM35pQYxT6dhZykGBVK2efX6KhIFVMPF8aR5ghBC3I/W3a7cdxDsLCtmgcapv2Os/BL9fkhY2I/ZN0TEqzF/ZoZSt8k0WYyoTaY9RQNFWYvTQ40kHZ7Q3mFey1HW0gSawYBewqZNXLm4WDSIUsTBoUFEd9GVjmeGhESI0sUjJEqIiwFXHsvrdQItJKo5RJQFgQumDrk9MPQ4DBwAIwKXXyORHGGX3iDntiBauHp9VT4ZhaYHflaaJwixRSQYic1tsOyeT0bJx+tk44u4NZOi22XWGMZLTpDLDVLf9b/RuVwm6S6hW3GsxhwRPAKiJE3w/SYKRZUkMUJsO4Hmd2mgUyaBRoS8ViMKKKWxgsM72jhx00V5iouqj3FtkQFVJK21CHWdi8EAr3CQZb2Hs/4oepjiyYEkqajF/kGHn9nfd/VM48m5Ol09gzYwzkMx62pxXduaIc0ThBDi/rXuduUr9Wputso84+xI7iJSnyYTVshoccgVaO9+knculCh1TXrVCn1M0sLiLDtoEKWfeUZZJmKGvMVeLtPHk5xiH5cJzZBJdjBNgUEWSdIhqikwLXyiRHAxCOkSJSTEpEuARUgUXdOxLItI2AXdhsxuGH4MDv0T6Nm3Ov65tyFik3KLpEZ2QuyZ1e3s1dl3P580TxBiS0kwEhvarOV0qeGy1EoxMHiAn8lVWJo6wUInypsRg2Yn5OmDj1MY0WkunidsezQU4KZYjI7TFy6TaC7QDELOsoNes0tPxKCVylOqd1gJYuAM03RnsPwGWtBlqhvjjLGLg9o0HQLeUntIa11GKDFNL/N+AYUibSlOx/bQaMdIBRqWrjMYaTNaO0P1nTja+CMMOAUeG8sCir5UlI4fAtdf1CvNE4QQ4j61WcvpZpHhziTG0DDl/K9SnXqLgjvDPq9MotsgfeBn6R2MYc1PoTXncS8uYdGhnH6UH9RS/JPu9zjABXKmTxaF6RdpO3spmr2Y7RV0I8tE2MJ2ffwwidLaeHoU3/OJ0EGh4RIjRCNOGwhp4BDE+8hobQjagE4z3st8
O0H79GmGVJJc79Dq5xl+EjQFqX7wOlCfh/rC6mdLFKR5ghBbTIKR2NBmqybz1Q7nalGCvr0c7Pdw/Arp4iJ1v4rWN0aiM0WtXiO176epqjyzkb2UV5ZodQOifTp9xVcoz84wG/QRaF06eoTFxGNEkz4tX8czk7yu9mNoNR7Tj1HyTCJmjGQ0StTyIejhR/oniGgZLtLHoorw0fA44+YK5WgN18qRSURQgNmYxe68Tss0CG2H3Y8M8RO7Vz/PtfvUrw1D0jxBCCHuU5utmlRnyTQukOmJUOqf4NWGS3FZI5caJDE4yFJbg9o8B/Y/xIDWRyN8G6M6jZ0J6Yw/hXvmGF59Ds0wGTXrKEvRyo7RSR8gvvwmw1GDQW+FlUaEpXaelLaEr5tUTQddJYiHLSy7QJUUfucSRrdOaERohyaJaJxoLAmpAdquR730Dm1rjpV0YTUYJQqw6+Orn2Pt+t7kwHX31pPmCUJsLQlGYkObrZoMODbVtsf0SpNqO8L+gWeoM4VqpRnNJWi2+5kLm4y0NQZiJV6LjnGm6bJXm0X1PYW97xFir/wP8m4C4inKQRcr7tATUwQ1j9DtoMeHqJh7WQzj+CuLxAsT9FhR2otn2N+a5s1wJ8sDT7HfrLIwq/O68TF2FRLs6N3Dk/3DVNoeF5eaFLUCc6mHMU2N3TecZbu2w5HckFUIIT4ENls1cYagfAlmj5K3bJ7pcal3mqQGD8KuJ+i98GNi+gyqk2CJCMMJm24zIOUu8cnhkHLy/+DY8Tip3ABG0KLgzWGMfYSZuUUSKiSWGcRWBvFYC7ul0/FTtK0cxSCO3jqHqcpEcWHo48y2Jxgs/j1R3STM7ULrGYPcIJSniDfrZPM50lacnmB+dRXs2sBzbWdY7/bvpSeEeH8kGIkNbbZqkk9GcWId3rhUBtXE2d/LwK6PsuNKuMAZIojl6e2cJdO4QEE5vBHpQzlJdvTnyVROkxlMcTAS5zK9nJqtUApiBFYPyR1JgkaDfnoYThdQC3Vsr82+nX0sqBFOLsWY933GtRki5jiN9B6aLYtUtp9dB/s5PJ4nn4zyV8fnePNyhYYep2ocxPQ1zlzo8g9jdXb3pa77jDd2OBJCCHGf2mzVJFEA34WF4xBJk9n9U2RikatBI9M/TiYW4VQrxdzlCziBRcoZREvso2TkOR8d4uTgCP1OlMONF0jXFyl4FwnGHiHaiJMoOOCXyRTPEwazTKcfQx96lPFLf0Xc9zDaXfzQw27MEs3vwtQmiDl9OHt/BnY8A4snYOE4cUsxNjQO5Wm49L/Ar8GBz7z7uTbqDCuE2FISjMT7NuDYPLYjA2jrbj9b7aAzDtUI+x0Hr9fk4KBDNrgIbgUyI5DbTcrMkdAXObYY5VgtzXg0wehgnD40Vpouc2GeRGIXu7Mj2IkeKpU8scYSg5RJ9u/gjWUTFXPpz8YYySWA1S1yoDGWT2BZGsmIyXylzVSpxYm56tVgdO1nufZPIYQQH1KDD7/7540h6srfexsuAHYfxO0InhphrhYll9B4YjzLcCZGuvk4kcuzJJuzPGLHYWgM3BpggJ0hbs6Qy+VJ7HucXKIG8w4NzaK7PEVa6xKPdGDsoxBzwE6vbgFs1yDZA84O6HsIKrOrQa56afX7N4YfuaZIiLtKU0qpez2IO6lWq+E4DtVqlXQ6fa+HI9Zz5cLZFbPAnJdkwLE5vVDje2/OUm52ycYtduSTjORi1Ds+ugZjhSQT/amr1z3t6UtycMjhr47P8b9OLzHkRJkYcEjHIgAs1Vx60xGcWIRqu8tSrYtt6bh+wMFB56ZgJIS4M+QYvD6Zl/vbdc2GtPq7zR0uvQTH/x/QDYhlVu8flOqH3n3guavhq2ffuys7PfuhcgnOPL/6+L5PrTZQKF4EAijsAftKu+3qLMy8BhiQ3yX3JRJii9zO8VdWjMQH9l7d62763jUtVde2r4FGTyLK3t4kI7kE6ZhJX8qm44fr/ty1lZ10LMKObILHxrI4
sdU7g/emI+zpS159Xanh4sQ2v35os88ghBDiQ+g9utdd+73rdjzMnX53+1rUgdwY2PnVPzUFtrN6z6T1rglyhqBdWX3uwKOr4cjrAAFEMze/jsOb35Nos88ghLjjJBiJD+w9u9dt8L1rt68NODbOlfsJbRZMbtyuN9Gfuvq6a3/ujVv63ivsyH2LhBDiAfMe3es2/N61IccZWl0peq9gcu2Wvf4D777m6s9bJwDdSoc5uW+REHeVBCPxgb1X97qNvrfuNUnr2Gw151Z/xnuRa4yEEOIB817d6zb63nrXJa1no9WcDa5rel/kGiMh7ioJRuIDe6/udR90BeZurObIfYuEEOIB817d6z7oCszdWM2R+xYJcVdJMBL3PVnNEUIIcd+R1RwhHjgSjMR9T1ZzhBBC3HdkNUeIB45+rwcghBBCCCGEEPeaBCPxoVZquJyYrVK6crM+IYQQ4p5rFlfvbdQs3uuRCCFug2ylEx9q0mZbCCHEfUfabAvxoSTBSHyoSWMGIYQQ9x1pzCDEh5IEI/GhJo0ZhBBC3HekMYMQH0pyjZEQQgghhBBi25NgJIQQQgghhNj2JBgJIYQQQgghtj0JRkIIIYQQQohtT4KREEIIIYQQYtuTYCSEEEIIIYTY9iQYCSGEEEIIIbY9CUZCCCGEEEKIbU+CkRBCCCGEEGLbk2AkhBBCCCGE2PYkGAkhhBBCCCG2PQlGQgghhBBCiG1PgpEQQgghhBBi25NgJIQQQgghhNj2JBgJIYQQQgghtj0JRkIIIYQQQohtT4KREEIIIYQQYtuTYCSEEEIIIYTY9iQYCSGEEEIIIbY9CUZCCCGEEEKIbW/LglG5XObIkSM4joPjOBw5coRKpXLLr//85z+Ppml861vf2qohCiGE2GakNgkhhNjIlgWjz33uc7z11ls8//zzPP/887z11lscOXLkll773//7f+fVV19lcHBwq4YnhBBiG5LaJIQQYiPmVvzQU6dO8fzzz/PKK6/w5JNPAvCd73yHp59+mjNnzrBv374NXzs7O8sXvvAF/vqv/5qf//mff8/3cl0X13Wv/r1Wq33wDyCEEOKBI7VJCCHEZrZkxejll1/GcZyrhQfgqaeewnEcXnrppQ1fF4YhR44c4bd/+7c5cODALb3XN77xjatbIhzHYWRk5AOPXwghxINHapMQQojNbEkwWlhYoLe396bHe3t7WVhY2PB13/zmNzFNky9+8Yu3/F5f/epXqVarV7+mp6ff15iFEEI82KQ2CSGE2MxtBaOvf/3raJq26dfRo0cB0DTtptcrpdZ9HOD111/nd3/3d/mjP/qjDZ+znmg0Sjqdvu5LCCHE9iG1SQghxJ1wW9cYfeELX+C5557b9DljY2McO3aMxcXFm763vLxMX1/fuq978cUXWVpaYnR09OpjQRDwW7/1W3zrW99iamrqdoYqhBBim5DaJIQQ4k64rWBUKBQoFArv+bynn36aarXKa6+9xuHDhwF49dVXqVarPPPMM+u+5siRI3zyk5+87rGf+7mf48iRI/yzf/bPbmeYQgghthGpTUIIIe6ELelKt3//fj71qU/xq7/6q/z+7/8+AL/2a7/Gpz/96eu6/kxMTPCNb3yDX/iFXyCfz5PP56/7OZZl0d/fv2mnICGEEOJWSG0SQgixmS27j9Ef//Efc+jQIZ599lmeffZZHn74Yf7Tf/pP1z3nzJkzVKvVrRqCEEIIcR2pTUIIITaiKaXUvR7EnVSr1XAch2q1Khe7CiHEXSbH4PXJvAghxL1xO8ffLVsxEkIIIYQQQogPCwlGQgghhBBCiG1PgpEQQgghhBBi25NgJIQQQgghhNj2JBgJIYQQQgghtj0JRkIIIYQQQohtT4KREEIIIYQQYtuTYCSEEEIIIYTY9iQYCSGEEEIIIbY9CUZCCCGEEEKIbU+CkRBCCCGEEGLbk2AkhBBCCCGE2PYkGAkhhBBCCCG2PQlGQgghhBBCiG1PgpEQQgghhBBi
25NgJIQQQgghhNj2JBgJIYQQQgghtj0JRkIIIYQQQohtT4KREEIIIYQQYtuTYCSEEEIIIYTY9iQYCSGEEEIIIbY9CUZCCCGEEEKIbU+CkRBCCCGEEGLbk2AkhBBCCCGE2PYkGAkhhBBCCCG2PQlGQgghhBBCiG3PvNcDuNOUUgDUarV7PBIhhNh+1o69a8disUpqkxBC3Bu3U5ceuGBUr9cBGBkZuccjEUKI7ater+M4zr0exn1DapMQQtxbt1KXNPWAndYLw5C5uTlSqRSapt3r4VCr1RgZGWF6epp0On2vh3PfkHnZmMzN+mReNnY/zY1Sinq9zuDgILouu7XX3E+16X76fbnfyNysT+ZlYzI367uf5uV26tIDt2Kk6zrDw8P3ehg3SafT9/wX434k87IxmZv1ybxs7H6ZG1kputn9WJvul9+X+5HMzfpkXjYmc7O++2VebrUuyek8IYQQQgghxLYnwUgIIYQQQgix7Ukw2mLRaJSvfe1rRKPRez2U+4rMy8ZkbtYn87IxmRtxO+T3ZWMyN+uTedmYzM36Pqzz8sA1XxBCCCGEEEKI2yUrRkIIIYQQQohtT4KREEIIIYQQYtuTYCSEEEIIIYTY9iQYCSGEEEIIIbY9CUZCCCGEEEKIbU+C0R1WLpc5cuQIjuPgOA5HjhyhUqnc8us///nPo2ka3/rWt7ZsjPfK7c6N53l8+ctf5tChQyQSCQYHB/nlX/5l5ubm7t6gt8C3v/1txsfHsW2bxx9/nBdffHHT5//whz/k8ccfx7Ztdu7cyX/8j//xLo307rudufmzP/szfvZnf5aenh7S6TRPP/00f/3Xf30XR3v33O7vzJof/ehHmKbJo48+urUDFPc9qU0bk9q0SmrTxqQ2re+BrE1K3FGf+tSn1MGDB9VLL72kXnrpJXXw4EH16U9/+pZe+73vfU898sgjanBwUP3bf/tvt3ag98Dtzk2lUlGf/OQn1Xe/+111+vRp9fLLL6snn3xSPf7443dx1HfWf/kv/0VZlqW+853vqJMnT6ovfelLKpFIqEuXLq37/IsXL6p4PK6+9KUvqZMnT6rvfOc7yrIs9d/+23+7yyPferc7N1/60pfUN7/5TfXaa6+ps2fPqq9+9avKsiz1xhtv3OWRb63bnZc1lUpF7dy5Uz377LPqkUceuTuDFfctqU0bk9oktWkzUpvW96DWJglGd9DJkycVoF555ZWrj7388ssKUKdPn970tTMzM2poaEidOHFC7dix44ErPh9kbq712muvKeA9/8e7Xx0+fFj9+q//+nWPTUxMqK985SvrPv9f/st/qSYmJq577POf/7x66qmntmyM98rtzs16HnroIfU7v/M7d3po99T7nZd/+k//qfpX/+pfqa997Wv3ZfERd4/Upo1JbVoltWljUpvW96DWJtlKdwe9/PLLOI7Dk08+efWxp556CsdxeOmllzZ8XRiGHDlyhN/+7d/mwIEDd2Ood937nZsbVatVNE0jk8lswSi3Vrfb5fXXX+fZZ5+97vFnn312wzl4+eWXb3r+z/3cz3H06FE8z9uysd5t72dubhSGIfV6nVwutxVDvCfe77z84R/+IRcuXOBrX/vaVg9RfAhIbdqY1CapTZuR2rS+B7k2mfd6AA+ShYUFent7b3q8t7eXhYWFDV/3zW9+E9M0+eIXv7iVw7un3u/cXKvT6fCVr3yFz33uc6TT6Ts9xC1XLBYJgoC+vr7rHu/r69twDhYWFtZ9vu/7FItFBgYGtmy8d9P7mZsb/Zt/829oNpt89rOf3Yoh3hPvZ17OnTvHV77yFV588UVMUw7xQmrTZqQ2SW3ajNSm9T3ItUlWjG7B17/+dTRN2/Tr6NGjAGiadtPrlVLrPg7w+uuv87u/+7v80R/90YbPuZ9t5dxcy/M8nnvuOcIw5Nvf/vYd/xx3042f973mYL3nr/f4g+B252bNn/7pn/L1r3+d7373u+v+I+fD7lbnJQgCPve5z/E7
v/M77N27924NT9wjUps2JrXp9klt2pjUpvU9iLXp/o1s95EvfOELPPfcc5s+Z2xsjGPHjrG4uHjT95aXl29K1WtefPFFlpaWGB0dvfpYEAT81m/9Ft/61reYmpr6QGPfals5N2s8z+Ozn/0sk5OTfP/73/9QnpEDKBQKGIZx09mUpaWlDeegv79/3eebpkk+n9+ysd5t72du1nz3u9/ln//zf85//a//lU9+8pNbOcy77nbnpV6vc/ToUd58802+8IUvAKvbOJRSmKbJ3/zN3/DTP/3Td2XsYutJbdqY1KZbJ7VpY1Kb1vdA16Z7cWHTg2rtIs5XX3316mOvvPLKphdxFotFdfz48eu+BgcH1Ze//OXbuvDzfvd+5kYppbrdrvrH//gfqwMHDqilpaW7MdQtdfjwYfUbv/Eb1z22f//+TS9w3b9//3WP/fqv//oDe4Hr7cyNUkr9yZ/8ibJtW33ve9/b4tHdO7czL0EQ3HQ8+Y3f+A21b98+dfz4cdVoNO7WsMV9RGrTxqQ2rZLatDGpTet7UGuTBKM77FOf+pR6+OGH1csvv6xefvlldejQoZvafu7bt0/92Z/92YY/40Hs/KPU7c+N53nqM5/5jBoeHlZvvfWWmp+fv/rluu69+Agf2Fp7yz/4gz9QJ0+eVL/5m7+pEomEmpqaUkop9ZWvfEUdOXLk6vPXWqL+i3/xL9TJkyfVH/zBHzzwLVFvdW7+5E/+RJmmqX7v937vut+NSqVyrz7ClrjdebnR/dr5R9xdUps2JrVJatNmpDat70GtTRKM7rBSqaR+6Zd+SaVSKZVKpdQv/dIvqXK5fN1zAPWHf/iHG/6MB7X43O7cTE5OKmDdrxdeeOGuj/9O+b3f+z21Y8cOFYlE1GOPPaZ++MMfXv3er/zKr6iPf/zj1z3/Bz/4gfrIRz6iIpGIGhsbU//hP/yHuzziu+d25ubjH//4ur8bv/Irv3L3B77Fbvd35lr3a/ERd5fUpo1JbVoltWljUpvW9yDWJk2pK1fLCSGEEEIIIcQ2JV3phBBCCCGEENueBCMhhBBCCCHEtifBSAghhBBCCLHtSTASQgghhBBCbHsSjIQQQgghhBDbngQjIYQQQgghxLYnwUgIIYQQQgix7UkwEkIIIYQQQmx7EoyEEEIIIYQQ254EIyGEEEIIIcS2J8FICCGEEEIIse39/wWFGr3ocfpZAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "fig, axs = plt.subplots(1, 2, figsize=(10,5))\n", + "axs[0].scatter(samples[\"parameters\"][0, :, 0], samples[\"parameters\"][0, :, 1], alpha=0.2, s=1, label=\"General\")\n", + "axs[0].scatter(\n", + " edm_samples[\"parameters\"][0, :, 0],\n", + " edm_samples[\"parameters\"][0, :, 1],\n", + " alpha=0.2,\n", + " s=1,\n", + " label=\"EDM (specialized)\",\n", + ")\n", + "\n", + "axs[1].scatter(edm_samples[\"parameters\"][0, :, 0], edm_samples[\"parameters\"][0, :, 1], alpha=0.2, s=1, label=\"EDM (specialized)\")\n", + "axs[1].scatter(\n", + " samples[\"parameters\"][0, :, 0],\n", + " samples[\"parameters\"][0, :, 1],\n", + " alpha=0.2,\n", + " s=1,\n", + " label=\"General\",\n", + ")\n", + "\n", + "for ax in axs:\n", + " ax.set_aspect(\"equal\", adjustable=\"box\")\n", + " ax.set_xlim([-0.5, 0.5])\n", + " ax.set_ylim([-0.5, 0.5])\n", + " ax.legend()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + }, + "toc": { + "base_numbering": 1, + "nav_menu": {}, + "number_sections": true, + "sideBar": true, + "skip_h1_title": true, + "title_cell": "Table of Contents", + "title_sidebar": "Contents", + "toc_cell": true, + "toc_position": { + "height": "calc(100% - 180px)", + "left": "10px", + "top": "150px", + "width": "165px" + }, + "toc_section_display": true, + "toc_window_display": true + }, + "widgets": { + "application/vnd.jupyter.widget-state+json": { + "state": {}, + "version_major": 2, + "version_minor": 0 + } + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 67943423cd7ece1a5e224c8971716b672421de4c Mon Sep 17 00:00:00 2001 From: arrjon Date: 
Tue, 29 Apr 2025 13:42:10 +0200 Subject: [PATCH 49/52] add loss types --- bayesflow/experimental/diffusion_model.py | 101 ++++++++++++++-------- 1 file changed, 67 insertions(+), 34 deletions(-) diff --git a/bayesflow/experimental/diffusion_model.py b/bayesflow/experimental/diffusion_model.py index bb38a9d9e..1d8f535ad 100644 --- a/bayesflow/experimental/diffusion_model.py +++ b/bayesflow/experimental/diffusion_model.py @@ -27,6 +27,14 @@ class VarianceType(Enum): EXPLODING = "exploding" +class PredictionType(Enum): + VELOCITY = "velocity" + NOISE = "noise" + X = "x" + F = "F" # EDM + SCORE = "score" + + @serializable class NoiseSchedule(ABC): r"""Noise schedule for diffusion models. We follow the notation from [1]. @@ -45,11 +53,12 @@ class NoiseSchedule(ABC): Augmentation: Kingma et al. (2023) """ - def __init__(self, name: str, variance_type: VarianceType): + def __init__(self, name: str, variance_type: VarianceType, weighting: str = None): self.name = name self.variance_type = variance_type # 'exploding' or 'preserving' self._log_snr_min = -15 # should be set in the subclasses self._log_snr_max = 15 # should be set in the subclasses + self.weighting = weighting @abstractmethod def get_log_snr(self, t: Union[float, Tensor], training: bool) -> Tensor: @@ -113,10 +122,18 @@ def get_weights_for_snr(self, log_snr_t: Tensor) -> Tensor: """Get weights for the signal-to-noise ratio (snr) for a given log signal-to-noise ratio (lambda). Default is 1. Generally, weighting functions should be defined for a noise prediction loss. """ - # sigmoid: ops.sigmoid(-log_snr_t + 2), based on Kingma et al. (2023) - # min-snr with gamma = 5, based on Hang et al. (2023) - # 1 / ops.cosh(log_snr_t / 2) * ops.minimum(ops.ones_like(log_snr_t), gamma * ops.exp(-log_snr_t)) - return ops.ones_like(log_snr_t) + if self.weighting is None: + return ops.ones_like(log_snr_t) + elif self.weighting == "sigmoid": + # sigmoid weighting based on Kingma et al. 
(2023) + return ops.sigmoid(-log_snr_t + 2) + elif self.weighting == "likelihood_weighting": + # likelihood weighting based on Song et al. (2021) + g_squared = self.get_drift_diffusion(log_snr_t=log_snr_t) + sigma_t = self.get_alpha_sigma(log_snr_t=log_snr_t, training=True)[1] + return g_squared / ops.square(sigma_t) + else: + raise ValueError(f"Unknown weighting type: {self.weighting}") def get_config(self): return dict(name=self.name, variance_type=self.variance_type) @@ -154,7 +171,9 @@ class LinearNoiseSchedule(NoiseSchedule): """ def __init__(self, min_log_snr: float = -15, max_log_snr: float = 15): - super().__init__(name="linear_noise_schedule", variance_type=VarianceType.PRESERVING) + super().__init__( + name="linear_noise_schedule", variance_type=VarianceType.PRESERVING, weighting="likelihood_weighting" + ) self._log_snr_min = min_log_snr self._log_snr_max = max_log_snr @@ -190,14 +209,6 @@ def derivative_log_snr(self, log_snr_t: Tensor, training: bool) -> Tensor: factor = ops.exp(-log_snr_t) / (1 + ops.exp(-log_snr_t)) return -factor * dsnr_dt - def get_weights_for_snr(self, log_snr_t: Tensor) -> Tensor: - """Get weights for the signal-to-noise ratio (snr) for a given log signal-to-noise ratio (lambda). - Default is the likelihood weighting based on Song et al. (2021). 
- """ - g_squared = self.get_drift_diffusion(log_snr_t=log_snr_t) - sigma_t = self.get_alpha_sigma(log_snr_t=log_snr_t, training=True)[1] - return g_squared / ops.square(sigma_t) - def get_config(self): return dict(min_log_snr=self._log_snr_min, max_log_snr=self._log_snr_max) @@ -214,8 +225,10 @@ class CosineNoiseSchedule(NoiseSchedule): [1] Diffusion models beat gans on image synthesis: Dhariwal and Nichol (2022) """ - def __init__(self, min_log_snr: float = -15, max_log_snr: float = 15, s_shift_cosine: float = 0.0): - super().__init__(name="cosine_noise_schedule", variance_type=VarianceType.PRESERVING) + def __init__( + self, min_log_snr: float = -15, max_log_snr: float = 15, s_shift_cosine: float = 0.0, weighting: str = "sigmoid" + ): + super().__init__(name="cosine_noise_schedule", variance_type=VarianceType.PRESERVING, weighting=weighting) self._s_shift_cosine = s_shift_cosine self._log_snr_min = min_log_snr self._log_snr_max = max_log_snr @@ -252,12 +265,6 @@ def derivative_log_snr(self, log_snr_t: Tensor, training: bool) -> Tensor: factor = ops.exp(-log_snr_t) / (1 + ops.exp(-log_snr_t)) return -factor * dsnr_dt - def get_weights_for_snr(self, log_snr_t: Tensor) -> Tensor: - """Get weights for the signal-to-noise ratio (snr) for a given log signal-to-noise ratio (lambda). - Default is the sigmoid weighting based on Kingma et al. (2023). 
- """ - return ops.sigmoid(-log_snr_t + 2) - def get_config(self): return dict(min_log_snr=self._log_snr_min, max_log_snr=self._log_snr_max, s_shift_cosine=self._s_shift_cosine) @@ -345,6 +352,7 @@ def derivative_log_snr(self, log_snr_t: Tensor, training: bool) -> Tensor: def get_weights_for_snr(self, log_snr_t: Tensor) -> Tensor: """Get weights for the signal-to-noise ratio (snr) for a given log signal-to-noise ratio (lambda).""" + # for F-prediction: w = (ops.exp(-log_snr_t) + sigma_data^2) / (ops.exp(-log_snr_t)*sigma_data^2) return ops.exp(-log_snr_t) / ops.square(self.sigma_data) + 1 def get_config(self): @@ -384,7 +392,7 @@ def __init__( integrate_kwargs: dict[str, any] = None, subnet_kwargs: dict[str, any] = None, noise_schedule: str | NoiseSchedule = "cosine", - prediction_type: str = "velocity", + prediction_type: PredictionType = "velocity", **kwargs, ): """ @@ -431,13 +439,21 @@ def __init__( # validate noise model self.noise_schedule.validate() - if prediction_type not in ["velocity", "noise", "F"]: # F is EDM + if prediction_type in [PredictionType.NOISE, PredictionType.VELOCITY, PredictionType.F]: # F is EDM raise ValueError(f"Unknown prediction type: {prediction_type}") - self.prediction_type = prediction_type - if noise_schedule.name == "edm_noise_schedule" and prediction_type != "F": + self._prediction_type = prediction_type + if noise_schedule.name == "edm_noise_schedule" and prediction_type != PredictionType.F: warnings.warn( "EDM noise schedule is build for F-prediction. Consider using F-prediction instead.", ) + self._loss_type = kwargs.get("loss_type", PredictionType.NOISE) + if self._loss_type not in [PredictionType.NOISE, PredictionType.VELOCITY, PredictionType.F]: + raise ValueError(f"Unknown loss type: {self._loss_type}") + if self._loss_type != PredictionType.NOISE: + warnings.warn( + "the standard schedules have weighting functions defined for the noise prediction loss. 
" + "You might want to replace them, if you use a different loss function." + ) # clipping of prediction (after it was transformed to x-prediction) self._clip_min = -5.0 @@ -489,7 +505,8 @@ def get_config(self): "subnet": self.subnet, "noise_schedule": self.noise_schedule, "integrate_kwargs": self.integrate_kwargs, - "prediction_type": self.prediction_type, + "prediction_type": self._prediction_type, + "loss_type": self._loss_type, } return base_config | serialize(config) @@ -501,18 +518,18 @@ def convert_prediction_to_x( self, pred: Tensor, z: Tensor, alpha_t: Tensor, sigma_t: Tensor, log_snr_t: Tensor, clip_x: bool ) -> Tensor: """Convert the prediction of the neural network to the x space.""" - if self.prediction_type == "velocity": + if self._prediction_type == PredictionType.VELOCITY: # convert v into x x = alpha_t * z - sigma_t * pred - elif self.prediction_type == "noise": + elif self._prediction_type == PredictionType.NOISE: # convert noise prediction into x x = (z - sigma_t * pred) / alpha_t - elif self.prediction_type == "F": # EDM + elif self._prediction_type == PredictionType.F: # EDM sigma_data = self.noise_schedule.sigma_data x1 = (sigma_data**2 * alpha_t) / (ops.exp(-log_snr_t) + sigma_data**2) x2 = ops.exp(-log_snr_t / 2) * sigma_data / ops.sqrt(ops.exp(-log_snr_t) + sigma_data**2) x = x1 * z + x2 * pred - elif self.prediction_type == "x": + elif self._prediction_type == PredictionType.X: x = pred else: # "score" x = (z + sigma_t**2 * pred) / alpha_t @@ -757,10 +774,26 @@ def compute_metrics( pred=pred, z=diffused_x, alpha_t=alpha_t, sigma_t=sigma_t, log_snr_t=log_snr_t, clip_x=False ) - # convert x to epsilon prediction - noise_pred = (diffused_x - alpha_t * x_pred) / sigma_t # Calculate loss - loss = weights_for_snr * ops.mean((noise_pred - eps_t) ** 2, axis=-1) + if self._loss_type == PredictionType.NOISE: + # convert x to epsilon prediction + noise_pred = (diffused_x - alpha_t * x_pred) / sigma_t + loss = weights_for_snr * ops.mean((noise_pred - 
eps_t) ** 2, axis=-1) + elif self._loss_type == PredictionType.VELOCITY: + # convert x to velocity prediction + velocity_pred = (alpha_t * diffused_x - x_pred) / sigma_t + v_t = alpha_t * eps_t - sigma_t * x + loss = weights_for_snr * ops.mean((velocity_pred - v_t) ** 2, axis=-1) + elif self._loss_type == PredictionType.F: + # convert x to F prediction + sigma_data = self.noise_schedule.sigma_data + x1 = ops.sqrt(ops.exp(-log_snr_t) + sigma_data**2) / (ops.exp(-log_snr_t / 2) * sigma_data) + x2 = (sigma_data * alpha_t) / (ops.exp(-log_snr_t / 2) * ops.sqrt(ops.exp(-log_snr_t) + sigma_data**2)) + f_pred = x1 * x_pred - x2 * diffused_x + f_t = x1 * x - x2 * diffused_x + loss = weights_for_snr * ops.mean((f_pred - f_t) ** 2, axis=-1) + else: + raise ValueError(f"Unknown loss type: {self._loss_type}") # apply sample weight loss = weighted_mean(loss, sample_weight) From 7c527a59487e972efe755b93822ee7ca32d31259 Mon Sep 17 00:00:00 2001 From: arrjon Date: Tue, 29 Apr 2025 13:51:50 +0200 Subject: [PATCH 50/52] add loss types --- bayesflow/experimental/diffusion_model.py | 58 ++++++++--------------- 1 file changed, 21 insertions(+), 37 deletions(-) diff --git a/bayesflow/experimental/diffusion_model.py b/bayesflow/experimental/diffusion_model.py index 1d8f535ad..6e817c451 100644 --- a/bayesflow/experimental/diffusion_model.py +++ b/bayesflow/experimental/diffusion_model.py @@ -4,7 +4,6 @@ import keras from keras import ops import warnings -from enum import Enum from bayesflow.utils.serialization import serialize, deserialize, serializable from bayesflow.types import Tensor, Shape @@ -22,19 +21,6 @@ ) -class VarianceType(Enum): - PRESERVING = "preserving" - EXPLODING = "exploding" - - -class PredictionType(Enum): - VELOCITY = "velocity" - NOISE = "noise" - X = "x" - F = "F" # EDM - SCORE = "score" - - @serializable class NoiseSchedule(ABC): r"""Noise schedule for diffusion models. We follow the notation from [1]. 
@@ -53,7 +39,7 @@ class NoiseSchedule(ABC): Augmentation: Kingma et al. (2023) """ - def __init__(self, name: str, variance_type: VarianceType, weighting: str = None): + def __init__(self, name: str, variance_type: str, weighting: str = None): self.name = name self.variance_type = variance_type # 'exploding' or 'preserving' self._log_snr_min = -15 # should be set in the subclasses @@ -90,9 +76,9 @@ def get_drift_diffusion(self, log_snr_t: Tensor, x: Tensor = None, training: boo beta = self.derivative_log_snr(log_snr_t=log_snr_t, training=training) if x is None: # return g^2 only return beta - if self.variance_type == VarianceType.PRESERVING: + if self.variance_type == "preserving": f = -0.5 * beta * x - elif self.variance_type == VarianceType.EXPLODING: + elif self.variance_type == "exploding": f = ops.zeros_like(beta) else: raise ValueError(f"Unknown variance type: {self.variance_type}") @@ -106,11 +92,11 @@ def get_alpha_sigma(self, log_snr_t: Tensor, training: bool) -> tuple[Tensor, Te sigma(t) = sqrt(sigmoid(-log_snr_t)) For a variance exploding schedule, one should set alpha^2 = 1 and sigma^2 = exp(-lambda) """ - if self.variance_type == VarianceType.PRESERVING: + if self.variance_type == "preserving": # variance preserving schedule alpha_t = ops.sqrt(ops.sigmoid(log_snr_t)) sigma_t = ops.sqrt(ops.sigmoid(-log_snr_t)) - elif self.variance_type == VarianceType.EXPLODING: + elif self.variance_type == "exploding": # variance exploding schedule alpha_t = ops.ones_like(log_snr_t) sigma_t = ops.sqrt(ops.exp(-log_snr_t)) @@ -171,9 +157,7 @@ class LinearNoiseSchedule(NoiseSchedule): """ def __init__(self, min_log_snr: float = -15, max_log_snr: float = 15): - super().__init__( - name="linear_noise_schedule", variance_type=VarianceType.PRESERVING, weighting="likelihood_weighting" - ) + super().__init__(name="linear_noise_schedule", variance_type="preserving", weighting="likelihood_weighting") self._log_snr_min = min_log_snr self._log_snr_max = max_log_snr @@ -228,7 
+212,7 @@ class CosineNoiseSchedule(NoiseSchedule): def __init__( self, min_log_snr: float = -15, max_log_snr: float = 15, s_shift_cosine: float = 0.0, weighting: str = "sigmoid" ): - super().__init__(name="cosine_noise_schedule", variance_type=VarianceType.PRESERVING, weighting=weighting) + super().__init__(name="cosine_noise_schedule", variance_type="preserving", weighting=weighting) self._s_shift_cosine = s_shift_cosine self._log_snr_min = min_log_snr self._log_snr_max = max_log_snr @@ -283,7 +267,7 @@ class EDMNoiseSchedule(NoiseSchedule): """ def __init__(self, sigma_data: float = 1.0, sigma_min: float = 1e-4, sigma_max: float = 80.0): - super().__init__(name="edm_noise_schedule", variance_type=VarianceType.PRESERVING) + super().__init__(name="edm_noise_schedule", variance_type="preserving") self.sigma_data = sigma_data # training settings self.p_mean = -1.2 @@ -392,7 +376,7 @@ def __init__( integrate_kwargs: dict[str, any] = None, subnet_kwargs: dict[str, any] = None, noise_schedule: str | NoiseSchedule = "cosine", - prediction_type: PredictionType = "velocity", + prediction_type: str = "velocity", **kwargs, ): """ @@ -439,17 +423,17 @@ def __init__( # validate noise model self.noise_schedule.validate() - if prediction_type in [PredictionType.NOISE, PredictionType.VELOCITY, PredictionType.F]: # F is EDM + if prediction_type not in ["noise", "velocity", "F"]: # F is EDM raise ValueError(f"Unknown prediction type: {prediction_type}") self._prediction_type = prediction_type - if noise_schedule.name == "edm_noise_schedule" and prediction_type != PredictionType.F: + if noise_schedule.name == "edm_noise_schedule" and prediction_type != "F": warnings.warn( "EDM noise schedule is build for F-prediction. 
Consider using F-prediction instead.", ) - self._loss_type = kwargs.get("loss_type", PredictionType.NOISE) - if self._loss_type not in [PredictionType.NOISE, PredictionType.VELOCITY, PredictionType.F]: + self._loss_type = kwargs.get("loss_type", "noise") + if self._loss_type not in ["noise", "velocity", "F"]: raise ValueError(f"Unknown loss type: {self._loss_type}") - if self._loss_type != PredictionType.NOISE: + if self._loss_type != "noise": warnings.warn( "the standard schedules have weighting functions defined for the noise prediction loss. " "You might want to replace them, if you use a different loss function." @@ -518,18 +502,18 @@ def convert_prediction_to_x( self, pred: Tensor, z: Tensor, alpha_t: Tensor, sigma_t: Tensor, log_snr_t: Tensor, clip_x: bool ) -> Tensor: """Convert the prediction of the neural network to the x space.""" - if self._prediction_type == PredictionType.VELOCITY: + if self._prediction_type == "velocity": # convert v into x x = alpha_t * z - sigma_t * pred - elif self._prediction_type == PredictionType.NOISE: + elif self._prediction_type == "noise": # convert noise prediction into x x = (z - sigma_t * pred) / alpha_t - elif self._prediction_type == PredictionType.F: # EDM + elif self._prediction_type == "F": # EDM sigma_data = self.noise_schedule.sigma_data x1 = (sigma_data**2 * alpha_t) / (ops.exp(-log_snr_t) + sigma_data**2) x2 = ops.exp(-log_snr_t / 2) * sigma_data / ops.sqrt(ops.exp(-log_snr_t) + sigma_data**2) x = x1 * z + x2 * pred - elif self._prediction_type == PredictionType.X: + elif self._prediction_type == "x": x = pred else: # "score" x = (z + sigma_t**2 * pred) / alpha_t @@ -775,16 +759,16 @@ def compute_metrics( ) # Calculate loss - if self._loss_type == PredictionType.NOISE: + if self._loss_type == "noise": # convert x to epsilon prediction noise_pred = (diffused_x - alpha_t * x_pred) / sigma_t loss = weights_for_snr * ops.mean((noise_pred - eps_t) ** 2, axis=-1) - elif self._loss_type == PredictionType.VELOCITY: + 
elif self._loss_type == "velocity": # convert x to velocity prediction velocity_pred = (alpha_t * diffused_x - x_pred) / sigma_t v_t = alpha_t * eps_t - sigma_t * x loss = weights_for_snr * ops.mean((velocity_pred - v_t) ** 2, axis=-1) - elif self._loss_type == PredictionType.F: + elif self._loss_type == "F": # convert x to F prediction sigma_data = self.noise_schedule.sigma_data x1 = ops.sqrt(ops.exp(-log_snr_t) + sigma_data**2) / (ops.exp(-log_snr_t / 2) * sigma_data) From 5ca609f4931d5adc2f0e796988c9793a2ccf0778 Mon Sep 17 00:00:00 2001 From: arrjon Date: Tue, 29 Apr 2025 14:59:49 +0200 Subject: [PATCH 51/52] scale snr --- bayesflow/experimental/diffusion_model.py | 87 +++++++++++++---------- 1 file changed, 48 insertions(+), 39 deletions(-) diff --git a/bayesflow/experimental/diffusion_model.py b/bayesflow/experimental/diffusion_model.py index 6e817c451..ae9c8dc13 100644 --- a/bayesflow/experimental/diffusion_model.py +++ b/bayesflow/experimental/diffusion_model.py @@ -41,10 +41,10 @@ class NoiseSchedule(ABC): def __init__(self, name: str, variance_type: str, weighting: str = None): self.name = name - self.variance_type = variance_type # 'exploding' or 'preserving' - self._log_snr_min = -15 # should be set in the subclasses - self._log_snr_max = 15 # should be set in the subclasses - self.weighting = weighting + self._variance_type = variance_type # 'exploding' or 'preserving' + self.log_snr_min = -15 # should be set in the subclasses + self.log_snr_max = 15 # should be set in the subclasses + self._weighting = weighting @abstractmethod def get_log_snr(self, t: Union[float, Tensor], training: bool) -> Tensor: @@ -76,12 +76,12 @@ def get_drift_diffusion(self, log_snr_t: Tensor, x: Tensor = None, training: boo beta = self.derivative_log_snr(log_snr_t=log_snr_t, training=training) if x is None: # return g^2 only return beta - if self.variance_type == "preserving": + if self._variance_type == "preserving": f = -0.5 * beta * x - elif self.variance_type == 
"exploding": + elif self._variance_type == "exploding": f = ops.zeros_like(beta) else: - raise ValueError(f"Unknown variance type: {self.variance_type}") + raise ValueError(f"Unknown variance type: {self._variance_type}") return f, beta def get_alpha_sigma(self, log_snr_t: Tensor, training: bool) -> tuple[Tensor, Tensor]: @@ -92,37 +92,37 @@ def get_alpha_sigma(self, log_snr_t: Tensor, training: bool) -> tuple[Tensor, Te sigma(t) = sqrt(sigmoid(-log_snr_t)) For a variance exploding schedule, one should set alpha^2 = 1 and sigma^2 = exp(-lambda) """ - if self.variance_type == "preserving": + if self._variance_type == "preserving": # variance preserving schedule alpha_t = ops.sqrt(ops.sigmoid(log_snr_t)) sigma_t = ops.sqrt(ops.sigmoid(-log_snr_t)) - elif self.variance_type == "exploding": + elif self._variance_type == "exploding": # variance exploding schedule alpha_t = ops.ones_like(log_snr_t) sigma_t = ops.sqrt(ops.exp(-log_snr_t)) else: - raise ValueError(f"Unknown variance type: {self.variance_type}") + raise ValueError(f"Unknown variance type: {self._variance_type}") return alpha_t, sigma_t def get_weights_for_snr(self, log_snr_t: Tensor) -> Tensor: """Get weights for the signal-to-noise ratio (snr) for a given log signal-to-noise ratio (lambda). Default is 1. Generally, weighting functions should be defined for a noise prediction loss. """ - if self.weighting is None: + if self._weighting is None: return ops.ones_like(log_snr_t) - elif self.weighting == "sigmoid": + elif self._weighting == "sigmoid": # sigmoid weighting based on Kingma et al. (2023) return ops.sigmoid(-log_snr_t + 2) - elif self.weighting == "likelihood_weighting": + elif self._weighting == "likelihood_weighting": # likelihood weighting based on Song et al. 
(2021) g_squared = self.get_drift_diffusion(log_snr_t=log_snr_t) sigma_t = self.get_alpha_sigma(log_snr_t=log_snr_t, training=True)[1] return g_squared / ops.square(sigma_t) else: - raise ValueError(f"Unknown weighting type: {self.weighting}") + raise ValueError(f"Unknown weighting type: {self._weighting}") def get_config(self): - return dict(name=self.name, variance_type=self.variance_type) + return dict(name=self.name, variance_type=self._variance_type) @classmethod def from_config(cls, config, custom_objects=None): @@ -130,20 +130,20 @@ def from_config(cls, config, custom_objects=None): def validate(self): """Validate the noise schedule.""" - if self._log_snr_min >= self._log_snr_max: + if self.log_snr_min >= self.log_snr_max: raise ValueError("min_log_snr must be less than max_log_snr.") for training in [True, False]: if not ops.isfinite(self.get_log_snr(0.0, training=training)): raise ValueError("log_snr(0) must be finite.") if not ops.isfinite(self.get_log_snr(1.0, training=training)): raise ValueError("log_snr(1) must be finite.") - if not ops.isfinite(self.get_t_from_log_snr(self._log_snr_max, training=training)): + if not ops.isfinite(self.get_t_from_log_snr(self.log_snr_max, training=training)): raise ValueError("t(0) must be finite.") - if not ops.isfinite(self.get_t_from_log_snr(self._log_snr_min, training=training)): + if not ops.isfinite(self.get_t_from_log_snr(self.log_snr_min, training=training)): raise ValueError("t(1) must be finite.") - if not ops.isfinite(self.derivative_log_snr(self._log_snr_max, training=False)): + if not ops.isfinite(self.derivative_log_snr(self.log_snr_max, training=False)): raise ValueError("dt/t log_snr(0) must be finite.") - if not ops.isfinite(self.derivative_log_snr(self._log_snr_min, training=False)): + if not ops.isfinite(self.derivative_log_snr(self.log_snr_min, training=False)): raise ValueError("dt/t log_snr(1) must be finite.") @@ -158,11 +158,11 @@ class LinearNoiseSchedule(NoiseSchedule): def __init__(self, 
min_log_snr: float = -15, max_log_snr: float = 15): super().__init__(name="linear_noise_schedule", variance_type="preserving", weighting="likelihood_weighting") - self._log_snr_min = min_log_snr - self._log_snr_max = max_log_snr + self.log_snr_min = min_log_snr + self.log_snr_max = max_log_snr - self._t_min = self.get_t_from_log_snr(log_snr_t=self._log_snr_max, training=True) - self._t_max = self.get_t_from_log_snr(log_snr_t=self._log_snr_min, training=True) + self._t_min = self.get_t_from_log_snr(log_snr_t=self.log_snr_max, training=True) + self._t_max = self.get_t_from_log_snr(log_snr_t=self.log_snr_min, training=True) def _truncated_t(self, t: Tensor) -> Tensor: return self._t_min + (self._t_max - self._t_min) * t @@ -194,7 +194,7 @@ def derivative_log_snr(self, log_snr_t: Tensor, training: bool) -> Tensor: return -factor * dsnr_dt def get_config(self): - return dict(min_log_snr=self._log_snr_min, max_log_snr=self._log_snr_max) + return dict(min_log_snr=self.log_snr_min, max_log_snr=self.log_snr_max) @classmethod def from_config(cls, config, custom_objects=None): @@ -214,12 +214,11 @@ def __init__( ): super().__init__(name="cosine_noise_schedule", variance_type="preserving", weighting=weighting) self._s_shift_cosine = s_shift_cosine - self._log_snr_min = min_log_snr - self._log_snr_max = max_log_snr - self._s_shift_cosine = s_shift_cosine + self.log_snr_min = min_log_snr + self.log_snr_max = max_log_snr - self._t_min = self.get_t_from_log_snr(log_snr_t=self._log_snr_max, training=True) - self._t_max = self.get_t_from_log_snr(log_snr_t=self._log_snr_min, training=True) + self._t_min = self.get_t_from_log_snr(log_snr_t=self.log_snr_max, training=True) + self._t_max = self.get_t_from_log_snr(log_snr_t=self.log_snr_min, training=True) def _truncated_t(self, t: Tensor) -> Tensor: return self._t_min + (self._t_max - self._t_min) * t @@ -250,7 +249,7 @@ def derivative_log_snr(self, log_snr_t: Tensor, training: bool) -> Tensor: return -factor * dsnr_dt def 
get_config(self): - return dict(min_log_snr=self._log_snr_min, max_log_snr=self._log_snr_max, s_shift_cosine=self._s_shift_cosine) + return dict(min_log_snr=self.log_snr_min, max_log_snr=self.log_snr_max, s_shift_cosine=self._s_shift_cosine) @classmethod def from_config(cls, config, custom_objects=None): @@ -278,12 +277,12 @@ def __init__(self, sigma_data: float = 1.0, sigma_min: float = 1e-4, sigma_max: self.rho = 7 # convert EDM parameters to signal-to-noise ratio formulation - self._log_snr_min = -2 * ops.log(sigma_max) - self._log_snr_max = -2 * ops.log(sigma_min) + self.log_snr_min = -2 * ops.log(sigma_max) + self.log_snr_max = -2 * ops.log(sigma_min) # t is not truncated for EDM by definition of the sampling schedule # training bounds should be set to avoid numerical issues - self._log_snr_min_training = self._log_snr_min - 1 # one is never sampler during training - self._log_snr_max_training = self._log_snr_max + 1 # 0 is almost surely never sampled during training + self._log_snr_min_training = self.log_snr_min - 1 # one is never sampled during training + self._log_snr_max_training = self.log_snr_max + 1 # 0 is almost surely never sampled during training def get_log_snr(self, t: Union[float, Tensor], training: bool) -> Tensor: """Get the log signal-to-noise ratio (lambda) for a given diffusion time.""" @@ -537,9 +536,9 @@ def velocity( alpha_t, sigma_t = self.noise_schedule.get_alpha_sigma(log_snr_t=log_snr_t, training=training) if conditions is None: - xtc = ops.concatenate([xz, log_snr_t], axis=-1) + xtc = ops.concatenate([xz, self._transform_log_snr(log_snr_t)], axis=-1) else: - xtc = ops.concatenate([xz, log_snr_t, conditions], axis=-1) + xtc = ops.concatenate([xz, self._transform_log_snr(log_snr_t), conditions], axis=-1) pred = self.output_projector(self.subnet(xtc, training=training), training=training) x_pred = self.convert_prediction_to_x( @@ -587,6 +586,16 @@ def f(x): return v, ops.expand_dims(trace, axis=-1) + def _transform_log_snr(self, 
log_snr: Tensor) -> Tensor: + """Transform the log_snr to the range [-1, 1] for the diffusion process.""" + # Transform the log_snr to the range [-1, 1] + return ( + 2 + * (log_snr - self.noise_schedule.log_snr_min) + / (self.noise_schedule.log_snr_max - self.noise_schedule.log_snr_min) + - 1 + ) + def _forward( self, x: Tensor, @@ -749,9 +758,9 @@ def compute_metrics( # calculate output of the network if conditions is None: - xtc = ops.concatenate([diffused_x, log_snr_t], axis=-1) + xtc = ops.concatenate([diffused_x, self._transform_log_snr(log_snr_t)], axis=-1) else: - xtc = ops.concatenate([diffused_x, log_snr_t, conditions], axis=-1) + xtc = ops.concatenate([diffused_x, self._transform_log_snr(log_snr_t), conditions], axis=-1) pred = self.output_projector(self.subnet(xtc, training=training), training=training) x_pred = self.convert_prediction_to_x( From 79be9ab3414675ce2e164d7e1b5a219eeeda8dbc Mon Sep 17 00:00:00 2001 From: arrjon Date: Tue, 29 Apr 2025 16:20:58 +0200 Subject: [PATCH 52/52] fix stochastic sampler --- bayesflow/experimental/diffusion_model.py | 12 ++++-------- bayesflow/utils/integrate.py | 21 ++++++++++++--------- 2 files changed, 16 insertions(+), 17 deletions(-) diff --git a/bayesflow/experimental/diffusion_model.py b/bayesflow/experimental/diffusion_model.py index ae9c8dc13..24096a9c1 100644 --- a/bayesflow/experimental/diffusion_model.py +++ b/bayesflow/experimental/diffusion_model.py @@ -374,8 +374,8 @@ def __init__( subnet: str | type = "mlp", integrate_kwargs: dict[str, any] = None, subnet_kwargs: dict[str, any] = None, - noise_schedule: str | NoiseSchedule = "cosine", - prediction_type: str = "velocity", + noise_schedule: str | NoiseSchedule = "edm", + prediction_type: str = "F", **kwargs, ): """ @@ -398,10 +398,10 @@ def __init__( Keyword arguments passed to the subnet constructor or used to update the default MLP settings. noise_schedule : str or NoiseSchedule, optional The noise schedule used for the diffusion process. 
Can be "linear", "cosine", or "edm". - Default is "cosine". + Default is "edm". prediction_type: str, optional The type of prediction used in the diffusion model. Can be "velocity", "noise" or "F" (EDM). - Default is "velocity". + Default is "F". **kwargs Additional keyword arguments passed to the subnet and other components. """ @@ -425,10 +425,6 @@ def __init__( if prediction_type not in ["noise", "velocity", "F"]: # F is EDM raise ValueError(f"Unknown prediction type: {prediction_type}") self._prediction_type = prediction_type - if noise_schedule.name == "edm_noise_schedule" and prediction_type != "F": - warnings.warn( - "EDM noise schedule is build for F-prediction. Consider using F-prediction instead.", - ) self._loss_type = kwargs.get("loss_type", "noise") if self._loss_type not in ["noise", "velocity", "F"]: raise ValueError(f"Unknown loss type: {self._loss_type}") diff --git a/bayesflow/utils/integrate.py b/bayesflow/utils/integrate.py index 6af03fdeb..f5da1bf30 100644 --- a/bayesflow/utils/integrate.py +++ b/bayesflow/utils/integrate.py @@ -391,15 +391,18 @@ def integrate_stochastic( # Prepare step function with partial application step_fn = partial(step_fn, drift_fn=drift_fn, diffusion_fn=diffusion_fn, seed=seed, **kwargs) - step_size = (stop_time - start_time) / steps + step_size = (stop_time - start_time) / steps time = start_time + current_state = state.copy() + + # keras.ops.fori_loop does not support keras seed generator in jax + for i in range(steps): + # Execute the step with the specific seed for this step + current_state, time = step_fn( + state=current_state, + time=time, + step_size=step_size, + ) - def body(_loop_var, _loop_state): - _state, _time = _loop_state - _state, _time = step_fn(state=_state, time=_time, step_size=step_size) - - return _state, _time - - state, time = keras.ops.fori_loop(0, steps, body, (state, time)) - return state + return current_state