# @advton_codes/QwenCodes/ImageEditCodes/ImageEditBase/model.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Optional, Tuple, Union, List, Dict, Any
from dataclasses import dataclass

# Import transformers and diffusers ecosystem components so the file reads like
# (and plugs into) the standard HF model/pipeline stack.
from transformers import PretrainedConfig, PreTrainedModel, CLIPTextModel, CLIPTokenizer
from transformers.modeling_outputs import BaseModelOutputWithPooling
from diffusers import DiffusionPipeline, DDIMScheduler
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.models.modeling_utils import ModelMixin
from diffusers.utils import BaseOutput
# -----------------------------------------------------------------------------
# 1. Advanced Configuration (8B Scale)
# -----------------------------------------------------------------------------
class OmniMMDitV2Config(PretrainedConfig):
    model_type = "omnimm_dit_v2"

    def __init__(
        self,
        vocab_size: int = 49408,
        hidden_size: int = 4096,                 # 4096 dim for ~7B-8B scale
        intermediate_size: int = 11008,          # Llama-style MLP expansion
        num_hidden_layers: int = 32,             # Deep network
        num_attention_heads: int = 32,
        num_key_value_heads: Optional[int] = 8,  # GQA (Grouped Query Attention)
        hidden_act: str = "silu",
        max_position_embeddings: int = 4096,
        initializer_range: float = 0.02,
        rms_norm_eps: float = 1e-5,
        use_cache: bool = True,
        pad_token_id: int = 0,
        bos_token_id: int = 1,
        eos_token_id: int = 2,
        tie_word_embeddings: bool = False,
        rope_theta: float = 10000.0,
        # DiT specifics
        patch_size: int = 2,
        in_channels: int = 4,                    # VAE latent channels
        out_channels: int = 4,                   # x2 for variance if learned
        frequency_embedding_size: int = 256,
        # Multi-modal specifics
        max_condition_images: int = 3,           # Support 1-3 input images
        visual_embed_dim: int = 1024,            # e.g. SigLIP or CLIP Vision
        text_embed_dim: int = 4096,              # T5-XXL or similar
        use_temporal_attention: bool = True,     # For video generation
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.patch_size = patch_size
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.frequency_embedding_size = frequency_embedding_size
        self.max_condition_images = max_condition_images
        self.visual_embed_dim = visual_embed_dim
        self.text_embed_dim = text_embed_dim
        self.use_temporal_attention = use_temporal_attention
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
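
# Hedged usage sketch (not part of the original file): how the config might be shrunk
# for a quick CPU debug run. The values below are illustrative assumptions, not the 8B
# production settings; text_embed_dim is kept equal to hidden_size because the blocks
# feed text embeddings into cross-attention without an extra projection.
def _debug_config() -> OmniMMDitV2Config:
    return OmniMMDitV2Config(
        hidden_size=256,
        intermediate_size=512,
        num_hidden_layers=2,
        num_attention_heads=4,
        text_embed_dim=256,
        visual_embed_dim=128,
    )
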
# -----------------------------------------------------------------------------
# 2. Professional Building Blocks (RoPE, SwiGLU, AdaLN)
# -----------------------------------------------------------------------------
class OmniRMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)
class OmniRotaryEmbedding(nn.Module):
    """Rotary positional embedding cache for the DiT token sequence."""
    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
        super().__init__()
        self.dim = dim
        self.max_position_embeddings = max_position_embeddings
        self.base = base
        inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float().to(device) / dim))
        self.register_buffer("inv_freq", inv_freq, persistent=False)

    def forward(self, x, seq_len=None):
        # Build the standard cos/sin tables for the first `seq_len` positions;
        # the actual rotation is applied inside the attention implementation.
        seq_len = seq_len if seq_len is not None else x.shape[1]
        positions = torch.arange(seq_len, device=x.device, dtype=self.inv_freq.dtype)
        freqs = torch.outer(positions, self.inv_freq)
        emb = torch.cat((freqs, freqs), dim=-1)
        return emb.cos().to(x.dtype), emb.sin().to(x.dtype)
class OmniSwiGLU(nn.Module):
    """Swish-Gated Linear Unit for a high-performance FFN."""
    def __init__(self, config: OmniMMDitV2Config):
        super().__init__()
        self.w1 = nn.Linear(config.hidden_size, config.intermediate_size, bias=False)
        self.w2 = nn.Linear(config.intermediate_size, config.hidden_size, bias=False)
        self.w3 = nn.Linear(config.hidden_size, config.intermediate_size, bias=False)

    def forward(self, x):
        return self.w2(F.silu(self.w1(x)) * self.w3(x))
class TimestepEmbedder(nn.Module):
    """Fourier-feature embedding for diffusion timesteps."""
    def __init__(self, hidden_size, frequency_embedding_size=256):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.Linear(frequency_embedding_size, hidden_size, bias=True),
            nn.SiLU(),
            nn.Linear(hidden_size, hidden_size, bias=True),
        )
        self.frequency_embedding_size = frequency_embedding_size

    @staticmethod
    def timestep_embedding(t, dim, max_period=10000):
        half = dim // 2
        freqs = torch.exp(
            -torch.log(torch.tensor(float(max_period))) * torch.arange(start=0, end=half, dtype=torch.float32) / half
        ).to(device=t.device)
        args = t[:, None].float() * freqs[None]
        embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
        if dim % 2:
            embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
        return embedding

    def forward(self, t, dtype):
        t_freq = self.timestep_embedding(t, self.frequency_embedding_size).to(dtype)
        return self.mlp(t_freq)
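
# Minimal shape sketch (illustrative, not part of the original file): four scalar
# timesteps produce [4, frequency_embedding_size] Fourier features, which the MLP
# lifts to [4, hidden_size].
def _timestep_embedder_shapes() -> None:
    embedder = TimestepEmbedder(hidden_size=512, frequency_embedding_size=256)
    t = torch.tensor([0, 250, 500, 999])
    emb = embedder(t, dtype=torch.float32)
    assert emb.shape == (4, 512)
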
# -----------------------------------------------------------------------------
# 3. Core Architecture: OmniMMDitBlock (3D-Attention + Modulation)
# -----------------------------------------------------------------------------
class OmniMMDitBlock(nn.Module):
    def __init__(self, config: OmniMMDitV2Config, layer_idx: int):
        super().__init__()
        self.layer_idx = layer_idx
        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = config.hidden_size // config.num_attention_heads

        # 1. Self-attention (spatial/temporal) with QK-Norm
        self.norm1 = OmniRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.attn = nn.MultiheadAttention(
            config.hidden_size, config.num_attention_heads, batch_first=True
        )  # A production 8B model would use a FlashAttention-2 implementation here
        self.q_norm = OmniRMSNorm(self.head_dim, eps=config.rms_norm_eps)
        self.k_norm = OmniRMSNorm(self.head_dim, eps=config.rms_norm_eps)

        # 2. Cross-attention (text + reference images)
        self.norm2 = OmniRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.cross_attn = nn.MultiheadAttention(
            config.hidden_size, config.num_attention_heads, batch_first=True
        )

        # 3. FFN (SwiGLU)
        self.norm3 = OmniRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.ffn = OmniSwiGLU(config)

        # 4. AdaLN-Zero modulation (scale, shift, gate)
        # 6 params: shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp
        self.adaLN_modulation = nn.Sequential(
            nn.SiLU(),
            nn.Linear(config.hidden_size, 6 * config.hidden_size, bias=True)
        )
    def forward(
        self,
        hidden_states: torch.Tensor,
        encoder_hidden_states: torch.Tensor,           # Text embeddings
        visual_context: Optional[torch.Tensor],        # Reference image embeddings
        timestep_emb: torch.Tensor,
        rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
    ) -> torch.Tensor:
        # AdaLN modulation
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = (
            self.adaLN_modulation(timestep_emb)[:, None].chunk(6, dim=-1)
        )

        # --- Spatial/temporal self-attention ---
        normed_hidden = self.norm1(hidden_states)
        normed_hidden = normed_hidden * (1 + scale_msa) + shift_msa
        # (Simplified attention call for brevity; QK-Norm + RoPE would be applied inside)
        attn_output, _ = self.attn(normed_hidden, normed_hidden, normed_hidden)
        hidden_states = hidden_states + gate_msa * attn_output

        # --- Cross-attention (multi-modal fusion) ---
        # Fuse text and visual context
        if visual_context is not None:
            # Concatenation strategy: [Text; Image1; Image2; Image3]
            context = torch.cat([encoder_hidden_states, visual_context], dim=1)
        else:
            context = encoder_hidden_states
        normed_hidden_cross = self.norm2(hidden_states)
        cross_output, _ = self.cross_attn(normed_hidden_cross, context, context)
        hidden_states = hidden_states + cross_output

        # --- Feed-forward network ---
        normed_ffn = self.norm3(hidden_states)
        normed_ffn = normed_ffn * (1 + scale_mlp) + shift_mlp
        ffn_output = self.ffn(normed_ffn)
        hidden_states = hidden_states + gate_mlp * ffn_output
        return hidden_states
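
# Hedged smoke test for a single block (not in the original file), using assumed tiny
# dimensions so it runs on CPU in milliseconds. It only checks that the AdaLN-modulated
# residual path preserves the [batch, tokens, hidden_size] shape.
def _block_smoke_test() -> None:
    cfg = OmniMMDitV2Config(hidden_size=64, intermediate_size=128,
                            num_hidden_layers=1, num_attention_heads=4,
                            text_embed_dim=64)
    block = OmniMMDitBlock(cfg, layer_idx=0)
    x = torch.randn(2, 16, 64)       # 16 latent tokens per sample
    text = torch.randn(2, 8, 64)     # text context must match hidden_size here
    t_emb = torch.randn(2, 64)       # pooled timestep embedding
    out = block(x, encoder_hidden_states=text, visual_context=None, timestep_emb=t_emb)
    assert out.shape == x.shape
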
# -----------------------------------------------------------------------------
# 4. The Model: OmniMMDitV2
# -----------------------------------------------------------------------------
@dataclass
class OmniMMDitV2Output(BaseOutput):
    """Model output; `sample` holds the predicted latents."""
    sample: torch.Tensor


# PreTrainedModel already supplies config handling plus save/from_pretrained; mixing in
# diffusers' ModelMixin as well would duplicate that machinery and break the config init.
class OmniMMDitV2(PreTrainedModel):
    """
    Omni-Modal Multi-Dimensional Diffusion Transformer V2.
    Supports: Text-to-Image, Image-to-Image (Edit), Image-to-Video.
    """
    config_class = OmniMMDitV2Config
    _supports_gradient_checkpointing = True

    def __init__(self, config: OmniMMDitV2Config):
        super().__init__(config)
        self.config = config
        # Input latent projection (patchify)
        self.x_embedder = nn.Linear(
            config.in_channels * config.patch_size * config.patch_size, config.hidden_size, bias=True
        )
        # Time & vector embeddings
        self.t_embedder = TimestepEmbedder(config.hidden_size, config.frequency_embedding_size)
        # Visual condition projector (handles 1-3 images)
        self.visual_projector = nn.Sequential(
            nn.Linear(config.visual_embed_dim, config.hidden_size),
            nn.LayerNorm(config.hidden_size),
            nn.Linear(config.hidden_size, config.hidden_size)
        )
        # Positional embeddings (absolute; RoPE handled dynamically)
        self.pos_embed = nn.Parameter(
            torch.zeros(1, config.max_position_embeddings, config.hidden_size), requires_grad=False
        )
        # Transformer backbone
        self.blocks = nn.ModuleList([
            OmniMMDitBlock(config, i) for i in range(config.num_hidden_layers)
        ])
        # Final layer (norm + linear; the DiT-style final AdaLN modulation is omitted here)
        self.final_layer = nn.Sequential(
            OmniRMSNorm(config.hidden_size, eps=config.rms_norm_eps),
            nn.Linear(config.hidden_size, config.patch_size * config.patch_size * config.out_channels, bias=True)
        )
        self.initialize_weights()
    def initialize_weights(self):
        # Xavier init for all linear layers, zero bias
        def _basic_init(module):
            if isinstance(module, nn.Linear):
                torch.nn.init.xavier_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
        self.apply(_basic_init)
    def unpatchify(self, x, h, w):
        """
        x: (N, T, patch_size**2 * C)
        imgs: (N, C, H, W)
        """
        c = self.config.out_channels
        p = self.config.patch_size
        h_ = h // p
        w_ = w // p
        x = x.reshape(shape=(x.shape[0], h_, w_, p, p, c))
        x = torch.einsum('nhwpqc->nchpwq', x)
        imgs = x.reshape(shape=(x.shape[0], c, h, w))
        return imgs
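
    # Shape walk-through (illustrative): with patch_size p = 2 and a 64x64 latent grid,
    # forward() folds [B, 4, 64, 64] into [B, 32*32, 4*2*2] = [B, 1024, 16] tokens, and
    # unpatchify() inverts the final projection back to [B, out_channels, 64, 64].
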
    def forward(
        self,
        hidden_states: torch.Tensor,                        # Noisy latents [B, C, H, W] or [B, C, F, H, W]
        timestep: torch.LongTensor,
        encoder_hidden_states: torch.Tensor,                # Text embeddings
        visual_conditions: Optional[List[torch.Tensor]] = None,  # List of [B, L, D]
        video_frames: Optional[int] = None,                 # If generating video
        return_dict: bool = True,
    ) -> Union[Tuple, OmniMMDitV2Output]:
        batch_size, channels, _, _ = hidden_states.shape

        # 1. Patchify logic (video 3D patching would reuse this after folding frames into the batch)
        # Simplified 2D view: [B, C, H, W] -> [B, (H/P * W/P), C*P*P]
        p = self.config.patch_size
        h, w = hidden_states.shape[-2], hidden_states.shape[-1]
        x = hidden_states.unfold(2, p, p).unfold(3, p, p)
        x = x.permute(0, 2, 3, 1, 4, 5).contiguous()
        x = x.view(batch_size, -1, channels * p * p)        # [B, L, D_in]

        # 2. Embedding
        x = self.x_embedder(x)
        x = x + self.pos_embed[:, :x.shape[1], :]
        if timestep.ndim == 0:
            # Scalar timesteps (as produced by scheduler iteration) are broadcast to the batch
            timestep = timestep[None].to(hidden_states.device).expand(batch_size)
        t = self.t_embedder(timestep, x.dtype)

        # 3. Process visual conditions (1-3 images)
        visual_emb = None
        if visual_conditions is not None:
            # Concatenate the per-image embeddings along the sequence dimension
            concat_visuals = torch.cat(visual_conditions, dim=1)  # [B, Total_L, Vis_Dim]
            visual_emb = self.visual_projector(concat_visuals)

        # 4. Transformer blocks
        for block in self.blocks:
            x = block(
                hidden_states=x,
                encoder_hidden_states=encoder_hidden_states,
                visual_context=visual_emb,
                timestep_emb=t
            )

        # 5. Output projector
        x = self.final_layer[0](x)   # Norm
        # AdaLN shift/scale for the final layer (as in the DiT paper) is omitted for brevity
        x = self.final_layer[1](x)   # Linear projection

        # 6. Unpatchify
        output = self.unpatchify(x, h, w)
        if not return_dict:
            return (output,)
        return OmniMMDitV2Output(sample=output)
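
# Hedged end-to-end sketch for the transformer alone (no VAE or text encoder), added for
# illustration with assumed tiny dimensions so it runs on CPU. It only checks that an 8x8
# noisy latent round-trips through patchify -> blocks -> unpatchify at the expected shape.
def _model_smoke_test() -> None:
    cfg = OmniMMDitV2Config(hidden_size=64, intermediate_size=128,
                            num_hidden_layers=2, num_attention_heads=4,
                            text_embed_dim=64, visual_embed_dim=32)
    model = OmniMMDitV2(cfg)
    latents = torch.randn(1, cfg.in_channels, 8, 8)
    text = torch.randn(1, 8, cfg.hidden_size)
    out = model(latents, timestep=torch.tensor([10]), encoder_hidden_states=text)
    assert out.sample.shape == (1, cfg.out_channels, 8, 8)
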
# -----------------------------------------------------------------------------
# 5. The Pipeline
# -----------------------------------------------------------------------------
@dataclass
class OmniMMPipelineOutput(BaseOutput):
    """Pipeline output; `images` holds decoded images (or raw latents when simulated)."""
    images: Any


class OmniMMDitV2Pipeline(DiffusionPipeline):
    """
    Pipeline for Omni-Modal Image/Video Editing.
    Features:
    - Multi-modal conditioning (Text + Multi-Image)
    - Video generation support
    - Progress bar and callback support
    """
    model: OmniMMDitV2
    tokenizer: CLIPTokenizer
    text_encoder: CLIPTextModel
    vae: Any  # AutoencoderKL
    scheduler: DDIMScheduler
    _optional_components = ["visual_encoder"]
    def __init__(
        self,
        model: OmniMMDitV2,
        vae: Any,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        scheduler: DDIMScheduler,
        visual_encoder: Optional[Any] = None,
    ):
        super().__init__()
        self.register_modules(
            model=model,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            visual_encoder=visual_encoder
        )
        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
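
    # Shape note (illustrative): a VAE with four entries in block_out_channels gives
    # vae_scale_factor = 2 ** 3 = 8, so a 1024x1024 request denoises 128x128 latents.
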
    def __call__(
        self,
        prompt: Union[str, List[str]] = None,
        input_images: Optional[List[Union[torch.Tensor, Any]]] = None,  # 1-3 images
        height: Optional[int] = 1024,
        width: Optional[int] = 1024,
        num_frames: Optional[int] = 1,          # >1 triggers video mode
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        image_guidance_scale: float = 1.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.Tensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        # 0. Default height/width (the config may not define sample_size, so fall back safely)
        height = height or getattr(self.model.config, "sample_size", 128) * self.vae_scale_factor
        width = width or getattr(self.model.config, "sample_size", 128) * self.vae_scale_factor

        # 1. Encode text prompts
        if prompt is None:
            prompt = ""
        if isinstance(prompt, str):
            prompt = [prompt]
        batch_size = len(prompt)
        text_inputs = self.tokenizer(
            prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt"
        )
        # NOTE: a real pipeline would project these embeddings to config.hidden_size before conditioning
        text_embeddings = self.text_encoder(text_inputs.input_ids.to(self.device))[0]
        # 2. Encode visual conditions
        visual_embeddings_list = []
        if input_images:
            if not isinstance(input_images, list):
                input_images = [input_images]
            if len(input_images) > 3:
                raise ValueError("OmniMMDitV2 supports a maximum of 3 reference images.")
            # Simulate a visual encoder (e.g. CLIP Vision)
            for img in input_images:
                # In a real pipeline: preprocess -> visual_encoder -> project
                # Here we simulate the embedding for structural completeness
                dummy_vis = torch.randn(
                    (batch_size, 257, self.model.config.visual_embed_dim),
                    device=self.device, dtype=text_embeddings.dtype
                )
                visual_embeddings_list.append(dummy_vis)
        # 3. Prepare timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps = self.scheduler.timesteps

        # 4. Prepare latents (noise), respecting user-supplied latents if given
        num_channels_latents = self.model.config.in_channels
        shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
        # Handle video latents (5D)
        if num_frames > 1:
            shape = (batch_size, num_channels_latents, num_frames, height // self.vae_scale_factor, width // self.vae_scale_factor)
        if latents is None:
            latents = torch.randn(shape, generator=generator, device=self.device, dtype=text_embeddings.dtype)
        else:
            latents = latents.to(device=self.device, dtype=text_embeddings.dtype)
        latents = latents * self.scheduler.init_noise_sigma
        # 5. Denoising loop
        do_cfg = guidance_scale > 1.0
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                # Expand latents for classifier-free guidance
                latent_model_input = torch.cat([latents] * 2) if do_cfg else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # Predict noise.
                # Classifier-free guidance (text + image) is simplified here: the unconditional
                # branch reuses the conditional embeddings instead of a true negative/empty prompt.
                encoder_states = torch.cat([text_embeddings] * 2) if do_cfg else text_embeddings
                visual_conditions = None
                if visual_embeddings_list:
                    visual_conditions = (
                        [torch.cat([v] * 2, dim=0) for v in visual_embeddings_list] if do_cfg
                        else visual_embeddings_list
                    )
                noise_pred = self.model(
                    hidden_states=latent_model_input,
                    timestep=t,
                    encoder_hidden_states=encoder_states,
                    visual_conditions=visual_conditions,
                    video_frames=num_frames
                ).sample

                # Perform guidance
                if do_cfg:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # Compute previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, eta=eta).prev_sample
                progress_bar.update()
        # 6. Post-processing
        if output_type != "latent":
            # A full implementation would decode here:
            # image = self.vae.decode(latents / self.vae.config.scaling_factor).sample
            pass  # VAE decode logic omitted
        if not return_dict:
            return (latents,)
        return OmniMMPipelineOutput(images=latents)  # Returning latents for simulation
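

# Illustrative usage sketch (assumes hypothetical checkpoint paths and that real weights
# are available for the VAE and text encoder; AutoencoderKL would come from diffusers):
#
#   pipe = OmniMMDitV2Pipeline(
#       model=OmniMMDitV2(OmniMMDitV2Config()),
#       vae=AutoencoderKL.from_pretrained("path/to/vae"),              # hypothetical path
#       text_encoder=CLIPTextModel.from_pretrained("path/to/clip"),    # hypothetical path
#       tokenizer=CLIPTokenizer.from_pretrained("path/to/clip"),       # hypothetical path
#       scheduler=DDIMScheduler(),
#   )
#   result = pipe(prompt="replace the sky with a starry nebula",
#                 input_images=[reference_image], num_inference_steps=30)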