Quality improvements

#3
by pulb - opened

Hi,
got no time to file individual pull requests so I thought I just paste my whole script here.
Based on your work I added the following changes:

  • Added aspect ratio support. This also fixes the cropping/zooming issue when editing the same image multiple times.
  • Added the possibility to generate images without any LoRA
  • Load and convert phr00t's latest transformer (v19) directly instead of using linoyt's outdated preconverted version (v4). This seems to significantly improve quality and generates far more realistic looking results.
  • Unload and cleanup lora weights before every run
  • Add possibility to edit negative prompt
  • (disabled examples)

Maybe you find some of the changes useful.

import os
import gc
import gradio as gr
import numpy as np
import spaces
import torch
import random
from PIL import Image
from typing import Iterable
from gradio.themes import Soft
from gradio.themes.utils import colors, fonts, sizes

from diffusers import FlowMatchEulerDiscreteScheduler
from qwenimage.pipeline_qwenimage_edit_plus import QwenImageEditPlusPipeline
from qwenimage.transformer_qwenimage import QwenImageTransformer2DModel
from qwenimage.qwen_fa3_processor import QwenDoubleStreamAttnProcessorFA3

import safetensors.torch
from huggingface_hub import hf_hub_download
from accelerate import init_empty_weights

# Register a custom "orange red" palette on gradio's colors module so the
# theme class below can reference it as colors.orange_red.
# c50 is the lightest tint, c950 the darkest shade.
colors.orange_red = colors.Color(
    name="orange_red",
    c50="#FFF0E5",
    c100="#FFE0CC",
    c200="#FFC299",
    c300="#FFA366",
    c400="#FF8533",
    c500="#FF4500",
    c600="#E63E00",
    c700="#CC3700",
    c800="#B33000",
    c900="#992900",
    c950="#802200",
)

class OrangeRedTheme(Soft):
    """Gradio Soft theme variant using the custom orange_red palette as the
    secondary (accent) hue, with gray primaries and slate neutrals."""

    def __init__(
        self,
        *,
        primary_hue: colors.Color | str = colors.gray,
        secondary_hue: colors.Color | str = colors.orange_red,
        neutral_hue: colors.Color | str = colors.slate,
        text_size: sizes.Size | str = sizes.text_lg,
        font: fonts.Font | str | Iterable[fonts.Font | str] = (
            fonts.GoogleFont("Outfit"), "Arial", "sans-serif",
        ),
        font_mono: fonts.Font | str | Iterable[fonts.Font | str] = (
            fonts.GoogleFont("IBM Plex Mono"), "ui-monospace", "monospace",
        ),
    ):
        super().__init__(
            primary_hue=primary_hue,
            secondary_hue=secondary_hue,
            neutral_hue=neutral_hue,
            text_size=text_size,
            font=font,
            font_mono=font_mono,
        )
        # Fine-grained overrides on top of the Soft defaults: gradient
        # backgrounds/buttons, accent-colored sliders, and heavier block
        # borders/shadows. "*name" tokens are theme CSS variables.
        super().set(
            background_fill_primary="*primary_50",
            background_fill_primary_dark="*primary_900",
            body_background_fill="linear-gradient(135deg, *primary_200, *primary_100)",
            body_background_fill_dark="linear-gradient(135deg, *primary_900, *primary_800)",
            button_primary_text_color="white",
            button_primary_text_color_hover="white",
            button_primary_background_fill="linear-gradient(90deg, *secondary_500, *secondary_600)",
            button_primary_background_fill_hover="linear-gradient(90deg, *secondary_600, *secondary_700)",
            button_primary_background_fill_dark="linear-gradient(90deg, *secondary_600, *secondary_700)",
            button_primary_background_fill_hover_dark="linear-gradient(90deg, *secondary_500, *secondary_600)",
            button_secondary_text_color="black",
            button_secondary_text_color_hover="white",
            button_secondary_background_fill="linear-gradient(90deg, *primary_300, *primary_300)",
            button_secondary_background_fill_hover="linear-gradient(90deg, *primary_400, *primary_400)",
            button_secondary_background_fill_dark="linear-gradient(90deg, *primary_500, *primary_600)",
            button_secondary_background_fill_hover_dark="linear-gradient(90deg, *primary_500, *primary_500)",
            slider_color="*secondary_500",
            slider_color_dark="*secondary_600",
            block_title_text_weight="600",
            block_border_width="3px",
            block_shadow="*shadow_drop_lg",
            button_primary_shadow="*shadow_drop_lg",
            button_large_padding="11px",
            color_accent_soft="*primary_100",
            block_label_background_fill="*primary_200",
        )

# Theme instance used when the app is launched.
orange_red_theme = OrangeRedTheme()

# Prefer CUDA when available; bfloat16 keeps the transformer's memory
# footprint low on GPU.
if torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")
dtype = torch.bfloat16

# Startup diagnostics for the Space logs.
print(f"CUDA_VISIBLE_DEVICES= {os.environ.get('CUDA_VISIBLE_DEVICES')}")
print(f"torch.__version__ = {torch.__version__}")
print(f"Using device: {device}")

# Architecture hyperparameters for QwenImageTransformer2DModel.
# NOTE(review): the empty meta-model in load_and_convert_transformer() is
# built from this config before checkpoint weights are assigned, so these
# values presumably must match the v19 checkpoint's architecture — confirm
# against the checkpoint if the repo/file is changed.
TRANSFORMER_CONFIG = {
    "attention_head_dim": 128,
    "axes_dims_rope": [16, 56, 56],
    "guidance_embeds": False,
    "in_channels": 64,
    "joint_attention_dim": 3584,
    "num_attention_heads": 24,
    "num_layers": 60,
    "out_channels": 16,
    "patch_size": 2
}

def load_and_convert_transformer():
    """Download the v19 Rapid-AIO checkpoint and load it into a fresh transformer.

    The raw safetensors file stores parameters under the
    "model.diffusion_model." prefix and also contains training metadata
    entries. Both are handled here: metadata is dropped, the prefix is
    stripped, and the remaining tensors are assigned into a
    meta-initialized QwenImageTransformer2DModel.

    Returns:
        The populated model, moved to the module-level device/dtype.
    """
    repo_id = "Phr00t/Qwen-Image-Edit-Rapid-AIO"
    filename = "v19/Qwen-Rapid-AIO-NSFW-v19.safetensors"

    print(f"Downloading raw model from {repo_id}...")
    # hf_hub_download resolves to (and caches in) the local HF cache.
    checkpoint_path = hf_hub_download(repo_id=repo_id, filename=filename)

    print("Converting keys in RAM and filtering metadata...")
    # Build the module skeleton on the meta device so no RAM is spent on
    # placeholder weights that are about to be replaced anyway.
    with init_empty_weights():
        model = QwenImageTransformer2DModel(**TRANSFORMER_CONFIG)

    raw_weights = safetensors.torch.load_file(checkpoint_path, device="cpu")

    prefix = "model.diffusion_model."
    # Training bookkeeping entries that are not model parameters; loading
    # them would raise at load_state_dict time.
    metadata_keys = {"__index_timestep_zero__", "iteration", "global_step"}

    # Keep only prefixed parameter tensors, renamed without the prefix.
    converted = {
        key[len(prefix):]: tensor
        for key, tensor in raw_weights.items()
        if key not in metadata_keys and key.startswith(prefix)
    }

    # assign=True moves tensors out of the dict into the meta-model;
    # strict=False reports (rather than raises on) key mismatches.
    missing, unexpected = model.load_state_dict(converted, assign=True, strict=False)

    if unexpected:
        print(f"Note: Ignored unexpected keys: {unexpected}")
    if missing:
        print(f"Note: Missing keys (this might be critical): {missing}")

    # Drop the CPU copies before the model is moved onto the GPU.
    del raw_weights, converted
    gc.collect()

    return model.to(dtype=dtype, device=device)

# Load every pipeline component EXCEPT the transformer from the base repo;
# the transformer slot is filled below with the converted v19 checkpoint.
# FIX: the kwarg was previously misspelled "tranformer", so transformer=None
# was never applied and the base repo's full transformer got downloaded and
# loaded anyway, wasting bandwidth and memory.
pipe = QwenImageEditPlusPipeline.from_pretrained(
    "Qwen/Qwen-Image-Edit-2511",
    transformer=None,
    torch_dtype=dtype,
)

# Swap in the converted transformer, then move the whole pipeline to the GPU.
pipe.transformer = load_and_convert_transformer()
pipe.to(device)

# Release CPU-side allocations made during conversion.
gc.collect()
torch.cuda.empty_cache()

# Flash Attention 3 may be unsupported on this GPU/software stack; keep the
# default attention processor if installing it fails.
try:
    pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())
    print("Flash Attention 3 Processor set successfully.")
except Exception as e:
    print(f"Warning: Could not set FA3 processor: {e}")

# Largest value representable by a 32-bit signed int (2**31 - 1).
MAX_SEED = np.iinfo(np.int32).max

# All preset sizes share one fixed edge of 9 * 112 = 1008 px.
BASE_WIDTH = 112 * 9


def _ratio_dims(ratio):
    """Pair BASE_WIDTH with the height implied by *ratio* (width / height)."""
    return (BASE_WIDTH, int(BASE_WIDTH / ratio))


# Dropdown presets; "Original" is a sentinel meaning "keep the first input
# image's own dimensions" (see infer()).
ASPECT_RATIOS = {
    "1:1": _ratio_dims(1 / 1),
    "16:9": _ratio_dims(16 / 9),
    "9:16": _ratio_dims(9 / 16),
    "4:3": _ratio_dims(4 / 3),
    "3:4": _ratio_dims(3 / 4),
    "3:2": _ratio_dims(3 / 2),
    "2:3": _ratio_dims(2 / 3),
    "Original": (0, 0),
}

# LoRA adapters selectable in the UI, keyed by display name.
# "repo"/"weights" locate the safetensors file on the Hugging Face Hub;
# "adapter_name" is the handle used with pipe.load_lora_weights /
# set_adapters / delete_adapters. "None" disables LoRA entirely.
# NOTE: some weight filenames are intentionally non-ASCII (as published
# upstream) and must not be altered.
ADAPTER_SPECS = {
    "None": { "adapter_name": "none" },
    "Multiple-Angles": {
        "repo": "dx8152/Qwen-Edit-2509-Multiple-angles",
        "weights": "镜头转换.safetensors",
        "adapter_name": "multiple-angles"
    },
    "Photo-to-Anime": {
        "repo": "autoweeb/Qwen-Image-Edit-2509-Photo-to-Anime",
        "weights": "Qwen-Image-Edit-2509-Photo-to-Anime_000001000.safetensors",
        "adapter_name": "photo-to-anime"
    },
    "Anime-V2": {
        "repo": "prithivMLmods/Qwen-Image-Edit-2511-Anime",
        "weights": "Qwen-Image-Edit-2511-Anime-2000.safetensors",
        "adapter_name": "anime-v2"
    },
    "Light-Migration": {
        "repo": "dx8152/Qwen-Edit-2509-Light-Migration",
        "weights": "参考色调.safetensors",
        "adapter_name": "light-migration"
    },
    "Upscaler": {
        "repo": "starsfriday/Qwen-Image-Edit-2511-Upscale2K",
        "weights": "qwen_image_edit_2511_upscale.safetensors",
        "adapter_name": "upscale-2k"
    },
    "Style-Transfer": {
        "repo": "zooeyy/Style-Transfer",
        "weights": "Style Transfer-Alpha-V0.1.safetensors",
        "adapter_name": "style-transfer"
    },
    "Manga-Tone": {
        "repo": "nappa114514/Qwen-Image-Edit-2509-Manga-Tone",
        "weights": "tone001.safetensors",
        "adapter_name": "manga-tone"
    },
    "Anything2Real": {
        "repo": "lrzjason/Anything2Real_2601",
        "weights": "anything2real_2601.safetensors",
        "adapter_name": "anything2real"
    },
    "Fal-Multiple-Angles": {
        "repo": "fal/Qwen-Image-Edit-2511-Multiple-Angles-LoRA",
        "weights": "qwen-image-edit-2511-multiple-angles-lora.safetensors",
        "adapter_name": "fal-multiple-angles"
    },
    "Polaroid-Photo": {
        "repo": "prithivMLmods/Qwen-Image-Edit-2511-Polaroid-Photo",
        "weights": "Qwen-Image-Edit-2511-Polaroid-Photo.safetensors",
        "adapter_name": "polaroid-photo"
    },
    "Unblur-Anything": {
        "repo": "prithivMLmods/Qwen-Image-Edit-2511-Unblur-Upscale",
        "weights": "Qwen-Image-Edit-2511-Unblur-Anything.safetensors",
        "adapter_name": "unblur-anything"
    },
    "Midnight-Noir-Eyes-Spotlight": {
        "repo": "prithivMLmods/Qwen-Image-Edit-2511-Midnight-Noir-Eyes-Spotlight",
        "weights": "Qwen-Image-Edit-2511-Midnight-Noir-Eyes-Spotlight.safetensors",
        "adapter_name": "midnight-noir-eyes-spotlight"
    },    
}

# Handles of adapters currently resident in the pipeline. infer() clears
# this (and unloads the weights) before loading a different adapter, so it
# holds at most one entry at a time.
LOADED_ADAPTERS = set()

#def update_dimensions_on_upload(image, max_width, max_height):
#    if image is None:
#        return max_width, max_height
#    
#    original_width, original_height = image.size
#    
#    if original_width > original_height:
#        new_width = max_width
#        aspect_ratio = original_height / original_width
#        new_height = int(new_width * aspect_ratio)
#    else:
#        new_height = max_height
#        aspect_ratio = original_width / original_height
#        new_width = int(new_height * aspect_ratio)
#
#    new_width = (new_width // 112) * 112
#    new_height = (new_height // 112) * 112
#    
#    return new_width, new_height

def _collect_pil_images(items):
    """Normalize a gradio Gallery value into a list of RGB PIL images.

    Gallery entries may be file paths, PIL images, (path, caption) tuples,
    or file-like objects exposing .name. Invalid entries are skipped with a
    log line rather than aborting the whole request.
    """
    pil_images = []
    for item in items:
        try:
            path_or_img = item[0] if isinstance(item, (tuple, list)) else item
            if isinstance(path_or_img, str):
                pil_images.append(Image.open(path_or_img).convert("RGB"))
            elif isinstance(path_or_img, Image.Image):
                pil_images.append(path_or_img.convert("RGB"))
            else:
                # File-like object (e.g. a tempfile wrapper) with a .name path.
                pil_images.append(Image.open(path_or_img.name).convert("RGB"))
        except Exception as e:
            print(f"Skipping invalid image item: {e}")
    return pil_images


def _ensure_adapter(lora_adapter, spec, adapter_name):
    """Make *adapter_name* the only LoRA resident in the pipeline.

    Any previously loaded adapters are unloaded and deleted first to keep
    memory lean. "none" leaves the pipeline without any LoRA.

    Raises:
        gr.Error: If downloading/loading the adapter fails.
    """
    if adapter_name in LOADED_ADAPTERS:
        print(f"--- Adapter {lora_adapter} is already loaded. ---")
        return

    print(f"--- Downloading and Loading Adapter: {lora_adapter} ---")
    try:
        # Unload all existing LoRAs to keep memory lean.
        pipe.unload_lora_weights()
        for old_adapter in list(LOADED_ADAPTERS):
            pipe.delete_adapters(old_adapter)
        LOADED_ADAPTERS.clear()

        if adapter_name != "none":
            pipe.load_lora_weights(
                spec["repo"],
                weight_name=spec["weights"],
                adapter_name=adapter_name,
            )
            LOADED_ADAPTERS.add(adapter_name)
    except Exception as e:
        raise gr.Error(f"Failed to load adapter {lora_adapter}: {e}")


@spaces.GPU
def infer(
    images,
    prompt,
    negative_prompt,
    lora_adapter,
    aspect_ratio,
    seed,
    randomize_seed,
    guidance_scale,
    steps,
    progress=gr.Progress(track_tqdm=True)
):
    """Run one edit pass of the Qwen-Image-Edit pipeline.

    Args:
        images: Gallery value — list of paths, PIL images, or (path, caption)
            tuples; at least one valid image is required.
        prompt: Positive edit instruction.
        negative_prompt: Negative prompt forwarded to the pipeline.
        lora_adapter: Key into ADAPTER_SPECS ("None" disables LoRA).
        aspect_ratio: Key into ASPECT_RATIOS ("Original" keeps the first
            image's own dimensions).
        seed: Base seed; replaced when randomize_seed is set.
        randomize_seed: Draw a fresh seed in [0, MAX_SEED] instead.
        guidance_scale: True-CFG scale passed to the pipeline.
        steps: Number of inference steps.
        progress: Gradio progress tracker (tqdm-linked).

    Returns:
        (edited PIL image, seed actually used)

    Raises:
        gr.Error: On missing/invalid images, unknown adapter, or LoRA
            load failure.
    """
    gc.collect()
    torch.cuda.empty_cache()

    if not images:
        raise gr.Error("Please upload at least one image to edit.")

    pil_images = _collect_pil_images(images)
    if not pil_images:
        raise gr.Error("Could not process uploaded images.")

    spec = ADAPTER_SPECS.get(lora_adapter)
    if not spec:
        raise gr.Error(f"Configuration not found for: {lora_adapter}")

    adapter_name = spec["adapter_name"]
    _ensure_adapter(lora_adapter, spec, adapter_name)

    if adapter_name != "none":
        pipe.set_adapters([adapter_name], adapter_weights=[1.0])

    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    # Gradio sliders may deliver floats; torch's manual_seed requires an int.
    seed = int(seed)

    generator = torch.Generator(device=device).manual_seed(seed)

    # "Original" keeps the first input image's own size; otherwise use the
    # preset (indexing raises a clear KeyError on an unknown key instead of
    # the opaque unpack-None crash the old `.get()` produced).
    if aspect_ratio != 'Original':
        width, height = ASPECT_RATIOS[aspect_ratio]
    else:
        width, height = pil_images[0].size

    # Round down to multiples of 8 as the pipeline expects.
    width = (width // 8) * 8
    height = (height // 8) * 8

    try:
        result_image = pipe(
            image=pil_images,
            prompt=prompt,
            negative_prompt=negative_prompt,
            height=height,
            width=width,
            num_inference_steps=steps,
            generator=generator,
            true_cfg_scale=guidance_scale,
        ).images[0]

        return result_image, seed
    finally:
        # Always release VRAM, whether the call succeeded or raised.
        gc.collect()
        torch.cuda.empty_cache()

#@spaces.GPU
#def infer_example(images, prompt, lora_adapter):
#    if not images:
#        return None, 0
#    
#    if isinstance(images, str):
#        images_list = [images]
#    else:
#        images_list = images
#        
#    result, seed = infer(
#        images=images_list,
#        prompt=prompt,
#        lora_adapter=lora_adapter,
#        seed=0,
#        randomize_seed=True,
#        guidance_scale=1.0,
#        steps=4
#    )
#    return result, seed

# Page-level CSS overrides for the gradio app: centered, width-capped main
# column plus a larger title.
css="""
#col-container {
    margin: 0 auto;
    max-width: 1000px;
}
#main-title h1 {font-size: 2.3em !important;}
"""

# FIX: css/theme must be given to gr.Blocks(); Blocks.launch() does not
# accept them as keyword arguments, so passing them there raises a
# TypeError (and neither styling was ever applied).
with gr.Blocks(css=css, theme=orange_red_theme) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("# **Qwen-Image-Edit-2511-LoRAs-Fast**", elem_id="main-title")
        gr.Markdown("Perform diverse image edits using specialized [LoRA](https://huggingface.co/models?other=base_model:adapter:Qwen/Qwen-Image-Edit-2511) adapters. Upload one or more images.")

        with gr.Row(equal_height=True):
            # Left column: inputs.
            with gr.Column():
                images = gr.Gallery(
                    label="Upload Images", 
                    type="filepath", 
                    columns=2, 
                    rows=1, 
                    height=300,
                    allow_preview=True
                )
                
                prompt = gr.Text(
                    label="Prompt",
                    show_label=True,
                    placeholder="e.g., transform into anime..",
                )
            
                negative_prompt = gr.Text(
                    label="Negative Prompt",
                    show_label=True,
                    value="worst quality, low quality, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, jpeg artifacts, signature, watermark, username, blurry",
                )

                run_button = gr.Button("Edit Image", variant="primary")

            # Right column: output plus generation controls.
            with gr.Column():
                output_image = gr.Image(label="Output Image", interactive=False, format="png", height=363)
                
                with gr.Row():
                    lora_adapter = gr.Dropdown(
                        label="Choose Editing Style",
                        choices=list(ADAPTER_SPECS.keys()),
                        value="None"
                    )

                with gr.Row():
                    aspect_ratio = gr.Dropdown(
                        label="Aspect Ratio",
                        choices=list(ASPECT_RATIOS.keys()),
                        value="Original"
                    )
                
                with gr.Accordion("Advanced Settings", open=False, visible=True):
                    seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
                    randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
                    guidance_scale = gr.Slider(label="Guidance Scale", minimum=1.0, maximum=10.0, step=0.1, value=1.0)
                    steps = gr.Slider(label="Inference Steps", minimum=1, maximum=50, step=1, value=4)

        # Examples are intentionally disabled in this version of the app.

        gr.Markdown("[*](https://huggingface.co/spaces/prithivMLmods/Qwen-Image-Edit-2511-LoRAs-Fast)This is still an experimental Space for Qwen-Image-Edit-2511.")

    run_button.click(
        fn=infer,
        inputs=[images, prompt, negative_prompt, lora_adapter, aspect_ratio, seed, randomize_seed, guidance_scale, steps],
        outputs=[output_image, seed]
    )

if __name__ == "__main__":
    demo.queue(max_size=30).launch(mcp_server=True, ssr_mode=False, show_error=True)

@pulb
Updating soon. Thanks for the suggestion.🤗

prithivMLmods changed discussion status to closed

Hi, just out of curiosity, is there a reason you pre-converted the model to disk instead of doing it on the fly like in my script? And did you use the same conversion I do, or is there a difference in your V19 files?

Exactly the same. The reason for the conversion is flexibility. It allows people to load it wherever needed and also reduces inference time when loading to cuda during the build.

Alright, thanks for the clarification :)

Sign up or log in to comment