# Copyright 2024 Anton Obukhov, Bingxin Ke, ETH Zurich and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------------------------
# If you find this code useful, we kindly ask you to cite our paper in your work.
# Please find bibtex at: https://github.com/prs-eth/Marigold#-citation
# More information about the method can be found at https://marigoldmonodepth.github.io
# --------------------------------------------------------------------------
import logging
import math
from typing import Union

import numpy as np
import torch
from PIL import Image
from PIL.Image import Resampling
from torch.utils.data import DataLoader, TensorDataset
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    LCMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import BaseOutput, check_min_version


# Will error if the minimal version of diffusers is not installed. Remove at your own risk.
check_min_version("0.27.0.dev0")


class MarigoldNormalsOutput(BaseOutput):
    """
    Output class for Marigold monocular normals prediction pipeline.

    Args:
        normals_np (`np.ndarray`):
            Predicted normals map, with normals values in the range of [-1, 1].
        normals_colored (`None` or `PIL.Image.Image`):
            Colorized normals map, an RGB image with normals components mapped from [-1, 1] to [0, 255].
        normals_uncertainty (`None` or `np.ndarray`):
            Uncalibrated uncertainty (MAD, median absolute deviation) coming from ensembling.
    """

    normals_np: np.ndarray
    normals_colored: Union[None, Image.Image]
    normals_uncertainty: Union[None, np.ndarray]


class MarigoldNormalsPipeline(DiffusionPipeline):
    """
    Pipeline for monocular normals estimation using Marigold: https://marigoldmonodepth.github.io.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

    Args:
        unet (`UNet2DConditionModel`):
            Conditional U-Net to denoise the normals latent, conditioned on the image latent.
        vae (`AutoencoderKL`):
            Variational Auto-Encoder (VAE) Model to encode and decode images and normals maps to and from latent
            representations.
        scheduler (`DDIMScheduler`):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents.
        text_encoder (`CLIPTextModel`):
            Text-encoder, for empty text embedding.
        tokenizer (`CLIPTokenizer`):
            CLIP tokenizer.
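
    Example:
        A minimal usage sketch. The checkpoint path below is a placeholder; substitute an actual Marigold
        normals checkpoint:

        ```py
        >>> import torch
        >>> from PIL import Image

        >>> pipe = MarigoldNormalsPipeline.from_pretrained(
        ...     "path/to/marigold-normals-checkpoint",  # placeholder checkpoint path
        ...     torch_dtype=torch.float16,
        ... ).to("cuda")
        >>> image = Image.open("input.jpg")
        >>> output = pipe(image, ensemble_size=5, seed=2024)
        >>> output.normals_colored.save("normals_colored.png")
        ```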
""" latent_scale_factor = 0.18215 def __init__( self, unet: UNet2DConditionModel, vae: AutoencoderKL, scheduler: DDIMScheduler, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, ): super().__init__() self.register_modules( unet=unet, vae=vae, scheduler=scheduler, text_encoder=text_encoder, tokenizer=tokenizer, ) self.empty_text_embed = None @torch.no_grad() def __call__( self, input_image: Image, denoising_steps: int = 10, ensemble_size: int = 10, processing_res: int = 768, match_input_res: bool = True, resample_method: str = "bilinear", batch_size: int = 0, save_memory: bool = False, seed: Union[int, None] = None, color_map: str = "Spectral", # TODO change colorization api based on modality show_progress_bar: bool = True, **kwargs, ) -> MarigoldNormalsOutput: """ Function invoked when calling the pipeline. Args: input_image (`Image`): Input RGB (or gray-scale) image. denoising_steps (`int`, *optional*, defaults to `10`): Number of diffusion denoising steps (DDIM) during inference. ensemble_size (`int`, *optional*, defaults to `10`): Number of predictions to be ensembled. processing_res (`int`, *optional*, defaults to `768`): Maximum resolution of processing. If set to 0: will not resize at all. match_input_res (`bool`, *optional*, defaults to `True`): Resize normals prediction to match input resolution. Only valid if `limit_input_res` is not None. resample_method: (`str`, *optional*, defaults to `bilinear`): Resampling method used to resize images and depth predictions. This can be one of `bilinear`, `bicubic` or `nearest`, defaults to: `bilinear`. batch_size (`int`, *optional*, defaults to `0`): Inference batch size, no bigger than `num_ensemble`. If set to 0, the script will automatically decide the proper batch size. save_memory (`bool`, defaults to `False`): Extra steps to save memory at the cost of perforance. seed (`int`, *optional*, defaults to `None`) Reproducibility seed. color_map (`str`, *optional*, defaults to `"Spectral"`, pass `None` to skip colorized normals map generation): Colormap used to colorize the normals map. show_progress_bar (`bool`, *optional*, defaults to `True`): Display a progress bar of diffusion denoising. Returns: `MarigoldNormalsOutput`: Output class for Marigold monocular normals prediction pipeline, including: - **normals_np** (`np.ndarray`) Predicted normals map, with normals values in the range of [-1, 1] - **normals_colored** (`None` or `PIL.Image.Image`) Colorized normals map, with the shape of [3, H, W] and values in [0, 1]. None if `color_map` is `None` - **normals_uncertainty** (`None` or `np.ndarray`) Uncalibrated uncertainty(MAD, median absolute deviation) coming from ensembling. 
        # Check input arguments
        if not match_input_res:
            assert processing_res is not None
        assert processing_res >= 0
        assert denoising_steps >= 1
        assert ensemble_size >= 1

        # Check if denoising step is reasonable
        self.check_inference_step(denoising_steps)

        resample_method: Resampling = self.get_pil_resample_method(resample_method)

        # ----------------- Image preprocessing -----------------
        W, H = input_image.size
        if processing_res > 0:
            input_image = self.resize_max_res(
                input_image,
                max_edge_resolution=processing_res,
                resample_method=resample_method,
            )
        input_image = input_image.convert("RGB")
        image = np.asarray(input_image)

        # Normalize RGB values
        rgb = np.transpose(image, (2, 0, 1))  # [H, W, rgb] -> [rgb, H, W]
        rgb_norm = rgb / 255.0 * 2.0 - 1.0  # [0, 255] -> [-1, 1]
        rgb_norm = torch.from_numpy(rgb_norm).to(self.dtype)
        rgb_norm = rgb_norm.to(self.device)
        assert rgb_norm.min() >= -1.0 and rgb_norm.max() <= 1.0  # TODO remove this

        # ----------------- Predicting normals -----------------
        # Batch repeated input image
        duplicated_rgb = torch.stack([rgb_norm] * ensemble_size)
        single_rgb_dataset = TensorDataset(duplicated_rgb)
        if batch_size <= 0:
            batch_size = self.find_batch_size(
                ensemble_size=ensemble_size,
                input_res=max(rgb_norm.shape[1:]),
                dtype=self.dtype,
            )
        single_rgb_loader = DataLoader(single_rgb_dataset, batch_size=batch_size, shuffle=False)

        # Predict normals maps (batched)
        pred = []
        iterable = single_rgb_loader
        if show_progress_bar:
            iterable = tqdm(single_rgb_loader, desc=" " * 2 + "Inference batches", leave=False)
        for batch in iterable:
            (batched_img,) = batch
            pred_raw = self.single_infer(
                rgb_in=batched_img,
                num_inference_steps=denoising_steps,
                seed=seed,
                show_pbar=show_progress_bar,
            )
            pred_raw = pred_raw.detach()
            if save_memory:
                pred_raw = pred_raw.cpu()
            pred.append(pred_raw)
        pred = torch.concat(pred, dim=0)  # [B,3,H,W]
        pred_uncert = None

        if save_memory:
            torch.cuda.empty_cache()

        # ----------------- Test-time ensembling -----------------
        if ensemble_size > 1:
            pred, pred_uncert = self.ensemble_normals(
                pred, **(kwargs.get("ensemble_kwargs", {}))
            )  # [1,3,H,W], [1,H,W]

        # Resize back to original resolution and renormalize the normal vectors
        if match_input_res:
            pred = torch.nn.functional.interpolate(
                pred, (H, W), mode="bilinear"  # TODO: parameterize this method
            )  # [1,3,H,W]
            norm = torch.norm(pred, dim=1, keepdim=True)  # [1,1,H,W]
            pred /= norm.clamp(min=1e-6)
            if pred_uncert is not None:
                pred_uncert = torch.nn.functional.interpolate(
                    pred_uncert.unsqueeze(1), (H, W), mode="bilinear"
                ).squeeze(1)  # [1,H,W]

        # TODO: make X-axis of normals configurable through abstraction
        # Colorize: map normals components from [-1, 1] to [0, 255]
        if color_map is not None:
            colored = (pred.squeeze(0) + 1.0) * 0.5
            colored = (colored * 255).to(torch.uint8)
            colored = self.chw2hwc(colored).cpu().numpy()
            colored_img = Image.fromarray(colored)
        else:
            colored_img = None

        if pred_uncert is not None:
            pred_uncert = pred_uncert.cpu().numpy()
        pred = pred.cpu().numpy()  # TODO: np or torch?

        out = MarigoldNormalsOutput(
            normals_np=pred,
            normals_colored=colored_img,
            normals_uncertainty=pred_uncert,
        )
        return out

    def check_inference_step(self, n_step: int):
        """
        Check if the number of denoising steps is reasonable for the current scheduler.

        Args:
            n_step (`int`): denoising steps
        """
        assert n_step >= 1

        if isinstance(self.scheduler, DDIMScheduler):
            if n_step < 10:
                logging.warning(  # TODO: do we want logging import? or just warning?
                    f"Too few denoising steps: {n_step}. Recommended to use the LCM checkpoint for few-step inference."
                )
        elif isinstance(self.scheduler, LCMScheduler):
            if not 1 <= n_step <= 4:
                logging.warning(
                    f"Non-optimal setting of denoising steps: {n_step}. Recommended setting is 1-4 steps."
                )
        else:
            raise RuntimeError(f"Unsupported scheduler type: {type(self.scheduler)}")

    def encode_empty_text(self):
        """
        Encode text embedding for empty prompt.
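
        The embedding is computed once and cached in `self.empty_text_embed` for reuse across batches and calls.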
""" prompt = "" text_inputs = self.tokenizer( prompt, padding="do_not_pad", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids.to(self.text_encoder.device) self.empty_text_embed = self.text_encoder(text_input_ids)[0].to(self.dtype) @torch.no_grad() def single_infer( self, rgb_in: torch.Tensor, num_inference_steps: int, seed: Union[int, None], show_pbar: bool, ) -> torch.Tensor: """ Perform an individual normals prediction without ensembling. """ device = rgb_in.device # Set timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # [T] # Encode image rgb_latent = self.encode_rgb(rgb_in) # Initialize prediction latent with noise if seed is None: rand_num_generator = None else: rand_num_generator = torch.Generator(device=device) rand_num_generator.manual_seed(seed) pred_latent = torch.randn( rgb_latent.shape, device=device, dtype=self.dtype, generator=rand_num_generator, ) # [B, 4, h, w] # Batched empty text embedding if self.empty_text_embed is None: self.encode_empty_text() batch_empty_text_embed = self.empty_text_embed.repeat( (rgb_latent.shape[0], 1, 1) ) # [B, 2, 1024] # Denoising loop if show_pbar: iterable = tqdm( enumerate(timesteps), total=len(timesteps), leave=False, desc=" " * 4 + "Diffusion denoising", ) else: iterable = enumerate(timesteps) for i, t in iterable: unet_input = torch.cat( [rgb_latent, pred_latent], dim=1 ) # this order is important # predict the noise residual noise_pred = self.unet( unet_input, t, encoder_hidden_states=batch_empty_text_embed ).sample # [B, 4, h, w] # compute the previous noisy sample x_t -> x_t-1 pred_latent = self.scheduler.step(noise_pred, t, pred_latent, generator=rand_num_generator).prev_sample # torch.cuda.empty_cache() # TODO is it really needed here, even if memory saving? pred_pixels = self.decode_pred(pred_latent) # [B, 3, H, W] return pred_pixels def encode_rgb(self, rgb_in: torch.Tensor) -> torch.Tensor: """ Encode RGB image into latent. Args: rgb_in (`torch.Tensor`): Input RGB image to be encoded. Returns: `torch.Tensor`: Image latent. """ # encode h = self.vae.encoder(rgb_in) moments = self.vae.quant_conv(h) mean, logvar = torch.chunk(moments, 2, dim=1) # scale latent rgb_latent = mean * self.latent_scale_factor return rgb_latent def decode_pred(self, latent: torch.Tensor) -> torch.Tensor: """ Decode normals latent into normals map. Args: latent (`torch.Tensor`): Prediction latent to be decoded [B, 4, h, w]. Returns: `torch.Tensor`: Decoded prediction map [B, 3, H, W]. """ # decode latent latent = latent / self.latent_scale_factor latent = self.vae.post_quant_conv(latent) pixels = self.vae.decoder(latent) # clip prediction pixels = torch.clip(pixels, -1.0, 1.0) # renormalize prediction norm = torch.norm(pixels, dim=1, keepdim=True) pixels = pixels / norm.clamp(min=1e-6) return pixels @staticmethod def get_pil_resample_method(method_str: str) -> Resampling: resample_method_dic = { "bilinear": Resampling.BILINEAR, "bicubic": Resampling.BICUBIC, "nearest": Resampling.NEAREST, } resample_method = resample_method_dic.get(method_str, None) if resample_method is None: raise ValueError(f"Unknown resampling method: {resample_method}") else: return resample_method @staticmethod def resize_max_res(img: Image.Image, max_edge_resolution: int, resample_method=Resampling.BILINEAR) -> Image.Image: """ Resize image to limit maximum edge length while keeping aspect ratio. 
""" original_width, original_height = img.size downscale_factor = min(max_edge_resolution / original_width, max_edge_resolution / original_height) new_width = int(original_width * downscale_factor) new_height = int(original_height * downscale_factor) resized_img = img.resize((new_width, new_height), resample=resample_method) return resized_img @staticmethod def chw2hwc(chw): assert 3 == len(chw.shape) if isinstance(chw, torch.Tensor): hwc = torch.permute(chw, (1, 2, 0)) elif isinstance(chw, np.ndarray): hwc = np.moveaxis(chw, 0, -1) return hwc @staticmethod def find_batch_size(ensemble_size: int, input_res: int, dtype: torch.dtype) -> int: """ Automatically search for suitable operating batch size. Args: ensemble_size (`int`): Number of predictions to be ensembled. input_res (`int`): Operating resolution of the input image. Returns: `int`: Operating batch size. """ # Search table for suggested max. inference batch size bs_search_table = [ # tested on A100-PCIE-80GB {"res": 768, "total_vram": 79, "bs": 35, "dtype": torch.float32}, {"res": 1024, "total_vram": 79, "bs": 20, "dtype": torch.float32}, # tested on A100-PCIE-40GB {"res": 768, "total_vram": 39, "bs": 15, "dtype": torch.float32}, {"res": 1024, "total_vram": 39, "bs": 8, "dtype": torch.float32}, {"res": 768, "total_vram": 39, "bs": 30, "dtype": torch.float16}, {"res": 1024, "total_vram": 39, "bs": 15, "dtype": torch.float16}, # tested on RTX3090, RTX4090 {"res": 512, "total_vram": 23, "bs": 20, "dtype": torch.float32}, {"res": 768, "total_vram": 23, "bs": 7, "dtype": torch.float32}, {"res": 1024, "total_vram": 23, "bs": 3, "dtype": torch.float32}, {"res": 512, "total_vram": 23, "bs": 40, "dtype": torch.float16}, {"res": 768, "total_vram": 23, "bs": 18, "dtype": torch.float16}, {"res": 1024, "total_vram": 23, "bs": 10, "dtype": torch.float16}, # tested on GTX1080Ti {"res": 512, "total_vram": 10, "bs": 5, "dtype": torch.float32}, {"res": 768, "total_vram": 10, "bs": 2, "dtype": torch.float32}, {"res": 512, "total_vram": 10, "bs": 10, "dtype": torch.float16}, {"res": 768, "total_vram": 10, "bs": 5, "dtype": torch.float16}, {"res": 1024, "total_vram": 10, "bs": 3, "dtype": torch.float16}, ] if not torch.cuda.is_available(): return 1 total_vram = torch.cuda.mem_get_info()[1] / 1024.0**3 filtered_bs_search_table = [s for s in bs_search_table if s["dtype"] == dtype] for settings in sorted( filtered_bs_search_table, key=lambda k: (k["res"], -k["total_vram"]), ): if input_res <= settings["res"] and total_vram >= settings["total_vram"]: bs = settings["bs"] if bs > ensemble_size: bs = ensemble_size elif bs > math.ceil(ensemble_size / 2) and bs < ensemble_size: bs = math.ceil(ensemble_size / 2) return bs return 1 @staticmethod def ensemble_normals(pred_normals: torch.Tensor, reduction: str = "median"): assert reduction in ("median", "mean") B, C, H, W = pred_normals.shape assert C == 3 mean_normals = pred_normals.mean(dim=0, keepdim=True) # [1,3,H,W] mean_normals_norm = mean_normals.norm(dim=1, keepdim=True) # [1,1,H,W] mean_normals /= mean_normals_norm.clip(min=1e-6) # [1,3,H,W] sim_cos = (mean_normals * pred_normals).sum(dim=1) # [B,H,W] sim_acos = sim_cos.arccos() # [B,H,W] sim_acos = sim_acos.mean(dim=0, keepdim=True) / math.pi # [1,H,W] if reduction == "mean": return mean_normals, sim_acos # [1,3,H,W], [1,H,W] # Find the index of the closest normal vector for each pixel closest_indices = sim_cos.argmax(dim=0, keepdim=True) # [1,H,W] closest_indices = closest_indices.unsqueeze(0).repeat(1, 3, 1, 1) # [1,3,H,W] closest_normals = 
torch.gather(pred_normals, 0, closest_indices) return closest_normals, sim_acos # [1,3,H,W], [1,H,W]
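

if __name__ == "__main__":
    # Minimal sanity check of `ensemble_normals` on synthetic data. This is an
    # illustrative sketch, not part of the pipeline; it assumes random unit-length
    # normal maps as stand-ins for real predictions.
    torch.manual_seed(0)
    fake_preds = torch.randn(4, 3, 8, 8)  # ensemble of 4 fake normal maps
    fake_preds /= fake_preds.norm(dim=1, keepdim=True).clamp(min=1e-6)

    normals, uncertainty = MarigoldNormalsPipeline.ensemble_normals(fake_preds)
    assert normals.shape == (1, 3, 8, 8)
    assert uncertainty.shape == (1, 8, 8)
    print("ensemble_normals sanity check passed")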