import os
import sys
import argparse
import re
import json
from pathlib import Path
from typing import Any

import torch
import torchvision.transforms as T
from PIL import Image

sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
os.environ["GRADIO_TEMP_DIR"] = "./tmp"

from jodi_pipeline import JodiPipeline
from model.postprocess import (
    ImagePostProcessor,
    LineartPostProcessor,
    EdgePostProcessor,
    DepthPostProcessor,
    NormalPostProcessor,
    AlbedoPostProcessor,
    SegADE20KPostProcessor,
    OpenposePostProcessor,
)
from transformers import AutoProcessor, Qwen3VLForConditionalGeneration


def clean_question(q: str) -> str:
    if not isinstance(q, str):
        q = str(q)
    # Remove placeholders such as <image 1>
    q = re.sub(r"<\s*image\s*\d+\s*>", "", q, flags=re.IGNORECASE)
    # Collapse redundant whitespace
    q = re.sub(r"\s+", " ", q).strip()
    return q


def dump_image(image, save_root):
    os.makedirs(save_root, exist_ok=True)
    save_path = os.path.join(save_root, "input.jpg")
    image.convert("RGB").save(save_path, format="JPEG", quality=95)
    return save_path


def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"):
    """
    Concatenate multiple images into one grid image and save it.

    Args:
        image_paths: list of image paths.
        save_path: output path (including the file name).
        images_per_row: number of images per row (defaults to all images in one row).
        image_format: output image format.
    """
    # Load the images
    images = [Image.open(p).convert("RGB") for p in image_paths]
    if images_per_row is None:
        images_per_row = len(images)

    # Resize every image to a common square size
    target_size = min(1024, images[0].size[0])
    images = [img.resize((target_size, target_size)) for img in images]

    # Stitch the images into a grid
    widths, heights = zip(*(img.size for img in images))
    max_width = max(widths)
    rows = (len(images) + images_per_row - 1) // images_per_row
    total_height = heights[0] * rows  # all images share the same height after resizing
    new_im = Image.new("RGB", (max_width * images_per_row, total_height))
    y_offset = 0
    for i in range(0, len(images), images_per_row):
        row_imgs = images[i:i + images_per_row]
        x_offset = 0
        for img in row_imgs:
            new_im.paste(img, (x_offset, y_offset))
            x_offset += max_width
        y_offset += heights[0]

    os.makedirs(os.path.dirname(save_path), exist_ok=True)
    new_im.save(save_path, format=image_format.upper())
    print(f"🧩 Saved merged image → {save_path}")
    return save_path


def build_vqa_message(root, prompt, question):
    """
    Build a Qwen3-VL message for multimodal or single-image VQA.
    Each modality image is explicitly tagged before being fed into Qwen3-VL,
    so that the model can distinguish RGB, edge, depth, normal, etc.
    """
    root_path = Path(root)

    # ---------- Single-image case ----------
    if root_path.is_file() and root_path.suffix.lower() in [".jpg", ".jpeg", ".png"]:
        image_path = str(root_path)
        text_prompt = (
            f"You are given one RGB image and a text description of the same scene.\n"
            f"Scene description: \"{prompt}\"\n\n"
            f"Now analyze the image carefully and answer the following question based only on what is visible.\n"
            f"Do NOT guess or add details not supported by the image.\n"
            f"Question: \"{question}\"\n"
        )
        messages = [
            {
                "role": "user",
                "content": [
                    {"type": "image", "image": image_path},
                    {"type": "text", "text": text_prompt},
                ],
            }
        ]
        return messages

    # ---------- Multi-modality folder case ----------
    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
        "annotation_openpose",
    ]

    # Collect the modality files that exist
    available = []
    for name in modality_names:
        for ext in [".png", ".jpg", ".jpeg"]:
            path = Path(root) / f"{name}{ext}"
            if path.exists():
                available.append((name, str(path)))
                break

    # Human-readable modality names
    readable_map = {
        "image": "RGB image",
        "annotation_lineart": "line drawing",
        "annotation_edge": "edge map",
        "annotation_depth": "depth map",
        "annotation_normal": "normal map",
        "annotation_albedo": "albedo map",
        "annotation_seg_12colors": "segmentation map",
        "annotation_openpose": "human pose map",
    }
    present_modalities = [readable_map[n] for n, _ in available]

    # ---------- Instruction text ----------
    text_prompt = (
        f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. "
        f"The **RGB image** is the primary and most reliable modality that truly represents the scene. "
        f"Other modalities (e.g., depth, normal, segmentation) may contain small errors or artifacts, "
        f"so use them only as optional references for additional context. "
        f"Each modality provides complementary information about the same visual content:\n"
        f"- The line drawing highlights object outlines, shapes, and fine structures.\n"
        f"- The edge map emphasizes boundaries and contours.\n"
        f"- The depth map reveals spatial distances, perspective, and 3D relationships.\n"
        f"- The normal map shows surface orientation and geometric curvature.\n"
        f"- The albedo map presents true surface color without illumination or shadows.\n"
        f"- The segmentation map divides the scene into semantic regions and object categories.\n"
        f"- The human pose map indicates body orientation, structure, and articulation.\n\n"
        f"Together, these modalities offer a unified, rich understanding of the scene.\n"
        f"Scene description: \"{prompt}\"\n\n"
        f"Please answer the following question using visual reasoning primarily grounded in the RGB image, "
        f"while cross-checking with other modalities (e.g., edge or depth) when relevant.\n"
        f"If multiple correct answers are possible, choose the most precise and visually supported one.\n\n"
        f"Question: \"{question}\"\n"
    )

    # ---------- Build the content sequence (modality-anchored) ----------
    content = []
    for name, path in available:
        readable = readable_map.get(name, "visual input")
        # Explicitly label the modality type before each image
        content.append({"type": "text", "text": f"This is the {readable}."})
        content.append({"type": "image", "image": path})
    # Append the main instruction last
    content.append({"type": "text", "text": text_prompt})

    messages = [{"role": "user", "content": content}]
    return messages


def build_multimodal_message(root, coarse_caption="a generic scene", feedback=""):
    """
    Build a Qwen3-VL message for multi-modal caption refinement.
    Each image is explicitly bound to its modality name (RGB, edge, depth, etc.)
    so Qwen3-VL can reason over them correctly and refine the caption faithfully.
    """
    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
        "annotation_openpose",
    ]

    # --- Collect the available modalities ---
    available = []
    for name in modality_names:
        for ext in [".png", ".jpg", ".jpeg"]:
            path = Path(root) / f"{name}{ext}"
            if path.exists():
                available.append((name, str(path)))
                break

    # --- Human-readable modality names ---
    readable_map = {
        "image": "RGB image",
        "annotation_lineart": "line drawing",
        "annotation_edge": "edge map",
        "annotation_depth": "depth map",
        "annotation_normal": "normal map",
        "annotation_albedo": "albedo map",
        "annotation_seg_12colors": "segmentation map",
        "annotation_openpose": "human pose map",
    }
    present_modalities = [readable_map[n] for n, _ in available]

    # --- Build the text instruction ---
    text_prompt = (
        f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}.\n"
        f"Each modality provides a different aspect of visual information about the same scene.\n\n"
        f"### Modality Information:\n"
        f"- **RGB image:** shows colors, textures, lighting, and overall appearance.\n"
        f"- **Line drawing:** reveals outlines, object contours, and structural details.\n"
        f"- **Edge map:** highlights strong edges and object boundaries.\n"
        f"- **Depth map:** encodes per-object spatial distance and perspective. "
        f"For each main object, estimate its approximate physical distance from the camera or ground reference "
        f"in **meters**. "
        f"If multiple objects are visible, provide numeric distances rather than qualitative terms like "
        f"'closer' or 'farther'.\n"
        f"- **Normal map:** provides surface orientation and facing direction.\n"
        f"- **Albedo map:** shows true surface color unaffected by lighting or shadows.\n"
        f"- **Segmentation map:** divides the image into semantic regions and object categories.\n"
        f"- **Human pose map:** depicts human keypoints, poses, and orientations if present.\n\n"
        f"### Your Task:\n"
        f"Refine the coarse caption into a detailed, modality-wise visual description. "
        f"For each available modality listed above, generate one corresponding description paragraph "
        f"based only on what that modality shows.\n\n"
        f"### Rules:\n"
        f"1. Follow the order and modality names given in 'Modality Information'.\n"
        f"2. Start each paragraph with the modality name (e.g., 'RGB image:').\n"
        f"3. Describe only what is visible in that modality—do NOT merge or summarize multiple modalities.\n"
        f"4. Use **numeric distance estimates in meters** for the depth map whenever possible.\n"
        f"5. Use clear and factual language (no imagination or hallucination).\n"
        # f"6. You may use the following feedback for improvement: '{feedback}'\n\n"
        f"### Coarse Caption:\n'{coarse_caption}'\n\n"
        f"Now, according to the 'Modality Information' above, write one detailed description for each available modality below."
    )

    # --- Build the message content: tag each image with its modality ---
    content = []
    for name, path in available:
        readable = readable_map.get(name, "visual input")
        content.append({
            "type": "text",
            "text": f"This is the {readable}, which provides {get_modality_description(name)}.",
        })
        content.append({"type": "image", "image": path})
    # Append the overall task instruction last
    content.append({"type": "text", "text": text_prompt})

    messages = [{"role": "user", "content": content}]
    return messages


def get_modality_description(name: str) -> str:
    """Return a one-sentence description of a modality, used to help the model interpret it."""
    desc_map = {
        "image": "the main visual appearance of the scene, including color, texture, and lighting",
        "annotation_lineart": "structural outlines, object contours, and fine geometry",
        "annotation_edge": "strong boundaries and contrast edges between objects",
        "annotation_depth": "distance and perspective information for spatial understanding",
        "annotation_normal": "surface orientation and geometric curvature cues",
        "annotation_albedo": "pure surface color without lighting or shading effects",
        "annotation_seg_12colors": "semantic regions and object categories",
        "annotation_openpose": "human body keypoints, joints, and orientation",
    }
    return desc_map.get(name, "complementary visual evidence")


# ------------------------------
# Argument Parser
# ------------------------------
def get_parser():
    parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.")
    parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
                        help="Path or name of the Qwen3-VL model checkpoint.")
    parser.add_argument("--config", type=str, default="./configs/inference.yaml",
                        help="Path to the Jodi config file.")
    parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth',
                        help="Path to the Jodi model checkpoint.")
    parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
                        help="Path or name used to load the Qwen3-VL processor.")
    parser.add_argument("--data_path", type=str, default="/home/efs/mjw/mjw/dataset/dataset/realworldqa/images",
                        help="Directory containing the input images.")
    parser.add_argument("--json", type=str, default="/home/efs/mjw/mjw/dataset/dataset/realworldqa/annotations.json",
                        help="Path to the VQA annotation JSON file.")
    parser.add_argument("--temp_dir", type=str, default="/home/efs/mjw/mjw/dataset/dataset/tmp",
                        help="Directory for temporary files.")
    parser.add_argument("--negative_prompt", type=str, default="",
                        help="Optional negative prompt.")
    parser.add_argument("--question", type=str, default="how many cars in this image?",
                        help="Default question text (per-sample questions are read from the annotation JSON).")
    parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.")
    parser.add_argument("--iters", type=int, default=10, help="Number of refinement iterations.")
    parser.add_argument("--guidance_scale", type=float, default=4.5)
    parser.add_argument("--seed", type=int, default=41)
    parser.add_argument("--output_dir", type=str, default="./vqa_realworld_outputs",
                        help="Directory to save results.")
    return parser


# ------------------------------
# Main Inference Functions
# ------------------------------
@torch.inference_mode()
def init_i2t(model, processor, image_path, iter_num, vqa_id, max_length=300):
    """Generate an initial caption for the input image."""
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                {"type": "text", "text": "Describe this image."},
            ],
        }
    ]
    inputs = processor.apply_chat_template(
        messages, tokenize=True, add_generation_prompt=True, return_dict=True, return_tensors="pt"
    )
    inputs = inputs.to(model.device)

    # Inference: generate the caption and strip the prompt tokens
    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)
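    # batch_decode returns a list with one decoded string per generated sequence (a single item here)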
    # Save the caption for this iteration
    os.makedirs(args.output_dir, exist_ok=True)
    save_dir = Path(args.output_dir) / vqa_id / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    caption_path = save_dir / "caption.txt"
    with open(caption_path, "w", encoding="utf-8") as f:
        f.write(output_text[0].strip())
    return output_text[0]


@torch.inference_mode()
def evaluate_consistency(image_path, model, processor, caption, max_length=256):
    """Score how well a caption matches the RGB image and return (score, feedback)."""
    # --- Build the Qwen input ---
    eval_prompt = f"""
You are an image-text alignment evaluator.
You are given one RGB image and a description that may include references to multiple visual modalities
(e.g., depth map, normal map, segmentation map, etc.).
These terms are just analytical perspectives of the same scene — they should not reduce the consistency score.
Focus only on whether the described visual content matches what is visible in the RGB image.

Your task:
1. Judge how accurately the text describes what is visually present in the image.
2. Ignore mentions of modality names (such as 'depth map' or 'normal map').
3. Provide a consistency score between 0.0 (completely mismatched) and 1.0 (perfect match).
4. Provide one short feedback sentence suggesting how to make the description better aligned.

Return JSON strictly in this format:
{{"Consistency": <score>, "Feedback": "<one-sentence feedback>"}}

Description: "{caption}"
"""
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image_path},
                {"type": "text", "text": eval_prompt},
            ],
        }
    ]

    # --- Inference ---
    inputs = processor.apply_chat_template(
        messages, tokenize=True, add_generation_prompt=True, return_dict=True, return_tensors="pt"
    ).to(model.device)
    out_ids = model.generate(**inputs, max_new_tokens=max_length)
    out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)]
    text = processor.batch_decode(out_trim, skip_special_tokens=True)[0]

    # --- Parse the output ---
    try:
        data = json.loads(re.search(r"\{.*\}", text, re.S).group(0))
        score = float(data.get("Consistency", 0))
        feedback = data.get("Feedback", "")
    except Exception:
        score, feedback = 0.0, text.strip()

    print(f"🧮 [Image Consistency] {score:.3f} | Feedback: {feedback}")
    return score, feedback


@torch.inference_mode()
def text_refine(root, model, processor, prompt, feedback, iter_num, vqa_id, max_length=300):
    """Refine the caption using all available modality images under `root`."""
    messages = build_multimodal_message(root, prompt, feedback)
    inputs = processor.apply_chat_template(
        messages, tokenize=True, add_generation_prompt=True, return_dict=True, return_tensors="pt"
    )
    inputs = inputs.to(model.device)

    # Inference: generate the refined caption and strip the prompt tokens
    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)

    os.makedirs(args.output_dir, exist_ok=True)
    save_dir = Path(args.output_dir) / vqa_id / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    caption_path = save_dir / "caption.txt"
    with open(caption_path, "w", encoding="utf-8") as f:
        f.write(output_text[0].strip())
    return output_text[0]


@torch.inference_mode()
def vqa(root, model, processor, prompt, question, vqa_id, max_length=300):
    """Answer the question using the refined caption and the modality images under `root`."""
    messages = build_vqa_message(root, prompt, question)
    print(messages)
    inputs = processor.apply_chat_template(
        messages, tokenize=True, add_generation_prompt=True, return_dict=True, return_tensors="pt"
    )
    inputs = inputs.to(model.device)

    generated_ids = model.generate(**inputs, max_new_tokens=max_length)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
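    # Decode only the newly generated tokens; the prompt portion was stripped above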
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)

    os.makedirs(args.output_dir, exist_ok=True)
    save_dir = Path(args.output_dir) / vqa_id / 'vqa_answer'
    save_dir.mkdir(parents=True, exist_ok=True)
    caption_path = save_dir / "caption.txt"
    with open(caption_path, "w", encoding="utf-8") as f:
        f.write(output_text[0].strip())
    return output_text[0]


@torch.inference_mode()
def image_refine(prompt, images, role, pipe, iter_num, modality_names, generator, height, width, image_id):
    """Generate all modality images from the current caption with the Jodi pipeline."""
    # print(f"🚀 Generating with prompt: {prompt}")
    outputs = pipe(
        images=images,
        role=role,
        prompt=prompt,
        negative_prompt=args.negative_prompt,
        height=height,
        width=width,
        num_inference_steps=args.steps,
        guidance_scale=args.guidance_scale,
        num_images_per_prompt=1,
        generator=generator,
        task='t2i',
    )

    # Apply post-processing for each modality
    results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)]
    results = torch.stack(results, dim=1).reshape(-1, 3, height, width)
    results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)]

    # --------------------------
    # Save results
    # --------------------------
    os.makedirs(args.output_dir, exist_ok=True)
    save_dir = Path(args.output_dir) / image_id / f"iteration_{iter_num}"
    save_dir.mkdir(parents=True, exist_ok=True)
    for idx, img in enumerate(results):
        name = modality_names[idx]
        save_path = save_dir / f"{name}.png"
        img.save(save_path)
        print(f"💾 Saved {name} → {save_path}")

    merged_path = save_dir / f"merged_iteration_{iter_num}.png"
    concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path)
    print(f"\n✅ All results saved in: {save_dir}\n")
    return save_dir


if __name__ == "__main__":
    args = get_parser().parse_args()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"✅ Using device: {device}")

    # Load the Qwen3-VL processor and model
    processor = AutoProcessor.from_pretrained(args.model_name_or_path)
    model = Qwen3VLForConditionalGeneration.from_pretrained(
        args.text_model_path,
        attn_implementation="flash_attention_2",
        dtype=torch.bfloat16,
    ).to(device)

    # Load the Jodi pipeline
    pipe = JodiPipeline(args.config)
    pipe.from_pretrained(args.model_path)

    modality_names = [
        "image",
        "annotation_lineart",
        "annotation_edge",
        "annotation_depth",
        "annotation_normal",
        "annotation_albedo",
        "annotation_seg_12colors",
        "annotation_openpose",
    ]

    # Build post-processors
    post_processors: list[Any] = [ImagePostProcessor()]
    for condition in pipe.config.conditions:  # type: ignore
        if condition == "lineart":
            post_processors.append(LineartPostProcessor())
        elif condition == "edge":
            post_processors.append(EdgePostProcessor())
        elif condition == "depth":
            post_processors.append(DepthPostProcessor())
        elif condition == "normal":
            post_processors.append(NormalPostProcessor())
        elif condition == "albedo":
            post_processors.append(AlbedoPostProcessor())
        elif condition == "segmentation":
            post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True))
        elif condition == "openpose":
            post_processors.append(OpenposePostProcessor())
        else:
            print(f"⚠️ Warning: Unknown condition: {condition}")
            post_processors.append(ImagePostProcessor())

    torch.manual_seed(args.seed)
    generator = torch.Generator(device=device).manual_seed(args.seed)

    with open(args.json, "r", encoding="utf-8") as f:
        annotations = json.load(f)
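
    # The loop below only relies on each annotation entry exposing an "image" filename and a
    # "question" string; a minimal annotations.json is assumed to look roughly like
    #   [{"image": "0001.jpg", "question": "How many cars are in this image?"}, ...]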
    for sample in annotations[1:255]:
        image_path = os.path.join(args.data_path, sample["image"])
        image_id = sample["image"].split('.')[0]
        image = Image.open(image_path)
        question = sample["question"]

        # The RGB image fills the first slot; the remaining modality slots are generated by the pipeline
        control_images = [image.convert('RGB')] + [None] * pipe.num_conditions
        role = [1] + [0] * pipe.num_conditions
        print(role)

        best_dir, best_caption, best_score = '', '', 0.0
        max_length = 1024
        # input_img = Image.open(image_path).convert("RGB")
        width, height = image.size
        print(f'ori width:{width}', f'ori height:{height}')

        # Initial caption and consistency check on the raw RGB image
        prompt = init_i2t(model, processor, image_path, 0, image_id, max_length)
        score, feedback = evaluate_consistency(image_path, model, processor, prompt)
        if score >= best_score:
            best_caption, best_score = prompt, score
            best_dir = image_path

        # Iteratively regenerate the modality images and refine the caption
        for step in range(1, args.iters):
            save_dir = image_refine(prompt, control_images, role, pipe, step, modality_names,
                                    generator, height, width, image_id)
            max_length += 100
            prompt = text_refine(save_dir, model, processor, prompt, feedback, step, image_id, max_length)
            score, feedback = evaluate_consistency(image_path, model, processor, prompt)
            # if score >= best_score:
            best_caption, best_score = prompt, score
            best_dir = save_dir

        # Answer the question with the best caption and modality set
        result = vqa(best_dir, model, processor, best_caption, question, image_id, max_length)
        print(f'result:{result}')
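
# Example invocation (a sketch; the script file name and data paths below are placeholders):
#   python run_jodi_vqa.py \
#       --data_path /path/to/realworldqa/images \
#       --json /path/to/realworldqa/annotations.json \
#       --output_dir ./vqa_realworld_outputs \
#       --iters 10 --steps 20 --guidance_scale 4.5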