File size: 1,245 Bytes
93842b5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
import gradio as gr
from transformers import AutoImageProcessor, AutoModelForSemanticSegmentation
import torch
from PIL import Image
import numpy as np

# NOTE(review): model_path is never used — the processor and model below are
# loaded from the Hugging Face Hub id, not from this local directory. Either
# wire model_path into from_pretrained(...) or delete it; confirm which the
# deployment expects (the Interface description says "locally loaded").
model_path = "../segformer-b0-finetuned-ade-512-512"

# Image preprocessor (resize/normalize) and SegFormer-B0 model finetuned on
# ADE20K at 512x512, both fetched (and cached) from the Hugging Face Hub.
processor = AutoImageProcessor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
model = AutoModelForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")

def segment(image):
    """Run semantic segmentation on a PIL image and return a color-coded map.

    Args:
        image: Input ``PIL.Image``; the processor handles resizing and
            normalization.

    Returns:
        A ``PIL.Image`` the same size as the input, where every pixel is
        colored by its predicted ADE20K class id.
    """
    inputs = processor(images=image, return_tensors="pt")
    # Inference only — disable autograd bookkeeping.
    with torch.no_grad():
        outputs = model(**inputs)

    logits = outputs.logits
    # Upsample the low-resolution logits back to the input resolution.
    # PIL's .size is (width, height); interpolate expects (height, width).
    upsampled_logits = torch.nn.functional.interpolate(
        logits, size=image.size[::-1], mode="bilinear", align_corners=False
    )
    pred_seg = upsampled_logits.argmax(dim=1)[0]

    # Fixed-seed palette so each class maps to the same color on every call.
    # (The original drew a fresh random palette per invocation, so repeated
    # segmentations of the same image got different colors.)
    rng = np.random.default_rng(0)
    palette = rng.integers(0, 255, size=(model.config.num_labels, 3), dtype=np.uint8)
    seg_img = Image.fromarray(palette[pred_seg.numpy()])

    return seg_img

# Gradio UI: single image in, segmentation overlay out.
demo = gr.Interface(
    fn=segment,
    inputs=gr.Image(type="pil", label="Upload an Image"),
    outputs=gr.Image(label="Segmented Output"),
    title="SegFormer ADE20K Segmentation Demo",
    # Fixed: the model is loaded from the Hugging Face Hub id
    # "nvidia/segformer-b0-finetuned-ade-512-512", not from a local path,
    # so the previous "Locally loaded" wording was inaccurate.
    description="SegFormer model finetuned on ADE20K (512x512), loaded from the Hugging Face Hub.",
)

if __name__ == "__main__":
    # launch() blocks and serves the web UI; only run when executed as a script.
    demo.launch()