FcF: use a model-specific resize strategy
This commit is contained in:
@@ -4,7 +4,6 @@ from typing import Optional
|
||||
import cv2
|
||||
import torch
|
||||
from loguru import logger
|
||||
import numpy as np
|
||||
|
||||
from lama_cleaner.helper import boxes_from_mask, resize_max_size, pad_img_to_modulo
|
||||
from lama_cleaner.schema import Config, HDStrategy
|
||||
@@ -92,7 +91,6 @@ class InpaintModel:
|
||||
inpaint_result = cv2.resize(inpaint_result,
|
||||
(origin_size[1], origin_size[0]),
|
||||
interpolation=cv2.INTER_CUBIC)
|
||||
|
||||
original_pixel_indices = mask < 127
|
||||
inpaint_result[original_pixel_indices] = image[:, :, ::-1][original_pixel_indices]
|
||||
|
||||
@@ -101,7 +99,7 @@ class InpaintModel:
|
||||
|
||||
return inpaint_result
|
||||
|
||||
def _run_box(self, image, mask, box, config: Config):
|
||||
def _crop_box(self, image, mask, box, config: Config):
|
||||
"""
|
||||
|
||||
Args:
|
||||
@@ -110,7 +108,7 @@ class InpaintModel:
|
||||
box: [left,top,right,bottom]
|
||||
|
||||
Returns:
|
||||
BGR IMAGE
|
||||
BGR IMAGE, (l, r, r, b)
|
||||
"""
|
||||
box_h = box[3] - box[1]
|
||||
box_w = box[2] - box[0]
|
||||
@@ -131,7 +129,7 @@ class InpaintModel:
|
||||
t = max(_t, 0)
|
||||
b = min(_b, img_h)
|
||||
|
||||
# try to get more context when crop around image edge
|
||||
# try to get more context when crop around image edge
|
||||
if _l < 0:
|
||||
r += abs(_l)
|
||||
if _r > img_w:
|
||||
@@ -151,4 +149,19 @@ class InpaintModel:
|
||||
|
||||
logger.info(f"box size: ({box_h},{box_w}) crop size: {crop_img.shape}")
|
||||
|
||||
return crop_img, crop_mask, [l, t, r, b]
|
||||
|
||||
def _run_box(self, image, mask, box, config: Config):
    """Inpaint only the region around one mask bounding box.

    Crops a context window around ``box``, runs the padded forward pass on
    the crop, and reports where the result should be pasted back.

    Args:
        image: [H, W, C] RGB
        mask: [H, W, 1]
        box: [left,top,right,bottom]

    Returns:
        BGR IMAGE (the inpainted crop), [l, t, r, b] paste coordinates
    """
    # Delegate the context-aware cropping, then inpaint just that window.
    cropped_image, cropped_mask, crop_rect = self._crop_box(image, mask, box, config)
    inpainted_crop = self._pad_forward(cropped_image, cropped_mask, config)
    return inpainted_crop, crop_rect
|
||||
|
||||
@@ -8,7 +8,7 @@ import torch.fft as fft
|
||||
|
||||
from lama_cleaner.schema import Config
|
||||
|
||||
from lama_cleaner.helper import load_model, get_cache_path_by_url, norm_img
|
||||
from lama_cleaner.helper import load_model, get_cache_path_by_url, norm_img, boxes_from_mask, resize_max_size
|
||||
from lama_cleaner.model.base import InpaintModel
|
||||
from torch import conv2d, nn
|
||||
import torch.nn.functional as F
|
||||
@@ -1154,6 +1154,38 @@ class FcF(InpaintModel):
|
||||
def is_downloaded() -> bool:
|
||||
return os.path.exists(get_cache_path_by_url(FCF_MODEL_URL))
|
||||
|
||||
@torch.no_grad()
def __call__(self, image, mask, config: Config):
    """Run FcF inpainting with a per-box crop-and-resize strategy.

    Each mask bounding box is cropped with extra context, resized down to
    at most 512px on the long side (FcF's working resolution), inpainted,
    resized back, and pasted into the output.

    Args:
        image: [H, W, C] RGB, not normalized
        mask: [H, W]
    Returns:
        BGR IMAGE
    """
    boxes = boxes_from_mask(mask)
    crop_result = []
    # NOTE(review): this overwrites the caller's config in place — presumably
    # intentional (FcF always wants a 128px context margin), but verify.
    config.hd_strategy_crop_margin = 128
    for box in boxes:
        crop_image, crop_mask, crop_box = self._crop_box(image, mask, box, config)
        origin_size = crop_image.shape[:2]
        # FcF works at a fixed small resolution; shrink crop before forward.
        resize_image = resize_max_size(crop_image, size_limit=512)
        resize_mask = resize_max_size(crop_mask, size_limit=512)
        inpaint_result = self._pad_forward(resize_image, resize_mask, config)

        # Scale the result back up to the crop's original size.
        inpaint_result = cv2.resize(inpaint_result, (origin_size[1], origin_size[0]), interpolation=cv2.INTER_CUBIC)

        # Only keep model output inside the masked area; restore original
        # pixels (converted RGB->BGR) everywhere else.
        original_pixel_indices = crop_mask < 127
        inpaint_result[original_pixel_indices] = crop_image[:, :, ::-1][original_pixel_indices]

        crop_result.append((inpaint_result, crop_box))

    # BUGFIX: image[:, :, ::-1] is a numpy *view* of the caller's array;
    # pasting into it would silently mutate the input image. Copy first.
    inpaint_result = image[:, :, ::-1].copy()
    for crop_image, crop_box in crop_result:
        x1, y1, x2, y2 = crop_box
        inpaint_result[y1:y2, x1:x2, :] = crop_image

    return inpaint_result
|
||||
|
||||
def forward(self, image, mask, config: Config):
|
||||
"""Input images and output images have same size
|
||||
images: [H, W, C] RGB
|
||||
|
||||
Reference in New Issue
Block a user