add sam2
5 iopaint/plugins/segment_anything2/utils/__init__.py Normal file
@@ -0,0 +1,5 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
90 iopaint/plugins/segment_anything2/utils/misc.py Normal file
@@ -0,0 +1,90 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import warnings

import numpy as np
import torch
from PIL import Image


def get_sdpa_settings():
    if torch.cuda.is_available():
        old_gpu = torch.cuda.get_device_properties(0).major < 7
        # only use Flash Attention on Ampere (8.0) or newer GPUs
        use_flash_attn = torch.cuda.get_device_properties(0).major >= 8
        if not use_flash_attn:
            warnings.warn(
                "Flash Attention is disabled as it requires a GPU with Ampere (8.0) CUDA capability.",
                category=UserWarning,
                stacklevel=2,
            )
        # keep math kernel for PyTorch versions before 2.2 (Flash Attention v2 is only
        # available on PyTorch 2.2+, while Flash Attention v1 cannot handle all cases)
        pytorch_version = tuple(int(v) for v in torch.__version__.split(".")[:2])
        if pytorch_version < (2, 2):
            warnings.warn(
                f"You are using PyTorch {torch.__version__} without Flash Attention v2 support. "
                "Consider upgrading to PyTorch 2.2+ for Flash Attention v2 (which could be faster).",
                category=UserWarning,
                stacklevel=2,
            )
        math_kernel_on = pytorch_version < (2, 2) or not use_flash_attn
    else:
        old_gpu = True
        use_flash_attn = False
        math_kernel_on = True

    return old_gpu, use_flash_attn, math_kernel_on


def mask_to_box(masks: torch.Tensor):
    """
    Compute bounding boxes given input masks.

    Inputs:
    - masks: [B, 1, H, W] masks, dtype=torch.Tensor

    Returns:
    - box_coords: [B, 1, 4], contains (x, y) coordinates of top left and bottom right box corners, dtype=torch.Tensor
    """
    B, _, h, w = masks.shape
    device = masks.device
    xs = torch.arange(w, device=device, dtype=torch.int32)
    ys = torch.arange(h, device=device, dtype=torch.int32)
    grid_xs, grid_ys = torch.meshgrid(xs, ys, indexing="xy")
    grid_xs = grid_xs[None, None, ...].expand(B, 1, h, w)
    grid_ys = grid_ys[None, None, ...].expand(B, 1, h, w)
    min_xs, _ = torch.min(torch.where(masks, grid_xs, w).flatten(-2), dim=-1)
    max_xs, _ = torch.max(torch.where(masks, grid_xs, -1).flatten(-2), dim=-1)
    min_ys, _ = torch.min(torch.where(masks, grid_ys, h).flatten(-2), dim=-1)
    max_ys, _ = torch.max(torch.where(masks, grid_ys, -1).flatten(-2), dim=-1)
    bbox_coords = torch.stack((min_xs, min_ys, max_xs, max_ys), dim=-1)

    return bbox_coords


def _load_img_as_tensor(img_path, image_size):
    img_pil = Image.open(img_path)
    img_np = np.array(img_pil.convert("RGB").resize((image_size, image_size)))
    if img_np.dtype == np.uint8:  # np.uint8 is expected for JPEG images
        img_np = img_np / 255.0
    else:
        raise RuntimeError(f"Unknown image dtype: {img_np.dtype} on {img_path}")
    img = torch.from_numpy(img_np).permute(2, 0, 1)
    video_width, video_height = img_pil.size  # the original video size
    return img, video_height, video_width


def concat_points(old_point_inputs, new_points, new_labels):
    """Add new points and labels to previous point inputs (add at the end)."""
    if old_point_inputs is None:
        points, labels = new_points, new_labels
    else:
        points = torch.cat([old_point_inputs["point_coords"], new_points], dim=1)
        labels = torch.cat([old_point_inputs["point_labels"], new_labels], dim=1)

    return {"point_coords": points, "point_labels": labels}
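Not part of the commit: a minimal usage sketch for the helpers above, assuming a boolean mask tensor and the point-dict layout described in the docstrings (all values are illustrative).

import torch
from iopaint.plugins.segment_anything2.utils.misc import mask_to_box, concat_points

masks = torch.zeros(2, 1, 64, 64, dtype=torch.bool)
masks[0, 0, 10:20, 30:40] = True              # one rectangular mask in batch item 0
boxes = mask_to_box(masks)                    # -> [2, 1, 4] with (x_min, y_min, x_max, y_max)

points = torch.tensor([[[35.0, 12.0]]])       # [B, N, 2] click coordinates (hypothetical)
labels = torch.ones(1, 1, dtype=torch.int32)  # 1 = foreground click
inputs = concat_points(None, points, labels)
inputs = concat_points(inputs, points, labels)  # appends along the point dimension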
77 iopaint/plugins/segment_anything2/utils/transforms.py Normal file
@@ -0,0 +1,77 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import torch
import torch.nn as nn
from torchvision.transforms import Normalize, Resize, ToTensor


class SAM2Transforms(nn.Module):
    def __init__(
        self, resolution, mask_threshold, max_hole_area=0.0, max_sprinkle_area=0.0
    ):
        """
        Transforms for SAM2.
        """
        super().__init__()
        self.resolution = resolution
        self.mask_threshold = mask_threshold
        self.max_hole_area = max_hole_area
        self.max_sprinkle_area = max_sprinkle_area
        self.mean = [0.485, 0.456, 0.406]
        self.std = [0.229, 0.224, 0.225]
        self.to_tensor = ToTensor()
        self.transforms = torch.jit.script(
            nn.Sequential(
                Resize((self.resolution, self.resolution)),
                Normalize(self.mean, self.std),
            )
        )

    def __call__(self, x):
        x = self.to_tensor(x)
        return self.transforms(x)

    def forward_batch(self, img_list):
        img_batch = [self.transforms(self.to_tensor(img)) for img in img_list]
        img_batch = torch.stack(img_batch, dim=0)
        return img_batch

    def transform_coords(
        self, coords: torch.Tensor, normalize=False, orig_hw=None
    ) -> torch.Tensor:
        """
        Expects a torch tensor with length 2 in the last dimension. The coordinates can be in
        absolute image coordinates or normalized to [0, 1]. If the coords are in absolute image
        coordinates, normalize should be set to True and the original image size is required.

        Returns:
            Coordinates scaled to the model's input resolution, as expected by the SAM2 model.
        """
        if normalize:
            assert orig_hw is not None
            h, w = orig_hw
            coords = coords.clone()
            coords[..., 0] = coords[..., 0] / w
            coords[..., 1] = coords[..., 1] / h

        coords = coords * self.resolution  # unnormalize coords
        return coords

    def transform_boxes(
        self, boxes: torch.Tensor, normalize=False, orig_hw=None
    ) -> torch.Tensor:
        """
        Expects a tensor of shape Bx4. The coordinates can be in absolute image coordinates or
        normalized to [0, 1]. If the coords are in absolute image coordinates, normalize should
        be set to True and the original image size is required.
        """
        boxes = self.transform_coords(boxes.reshape(-1, 2, 2), normalize, orig_hw)
        return boxes

    def postprocess_masks(self, masks: torch.Tensor, orig_hw) -> torch.Tensor:
        """
        Perform post-processing on output masks.
        """
        return masks
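Not part of the commit: a minimal sketch of how SAM2Transforms might be driven. The resolution of 1024 and the dummy image are assumptions for illustration only.

import numpy as np
import torch
from iopaint.plugins.segment_anything2.utils.transforms import SAM2Transforms

transforms = SAM2Transforms(resolution=1024, mask_threshold=0.0)
image = np.zeros((480, 640, 3), dtype=np.uint8)  # HWC uint8 image, as ToTensor expects
batch = transforms(image)[None]                  # -> [1, 3, 1024, 1024], resized and ImageNet-normalized

point = torch.tensor([[[320.0, 240.0]]])         # absolute pixel coords, [B, N, 2]
point = transforms.transform_coords(point, normalize=True, orig_hw=(480, 640))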