45 changes: 45 additions & 0 deletions pyraws/l1/l1_event.py
@@ -5,6 +5,7 @@
get_l1C_image_default_path,
read_L1C_event_from_database,
read_L1C_event_from_path,
read_L1C_image_from_tif,
reproject_raster,
)
import matplotlib.pyplot as plt
@@ -17,6 +18,7 @@
from termcolor import colored
import torch
from tqdm import tqdm
import cv2


class L1C_event:
@@ -546,3 +548,46 @@ def crop_tile(
dest.write(band[0, :, :], n + 1)

return out_name

def register_l1_to_raw(
self,
granule_idx: int,
raw_height: int,
raw_width: int,
        keypoints_l1c: np.ndarray,
        keypoints_raw: np.ndarray,
        method_homography: int = cv2.RANSAC,
) -> np.ndarray:
"""Match a L1C image to its corresponding Raw image.

Args:
granule_idx (int): index of the granule.
raw_height (int): height of the Raw image.
raw_width (int): width of the Raw image.
            keypoints_l1c (np.ndarray): keypoints detected in the L1C image.
            keypoints_raw (np.ndarray): keypoints detected in the Raw image that match 'keypoints_l1c'.
            method_homography (int, optional): method used to compute the homography matrix. Defaults to cv2.RANSAC.

Returns:
np.ndarray: L1C image matched to its corresponding Raw image.
"""
granule_idx_str = str(granule_idx)

l1c_tif, _, _ = read_L1C_image_from_tif(
id_event=self.__event_name,
out_name_ending=granule_idx_str,
device=self.__device,
)
l1c_numpy = l1c_tif.numpy()

# Find the homography matrix.
homography, _ = cv2.findHomography(
keypoints_l1c, keypoints_raw, method_homography
)

        # Use the homography matrix to warp the L1C image onto the Raw image grid.
l1c_registered_to_raw = cv2.warpPerspective(
l1c_numpy, homography, (raw_width, raw_height), flags=cv2.INTER_NEAREST
)
# TODO: save into TIF file, add overwrite param and other params as well
return l1c_registered_to_raw
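
A minimal, self-contained sketch of the registration core used by register_l1_to_raw (cv2.findHomography followed by cv2.warpPerspective), exercised here on a synthetic homography and synthetic keypoint pairs rather than real Raw/L1C data; all values are illustrative.

import cv2
import numpy as np

# Ground-truth homography: mild shear plus a small translation.
h_true = np.array(
    [[1.0, 0.02, 5.0],
     [0.01, 1.0, -3.0],
     [0.0, 0.0, 1.0]]
)
# Synthetic matched keypoints: random source points mapped through h_true.
pts_src = (np.random.rand(20, 1, 2) * 100).astype(np.float32)
pts_dst = cv2.perspectiveTransform(pts_src, h_true)

# Recover the homography (RANSAC, as in register_l1_to_raw) and warp an image with it.
h_est, _ = cv2.findHomography(pts_src, pts_dst, cv2.RANSAC)
img = np.random.rand(120, 120).astype(np.float32)
warped = cv2.warpPerspective(img, h_est, (120, 120), flags=cv2.INTER_NEAREST)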
64 changes: 64 additions & 0 deletions pyraws/utils/img_processing_utils.py
@@ -0,0 +1,64 @@
import numpy as np
import cv2


def three_channel_to_grayscale_img(img: np.ndarray) -> np.ndarray:
"""Converts a 3-channel image to grayscale (i.e. 1-channel)

Args:
img (np.ndarray): 3-channel image.

Returns:
np.ndarray: grayscale image.
"""
return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)


def save_img(img: np.ndarray, path: str):
"""Saves an image to path.

Args:
img (np.ndarray): image to save.
path (str): path where to save the image.
"""
cv2.imwrite(path, img)


def is_2D_image(img: np.ndarray) -> bool:
"""True if an image has just two dimensions. False otherwise.

Args:
img (np.ndarray): image.

Returns:
bool: True if an image has just two dimensions. False otherwise.
"""
    return img.ndim == 2


def normalize_img(
    img: np.ndarray,
    min_val: float,
    max_val: float,
    a: float = 0,
    b: float = 1,
    tol: float = 1e-6,
) -> np.ndarray:
    """Normalizes an image from range [min_val, max_val] to range [a, b].

    Args:
        img (np.ndarray): image to normalize.
        min_val (float): minimum of the previous range.
        max_val (float): maximum of the previous range.
        a (float, optional): minimum of the new range. Defaults to 0.
        b (float, optional): maximum of the new range. Defaults to 1.
        tol (float, optional): tolerance used when checking the output range. Defaults to 1e-6.

    Returns:
        np.ndarray: normalized image.
    """
    assert max_val != min_val, "max_val and min_val must differ."
    img_norm = ((b - a) * ((img - min_val) / (max_val - min_val))) + a
    # Sanity check: all output values must lie within [a - tol, b + tol].
    assert np.count_nonzero(img_norm < a - tol) <= 0
    assert np.count_nonzero(img_norm > b + tol) <= 0
    return img_norm
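
As a quick usage note for normalize_img, this is how the SIFT path in keypoint_utils rescales a raster into the 8-bit range OpenCV expects; the input values below are illustrative.

import numpy as np
from pyraws.utils.img_processing_utils import normalize_img

# Map an image from [0, 1000] into [0, 255] before casting to uint8.
img = np.linspace(0.0, 1000.0, num=64).reshape(8, 8)
img_u8 = normalize_img(img, img.min(), img.max(), 0, 255).astype(np.uint8)
assert img_u8.min() == 0 and img_u8.max() == 255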
74 changes: 74 additions & 0 deletions pyraws/utils/keypoint_utils.py
@@ -0,0 +1,74 @@
from pyraws.utils.img_processing_utils import (
is_2D_image,
three_channel_to_grayscale_img,
normalize_img,
)
import numpy as np
import cv2


def get_matched_keypoints_sift(
img_1: np.ndarray,
img_2: np.ndarray,
min_matches: int = 10,
threshold_lowe_ratio: float = 0.7,
max_val_grayscale: int = 255,
):
"""Detect keypoints in two different images and get keypoints that match using SIFT and Lowe's ratio test.

Args:
img_1 (np.ndarray): first image.
img_2 (np.ndarray): second image.
min_matches (int, optional): Minimum number of matches to get. Defaults to 10.
threshold_lowe_ratio (float, optional): Threshold for Lowe's ratio test. It should be a number in [0, 1]. Defaults to 0.7.
max_val_grayscale (int, optional): Maximum image grayscale value. Defaults to 255.

Raises:
        ValueError: fewer than 'min_matches' keypoint matches were found.

Returns:
        (np.ndarray, np.ndarray): (N, 2) arrays with the coordinates of the matched keypoints in img_1 and img_2, respectively.
"""

# Convert images to grayscale if they are not already.
if not is_2D_image(img_1):
img_1 = three_channel_to_grayscale_img(img_1)
if not is_2D_image(img_2):
img_2 = three_channel_to_grayscale_img(img_2)

sift = cv2.SIFT_create()

img_1 = normalize_img(img_1, np.min(img_1), np.max(img_1), 0, max_val_grayscale)
img_2 = normalize_img(img_2, np.min(img_2), np.max(img_2), 0, max_val_grayscale)
img_1 = img_1.astype(np.uint8)
img_2 = img_2.astype(np.uint8)
# Find keypoints and descriptors.
kp1, d1 = sift.detectAndCompute(img_1, None)
kp2, d2 = sift.detectAndCompute(img_2, None)

# Match features between the two images.
    # Create a brute-force matcher; its default NORM_L2 (Euclidean) norm suits SIFT descriptors.
matcher = cv2.BFMatcher()

# Match the two sets of descriptors.
matches = matcher.knnMatch(d1, d2, k=2)
# Store all the good matches as per Lowe's ratio test.
    lowe_filtered_matches = []
    for match_pair in matches:
        if len(match_pair) < 2:  # knnMatch may return fewer than k neighbors
            continue
        m, n = match_pair
        if m.distance < threshold_lowe_ratio * n.distance:
            lowe_filtered_matches.append(m)
    if len(lowe_filtered_matches) < min_matches:
        raise ValueError(
            f"Not enough keypoint matches: found {len(lowe_filtered_matches)}, need at least {min_matches}."
        )

no_of_filtered_matches = len(lowe_filtered_matches)

    # Allocate coordinate arrays of shape (no_of_filtered_matches, len_coordinates).
len_coordinates = 2
p1 = np.zeros((no_of_filtered_matches, len_coordinates))
p2 = np.zeros((no_of_filtered_matches, len_coordinates))

    for i, match in enumerate(lowe_filtered_matches):
        p1[i, :] = kp1[match.queryIdx].pt
        p2[i, :] = kp2[match.trainIdx].pt

return p1, p2
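
A hedged usage sketch of get_matched_keypoints_sift on synthetic data: a blurred noise image matched against a translated copy of itself (with real data the two inputs would be the L1C and Raw rasters). On some inputs SIFT may still find too few matches, in which case the ValueError above is raised.

import cv2
import numpy as np
from pyraws.utils.keypoint_utils import get_matched_keypoints_sift

rng = np.random.default_rng(0)
# Gaussian-blurred noise has blob-like structure that SIFT can key on.
img_1 = cv2.GaussianBlur(rng.random((256, 256)).astype(np.float32), (7, 7), 0)
img_2 = np.roll(img_1, shift=(10, 15), axis=(0, 1))  # translated copy

# p1[i] and p2[i] are the (x, y) coordinates of the i-th matched pair.
p1, p2 = get_matched_keypoints_sift(img_1, img_2, min_matches=4)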