From 054acc8dffbd58d4bfa58c65306e3f6cd4aeeadf Mon Sep 17 00:00:00 2001
From: Luca Marini
Date: Thu, 18 Apr 2024 14:01:36 +0200
Subject: [PATCH 01/11] add method to perform image-to-image registration
 between L1C and Raw granule

---
 pyraws/l1/l1_event.py | 45 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 45 insertions(+)

diff --git a/pyraws/l1/l1_event.py b/pyraws/l1/l1_event.py
index bc673ce..18d9681 100644
--- a/pyraws/l1/l1_event.py
+++ b/pyraws/l1/l1_event.py
@@ -5,6 +5,7 @@
     get_l1C_image_default_path,
     read_L1C_event_from_database,
     read_L1C_event_from_path,
+    read_L1C_image_from_tif,
     reproject_raster,
 )
 import matplotlib.pyplot as plt
@@ -17,6 +18,7 @@
 from termcolor import colored
 import torch
 from tqdm import tqdm
+import cv2
 
 
 class L1C_event:
@@ -546,3 +548,46 @@ def crop_tile(
                 dest.write(band[0, :, :], n + 1)
 
         return out_name
+
+    def register_l1_to_raw(
+        self,
+        granule_idx: int,
+        raw_height: int,
+        raw_width: int,
+        keypoints_l1c: np.ndarray,
+        keypoints_raw: np.ndarray,
+        method_homography=cv2.RANSAC,
+    ) -> np.ndarray:
+        """Match an L1C image to its corresponding Raw image.
+
+        Args:
+            granule_idx (int): index of the granule.
+            raw_height (int): height of the Raw image.
+            raw_width (int): width of the Raw image.
+            keypoints_l1c (np.ndarray): keypoints detected in the L1C image.
+            keypoints_raw (np.ndarray): keypoints detected in the Raw image that match 'keypoints_l1c'.
+            method_homography (optional): method used to compute a homography matrix. Defaults to cv2.RANSAC.
+
+        Returns:
+            np.ndarray: L1C image matched to its corresponding Raw image.
+        """
+        granule_idx_str = str(granule_idx)
+
+        l1c_tif, _, _ = read_L1C_image_from_tif(
+            id_event=self.__event_name,
+            out_name_ending=granule_idx_str,
+            device=self.__device,
+        )
+        l1c_numpy = l1c_tif.numpy()
+
+        # Find the homography matrix.
+        homography, _ = cv2.findHomography(
+            keypoints_l1c, keypoints_raw, method_homography
+        )
+
+        # Use homography matrix to transform the L1C image wrt the raw image.
+        l1c_registered_to_raw = cv2.warpPerspective(
+            l1c_numpy, homography, (raw_width, raw_height)
+        )
+        # TODO: save into TIF file, add overwrite param and other params as well
+        return l1c_registered_to_raw

From 05cfe3711c6bfddbe2619037401ac61e720f199c Mon Sep 17 00:00:00 2001
From: Luca Marini
Date: Thu, 18 Apr 2024 14:02:50 +0200
Subject: [PATCH 02/11] add function to detect and match keypoints using SIFT.
 Other keypoint detectors, such as SuperGlue, can be added to this script
 later; SIFT is provided as a first, ready-to-use example

---
 pyraws/utils/keypoint_utils.py | 62 ++++++++++++++++++++++++++++
 1 file changed, 62 insertions(+)
 create mode 100644 pyraws/utils/keypoint_utils.py

diff --git a/pyraws/utils/keypoint_utils.py b/pyraws/utils/keypoint_utils.py
new file mode 100644
index 0000000..007f006
--- /dev/null
+++ b/pyraws/utils/keypoint_utils.py
@@ -0,0 +1,62 @@
+from pyraws.utils.img_processing_utils import (
+    is_2D_image,
+    three_channel_to_grayscale_img,
+    normalize_img,
+)
+import numpy as np
+import cv2
+
+
+def get_matched_keypoints_sift(
+    img_to_match: np.ndarray,
+    reference_img: np.ndarray,
+    perc_matched_kp: float,
+    max_val_grayscale: int = 255,
+):
+
+    # Convert images to grayscale if they are not already.
+    if not is_2D_image(img_to_match):
+        img_to_match = three_channel_to_grayscale_img(img_to_match)
+    if not is_2D_image(reference_img):
+        reference_img = three_channel_to_grayscale_img(reference_img)
+
+    sift = cv2.SIFT_create()
+
+    img_to_match = (
+        (img_to_match - np.min(img_to_match)) * max_val_grayscale / np.max(img_to_match)
+    )
+    reference_img = (
+        (reference_img - np.min(reference_img))
+        * max_val_grayscale
+        / np.max(reference_img)
+    )
+    img_to_match = img_to_match.astype(np.uint8)
+    reference_img = reference_img.astype(np.uint8)
+    # Find keypoints and descriptors.
+    kp1, d1 = sift.detectAndCompute(img_to_match, None)
+    kp2, d2 = sift.detectAndCompute(reference_img, None)
+
+    # Match features between the two images.
+    # We create a Brute Force matcher with Hamming distance as measurement mode.
+    matcher = cv2.BFMatcher()
+
+    # Match the two sets of descriptors.
+    matches = matcher.match(d1, d2)
+
+    # Sort matches on the basis of their Hamming distance.
+    matches = sorted(matches, key=lambda x: x.distance)
+
+    # Take the top 'perc_matched_kp' % matches forward.
+    matches = matches[: int(len(matches) * perc_matched_kp)]
+    no_of_matches = len(matches)
+
+    # Define empty matrices of shape no_of_matches * len_coordinates.
+    len_coordinates = 2
+    p1 = np.zeros((no_of_matches, len_coordinates))
+    p2 = np.zeros((no_of_matches, len_coordinates))
+
+    for i in range(len(matches)):
+        p1[i, :] = kp1[matches[i].queryIdx].pt
+        p2[i, :] = kp2[matches[i].trainIdx].pt
+
+    return p1, p2

From 3a09de9ea8ce4dd11b2188e388e89ef7d5150782 Mon Sep 17 00:00:00 2001
From: Luca Marini
Date: Thu, 18 Apr 2024 14:03:50 +0200
Subject: [PATCH 03/11] add img processing utils

---
 pyraws/utils/img_processing_utils.py | 36 ++++++++++++++++++++++++++++
 1 file changed, 36 insertions(+)
 create mode 100644 pyraws/utils/img_processing_utils.py

diff --git a/pyraws/utils/img_processing_utils.py b/pyraws/utils/img_processing_utils.py
new file mode 100644
index 0000000..b4420b2
--- /dev/null
+++ b/pyraws/utils/img_processing_utils.py
@@ -0,0 +1,36 @@
+import numpy as np
+import cv2
+
+
+def three_channel_to_grayscale_img(img: np.ndarray) -> np.ndarray:
+    """Converts a 3-channel image to grayscale (i.e., 1-channel).
+
+    Args:
+        img (np.ndarray): 3-channel image.
+
+    Returns:
+        np.ndarray: grayscale image.
+    """
+    return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+
+
+def save_img(img: np.ndarray, path: str):
+    """Saves an image to path.
+
+    Args:
+        img (np.ndarray): image to save.
+        path (str): path where the image is saved.
+    """
+    cv2.imwrite(path, img)
+
+
+def is_2D_image(img: np.ndarray) -> bool:
+    """True if an image has just two dimensions. False otherwise.
+
+    Args:
+        img (np.ndarray): image.
+
+    Returns:
+        bool: True if an image has just two dimensions. False otherwise.
+    """
+    return img.ndim == 2

From 9f2e39a7332d6b5a1b5682d4938f67c61e4f89ca Mon Sep 17 00:00:00 2001
From: Luca Marini
Date: Thu, 18 Apr 2024 14:03:59 +0200
Subject: [PATCH 04/11] delete unused import

---
 pyraws/utils/keypoint_utils.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/pyraws/utils/keypoint_utils.py b/pyraws/utils/keypoint_utils.py
index 007f006..d8e3799 100644
--- a/pyraws/utils/keypoint_utils.py
+++ b/pyraws/utils/keypoint_utils.py
@@ -1,7 +1,6 @@
 from pyraws.utils.img_processing_utils import (
     is_2D_image,
     three_channel_to_grayscale_img,
-    normalize_img,
 )
 import numpy as np
 import cv2

From c6abd45caec1be026ac34f6b34d756f31f0dda61 Mon Sep 17 00:00:00 2001
From: Luca Marini
Date: Thu, 18 Apr 2024 16:06:41 +0200
Subject: [PATCH 05/11] update sift with lowe's ratio test

---
 pyraws/utils/keypoint_utils.py | 76 ++++++++++++++++++++--------------
 1 file changed, 44 insertions(+), 32 deletions(-)

diff --git a/pyraws/utils/keypoint_utils.py b/pyraws/utils/keypoint_utils.py
index d8e3799..17fc1ed 100644
--- a/pyraws/utils/keypoint_utils.py
+++ b/pyraws/utils/keypoint_utils.py
@@ -7,55 +7,67 @@
 
 
 def get_matched_keypoints_sift(
-    img_to_match: np.ndarray,
-    reference_img: np.ndarray,
-    perc_matched_kp: float,
+    img_1: np.ndarray,
+    img_2: np.ndarray,
+    min_matches: int = 10,
+    threshold_lowe_ratio: float = 0.7,
     max_val_grayscale: int = 255,
 ):
+    """Detect keypoints in two different images and get keypoints that match using SIFT and Lowe's ratio test.
+
+    Args:
+        img_1 (np.ndarray): first image.
+        img_2 (np.ndarray): second image.
+        min_matches (int, optional): Minimum number of matches required. Defaults to 10.
+        threshold_lowe_ratio (float, optional): Threshold for Lowe's ratio test. It should be a number in [0, 1]. Defaults to 0.7.
+        max_val_grayscale (int, optional): Maximum image grayscale value. Defaults to 255.
+
+    Raises:
+        ValueError: if fewer than 'min_matches' keypoint matches are found.
+
+    Returns:
+        (np.ndarray, np.ndarray): coordinates of matching keypoints.
+    """
     # Convert images to grayscale if they are not already.
-    if not is_2D_image(img_to_match):
-        img_to_match = three_channel_to_grayscale_img(img_to_match)
-    if not is_2D_image(reference_img):
-        reference_img = three_channel_to_grayscale_img(reference_img)
+    if not is_2D_image(img_1):
+        img_1 = three_channel_to_grayscale_img(img_1)
+    if not is_2D_image(img_2):
+        img_2 = three_channel_to_grayscale_img(img_2)
 
     sift = cv2.SIFT_create()
 
-    img_to_match = (
-        (img_to_match - np.min(img_to_match)) * max_val_grayscale / np.max(img_to_match)
-    )
-    reference_img = (
-        (reference_img - np.min(reference_img))
-        * max_val_grayscale
-        / np.max(reference_img)
-    )
-    img_to_match = img_to_match.astype(np.uint8)
-    reference_img = reference_img.astype(np.uint8)
+    img_1 = (img_1 - np.min(img_1)) * max_val_grayscale / np.max(img_1)
+    img_2 = (img_2 - np.min(img_2)) * max_val_grayscale / np.max(img_2)
+    img_1 = img_1.astype(np.uint8)
+    img_2 = img_2.astype(np.uint8)
     # Find keypoints and descriptors.
-    kp1, d1 = sift.detectAndCompute(img_to_match, None)
-    kp2, d2 = sift.detectAndCompute(reference_img, None)
+    kp1, d1 = sift.detectAndCompute(img_1, None)
+    kp2, d2 = sift.detectAndCompute(img_2, None)
 
     # Match features between the two images.
-    # We create a Brute Force matcher with Hamming distance as measurement mode.
+    # We create a Brute Force matcher with Euclidean distance as measurement mode.
     matcher = cv2.BFMatcher()
 
     # Match the two sets of descriptors.
- matches = matcher.match(d1, d2) - - # Sort matches on the basis of their Hamming distance. - matches = sorted(matches, key=lambda x: x.distance) + matches = matcher.knnMatch(d1, d2, k=2) + # Store all the good matches as per Lowe's ratio test. + lowe_filtered_matches = [] + for m, n in matches: + if m.distance < threshold_lowe_ratio * n.distance: + lowe_filtered_matches.append(m) + if len(lowe_filtered_matches) < min_matches: + raise ValueError("Not enough matches between keypoints were found.") - # Take the top 'perc_matched_kp' % matches forward. - matches = matches[: int(len(matches) * perc_matched_kp)] - no_of_matches = len(matches) + no_of_filtered_matches = len(lowe_filtered_matches) # Define empty matrices of shape no_of_matches * len_coordinates. len_coordinates = 2 - p1 = np.zeros((no_of_matches, len_coordinates)) - p2 = np.zeros((no_of_matches, len_coordinates)) + p1 = np.zeros((no_of_filtered_matches, len_coordinates)) + p2 = np.zeros((no_of_filtered_matches, len_coordinates)) - for i in range(len(matches)): - p1[i, :] = kp1[matches[i].queryIdx].pt - p2[i, :] = kp2[matches[i].trainIdx].pt + for i in range(len(lowe_filtered_matches)): + p1[i, :] = kp1[lowe_filtered_matches[i].queryIdx].pt + p2[i, :] = kp2[lowe_filtered_matches[i].trainIdx].pt return p1, p2 From 6edbc876adb344e9cb77e9bf9ee318dd5d26b3c0 Mon Sep 17 00:00:00 2001 From: Luca Marini Date: Thu, 18 Apr 2024 16:07:14 +0200 Subject: [PATCH 06/11] include L1C to Raw granule registration example in API Quickstart by using SIFT --- .../pyraws_Raw_API_demonstration.ipynb | 151 +++++++++++++++--- 1 file changed, 127 insertions(+), 24 deletions(-) diff --git a/quickstart/API_demonstration/pyraws_Raw_API_demonstration.ipynb b/quickstart/API_demonstration/pyraws_Raw_API_demonstration.ipynb index a27948d..b777f7c 100644 --- a/quickstart/API_demonstration/pyraws_Raw_API_demonstration.ipynb +++ b/quickstart/API_demonstration/pyraws_Raw_API_demonstration.ipynb @@ -95,6 +95,7 @@ "from pyraws.l1.l1_event import L1C_event\n", "from pyraws.utils.l1_utils import read_L1C_image_from_tif\n", "from pyraws.utils.visualization_utils import plot_img1_vs_img2_bands\n", + "from pyraws.utils.keypoint_utils import get_matched_keypoints_sift\n", "from s2pix_detector import s2pix_detector\n", "from functools import partial\n", "from geopy.geocoders import Nominatim\n", @@ -218,7 +219,7 @@ "outputs": [], "source": [ "requested_bands=[\"B8A\",\"B11\",\"B12\"]\n", - "event_name=\"Etna_00\"" + "event_name=\"France_0\"" ] }, { @@ -295,7 +296,16 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "In the next lines, we will select the granule `0` and will show the bands requested when the `Raw_event` was created." + "In the next lines, we will select the granule `granule_idx` and will show the bands requested when the `Raw_event` was created." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "granule_idx = 1" ] }, { @@ -308,7 +318,7 @@ }, "outputs": [], "source": [ - "raw_granule=event.get_granule(0)\n", + "raw_granule=event.get_granule(granule_idx)\n", "raw_granule.show_bands(downsampling=True)" ] }, @@ -325,7 +335,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "The APIs offer utils to compose granules along and across track. However, the granules from an event cannot composed arbitrarily. Indeed, to compose two granules **along track** they must have the same **detector number** and the **sensing-time** to be different of 3.6 s (3 or 4 seconds). 
For the event `Etna_00`, granules [0,2], [1,3], [3,5] can be stacked along tracks.
The next line will stack granules [0,2] along track. The string \"T\" means that the granule 0 will be stacked on top of 2. "
+    "The APIs offer utils to compose granules along and across track. However, the granules from an event cannot be composed arbitrarily. Indeed, to compose two granules **along track** they must have the same **detector number** and their **sensing times** must differ by 3.6 s (3 or 4 seconds). For the event `Etna_00`, granules [0,2], [1,3], [3,5] can be stacked along tracks.
Set `granule_idx=2` to stack granules [0,2] along track. The string \"T\" means that the granule 0 will be stacked on top of 2. " ] }, { @@ -338,7 +348,7 @@ }, "outputs": [], "source": [ - "raw_granule_0_2_stacked=event.stack_granules([0,2], \"T\")" + "raw_granule_0_2_stacked=event.stack_granules([0,granule_idx], \"T\")" ] }, { @@ -407,15 +417,6 @@ "raw_granule_0_2_stacked.coarse_coregistration(crop_empty_pixels=False, verbose=True).show_bands_superimposition(equalize=False)" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "raw_granule_0_2_stacked.coarse_coregistration_old(crop_empty_pixels=True, verbose=True).show_bands_superimposition(equalize=False)" - ] - }, { "attachments": {}, "cell_type": "markdown", @@ -446,7 +447,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "The next line extracts the granule 2 from the event and performs the coregistration." + "The next line extracts the granule `granule_idx` from the event and performs the coregistration." ] }, { @@ -459,10 +460,10 @@ }, "outputs": [], "source": [ - "# Get granule 2\n", - "raw_granule=event.get_granule(2)\n", - "# Get granule 2 from the Raw event and perform coarse coregistration\n", - "raw_granule_registered=event.coarse_coregistration(granules_idx=[2])" + "# Get granule 'granule_idx'\n", + "raw_granule=event.get_granule(granule_idx)\n", + "# Get granule 'granule_idx' from the Raw event and perform coarse coregistration\n", + "raw_granule_registered=event.coarse_coregistration(granules_idx=[granule_idx])" ] }, { @@ -507,8 +508,8 @@ "metadata": {}, "outputs": [], "source": [ - "# Get granule 2 from the Raw event and perform coarse coregistration with filling elements\n", - "raw_granule_registered_filled=event.coarse_coregistration(granules_idx=[2], use_complementary_granules=True)" + "# Get granule 'granule_idx' from the Raw event and perform coarse coregistration with filling elements\n", + "raw_granule_registered_filled=event.coarse_coregistration(granules_idx=[granule_idx], use_complementary_granules=True)" ] }, { @@ -617,7 +618,7 @@ }, "outputs": [], "source": [ - "raw_granule_coregistered=event.coarse_coregistration(granules_idx=[2], use_complementary_granules=True, downsampling=False)\n", + "raw_granule_coregistered=event.coarse_coregistration(granules_idx=[granule_idx], use_complementary_granules=True, downsampling=False)\n", "coordinates=raw_granule_coregistered.get_granule_coordinates()" ] }, @@ -829,7 +830,7 @@ }, "outputs": [], "source": [ - "ending=\"2\"\n", + "ending=str(granule_idx)\n", "output_cropped_tile_path=l1c_event.crop_tile(band_shifted_dict[requested_bands[0]], None,out_name_ending=ending, lat_lon_format=True)" ] }, @@ -1005,6 +1006,108 @@ " ax.add_patch(rect)\n", "plt.show()" ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 9) - Registering L1C on Raw granule\n", + " " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now the cropped L1C data is registered to the Raw granule.
First, the height and width of the Raw granule are retrieved, so that the registered L1C data will have the same size. "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "l1c_tif_numpy = l1c_tif.numpy()\n",
+    "raw_granule_numpy = raw_granule_tensor.numpy()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "raw_height, raw_width = raw_granule_numpy.shape[:2]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Keypoints are detected from both L1C data and the Raw granule with a keypoint detection algorithm of choice (SIFT is shown as an example). Only matching keypoints are kept."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "keypoints_l1c, keypoints_raw = get_matched_keypoints_sift(\n",
+    "    img_1=l1c_tif_numpy,\n",
+    "    img_2=raw_granule_numpy,\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "L1C data is then registered to the Raw granule based on their matching keypoints. "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "l1c_registered_to_raw = l1c_event.register_l1_to_raw(\n",
+    "    granule_idx=granule_idx, \n",
+    "    raw_height=raw_height, \n",
+    "    raw_width=raw_width, \n",
+    "    keypoints_l1c=keypoints_l1c, \n",
+    "    keypoints_raw=keypoints_raw\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Show the Raw granule and the L1C data that is registered to the Raw granule."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "plot_img1_vs_img2_bands(\n",
+    "    raw_granule_tensor/raw_granule_tensor.max(), \n",
+    "    torch.from_numpy(l1c_registered_to_raw), \n",
+    "    [\"Raw coregistered granule\", \"L1C registered to Raw\"]\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
 ],
 "metadata": {
@@ -1023,7 +1126,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.9.15"
+   "version": "3.9.19"
   },
   "vscode": {
    "interpreter": {

From 9831a7a7d61ece94c7c8a37e2da4354414531a45 Mon Sep 17 00:00:00 2001
From: Luca Marini
Date: Thu, 18 Apr 2024 16:38:22 +0200
Subject: [PATCH 07/11] set event_name=Etna_00 and granule_idx=2 to be
 consistent with the instructions

---
 .../API_demonstration/pyraws_Raw_API_demonstration.ipynb | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/quickstart/API_demonstration/pyraws_Raw_API_demonstration.ipynb b/quickstart/API_demonstration/pyraws_Raw_API_demonstration.ipynb
index b777f7c..83584c4 100644
--- a/quickstart/API_demonstration/pyraws_Raw_API_demonstration.ipynb
+++ b/quickstart/API_demonstration/pyraws_Raw_API_demonstration.ipynb
@@ -219,7 +219,7 @@
    "outputs": [],
    "source": [
     "requested_bands=[\"B8A\",\"B11\",\"B12\"]\n",
-    "event_name=\"France_0\""
+    "event_name=\"Etna_00\""
    ]
   },
   {
@@ -305,7 +305,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "granule_idx = 1"
+    "granule_idx = 2"
    ]
   },
   {

From 006f2f109efc27205e6738700b6b048fce91d80d Mon Sep 17 00:00:00 2001
From: Luca Marini
Date: Thu, 18 Apr 2024 21:26:58 +0200
Subject: [PATCH 08/11] add normalize_img function

---
 pyraws/utils/img_processing_utils.py | 28 ++++++++++++++++++++++++++++
 pyraws/utils/keypoint_utils.py       |  5 +++--
 2 files changed, 31 insertions(+), 2 deletions(-)

diff --git a/pyraws/utils/img_processing_utils.py b/pyraws/utils/img_processing_utils.py
index b4420b2..102cba4 100644
--- a/pyraws/utils/img_processing_utils.py
+++ b/pyraws/utils/img_processing_utils.py
@@ -34,3 +34,31 @@ def is_2D_image(img: np.ndarray) -> bool:
         bool: True if an image has just two dimensions. False otherwise.
     """
     return img.ndim == 2
+
+
+def normalize_img(
+    img: np.ndarray,
+    min: float,
+    max: float,
+    a: float = 0,
+    b: float = 1,
+    tol: float = 1e-6,
+) -> np.ndarray:
+    """Normalizes an image from range [min, max] to range [a, b].
+
+    Args:
+        img (np.ndarray): image to normalize.
+        min (float): minimum of the previous range.
+        max (float): maximum of the previous range.
+        a (float, optional): minimum of the new range. Defaults to 0.
+        b (float, optional): maximum of the new range. Defaults to 1.
+        tol (float, optional): tolerance used when checking the output range. Defaults to 1e-6.
+
+    Returns:
+        np.ndarray: normalized image.
+    """
+    assert max != min
+    img_norm = ((b - a) * ((img - min) / (max - min))) + a
+    assert np.count_nonzero(img_norm < a - tol) <= 0
+    assert np.count_nonzero(img_norm > b + tol) <= 0
+    return img_norm
diff --git a/pyraws/utils/keypoint_utils.py b/pyraws/utils/keypoint_utils.py
index 17fc1ed..c1ea18b 100644
--- a/pyraws/utils/keypoint_utils.py
+++ b/pyraws/utils/keypoint_utils.py
@@ -1,6 +1,7 @@
 from pyraws.utils.img_processing_utils import (
     is_2D_image,
     three_channel_to_grayscale_img,
+    normalize_img,
 )
 import numpy as np
 import cv2
@@ -37,8 +38,8 @@ def get_matched_keypoints_sift(
 
     sift = cv2.SIFT_create()
 
-    img_1 = (img_1 - np.min(img_1)) * max_val_grayscale / np.max(img_1)
-    img_2 = (img_2 - np.min(img_2)) * max_val_grayscale / np.max(img_2)
+    img_1 = normalize_img(img_1, np.min(img_1), np.max(img_1), 0, max_val_grayscale)
+    img_2 = normalize_img(img_2, np.min(img_2), np.max(img_2), 0, max_val_grayscale)
     img_1 = img_1.astype(np.uint8)
     img_2 = img_2.astype(np.uint8)
     # Find keypoints and descriptors.

From f4cca54f0cd984842ea2ca299b30dd6087134972 Mon Sep 17 00:00:00 2001
From: Luca Marini
Date: Mon, 13 May 2024 14:18:19 +0200
Subject: [PATCH 09/11] change interpolation from linear to nearest

---
 pyraws/l1/l1_event.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pyraws/l1/l1_event.py b/pyraws/l1/l1_event.py
index 18d9681..71801d3 100644
--- a/pyraws/l1/l1_event.py
+++ b/pyraws/l1/l1_event.py
@@ -587,7 +587,7 @@ def register_l1_to_raw(
 
     # Use homography matrix to transform the L1C image wrt the raw image.
     l1c_registered_to_raw = cv2.warpPerspective(
-        l1c_numpy, homography, (raw_width, raw_height)
+        l1c_numpy, homography, (raw_width, raw_height), flags=cv2.INTER_NEAREST
     )
     # TODO: save into TIF file, add overwrite param and other params as well
     return l1c_registered_to_raw

From 0aed3fbc61b3901fa3aa756200d04783012162fb Mon Sep 17 00:00:00 2001
From: Luca Marini
Date: Mon, 13 May 2024 14:18:20 +0200
Subject: [PATCH 10/11] trial

---
 pyraws/l1/l1_event.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pyraws/l1/l1_event.py b/pyraws/l1/l1_event.py
index 71801d3..5b62625 100644
--- a/pyraws/l1/l1_event.py
+++ b/pyraws/l1/l1_event.py
@@ -589,5 +589,5 @@ def register_l1_to_raw(
     l1c_registered_to_raw = cv2.warpPerspective(
         l1c_numpy, homography, (raw_width, raw_height), flags=cv2.INTER_NEAREST
     )
-    # TODO: save into TIF file, add overwrite param and other params as well
+    # TODO: save into TIF file, add overwrite param and other params as well.
return l1c_registered_to_raw From ee83696335eabc3bb19cfc3f09f63c53fa9fbb73 Mon Sep 17 00:00:00 2001 From: Luca Marini Date: Mon, 13 May 2024 14:18:20 +0200 Subject: [PATCH 11/11] Revert "trial" This reverts commit 06439821cbb88b8d9ba814831106c8b572b901ae. --- pyraws/l1/l1_event.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyraws/l1/l1_event.py b/pyraws/l1/l1_event.py index 5b62625..71801d3 100644 --- a/pyraws/l1/l1_event.py +++ b/pyraws/l1/l1_event.py @@ -589,5 +589,5 @@ def register_l1_to_raw( l1c_registered_to_raw = cv2.warpPerspective( l1c_numpy, homography, (raw_width, raw_height), flags=cv2.INTER_NEAREST ) - # TODO: save into TIF file, add overwrite param and other params as well. + # TODO: save into TIF file, add overwrite param and other params as well return l1c_registered_to_raw
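
A minimal usage sketch of how the pieces introduced in this series fit together. It is self-contained and hedged: `reference`, `moving`, `h_true`, the image size, and the blur settings are all invented stand-ins (a smoothed random array plays the coarsely coregistered Raw granule, and a known warp of it plays the cropped L1C tile), so no Sentinel-2 data or pyraws database is needed. The homography and warp steps below mirror what `register_l1_to_raw` does internally (patches 01 and 09) rather than calling it, since that method reads the L1C tile from a TIF of the event database. The sketch assumes the `is_2D_image` fix above (`img.ndim == 2`), so 2D inputs skip the grayscale conversion.

    import cv2
    import numpy as np
    from pyraws.utils.keypoint_utils import get_matched_keypoints_sift

    # Synthetic stand-ins: a smoothed random image plays the Raw granule band,
    # and a known homography of it plays the cropped L1C tile.
    rng = np.random.default_rng(42)
    reference = cv2.GaussianBlur(
        (rng.random((512, 512)) * 255.0).astype(np.float32), (0, 0), sigmaX=3.0
    )
    raw_height, raw_width = reference.shape[:2]
    h_true = np.array([[1.0, 0.02, 8.0], [-0.01, 1.0, -5.0], [0.0, 0.0, 1.0]])
    moving = cv2.warpPerspective(reference, h_true, (raw_width, raw_height))

    # Patch 05: SIFT keypoints matched with a brute-force matcher and filtered
    # with Lowe's ratio test; raises ValueError if fewer than 10 matches survive.
    keypoints_l1c, keypoints_raw = get_matched_keypoints_sift(
        img_1=moving, img_2=reference
    )

    # Patches 01 and 09: estimate the homography with RANSAC, then warp the
    # "L1C" image onto the "Raw" grid with nearest-neighbour interpolation.
    homography, _ = cv2.findHomography(keypoints_l1c, keypoints_raw, cv2.RANSAC)
    registered = cv2.warpPerspective(
        moving, homography, (raw_width, raw_height), flags=cv2.INTER_NEAREST
    )
    # On well-textured inputs the estimate should roughly recover h_true.
    print(np.round(homography, 2))

On real data, section 9 of the quickstart notebook does the equivalent: `get_matched_keypoints_sift` on the cropped L1C tile and the coregistered Raw granule, followed by `register_l1_to_raw`, which performs the findHomography/warpPerspective steps internally.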