Initial commit

Ba Tran 1 year ago
commit
366b97be93

+ 2 - 0
.flake8

@@ -0,0 +1,2 @@
+[flake8]
+max-line-length = 120

+ 43 - 0
.github/workflows/build.yml

@@ -0,0 +1,43 @@
+name: Build
+
+on:
+  push:
+    branches: ["master"]
+  pull_request:
+
+jobs:
+  linting:
+    runs-on: ubuntu-latest
+
+    steps:
+      - uses: actions/checkout@v3
+
+      - name: Setup python
+        uses: actions/setup-python@v4
+        with:
+          python-version: "3.x"
+
+      - name: Apply pre-commit
+        uses: pre-commit/action@v3.0.0
+        with:
+          extra_args: --all-files
+
+  test-production:
+    runs-on: ubuntu-latest
+
+    strategy:
+      fail-fast: false
+
+    steps:
+      - uses: actions/checkout@v3
+      - name: Setup python
+        uses: actions/setup-python@v4
+        with:
+          python-version: "3.x"
+
+      - name: Run tests
+        run: |
+          pip install . -U
+          pip install -r requirements-dev.txt
+          python setup.py sdist
+          pytest -vv --cov --profile

+ 31 - 0
.github/workflows/pypi.yml

@@ -0,0 +1,31 @@
+on:
+  release:
+    types:
+      - created
+
+jobs:
+  deploy:
+    runs-on: ubuntu-latest
+
+    steps:
+      - uses: actions/checkout@v3
+      - name: Setup python
+        uses: actions/setup-python@v4
+        with:
+          python-version: "3.x"
+
+      - name: Install dependencies
+        run: |
+          python -m pip install --upgrade pip
+          python -m pip install wheel
+
+      - name: Build
+        run: |
+          python setup.py sdist bdist_wheel
+
+      - name: Publish to PyPI
+        if: startsWith(github.event.ref, 'refs/tags') || github.event_name == 'release'
+        uses: pypa/gh-action-pypi-publish@release/v1.5
+        with:
+          user: __token__
+          password: ${{ secrets.PYPI_API_TOKEN }}

+ 92 - 0
.gitignore

@@ -0,0 +1,92 @@
+# Compiled source #
+###################
+*.com
+*.class
+*.dll
+*.exe
+*.o
+*.so
+*.pyc
+.ipynb_checkpoints
+*~
+*#
+build*
+*egg-info
+dist/*
+
+# Packages #
+###################
+# it's better to unpack these files and commit the raw source
+# git has its own built-in compression methods
+*.7z
+*.dmg
+*.gz
+*.iso
+*.jar
+*.rar
+*.tar
+*.zip
+
+# Logs and databases #
+######################
+*.log
+*.sql
+*.sqlite
+
+# OS generated files #
+######################
+.DS_Store
+.DS_Store?
+._*
+.Spotlight-V100
+.Trashes
+ehthumbs.db
+Thumbs.db
+
+# Images
+######################
+*.jpg
+*.gif
+*.png
+*.svg
+*.ico
+
+# Video
+######################
+*.wmv
+*.mpg
+*.mpeg
+*.mp4
+*.mov
+*.flv
+*.avi
+*.ogv
+*.ogg
+*.webm
+
+# Audio
+######################
+*.wav
+*.mp3
+*.wma
+
+# Fonts
+######################
+Fonts
+*.eot
+*.ttf
+*.woff
+
+# Format
+######################
+CPPLINT.cfg
+.clang-format
+
+# Gtags
+######################
+GPATH
+GRTAGS
+GSYMS
+GTAGS
+
+*.pth

+ 45 - 0
.pre-commit-config.yaml

@@ -0,0 +1,45 @@
+exclude: |
+  (?x)(
+      ^superpoint_superglue_deployment/superpoint.py |
+      ^superpoint_superglue_deployment/superglue.py
+  )
+
+repos:
+  - repo: https://github.com/pycqa/flake8
+    rev: 4.0.1
+    hooks:
+      - id: flake8
+        additional_dependencies: [pep8-naming]
+
+  - repo: https://github.com/psf/black
+    rev: 22.6.0
+    hooks:
+      - id: black
+        language_version: python3
+
+  - repo: https://github.com/pycqa/isort
+    rev: 5.12.0
+    hooks:
+      - id: isort
+
+  - repo: https://github.com/pre-commit/mirrors-mypy
+    rev: v0.961
+    hooks:
+      - id: mypy
+
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v4.3.0
+    hooks:
+      - id: end-of-file-fixer
+      - id: trailing-whitespace
+
+  - repo: https://github.com/pre-commit/mirrors-prettier
+    rev: v2.7.1
+    hooks:
+      - id: prettier
+        types_or: [json, markdown, yaml]
+
+  - repo: https://github.com/lovesegfault/beautysh
+    rev: v6.2.1
+    hooks:
+      - id: beautysh

+ 2 - 0
MANIFEST.in

@@ -0,0 +1,2 @@
+include README.md
+include requirements.txt

+ 108 - 0
README.md

@@ -0,0 +1,108 @@
+<p align="center">
+<a href="https://github.com/xmba15/superpoint_superglue_deployment/actions/workflows/build.yml" target="_blank">
+  <img src="https://github.com/xmba15/superpoint_superglue_deployment/actions/workflows/build.yml/badge.svg" alt="Build Status">
+</a>
+</p>
+
+# 📝 A simple library for easy deployment of SuperPoint and SuperGlue models
+
+---
+
+## :gear: Installation
+
+---
+
+```bash
+pip install superpoint_superglue_deployment
+```
+
+## :tada: TODO
+
+---
+
+- [x] Interface for deploying SuperPoint and SuperGlue
+- [x] Testing on real data
+
+## :running: How to Run
+
+---
+
+### Basic usage
+
+```python
+import cv2
+import numpy as np
+from loguru import logger
+
+from superpoint_superglue_deployment import Matcher
+
+
+def main():
+    query_image = cv2.imread("./data/images/one_pillar_pagoda_1.jpg")
+    ref_image = cv2.imread("./data/images/one_pillar_pagoda_2.jpg")
+
+    query_gray = cv2.imread("./data/images/one_pillar_pagoda_1.jpg", 0)
+    ref_gray = cv2.imread("./data/images/one_pillar_pagoda_2.jpg", 0)
+
+    superglue_matcher = Matcher(
+        {
+            "superpoint": {
+                "input_shape": (-1, -1),
+                "keypoint_threshold": 0.003,
+            },
+            "superglue": {
+                "match_threshold": 0.5,
+            },
+            "use_gpu": True,
+        }
+    )
+    query_kpts, ref_kpts, _, _, matches = superglue_matcher.match(query_gray, ref_gray)
+    M, mask = cv2.findHomography(
+        np.float64([query_kpts[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2),
+        np.float64([ref_kpts[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2),
+        method=cv2.USAC_MAGSAC,
+        ransacReprojThreshold=5.0,
+        maxIters=10000,
+        confidence=0.95,
+    )
+    logger.info(f"number of inliers: {mask.sum()}")
+    matches = np.array(matches)[np.all(mask > 0, axis=1)]
+    matches = sorted(matches, key=lambda match: match.distance)
+    matched_image = cv2.drawMatches(
+        query_image,
+        query_kpts,
+        ref_image,
+        ref_kpts,
+        matches[:50],
+        None,
+        flags=2,
+    )
+    cv2.imwrite("matched_image.jpg", matched_image)
+
+
+if __name__ == "__main__":
+    main()
+```
+
+<p align="center">
+  <img src="https://raw.githubusercontent.com/xmba15/superpoint_superglue_deployment/master/docs/images/matched_image.jpg" alt="matched image sample">
+</p>
+
+- [Notebook with detailed sample code for SuperPoint](notebooks/demo_superpoint.ipynb)
+- [Notebook with detailed sample code for SuperGlue](notebooks/demo_superglue.ipynb)
+
+## 🎛 Development environment
+
+---
+
+```bash
+mamba env create --file environment.yml
+mamba activate superpoint_superglue_deployment
+```
+
+## :gem: References
+
+---
+
+- [SuperPoint: Self-Supervised Interest Point Detection and Description.](https://github.com/rpautrat/SuperPoint)
+- [SuperGlue: Learning Feature Matching with Graph Neural Networks](https://github.com/magicleap/SuperGluePretrainedNetwork)

+ 0 - 0
data/.keep


BIN
data/images/one_pillar_pagoda_1.jpg


BIN
data/images/one_pillar_pagoda_2.jpg


+ 0 - 0
docs/.keep


BIN
docs/images/matched_image.jpg


+ 12 - 0
environment.yml

@@ -0,0 +1,12 @@
+name: superpoint_superglue_deployment
+channels:
+  - defaults
+  - anaconda
+dependencies:
+  - python=3.8
+  - pip
+  - pip:
+      - -r requirements.txt
+
+      # environment tools
+      - -r requirements-dev.txt

File diff suppressed because it is too large
+ 85 - 0
notebooks/demo_superglue.ipynb


File diff suppressed because it is too large
+ 75 - 0
notebooks/demo_superpoint.ipynb


+ 9 - 0
pyproject.toml

@@ -0,0 +1,9 @@
+[tool.black]
+line-length = 120
+
+[tool.isort]
+profile = "black"
+multi_line_output = 3
+
+[tool.mypy]
+ignore_missing_imports = true

+ 9 - 0
requirements-dev.txt

@@ -0,0 +1,9 @@
+flake8
+pep8-naming
+black
+isort
+mypy
+pytest
+pytest-cov
+pytest-profiling
+pre-commit

+ 6 - 0
requirements.txt

@@ -0,0 +1,6 @@
+# typing
+typing-extensions
+
+torch>=1.1.0
+opencv-python>=3.4.19
+loguru>=0.7.0

+ 0 - 0
scripts/.keep


+ 2 - 0
setup.cfg

@@ -0,0 +1,2 @@
+[metadata]
+license_files = LICENSE

+ 42 - 0
setup.py

@@ -0,0 +1,42 @@
+import os
+from io import open
+from typing import Any, Dict
+
+from setuptools import find_packages, setup
+
+_PARENT_DIRECTORY = os.path.abspath(os.path.dirname(__file__))
+_PACKAGE_NAME = "superpoint_superglue_deployment"
+
+_PACKAGE_VARS: Dict[str, Any] = {}
+exec(open(os.path.join(_PARENT_DIRECTORY, _PACKAGE_NAME, "version.py")).read(), _PACKAGE_VARS)
+
+_LONG_DESCRIPTION = open(os.path.join(_PARENT_DIRECTORY, "README.md"), encoding="utf-8").read()
+_INSTALL_REQUIRES = open(os.path.join(_PARENT_DIRECTORY, "requirements.txt")).read().splitlines()
+_INSTALL_REQUIRES = [line for line in _INSTALL_REQUIRES if line and not line.startswith("#")]
+
+
+def main():
+    setup(
+        name=_PACKAGE_NAME,
+        version=_PACKAGE_VARS["__version__"],
+        description="",
+        long_description=_LONG_DESCRIPTION,
+        long_description_content_type="text/markdown",
+        author="Ba Tran",
+        url="https://github.com/xmba15/superpoint_superglue_deployment",
+        classifiers=[
+            "Development Status :: 3 - Alpha",
+            "Intended Audience :: Developers",
+            "Programming Language :: Python :: 3",
+            "Programming Language :: Python :: 3.6",
+            "Programming Language :: Python :: 3.7",
+            "Programming Language :: Python :: 3.8",
+            "Programming Language :: Python :: 3.9",
+        ],
+        packages=find_packages(exclude=["tests"]),
+        install_requires=_INSTALL_REQUIRES,
+    )
+
+
+if __name__ == "__main__":
+    main()

+ 6 - 0
superpoint_superglue_deployment/__init__.py

@@ -0,0 +1,6 @@
+# flake8: noqa
+
+from superpoint_superglue_deployment.matcher import *
+from superpoint_superglue_deployment.superglue_handler import *
+from superpoint_superglue_deployment.superpoint_handler import *
+from superpoint_superglue_deployment.version import __version__

+ 3 - 0
superpoint_superglue_deployment/core/__init__.py

@@ -0,0 +1,3 @@
+# flake8: noqa
+
+from superpoint_superglue_deployment.core.utils import *

+ 9 - 0
superpoint_superglue_deployment/core/utils.py

@@ -0,0 +1,9 @@
+import numpy as np
+
+__all__ = [
+    "assert_single_channel",
+]
+
+
+def assert_single_channel(image: np.ndarray):
+    assert len(image.shape) == 2 or (len(image.shape) == 3 and image.shape[2] == 1)

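A minimal usage sketch for the helper above (the image path is hypothetical): the matching API only accepts single-channel images, so color inputs must be converted to grayscale first.

```python
import cv2

from superpoint_superglue_deployment.core import assert_single_channel

image = cv2.imread("query.jpg")  # hypothetical path; BGR image of shape (H, W, 3)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

assert_single_channel(gray)  # passes: shape is (H, W)
# assert_single_channel(image) would raise AssertionError for the 3-channel input
```
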
+ 73 - 0
superpoint_superglue_deployment/matcher.py

@@ -0,0 +1,73 @@
+from typing import Any, Dict, List, Optional, Tuple
+
+import cv2
+import numpy as np
+
+from superpoint_superglue_deployment.superglue_handler import SuperGlueHandler
+from superpoint_superglue_deployment.superpoint_handler import SuperPointHandler
+
+__all__ = ["Matcher"]
+
+
+class Matcher:
+    __DEFAULT_CONFIG: Dict[str, Any] = {
+        "superpoint": {
+            "descriptor_dim": 256,
+            "nms_radius": 4,
+            "keypoint_threshold": 0.005,
+            "max_keypoints": -1,
+            "remove_borders": 4,
+            "input_shape": (-1, -1),
+        },
+        "superglue": {
+            "descriptor_dim": 256,
+            "weights": "outdoor",
+            "keypoint_encoder": [32, 64, 128, 256],
+            "GNN_layers": ["self", "cross"] * 9,
+            "sinkhorn_iterations": 100,
+            "match_threshold": 0.2,
+        },
+        "use_gpu": False,
+    }
+
+    def __init__(
+        self,
+        config: Optional[Dict[str, Any]] = None,
+    ):
+        self._config = self.__DEFAULT_CONFIG.copy()
+        if config is not None:
+            self._config.update(config)
+        self._config["superpoint"].update({"use_gpu": self._config["use_gpu"]})
+        self._config["superglue"].update({"use_gpu": self._config["use_gpu"]})
+        self._superpoint_handler = SuperPointHandler(self._config["superpoint"])
+        self._superglue_handler = SuperGlueHandler(self._config["superglue"])
+
+    def match(
+        self,
+        query_image: np.ndarray,
+        ref_image: np.ndarray,
+    ) -> Tuple[List[cv2.KeyPoint], List[cv2.KeyPoint], np.ndarray, np.ndarray, List[cv2.DMatch]]:
+        """
+        Parameters
+        ----------
+        query_image:
+             Single-channel 8-bit image
+        ref_image:
+             Single-channel 8-bit image
+        """
+        query_pred = self._superpoint_handler.run(query_image)
+        ref_pred = self._superpoint_handler.run(ref_image)
+        query_kpts, query_descs = self._superpoint_handler.process_prediction(query_pred)
+        ref_kpts, ref_descs = self._superpoint_handler.process_prediction(ref_pred)
+        return (
+            query_kpts,
+            ref_kpts,
+            query_descs,
+            ref_descs,
+            self._superglue_handler.match(
+                query_pred,
+                ref_pred,
+                query_image.shape[:2],
+                ref_image.shape[:2],
+            ),
+        )

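Note that `Matcher` merges user options with a shallow `dict.update`, so a partial `"superpoint"` dict replaces that whole sub-dict; keys omitted there fall back to `SuperPointHandler`'s own defaults, which mirror the ones above. A minimal sketch, assuming random test images and that the pretrained weights download on first use:

```python
import numpy as np

from superpoint_superglue_deployment import Matcher

query_gray = (np.random.rand(240, 320) * 255).astype(np.uint8)
ref_gray = query_gray.copy()

# only keypoint_threshold is overridden; the remaining superpoint keys
# come from SuperPointHandler's defaults
matcher = Matcher({"superpoint": {"keypoint_threshold": 0.001}, "use_gpu": False})
query_kpts, ref_kpts, query_descs, ref_descs, matches = matcher.match(query_gray, ref_gray)
print(f"{len(query_kpts)} keypoints, {len(matches)} matches")
```
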
+ 278 - 0
superpoint_superglue_deployment/superglue.py

@@ -0,0 +1,278 @@
+# %BANNER_BEGIN%
+# ---------------------------------------------------------------------
+# %COPYRIGHT_BEGIN%
+#
+#  Magic Leap, Inc. ("COMPANY") CONFIDENTIAL
+#
+#  Unpublished Copyright (c) 2020
+#  Magic Leap, Inc., All Rights Reserved.
+#
+# NOTICE:  All information contained herein is, and remains the property
+# of COMPANY. The intellectual and technical concepts contained herein
+# are proprietary to COMPANY and may be covered by U.S. and Foreign
+# Patents, patents in process, and are protected by trade secret or
+# copyright law.  Dissemination of this information or reproduction of
+# this material is strictly forbidden unless prior written permission is
+# obtained from COMPANY.  Access to the source code contained herein is
+# hereby forbidden to anyone except current COMPANY employees, managers
+# or contractors who have executed Confidentiality and Non-disclosure
+# agreements explicitly covering such access.
+#
+# The copyright notice above does not evidence any actual or intended
+# publication or disclosure  of  this source code, which includes
+# information that is confidential and/or proprietary, and is a trade
+# secret, of  COMPANY.   ANY REPRODUCTION, MODIFICATION, DISTRIBUTION,
+# PUBLIC  PERFORMANCE, OR PUBLIC DISPLAY OF OR THROUGH USE  OF THIS
+# SOURCE CODE  WITHOUT THE EXPRESS WRITTEN CONSENT OF COMPANY IS
+# STRICTLY PROHIBITED, AND IN VIOLATION OF APPLICABLE LAWS AND
+# INTERNATIONAL TREATIES.  THE RECEIPT OR POSSESSION OF  THIS SOURCE
+# CODE AND/OR RELATED INFORMATION DOES NOT CONVEY OR IMPLY ANY RIGHTS
+# TO REPRODUCE, DISCLOSE OR DISTRIBUTE ITS CONTENTS, OR TO MANUFACTURE,
+# USE, OR SELL ANYTHING THAT IT  MAY DESCRIBE, IN WHOLE OR IN PART.
+#
+# %COPYRIGHT_END%
+# ----------------------------------------------------------------------
+# %AUTHORS_BEGIN%
+#
+#  Originating Authors: Paul-Edouard Sarlin
+#
+# %AUTHORS_END%
+# --------------------------------------------------------------------*/
+# %BANNER_END%
+
+from copy import deepcopy
+from typing import List, Tuple
+
+import torch
+from torch import nn
+
+
+def MLP(channels: List[int], do_bn: bool = True) -> nn.Module:
+    """ Multi-layer perceptron """
+    n = len(channels)
+    layers = []
+    for i in range(1, n):
+        layers.append(
+            nn.Conv1d(channels[i - 1], channels[i], kernel_size=1, bias=True))
+        if i < (n-1):
+            if do_bn:
+                layers.append(nn.BatchNorm1d(channels[i]))
+            layers.append(nn.ReLU())
+    return nn.Sequential(*layers)
+
+
+def normalize_keypoints(kpts, image_shape):
+    """ Normalize keypoints locations based on image image_shape"""
+    height, width = image_shape[:2]
+    one = kpts.new_tensor(1)
+    size = torch.stack([one*width, one*height])[None]
+    center = size / 2
+    scaling = size.max(1, keepdim=True).values * 0.7
+    return (kpts - center[:, None, :]) / scaling[:, None, :]
+
+
+class KeypointEncoder(nn.Module):
+    """ Joint encoding of visual appearance and location using MLPs"""
+    def __init__(self, feature_dim: int, layers: List[int]) -> None:
+        super().__init__()
+        self.encoder = MLP([3] + layers + [feature_dim])
+        nn.init.constant_(self.encoder[-1].bias, 0.0)
+
+    def forward(self, kpts, scores):
+        inputs = [kpts.transpose(1, 2), scores.unsqueeze(1)]
+        return self.encoder(torch.cat(inputs, dim=1))
+
+
+def attention(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor) -> Tuple[torch.Tensor,torch.Tensor]:
+    dim = query.shape[1]
+    scores = torch.einsum('bdhn,bdhm->bhnm', query, key) / dim**.5
+    prob = torch.nn.functional.softmax(scores, dim=-1)
+    return torch.einsum('bhnm,bdhm->bdhn', prob, value), prob
+
+
+class MultiHeadedAttention(nn.Module):
+    """ Multi-head attention to increase model expressivitiy """
+    def __init__(self, num_heads: int, d_model: int):
+        super().__init__()
+        assert d_model % num_heads == 0
+        self.dim = d_model // num_heads
+        self.num_heads = num_heads
+        self.merge = nn.Conv1d(d_model, d_model, kernel_size=1)
+        self.proj = nn.ModuleList([deepcopy(self.merge) for _ in range(3)])
+
+    def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor) -> torch.Tensor:
+        batch_dim = query.size(0)
+        query, key, value = [l(x).view(batch_dim, self.dim, self.num_heads, -1)
+                             for l, x in zip(self.proj, (query, key, value))]
+        x, _ = attention(query, key, value)
+        return self.merge(x.contiguous().view(batch_dim, self.dim*self.num_heads, -1))
+
+
+class AttentionalPropagation(nn.Module):
+    def __init__(self, feature_dim: int, num_heads: int):
+        super().__init__()
+        self.attn = MultiHeadedAttention(num_heads, feature_dim)
+        self.mlp = MLP([feature_dim*2, feature_dim*2, feature_dim])
+        nn.init.constant_(self.mlp[-1].bias, 0.0)
+
+    def forward(self, x: torch.Tensor, source: torch.Tensor) -> torch.Tensor:
+        message = self.attn(x, source, source)
+        return self.mlp(torch.cat([x, message], dim=1))
+
+
+class AttentionalGNN(nn.Module):
+    def __init__(self, feature_dim: int, layer_names: List[str]) -> None:
+        super().__init__()
+        self.layers = nn.ModuleList([
+            AttentionalPropagation(feature_dim, 4)
+            for _ in range(len(layer_names))])
+        self.names = layer_names
+
+    def forward(self, desc0: torch.Tensor, desc1: torch.Tensor) -> Tuple[torch.Tensor,torch.Tensor]:
+        for layer, name in zip(self.layers, self.names):
+            if name == 'cross':
+                src0, src1 = desc1, desc0
+            else:  # if name == 'self':
+                src0, src1 = desc0, desc1
+            delta0, delta1 = layer(desc0, src0), layer(desc1, src1)
+            desc0, desc1 = (desc0 + delta0), (desc1 + delta1)
+        return desc0, desc1
+
+
+def log_sinkhorn_iterations(Z: torch.Tensor, log_mu: torch.Tensor, log_nu: torch.Tensor, iters: int) -> torch.Tensor:
+    """ Perform Sinkhorn Normalization in Log-space for stability"""
+    u, v = torch.zeros_like(log_mu), torch.zeros_like(log_nu)
+    for _ in range(iters):
+        u = log_mu - torch.logsumexp(Z + v.unsqueeze(1), dim=2)
+        v = log_nu - torch.logsumexp(Z + u.unsqueeze(2), dim=1)
+    return Z + u.unsqueeze(2) + v.unsqueeze(1)
+
+
+def log_optimal_transport(scores: torch.Tensor, alpha: torch.Tensor, iters: int) -> torch.Tensor:
+    """ Perform Differentiable Optimal Transport in Log-space for stability"""
+    b, m, n = scores.shape
+    one = scores.new_tensor(1)
+    ms, ns = (m*one).to(scores), (n*one).to(scores)
+
+    bins0 = alpha.expand(b, m, 1)
+    bins1 = alpha.expand(b, 1, n)
+    alpha = alpha.expand(b, 1, 1)
+
+    couplings = torch.cat([torch.cat([scores, bins0], -1),
+                           torch.cat([bins1, alpha], -1)], 1)
+
+    norm = - (ms + ns).log()
+    log_mu = torch.cat([norm.expand(m), ns.log()[None] + norm])
+    log_nu = torch.cat([norm.expand(n), ms.log()[None] + norm])
+    log_mu, log_nu = log_mu[None].expand(b, -1), log_nu[None].expand(b, -1)
+
+    Z = log_sinkhorn_iterations(couplings, log_mu, log_nu, iters)
+    Z = Z - norm  # multiply probabilities by M+N
+    return Z
+
+
+def arange_like(x, dim: int):
+    return x.new_ones(x.shape[dim]).cumsum(0) - 1  # traceable in 1.1
+
+
+class SuperGlue(nn.Module):
+    """SuperGlue feature matching middle-end
+
+    Given two sets of keypoints and locations, we determine the
+    correspondences by:
+      1. Keypoint Encoding (normalization + visual feature and location fusion)
+      2. Graph Neural Network with multiple self and cross-attention layers
+      3. Final projection layer
+      4. Optimal Transport Layer (a differentiable Hungarian matching algorithm)
+      5. Thresholding matrix based on mutual exclusivity and a match_threshold
+
+    The correspondence ids use -1 to indicate non-matching points.
+
+    Paul-Edouard Sarlin, Daniel DeTone, Tomasz Malisiewicz, and Andrew
+    Rabinovich. SuperGlue: Learning Feature Matching with Graph Neural
+    Networks. In CVPR, 2020. https://arxiv.org/abs/1911.11763
+
+    """
+    default_config = {
+        'descriptor_dim': 256,
+        'weights': 'indoor',
+        'keypoint_encoder': [32, 64, 128, 256],
+        'GNN_layers': ['self', 'cross'] * 9,
+        'sinkhorn_iterations': 100,
+        'match_threshold': 0.2,
+    }
+
+    def __init__(self, config):
+        super().__init__()
+        self.config = {**self.default_config, **config}
+
+        self.kenc = KeypointEncoder(
+            self.config['descriptor_dim'], self.config['keypoint_encoder'])
+
+        self.gnn = AttentionalGNN(
+            feature_dim=self.config['descriptor_dim'], layer_names=self.config['GNN_layers'])
+
+        self.final_proj = nn.Conv1d(
+            self.config['descriptor_dim'], self.config['descriptor_dim'],
+            kernel_size=1, bias=True)
+
+        bin_score = torch.nn.Parameter(torch.tensor(1.))
+        self.register_parameter('bin_score', bin_score)
+
+    def forward(self, data):
+        """Run SuperGlue on a pair of keypoints and descriptors"""
+        desc0, desc1 = data['descriptors0'], data['descriptors1']
+        kpts0, kpts1 = data['keypoints0'], data['keypoints1']
+
+        if kpts0.shape[1] == 0 or kpts1.shape[1] == 0:  # no keypoints
+            shape0, shape1 = kpts0.shape[:-1], kpts1.shape[:-1]
+            return {
+                'matches0': kpts0.new_full(shape0, -1, dtype=torch.int),
+                'matches1': kpts1.new_full(shape1, -1, dtype=torch.int),
+                'matching_scores0': kpts0.new_zeros(shape0),
+                'matching_scores1': kpts1.new_zeros(shape1),
+            }
+
+        # Keypoint normalization.
+        kpts0 = normalize_keypoints(kpts0, data['image0_shape'])
+        kpts1 = normalize_keypoints(kpts1, data['image1_shape'])
+
+        # Keypoint MLP encoder.
+        desc0 = desc0 + self.kenc(kpts0, data['scores0'])
+        desc1 = desc1 + self.kenc(kpts1, data['scores1'])
+        del data
+
+        # Multi-layer Transformer network.
+        desc0, desc1 = self.gnn(desc0, desc1)
+
+        # Final MLP projection.
+        mdesc0, mdesc1 = self.final_proj(desc0), self.final_proj(desc1)
+
+        # Compute matching descriptor distance.
+        scores = torch.einsum('bdn,bdm->bnm', mdesc0, mdesc1)
+        scores = scores / self.config['descriptor_dim']**.5
+
+        # Run the optimal transport.
+        scores = log_optimal_transport(
+            scores, self.bin_score,
+            iters=self.config['sinkhorn_iterations'])
+
+        # Get the matches with score above "match_threshold".
+        max0, max1 = scores[:, :-1, :-1].max(2), scores[:, :-1, :-1].max(1)
+        indices0, indices1 = max0.indices, max1.indices
+        mutual0 = arange_like(indices0, 1)[None] == indices1.gather(1, indices0)
+        mutual1 = arange_like(indices1, 1)[None] == indices0.gather(1, indices1)
+        zero = scores.new_tensor(0)
+        mscores0 = torch.where(mutual0, max0.values.exp(), zero)
+        mscores1 = torch.where(mutual1, mscores0.gather(1, indices1), zero)
+        valid0 = mutual0 & (mscores0 > self.config['match_threshold'])
+        valid1 = mutual1 & valid0.gather(1, indices1)
+        indices0 = torch.where(valid0, indices0, indices0.new_tensor(-1))
+        indices1 = torch.where(valid1, indices1, indices1.new_tensor(-1))
+
+        return {
+            'matches0': indices0, # use -1 for invalid match
+            'matches1': indices1, # use -1 for invalid match
+            'matching_scores0': mscores0,
+            'matching_scores1': mscores1,
+        }

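As a quick shape check for the `attention` helper above: tensors are laid out as (batch, dim_per_head, num_heads, num_points), and the probabilities are a softmax over the key points. A standalone sketch with random tensors:

```python
import torch

from superpoint_superglue_deployment.superglue import attention

b, d, h, n, m = 1, 64, 4, 100, 120  # batch, dim per head, heads, query/key point counts
query = torch.randn(b, d, h, n)
key = torch.randn(b, d, h, m)
value = torch.randn(b, d, h, m)

out, prob = attention(query, key, value)
print(out.shape)   # torch.Size([1, 64, 4, 100])
print(prob.shape)  # torch.Size([1, 4, 100, 120])
assert torch.allclose(prob.sum(-1), torch.ones(b, h, n), atol=1e-5)  # rows sum to 1
```
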
+ 138 - 0
superpoint_superglue_deployment/superglue_handler.py

@@ -0,0 +1,138 @@
+import os
+from typing import Any, Dict, List, Optional, Tuple
+
+import cv2
+import numpy as np
+import torch
+from loguru import logger
+
+from superpoint_superglue_deployment.superglue import SuperGlue
+
+__all__ = ["SuperGlueHandler"]
+
+
+class SuperGlueHandler:
+    __CACHED_DIR = os.path.join(os.path.expanduser("~"), ".cache/torch/hub/checkpoints")
+    __MODEL_WEIGHTS_DICT: Dict[str, Any] = {
+        "indoor": {
+            "name": "superglue_indoor.pth",
+            "url": "https://github.com/xmba15/superpoint_superglue_deployment/releases/download/model_weights/superglue_indoor.pth",  # noqa: E501
+        },
+        "outdoor": {
+            "name": "superglue_outdoor.pth",
+            "url": "https://github.com/xmba15/superpoint_superglue_deployment/releases/download/model_weights/superglue_outdoor.pth",  # noqa: E501
+        },
+    }
+    __MODEL_WEIGHTS_OUTDOOR_FILE_NAME = "superglue_outdoor.pth"
+
+    __DEFAULT_CONFIG: Dict[str, Any] = {
+        "descriptor_dim": 256,
+        "weights": "outdoor",
+        "keypoint_encoder": [32, 64, 128, 256],
+        "GNN_layers": ["self", "cross"] * 9,
+        "sinkhorn_iterations": 100,
+        "match_threshold": 0.2,
+        "use_gpu": False,
+    }
+
+    def __init__(
+        self,
+        config: Optional[Dict[str, Any]] = None,
+    ):
+        self._config = self.__DEFAULT_CONFIG.copy()
+        if config is not None:
+            self._config.update(config)
+
+        assert self._config["weights"] in self.__MODEL_WEIGHTS_DICT
+
+        os.makedirs(self.__CACHED_DIR, exist_ok=True)
+
+        if self._config["use_gpu"] and not torch.cuda.is_available():
+            logger.info("gpu environment is not available, falling back to cpu")
+            self._config["use_gpu"] = False
+        self._device = torch.device("cuda" if self._config["use_gpu"] else "cpu")
+
+        self._superglue_engine = SuperGlue(self._config)
+
+        if not os.path.isfile(
+            os.path.join(self.__CACHED_DIR, self.__MODEL_WEIGHTS_DICT[self._config["weights"]]["name"])
+        ):
+            torch.hub.load_state_dict_from_url(
+                self.__MODEL_WEIGHTS_DICT[self._config["weights"]]["url"], map_location=lambda storage, loc: storage
+            )
+        self._superglue_engine.load_state_dict(
+            torch.load(os.path.join(self.__CACHED_DIR, self.__MODEL_WEIGHTS_DICT[self._config["weights"]]["name"]))
+        )
+        self._superglue_engine = self._superglue_engine.eval().to(self._device)
+        logger.info(f"loaded superglue weights {self.__MODEL_WEIGHTS_DICT[self._config['weights']]['name']}")
+
+    @property
+    def device(self):
+        return self._device
+
+    def run(
+        self,
+        query_pred: Dict[str, torch.Tensor],
+        ref_pred: Dict[str, torch.Tensor],
+        query_shape: Tuple[int, int],
+        ref_shape: Tuple[int, int],
+    ) -> Dict[str, torch.Tensor]:
+        """
+        Parameters
+        ----------
+        query_pred
+             dict data in the following form
+             {
+              "keypoints": List[torch.Tensor]  # tensor has shape: num keypoints x 2
+              "descriptors": List[torch.Tensor] # tensor has shape: 256 x num keypoints
+              }
+        ref_pred
+             dict data in the same form as query_pred's
+        """
+        data_dict: Dict[str, Any] = dict()
+        data_dict = {**data_dict, **{k + "0": v for k, v in query_pred.items()}}
+        data_dict = {**data_dict, **{k + "1": v for k, v in ref_pred.items()}}
+        for k in data_dict:
+            if isinstance(data_dict[k], (list, tuple)):
+                data_dict[k] = torch.stack(data_dict[k])
+        del query_pred, ref_pred
+
+        for k in data_dict:
+            if isinstance(data_dict[k], torch.Tensor) and data_dict[k].device.type != self._device.type:
+                data_dict[k] = data_dict[k].to(self._device)
+
+        data_dict["image0_shape"] = query_shape
+        data_dict["image1_shape"] = ref_shape
+
+        with torch.no_grad():
+            return self._superglue_engine(data_dict)
+
+    def match(
+        self,
+        query_pred: Dict[str, torch.Tensor],
+        ref_pred: Dict[str, torch.Tensor],
+        query_shape: Tuple[int, int],
+        ref_shape: Tuple[int, int],
+    ) -> List[cv2.DMatch]:
+        pred = self.run(
+            query_pred,
+            ref_pred,
+            query_shape,
+            ref_shape,
+        )
+        query_indices = pred["matches0"].cpu().numpy().squeeze(0)
+        ref_indices = pred["matches1"].cpu().numpy().squeeze(0)
+        query_matching_scores = pred["matching_scores0"].cpu().numpy().squeeze(0)
+
+        del pred
+        matched_query_indices = np.where(query_indices > -1)[0]
+        matched_ref_indices = np.where(ref_indices > -1)[0]
+        matches = [
+            cv2.DMatch(
+                _distance=1 - query_matching_scores[matched_query_idx],
+                _queryIdx=matched_query_idx,
+                _trainIdx=matched_ref_idx,
+            )
+            for matched_query_idx, matched_ref_idx in zip(matched_query_indices, matched_ref_indices)
+        ]
+        return matches

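`match` above encodes SuperGlue's matching score as `cv2.DMatch` distance = 1 - score, so a lower distance means a more confident match and an ascending sort ranks the strongest pairs first. A small self-contained sketch of that convention:

```python
import cv2

# distance = 1 - matching_score, so sorting ascending puts the strongest match first
matches = [
    cv2.DMatch(_distance=0.4, _queryIdx=1, _trainIdx=7),
    cv2.DMatch(_distance=0.1, _queryIdx=0, _trainIdx=3),
]
best = sorted(matches, key=lambda m: m.distance)[0]
print(best.queryIdx, best.trainIdx, 1.0 - best.distance)  # 0 3 0.9
```
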
+ 196 - 0
superpoint_superglue_deployment/superpoint.py

@@ -0,0 +1,196 @@
+# %BANNER_BEGIN%
+# ---------------------------------------------------------------------
+# %COPYRIGHT_BEGIN%
+#
+#  Magic Leap, Inc. ("COMPANY") CONFIDENTIAL
+#
+#  Unpublished Copyright (c) 2020
+#  Magic Leap, Inc., All Rights Reserved.
+#
+# NOTICE:  All information contained herein is, and remains the property
+# of COMPANY. The intellectual and technical concepts contained herein
+# are proprietary to COMPANY and may be covered by U.S. and Foreign
+# Patents, patents in process, and are protected by trade secret or
+# copyright law.  Dissemination of this information or reproduction of
+# this material is strictly forbidden unless prior written permission is
+# obtained from COMPANY.  Access to the source code contained herein is
+# hereby forbidden to anyone except current COMPANY employees, managers
+# or contractors who have executed Confidentiality and Non-disclosure
+# agreements explicitly covering such access.
+#
+# The copyright notice above does not evidence any actual or intended
+# publication or disclosure  of  this source code, which includes
+# information that is confidential and/or proprietary, and is a trade
+# secret, of  COMPANY.   ANY REPRODUCTION, MODIFICATION, DISTRIBUTION,
+# PUBLIC  PERFORMANCE, OR PUBLIC DISPLAY OF OR THROUGH USE  OF THIS
+# SOURCE CODE  WITHOUT THE EXPRESS WRITTEN CONSENT OF COMPANY IS
+# STRICTLY PROHIBITED, AND IN VIOLATION OF APPLICABLE LAWS AND
+# INTERNATIONAL TREATIES.  THE RECEIPT OR POSSESSION OF  THIS SOURCE
+# CODE AND/OR RELATED INFORMATION DOES NOT CONVEY OR IMPLY ANY RIGHTS
+# TO REPRODUCE, DISCLOSE OR DISTRIBUTE ITS CONTENTS, OR TO MANUFACTURE,
+# USE, OR SELL ANYTHING THAT IT  MAY DESCRIBE, IN WHOLE OR IN PART.
+#
+# %COPYRIGHT_END%
+# ----------------------------------------------------------------------
+# %AUTHORS_BEGIN%
+#
+#  Originating Authors: Paul-Edouard Sarlin
+#
+# %AUTHORS_END%
+# --------------------------------------------------------------------*/
+# %BANNER_END%
+
+import torch
+from torch import nn
+
+def simple_nms(scores, nms_radius: int):
+    """ Fast Non-maximum suppression to remove nearby points """
+    assert nms_radius >= 0
+
+    def max_pool(x):
+        return torch.nn.functional.max_pool2d(
+            x, kernel_size=nms_radius*2+1, stride=1, padding=nms_radius)
+
+    zeros = torch.zeros_like(scores)
+    max_mask = scores == max_pool(scores)
+    for _ in range(2):
+        supp_mask = max_pool(max_mask.float()) > 0
+        supp_scores = torch.where(supp_mask, zeros, scores)
+        new_max_mask = supp_scores == max_pool(supp_scores)
+        max_mask = max_mask | (new_max_mask & (~supp_mask))
+    return torch.where(max_mask, scores, zeros)
+
+
+def remove_borders(keypoints, scores, border: int, height: int, width: int):
+    """ Removes keypoints too close to the border """
+    mask_h = (keypoints[:, 0] >= border) & (keypoints[:, 0] < (height - border))
+    mask_w = (keypoints[:, 1] >= border) & (keypoints[:, 1] < (width - border))
+    mask = mask_h & mask_w
+    return keypoints[mask], scores[mask]
+
+
+def top_k_keypoints(keypoints, scores, k: int):
+    if k >= len(keypoints):
+        return keypoints, scores
+    scores, indices = torch.topk(scores, k, dim=0)
+    return keypoints[indices], scores
+
+
+def sample_descriptors(keypoints, descriptors, s: int = 8):
+    """ Interpolate descriptors at keypoint locations """
+    b, c, h, w = descriptors.shape
+    keypoints = keypoints - s / 2 + 0.5
+    keypoints /= torch.tensor([(w*s - s/2 - 0.5), (h*s - s/2 - 0.5)],
+                              ).to(keypoints)[None]
+    keypoints = keypoints*2 - 1  # normalize to (-1, 1)
+    args = {'align_corners': True} if torch.__version__ >= '1.3' else {}
+    descriptors = torch.nn.functional.grid_sample(
+        descriptors, keypoints.view(b, 1, -1, 2), mode='bilinear', **args)
+    descriptors = torch.nn.functional.normalize(
+        descriptors.reshape(b, c, -1), p=2, dim=1)
+    return descriptors
+
+
+class SuperPoint(nn.Module):
+    """SuperPoint Convolutional Detector and Descriptor
+
+    SuperPoint: Self-Supervised Interest Point Detection and
+    Description. Daniel DeTone, Tomasz Malisiewicz, and Andrew
+    Rabinovich. In CVPRW, 2019. https://arxiv.org/abs/1712.07629
+
+    """
+    default_config = {
+        'descriptor_dim': 256,
+        'nms_radius': 4,
+        'keypoint_threshold': 0.005,
+        'max_keypoints': -1,
+        'remove_borders': 4,
+    }
+
+    def __init__(self, config):
+        super().__init__()
+        self.config = {**self.default_config, **config}
+
+        self.relu = nn.ReLU(inplace=True)
+        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
+        c1, c2, c3, c4, c5 = 64, 64, 128, 128, 256
+
+        self.conv1a = nn.Conv2d(1, c1, kernel_size=3, stride=1, padding=1)
+        self.conv1b = nn.Conv2d(c1, c1, kernel_size=3, stride=1, padding=1)
+        self.conv2a = nn.Conv2d(c1, c2, kernel_size=3, stride=1, padding=1)
+        self.conv2b = nn.Conv2d(c2, c2, kernel_size=3, stride=1, padding=1)
+        self.conv3a = nn.Conv2d(c2, c3, kernel_size=3, stride=1, padding=1)
+        self.conv3b = nn.Conv2d(c3, c3, kernel_size=3, stride=1, padding=1)
+        self.conv4a = nn.Conv2d(c3, c4, kernel_size=3, stride=1, padding=1)
+        self.conv4b = nn.Conv2d(c4, c4, kernel_size=3, stride=1, padding=1)
+
+        self.convPa = nn.Conv2d(c4, c5, kernel_size=3, stride=1, padding=1)
+        self.convPb = nn.Conv2d(c5, 65, kernel_size=1, stride=1, padding=0)
+
+        self.convDa = nn.Conv2d(c4, c5, kernel_size=3, stride=1, padding=1)
+        self.convDb = nn.Conv2d(
+            c5, self.config['descriptor_dim'],
+            kernel_size=1, stride=1, padding=0)
+
+        mk = self.config['max_keypoints']
+        if mk == 0 or mk < -1:
+            raise ValueError('\"max_keypoints\" must be positive or \"-1\"')
+
+    def forward(self, data):
+        """ Compute keypoints, scores, descriptors for image """
+        # Shared Encoder
+        x = self.relu(self.conv1a(data['image']))
+        x = self.relu(self.conv1b(x))
+        x = self.pool(x)
+        x = self.relu(self.conv2a(x))
+        x = self.relu(self.conv2b(x))
+        x = self.pool(x)
+        x = self.relu(self.conv3a(x))
+        x = self.relu(self.conv3b(x))
+        x = self.pool(x)
+        x = self.relu(self.conv4a(x))
+        x = self.relu(self.conv4b(x))
+
+        # Compute the dense keypoint scores
+        cPa = self.relu(self.convPa(x))
+        scores = self.convPb(cPa)
+        scores = torch.nn.functional.softmax(scores, 1)[:, :-1]
+        b, _, h, w = scores.shape
+        scores = scores.permute(0, 2, 3, 1).reshape(b, h, w, 8, 8)
+        scores = scores.permute(0, 1, 3, 2, 4).reshape(b, h*8, w*8)
+        scores = simple_nms(scores, self.config['nms_radius'])
+
+        # Extract keypoints
+        keypoints = [
+            torch.nonzero(s > self.config['keypoint_threshold'])
+            for s in scores]
+        scores = [s[tuple(k.t())] for s, k in zip(scores, keypoints)]
+
+        # Discard keypoints near the image borders
+        keypoints, scores = list(zip(*[
+            remove_borders(k, s, self.config['remove_borders'], h*8, w*8)
+            for k, s in zip(keypoints, scores)]))
+
+        # Keep the k keypoints with highest score
+        if self.config['max_keypoints'] >= 0:
+            keypoints, scores = list(zip(*[
+                top_k_keypoints(k, s, self.config['max_keypoints'])
+                for k, s in zip(keypoints, scores)]))
+
+        # Convert (h, w) to (x, y)
+        keypoints = [torch.flip(k, [1]).float() for k in keypoints]
+
+        # Compute the dense descriptors
+        cDa = self.relu(self.convDa(x))
+        descriptors = self.convDb(cDa)
+        descriptors = torch.nn.functional.normalize(descriptors, p=2, dim=1)
+
+        # Extract descriptors
+        descriptors = [sample_descriptors(k[None], d[None], 8)[0]
+                       for k, d in zip(keypoints, descriptors)]
+
+        return {
+            'keypoints': keypoints,
+            'scores': scores,
+            'descriptors': descriptors,
+        }

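A minimal sketch of calling the raw module on a random grayscale tensor to inspect the output layout. This uses freshly initialized (untrained) weights, so the detections themselves are meaningless; `SuperPointHandler` below takes care of loading the pretrained checkpoint:

```python
import torch

from superpoint_superglue_deployment.superpoint import SuperPoint

model = SuperPoint({"max_keypoints": 100}).eval()
image = torch.rand(1, 1, 240, 320)  # batch x channel x height x width, values in [0, 1]
with torch.no_grad():
    pred = model({"image": image})
print(pred["keypoints"][0].shape)    # (num_keypoints, 2), in (x, y) order
print(pred["scores"][0].shape)       # (num_keypoints,)
print(pred["descriptors"][0].shape)  # (256, num_keypoints)
```
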
+ 125 - 0
superpoint_superglue_deployment/superpoint_handler.py

@@ -0,0 +1,125 @@
+import os
+from typing import Any, Dict, List, Optional, Tuple
+
+import cv2
+import numpy as np
+import torch
+from loguru import logger
+
+from superpoint_superglue_deployment.core import assert_single_channel
+from superpoint_superglue_deployment.superpoint import SuperPoint
+
+__all__ = ["SuperPointHandler"]
+
+
+class SuperPointHandler:
+    __CACHED_DIR = os.path.join(os.path.expanduser("~"), ".cache/torch/hub/checkpoints")
+    __MODEL_WEIGHTS_FILE_NAME = "superpoint_v1.pth"
+    __MODEL_WEIGHTS_URL = (
+        "https://github.com/xmba15/superpoint_superglue_deployment/releases/download/model_weights/superpoint_v1.pth"
+    )
+
+    __DEFAULT_CONFIG: Dict[str, Any] = {
+        "descriptor_dim": 256,
+        "nms_radius": 4,
+        "keypoint_threshold": 0.005,
+        "max_keypoints": -1,
+        "remove_borders": 4,
+        "input_shape": (-1, -1),
+        "use_gpu": False,
+    }
+
+    def __init__(
+        self,
+        config: Optional[Dict[str, Any]] = None,
+    ):
+        self._config = self.__DEFAULT_CONFIG.copy()
+        if config is not None:
+            self._config.update(config)
+
+        os.makedirs(self.__CACHED_DIR, exist_ok=True)
+
+        if all([e > 0 for e in self._config["input_shape"]]):
+            self._validate_input_shape(self._config["input_shape"])
+
+        if self._config["use_gpu"] and not torch.cuda.is_available():
+            logger.info("gpu environment is not available, falling back to cpu")
+            self._config["use_gpu"] = False
+        self._device = torch.device("cuda" if self._config["use_gpu"] else "cpu")
+
+        self._superpoint_engine = SuperPoint(self._config)
+
+        if not os.path.isfile(os.path.join(self.__CACHED_DIR, self.__MODEL_WEIGHTS_FILE_NAME)):
+            torch.hub.load_state_dict_from_url(self.__MODEL_WEIGHTS_URL, map_location=lambda storage, loc: storage)
+        self._superpoint_engine.load_state_dict(
+            torch.load(os.path.join(self.__CACHED_DIR, self.__MODEL_WEIGHTS_FILE_NAME))
+        )
+        self._superpoint_engine = self._superpoint_engine.eval().to(self._device)
+        logger.info(f"loaded superpoint weights {self.__MODEL_WEIGHTS_FILE_NAME}")
+
+    def _validate_input_shape(self, image_shape: Tuple[int, int]):
+        assert (
+            max(image_shape) >= 160 and max(image_shape) <= 2000
+        ), f"input resolution {image_shape} is too small or too large"
+
+    @property
+    def device(self):
+        return self._device
+
+    def run(self, image: np.ndarray) -> Dict[str, Tuple[torch.Tensor]]:
+        """
+        Returns
+        -------
+        Dict[str, Tuple[torch.Tensor]]
+            dict data in the following form:
+            {
+              "keypoints": List[torch.Tensor]  # tensor has shape: num keypoints x 2
+              "scores": Tuple[torch.Tensor] # tensor has shape: num keypoints
+              "descriptors": List[torch.Tensor] # tensor has shape: 256 x num keypoints
+            }
+        """
+        assert_single_channel(image)
+        self._validate_input_shape(image.shape[:2])
+        with torch.no_grad():
+            pred = self._superpoint_engine({"image": self._to_tensor(image)})
+        if all([e > 0 for e in self._config["input_shape"]]):
+            pred["keypoints"][0] = torch.mul(
+                pred["keypoints"][0],
+                torch.from_numpy(np.divide(image.shape[:2][::-1], self._config["input_shape"][::-1])).to(self._device),
+            )
+        return pred
+
+    def process_prediction(self, pred: Dict[str, torch.Tensor]) -> Tuple[List[cv2.KeyPoint], np.ndarray]:
+        keypoints_arr = pred["keypoints"][0].cpu().numpy()  # num keypoints x 2
+        scores_arr = pred["scores"][0].cpu().numpy()  # num keypoints
+        descriptors_arr = pred["descriptors"][0].cpu().numpy()  # 256 x num keypoints
+        del pred
+
+        num_keypoints = keypoints_arr.shape[0]
+        if num_keypoints == 0:
+            return [], np.array([])
+
+        keypoints = []
+        for idx in range(num_keypoints):
+            keypoint = cv2.KeyPoint()
+            keypoint.pt = keypoints_arr[idx]
+            keypoint.response = scores_arr[idx]
+            keypoints.append(keypoint)
+        return keypoints, descriptors_arr.transpose(1, 0)
+
+    def detect_and_compute(self, image: np.ndarray) -> Tuple[List[cv2.KeyPoint], np.ndarray]:
+        pred = self.run(image)
+        return self.process_prediction(pred)
+
+    def detect(self, image) -> List[cv2.KeyPoint]:
+        return self.detect_and_compute(image)[0]
+
+    def _to_tensor(self, image: np.ndarray):
+        if all([e > 0 for e in self._config["input_shape"]]):
+            return (
+                torch.from_numpy(cv2.resize(image, self._config["input_shape"][::-1]).astype(np.float32) / 255.0)
+                .float()[None, None]
+                .to(self._device)
+            )
+        else:
+            return torch.from_numpy(image.astype(np.float32) / 255.0).float()[None, None].to(self._device)

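When `input_shape` is a fixed (height, width), `_to_tensor` resizes the image before inference and `run` rescales the keypoints back into the original resolution. A hedged sketch, assuming a random image and that the pretrained weights download on first use:

```python
import numpy as np

from superpoint_superglue_deployment import SuperPointHandler

handler = SuperPointHandler(
    {"use_gpu": False, "input_shape": (480, 640), "keypoint_threshold": 0.001}
)
image = (np.random.rand(960, 1280) * 255).astype(np.uint8)  # resized to 480 x 640 internally
keypoints, descriptors = handler.detect_and_compute(image)
# keypoint coordinates come back in the original 1280 x 960 frame
assert all(kp.pt[0] < 1280 and kp.pt[1] < 960 for kp in keypoints)
```
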
+ 1 - 0
superpoint_superglue_deployment/version.py

@@ -0,0 +1 @@
+__version__ = "0.0.1"

+ 12 - 0
tests/test_matcher.py

@@ -0,0 +1,12 @@
+import numpy as np
+
+from superpoint_superglue_deployment import Matcher
+
+
+def test_inference():
+    query_image = np.random.rand(300, 300) * 255
+    query_image = query_image.astype(np.uint8)
+    ref_image = query_image.copy()
+    matcher = Matcher()
+    matcher.match(query_image, ref_image)

+ 37 - 0
tests/test_superglue_handler.py

@@ -0,0 +1,37 @@
+import numpy as np
+import pytest
+
+from superpoint_superglue_deployment import SuperGlueHandler, SuperPointHandler
+
+
+def test_initialization_success():
+    SuperGlueHandler()
+
+
+def test_initialization_failure():
+    with pytest.raises(AssertionError):
+        SuperGlueHandler({"weights": "unknown"})
+
+
+def test_inference():
+    superpoint_handler = SuperPointHandler(
+        {
+            "use_gpu": True,
+            "input_shape": (-1, -1),
+            "keypoint_threshold": 0.001,
+        }
+    )
+    query_image = np.random.rand(300, 300) * 255
+    query_image = query_image.astype(np.uint8)
+    ref_image = query_image.copy()
+
+    query_pred = superpoint_handler.run(query_image)
+    ref_pred = superpoint_handler.run(ref_image)
+
+    superglue_handler = SuperGlueHandler(
+        {
+            "use_gpu": False,
+        }
+    )
+    superglue_handler.match(query_pred, ref_pred, query_image.shape[:2], ref_image.shape[:2])

+ 59 - 0
tests/test_superpoint_handler.py

@@ -0,0 +1,59 @@
+import numpy as np
+import pytest
+
+from superpoint_superglue_deployment import SuperPointHandler
+
+
+def test_initialization_success():
+    SuperPointHandler(
+        {
+            "use_gpu": False,
+            "input_shape": (-1, -1),
+        }
+    )
+
+
+def test_initialization_failure():
+    with pytest.raises(AssertionError):
+        SuperPointHandler(
+            {
+                "use_gpu": False,
+                "input_shape": (100, 100),
+            }
+        )
+
+    with pytest.raises(AssertionError):
+        SuperPointHandler(
+            {
+                "use_gpu": False,
+                "input_shape": (3000, 100),
+            }
+        )
+
+
+def test_inference():
+    superpoint_handler = SuperPointHandler(
+        {
+            "use_gpu": True,
+            "input_shape": (-1, -1),
+            "keypoint_threshold": 0.001,
+        }
+    )
+    image = np.random.rand(300, 300) * 255
+    image = image.astype(np.uint8)
+    keypoints, _ = superpoint_handler.detect_and_compute(image)
+    assert len(keypoints) > 0
+
+
+def test_inference_failure():
+    superpoint_handler = SuperPointHandler(
+        {
+            "use_gpu": True,
+            "input_shape": (-1, -1),
+            "keypoint_threshold": 0.001,
+        }
+    )
+    image = np.random.rand(300, 300, 3) * 255
+    image = image.astype(np.uint8)
+    with pytest.raises(AssertionError):
+        superpoint_handler.detect_and_compute(image)

Some files were not shown because too many files changed