from __future__ import annotations

import sys
from abc import ABC, abstractmethod
from concurrent.futures import Future

import cv2
import numpy as np

# ITemplate, IMatchingResult, IMatch, IImage, IInterpretationResult and
# IFeature are assumed to be defined elsewhere in this package.


class ISupervisionResult(ABC):
    @property
    @abstractmethod
    def template(self) -> ITemplate:
        """The template that was matched against the target image."""
        raise NotImplementedError()

    @property
    @abstractmethod
    def matching_result(self) -> IMatchingResult:
        """The matching result this supervision result was derived from."""
        raise NotImplementedError()

    @property
    @abstractmethod
    def score(self) -> float:
        """The score assigned to this supervision result."""
        raise NotImplementedError()

    @property
    @abstractmethod
    def delta(self) -> np.ndarray:
        """The anchor point of the transformation in the template's coordinate system."""
        raise NotImplementedError()

    @property
    @abstractmethod
    def delta_prime(self) -> np.ndarray:
        """The anchor point of the transformation in the target image's coordinate system."""
        raise NotImplementedError()

    @property
    @abstractmethod
    def transformation_matrix(self) -> np.ndarray:
        """The 2x2 linear part of the affine transformation model."""
        raise NotImplementedError()

    def translate(self, template_point: np.ndarray, /) -> np.ndarray:
        """
        Translates the given template point into a target point: given a
        position in the template's coordinate system, this function returns
        the corresponding position in the target image's coordinate system,
        according to the affine transformation model.
        """
        assert template_point.shape == (2,)
        return self.transformation_matrix @ (template_point - self.delta) + self.delta_prime
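
    # A minimal numeric sketch of the formula above (all values are made up,
    # purely for illustration):
    #
    #     A = np.array([[1.0, 0.0], [0.0, 1.0]])  # transformation_matrix (identity)
    #     delta = np.array([10.0, 10.0])          # anchor in template coordinates
    #     delta_prime = np.array([50.0, 40.0])    # anchor in target coordinates
    #     point = np.array([12.0, 13.0])
    #     A @ (point - delta) + delta_prime       # -> array([52., 43.])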

    @abstractmethod
    def get_match_weight(self, match: IMatch, /) -> float:
        raise NotImplementedError()

    @abstractmethod
    def interpret_async(self, /, *, target: IImage) -> Future:
        raise NotImplementedError()

    @abstractmethod
    def interpret(self, /, **kwargs) -> IInterpretationResult:
        raise NotImplementedError()

    def get_weighted_mse(self, /) -> float:
        error = 0.0
        significant_match_count = 0
        for match in self.matching_result.get_all_matches():
            match_weight = self.get_match_weight(match)
            # Skip matches whose weight is effectively zero.
            if match_weight < sys.float_info.epsilon:
                continue
            significant_match_count += 1
            s = match.template_point
            # Predicted target point under the affine transformation model.
            p = self.translate(s)
            # Observed target point from the matching result.
            d = match.target_point
            current_error = p - d
            # Squared Euclidean norm of the residual.
            current_error_value = np.dot(current_error, current_error)
            error += current_error_value * match_weight
        # Guard against division by zero when no match is significant.
        if significant_match_count == 0:
            return 0.0
        # Note: the weighted squared errors are averaged over the number of
        # significant matches, not over the total weight.
        return error / significant_match_count
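
    # Vectorized sketch of the same computation (illustrative only; it assumes
    # the matches and weights can be materialized into arrays and is not part
    # of the interface):
    #
    #     matches = list(self.matching_result.get_all_matches())
    #     S = np.stack([m.template_point for m in matches])           # (n, 2)
    #     D = np.stack([m.target_point for m in matches])             # (n, 2)
    #     w = np.array([self.get_match_weight(m) for m in matches])   # (n,)
    #     P = (self.transformation_matrix @ (S - self.delta).T).T + self.delta_prime
    #     keep = w >= sys.float_info.epsilon
    #     residuals = ((P - D) ** 2).sum(axis=1)                      # squared norms
    #     weighted_mse = (residuals * w)[keep].sum() / keep.sum()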

    def warp_feature(self, feature: IFeature, target: np.ndarray, /) -> np.ndarray:
        """
        Rectifies the given feature from the target image: the feature's
        corners are mapped into the target image via the affine transformation
        model, and the resulting quadrilateral is warped onto an axis-aligned
        patch of the feature's size.
        """
        # Corners of the feature in the target image's coordinate system.
        target_tl = self.translate(feature.top_left)
        target_tr = self.translate(feature.top_right)
        target_bl = self.translate(feature.bottom_left)
        target_br = self.translate(feature.bottom_right)
        # Corners of the output patch, in the same (TL, TR, BR, BL) order.
        dest_tl = np.array([0, 0], dtype=np.float64)
        dest_tr = np.array([feature.w, 0], dtype=np.float64)
        dest_br = np.array([feature.w, feature.h], dtype=np.float64)
        dest_bl = np.array([0, feature.h], dtype=np.float64)
        source_points = [target_tl, target_tr, target_br, target_bl]
        destination_points = [dest_tl, dest_tr, dest_br, dest_bl]
        homography = cv2.getPerspectiveTransform(np.float32(source_points), np.float32(destination_points))
        return cv2.warpPerspective(
            target,
            homography,
            (feature.w, feature.h),
            flags=cv2.INTER_LINEAR,
        )
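

# Standalone sketch of the rectification step used by warp_feature above. All
# coordinates here are made up for illustration and nothing below is part of
# the interface: a quadrilateral region of `image` is warped onto an
# axis-aligned 200x100 patch.
def _demo_rectify(image: np.ndarray) -> np.ndarray:
    # Corners of the region in the input image, in (TL, TR, BR, BL) order.
    corners = np.float32([[30, 40], [250, 55], [260, 160], [25, 150]])
    # Corners of the output patch, in the same order.
    patch = np.float32([[0, 0], [200, 0], [200, 100], [0, 100]])
    homography = cv2.getPerspectiveTransform(corners, patch)
    return cv2.warpPerspective(image, homography, (200, 100), flags=cv2.INTER_LINEAR)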