keypoint_preprocess.py

# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is based on https://github.com/open-mmlab/mmpose/mmpose/core/post_processing/post_transforms.py
"""
import cv2
import numpy as np

class EvalAffine(object):
    """Resize an image for whole-image evaluation: the short side is scaled
    to `size` and the long side is rounded up to a multiple of the stride
    (note: `get_affine_mat_kernel` hard-codes the stride to 64, so the
    `stride` attribute stored here is not used by the kernel).
    """

    def __init__(self, size, stride=64):
        super(EvalAffine, self).__init__()
        self.size = size
        self.stride = stride

    def __call__(self, image, im_info):
        s = self.size
        h, w, _ = image.shape
        trans, size_resized = get_affine_mat_kernel(h, w, s, inv=False)
        image_resized = cv2.warpAffine(image, trans, size_resized)
        return image_resized, im_info
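
# Illustrative usage sketch (not part of the original file): exercises
# EvalAffine on a dummy 640x480 image; the image and size=512 below are
# assumptions chosen for demonstration.
def _demo_eval_affine():
    img = np.zeros((480, 640, 3), dtype=np.uint8)  # hypothetical input
    resized, _ = EvalAffine(size=512)(img, im_info={})
    # The short side (480) maps to 512; the long side rounds up to the
    # next multiple of 64, giving 704.
    assert resized.shape[:2] == (512, 704)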

def get_affine_mat_kernel(h, w, s, inv=False):
    """Build the affine matrix that resizes an (h, w) image so that the
    short side becomes `s` and the long side is rounded up to a multiple
    of 64, preserving the aspect ratio.
    """
    if w < h:
        w_ = s
        h_ = int(np.ceil((s / w * h) / 64.) * 64)
        scale_w = w
        scale_h = h_ / w_ * w
    else:
        h_ = s
        w_ = int(np.ceil((s / h * w) / 64.) * 64)
        scale_h = h
        scale_w = w_ / h_ * h
    center = np.array([np.round(w / 2.), np.round(h / 2.)])
    size_resized = (w_, h_)
    trans = get_affine_transform(
        center, np.array([scale_w, scale_h]), 0, size_resized, inv=inv)
    return trans, size_resized
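
# Illustrative sketch (assumed values): the kernel pins the short side to
# s, rounds the long side up to a multiple of 64, and returns a 2x3
# matrix usable with cv2.warpAffine.
def _demo_affine_mat_kernel():
    trans, size_resized = get_affine_mat_kernel(h=480, w=640, s=512)
    assert size_resized == (704, 512)  # (w_, h_)
    assert trans.shape == (2, 3)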

def get_affine_transform(center,
                         input_size,
                         rot,
                         output_size,
                         shift=(0., 0.),
                         inv=False):
    """Get the affine transform matrix, given the center, input size
    (box scale), rotation and output size.

    Args:
        center (np.ndarray[2, ]): Center of the bounding box (x, y).
        input_size (np.ndarray[2, ]): Scale of the bounding box
            wrt [width, height].
        rot (float): Rotation angle (degrees).
        output_size (np.ndarray[2, ]): Size of the destination heatmaps.
        shift (0-100%): Shift translation ratio wrt the width/height.
            Default (0., 0.).
        inv (bool): Option to invert the affine transform direction.
            (inv=False: src->dst or inv=True: dst->src)

    Returns:
        np.ndarray: The transform matrix.
    """
    assert len(center) == 2
    assert len(output_size) == 2
    assert len(shift) == 2
    if not isinstance(input_size, (np.ndarray, list)):
        input_size = np.array([input_size, input_size], dtype=np.float32)
    scale_tmp = input_size
    shift = np.array(shift)
    src_w = scale_tmp[0]
    dst_w = output_size[0]
    dst_h = output_size[1]
    rot_rad = np.pi * rot / 180
    src_dir = rotate_point([0., src_w * -0.5], rot_rad)
    dst_dir = np.array([0., dst_w * -0.5])
    src = np.zeros((3, 2), dtype=np.float32)
    src[0, :] = center + scale_tmp * shift
    src[1, :] = center + src_dir + scale_tmp * shift
    src[2, :] = _get_3rd_point(src[0, :], src[1, :])
    dst = np.zeros((3, 2), dtype=np.float32)
    dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
    dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir
    dst[2, :] = _get_3rd_point(dst[0, :], dst[1, :])
    if inv:
        trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))
    else:
        trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
    return trans
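
# Illustrative sketch (assumed values): with rot=0 and no shift, the box
# center must land exactly on the center of the output plane.
def _demo_get_affine_transform():
    center = np.array([320., 240.])
    input_size = np.array([640., 480.])  # box scale [w, h]
    trans = get_affine_transform(center, input_size, 0, (192, 256))
    mapped = trans @ np.array([320., 240., 1.])  # homogeneous point
    assert np.allclose(mapped, [96., 128.])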

def get_warp_matrix(theta, size_input, size_dst, size_target):
    """This code is based on
    https://github.com/open-mmlab/mmpose/blob/master/mmpose/core/post_processing/post_transforms.py

    Calculate the transformation matrix under the unbiased constraint.
    Paper ref: Huang et al. The Devil is in the Details: Delving into Unbiased
    Data Processing for Human Pose Estimation (CVPR 2020).

    Args:
        theta (float): Rotation angle in degrees.
        size_input (np.ndarray): Size of input image [w, h].
        size_dst (np.ndarray): Size of output image [w, h].
        size_target (np.ndarray): Size of ROI in input plane [w, h].

    Returns:
        matrix (np.ndarray): A matrix for transformation.
    """
    theta = np.deg2rad(theta)
    matrix = np.zeros((2, 3), dtype=np.float32)
    scale_x = size_dst[0] / size_target[0]
    scale_y = size_dst[1] / size_target[1]
    matrix[0, 0] = np.cos(theta) * scale_x
    matrix[0, 1] = -np.sin(theta) * scale_x
    matrix[0, 2] = scale_x * (
        -0.5 * size_input[0] * np.cos(theta) + 0.5 * size_input[1] *
        np.sin(theta) + 0.5 * size_target[0])
    matrix[1, 0] = np.sin(theta) * scale_y
    matrix[1, 1] = np.cos(theta) * scale_y
    matrix[1, 2] = scale_y * (
        -0.5 * size_input[0] * np.sin(theta) - 0.5 * size_input[1] *
        np.cos(theta) + 0.5 * size_target[1])
    return matrix
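
# Illustrative sketch (assumed values): with theta=0 and the ROI equal to
# the whole input, the UDP matrix reduces to a pure scaling; size_dst is
# trainsize - 1, following the unbiased (pixel-grid) convention.
def _demo_get_warp_matrix():
    m = get_warp_matrix(
        theta=0.,
        size_input=np.array([640., 480.]),
        size_dst=np.array([191., 255.]),
        size_target=np.array([640., 480.]))
    mapped = m @ np.array([320., 240., 1.])  # homogeneous image center
    assert np.allclose(mapped, [95.5, 127.5])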

def rotate_point(pt, angle_rad):
    """Rotate a point by an angle.

    Args:
        pt (list[float]): 2-dimensional point to be rotated.
        angle_rad (float): Rotation angle in radians.

    Returns:
        list[float]: Rotated point.
    """
    assert len(pt) == 2
    sn, cs = np.sin(angle_rad), np.cos(angle_rad)
    new_x = pt[0] * cs - pt[1] * sn
    new_y = pt[0] * sn + pt[1] * cs
    rotated_pt = [new_x, new_y]
    return rotated_pt
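
# Illustrative sketch: rotating (1, 0) by pi/2 radians yields (0, 1).
def _demo_rotate_point():
    assert np.allclose(rotate_point([1., 0.], np.pi / 2), [0., 1.])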

def _get_3rd_point(a, b):
    """To calculate the affine matrix, three pairs of points are required.
    This function is used to get the 3rd point, given 2D points a & b.

    The 3rd point is defined by rotating the vector `a - b` by 90 degrees
    anticlockwise, using b as the rotation center.

    Args:
        a (np.ndarray): point (x, y).
        b (np.ndarray): point (x, y).

    Returns:
        np.ndarray: The 3rd point.
    """
    assert len(a) == 2
    assert len(b) == 2
    direction = a - b
    third_pt = b + np.array([-direction[1], direction[0]], dtype=np.float32)
    return third_pt
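
# Illustrative sketch: with a = (1, 0) and b = (0, 0), the vector a - b
# rotated 90 degrees anticlockwise about b gives the 3rd point (0, 1).
def _demo_get_3rd_point():
    a, b = np.array([1., 0.]), np.array([0., 0.])
    assert np.allclose(_get_3rd_point(a, b), [0., 1.])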

class TopDownEvalAffine(object):
    """Apply an affine transform to the image and coords.

    Args:
        trainsize (list): [w, h], the standard size used in training.
        use_udp (bool): whether to use Unbiased Data Processing.

    __call__ takes the image and its im_info dict and returns the
    transformed image together with the unchanged im_info.
    """

    def __init__(self, trainsize, use_udp=False):
        self.trainsize = trainsize
        self.use_udp = use_udp

    def __call__(self, image, im_info):
        rot = 0
        imshape = im_info['im_shape'][::-1]  # [h, w] -> [w, h]
        center = im_info['center'] if 'center' in im_info else imshape / 2.
        scale = im_info['scale'] if 'scale' in im_info else imshape
        if self.use_udp:
            trans = get_warp_matrix(
                rot, center * 2.0,
                [self.trainsize[0] - 1.0, self.trainsize[1] - 1.0], scale)
            image = cv2.warpAffine(
                image,
                trans, (int(self.trainsize[0]), int(self.trainsize[1])),
                flags=cv2.INTER_LINEAR)
        else:
            trans = get_affine_transform(center, scale, rot, self.trainsize)
            image = cv2.warpAffine(
                image,
                trans, (int(self.trainsize[0]), int(self.trainsize[1])),
                flags=cv2.INTER_LINEAR)
        return image, im_info
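
# Illustrative usage sketch (not part of the original file): the image,
# im_shape, and trainsize values below are assumptions for demonstration.
def _demo_top_down_eval_affine():
    img = np.zeros((480, 640, 3), dtype=np.uint8)
    im_info = {'im_shape': np.array([480., 640.])}  # [h, w]
    warped, _ = TopDownEvalAffine(trainsize=[192, 256])(img, im_info)
    assert warped.shape == (256, 192, 3)  # warped to (h, w) = (256, 192)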

def expand_crop(images, rect, expand_ratio=0.3):
    """Expand a detection box by `expand_ratio` on each dimension, keep its
    aspect ratio at least 3:4 (w:h), clip it to the image bounds, and crop.

    Returns (crop, expanded_rect, original_rect), or (None, None, None)
    when the detection label is not 0.
    """
    imgh, imgw, c = images.shape
    label, conf, xmin, ymin, xmax, ymax = [int(x) for x in rect.tolist()]
    if label != 0:
        return None, None, None
    org_rect = [xmin, ymin, xmax, ymax]
    h_half = (ymax - ymin) * (1 + expand_ratio) / 2.
    w_half = (xmax - xmin) * (1 + expand_ratio) / 2.
    if h_half > w_half * 4 / 3:
        # Widen the box so that w:h does not fall below 3:4.
        w_half = h_half * 0.75
    center = [(ymin + ymax) / 2., (xmin + xmax) / 2.]
    ymin = max(0, int(center[0] - h_half))
    ymax = min(imgh - 1, int(center[0] + h_half))
    xmin = max(0, int(center[1] - w_half))
    xmax = min(imgw - 1, int(center[1] + w_half))
    return images[ymin:ymax, xmin:xmax, :], [xmin, ymin, xmax, ymax], org_rect
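
# Illustrative usage sketch (assumed values): a label-0 detection
# [label, conf, xmin, ymin, xmax, ymax] is expanded by 30%, widened to a
# 3:4 aspect ratio, and clipped to the image bounds before cropping.
def _demo_expand_crop():
    img = np.zeros((480, 640, 3), dtype=np.uint8)
    rect = np.array([0., 0.9, 100., 100., 200., 300.])
    crop, new_rect, org_rect = expand_crop(img, rect, expand_ratio=0.3)
    assert org_rect == [100, 100, 200, 300]
    assert crop.shape == (260, 195, 3)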