# crop_or_pad.py -- image crop/pad utilities.
  1. import numpy as np
  2. def crop_or_pad(image, x_min, y_min, x_max, y_max, border_value=0):
  3. """
  4. See Also:
  5. translate_image
  6. References:
  7. tf.image.resize_image_with_crop_or_pad
  8. """
  9. assert image.ndim in [2, 3]
  10. assert isinstance(x_min, int) and isinstance(y_min, int)
  11. assert isinstance(x_max, int) and isinstance(y_max, int)
  12. assert (x_min <= x_max) and (y_min <= y_max)
  13. src_height, src_width = image.shape[:2]
  14. dst_height, dst_width = y_max - y_min + 1, x_max - x_min + 1
  15. channels = 1 if image.ndim == 2 else image.shape[2]
  16. if image.ndim == 2:
  17. dst_image_shape = (dst_height, dst_width)
  18. else:
  19. dst_image_shape = (dst_height, dst_width, channels)
  20. if isinstance(border_value, (int, float)):
  21. dst_image = np.full(dst_image_shape, border_value, dtype=image.dtype)
  22. elif isinstance(border_value, tuple):
  23. assert len(border_value) == channels, \
  24. 'Expected the num of elements in tuple equals the channels' \
  25. 'of input image. Found {} vs {}'.format(
  26. len(border_value), channels)
  27. if channels == 1:
  28. dst_image = np.full(dst_image_shape, border_value[0], dtype=image.dtype)
  29. else:
  30. border_value = np.asarray(border_value, dtype=image.dtype)
  31. dst_image = np.empty(dst_image_shape, dtype=image.dtype)
  32. dst_image[:] = border_value
  33. else:
  34. raise ValueError(
  35. 'Invalid type {} for `border_value`.'.format(type(border_value)))
  36. src_x_begin = max(x_min, 0)
  37. src_x_end = min(x_max + 1, src_width)
  38. dst_x_begin = src_x_begin - x_min
  39. dst_x_end = src_x_end - x_min
  40. src_y_begin = max(y_min, 0)
  41. src_y_end = min(y_max + 1, src_height)
  42. dst_y_begin = src_y_begin - y_min
  43. dst_y_end = src_y_end - y_min
  44. if (src_x_begin >= src_x_end) or (src_y_begin >= src_y_end):
  45. return dst_image
  46. dst_image[dst_y_begin: dst_y_end, dst_x_begin: dst_x_end, ...] = \
  47. image[src_y_begin: src_y_end, src_x_begin: src_x_end, ...]
  48. return dst_image
  49. def center_crop(image, dst_width, dst_height):
  50. assert image.ndim in [2, 3]
  51. assert isinstance(dst_width, int) and isinstance(dst_height, int)
  52. assert (image.shape[0] >= dst_height) and (image.shape[1] >= dst_width)
  53. crop_top = (image.shape[0] - dst_height) // 2
  54. crop_left = (image.shape[1] - dst_width) // 2
  55. dst_image = image[crop_top: dst_height + crop_top,
  56. crop_left: dst_width + crop_left, ...]
  57. return dst_image
  58. def crop_or_pad_coords(boxes, image_width, image_height):
  59. """
  60. References:
  61. `mmcv.impad`
  62. `pad` in https://github.com/kpzhang93/MTCNN_face_detection_alignment
  63. `MtcnnDetector.pad` in https://github.com/AITTSMD/MTCNN-Tensorflow
  64. """
  65. x_mins = boxes[:, 0]
  66. y_mins = boxes[:, 1]
  67. x_maxs = boxes[:, 2]
  68. y_maxs = boxes[:, 3]
  69. dst_widths = x_maxs - x_mins + 1
  70. dst_heights = y_maxs - y_mins + 1
  71. src_x_begin = np.maximum(x_mins, 0)
  72. src_x_end = np.minimum(x_maxs + 1, image_width)
  73. dst_x_begin = src_x_begin - x_mins
  74. dst_x_end = src_x_end - x_mins
  75. src_y_begin = np.maximum(y_mins, 0)
  76. src_y_end = np.minimum(y_maxs + 1, image_height)
  77. dst_y_begin = src_y_begin - y_mins
  78. dst_y_end = src_y_end - y_mins
  79. coords = np.stack([dst_y_begin, dst_y_end, dst_x_begin, dst_x_end,
  80. src_y_begin, src_y_end, src_x_begin, src_x_end,
  81. dst_heights, dst_widths], axis=0)
  82. return coords