# crop_or_pad.py
import numpy as np
  2. def crop_or_pad(image, x_min, y_min, x_max, y_max, border_value=0):
  3. """
  4. See Also:
  5. translate_image
  6. References:
  7. tf.image.resize_image_with_crop_or_pad
  8. """
  9. assert image.ndim in [2, 3]
  10. assert isinstance(x_min, int) and isinstance(y_min, int)
  11. assert isinstance(x_max, int) and isinstance(y_max, int)
  12. assert (x_min <= x_max) and (y_min <= y_max)
  13. src_height, src_width = image.shape[:2]
  14. dst_height, dst_width = y_max - y_min + 1, x_max - x_min + 1
  15. channels = 1 if image.ndim == 2 else image.shape[2]
  16. if image.ndim == 2:
  17. dst_image_shape = (dst_height, dst_width)
  18. else:
  19. dst_image_shape = (dst_height, dst_width, channels)
  20. if isinstance(border_value, (int, float)):
  21. dst_image = np.full(dst_image_shape, border_value, dtype=image.dtype)
  22. elif isinstance(border_value, tuple):
  23. assert len(border_value) == channels, \
  24. 'Expected the num of elements in tuple equals the channels' \
  25. 'of input image. Found {} vs {}'.format(
  26. len(border_value), channels)
  27. if channels == 1:
  28. dst_image = np.full(dst_image_shape, border_value[0], dtype=image.dtype)
  29. else:
  30. border_value = np.asarray(border_value, dtype=image.dtype)
  31. dst_image = np.empty(dst_image_shape, dtype=image.dtype)
  32. dst_image[:] = border_value
  33. else:
  34. raise ValueError(
  35. 'Invalid type {} for `border_value`.'.format(type(border_value)))
  36. src_x_begin = max(x_min, 0)
  37. src_x_end = min(x_max + 1, src_width)
  38. dst_x_begin = src_x_begin - x_min
  39. dst_x_end = src_x_end - x_min
  40. src_y_begin = max(y_min, 0, )
  41. src_y_end = min(y_max + 1, src_height)
  42. dst_y_begin = src_y_begin - y_min
  43. dst_y_end = src_y_end - y_min
  44. dst_image[dst_y_begin: dst_y_end, dst_x_begin: dst_x_end, ...] = \
  45. image[src_y_begin: src_y_end, src_x_begin: src_x_end, ...]
  46. return dst_image
  47. def crop_or_pad_coords(boxes, image_width, image_height):
  48. """
  49. References:
  50. `mmcv.impad`
  51. `pad` in https://github.com/kpzhang93/MTCNN_face_detection_alignment
  52. `MtcnnDetector.pad` in https://github.com/AITTSMD/MTCNN-Tensorflow
  53. """
  54. x_mins = boxes[:, 0]
  55. y_mins = boxes[:, 1]
  56. x_maxs = boxes[:, 2]
  57. y_maxs = boxes[:, 3]
  58. dst_widths = x_maxs - x_mins + 1
  59. dst_heights = y_maxs - y_mins + 1
  60. src_x_begin = np.maximum(x_mins, 0)
  61. src_x_end = np.minimum(x_maxs + 1, image_width)
  62. dst_x_begin = src_x_begin - x_mins
  63. dst_x_end = src_x_end - x_mins
  64. src_y_begin = np.maximum(y_mins, 0)
  65. src_y_end = np.minimum(y_maxs + 1, image_height)
  66. dst_y_begin = src_y_begin - y_mins
  67. dst_y_end = src_y_end - y_mins
  68. coords = np.stack([dst_y_begin, dst_y_end, dst_x_begin, dst_x_end,
  69. src_y_begin, src_y_end, src_x_begin, src_x_end,
  70. dst_heights, dst_widths], axis=0)
  71. return coords