seal_det_warp.py 32 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972
  1. # Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. import copy
  15. import numpy as np
  16. from numpy import arctan, cos, sin, sqrt
  17. from .....utils import logging
  18. from .....utils.deps import (
  19. class_requires_deps,
  20. function_requires_deps,
  21. is_dep_available,
  22. )
  23. if is_dep_available("opencv-contrib-python"):
  24. import cv2
  25. #### [TODO] need sunting to add explanatory notes
  26. @function_requires_deps("opencv-contrib-python")
  27. def Homography(
  28. image,
  29. img_points,
  30. world_width,
  31. world_height,
  32. interpolation=None,
  33. ratio_width=1.0,
  34. ratio_height=1.0,
  35. ):
  36. if interpolation is None:
  37. interpolation = cv2.INTER_CUBIC
  38. _points = np.array(img_points).reshape(-1, 2).astype(np.float32)
  39. expand_x = int(0.5 * world_width * (ratio_width - 1))
  40. expand_y = int(0.5 * world_height * (ratio_height - 1))
  41. pt_lefttop = [expand_x, expand_y]
  42. pt_righttop = [expand_x + world_width, expand_y]
  43. pt_leftbottom = [expand_x + world_width, expand_y + world_height]
  44. pt_rightbottom = [expand_x, expand_y + world_height]
  45. pts_std = np.float32([pt_lefttop, pt_righttop, pt_leftbottom, pt_rightbottom])
  46. img_crop_width = int(world_width * ratio_width)
  47. img_crop_height = int(world_height * ratio_height)
  48. M = cv2.getPerspectiveTransform(_points, pts_std)
  49. dst_img = cv2.warpPerspective(
  50. image,
  51. M,
  52. (img_crop_width, img_crop_height),
  53. borderMode=cv2.BORDER_CONSTANT, # BORDER_CONSTANT BORDER_REPLICATE
  54. flags=interpolation,
  55. )
  56. return dst_img
  57. @class_requires_deps("opencv-contrib-python")
  58. class PlanB:
  59. def __call__(
  60. self,
  61. image,
  62. points,
  63. curveTextRectifier,
  64. interpolation=None,
  65. ratio_width=1.0,
  66. ratio_height=1.0,
  67. loss_thresh=5.0,
  68. square=False,
  69. ):
  70. """
  71. Plan B using sub-image when it failed in original image
  72. :param image:
  73. :param points:
  74. :param curveTextRectifier: CurveTextRectifier
  75. :param interpolation: cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC, cv2.INTER_LANCZOS4
  76. :param ratio_width: roi_image width expansion. It should not be smaller than 1.0
  77. :param ratio_height: roi_image height expansion. It should not be smaller than 1.0
  78. :param loss_thresh: if loss greater than loss_thresh --> get_rotate_crop_image
  79. :param square: crop square image or not. True or False. The default is False
  80. :return:
  81. """
  82. if interpolation is None:
  83. interpolation = cv2.INTER_LINEAR
  84. h, w = image.shape[:2]
  85. _points = np.array(points).reshape(-1, 2).astype(np.float32)
  86. x_min = int(np.min(_points[:, 0]))
  87. y_min = int(np.min(_points[:, 1]))
  88. x_max = int(np.max(_points[:, 0]))
  89. y_max = int(np.max(_points[:, 1]))
  90. dx = x_max - x_min
  91. dy = y_max - y_min
  92. max_d = max(dx, dy)
  93. mean_pt = np.mean(_points, 0)
  94. expand_x = (ratio_width - 1.0) * 0.5 * max_d
  95. expand_y = (ratio_height - 1.0) * 0.5 * max_d
  96. if square:
  97. x_min = np.clip(int(mean_pt[0] - max_d - expand_x), 0, w - 1)
  98. y_min = np.clip(int(mean_pt[1] - max_d - expand_y), 0, h - 1)
  99. x_max = np.clip(int(mean_pt[0] + max_d + expand_x), 0, w - 1)
  100. y_max = np.clip(int(mean_pt[1] + max_d + expand_y), 0, h - 1)
  101. else:
  102. x_min = np.clip(int(x_min - expand_x), 0, w - 1)
  103. y_min = np.clip(int(y_min - expand_y), 0, h - 1)
  104. x_max = np.clip(int(x_max + expand_x), 0, w - 1)
  105. y_max = np.clip(int(y_max + expand_y), 0, h - 1)
  106. new_image = image[y_min:y_max, x_min:x_max, :].copy()
  107. new_points = _points.copy()
  108. new_points[:, 0] -= x_min
  109. new_points[:, 1] -= y_min
  110. dst_img, loss = curveTextRectifier(
  111. new_image,
  112. new_points,
  113. interpolation,
  114. ratio_width,
  115. ratio_height,
  116. mode="calibration",
  117. )
  118. return dst_img, loss
  119. @class_requires_deps("opencv-contrib-python")
class CurveTextRectifier:
    """
    spatial transformer via monocular vision
    """

    def __init__(self):
        # Precompute the fixed virtual-camera model reused by every call.
        self.get_virtual_camera_parameter()

    def get_virtual_camera_parameter(self):
        """
        Build the virtual camera used for rectification: the 4x4 extrinsic
        matrix ``matT``, the quadric matrix ``K`` used to intersect camera
        rays with the target surface, and the inverse focal lengths.
        """
        # Virtual camera pose: rotation angles (degrees) and optical center.
        vcam_thz = 0
        vcam_thx1 = 180
        vcam_thy = 180
        vcam_thx2 = 0
        vcam_x = 0
        vcam_y = 0
        vcam_z = 100
        radian = np.pi / 180
        angle_z = radian * vcam_thz
        angle_x1 = radian * vcam_thx1
        angle_y = radian * vcam_thy
        angle_x2 = radian * vcam_thx2
        optic_x = vcam_x
        optic_y = vcam_y
        optic_z = vcam_z
        # Virtual focal lengths (pixels).
        fu = 100
        fv = 100
        # Rotation composed from successive rotations about z, x, y, x, with
        # the optical center in the last column.  NOTE(review): entry formulas
        # are transcribed as-is from the original derivation.
        matT = np.zeros((4, 4))
        matT[0, 0] = cos(angle_z) * cos(angle_y) - sin(angle_z) * sin(angle_x1) * sin(
            angle_y
        )
        matT[0, 1] = cos(angle_z) * sin(angle_y) * sin(angle_x2) - sin(angle_z) * (
            cos(angle_x1) * cos(angle_x2) - sin(angle_x1) * cos(angle_y) * sin(angle_x2)
        )
        matT[0, 2] = cos(angle_z) * sin(angle_y) * cos(angle_x2) + sin(angle_z) * (
            cos(angle_x1) * sin(angle_x2) + sin(angle_x1) * cos(angle_y) * cos(angle_x2)
        )
        matT[0, 3] = optic_x
        matT[1, 0] = sin(angle_z) * cos(angle_y) + cos(angle_z) * sin(angle_x1) * sin(
            angle_y
        )
        matT[1, 1] = sin(angle_z) * sin(angle_y) * sin(angle_x2) + cos(angle_z) * (
            cos(angle_x1) * cos(angle_x2) - sin(angle_x1) * cos(angle_y) * sin(angle_x2)
        )
        matT[1, 2] = sin(angle_z) * sin(angle_y) * cos(angle_x2) - cos(angle_z) * (
            cos(angle_x1) * sin(angle_x2) + sin(angle_x1) * cos(angle_y) * cos(angle_x2)
        )
        matT[1, 3] = optic_y
        matT[2, 0] = -cos(angle_x1) * sin(angle_y)
        matT[2, 1] = cos(angle_x1) * cos(angle_y) * sin(angle_x2) + sin(angle_x1) * cos(
            angle_x2
        )
        matT[2, 2] = cos(angle_x1) * cos(angle_y) * cos(angle_x2) - sin(angle_x1) * sin(
            angle_x2
        )
        matT[2, 3] = optic_z
        matT[3, 0] = 0
        matT[3, 1] = 0
        matT[3, 2] = 0
        matT[3, 3] = 1
        # Quadric coefficient matrix; with S[2,3] = S[3,2] = 0.5 the equation
        # x^T S x = 0 appears to select the world plane z = 0 (the flattened
        # text plane) — TODO confirm against the original derivation.
        matS = np.zeros((4, 4))
        matS[2, 3] = 0.5
        matS[3, 2] = 0.5
        self.ifu = 1 / fu
        self.ifv = 1 / fv
        self.matT = matT
        self.matS = matS
        # K expresses the quadric in camera coordinates: K = T^T · S · T.
        self.K = np.dot(matT.T, matS)
        self.K = np.dot(self.K, matT)

    def vertical_text_process(self, points, org_size):
        """
        Handle vertical text by rotating it into the horizontal frame,
        delegating to :meth:`horizontal_text_process`, then rotating the
        resulting world coordinates back.

        :param points: flat polygon coordinates [x1, y1, x2, y2, ...]
        :param org_size: (width, height) of the original image
        :return: (image_coord, world_coord, new_image_size)
        """
        org_w, org_h = org_size
        # Rotate the point sequence by one (x, y) pair so the ordering matches
        # the horizontal convention after the axis swap below.
        _points = np.array(points).reshape(-1).tolist()
        _points = np.array(_points[2:] + _points[:2]).reshape(-1, 2)
        # convert to horizontal points: 90-degree rotation (x, y) -> (y, H-1-x)
        adjusted_points = np.zeros(_points.shape, dtype=np.float32)
        adjusted_points[:, 0] = _points[:, 1]
        adjusted_points[:, 1] = org_h - _points[:, 0] - 1
        _image_coord, _world_coord, _new_image_size = self.horizontal_text_process(
            adjusted_points
        )
        # convert back to vertical: image coords keep the re-sequenced points,
        # world coords are rotated back by 90 degrees, output size is swapped.
        image_coord = _points.reshape(1, -1, 2)
        world_coord = np.zeros(_world_coord.shape, dtype=np.float32)
        world_coord[:, :, 0] = 0 - _world_coord[:, :, 1]
        world_coord[:, :, 1] = _world_coord[:, :, 0]
        world_coord[:, :, 2] = _world_coord[:, :, 2]
        new_image_size = (_new_image_size[1], _new_image_size[0])
        return image_coord, world_coord, new_image_size

    def horizontal_text_process(self, points):
        """
        Build the calibration correspondences for horizontal text: image
        coordinates (the polygon itself) and world coordinates (the polygon
        unrolled onto a flat, origin-centered rectangle).

        :param points: polygon points; first half is the top edge left-to-right,
            second half the bottom edge right-to-left (clockwise order)
        :return: (image_coord (1,N,2), world_coord (1,N,3), new_image_size (w,h))
        """
        poly = np.array(points).reshape(-1)
        dx_list = []
        dy_list = []
        # Arc-length of each segment between consecutive polygon points.
        for i in range(1, len(poly) // 2):
            xdx = poly[i * 2] - poly[(i - 1) * 2]
            xdy = poly[i * 2 + 1] - poly[(i - 1) * 2 + 1]
            d = sqrt(xdx**2 + xdy**2)
            dx_list.append(d)
        # Distance between each top point and its mirrored bottom point,
        # i.e. the local text height.
        for i in range(0, len(poly) // 4):
            ydx = poly[i * 2] - poly[len(poly) - 1 - (i * 2 + 1)]
            ydy = poly[i * 2 + 1] - poly[len(poly) - 1 - (i * 2)]
            d = sqrt(ydx**2 + ydy**2)
            dy_list.append(d)
        # Average corresponding top-edge and bottom-edge segment lengths.
        dx_list = [
            (dx_list[i] + dx_list[len(dx_list) - 1 - i]) / 2
            for i in range(len(dx_list) // 2)
        ]
        height = np.around(np.mean(dy_list))
        # Unroll the curve: lay the top edge flat at y = 0 using cumulative
        # segment lengths...
        rect_coord = [0, 0]
        for i in range(0, len(poly) // 4 - 1):
            x = rect_coord[-2]
            x += dx_list[i]
            y = 0
            rect_coord.append(x)
            rect_coord.append(y)
        rect_coord_half = copy.deepcopy(rect_coord)
        # ...then mirror it at y = height for the bottom edge (reverse order).
        for i in range(0, len(poly) // 4):
            x = rect_coord_half[len(rect_coord_half) - 2 * i - 2]
            y = height
            rect_coord.append(x)
            rect_coord.append(y)
        np_rect_coord = np.array(rect_coord).reshape(-1, 2)
        x_min = np.min(np_rect_coord[:, 0])
        y_min = np.min(np_rect_coord[:, 1])
        x_max = np.max(np_rect_coord[:, 0])
        y_max = np.max(np_rect_coord[:, 1])
        new_image_size = (int(x_max - x_min + 0.5), int(y_max - y_min + 0.5))
        # Center the world rectangle around the origin (calibration expects
        # the object frame roughly centered).
        x_mean = (x_max - x_min) / 2
        y_mean = (y_max - y_min) / 2
        np_rect_coord[:, 0] -= x_mean
        np_rect_coord[:, 1] -= y_mean
        rect_coord = np_rect_coord.reshape(-1).tolist()
        rect_coord = np.array(rect_coord).reshape(-1, 2)
        # World points are planar: z = 0 for every point.
        world_coord = np.ones((len(rect_coord), 3)) * 0
        world_coord[:, :2] = rect_coord
        image_coord = np.array(poly).reshape(1, -1, 2)
        world_coord = world_coord.reshape(1, -1, 3)
        return image_coord, world_coord, new_image_size

    def horizontal_text_estimate(self, points):
        """
        horizontal or vertical text
        :param points: polygon points
        :return: True if the bounding box is wider than it is tall (aspect
            ratio h/w <= 1.5), False for vertical text
        """
        pts = np.array(points).reshape(-1, 2)
        x_min = int(np.min(pts[:, 0]))
        y_min = int(np.min(pts[:, 1]))
        x_max = int(np.max(pts[:, 0]))
        y_max = int(np.max(pts[:, 1]))
        x = x_max - x_min
        y = y_max - y_min
        is_horizontal_text = True
        if y / x > 1.5:  # vertical text condition
            is_horizontal_text = False
        return is_horizontal_text

    def virtual_camera_to_world(self, size):
        """
        Cast a ray through every pixel of the virtual camera and intersect it
        with the quadric ``self.K``, producing a per-pixel 3D world map.

        :param size: (width, height) of the rectified output image
        :return: P, array of shape (height, width, 3) of world-space points
        """
        ifu, ifv = self.ifu, self.ifv
        K, matT = self.K, self.matT
        # Principal point at the image center; epsilon avoids exact zeros.
        ppu = size[0] / 2 + 1e-6
        ppv = size[1] / 2 + 1e-6
        P = np.zeros((size[1], size[0], 3))
        lu = np.array([i for i in range(size[0])])
        lv = np.array([i for i in range(size[1])])
        u, v = np.meshgrid(lu, lv)
        # Normalized image-plane coordinates.
        yp = (v - ppv) * ifv
        xp = (u - ppu) * ifu
        # Per-pixel ray direction in spherical form.
        angle_a = arctan(sqrt(xp * xp + yp * yp))
        angle_b = arctan(yp / xp)
        D0 = sin(angle_a) * cos(angle_b)
        D1 = sin(angle_a) * sin(angle_b)
        D2 = cos(angle_a)
        # arctan(yp/xp) loses the quadrant; flip direction where xp <= 0.
        D0[xp <= 0] = -D0[xp <= 0]
        D1[xp <= 0] = -D1[xp <= 0]
        # Coefficients of the quadratic a·t² + b·t + c = 0 obtained by
        # substituting the ray into the quadric K.
        ratio_a = (
            K[0, 0] * D0 * D0
            + K[1, 1] * D1 * D1
            + K[2, 2] * D2 * D2
            + (K[0, 1] + K[1, 0]) * D0 * D1
            + (K[0, 2] + K[2, 0]) * D0 * D2
            + (K[1, 2] + K[2, 1]) * D1 * D2
        )
        ratio_b = (
            (K[0, 3] + K[3, 0]) * D0
            + (K[1, 3] + K[3, 1]) * D1
            + (K[2, 3] + K[3, 2]) * D2
        )
        ratio_c = K[3, 3] * np.ones(ratio_b.shape)
        delta = ratio_b * ratio_b - 4 * ratio_a * ratio_c
        t = np.zeros(delta.shape)
        # Degenerate (linear) case, then the '+' root of the quadratic;
        # pixels with no real intersection (delta < 0) are zeroed.
        t[ratio_a == 0] = -ratio_c[ratio_a == 0] / ratio_b[ratio_a == 0]
        t[ratio_a != 0] = (-ratio_b[ratio_a != 0] + sqrt(delta[ratio_a != 0])) / (
            2 * ratio_a[ratio_a != 0]
        )
        t[delta < 0] = 0
        # World point = camera center + t * rotated ray direction.
        P[:, :, 0] = matT[0, 3] + t * (
            matT[0, 0] * D0 + matT[0, 1] * D1 + matT[0, 2] * D2
        )
        P[:, :, 1] = matT[1, 3] + t * (
            matT[1, 0] * D0 + matT[1, 1] * D1 + matT[1, 2] * D2
        )
        P[:, :, 2] = matT[2, 3] + t * (
            matT[2, 0] * D0 + matT[2, 1] * D1 + matT[2, 2] * D2
        )
        return P

    def world_to_image(self, image_size, world, intrinsic, distCoeffs, rotation, tvec):
        """
        Project a dense world-point map into the calibrated real camera,
        applying the OpenCV-style distortion model (rational radial +
        tangential + thin-prism + tilt terms, depending on coefficient count).

        :param image_size: (width, height) of the output map
        :param world: (h, w, 3) world-point map from virtual_camera_to_world
        :param intrinsic: 3x3 camera matrix from calibrateCamera
        :param distCoeffs: distortion coefficients (8, 12 or 14 elements)
        :param rotation: 3x3 rotation matrix (from cv2.Rodrigues)
        :param tvec: translation vector
        :return: (h, w, 2) map of projected pixel coordinates; points behind
            the camera (c3 <= 0) are zeroed
        """
        r11 = rotation[0, 0]
        r12 = rotation[0, 1]
        r13 = rotation[0, 2]
        r21 = rotation[1, 0]
        r22 = rotation[1, 1]
        r23 = rotation[1, 2]
        r31 = rotation[2, 0]
        r32 = rotation[2, 1]
        r33 = rotation[2, 2]
        t1 = tvec[0]
        t2 = tvec[1]
        t3 = tvec[2]
        # Radial (k1..k6), tangential (p1, p2) distortion coefficients.
        k1 = distCoeffs[0]
        k2 = distCoeffs[1]
        p1 = distCoeffs[2]
        p2 = distCoeffs[3]
        k3 = distCoeffs[4]
        k4 = distCoeffs[5]
        k5 = distCoeffs[6]
        k6 = distCoeffs[7]
        # Optional thin-prism (s1..s4) and tilt (tx, ty) coefficients.
        if len(distCoeffs) > 8:
            s1 = distCoeffs[8]
            s2 = distCoeffs[9]
            s3 = distCoeffs[10]
            s4 = distCoeffs[11]
        else:
            s1 = s2 = s3 = s4 = 0
        if len(distCoeffs) > 12:
            tx = distCoeffs[12]
            ty = distCoeffs[13]
        else:
            tx = ty = 0
        fu = intrinsic[0, 0]
        fv = intrinsic[1, 1]
        ppu = intrinsic[0, 2]
        ppv = intrinsic[1, 2]
        # Tilted-sensor correction matrix entries (identity when tx = ty = 0).
        # NOTE(review): formulas transcribed as-is; verify against the OpenCV
        # tilted-model documentation if modified.
        cos_tx = cos(tx)
        cos_ty = cos(ty)
        sin_tx = sin(tx)
        sin_ty = sin(ty)
        tao11 = cos_ty * cos_tx * cos_ty + sin_ty * cos_tx * sin_ty
        tao12 = cos_ty * cos_tx * sin_ty * sin_tx - sin_ty * cos_tx * cos_ty * sin_tx
        tao13 = -cos_ty * cos_tx * sin_ty * cos_tx + sin_ty * cos_tx * cos_ty * cos_tx
        tao21 = -sin_tx * sin_ty
        tao22 = cos_ty * cos_tx * cos_tx + sin_tx * cos_ty * sin_tx
        tao23 = cos_ty * cos_tx * sin_tx - sin_tx * cos_ty * cos_tx
        P = np.zeros((image_size[1], image_size[0], 2))
        # Rigid transform into camera coordinates (c3 is depth).
        c3 = r31 * world[:, :, 0] + r32 * world[:, :, 1] + r33 * world[:, :, 2] + t3
        c1 = r11 * world[:, :, 0] + r12 * world[:, :, 1] + r13 * world[:, :, 2] + t1
        c2 = r21 * world[:, :, 0] + r22 * world[:, :, 1] + r23 * world[:, :, 2] + t2
        # Perspective division to the normalized image plane.
        x1 = c1 / c3
        y1 = c2 / c3
        x12 = x1 * x1
        y12 = y1 * y1
        x1y1 = 2 * x1 * y1
        r2 = x12 + y12
        r4 = r2 * r2
        r6 = r2 * r4
        # Rational radial distortion factor.
        radial_distortion = (1 + k1 * r2 + k2 * r4 + k3 * r6) / (
            1 + k4 * r2 + k5 * r4 + k6 * r6
        )
        # Tangential + thin-prism terms.
        x2 = (
            x1 * radial_distortion + p1 * x1y1 + p2 * (r2 + 2 * x12) + s1 * r2 + s2 * r4
        )
        y2 = (
            y1 * radial_distortion + p2 * x1y1 + p1 * (r2 + 2 * y12) + s3 * r2 + s4 * r4
        )
        # Tilt correction, then pinhole projection.
        x3 = tao11 * x2 + tao12 * y2 + tao13
        y3 = tao21 * x2 + tao22 * y2 + tao23
        P[:, :, 0] = fu * x3 + ppu
        P[:, :, 1] = fv * y3 + ppv
        P[c3 <= 0] = 0
        return P

    def spatial_transform(
        self, image_data, new_image_size, mtx, dist, rvecs, tvecs, interpolation
    ):
        """
        Rectify the image with the calibrated model: build the per-pixel
        world map for the virtual camera, project it into the real camera,
        and remap the input image through that lookup table.
        """
        rotation, _ = cv2.Rodrigues(rvecs)
        world_map = self.virtual_camera_to_world(new_image_size)
        image_map = self.world_to_image(
            new_image_size, world_map, mtx, dist, rotation, tvecs
        )
        image_map = image_map.astype(np.float32)
        dst = cv2.remap(
            image_data, image_map[:, :, 0], image_map[:, :, 1], interpolation
        )
        return dst

    def calibrate(self, org_size, image_coord, world_coord):
        """
        calibration
        :param org_size: (width, height) of the original image
        :param image_coord: (1, N, 2) polygon points in the image
        :param world_coord: (1, N, 3) corresponding flattened world points
        :return: (ret, mtx, dist, rvecs, tvecs) as from cv2.calibrateCamera;
            ret is the RMS reprojection error (used as the loss)
        """
        # flag = cv2.CALIB_RATIONAL_MODEL | cv2.CALIB_TILTED_MODEL | cv2.CALIB_THIN_PRISM_MODEL
        # Primary flag set, plus fallback combinations tried when the first
        # calibration's error is too large.
        flag = cv2.CALIB_RATIONAL_MODEL
        flag2 = cv2.CALIB_RATIONAL_MODEL | cv2.CALIB_TILTED_MODEL
        flag3 = cv2.CALIB_RATIONAL_MODEL | cv2.CALIB_THIN_PRISM_MODEL
        flag4 = (
            cv2.CALIB_RATIONAL_MODEL
            | cv2.CALIB_ZERO_TANGENT_DIST
            | cv2.CALIB_FIX_ASPECT_RATIO
        )
        flag5 = (
            cv2.CALIB_RATIONAL_MODEL
            | cv2.CALIB_TILTED_MODEL
            | cv2.CALIB_ZERO_TANGENT_DIST
        )
        flag6 = cv2.CALIB_RATIONAL_MODEL | cv2.CALIB_FIX_ASPECT_RATIO
        flag_list = [flag2, flag3, flag4, flag5, flag6]
        ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
            world_coord.astype(np.float32),
            image_coord.astype(np.float32),
            org_size,
            None,
            None,
            flags=flag,
        )
        if ret > 2:
            # strategies: retry with each fallback flag set and keep the best.
            min_ret = ret
            for i, flag in enumerate(flag_list):
                _ret, _mtx, _dist, _rvecs, _tvecs = cv2.calibrateCamera(
                    world_coord.astype(np.float32),
                    image_coord.astype(np.float32),
                    org_size,
                    None,
                    None,
                    flags=flag,
                )
                if _ret < min_ret:
                    min_ret = _ret
                    ret, mtx, dist, rvecs, tvecs = _ret, _mtx, _dist, _rvecs, _tvecs
        return ret, mtx, dist, rvecs, tvecs

    def dc_homo(
        self,
        img,
        img_points,
        obj_points,
        is_horizontal_text,
        interpolation=None,
        ratio_width=1.0,
        ratio_height=1.0,
    ):
        """
        divide and conquer: homography
        Warp each quad formed by consecutive point pairs independently, then
        stitch the strips side by side.
        # ratio_width and ratio_height must be 1.0 here
        """
        if interpolation is None:
            interpolation = cv2.INTER_LINEAR
        _img_points = img_points.reshape(-1, 2)
        _obj_points = obj_points.reshape(-1, 3)
        homo_img_list = []
        width_list = []
        height_list = []
        # divide and conquer: pair point i..i+1 on the top edge with the
        # mirrored pair on the bottom edge to form one quad per segment.
        for i in range(len(_img_points) // 2 - 1):
            new_img_points = np.zeros((4, 2)).astype(np.float32)
            new_obj_points = np.zeros((4, 2)).astype(np.float32)
            new_img_points[0:2, :] = _img_points[i : (i + 2), :2]
            new_img_points[2:4, :] = _img_points[::-1, :][i : (i + 2), :2][::-1, :]
            new_obj_points[0:2, :] = _obj_points[i : (i + 2), :2]
            new_obj_points[2:4, :] = _obj_points[::-1, :][i : (i + 2), :2][::-1, :]
            # Target strip size from the world-coordinate extents; axes are
            # swapped for vertical text.
            if is_horizontal_text:
                world_width = np.abs(new_obj_points[1, 0] - new_obj_points[0, 0])
                world_height = np.abs(new_obj_points[3, 1] - new_obj_points[0, 1])
            else:
                world_width = np.abs(new_obj_points[1, 1] - new_obj_points[0, 1])
                world_height = np.abs(new_obj_points[3, 0] - new_obj_points[0, 0])
            # Calls the module-level Homography helper (not self.Homography).
            homo_img = Homography(
                img,
                new_img_points,
                world_width,
                world_height,
                interpolation=interpolation,
                ratio_width=ratio_width,
                ratio_height=ratio_height,
            )
            homo_img_list.append(homo_img)
            _h, _w = homo_img.shape[:2]
            width_list.append(_w)
            height_list.append(_h)
        # stitching: concatenate the strips horizontally (shorter strips are
        # zero-padded at the bottom).
        rectified_image = np.zeros((np.max(height_list), sum(width_list), 3)).astype(
            np.uint8
        )
        st = 0
        for homo_img, w, h in zip(homo_img_list, width_list, height_list):
            rectified_image[:h, st : st + w, :] = homo_img
            st += w
        if not is_horizontal_text:
            # vertical rotation: rotate back to the original orientation.
            rectified_image = np.rot90(rectified_image, 3)
        return rectified_image

    def Homography(
        self,
        image,
        img_points,
        world_width,
        world_height,
        interpolation=None,
        ratio_width=1.0,
        ratio_height=1.0,
    ):
        """
        Warp a quadrilateral onto an upright rectangle.
        NOTE(review): duplicates the module-level ``Homography`` helper; kept
        for backward compatibility, but dc_homo uses the module-level one.
        """
        if interpolation is None:
            interpolation = cv2.INTER_CUBIC
        _points = np.array(img_points).reshape(-1, 2).astype(np.float32)
        # Margins added when the expansion ratios exceed 1.0.
        expand_x = int(0.5 * world_width * (ratio_width - 1))
        expand_y = int(0.5 * world_height * (ratio_height - 1))
        # Destination corners; despite the names, the list order is
        # TL, TR, BR, BL, matching clockwise source points.
        pt_lefttop = [expand_x, expand_y]
        pt_righttop = [expand_x + world_width, expand_y]
        pt_leftbottom = [expand_x + world_width, expand_y + world_height]
        pt_rightbottom = [expand_x, expand_y + world_height]
        pts_std = np.float32([pt_lefttop, pt_righttop, pt_leftbottom, pt_rightbottom])
        img_crop_width = int(world_width * ratio_width)
        img_crop_height = int(world_height * ratio_height)
        M = cv2.getPerspectiveTransform(_points, pts_std)
        dst_img = cv2.warpPerspective(
            image,
            M,
            (img_crop_width, img_crop_height),
            borderMode=cv2.BORDER_CONSTANT,  # BORDER_CONSTANT BORDER_REPLICATE
            flags=interpolation,
        )
        return dst_img

    def __call__(
        self,
        image_data,
        points,
        interpolation=None,
        ratio_width=1.0,
        ratio_height=1.0,
        mode="calibration",
    ):
        """
        spatial transform for a poly text
        :param image_data:
        :param points: [x1,y1,x2,y2,x3,y3,...], clockwise order, (x1,y1) must be the top-left of first char.
        :param interpolation: cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC, cv2.INTER_LANCZOS4
        :param ratio_width: roi_image width expansion. It should not be smaller than 1.0
        :param ratio_height: roi_image height expansion. It should not be smaller than 1.0
        :param mode: 'calibration' or 'homography'. when homography, ratio_width and ratio_height must be 1.0
        :return: (rectified image, loss); loss is the calibration RMS error,
            or a fixed 0.01 in homography mode
        """
        if interpolation is None:
            interpolation = cv2.INTER_LINEAR
        org_h, org_w = image_data.shape[:2]
        org_size = (org_w, org_h)
        self.image = image_data
        # Choose the processing path based on the region's aspect ratio.
        is_horizontal_text = self.horizontal_text_estimate(points)
        if is_horizontal_text:
            image_coord, world_coord, new_image_size = self.horizontal_text_process(
                points
            )
        else:
            image_coord, world_coord, new_image_size = self.vertical_text_process(
                points, org_size
            )
        if mode.lower() == "calibration":
            ret, mtx, dist, rvecs, tvecs = self.calibrate(
                org_size, image_coord, world_coord
            )
            st_size = (
                int(new_image_size[0] * ratio_width),
                int(new_image_size[1] * ratio_height),
            )
            dst = self.spatial_transform(
                image_data, st_size, mtx, dist[0], rvecs[0], tvecs[0], interpolation
            )
        elif mode.lower() == "homography":
            # ratio_width and ratio_height must be 1.0 here and ret set to 0.01 without loss manually
            ret = 0.01
            dst = self.dc_homo(
                image_data,
                image_coord,
                world_coord,
                is_horizontal_text,
                interpolation=interpolation,
                ratio_width=1.0,
                ratio_height=1.0,
            )
        else:
            raise ValueError(
                'mode must be ["calibration", "homography"], but got {}'.format(mode)
            )
        return dst, ret
  618. @class_requires_deps("opencv-contrib-python")
  619. class AutoRectifier:
  620. def __init__(self):
  621. self.npoints = 10
  622. self.curveTextRectifier = CurveTextRectifier()
  623. @staticmethod
  624. def get_rotate_crop_image(
  625. img, points, interpolation=None, ratio_width=1.0, ratio_height=1.0
  626. ):
  627. """
  628. crop or homography
  629. :param img:
  630. :param points:
  631. :param interpolation:
  632. :param ratio_width:
  633. :param ratio_height:
  634. :return:
  635. """
  636. if interpolation is None:
  637. interpolation = cv2.INTER_CUBIC
  638. h, w = img.shape[:2]
  639. _points = np.array(points).reshape(-1, 2).astype(np.float32)
  640. if len(_points) != 4:
  641. x_min = int(np.min(_points[:, 0]))
  642. y_min = int(np.min(_points[:, 1]))
  643. x_max = int(np.max(_points[:, 0]))
  644. y_max = int(np.max(_points[:, 1]))
  645. dx = x_max - x_min
  646. dy = y_max - y_min
  647. expand_x = int(0.5 * dx * (ratio_width - 1))
  648. expand_y = int(0.5 * dy * (ratio_height - 1))
  649. x_min = np.clip(int(x_min - expand_x), 0, w - 1)
  650. y_min = np.clip(int(y_min - expand_y), 0, h - 1)
  651. x_max = np.clip(int(x_max + expand_x), 0, w - 1)
  652. y_max = np.clip(int(y_max + expand_y), 0, h - 1)
  653. dst_img = img[y_min:y_max, x_min:x_max, :].copy()
  654. else:
  655. img_crop_width = int(
  656. max(
  657. np.linalg.norm(_points[0] - _points[1]),
  658. np.linalg.norm(_points[2] - _points[3]),
  659. )
  660. )
  661. img_crop_height = int(
  662. max(
  663. np.linalg.norm(_points[0] - _points[3]),
  664. np.linalg.norm(_points[1] - _points[2]),
  665. )
  666. )
  667. dst_img = Homography(
  668. img,
  669. _points,
  670. img_crop_width,
  671. img_crop_height,
  672. interpolation,
  673. ratio_width,
  674. ratio_height,
  675. )
  676. return dst_img
  677. def visualize(self, image_data, points_list):
  678. visualization = image_data.copy()
  679. for box in points_list:
  680. box = np.array(box).reshape(-1, 2).astype(np.int32)
  681. cv2.drawContours(
  682. visualization, [np.array(box).reshape((-1, 1, 2))], -1, (0, 0, 255), 2
  683. )
  684. for i, p in enumerate(box):
  685. if i != 0:
  686. cv2.circle(
  687. visualization,
  688. tuple(p),
  689. radius=1,
  690. color=(255, 0, 0),
  691. thickness=2,
  692. )
  693. else:
  694. cv2.circle(
  695. visualization,
  696. tuple(p),
  697. radius=1,
  698. color=(255, 255, 0),
  699. thickness=2,
  700. )
  701. return visualization
  702. def __call__(
  703. self,
  704. image_data,
  705. points,
  706. interpolation=None,
  707. ratio_width=1.0,
  708. ratio_height=1.0,
  709. loss_thresh=5.0,
  710. mode="calibration",
  711. ):
  712. """
  713. rectification in strategies for a poly text
  714. :param image_data:
  715. :param points: [x1,y1,x2,y2,x3,y3,...], clockwise order, (x1,y1) must be the top-left of first char.
  716. :param interpolation: cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC, cv2.INTER_LANCZOS4
  717. :param ratio_width: roi_image width expansion. It should not be smaller than 1.0
  718. :param ratio_height: roi_image height expansion. It should not be smaller than 1.0
  719. :param loss_thresh: if loss greater than loss_thresh --> get_rotate_crop_image
  720. :param mode: 'calibration' or 'homography'. when homography, ratio_width and ratio_height must be 1.0
  721. :return:
  722. """
  723. if interpolation is None:
  724. interpolation = cv2.INTER_LINEAR
  725. _points = np.array(points).reshape(-1, 2)
  726. if len(_points) >= self.npoints and len(_points) % 2 == 0:
  727. try:
  728. curveTextRectifier = CurveTextRectifier()
  729. dst_img, loss = curveTextRectifier(
  730. image_data, points, interpolation, ratio_width, ratio_height, mode
  731. )
  732. if loss >= 2:
  733. # for robust
  734. # large loss means it cannot be reconstruct correctly, we must find other way to reconstruct
  735. img_list, loss_list = [dst_img], [loss]
  736. _dst_img, _loss = PlanB()(
  737. image_data,
  738. points,
  739. curveTextRectifier,
  740. interpolation,
  741. ratio_width,
  742. ratio_height,
  743. loss_thresh=loss_thresh,
  744. square=True,
  745. )
  746. img_list += [_dst_img]
  747. loss_list += [_loss]
  748. _dst_img, _loss = PlanB()(
  749. image_data,
  750. points,
  751. curveTextRectifier,
  752. interpolation,
  753. ratio_width,
  754. ratio_height,
  755. loss_thresh=loss_thresh,
  756. square=False,
  757. )
  758. img_list += [_dst_img]
  759. loss_list += [_loss]
  760. min_loss = min(loss_list)
  761. dst_img = img_list[loss_list.index(min_loss)]
  762. if min_loss >= loss_thresh:
  763. logging.warning(
  764. "calibration loss: {} is too large for spatial transformer. It is failed. Using get_rotate_crop_image".format(
  765. loss
  766. )
  767. )
  768. dst_img = self.get_rotate_crop_image(
  769. image_data, points, interpolation, ratio_width, ratio_height
  770. )
  771. except Exception as e:
  772. logging.warning(f"Exception caught: {e}")
  773. dst_img = self.get_rotate_crop_image(
  774. image_data, points, interpolation, ratio_width, ratio_height
  775. )
  776. else:
  777. dst_img = self.get_rotate_crop_image(
  778. image_data, _points, interpolation, ratio_width, ratio_height
  779. )
  780. return dst_img
  781. def run(
  782. self,
  783. image_data,
  784. points_list,
  785. interpolation=None,
  786. ratio_width=1.0,
  787. ratio_height=1.0,
  788. loss_thresh=5.0,
  789. mode="calibration",
  790. ):
  791. """
  792. run for texts in an image
  793. :param image_data: numpy.ndarray. The shape is [h, w, 3]
  794. :param points_list: [[x1,y1,x2,y2,x3,y3,...], [x1,y1,x2,y2,x3,y3,...], ...], clockwise order, (x1,y1) must be the top-left of first char.
  795. :param interpolation: cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC, cv2.INTER_LANCZOS4
  796. :param ratio_width: roi_image width expansion. It should not be smaller than 1.0
  797. :param ratio_height: roi_image height expansion. It should not be smaller than 1.0
  798. :param loss_thresh: if loss greater than loss_thresh --> get_rotate_crop_image
  799. :param mode: 'calibration' or 'homography'. when homography, ratio_width and ratio_height must be 1.0
  800. :return: res: roi-image list, visualized_image: draw polys in original image
  801. """
  802. if image_data is None:
  803. raise ValueError
  804. if not isinstance(points_list, list):
  805. raise ValueError
  806. for points in points_list:
  807. if not isinstance(points, list):
  808. raise ValueError
  809. if interpolation is None:
  810. interpolation = cv2.INTER_LINEAR
  811. if ratio_width < 1.0 or ratio_height < 1.0:
  812. raise ValueError(
  813. "ratio_width and ratio_height cannot be smaller than 1, but got {}",
  814. (ratio_width, ratio_height),
  815. )
  816. if mode.lower() != "calibration" and mode.lower() != "homography":
  817. raise ValueError(
  818. 'mode must be ["calibration", "homography"], but got {}'.format(mode)
  819. )
  820. if mode.lower() == "homography" and ratio_width != 1.0 and ratio_height != 1.0:
  821. raise ValueError(
  822. "ratio_width and ratio_height must be 1.0 when mode is homography, but got mode:{}, ratio:({},{})".format(
  823. mode, ratio_width, ratio_height
  824. )
  825. )
  826. res = []
  827. for points in points_list:
  828. rectified_img = self(
  829. image_data,
  830. points,
  831. interpolation,
  832. ratio_width,
  833. ratio_height,
  834. loss_thresh=loss_thresh,
  835. mode=mode,
  836. )
  837. res.append(rectified_img)
  838. # visualize
  839. visualized_image = self.visualize(image_data, points_list)
  840. return res, visualized_image