
// This code is adapted from:
// https://github.com/open-mmlab/mmcv/blob/master/mmcv/ops/csrc/pytorch/cpu/roi_align_rotated.cpp
#include <cassert>
#include <cmath>
#include <vector>

#include "paddle/extension.h"

#define PADDLE_WITH_CUDA
#define CHECK_INPUT_SAME(x1, x2) \
  PD_CHECK(x1.place() == x2.place(), "Input tensors must be on the same place.")
#define CHECK_INPUT_CPU(x) PD_CHECK(x.is_cpu(), #x " must be a CPU Tensor.")
template <typename T> struct PreCalc {
  int pos1;
  int pos2;
  int pos3;
  int pos4;
  T w1;
  T w2;
  T w3;
  T w4;
};
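
// pre_calc_for_bilinear_interpolate fills `pre_calc` with one PreCalc entry
// per sampling point: for every output bin (ph, pw) and every sample (iy, ix)
// inside that bin, it stores the flattened indices of the four nearest
// feature-map neighbors (pos1..pos4) together with the bilinear weights
// (w1..w4). Sampling points whose rotated coordinates fall outside the feature
// map get all-zero entries, so they contribute nothing to the pooled average.
// The per-channel loop in the forward pass reuses these entries, reducing each
// sample to four multiply-adds.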
template <typename T>
void pre_calc_for_bilinear_interpolate(
    const int height, const int width, const int pooled_height,
    const int pooled_width, const int iy_upper, const int ix_upper,
    T roi_start_h, T roi_start_w, T bin_size_h, T bin_size_w,
    int roi_bin_grid_h, int roi_bin_grid_w, T roi_center_h, T roi_center_w,
    T cos_theta, T sin_theta, std::vector<PreCalc<T>> &pre_calc) {
  int pre_calc_index = 0;
  for (int ph = 0; ph < pooled_height; ph++) {
    for (int pw = 0; pw < pooled_width; pw++) {
      for (int iy = 0; iy < iy_upper; iy++) {
        const T yy = roi_start_h + ph * bin_size_h +
                     static_cast<T>(iy + .5f) * bin_size_h /
                         static_cast<T>(roi_bin_grid_h);  // e.g., 0.5, 1.5
        for (int ix = 0; ix < ix_upper; ix++) {
          const T xx = roi_start_w + pw * bin_size_w +
                       static_cast<T>(ix + .5f) * bin_size_w /
                           static_cast<T>(roi_bin_grid_w);
          // Rotate by theta around the center and translate.
          // In image space, (y, x) is the order for the right-handed system,
          // and this is essentially multiplying the point by a rotation matrix
          // to rotate it counterclockwise through angle theta.
          T y = yy * cos_theta - xx * sin_theta + roi_center_h;
          T x = yy * sin_theta + xx * cos_theta + roi_center_w;
          // Deal with cases where the inverse-mapped point is outside the
          // feature map boundary.
          if (y < -1.0 || y > height || x < -1.0 || x > width) {
            // empty
            PreCalc<T> pc;
            pc.pos1 = 0;
            pc.pos2 = 0;
            pc.pos3 = 0;
            pc.pos4 = 0;
            pc.w1 = 0;
            pc.w2 = 0;
            pc.w3 = 0;
            pc.w4 = 0;
            pre_calc[pre_calc_index] = pc;
            pre_calc_index += 1;
            continue;
          }
          if (y < 0) {
            y = 0;
          }
          if (x < 0) {
            x = 0;
          }
          int y_low = (int)y;
          int x_low = (int)x;
          int y_high;
          int x_high;
          if (y_low >= height - 1) {
            y_high = y_low = height - 1;
            y = (T)y_low;
          } else {
            y_high = y_low + 1;
          }
          if (x_low >= width - 1) {
            x_high = x_low = width - 1;
            x = (T)x_low;
          } else {
            x_high = x_low + 1;
          }
          T ly = y - y_low;
          T lx = x - x_low;
          T hy = 1. - ly, hx = 1. - lx;
          T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
          // Save weights and indices.
          PreCalc<T> pc;
          pc.pos1 = y_low * width + x_low;
          pc.pos2 = y_low * width + x_high;
          pc.pos3 = y_high * width + x_low;
          pc.pos4 = y_high * width + x_high;
          pc.w1 = w1;
          pc.w2 = w2;
          pc.w3 = w3;
          pc.w4 = w4;
          pre_calc[pre_calc_index] = pc;
          pre_calc_index += 1;
        }
      }
    }
  }
}
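
// Forward pass. Each RoI is given as 6 values:
//   [batch_index, center_x, center_y, width, height, theta],
// with the spatial quantities mapped to feature-map coordinates via
// `spatial_scale`. A sampling point (yy, xx) expressed relative to the RoI
// center is rotated and translated into feature-map coordinates as
//   y = yy * cos(theta) - xx * sin(theta) + roi_center_h
//   x = yy * sin(theta) + xx * cos(theta) + roi_center_w
// and the value at (y, x) is read with bilinear interpolation. For example,
// y = 2.3, x = 4.7 gives y_low = 2, x_low = 4, ly = 0.3, lx = 0.7, so
// w1 = 0.7 * 0.3 = 0.21, w2 = 0.7 * 0.7 = 0.49, w3 = 0.3 * 0.3 = 0.09,
// w4 = 0.3 * 0.7 = 0.21 (the weights sum to 1). Each output bin averages
// roi_bin_grid_h * roi_bin_grid_w such samples.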
template <typename T>
void roi_align_rotated_cpu_forward(const int nthreads, const T *input,
                                   const T &spatial_scale, const bool aligned,
                                   const bool clockwise, const int channels,
                                   const int height, const int width,
                                   const int pooled_height,
                                   const int pooled_width,
                                   const int sampling_ratio, const T *rois,
                                   T *output) {
  int n_rois = nthreads / channels / pooled_width / pooled_height;
  // (n, c, ph, pw) is an element in the pooled output.
  // Can be parallelized using omp:
  // #pragma omp parallel for num_threads(32)
  for (int n = 0; n < n_rois; n++) {
    int index_n = n * channels * pooled_width * pooled_height;
    const T *current_roi = rois + n * 6;
    int roi_batch_ind = current_roi[0];

    // Do not use rounding; this implementation detail is critical.
    T offset = aligned ? (T)0.5 : (T)0.0;
    T roi_center_w = current_roi[1] * spatial_scale - offset;
    T roi_center_h = current_roi[2] * spatial_scale - offset;
    T roi_width = current_roi[3] * spatial_scale;
    T roi_height = current_roi[4] * spatial_scale;
    T theta = current_roi[5];
    if (clockwise) {
      theta = -theta;  // If clockwise, the angle needs to be reversed.
    }
    T cos_theta = cos(theta);
    T sin_theta = sin(theta);
    if (aligned) {
      assert(roi_width >= 0 && roi_height >= 0);
    } else {  // for backward-compatibility only
      roi_width = std::max(roi_width, (T)1.);
      roi_height = std::max(roi_height, (T)1.);
    }
    T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
    T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);

    // We use roi_bin_grid to sample the grid and mimic integral.
    int roi_bin_grid_h = (sampling_ratio > 0)
                             ? sampling_ratio
                             : ceilf(roi_height / pooled_height);  // e.g., = 2
    int roi_bin_grid_w =
        (sampling_ratio > 0) ? sampling_ratio : ceilf(roi_width / pooled_width);

    // We do average (integral) pooling inside a bin.
    const T count = std::max(roi_bin_grid_h * roi_bin_grid_w, 1);  // e.g. = 4

    // We want to precalculate indices and weights shared by all channels;
    // this is the key point of optimization.
    std::vector<PreCalc<T>> pre_calc(roi_bin_grid_h * roi_bin_grid_w *
                                     pooled_width * pooled_height);

    // roi_start_h and roi_start_w are computed wrt the center of RoI (x, y).
    // Appropriate translation needs to be applied after.
    T roi_start_h = -roi_height / 2.0;
    T roi_start_w = -roi_width / 2.0;

    pre_calc_for_bilinear_interpolate(
        height, width, pooled_height, pooled_width, roi_bin_grid_h,
        roi_bin_grid_w, roi_start_h, roi_start_w, bin_size_h, bin_size_w,
        roi_bin_grid_h, roi_bin_grid_w, roi_center_h, roi_center_w, cos_theta,
        sin_theta, pre_calc);

    for (int c = 0; c < channels; c++) {
      int index_n_c = index_n + c * pooled_width * pooled_height;
      const T *offset_input =
          input + (roi_batch_ind * channels + c) * height * width;
      int pre_calc_index = 0;
      for (int ph = 0; ph < pooled_height; ph++) {
        for (int pw = 0; pw < pooled_width; pw++) {
          int index = index_n_c + ph * pooled_width + pw;
          T output_val = 0.;
          for (int iy = 0; iy < roi_bin_grid_h; iy++) {
            for (int ix = 0; ix < roi_bin_grid_w; ix++) {
              PreCalc<T> pc = pre_calc[pre_calc_index];
              output_val += pc.w1 * offset_input[pc.pos1] +
                            pc.w2 * offset_input[pc.pos2] +
                            pc.w3 * offset_input[pc.pos3] +
                            pc.w4 * offset_input[pc.pos4];
              pre_calc_index += 1;
            }
          }
          output_val /= count;
          output[index] = output_val;
        }  // for pw
      }    // for ph
    }      // for c
  }        // for n
}
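
// bilinear_interpolate_gradient mirrors the interpolation used in the forward
// pass: for a sample location (y, x) it returns the four neighbor indices
// (x_low/x_high, y_low/y_high) and the corresponding bilinear weights w1..w4.
// Out-of-bounds samples return all indices as -1 and zero weights, so the
// caller can skip the gradient scatter for them.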
template <typename T>
void bilinear_interpolate_gradient(const int height, const int width, T y, T x,
                                   T &w1, T &w2, T &w3, T &w4, int &x_low,
                                   int &x_high, int &y_low, int &y_high) {
  // Deal with cases where the inverse-mapped point is outside the feature map
  // boundary.
  if (y < -1.0 || y > height || x < -1.0 || x > width) {
    // empty
    w1 = w2 = w3 = w4 = 0.;
    x_low = x_high = y_low = y_high = -1;
    return;
  }
  if (y < 0) {
    y = 0;
  }
  if (x < 0) {
    x = 0;
  }
  y_low = (int)y;
  x_low = (int)x;
  if (y_low >= height - 1) {
    y_high = y_low = height - 1;
    y = (T)y_low;
  } else {
    y_high = y_low + 1;
  }
  if (x_low >= width - 1) {
    x_high = x_low = width - 1;
    x = (T)x_low;
  } else {
    x_high = x_low + 1;
  }
  T ly = y - y_low;
  T lx = x - x_low;
  T hy = 1. - ly, hx = 1. - lx;
  // Reference (forward pass):
  // T v1 = input[y_low * width + x_low];
  // T v2 = input[y_low * width + x_high];
  // T v3 = input[y_high * width + x_low];
  // T v4 = input[y_high * width + x_high];
  // T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
  w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
  return;
}
template <class T> inline void add(T *address, const T &val) {
  *address += val;
}
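
// Backward pass. Each element of grad_output corresponds to one pooled bin
// (n, c, ph, pw). Its gradient is split across the sampling points of that
// bin, and every sampling point scatters its share onto the four feature-map
// neighbors used for bilinear interpolation, weighted by w1..w4 and divided by
// the number of samples per bin (`count`). `add` is a plain accumulation; no
// atomics are needed because this CPU kernel runs single threaded.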
template <typename T>
void roi_align_rotated_cpu_backward(
    const int nthreads,
    // may not be contiguous; should index using n_stride, etc.
    const T *grad_output, const T &spatial_scale, const bool aligned,
    const bool clockwise, const int channels, const int height, const int width,
    const int pooled_height, const int pooled_width, const int sampling_ratio,
    T *grad_input, const T *rois, const int n_stride, const int c_stride,
    const int h_stride, const int w_stride) {
  for (int index = 0; index < nthreads; index++) {
    // (n, c, ph, pw) is an element in the pooled output.
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int c = (index / pooled_width / pooled_height) % channels;
    int n = index / pooled_width / pooled_height / channels;

    const T *current_roi = rois + n * 6;
    int roi_batch_ind = current_roi[0];

    // Do not use rounding; this implementation detail is critical.
    T offset = aligned ? (T)0.5 : (T)0.0;
    T roi_center_w = current_roi[1] * spatial_scale - offset;
    T roi_center_h = current_roi[2] * spatial_scale - offset;
    T roi_width = current_roi[3] * spatial_scale;
    T roi_height = current_roi[4] * spatial_scale;
    T theta = current_roi[5];
    if (clockwise) {
      theta = -theta;  // If clockwise, the angle needs to be reversed.
    }
    T cos_theta = cos(theta);
    T sin_theta = sin(theta);
    if (aligned) {
      assert(roi_width >= 0 && roi_height >= 0);
    } else {  // for backward-compatibility only
      roi_width = std::max(roi_width, (T)1.);
      roi_height = std::max(roi_height, (T)1.);
    }
    T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
    T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);

    T *offset_grad_input =
        grad_input + ((roi_batch_ind * channels + c) * height * width);

    int output_offset = n * n_stride + c * c_stride;
    const T *offset_grad_output = grad_output + output_offset;
    const T grad_output_this_bin =
        offset_grad_output[ph * h_stride + pw * w_stride];

    // We use roi_bin_grid to sample the grid and mimic integral.
    int roi_bin_grid_h = (sampling_ratio > 0)
                             ? sampling_ratio
                             : ceilf(roi_height / pooled_height);  // e.g., = 2
    int roi_bin_grid_w =
        (sampling_ratio > 0) ? sampling_ratio : ceilf(roi_width / pooled_width);

    // roi_start_h and roi_start_w are computed wrt the center of RoI (x, y).
    // Appropriate translation needs to be applied after.
    T roi_start_h = -roi_height / 2.0;
    T roi_start_w = -roi_width / 2.0;

    // We do average (integral) pooling inside a bin.
    const T count = roi_bin_grid_h * roi_bin_grid_w;  // e.g. = 4

    for (int iy = 0; iy < roi_bin_grid_h; iy++) {
      const T yy = roi_start_h + ph * bin_size_h +
                   static_cast<T>(iy + .5f) * bin_size_h /
                       static_cast<T>(roi_bin_grid_h);  // e.g., 0.5, 1.5
      for (int ix = 0; ix < roi_bin_grid_w; ix++) {
        const T xx = roi_start_w + pw * bin_size_w +
                     static_cast<T>(ix + .5f) * bin_size_w /
                         static_cast<T>(roi_bin_grid_w);
        // Rotate by theta around the center and translate.
        T y = yy * cos_theta - xx * sin_theta + roi_center_h;
        T x = yy * sin_theta + xx * cos_theta + roi_center_w;

        T w1, w2, w3, w4;
        int x_low, x_high, y_low, y_high;
        bilinear_interpolate_gradient(height, width, y, x, w1, w2, w3, w4,
                                      x_low, x_high, y_low, y_high);

        T g1 = grad_output_this_bin * w1 / count;
        T g2 = grad_output_this_bin * w2 / count;
        T g3 = grad_output_this_bin * w3 / count;
        T g4 = grad_output_this_bin * w4 / count;
        if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
          // atomic add is not needed for now since it is single threaded
          add(offset_grad_input + y_low * width + x_low, static_cast<T>(g1));
          add(offset_grad_input + y_low * width + x_high, static_cast<T>(g2));
          add(offset_grad_input + y_high * width + x_low, static_cast<T>(g3));
          add(offset_grad_input + y_high * width + x_high, static_cast<T>(g4));
        }  // if
      }    // ix
    }      // iy
  }        // for
}  // ROIAlignRotatedBackward
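
// Paddle C++ wrappers. The forward wrapper checks CPU placement, allocates
// the pooled output with paddle::empty, and uses PD_DISPATCH_FLOATING_TYPES
// to instantiate the kernel for the tensor's floating-point type. The
// backward wrapper allocates a zero-filled grad_input with paddle::full and
// dispatches the gradient kernel the same way.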
std::vector<paddle::Tensor>
RoIAlignRotatedCPUForward(const paddle::Tensor &input,
                          const paddle::Tensor &rois, int aligned_height,
                          int aligned_width, float spatial_scale,
                          int sampling_ratio, bool aligned, bool clockwise) {
  CHECK_INPUT_CPU(input);
  CHECK_INPUT_CPU(rois);

  auto num_rois = rois.shape()[0];
  auto channels = input.shape()[1];
  auto height = input.shape()[2];
  auto width = input.shape()[3];

  auto output =
      paddle::empty({num_rois, channels, aligned_height, aligned_width},
                    input.type(), paddle::CPUPlace());
  auto output_size = output.numel();

  PD_DISPATCH_FLOATING_TYPES(
      input.type(), "roi_align_rotated_cpu_forward", ([&] {
        roi_align_rotated_cpu_forward<data_t>(
            output_size, input.data<data_t>(),
            static_cast<data_t>(spatial_scale), aligned, clockwise, channels,
            height, width, aligned_height, aligned_width, sampling_ratio,
            rois.data<data_t>(), output.data<data_t>());
      }));

  return {output};
}
std::vector<paddle::Tensor> RoIAlignRotatedCPUBackward(
    const paddle::Tensor &input, const paddle::Tensor &rois,
    const paddle::Tensor &grad_output, int aligned_height, int aligned_width,
    float spatial_scale, int sampling_ratio, bool aligned, bool clockwise) {
  auto batch_size = input.shape()[0];
  auto channels = input.shape()[1];
  auto height = input.shape()[2];
  auto width = input.shape()[3];

  auto grad_input = paddle::full({batch_size, channels, height, width}, 0.0,
                                 input.type(), paddle::CPUPlace());
  // Compute strides for indexing into grad_output, assuming a contiguous
  // NCHW layout (num_rois, channels, aligned_height, aligned_width).
  int w_stride = 1;
  int h_stride = grad_output.shape()[3];
  int c_stride = grad_output.shape()[2] * h_stride;
  int n_stride = grad_output.shape()[1] * c_stride;
  PD_DISPATCH_FLOATING_TYPES(
      grad_output.type(), "roi_align_rotated_cpu_backward", [&] {
        roi_align_rotated_cpu_backward<data_t>(
            grad_output.numel(), grad_output.data<data_t>(),
            static_cast<data_t>(spatial_scale), aligned, clockwise, channels,
            height, width, aligned_height, aligned_width, sampling_ratio,
            grad_input.data<data_t>(), rois.data<data_t>(), n_stride, c_stride,
            h_stride, w_stride);
      });

  return {grad_input};
}
#ifdef PADDLE_WITH_CUDA
std::vector<paddle::Tensor>
RoIAlignRotatedCUDAForward(const paddle::Tensor &input,
                           const paddle::Tensor &rois, int aligned_height,
                           int aligned_width, float spatial_scale,
                           int sampling_ratio, bool aligned, bool clockwise);
#endif

#ifdef PADDLE_WITH_CUDA
std::vector<paddle::Tensor> RoIAlignRotatedCUDABackward(
    const paddle::Tensor &input, const paddle::Tensor &rois,
    const paddle::Tensor &grad_output, int aligned_height, int aligned_width,
    float spatial_scale, int sampling_ratio, bool aligned, bool clockwise);
#endif
std::vector<paddle::Tensor>
RoIAlignRotatedForward(const paddle::Tensor &input, const paddle::Tensor &rois,
                       int aligned_height, int aligned_width,
                       float spatial_scale, int sampling_ratio, bool aligned,
                       bool clockwise) {
  CHECK_INPUT_SAME(input, rois);
  if (input.is_cpu()) {
    return RoIAlignRotatedCPUForward(input, rois, aligned_height, aligned_width,
                                     spatial_scale, sampling_ratio, aligned,
                                     clockwise);
#ifdef PADDLE_WITH_CUDA
  } else if (input.is_gpu()) {
    return RoIAlignRotatedCUDAForward(input, rois, aligned_height,
                                      aligned_width, spatial_scale,
                                      sampling_ratio, aligned, clockwise);
#endif
  } else {
    PD_THROW("Unsupported device type for forward function of roi align "
             "rotated operator.");
  }
}
std::vector<paddle::Tensor>
RoIAlignRotatedBackward(const paddle::Tensor &input, const paddle::Tensor &rois,
                        const paddle::Tensor &grad_output, int aligned_height,
                        int aligned_width, float spatial_scale,
                        int sampling_ratio, bool aligned, bool clockwise) {
  CHECK_INPUT_SAME(input, rois);
  if (input.is_cpu()) {
    return RoIAlignRotatedCPUBackward(input, rois, grad_output, aligned_height,
                                      aligned_width, spatial_scale,
                                      sampling_ratio, aligned, clockwise);
#ifdef PADDLE_WITH_CUDA
  } else if (input.is_gpu()) {
    return RoIAlignRotatedCUDABackward(input, rois, grad_output, aligned_height,
                                       aligned_width, spatial_scale,
                                       sampling_ratio, aligned, clockwise);
#endif
  } else {
    PD_THROW("Unsupported device type for backward function of roi align "
             "rotated operator.");
  }
}
std::vector<std::vector<int64_t>> InferShape(std::vector<int64_t> input_shape,
                                             std::vector<int64_t> rois_shape) {
  return {{rois_shape[0], input_shape[1], input_shape[2], input_shape[3]}};
}

std::vector<std::vector<int64_t>>
InferBackShape(std::vector<int64_t> input_shape,
               std::vector<int64_t> rois_shape) {
  return {input_shape};
}

std::vector<paddle::DataType> InferDtype(paddle::DataType input_dtype,
                                         paddle::DataType rois_dtype) {
  return {input_dtype};
}
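
// Operator registration. PD_BUILD_OP exposes the forward computation as the
// custom op `roi_align_rotated` with inputs Input/Rois, output Output, and the
// six attributes declared below; PD_BUILD_GRAD_OP wires the gradient kernel to
// paddle::Grad("Output") / paddle::Grad("Input"). The shape and dtype
// inference functions above are attached via PD_INFER_SHAPE and
// PD_INFER_DTYPE.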
PD_BUILD_OP(roi_align_rotated)
    .Inputs({"Input", "Rois"})
    .Outputs({"Output"})
    .Attrs({"aligned_height: int", "aligned_width: int", "spatial_scale: float",
            "sampling_ratio: int", "aligned: bool", "clockwise: bool"})
    .SetKernelFn(PD_KERNEL(RoIAlignRotatedForward))
    .SetInferShapeFn(PD_INFER_SHAPE(InferShape))
    .SetInferDtypeFn(PD_INFER_DTYPE(InferDtype));

PD_BUILD_GRAD_OP(roi_align_rotated)
    .Inputs({"Input", "Rois", paddle::Grad("Output")})
    .Attrs({"aligned_height: int", "aligned_width: int", "spatial_scale: float",
            "sampling_ratio: int", "aligned: bool", "clockwise: bool"})
    .Outputs({paddle::Grad("Input")})
    .SetKernelFn(PD_KERNEL(RoIAlignRotatedBackward))
    .SetInferShapeFn(PD_INFER_SHAPE(InferBackShape));
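
// Usage note (a minimal sketch, not part of this file): after compiling this
// source with paddle.utils.cpp_extension (e.g. a setup.py using CppExtension
// or CUDAExtension), the op is expected to be callable from Python roughly as
//   out = custom_ops.roi_align_rotated(feat, rois, 7, 7, 0.25, 2, True, False)
// where `feat` is an NCHW feature map and `rois` is a (num_rois, 6) tensor of
// [batch_index, cx, cy, w, h, theta]. The module name `custom_ops` and the
// attribute values shown here are illustrative assumptions, not taken from
// this file.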