cupy-python.py

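# Usage (assuming a YOLO-style end2end TensorRT detection engine whose bindings are
# named 'image', 'num_dets', 'det_boxes', 'det_scores' and 'det_classes', as this
# script expects; the engine filename below is only illustrative):
#   python cupy-python.py yolov7-end2end.engine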
import sys
import requests
import cv2
import random
import time
import numpy as np
import cupy as cp
import tensorrt as trt
from PIL import Image
from collections import OrderedDict, namedtuple
from pathlib import Path

def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleup=True, stride=32):
    # Resize and pad image while meeting stride-multiple constraints
    shape = im.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)

    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup:  # only scale down, do not scale up (for better val mAP)
        r = min(r, 1.0)

    # Compute padding
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
    if auto:  # minimum rectangle
        dw, dh = np.mod(dw, stride), np.mod(dh, stride)  # wh padding
    dw /= 2  # divide padding into 2 sides
    dh /= 2

    if shape[::-1] != new_unpad:  # resize
        im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
    return im, r, (dw, dh)
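
# Illustrative sanity check of letterbox (not part of the original script): a 480x640
# frame with auto=False keeps a scale ratio of 1.0 and gets 80 px of padding on the
# top and bottom edges:
#   padded, r, (dw, dh) = letterbox(np.zeros((480, 640, 3), np.uint8), auto=False)
#   # padded.shape == (640, 640, 3), r == 1.0, (dw, dh) == (0.0, 80.0)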

# COCO class names
names = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
         'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
         'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
         'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
         'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
         'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
         'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
         'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
         'hair drier', 'toothbrush']
# One random drawing color per class name
colors = {name: [random.randint(0, 255) for _ in range(3)] for name in names}

# Download a test image and decode it into a BGR numpy array
url = 'https://oneflow-static.oss-cn-beijing.aliyuncs.com/tripleMu/image1.jpg'
file = requests.get(url)
img = cv2.imdecode(np.frombuffer(file.content, np.uint8), 1)

# Path to the serialized TensorRT engine, passed as the first CLI argument
w = Path(sys.argv[1])
assert w.exists() and w.suffix in ('.engine', '.plan'), 'Wrong engine path'

# ImageNet mean/std for input normalization, moved to the GPU as CuPy arrays
mean = np.array([0.485, 0.456, 0.406], dtype=np.float32).reshape(1, 3, 1, 1)
std = np.array([0.229, 0.224, 0.225], dtype=np.float32).reshape(1, 3, 1, 1)
mean = cp.asarray(mean)
std = cp.asarray(std)

# Infer TensorRT Engine
Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr'))
logger = trt.Logger(trt.Logger.INFO)
trt.init_libnvinfer_plugins(logger, namespace="")
with open(w, 'rb') as f, trt.Runtime(logger) as runtime:
    model = runtime.deserialize_cuda_engine(f.read())

# Allocate a CuPy buffer for every binding and record its device pointer
bindings = OrderedDict()
fp16 = False  # set to True below if the input binding is float16
for index in range(model.num_bindings):
    name = model.get_binding_name(index)
    dtype = trt.nptype(model.get_binding_dtype(index))
    shape = tuple(model.get_binding_shape(index))
    data = cp.empty(shape, dtype=cp.dtype(dtype))
    bindings[name] = Binding(name, dtype, shape, data, int(data.data.ptr))
    if model.binding_is_input(index) and dtype == np.float16:
        fp16 = True
binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items())
context = model.create_execution_context()

# Preprocess: letterbox to 640x640, BGR->RGB, HWC->NCHW, then normalize on the GPU
image = img.copy()
image, ratio, dwdh = letterbox(image, auto=False)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image_copy = image.copy()  # keep the letterboxed RGB image for drawing
image = image.transpose((2, 0, 1))
image = np.expand_dims(image, 0)
image = np.ascontiguousarray(image)
im = cp.asarray(image)  # host -> device copy
im = im.astype(cp.float32)
im /= 255
im -= mean
im /= std

# warmup for 10 times
for _ in range(10):
    tmp = cp.random.randn(1, 3, 640, 640).astype(cp.float32)
    binding_addrs['image'] = int(tmp.data.ptr)
    context.execute_v2(list(binding_addrs.values()))

# Timed inference on the real input
start = time.perf_counter()
binding_addrs['image'] = int(im.data.ptr)
context.execute_v2(list(binding_addrs.values()))
print(f'Cost {(time.perf_counter() - start) * 1000}ms')

# Fetch detections from the output buffers (end2end NMS outputs)
nums = bindings['num_dets'].data
boxes = bindings['det_boxes'].data
scores = bindings['det_scores'].data
classes = bindings['det_classes'].data
num = int(nums[0][0])
box_img = boxes[0, :num].round().astype(cp.int32)
score_img = scores[0, :num]
clss_img = classes[0, :num]

# Draw the detections on the letterboxed RGB image
for box, score, clss in zip(box_img, score_img, clss_img):
    name = names[int(clss)]
    color = colors[name]
    cv2.rectangle(image_copy, box[:2].tolist(), box[2:].tolist(), color, 2)
    cv2.putText(image_copy, name, (int(box[0]), int(box[1]) - 2), cv2.FONT_HERSHEY_SIMPLEX,
                0.75, [225, 255, 255], thickness=2)

cv2.imshow('Result', cv2.cvtColor(image_copy, cv2.COLOR_RGB2BGR))
cv2.waitKey(0)
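
# Note (not part of the original script): the detections above are in the 640x640
# letterbox coordinate frame, which is why they are drawn on the padded image. A
# sketch of mapping them back to the original image using the ratio and padding
# returned by letterbox:
#   dw, dh = dwdh
#   orig_boxes = (boxes[0, :num] - cp.asarray([dw, dh, dw, dh], dtype=boxes.dtype)) / ratio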