# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import os.path as osp
import sys
import yaml
import time
import shutil
import requests
import tqdm
import hashlib
import base64
import binascii
import tarfile
import zipfile
import errno

from paddle.utils.download import _get_unique_endpoints
from ppdet.core.workspace import BASE_KEY
from .logger import setup_logger
from .voc_utils import create_list

logger = setup_logger(__name__)

__all__ = [
    'get_weights_path', 'get_dataset_path', 'get_config_path',
    'download_dataset', 'create_voc_list'
]

WEIGHTS_HOME = osp.expanduser("~/.cache/paddle/weights")
DATASET_HOME = osp.expanduser("~/.cache/paddle/dataset")
CONFIGS_HOME = osp.expanduser("~/.cache/paddle/configs")

# dict of {dataset_name: (download_info, sub_dirs)}
# download_info: [(url, md5sum)]
DATASETS = {
    'coco': ([
        (
            'http://images.cocodataset.org/zips/train2017.zip',
            'cced6f7f71b7629ddf16f17bbcfab6b2', ),
        (
            'http://images.cocodataset.org/zips/val2017.zip',
            '442b8da7639aecaf257c1dceb8ba8c80', ),
        (
            'http://images.cocodataset.org/annotations/annotations_trainval2017.zip',
            'f4bbac642086de4f52a3fdda2de5fa2c', ),
    ], ["annotations", "train2017", "val2017"]),
    'voc': ([
        (
            'http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar',
            '6cd6e144f989b92b3379bac3b3de84fd', ),
        (
            'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtrainval_06-Nov-2007.tar',
            'c52e279531787c972589f7e41ab4ae64', ),
        (
            'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtest_06-Nov-2007.tar',
            'b6e924de25625d8de591ea690078ad9f', ),
        (
            'https://paddledet.bj.bcebos.com/data/label_list.txt',
            '5ae5d62183cfb6f6d3ac109359d06a1b', ),
    ], ["VOCdevkit/VOC2012", "VOCdevkit/VOC2007"]),
    'wider_face': ([
        (
            'https://dataset.bj.bcebos.com/wider_face/WIDER_train.zip',
            '3fedf70df600953d25982bcd13d91ba2', ),
        (
            'https://dataset.bj.bcebos.com/wider_face/WIDER_val.zip',
            'dfa7d7e790efa35df3788964cf0bbaea', ),
        (
            'https://dataset.bj.bcebos.com/wider_face/wider_face_split.zip',
            'a4a898d6193db4b9ef3260a68bad0dc7', ),
    ], ["WIDER_train", "WIDER_val", "wider_face_split"]),
    'fruit': ([(
        'https://dataset.bj.bcebos.com/PaddleDetection_demo/fruit.tar',
        'baa8806617a54ccf3685fa7153388ae6', ), ],
              ['Annotations', 'JPEGImages']),
    'roadsign_voc': ([(
        'https://paddlemodels.bj.bcebos.com/object_detection/roadsign_voc.tar',
        '8d629c0f880dd8b48de9aeff44bf1f3e', ), ], ['annotations', 'images']),
    'roadsign_coco': ([(
        'https://paddlemodels.bj.bcebos.com/object_detection/roadsign_coco.tar',
        '49ce5a9b5ad0d6266163cd01de4b018e', ), ], ['annotations', 'images']),
    'spine_coco': ([(
        'https://paddledet.bj.bcebos.com/data/spine.tar',
        '8a3a353c2c54a2284ad7d2780b65f6a6', ), ], ['annotations', 'images']),
    'coco_ce': ([(
        'https://paddledet.bj.bcebos.com/data/coco_ce.tar',
        'eadd1b79bc2f069f2744b1dd4e0c0329', ), ], [])
}

DOWNLOAD_DATASETS_LIST = DATASETS.keys()

DOWNLOAD_RETRY_LIMIT = 3

PPDET_WEIGHTS_DOWNLOAD_URL_PREFIX = 'https://paddledet.bj.bcebos.com/'

# When running unit tests, there could be multiple processes trying to
# create the DATA_HOME directory simultaneously, so we cannot use a
# simple `if` check for the existence of the directory; instead, we use
# the filesystem itself as the synchronization mechanism by catching
# the returned errors.
def must_mkdirs(path):
    try:
        os.makedirs(path)
    except OSError as exc:
        if exc.errno != errno.EEXIST:
            raise
        pass
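
# Note (illustrative): on Python 3, must_mkdirs is roughly equivalent to
#   os.makedirs(path, exist_ok=True)
# but the explicit EEXIST handling keeps the helper safe when several
# worker processes race to create the same directory.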

def parse_url(url):
    url = url.replace("ppdet://", PPDET_WEIGHTS_DOWNLOAD_URL_PREFIX)
    return url
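
# Illustrative example (not executed here): parse_url only rewrites the
# "ppdet://" scheme, so
#   parse_url('ppdet://configs/configs.tar')
#   -> 'https://paddledet.bj.bcebos.com/configs/configs.tar'
# and any other URL is returned unchanged.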

def get_weights_path(url):
    """Get weights path from WEIGHTS_HOME; if it does not exist,
    download it from url.
    """
    url = parse_url(url)
    path, _ = get_path(url, WEIGHTS_HOME)
    return path
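
# Illustrative usage (the model filename below is hypothetical):
#   get_weights_path('ppdet://models/some_model.pdparams')
# expands the URL via parse_url, downloads the file into
# ~/.cache/paddle/weights if it is not already cached, and returns the
# local file path.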

def get_config_path(url):
    """Get config path from CONFIGS_HOME; if it does not exist,
    download it from url.
    """
    url = parse_url(url)
    path = map_path(url, CONFIGS_HOME, path_depth=2)
    if os.path.isfile(path):
        return path

    # config file not found, try downloading
    # 1. clear configs directory
    if osp.isdir(CONFIGS_HOME):
        shutil.rmtree(CONFIGS_HOME)

    # 2. get url
    try:
        from ppdet import __version__ as version
    except ImportError:
        version = None

    cfg_url = "ppdet://configs/{}/configs.tar".format(version) \
        if version else "ppdet://configs/configs.tar"
    cfg_url = parse_url(cfg_url)

    # 3. download and decompress
    cfg_fullname = _download_dist(cfg_url, osp.dirname(CONFIGS_HOME))
    _decompress_dist(cfg_fullname)

    # 4. check that the config file now exists
    if os.path.isfile(path):
        return path
    else:
        logger.error("Get config {} failed after download, please contact us on " \
            "https://github.com/PaddlePaddle/PaddleDetection/issues".format(path))
        sys.exit(1)

def get_dataset_path(path, annotation, image_dir):
    """
    If path exists, return path.
    Otherwise, get the dataset path from DATASET_HOME; if it does not
    exist there either, download it.
    """
    if _dataset_exists(path, annotation, image_dir):
        return path

    data_name = os.path.split(path.strip().lower())[-1]
    if data_name not in DOWNLOAD_DATASETS_LIST:
        raise ValueError(
            "Dataset {} is not valid for the reasons above, please check again.".
            format(osp.realpath(path)))
    else:
        logger.warning(
            "Dataset {} is not valid for the reasons above, try searching {} or "
            "downloading dataset...".format(osp.realpath(path), DATASET_HOME))

    for name, dataset in DATASETS.items():
        if data_name == name:
            logger.debug("Parse dataset_dir {} as dataset "
                         "{}".format(path, name))
            data_dir = osp.join(DATASET_HOME, name)

            if name == "spine_coco":
                if _dataset_exists(data_dir, annotation, image_dir):
                    return data_dir

            # For voc, only check dirs VOCdevkit/VOC2012, VOCdevkit/VOC2007
            if name in ['voc', 'fruit', 'roadsign_voc']:
                exists = True
                for sub_dir in dataset[1]:
                    check_dir = osp.join(data_dir, sub_dir)
                    if osp.exists(check_dir):
                        logger.info("Found {}".format(check_dir))
                    else:
                        exists = False
                if exists:
                    return data_dir

            # voc existence is checked above, so voc does not exist here
            check_exist = name != 'voc' and name != 'fruit' and name != 'roadsign_voc'
            for url, md5sum in dataset[0]:
                get_path(url, data_dir, md5sum, check_exist)

            # voc should create the file list after download
            if name == 'voc':
                create_voc_list(data_dir)
            return data_dir

    raise ValueError("Automatic dataset download failed.")
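
# Illustrative example (paths are hypothetical):
#   get_dataset_path('dataset/coco',
#                    'annotations/instances_val2017.json', 'val2017')
# returns 'dataset/coco' directly if it passes the _dataset_exists check;
# otherwise the 'coco' entry of DATASETS is downloaded into
# ~/.cache/paddle/dataset/coco and that path is returned.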

def create_voc_list(data_dir, devkit_subdir='VOCdevkit'):
    logger.debug("Create voc file list...")
    devkit_dir = osp.join(data_dir, devkit_subdir)
    years = ['2007', '2012']

    # NOTE: since the VOC dataset is downloaded automatically here, the
    # default VOC label list should be used, so do not generate
    # label_list.txt in this function. For the default labels, see
    # ../data/source/voc.py
    create_list(devkit_dir, years, data_dir)
    logger.debug("Create voc file list finished")

def map_path(url, root_dir, path_depth=1):
    # parse path after download to decompress under root_dir
    assert path_depth > 0, "path_depth should be a positive integer"
    dirname = url
    for _ in range(path_depth):
        dirname = osp.dirname(dirname)
    fpath = osp.relpath(url, dirname)

    zip_formats = ['.zip', '.tar', '.gz']
    for zip_format in zip_formats:
        fpath = fpath.replace(zip_format, '')
    return osp.join(root_dir, fpath)
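
# Illustrative examples (not executed here; the second URL is hypothetical):
#   map_path('http://images.cocodataset.org/zips/train2017.zip', DATASET_HOME)
#   -> osp.join(DATASET_HOME, 'train2017')
#   map_path('https://host/configs/subdir/model.yml', CONFIGS_HOME, path_depth=2)
#   -> osp.join(CONFIGS_HOME, 'subdir/model.yml')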

def get_path(url, root_dir, md5sum=None, check_exist=True):
    """ Download from the given url to root_dir.
    If the file or directory specified by url already exists under
    root_dir, return the path directly; otherwise download it from
    url, decompress it, and return the path.

    url (str): download url
    root_dir (str): root dir for downloading, it should be
                    WEIGHTS_HOME or DATASET_HOME
    md5sum (str): md5 sum of the download package
    """
    # parse path after download to decompress under root_dir
    fullpath = map_path(url, root_dir)

    # For some zip files, the decompressed directory name differs from
    # the zip file name, so rename with the following map
    decompress_name_map = {
        "VOCtrainval_11-May-2012": "VOCdevkit/VOC2012",
        "VOCtrainval_06-Nov-2007": "VOCdevkit/VOC2007",
        "VOCtest_06-Nov-2007": "VOCdevkit/VOC2007",
        "annotations_trainval": "annotations"
    }
    for k, v in decompress_name_map.items():
        if fullpath.find(k) >= 0:
            fullpath = osp.join(osp.split(fullpath)[0], v)

    if osp.exists(fullpath) and check_exist:
        if not osp.isfile(fullpath) or \
                _check_exist_file_md5(fullpath, md5sum, url):
            logger.debug("Found {}".format(fullpath))
            return fullpath, True
        else:
            os.remove(fullpath)

    fullname = _download_dist(url, root_dir, md5sum)

    # the new weights format with the 'pdparams' postfix does not
    # need to be decompressed
    if osp.splitext(fullname)[-1] not in ['.pdparams', '.yml']:
        _decompress_dist(fullname)

    return fullpath, False
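
# Illustrative example (not executed here): for the VOC2012 archive,
#   get_path('http://host.robots.ox.ac.uk/pascal/VOC/voc2012/'
#            'VOCtrainval_11-May-2012.tar', DATASET_HOME, md5sum)
# resolves, via decompress_name_map, to <DATASET_HOME>/VOCdevkit/VOC2012;
# if that directory already exists the function returns (path, True),
# otherwise the tar is downloaded and decompressed and (path, False)
# is returned.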

def download_dataset(path, dataset=None):
    if dataset not in DATASETS.keys():
        logger.error("Unknown dataset {}, it should be "
                     "{}".format(dataset, DATASETS.keys()))
        return

    dataset_info = DATASETS[dataset][0]
    for info in dataset_info:
        get_path(info[0], path, info[1], False)
    logger.debug("Download dataset {} finished.".format(dataset))
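
# Illustrative usage (target directory is hypothetical):
#   download_dataset('dataset/roadsign_voc', dataset='roadsign_voc')
# fetches every (url, md5sum) pair registered for 'roadsign_voc' in
# DATASETS into 'dataset/roadsign_voc' and decompresses each archive.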

def _dataset_exists(path, annotation, image_dir):
    """
    Check if a user-defined dataset exists
    """
    if not osp.exists(path):
        logger.warning("Config dataset_dir {} does not exist, "
                       "dataset config is not valid".format(path))
        return False

    if annotation:
        annotation_path = osp.join(path, annotation)
        if not osp.isfile(annotation_path):
            logger.warning("Config annotation {} is not a "
                           "file, dataset config is not "
                           "valid".format(annotation_path))
            return False
    if image_dir:
        image_path = osp.join(path, image_dir)
        if not osp.isdir(image_path):
            logger.warning("Config image_dir {} is not a "
                           "directory, dataset config is not "
                           "valid".format(image_path))
            return False
    return True

def _download(url, path, md5sum=None):
    """
    Download from url, save to path.

    url (str): download url
    path (str): download to given path
    """
    must_mkdirs(path)

    fname = osp.split(url)[-1]
    fullname = osp.join(path, fname)
    retry_cnt = 0

    while not (osp.exists(fullname) and _check_exist_file_md5(fullname, md5sum,
                                                              url)):
        if retry_cnt < DOWNLOAD_RETRY_LIMIT:
            retry_cnt += 1
        else:
            raise RuntimeError("Download from {} failed. "
                               "Retry limit reached".format(url))

        logger.info("Downloading {} from {}".format(fname, url))

        # NOTE: windows path join may produce \, which is invalid in a url
        if sys.platform == "win32":
            url = url.replace('\\', '/')

        req = requests.get(url, stream=True)
        if req.status_code != 200:
            raise RuntimeError("Downloading from {} failed with code "
                               "{}!".format(url, req.status_code))

        # To protect against an interrupted download, download to
        # tmp_fullname first and move tmp_fullname to fullname once
        # the download has finished
        tmp_fullname = fullname + "_tmp"
        total_size = req.headers.get('content-length')
        with open(tmp_fullname, 'wb') as f:
            if total_size:
                for chunk in tqdm.tqdm(
                        req.iter_content(chunk_size=1024),
                        total=(int(total_size) + 1023) // 1024,
                        unit='KB'):
                    f.write(chunk)
            else:
                for chunk in req.iter_content(chunk_size=1024):
                    if chunk:
                        f.write(chunk)
        shutil.move(tmp_fullname, fullname)

    return fullname

def _download_dist(url, path, md5sum=None):
    env = os.environ
    if 'PADDLE_TRAINERS_NUM' in env and 'PADDLE_TRAINER_ID' in env:
        # Mainly used to avoid redundant downloads when training on
        # multiple machines: every node downloads the data, but each
        # node downloads it only once.
        # Reference: https://github.com/PaddlePaddle/PaddleClas/blob/develop/ppcls/utils/download.py#L108
        rank_id_curr_node = int(os.environ.get("PADDLE_RANK_IN_NODE", 0))
        num_trainers = int(env['PADDLE_TRAINERS_NUM'])
        if num_trainers <= 1:
            return _download(url, path, md5sum)
        else:
            fname = osp.split(url)[-1]
            fullname = osp.join(path, fname)
            lock_path = fullname + '.download.lock'

            must_mkdirs(path)

            if not osp.exists(fullname):
                with open(lock_path, 'w'):  # touch
                    os.utime(lock_path, None)
                if rank_id_curr_node == 0:
                    _download(url, path, md5sum)
                    os.remove(lock_path)
                else:
                    while os.path.exists(lock_path):
                        time.sleep(0.5)
            return fullname
    else:
        return _download(url, path, md5sum)
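
# Coordination sketch (illustrative): on a multi-machine run, the rank-0
# process of each node touches '<fullname>.download.lock', downloads the
# file and then removes the lock; the other local ranks simply poll until
# the lock file disappears, so every node ends up with exactly one copy.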

def _check_exist_file_md5(filename, md5sum, url):
    # if md5sum is None and the file to check is a weights file,
    # read the md5sum from the url and check it; otherwise check
    # md5sum directly
    return _md5check_from_url(filename, url) if md5sum is None \
        and filename.endswith('pdparams') \
        else _md5check(filename, md5sum)

def _md5check_from_url(filename, url):
    # For weights hosted on bcebos, the MD5 value is carried in the
    # response header as 'content-md5'
    req = requests.get(url, stream=True)
    content_md5 = req.headers.get('content-md5')
    req.close()
    if not content_md5 or _md5check(
            filename,
            binascii.hexlify(base64.b64decode(content_md5.strip('"'))).decode(
            )):
        return True
    else:
        return False
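
# Illustrative decoding (header value shown for an empty file): the server
# returns the MD5 digest base64-encoded, e.g.
#   content-md5: "1B2M2Y8AsgTpgAmY7PhCfg=="
# base64-decoding and hexlifying it yields the familiar hex digest
# 'd41d8cd98f00b204e9800998ecf8427e', which is then compared by _md5check.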

def _md5check(fullname, md5sum=None):
    if md5sum is None:
        return True

    logger.debug("File {} md5 checking...".format(fullname))
    md5 = hashlib.md5()
    with open(fullname, 'rb') as f:
        for chunk in iter(lambda: f.read(4096), b""):
            md5.update(chunk)
    calc_md5sum = md5.hexdigest()

    if calc_md5sum != md5sum:
        logger.warning("File {} md5 check failed, {}(calc) != "
                       "{}(base)".format(fullname, calc_md5sum, md5sum))
        return False
    return True

def _decompress(fname):
    """
    Decompress zip and tar files
    """
    logger.info("Decompressing {}...".format(fname))

    # To protect against an interrupted decompression, decompress to
    # the fpath_tmp directory first; if decompression succeeds, move
    # the decompressed files to fpath, then delete fpath_tmp and
    # remove the downloaded compressed file.
    fpath = osp.split(fname)[0]
    fpath_tmp = osp.join(fpath, 'tmp')
    if osp.isdir(fpath_tmp):
        shutil.rmtree(fpath_tmp)
    os.makedirs(fpath_tmp)

    if fname.find('tar') >= 0:
        with tarfile.open(fname) as tf:
            tf.extractall(path=fpath_tmp)
    elif fname.find('zip') >= 0:
        with zipfile.ZipFile(fname) as zf:
            zf.extractall(path=fpath_tmp)
    elif fname.find('.txt') >= 0:
        return
    else:
        raise TypeError("Unsupported compressed file type {}".format(fname))

    for f in os.listdir(fpath_tmp):
        src_dir = osp.join(fpath_tmp, f)
        dst_dir = osp.join(fpath, f)
        _move_and_merge_tree(src_dir, dst_dir)

    shutil.rmtree(fpath_tmp)
    os.remove(fname)

def _decompress_dist(fname):
    env = os.environ
    if 'PADDLE_TRAINERS_NUM' in env and 'PADDLE_TRAINER_ID' in env:
        trainer_id = int(env['PADDLE_TRAINER_ID'])
        num_trainers = int(env['PADDLE_TRAINERS_NUM'])
        if num_trainers <= 1:
            _decompress(fname)
        else:
            lock_path = fname + '.decompress.lock'
            from paddle.distributed import ParallelEnv
            unique_endpoints = _get_unique_endpoints(ParallelEnv()
                                                     .trainer_endpoints[:])
            # NOTE(dkp): _decompress_dist is always performed after
            # _download_dist; in _download_dist sub-trainers wait for
            # the download lock file to be released by sleeping. If
            # decompression is very fast and finishes within the
            # sleeping gap (e.g. for tiny datasets such as coco_ce or
            # spine_coco), the main trainer may finish decompressing
            # and release the lock file before the sub-trainers check
            # it. So we only create the lock file in the main trainer,
            # and all sub-trainers wait 1s for the main trainer to
            # create it; since 1s is twice the sleeping gap, this
            # waiting time keeps the whole trainer pipeline in order.
            # **change this if you have more elegant methods**
            if ParallelEnv().current_endpoint in unique_endpoints:
                with open(lock_path, 'w'):  # touch
                    os.utime(lock_path, None)
                _decompress(fname)
                os.remove(lock_path)
            else:
                time.sleep(1)
                while os.path.exists(lock_path):
                    time.sleep(0.5)
    else:
        _decompress(fname)

def _move_and_merge_tree(src, dst):
    """
    Move the src directory to dst; if dst already exists,
    merge src into dst
    """
    if not osp.exists(dst):
        shutil.move(src, dst)
    elif osp.isfile(src):
        shutil.move(src, dst)
    else:
        for fp in os.listdir(src):
            src_fp = osp.join(src, fp)
            dst_fp = osp.join(dst, fp)
            if osp.isdir(src_fp):
                if osp.isdir(dst_fp):
                    _move_and_merge_tree(src_fp, dst_fp)
                else:
                    shutil.move(src_fp, dst_fp)
            elif osp.isfile(src_fp) and \
                    not osp.isfile(dst_fp):
                shutil.move(src_fp, dst_fp)