# download.py

# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import os.path as osp
import shutil
import requests
import tqdm
import hashlib
import binascii
import base64
import tarfile
import zipfile

from .voc_utils import create_list

import logging
logger = logging.getLogger(__name__)

__all__ = [
    'get_weights_path', 'get_dataset_path', 'download_dataset',
    'create_voc_list'
]

WEIGHTS_HOME = osp.expanduser("~/.cache/paddle/weights/static")
DATASET_HOME = osp.expanduser("~/.cache/paddle/dataset")

# dict of {dataset_name: (download_info, sub_dirs)}
# download info: [(url, md5sum)]
DATASETS = {
    'coco': ([
        (
            'http://images.cocodataset.org/zips/train2017.zip',
            'cced6f7f71b7629ddf16f17bbcfab6b2', ),
        (
            'http://images.cocodataset.org/zips/val2017.zip',
            '442b8da7639aecaf257c1dceb8ba8c80', ),
        (
            'http://images.cocodataset.org/annotations/annotations_trainval2017.zip',
            'f4bbac642086de4f52a3fdda2de5fa2c', ),
    ], ["annotations", "train2017", "val2017"]),
    'voc': ([
        (
            'http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar',
            '6cd6e144f989b92b3379bac3b3de84fd', ),
        (
            'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtrainval_06-Nov-2007.tar',
            'c52e279531787c972589f7e41ab4ae64', ),
        (
            'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtest_06-Nov-2007.tar',
            'b6e924de25625d8de591ea690078ad9f', ),
    ], ["VOCdevkit/VOC2012", "VOCdevkit/VOC2007"]),
    'wider_face': ([
        (
            'https://dataset.bj.bcebos.com/wider_face/WIDER_train.zip',
            '3fedf70df600953d25982bcd13d91ba2', ),
        (
            'https://dataset.bj.bcebos.com/wider_face/WIDER_val.zip',
            'dfa7d7e790efa35df3788964cf0bbaea', ),
        (
            'https://dataset.bj.bcebos.com/wider_face/wider_face_split.zip',
            'a4a898d6193db4b9ef3260a68bad0dc7', ),
    ], ["WIDER_train", "WIDER_val", "wider_face_split"]),
    'fruit': ([(
        'https://dataset.bj.bcebos.com/PaddleDetection_demo/fruit.tar',
        'baa8806617a54ccf3685fa7153388ae6', ), ],
              ['Annotations', 'JPEGImages']),
    'roadsign_voc': ([(
        'https://paddlemodels.bj.bcebos.com/object_detection/roadsign_voc.tar',
        '8d629c0f880dd8b48de9aeff44bf1f3e', ), ], ['annotations', 'images']),
    'roadsign_coco': ([(
        'https://paddlemodels.bj.bcebos.com/object_detection/roadsign_coco.tar',
        '49ce5a9b5ad0d6266163cd01de4b018e', ), ], ['annotations', 'images']),
    'objects365': (),
}
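
# As an illustration only (not part of the original module), a new
# downloadable dataset could be registered with an entry in the same
# (download_info, sub_dirs) format; the URL and md5sum below are
# hypothetical placeholders:
#
#     DATASETS['my_dataset'] = ([
#         ('https://example.com/my_dataset.tar', '<md5sum of the archive>'),
#     ], ['annotations', 'images'])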

DOWNLOAD_RETRY_LIMIT = 3


def get_weights_path(url):
    """Get weights path from WEIGHTS_HOME. If it does not exist,
    download it from url.
    """
    path, _ = get_path(url, WEIGHTS_HOME)
    return path
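
# Illustrative usage sketch (the URL below is a hypothetical placeholder,
# not one shipped with this module): weights are cached under WEIGHTS_HOME,
# so repeated calls return the local path without re-downloading.
#
#     local_path = get_weights_path(
#         'https://example.com/models/my_detector.pdparams')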


def get_dataset_path(path, annotation, image_dir):
    """
    If path exists, return path.
    Otherwise, look the dataset up under DATASET_HOME and
    download it if it is not found there.
    """
    if _dataset_exists(path, annotation, image_dir):
        return path

    logger.info("Dataset {} is not valid for reason above, try searching {} or "
                "downloading dataset...".format(
                    osp.realpath(path), DATASET_HOME))

    data_name = os.path.split(path.strip().lower())[-1]
    for name, dataset in DATASETS.items():
        if data_name == name:
            logger.debug("Parse dataset_dir {} as dataset "
                         "{}".format(path, name))
            if name == 'objects365':
                raise NotImplementedError(
                    "Dataset {} cannot be downloaded automatically. "
                    "Please apply for and download the dataset from "
                    "https://www.objects365.org/download.html".format(name))
            data_dir = osp.join(DATASET_HOME, name)

            # For VOC-style datasets, only check sub dirs
            if name in ['voc', 'fruit', 'roadsign_voc']:
                exists = True
                for sub_dir in dataset[1]:
                    check_dir = osp.join(data_dir, sub_dir)
                    if osp.exists(check_dir):
                        logger.info("Found {}".format(check_dir))
                    else:
                        exists = False
                if exists:
                    return data_dir

            # existence of VOC-style datasets is checked above, so reaching
            # this point means they are not present locally
            check_exist = name != 'voc' and name != 'fruit' and name != 'roadsign_voc'
            for url, md5sum in dataset[0]:
                get_path(url, data_dir, md5sum, check_exist)

            # voc should create file lists after download
            if name == 'voc':
                create_voc_list(data_dir)
            return data_dir

    # did not match any dataset in DATASETS
    raise ValueError(
        "Dataset {} is not valid and cannot parse dataset type "
        "'{}' for automatic downloading, which only supports "
        "'voc', 'coco', 'wider_face', 'fruit' and 'roadsign_voc' currently".
        format(path, osp.split(path)[-1]))
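
# Illustrative usage sketch: if 'dataset/coco' does not exist locally, the
# basename 'coco' matches the DATASETS registry and the archives are
# downloaded and decompressed under DATASET_HOME/coco. The annotation and
# image_dir arguments below are hypothetical examples of a COCO-style layout.
#
#     data_dir = get_dataset_path('dataset/coco',
#                                 'annotations/instances_val2017.json',
#                                 'val2017')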


def create_voc_list(data_dir, devkit_subdir='VOCdevkit'):
    logger.debug("Create voc file list...")
    devkit_dir = osp.join(data_dir, devkit_subdir)
    year_dirs = [osp.join(devkit_dir, x) for x in os.listdir(devkit_dir)]

    # NOTE: since the VOC dataset is auto-downloaded, the default VOC
    # label list should be used, so do not generate label_list.txt here.
    # For the default labels, see ../data/source/voc.py
    create_list(year_dirs, data_dir)
    logger.debug("Create voc file list finished")


def map_path(url, root_dir):
    # parse path after download to decompress under root_dir
    fname = osp.split(url)[-1]
    zip_formats = ['.zip', '.tar', '.gz']
    fpath = fname
    for zip_format in zip_formats:
        fpath = fpath.replace(zip_format, '')
    return osp.join(root_dir, fpath)
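
# For example, map_path('http://images.cocodataset.org/zips/train2017.zip',
# DATASET_HOME) strips the '.zip' suffix from the file name and returns
# osp.join(DATASET_HOME, 'train2017'), which is where the archive is
# expected to be decompressed.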


def get_path(url, root_dir, md5sum=None, check_exist=True):
    """ Download from the given url to root_dir.
    If the file or directory specified by url exists under
    root_dir, return the path directly; otherwise download
    from url, decompress it, and return the path.

    url (str): download url
    root_dir (str): root dir for downloading, it should be
                    WEIGHTS_HOME or DATASET_HOME
    md5sum (str): md5 sum of the downloaded package
    """
    # parse path after download to decompress under root_dir
    fullpath = map_path(url, root_dir)

    # For some zip files, the decompressed directory name differs
    # from the zip file name, so rename it with the following map
    decompress_name_map = {
        "VOCtrainval_11-May-2012": "VOCdevkit/VOC2012",
        "VOCtrainval_06-Nov-2007": "VOCdevkit/VOC2007",
        "VOCtest_06-Nov-2007": "VOCdevkit/VOC2007",
        "annotations_trainval": "annotations"
    }
    for k, v in decompress_name_map.items():
        if fullpath.find(k) >= 0:
            fullpath = osp.join(osp.split(fullpath)[0], v)

    if osp.exists(fullpath) and check_exist:
        # If fullpath is a directory, it has already been decompressed;
        # checking its MD5 is impossible, so skip the check when
        # fullpath is a directory
        if osp.isdir(fullpath) or \
                _md5check_from_req(fullpath,
                                   requests.get(url, stream=True)):
            logger.debug("Found {}".format(fullpath))
            return fullpath, True
        else:
            if osp.isdir(fullpath):
                shutil.rmtree(fullpath)
            else:
                os.remove(fullpath)

    fullname = _download(url, root_dir, md5sum)

    # the new weights format uses the '.pdparams' postfix,
    # which does not need to be decompressed
    if osp.splitext(fullname)[-1] != '.pdparams':
        _decompress(fullname)

    return fullpath, False
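
# Illustrative usage sketch: downloading the COCO 2017 validation split into
# DATASET_HOME/coco (url and md5sum taken from the DATASETS registry above);
# the second return value reports whether the data was already present.
#
#     fullpath, already_exists = get_path(
#         'http://images.cocodataset.org/zips/val2017.zip',
#         osp.join(DATASET_HOME, 'coco'),
#         md5sum='442b8da7639aecaf257c1dceb8ba8c80')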


def download_dataset(path, dataset=None):
    if dataset not in DATASETS.keys():
        logger.error("Unknown dataset {}, it should be "
                     "{}".format(dataset, DATASETS.keys()))
        return
    dataset_info = DATASETS[dataset][0]
    for info in dataset_info:
        get_path(info[0], path, info[1], False)
    logger.debug("Download dataset {} finished.".format(dataset))


def _dataset_exists(path, annotation, image_dir):
    """
    Check whether the user-defined dataset exists
    """
    if not osp.exists(path):
        logger.debug("Config dataset_dir {} does not exist, "
                     "dataset config is not valid".format(path))
        return False

    if annotation:
        annotation_path = osp.join(path, annotation)
        if not osp.exists(annotation_path):
            logger.error("Config dataset_dir {} does not exist!".format(path))
        if not osp.isfile(annotation_path):
            logger.warning("Config annotation {} is not a "
                           "file, dataset config is not "
                           "valid".format(annotation_path))
            return False
    if image_dir:
        image_path = osp.join(path, image_dir)
        if not osp.exists(image_path):
            logger.warning("Config dataset_dir {} does not exist!".format(path))
        if not osp.isdir(image_path):
            logger.warning("Config image_dir {} is not a "
                           "directory, dataset config is not "
                           "valid".format(image_path))
            return False
    return True


def _download(url, path, md5sum=None):
    """
    Download from url, save to path.

    url (str): download url
    path (str): download to given path
    """
    if not osp.exists(path):
        os.makedirs(path)

    fname = osp.split(url)[-1]
    fullname = osp.join(path, fname)
    retry_cnt = 0

    while not (osp.exists(fullname) and _md5check(fullname, md5sum)):
        if retry_cnt < DOWNLOAD_RETRY_LIMIT:
            retry_cnt += 1
        else:
            raise RuntimeError("Download from {} failed. "
                               "Retry limit reached".format(url))

        logger.info("Downloading {} from {}".format(fname, url))

        req = requests.get(url, stream=True)
        if req.status_code != 200:
            raise RuntimeError("Downloading from {} failed with code "
                               "{}!".format(url, req.status_code))

        # To guard against interrupted downloads, download to
        # tmp_fullname first, then move tmp_fullname to fullname
        # after the download finishes
        tmp_fullname = fullname + "_tmp"
        total_size = req.headers.get('content-length')
        with open(tmp_fullname, 'wb') as f:
            if total_size:
                for chunk in tqdm.tqdm(
                        req.iter_content(chunk_size=1024),
                        total=(int(total_size) + 1023) // 1024,
                        unit='KB'):
                    f.write(chunk)
            else:
                for chunk in req.iter_content(chunk_size=1024):
                    if chunk:
                        f.write(chunk)

        # check md5 after download against Content-MD5 in req.headers
        if _md5check_from_req(tmp_fullname, req):
            shutil.move(tmp_fullname, fullname)
            return fullname
        else:
            logger.warning(
                "Download from url incomplete, try downloading again...")
            os.remove(tmp_fullname)
            continue


def _md5check_from_req(weights_path, req):
    # For files hosted on bcebos URLs, the MD5 value is carried in the
    # response headers as 'Content-MD5' (base64-encoded)
    content_md5 = req.headers.get('content-md5')
    if not content_md5 or _md5check(
            weights_path,
            binascii.hexlify(base64.b64decode(content_md5.strip('"'))).decode(
            )):
        return True
    else:
        return False
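
# The header carries the raw MD5 digest base64-encoded, while _md5check
# compares hex digests, hence the b64decode + hexlify round trip. For
# example, the digest of an empty file arrives as
# '1B2M2Y8AsgTpgAmY7PhCfg==' and decodes to the hex form
# 'd41d8cd98f00b204e9800998ecf8427e'.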


def _md5check(fullname, md5sum=None):
    if md5sum is None:
        return True

    logger.debug("File {} md5 checking...".format(fullname))
    md5 = hashlib.md5()
    with open(fullname, 'rb') as f:
        for chunk in iter(lambda: f.read(4096), b""):
            md5.update(chunk)
    calc_md5sum = md5.hexdigest()

    if calc_md5sum != md5sum:
        logger.warning("File {} md5 check failed, {}(calc) != "
                       "{}(base)".format(fullname, calc_md5sum, md5sum))
        return False
    return True


def _decompress(fname):
    """
    Decompress zip and tar files
    """
    logger.info("Decompressing {}...".format(fname))

    # To guard against interrupted decompression, decompress into the
    # fpath_tmp directory first; if decompression succeeds, move the
    # decompressed files to fpath, delete fpath_tmp, and remove the
    # downloaded compressed file.
    fpath = osp.split(fname)[0]
    fpath_tmp = osp.join(fpath, 'tmp')
    if osp.isdir(fpath_tmp):
        shutil.rmtree(fpath_tmp)
    os.makedirs(fpath_tmp)

    if fname.find('tar') >= 0:
        with tarfile.open(fname) as tf:
            tf.extractall(path=fpath_tmp)
    elif fname.find('zip') >= 0:
        with zipfile.ZipFile(fname) as zf:
            zf.extractall(path=fpath_tmp)
    else:
        raise TypeError("Unsupported compressed file type {}".format(fname))

    for f in os.listdir(fpath_tmp):
        src_dir = osp.join(fpath_tmp, f)
        dst_dir = osp.join(fpath, f)
        _move_and_merge_tree(src_dir, dst_dir)

    shutil.rmtree(fpath_tmp)
    os.remove(fname)


def _move_and_merge_tree(src, dst):
    """
    Move the src directory to dst; if dst already exists,
    merge src into dst
    """
    if not osp.exists(dst):
        shutil.move(src, dst)
    elif osp.isfile(src):
        shutil.move(src, dst)
    else:
        for fp in os.listdir(src):
            src_fp = osp.join(src, fp)
            dst_fp = osp.join(dst, fp)
            if osp.isdir(src_fp):
                if osp.isdir(dst_fp):
                    _move_and_merge_tree(src_fp, dst_fp)
                else:
                    shutil.move(src_fp, dst_fp)
            elif osp.isfile(src_fp) and \
                    not osp.isfile(dst_fp):
                shutil.move(src_fp, dst_fp)