Forráskód Böngészése

初始化PaddleOCR

yangjun 1 éve
szülő
commit
b0c1878a97
100 módosított fájl, 24035 hozzáadás és 2 törlés
  1. 34 2
      .gitignore
  2. 203 0
      LICENSE
  3. 10 0
      MANIFEST.in
  4. 35 0
      PPOCRLabel/Makefile
  5. 2840 0
      PPOCRLabel/PPOCRLabel.py
  6. 281 0
      PPOCRLabel/README.md
  7. 264 0
      PPOCRLabel/README_ch.md
  8. 0 0
      PPOCRLabel/__init__.py
  9. BIN
      PPOCRLabel/data/gif/kie.gif
  10. BIN
      PPOCRLabel/data/gif/multi-point.gif
  11. BIN
      PPOCRLabel/data/gif/steps_en.gif
  12. BIN
      PPOCRLabel/data/gif/table.gif
  13. BIN
      PPOCRLabel/data/paddle.png
  14. 0 0
      PPOCRLabel/data/predefined_classes.txt
  15. 151 0
      PPOCRLabel/gen_ocr_train_val_test.py
  16. 2 0
      PPOCRLabel/libs/__init__.py
  17. 171 0
      PPOCRLabel/libs/autoDialog.py
  18. 912 0
      PPOCRLabel/libs/canvas.py
  19. 49 0
      PPOCRLabel/libs/colorDialog.py
  20. 32 0
      PPOCRLabel/libs/constants.py
  21. 143 0
      PPOCRLabel/libs/create_ml_io.py
  22. 29 0
      PPOCRLabel/libs/editinlist.py
  23. 40 0
      PPOCRLabel/libs/hashableQListWidgetItem.py
  24. 216 0
      PPOCRLabel/libs/keyDialog.py
  25. 88 0
      PPOCRLabel/libs/labelColor.py
  26. 107 0
      PPOCRLabel/libs/labelDialog.py
  27. 11715 0
      PPOCRLabel/libs/resources.py
  28. 60 0
      PPOCRLabel/libs/settings.py
  29. 264 0
      PPOCRLabel/libs/shape.py
  30. 90 0
      PPOCRLabel/libs/stringBundle.py
  31. 51 0
      PPOCRLabel/libs/toolBar.py
  32. 46 0
      PPOCRLabel/libs/unique_label_qlist_widget.py
  33. 29 0
      PPOCRLabel/libs/ustr.py
  34. 326 0
      PPOCRLabel/libs/utils.py
  35. 38 0
      PPOCRLabel/libs/zoomWidget.py
  36. 3 0
      PPOCRLabel/requirements.txt
  37. 41 0
      PPOCRLabel/resources.qrc
  38. BIN
      PPOCRLabel/resources/icons/Auto.png
  39. BIN
      PPOCRLabel/resources/icons/app.icns
  40. BIN
      PPOCRLabel/resources/icons/app.png
  41. 27 0
      PPOCRLabel/resources/icons/app.svg
  42. BIN
      PPOCRLabel/resources/icons/cancel.png
  43. BIN
      PPOCRLabel/resources/icons/close.png
  44. BIN
      PPOCRLabel/resources/icons/color.png
  45. BIN
      PPOCRLabel/resources/icons/color_line.png
  46. BIN
      PPOCRLabel/resources/icons/copy.png
  47. BIN
      PPOCRLabel/resources/icons/delete.png
  48. BIN
      PPOCRLabel/resources/icons/done.png
  49. 400 0
      PPOCRLabel/resources/icons/done.svg
  50. BIN
      PPOCRLabel/resources/icons/edit.png
  51. BIN
      PPOCRLabel/resources/icons/expert1.png
  52. BIN
      PPOCRLabel/resources/icons/expert2.png
  53. BIN
      PPOCRLabel/resources/icons/eye.png
  54. BIN
      PPOCRLabel/resources/icons/feBlend-icon.png
  55. BIN
      PPOCRLabel/resources/icons/file.png
  56. BIN
      PPOCRLabel/resources/icons/fit-width.png
  57. BIN
      PPOCRLabel/resources/icons/fit-window.png
  58. BIN
      PPOCRLabel/resources/icons/fit.png
  59. BIN
      PPOCRLabel/resources/icons/format_createml.png
  60. BIN
      PPOCRLabel/resources/icons/format_voc.png
  61. BIN
      PPOCRLabel/resources/icons/format_yolo.png
  62. BIN
      PPOCRLabel/resources/icons/help.png
  63. BIN
      PPOCRLabel/resources/icons/labels.png
  64. 819 0
      PPOCRLabel/resources/icons/labels.svg
  65. BIN
      PPOCRLabel/resources/icons/lock.png
  66. BIN
      PPOCRLabel/resources/icons/new.png
  67. BIN
      PPOCRLabel/resources/icons/next.png
  68. BIN
      PPOCRLabel/resources/icons/objects.png
  69. BIN
      PPOCRLabel/resources/icons/open.png
  70. 577 0
      PPOCRLabel/resources/icons/open.svg
  71. BIN
      PPOCRLabel/resources/icons/prev.png
  72. BIN
      PPOCRLabel/resources/icons/quit.png
  73. BIN
      PPOCRLabel/resources/icons/reRec.png
  74. BIN
      PPOCRLabel/resources/icons/resetall.png
  75. BIN
      PPOCRLabel/resources/icons/rotateLeft.png
  76. BIN
      PPOCRLabel/resources/icons/rotateRight.png
  77. BIN
      PPOCRLabel/resources/icons/save-as.png
  78. 1358 0
      PPOCRLabel/resources/icons/save-as.svg
  79. BIN
      PPOCRLabel/resources/icons/save.png
  80. 679 0
      PPOCRLabel/resources/icons/save.svg
  81. BIN
      PPOCRLabel/resources/icons/undo-cross.png
  82. BIN
      PPOCRLabel/resources/icons/undo.png
  83. BIN
      PPOCRLabel/resources/icons/verify.png
  84. BIN
      PPOCRLabel/resources/icons/zoom-in.png
  85. BIN
      PPOCRLabel/resources/icons/zoom-out.png
  86. BIN
      PPOCRLabel/resources/icons/zoom.png
  87. 116 0
      PPOCRLabel/resources/strings/strings-en.properties
  88. 116 0
      PPOCRLabel/resources/strings/strings-zh-CN.properties
  89. 8 0
      PPOCRLabel/setup.cfg
  90. 52 0
      PPOCRLabel/setup.py
  91. 243 0
      README.md
  92. 254 0
      README_ch.md
  93. 219 0
      StyleText/README.md
  94. 205 0
      StyleText/README_ch.md
  95. 0 0
      StyleText/__init__.py
  96. 0 0
      StyleText/arch/__init__.py
  97. 255 0
      StyleText/arch/base_module.py
  98. 251 0
      StyleText/arch/decoder.py
  99. 186 0
      StyleText/arch/encoder.py
  100. 0 0
      StyleText/arch/spectral_norm.py

+ 34 - 2
.gitignore

@@ -1,2 +1,34 @@
-node_modules
-.idea
+# Byte-compiled / optimized / DLL files
+__pycache__/
+.ipynb_checkpoints/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+inference/
+inference_results/
+output/
+train_data/
+log/
+*.DS_Store
+*.vs
+*.user
+*~
+*.vscode
+*.idea
+
+*.log
+.clang-format
+.clang_format.hook
+
+build/
+dist/
+paddleocr.egg-info/
+/deploy/android_demo/app/OpenCV/
+/deploy/android_demo/app/PaddleLite/
+/deploy/android_demo/app/.cxx/
+/deploy/android_demo/app/cache/
+test_tipc/web/models/
+test_tipc/web/node_modules/

+ 203 - 0
LICENSE

@@ -0,0 +1,203 @@
+Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.

+ 10 - 0
MANIFEST.in

@@ -0,0 +1,10 @@
+include LICENSE
+include README.md
+
+recursive-include ppocr/utils *.*
+recursive-include ppocr/data *.py
+recursive-include ppocr/postprocess *.py
+recursive-include tools/infer *.py
+recursive-include tools __init__.py
+recursive-include ppocr/utils/e2e_utils *.py
+recursive-include ppstructure *.py

+ 35 - 0
PPOCRLabel/Makefile

@@ -0,0 +1,35 @@
+# ex: set ts=8 noet:
+
+all: qt5 test
+
+test: testpy3
+
+testpy2:
+	python -m unittest discover tests
+
+testpy3:
+	python3 -m unittest discover tests
+
+qt4: qt4py2
+
+qt5: qt5py3
+
+qt4py2:
+	pyrcc4 -py2 -o libs/resources.py resources.qrc
+
+qt4py3:
+	pyrcc4 -py3 -o libs/resources.py resources.qrc
+
+qt5py3:
+	pyrcc5 -o libs/resources.py resources.qrc
+
+clean:
+	rm -rf ~/.labelImgSettings.pkl *.pyc dist labelImg.egg-info __pycache__ build
+
+pip_upload:
+	python3 setup.py upload
+
+long_description:
+	restview --long-description
+
+.PHONY: all

A különbségek nem kerülnek megjelenítésre, a fájl túl nagy
+ 2840 - 0
PPOCRLabel/PPOCRLabel.py


A különbségek nem kerülnek megjelenítésre, a fájl túl nagy
+ 281 - 0
PPOCRLabel/README.md


A különbségek nem kerülnek megjelenítésre, a fájl túl nagy
+ 264 - 0
PPOCRLabel/README_ch.md


+ 0 - 0
PPOCRLabel/__init__.py


BIN
PPOCRLabel/data/gif/kie.gif


BIN
PPOCRLabel/data/gif/multi-point.gif


BIN
PPOCRLabel/data/gif/steps_en.gif


BIN
PPOCRLabel/data/gif/table.gif


BIN
PPOCRLabel/data/paddle.png


+ 0 - 0
PPOCRLabel/data/predefined_classes.txt


+ 151 - 0
PPOCRLabel/gen_ocr_train_val_test.py

@@ -0,0 +1,151 @@
+# coding:utf8
+import os
+import shutil
+import random
+import argparse
+
+
+# 删除划分的训练集、验证集、测试集文件夹,重新创建一个空的文件夹
def isCreateOrDeleteFolder(path, flag):
    """Recreate the sub-folder ``flag`` under ``path`` as an empty directory.

    Any previously split train/val/test content in that folder is removed
    first, so every run starts from a clean slate.

    Returns the absolute path of the freshly created folder.
    """
    target = os.path.join(path, flag)
    if os.path.exists(target):
        shutil.rmtree(target)
    os.makedirs(target)
    return os.path.abspath(target)
+
+
def splitTrainVal(root, absTrainRootPath, absValRootPath, absTestRootPath,
                  trainTxt, valTxt, testTxt, flag):
    """Shuffle one annotated folder and copy its images into train/val/test.

    Records are read from the label file under ``root`` (Label.txt for
    detection, rec_gt.txt for recognition), shuffled, and distributed
    according to the ratio in ``args.trainValTestRatio``. Each copied image
    gets a ``path\\tlabel`` line appended to the matching list file.

    NOTE: relies on the module-level ``args`` namespace (parsed in
    ``__main__``) for file names, the ratio, and the crop-image folder name.

    :param root: folder containing the label file and images
    :param absTrainRootPath/absValRootPath/absTestRootPath: destination dirs
    :param trainTxt/valTxt/testTxt: open, appendable list-file handles
    :param flag: "det" or "rec"
    :raises ValueError: if ``flag`` is neither "det" nor "rec" (the original
        left ``labelFilePath`` unbound and crashed with NameError instead)
    """
    dataAbsPath = os.path.abspath(root)

    if flag == "det":
        labelFilePath = os.path.join(dataAbsPath, args.detLabelFileName)
    elif flag == "rec":
        labelFilePath = os.path.join(dataAbsPath, args.recLabelFileName)
    else:
        raise ValueError("flag must be 'det' or 'rec', got: {}".format(flag))

    # Read all records up front; 'with' closes the handle the original leaked.
    with open(labelFilePath, "r", encoding="UTF-8") as labelFile:
        labelFileContent = labelFile.readlines()
    random.shuffle(labelFileContent)
    labelRecordLen = len(labelFileContent)

    # The split ratio is constant — parse it once, outside the loop, and with
    # float() instead of the original eval() on user-supplied strings.
    trainValTestRatio = args.trainValTestRatio.split(":")
    trainRatio = float(trainValTestRatio[0]) / 10
    valRatio = trainRatio + float(trainValTestRatio[1]) / 10

    for index, labelRecordInfo in enumerate(labelFileContent):
        imageRelativePath = labelRecordInfo.split('\t')[0]
        imageLabel = labelRecordInfo.split('\t')[1]
        imageName = os.path.basename(imageRelativePath)

        if flag == "det":
            imagePath = os.path.join(dataAbsPath, imageName)
        else:  # "rec" — portable join instead of the hard-coded "\\" separator
            imagePath = os.path.join(dataAbsPath, args.recImageDirName, imageName)

        curRatio = index / labelRecordLen
        if curRatio < trainRatio:
            destRoot, destTxt = absTrainRootPath, trainTxt
        elif curRatio < valRatio:
            destRoot, destTxt = absValRootPath, valTxt
        else:
            destRoot, destTxt = absTestRootPath, testTxt

        imageCopyPath = os.path.join(destRoot, imageName)
        shutil.copy(imagePath, imageCopyPath)
        destTxt.write("{}\t{}".format(imageCopyPath, imageLabel))
+
+
+# 删掉存在的文件
def removeFile(path):
    """Delete the file at ``path`` if it is present; otherwise do nothing."""
    if not os.path.exists(path):
        return
    os.remove(path)
+
+
def genDetRecTrainVal(args):
    """Build train/val/test splits for both detection and recognition data.

    Recreates the det/rec output folders, removes stale list files, then
    appends the new image copies and label lists via :func:`splitTrainVal`.

    The six list-file handles are now closed in a ``finally`` block — the
    original opened them and never closed them, leaking all six on any
    exception raised during the split.
    """
    detAbsTrainRootPath = isCreateOrDeleteFolder(args.detRootPath, "train")
    detAbsValRootPath = isCreateOrDeleteFolder(args.detRootPath, "val")
    detAbsTestRootPath = isCreateOrDeleteFolder(args.detRootPath, "test")
    recAbsTrainRootPath = isCreateOrDeleteFolder(args.recRootPath, "train")
    recAbsValRootPath = isCreateOrDeleteFolder(args.recRootPath, "val")
    recAbsTestRootPath = isCreateOrDeleteFolder(args.recRootPath, "test")

    # Drop any list files left over from a previous run (files are opened in
    # append mode below, so stale content would otherwise accumulate).
    for rootPath in (args.detRootPath, args.recRootPath):
        for listName in ("train.txt", "val.txt", "test.txt"):
            removeFile(os.path.join(rootPath, listName))

    detTrainTxt = open(os.path.join(args.detRootPath, "train.txt"), "a", encoding="UTF-8")
    detValTxt = open(os.path.join(args.detRootPath, "val.txt"), "a", encoding="UTF-8")
    detTestTxt = open(os.path.join(args.detRootPath, "test.txt"), "a", encoding="UTF-8")
    recTrainTxt = open(os.path.join(args.recRootPath, "train.txt"), "a", encoding="UTF-8")
    recValTxt = open(os.path.join(args.recRootPath, "val.txt"), "a", encoding="UTF-8")
    recTestTxt = open(os.path.join(args.recRootPath, "test.txt"), "a", encoding="UTF-8")

    try:
        splitTrainVal(args.datasetRootPath, detAbsTrainRootPath, detAbsValRootPath,
                      detAbsTestRootPath, detTrainTxt, detValTxt, detTestTxt, "det")

        # Only the first os.walk() level is inspected (note the break): a
        # 'crop_img' folder directly under datasetRootPath triggers the
        # recognition split for that same root.
        for root, dirs, files in os.walk(args.datasetRootPath):
            for dirName in dirs:  # renamed: original 'dir' shadowed the builtin
                if dirName == 'crop_img':
                    splitTrainVal(root, recAbsTrainRootPath, recAbsValRootPath,
                                  recAbsTestRootPath, recTrainTxt, recValTxt,
                                  recTestTxt, "rec")
            break
    finally:
        for handle in (detTrainTxt, detValTxt, detTestTxt,
                       recTrainTxt, recValTxt, recTestTxt):
            handle.close()
+
+
+
if __name__ == "__main__":
    # Purpose: split the detection and recognition data into separate
    # train / validation / test sets.
    # Note: adjust the paths and arguments below to your own needs. Image
    # data is often annotated in batches by several people, each batch kept
    # in its own folder and labelled with PPOCRLabel; this script merges the
    # annotated folders and produces the train/val/test split.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--trainValTestRatio",
        type=str,
        default="6:2:2",
        help="ratio of trainset:valset:testset")
    parser.add_argument(
        "--datasetRootPath",
        type=str,
        default="../train_data/",
        help="path to the dataset marked by ppocrlabel, E.g, dataset folder named 1,2,3..."
    )
    parser.add_argument(
        "--detRootPath",
        type=str,
        default="../train_data/det",
        help="the path where the divided detection dataset is placed")
    parser.add_argument(
        "--recRootPath",
        type=str,
        default="../train_data/rec",
        help="the path where the divided recognition dataset is placed"
    )
    parser.add_argument(
        "--detLabelFileName",
        type=str,
        default="Label.txt",
        help="the name of the detection annotation file")
    parser.add_argument(
        "--recLabelFileName",
        type=str,
        default="rec_gt.txt",
        help="the name of the recognition annotation file"
    )
    parser.add_argument(
        "--recImageDirName",
        type=str,
        default="crop_img",
        help="the name of the folder where the cropped recognition dataset is located"
    )
    args = parser.parse_args()
    genDetRecTrainVal(args)

+ 2 - 0
PPOCRLabel/libs/__init__.py

@@ -0,0 +1,2 @@
+__version_info__ = ('1', '0', '0')
+__version__ = '.'.join(__version_info__)

+ 171 - 0
PPOCRLabel/libs/autoDialog.py

@@ -0,0 +1,171 @@
+try:
+    from PyQt5.QtGui import *
+    from PyQt5.QtCore import *
+    from PyQt5.QtWidgets import *
+except ImportError:
+    from PyQt4.QtGui import *
+    from PyQt4.QtCore import *
+
+import time
+import datetime
+import json
+import cv2
+import numpy as np
+
+from libs.utils import newIcon
+
+BB = QDialogButtonBox
+
+
class Worker(QThread):
    """Background thread that runs OCR over a list of image paths.

    Progress is reported through Qt signals; each recognition result is
    handed back to the main thread, which persists it via
    ``saveFile(mode='Auto')``.
    """
    progressBarValue = pyqtSignal(int)   # number of images processed so far
    listValue = pyqtSignal(str)          # current path / formatted results
    endsignal = pyqtSignal(int, str)     # (0, "readAll") when the run ends
    handle = 0                           # set to -1 by the dialog to cancel

    def __init__(self, ocr, mImgList, mainThread, model):
        super(Worker, self).__init__()
        self.ocr = ocr
        self.mImgList = mImgList
        self.mainThread = mainThread
        self.model = model
        self.setStackSize(1024 * 1024)

    def run(self):
        try:
            findex = 0
            for Imgpath in self.mImgList:
                if self.handle != 0:  # cancelled by the dialog
                    break
                self.listValue.emit(Imgpath)
                # Reset per image: the original only assigned result_dic on
                # the 'paddle' branch, so other models hit an unbound
                # attribute on the first image, and a skipped image re-saved
                # the PREVIOUS image's stale result.
                self.result_dic = None
                if self.model == 'paddle':
                    # np.fromfile + imdecode also handles non-ASCII paths.
                    h, w, _ = cv2.imdecode(np.fromfile(Imgpath, dtype=np.uint8), 1).shape
                    if h > 32 and w > 32:
                        self.result_dic = self.ocr.ocr(Imgpath, cls=True, det=True)[0]
                    else:
                        print('The size of', Imgpath, 'is too small to be recognised')

                # Persist the result (if any).
                if self.result_dic is None or len(self.result_dic) == 0:
                    print('Can not recognise file', Imgpath)
                else:
                    strs = ''
                    for res in self.result_dic:
                        chars = res[1][0]
                        cond = res[1][1]
                        posi = res[0]
                        strs += "Transcription: " + chars + " Probability: " + str(cond) + \
                                " Location: " + json.dumps(posi) + '\n'
                    # Sending large amounts of data repeatedly through pyqtSignal may affect the program efficiency
                    self.listValue.emit(strs)
                    self.mainThread.result_dic = self.result_dic
                    self.mainThread.filePath = Imgpath
                    # Save on the main-thread side.
                    self.mainThread.saveFile(mode='Auto')
                findex += 1
                self.progressBarValue.emit(findex)
            self.endsignal.emit(0, "readAll")
            self.exec()
        except Exception as e:
            print(e)
            raise
+
+
class AutoDialog(QDialog):
    """Modal progress dialog that drives a :class:`Worker` thread for
    automatic labelling: shows a progress bar, a scrolling result log and an
    estimated time remaining in the title bar."""

    def __init__(self, text="Enter object label", parent=None, ocr=None, mImgList=None, lenbar=0):
        super(AutoDialog, self).__init__(parent)
        self.setFixedWidth(1000)
        self.parent = parent
        self.ocr = ocr
        self.mImgList = mImgList
        self.lender = lenbar  # total number of images (progress bar range / ETA)
        self.pb = QProgressBar()
        self.pb.setRange(0, self.lender)
        self.pb.setValue(0)

        layout = QVBoxLayout()
        layout.addWidget(self.pb)
        self.model = 'paddle'
        self.listWidget = QListWidget(self)
        layout.addWidget(self.listWidget)

        self.buttonBox = bb = BB(BB.Ok | BB.Cancel, Qt.Horizontal, self)
        bb.button(BB.Ok).setIcon(newIcon('done'))
        bb.button(BB.Cancel).setIcon(newIcon('undo'))
        bb.accepted.connect(self.validate)
        bb.rejected.connect(self.reject)
        layout.addWidget(bb)
        # Ok stays disabled until the worker signals completion ("readAll").
        bb.button(BB.Ok).setEnabled(False)

        self.setLayout(layout)
        self.setWindowModality(Qt.ApplicationModal)

        self.thread_1 = Worker(self.ocr, self.mImgList, self.parent, 'paddle')
        self.thread_1.progressBarValue.connect(self.handleProgressBarSingal)
        self.thread_1.listValue.connect(self.handleListWidgetSingal)
        self.thread_1.endsignal.connect(self.handleEndsignalSignal)
        self.time_start = time.time()  # save start time

    def handleProgressBarSingal(self, i):
        """Advance the bar and show the estimated time left in the title."""
        self.pb.setValue(i)

        # calculate time left of auto labeling
        avg_time = (time.time() - self.time_start) / i  # Use average time to prevent time fluctuations
        time_left = str(datetime.timedelta(seconds=avg_time * (self.lender - i))).split(".")[0]  # Remove microseconds
        self.setWindowTitle("PPOCRLabel  --  " + f"Time Left: {time_left}")  # show

    def handleListWidgetSingal(self, i):
        """Append a log line and keep the newest item scrolled into view."""
        self.listWidget.addItem(i)
        titem = self.listWidget.item(self.listWidget.count() - 1)
        self.listWidget.scrollToItem(titem)

    def handleEndsignalSignal(self, i, msg):
        """Enable Ok / disable Cancel once the worker reports completion.

        The second parameter was named ``str`` upstream, shadowing the
        builtin; renamed (connected positionally via the signal, so safe).
        """
        if i == 0 and msg == "readAll":
            self.buttonBox.button(BB.Ok).setEnabled(True)
            self.buttonBox.button(BB.Cancel).setEnabled(False)

    def reject(self):
        """Cancel: ask the worker to stop, then close once it has exited."""
        print("reject")
        self.thread_1.handle = -1  # worker polls this flag between images
        self.thread_1.quit()
        # QThread.wait() blocks until the thread finishes; it replaces the
        # original `while not isFinished(): pass` busy-wait that pegged the
        # GUI thread at 100% CPU.
        self.thread_1.wait()
        self.accept()

    def validate(self):
        self.accept()

    def postProcess(self):
        # NOTE(review): AutoDialog never defines self.edit — this method
        # appears copied from labelDialog and will raise AttributeError if
        # ever called; kept unchanged for interface compatibility.
        try:
            self.edit.setText(self.edit.text().trimmed())
        except AttributeError:
            # PyQt5: AttributeError: 'str' object has no attribute 'trimmed'
            self.edit.setText(self.edit.text())
            print(self.edit.text())

    def popUp(self):
        """Start the worker and run the dialog modally; 1 on accept."""
        self.thread_1.start()
        return 1 if self.exec_() else None

    def closeEvent(self, event):
        # Treat the window-close button as a cancel.
        print("???")
        self.reject()

+ 912 - 0
PPOCRLabel/libs/canvas.py

@@ -0,0 +1,912 @@
+# Copyright (c) <2015-Present> Tzutalin
+# Copyright (C) 2013  MIT, Computer Science and Artificial Intelligence Laboratory. Bryan Russell, Antonio Torralba,
+# William T. Freeman. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+# associated documentation files (the "Software"), to deal in the Software without restriction, including without
+# limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
+# Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
+# the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
+# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+
+import copy
+
+from PyQt5.QtCore import Qt, pyqtSignal, QPointF, QPoint
+from PyQt5.QtGui import QPainter, QBrush, QColor, QPixmap
+from PyQt5.QtWidgets import QWidget, QMenu, QApplication
+from libs.shape import Shape
+from libs.utils import distance
+
+CURSOR_DEFAULT = Qt.ArrowCursor
+CURSOR_POINT = Qt.PointingHandCursor
+CURSOR_DRAW = Qt.CrossCursor
+CURSOR_MOVE = Qt.ClosedHandCursor
+CURSOR_GRAB = Qt.OpenHandCursor
+
+
+class Canvas(QWidget):
+    """Image canvas widget: renders the pixmap plus annotation shapes and
+    handles all mouse/keyboard interaction for creating and editing them."""
+
+    zoomRequest = pyqtSignal(int)           # wheel zoom delta
+    scrollRequest = pyqtSignal(int, int)    # (delta, Qt orientation)
+    newShape = pyqtSignal()                 # a shape was finalised
+    # selectionChanged = pyqtSignal(bool)
+    selectionChanged = pyqtSignal(list)     # new list of selected shapes
+    shapeMoved = pyqtSignal()               # geometry of a shape changed
+    drawingPolygon = pyqtSignal(bool)       # True while a polygon is in progress
+
+    CREATE, EDIT = list(range(2))           # the two interaction modes
+    _fill_drawing = False # draw shadows
+
+    epsilon = 5.0                           # vertex hit-test radius (pixels)
+
+    def __init__(self, *args, **kwargs):
+        super(Canvas, self).__init__(*args, **kwargs)
+        # Initialise local state.
+        self.mode = self.EDIT
+        self.shapes = []                    # all finalised annotation shapes
+        self.shapesBackups = []             # undo snapshots (see storeShapes)
+        self.current = None                 # shape currently being drawn
+        self.selectedShapes = []
+        self.selectedShape = None  # save the selected shape here
+        self.selectedShapesCopy = []        # shadow copies while right-drag copying
+        self.drawingLineColor = QColor(0, 0, 255)
+        self.drawingRectColor = QColor(0, 0, 255)
+        self.line = Shape(line_color=self.drawingLineColor)  # rubber-band line
+        self.prevPoint = QPointF()
+        self.offsets = QPointF(), QPointF() # bounding offsets for bounded moves
+        self.scale = 1.0
+        self.pixmap = QPixmap()
+        self.visible = {}                   # shape -> visibility flag
+        self._hideBackround = False
+        self.hideBackround = False
+        self.hShape = None                  # shape under the cursor (hover)
+        self.hVertex = None                 # hovered vertex index, or None
+        self._painter = QPainter()
+        self._cursor = CURSOR_DEFAULT
+        # Menus:
+        self.menus = (QMenu(), QMenu())
+        # Set widget options.
+        self.setMouseTracking(True)
+        self.setFocusPolicy(Qt.WheelFocus)
+        self.verified = False
+        self.drawSquare = False
+        self.fourpoint = True # ADD
+        self.pointnum = 0                   # point counter for 4-point mode
+        self.movingShape = False
+        self.selectCountShape = False
+
+        #initialisation for panning
+        self.pan_initial_pos = QPoint()
+
+        #lockedshapes related 
+        self.lockedShapes = []
+        self.isInTheSameImage = False
+
+    def setDrawingColor(self, qColor):
+        # Use one color for both the rubber-band line and the preview rect.
+        self.drawingLineColor = qColor
+        self.drawingRectColor = qColor
+
+    def enterEvent(self, ev):
+        # Restore whatever cursor this canvas last used.
+        self.overrideCursor(self._cursor)
+
+    def leaveEvent(self, ev):
+        self.restoreCursor()
+
+    def focusOutEvent(self, ev):
+        self.restoreCursor()
+
+    def isVisible(self, shape):
+        # Shapes default to visible until explicitly toggled.
+        return self.visible.get(shape, True)
+
+    def drawing(self):
+        return self.mode == self.CREATE
+
+    def editing(self):
+        return self.mode == self.EDIT
+
+    def setEditing(self, value=True):
+        # Switch between EDIT (True) and CREATE (False) modes.
+        self.mode = self.EDIT if value else self.CREATE
+        if not value:  # Create
+            self.unHighlight()
+            self.deSelectShape()
+        self.prevPoint = QPointF()
+        self.repaint()
+
+    def unHighlight(self):
+        if self.hShape:
+            self.hShape.highlightClear()
+        self.hVertex = self.hShape = None
+
+    def selectedVertex(self):
+        # True while the cursor hovers a shape vertex.
+        return self.hVertex is not None
+
+    def mouseMoveEvent(self, ev):
+        """Update line with last point and current coordinates.
+
+        Dispatch order: drawing preview -> right-drag copy move ->
+        left-drag vertex/shape move or pan -> hover highlighting.
+        """
+        pos = self.transformPos(ev.pos())
+
+        # Update coordinates in status bar if image is opened
+        window = self.parent().window()
+        if window.filePath is not None:
+            self.parent().window().labelCoordinates.setText(
+                'X: %d; Y: %d' % (pos.x(), pos.y()))
+
+        # Polygon drawing.
+        if self.drawing():
+            self.overrideCursor(CURSOR_DRAW) # ?
+            if self.current:
+                # Display annotation width and height while drawing
+                currentWidth = abs(self.current[0].x() - pos.x())
+                currentHeight = abs(self.current[0].y() - pos.y())
+                self.parent().window().labelCoordinates.setText(
+                        'Width: %d, Height: %d / X: %d; Y: %d' % (currentWidth, currentHeight, pos.x(), pos.y()))
+
+                color = self.drawingLineColor
+                if self.outOfPixmap(pos):
+                    # Don't allow the user to draw outside the pixmap.
+                    # Clip the coordinates to 0 or max,
+                    # if they are outside the range [0, max]
+                    size = self.pixmap.size()
+                    clipped_x = min(max(0, pos.x()), size.width())
+                    clipped_y = min(max(0, pos.y()), size.height())
+                    pos = QPointF(clipped_x, clipped_y)
+
+                elif len(self.current) > 1 and self.closeEnough(pos, self.current[0]):
+                    # Attract line to starting point and colorise to alert the
+                    # user:
+                    pos = self.current[0]
+                    color = self.current.line_color
+                    self.overrideCursor(CURSOR_POINT)
+                    self.current.highlightVertex(0, Shape.NEAR_VERTEX)
+
+                # Shape of the rubber-band preview depends on the draw mode.
+                if self.drawSquare:
+                    self.line.points = [self.current[0], pos]
+                    self.line.close()
+
+                elif self.fourpoint:
+                    # Preview segment from the last committed point to the cursor.
+                    self.line[0] = self.current[-1]
+                    self.line[1] = pos
+
+                else:
+                    self.line[1] = pos # pos is the mouse's current position
+
+                self.line.line_color = color
+                self.prevPoint = QPointF() # ?
+                self.current.highlightClear()
+            else:
+                # No shape started yet: remember the position for the crosshair.
+                self.prevPoint = pos
+            self.repaint()
+            return
+
+        # Polygon copy moving.
+        if Qt.RightButton & ev.buttons():
+            if self.selectedShapesCopy and self.prevPoint:
+                self.overrideCursor(CURSOR_MOVE)
+                self.boundedMoveShape(self.selectedShapesCopy, pos)
+                self.repaint()
+            elif self.selectedShapes:
+                # First right-drag motion: create the shadow copies.
+                self.selectedShapesCopy = [
+                    s.copy() for s in self.selectedShapes
+                ]
+                self.repaint()
+            return
+
+        # Polygon/Vertex moving.
+        if Qt.LeftButton & ev.buttons():
+            if self.selectedVertex():
+                self.boundedMoveVertex(pos)
+                self.shapeMoved.emit()
+                self.repaint()
+                self.movingShape = True
+            elif self.selectedShapes and self.prevPoint:
+                self.overrideCursor(CURSOR_MOVE)
+                self.boundedMoveShape(self.selectedShapes, pos)
+                self.shapeMoved.emit()
+                self.repaint()
+                self.movingShape = True
+            else:
+                #pan
+                delta_x = pos.x() - self.pan_initial_pos.x()
+                delta_y = pos.y() - self.pan_initial_pos.y()
+                self.scrollRequest.emit(delta_x, Qt.Horizontal)
+                self.scrollRequest.emit(delta_y, Qt.Vertical)
+                self.update()
+            return
+
+        # Just hovering over the canvas, 2 posibilities:
+        # - Highlight shapes
+        # - Highlight vertex
+        # Update shape/vertex fill and tooltip value accordingly.
+        self.setToolTip("Image")
+        for shape in reversed([s for s in self.shapes if self.isVisible(s)]):
+            # Look for a nearby vertex to highlight. If that fails,
+            # check if we happen to be inside a shape.
+            index = shape.nearestVertex(pos, self.epsilon)
+            if index is not None:
+                if self.selectedVertex():
+                    self.hShape.highlightClear()
+                self.hVertex, self.hShape = index, shape
+                shape.highlightVertex(index, shape.MOVE_VERTEX)
+                self.overrideCursor(CURSOR_POINT)
+                self.setToolTip("Click & drag to move point")
+                self.setStatusTip(self.toolTip())
+                self.update()
+                break
+            elif shape.containsPoint(pos):
+                if self.selectedVertex():
+                    self.hShape.highlightClear()
+                self.hVertex, self.hShape = None, shape
+                self.setToolTip(
+                    "Click & drag to move shape '%s'" % shape.label)
+                self.setStatusTip(self.toolTip())
+                self.overrideCursor(CURSOR_GRAB)
+                self.update()
+                break
+        else:  # Nothing found, clear highlights, reset state.
+            if self.hShape:
+                self.hShape.highlightClear()
+                self.update()
+            self.hVertex, self.hShape = None, None
+            self.overrideCursor(CURSOR_DEFAULT)
+
+    def mousePressEvent(self, ev):
+        """Left-click: add a point / start a shape (CREATE) or select &
+        prepare to drag/pan (EDIT). Right-click in EDIT: select for menu."""
+        pos = self.transformPos(ev.pos())
+        if ev.button() == Qt.LeftButton:
+            if self.drawing():
+                # self.handleDrawing(pos) # OLD
+                if self.current:
+                    if self.fourpoint: # ADD IF
+                        # Add point to existing shape.
+                        # print('Adding points in mousePressEvent is ', self.line[1])
+                        self.current.addPoint(self.line[1])
+                        self.line[0] = self.current[-1]
+                        if self.current.isClosed():
+                            # print('1111')
+                            self.finalise()
+                    elif self.drawSquare:
+                        # Square mode keeps a single anchor; the preview line
+                        # already holds both corners.
+                        assert len(self.current.points) == 1
+                        self.current.points = self.line.points
+                        self.finalise()
+                elif not self.outOfPixmap(pos):
+                    # Create new shape.
+                    self.current = Shape()
+                    self.current.addPoint(pos)
+                    self.line.points = [pos, pos]
+                    self.setHiding()
+                    self.drawingPolygon.emit(True)
+                    self.update()
+
+            else:
+                # EDIT mode: Ctrl-click adds to the current selection.
+                group_mode = int(ev.modifiers()) == Qt.ControlModifier
+                self.selectShapePoint(pos, multiple_selection_mode=group_mode)
+                self.prevPoint = pos
+                self.pan_initial_pos = pos
+
+        elif ev.button() == Qt.RightButton and self.editing():
+            group_mode = int(ev.modifiers()) == Qt.ControlModifier
+            self.selectShapePoint(pos, multiple_selection_mode=group_mode)
+            self.prevPoint = pos
+        self.update()
+
+    def mouseReleaseEvent(self, ev):
+        """Finish the gesture started in mousePressEvent: pop the context
+        menu (right), restore cursors, or commit a non-4-point draw step."""
+        if ev.button() == Qt.RightButton:
+            # menus[0] without shadow copies, menus[1] with them.
+            menu = self.menus[bool(self.selectedShapesCopy)]
+            self.restoreCursor()
+            if not menu.exec_(self.mapToGlobal(ev.pos()))\
+               and self.selectedShapesCopy:
+                # Cancel the move by deleting the shadow copy.
+                # self.selectedShapeCopy = None
+                self.selectedShapesCopy = []
+                self.repaint()
+
+        elif ev.button() == Qt.LeftButton and self.selectedShapes:
+            if self.selectedVertex():
+                self.overrideCursor(CURSOR_POINT)
+            else:
+                self.overrideCursor(CURSOR_GRAB)
+
+        elif ev.button() == Qt.LeftButton and not self.fourpoint:
+            pos = self.transformPos(ev.pos())
+            if self.drawing():
+                self.handleDrawing(pos)
+            else:
+                #pan
+                QApplication.restoreOverrideCursor() # ?
+
+        # If a drag just ended and geometry actually changed, snapshot it.
+        if self.movingShape and self.hShape:
+            if self.hShape in self.shapes:
+                index = self.shapes.index(self.hShape)
+                if (
+                    self.shapesBackups[-1][index].points
+                    != self.shapes[index].points
+                ):
+                    self.storeShapes()
+                    self.shapeMoved.emit() # connect to updateBoxlist in PPOCRLabel.py
+
+                # NOTE(review): movingShape is only reset inside this branch;
+                # if hShape left self.shapes the flag stays True — confirm intended.
+                self.movingShape = False
+
+    def endMove(self, copy=False):
+        """Commit a right-drag move: either append the shadow copies as new
+        shapes (copy=True) or transfer their points onto the originals."""
+        assert self.selectedShapes and self.selectedShapesCopy
+        assert len(self.selectedShapesCopy) == len(self.selectedShapes)
+        if copy:
+            for i, shape in enumerate(self.selectedShapesCopy):
+                shape.idx = len(self.shapes) # add current box index
+                self.shapes.append(shape)
+                self.selectedShapes[i].selected = False
+                self.selectedShapes[i] = shape
+        else:
+            for i, shape in enumerate(self.selectedShapesCopy):
+                self.selectedShapes[i].points = shape.points
+        self.selectedShapesCopy = []
+        self.repaint()
+        self.storeShapes()
+        return True
+
+    def hideBackroundShapes(self, value):
+        self.hideBackround = value
+        if self.selectedShapes:
+            # Only hide other shapes if there is a current selection.
+            # Otherwise the user will not be able to select a shape.
+            self.setHiding(True)
+            self.repaint()
+
+    def handleDrawing(self, pos):
+        """Advance the in-progress shape on mouse release: add the next
+        4-point vertex, or complete a rectangle from its two corners; when
+        nothing is in progress, start a new shape at *pos*."""
+        if self.current and self.current.reachMaxPoints() is False:
+            if self.fourpoint:
+                targetPos = self.line[self.pointnum]
+                self.current.addPoint(targetPos)
+                print('current points in handleDrawing is ', self.line[self.pointnum])
+                self.update()
+                if self.pointnum == 3:
+                    self.finalise()
+
+            else:
+                # Rectangle mode: derive the other two corners from the
+                # anchor (initPos) and the drag end point (targetPos).
+                initPos = self.current[0]
+                print('initPos', self.current[0])
+                minX = initPos.x()
+                minY = initPos.y()
+                targetPos = self.line[1]
+                maxX = targetPos.x()
+                maxY = targetPos.y()
+                self.current.addPoint(QPointF(maxX, minY))
+                self.current.addPoint(targetPos)
+                self.current.addPoint(QPointF(minX, maxY))
+                self.finalise()
+
+        elif not self.outOfPixmap(pos):
+            print('release')
+            self.current = Shape()
+            self.current.addPoint(pos)
+            self.line.points = [pos, pos]
+            self.setHiding()
+            self.drawingPolygon.emit(True)
+            self.update()
+
+    def setHiding(self, enable=True):
+        # Apply the user's hide-background preference only while enabled.
+        self._hideBackround = self.hideBackround if enable else False
+
+    def canCloseShape(self):
+        # Truthy only in CREATE mode with a shape of more than two points.
+        return self.drawing() and self.current and len(self.current) > 2
+
+    def mouseDoubleClickEvent(self, ev):
+        # We need at least 4 points here, since the mousePress handler
+        # adds an extra one before this handler is called.
+        if self.canCloseShape() and len(self.current) > 3:
+            if not self.fourpoint:
+                self.current.popPoint()
+            self.finalise()
+
+    def selectShapes(self, shapes):
+        for s in shapes: s.seleted = True
+        self.setHiding()
+        self.selectionChanged.emit(shapes)
+        self.update()
+
+    def selectShapePoint(self, point, multiple_selection_mode):
+        """Select the first shape created which contains this point."""
+        if self.selectedVertex():  # A vertex is marked for selection.
+            index, shape = self.hVertex, self.hShape
+            shape.highlightVertex(index, shape.MOVE_VERTEX)
+            return self.hVertex
+        else:
+            # Iterate back-to-front so the top-most (newest) shape wins.
+            for shape in reversed(self.shapes):
+                if self.isVisible(shape) and shape.containsPoint(point):
+                    self.calculateOffsets(shape, point)
+                    self.setHiding()
+                    if multiple_selection_mode:
+                        if shape not in self.selectedShapes: # list
+                            self.selectionChanged.emit(
+                                self.selectedShapes + [shape]
+                            )
+                    else:
+                        self.selectionChanged.emit([shape])
+                    return
+        # Clicked empty space: clear the selection.
+        self.deSelectShape()
+
+    def calculateOffsets(self, shape, point):
+        # Cache the vectors from *point* to the shape's bounding-rect
+        # corners; boundedMoveShape uses them to clamp drags to the image.
+        rect = shape.boundingRect()
+        x1 = rect.x() - point.x()
+        y1 = rect.y() - point.y()
+        x2 = (rect.x() + rect.width()) - point.x()
+        y2 = (rect.y() + rect.height()) - point.y()
+        self.offsets = QPointF(x1, y1), QPointF(x2, y2)
+
+    def snapPointToCanvas(self, x, y):
+        """
+        Moves a point x,y to within the boundaries of the canvas.
+        :return: (x,y,snapped) where snapped is True if x or y were changed, False if not.
+        """
+        if x < 0 or x > self.pixmap.width() or y < 0 or y > self.pixmap.height():
+            x = max(x, 0)
+            y = max(y, 0)
+            x = min(x, self.pixmap.width())
+            y = min(y, self.pixmap.height())
+            return x, y, True
+
+        return x, y, False
+
+    def boundedMoveVertex(self, pos):
+        """Drag the hovered vertex to *pos*, clamped to the image; keeps a
+        square aspect in drawSquare mode and keeps axis-aligned rectangles
+        rectangular by moving the two adjacent vertices along with it."""
+        index, shape = self.hVertex, self.hShape
+        point = shape[index]
+        if self.outOfPixmap(pos):
+            size = self.pixmap.size()
+            clipped_x = min(max(0, pos.x()), size.width())
+            clipped_y = min(max(0, pos.y()), size.height())
+            pos = QPointF(clipped_x, clipped_y)
+
+        if self.drawSquare:
+            # Force equal width/height relative to the opposite corner.
+            opposite_point_index = (index + 2) % 4
+            opposite_point = shape[opposite_point_index]
+
+            min_size = min(abs(pos.x() - opposite_point.x()), abs(pos.y() - opposite_point.y()))
+            directionX = -1 if pos.x() - opposite_point.x() < 0 else 1
+            directionY = -1 if pos.y() - opposite_point.y() < 0 else 1
+            shiftPos = QPointF(opposite_point.x() + directionX * min_size - point.x(),
+                               opposite_point.y() + directionY * min_size - point.y())
+        else:
+            shiftPos = pos - point
+
+        # This equality holds exactly when the 4 points form an
+        # axis-aligned rectangle; then neighbours must follow the drag.
+        if [shape[0].x(), shape[0].y(), shape[2].x(), shape[2].y()] \
+                == [shape[3].x(),shape[1].y(),shape[1].x(),shape[3].y()]:
+            shape.moveVertexBy(index, shiftPos)
+            lindex = (index + 1) % 4
+            rindex = (index + 3) % 4
+            lshift = None
+            rshift = None
+            if index % 2 == 0:
+                rshift = QPointF(shiftPos.x(), 0)
+                lshift = QPointF(0, shiftPos.y())
+            else:
+                lshift = QPointF(shiftPos.x(), 0)
+                rshift = QPointF(0, shiftPos.y())
+            shape.moveVertexBy(rindex, rshift)
+            shape.moveVertexBy(lindex, lshift)
+
+        else:
+            # Free quadrilateral: move just the dragged vertex.
+            shape.moveVertexBy(index, shiftPos)
+
+    def boundedMoveShape(self, shapes, pos):
+        """Translate *shapes* so they follow the cursor at *pos* without any
+        of them leaving the pixmap. Returns True when a move happened."""
+        if type(shapes).__name__ != 'list': shapes = [shapes]
+        if self.outOfPixmap(pos):
+            return False  # No need to move
+        # Clamp *pos* using the offsets cached by calculateOffsets so the
+        # shape's bounding box stays inside the image.
+        o1 = pos + self.offsets[0]
+        if self.outOfPixmap(o1):
+            pos -= QPointF(min(0, o1.x()), min(0, o1.y()))
+        o2 = pos + self.offsets[1]
+        if self.outOfPixmap(o2):
+            pos += QPointF(min(0, self.pixmap.width() - o2.x()),
+                           min(0, self.pixmap.height() - o2.y()))
+        # The next line tracks the new position of the cursor
+        # relative to the shape, but also results in making it
+        # a bit "shaky" when nearing the border and allows it to
+        # go outside of the shape's area for some reason. XXX
+        #self.calculateOffsets(self.selectedShape, pos)
+        dp = pos - self.prevPoint
+        if dp:
+            for shape in shapes:
+                shape.moveBy(dp)
+                shape.close()
+            self.prevPoint = pos
+            return True
+        return False
+
+    def deSelectShape(self):
+        # Clear the selection flags and broadcast an empty selection.
+        if self.selectedShapes:
+            for shape in self.selectedShapes: shape.selected=False
+            self.setHiding(False)
+            self.selectionChanged.emit([])
+            self.update()
+
+    def deleteSelected(self):
+        """Remove every selected shape; returns the removed shapes so the
+        caller can update its own bookkeeping (e.g. label list)."""
+        deleted_shapes = []
+        if self.selectedShapes:
+            for shape in self.selectedShapes:
+                self.shapes.remove(shape)
+                deleted_shapes.append(shape)
+            self.storeShapes()
+            self.selectedShapes = []
+            self.update()
+
+        # Re-number the survivors so shape.idx stays dense.
+        self.updateShapeIndex()
+
+        return deleted_shapes
+
+    def storeShapes(self):
+        shapesBackup = []
+        for shape in self.shapes:
+            shapesBackup.append(shape.copy())
+        if len(self.shapesBackups) >= 10:
+            self.shapesBackups = self.shapesBackups[-9:]
+        self.shapesBackups.append(shapesBackup)
+
+    def copySelectedShape(self):
+        """Duplicate the selected shapes slightly offset from the originals
+        and commit the copies; returns the (new) selection."""
+        if self.selectedShapes:
+            self.selectedShapesCopy = [s.copy() for s in self.selectedShapes]
+            self.boundedShiftShapes(self.selectedShapesCopy)
+            self.endMove(copy=True)
+        return self.selectedShapes
+
+    def boundedShiftShapes(self, shapes):
+        # Try to move in one direction, and if it fails in another.
+        # Give up if both fail.
+        for shape in shapes:
+            point = shape[0]
+            offset = QPointF(5.0, 5.0)
+            self.calculateOffsets(shape, point)
+            self.prevPoint = point
+            if not self.boundedMoveShape(shape, point - offset):
+                self.boundedMoveShape(shape, point + offset)
+
+    def paintEvent(self, event):
+        """Render the pixmap, all visible shapes, the in-progress shape and
+        its rubber-band preview, plus the drawing crosshair. Background
+        colour doubles as the 'verified' indicator."""
+        if not self.pixmap:
+            return super(Canvas, self).paintEvent(event)
+
+        p = self._painter
+        p.begin(self)
+        p.setRenderHint(QPainter.Antialiasing)
+        p.setRenderHint(QPainter.HighQualityAntialiasing)
+        p.setRenderHint(QPainter.SmoothPixmapTransform)
+
+        # Paint in image coordinates: scale then centre.
+        p.scale(self.scale, self.scale)
+        p.translate(self.offsetToCenter())
+
+        p.drawPixmap(0, 0, self.pixmap)
+        Shape.scale = self.scale
+        for shape in self.shapes:
+            if (shape.selected or not self._hideBackround) and self.isVisible(shape):
+                shape.fill = shape.selected or shape == self.hShape
+                shape.paint(p)
+        if self.current:
+            self.current.paint(p)
+            self.line.paint(p)
+        if self.selectedShapesCopy:
+            for s in self.selectedShapesCopy:
+                s.paint(p)
+
+        # Paint rect
+        if self.current is not None and len(self.line) == 2 and not self.fourpoint:
+            # print('Drawing rect')
+            leftTop = self.line[0]
+            rightBottom = self.line[1]
+            rectWidth = rightBottom.x() - leftTop.x()
+            rectHeight = rightBottom.y() - leftTop.y()
+            p.setPen(self.drawingRectColor)
+            brush = QBrush(Qt.BDiagPattern)
+            p.setBrush(brush)
+            p.drawRect(leftTop.x(), leftTop.y(), rectWidth, rectHeight)
+
+
+        # ADD:
+        # Shadow-fill preview of the polygon being drawn in 4-point mode.
+        if (
+                self.fillDrawing()
+                and self.fourpoint
+                and self.current is not None
+                and len(self.current.points) >= 2
+        ):
+            print('paint event')
+            drawing_shape = self.current.copy()
+            drawing_shape.addPoint(self.line[1])
+            drawing_shape.fill = True
+            drawing_shape.paint(p)
+
+        # Crosshair at the pending start point while drawing.
+        if self.drawing() and not self.prevPoint.isNull() and not self.outOfPixmap(self.prevPoint):
+            p.setPen(QColor(0, 0, 0))
+            p.drawLine(int(self.prevPoint.x()), 0, int(self.prevPoint.x()), self.pixmap.height())
+            p.drawLine(0, int(self.prevPoint.y()), self.pixmap.width(), int(self.prevPoint.y()))
+
+        # Green background = image verified, grey = not verified.
+        self.setAutoFillBackground(True)
+        if self.verified:
+            pal = self.palette()
+            pal.setColor(self.backgroundRole(), QColor(184, 239, 38, 128))
+            self.setPalette(pal)
+        else:
+            pal = self.palette()
+            pal.setColor(self.backgroundRole(), QColor(232, 232, 232, 255))
+            self.setPalette(pal)
+
+        # adaptive BBOX label & index font size
+        if self.pixmap:
+            h, w = self.pixmap.size().height(), self.pixmap.size().width()
+            fontszie = int(max(h, w) / 48)
+            for s in self.shapes:
+                s.fontsize = fontszie
+
+        p.end()
+
+    def fillDrawing(self):
+        # Whether the in-progress polygon is shadow-filled while drawing.
+        return self._fill_drawing
+
+    def transformPos(self, point):
+        """Convert from widget-logical coordinates to painter-logical coordinates."""
+        return point / self.scale - self.offsetToCenter()
+
+    def offsetToCenter(self):
+        # Offset (in image coordinates) that centres the scaled pixmap
+        # inside the widget; zero on an axis where the pixmap overflows.
+        s = self.scale
+        area = super(Canvas, self).size()
+        w, h = self.pixmap.width() * s, self.pixmap.height() * s
+        aw, ah = area.width(), area.height()
+        x = (aw - w) / (2 * s) if aw > w else 0
+        y = (ah - h) / (2 * s) if ah > h else 0
+        return QPointF(x, y)
+
+    def outOfPixmap(self, p):
+        # True when point p lies outside the image rectangle.
+        w, h = self.pixmap.width(), self.pixmap.height()
+        return not (0 <= p.x() <= w and 0 <= p.y() <= h)
+
+    def finalise(self):
+        """Close the in-progress shape, give it its index, append it to the
+        shape list and announce it. A degenerate shape whose first and last
+        points coincide is discarded instead."""
+        assert self.current
+        if self.current.points[0] == self.current.points[-1]:
+            # print('finalse')
+            self.current = None
+            self.drawingPolygon.emit(False)
+            self.update()
+            return
+
+        self.current.close()
+        self.current.idx = len(self.shapes) # add current box index
+        self.shapes.append(self.current) 
+        self.current = None
+        self.setHiding(False)
+        self.newShape.emit()
+        self.update()
+
+    def closeEnough(self, p1, p2):
+        #d = distance(p1 - p2)
+        #m = (p1-p2).manhattanLength()
+        # print "d %.2f, m %d, %.2f" % (d, m, d - m)
+        # Euclidean distance under the vertex hit-test radius.
+        return distance(p1 - p2) < self.epsilon
+
+    # These two, along with a call to adjustSize are required for the
+    # scroll area.
+    def sizeHint(self):
+        return self.minimumSizeHint()
+
+    def minimumSizeHint(self):
+        # Scaled pixmap size when an image is loaded, else the Qt default.
+        if self.pixmap:
+            return self.scale * self.pixmap.size()
+        return super(Canvas, self).minimumSizeHint()
+
+    def wheelEvent(self, ev):
+        """Wheel: Ctrl+vertical scroll zooms, otherwise request scrolling.
+        Handles both Qt4 (delta/orientation) and Qt5 (angleDelta) APIs."""
+        qt_version = 4 if hasattr(ev, "delta") else 5
+        if qt_version == 4:
+            if ev.orientation() == Qt.Vertical:
+                v_delta = ev.delta()
+                h_delta = 0
+            else:
+                h_delta = ev.delta()
+                v_delta = 0
+        else:
+            delta = ev.angleDelta()
+            h_delta = delta.x()
+            v_delta = delta.y()
+
+        mods = ev.modifiers()
+        if Qt.ControlModifier == int(mods) and v_delta:
+            self.zoomRequest.emit(v_delta)
+        else:
+            v_delta and self.scrollRequest.emit(v_delta, Qt.Vertical)
+            h_delta and self.scrollRequest.emit(h_delta, Qt.Horizontal)
+        ev.accept()
+
+    def keyPressEvent(self, ev):
+        """Keyboard shortcuts: Esc cancels drawing, Return closes a shape,
+        arrows nudge the selection by one pixel, X/C rotate it slightly."""
+        key = ev.key()
+        # Refresh the newest undo snapshot with the current state.
+        # NOTE(review): pops shapesBackups unconditionally — assumes a
+        # snapshot always exists once shapes are non-empty (storeShapes is
+        # called on load/finalise); confirm against callers.
+        shapesBackup = copy.deepcopy(self.shapes)
+        if len(shapesBackup) == 0:
+            return
+        self.shapesBackups.pop()
+        self.shapesBackups.append(shapesBackup)
+        if key == Qt.Key_Escape and self.current:
+            print('ESC press')
+            self.current = None
+            self.drawingPolygon.emit(False)
+            self.update()
+        elif key == Qt.Key_Return and self.canCloseShape():
+            self.finalise()
+        elif key == Qt.Key_Left and self.selectedShapes:
+             self.moveOnePixel('Left')
+        elif key == Qt.Key_Right and self.selectedShapes:
+             self.moveOnePixel('Right')
+        elif key == Qt.Key_Up and self.selectedShapes:
+             self.moveOnePixel('Up')
+        elif key == Qt.Key_Down and self.selectedShapes:
+             self.moveOnePixel('Down')
+        elif key == Qt.Key_X and self.selectedShapes:
+            # Rotate each selected shape by +0.01 rad unless it would leave
+            # the image.
+            for i in range(len(self.selectedShapes)):
+                self.selectedShape = self.selectedShapes[i]
+                if self.rotateOutOfBound(0.01):
+                    continue
+                self.selectedShape.rotate(0.01)
+            self.shapeMoved.emit()
+            self.update()
+
+        elif key == Qt.Key_C and self.selectedShapes:
+            for i in range(len(self.selectedShapes)):
+                self.selectedShape = self.selectedShapes[i]
+                if self.rotateOutOfBound(-0.01):
+                    continue
+                self.selectedShape.rotate(-0.01)
+            self.shapeMoved.emit()
+            self.update()
+
+    def rotateOutOfBound(self, angle):
+        for shape in range(len(self.selectedShapes)):
+            self.selectedShape = self.selectedShapes[shape]
+            for i, p in enumerate(self.selectedShape.points):
+                if self.outOfPixmap(self.selectedShape.rotatePoint(p, angle)):
+                    return True
+            return False
+
+    def moveOnePixel(self, direction):
+        # print(self.selectedShape.points)
+        self.selectCount = len(self.selectedShapes)
+        self.selectCountShape = True
+        for i in range(len(self.selectedShapes)):
+            self.selectedShape = self.selectedShapes[i]
+            if direction == 'Left' and not self.moveOutOfBound(QPointF(-1.0, 0)):
+                # print("move Left one pixel")
+                self.selectedShape.points[0] += QPointF(-1.0, 0)
+                self.selectedShape.points[1] += QPointF(-1.0, 0)
+                self.selectedShape.points[2] += QPointF(-1.0, 0)
+                self.selectedShape.points[3] += QPointF(-1.0, 0)
+            elif direction == 'Right' and not self.moveOutOfBound(QPointF(1.0, 0)):
+                # print("move Right one pixel")
+                self.selectedShape.points[0] += QPointF(1.0, 0)
+                self.selectedShape.points[1] += QPointF(1.0, 0)
+                self.selectedShape.points[2] += QPointF(1.0, 0)
+                self.selectedShape.points[3] += QPointF(1.0, 0)
+            elif direction == 'Up' and not self.moveOutOfBound(QPointF(0, -1.0)):
+                # print("move Up one pixel")
+                self.selectedShape.points[0] += QPointF(0, -1.0)
+                self.selectedShape.points[1] += QPointF(0, -1.0)
+                self.selectedShape.points[2] += QPointF(0, -1.0)
+                self.selectedShape.points[3] += QPointF(0, -1.0)
+            elif direction == 'Down' and not self.moveOutOfBound(QPointF(0, 1.0)):
+                # print("move Down one pixel")
+                self.selectedShape.points[0] += QPointF(0, 1.0)
+                self.selectedShape.points[1] += QPointF(0, 1.0)
+                self.selectedShape.points[2] += QPointF(0, 1.0)
+                self.selectedShape.points[3] += QPointF(0, 1.0)
+        shapesBackup = []
+        shapesBackup = copy.deepcopy(self.shapes)
+        self.shapesBackups.append(shapesBackup)
+        self.shapeMoved.emit()
+        self.repaint()
+
+    def moveOutOfBound(self, step):
+        points = [p1+p2 for p1, p2 in zip(self.selectedShape.points, [step]*4)]
+        return True in map(self.outOfPixmap, points)
+
+    def setLastLabel(self, text, line_color=None, fill_color=None, key_cls=None):
+        assert text
+        self.shapes[-1].label = text
+        if line_color:
+            self.shapes[-1].line_color = line_color
+
+        if fill_color:
+            self.shapes[-1].fill_color = fill_color
+
+        if key_cls:
+            self.shapes[-1].key_cls = key_cls
+
+        self.storeShapes()
+
+        return self.shapes[-1]
+
+    def undoLastLine(self):
+        # Reopen the most recent shape for further editing: pop it back
+        # into `current` and restore the rubber-band line.
+        assert self.shapes
+        self.current = self.shapes.pop()
+        self.current.setOpen()
+        self.line.points = [self.current[-1], self.current[0]]
+        self.drawingPolygon.emit(True)
+
+    def undoLastPoint(self):
+        # Remove the last committed point of the in-progress shape; cancel
+        # the shape entirely when no points remain.
+        if not self.current or self.current.isClosed():
+            return
+        self.current.popPoint()
+        if len(self.current) > 0:
+            self.line[0] = self.current[-1]
+        else:
+            self.current = None
+            self.drawingPolygon.emit(False)
+        self.repaint()
+
+    def resetAllLines(self):
+        # Discard the most recent shape and any in-progress drawing state.
+        assert self.shapes
+        self.current = self.shapes.pop()
+        self.current.setOpen()
+        self.line.points = [self.current[-1], self.current[0]]
+        self.drawingPolygon.emit(True)
+        self.current = None
+        self.drawingPolygon.emit(False)
+        self.update()
+
+    def loadPixmap(self, pixmap):
+        # Show a new image and drop all shapes belonging to the old one.
+        self.pixmap = pixmap
+        self.shapes = []
+        self.repaint()
+
+    def loadShapes(self, shapes, replace=True):
+        """Install *shapes* on the canvas (replacing or extending the
+        current list), reset hover/drawing state, snapshot and re-index."""
+        if replace:
+            self.shapes = list(shapes)
+        else:
+            self.shapes.extend(shapes)
+        self.current = None
+        self.hShape = None
+        self.hVertex = None
+        # self.hEdge = None
+        self.storeShapes()
+        self.updateShapeIndex()
+        self.repaint()
+
+    def setShapeVisible(self, shape, value):
+        self.visible[shape] = value
+        self.repaint()
+
+    def currentCursor(self):
+        # Shape of the application-wide override cursor, or None if unset.
+        cursor = QApplication.overrideCursor()
+        if cursor is not None:
+            cursor = cursor.shape()
+        return cursor
+
+    def overrideCursor(self, cursor):
+        # Remember the cursor so enterEvent can re-apply it, then set or
+        # replace the application override cursor accordingly.
+        self._cursor = cursor
+        if self.currentCursor() is None:
+            QApplication.setOverrideCursor(cursor)
+        else:
+            QApplication.changeOverrideCursor(cursor)
+
+    def restoreCursor(self):
+        QApplication.restoreOverrideCursor()
+
+    def resetState(self):
+        # Clear the image and the undo history (e.g. when switching files).
+        self.restoreCursor()
+        self.pixmap = None
+        self.update()
+        self.shapesBackups = []
+
+    def setDrawingShapeToSquare(self, status):
+        # Toggle square-constrained drawing mode.
+        self.drawSquare = status
+
+    def restoreShape(self):
+        """Undo: discard the latest snapshot and reinstate the previous one,
+        clearing any selection."""
+        if not self.isShapeRestorable:
+            return
+
+        self.shapesBackups.pop()  # latest
+        shapesBackup = self.shapesBackups.pop()
+        self.shapes = shapesBackup
+        self.selectedShapes = []
+        for shape in self.shapes:
+            shape.selected = False
+        self.updateShapeIndex()
+        self.repaint()
+    
+    @property
+    def isShapeRestorable(self):
+        if len(self.shapesBackups) < 2:
+            return False
+        return True
+
+    def updateShapeIndex(self):
+        for i in range(len(self.shapes)):
+            self.shapes[i].idx = i
+        self.update()

+ 49 - 0
PPOCRLabel/libs/colorDialog.py

@@ -0,0 +1,49 @@
+# Copyright (c) <2015-Present> Tzutalin
+# Copyright (C) 2013  MIT, Computer Science and Artificial Intelligence Laboratory. Bryan Russell, Antonio Torralba,
+# William T. Freeman. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+# associated documentation files (the "Software"), to deal in the Software without restriction, including without
+# limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
+# Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
+# the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
+# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+try:
+    from PyQt5.QtGui import *
+    from PyQt5.QtCore import *
+    from PyQt5.QtWidgets import QColorDialog, QDialogButtonBox
+except ImportError:
+    from PyQt4.QtGui import *
+    from PyQt4.QtCore import *
+
+BB = QDialogButtonBox
+
+
class ColorDialog(QColorDialog):
    """QColorDialog with an alpha channel and a Restore-Defaults button."""

    def __init__(self, parent=None):
        super(ColorDialog, self).__init__(parent)
        self.setOption(QColorDialog.ShowAlphaChannel)
        # The Mac native dialog does not support our restore button.
        self.setOption(QColorDialog.DontUseNativeDialog)
        # Add a restore defaults button.
        # The default is set at invocation time, so that it
        # works across dialogs for different elements.
        self.default = None
        # Grab the dialog's internal button box so we can extend it.
        self.bb = self.layout().itemAt(1).widget()
        self.bb.addButton(BB.RestoreDefaults)
        self.bb.clicked.connect(self.checkRestore)

    def getColor(self, value=None, title=None, default=None):
        """Run the dialog; return the chosen QColor, or None if cancelled."""
        self.default = default
        if title:
            self.setWindowTitle(title)
        if value:
            self.setCurrentColor(value)
        return self.currentColor() if self.exec_() else None

    def checkRestore(self, button):
        # Reset to the stored default when the Restore-Defaults button is hit.
        if self.bb.buttonRole(button) & BB.ResetRole and self.default:
            self.setCurrentColor(self.default)

+ 32 - 0
PPOCRLabel/libs/constants.py

@@ -0,0 +1,32 @@
+# Copyright (c) <2015-Present> Tzutalin
+# Copyright (C) 2013  MIT, Computer Science and Artificial Intelligence Laboratory. Bryan Russell, Antonio Torralba,
+# William T. Freeman. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+# associated documentation files (the "Software"), to deal in the Software without restriction, including without
+# limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
+# Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
+# the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
+# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
# QSettings keys persisted between sessions.
SETTING_FILENAME = 'filename'
SETTING_RECENT_FILES = 'recentFiles'
SETTING_WIN_SIZE = 'window/size'
SETTING_WIN_POSE = 'window/position'
SETTING_WIN_GEOMETRY = 'window/geometry'
SETTING_LINE_COLOR = 'line/color'
SETTING_FILL_COLOR = 'fill/color'
SETTING_ADVANCE_MODE = 'advanced'
SETTING_WIN_STATE = 'window/state'
SETTING_SAVE_DIR = 'savedir'
SETTING_PAINT_LABEL = 'paintlabel'
SETTING_PAINT_INDEX = 'paintindex'
SETTING_LAST_OPEN_DIR = 'lastOpenDir'
SETTING_AUTO_SAVE = 'autosave'
SETTING_SINGLE_CLASS = 'singleclass'
SETTING_DRAW_SQUARE = 'draw/square'
SETTING_LABEL_FILE_FORMAT = 'labelFileFormat'

# Annotation file-format identifiers.
FORMAT_PASCALVOC = 'PascalVOC'
FORMAT_YOLO = 'YOLO'

# Default text encoding for label files.
DEFAULT_ENCODING = 'utf-8'

+ 143 - 0
PPOCRLabel/libs/create_ml_io.py

@@ -0,0 +1,143 @@
+# Copyright (c) <2015-Present> Tzutalin
+# Copyright (C) 2013  MIT, Computer Science and Artificial Intelligence Laboratory. Bryan Russell, Antonio Torralba,
+# William T. Freeman. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+# associated documentation files (the "Software"), to deal in the Software without restriction, including without
+# limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
+# Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
+# the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
+# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+#!/usr/bin/env python
+# -*- coding: utf8 -*-
+import json
+from pathlib import Path
+
+from libs.constants import DEFAULT_ENCODING
+import os
+
+JSON_EXT = '.json'
+ENCODE_METHOD = DEFAULT_ENCODING
+
+
class CreateMLWriter:
    """Writes annotations for one image into a CreateML-format JSON file.

    The output file holds a list of per-image dicts:
    ``{"image": <filename>, "annotations": [{"label", "coordinates"}, ...]}``.
    Writing an image that is already present replaces its entry in place.
    """

    def __init__(self, foldername, filename, imgsize, shapes, outputfile, databasesrc='Unknown', localimgpath=None):
        self.foldername = foldername
        self.filename = filename
        self.databasesrc = databasesrc
        self.imgsize = imgsize
        self.boxlist = []
        self.localimgpath = localimgpath
        self.verified = False
        self.shapes = shapes
        self.outputfile = outputfile

    def write(self):
        """Merge this image's shapes into the output JSON file on disk."""
        if os.path.isfile(self.outputfile):
            with open(self.outputfile, "r") as file:
                outputdict = json.loads(file.read())
        else:
            outputdict = []

        outputimagedict = {
            "image": self.filename,
            "annotations": []
        }

        for shape in self.shapes:
            points = shape["points"]

            # Assumes an axis-aligned 4-point box: points[0] is one corner,
            # points[1]/points[2] supply the opposite x and y.
            # TODO(review): confirm callers always pass rectangles.
            x1 = points[0][0]
            y1 = points[0][1]
            x2 = points[1][0]
            y2 = points[2][1]

            height, width, x, y = self.calculate_coordinates(x1, x2, y1, y2)

            shapedict = {
                "label": shape["label"],
                "coordinates": {
                    "x": x,
                    "y": y,
                    "width": width,
                    "height": height
                }
            }
            outputimagedict["annotations"].append(shapedict)

        # Replace an existing entry for this image; append if it is new.
        for i, entry in enumerate(outputdict):
            if entry["image"] == outputimagedict["image"]:
                outputdict[i] = outputimagedict
                break
        else:
            outputdict.append(outputimagedict)

        Path(self.outputfile).write_text(json.dumps(outputdict), ENCODE_METHOD)

    def calculate_coordinates(self, x1, x2, y1, y2):
        """Return (height, width, center_x, center_y) of the box spanned by
        the given x pair and y pair, independent of corner order."""
        # min/max normalizes corner order; the old negative-width check
        # afterwards was dead code and has been removed.
        xmin, xmax = min(x1, x2), max(x1, x2)
        ymin, ymax = min(y1, y2), max(y1, y2)
        width = xmax - xmin
        height = ymax - ymin
        # x and y are the center of the rectangle.
        x = xmin + width / 2
        y = ymin + height / 2
        return height, width, x, y
+
+
class CreateMLReader:
    """Reads CreateML-format JSON annotations for a single image file."""

    def __init__(self, jsonpath, filepath):
        self.jsonpath = jsonpath
        self.shapes = []
        self.verified = False
        # Only the basename of the image path is matched against "image".
        self.filename = filepath.split("/")[-1]
        try:
            self.parse_json()
        except ValueError:
            print("JSON decoding failed")

    def parse_json(self):
        """Load the JSON file and collect shapes belonging to this image."""
        with open(self.jsonpath, "r") as infile:
            annotations = json.loads(infile.read())

        self.verified = True
        self.shapes = []
        for entry in annotations:
            if entry["image"] != self.filename:
                continue
            for box in entry["annotations"]:
                self.add_shape(box["label"], box["coordinates"])

    def add_shape(self, label, bndbox):
        """Convert a center/size box into four corner points and store it."""
        half_w = bndbox["width"] / 2
        half_h = bndbox["height"] / 2
        xmin = bndbox["x"] - half_w
        ymin = bndbox["y"] - half_h
        xmax = bndbox["x"] + half_w
        ymax = bndbox["y"] + half_h

        points = [(xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax)]
        self.shapes.append((label, points, None, None, True))

    def get_shapes(self):
        return self.shapes

+ 29 - 0
PPOCRLabel/libs/editinlist.py

@@ -0,0 +1,29 @@
+# !/usr/bin/env python
+# -*- coding: utf-8 -*-
+from PyQt5.QtCore import QModelIndex
+from PyQt5.QtWidgets import QListWidget
+
+
class EditInList(QListWidget):
    """QListWidget whose items are edited in place via a persistent editor."""

    def __init__(self):
        super(EditInList, self).__init__()
        # Item currently holding an open persistent editor, if any.
        self.edited_item = None

    def item_clicked(self, modelindex: QModelIndex):
        """Close the previous item's editor and open one on the clicked item."""
        try:
            if self.edited_item is not None:
                self.closePersistentEditor(self.edited_item)
        except:
            # NOTE(review): broad except presumably guards against the stored
            # item having been deleted; it falls back to the current item.
            self.edited_item = self.currentItem()

        self.edited_item = self.item(modelindex.row())
        self.openPersistentEditor(self.edited_item)
        self.editItem(self.edited_item)

    def mouseDoubleClickEvent(self, event):
        # Double-click editing is disabled; a single click already edits.
        pass

    def leaveEvent(self, event):
        # close edit
        for i in range(self.count()):
            self.closePersistentEditor(self.item(i))

+ 40 - 0
PPOCRLabel/libs/hashableQListWidgetItem.py

@@ -0,0 +1,40 @@
+# Copyright (c) <2015-Present> Tzutalin
+# Copyright (C) 2013  MIT, Computer Science and Artificial Intelligence Laboratory. Bryan Russell, Antonio Torralba,
+# William T. Freeman. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+# associated documentation files (the "Software"), to deal in the Software without restriction, including without
+# limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
+# Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
+# the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
+# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+import sys
+try:
+    from PyQt5.QtGui import *
+    from PyQt5.QtCore import *
+    from PyQt5.QtWidgets import *
+except ImportError:
+    # needed for py3+qt4
+    # Ref:
+    # http://pyqt.sourceforge.net/Docs/PyQt4/incompatible_apis.html
+    # http://stackoverflow.com/questions/21217399/pyqt4-qtcore-qvariant-object-instead-of-a-string
+    if sys.version_info.major >= 3:
+        import sip
+        sip.setapi('QVariant', 2)
+    from PyQt4.QtGui import *
+    from PyQt4.QtCore import *
+
+# PyQt5: TypeError: unhashable type: 'QListWidgetItem'
+
+
class HashableQListWidgetItem(QListWidgetItem):
    """QListWidgetItem usable as a dict/set key.

    PyQt5's QListWidgetItem is unhashable; hashing by object identity
    restores the PyQt4 behaviour.
    """

    def __init__(self, *args):
        super(HashableQListWidgetItem, self).__init__(*args)

    def __hash__(self):
        return hash(id(self))

+ 216 - 0
PPOCRLabel/libs/keyDialog.py

@@ -0,0 +1,216 @@
+import re
+
+from PyQt5 import QtCore
+from PyQt5 import QtGui
+from PyQt5 import QtWidgets
+from PyQt5.Qt import QT_VERSION_STR
+from libs.utils import newIcon, labelValidator
+
+QT5 = QT_VERSION_STR[0] == '5'
+
+
+# TODO(unknown):
+# - Calculate optimal position so as not to go out of screen area.
+
+
class KeyQLineEdit(QtWidgets.QLineEdit):
    """Line edit that forwards Up/Down keys to an attached list widget."""

    def setListWidget(self, list_widget):
        # List widget that receives arrow-key navigation events.
        self.list_widget = list_widget

    def keyPressEvent(self, e):
        if e.key() in [QtCore.Qt.Key_Up, QtCore.Qt.Key_Down]:
            self.list_widget.keyPressEvent(e)
        else:
            super(KeyQLineEdit, self).keyPressEvent(e)
+
+
class KeyDialog(QtWidgets.QDialog):
    """Dialog for picking or typing a key/label.

    Offers a line edit with completion, a history list of known labels,
    and optional per-label flag checkboxes: ``flags`` maps a label regex
    to the flag names shown when the current text matches it.
    """

    def __init__(
            self,
            text="Enter object label",
            parent=None,
            labels=None,
            sort_labels=True,
            show_text_field=True,
            completion="startswith",
            fit_to_content=None,
            flags=None,
    ):
        if fit_to_content is None:
            fit_to_content = {"row": False, "column": True}
        self._fit_to_content = fit_to_content

        super(KeyDialog, self).__init__(parent)
        self.edit = KeyQLineEdit()
        self.edit.setPlaceholderText(text)
        self.edit.setValidator(labelValidator())
        self.edit.editingFinished.connect(self.postProcess)
        if flags:
            self.edit.textChanged.connect(self.updateFlags)

        layout = QtWidgets.QVBoxLayout()
        if show_text_field:
            layout_edit = QtWidgets.QHBoxLayout()
            layout_edit.addWidget(self.edit, 6)
            layout.addLayout(layout_edit)
        # buttons
        self.buttonBox = bb = QtWidgets.QDialogButtonBox(
            QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel,
            QtCore.Qt.Horizontal,
            self,
        )
        bb.button(bb.Ok).setIcon(newIcon("done"))
        bb.button(bb.Cancel).setIcon(newIcon("undo"))
        bb.accepted.connect(self.validate)
        bb.rejected.connect(self.reject)
        layout.addWidget(bb)
        # label_list
        self.labelList = QtWidgets.QListWidget()
        if self._fit_to_content["row"]:
            self.labelList.setHorizontalScrollBarPolicy(
                QtCore.Qt.ScrollBarAlwaysOff
            )
        if self._fit_to_content["column"]:
            self.labelList.setVerticalScrollBarPolicy(
                QtCore.Qt.ScrollBarAlwaysOff
            )
        self._sort_labels = sort_labels
        if labels:
            self.labelList.addItems(labels)
        if self._sort_labels:
            self.labelList.sortItems()
        else:
            # Unsorted lists may be reordered manually via drag & drop.
            self.labelList.setDragDropMode(
                QtWidgets.QAbstractItemView.InternalMove
            )
        self.labelList.currentItemChanged.connect(self.labelSelected)
        self.labelList.itemDoubleClicked.connect(self.labelDoubleClicked)
        self.edit.setListWidget(self.labelList)
        layout.addWidget(self.labelList)
        # label_flags
        if flags is None:
            flags = {}
        self._flags = flags
        self.flagsLayout = QtWidgets.QVBoxLayout()
        self.resetFlags()
        layout.addItem(self.flagsLayout)
        self.edit.textChanged.connect(self.updateFlags)
        self.setLayout(layout)
        # completion
        completer = QtWidgets.QCompleter()
        if not QT5 and completion != "startswith":
            # Qt4 only supports inline (startswith) completion.
            completion = "startswith"
        if completion == "startswith":
            completer.setCompletionMode(QtWidgets.QCompleter.InlineCompletion)
            # Default settings.
            # completer.setFilterMode(QtCore.Qt.MatchStartsWith)
        elif completion == "contains":
            completer.setCompletionMode(QtWidgets.QCompleter.PopupCompletion)
            completer.setFilterMode(QtCore.Qt.MatchContains)
        else:
            raise ValueError("Unsupported completion: {}".format(completion))
        completer.setModel(self.labelList.model())
        self.edit.setCompleter(completer)

    def addLabelHistory(self, label):
        """Add `label` to the history list unless it is already present."""
        if self.labelList.findItems(label, QtCore.Qt.MatchExactly):
            return
        self.labelList.addItem(label)
        if self._sort_labels:
            self.labelList.sortItems()

    def labelSelected(self, item):
        self.edit.setText(item.text())

    def validate(self):
        """Accept the dialog only when the trimmed text is non-empty."""
        text = self.edit.text()
        if hasattr(text, "strip"):
            text = text.strip()
        else:
            # PyQt4 QString uses trimmed() instead of strip().
            text = text.trimmed()
        if text:
            self.accept()

    def labelDoubleClicked(self, item):
        self.validate()

    def postProcess(self):
        """Strip surrounding whitespace from the edit once editing ends."""
        text = self.edit.text()
        if hasattr(text, "strip"):
            text = text.strip()
        else:
            text = text.trimmed()
        self.edit.setText(text)

    def updateFlags(self, label_new):
        # keep state of shared flags
        flags_old = self.getFlags()

        flags_new = {}
        for pattern, keys in self._flags.items():
            if re.match(pattern, label_new):
                for key in keys:
                    flags_new[key] = flags_old.get(key, False)
        self.setFlags(flags_new)

    def deleteFlags(self):
        """Remove all flag checkboxes from the layout."""
        for i in reversed(range(self.flagsLayout.count())):
            item = self.flagsLayout.itemAt(i).widget()
            self.flagsLayout.removeWidget(item)
            item.setParent(None)

    def resetFlags(self, label=""):
        """Show the flag set matching `label`, all unchecked."""
        flags = {}
        for pattern, keys in self._flags.items():
            if re.match(pattern, label):
                for key in keys:
                    flags[key] = False
        self.setFlags(flags)

    def setFlags(self, flags):
        """Replace the flag checkboxes with `flags` (name -> checked state)."""
        self.deleteFlags()
        for key in flags:
            item = QtWidgets.QCheckBox(key, self)
            item.setChecked(flags[key])
            self.flagsLayout.addWidget(item)
            item.show()

    def getFlags(self):
        """Return the current flag states as a name -> bool dict."""
        flags = {}
        for i in range(self.flagsLayout.count()):
            item = self.flagsLayout.itemAt(i).widget()
            flags[item.text()] = item.isChecked()
        return flags

    def popUp(self, text=None, move=True, flags=None):
        """Show the dialog; return (text, flags), or (None, None) on cancel."""
        if self._fit_to_content["row"]:
            self.labelList.setMinimumHeight(
                self.labelList.sizeHintForRow(0) * self.labelList.count() + 2
            )
        if self._fit_to_content["column"]:
            self.labelList.setMinimumWidth(
                self.labelList.sizeHintForColumn(0) + 2
            )
        # if text is None, the previous label in self.edit is kept
        if text is None:
            text = self.edit.text()
        if flags:
            self.setFlags(flags)
        else:
            self.resetFlags(text)
        self.edit.setText(text)
        self.edit.setSelection(0, len(text))

        items = self.labelList.findItems(text, QtCore.Qt.MatchFixedString)
        if items:
            # Bug fix: the old code highlighted the matching history row only
            # when the match was ambiguous (len(items) != 1); highlight the
            # first match unconditionally, as upstream labelme does.
            self.labelList.setCurrentItem(items[0])
            row = self.labelList.row(items[0])
            self.edit.completer().setCurrentRow(row)
        self.edit.setFocus(QtCore.Qt.PopupFocusReason)
        if move:
            self.move(QtGui.QCursor.pos())
        if self.exec_():
            return self.edit.text(), self.getFlags()
        else:
            return None, None

+ 88 - 0
PPOCRLabel/libs/labelColor.py

@@ -0,0 +1,88 @@
+import PIL.Image
+import numpy as np
+
+
def rgb2hsv(rgb):
    # type: (np.ndarray) -> np.ndarray
    """Convert rgb to hsv.

    Parameters
    ----------
    rgb: numpy.ndarray, (H, W, 3), np.uint8
        Input rgb image.

    Returns
    -------
    hsv: numpy.ndarray, (H, W, 3), np.uint8
        Output hsv image.

    """
    # Round-trip through PIL, which implements the colorspace conversion.
    image = PIL.Image.fromarray(rgb, mode="RGB")
    return np.array(image.convert("HSV"))
+
+
def hsv2rgb(hsv):
    # type: (np.ndarray) -> np.ndarray
    """Convert hsv to rgb.

    Parameters
    ----------
    hsv: numpy.ndarray, (H, W, 3), np.uint8
        Input hsv image.

    Returns
    -------
    rgb: numpy.ndarray, (H, W, 3), np.uint8
        Output rgb image.

    """
    # Round-trip through PIL, which implements the colorspace conversion.
    image = PIL.Image.fromarray(hsv, mode="HSV")
    return np.array(image.convert("RGB"))
+
+
+def label_colormap(n_label=256, value=None):
+    """Label colormap.
+
+    Parameters
+    ----------
+    n_label: int
+        Number of labels (default: 256).
+    value: float or int
+        Value scale or value of label color in HSV space.
+
+    Returns
+    -------
+    cmap: numpy.ndarray, (N, 3), numpy.uint8
+        Label id to colormap.
+
+    """
+
+    def bitget(byteval, idx):
+        return (byteval & (1 << idx)) != 0
+
+    cmap = np.zeros((n_label, 3), dtype=np.uint8)
+    for i in range(0, n_label):
+        id = i
+        r, g, b = 0, 0, 0
+        for j in range(0, 8):
+            r = np.bitwise_or(r, (bitget(id, 0) << 7 - j))
+            g = np.bitwise_or(g, (bitget(id, 1) << 7 - j))
+            b = np.bitwise_or(b, (bitget(id, 2) << 7 - j))
+            id = id >> 3
+        cmap[i, 0] = r
+        cmap[i, 1] = g
+        cmap[i, 2] = b
+
+    if value is not None:
+        hsv = rgb2hsv(cmap.reshape(1, -1, 3))
+        if isinstance(value, float):
+            hsv[:, 1:, 2] = hsv[:, 1:, 2].astype(float) * value
+        else:
+            assert isinstance(value, int)
+            hsv[:, 1:, 2] = value
+        cmap = hsv2rgb(hsv).reshape(-1, 3)
+    return cmap

+ 107 - 0
PPOCRLabel/libs/labelDialog.py

@@ -0,0 +1,107 @@
+# Copyright (c) <2015-Present> Tzutalin
+# Copyright (C) 2013  MIT, Computer Science and Artificial Intelligence Laboratory. Bryan Russell, Antonio Torralba,
+# William T. Freeman. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+# associated documentation files (the "Software"), to deal in the Software without restriction, including without
+# limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
+# Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
+# the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
+# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+try:
+    from PyQt5.QtGui import *
+    from PyQt5.QtCore import *
+    from PyQt5.QtWidgets import *
+except ImportError:
+    from PyQt4.QtGui import *
+    from PyQt4.QtCore import *
+
+from libs.utils import newIcon, labelValidator
+
+BB = QDialogButtonBox
+
+
class LabelDialog(QDialog):
    """Modal dialog for entering or editing a text label, with completion."""

    def __init__(self, text="Enter object label", parent=None, listItem=None):
        super(LabelDialog, self).__init__(parent)

        self.edit = QLineEdit()  # OLD
        # self.edit = QTextEdit()
        self.edit.setText(text)
        # self.edit.setValidator(labelValidator())  # validate input
        self.edit.editingFinished.connect(self.postProcess)

        # Autocomplete from the provided label history.
        model = QStringListModel()
        model.setStringList(listItem)
        completer = QCompleter()
        completer.setModel(model)
        self.edit.setCompleter(completer)

        layout = QVBoxLayout()
        layout.addWidget(self.edit)
        self.buttonBox = bb = BB(BB.Ok | BB.Cancel, Qt.Horizontal, self)
        bb.button(BB.Ok).setIcon(newIcon('done'))
        bb.button(BB.Cancel).setIcon(newIcon('undo'))
        bb.accepted.connect(self.validate)
        bb.rejected.connect(self.reject)
        layout.addWidget(bb)

        # if listItem is not None and len(listItem) > 0:
        #     self.listWidget = QListWidget(self)
        #     for item in listItem:
        #         self.listWidget.addItem(item)
        #     self.listWidget.itemClicked.connect(self.listItemClick)
        #     self.listWidget.itemDoubleClicked.connect(self.listItemDoubleClick)
        #     layout.addWidget(self.listWidget)

        self.setLayout(layout)

    def validate(self):
        """Accept the dialog only when the trimmed text is non-empty."""
        try:
            if self.edit.text().trimmed():
                self.accept()
        except AttributeError:
            # PyQt5: AttributeError: 'str' object has no attribute 'trimmed'
            if self.edit.text().strip():
                self.accept()

    def postProcess(self):
        """Strip surrounding whitespace once editing finishes."""
        try:
            self.edit.setText(self.edit.text().trimmed())
        except AttributeError:
            # PyQt5: str has no trimmed(); strip() is the equivalent. The old
            # code set the text back unstripped and left a debug print behind.
            self.edit.setText(self.edit.text().strip())

    def popUp(self, text='', move=True):
        """Show the dialog pre-filled with `text`; return the entered label,
        or None if cancelled."""
        self.edit.setText(text)
        self.edit.setSelection(0, len(text))
        self.edit.setFocus(Qt.PopupFocusReason)
        if move:
            # Clamp the dialog position so it stays inside the parent widget.
            cursor_pos = QCursor.pos()
            parent_bottomRight = self.parentWidget().geometry()
            max_x = parent_bottomRight.x() + parent_bottomRight.width() - self.sizeHint().width()
            max_y = parent_bottomRight.y() + parent_bottomRight.height() - self.sizeHint().height()
            max_global = self.parentWidget().mapToGlobal(QPoint(max_x, max_y))
            if cursor_pos.x() > max_global.x():
                cursor_pos.setX(max_global.x())
            if cursor_pos.y() > max_global.y():
                cursor_pos.setY(max_global.y())
            self.move(cursor_pos)
        return self.edit.text() if self.exec_() else None

    def listItemClick(self, tQListWidgetItem):
        try:
            text = tQListWidgetItem.text().trimmed()
        except AttributeError:
            # PyQt5: AttributeError: 'str' object has no attribute 'trimmed'
            text = tQListWidgetItem.text().strip()
        self.edit.setText(text)

    def listItemDoubleClick(self, tQListWidgetItem):
        self.listItemClick(tQListWidgetItem)
        self.validate()

The differences are not shown because the file is too large.
+ 11715 - 0
PPOCRLabel/libs/resources.py


+ 60 - 0
PPOCRLabel/libs/settings.py

@@ -0,0 +1,60 @@
+# Copyright (c) <2015-Present> Tzutalin
+# Copyright (C) 2013  MIT, Computer Science and Artificial Intelligence Laboratory. Bryan Russell, Antonio Torralba,
+# William T. Freeman. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+# associated documentation files (the "Software"), to deal in the Software without restriction, including without
+# limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
+# Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
+# the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
+# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+
+import pickle
+import os
+import sys
+
+
class Settings(object):
    """Tiny pickle-backed key/value store for application settings."""

    def __init__(self):
        # By default the settings file lives in the user's home directory.
        home = os.path.expanduser("~")
        self.data = {}
        # self.path = os.path.join(home, '.labelImgSettings.pkl')
        self.path = os.path.join(home, '.autoOCRSettings.pkl')

    def __setitem__(self, key, value):
        self.data[key] = value

    def __getitem__(self, key):
        return self.data[key]

    def get(self, key, default=None):
        """Return the stored value for `key`, or `default` if absent."""
        return self.data.get(key, default)

    def save(self):
        """Pickle the settings to disk; return True on success."""
        if self.path:
            with open(self.path, 'wb') as f:
                pickle.dump(self.data, f, pickle.HIGHEST_PROTOCOL)
            return True
        return False

    def load(self):
        """Load settings from disk; return True if a file was read.

        NOTE(review): unpickling is only safe because the file is written by
        this app into the user's home dir; never point `path` at untrusted input.
        """
        try:
            if os.path.exists(self.path):
                with open(self.path, 'rb') as f:
                    self.data = pickle.load(f)
                return True
        except Exception:
            # Was a bare except; keep best-effort semantics but let
            # KeyboardInterrupt/SystemExit propagate.
            print('Loading setting failed')
        return False

    def reset(self):
        """Delete the settings file and clear the in-memory state."""
        if os.path.exists(self.path):
            os.remove(self.path)
            # Old message used '${0}', which printed a stray '$'.
            print('Remove setting pkl file {0}'.format(self.path))
        self.data = {}
        self.path = None

+ 264 - 0
PPOCRLabel/libs/shape.py

@@ -0,0 +1,264 @@
+# Copyright (c) <2015-Present> Tzutalin
+# Copyright (C) 2013  MIT, Computer Science and Artificial Intelligence Laboratory. Bryan Russell, Antonio Torralba,
+# William T. Freeman. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+# associated documentation files (the "Software"), to deal in the Software without restriction, including without
+# limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
+# Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
+# the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
+# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+# !/usr/bin/python
+# -*- coding: utf-8 -*-
+import math
+import sys
+
+from PyQt5.QtCore import QPointF
+from PyQt5.QtGui import QColor, QPen, QPainterPath, QFont
+from libs.utils import distance
+
+# Default drawing style shared by all Shape instances (QColor args are RGBA).
+DEFAULT_LINE_COLOR = QColor(0, 255, 0, 128)
+DEFAULT_FILL_COLOR = QColor(255, 0, 0, 128)
+DEFAULT_SELECT_LINE_COLOR = QColor(255, 255, 255)
+DEFAULT_SELECT_FILL_COLOR = QColor(0, 128, 255, 155)
+DEFAULT_VERTEX_FILL_COLOR = QColor(0, 255, 0, 255)
+DEFAULT_HVERTEX_FILL_COLOR = QColor(255, 0, 0)
+DEFAULT_LOCK_COLOR = QColor(255, 0, 255)
+MIN_Y_LABEL = 10  # minimum y offset so label text is not clipped at the top
+
+
+class Shape(object):
+    """A labeled polygon (typically a 4-point text box) drawn on the canvas.
+
+    Class-level attributes hold the default drawing style shared by all
+    shapes; a per-instance ``line_color`` may override the class value.
+    """
+
+    P_SQUARE, P_ROUND = range(2)
+
+    MOVE_VERTEX, NEAR_VERTEX = range(2)
+
+    # The following class variables influence the drawing
+    # of _all_ shape objects.
+    line_color = DEFAULT_LINE_COLOR
+    fill_color = DEFAULT_FILL_COLOR
+    select_line_color = DEFAULT_SELECT_LINE_COLOR
+    select_fill_color = DEFAULT_SELECT_FILL_COLOR
+    vertex_fill_color = DEFAULT_VERTEX_FILL_COLOR
+    hvertex_fill_color = DEFAULT_HVERTEX_FILL_COLOR
+    point_type = P_ROUND
+    point_size = 8
+    scale = 1.0
+
+    def __init__(self, label=None, line_color=None, difficult=False, key_cls="None", paintLabel=False, paintIdx=False):
+        self.label = label
+        self.idx = None # bbox order, only for table annotation
+        self.points = []
+        self.fill = False
+        self.selected = False
+        self.difficult = difficult
+        self.key_cls = key_cls
+        self.paintLabel = paintLabel
+        self.paintIdx = paintIdx
+        self.locked = False
+        self.direction = 0
+        self.center = None
+        self.epsilon = 5  # same as canvas
+        self._highlightIndex = None
+        self._highlightMode = self.NEAR_VERTEX
+        self._highlightSettings = {
+            self.NEAR_VERTEX: (4, self.P_ROUND),
+            self.MOVE_VERTEX: (1.5, self.P_SQUARE),
+        }
+        self.fontsize = 8
+
+        self._closed = False
+
+        if line_color is not None:
+            # Override the class line_color attribute
+            # with an object attribute. Currently this
+            # is used for drawing the pending line a different color.
+            self.line_color = line_color
+
+    def rotate(self, theta):
+        # Rotate every vertex by theta (radians) around self.center and
+        # keep the accumulated direction normalized to [0, 2*pi).
+        for i, p in enumerate(self.points):
+            self.points[i] = self.rotatePoint(p, theta)
+        self.direction -= theta
+        self.direction = self.direction % (2 * math.pi)
+
+    def rotatePoint(self, p, theta):
+        # Rotate a single point p by theta around self.center.
+        order = p - self.center
+        cosTheta = math.cos(theta)
+        sinTheta = math.sin(theta)
+        pResx = cosTheta * order.x() + sinTheta * order.y()
+        pResy = - sinTheta * order.x() + cosTheta * order.y()
+        pRes = QPointF(self.center.x() + pResx, self.center.y() + pResy)
+        return pRes
+
+    def close(self):
+        # NOTE(review): assumes a 4-point box — the center is taken as the
+        # midpoint of the diagonal points[0]..points[2].
+        self.center = QPointF((self.points[0].x() + self.points[2].x()) / 2,
+                              (self.points[0].y() + self.points[2].y()) / 2)
+        self._closed = True
+
+    def reachMaxPoints(self):
+        # Shapes are capped at 4 vertices.
+        if len(self.points) >= 4:
+            return True
+        return False
+
+    def addPoint(self, point):
+        # Clicking close enough to the first vertex closes the shape
+        # instead of adding a fifth point.
+        if self.reachMaxPoints() and self.closeEnough(self.points[0], point):
+            self.close()
+        else:
+            self.points.append(point)
+
+    def closeEnough(self, p1, p2):
+        return distance(p1 - p2) < self.epsilon
+
+    def popPoint(self):
+        # Remove and return the last vertex, or None if there are no points.
+        if self.points:
+            return self.points.pop()
+        return None
+
+    def isClosed(self):
+        return self._closed
+
+    def setOpen(self):
+        self._closed = False
+
+    def paint(self, painter):
+        """Draw the outline, vertices and optional label/index text."""
+        if self.points:
+            color = self.select_line_color if self.selected else self.line_color
+            pen = QPen(color)
+            # Try using integer sizes for smoother drawing(?)
+            # pen.setWidth(max(1, int(round(2.0 / self.scale))))
+            painter.setPen(pen)
+
+            line_path = QPainterPath()
+            vrtx_path = QPainterPath()
+
+            line_path.moveTo(self.points[0])
+            # Uncommenting the following line will draw 2 paths
+            # for the 1st vertex, and make it non-filled, which
+            # may be desirable.
+            # self.drawVertex(vrtx_path, 0)
+
+            for i, p in enumerate(self.points):
+                line_path.lineTo(p)
+                self.drawVertex(vrtx_path, i)
+            if self.isClosed():
+                line_path.lineTo(self.points[0])
+
+            painter.drawPath(line_path)
+            painter.drawPath(vrtx_path)
+            painter.fillPath(vrtx_path, self.vertex_fill_color)
+
+            # Draw text at the top-left
+            if self.paintLabel:
+                min_x = sys.maxsize
+                min_y = sys.maxsize
+                for point in self.points:
+                    min_x = min(min_x, point.x())
+                    min_y = min(min_y, point.y())
+                if min_x != sys.maxsize and min_y != sys.maxsize:
+                    font = QFont()
+                    font.setPointSize(self.fontsize)
+                    font.setBold(True)
+                    painter.setFont(font)
+                    if self.label is None:
+                        self.label = ""
+                    if min_y < MIN_Y_LABEL:
+                        min_y += MIN_Y_LABEL
+                    painter.drawText(min_x, min_y, self.label)
+
+            # Draw number at the top-right
+            if self.paintIdx:
+                min_x = sys.maxsize
+                min_y = sys.maxsize
+                for point in self.points:
+                    min_x = min(min_x, point.x())
+                    min_y = min(min_y, point.y())
+                if min_x != sys.maxsize and min_y != sys.maxsize:
+                    font = QFont()
+                    font.setPointSize(self.fontsize)
+                    font.setBold(True)
+                    painter.setFont(font)
+                    text = ''
+                    if self.idx != None:
+                        text = str(self.idx)
+                    if min_y < MIN_Y_LABEL:
+                        min_y += MIN_Y_LABEL
+                    painter.drawText(min_x, min_y, text)
+
+            if self.fill:
+                color = self.select_fill_color if self.selected else self.fill_color
+                painter.fillPath(line_path, color)
+
+    def drawVertex(self, path, i):
+        # Vertex size compensates for canvas zoom; the highlighted vertex is
+        # enlarged (NEAR_VERTEX) or drawn as a square (MOVE_VERTEX).
+        d = self.point_size / self.scale
+        shape = self.point_type
+        point = self.points[i]
+        if i == self._highlightIndex:
+            size, shape = self._highlightSettings[self._highlightMode]
+            d *= size
+        if self._highlightIndex is not None:
+            self.vertex_fill_color = self.hvertex_fill_color
+        else:
+            self.vertex_fill_color = Shape.vertex_fill_color
+        if shape == self.P_SQUARE:
+            path.addRect(point.x() - d / 2, point.y() - d / 2, d, d)
+        elif shape == self.P_ROUND:
+            path.addEllipse(point, d / 2.0, d / 2.0)
+        else:
+            assert False, "unsupported vertex shape"
+
+    def nearestVertex(self, point, epsilon):
+        # Index of the first vertex within epsilon of point, else None.
+        for i, p in enumerate(self.points):
+            if distance(p - point) <= epsilon:
+                return i
+        return None
+
+    def containsPoint(self, point):
+        return self.makePath().contains(point)
+
+    def makePath(self):
+        path = QPainterPath(self.points[0])
+        for p in self.points[1:]:
+            path.lineTo(p)
+        return path
+
+    def boundingRect(self):
+        return self.makePath().boundingRect()
+
+    def moveBy(self, offset):
+        self.points = [p + offset for p in self.points]
+
+    def moveVertexBy(self, i, offset):
+        self.points[i] = self.points[i] + offset
+
+    def highlightVertex(self, i, action):
+        self._highlightIndex = i
+        self._highlightMode = action
+
+    def highlightClear(self):
+        self._highlightIndex = None
+
+    def copy(self):
+        # NOTE(review): idx, locked, paintLabel/paintIdx and fontsize are not
+        # copied; the clone keeps the __init__ defaults for those fields.
+        shape = Shape("%s" % self.label)
+        shape.points = [p for p in self.points]
+        shape.center = self.center
+        shape.direction = self.direction
+        shape.fill = self.fill
+        shape.selected = self.selected
+        shape._closed = self._closed
+        if self.line_color != Shape.line_color:
+            shape.line_color = self.line_color
+        if self.fill_color != Shape.fill_color:
+            shape.fill_color = self.fill_color
+        shape.difficult = self.difficult
+        shape.key_cls = self.key_cls
+        return shape
+
+    def __len__(self):
+        return len(self.points)
+
+    def __getitem__(self, key):
+        return self.points[key]
+
+    def __setitem__(self, key, value):
+        self.points[key] = value

+ 90 - 0
PPOCRLabel/libs/stringBundle.py

@@ -0,0 +1,90 @@
+# Copyright (c) <2015-Present> Tzutalin
+# Copyright (C) 2013  MIT, Computer Science and Artificial Intelligence Laboratory. Bryan Russell, Antonio Torralba,
+# William T. Freeman. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+# associated documentation files (the "Software"), to deal in the Software without restriction, including without
+# limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
+# Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
+# the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
+# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+import re
+import os
+import sys
+import locale
+from libs.ustr import ustr
+
+__dir__ = os.path.dirname(os.path.abspath(__file__)) # directory containing this module
+__dirpath__ = os.path.abspath(os.path.join(__dir__, '../resources/strings'))
+
+try:
+    from PyQt5.QtCore import *
+except ImportError:
+    if sys.version_info.major >= 3:
+        import sip
+        sip.setapi('QVariant', 2)
+    from PyQt4.QtCore import *
+
+
+class StringBundle:
+    """Loads localized UI strings from .properties resource files.
+
+    Instances must be created through :meth:`getBundle`, which resolves the
+    locale and builds a fallback list of .properties paths from the locale
+    tags, loading each in turn so later (more specific) files win.
+    """
+
+    __create_key = object()
+
+    def __init__(self, create_key, localeStr):
+        assert(create_key == StringBundle.__create_key), "StringBundle must be created using StringBundle.getBundle"
+        self.idToMessage = {}
+        paths = self.__createLookupFallbackList(localeStr)
+        for path in paths:
+            self.__loadBundle(path)
+
+    @classmethod
+    def getBundle(cls, localeStr=None):
+        # Fall back to the LANG env var, then to 'en', when no locale is given.
+        if localeStr is None:
+            try:
+                localeStr = locale.getlocale()[0] if locale.getlocale() and len(
+                    locale.getlocale()) > 0 else os.getenv('LANG')
+            except:
+                print('Invalid locale')
+                localeStr = 'en'
+
+        return StringBundle(cls.__create_key, localeStr)
+
+    def getString(self, stringId):
+        assert(stringId in self.idToMessage), "Missing string id : " + stringId
+        return self.idToMessage[stringId]
+
+    def __createLookupFallbackList(self, localeStr):
+        # NOTE(review): "\strings" relies on '\s' not being a recognized
+        # escape (DeprecationWarning on Python 3.6+); prefer r"\strings".
+        # Also, only the LAST path gets the __dirpath__ prefix and the
+        # ".properties" suffix — earlier fallback entries stay as bare
+        # relative names; confirm this matches the intended lookup.
+        resultPaths = []
+        basePath = "\strings" if os.name == 'nt' else "/strings"
+        resultPaths.append(basePath)
+        if localeStr is not None:
+            # Don't follow standard BCP47. Simple fallback
+            tags = re.split('[^a-zA-Z]', localeStr)
+            for tag in tags:
+                lastPath = resultPaths[-1]
+                resultPaths.append(lastPath + '-' + tag)
+            resultPaths[-1] = __dirpath__ + resultPaths[-1] + ".properties"
+
+        return resultPaths
+
+    def __loadBundle(self, path):
+        # Parse 'key = "value"' lines from a .properties file into idToMessage.
+        # NOTE(review): the while-loop sits OUTSIDE the `if f.open(...)` body —
+        # if open() fails, `text` is unbound and this raises NameError, and
+        # f.close() would run on a file that never opened.
+        PROP_SEPERATOR = '='
+        f = QFile(path)
+        if f.exists():
+            if f.open(QIODevice.ReadOnly | QFile.Text):
+                text = QTextStream(f)
+                text.setCodec("UTF-8")
+
+            while not text.atEnd():
+                line = ustr(text.readLine())
+                key_value = line.split(PROP_SEPERATOR)
+                key = key_value[0].strip()
+                value = PROP_SEPERATOR.join(key_value[1:]).strip().strip('"')
+                self.idToMessage[key] = value
+
+            f.close()

+ 51 - 0
PPOCRLabel/libs/toolBar.py

@@ -0,0 +1,51 @@
+# Copyright (c) <2015-Present> Tzutalin
+# Copyright (C) 2013  MIT, Computer Science and Artificial Intelligence Laboratory. Bryan Russell, Antonio Torralba,
+# William T. Freeman. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+# associated documentation files (the "Software"), to deal in the Software without restriction, including without
+# limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
+# Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
+# the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
+# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+try:
+    from PyQt5.QtGui import *
+    from PyQt5.QtCore import *
+    from PyQt5.QtWidgets import *
+except ImportError:
+    from PyQt4.QtGui import *
+    from PyQt4.QtCore import *
+
+
+class ToolBar(QToolBar):
+    """QToolBar with zero margins whose plain actions render as ToolButtons."""
+
+    def __init__(self, title):
+        super(ToolBar, self).__init__(title)
+        layout = self.layout()
+        m = (0, 0, 0, 0)
+        layout.setSpacing(0)
+        layout.setContentsMargins(*m)
+        self.setContentsMargins(*m)
+        self.setWindowFlags(self.windowFlags() | Qt.FramelessWindowHint)
+
+    def addAction(self, action):
+        # QWidgetActions are delegated to the base class; plain QActions are
+        # wrapped in a ToolButton so every button shares the same size.
+        # NOTE(review): the non-widget branch returns None, unlike
+        # QToolBar.addAction which returns the added action.
+        if isinstance(action, QWidgetAction):
+            return super(ToolBar, self).addAction(action)
+        btn = ToolButton()
+        btn.setDefaultAction(action)
+        btn.setToolButtonStyle(self.toolButtonStyle())
+        self.addWidget(btn)
+
+
+class ToolButton(QToolButton):
+    """ToolBar companion class which ensures all buttons have the same size."""
+    # Shared minimum size; grows monotonically so every button ends up as
+    # large as the largest size hint seen so far across all instances.
+    minSize = (60, 60)
+
+    def minimumSizeHint(self):
+        ms = super(ToolButton, self).minimumSizeHint()
+        w1, h1 = ms.width(), ms.height()
+        w2, h2 = self.minSize
+        ToolButton.minSize = max(w1, w2), max(h1, h2)
+        return QSize(*ToolButton.minSize)

+ 46 - 0
PPOCRLabel/libs/unique_label_qlist_widget.py

@@ -0,0 +1,46 @@
+# -*- encoding: utf-8 -*-
+
+from PyQt5.QtCore import Qt, QSize
+from PyQt5 import QtWidgets
+
+
+class EscapableQListWidget(QtWidgets.QListWidget):
+    """QListWidget that clears its selection when Escape is pressed."""
+    def keyPressEvent(self, event):
+        super(EscapableQListWidget, self).keyPressEvent(event)
+        if event.key() == Qt.Key_Escape:
+            self.clearSelection()
+
+
+class UniqueLabelQListWidget(EscapableQListWidget):
+    """List widget holding one item per unique label (label kept in Qt.UserRole)."""
+    def mousePressEvent(self, event):
+        # Clicking on empty space deselects everything.
+        super(UniqueLabelQListWidget, self).mousePressEvent(event)
+        if not self.indexAt(event.pos()).isValid():
+            self.clearSelection()
+
+    def findItemsByLabel(self, label, get_row=False):
+        # NOTE(review): inconsistent return type — with get_row=True this
+        # returns the int row of the FIRST match, but falls through to a
+        # list when no row matches.
+        items = []
+        for row in range(self.count()):
+            item = self.item(row)
+            if item.data(Qt.UserRole) == label:
+                items.append(item)
+                if get_row:
+                    return row
+        return items
+
+    def createItemFromLabel(self, label):
+        # The label value lives in UserRole; display text is set via setItemLabel.
+        item = QtWidgets.QListWidgetItem()
+        item.setData(Qt.UserRole, label)
+        return item
+
+    def setItemLabel(self, item, label, color=None):
+        # Render the label text with an optional colored bullet prefix.
+        qlabel = QtWidgets.QLabel()
+        if color is None:
+            qlabel.setText(f"{label}")
+        else:
+            qlabel.setText('<font color="#{:02x}{:02x}{:02x}">●</font> {} '.format(*color, label))
+        qlabel.setAlignment(Qt.AlignBottom)
+
+        # item.setSizeHint(qlabel.sizeHint())
+        item.setSizeHint(QSize(25, 25))
+
+        self.setItemWidget(item, qlabel)

+ 29 - 0
PPOCRLabel/libs/ustr.py

@@ -0,0 +1,29 @@
+# Copyright (c) <2015-Present> Tzutalin
+# Copyright (C) 2013  MIT, Computer Science and Artificial Intelligence Laboratory. Bryan Russell, Antonio Torralba,
+# William T. Freeman. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+# associated documentation files (the "Software"), to deal in the Software without restriction, including without
+# limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
+# Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
+# the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
+# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+import sys
+from libs.constants import DEFAULT_ENCODING
+
+def ustr(x):
+    '''Return *x* as unicode text (py2/py3 compatibility helper).
+
+    On Python 2, str and QString values are decoded using DEFAULT_ENCODING;
+    on Python 3 the value is returned unchanged.
+    '''
+
+    if sys.version_info < (3, 0, 0):
+        from PyQt4.QtCore import QString
+        if type(x) == str:
+            return x.decode(DEFAULT_ENCODING)
+        if type(x) == QString:
+            #https://blog.csdn.net/friendan/article/details/51088476
+            #https://blog.csdn.net/xxm524/article/details/74937308
+            return unicode(x.toUtf8(), DEFAULT_ENCODING, 'ignore')
+        return x
+    else:
+        return x

+ 326 - 0
PPOCRLabel/libs/utils.py

@@ -0,0 +1,326 @@
+# Copyright (c) <2015-Present> Tzutalin
+# Copyright (C) 2013  MIT, Computer Science and Artificial Intelligence Laboratory. Bryan Russell, Antonio Torralba,
+# William T. Freeman. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+# associated documentation files (the "Software"), to deal in the Software without restriction, including without
+# limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
+# Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
+# the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
+# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+import hashlib
+import os
+import re
+import sys
+from math import sqrt
+
+import cv2
+import numpy as np
+from PyQt5.QtCore import QRegExp, QT_VERSION_STR
+from PyQt5.QtGui import QIcon, QRegExpValidator, QColor
+from PyQt5.QtWidgets import QPushButton, QAction, QMenu
+from libs.ustr import ustr
+
+__dir__ = os.path.dirname(os.path.abspath(__file__))  # directory containing this module
+__iconpath__ = os.path.abspath(os.path.join(__dir__, '../resources/icons'))
+
+
+def newIcon(icon, iconSize=None):
+    """Load resources/icons/<icon>.png as a QIcon, optionally scaled to iconSize."""
+    if iconSize is not None:
+        return QIcon(QIcon(__iconpath__ + "/" + icon + ".png").pixmap(iconSize, iconSize))
+    else:
+        return QIcon(__iconpath__ + "/" + icon + ".png")
+
+
+def newButton(text, icon=None, slot=None):
+    """Create a QPushButton with an optional icon and clicked-slot."""
+    b = QPushButton(text)
+    if icon is not None:
+        b.setIcon(newIcon(icon))
+    if slot is not None:
+        b.clicked.connect(slot)
+    return b
+
+
+def newAction(parent, text, slot=None, shortcut=None, icon=None,
+              tip=None, checkable=False, enabled=True, iconSize=None):
+    """Create a new action and assign callbacks, shortcuts, etc.
+
+    ``shortcut`` may be a single key sequence or a list/tuple of them;
+    ``tip`` is used for both the tooltip and the status-bar tip.
+    """
+    a = QAction(text, parent)
+    if icon is not None:
+        if iconSize is not None:
+            a.setIcon(newIcon(icon, iconSize))
+        else:
+            a.setIcon(newIcon(icon))
+    if shortcut is not None:
+        if isinstance(shortcut, (list, tuple)):
+            a.setShortcuts(shortcut)
+        else:
+            a.setShortcut(shortcut)
+    if tip is not None:
+        a.setToolTip(tip)
+        a.setStatusTip(tip)
+    if slot is not None:
+        a.triggered.connect(slot)
+    if checkable:
+        a.setCheckable(True)
+    a.setEnabled(enabled)
+    return a
+
+
+def addActions(widget, actions):
+    """Add actions/menus to *widget*; None entries insert separators."""
+    for action in actions:
+        if action is None:
+            widget.addSeparator()
+        elif isinstance(action, QMenu):
+            widget.addMenu(action)
+        else:
+            widget.addAction(action)
+
+
+def labelValidator():
+    # Validator rejecting labels that start with a space or a tab.
+    return QRegExpValidator(QRegExp(r'^[^ \t].+'), None)
+
+
+class struct(object):
+    """Ad-hoc attribute container: struct(a=1).a == 1."""
+
+    def __init__(self, **kwargs):
+        self.__dict__.update(kwargs)
+
+
+def distance(p):
+    # Euclidean length of a QPointF treated as a vector from the origin.
+    return sqrt(p.x() * p.x() + p.y() * p.y())
+
+
+def fmtShortcut(text):
+    # Render a "Mod+Key" shortcut string as HTML with both parts bold.
+    mod, key = text.split('+', 1)
+    return '<b>%s</b>+<b>%s</b>' % (mod, key)
+
+
+def generateColorByText(text):
+    """Deterministically map *text* to a semi-transparent QColor via SHA-256."""
+    s = ustr(text)
+    hashCode = int(hashlib.sha256(s.encode('utf-8')).hexdigest(), 16)
+    r = int((hashCode / 255) % 255)
+    g = int((hashCode / 65025) % 255)
+    b = int((hashCode / 16581375) % 255)
+    return QColor(r, g, b, 100)
+
+
+def have_qstring():
+    '''p3/qt5 get rid of QString wrapper as py3 has native unicode str type'''
+    # True only on Python 2 with Qt4 — both Python 3 and Qt5 drop QString.
+    return not (sys.version_info.major >= 3 or QT_VERSION_STR.startswith('5.'))
+
+
+def natural_sort(list, key=lambda s: s):
+    """
+    Sort the list into natural alphanumeric order, in place (returns None).
+
+    NOTE(review): the parameter name shadows the builtin ``list``.
+    """
+
+    def get_alphanum_key_func(key):
+        # Split into digit/non-digit runs so e.g. "img10" sorts after "img2".
+        convert = lambda text: int(text) if text.isdigit() else text
+        return lambda s: [convert(c) for c in re.split('([0-9]+)', key(s))]
+
+    sort_key = get_alphanum_key_func(key)
+    list.sort(key=sort_key)
+
+
+def get_rotate_crop_image(img, points):
+    """Perspective-crop the quadrilateral *points* out of *img*.
+
+    Assumes *points* is a 4x2 float32 array of corner coordinates —
+    cv2.getPerspectiveTransform requires float32; TODO confirm callers.
+    NOTE(review): mutates *points* in place when the vertices are
+    counterclockwise, and returns None if the transform raises.
+    """
+    # Use Green's theory to judge clockwise or counterclockwise
+    # author: biyanhua
+    d = 0.0
+    for index in range(-1, 3):
+        d += -0.5 * (points[index + 1][1] + points[index][1]) * (
+                points[index + 1][0] - points[index][0])
+    if d < 0:  # counterclockwise
+        tmp = np.array(points)
+        points[1], points[3] = tmp[3], tmp[1]
+
+    try:
+        img_crop_width = int(
+            max(
+                np.linalg.norm(points[0] - points[1]),
+                np.linalg.norm(points[2] - points[3])))
+        img_crop_height = int(
+            max(
+                np.linalg.norm(points[0] - points[3]),
+                np.linalg.norm(points[1] - points[2])))
+        pts_std = np.float32([[0, 0], [img_crop_width, 0],
+                              [img_crop_width, img_crop_height],
+                              [0, img_crop_height]])
+        M = cv2.getPerspectiveTransform(points, pts_std)
+        dst_img = cv2.warpPerspective(
+            img,
+            M, (img_crop_width, img_crop_height),
+            borderMode=cv2.BORDER_REPLICATE,
+            flags=cv2.INTER_CUBIC)
+        dst_img_height, dst_img_width = dst_img.shape[0:2]
+        # Rotate tall crops to landscape so recognition sees horizontal text.
+        if dst_img_height * 1.0 / dst_img_width >= 1.5:
+            dst_img = np.rot90(dst_img)
+        return dst_img
+    except Exception as e:
+        print(e)
+
+
+def boxPad(box, imgShape, pad : int) -> np.array:
+    """
+    Pad a box with [pad] pixels on each side.
+
+    Returns an int32 (4, 2) array clipped to the image bounds; imgShape
+    must be (h, w, channels). Corner order is assumed to be
+    top-left, top-right, bottom-right, bottom-left — TODO confirm callers.
+    """
+    box = np.array(box, dtype=np.int32)
+    box[0][0], box[0][1] = box[0][0] - pad, box[0][1] - pad
+    box[1][0], box[1][1] = box[1][0] + pad, box[1][1] - pad
+    box[2][0], box[2][1] = box[2][0] + pad, box[2][1] + pad
+    box[3][0], box[3][1] = box[3][0] - pad, box[3][1] + pad
+    h, w, _ = imgShape
+    box[:,0] = np.clip(box[:,0], 0, w)
+    box[:,1] = np.clip(box[:,1], 0, h)
+    return box
+
+
+def expand_list(merged, html_list):
+    '''
+    Fill blanks according to merged cells.
+
+    ``merged`` is (start_row, end_row, start_col, end_col); covered cells are
+    set to None and the top-left cell collects " colspan=N"/" rowspan=M"
+    text that convert_token later parses.
+    '''
+    sr, er, sc, ec = merged
+    for i in range(sr, er):
+        for j in range(sc, ec):
+            html_list[i][j] = None
+    html_list[sr][sc] = ''
+    if ec - sc > 1:
+        html_list[sr][sc] += " colspan={}".format(ec - sc)
+    if er - sr > 1:
+        html_list[sr][sc] += " rowspan={}".format(er - sr)
+    return html_list
+
+
+def convert_token(html_list):
+    '''
+    Convert raw html to label format (a flat token list wrapped in <tbody>).
+    '''
+    token_list = ["<tbody>"]
+    # final html list:
+    for row in html_list:
+        token_list.append("<tr>")
+        for col in row:
+            if col == None:
+                continue
+            elif col == 'td':
+                token_list.extend(["<td>", "</td>"])
+            else:
+                token_list.append("<td")
+                # NOTE(review): n[0] takes only the FIRST character of the
+                # span count, so colspan/rowspan >= 10 are emitted truncated
+                # (e.g. 12 becomes "1").
+                if 'colspan' in col:
+                    _, n = col.split('colspan=')
+                    token_list.append(" colspan=\"{}\"".format(n[0]))
+                if 'rowspan' in col:
+                    _, n = col.split('rowspan=')
+                    token_list.append(" rowspan=\"{}\"".format(n[0]))
+                token_list.extend([">", "</td>"])
+        token_list.append("</tr>")
+    token_list.append("</tbody>")
+
+    return token_list
+
+
+def rebuild_html_from_ppstructure_label(label_info):
+        """Reassemble a full HTML table string from a PP-Structure label dict.
+
+        Expects label_info['html']['structure']['tokens'] (structure tags) and
+        label_info['html']['cells'] (per-cell token lists); cell contents are
+        HTML-escaped character-wise and spliced after each '<td>'/'>' tag.
+        """
+        from html import escape
+        html_code = label_info['html']['structure']['tokens'].copy()
+        to_insert = [
+            i for i, tag in enumerate(html_code) if tag in ('<td>', '>')
+        ]
+        # Insert in reverse so earlier indices stay valid while splicing.
+        for i, cell in zip(to_insert[::-1], label_info['html']['cells'][::-1]):
+            if cell['tokens']:
+                cell = [
+                    escape(token) if len(token) == 1 else token
+                    for token in cell['tokens']
+                ]
+                cell = ''.join(cell)
+                html_code.insert(i + 1, cell)
+        html_code = ''.join(html_code)
+        html_code = '<html><body><table>{}</table></body></html>'.format(
+            html_code)
+        return html_code
+
+
+def stepsInfo(lang='en'):
+    """Return the usage-steps help text, in Chinese ('ch') or English."""
+    if lang == 'ch':
+        msg = "1. 安装与运行:使用上述命令安装与运行程序。\n" \
+              "2. 打开文件夹:在菜单栏点击 “文件” - 打开目录 选择待标记图片的文件夹.\n" \
+              "3. 自动标注:点击 ”自动标注“,使用PPOCR超轻量模型对图片文件名前图片状态为 “X” 的图片进行自动标注。\n" \
+              "4. 手动标注:点击 “矩形标注”(推荐直接在英文模式下点击键盘中的 “W”),用户可对当前图片中模型未检出的部分进行手动" \
+              "绘制标记框。点击键盘P,则使用四点标注模式(或点击“编辑” - “四点标注”),用户依次点击4个点后,双击左键表示标注完成。\n" \
+              "5. 标记框绘制完成后,用户点击 “确认”,检测框会先被预分配一个 “待识别” 标签。\n" \
+              "6. 重新识别:将图片中的所有检测画绘制/调整完成后,点击 “重新识别”,PPOCR模型会对当前图片中的**所有检测框**重新识别。\n" \
+              "7. 内容更改:双击识别结果,对不准确的识别结果进行手动更改。\n" \
+              "8. 保存:点击 “保存”,图片状态切换为 “√”,跳转至下一张。\n" \
+              "9. 删除:点击 “删除图像”,图片将会被删除至回收站。\n" \
+              "10. 标注结果:关闭应用程序或切换文件路径后,手动保存过的标签将会被存放在所打开图片文件夹下的" \
+              "*Label.txt*中。在菜单栏点击 “PaddleOCR” - 保存识别结果后,会将此类图片的识别训练数据保存在*crop_img*文件夹下," \
+              "识别标签保存在*rec_gt.txt*中。\n"
+
+    else:
+        # NOTE(review): the step-3 line below is missing a trailing "\n",
+        # so "4. Create Box:" is glued onto the end of step 3 when displayed.
+        msg = "1. Build and launch using the instructions above.\n" \
+              "2. Click 'Open Dir' in Menu/File to select the folder of the picture.\n" \
+              "3. Click 'Auto recognition', use PPOCR model to automatically annotate images which marked with 'X' before the file name." \
+              "4. Create Box:\n" \
+              "4.1 Click 'Create RectBox' or press 'W' in English keyboard mode to draw a new rectangle detection box. Click and release left mouse to select a region to annotate the text area.\n" \
+              "4.2 Press 'P' to enter four-point labeling mode which enables you to create any four-point shape by clicking four points with the left mouse button in succession and DOUBLE CLICK the left mouse as the signal of labeling completion.\n" \
+              "5. After the marking frame is drawn, the user clicks 'OK', and the detection frame will be pre-assigned a TEMPORARY label.\n" \
+              "6. Click re-Recognition, model will rewrite ALL recognition results in ALL detection box.\n" \
+              "7. Double click the result in 'recognition result' list to manually change inaccurate recognition results.\n" \
+              "8. Click 'Save', the image status will switch to '√',then the program automatically jump to the next.\n" \
+              "9. Click 'Delete Image' and the image will be deleted to the recycle bin.\n" \
+              "10. Labeling result: After closing the application or switching the file path, the manually saved label will be stored in *Label.txt* under the opened picture folder.\n" \
+              "    Click PaddleOCR-Save Recognition Results in the menu bar, the recognition training data of such pictures will be saved in the *crop_img* folder, and the recognition label will be saved in *rec_gt.txt*.\n"
+
+    return msg
+
+
+def keysInfo(lang='en'):
+    """Return the keyboard-shortcut help text, in Chinese ('ch') or English."""
+    if lang == 'ch':
+        msg = "快捷键\t\t\t说明\n" \
+              "———————————————————————\n" \
+              "Ctrl + shift + R\t\t对当前图片的所有标记重新识别\n" \
+              "W\t\t\t新建矩形框\n" \
+              "Q\t\t\t新建四点框\n" \
+              "Ctrl + E\t\t编辑所选框标签\n" \
+              "Ctrl + R\t\t重新识别所选标记\n" \
+              "Ctrl + C\t\t复制并粘贴选中的标记框\n" \
+              "Ctrl + 鼠标左键\t\t多选标记框\n" \
+              "Backspace\t\t删除所选框\n" \
+              "Ctrl + V\t\t确认本张图片标记\n" \
+              "Ctrl + Shift + d\t删除本张图片\n" \
+              "D\t\t\t下一张图片\n" \
+              "A\t\t\t上一张图片\n" \
+              "Ctrl++\t\t\t缩小\n" \
+              "Ctrl--\t\t\t放大\n" \
+              "↑→↓←\t\t\t移动标记框\n" \
+              "———————————————————————\n" \
+              "注:Mac用户Command键替换上述Ctrl键"
+
+    else:
+        # NOTE(review): the "Move selected box" line below is missing a
+        # trailing "\n", so the final separator is glued onto that line.
+        msg = "Shortcut Keys\t\tDescription\n" \
+              "———————————————————————\n" \
+              "Ctrl + shift + R\t\tRe-recognize all the labels\n" \
+              "\t\t\tof the current image\n" \
+              "\n" \
+              "W\t\t\tCreate a rect box\n" \
+              "Q\t\t\tCreate a four-points box\n" \
+              "Ctrl + E\t\tEdit label of the selected box\n" \
+              "Ctrl + R\t\tRe-recognize the selected box\n" \
+              "Ctrl + C\t\tCopy and paste the selected\n" \
+              "\t\t\tbox\n" \
+              "\n" \
+              "Ctrl + Left Mouse\tMulti select the label\n" \
+              "Button\t\t\tbox\n" \
+              "\n" \
+              "Backspace\t\tDelete the selected box\n" \
+              "Ctrl + V\t\tCheck image\n" \
+              "Ctrl + Shift + d\tDelete image\n" \
+              "D\t\t\tNext image\n" \
+              "A\t\t\tPrevious image\n" \
+              "Ctrl++\t\t\tZoom in\n" \
+              "Ctrl--\t\t\tZoom out\n" \
+              "↑→↓←\t\t\tMove selected box" \
+              "———————————————————————\n" \
+              "Notice:For Mac users, use the 'Command' key instead of the 'Ctrl' key"
+
+    return msg

+ 38 - 0
PPOCRLabel/libs/zoomWidget.py

@@ -0,0 +1,38 @@
+# Copyright (c) <2015-Present> Tzutalin
+# Copyright (C) 2013  MIT, Computer Science and Artificial Intelligence Laboratory. Bryan Russell, Antonio Torralba,
+# William T. Freeman. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+# associated documentation files (the "Software"), to deal in the Software without restriction, including without
+# limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
+# Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
+# the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
+# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+try:
+    from PyQt5.QtGui import *
+    from PyQt5.QtCore import *
+    from PyQt5.QtWidgets import *
+except ImportError:
+    from PyQt4.QtGui import *
+    from PyQt4.QtCore import *
+
+
+class ZoomWidget(QSpinBox):
+
+    def __init__(self, value=100):
+        super(ZoomWidget, self).__init__()
+        self.setButtonSymbols(QAbstractSpinBox.NoButtons)
+        self.setRange(1, 500)
+        self.setSuffix(' %')
+        self.setValue(value)
+        self.setToolTip(u'Zoom Level')
+        self.setStatusTip(self.toolTip())
+        self.setAlignment(Qt.AlignCenter)
+
+    def minimumSizeHint(self):
+        height = super(ZoomWidget, self).minimumSizeHint().height()
+        fm = QFontMetrics(self.font())
+        width = fm.width(str(self.maximum()))
+        return QSize(width, height)

+ 3 - 0
PPOCRLabel/requirements.txt

@@ -0,0 +1,3 @@
+pyqt5
+paddleocr
+xlrd==1.2.0

+ 41 - 0
PPOCRLabel/resources.qrc

@@ -0,0 +1,41 @@
+<!DOCTYPE RCC><RCC version="1.0">
+<qresource>
+
+<file alias="help">resources/icons/help.png</file>
+<file alias="app">resources/icons/app.png</file>
+<file alias="Auto">resources/icons/Auto.png</file>
+<file alias="reRec">resources/icons/reRec.png</file>
+<file alias="expert">resources/icons/expert2.png</file>
+<file alias="done">resources/icons/done.png</file>
+<file alias="file">resources/icons/file.png</file>
+<file alias="labels">resources/icons/labels.png</file>
+<file alias="new">resources/icons/objects.png</file>
+<file alias="close">resources/icons/close.png</file>
+<file alias="fit-width">resources/icons/fit-width.png</file>
+<file alias="fit-window">resources/icons/fit-window.png</file>
+<file alias="undo">resources/icons/undo.png</file>
+<file alias="hide">resources/icons/eye.png</file>
+<file alias="quit">resources/icons/quit.png</file>
+<file alias="copy">resources/icons/copy.png</file>
+<file alias="edit">resources/icons/edit.png</file>
+<file alias="rotateLeft">resources/icons/rotateLeft.png</file>
+<file alias="rotateRight">resources/icons/rotateRight.png</file>
+<file alias="open">resources/icons/open.png</file>
+<file alias="save">resources/icons/save.png</file>
+<file alias="format_voc">resources/icons/format_voc.png</file>
+<file alias="format_yolo">resources/icons/format_yolo.png</file>
+<file alias="save-as">resources/icons/save-as.png</file>
+<file alias="color">resources/icons/color.png</file>
+<file alias="color_line">resources/icons/color_line.png</file>
+<file alias="zoom">resources/icons/zoom.png</file>
+<file alias="zoom-in">resources/icons/zoom-in.png</file>
+<file alias="zoom-out">resources/icons/zoom-out.png</file>
+<file alias="delete">resources/icons/cancel.png</file>
+<file alias="next">resources/icons/next.png</file>
+<file alias="prev">resources/icons/prev.png</file>
+<file alias="resetall">resources/icons/resetall.png</file>
+<file alias="verify">resources/icons/verify.png</file>
+<file alias="strings">resources/strings/strings-en.properties</file>
+<file alias="strings-zh-CN">resources/strings/strings-zh-CN.properties</file>
+</qresource>
+</RCC>

BIN
PPOCRLabel/resources/icons/Auto.png


BIN
PPOCRLabel/resources/icons/app.icns


BIN
PPOCRLabel/resources/icons/app.png


A különbségek nem kerülnek megjelenítésre, a fájl túl nagy
+ 27 - 0
PPOCRLabel/resources/icons/app.svg


BIN
PPOCRLabel/resources/icons/cancel.png


BIN
PPOCRLabel/resources/icons/close.png


BIN
PPOCRLabel/resources/icons/color.png


BIN
PPOCRLabel/resources/icons/color_line.png


BIN
PPOCRLabel/resources/icons/copy.png


BIN
PPOCRLabel/resources/icons/delete.png


BIN
PPOCRLabel/resources/icons/done.png


A különbségek nem kerülnek megjelenítésre, a fájl túl nagy
+ 400 - 0
PPOCRLabel/resources/icons/done.svg


BIN
PPOCRLabel/resources/icons/edit.png


BIN
PPOCRLabel/resources/icons/expert1.png


BIN
PPOCRLabel/resources/icons/expert2.png


BIN
PPOCRLabel/resources/icons/eye.png


BIN
PPOCRLabel/resources/icons/feBlend-icon.png


BIN
PPOCRLabel/resources/icons/file.png


BIN
PPOCRLabel/resources/icons/fit-width.png


BIN
PPOCRLabel/resources/icons/fit-window.png


BIN
PPOCRLabel/resources/icons/fit.png


BIN
PPOCRLabel/resources/icons/format_createml.png


BIN
PPOCRLabel/resources/icons/format_voc.png


BIN
PPOCRLabel/resources/icons/format_yolo.png


BIN
PPOCRLabel/resources/icons/help.png


BIN
PPOCRLabel/resources/icons/labels.png


A különbségek nem kerülnek megjelenítésre, a fájl túl nagy
+ 819 - 0
PPOCRLabel/resources/icons/labels.svg


BIN
PPOCRLabel/resources/icons/lock.png


BIN
PPOCRLabel/resources/icons/new.png


BIN
PPOCRLabel/resources/icons/next.png


BIN
PPOCRLabel/resources/icons/objects.png


BIN
PPOCRLabel/resources/icons/open.png


A különbségek nem kerülnek megjelenítésre, a fájl túl nagy
+ 577 - 0
PPOCRLabel/resources/icons/open.svg


BIN
PPOCRLabel/resources/icons/prev.png


BIN
PPOCRLabel/resources/icons/quit.png


BIN
PPOCRLabel/resources/icons/reRec.png


BIN
PPOCRLabel/resources/icons/resetall.png


BIN
PPOCRLabel/resources/icons/rotateLeft.png


BIN
PPOCRLabel/resources/icons/rotateRight.png


BIN
PPOCRLabel/resources/icons/save-as.png


A különbségek nem kerülnek megjelenítésre, a fájl túl nagy
+ 1358 - 0
PPOCRLabel/resources/icons/save-as.svg


BIN
PPOCRLabel/resources/icons/save.png


+ 679 - 0
PPOCRLabel/resources/icons/save.svg

@@ -0,0 +1,679 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 20010904//EN"
+"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd">
+<!-- Created with Sodipodi ("http://www.sodipodi.com/") -->
+<svg
+   width="48pt"
+   height="48pt"
+   viewBox="0 0 48 48"
+   style="overflow:visible;enable-background:new 0 0 48 48"
+   xml:space="preserve"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:xap="http://ns.adobe.com/xap/1.0/"
+   xmlns:xapGImg="http://ns.adobe.com/xap/1.0/g/img/"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:xml="http://www.w3.org/XML/1998/namespace"
+   xmlns:xapMM="http://ns.adobe.com/xap/1.0/mm/"
+   xmlns:pdf="http://ns.adobe.com/pdf/1.3/"
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:a="http://ns.adobe.com/AdobeSVGViewerExtensions/3.0/"
+   xmlns:x="adobe:ns:meta/"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:xlink="http://www.w3.org/1999/xlink"
+   id="svg589"
+   sodipodi:version="0.32"
+   sodipodi:docname="/home/david/Desktop/temp/devices/gnome-dev-floppy.svg"
+   sodipodi:docbase="/home/david/Desktop/temp/devices/">
+  <defs
+     id="defs677" />
+  <sodipodi:namedview
+     id="base" />
+  <metadata
+     id="metadata590">
+    <xpacket>begin='' id='W5M0MpCehiHzreSzNTczkc9d'    </xpacket>
+    <x:xmpmeta
+       x:xmptk="XMP toolkit 3.0-29, framework 1.6">
+      <rdf:RDF>
+        <rdf:Description
+           rdf:about="uuid:9dfcc10e-f4e2-4cbf-91b0-8deea2f1a998">
+          <pdf:Producer>
+Adobe PDF library 5.00</pdf:Producer>
+        </rdf:Description>
+        <rdf:Description
+           rdf:about="uuid:9dfcc10e-f4e2-4cbf-91b0-8deea2f1a998" />
+        <rdf:Description
+           rdf:about="uuid:9dfcc10e-f4e2-4cbf-91b0-8deea2f1a998" />
+        <rdf:Description
+           rdf:about="uuid:9dfcc10e-f4e2-4cbf-91b0-8deea2f1a998">
+          <xap:CreateDate>
+2004-02-04T02:08:51+02:00</xap:CreateDate>
+          <xap:ModifyDate>
+2004-03-29T09:20:16Z</xap:ModifyDate>
+          <xap:CreatorTool>
+Adobe Illustrator 10.0</xap:CreatorTool>
+          <xap:MetadataDate>
+2004-02-29T14:54:28+01:00</xap:MetadataDate>
+          <xap:Thumbnails>
+            <rdf:Alt>
+              <rdf:li
+                 rdf:parseType="Resource">
+                <xapGImg:format>
+JPEG</xapGImg:format>
+                <xapGImg:width>
+256</xapGImg:width>
+                <xapGImg:height>
+256</xapGImg:height>
+                <xapGImg:image>
+/9j/4AAQSkZJRgABAgEASABIAAD/7QAsUGhvdG9zaG9wIDMuMAA4QklNA+0AAAAAABAASAAAAAEA
+AQBIAAAAAQAB/+4ADkFkb2JlAGTAAAAAAf/bAIQABgQEBAUEBgUFBgkGBQYJCwgGBggLDAoKCwoK
+DBAMDAwMDAwQDA4PEA8ODBMTFBQTExwbGxscHx8fHx8fHx8fHwEHBwcNDA0YEBAYGhURFRofHx8f
+Hx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8f/8AAEQgBAAEAAwER
+AAIRAQMRAf/EAaIAAAAHAQEBAQEAAAAAAAAAAAQFAwIGAQAHCAkKCwEAAgIDAQEBAQEAAAAAAAAA
+AQACAwQFBgcICQoLEAACAQMDAgQCBgcDBAIGAnMBAgMRBAAFIRIxQVEGE2EicYEUMpGhBxWxQiPB
+UtHhMxZi8CRygvElQzRTkqKyY3PCNUQnk6OzNhdUZHTD0uIIJoMJChgZhJRFRqS0VtNVKBry4/PE
+1OT0ZXWFlaW1xdXl9WZ2hpamtsbW5vY3R1dnd4eXp7fH1+f3OEhYaHiImKi4yNjo+Ck5SVlpeYmZ
+qbnJ2en5KjpKWmp6ipqqusra6voRAAICAQIDBQUEBQYECAMDbQEAAhEDBCESMUEFURNhIgZxgZEy
+obHwFMHR4SNCFVJicvEzJDRDghaSUyWiY7LCB3PSNeJEgxdUkwgJChgZJjZFGidkdFU38qOzwygp
+0+PzhJSktMTU5PRldYWVpbXF1eX1RlZmdoaWprbG1ub2R1dnd4eXp7fH1+f3OEhYaHiImKi4yNjo
++DlJWWl5iZmpucnZ6fkqOkpaanqKmqq6ytrq+v/aAAwDAQACEQMRAD8A9U4q7FXYq7FXYq7FXYq7
+FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7F
+XYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FX
+Yq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXY
+q7FXzd+b/wDzlWum3k+h+QxFc3EJMdzrkoEkKuNiLZPsyU/nb4fAEb50vZ/YXEBPLsP5v62meXue
+A3v5mfmprl080vmLVriXdjHBcTIi17rFCVRfoXOghocEBQhH5NJmepUf8Tfmj/1dtb/6SLv/AJqy
+f5fD/Nj8gjxPN3+JvzR/6u2t/wDSRd/81Y/l8P8ANj8gviebv8Tfmj/1dtb/AOki7/5qx/L4f5sf
+kF8Tzd/ib80f+rtrf/SRd/8ANWP5fD/Nj8gviebv8Tfmj/1dtb/6SLv/AJqx/L4f5sfkF8Tzd/ib
+80f+rtrf/SRd/wDNWP5fD/Nj8gviebv8Tfmj/wBXbW/+ki7/AOasfy+H+bH5BfE83f4m/NH/AKu2
+t/8ASRd/81Y/l8P82PyC+J5u/wATfmj/ANXbW/8ApIu/+asfy+H+bH5BfE83f4m/NH/q7a3/ANJF
+3/zVj+Xw/wA2PyC+J5u/xN+aP/V21v8A6SLv/mrH8vh/mx+QXxPN3+JvzR/6u2t/9JF3/wA1Y/l8
+P82PyC+J5u/xN+aP/V21v/pIu/8AmrH8vh/mx+QXxPN3+JvzR/6u2t/9JF3/AM1Y/l8P82PyC+J5
+u/xN+aP/AFdtb/6SLv8A5qx/L4f5sfkF8Tzd/ib80f8Aq7a3/wBJF3/zVj+Xw/zY/IL4nm7/ABN+
+aP8A1dtb/wCki7/5qx/L4f5sfkF8Tzd/ib80f+rtrf8A0kXf/NWP5fD/ADY/IL4nm7/E35o/9XbW
+/wDpIu/+asfy+H+bH5BfE82j5t/M+Aes2ta3EI/i9U3N2vGnfly2x/LYT/DH5BePzZ15C/5yh/Mb
+y7cxRaxcHzDpQIEsF2f9IC9zHc058v8AX5D9ea/VdiYcg9I4JeXL5NkchD688jeefLvnby/DrmhT
++rayEpLE4CywygAtFKtTxYV+RG4qDnH6nTTwT4JjdyIytkGY6XYq7FXYq7FXYq7FXjX/ADlH+YV1
+5W8hppunymHU/MMj2qSqaMltGoNwynxPNE/2WbrsPSDLl4pfTDf49GvJKg+VPy+8lP5ivecqM9rG
+4jWFaqZpTvw57cVUULGvcfMdtYFk7Ac3Ua3VHGAI/XLk+jNK/LfSLS0SK4JYqDSGCkUCV3PBVAPX
+vtXwzWT7TlfoAA+11f5Xi3mTIo608meV/wBL2lnLbSSLcc/92sB8Kk70IOU5+0s4xSmCPT5NuDRY
+pZBEjmyu2/KnydcFgliF4ip5TT/wY5ov5f1f877B+p2/8kaf+b9pVv8AlT3lL/lkT/kdcf1w/wAv
+az+d9kf1I/kjTfzftLR/J/yl/wAsif8AI65/rj/L2s/nfZH9S/yRpv5v2lafyg8p/wDLKn/I65/r
+h/l3Wfzvsj+pf5J03837S0fyh8p/8sqf8jrn+uP8u6z+d9kf1L/JOm/m/aWj+UXlP/llj/5HXP8A
+XH+XdZ/O+yP6l/knTfzftLX/ACqPyn/yzR/8jrn+uH+XNb/O+yP6l/knTd32lr/lUflX/lmj/wCR
+1z/XB/Lmt/nfZH9S/wAk6bu+0u/5VD5W/wCWaP8A5HXP9cf5d1n877I/qX+SdN/N+0u/5VB5Y/5Z
+ov8Akdc/1x/l3Wfzvsj+pf5J03837S7/AJU/5a/5Zov+R1z/AFx/l3Wfzvsj+pf5J03837S7/lT3
+lv8A5Zov+R1z/XB/L2s/nfZH9S/yRpv5v2l3/KnfLv8AyzRf8jrn+uP8vaz+d9kf1L/JGm/m/aXf
+8qc8v/8ALNF/yOuf64/y9rP532R/Uv8AJGm/m/aXf8qb0H/lmh/5HXP9cf5f1n877I/qX+SNN/N+
+0u/5U1oP/LND/wAjrn+uD+X9Z/O+wfqT/JGn/m/aVk/5P6BDBJM1rEVjUswE1xWg8KnH/RBq/wCd
+9g/Uv8kaf+b9pYp5i8oeXLOGBoLQo0j8SRJIe3+Uxza9ldq6jNKQnLkO4Ov1/Z2HGAYj7SkreXdK
+IoEZD/Mrmo+Vaj8M3I1eR1fgRee/mD+W8NxE91ZIPrhq0UygL6rbt6ctNubfssevy6XwmJjbYjo5
+ml1csUhGRuB+xJP+cfvzGvfJvny1T1T+iNXdLTUbcn4SWNIpPZkduvgTmq7Z0gy4Sf4obj9L0WOV
+F93xSJLGsiGqOAyn2O+cK5K7FXYq7FXYq7FXYq+R/wDnM65lbzjoFsT+6i05pEG/2pJ2VvbpGM6/
+2cH7uR/pfocfNzb/ACCs7caXZzBAJPQuJS3fn9ZMXL/gNs2uvkRirvl+h0GffUm+kfx972EnNKyU
+LXfzNpZ/4y/8QOOo/wAWn8PvbdN/fRei6SPjl/1R+vOWDvyjyMsQsIwoWkYVWEYULSMKFhGSVrFV
+wOBVwOBVwOBK4HFVwOBK4HAq4HAlcDgVQ1I/7jrn/jE36siUh5X5uH+j23tL/DN52F9U/c6vtX6Q
+x0nOidEgNZodNmBAP2aE9jzG4+jL9P8AWGrL9JfNGuSmDzPqEsICGK9maNRsF4ykgCnhmRKArhel
+08iccT5B+iHk+4afQbcsalBx+8Bv+Ns8wdknWKuxV2KuxV2KuxV8hf8AOZn/ACneif8AbLH/AFES
+52Hs7/dS/rfoDj5uaO/IUf7gbI/8ulx/1GnNlr/7v/O/Q6DN/jEv6v6nqxOahksshXzJpv8Az0/4
+gcjqf8Xn8PvbdL/exei6SPjk/wBUfrzlw9AmBGTYrSMKrCMKFpGFVhGFC0jChYRklaxVcDgVcDgV
+cDgSuBxVcDgSuBwKuBwJUdRP+4+5/wCMTfqyJSHlvmwf6Lb+0n8M3XYX1S9zq+1fpDwzzXoX1nzD
+eT8a82U1/wBgBm1y6fikS6qGfhFJt5T076lomoJSnOSM/dTMzQYuCTj6rJxh4h5k/wCUi1T/AJjJ
+/wDk62bM83fab+6j/VH3P0N8jf8AHBj+Y/5NpnlztGQYq7FXYq7FXYq7FXyF/wA5mf8AKd6J/wBs
+sf8AURLnYezv91L+t+gOPm5ph+Q4/wCddsj/AMutx/1Gtmx1/wBH+d+h0Gb/ABiX9X9T1InNUl2n
+b+Y9P/56f8QOQ1X+Lz+H3t+l/vYvRtJH7yT/AFR+vOWDv0xIySFhGSQtIwqsIwoWkYVWEYULSMKF
+hGSVrFVwOBVwOBVwOBK4HFVwOBK4HAqjf/8AHPuf+MTfqyEkh5j5rH+iQ/65/Uc3XYf1y9zre1Pp
+DDpbGzkcu8QZ26k50weeMQoXVvDDZyrEgQNQkD5jLMX1BhMbPmrzN/ykmrf8xlx/ydbMp6XTf3cf
+6o+5+hnkb/jgx/Mf8m0zy52bIMVdirsVdirsVdir5C/5zM/5TvRP+2WP+oiXOw9nf7qX9b9AcfNz
+TL8iR/zrFif+Xa4/6jWzYa76f879Doc/9/L3fqenE5rEL9KFfMNh85P+IHK9X/cT+H3uRpP72L0f
+SR+8k/1f45yzv0xIwqtIwoWEZJC0jCqwjChaRhVYRhQtIwoWEZJWsVXA4FXA4FXA4ErgcVXA4EqV
+9/vBc/8AGJv1ZCXJIea+ah/ocfsx/wCInNx2H9cvcHW9qfQGIE507z6HvN7dx8v1jLMfNhPk+Z/N
+H/KTav8A8xtx/wAnWzJek0/93H+qPufoX5G/44MfzH/JtM8vdmyDFXYq7FXYq7FXYq+Qv+czP+U7
+0T/tlj/qIlzsPZ3+6l/W/QHHzc0z/Isf86nYH/l3uP8AqNbM/W8v879Doc/9/L3fqelk5rkK2j76
+/ZfN/wDiBynWf3Evx1cjSf3oej6UP3r/AOr/ABzl3fpliq0jCq0jChYRkkLSMKrCMKFpGFVhGFC0
+jChYRklaxVcDgVcDgVcDgSuBxVTvP94rn/jE36shPkyDzjzUP9BX5n/iJzbdifXL4Ou7U+gfFhhO
+dS86pXG8TD5frycebGXJ8z+av+Un1j/mNuf+TrZkh6TT/wB3H+qPufoV5G/44MfzH/JtM8vdmyDF
+XYq7FXYq7FXYq+Qv+czP+U70T/tlj/qIlzsPZ3+6l/W/QHHzc01/I0f86fp5/wCKLj/qNbM7W8v8
+79Dos/8AfH3fqejE5gMEVoe+u2fzf/iByjW/3Evx1cnR/wB4Ho+l/wB4/wAv45y7v0xxV2KrSMKr
+SMKFhGSQtIwqsIwoWkYVWEYULSMKFhGSVrFVwOBVwOBVwOBKy6P+h3H/ABib9WQnySHnnmkf6APY
+t/xE5texPrPwdf2n9A+LByc6t5xTfcEZIIL5p82f8pTrP/Mdc/8AJ5syRyek0/8Adx9w+5+hPkb/
+AI4MfzH/ACbTPL3ZsgxV2KuxV2KuxV2KvkL/AJzM/wCU70T/ALZY/wCoiXOw9nf7qX9b9AcfNzTf
+8jx/zpWnH/im4/6jHzO1n6f0Oi1H98fd+p6ETmE1o3y/vrdr82/4gcxtd/cycrR/3gej6b/eP8v4
+5y7v0wxV2KuxVaRhVaRhQsIySFpGFVhGFC0jCqwjChaRhQsIyStYquBwKuBwKtuT/olx/wAYm/Vk
+J8mUXn/mkf7jj/sv+InNp2L/AHh+Dr+0/oHxYGTnWvONDdgMUPmnzb/yletf8x9z/wAnmzIjyelw
+f3cfcH6EeRv+ODH8x/ybTPMHZMgxV2KuxV2KuxV2KvkL/nMz/lO9E/7ZY/6iJc7D2d/upf1v0Bx8
+3NOPyRH/ADo2mn/im4/6jHzN1fP4/odHqP70+5n5OYjUmHlzfWrb5t/xA5ia7+5k5Wi/vA9H07+8
+f5fxzmHfo/FXYq7FXYqtIwqtIwoWEZJC0jCqwjChaRhVYRhQtIwoWEZJWsVXA4Fan/3luP8AjE36
+shk5MosD80D/AHGt8m/4gc2XY394fg4Haf0fN56TnXvNLod5VHz/AFYJclD5p83/APKWa3/zH3X/
+ACebMiPIPS4P7uPuD9CPI3/HBj+Y/wCTaZ5g7JkGKuxV2KuxV2KuxV8hf85mf8p3on/bLH/URLnY
+ezv91L+t+gOPm5p1+SYp5B0w/wDFVx/1GPmZq/q+P6HR6n+9PuZ0TmM0pr5Y31iD5t/xA5h6/wDu
+i5mi/vA9G0/7b/LOYd8jsVdirsVdirsVWkYVWkYULCMkhaRhVYRhQtIwqsIwoWkYULCMkrWKul/3
+mn/4xt+rK8nJMebB/NA/3Fyf6r/8QObHsb+8Pw+9we0/o+bzgnOxeZVLXe4QfP8AUcjPkmPN81ec
+f+Uu1z/toXX/ACebL4fSHpcH0R9wfoP5G/44MfzH/JtM8xdkyDFXYq7FXYq7FXYq+Qv+czP+U70T
+/tlj/qIlzsPZ3+6l/W/QHHzc08/JUf8AIPNLP/Fdx/1GSZl6r6z7/wBDpNT/AHh9zNicocdOPKu+
+rQ/M/wDEGzB7Q/ui5uh+sPRbEhXappt3zmXfI3mn8w+/FXeon8w+/FWvUj/mH3jFXepH/MPvGKu9
+WP8AnH3jFXepF/Ov3jFVpeP+dfvGG1Wl4/51+8YbQtLJ/Mv3jDa0tJT+ZfvGHiCKWnj/ADL/AMEP
+64eILS08f5l/4If1w8QRS0qP5l/4If1w8YWlpUfzL/wS/wBceMIorCn+Uv8AwS/1w8YXhKyai289
+WXeNgPiB3I+eRnIEJiGFeZx/uKm/1H/4gc2PY/8AefL73B7S+j5vNCc7N5dWsN7uMfP/AIichl+k
+so83zX5z/wCUw13/ALaF1/yffL8f0j3PS4foj7g/QbyN/wAcGP5j/k2meYuyZBirsVdirsVdirsV
+fIX/ADmZ/wAp3on/AGyx/wBREudh7O/3Uv636A4+bmnv5Lj/AJBxpZ/yLj/qMkzK1X1n3/odJqv7
+w+5mZOVOOmvly5jtrwTyAlIzuFpXdSO9Mw9bjM4cI6uVpJiMrLK/8T2H++5fuX/mrNL/ACdk7x+P
+g7b85DuLX+JbD/fcv3L/AM1Y/wAnZO8fj4L+ch3Fr/Elj/vuX7l/5qx/k7J3j8fBfzkO4tf4jsf9
+9y/cv/NWP8nZO8fj4L+ch3Fo+YrH/fcv3L/zVj/J2TvH4+C/nIdxW/4hsv5JPuX/AJqx/k7J3j8f
+BfzkO4tfp+y/kk+5f+asf5Oyd4/HwX85DuLX6es/5JPuX/mrH+TsnePx8F/OQ7i1+nbP+ST7l/5q
+x/k7J3j8fBfzkO4tfpy0/kk+5f64/wAnZO8fj4L+ch3Fr9N2n8kn3L/XH+TsnePx8F/OQ7i0datf
+5JPuX+uP8nZO8fj4L+ch3Fb+mLX+R/uH9cf5Oyd4/HwX85DuLX6Xtv5H+4f1x/k7J3j8fBfzkO4t
+fpa2/lf7h/XH+TsnePx8F/OQ7i0dVt/5X+4f1x/k7J3j8fBfzkO4tHVLf+V/uH9cf5Oyd4/HwX85
+DuKW6/dxz6XcKgYFY5DvT+Q++bDs7TSx5Bdbkfe4etzicNvN5sTnWPOojTN7+If63/ETleb6Cyhz
+fNnnX/lMte/7aN3/AMn3y/H9I9z02H6B7g/QXyN/xwY/mP8Ak2meYuxZBirsVdirsVdirsVfIX/O
+Zn/Kd6J/2yx/1ES52Hs7/dS/rfoDj5uaf/kyP+QZ6Uf8m4/6jJMytT/eH8dHS6r6z7mXk5W4rSyy
+JXgxWvWhIxMQVEiOTjdXH+/X/wCCOPAO5eM9603Vz/v1/wDgjh4I9y8Z71pu7n/fz/8ABHDwR7kc
+Z71pu7r/AH8//BH+uHw49y8cu9aby6/39J/wR/rh8OPcEccu9ab27/3/ACf8E39cPhx7gjjl3rTe
+3f8Av+T/AINv64fDj3BfEl3rTfXn+/5P+Db+uHw49wR4ku8rTfXv/LRJ/wAG39cPhR7gviS7ytN/
+e/8ALRJ/wbf1w+FHuCPEl3ladQvv+WiX/g2/rh8KPcEeJLvK06hff8tMv/Bt/XD4Ue4L4ku8rTqN
+/wD8tMv/AAbf1w+FDuCPEl3ladRv/wDlpl/4Nv64fBh3D5L4ku8rTqWof8tUv/Bt/XD4MO4fJHiy
+7ytOp6h/y1Tf8jG/rh8GHcPkjxZd5aOp6j/y1Tf8jG/rh8GHcPkviy7ypvqN+6lWuZWVhRlLsQQe
+xFcIwwHQfJByS7yhScta0Xo++pQj/W/4icq1H0Fnj+p82+d/+Uz1/wD7aN3/AMn3y7F9I9z02H6B
+7g/QTyN/xwY/mP8Ak2meZOxZBirsVdirsVdirsVfIX/OZn/Kd6J/2yx/1ES52Hs7/dS/rfoDj5ub
+IfybH/ILtJPtcf8AUZLmTqP70/jo6XVfWWVE5FxFpOFVpOFDCLz82fLtrdz2slteGSCRonKpFQlC
+VNKyDbbLRjLLgKgfzh8tf8s17/wEX/VXD4ZXwytP5weWv+Wa9/4CL/qrjwFHhlo/m95b/wCWa8/4
+CL/qrh4Cvhlo/m75b/5Zrz/gIv8Aqrh4V8Mrf+Vt+XD/AMe15/wEX/VXCIFHhF3/ACtjy6f+Pa8/
+4CL/AKqZMYijwy1/ytXy8f8Aj3u/+Ai/6qZYNPJHhl3/ACtPy+f+Pe7/AOAj/wCqmTGll5I8Mtf8
+rQ0A/wDHvd/8BH/1UywaKfkjwy7/AJWboR/497r/AICP/qpkx2fPvCOAtf8AKytDP+6Lr/gI/wDq
+pkx2bk7x+PgjgLY/MXRT0guf+Bj/AOa8P8nZO8fj4LwFseftIPSG4/4FP+a8f5Pn3j8fBHAUTY+b
+dOvbqO2iimWSQkKXVQNhXejHwyGTSSiLNIMSE4JzGYLCcKFpOFCN0PfVYB/rf8QOU6n+7LZi+oPm
+7zx/ymvmD/tpXn/J98uxfQPcHpsX0D3B+gfkb/jgx/Mf8m0zzJ2LIMVdirsVdirsVdir5C/5zM/5
+TvRP+2WP+oiXOw9nf7qX9b9AcfNzZF+To/5BVpB9rj/qMlzI1H98fx0dNq/qLJycXDWk4ULScKEq
+/IbT7OTVvMty0S/Wm1BoRPQcxHVmKqT0BPXNL25M3EdKd52bEUS9s/RNv/O/3j+maC3Zu/RNv/O/
+3j+mNq79E2/87/eP6Y2rv0Tb/wA7/eP6Y2rv0Tb/AM7/AHj+mNq79E2/87/eP6Y2rv0Tb/zv94/p
+jau/RNv/ADv94/pjau/RNv8Azv8AeP6Y2rv0Tb/zv94/pjau/RNv/O/3j+mNq80/PXTbMeUJmaMP
+LbyQvBKwBZC8gRqEU6qc6L2YyyjqwAdpA38nA7RiDiJ7nzykeekEvOpz5cSmsWx9z/xE5jak+gsZ
+cmeE5qWhaThQtJwqj/L2+sW4/wBf/iDZRq/7s/jq2YfqD5v89f8AKb+Yf+2nef8AUQ+W4foHuD02
+L6R7n6BeRv8Ajgx/Mf8AJtM8zdiyDFXYq7FXYq7FXYq+Qv8AnMz/AJTvRP8Atlj/AKiJc7D2d/up
+f1v0Bx83Nkn5Pj/kEujn/mI/6jJcvz/35/HR02r+osjJyThLScKFhOSQgvyCamo+YR46o3/G2aHt
+z6o+533Zv0l7pmhdk7FXYq7FXYq7FXYq7FXYq7FXYq8w/PPfytdr7wf8nRm/9m/8bj7pfc4PaP8A
+cn4PntI89IJebTXQUpqlufc/8ROY+c+gsZcmZk5rWhaThVaThQmPlrfW7Yf6/wDybbMfWf3R/HVt
+wfWHzh58/wCU58xf9tO8/wCoh8twfRH3B6fH9I9z9AfI3/HBj+Y/5NpnmbsGQYq7FXYq7FXYq7FX
+yF/zmZ/yneif9ssf9REudh7O/wB1L+t+gOPm5sm/KEf8gh0Y+9x/1GTZdm/vz+OgdPrOZT8nLHAW
+E5JC0nCqX/kO9NT8wf8AbUb/AI2zQ9ufVH3O+7N+kvdPUzQ07Jg/5n+a7ny3o9zq0CGY20cREHMx
+hvUnEfUA9OVemZmh03jZRC6u/utpz5eCBl3PIv8AoY3V/wDq1j/pKf8A5ozoR7NxP8f2ftdf/KR/
+m/ay/wDLf81dQ826lcW0tsbQWypJyWZpOXJuNKELmu7U7JGliJCXFZ7nJ0ur8UkVVPZvUzR05rvU
+xpXepjSu9TGld6mNK71MaV3qY0rzP8625eXrlf8AjB/ydGb32c/xuPul9zg9o/3J+DwdI89FJebT
+PRkpqEJ9z+o5RmPpLCXJlJOYLStJwoWE4UJp5V31+1H/ABk/5NtmNrf7o/D727T/AFh84efv+U68
+x/8AbUvf+oh8swf3cfcHp8f0j3P0B8jf8cGP5j/k2meaOwZBirsVdirsVdirsVfIX/OZn/Kd6J/2
+yx/1ES52Hs7/AHUv636A4+bmyf8AKMf8gc0U/wCVcf8AUZNl2b/GD+OgdPrOZTsnLnXrScKrScKE
+s/I1qanr3/bTb/jbND22PVH3O/7N+kvb/UzROyeYfny9fJmoj/iu2/6i0zbdiD/CofH/AHJcTW/3
+R+H3vmQDPQ4wefep/kEeOuah/wAYov8Ak5nOe1Eaxw/rH7nZdmfUfc+l/UziXcu9TFXepirvUxV3
+qYq71MVd6mKvOPzhblolwPaH/k5m79nv8aj7j9zgdo/3J+DxdI89BJebTDTEpeRH3P6jlOQ7MZck
+/JzFaFhOFC0nCqbeUd/MVoP+Mn/Jpsxdf/cy+H3hu031h84/mB/ynnmT/tqXv/UQ+Waf+7j/AFR9
+z0+P6R7n6AeRv+ODH8x/ybTPNHYMgxV2KuxV2KuxV2KvkL/nMz/lO9E/7ZY/6iJc7D2d/upf1v0B
+x83NlP5TD/kC+iH/AC7n/qMmy3L/AIzL8dA6jWcym5OZDrlpOFC0nChKfyUbjqmue+pN/wAbZpO3
+h6of1Xf9m/SXtXqZz9Oyeafnm9fKOoD/AIrt/wDqKXNz2CP8Lh/nf7kuJrv7o/D73zaFz0mMHnre
+nfkWeOt33/GKP/k5nMe1kaxQ/rH7nZ9l/Ufc+j/UzhKdy71MaV3qY0rvUxpXepjSu9TGld6mNK8/
+/NduWlzL7Rf8nM3XYH+NR+P3OD2l/cn4PJEjzvSXmkbYpS4Q/wCfTKpnZjLkmpOUtC0nCq0nJITj
+ybv5lsx/xk/5NPmH2h/cy+H3hv0394Hzl+YP/KfeZf8Atq3v/US+Waf+7j/VH3PTw+kPv/yN/wAc
+GP5j/k2meaOwZBirsVdirsVdirsVfIX/ADmZ/wAp3on/AGyx/wBREudh7O/3Uv636A4+bmyv8qB/
+yBPRD/xZc/8AUZNlmT/GpfjoHUa1MycynWrScKFhOFUn/JxuOqa1/wBtJv8AjbNR7QD1Q/qu+7M+
+kvZfUznKdm83/Ox+XlW/H/Fdv/1Erm69nh/hkP8AO/3JcTXf3J+H3vncLnp8YvOPSvyUHDWL0+Mc
+f/E85P2u/uof1j9ztOy/qPufQ3qZwVO6d6mNK71MaV3qY0rvUxpXepjSu9TGlYJ+ZjcrGUe0X/E8
+3HYX+Mx+P3OB2l/cn4PNEjzuSXmkVbpSRTlZLGXJFk5FpWk5JC0nChOvJG/miyH/ABl/5MvmF2l/
+cS+H3hyNL/eD8dHzn+Yf/Kf+Zv8AtrX3/US+T0391H+qPueoh9Iff3kb/jgx/Mf8m0zzVz2QYq7F
+XYq7FXYq7FXyF/zmZ/yneif9ssf9REudh7O/3Uv636A4+bmyz8qv/JHaGf8Aiy5/6jJ8nk/xuXu/
+QHUa1MCczHWLCcKrScKEk/KN+Gqaz/20W/42zV+0Y3x/1Xfdl/SXr31gZzVO0Yv520E+YLSSwbms
+EyIHkjKhgUk9Tbl8hmXodXLTZRliATG+fmKas2IZImJ6sFH5J2Q/3ddffF/TOh/0W5/5kPt/W4P8
+lw7ynvlX8v18vXbz25mkMoVX9QpQBWrtxAzV9pdsZNXERkAOHutyNPpI4iSDzei/WBmnpy3fWBjS
+u+sDGld9YGNK76wMaV31gY0rvrAxpWGfmA4kt5B/kx/8Tzbdi/4wPj9zgdpf3J+DAkjztCXmldEp
+vkbYy5Licm0LScKFhOFU98ib+a7H/nr/AMmXzB7T/wAXl8PvDkaT+8H46PnT8xf/ACYPmf8A7a19
+/wBRL5PTf3Uf6o+56iHIPv3yN/xwY/mP+TaZ5q57IMVdirsVdirsVdir5C/5zMB/x1oh7fosf9RE
+udh7O/3Uv636A4+bmyz8qv8AyRuh07S3Ffb/AEyfJz/xuXu/QHUa3kjSczXWLScKFpOFDH/ywfhq
+OsH/AJf2/W2a72lG+P8AqO+7L+kvT/rXvnMU7R31r3xpXfWvfGld9a98aV31r3xpXfWvfGld9a98
+aV31r3xpXfWvfGld9a98aV31r3xpWM+bpPUiYeyf8Szadj/4wPj9zg9pf3J+DFUjzsCXmVVkpGTg
+id2MuSHJy9oWE4VWk4UJ95CqfNljQbD1a/8AIl8wO1P8Xl8PvDkaP+8H46PnX8xf/Jg+Z/8AtrX3
+/US+T0v91H+qPuephyD798jf8cGP5j/k2meaueyDFXYq7FXYq7FXYq+b/wDnMvyrcXGj6F5ngQtH
+YSSWV6QK8VuOLxMfBQ8bLXxYZ0vs7nAlLGeu4+DTmHVif/OOXm+xvdGvfImoTiO5LvdaSXbZlIDS
+RINt0ZfUp1ILeGbPtDGYTGUfF12pxcQZ/fafeWUhjuIytDQPT4W+Ry3FljMWC6acDHmhCcta1hOF
+Uo/KW39fzBf2/X1dQYU/4LNf7UHfH/Ud92V9Je4/4U/yPwzkuN2tO/wp/kfhjxrTv8Kf5H4Y8a07
+/Cn+R+GPGtO/wp/kfhjxrTv8Kf5H4Y8a07/Cn+R+GPGtO/wp/kfhjxrTv8Kf5H4Y8a07/Cn+R+GP
+GtO/wp/kfhjxrTz78wrH6lf/AFelKxI1Pmx/pm27GN5x8fucDtP+5PwYmkedcS8wuuEpbufb+OMD
+6mMuSWE5ltK0nChyJJK4jjUu7bKqgkk+wGJIAsqBfJldi1p5F0G982+Yf3BjjMdlZsQsskjbqig/
+tvxoB2FSds0Wu1H5iQxY9+8u20OlINl82eV7HUPNvny1WWs1zqF4bm8cDqC5lmb2rvT3zK1mUYMB
+PdGh9wd/AWafoD5TtzBo6L2LEj5ABf8AjXPPHLTjFXYq7FXYq7FXYql/mDQdL8waLeaLqsIuNPv4
+mhuIj3Vu4PZlO6nsd8sxZZY5CUeYQRb4V/NL8oPNv5a656pEs2kiX1NL1uDko+FqpzZf7qVdtvHd
+Sc7vQ9o49TGuUusfxzDjTgQmOjf85K/mRp1klrMbLUymy3F5C5loBQAtDJCG+ZFfE4z7KxSN7j3O
+OcUSj/8Aoaf8wf8Aq36T/wAibn/soyH8kYu+X2fqR4Ad/wBDT/mD/wBW/Sf+RNz/ANlGP8kYu+X2
+fqXwAoN/zkl5puryK6v9OtRJACIHsXmtXUk9SzvcfgBlObsSEuUiPfv+puxejkjP+hnPMn++bz/u
+JS/9U8xv9Dw/n/7H9rd4rv8AoZzzJ/vm8/7iUv8A1Tx/0PD+f/sf2r4rv+hnPMn++bz/ALiUv/VP
+H/Q8P5/+x/aviu/6Gc8yf75vP+4lL/1Tx/0PD+f/ALH9q+K7/oZzzJ/vm8/7iUv/AFTx/wBDw/n/
+AOx/aviu/wChnPMn++bz/uJS/wDVPH/Q8P5/+x/aviu/6Gc8yf75vP8AuJS/9U8f9Dw/n/7H9q+K
+7/oZzzJ/vm8/7iUv/VPH/Q8P5/8Asf2r4rv+hnPMn++bz/uJS/8AVPH/AEPD+f8A7H9q+K7/AKGc
+8yf75vP+4lL/ANU8f9Dw/n/7H9q+K7/oZzzJ/vm8/wC4lL/1Tx/0PD+f/sf2r4qEm/5yR8yi8jvr
+awikvEBQyahNLdjgRSg4mBh1/mPyy7D2FCJ3kT7hX62vJLjFK3/Q0/5g/wDVv0n/AJE3P/ZRmT/J
+GLvl9n6nH8AO/wChp/zB/wCrfpP/ACJuf+yjH+SMXfL7P1L4Ad/0NP8AmD/1b9J/5E3P/ZRj/JGL
+vl9n6l8AO/6Gn/MH/q36T/yJuf8Asox/kjF3y+z9S+AGj/zlP+YJH/HP0ke/o3P/AGUY/wAkYu+X
+2fqXwQwPXvM/nfz/AKxF9emm1O7qRa2cS0jiDHf040AVR0qx32+I5lxhi08L2iO9tjCtg+ifyJ/J
+ubQF+u36q+tXajmRusEXXiD+vxNPAE8f2r2l+YlUfoH2+f6nKhCn0XBCkEKQxiiRgKv0ZqGxfirs
+VdirsVdirsVdiqhfWFlf2slpewpcW0o4yQyKGVh7g4QSNwryzXP+cZ/yy1G4a4i0xIGY1McTyQrX
+5RMo/wCFzYY+1tTAUJn40fvYHGEp/wChVPy+/wCWAf8ASXdf1yf8tar+f9kf1L4cXf8AQqn5ff8A
+LAP+ku6/rj/LWq/n/ZH9S+HF3/Qqn5ff8sA/6S7r+uP8tar+f9kf1L4cXf8AQqn5ff8ALAP+ku6/
+rj/LWq/n/ZH9S+HF3/Qqn5ff8sA/6S7r+uP8tar+f9kf1L4cXf8AQqn5ff8ALAP+ku6/rj/LWq/n
+/ZH9S+HF3/Qqn5ff8sA/6S7r+uP8tar+f9kf1L4cXf8AQqn5ff8ALAP+ku6/rj/LWq/n/ZH9S+HF
+3/Qqn5ff8sA/6S7r+uP8tar+f9kf1L4cXf8AQqn5ff8ALAP+ku6/rj/LWq/n/ZH9S+HF3/Qqn5ff
+8sA/6S7r+uP8tar+f9kf1L4cXf8AQqn5ff8ALAP+ku6/rj/LWq/n/ZH9S+HF3/Qqn5ff8sA/6S7r
++uP8tar+f9kf1L4cXf8AQqn5ff8ALAP+ku6/rj/LWq/n/ZH9S+HF3/Qqn5ff8sA/6S7r+uP8tar+
+f9kf1L4cXf8AQqn5ff8ALAP+ku6/rj/LWq/n/ZH9S+HF3/Qqn5ff8sA/6S7r+uP8tar+f9kf1L4c
+Xf8AQqn5ff8ALAP+ku6/rj/LWq/n/ZH9S+HF3/Qqn5ff8sA/6S7r+uP8tar+f9kf1L4cW1/5xW/L
+9WDCwWo33urkj7icT2zqv5/2R/UvhxZl5Z/KLy9oKcLG1t7RduRgT42p4sQN/c5g5tRkym5yMmQA
+DNrOytrSL04E4j9o9ST7nKUq+KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2K
+uxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2Ku
+xV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2Kux
+V2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV//2Q==</xapGImg:image>
+              </rdf:li>
+            </rdf:Alt>
+          </xap:Thumbnails>
+        </rdf:Description>
+        <rdf:Description
+           rdf:about="uuid:9dfcc10e-f4e2-4cbf-91b0-8deea2f1a998">
+          <xapMM:DocumentID>
+uuid:f3c53255-be8a-4b04-817b-695bf2c54c8b</xapMM:DocumentID>
+        </rdf:Description>
+        <rdf:Description
+           rdf:about="uuid:9dfcc10e-f4e2-4cbf-91b0-8deea2f1a998">
+          <dc:format>
+image/svg+xml</dc:format>
+          <dc:title>
+            <rdf:Alt>
+              <rdf:li
+                 xml:lang="x-default">
+filesave.ai</rdf:li>
+            </rdf:Alt>
+          </dc:title>
+        </rdf:Description>
+      </rdf:RDF>
+    </x:xmpmeta>
+    <xpacket>end='w'    </xpacket>
+  </metadata>
+  <g
+     id="Layer_1">
+    <path
+       style="opacity:0.2;"
+       d="M9.416,5.208c-2.047,0-3.712,1.693-3.712,3.775V39.15c0,2.082,1.666,3.775,3.712,3.775h29.401     c2.047,0,3.712-1.693,3.712-3.775V8.983c0-2.082-1.665-3.775-3.712-3.775H9.416z"
+       id="path592" />
+    <path
+       style="opacity:0.2;"
+       d="M9.041,4.833c-2.047,0-3.712,1.693-3.712,3.775v30.167c0,2.082,1.666,3.775,3.712,3.775h29.401     c2.047,0,3.712-1.693,3.712-3.775V8.608c0-2.082-1.665-3.775-3.712-3.775H9.041z"
+       id="path593" />
+    <path
+       style="fill:#00008D;"
+       d="M8.854,4.646c-2.047,0-3.712,1.693-3.712,3.775v30.167c0,2.082,1.666,3.775,3.712,3.775h29.401     c2.047,0,3.712-1.693,3.712-3.775V8.42c0-2.082-1.665-3.775-3.712-3.775H8.854z"
+       id="path594" />
+    <path
+       style="fill:#00008D;"
+       d="M8.854,5.021c-1.84,0-3.337,1.525-3.337,3.4v30.167c0,1.875,1.497,3.4,3.337,3.4h29.401     c1.84,0,3.337-1.525,3.337-3.4V8.42c0-1.875-1.497-3.4-3.337-3.4H8.854z"
+       id="path595" />
+    <path
+       id="path166_1_"
+       style="fill:#FFFFFF;"
+       d="M40.654,38.588c0,1.36-1.074,2.463-2.399,2.463H8.854c-1.326,0-2.4-1.103-2.4-2.463V8.42     c0-1.36,1.074-2.462,2.4-2.462h29.401c1.325,0,2.399,1.103,2.399,2.462V38.588z" />
+    <linearGradient
+       id="path166_2_"
+       gradientUnits="userSpaceOnUse"
+       x1="-149.0464"
+       y1="251.1436"
+       x2="-149.0464"
+       y2="436.303"
+       gradientTransform="matrix(0.1875 0 0 -0.1875 51.5 83.75)">
+      <stop
+         offset="0"
+         style="stop-color:#B4E2FF"
+         id="stop598" />
+      <stop
+         offset="1"
+         style="stop-color:#006DFF"
+         id="stop599" />
+      <a:midPointStop
+         offset="0"
+         style="stop-color:#B4E2FF"
+         id="midPointStop600" />
+      <a:midPointStop
+         offset="0.5"
+         style="stop-color:#B4E2FF"
+         id="midPointStop601" />
+      <a:midPointStop
+         offset="1"
+         style="stop-color:#006DFF"
+         id="midPointStop602" />
+    </linearGradient>
+    <path
+       id="path166"
+       style="fill:url(#path166_2_);"
+       d="M40.654,38.588c0,1.36-1.074,2.463-2.399,2.463H8.854c-1.326,0-2.4-1.103-2.4-2.463V8.42     c0-1.36,1.074-2.462,2.4-2.462h29.401c1.325,0,2.399,1.103,2.399,2.462V38.588z" />
+    <path
+       style="fill:#FFFFFF;"
+       d="M8.854,6.521c-1.013,0-1.837,0.852-1.837,1.9v30.167c0,1.048,0.824,1.9,1.837,1.9h29.401     c1.013,0,1.837-0.853,1.837-1.9V8.42c0-1.048-0.824-1.9-1.837-1.9H8.854z"
+       id="path604" />
+    <linearGradient
+       id="XMLID_1_"
+       gradientUnits="userSpaceOnUse"
+       x1="7.3057"
+       y1="7.2559"
+       x2="50.7728"
+       y2="50.7231">
+      <stop
+         offset="0"
+         style="stop-color:#94CAFF"
+         id="stop606" />
+      <stop
+         offset="1"
+         style="stop-color:#006DFF"
+         id="stop607" />
+      <a:midPointStop
+         offset="0"
+         style="stop-color:#94CAFF"
+         id="midPointStop608" />
+      <a:midPointStop
+         offset="0.5"
+         style="stop-color:#94CAFF"
+         id="midPointStop609" />
+      <a:midPointStop
+         offset="1"
+         style="stop-color:#006DFF"
+         id="midPointStop610" />
+    </linearGradient>
+    <path
+       style="fill:url(#XMLID_1_);"
+       d="M8.854,6.521c-1.013,0-1.837,0.852-1.837,1.9v30.167c0,1.048,0.824,1.9,1.837,1.9h29.401     c1.013,0,1.837-0.853,1.837-1.9V8.42c0-1.048-0.824-1.9-1.837-1.9H8.854z"
+       id="path611" />
+    <linearGradient
+       id="XMLID_2_"
+       gradientUnits="userSpaceOnUse"
+       x1="23.5039"
+       y1="2.187"
+       x2="23.5039"
+       y2="34.4368">
+      <stop
+         offset="0"
+         style="stop-color:#428AFF"
+         id="stop613" />
+      <stop
+         offset="1"
+         style="stop-color:#C9E6FF"
+         id="stop614" />
+      <a:midPointStop
+         offset="0"
+         style="stop-color:#428AFF"
+         id="midPointStop615" />
+      <a:midPointStop
+         offset="0.5"
+         style="stop-color:#428AFF"
+         id="midPointStop616" />
+      <a:midPointStop
+         offset="1"
+         style="stop-color:#C9E6FF"
+         id="midPointStop617" />
+    </linearGradient>
+    <path
+       style="fill:url(#XMLID_2_);"
+       d="M36.626,6.861c0,0-26.184,0-26.914,0c0,0.704,0,16.59,0,17.294c0.721,0,26.864,0,27.583,0     c0-0.704,0-16.59,0-17.294C36.988,6.861,36.626,6.861,36.626,6.861z"
+       id="path618" />
+    <polygon
+       id="path186_1_"
+       style="fill:#FFFFFF;"
+       points="35.809,6.486 10.221,6.486 10.221,23.405 36.788,23.405 36.788,6.486 " />
+    <linearGradient
+       id="path186_2_"
+       gradientUnits="userSpaceOnUse"
+       x1="-104.5933"
+       y1="411.6699"
+       x2="-206.815"
+       y2="309.4482"
+       gradientTransform="matrix(0.1875 0 0 -0.1875 51.5 83.75)">
+      <stop
+         offset="0"
+         style="stop-color:#CCCCCC"
+         id="stop621" />
+      <stop
+         offset="1"
+         style="stop-color:#F0F0F0"
+         id="stop622" />
+      <a:midPointStop
+         offset="0"
+         style="stop-color:#CCCCCC"
+         id="midPointStop623" />
+      <a:midPointStop
+         offset="0.5"
+         style="stop-color:#CCCCCC"
+         id="midPointStop624" />
+      <a:midPointStop
+         offset="1"
+         style="stop-color:#F0F0F0"
+         id="midPointStop625" />
+    </linearGradient>
+    <polygon
+       id="path186"
+       style="fill:url(#path186_2_);"
+       points="35.809,6.486 10.221,6.486 10.221,23.405 36.788,23.405 36.788,6.486 " />
+    <path
+       style="fill:#FFFFFF;stroke:#FFFFFF;stroke-width:0.1875;"
+       d="M11.488,7.019c0,0.698,0,14.542,0,15.239c0.716,0,23.417,0,24.133,0c0-0.698,0-14.541,0-15.239     C34.904,7.019,12.204,7.019,11.488,7.019z"
+       id="path627" />
+    <linearGradient
+       id="XMLID_3_"
+       gradientUnits="userSpaceOnUse"
+       x1="34.5967"
+       y1="3.5967"
+       x2="18.4087"
+       y2="19.7847">
+      <stop
+         offset="0"
+         style="stop-color:#FFFFFF"
+         id="stop629" />
+      <stop
+         offset="0.5506"
+         style="stop-color:#E6EDFF"
+         id="stop630" />
+      <stop
+         offset="1"
+         style="stop-color:#FFFFFF"
+         id="stop631" />
+      <a:midPointStop
+         offset="0"
+         style="stop-color:#FFFFFF"
+         id="midPointStop632" />
+      <a:midPointStop
+         offset="0.5"
+         style="stop-color:#FFFFFF"
+         id="midPointStop633" />
+      <a:midPointStop
+         offset="0.5506"
+         style="stop-color:#E6EDFF"
+         id="midPointStop634" />
+      <a:midPointStop
+         offset="0.5"
+         style="stop-color:#E6EDFF"
+         id="midPointStop635" />
+      <a:midPointStop
+         offset="1"
+         style="stop-color:#FFFFFF"
+         id="midPointStop636" />
+    </linearGradient>
+    <path
+       style="fill:url(#XMLID_3_);stroke:#FFFFFF;stroke-width:0.1875;"
+       d="M11.488,7.019c0,0.698,0,14.542,0,15.239c0.716,0,23.417,0,24.133,0c0-0.698,0-14.541,0-15.239     C34.904,7.019,12.204,7.019,11.488,7.019z"
+       id="path637" />
+    <linearGradient
+       id="path205_1_"
+       gradientUnits="userSpaceOnUse"
+       x1="-174.4409"
+       y1="300.0908"
+       x2="-108.8787"
+       y2="210.2074"
+       gradientTransform="matrix(0.1875 0 0 -0.1875 51.5 83.75)">
+      <stop
+         offset="0"
+         style="stop-color:#003399"
+         id="stop639" />
+      <stop
+         offset="0.2697"
+         style="stop-color:#0035ED"
+         id="stop640" />
+      <stop
+         offset="1"
+         style="stop-color:#57ADFF"
+         id="stop641" />
+      <a:midPointStop
+         offset="0"
+         style="stop-color:#003399"
+         id="midPointStop642" />
+      <a:midPointStop
+         offset="0.5"
+         style="stop-color:#003399"
+         id="midPointStop643" />
+      <a:midPointStop
+         offset="0.2697"
+         style="stop-color:#0035ED"
+         id="midPointStop644" />
+      <a:midPointStop
+         offset="0.5"
+         style="stop-color:#0035ED"
+         id="midPointStop645" />
+      <a:midPointStop
+         offset="1"
+         style="stop-color:#57ADFF"
+         id="midPointStop646" />
+    </linearGradient>
+    <rect
+       id="path205"
+       x="12.154"
+       y="26.479"
+       style="fill:url(#path205_1_);"
+       width="22.007"
+       height="13.978" />
+    <linearGradient
+       id="XMLID_4_"
+       gradientUnits="userSpaceOnUse"
+       x1="21.8687"
+       y1="25.1875"
+       x2="21.8687"
+       y2="44.6251">
+      <stop
+         offset="0"
+         style="stop-color:#DFDFDF"
+         id="stop649" />
+      <stop
+         offset="1"
+         style="stop-color:#7D7D99"
+         id="stop650" />
+      <a:midPointStop
+         offset="0"
+         style="stop-color:#DFDFDF"
+         id="midPointStop651" />
+      <a:midPointStop
+         offset="0.5"
+         style="stop-color:#DFDFDF"
+         id="midPointStop652" />
+      <a:midPointStop
+         offset="1"
+         style="stop-color:#7D7D99"
+         id="midPointStop653" />
+    </linearGradient>
+    <path
+       style="fill:url(#XMLID_4_);"
+       d="M13.244,27.021c-0.311,0-0.563,0.252-0.563,0.563v13.104c0,0.312,0.252,0.563,0.563,0.563h17.249     c0.311,0,0.563-0.251,0.563-0.563V27.583c0-0.311-0.252-0.563-0.563-0.563H13.244z M18.85,30.697c0,0.871,0,5.078,0,5.949     c-0.683,0-2.075,0-2.759,0c0-0.871,0-5.078,0-5.949C16.775,30.697,18.167,30.697,18.85,30.697z"
+       id="path654" />
+    <linearGradient
+       id="XMLID_5_"
+       gradientUnits="userSpaceOnUse"
+       x1="-158.0337"
+       y1="288.0684"
+       x2="-158.0337"
+       y2="231.3219"
+       gradientTransform="matrix(0.1875 0 0 -0.1875 51.5 83.75)">
+      <stop
+         offset="0"
+         style="stop-color:#F0F0F0"
+         id="stop656" />
+      <stop
+         offset="0.6348"
+         style="stop-color:#CECEDB"
+         id="stop657" />
+      <stop
+         offset="0.8595"
+         style="stop-color:#B1B1C5"
+         id="stop658" />
+      <stop
+         offset="1"
+         style="stop-color:#FFFFFF"
+         id="stop659" />
+      <a:midPointStop
+         offset="0"
+         style="stop-color:#F0F0F0"
+         id="midPointStop660" />
+      <a:midPointStop
+         offset="0.5"
+         style="stop-color:#F0F0F0"
+         id="midPointStop661" />
+      <a:midPointStop
+         offset="0.6348"
+         style="stop-color:#CECEDB"
+         id="midPointStop662" />
+      <a:midPointStop
+         offset="0.5"
+         style="stop-color:#CECEDB"
+         id="midPointStop663" />
+      <a:midPointStop
+         offset="0.8595"
+         style="stop-color:#B1B1C5"
+         id="midPointStop664" />
+      <a:midPointStop
+         offset="0.5"
+         style="stop-color:#B1B1C5"
+         id="midPointStop665" />
+      <a:midPointStop
+         offset="1"
+         style="stop-color:#FFFFFF"
+         id="midPointStop666" />
+    </linearGradient>
+    <path
+       style="fill:url(#XMLID_5_);"
+       d="M13.244,27.583v13.104h17.249V27.583H13.244z M19.413,37.209h-3.884v-7.074h3.884V37.209z"
+       id="path667" />
+    <linearGradient
+       id="path228_1_"
+       gradientUnits="userSpaceOnUse"
+       x1="-68.1494"
+       y1="388.4561"
+       x2="-68.1494"
+       y2="404.6693"
+       gradientTransform="matrix(0.1875 0 0 -0.1875 51.5 83.75)">
+      <stop
+         offset="0"
+         style="stop-color:#3399FF"
+         id="stop669" />
+      <stop
+         offset="1"
+         style="stop-color:#000000"
+         id="stop670" />
+      <a:midPointStop
+         offset="0"
+         style="stop-color:#3399FF"
+         id="midPointStop671" />
+      <a:midPointStop
+         offset="0.5"
+         style="stop-color:#3399FF"
+         id="midPointStop672" />
+      <a:midPointStop
+         offset="1"
+         style="stop-color:#000000"
+         id="midPointStop673" />
+    </linearGradient>
+    <rect
+       id="path228"
+       x="37.83"
+       y="9.031"
+       style="fill:url(#path228_1_);"
+       width="1.784"
+       height="1.785" />
+    <polyline
+       id="_x3C_Slice_x3E_"
+       style="fill:none;"
+       points="0,48 0,0 48,0 48,48 " />
+  </g>
+</svg>

BIN
PPOCRLabel/resources/icons/undo-cross.png


BIN
PPOCRLabel/resources/icons/undo.png


BIN
PPOCRLabel/resources/icons/verify.png


BIN
PPOCRLabel/resources/icons/zoom-in.png


BIN
PPOCRLabel/resources/icons/zoom-out.png


BIN
PPOCRLabel/resources/icons/zoom.png


+ 116 - 0
PPOCRLabel/resources/strings/strings-en.properties

@@ -0,0 +1,116 @@
+openFile=Open
+openFileDetail=Open image or label file
+quit=Quit
+quitApp=Quit application
+openDir=Open Dir
+openDatasetDir=Open DatasetDir
+copyPrevBounding=Copy previous Bounding Boxes in the current image 
+changeSavedAnnotationDir=Change default saved Annotation dir
+openAnnotation=Open Annotation
+openAnnotationDetail=Open an annotation file
+changeSaveDir=Change Save Dir
+nextImg=Next Image
+nextImgDetail=Open the next Image
+prevImg=Prev Image
+prevImgDetail=Open the previous Image
+verifyImg=Verify Image
+verifyImgDetail=Verify Image
+save=Check
+saveDetail=Save the labels to a file
+changeSaveFormat=Change save format
+saveAs=Save As
+saveAsDetail=Save the labels to a different file
+closeCur=Close
+closeCurDetail=Close the current file
+deleteImg=Delete current image
+deleteImgDetail=Delete the current image
+resetAll=Reset Interface and Save Dir
+resetAllDetail=Reset All
+boxLineColor=Box Line Color
+boxLineColorDetail=Choose Box line color
+crtBox=Create RectBox
+crtBoxDetail=Draw a new box
+delBox=Delete RectBox
+delBoxDetail=Remove the box
+dupBox=Duplicate RectBox
+dupBoxDetail=Create a duplicate of the selected box
+tutorial=PaddleOCR url
+tutorialDetail=Show demo
+info=Information
+zoomin=Zoom In
+zoominDetail=Increase zoom level
+zoomout=Zoom Out
+zoomoutDetail=Decrease zoom level
+originalsize=Original size
+originalsizeDetail=Zoom to original size
+fitWin=Fit Window
+fitWinDetail=Zoom follows window size
+fitWidth=Fit Width
+fitWidthDetail=Zoom follows window width
+editLabel=Edit Label
+editLabelDetail=Modify the label of the selected Box
+shapeLineColor=Shape Line Color
+shapeLineColorDetail=Change the line color for this specific shape
+shapeFillColor=Shape Fill Color
+shapeFillColorDetail=Change the fill color for this specific shape
+showHide=Show/Hide Label Panel
+useDefaultLabel=Use default label
+useDifficult=Difficult
+boxLabelText=Box Labels
+labels=Labels
+autoSaveMode=Auto Save mode
+singleClsMode=Single Class Mode
+displayLabel=Display Labels
+displayIndex=Display box index
+fileList=File List
+files=Files
+advancedMode=Advanced Mode
+advancedModeDetail=Switch to advanced mode
+showAllBoxDetail=Show all bounding boxes
+hideAllBoxDetail=Hide all bounding boxes
+annoPanel=anno Panel
+anno=anno
+addNewBbox=new bbox
+reLabel=reLabel
+choosemodel=Choose OCR model
+tipchoosemodel=Choose OCR model from dir
+ImageResize=Image Resize
+IR=Image Resize
+autoRecognition=Auto Recognition
+reRecognition=Re-recognition
+mfile=File
+medit=Edit
+mview=View
+mhelp=Help
+iconList=Icon List
+detectionBoxposition=Detection box position
+recognitionResult=Recognition result
+creatPolygon=Create PolygonBox
+rotateLeft=Left turn 90 degrees
+rotateRight=Right turn 90 degrees
+drawSquares=Draw Squares
+saveRec=Export Recognition Result
+tempLabel=TEMPORARY
+nullLabel=NULL
+steps=Steps
+keys=Shortcut Keys
+choseModelLg=Choose Model Language
+cancel=Cancel
+ok=OK
+autolabeling=Automatic Labeling
+hideBox=Hide All Box
+showBox=Show All Box
+saveLabel=Export Label
+singleRe=Re-recognition RectBox
+labelDialogOption=Pop-up Label Input Dialog
+undo=Undo
+undoLastPoint=Undo Last Point
+autoSaveMode=Auto Export Label Mode
+lockBox=Lock selected box/Unlock all box
+lockBoxDetail=Lock selected box/Unlock all box
+keyListTitle=Key List
+keyDialogTip=Enter object label
+keyChange=Change Box Key
+TableRecognition=Table Recognition
+cellreRecognition=Cell Re-Recognition
+exportJSON=Export Table Label

+ 116 - 0
PPOCRLabel/resources/strings/strings-zh-CN.properties

@@ -0,0 +1,116 @@
+saveAsDetail=将标签保存到其他文件
+changeSaveDir=改变存放目录
+openFile=打开文件
+shapeLineColorDetail=更改线条颜色
+resetAll=重置界面与保存地址
+crtBox=矩形标注
+crtBoxDetail=创建一个新的区块
+dupBoxDetail=复制区块
+verifyImg=验证图像
+zoominDetail=放大
+verifyImgDetail=验证图像
+saveDetail=保存标签文件
+openFileDetail=打开图像文件
+fitWidthDetail=调整宽度适应到窗口宽度
+tutorial=PaddleOCR地址
+editLabel=编辑标签
+openAnnotationDetail=打开标签文件
+quit=退出
+shapeFillColorDetail=更改填充颜色
+closeCurDetail=关闭当前文件
+closeCur=关闭文件
+deleteImg=删除图像
+deleteImgDetail=删除当前图像
+fitWin=调整到窗口大小
+delBox=删除选择的区块
+boxLineColorDetail=选择线框颜色
+originalsize=原始大小
+resetAllDetail=重置所有设定
+zoomoutDetail=缩小画面
+save=确认
+saveAs=另存为
+fitWinDetail=缩放到当前窗口大小
+openDir=打开目录
+openDatasetDir=打开数据集路径
+copyPrevBounding=复制当前图像中的上一个边界框
+showHide=显示/隐藏标签
+changeSaveFormat=更改存储格式
+shapeFillColor=填充颜色
+quitApp=退出程序
+dupBox=复制区块
+delBoxDetail=删除区块
+zoomin=放大画面
+info=信息
+openAnnotation=开启标签
+prevImgDetail=上一个图像
+fitWidth=缩放到当前画面宽度
+zoomout=缩小画面
+changeSavedAnnotationDir=更改保存标签文件的预设目录
+nextImgDetail=下一个图像
+originalsizeDetail=放大到原始大小
+prevImg=上一张
+tutorialDetail=显示示范内容
+shapeLineColor=形状线条颜色
+boxLineColor=区块线条颜色
+editLabelDetail=修改当前所选区块的标签
+nextImg=下一张
+useDefaultLabel=使用预设标签
+useDifficult=有难度的
+boxLabelText=区块的标签
+labels=标签
+autoSaveMode=自动保存模式
+singleClsMode=单一类别模式
+displayLabel=显示类别
+displayIndex=显示box序号
+fileList=文件列表
+files=文件
+advancedMode=专家模式
+advancedModeDetail=切换到专家模式
+showAllBoxDetail=显示所有区块
+hideAllBoxDetail=隐藏所有区块
+annoPanel=标注面板
+anno=标注
+addNewBbox=新框
+reLabel=重标注
+choosemodel=选择模型
+tipchoosemodel=选择OCR模型
+ImageResize=图片缩放
+IR=图片缩放
+autoRecognition=自动标注
+reRecognition=重新识别
+mfile=文件
+medit=编辑
+mview=视图
+mhelp=帮助
+iconList=缩略图
+detectionBoxposition=检测框位置
+recognitionResult=识别结果
+creatPolygon=多点标注
+drawSquares=正方形标注
+rotateLeft=图片左旋转90度
+rotateRight=图片右旋转90度
+saveRec=导出识别结果
+tempLabel=待识别
+nullLabel=无法识别
+steps=操作步骤
+keys=快捷键
+choseModelLg=选择模型语言
+cancel=取消
+ok=确认
+autolabeling=自动标注中
+hideBox=隐藏所有标注
+showBox=显示所有标注
+saveLabel=导出标记结果
+singleRe=重识别此区块
+labelDialogOption=弹出标记输入框
+undo=撤销
+undoLastPoint=撤销上个点
+autoSaveMode=自动导出标记结果
+lockBox=锁定框/解除锁定框
+lockBoxDetail=若当前没有框处于锁定状态则锁定选中的框,若存在锁定框则解除所有锁定框的锁定状态
+keyListTitle=关键词列表
+keyDialogTip=请输入类型名称
+keyChange=更改Box关键字类别
+TableRecognition=表格识别
+cellreRecognition=单元格重识别
+exportJSON=导出表格标注

+ 8 - 0
PPOCRLabel/setup.cfg

@@ -0,0 +1,8 @@
+[bumpversion]
+commit = True
+tag = True
+
+[bumpversion:file:setup.py]
+
+[bdist_wheel]
+universal = 1

+ 52 - 0
PPOCRLabel/setup.py

@@ -0,0 +1,52 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from setuptools import setup
+from io import open
+
+with open('requirements.txt', encoding="utf-8-sig") as f:
+    requirements = f.readlines()
+    requirements.append('tqdm')
+
+
+def readme():
+    with open('README.md', encoding="utf-8-sig") as f:
+        README = f.read()
+    return README
+
+
+setup(
+    name='PPOCRLabel',
+    packages=['PPOCRLabel'],
+    package_data = {'PPOCRLabel': ['libs/*','resources/strings/*','resources/icons/*']},
+    package_dir={'PPOCRLabel': ''},
+    include_package_data=True,
+    entry_points={"console_scripts": ["PPOCRLabel= PPOCRLabel.PPOCRLabel:main"]},
+    version='2.1.3',
+    install_requires=requirements,
+    license='Apache License 2.0',
+    description='PPOCRLabelv2 is a semi-automatic graphic annotation tool suitable for OCR field, with built-in PP-OCR model to automatically detect and re-recognize data. It is written in Python3 and PyQT5, supporting rectangular box, table, irregular text and key information annotation modes. Annotations can be directly used for the training of PP-OCR detection and recognition models.',
+    long_description=readme(),
+    long_description_content_type='text/markdown',
+    url='https://github.com/PaddlePaddle/PaddleOCR',
+    download_url='https://github.com/PaddlePaddle/PaddleOCR.git',
+    keywords=[
+        'ocr textdetection textrecognition paddleocr crnn east star-net rosetta ocrlite db chineseocr chinesetextdetection chinesetextrecognition'
+    ],
+    classifiers=[
+        'Intended Audience :: Developers', 'Operating System :: OS Independent',
+        'Natural Language :: English',
+        'Programming Language :: Python :: 3.6',
+        'Programming Language :: Python :: 3.7', 'Topic :: Utilities'
+    ], )

A különbségek nem kerülnek megjelenítésre, a fájl túl nagy
+ 243 - 0
README.md


A különbségek nem kerülnek megjelenítésre, a fájl túl nagy
+ 254 - 0
README_ch.md


+ 219 - 0
StyleText/README.md

@@ -0,0 +1,219 @@
+English | [简体中文](README_ch.md)
+
+## Style Text
+
+### Contents
+- [1. Introduction](#Introduction)
+- [2. Preparation](#Preparation)
+- [3. Quick Start](#Quick_Start)
+- [4. Applications](#Applications)
+- [5. Code Structure](#Code_structure)
+
+
+<a name="Introduction"></a>
+### Introduction
+
+<div align="center">
+    <img src="doc/images/3.png" width="800">
+</div>
+
+<div align="center">
+    <img src="doc/images/9.png" width="600">
+</div>
+
+
+The Style-Text data synthesis tool is a tool based on Baidu and HUST cooperation research work, "Editing Text in the Wild" [https://arxiv.org/abs/1908.03047](https://arxiv.org/abs/1908.03047).
+
+Different from the commonly used GAN-based data synthesis tools, the main framework of Style-Text includes:
+* (1) Text foreground style transfer module.
+* (2) Background extraction module.
+* (3) Fusion module.
+
+After these three steps, you can quickly realize the image text style transfer. The following figure is some results of the data synthesis tool.
+
+<div align="center">
+    <img src="doc/images/10.png" width="1000">
+</div>
+
+
+<a name="Preparation"></a>
+#### Preparation
+
+1. Please refer the [QUICK INSTALLATION](../doc/doc_en/installation_en.md) to install PaddlePaddle. Python3 environment is strongly recommended.
+2. Download the pretrained models and unzip:
+
+```bash
+cd StyleText
+wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/style_text/style_text_models.zip
+unzip style_text_models.zip
+```
+
+If you save the model in another location, please modify the address of the model file in `configs/config.yml`, and you need to modify these three configurations at the same time:
+
+```
+bg_generator:
+  pretrain: style_text_models/bg_generator
+...
+text_generator:
+  pretrain: style_text_models/text_generator
+...
+fusion_generator:
+  pretrain: style_text_models/fusion_generator
+```
+
+<a name="Quick_Start"></a>
+### Quick Start
+
+#### Synthesis single image
+
+1. You can run `tools/synth_image` and generate the demo image, which is saved in the current folder.
+
+```python
+python3 tools/synth_image.py -c configs/config.yml --style_image examples/style_images/2.jpg --text_corpus PaddleOCR --language en
+```
+
+* Note 1: The language option corresponds to the corpus. Currently, the tool only supports English(en), Simplified Chinese(ch) and Korean(ko).
+* Note 2: Style-Text is mainly used to generate images for OCR recognition models.
+  So the height of style images should be around 32 pixels. Images in other sizes may behave poorly.
+* Note 3: You can modify `use_gpu` in `configs/config.yml` to determine whether to use GPU for prediction.
+
+
+
+For example, enter the following image and corpus `PaddleOCR`.
+
+<div align="center">
+    <img src="examples/style_images/2.jpg" width="300">
+</div>
+
+The result `fake_fusion.jpg` will be generated.
+
+<div align="center">
+    <img src="doc/images/4.jpg" width="300">
+</div>
+
+What's more, the medium result `fake_bg.jpg` will also be saved, which is the background output.
+
+<div align="center">
+    <img src="doc/images/7.jpg" width="300">
+</div>
+
+
+* `fake_text.jpg` is the generated image with the same font style as `Style Input`.
+
+
+<div align="center">
+    <img src="doc/images/8.jpg" width="300">
+</div>
+
+
+#### Batch synthesis
+
+In actual application scenarios, it is often necessary to synthesize pictures in batches and add them to the training set. StyleText can use a batch of style pictures and corpus to synthesize data in batches. The synthesis process is as follows:
+
+1. The referenced dataset can be specified in `configs/dataset_config.yml`:
+
+   * `Global`:
+     * `output_dir:`:Output synthesis data path.
+   * `StyleSampler`:
+     * `image_home`:style images' folder.
+     * `label_file`:Style images' file list. If label is provided, then it is the label file path.
+     * `with_label`:Whether the `label_file` is label file list.
+   * `CorpusGenerator`:
+     * `method`: Method of CorpusGenerator; supports `FileCorpus` and `EnNumCorpus`. If `EnNumCorpus` is used, no other configuration is needed; otherwise you need to set `corpus_file` and `language`.
+     * `language`:Language of the corpus. Currently, the tool only supports English(en), Simplified Chinese(ch) and Korean(ko).
+     * `corpus_file`: Filepath of the corpus. Corpus file should be a text file which will be split by line-endings('\n'). Corpus generator samples one line each time.
+
+
+Example of corpus file:
+```
+PaddleOCR
+飞桨文字识别
+StyleText
+风格文本图像数据合成
+```
+
+We provide a general dataset containing Chinese, English and Korean (50,000 images in all) for your trial ([download link](https://paddleocr.bj.bcebos.com/dygraph_v2.0/style_text/chkoen_5w.tar)), some examples are given below :
+
+<div align="center">
+     <img src="doc/images/5.png" width="800">
+</div>
+
+2. You can run the following command to start synthesis task:
+
+   ``` bash
+   python3 tools/synth_dataset.py -c configs/dataset_config.yml
+   ```
+
+We also provide example corpus and images in `examples` folder.
+    <div align="center">
+        <img src="examples/style_images/1.jpg" width="300">
+        <img src="examples/style_images/2.jpg" width="300">
+    </div>
+If you run the code above directly, you will get example output data in `output_data` folder.
+You will get synthesis images and labels as below:
+   <div align="center">
+       <img src="doc/images/12.png" width="800">
+   </div>
+There will be some cache under the `label` folder. If the program exits unexpectedly, you can find cached labels there.
+When the program finishes normally, you will find all the labels in `label.txt`, which gives the final results.
+
+<a name="Applications"></a>
+### Applications
+We take two scenes as examples, which are metal surface English number recognition and general Korean recognition, to illustrate practical cases of using StyleText to synthesize data to improve text recognition. The following figure shows some examples of real scene images and composite images:
+
+<div align="center">
+    <img src="doc/images/11.png" width="800">
+</div>
+
+
+After adding the above synthetic data for training, the accuracy of the recognition model is improved, which is shown in the following table:
+
+
+| Scenario | Characters | Raw Data | Test Data | Only Use Raw Data</br>Recognition Accuracy | New Synthetic Data | Simultaneous Use of Synthetic Data</br>Recognition Accuracy | Index Improvement |
+| -------- | ---------- | -------- | -------- | -------------------------- | ------------ | ---------------------- | -------- |
+| Metal surface | English and numbers | 2203     | 650      | 59.38%                     | 20000        | 75.46%                 | 16.08%      |
+| Random background | Korean       | 5631     | 1230     | 30.12%                     | 100000       | 50.57%                 | 20.45%      |
+
+<a name="Code_structure"></a>
+### Code Structure
+
+```
+StyleText
+|-- arch                        // Network module files.
+|   |-- base_module.py
+|   |-- decoder.py
+|   |-- encoder.py
+|   |-- spectral_norm.py
+|   `-- style_text_rec.py
+|-- configs                     // Config files.
+|   |-- config.yml
+|   `-- dataset_config.yml
+|-- engine                      // Synthesis engines.
+|   |-- corpus_generators.py    // Sample corpus from file or generate random corpus.
+|   |-- predictors.py           // Predict using network.
+|   |-- style_samplers.py       // Sample style images.
+|   |-- synthesisers.py         // Manage other engines to synthesis images.
+|   |-- text_drawers.py         // Generate standard input text images.
+|   `-- writers.py              // Write synthesis images and labels into files.
+|-- examples                    // Example files.
+|   |-- corpus
+|   |   `-- example.txt
+|   |-- image_list.txt
+|   `-- style_images
+|       |-- 1.jpg
+|       `-- 2.jpg
+|-- fonts                       // Font files.
+|   |-- ch_standard.ttf
+|   |-- en_standard.ttf
+|   `-- ko_standard.ttf
+|-- tools                       // Program entrance.
+|   |-- __init__.py
+|   |-- synth_dataset.py        // Synthesis dataset.
+|   `-- synth_image.py          // Synthesis image.
+`-- utils                       // Module of basic functions.
+    |-- config.py
+    |-- load_params.py
+    |-- logging.py
+    |-- math_functions.py
+    `-- sys_funcs.py
+```

+ 205 - 0
StyleText/README_ch.md

@@ -0,0 +1,205 @@
+简体中文 | [English](README.md)
+
+## Style Text
+
+
+### 目录
+- [一、工具简介](#工具简介)
+- [二、环境配置](#环境配置)
+- [三、快速上手](#快速上手)
+- [四、应用案例](#应用案例)
+- [五、代码结构](#代码结构)
+
+<a name="工具简介"></a>
+### 一、工具简介
+<div align="center">
+    <img src="doc/images/3.png" width="800">
+</div>
+
+<div align="center">
+    <img src="doc/images/1.png" width="600">
+</div>
+
+
+Style-Text数据合成工具是基于百度和华科合作研发的文本编辑算法《Editing Text in the Wild》https://arxiv.org/abs/1908.03047
+
+不同于常用的基于GAN的数据合成工具,Style-Text主要框架包括:1.文本前景风格迁移模块 2.背景抽取模块 3.融合模块。经过这样三步,就可以迅速实现图像文本风格迁移。下图是一些该数据合成工具效果图。
+
+<div align="center">
+    <img src="doc/images/2.png" width="1000">
+</div>
+
+<a name="环境配置"></a>
+### 二、环境配置
+
+1. 参考[快速安装](../doc/doc_ch/installation.md),安装PaddleOCR。
+2. 进入`StyleText`目录,下载模型,并解压:
+
+```bash
+cd StyleText
+wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/style_text/style_text_models.zip
+unzip style_text_models.zip
+```
+
+如果您将模型保存在其他位置,请在`configs/config.yml`中修改模型文件的地址,修改时需要同时修改这三个配置:
+
+```
+bg_generator:
+  pretrain: style_text_models/bg_generator
+...
+text_generator:
+  pretrain: style_text_models/text_generator
+...
+fusion_generator:
+  pretrain: style_text_models/fusion_generator
+```
+
+<a name="快速上手"></a>
+### 三、快速上手
+
+#### 合成单张图
+输入一张风格图和一段文字语料,运行tools/synth_image,合成单张图片,结果图像保存在当前目录下:
+
+```python
+python3 tools/synth_image.py -c configs/config.yml --style_image examples/style_images/2.jpg --text_corpus PaddleOCR --language en
+```
+* 注1:语言选项和语料相对应,目前支持英文(en)、简体中文(ch)和韩语(ko)。
+* 注2:Style-Text生成的数据主要应用于OCR识别场景。基于当前PaddleOCR识别模型的设计,我们主要支持高度在32左右的风格图像。
+  如果输入图像尺寸相差过多,效果可能不佳。
+* 注3:可以通过修改配置文件`configs/config.yml`中的`use_gpu`(true或者false)参数来决定是否使用GPU进行预测。
+
+
+例如,输入如下图片和语料"PaddleOCR":
+
+<div align="center">
+    <img src="examples/style_images/2.jpg" width="300">
+</div>
+
+生成合成数据`fake_fusion.jpg`:
+<div align="center">
+    <img src="doc/images/4.jpg" width="300">
+</div>
+
+除此之外,程序还会生成并保存中间结果`fake_bg.jpg`:为风格参考图去掉文字后的背景;
+
+<div align="center">
+    <img src="doc/images/7.jpg" width="300">
+</div>
+
+`fake_text.jpg`:是用提供的字符串,仿照风格参考图中文字的风格,生成在灰色背景上的文字图片。
+
+<div align="center">
+    <img src="doc/images/8.jpg" width="300">
+</div>
+
+#### 批量合成
+在实际应用场景中,经常需要批量合成图片,补充到训练集中。Style-Text可以使用一批风格图片和语料,批量合成数据。合成过程如下:
+
+1. 在`configs/dataset_config.yml`中配置目标场景风格图像和语料的路径,具体如下:
+
+   * `Global`:
+     * `output_dir:`:保存合成数据的目录。
+   * `StyleSampler`:
+     * `image_home`:风格图片目录;
+     * `label_file`:风格图片路径列表文件,如果所用数据集有label,则label_file为label文件路径;
+     * `with_label`:标志`label_file`是否为label文件。
+   * `CorpusGenerator`:
+     * `method`:语料生成方法,目前有`FileCorpus`和`EnNumCorpus`可选。如果使用`EnNumCorpus`,则不需要填写其他配置,否则需要修改`corpus_file`和`language`;
+     * `language`:语料的语种,目前支持英文(en)、简体中文(ch)和韩语(ko);
+     * `corpus_file`: 语料文件路径。语料文件应使用文本文件。语料生成器首先会将语料按行切分,之后每次随机选取一行。
+
+   语料文件格式示例:
+   ```
+   PaddleOCR
+   飞桨文字识别
+   StyleText
+   风格文本图像数据合成
+   ...
+   ```
+
+   Style-Text也提供了一批中英韩5万张通用场景数据用作文本风格图像,便于合成场景丰富的文本图像,下图给出了一些示例。
+
+   中英韩5万张通用场景数据: [下载地址](https://paddleocr.bj.bcebos.com/dygraph_v2.0/style_text/chkoen_5w.tar)
+
+<div align="center">
+    <img src="doc/images/5.png" width="800">
+</div>
+
+2. 运行`tools/synth_dataset`合成数据:
+
+   ``` bash
+   python3 tools/synth_dataset.py -c configs/dataset_config.yml
+   ```
+   我们在examples目录下提供了样例图片和语料。
+    <div align="center">
+        <img src="examples/style_images/1.jpg" width="300">
+        <img src="examples/style_images/2.jpg" width="300">
+    </div>
+
+   直接运行上述命令,可以在output_data中产生样例输出,包括图片和用于训练识别模型的标注文件:
+   <div align="center">
+       <img src="doc/images/12.png" width="800">
+   </div>
+
+   其中label目录下的标注文件为程序运行过程中产生的缓存,如果程序在中途异常终止,可以使用缓存的标注文件。
+   如果程序正常运行完毕,则会在output_data下生成label.txt,为最终的标注结果。
+
+<a name="应用案例"></a>
+### 四、应用案例
+下面以金属表面英文数字识别和通用韩语识别两个场景为例,说明使用Style-Text合成数据,来提升文本识别效果的实际案例。下图给出了一些真实场景图像和合成图像的示例:
+
+<div align="center">
+    <img src="doc/images/6.png" width="800">
+</div>
+
+在添加上述合成数据进行训练后,识别模型的效果提升,如下表所示:
+
+| 场景     | 字符       | 原始数据 | 测试数据 | 只使用原始数据</br>识别准确率 | 新增合成数据 | 同时使用合成数据</br>识别准确率 | 指标提升 |
+| -------- | ---------- | -------- | -------- | -------------------------- | ------------ | ---------------------- | -------- |
+| 金属表面 | 英文和数字 | 2203     | 650      | 59.38%                     | 20000        | 75.46%                 | 16.08%      |
+| 随机背景 | 韩语       | 5631     | 1230     | 30.12%                     | 100000       | 50.57%                 | 20.45%      |
+
+
+<a name="代码结构"></a>
+### 五、代码结构
+
+```
+StyleText
+|-- arch                        // 网络结构定义文件
+|   |-- base_module.py
+|   |-- decoder.py
+|   |-- encoder.py
+|   |-- spectral_norm.py
+|   `-- style_text_rec.py
+|-- configs                     // 配置文件
+|   |-- config.yml
+|   `-- dataset_config.yml
+|-- engine                      // 数据合成引擎
+|   |-- corpus_generators.py    // 从文本采样或随机生成语料
+|   |-- predictors.py           // 调用网络生成数据
+|   |-- style_samplers.py       // 采样风格图片
+|   |-- synthesisers.py         // 调度各个模块,合成数据
+|   |-- text_drawers.py         // 生成标准文字图片,用作输入
+|   `-- writers.py              // 将合成的图片和标签写入本地目录
+|-- examples                    // 示例文件
+|   |-- corpus
+|   |   `-- example.txt
+|   |-- image_list.txt
+|   `-- style_images
+|       |-- 1.jpg
+|       `-- 2.jpg
+|-- fonts                       // 字体文件
+|   |-- ch_standard.ttf
+|   |-- en_standard.ttf
+|   `-- ko_standard.ttf
+|-- tools                       // 程序入口
+|   |-- __init__.py
+|   |-- synth_dataset.py        // 批量合成数据
+|   `-- synth_image.py          // 合成单张图片
+`-- utils                       // 其他基础功能模块
+    |-- config.py
+    |-- load_params.py
+    |-- logging.py
+    |-- math_functions.py
+    `-- sys_funcs.py
+```

+ 0 - 0
StyleText/__init__.py


+ 0 - 0
StyleText/arch/__init__.py


+ 255 - 0
StyleText/arch/base_module.py

@@ -0,0 +1,255 @@
+#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import paddle
+import paddle.nn as nn
+
+from arch.spectral_norm import spectral_norm
+
+
+class CBN(nn.Layer):
+    """Conv2D followed by an optional normalization layer and activation.
+
+    Args:
+        name: prefix used for the learnable-parameter names
+            ("<name>_weights", "<name>_bias", "<name>_bn").
+        in_channels, out_channels, kernel_size, stride, padding, dilation,
+            groups: forwarded verbatim to ``paddle.nn.Conv2D``.
+        use_bias: when True, a named bias parameter is created for the conv.
+        norm_layer: optional class name looked up on ``paddle.nn``
+            (e.g. "BatchNorm2D"); ``None`` disables normalization.
+        act: optional activation class name looked up on ``paddle.nn``
+            (e.g. "ReLU"); ``None`` disables the activation.
+        act_attr: optional kwargs dict passed to the activation constructor.
+    """
+
+    def __init__(self,
+                 name,
+                 in_channels,
+                 out_channels,
+                 kernel_size,
+                 stride=1,
+                 padding=0,
+                 dilation=1,
+                 groups=1,
+                 use_bias=False,
+                 norm_layer=None,
+                 act=None,
+                 act_attr=None):
+        super(CBN, self).__init__()
+        if use_bias:
+            bias_attr = paddle.ParamAttr(name=name + "_bias")
+        else:
+            bias_attr = None
+        self._conv = paddle.nn.Conv2D(
+            in_channels=in_channels,
+            out_channels=out_channels,
+            kernel_size=kernel_size,
+            stride=stride,
+            padding=padding,
+            dilation=dilation,
+            groups=groups,
+            weight_attr=paddle.ParamAttr(name=name + "_weights"),
+            bias_attr=bias_attr)
+        # Norm layer / activation are resolved by name from paddle.nn so the
+        # choice can be driven purely by the YAML config files.
+        if norm_layer:
+            self._norm_layer = getattr(paddle.nn, norm_layer)(
+                num_features=out_channels, name=name + "_bn")
+        else:
+            self._norm_layer = None
+        if act:
+            if act_attr:
+                self._act = getattr(paddle.nn, act)(**act_attr,
+                                                    name=name + "_" + act)
+            else:
+                self._act = getattr(paddle.nn, act)(name=name + "_" + act)
+        else:
+            self._act = None
+
+    def forward(self, x):
+        # conv -> (optional norm) -> (optional activation)
+        out = self._conv(x)
+        if self._norm_layer:
+            out = self._norm_layer(out)
+        if self._act:
+            out = self._act(out)
+        return out
+
+
+class SNConv(nn.Layer):
+    """Spectrally-normalized Conv2D with optional norm layer and activation.
+
+    Identical structure to ``CBN`` except the convolution is wrapped in
+    ``spectral_norm`` (see arch/spectral_norm.py), which constrains the
+    layer's spectral norm — commonly used to stabilize GAN training.
+
+    Args:
+        name: prefix for the learnable-parameter names.
+        in_channels, out_channels, kernel_size, stride, padding, dilation,
+            groups: forwarded verbatim to ``paddle.nn.Conv2D``.
+        use_bias: when True, a named bias parameter is created for the conv.
+        norm_layer: optional class name looked up on ``paddle.nn``.
+        act: optional activation class name looked up on ``paddle.nn``.
+        act_attr: optional kwargs dict passed to the activation constructor.
+    """
+
+    def __init__(self,
+                 name,
+                 in_channels,
+                 out_channels,
+                 kernel_size,
+                 stride=1,
+                 padding=0,
+                 dilation=1,
+                 groups=1,
+                 use_bias=False,
+                 norm_layer=None,
+                 act=None,
+                 act_attr=None):
+        super(SNConv, self).__init__()
+        if use_bias:
+            bias_attr = paddle.ParamAttr(name=name + "_bias")
+        else:
+            bias_attr = None
+        self._sn_conv = spectral_norm(
+            paddle.nn.Conv2D(
+                in_channels=in_channels,
+                out_channels=out_channels,
+                kernel_size=kernel_size,
+                stride=stride,
+                padding=padding,
+                dilation=dilation,
+                groups=groups,
+                weight_attr=paddle.ParamAttr(name=name + "_weights"),
+                bias_attr=bias_attr))
+        if norm_layer:
+            self._norm_layer = getattr(paddle.nn, norm_layer)(
+                num_features=out_channels, name=name + "_bn")
+        else:
+            self._norm_layer = None
+        if act:
+            if act_attr:
+                self._act = getattr(paddle.nn, act)(**act_attr,
+                                                    name=name + "_" + act)
+            else:
+                self._act = getattr(paddle.nn, act)(name=name + "_" + act)
+        else:
+            self._act = None
+
+    def forward(self, x):
+        # spectrally-normalized conv -> (optional norm) -> (optional act)
+        out = self._sn_conv(x)
+        if self._norm_layer:
+            out = self._norm_layer(out)
+        if self._act:
+            out = self._act(out)
+        return out
+
+
+class SNConvTranspose(nn.Layer):
+    """Spectrally-normalized Conv2DTranspose with optional norm/activation.
+
+    Transposed (upsampling) counterpart of ``SNConv``; adds the
+    ``output_padding`` argument of ``paddle.nn.Conv2DTranspose`` so
+    stride-2 upsampling can exactly double the spatial size.
+
+    Args:
+        name: prefix for the learnable-parameter names.
+        in_channels, out_channels, kernel_size, stride, padding,
+            output_padding, dilation, groups: forwarded verbatim to
+            ``paddle.nn.Conv2DTranspose``.
+        use_bias: when True, a named bias parameter is created.
+        norm_layer: optional class name looked up on ``paddle.nn``.
+        act: optional activation class name looked up on ``paddle.nn``.
+        act_attr: optional kwargs dict passed to the activation constructor.
+    """
+
+    def __init__(self,
+                 name,
+                 in_channels,
+                 out_channels,
+                 kernel_size,
+                 stride=1,
+                 padding=0,
+                 output_padding=0,
+                 dilation=1,
+                 groups=1,
+                 use_bias=False,
+                 norm_layer=None,
+                 act=None,
+                 act_attr=None):
+        super(SNConvTranspose, self).__init__()
+        if use_bias:
+            bias_attr = paddle.ParamAttr(name=name + "_bias")
+        else:
+            bias_attr = None
+        self._sn_conv_transpose = spectral_norm(
+            paddle.nn.Conv2DTranspose(
+                in_channels=in_channels,
+                out_channels=out_channels,
+                kernel_size=kernel_size,
+                stride=stride,
+                padding=padding,
+                output_padding=output_padding,
+                dilation=dilation,
+                groups=groups,
+                weight_attr=paddle.ParamAttr(name=name + "_weights"),
+                bias_attr=bias_attr))
+        if norm_layer:
+            self._norm_layer = getattr(paddle.nn, norm_layer)(
+                num_features=out_channels, name=name + "_bn")
+        else:
+            self._norm_layer = None
+        if act:
+            if act_attr:
+                self._act = getattr(paddle.nn, act)(**act_attr,
+                                                    name=name + "_" + act)
+            else:
+                self._act = getattr(paddle.nn, act)(name=name + "_" + act)
+        else:
+            self._act = None
+
+    def forward(self, x):
+        # spectrally-normalized transposed conv -> (norm) -> (act)
+        out = self._sn_conv_transpose(x)
+        if self._norm_layer:
+            out = self._norm_layer(out)
+        if self._act:
+            out = self._act(out)
+        return out
+
+
+class MiddleNet(nn.Layer):
+    """Bottleneck of three spectrally-normalized convolutions.
+
+    Structure: 1x1 conv (in -> mid) -> replicate-pad 1 -> 3x3 conv
+    (mid -> mid) -> 1x1 conv (mid -> out).  The 1x1/pad/3x3/1x1 layout
+    keeps the spatial size unchanged while remapping channel counts.
+
+    Args:
+        name: prefix for the learnable-parameter names.
+        in_channels: channels of the input feature map.
+        mid_channels: channels of the intermediate 3x3 convolution.
+        out_channels: channels of the output feature map.
+        use_bias: forwarded to every ``SNConv``.
+    """
+
+    def __init__(self, name, in_channels, mid_channels, out_channels,
+                 use_bias):
+        super(MiddleNet, self).__init__()
+        self._sn_conv1 = SNConv(
+            name=name + "_sn_conv1",
+            in_channels=in_channels,
+            out_channels=mid_channels,
+            kernel_size=1,
+            use_bias=use_bias,
+            norm_layer=None,
+            act=None)
+        # Replicate padding keeps the 3x3 conv size-preserving without
+        # introducing zero-border artifacts.
+        self._pad2d = nn.Pad2D(padding=[1, 1, 1, 1], mode="replicate")
+        self._sn_conv2 = SNConv(
+            name=name + "_sn_conv2",
+            in_channels=mid_channels,
+            out_channels=mid_channels,
+            kernel_size=3,
+            use_bias=use_bias)
+        self._sn_conv3 = SNConv(
+            name=name + "_sn_con3",
+            in_channels=mid_channels,
+            out_channels=out_channels,
+            kernel_size=1,
+            use_bias=use_bias)
+
+    def forward(self, x):
+
+        sn_conv1 = self._sn_conv1.forward(x)
+        pad_2d = self._pad2d.forward(sn_conv1)
+        sn_conv2 = self._sn_conv2.forward(pad_2d)
+        sn_conv3 = self._sn_conv3.forward(sn_conv2)
+        return sn_conv3
+
+
+class ResBlock(nn.Layer):
+    def __init__(self, name, channels, norm_layer, use_dropout, use_dilation,
+                 use_bias):
+        super(ResBlock, self).__init__()
+        if use_dilation:
+            padding_mat = [1, 1, 1, 1]
+        else:
+            padding_mat = [0, 0, 0, 0]
+        self._pad1 = nn.Pad2D(padding_mat, mode="replicate")
+
+        self._sn_conv1 = SNConv(
+            name=name + "_sn_conv1",
+            in_channels=channels,
+            out_channels=channels,
+            kernel_size=3,
+            padding=0,
+            norm_layer=norm_layer,
+            use_bias=use_bias,
+            act="ReLU",
+            act_attr=None)
+        if use_dropout:
+            self._dropout = nn.Dropout(0.5)
+        else:
+            self._dropout = None
+        self._pad2 = nn.Pad2D([1, 1, 1, 1], mode="replicate")
+        self._sn_conv2 = SNConv(
+            name=name + "_sn_conv2",
+            in_channels=channels,
+            out_channels=channels,
+            kernel_size=3,
+            norm_layer=norm_layer,
+            use_bias=use_bias,
+            act="ReLU",
+            act_attr=None)
+
+    def forward(self, x):
+        pad1 = self._pad1.forward(x)
+        sn_conv1 = self._sn_conv1.forward(pad1)
+        pad2 = self._pad2.forward(sn_conv1)
+        sn_conv2 = self._sn_conv2.forward(pad2)
+        return sn_conv2 + x

+ 251 - 0
StyleText/arch/decoder.py

@@ -0,0 +1,251 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import paddle
+import paddle.nn as nn
+
+from arch.base_module import SNConv, SNConvTranspose, ResBlock
+
+
+class Decoder(nn.Layer):
+    """Plain decoder: residual blocks, three 2x upsamplings, output conv.
+
+    Structure: ``conv_block_num`` ResBlocks at ``encode_dim * 8`` channels
+    -> three stride-2 ``SNConvTranspose`` layers (8x -> 4x -> 2x -> 1x
+    ``encode_dim`` channels, each doubling the spatial size) -> replicate
+    pad -> 3x3 ``SNConv`` to ``out_channels`` with its own activation
+    (``out_conv_act``/``out_conv_act_attr``).
+
+    ``forward`` accepts a tensor, or a list/tuple of tensors that is
+    channel-concatenated first, and returns a dict of every intermediate
+    feature map keyed by stage name ("conv_blocks", "up1", ..., "out_conv").
+    """
+
+    def __init__(self, name, encode_dim, out_channels, use_bias, norm_layer,
+                 act, act_attr, conv_block_dropout, conv_block_num,
+                 conv_block_dilation, out_conv_act, out_conv_act_attr):
+        super(Decoder, self).__init__()
+        conv_blocks = []
+        for i in range(conv_block_num):
+            conv_blocks.append(
+                ResBlock(
+                    name="{}_conv_block_{}".format(name, i),
+                    channels=encode_dim * 8,
+                    norm_layer=norm_layer,
+                    use_dropout=conv_block_dropout,
+                    use_dilation=conv_block_dilation,
+                    use_bias=use_bias))
+        self.conv_blocks = nn.Sequential(*conv_blocks)
+        self._up1 = SNConvTranspose(
+            name=name + "_up1",
+            in_channels=encode_dim * 8,
+            out_channels=encode_dim * 4,
+            kernel_size=3,
+            stride=2,
+            padding=1,
+            output_padding=1,
+            use_bias=use_bias,
+            norm_layer=norm_layer,
+            act=act,
+            act_attr=act_attr)
+        self._up2 = SNConvTranspose(
+            name=name + "_up2",
+            in_channels=encode_dim * 4,
+            out_channels=encode_dim * 2,
+            kernel_size=3,
+            stride=2,
+            padding=1,
+            output_padding=1,
+            use_bias=use_bias,
+            norm_layer=norm_layer,
+            act=act,
+            act_attr=act_attr)
+        self._up3 = SNConvTranspose(
+            name=name + "_up3",
+            in_channels=encode_dim * 2,
+            out_channels=encode_dim,
+            kernel_size=3,
+            stride=2,
+            padding=1,
+            output_padding=1,
+            use_bias=use_bias,
+            norm_layer=norm_layer,
+            act=act,
+            act_attr=act_attr)
+        self._pad2d = paddle.nn.Pad2D([1, 1, 1, 1], mode="replicate")
+        self._out_conv = SNConv(
+            name=name + "_out_conv",
+            in_channels=encode_dim,
+            out_channels=out_channels,
+            kernel_size=3,
+            use_bias=use_bias,
+            norm_layer=None,
+            act=out_conv_act,
+            act_attr=out_conv_act_attr)
+
+    def forward(self, x):
+        # Multiple encoder outputs may be passed; fuse them along channels.
+        if isinstance(x, (list, tuple)):
+            x = paddle.concat(x, axis=1)
+        output_dict = dict()
+        output_dict["conv_blocks"] = self.conv_blocks.forward(x)
+        output_dict["up1"] = self._up1.forward(output_dict["conv_blocks"])
+        output_dict["up2"] = self._up2.forward(output_dict["up1"])
+        output_dict["up3"] = self._up3.forward(output_dict["up2"])
+        output_dict["pad2d"] = self._pad2d.forward(output_dict["up3"])
+        output_dict["out_conv"] = self._out_conv.forward(output_dict["pad2d"])
+        return output_dict
+
+
+class DecoderUnet(nn.Layer):
+    """U-Net-style decoder fusing two inputs and two skip connections.
+
+    Like ``Decoder`` but ``forward`` takes two main inputs ``x`` and ``y``
+    (channel-concatenated before the residual blocks) plus two encoder
+    feature maps ``feature2``/``feature1`` that are concatenated onto the
+    outputs of the first and second upsampling stages (the skip paths —
+    hence ``_up2``'s in_channels of ``encode_dim * 8`` and ``_up3``'s of
+    ``encode_dim * 4``, sized for the concatenations).
+
+    Returns a dict of every intermediate feature map keyed by stage name.
+    """
+
+    def __init__(self, name, encode_dim, out_channels, use_bias, norm_layer,
+                 act, act_attr, conv_block_dropout, conv_block_num,
+                 conv_block_dilation, out_conv_act, out_conv_act_attr):
+        super(DecoderUnet, self).__init__()
+        conv_blocks = []
+        for i in range(conv_block_num):
+            conv_blocks.append(
+                ResBlock(
+                    name="{}_conv_block_{}".format(name, i),
+                    channels=encode_dim * 8,
+                    norm_layer=norm_layer,
+                    use_dropout=conv_block_dropout,
+                    use_dilation=conv_block_dilation,
+                    use_bias=use_bias))
+        self._conv_blocks = nn.Sequential(*conv_blocks)
+        self._up1 = SNConvTranspose(
+            name=name + "_up1",
+            in_channels=encode_dim * 8,
+            out_channels=encode_dim * 4,
+            kernel_size=3,
+            stride=2,
+            padding=1,
+            output_padding=1,
+            use_bias=use_bias,
+            norm_layer=norm_layer,
+            act=act,
+            act_attr=act_attr)
+        self._up2 = SNConvTranspose(
+            name=name + "_up2",
+            in_channels=encode_dim * 8,
+            out_channels=encode_dim * 2,
+            kernel_size=3,
+            stride=2,
+            padding=1,
+            output_padding=1,
+            use_bias=use_bias,
+            norm_layer=norm_layer,
+            act=act,
+            act_attr=act_attr)
+        self._up3 = SNConvTranspose(
+            name=name + "_up3",
+            in_channels=encode_dim * 4,
+            out_channels=encode_dim,
+            kernel_size=3,
+            stride=2,
+            padding=1,
+            output_padding=1,
+            use_bias=use_bias,
+            norm_layer=norm_layer,
+            act=act,
+            act_attr=act_attr)
+        self._pad2d = paddle.nn.Pad2D([1, 1, 1, 1], mode="replicate")
+        self._out_conv = SNConv(
+            name=name + "_out_conv",
+            in_channels=encode_dim,
+            out_channels=out_channels,
+            kernel_size=3,
+            use_bias=use_bias,
+            norm_layer=None,
+            act=out_conv_act,
+            act_attr=out_conv_act_attr)
+
+    def forward(self, x, y, feature2, feature1):
+        output_dict = dict()
+        # Fuse the two main inputs along channels before the ResBlocks.
+        output_dict["conv_blocks"] = self._conv_blocks(
+            paddle.concat(
+                (x, y), axis=1))
+        output_dict["up1"] = self._up1.forward(output_dict["conv_blocks"])
+        # Skip connections: concat encoder features before each upsample.
+        output_dict["up2"] = self._up2.forward(
+            paddle.concat(
+                (output_dict["up1"], feature2), axis=1))
+        output_dict["up3"] = self._up3.forward(
+            paddle.concat(
+                (output_dict["up2"], feature1), axis=1))
+        output_dict["pad2d"] = self._pad2d.forward(output_dict["up3"])
+        output_dict["out_conv"] = self._out_conv.forward(output_dict["pad2d"])
+        return output_dict
+
+
+class SingleDecoder(nn.Layer):
+    """U-Net-style decoder for a single main input with two skip inputs.
+
+    Variant of ``DecoderUnet``: ``forward`` takes one main input ``x``
+    (ResBlocks run at ``encode_dim * 4`` channels instead of ``* 8``) plus
+    two encoder feature maps ``feature2``/``feature1`` concatenated onto
+    the outputs of the first and second upsampling stages.  Returns a dict
+    of every intermediate feature map keyed by stage name.
+    """
+
+    def __init__(self, name, encode_dim, out_channels, use_bias, norm_layer,
+                 act, act_attr, conv_block_dropout, conv_block_num,
+                 conv_block_dilation, out_conv_act, out_conv_act_attr):
+        super(SingleDecoder, self).__init__()
+        conv_blocks = []
+        for i in range(conv_block_num):
+            conv_blocks.append(
+                ResBlock(
+                    name="{}_conv_block_{}".format(name, i),
+                    channels=encode_dim * 4,
+                    norm_layer=norm_layer,
+                    use_dropout=conv_block_dropout,
+                    use_dilation=conv_block_dilation,
+                    use_bias=use_bias))
+        self._conv_blocks = nn.Sequential(*conv_blocks)
+        self._up1 = SNConvTranspose(
+            name=name + "_up1",
+            in_channels=encode_dim * 4,
+            out_channels=encode_dim * 4,
+            kernel_size=3,
+            stride=2,
+            padding=1,
+            output_padding=1,
+            use_bias=use_bias,
+            norm_layer=norm_layer,
+            act=act,
+            act_attr=act_attr)
+        self._up2 = SNConvTranspose(
+            name=name + "_up2",
+            in_channels=encode_dim * 8,
+            out_channels=encode_dim * 2,
+            kernel_size=3,
+            stride=2,
+            padding=1,
+            output_padding=1,
+            use_bias=use_bias,
+            norm_layer=norm_layer,
+            act=act,
+            act_attr=act_attr)
+        self._up3 = SNConvTranspose(
+            name=name + "_up3",
+            in_channels=encode_dim * 4,
+            out_channels=encode_dim,
+            kernel_size=3,
+            stride=2,
+            padding=1,
+            output_padding=1,
+            use_bias=use_bias,
+            norm_layer=norm_layer,
+            act=act,
+            act_attr=act_attr)
+        self._pad2d = paddle.nn.Pad2D([1, 1, 1, 1], mode="replicate")
+        self._out_conv = SNConv(
+            name=name + "_out_conv",
+            in_channels=encode_dim,
+            out_channels=out_channels,
+            kernel_size=3,
+            use_bias=use_bias,
+            norm_layer=None,
+            act=out_conv_act,
+            act_attr=out_conv_act_attr)
+
+    def forward(self, x, feature2, feature1):
+        output_dict = dict()
+        output_dict["conv_blocks"] = self._conv_blocks.forward(x)
+        output_dict["up1"] = self._up1.forward(output_dict["conv_blocks"])
+        # Skip connections: concat encoder features before each upsample.
+        output_dict["up2"] = self._up2.forward(
+            paddle.concat(
+                (output_dict["up1"], feature2), axis=1))
+        output_dict["up3"] = self._up3.forward(
+            paddle.concat(
+                (output_dict["up2"], feature1), axis=1))
+        output_dict["pad2d"] = self._pad2d.forward(output_dict["up3"])
+        output_dict["out_conv"] = self._out_conv.forward(output_dict["pad2d"])
+        return output_dict

+ 186 - 0
StyleText/arch/encoder.py

@@ -0,0 +1,186 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import paddle
+import paddle.nn as nn
+
+from arch.base_module import SNConv, SNConvTranspose, ResBlock
+
+
+class Encoder(nn.Layer):
+    """Encoder: stem conv, three 2x downsamplings, then residual blocks.
+
+    Structure: replicate pad 3 -> 7x7 ``SNConv`` stem -> three stride-2
+    3x3 ``SNConv`` layers (encode_dim -> 2x -> 4x -> 4x channels, each
+    halving the spatial size) -> ``conv_block_num`` ResBlocks at
+    ``encode_dim * 4`` channels.
+
+    ``forward`` returns a dict of every intermediate feature map keyed by
+    stage name ("in_conv", "down1", "down2", "down3", "res_blocks"), so
+    decoders can pick skip features.
+    """
+
+    def __init__(self, name, in_channels, encode_dim, use_bias, norm_layer,
+                 act, act_attr, conv_block_dropout, conv_block_num,
+                 conv_block_dilation):
+        super(Encoder, self).__init__()
+        # Pad 3 on each side so the 7x7 stem conv preserves spatial size.
+        self._pad2d = paddle.nn.Pad2D([3, 3, 3, 3], mode="replicate")
+        self._in_conv = SNConv(
+            name=name + "_in_conv",
+            in_channels=in_channels,
+            out_channels=encode_dim,
+            kernel_size=7,
+            use_bias=use_bias,
+            norm_layer=norm_layer,
+            act=act,
+            act_attr=act_attr)
+        self._down1 = SNConv(
+            name=name + "_down1",
+            in_channels=encode_dim,
+            out_channels=encode_dim * 2,
+            kernel_size=3,
+            stride=2,
+            padding=1,
+            use_bias=use_bias,
+            norm_layer=norm_layer,
+            act=act,
+            act_attr=act_attr)
+        self._down2 = SNConv(
+            name=name + "_down2",
+            in_channels=encode_dim * 2,
+            out_channels=encode_dim * 4,
+            kernel_size=3,
+            stride=2,
+            padding=1,
+            use_bias=use_bias,
+            norm_layer=norm_layer,
+            act=act,
+            act_attr=act_attr)
+        self._down3 = SNConv(
+            name=name + "_down3",
+            in_channels=encode_dim * 4,
+            out_channels=encode_dim * 4,
+            kernel_size=3,
+            stride=2,
+            padding=1,
+            use_bias=use_bias,
+            norm_layer=norm_layer,
+            act=act,
+            act_attr=act_attr)
+        conv_blocks = []
+        for i in range(conv_block_num):
+            conv_blocks.append(
+                ResBlock(
+                    name="{}_conv_block_{}".format(name, i),
+                    channels=encode_dim * 4,
+                    norm_layer=norm_layer,
+                    use_dropout=conv_block_dropout,
+                    use_dilation=conv_block_dilation,
+                    use_bias=use_bias))
+        self._conv_blocks = nn.Sequential(*conv_blocks)
+
+    def forward(self, x):
+        out_dict = dict()
+        x = self._pad2d(x)
+        out_dict["in_conv"] = self._in_conv.forward(x)
+        out_dict["down1"] = self._down1.forward(out_dict["in_conv"])
+        out_dict["down2"] = self._down2.forward(out_dict["down1"])
+        out_dict["down3"] = self._down3.forward(out_dict["down2"])
+        out_dict["res_blocks"] = self._conv_blocks.forward(out_dict["down3"])
+        return out_dict
+
+
+class EncoderUnet(nn.Layer):
+    """Small U-Net encoder: four downsamplings, two upsamplings with skips.
+
+    Structure: replicate pad 3 -> 7x7 ``SNConv`` stem -> four stride-2
+    3x3 ``SNConv`` layers (all at ``encode_dim * 2`` channels after the
+    first) -> two stride-2 ``SNConvTranspose`` layers, where ``up2``
+    consumes the concat of ``down3`` and ``up1`` (hence its in_channels of
+    ``encode_dim * 4``), and the final "concat" output fuses ``down2``
+    with ``up2``.
+
+    ``forward`` returns a dict of every intermediate feature map keyed by
+    stage name ("in_conv", "down1"..."down4", "up1", "up2", "concat").
+    """
+
+    def __init__(self, name, in_channels, encode_dim, use_bias, norm_layer,
+                 act, act_attr):
+        super(EncoderUnet, self).__init__()
+        # Pad 3 on each side so the 7x7 stem conv preserves spatial size.
+        self._pad2d = paddle.nn.Pad2D([3, 3, 3, 3], mode="replicate")
+        self._in_conv = SNConv(
+            name=name + "_in_conv",
+            in_channels=in_channels,
+            out_channels=encode_dim,
+            kernel_size=7,
+            use_bias=use_bias,
+            norm_layer=norm_layer,
+            act=act,
+            act_attr=act_attr)
+        self._down1 = SNConv(
+            name=name + "_down1",
+            in_channels=encode_dim,
+            out_channels=encode_dim * 2,
+            kernel_size=3,
+            stride=2,
+            padding=1,
+            use_bias=use_bias,
+            norm_layer=norm_layer,
+            act=act,
+            act_attr=act_attr)
+        self._down2 = SNConv(
+            name=name + "_down2",
+            in_channels=encode_dim * 2,
+            out_channels=encode_dim * 2,
+            kernel_size=3,
+            stride=2,
+            padding=1,
+            use_bias=use_bias,
+            norm_layer=norm_layer,
+            act=act,
+            act_attr=act_attr)
+        self._down3 = SNConv(
+            name=name + "_down3",
+            in_channels=encode_dim * 2,
+            out_channels=encode_dim * 2,
+            kernel_size=3,
+            stride=2,
+            padding=1,
+            use_bias=use_bias,
+            norm_layer=norm_layer,
+            act=act,
+            act_attr=act_attr)
+        self._down4 = SNConv(
+            name=name + "_down4",
+            in_channels=encode_dim * 2,
+            out_channels=encode_dim * 2,
+            kernel_size=3,
+            stride=2,
+            padding=1,
+            use_bias=use_bias,
+            norm_layer=norm_layer,
+            act=act,
+            act_attr=act_attr)
+        self._up1 = SNConvTranspose(
+            name=name + "_up1",
+            in_channels=encode_dim * 2,
+            out_channels=encode_dim * 2,
+            kernel_size=3,
+            stride=2,
+            padding=1,
+            use_bias=use_bias,
+            norm_layer=norm_layer,
+            act=act,
+            act_attr=act_attr)
+        self._up2 = SNConvTranspose(
+            name=name + "_up2",
+            in_channels=encode_dim * 4,
+            out_channels=encode_dim * 4,
+            kernel_size=3,
+            stride=2,
+            padding=1,
+            use_bias=use_bias,
+            norm_layer=norm_layer,
+            act=act,
+            act_attr=act_attr)
+
+    def forward(self, x):
+        output_dict = dict()
+        x = self._pad2d(x)
+        output_dict['in_conv'] = self._in_conv.forward(x)
+        output_dict['down1'] = self._down1.forward(output_dict['in_conv'])
+        output_dict['down2'] = self._down2.forward(output_dict['down1'])
+        output_dict['down3'] = self._down3.forward(output_dict['down2'])
+        output_dict['down4'] = self._down4.forward(output_dict['down3'])
+        output_dict['up1'] = self._up1.forward(output_dict['down4'])
+        # Skip connection: fuse down3 with up1 before the second upsample.
+        output_dict['up2'] = self._up2.forward(
+            paddle.concat(
+                (output_dict['down3'], output_dict['up1']), axis=1))
+        # Final output fuses the down2 skip with up2 along channels.
+        output_dict['concat'] = paddle.concat(
+            (output_dict['down2'], output_dict['up2']), axis=1)
+        return output_dict

+ 0 - 0
StyleText/arch/spectral_norm.py


Nem az összes módosított fájl került megjelenítésre, mert túl sok fájl változott