
Create image enhancement branch

yangjun 1 year ago
commit
a1a2b4a94f
100 changed files with 339 additions and 0 deletions
  1. .gitignore (+88 -0)
  2. MS_SSIM_L1_loss.py (+96 -0)
  3. convert_model_to_tflite.py (+75 -0)
  4. dataset.py (+80 -0)
  5. dataset/raw_data/imgs_Trainblocks/block_0.png (binary)
  6. dataset/raw_data/imgs_Trainblocks/block_1.png (binary)
  7. dataset/raw_data/imgs_Trainblocks/block_10.png (binary)
  8. dataset/raw_data/imgs_Trainblocks/block_100.png (binary)
  9. dataset/raw_data/imgs_Trainblocks/block_101.png (binary)
  10. dataset/raw_data/imgs_Trainblocks/block_102.png (binary)
  11. dataset/raw_data/imgs_Trainblocks/block_103.png (binary)
  12. dataset/raw_data/imgs_Trainblocks/block_104.png (binary)
  13. dataset/raw_data/imgs_Trainblocks/block_105.png (binary)
  14. dataset/raw_data/imgs_Trainblocks/block_106.png (binary)
  15. dataset/raw_data/imgs_Trainblocks/block_107.png (binary)
  16. dataset/raw_data/imgs_Trainblocks/block_108.png (binary)
  17. dataset/raw_data/imgs_Trainblocks/block_109.png (binary)
  18. dataset/raw_data/imgs_Trainblocks/block_11.png (binary)
  19. dataset/raw_data/imgs_Trainblocks/block_110.png (binary)
  20. dataset/raw_data/imgs_Trainblocks/block_111.png (binary)
  21. dataset/raw_data/imgs_Trainblocks/block_112.png (binary)
  22. dataset/raw_data/imgs_Trainblocks/block_113.png (binary)
  23. dataset/raw_data/imgs_Trainblocks/block_114.png (binary)
  24. dataset/raw_data/imgs_Trainblocks/block_115.png (binary)
  25. dataset/raw_data/imgs_Trainblocks/block_116.png (binary)
  26. dataset/raw_data/imgs_Trainblocks/block_117.png (binary)
  27. dataset/raw_data/imgs_Trainblocks/block_118.png (binary)
  28. dataset/raw_data/imgs_Trainblocks/block_119.png (binary)
  29. dataset/raw_data/imgs_Trainblocks/block_12.png (binary)
  30. dataset/raw_data/imgs_Trainblocks/block_120.png (binary)
  31. dataset/raw_data/imgs_Trainblocks/block_121.png (binary)
  32. dataset/raw_data/imgs_Trainblocks/block_122.png (binary)
  33. dataset/raw_data/imgs_Trainblocks/block_123.png (binary)
  34. dataset/raw_data/imgs_Trainblocks/block_124.png (binary)
  35. dataset/raw_data/imgs_Trainblocks/block_125.png (binary)
  36. dataset/raw_data/imgs_Trainblocks/block_126.png (binary)
  37. dataset/raw_data/imgs_Trainblocks/block_127.png (binary)
  38. dataset/raw_data/imgs_Trainblocks/block_128.png (binary)
  39. dataset/raw_data/imgs_Trainblocks/block_129.png (binary)
  40. dataset/raw_data/imgs_Trainblocks/block_13.png (binary)
  41. dataset/raw_data/imgs_Trainblocks/block_130.png (binary)
  42. dataset/raw_data/imgs_Trainblocks/block_131.png (binary)
  43. dataset/raw_data/imgs_Trainblocks/block_132.png (binary)
  44. dataset/raw_data/imgs_Trainblocks/block_133.png (binary)
  45. dataset/raw_data/imgs_Trainblocks/block_134.png (binary)
  46. dataset/raw_data/imgs_Trainblocks/block_135.png (binary)
  47. dataset/raw_data/imgs_Trainblocks/block_136.png (binary)
  48. dataset/raw_data/imgs_Trainblocks/block_137.png (binary)
  49. dataset/raw_data/imgs_Trainblocks/block_138.png (binary)
  50. dataset/raw_data/imgs_Trainblocks/block_139.png (binary)
  51. dataset/raw_data/imgs_Trainblocks/block_14.png (binary)
  52. dataset/raw_data/imgs_Trainblocks/block_140.png (binary)
  53. dataset/raw_data/imgs_Trainblocks/block_141.png (binary)
  54. dataset/raw_data/imgs_Trainblocks/block_142.png (binary)
  55. dataset/raw_data/imgs_Trainblocks/block_143.png (binary)
  56. dataset/raw_data/imgs_Trainblocks/block_144.png (binary)
  57. dataset/raw_data/imgs_Trainblocks/block_145.png (binary)
  58. dataset/raw_data/imgs_Trainblocks/block_146.png (binary)
  59. dataset/raw_data/imgs_Trainblocks/block_147.png (binary)
  60. dataset/raw_data/imgs_Trainblocks/block_148.png (binary)
  61. dataset/raw_data/imgs_Trainblocks/block_149.png (binary)
  62. dataset/raw_data/imgs_Trainblocks/block_15.png (binary)
  63. dataset/raw_data/imgs_Trainblocks/block_150.png (binary)
  64. dataset/raw_data/imgs_Trainblocks/block_151.png (binary)
  65. dataset/raw_data/imgs_Trainblocks/block_152.png (binary)
  66. dataset/raw_data/imgs_Trainblocks/block_153.png (binary)
  67. dataset/raw_data/imgs_Trainblocks/block_154.png (binary)
  68. dataset/raw_data/imgs_Trainblocks/block_155.png (binary)
  69. dataset/raw_data/imgs_Trainblocks/block_156.png (binary)
  70. dataset/raw_data/imgs_Trainblocks/block_157.png (binary)
  71. dataset/raw_data/imgs_Trainblocks/block_158.png (binary)
  72. dataset/raw_data/imgs_Trainblocks/block_159.png (binary)
  73. dataset/raw_data/imgs_Trainblocks/block_16.png (binary)
  74. dataset/raw_data/imgs_Trainblocks/block_160.png (binary)
  75. dataset/raw_data/imgs_Trainblocks/block_161.png (binary)
  76. dataset/raw_data/imgs_Trainblocks/block_162.png (binary)
  77. dataset/raw_data/imgs_Trainblocks/block_163.png (binary)
  78. dataset/raw_data/imgs_Trainblocks/block_164.png (binary)
  79. dataset/raw_data/imgs_Trainblocks/block_165.png (binary)
  80. dataset/raw_data/imgs_Trainblocks/block_166.png (binary)
  81. dataset/raw_data/imgs_Trainblocks/block_167.png (binary)
  82. dataset/raw_data/imgs_Trainblocks/block_168.png (binary)
  83. dataset/raw_data/imgs_Trainblocks/block_169.png (binary)
  84. dataset/raw_data/imgs_Trainblocks/block_17.png (binary)
  85. dataset/raw_data/imgs_Trainblocks/block_170.png (binary)
  86. dataset/raw_data/imgs_Trainblocks/block_171.png (binary)
  87. dataset/raw_data/imgs_Trainblocks/block_172.png (binary)
  88. dataset/raw_data/imgs_Trainblocks/block_173.png (binary)
  89. dataset/raw_data/imgs_Trainblocks/block_174.png (binary)
  90. dataset/raw_data/imgs_Trainblocks/block_175.png (binary)
  91. dataset/raw_data/imgs_Trainblocks/block_176.png (binary)
  92. dataset/raw_data/imgs_Trainblocks/block_177.png (binary)
  93. dataset/raw_data/imgs_Trainblocks/block_178.png (binary)
  94. dataset/raw_data/imgs_Trainblocks/block_179.png (binary)
  95. dataset/raw_data/imgs_Trainblocks/block_18.png (binary)
  96. dataset/raw_data/imgs_Trainblocks/block_180.png (binary)
  97. dataset/raw_data/imgs_Trainblocks/block_181.png (binary)
  98. dataset/raw_data/imgs_Trainblocks/block_182.png (binary)
  99. dataset/raw_data/imgs_Trainblocks/block_183.png (binary)
  100. dataset/raw_data/imgs_Trainblocks/block_184.png (+0 -0)

+ 88 - 0
.gitignore

@@ -0,0 +1,88 @@
+# Virtualenv
+/.venv/
+/venv/
+
+# Byte-compiled / optimized / DLL files
+__pycache__/
+.ipynb_checkpoints/
+*.py[cod]
+
+# C extensions
+*.so
+
+# json file
+*.json
+
+# log file
+*.log
+
+# Distribution / packaging
+/bin/
+/build/
+/develop-eggs/
+/dist/
+/eggs/
+/lib/
+/lib64/
+/output/
+/inference_model/
+/output_inference/
+/parts/
+/sdist/
+/var/
+/*.egg-info/
+/.installed.cfg
+/*.egg
+/.eggs
+
+# AUTHORS and ChangeLog will be generated while packaging
+/AUTHORS
+/ChangeLog
+
+# BCloud / BuildSubmitter
+/build_submitter.*
+/logger_client_log
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+.tox/
+.coverage
+.cache
+.pytest_cache
+nosetests.xml
+coverage.xml
+
+# Translations
+*.mo
+
+# Sphinx documentation
+/docs/_build/
+
+*.tar
+*.pyc
+
+.idea/
+
+dataset/coco/annotations
+dataset/coco/train2017
+dataset/coco/val2017
+dataset/voc/VOCdevkit
+dataset/fruit/fruit-detection/
+dataset/voc/test.txt
+dataset/voc/trainval.txt
+dataset/wider_face/WIDER_test
+dataset/wider_face/WIDER_train
+dataset/wider_face/WIDER_val
+dataset/wider_face/wider_face_split
+
+ppdet/version.py
+
+# NPU meta folder
+kernel_meta/
+
+# MAC
+*.DS_Store
+

+ 96 - 0
MS_SSIM_L1_loss.py

@@ -0,0 +1,96 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Thu Dec  3 00:28:15 2020
+
+@author: Yunpeng Li, Tianjin University
+"""
+
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+class MS_SSIM_L1_LOSS(nn.Module):
+    # Have to use cuda, otherwise the speed is too slow.
+    def __init__(self, gaussian_sigmas=[0.5, 1.0, 2.0, 4.0, 8.0],
+                 data_range = 1.0,
+                 K=(0.01, 0.03),
+                 alpha=0.025,
+                 compensation=200.0,
+                 device=torch.device('cpu')):
+        super(MS_SSIM_L1_LOSS, self).__init__()
+        self.DR = data_range
+        self.C1 = (K[0] * data_range) ** 2
+        self.C2 = (K[1] * data_range) ** 2
+        self.pad = int(2 * gaussian_sigmas[-1])
+        self.alpha = alpha
+        self.compensation=compensation
+        filter_size = int(4 * gaussian_sigmas[-1] + 1)
+        g_masks = torch.zeros((3*len(gaussian_sigmas), 1, filter_size, filter_size))
+        for idx, sigma in enumerate(gaussian_sigmas):
+            # r0,g0,b0,r1,g1,b1,...,rM,gM,bM
+            g_masks[3*idx+0, 0, :, :] = self._fspecial_gauss_2d(filter_size, sigma)
+            g_masks[3*idx+1, 0, :, :] = self._fspecial_gauss_2d(filter_size, sigma)
+            g_masks[3*idx+2, 0, :, :] = self._fspecial_gauss_2d(filter_size, sigma)
+        self.g_masks = g_masks.to(device)
+
+    def _fspecial_gauss_1d(self, size, sigma):
+        """Create 1-D gauss kernel
+        Args:
+            size (int): the size of gauss kernel
+            sigma (float): sigma of normal distribution
+
+        Returns:
+            torch.Tensor: 1D kernel (size)
+        """
+        coords = torch.arange(size).to(dtype=torch.float)
+        coords -= size // 2
+        g = torch.exp(-(coords ** 2) / (2 * sigma ** 2))
+        g /= g.sum()
+        return g.reshape(-1)
+
+    def _fspecial_gauss_2d(self, size, sigma):
+        """Create 2-D gauss kernel
+        Args:
+            size (int): the size of gauss kernel
+            sigma (float): sigma of normal distribution
+
+        Returns:
+            torch.Tensor: 2D kernel (size x size)
+        """
+        gaussian_vec = self._fspecial_gauss_1d(size, sigma)
+        return torch.outer(gaussian_vec, gaussian_vec)
+
+    def forward(self, x, y):
+        b, c, h, w = x.shape
+        mux = F.conv2d(x, self.g_masks, groups=3, padding=self.pad)
+        muy = F.conv2d(y, self.g_masks, groups=3, padding=self.pad)
+
+        mux2 = mux * mux
+        muy2 = muy * muy
+        muxy = mux * muy
+
+        sigmax2 = F.conv2d(x * x, self.g_masks, groups=3, padding=self.pad) - mux2
+        sigmay2 = F.conv2d(y * y, self.g_masks, groups=3, padding=self.pad) - muy2
+        sigmaxy = F.conv2d(x * y, self.g_masks, groups=3, padding=self.pad) - muxy
+
+        # l(j), cs(j) in MS-SSIM
+        l  = (2 * muxy    + self.C1) / (mux2    + muy2    + self.C1)  # [B, 15, H, W]
+        cs = (2 * sigmaxy + self.C2) / (sigmax2 + sigmay2 + self.C2)
+
+        lM = l[:, -1, :, :] * l[:, -2, :, :] * l[:, -3, :, :]
+        PIcs = cs.prod(dim=1)
+
+        loss_ms_ssim = 1 - lM*PIcs  # [B, H, W]
+
+        loss_l1 = F.l1_loss(x, y, reduction='none')  # [B, 3, H, W]
+        # average l1 loss in 3 channels
+        gaussian_l1 = F.conv2d(loss_l1, self.g_masks.narrow(dim=0, start=-3, length=3),
+                               groups=3, padding=self.pad).mean(1)  # [B, H, W]
+
+        loss_mix = self.alpha * loss_ms_ssim + (1 - self.alpha) * gaussian_l1 / self.DR
+        loss_mix = self.compensation*loss_mix
+
+        return loss_mix.mean()
+

+ 75 - 0
convert_model_to_tflite.py

@@ -0,0 +1,75 @@
+#!/usr/bin/env python
+import argparse
+import os
+import shutil
+import onnx
+import torch
+import torch.backends._nnapi.prepare
+import torch.utils.bundled_inputs
+import torch.utils.mobile_optimizer
+from onnx_tf.backend import prepare
+import tensorflow as tf
+from model import M64ColorNet
+from torch.nn.utils import prune
+
+parser = argparse.ArgumentParser()
+parser.add_argument('--ckpt_path',
+                    type=str,
+                    help='This is the path where to store the ckpt file',
+                    default="output/model.pt")
+
+
+def convert_to_tflite(out_dir: str, model: torch.nn.Module):
+    dummy_input = torch.randn((1, 3, 256, 256))
+    onnx_path = f"{out_dir}/converted.onnx"
+    torch.onnx.export(model, dummy_input, onnx_path, verbose=True,
+                      input_names=['input'], output_names=['output'])
+
+    tf_path = f"{out_dir}/tf_model"
+    onnx_model = onnx.load(onnx_path)
+    # The prepare function converts an ONNX model to an internal representation
+    # of the computational graph called TensorflowRep and returns
+    # the converted representation.
+    tf_rep = prepare(onnx_model)  # creating TensorflowRep object
+    # export_graph function obtains the graph proto corresponding to the ONNX
+    # model associated with the backend representation and serializes
+    # to a protobuf file.
+    tf_rep.export_graph(tf_path)
+
+    converter = tf.lite.TFLiteConverter.from_saved_model(tf_path)
+    converter.optimizations = [tf.lite.Optimize.DEFAULT]
+    tf_lite_model = converter.convert()
+    tflite_path = f"{out_dir}/doc_clean.tflite"
+    with open(tflite_path, 'wb') as f:
+        f.write(tf_lite_model)
+
+
+def convert_to_tflite_with_tiny(out_dir: str, fileName:str, model: torch.nn.Module):
+    from tinynn.converter import TFLiteConverter
+    dummy_input = torch.rand((1, 3, 256, 256))
+
+    # output_path = os.path.join(out_dir, 'out', 'mbv1_224.tflite')
+    tflite_path = f"{out_dir}/{fileName}"
+
+    # When converting quantized models, please ensure the quantization backend is set.
+    # torch.backends.quantized.engine = 'qnnpack'
+
+    # The code section below is used to convert the model to the TFLite format
+    # If you want to perform dynamic quantization on the float models,
+    # you may pass the following arguments.
+    #   `quantize_target_type='int8', hybrid_quantization_from_float=True, hybrid_per_channel=False`
+    # As for static quantization (e.g. quantization-aware training and post-training quantization),
+    # please refer to the code examples in the `examples/quantization` folder.
+    converter = TFLiteConverter(model, dummy_input, tflite_path)
+    converter.convert()
+
+if __name__ == "__main__":
+    out_dir = "output_tflite"
+    shutil.rmtree(out_dir, ignore_errors=True)
+    os.mkdir(out_dir)
+    args = parser.parse_args()
+    model, _, _, _, ssim, psnr = M64ColorNet.load_trained_model(args.ckpt_path)
+    name = os.path.basename(args.ckpt_path).split(".")[0]
+    fileName = f"ssim_{round(ssim, 2)}_psnr_{round(psnr, 2)}_{name}.tflite"
+    # convert_to_tflite(out_dir, model)
+    convert_to_tflite_with_tiny(out_dir, fileName, model)
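
The conversion script above is intended to be run from the command line, for example (using the parser's default checkpoint location, which assumes a trained model already exists at output/model.pt and that the tinynn package is installed):

    python convert_model_to_tflite.py --ckpt_path output/model.pt

With the code as committed, the TinyNN path (convert_to_tflite_with_tiny) is used and the direct ONNX → onnx-tf → TFLite path (convert_to_tflite) is left commented out; the resulting .tflite file is written to output_tflite/.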

+ 80 - 0
dataset.py

@@ -0,0 +1,80 @@
+from torch.utils.data import Dataset
+from PIL import Image
+from torchvision import transforms
+from typing import List, Tuple
+import imgaug.augmenters as iaa
+import numpy as np
+from sklearn.model_selection import train_test_split
+
+class UnNormalize(object):
+    def __init__(self, mean, std):
+        self.mean = mean
+        self.std = std
+
+    def __call__(self, tensor):
+        """
+        Args:
+            tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
+        Returns:
+            Tensor: Normalized image.
+        """
+        for t, m, s in zip(tensor, self.mean, self.std):
+            t.mul_(s).add_(m)
+            # The normalize code -> t.sub_(m).div_(s)
+        return tensor
+
+class DocCleanDataset(Dataset):
+
+    @staticmethod
+    def prepareDataset(dataset:str, shuffle=True):
+        # imgs_dir = "dataset/raw_data/imgs_Trainblocks"
+        with open(f"{dataset}/train_block_names.txt") as train_block_names_file:
+            image_names = train_block_names_file.read().splitlines()
+            train_img_names, eval_img_names, _, _ = train_test_split(
+                image_names, image_names, test_size=0.2, random_state=1, shuffle=shuffle)
+            return train_img_names, eval_img_names, dataset
+
+    def __init__(self, img_names: List[str], imgs_dir: str, normalized_tuple: Tuple[List[float], List[float]] = None, dev=False, img_aug=False):
+        if dev:
+            num = int(len(img_names) * 0.01)
+            img_names = img_names[0:num]
+        self.img_names = img_names
+        self.imgs_dir = imgs_dir
+        if normalized_tuple:
+            mean, std = normalized_tuple
+            self.normalized = transforms.Compose([
+                transforms.ToTensor(), 
+                transforms.Normalize(mean=mean, std=std)
+            ])
+            self.aug_seq = iaa.Sometimes(0.7, iaa.OneOf([
+                    iaa.SaltAndPepper(p=(0.0, 0.05)),
+                    iaa.imgcorruptlike.MotionBlur(severity=2),
+                    iaa.SigmoidContrast(gain=(3, 10), cutoff=(0.4, 0.6)),
+                    iaa.imgcorruptlike.JpegCompression(severity=2),
+                    iaa.GammaContrast((0.5, 2.0)),
+                    iaa.LogContrast(gain=(0.5, 0.9)),
+                    iaa.GaussianBlur(sigma=(0, 1)),
+                    iaa.imgcorruptlike.SpeckleNoise(severity=1),
+                    iaa.AdditiveGaussianNoise(scale=(0.03*255, 0.2*255), per_channel=True),
+                    iaa.Add((-20, 20), per_channel=0.5),
+                    iaa.AddToBrightness((-30, 30))
+                ]))
+        self.img_aug = img_aug
+        self.toTensor = transforms.ToTensor()
+
+    def __len__(self):
+        return len(self.img_names)
+
+    def __getitem__(self, index):
+        img = Image.open(f"{self.imgs_dir}/{self.img_names[index]}")
+        gt = Image.open(f"{self.imgs_dir}/gt{self.img_names[index]}")
+        if hasattr(self, 'normalized'):
+            img_np = np.array(img)
+            if self.img_aug == True:
+                img_np = self.aug_seq.augment_images([np.array(img)])[0]
+            normalized_img = self.normalized(img_np)
+            img = self.toTensor(img_np)
+        else:
+            img = self.toTensor(img)
+            normalized_img = img
+        return img, normalized_img, self.toTensor(gt)
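
A minimal sketch (not part of the commit) of wiring DocCleanDataset into a PyTorch DataLoader. The dataset directory comes from the path mentioned in the comment above; the normalization statistics and batch size below are placeholders rather than values from this repository.

    from torch.utils.data import DataLoader
    from dataset import DocCleanDataset

    # Split the block names listed in train_block_names.txt into train/eval subsets.
    train_names, eval_names, imgs_dir = DocCleanDataset.prepareDataset(
        "dataset/raw_data/imgs_Trainblocks")

    # Placeholder normalization stats; real values would be computed from the training data.
    stats = ([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    train_set = DocCleanDataset(train_names, imgs_dir, normalized_tuple=stats, img_aug=True)
    eval_set = DocCleanDataset(eval_names, imgs_dir)

    train_loader = DataLoader(train_set, batch_size=8, shuffle=True)
    for img, normalized_img, gt in train_loader:
        # img: (possibly augmented) input tensor, normalized_img: its normalized version,
        # gt: the clean ground-truth block loaded from the matching gt<name> file.
        break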


Some files were not shown because too many files changed in this diff.