From 8190215aa14e388e64116fc6d2c58def2e83e6e5 Mon Sep 17 00:00:00 2001 From: Igor Adamski Date: Wed, 30 Jan 2019 10:27:30 +0100 Subject: [PATCH 1/7] Use different Docker image for blender verification --- apps/blender/blender_reference_generator.py | 400 ------------------ .../images/blender_verifier.Dockerfile | 36 ++ .../images/scripts_verifier/__init__.py | 0 .../resources/images/scripts_verifier/copy.sh | 9 + .../images/scripts_verifier/crop_generator.py | 173 ++++++++ .../images/scripts_verifier/decision_tree.py | 40 ++ .../images/scripts_verifier/edges.py | 63 +++ .../histograms_correlation.py | 56 +++ .../scripts_verifier/img_format_converter.py | 26 ++ .../img_metrics_calculator.py | 222 ++++++++++ .../images/scripts_verifier/imgmetrics.py | 122 ++++++ .../scripts_verifier/mass_center_distance.py | 75 ++++ .../resources/images/scripts_verifier/psnr.py | 55 +++ .../images/scripts_verifier/requirements.txt | 9 + .../images/scripts_verifier/runner.py | 17 + .../images/scripts_verifier/skimage.py | 219 ++++++++++ .../resources/images/scripts_verifier/ssim.py | 50 +++ .../tree35_[crr=87.71][frr=0.92].pkl | Bin 0 -> 10966 bytes .../images/scripts_verifier/variance.py | 39 ++ .../images/scripts_verifier/verificator.py | 128 ++++++ .../images/scripts_verifier/wavelet.py | 165 ++++++++ apps/blender/task/blenderrendertask.py | 2 - apps/core/task/coretask.py | 5 - apps/images.ini | 2 +- golem/verificator/blender_verifier.py | 232 +++------- golem/verificator/rendering_verifier.py | 38 -- golem/verificator/verifier.py | 9 +- .../test_blender_reference_generator.py | 138 ------ .../verification/test_verification_queue.py | 2 - .../test_verificator_integration.py | 187 ++------ .../golem/verificator/test_blenderverifier.py | 136 +----- .../verificator/test_renderingverifier.py | 44 -- 32 files changed, 1597 insertions(+), 1102 deletions(-) delete mode 100644 apps/blender/blender_reference_generator.py create mode 100644 apps/blender/resources/images/blender_verifier.Dockerfile create mode 100644 apps/blender/resources/images/scripts_verifier/__init__.py create mode 100755 apps/blender/resources/images/scripts_verifier/copy.sh create mode 100644 apps/blender/resources/images/scripts_verifier/crop_generator.py create mode 100644 apps/blender/resources/images/scripts_verifier/decision_tree.py create mode 100644 apps/blender/resources/images/scripts_verifier/edges.py create mode 100644 apps/blender/resources/images/scripts_verifier/histograms_correlation.py create mode 100644 apps/blender/resources/images/scripts_verifier/img_format_converter.py create mode 100644 apps/blender/resources/images/scripts_verifier/img_metrics_calculator.py create mode 100644 apps/blender/resources/images/scripts_verifier/imgmetrics.py create mode 100644 apps/blender/resources/images/scripts_verifier/mass_center_distance.py create mode 100644 apps/blender/resources/images/scripts_verifier/psnr.py create mode 100644 apps/blender/resources/images/scripts_verifier/requirements.txt create mode 100644 apps/blender/resources/images/scripts_verifier/runner.py create mode 100644 apps/blender/resources/images/scripts_verifier/skimage.py create mode 100644 apps/blender/resources/images/scripts_verifier/ssim.py create mode 100644 apps/blender/resources/images/scripts_verifier/tree35_[crr=87.71][frr=0.92].pkl create mode 100644 apps/blender/resources/images/scripts_verifier/variance.py create mode 100644 apps/blender/resources/images/scripts_verifier/verificator.py create mode 100644 
apps/blender/resources/images/scripts_verifier/wavelet.py delete mode 100644 tests/apps/blender/verification/test_blender_reference_generator.py diff --git a/apps/blender/blender_reference_generator.py b/apps/blender/blender_reference_generator.py deleted file mode 100644 index 61e491ab2d..0000000000 --- a/apps/blender/blender_reference_generator.py +++ /dev/null @@ -1,400 +0,0 @@ -import logging -import math -import os -import random -from copy import deepcopy -from typing import Dict, Tuple, List, Callable, Optional, Any, Generator -from twisted.internet.defer import Deferred, inlineCallbacks - -import numpy - -from golem.core.common import timeout_to_deadline -from golem.task.localcomputer import ComputerAdapter - -logger = logging.getLogger("apps.blender.blender_reference_generator") - - -class Region: - - def __init__(self, left: float = -1, top: float = -1, right: float = -1, - bottom: float = -1) -> None: - self.left = left - self.right = right - self.top = top - self.bottom = bottom - - def to_tuple(self): - return self.left, self.top, self.right, self.bottom - - -class PixelRegion: - - def __init__(self, left: int = -1, top: int = -1, right: int = -1, - bottom: int = -1) -> None: - self.left = left - self.right = right - self.top = top - self.bottom = bottom - - -class SubImage: - CROP_RELATIVE_SIZE = 0.1 - PIXEL_OFFSET = numpy.float32(0.5) - MIN_CROP_SIZE = 8 - - def __init__(self, region: Region, resolution: Tuple[int, int]) -> None: - self.region = region - self.pixel_region = self.calculate_pixels(region, resolution[0], - resolution[1]) - self.width = self.pixel_region.right - self.pixel_region.left - self.height = self.pixel_region.top - self.pixel_region.bottom - self.image_width = resolution[0] - self.image_height = resolution[1] - - @staticmethod - def calculate_pixels(region: Region, width: int, height: int) \ - -> PixelRegion: - # This is how Blender is calculating pixel, check - # BlenderSync::get_buffer_params in blender_camera.cpp file - # BoundBox2D border = cam->border.clamp(); - # params.full_x = (int)(border.left * (float)width); - - # NOTE blender uses floats (single precision) while python operates on - # doubles - # Here numpy is used to emulate this loss of precision when assigning - # double to float: - left = math.floor( - numpy.float32(region.left) * numpy.float32(width) + - SubImage.PIXEL_OFFSET) - - right = math.floor( - numpy.float32(region.right) * numpy.float32(width) + - SubImage.PIXEL_OFFSET) - - bottom = math.floor( - numpy.float32(region.bottom) * numpy.float32(height) + - SubImage.PIXEL_OFFSET) - - top = math.floor( - numpy.float32(region.top) * numpy.float32(height) + - SubImage.PIXEL_OFFSET) - - return PixelRegion(int(left), int(top), int(right), int(bottom)) - - @staticmethod - def __calculate_crop_side_length(subtask_side_length: int) -> int: - calculated_length = int( - SubImage.CROP_RELATIVE_SIZE * subtask_side_length) - - return max(SubImage.MIN_CROP_SIZE, calculated_length) - - def get_default_crop_size(self) -> Tuple[int, int]: - x = self.__calculate_crop_side_length(self.width) - y = self.__calculate_crop_side_length(self.height) - return x, y - - -class Crop: - - @staticmethod - def create_from_region(crop_id: str, crop_region: Region, - subimage: SubImage, - crops_path: str): - crop = Crop(crop_id, subimage, crops_path) - crop.crop_region = crop_region - crop.pixel_region = crop.subimage.calculate_pixels( - crop_region, subimage.image_width, subimage.image_height) - return crop - - @staticmethod - def create_from_pixel_region(crop_id: 
str, pixel_region: PixelRegion, - subimage: SubImage, crops_path: str): - crop = Crop(crop_id, subimage, crops_path) - crop.pixel_region = pixel_region - crop.crop_region = crop.calculate_borders() - return crop - - def __init__(self, crop_id: str, subimage: SubImage, crops_path: str) \ - -> None: - self.crop_id = crop_id - self.subimage = subimage - self.crop_path = os.path.join(crops_path, crop_id) - self.pixel_region = PixelRegion() - self.crop_region = Region() - - def get_relative_top_left(self) -> Tuple[int, int]: - # get top left corner of crop in relation to particular subimage - y = self.subimage.pixel_region.top - self.pixel_region.top - logger.debug("X=%r, Y=%r", self.pixel_region.left, y) - return self.pixel_region.left, y - - def calculate_borders(self): - left = float( - (numpy.float32(self.pixel_region.left) + SubImage.PIXEL_OFFSET) / - numpy.float32(self.subimage.image_width)) - - right = float( - (numpy.float32(self.pixel_region.right) + SubImage.PIXEL_OFFSET) / - numpy.float32(self.subimage.image_width)) - - top = float( - (numpy.float32(self.pixel_region.top) + SubImage.PIXEL_OFFSET) / - numpy.float32(self.subimage.image_height)) - - bottom = float( - (numpy.float32(self.pixel_region.bottom) + SubImage.PIXEL_OFFSET) / - numpy.float32(self.subimage.image_height)) - - return Region(left, top, right, bottom) - - def get_path(self): - return self.crop_path - - -# FIXME #2086 -# pylint: disable=R0903 -# pylint: disable=R0902 -class VerificationContext: - def __init__(self, crops_descriptors: List[Crop], computer, - subtask_data: Dict[str, Any], crops_number) -> None: - self.crops = crops_descriptors - self.computer = computer - self.resources = subtask_data['resources'] - self.subtask_info = subtask_data['subtask_info'] - self.finished = [Deferred() for _ in range(crops_number)] - - def get_crop_path(self, crop_id: str) -> Optional[str]: - crop = self.get_crop_with_id(crop_id) - if crop: - return crop.get_path() - return None - - def get_crop_with_id(self, crop_id: str) -> Optional[Crop]: - for crop in self.crops: - if crop.crop_id == crop_id: - return crop - return None - - -CropRenderedSuccessCallbackType = Callable[[List[str], - float, - VerificationContext, - int], - None] -CropRenderedFailureCallbackType = Callable[[Exception], None] - - -class BlenderReferenceGenerator: - DEFAULT_CROPS_NUMBER = 3 - - def __init__(self, computer: Optional[ComputerAdapter] = None) -> None: - self.computer = computer or ComputerAdapter() - self.crops_desc: List[Crop] = [] - self.rendered_crops_results: Dict[int, List[Any]] = {} - self.crop_jobs: Dict[str, Deferred] = dict() - self.stopped = False - - def clear(self): - self.rendered_crops_results = {} - self.stopped = False - - # pylint: disable=R0914 - def generate_crops_data(self, - resolution: Tuple[int, int], - subtask_border: Dict, - crops_number: int, crops_path: str): - """ - This function will generate split data for performing random crops. - Crops will be rendered from blend files using calculated values - (floats that indicate position in original blender file). - - :param resolution: This is the x, y resolution of whole image from - which split data should be generated - :param subtask_border: Dict of borders per direction. This is ROI - from which split data should be generated. This is in blender crop - values format, which means floats, where left, right, top, bottom. - Values from 0 to 1. Where 1 means top or right and 0 bottom or left. 
- :param crops_number: Number of split data, sets - :param crops_path: base crop path - :return: Tuple of two list. First list is filled with float values - useful for cropping with blender, second one are corresponding - pixels. Each list has splits_num elements, one for each split. - """ - - logger.debug("Subtasks borders left = %r," - " top = %r, " - "right = %r, " - "bottom=%r", - subtask_border["borders_x"][0], - subtask_border["borders_y"][1], - subtask_border["borders_x"][1], - subtask_border["borders_y"][0]) - - subimage = SubImage(Region(subtask_border["borders_x"][0], - subtask_border["borders_y"][1], - subtask_border["borders_x"][1], - subtask_border["borders_y"][0]), - resolution) - - for i in range(crops_number): - self.crops_desc.append( - BlenderReferenceGenerator.generate_single_random_crop_data( - subimage, - subimage.get_default_crop_size(), - str(i), crops_path)) - - return self.crops_desc - - @staticmethod - def generate_single_random_crop_data(subimage: SubImage, - crop_size_px: Tuple[int, int], - crop_id: str, - crops_path: str) \ - -> Crop: - - crop_horizontal_pixel_coordinates = \ - BlenderReferenceGenerator._get_random_interval_within_boundaries( - subimage.pixel_region.left, - subimage.pixel_region.right, - crop_size_px[0]) - - crop_vertical_pixel_coordinates = \ - BlenderReferenceGenerator._get_random_interval_within_boundaries( - subimage.pixel_region.bottom, - subimage.pixel_region.top, - crop_size_px[1]) - - crop = Crop.create_from_pixel_region(crop_id, PixelRegion( - crop_horizontal_pixel_coordinates[0], - crop_vertical_pixel_coordinates[1], - crop_horizontal_pixel_coordinates[1], - crop_vertical_pixel_coordinates[0]), subimage, crops_path) - - return crop - - @staticmethod - def _get_random_interval_within_boundaries(begin: int, - end: int, - interval_length: int) \ - -> Tuple[int, int]: - - # survive in edge cases - end -= 1 - begin += 1 - - logger.debug("begin %r, end %r", begin, end) - - max_possible_interval_end = (end - interval_length) - if max_possible_interval_end < 0: - raise Exception("Subtask is too small for reliable verification") - interval_begin = random.randint(begin, max_possible_interval_end) - interval_end = interval_begin + interval_length - return interval_begin, interval_end - - # pylint: disable-msg=too-many-arguments - - def render_crops(self, - resources: List[str], - subtask_info: Dict[str, Any], - num_crops: int = DEFAULT_CROPS_NUMBER) -> List[Deferred]: - crops_path = os.path.join(subtask_info['tmp_dir'], - subtask_info['subtask_id']) - crops_descriptors = self.generate_crops_data( - (subtask_info['resolution'][0], subtask_info['resolution'][1]), - subtask_info['crops'][0], - num_crops, crops_path) - - verification_context = \ - VerificationContext(crops_descriptors, - self.computer, - {'resources': resources, - 'subtask_info': subtask_info}, num_crops) - - self.start(verification_context, num_crops) - - return verification_context.finished - - # FIXME it would be better to make this subtask agnostic, pass only data - # needed to generate crops. Drop local computer. 
- # Issue # 2447 - # pylint: disable-msg=too-many-arguments - # pylint: disable=R0914 - @inlineCallbacks - def start(self, - verification_context: VerificationContext, - crop_count: int) -> Generator: - - for i in range(0, crop_count): - if self.stopped: - break - - crop = verification_context.get_crop_with_id(str(i)) - if not crop: - raise Exception("Crop %s not found " % i) - - left, top, right, bottom = crop.calculate_borders().to_tuple() - - verification_context.subtask_info['use_compositing'] = False - crops = verification_context.subtask_info['crops'] - crops[0]['outfilebasename'] = "ref_" + crops[0]['outfilebasename'] - crops[0]['borders_x'] = [left, right] - crops[0]['borders_y'] = [bottom, top] - verification_context.subtask_info['crops'] = crops - task_definition = BlenderReferenceGenerator \ - .generate_computational_task_definition( - verification_context.subtask_info) - - yield self.schedule_crop_job(verification_context, task_definition, - i) - - if not self.stopped: - for i in range(0, crop_count): - verification_context.finished[i].callback(( - self.rendered_crops_results[i][0], - self.rendered_crops_results[i][1], - self.rendered_crops_results[i][2], i)) - - def stop(self): - self.stopped = True - - def schedule_crop_job(self, verification_context, task_definition, - crop_number): - - defer = Deferred() - - def success(results: List[str], time_spent: float): - self.rendered_crops_results[crop_number] = [results, - time_spent, - verification_context] - defer.callback(True) - - def failure(exc): - self.stopped = True - logger.error(exc) - verification_context.finished[crop_number].errback(False) - - path = verification_context.get_crop_path(str(crop_number)) - if not path: - raise Exception("Crop %s not found " % crop_number) - - verification_context.computer.start_computation( - root_path=path, - success_callback=success, - error_callback=failure, - compute_task_def=task_definition, - resources=verification_context.resources, - additional_resources=[] - ) - - return defer - - @staticmethod - def generate_computational_task_definition(subtask_info: Dict[str, Any]) \ - -> Dict[str, Any]: - - task_definition = deepcopy(subtask_info['ctd']) - - task_definition['deadline'] = timeout_to_deadline( - subtask_info['subtask_timeout']) - - return task_definition diff --git a/apps/blender/resources/images/blender_verifier.Dockerfile b/apps/blender/resources/images/blender_verifier.Dockerfile new file mode 100644 index 0000000000..ad289495d7 --- /dev/null +++ b/apps/blender/resources/images/blender_verifier.Dockerfile @@ -0,0 +1,36 @@ +FROM golemfactory/blender:1.8 + +# Install scripts requirements first, then add scripts. 
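+# requirements.txt and copy.sh are used by the RUN step below; the verifier
+# scripts themselves are only added at the very end of this Dockerfile.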
+ADD scripts_verifier/requirements.txt /golem/work/
+ADD scripts_verifier/copy.sh /golem/
+
+# Install any needed packages specified in requirements.txt
+RUN set +x \
+    && apt-get update \
+    && apt-get install -y libglib2.0-0 \
+    && apt-get install -y g++ \
+    && apt-get install -y libsm6 \
+    && apt-get install -y libxrender1 \
+    && apt-get install -y wget \
+    && apt-get install -y zlib1g-dev \
+    && apt-get install -y libopenexr-dev \
+    && /golem/install_py_libs.sh /golem/work/requirements.txt \
+    && /golem/copy.sh \
+    && apt-get remove -y libopenexr-dev \
+    && apt-get remove -y zlib1g-dev \
+    && apt-get remove -y wget \
+    && apt-get remove -y libxrender1 \
+    && apt-get remove -y libsm6 \
+    && apt-get remove -y g++ \
+    && apt-get remove -y libglib2.0-0 \
+    && apt-get clean \
+    && apt-get -y autoremove \
+    && rm -rf /var/lib/apt/lists/*
+
+ENV PYTHONPATH=/golem/scripts:/golem/scripts_verifier:/golem:$PYTHONPATH
+
+# Re-create the python3 symbolic link; something in the build steps above removes it.
+RUN ln -s /usr/bin/python3.6 /usr/bin/python3
+
+RUN mkdir /golem/scripts_verifier
+ADD scripts_verifier/ /golem/scripts_verifier
diff --git a/apps/blender/resources/images/scripts_verifier/__init__.py b/apps/blender/resources/images/scripts_verifier/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/apps/blender/resources/images/scripts_verifier/copy.sh b/apps/blender/resources/images/scripts_verifier/copy.sh
new file mode 100755
index 0000000000..b49d9c7632
--- /dev/null
+++ b/apps/blender/resources/images/scripts_verifier/copy.sh
@@ -0,0 +1,9 @@
+cp /usr/lib/x86_64-linux-gnu/libXrender.so.1 /usr/local/lib/.
+cp /usr/lib/x86_64-linux-gnu/libSM.so.6 /usr/local/lib/.
+cp /usr/lib/x86_64-linux-gnu/libgthread-2.0.so.0 /usr/local/lib/.
+cp /usr/lib/x86_64-linux-gnu/libglib-2.0.so.0 /usr/local/lib/.
+cp /usr/lib/x86_64-linux-gnu/libICE.so.6 /usr/local/lib/.
+cp /usr/lib/x86_64-linux-gnu/libIex-2_2.so.12 /usr/local/lib/.
+cp /usr/lib/x86_64-linux-gnu/libIlmImf-2_2.so.22 /usr/local/lib/.
+cp /usr/lib/x86_64-linux-gnu/libHalf.so.12 /usr/local/lib/.
+cp /usr/lib/x86_64-linux-gnu/libIlmThread-2_2.so.12 /usr/local/lib/.
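Note: copy.sh preserves, under /usr/local/lib, the shared objects pulled in by the temporary apt packages (libglib2.0-0, libsm6, libxrender1, libopenexr-dev and their dependencies) so that opencv-python, Pillow and the OpenEXR bindings still load after those packages are removed at the end of the RUN step in blender_verifier.Dockerfile. Below is a minimal sanity-check sketch, assuming it runs inside the built image; check_preserved_libs and the hard-coded /usr/local/lib path are illustrative and not part of the patch.

import ctypes
import os

# Shared objects preserved by copy.sh; the list mirrors the cp commands above.
PRESERVED_LIBS = [
    "libXrender.so.1",
    "libSM.so.6",
    "libgthread-2.0.so.0",
    "libglib-2.0.so.0",
    "libICE.so.6",
    "libIex-2_2.so.12",
    "libIlmImf-2_2.so.22",
    "libHalf.so.12",
    "libIlmThread-2_2.so.12",
]

def check_preserved_libs(lib_dir="/usr/local/lib"):
    # ctypes.CDLL uses dlopen, so this fails with OSError if a copied
    # library is missing or cannot be loaded after the apt cleanup.
    missing = []
    for name in PRESERVED_LIBS:
        try:
            ctypes.CDLL(os.path.join(lib_dir, name))
        except OSError:
            missing.append(name)
    return missing

if __name__ == "__main__":
    print(check_preserved_libs() or "all preserved libraries load")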
diff --git a/apps/blender/resources/images/scripts_verifier/crop_generator.py b/apps/blender/resources/images/scripts_verifier/crop_generator.py new file mode 100644 index 0000000000..9e90ac2b9f --- /dev/null +++ b/apps/blender/resources/images/scripts_verifier/crop_generator.py @@ -0,0 +1,173 @@ +import os +import numpy +import math +import random +from typing import Dict, Tuple, List, Optional + +WORK_DIR = "/golem/work" +OUTPUT_DIR = "/golem/output" + +class Region: + + def __init__(self, left: float, top: float, right: float, bottom: float): + self.left = left + self.right = right + self.top = top + self.bottom = bottom + +class PixelRegion: + + def __init__(self, left: int, top: int, right: int, bottom: int): + self.left = left + self.right = right + self.top = top + self.bottom = bottom + +class SubImage: + + CROP_RELATIVE_SIZE = 0.1 + PIXEL_OFFSET = numpy.float32(0.5) + MIN_CROP_SIZE = 8 + + def __init__(self, region: Region, resolution: List[int]): + self.region = region + self.pixel_region = self.calculate_pixels(region, resolution[0], resolution[1]) + self.width = self.pixel_region.right - self.pixel_region.left + self.height = self.pixel_region.top - self.pixel_region.bottom + self.resolution = resolution + + def calculate_pixels(self, region: Region, width: int, height: int) -> None: + # This is how Blender is calculating pixel, check + # BlenderSync::get_buffer_params in blender_camera.cpp file + # BoundBox2D border = cam->border.clamp(); + # params.full_x = (int)(border.left * (float)width); + + # NOTE blender uses floats (single precision) while python operates on + # doubles + # Here numpy is used to emulate this loss of precision when assigning + # double to float: + left = math.floor( + numpy.float32(region.left) * numpy.float32(width) + + SubImage.PIXEL_OFFSET) + + right = math.floor( + numpy.float32(region.right) * numpy.float32(width) + + SubImage.PIXEL_OFFSET) + + # NOTE we are exchanging here top with bottom, because borders + # in blender are in OpenGL UV coordinate system (left, bottom is 0,0) + # where pixel values are for use in classic coordinate system (left, top is 0,0) + + top = math.floor( + numpy.float32(region.bottom) * numpy.float32(height) + + SubImage.PIXEL_OFFSET) + + bottom = math.floor( + numpy.float32(region.top) * numpy.float32(height) + + SubImage.PIXEL_OFFSET) + + print("Pixels left=%r, top=%r, right=%r, bottom=%r" % + (left, top, right, bottom)) + return PixelRegion(left, top, right, bottom) + + @staticmethod + def __calculate_crop_side_length(subtask_side_length: int) -> int: + calculated_length = int( + SubImage.CROP_RELATIVE_SIZE * subtask_side_length) + + return max(SubImage.MIN_CROP_SIZE, calculated_length) + + def get_default_crop_size(self) -> Tuple[int, int]: + x = self.__calculate_crop_side_length(self.width) + y = self.__calculate_crop_side_length(self.height) + return x, y + +class Crop: + + @staticmethod + def create_from_region(id: int, crop_region: Region, subimage: SubImage): + crop = Crop(id, subimage) + crop.crop_region = crop_region + crop.pixel_region = crop.subimage.calculate_pixels(crop_region, + subimage.width, subimage.height) + return crop + + @staticmethod + def create_from_pixel_region(id: int, pixel_region: PixelRegion, subimage: SubImage): + crop = Crop(id, subimage) + crop.pixel_region = pixel_region + crop.crop_region = crop.calculate_borders(pixel_region, subimage.resolution[0], subimage.resolution[1]) + return crop + + def __init__(self, id: int, subimage: SubImage): + self.id = id + self.subimage = subimage + 
self.pixel_region = None + self.crop_region = None + + def get_relative_top_left(self) \ + -> Tuple[int, int]: + # get top left corner of crop in relation to particular subimage + print("Sumimag top=%r - crop.top=%r" % (self.subimage.region.top, self.pixel_region.top)) + y = self.subimage.pixel_region.top - self.pixel_region.top + print("X=%r, Y=%r" % (self.pixel_region.left, y)) + return self.pixel_region.left, y + + def calculate_borders(self, pixel_region: PixelRegion, width: int, height: int): + + left = numpy.float32( + (numpy.float32(pixel_region.left) + SubImage.PIXEL_OFFSET) / + numpy.float32(width)) + + right = numpy.float32( + (numpy.float32(pixel_region.right) + SubImage.PIXEL_OFFSET) / + numpy.float32(width)) + + bottom = numpy.float32( + (numpy.float32(pixel_region.top) + SubImage.PIXEL_OFFSET) / + numpy.float32(height)) + + top = numpy.float32( + (numpy.float32(pixel_region.bottom) + SubImage.PIXEL_OFFSET) / + numpy.float32(height)) + + return Region(left, top, right, bottom) + +def generate_single_random_crop_data(subimage: SubImage, crop_size_px: Tuple[int, int], id: int) \ + -> Crop: + + crop_horizontal_pixel_coordinates = _get_random_interval_within_boundaries( + subimage.pixel_region.left, + subimage.pixel_region.right, + crop_size_px[0]) + + crop_vertical_pixel_coordinates = _get_random_interval_within_boundaries( + subimage.pixel_region.bottom, + subimage.pixel_region.top, + crop_size_px[1]) + + crop = Crop.create_from_pixel_region(id, PixelRegion( + crop_horizontal_pixel_coordinates[0], + crop_vertical_pixel_coordinates[1], + crop_horizontal_pixel_coordinates[1], + crop_vertical_pixel_coordinates[0]), subimage) + + return crop + +def _get_random_interval_within_boundaries(begin: int, + end: int, + interval_length: int) \ + -> Tuple[int, int]: + + # survive in edge cases + end -= 1 + begin += 1 + + print("begin %r, end %r" % (begin, end)) + + max_possible_interval_end = (end - interval_length) + if max_possible_interval_end < 0: + raise Exception("Subtask is too small for reliable verification") + interval_begin = random.randint(begin, max_possible_interval_end) + interval_end = interval_begin + interval_length + return interval_begin, interval_end diff --git a/apps/blender/resources/images/scripts_verifier/decision_tree.py b/apps/blender/resources/images/scripts_verifier/decision_tree.py new file mode 100644 index 0000000000..c3147869ef --- /dev/null +++ b/apps/blender/resources/images/scripts_verifier/decision_tree.py @@ -0,0 +1,40 @@ +import numpy +from sklearn.externals import joblib + + +## ======================= ## +## +class DecisionTree: + + ## ======================= ## + ## + def __init__( self, clf ): + self.clf = clf + + ## ======================= ## + ## + @staticmethod + def load( file ): + data = joblib.load( file ) + tree = DecisionTree( data[0] ) + + return tree, data[1] + + ## ======================= ## + ## + def classify_with_feature_vector(self, feature_vector, labels): + + numpy_format = [] + for label in labels: + numpy_format.append((label, numpy.float64)) + + converted_features = numpy.zeros(1, dtype=numpy_format) + for name in converted_features.dtype.names: + converted_features[name] = feature_vector[name] + + samples = converted_features.view(numpy.float64).reshape( + converted_features.shape + (-1,)) + + results = self.clf.predict(samples) + + return numpy.array(results) diff --git a/apps/blender/resources/images/scripts_verifier/edges.py b/apps/blender/resources/images/scripts_verifier/edges.py new file mode 100644 index 
0000000000..1622711c62 --- /dev/null +++ b/apps/blender/resources/images/scripts_verifier/edges.py @@ -0,0 +1,63 @@ +from PIL import Image, ImageFilter +import numpy +from skimage import compare_mse + + +import sys + + +## ======================= ## +## +class MetricEdgeFactor: + + + ## ======================= ## + ## + @staticmethod + def compute_metrics( image1, image2 ): + + image1 = image1.convert("RGB") + image2 = image2.convert("RGB") + + edged_image1 = image1.filter( ImageFilter.FIND_EDGES ) + edged_image2 = image2.filter( ImageFilter.FIND_EDGES ) + + np_image1 = numpy.array( edged_image1 ) + np_image2 = numpy.array( edged_image2 ) + + ref_edge_factor = numpy.mean( np_image1 ) + comp_edge_factor = numpy.mean( np_image2 ) + + edge_factor = compare_mse( np_image1, np_image2 ) + + result = dict() + result[ "ref_edge_factor" ] = ref_edge_factor + result[ "comp_edge_factor" ] = comp_edge_factor + result[ "edge_difference" ] = edge_factor + + return result + + ## ======================= ## + ## + @staticmethod + def get_labels(): + return [ "ref_edge_factor", "comp_edge_factor", "edge_difference" ] + +## ======================= ## +## +def run(): + + first_img = sys.argv[ 1 ] + second_img = sys.argv[ 2 ] + + first_img = Image.open( first_img ) + second_img = Image.open( second_img ) + + ssim = MetricEdgeFactor() + + print( ssim.compute_metrics( first_img, second_img ) ) + + + +if __name__ == "__main__": + run() diff --git a/apps/blender/resources/images/scripts_verifier/histograms_correlation.py b/apps/blender/resources/images/scripts_verifier/histograms_correlation.py new file mode 100644 index 0000000000..893a303a23 --- /dev/null +++ b/apps/blender/resources/images/scripts_verifier/histograms_correlation.py @@ -0,0 +1,56 @@ +import cv2 +import numpy +from PIL import Image +import sys + + +class MetricHistogramsCorrelation: + + @staticmethod + def compute_metrics( image1, image2): + if image1.size != image2.size: + raise Exception("Image sizes differ") + opencv_image_1 = cv2.cvtColor(numpy.array(image1), cv2.COLOR_RGB2BGR) + opencv_image_2 = cv2.cvtColor(numpy.array(image2), cv2.COLOR_RGB2BGR) + return {"histograms_correlation": MetricHistogramsCorrelation.compare_histograms(opencv_image_1, opencv_image_2)} + + @staticmethod + def get_labels(): + return ["histograms_correlation"] + + @staticmethod + def get_number_of_pixels(image): + height, width = image.shape[:2] + return height * width + + @staticmethod + def calculate_normalized_histogram(image): + number_of_bins = 256 + channels_number = 3 # because of conversion from PIL to opencv + histogram = cv2.calcHist([image], + range(channels_number), + None, + [number_of_bins] * channels_number, + [0, 256] * channels_number) + cv2.normalize(histogram, histogram, 0, 256, cv2.NORM_MINMAX) + return histogram + + @staticmethod + def compare_histograms(image_a, image_b): + histogram_a = MetricHistogramsCorrelation.calculate_normalized_histogram(image_a) + histogram_b = MetricHistogramsCorrelation.calculate_normalized_histogram(image_b) + result = cv2.compareHist(histogram_a, histogram_b, cv2.HISTCMP_CORREL) + return result + + +def run(): + first_img = Image.open(sys.argv[1]) + second_img = Image.open(sys.argv[2]) + + histograms_correlation_metric = MetricHistogramsCorrelation() + + print(histograms_correlation_metric.compute_metrics(first_img, second_img)) + + +if __name__ == "__main__": + run() diff --git a/apps/blender/resources/images/scripts_verifier/img_format_converter.py 
b/apps/blender/resources/images/scripts_verifier/img_format_converter.py new file mode 100644 index 0000000000..ea0c31266c --- /dev/null +++ b/apps/blender/resources/images/scripts_verifier/img_format_converter.py @@ -0,0 +1,26 @@ + +import numpy as np +from PIL import Image +import OpenEXR + +import Imath + +# converting .exr file to .png if user gave .exr file as a rendered scene +def ConvertEXRToPNG(exrfile, pngfile): + File = OpenEXR.InputFile(exrfile) + PixType = Imath.PixelType(Imath.PixelType.FLOAT) + DW = File.header()['dataWindow'] + Size = (DW.max.x - DW.min.x + 1, DW.max.y - DW.min.y + 1) + rgb = [np.frombuffer(File.channel(c, PixType), dtype=np.float32) for c in + 'RGB'] + for i in range(3): + rgb[i] = np.where(rgb[i] <= 0.0031308, + (rgb[i] * 12.92) * 255.0, + (1.055 * (rgb[i] ** (1.0 / 2.4)) - 0.055) * 255.0) + rgb8 = [Image.frombytes("F", Size, c.tostring()).convert("L") for c in rgb] + Image.merge("RGB", rgb8).save(pngfile, "PNG") + +# converting .tga file to .png if user gave .tga file as a rendered scene +def ConvertTGAToPNG(tgafile, pngfile): + img = Image.open(tgafile) + img.save(pngfile) diff --git a/apps/blender/resources/images/scripts_verifier/img_metrics_calculator.py b/apps/blender/resources/images/scripts_verifier/img_metrics_calculator.py new file mode 100644 index 0000000000..789cd7d0fd --- /dev/null +++ b/apps/blender/resources/images/scripts_verifier/img_metrics_calculator.py @@ -0,0 +1,222 @@ +import os +import sys +import pickle +from typing import Dict + +import numpy as np +import OpenEXR +from PIL import Image + +import decision_tree +from img_format_converter import \ + ConvertTGAToPNG, ConvertEXRToPNG +from imgmetrics import \ + ImgMetrics + +CROP_NAME = "/golem/output/scene_crop.png" +VERIFICATION_SUCCESS = "TRUE" +VERIFICATION_FAIL = "FALSE" +TREE_PATH = "/golem/scripts_verifier/tree35_[crr=87.71][frr=0.92].pkl" + +def compare_crop_window(cropped_img_path, + rendered_scene_path, + xres, yres, + output_filename_path='metrics.txt'): + """ + This is the entry point for calculation of metrics between the + rendered_scene and the sample(cropped_img) generated for comparison. 
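+    The rendered scene is cropped at the expected (xres, yres) position and,
+    if that crop is not classified as a match, at the eight surrounding
+    one-pixel offsets; each candidate is compared against cropped_img and
+    classified with the pre-trained decision tree. The metrics of the accepted
+    (or default) crop, including the final 'Label', are written to
+    output_filename_path.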
+    :param cropped_img_path:
+    :param rendered_scene_path:
+    :param xres: x position of crop (left, top)
+    :param yres: y position of crop (left, top)
+    :param output_filename_path:
+    :return:
+    """
+
+    cropped_img, scene_crops, rendered_scene = \
+        _load_and_prepare_img_for_comparison(
+            cropped_img_path,
+            rendered_scene_path,
+            xres, yres)
+
+    best_crop = None
+    best_img_metrics = None
+    img_metrics = dict()
+    img_metrics['Label'] = VERIFICATION_FAIL
+
+    effective_metrics, classifier, labels, available_metrics = get_metrics()
+
+    # First, try the crop at the exact expected position
+    default_crop = scene_crops[0]
+    default_metrics = compare_images(cropped_img, default_crop, available_metrics)
+    try:
+        label = classify_with_tree(default_metrics, classifier, labels)
+        default_metrics['Label'] = label
+    except Exception as e:
+        print("There was an error: %r" % e, file=sys.stderr)
+        default_metrics['Label'] = VERIFICATION_FAIL
+    if default_metrics['Label'] == VERIFICATION_SUCCESS:
+        default_crop.save(CROP_NAME)
+        return ImgMetrics(default_metrics).write_to_file(output_filename_path)
+    else:
+        # Try the offset crops
+        for crop in scene_crops[1:]:
+            try:
+                img_metrics = compare_images(cropped_img, crop, available_metrics)
+                img_metrics['Label'] = classify_with_tree(img_metrics, classifier, labels)
+            except Exception as e:
+                print("There was an error: %r" % e, file=sys.stderr)
+                img_metrics['Label'] = VERIFICATION_FAIL
+            if img_metrics['Label'] == VERIFICATION_SUCCESS:
+                best_img_metrics = img_metrics
+                best_crop = crop
+                break
+        if best_crop and best_img_metrics:
+            best_crop.save(CROP_NAME)
+            return ImgMetrics(best_img_metrics).write_to_file(output_filename_path)
+        else:
+            # No better match was found among the offset crops; return the default one
+            default_crop.save(CROP_NAME)
+            path_to_metrics = ImgMetrics(default_metrics).write_to_file(output_filename_path)
+            return path_to_metrics
+
+    # Unreachable fallback (every branch above returns); kept as a safety net
+    stub_data = {element: -1 for element in get_labels_from_metrics(available_metrics)}
+    stub_data['Label'] = VERIFICATION_FAIL
+    path_to_metrics = ImgMetrics(stub_data).write_to_file(output_filename_path)
+    return path_to_metrics
+
+def load_classifier():
+    data = decision_tree.DecisionTree.load(TREE_PATH)
+
+    return data[0], data[1]
+
+def classify_with_tree(metrics, classifier, feature_labels):
+
+    features = dict()
+    for label in feature_labels:
+        features[label] = metrics[label]
+
+    results = classifier.classify_with_feature_vector(features, feature_labels)
+
+    return results[0].decode('utf-8')
+
+def _load_and_prepare_img_for_comparison(cropped_img_path,
+                                         rendered_scene_path,
+                                         xres, yres):
+
+    """
+    This function prepares (i.e. crops) the rendered_scene so that it will
+    fit the sample (cropped_img) generated for comparison.
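+    EXR and TGA scenes are first converted to PNG (multilayer OpenEXR input is
+    rejected). The function returns the reference crop image, a list of nine
+    candidate crops cut from the rendered scene (the exact position plus its
+    eight one-pixel offsets), and the full rendered scene image.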
+
+    :param cropped_img_path:
+    :param rendered_scene_path:
+    :param xres: x position of crop (left, top)
+    :param yres: y position of crop (left, top)
+    :return:
+    """
+    rendered_scene = None
+    # if the rendered scene is in .exr format it has to be converted to .png
+    if os.path.splitext(rendered_scene_path)[1] == ".exr":
+        check_input = OpenEXR.InputFile(rendered_scene_path).header()[
+            'channels']
+        if 'RenderLayer.Combined.R' in check_input:
+            sys.exit("There is no support for OpenEXR multilayer")
+        file_name = "/tmp/scene.png"
+        ConvertEXRToPNG(rendered_scene_path, file_name)
+        rendered_scene = Image.open(file_name)
+    elif os.path.splitext(rendered_scene_path)[1] == ".tga":
+        file_name = "/tmp/scene.png"
+        ConvertTGAToPNG(rendered_scene_path, file_name)
+        rendered_scene = Image.open(file_name)
+    else:
+        rendered_scene = Image.open(rendered_scene_path)
+
+    cropped_img = Image.open(cropped_img_path)
+    (crop_width, crop_height) = cropped_img.size
+
+    crops = get_crops(rendered_scene, xres, yres, crop_width, crop_height)
+
+    return cropped_img, crops, rendered_scene
+
+
+def get_crops(input, x, y, width, height):
+    crops = []
+
+    scene_crop = input.crop((x, y, x + width, y + height))
+
+    crops.append(scene_crop)
+
+    scene_crop_left = input.crop((x-1, y, x + width-1, y + height))
+
+    crops.append(scene_crop_left)
+
+    scene_crop_left_up = input.crop((x-1, y-1, x + width-1, y + height-1))
+
+    crops.append(scene_crop_left_up)
+
+    scene_crop_up = input.crop((x, y-1, x + width, y + height-1))
+
+    crops.append(scene_crop_up)
+
+    scene_crop_up_right = input.crop((x+1, y-1, x + width+1, y + height-1))
+
+    crops.append(scene_crop_up_right)
+
+    scene_crop_right = input.crop((x+1, y, x + width+1, y + height))
+
+    crops.append(scene_crop_right)
+
+    scene_crop_down_right = input.crop((x+1, y+1, x + width+1, y + height+1))
+
+    crops.append(scene_crop_down_right)
+
+    scene_crop_down = input.crop((x, y+1, x + width, y + height+1))
+
+    crops.append(scene_crop_down)
+
+    scene_crop_down_left = input.crop((x-1, y+1, x + width-1, y + height+1))
+
+    crops.append(scene_crop_down_left)
+
+    return crops
+
+def get_metrics():
+    classifier, feature_labels = load_classifier()
+    available_metrics = ImgMetrics.get_metric_classes()
+    effective_metrics = []
+    for metric in available_metrics:
+        for label in feature_labels:
+            for label_part in metric.get_labels():
+                if label_part == label and metric not in effective_metrics:
+                    effective_metrics.append(metric)
+    return effective_metrics, classifier, feature_labels, available_metrics
+
+def get_labels_from_metrics(metrics):
+    labels = []
+    for metric in metrics:
+        labels.extend(metric.get_labels())
+    return labels
+
+def compare_images(image_a, image_b, metrics) -> Dict:
+    """
+    This is the entry point for calculating metrics between image_a, image_b
+    once they are cropped to the same size.
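+    Every metric class in `metrics` contributes its labelled values to a flat
+    dict, which also records the crop resolution.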
+ :param image_a: + :param image_b: + :return: ImgMetrics + """ + + """imageA/B are images read by: PIL.Image.open(img.png)""" + (crop_height, crop_width) = image_a.size + crop_resolution = str(crop_height) + "x" + str(crop_width) + + data = {"crop_resolution": crop_resolution} + + for metric_class in metrics: + result = metric_class.compute_metrics(image_a, image_b) + for key, value in result.items(): + data[key] = value + + return data diff --git a/apps/blender/resources/images/scripts_verifier/imgmetrics.py b/apps/blender/resources/images/scripts_verifier/imgmetrics.py new file mode 100644 index 0000000000..df1510a1cf --- /dev/null +++ b/apps/blender/resources/images/scripts_verifier/imgmetrics.py @@ -0,0 +1,122 @@ +import io +import os +import json +import numpy as np +import sys + + +class MyEncoder(json.JSONEncoder): + def default(self, obj): + print("There were obj %r" % obj, file=sys.stderr) + + if isinstance(obj, np.integer): + return int(obj) + elif isinstance(obj, np.float32): + return float(obj) + elif isinstance(obj, np.float64): + return float(obj) + elif isinstance(obj, np.ndarray): + return obj.tolist() + else: + return obj.__dict__ + + +class ImgMetrics: + """ + ImgMetrics is a structure for storing img comparison metric. + methods write/load are to facilitate file movement to/from docker. + """ + + def __init__(self, dictionary=None): + self.ssim = None + self.reference_variance = None + self.image_variance = None + self.ref_edge_factor = None + self.comp_edge_factor = None + self.edge_difference = None + self.wavelet_sym2_base = None + self.wavelet_sym2_low = None + self.wavelet_sym2_mid = None + self.wavelet_sym2_high = None + self.wavelet_db4_base = None + self.wavelet_db4_low = None + self.wavelet_db4_mid = None + self.wavelet_db4_high = None + self.wavelet_haar_base = None + self.wavelet_haar_low = None + self.wavelet_haar_mid = None + self.wavelet_haar_high = None + self.wavelet_haar_freq_x1 = None + self.wavelet_haar_freq_x2 = None + self.wavelet_haar_freq_x3 = None + self.histograms_correlation = None + self.max_x_mass_center_distance = None + self.max_y_mass_center_distance = None + self.crop_resolution = None + self.variance_difference = None + + # ensure that the keys are correct + keys = ImgMetrics.get_metric_names() + keys.append('Label') + + for key in keys: + if key not in dictionary: + raise KeyError("missing metric:" + key) + + # read into ImgMetrics object + for key in dictionary: + setattr(self, key, dictionary[key]) + + @staticmethod + def get_metric_classes(): + import ssim + import psnr + import variance + import edges + import wavelet + import histograms_correlation + import mass_center_distance + available_metrics = [ssim.MetricSSIM, + psnr.MetricPSNR, + variance.ImageVariance, + edges.MetricEdgeFactor, + wavelet.MetricWavelet, + histograms_correlation.MetricHistogramsCorrelation, + mass_center_distance.MetricMassCenterDistance] + + return available_metrics + + + @staticmethod + def get_metric_names(): + metric_names = [] + for metric_class in ImgMetrics.get_metric_classes(): + metric_names = metric_names + metric_class.get_labels() + return metric_names + + def to_json(self): + str_ = json.dumps(self, + cls=MyEncoder, + indent=4, + sort_keys=True, + separators=(',', ': '), + ensure_ascii=False) + return str_ + + + def write_to_file(self, file_name='img_metrics.txt'): + dir_path = os.path.dirname(os.path.realpath(__file__)) + file_path = os.path.join(dir_path, file_name) + + data = self.to_json() + with io.open(file_path, 'w', encoding='utf-8') as 
f: + f.write(data) + + return file_path + + @classmethod + def load_from_file(cls, file_path=None): + with open(file_path, 'r') as f: + dictionary = json.load(f) + img_metrics = cls(dictionary) + return img_metrics diff --git a/apps/blender/resources/images/scripts_verifier/mass_center_distance.py b/apps/blender/resources/images/scripts_verifier/mass_center_distance.py new file mode 100644 index 0000000000..c14cd7f83d --- /dev/null +++ b/apps/blender/resources/images/scripts_verifier/mass_center_distance.py @@ -0,0 +1,75 @@ +from PIL import Image +import sys + + +class MetricMassCenterDistance: + + @staticmethod + def compute_metrics(image1, image2): + if image1.size != image2.size: + raise Exception("Image sizes differ") + mass_centers_1 = MetricMassCenterDistance.compute_mass_centers(image1) + mass_centers_2 = MetricMassCenterDistance.compute_mass_centers(image2) + max_x_distance = 0 + max_y_distance = 0 + for channel_index in mass_centers_1.keys(): + x1, y1 = mass_centers_1[channel_index] + x2, y2 = mass_centers_2[channel_index] + x_distance = abs(x1 - x2) + y_distance = abs(y1 - y2) + max_x_distance = max(max_x_distance, x_distance) + max_y_distance = max(max_y_distance, y_distance) + return { + "max_x_mass_center_distance": max_x_distance, + "max_y_mass_center_distance": max_y_distance + } + + @staticmethod + def get_labels(): + return ["max_x_mass_center_distance", "max_y_mass_center_distance"] + + @staticmethod + def compute_mass_centers(image): + image = image.convert('RGB') + pixels = image.load() + width, height = image.size + results = dict() + for channel_index in range(len(pixels[0, 0])): + mass_center_x = 0 + mass_center_y = 0 + total_mass = 0 + for x in range(width): + for y in range(height): + mass = pixels[x, y][channel_index] + mass_center_x += mass * x + mass_center_y += mass * y + total_mass += mass + + divisor_x = (float(total_mass) * width) + divisor_y = (float(total_mass) * height) + + if divisor_x == 0: + mass_center_x = 0.5 + else: + mass_center_x = mass_center_x / divisor_x + + if divisor_y == 0: + mass_center_y = 0.5 + else: + mass_center_y = mass_center_y / divisor_y + + results[channel_index] = mass_center_x, mass_center_y + return results + + +def run(): + first_img = Image.open(sys.argv[1]) + second_img = Image.open(sys.argv[2]) + + mass_center_distance = MetricMassCenterDistance() + + print(mass_center_distance.compute_metrics(first_img, second_img)) + + +if __name__ == "__main__": + run() diff --git a/apps/blender/resources/images/scripts_verifier/psnr.py b/apps/blender/resources/images/scripts_verifier/psnr.py new file mode 100644 index 0000000000..71259175bb --- /dev/null +++ b/apps/blender/resources/images/scripts_verifier/psnr.py @@ -0,0 +1,55 @@ +import numpy +import math +from skimage import compare_psnr + +import sys + + +## ======================= ## +## +class MetricPSNR: + + ## ======================= ## + ## + @staticmethod + def compute_metrics( image1, image2 ): + + image1 = image1.convert("RGB") + image2 = image2.convert("RGB") + + np_image1 = numpy.array( image1 ) + np_image2 = numpy.array( image2 ) + + psnr = compare_psnr( np_image1, np_image2 ) + + if math.isinf( psnr ): + psnr = numpy.finfo( numpy.float32 ).max + + result = dict() + result[ "psnr" ] = psnr + + return result + + ## ======================= ## + ## + @staticmethod + def get_labels(): + return [ "psnr" ] + +## ======================= ## +## +def run(): + + first_img = sys.argv[ 1 ] + second_img = sys.argv[ 2 ] + + psnr = MetricPSNR() + + print( psnr.compute_metrics( first_img, 
second_img ) ) + + + +if __name__ == "__main__": + run() + + diff --git a/apps/blender/resources/images/scripts_verifier/requirements.txt b/apps/blender/resources/images/scripts_verifier/requirements.txt new file mode 100644 index 0000000000..10d8498e88 --- /dev/null +++ b/apps/blender/resources/images/scripts_verifier/requirements.txt @@ -0,0 +1,9 @@ +--extra-index-url https://builds.golem.network +OpenEXR +scipy +six==1.5 +Pillow==5.1.0 +sklearn +numpy==1.15.4 +PyWavelets +opencv-python diff --git a/apps/blender/resources/images/scripts_verifier/runner.py b/apps/blender/resources/images/scripts_verifier/runner.py new file mode 100644 index 0000000000..95703e5383 --- /dev/null +++ b/apps/blender/resources/images/scripts_verifier/runner.py @@ -0,0 +1,17 @@ +import json + +from verificator import verify + +with open('params.json', 'r') as params_file: + params = json.load(params_file) + +verify( + params['subtask_paths'], + params['subtask_borders'], + params['scene_path'], + params['resolution'], + params['samples'], + params['frames'], + params['output_format'], + params['basefilename'], +) diff --git a/apps/blender/resources/images/scripts_verifier/skimage.py b/apps/blender/resources/images/scripts_verifier/skimage.py new file mode 100644 index 0000000000..ffe239d457 --- /dev/null +++ b/apps/blender/resources/images/scripts_verifier/skimage.py @@ -0,0 +1,219 @@ +import numpy as np +from numpy.lib.arraypad import _validate_lengths + +from scipy.ndimage import uniform_filter, gaussian_filter + +# all methods come from scikit-image +# copied to reduce size of the container +# https://github.com/scikit-image/scikit-image + +_integer_types = (np.byte, np.ubyte, # 8 bits + np.short, np.ushort, # 16 bits + np.intc, np.uintc, # 16 or 32 or 64 bits + np.int_, np.uint, # 32 or 64 bits + np.longlong, np.ulonglong) # 64 bits +_integer_ranges = {t: (np.iinfo(t).min, np.iinfo(t).max) + for t in _integer_types} +dtype_range = {np.bool_: (False, True), + np.bool8: (False, True), + np.float16: (-1, 1), + np.float32: (-1, 1), + np.float64: (-1, 1)} +dtype_range.update(_integer_ranges) + +def crop(ar, crop_width, copy=False, order='K'): + ar = np.array(ar, copy=False) + crops = _validate_lengths(ar, crop_width) + slices = tuple(slice(a, ar.shape[i] - b) + for i, (a, b) in enumerate(crops)) + if copy: + cropped = np.array(ar[slices], order=order, copy=True) + else: + cropped = ar[slices] + return cropped + +def _assert_compatible(im1, im2): + if not im1.shape == im2.shape: + raise ValueError('Input images must have the same dimensions.') + return + +def _as_floats(im1, im2): + float_type = np.result_type(im1.dtype, im2.dtype, np.float32) + im1 = np.asarray(im1, dtype=float_type) + im2 = np.asarray(im2, dtype=float_type) + return im1, im2 + +def compare_mse(im1, im2): + _assert_compatible(im1, im2) + im1, im2 = _as_floats(im1, im2) + return np.mean(np.square(im1 - im2), dtype=np.float64) + +def compare_psnr(im_true, im_test, data_range=None): + + _assert_compatible(im_true, im_test) + + if data_range is None: + if im_true.dtype != im_test.dtype: + warn("Inputs have mismatched dtype. Setting data_range based on " + "im_true.") + dmin, dmax = dtype_range[im_true.dtype.type] + true_min, true_max = np.min(im_true), np.max(im_true) + if true_max > dmax or true_min < dmin: + raise ValueError( + "im_true has intensity values outside the range expected for " + "its data type. 
Please manually specify the data_range") + if true_min >= 0: + # most common case (255 for uint8, 1 for float) + data_range = dmax + else: + data_range = dmax - dmin + + im_true, im_test = _as_floats(im_true, im_test) + + err = compare_mse(im_true, im_test) + return 10 * np.log10((data_range ** 2) / err) + + +def compare_ssim(X, Y, win_size=None, gradient=False, + data_range=None, multichannel=False, gaussian_weights=False, + full=False, **kwargs): + + _assert_compatible(X, Y) + + if multichannel: + # loop over channels + args = dict(win_size=win_size, + gradient=gradient, + data_range=data_range, + multichannel=False, + gaussian_weights=gaussian_weights, + full=full) + args.update(kwargs) + nch = X.shape[-1] + mssim = np.empty(nch) + if gradient: + G = np.empty(X.shape) + if full: + S = np.empty(X.shape) + for ch in range(nch): + ch_result = compare_ssim(X[..., ch], Y[..., ch], **args) + if gradient and full: + mssim[..., ch], G[..., ch], S[..., ch] = ch_result + elif gradient: + mssim[..., ch], G[..., ch] = ch_result + elif full: + mssim[..., ch], S[..., ch] = ch_result + else: + mssim[..., ch] = ch_result + mssim = mssim.mean() + if gradient and full: + return mssim, G, S + elif gradient: + return mssim, G + elif full: + return mssim, S + else: + return mssim + + K1 = kwargs.pop('K1', 0.01) + K2 = kwargs.pop('K2', 0.03) + sigma = kwargs.pop('sigma', 1.5) + if K1 < 0: + raise ValueError("K1 must be positive") + if K2 < 0: + raise ValueError("K2 must be positive") + if sigma < 0: + raise ValueError("sigma must be positive") + use_sample_covariance = kwargs.pop('use_sample_covariance', True) + + if win_size is None: + if gaussian_weights: + win_size = 11 # 11 to match Wang et. al. 2004 + else: + win_size = 7 # backwards compatibility + + if np.any((np.asarray(X.shape) - win_size) < 0): + raise ValueError( + "win_size exceeds image extent. If the input is a multichannel " + "(color) image, set multichannel=True.") + + if not (win_size % 2 == 1): + raise ValueError('Window size must be odd.') + + if data_range is None: + if X.dtype != Y.dtype: + print("Inputs have mismatched dtype. Setting data_range based on " + "X.dtype.") + dmin, dmax = dtype_range[X.dtype.type] + data_range = dmax - dmin + + ndim = X.ndim + + if gaussian_weights: + # sigma = 1.5 to approximately match filter in Wang et. al. 2004 + # this ends up giving a 13-tap rather than 11-tap Gaussian + filter_func = gaussian_filter + filter_args = {'sigma': sigma} + + else: + filter_func = uniform_filter + filter_args = {'size': win_size} + + # ndimage filters need floating point data + X = X.astype(np.float64) + Y = Y.astype(np.float64) + + NP = win_size ** ndim + + # filter has already normalized by NP + if use_sample_covariance: + cov_norm = NP / (NP - 1) # sample covariance + else: + cov_norm = 1.0 # population covariance to match Wang et. al. 
2004 + + # compute (weighted) means + ux = filter_func(X, **filter_args) + uy = filter_func(Y, **filter_args) + + # compute (weighted) variances and covariances + uxx = filter_func(X * X, **filter_args) + uyy = filter_func(Y * Y, **filter_args) + uxy = filter_func(X * Y, **filter_args) + vx = cov_norm * (uxx - ux * ux) + vy = cov_norm * (uyy - uy * uy) + vxy = cov_norm * (uxy - ux * uy) + + R = data_range + C1 = (K1 * R) ** 2 + C2 = (K2 * R) ** 2 + + A1, A2, B1, B2 = ((2 * ux * uy + C1, + 2 * vxy + C2, + ux ** 2 + uy ** 2 + C1, + vx + vy + C2)) + D = B1 * B2 + S = (A1 * A2) / D + + # to avoid edge effects will ignore filter radius strip around edges + pad = (win_size - 1) // 2 + + # compute (weighted) mean of ssim + mssim = crop(S, pad).mean() + + if gradient: + # The following is Eqs. 7-8 of Avanaki 2009. + grad = filter_func(A1 / D, **filter_args) * X + grad += filter_func(-S / B2, **filter_args) * Y + grad += filter_func((ux * (A2 - A1) - uy * (B2 - B1) * S) / D, + **filter_args) + grad *= (2 / X.size) + + if full: + return mssim, grad, S + else: + return mssim, grad + else: + if full: + return mssim, S + else: + return mssim diff --git a/apps/blender/resources/images/scripts_verifier/ssim.py b/apps/blender/resources/images/scripts_verifier/ssim.py new file mode 100644 index 0000000000..41c527acc4 --- /dev/null +++ b/apps/blender/resources/images/scripts_verifier/ssim.py @@ -0,0 +1,50 @@ +import numpy +from skimage import compare_ssim + +import sys + + +## ======================= ## +## +class MetricSSIM: + + + ## ======================= ## + ## + @staticmethod + def compute_metrics( image1, image2 ): + + image1 = image1.convert("RGB") + image2 = image2.convert("RGB") + + np_image1 = numpy.array( image1 ) + np_image2 = numpy.array( image2 ) + + structualSim = compare_ssim( np_image1, np_image2, multichannel=True ) + + result = dict() + result[ "ssim" ] = structualSim + + return result + + ## ======================= ## + ## + @staticmethod + def get_labels(): + return [ "ssim" ] + +## ======================= ## +## +def run(): + + first_img = sys.argv[ 1 ] + second_img = sys.argv[ 2 ] + + ssim = MetricSSIM() + + print( ssim.compute_metrics( first_img, second_img ) ) + + + +if __name__ == "__main__": + run() diff --git a/apps/blender/resources/images/scripts_verifier/tree35_[crr=87.71][frr=0.92].pkl b/apps/blender/resources/images/scripts_verifier/tree35_[crr=87.71][frr=0.92].pkl new file mode 100644 index 0000000000000000000000000000000000000000..834cd9c37b8c72db02e98d99956a4d5ba8ff3594 GIT binary patch literal 10966 zcmbt4d3;mF*3(i777C^EpvYE=KtL@!wt&op0+yy0XjxLWG;LCDFikQ^S_)ddiWCs^ z74bntETtfVfVL>eq8QmhHd%zS7G$Y{Y|2BPKKsqwnQ3mz=kI>sd-F%mJ#*GM=ge|% zMy@vr16`KqMZ4APcEoz!R;%(8HpH4`^Vpn@(WEEAZuWR=**2>?3$ZAu;}oUOZFX3kIVO+S?6u8tW4$Jd)XE#qA8ivv7N*!|Aq=JVzUY2r&V7Q2H$1>4YhU zYf!K-+|d|JSkmJ0lLlZ2nvFZ9;?6olJw@ zF5GoaZl)42q5;qtQV4idYJ$*>8H~+xx~;J}xpuFOx$em_+s$O8Pf5%NTO59x`S46C zjuk!(#NCMxN!6q&Kh%>RnF$T4*R z(cdsSmBMg8Lko$<``H22Da{aFh{sAvM2oHvkCP0fr$-?+<{IKisYQ5kdZrR`TD-I+ zG60%#NPz#;`2uuw+xibC6n_l$>-q{xAGq*6y+~D0-rHPI3GDjv11OhnXlYn58%93! 
zxS;f~Kk(^kHfU?d08q)H%KNt>pi2dB{=^CI zBl%YDtp2+(Odm&!RB8L#?Dy$uaneRtRoc|{4xBj$!80#8&97ouHasAAWS>tT%GiPE zRmG*`)_nD+BR zY^@U>feg>=MVfN{U6a3v-T4;$YJ#W`Aa`n$t{2wn9z$DAmPoy9JRXE1Hccuu9M}#o zi9`{e9;N?6Y9HxyT|bws|MTS1ykl@H-55plZq$^^n9#FP`A;9gF2W%qdnf535`Kxi z37nASr$%0eRo%138TRgl4@L-M08~|mYQ{xyn+A&~GVsMmHT?ur3R`X8w-owBK@ssg zheGsXxO8u)P0SDSh{CFGLwV}ki34t|g+SRY6c!LumY}B_Fl}_VrcE_IVqBReq{Fv&GA)z+M(0H zk2k<&-2-x9`#w#%m2*Eo*tz=!=uqVkbn&AWTn$}4X;Z}oG`)Wt-pQD43*S)!dtPRA z`dgKmpqW3^exsZy`QXX&fLul+Z8@KB!R=dV6>wsYfE>7d6}94M8&x-d^uPAuZ3wFW zIDYHF3AiIZz#w-!hMwSPYYxTwL{PNgK`a7a)RRjh-iKkaD!I{nHRD;-Hge~cg zn5b0zlhvAX!b)n4cp|XtH=1&rGq1g|a^=O^_JX!*%H>BFh3clCfooLr0ie%Ndv5+| z4#oOSP=CbUY&7}Q%#Qi*e+SnF1?pPf&{7}G`JduY%zJ{$rC(1)qjPtKA26SVhmUXu z0*ZFuLmfD(<50{ef<_q#Qk~0ucdHW_3Vd})Y#k?XYGM4-XC%-Z9 z@bC*2aGoO|=YRSK)RCi42D-(Qy#AfrFm924O^>@5V0%@)fQZwY@tkq(=g6;CU4%Lj zK%&(@_&ZIx#QE}~WV{JJ5QR*=>AMYe=3aH-P!-RN4%Y^vX#ZC?vMgUid(E+cFfKf( zX^*P|vqiv3hn%oWQ|?Z#}Xai}7rwF6TSx;OJ^&%s4-#tXtS zNl@W&O?!fShh97pKV91E{C_(CbbnSq9Q2?9?pGC(-0wT|3^y;99Tgd*9t9|>^QG#I z!^+`#aS$W?ZGs-xi)BMB$EtY*nw)W=h(3otdvS9q{D)Y8h@3p7ndbVr?jH-&j?D8z zsG7IY)mJs+S#tTI?D4z<$Er4}xDnx+_6B|QtEt#=5+;fbf%xfvuLAYraS`RGiVN~D zz6lTS4DEcU;ZfM+L|})jTG~S2o6Gg#P#qWPpJ^zmRepyh_!JCQonQRk^P1!1>Ohj% z&}m0fxu%@%Nc+7~!v0$8CyJ>+{kXye4#j>=Q1rDApytLU#)UI?!^U3(#t-?|7yKsyy}ekN2i;f>U<{ z<~a^ke#$HMSW%D5zc#sls|1#B3fPn5ucHAR z6~_}*8OlT6n27x2PM&?k{2B~jsMM7EtNA{NtAQMf^BO_r zw?JRi(9p5^+y~p?N2-lLUa(WsUPSwcU1t>?t5q*RTBj)|Ms-^pG-=1}%}6Y7gE@n0 z9?5O4*GD^cwDvrE_B6a8HUh#|xj;LfF8#uvQ~Dox9W!?>O2IaImh-}d_j zavndCIx2AtZ2V2Yo-th8-ftaBQdTb647aN1!-oII`D|mrUeU+l`r+KaOb%6QODaBp z36+&bJn3zD0Cs;>oLGRIF(W~r#QD=WROKCwpZN#8`P}fh9z)JS+q}RVY;aCzy@B(m za;TcOXk4xfybQhF9F3+vv3J=EVP~Optk6LAm75nd>(f!Z91irJ zt5q*@`Zs7KS2K!3bv$#>Rd{Y;&zS9FzJp(gz*9c-y{5hY^nA_T0M%k4A|3LpM>Oq4 zX9TytEw6yh#fuk_GsZX8kLC)~IaH0$AGHOb9-(jEyppsWPW&OTA6c$Et{=nsjU1}# zrR43l7*)E;yF9n`B;2n$zsOsTAd!}FTu!ylg9cARRMd3!k$J^m!Diy1O8Deq+ULms zcKu8jFK*PYbnW+?J;sjVv;JM@{pi{zG@g&k%wZUZ1o&^Hdl6_69&|8m9$Z)+h^Oyb zq<#YDo5-Q6t}`ZW8i7#i=yA)Ny#~*I7?4X|8>u;u@)Hi6T(RT;{7SW6gZPbT61S7V zp(^hsjT+QP^7k)KKW@XY-NJx9X>6o^GUqpOsLDG?TAYVUY`d2n%3B3**f|3M{))4j z^(jZq8Fn1J344pc5jokb{eE;h@?z?+yu-ETE$FsZ)1L6Icjv(P4*DWA;~Dm0_K$gn zleO)wT92~0LW#q`>IzW*b&HYTn$hp$IqTq_Ujl`Owhz!-IKP!c)p^igy5KkHo$lK; z*Leme`#1vyhknu=-gXW1?(Ta^QT^L&DvzsS|v-_N)@Fd|?MfDxZ*&Z9+k_r6Kz&eYC(Ss9wf73OoOQtMxT zZx@tbG3y&cH*~EGP>Mh8tyyP_f**W0X6`ZgwQ9owdF#-0ZbwYZfL7oyK7A8{$R))C zXV_u6FF?s!-djI|%e};*s+`dNFe8fYJ~@3){JSvU5;*UUv^CQ7ldA)M<$php?FrMA z8|mGWADLcfpPu@;W*&sS_&$Lzxbb~@=#6v&05KPbt6aKW?j zB>pF5n(|j15s+7%9jd&MWnS?q(DzD=0-eVgp17fzg10@w@S;VHC@zoM#__HS4hC@C zl@_e1f?u1^34!DEz47O$A4-84m6r zV*HgG89cm~LGTKLOILIJa|TykQQ*i3E-#%|@S>xG*?9JG`O1ro|Kvdi!6yooLxg?{ z>vw#_=^Q7Y$bq^I=5+r$){oXQC@*3#?zRHuZy#6eM_>Pu^(UTEVCA&y46oeH_!gXH zcoD;ai~Co$ll9-&&R_(`?{8r^AV)~rK0Cl5&kz07O1o>?_BDt-v;3jS$2rc%M?ZNo zI{&RZ{u!S!df7e&$_d=hagi*(ewGKhIjdXX=K0CL!sMDPXRuK?9{MPmh4 z&0_N!_;)Dw>8rO&AMnpVsq_mv=*;a$f5G}!^YuZ@mpb!@zOt(Ifq6V0>)ASYWj?PD zz8=UGtPX+9=A9h6lgsmX*Ns1Ug{!4MnfoWRI*QuE>j#x6e94PB4&G&QTQ+ccre8`H zbcXxmc^(17<>b?x-@@Qjkq@?h(N_zgZrOs)>QBDcpV9OAy7BMJOb_C5+Q8N=F!Hd{ zU)k&uql@*ijy-yW2CC*8&-JfnayKVF!R#T%XYbaE`^VN7dPG%;17H{aGCI)l_2dql zf8Y#X4@(#>r;Ge@{yOsX$fnjmX=iWlPZ{g4-p=VfALQty<_8(VA4Z?NlFRe_f;>TI z`B~u;bha*y?8xd2$b6k5M~PbcM{{{jr(3_E=VmH*`Aqzlp()SmrdJE`WaVtoBerLJ zY)@>;&76cYbjoExa@#Nmo0UhG$Kl4A#wUm_df_VHltXXSWmz5Mikrz|BUktwSyr57 zY(W?#f;y+T%{hd@>2_P~W_l|STWX}I|L@XPBYo$yTBcZu#_VjX+e&q2D^6!Qb6h5c zlua%fI^B4Ra$|9tInQdhdQG0`IlWAo^iH6pu-ED*ch|7((e50Z1y6lci$tzl;TLPM zTQd7F<#xqGp`3P8-8ql+Qq_(}R4AIAwJb_zvzw`Q)nZrrsc!seH&yO_REwhBW2Em7 
z!a+q#w%dwL`8~1suSmUc?q895<2-8to~DyRr20~*)PUZ7ljzk?Np Optional[Crop]: + for crop in crops: + if crop.id == id: + return crop + return None + + +def prepare_params(mounted_paths, subtask_border, scene_file_path, resolution, samples, frames, output_format, basefilename, + crops_count=3, crops_borders=None): + + subimage = SubImage(Region(subtask_border[0], + subtask_border[1], + subtask_border[2], + subtask_border[3]), resolution) + + crops: List[Crop] = [] + crops_render_data = [] + + if crops_borders: + idx = 0 + for border in crops_borders: + crop = Crop.create_from_region(idx, Region(border[0], border[1], border[2], border[3]), subimage) + crops_render_data.append( + { + "id": crop.id, + "outfilebasename" : "crop" + str(idx) + '_', + "borders_x" : [crop.crop_region.left, crop.crop_region.right], + "borders_y" : [crop.crop_region.top, crop.crop_region.bottom] + } + ) + crops.append(crop) + idx += 1 + else: + for i in range(0,crops_count): + crop = generate_single_random_crop_data(subimage, subimage.get_default_crop_size(), i) + crops_render_data.append( + { + "id": crop.id, + "outfilebasename" : "crop" + str(i) + '_', + "borders_x" : [crop.crop_region.left, crop.crop_region.right], + "borders_y" : [crop.crop_region.top, crop.crop_region.bottom] + } + ) + crops.append(crop) + + params = { + "scene_file" : scene_file_path, + "resolution" : resolution, + "use_compositing" : False, + "samples" : samples, + "frames" : frames, + "start_task": 1, + "output_format" : output_format, + "crops" : crops_render_data + } + + return crops, params + + +def make_verdict( subtask_file_paths, crops, results ): + verdict = True + + for crop_data in results: + crop = get_crop_with_id(crop_data['crop']['id'], crops) + + left, top = crop.get_relative_top_left() + + print("left " + str(left)) + print("top " + str(top)) + + for crop, subtask in zip(crop_data['results'], subtask_file_paths): + crop_path = os.path.join(OUTPUT_DIR, crop) + results_path = compare_crop_window(crop_path, + subtask, + left, top, + output_filename_path=os.path.join(OUTPUT_DIR, crop_data['crop']['outfilebasename'] + "metrics.txt")) + + with open(results_path, 'r') as f: + data = json.load(f) + if data['Label'] != "TRUE": + verdict = False + + with open(os.path.join(OUTPUT_DIR, 'verdict.json'), 'w') as f: + json.dump({'verdict': verdict}, f) + + + +def verify(subtask_file_paths, subtask_border, scene_file_path, resolution, samples, frames, output_format, basefilename, + crops_count=3, crops_borders=None): + + """ Function will verifiy image with crops rendered from given blender scene file. + + subtask_file_paths - path (or paths if there was more than one frame) to image file, that will be compared against crops + subtask_border - [left, top, right, bottom] float decimal values representing + image localization in whole blender scene + scene_file_path - path to blender scene file + resolution - resolution at which given subtask was rendered (crop will be rendered with exactly same parameters) + samples - samples at which given subtask was rendered + frames - number of frames that are present in subtasks + output_format - output format of rendered crops + basefilename - this will be used for creating crop names + crops_count - number of randomly generated crops, (default 3) + work_dir - work + crops_borders - list of [left, top, right, bottom] float decimal values list, representing crops borders + those will be used instead of random crops, if present. 
+ + """ + mounted_paths = dict() + mounted_paths["WORK_DIR"] = WORK_DIR + mounted_paths["OUTPUT_DIR"] = OUTPUT_DIR + + crops, params = prepare_params(mounted_paths, subtask_border, scene_file_path, + resolution, samples, frames, output_format, + basefilename, crops_count, crops_borders) + + results = blender.render(params, mounted_paths) + + print(results) + + make_verdict( subtask_file_paths, crops, results ) diff --git a/apps/blender/resources/images/scripts_verifier/wavelet.py b/apps/blender/resources/images/scripts_verifier/wavelet.py new file mode 100644 index 0000000000..552f54c602 --- /dev/null +++ b/apps/blender/resources/images/scripts_verifier/wavelet.py @@ -0,0 +1,165 @@ +import pywt +import numpy +from PIL import Image + +import sys + +def calculate_sum( coeff ): + return sum( sum( coeff ** 2 ) ) + +def calculate_size( coeff ): + shape = coeff.shape + return shape[ 0 ] * shape[ 1 ] + +def calculate_mse( coeff1, coeff2, low, high ): + if low == high: + if low == 0: + high = low + 1 + else: + low = high - 1 + suma = 0 + num = 0 + for i in range( low, high ): + if type( coeff1[ i ] ) is tuple: + suma += calculate_sum( coeff1[ i ][ 0 ] - coeff2[ i ][ 0 ] ) + suma += calculate_sum( coeff1[ i ][ 1 ] - coeff2[ i ][ 1 ] ) + suma += calculate_sum( coeff1[ i ][ 2 ] - coeff2[ i ][ 2 ] ) + num += 3 * coeff1[ i ][ 0 ].size + else: + suma += calculate_sum(coeff1[i] - coeff2[i] ) + num += coeff1[ i ].size + if( num == 0 ): + return 0 + else: + return suma / num + +## ======================= ## +## +def calculate_frequencies( coeff1, coeff2 ): + + num_levels = len( coeff1 ) + start_level = num_levels - 3 + + freq_list = list() + + for i in range( start_level, num_levels ): + + abs_coeff1 = numpy.absolute( coeff1[ i ] ) + abs_coeff2 = numpy.absolute( coeff2[ i ] ) + + sum_coeffs1 = sum( sum( sum( abs_coeff1 ) ) ) + sum_coeffs2 = sum( sum( sum( abs_coeff2 ) ) ) + + diff = numpy.absolute( sum_coeffs2 - sum_coeffs1 ) / ( 3 * coeff1[ i ][ 0 ].size ) + + freq_list = [ diff ] + freq_list + + + return freq_list + + +## ======================= ## +## +class MetricWavelet: + + ## ======================= ## + ## + @staticmethod + def compute_metrics( image1, image2): + + image1 = image1.convert("RGB") + image2 = image2.convert("RGB") + + np_image1 = numpy.array(image1) + np_image2 = numpy.array(image2) + + result = dict() + result["wavelet_db4_base"] = 0 + result["wavelet_db4_low"] = 0 + result["wavelet_db4_mid"] = 0 + result["wavelet_db4_high"] = 0 + + for i in range(0,3): + coeff1 = pywt.wavedec2( np_image1[...,i], "db4" ) + coeff2 = pywt.wavedec2( np_image2[...,i], "db4" ) + + len_total = len( coeff1 ) - 1 + len_div_3 = int( len_total / 3 ) + len_two_thirds = int( len_total * 2 / 3 ) + + result[ "wavelet_db4_base" ] += calculate_mse( coeff1, coeff2, 0, 1 ) + result[ "wavelet_db4_low" ] = result[ "wavelet_db4_low" ] + calculate_mse( coeff1, coeff2, 1, 1 + len_div_3 ) + result[ "wavelet_db4_mid" ] = result[ "wavelet_db4_mid" ] + calculate_mse( coeff1, coeff2, 1 + len_div_3, 1 + len_two_thirds ) + result[ "wavelet_db4_high" ] = result[ "wavelet_db4_high" ] + calculate_mse( coeff1, coeff2, 1 + len_two_thirds, 1 + len_total ) + + # + result["wavelet_sym2_base"] = 0 + result["wavelet_sym2_low"] = 0 + result["wavelet_sym2_mid"] = 0 + result["wavelet_sym2_high"] = 0 + + for i in range(0,3): + coeff1 = pywt.wavedec2( np_image1[...,i], "sym2" ) + coeff2 = pywt.wavedec2( np_image2[...,i], "sym2" ) + + len_total = len( coeff1 ) - 1 + len_div_3 = int( len_total / 3 ) + len_two_thirds = int( len_total * 2 / 3 ) + + 
result[ "wavelet_sym2_base" ] += calculate_mse( coeff1, coeff2, 0, 1 ) + result[ "wavelet_sym2_low" ] = result[ "wavelet_sym2_low" ] + calculate_mse( coeff1, coeff2, 1, 1 + len_div_3 ) + result[ "wavelet_sym2_mid" ] = result[ "wavelet_sym2_mid" ] + calculate_mse( coeff1, coeff2, 1 + len_div_3, 1 + len_two_thirds ) + result[ "wavelet_sym2_high" ] = result[ "wavelet_sym2_high" ] + calculate_mse( coeff1, coeff2, 1 + len_two_thirds, 1 + len_total ) + + + # Frequency metrics based on haar wavlets + result[ "wavelet_haar_freq_x1" ] = 0 + result[ "wavelet_haar_freq_x2" ] = 0 + result[ "wavelet_haar_freq_x3" ] = 0 + + result["wavelet_haar_base"] = 0 + result["wavelet_haar_low"] = 0 + result["wavelet_haar_mid"] = 0 + result["wavelet_haar_high"] = 0 + + for i in range(0,3): + coeff1 = pywt.wavedec2( np_image1[...,i], "haar" ) + coeff2 = pywt.wavedec2( np_image2[...,i], "haar" ) + + freqs = calculate_frequencies( coeff1, coeff2 ) + + result[ "wavelet_haar_freq_x1" ] = result[ "wavelet_haar_freq_x1" ] + freqs[ 0 ] + result[ "wavelet_haar_freq_x2" ] = result[ "wavelet_haar_freq_x2" ] + freqs[ 1 ] + result[ "wavelet_haar_freq_x3" ] = result[ "wavelet_haar_freq_x3" ] + freqs[ 2 ] + + len_total = len( coeff1 ) - 1 + len_div_3 = int( len_total / 3 ) + len_two_thirds = int( len_total * 2 / 3 ) + + result[ "wavelet_haar_base" ] += calculate_mse( coeff1, coeff2, 0, 1 ) + result[ "wavelet_haar_low" ] += calculate_mse( coeff1, coeff2, 1, 1 + len_div_3 ) + result[ "wavelet_haar_mid" ] += calculate_mse( coeff1, coeff2, 1 + len_div_3, 1 + len_two_thirds ) + result[ "wavelet_haar_high" ] += calculate_mse( coeff1, coeff2, 1 + len_two_thirds, 1 + len_total ) + + return result + + ## ======================= ## + ## + @staticmethod + def get_labels(): + return [ "wavelet_sym2_base", "wavelet_sym2_low", "wavelet_sym2_mid", "wavelet_sym2_high", "wavelet_db4_base", "wavelet_db4_low", "wavelet_db4_mid", "wavelet_db4_high", "wavelet_haar_base", "wavelet_haar_low", "wavelet_haar_mid", "wavelet_haar_high", "wavelet_haar_freq_x1", "wavelet_haar_freq_x2", "wavelet_haar_freq_x3" ] + + +## ======================= ## +## +def run(): + first_img = Image.open( sys.argv[1] ) + second_img = Image.open( sys.argv[2] ) + + ssim = MetricWavelet() + + print(ssim.compute_metrics(first_img, second_img)) + + +if __name__ == "__main__": + run() diff --git a/apps/blender/task/blenderrendertask.py b/apps/blender/task/blenderrendertask.py index caf9f603e5..1ccf6f1fc1 100644 --- a/apps/blender/task/blenderrendertask.py +++ b/apps/blender/task/blenderrendertask.py @@ -10,7 +10,6 @@ from PIL import Image, ImageChops, ImageFile import apps.blender.resources.blenderloganalyser as log_analyser -from apps.blender.blender_reference_generator import BlenderReferenceGenerator from apps.blender.blenderenvironment import BlenderEnvironment, \ BlenderNVGPUEnvironment from apps.core.task.coretask import CoreTaskTypeInfo @@ -310,7 +309,6 @@ def __init__(self): class BlenderRenderTask(FrameRenderingTask): ENVIRONMENT_CLASS: Type[BlenderEnvironment] = BlenderEnvironment VERIFIER_CLASS = functools.partial(BlenderVerifier, - cropper_cls=BlenderReferenceGenerator, docker_task_cls=DockerTaskThread) BLENDER_MIN_BOX = [8, 8] diff --git a/apps/core/task/coretask.py b/apps/core/task/coretask.py index c34be29a4b..2ea32343eb 100644 --- a/apps/core/task/coretask.py +++ b/apps/core/task/coretask.py @@ -208,13 +208,8 @@ def verification_finished_(subtask_id, verdict, result): **{'owner': self.header.task_owner.key}}, results=result_files, resources=self.task_resources, - 
reference_data=self.get_reference_data() ) - # pylint:disable=no-self-use - def get_reference_data(self): - return [] - def verification_finished(self, subtask_id, verdict, result): try: if verdict == SubtaskVerificationState.VERIFIED: diff --git a/apps/images.ini b/apps/images.ini index 28aff18d6e..92cf7f7ce6 100644 --- a/apps/images.ini +++ b/apps/images.ini @@ -1,6 +1,6 @@ golemfactory/base core/resources/images/base.Dockerfile 1.4 . golemfactory/nvgpu core/resources/images/nvgpu.Dockerfile 1.2 . apps.core.nvgpu.is_supported golemfactory/blender blender/resources/images/blender.Dockerfile 1.8 blender/resources/images/ +golemfactory/blender_verifier blender/resources/images/blender_verifier.Dockerfile 1.0 blender/resources/images/ golemfactory/blender_nvgpu blender/resources/images/blender_nvgpu.Dockerfile 1.2 . apps.core.nvgpu.is_supported golemfactory/dummy dummy/resources/images/Dockerfile 1.1 dummy/resources/images -golemfactory/image_metrics rendering/resources/images/Dockerfile 1.12 . diff --git a/golem/verificator/blender_verifier.py b/golem/verificator/blender_verifier.py index 56a8f165f2..084b3f19a7 100644 --- a/golem/verificator/blender_verifier.py +++ b/golem/verificator/blender_verifier.py @@ -1,18 +1,14 @@ import logging from typing import Type -import math +import numpy import os -import posixpath import json -import numpy -from threading import Lock -from shutil import copy from golem.verificator.verifier import SubtaskVerificationState from .rendering_verifier import FrameRenderingVerifier -from twisted.internet.defer import Deferred, gatherResults +from twisted.internet.defer import Deferred logger = logging.getLogger("apps.blender") @@ -20,80 +16,41 @@ # FIXME #2086 # pylint: disable=R0902 class BlenderVerifier(FrameRenderingVerifier): - DOCKER_NAME = "golemfactory/image_metrics" - DOCKER_TAG = '1.12' + DOCKER_NAME = "golemfactory/blender_verifier" + DOCKER_TAG = '1.0' - def __init__(self, verification_data, cropper_cls: Type, + def __init__(self, verification_data, docker_task_cls: Type) -> None: super().__init__(verification_data) - self.lock = Lock() - self.verified_crops_counter = 0 self.finished = Deferred() - self.current_results_files = None - self.already_called = False - self.cropper = cropper_cls() self.docker_task_cls = docker_task_cls - self.metrics = dict() - self.crops_size = () - self.additional_test = False - self.default_crops_number = 3 self.timeout = 0 self.docker_task = None - def _get_part_img_size(self, subtask_info): - x, y = self._get_part_size(subtask_info) - return 0, 0, x, y - def _get_part_size(self, subtask_info): if subtask_info['use_frames'] and len(subtask_info['all_frames']) \ - >= subtask_info['total_tasks']: + >= subtask_info['total_tasks']: res_y = subtask_info['resolution'][1] else: res_y = int(round(numpy.float32( - numpy.float32(subtask_info['crops'][0]['borders_y'][0]) - * numpy.float32(subtask_info['resolution'][1])))) + numpy.float32(subtask_info['crops'][0]['borders_y'][0]) * + numpy.float32(subtask_info['resolution'][1])))) return subtask_info['resolution'][0], res_y - @staticmethod - def _get_part_size_from_subtask_number(subtask_info): - - if subtask_info['resolution'][1] % subtask_info['total_tasks'] == 0: - res_y = int(subtask_info['resolution'][1] / subtask_info['total_tasks']) - else: - # in this case task will be divided into not equal parts: - # floor or ceil of (res_y/total_tasks) - # ceiling will be height of subtasks with smaller num - ceiling_height = int(math.ceil(subtask_info['resolution'][1] / - 
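# Worked example of the _get_part_size() else-branch above for a non-frame
# subtask; the numbers match the test_get_part_size case removed later in this
# patch: borders_y[0] = 0.05 of a 600 px high image gives a 30 px high part.
import numpy

subtask_info = {
    'use_frames': False,
    'all_frames': [1],
    'total_tasks': 20,
    'resolution': [800, 600],
    'crops': [{'borders_x': [0, 1], 'borders_y': [0.05, 1]}],
}
res_y = int(round(numpy.float32(
    numpy.float32(subtask_info['crops'][0]['borders_y'][0]) *
    numpy.float32(subtask_info['resolution'][1]))))
assert (subtask_info['resolution'][0], res_y) == (800, 30)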
subtask_info['total_tasks'])) - additional_height = ceiling_height * subtask_info['total_tasks'] - additional_pixels = additional_height - subtask_info['resolution'][1] - ceiling_subtasks = subtask_info['total_tasks'] - additional_pixels - - if subtask_info['start_task'] > ceiling_subtasks: - res_y = ceiling_height - 1 - else: - res_y = ceiling_height - return res_y - # pylint: disable-msg=too-many-arguments def _verify_with_reference(self, verification_data): - self.current_results_files = verification_data["results"] - self.subtask_info = verification_data["subtask_info"] self.verification_data = verification_data try: self.start_rendering() # pylint: disable=W0703 except Exception as e: - logger.error("Crop generation failed %r", e) + logger.error("Verification failed %r", e) self.finished.errback(e) return self.finished def stop(self): - for d in self.finished_crops: - d.cancel() - self.can_make_verdict.cancel() if self.docker_task: self.docker_task.end_comp() @@ -110,44 +67,37 @@ def failure(exc): self.state = SubtaskVerificationState.WRONG_ANSWER return exc - self.finished_crops = self.cropper.render_crops( - self.resources, - self.verification_data["subtask_info"], - self.default_crops_number) self.finished.addCallback(success) self.finished.addErrback(failure) - for d in self.finished_crops: - d.addCallback(self._crop_rendered) - d.addErrback(self._crop_render_failure) - self.can_make_verdict = gatherResults(self.finished_crops) - self.can_make_verdict.addCallback(self.make_verdict) - self.can_make_verdict.addErrback(failure) - - # The verification function will generate three random crops, from results - # only after all three will be generated, we can start verification process - # pylint: disable=R0914 - def _crop_rendered(self, result): - results, time_spend, verification_context, crop_number = result - - logger.info("Crop no [%r] rendered for verification. 
Time spent: %r.", - crop_number, time_spend) - - work_dir = verification_context.get_crop_path( - str(crop_number)) - if not work_dir: - raise Exception("Crop %s not found", crop_number) + subtask_info = self.verification_data['subtask_info'] + work_dir = os.path.dirname(self.verification_data['results'][0]) dir_mapping = self.docker_task_cls.specify_dir_mapping( - resources=os.path.join(work_dir, "resources"), + resources=subtask_info['path_root'], temporary=os.path.dirname(work_dir), work=work_dir, output=os.path.join(work_dir, "output"), logs=os.path.join(work_dir, "logs"), ) - extra_data = self.create_extra_data( - results, verification_context, - crop_number, dir_mapping) + extra_data = dict( + subtask_paths=['/golem/work/{}'.format( + os.path.basename(i)) for i in self.verification_data['results'] + ], + subtask_borders=[ + subtask_info['crop_window'][0], + subtask_info['crop_window'][2], + subtask_info['crop_window'][1], + subtask_info['crop_window'][3], + ], + scene_path=subtask_info['scene_file'], + resolution=subtask_info['resolution'], + samples=subtask_info['samples'], + frames=subtask_info['frames'], + output_format=subtask_info['output_format'], + basefilename='crop', + script_filepath="/golem/scripts_verifier/runner.py", + ) self.docker_task = self.docker_task_cls( docker_images=[(self.DOCKER_NAME, self.DOCKER_TAG)], @@ -156,101 +106,29 @@ def _crop_rendered(self, result): timeout=self.timeout) def error(e): - # is handled elsewhere - e.trap(Exception) - - self.docker_task.run() - self.docker_task._deferred.addErrback(error) - - self.metrics[crop_number] = dict() - for root, _, files in os.walk(str(dir_mapping.output)): - for i, file in enumerate(files): - with open(dir_mapping.output / file) as json_data: - self.metrics[crop_number][i] = json.load(json_data) - - # One failure is enough to stop verification process, although this might - # change in future - def _crop_render_failure(self, error): - logger.warning("Crop render for verification failure %r", error) - self.call_if_not_called(False) - - def create_extra_data(self, results, verification_context, crop_number, - dir_mapping): - filtered_results = list(filter( - lambda x: not os.path.basename(x).endswith(".log"), results['data'] - )) - - dir_mapping.mkdirs() - verification_pairs = dict() - - for result in self.current_results_files: - copy(result, dir_mapping.resources) - for ref_result in filtered_results: - if os.path.basename(result) == os.path.basename(ref_result)[4:]: - verification_pairs[posixpath.join( - "/golem/resources", - os.path.basename(result))] = posixpath.join( - "/golem/work/tmp/output", os.path.basename(ref_result)) - - # This is failsafe in 99% cases there will be only one result file - # in subtask, so match it even if outfilebasename doesnt match pattern - if not verification_pairs: - verification_pairs[posixpath.join( - "/golem/resources", - os.path.basename( - self.current_results_files[0]))] = posixpath.join( - "/golem/work/tmp/output", os.path.basename(filtered_results[0])) - - crop = verification_context.get_crop_with_id(str(crop_number)) - if not crop: - raise Exception("Crop %s not found", crop_number) - - x, y = crop.get_relative_top_left() - return dict( - verification_files=verification_pairs, - xres=x, - yres=y, - script_filepath="/golem/scripts/runner.py", - ) - - def make_verdict(self, result): - labels = [] - for crop_idx in range(len(self.metrics.keys())): - for frame_idx, metric in self.metrics[crop_idx].items(): - labels.append(metric['Label']) - logger.debug( - "METRIC: Subtask: 
%r crop no: %r, frame %r " - "scene %s \n" - "requestor %r\n" - "provider %r " - "metrics %s", - self.subtask_info['subtask_id'], - crop_idx, - frame_idx, - self.subtask_info['scene_file'], - self.subtask_info['owner'], - self.subtask_info['node_id'], - str(self.metrics)) - - if metric['Label'] == "FALSE": - logger.warning("Subtask %r verified. Result: REJECT", self.subtask_info['subtask_id']) - self.call_if_not_called(False) - return - - if labels and all(label == "TRUE" for label in labels): - logger.info("Subtask %r verified. Result: ACCEPT", - self.subtask_info['subtask_id']) - self.call_if_not_called(True) - else: - logger.warning("Unexpected verification output for subtask %r,", - self.subtask_info['subtask_id']) - self.call_if_not_called(False) + logger.warning( + "Verification exception %s, accepting task as it's likely not " + "providers fault", + e, + ) + self.finished.callback(True) + + def callback(*_): + with open(os.path.join(dir_mapping.output, 'verdict.json'), 'r') \ + as f: + verdict = json.load(f) + + logger.info( + "Subtask %s verification verdict: %s", + subtask_info['subtask_id'], + verdict, + ) + if verdict['verdict']: + self.finished.callback(True) + else: + self.finished.errback( + Exception('Verification result negative', verdict)) - def call_if_not_called(self, callback): - with self.lock: - if self.already_called is False: - self.already_called = True - if callback is True: - self.finished.callback(True) - else: - self.finished.errback(False) + d = self.docker_task.start() + d.addErrback(error) + d.addCallback(callback) diff --git a/golem/verificator/rendering_verifier.py b/golem/verificator/rendering_verifier.py index cb8507c926..8aa0a5f532 100644 --- a/golem/verificator/rendering_verifier.py +++ b/golem/verificator/rendering_verifier.py @@ -14,7 +14,6 @@ class RenderingVerifier(CoreVerifier): def __init__(self, verification_data): super().__init__() self.subtask_info = verification_data["subtask_info"] - self.reference_data = verification_data["reference_data"] self.resources = verification_data["resources"] self.results = verification_data["results"] self.state = SubtaskVerificationState.WAITING @@ -38,21 +37,6 @@ def check_size(file_, res_x, res_y): def _get_part_size(self, subtask_info): return subtask_info['res_x'], subtask_info['res_y'] - def _get_part_img_size(self, subtask_info): - # verification method reacts to key error - num_task = subtask_info['start_task'] - total_tasks = subtask_info['total_tasks'] - res_x = subtask_info['res_x'] - res_y = subtask_info['res_y'] - if total_tasks == 0 \ - or num_task > total_tasks: - logger.error("Wrong total tasks number ({}) " - "for subtask number {}".format(total_tasks, - num_task)) - return 0, 0, 0, 0 - img_height = int(math.floor(res_y / total_tasks)) - return 0, (num_task - 1) * img_height, res_x, num_task * img_height - class FrameRenderingVerifier(RenderingVerifier): @@ -82,25 +66,3 @@ def simple_verification(self, verification_data): return False self.state = SubtaskVerificationState.VERIFIED return True - - def _get_part_img_size(self, subtask_info): - use_frames = subtask_info['use_frames'] - if not use_frames or self.__full_frames(subtask_info): - return super(FrameRenderingVerifier, self)\ - ._get_part_img_size(subtask_info) - else: - start_task = subtask_info['start_task'] - parts = subtask_info['parts'] - num_task = self._count_part(start_task, parts) - img_height = int(math.floor(subtask_info['res_y'] / parts)) - part_min_x = 1 - part_max_x = subtask_info['res_x'] - 1 - part_min_y = (num_task - 1) * 
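# The container-side make_verdict() in scripts_verifier/verificator.py writes
# a single-key JSON file that the callback above consumes. Minimal sketch of
# the round trip, with a local path standing in for dir_mapping.output:
import json
import os

output_dir = '/tmp/verifier-output'        # stands in for dir_mapping.output
os.makedirs(output_dir, exist_ok=True)

with open(os.path.join(output_dir, 'verdict.json'), 'w') as f:
    json.dump({'verdict': True}, f)        # what the docker job produces

with open(os.path.join(output_dir, 'verdict.json')) as f:
    verdict = json.load(f)

# verdict['verdict'] is True  -> finished.callback(True)  -> state VERIFIED
# verdict['verdict'] is False -> finished.errback(...)    -> state WRONG_ANSWER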
img_height + 1 - part_max_y = num_task * img_height - 1 - return part_min_x, part_min_y, part_max_x, part_max_y - - def _count_part(self, start_num, parts): - return ((start_num - 1) % parts) + 1 - - def __full_frames(self, subtask_info): - return subtask_info['total_tasks'] <= len(subtask_info['all_frames']) diff --git a/golem/verificator/verifier.py b/golem/verificator/verifier.py index 9faac98a6c..6036cfd7d8 100644 --- a/golem/verificator/verifier.py +++ b/golem/verificator/verifier.py @@ -18,7 +18,7 @@ class SubtaskVerificationState(Enum): class Verifier: - def start_verification(self, subtask_info: dict, reference_data: list, + def start_verification(self, subtask_info: dict, resources: list, results: list) -> None: raise NotImplementedError @@ -31,7 +31,6 @@ class StateVerifier(Verifier): def __init__(self): super(StateVerifier, self).__init__() self.subtask_info = {} - self.reference_data = [] self.resources = [] self.results = [] self.state = SubtaskVerificationState.UNKNOWN_SUBTASK @@ -52,7 +51,7 @@ def task_timeout(self, subtask_id): self.time_started = self.time_ended = datetime.utcnow() self.state = SubtaskVerificationState.TIMEOUT self.message = "Verification never ran, task timed out" - + state = self.state answer = self._get_answer() self._clear_state() @@ -60,7 +59,6 @@ def task_timeout(self, subtask_id): def _clear_state(self): self.subtask_info = {} - self.reference_data = [] self.resources = [] self.results = [] self.state = SubtaskVerificationState.UNKNOWN_SUBTASK @@ -69,8 +67,7 @@ def _clear_state(self): self.extra_data = {} def _get_answer(self): - return {'reference_data': self.reference_data, - 'message': self.message, + return {'message': self.message, 'time_started': self.time_started, 'time_ended': self.time_ended, 'extra_data': self.extra_data} diff --git a/tests/apps/blender/verification/test_blender_reference_generator.py b/tests/apps/blender/verification/test_blender_reference_generator.py deleted file mode 100644 index 4d8505af21..0000000000 --- a/tests/apps/blender/verification/test_blender_reference_generator.py +++ /dev/null @@ -1,138 +0,0 @@ -import logging - -import numpy -from golem.verificator.common.rendering_task_utils import get_min_max_y - -from apps.blender.blender_reference_generator import BlenderReferenceGenerator -from apps.blender.blender_reference_generator import SubImage, Region, Crop, \ - PixelRegion -from golem.testutils import TempDirFixture - -logger = logging.getLogger(__name__) - - -class TestBlenderReferenceGenerator(TempDirFixture): - - def test_get_default_crop_size(self): - sub_image = SubImage(Region(0, 1, 1, 0), (800, 8000)) - assert sub_image.get_default_crop_size() == (80, 800) - - sub_image = SubImage(Region(0, 1, 1, 0), (400, 799)) - assert sub_image.get_default_crop_size() == (40, 79) - - sub_image = SubImage(Region(0, 1, 1, 0), (399, 9000)) - assert sub_image.get_default_crop_size() == (39, 900) - - def test_get_random_interval_within_boundaries(self): - def _test_crop(min_, max_, step): - crop_min, crop_max = BlenderReferenceGenerator\ - ._get_random_interval_within_boundaries(min_, max_, step) - - assert round(crop_min, 2) >= round(min_, 2) - assert round(crop_max, 2) <= round(max_, 2) - assert abs(crop_max - crop_min - step) <= 0.1 - - _test_crop(40, 60, 8) - _test_crop(550, 570, 10) - - def test_get_relative_top_left(self): - sub_image = SubImage(Region(0, 1, 1, 0), (400, 160)) - crop = Crop.create_from_pixel_region( - "1", - PixelRegion(40, 100, 100, 20), - sub_image, "") - assert crop.get_relative_top_left() == 
(40, 60) - - sub_image = SubImage(Region(0, 1, 1, 0), (400, 90)) - crop = Crop.create_from_pixel_region( - "1", - PixelRegion(40, 30, 100, 20), - sub_image, "") - assert crop.get_relative_top_left() == (40, 60) - - sub_image = SubImage(Region(0, 1, 1, 0), (400, 90)) - crop = Crop.create_from_pixel_region( - "1", - PixelRegion(40, 30, 100, 20), - sub_image, "") - assert crop.get_relative_top_left() == (40, 60) - - def test_generate_crops_data(self): - - def _test_crop(resolution, crop_border, num): - blender_reference_generator = BlenderReferenceGenerator() - crop = { - "outbasefilename": 'outbasefilename', - "borders_x": [crop_border[0], crop_border[1]], - "borders_y": [crop_border[2], crop_border[3]] - } - crops_desc = blender_reference_generator\ - .generate_crops_data(resolution, crop, num, "") - - assert len(crops_desc) == 3 - for desc in crops_desc: - assert 0 <= desc.pixel_region.left <= resolution[0] - assert 0 <= desc.pixel_region.top <= resolution[1] - - for desc in crops_desc: - assert crop_border[0] <= desc.crop_region.left <= crop_border[1] - - assert crop_border[0] \ - <= desc.crop_region.right \ - <= crop_border[1] - - assert desc.crop_region.left <= desc.crop_region.right - - assert crop_border[2] <= desc.crop_region.top <= crop_border[3] - - assert crop_border[2] \ - <= desc.crop_region.bottom \ - <= crop_border[3] - - assert desc.crop_region.bottom <= desc.crop_region.top - - for _ in range(100): - _test_crop([800, 600], (numpy.float32(0.0), - numpy.float32(0.3), - numpy.float32(0.0), - numpy.float32(0.3)), 3) - _test_crop([800, 600], (numpy.float32(0.5), - numpy.float32(0.8), - numpy.float32(0.2), - numpy.float32(0.4)), 3) - _test_crop([1000, 888], (numpy.float32(0.2), - numpy.float32(0.4), - numpy.float32(0.2), - numpy.float32(0.5)), 3) - _test_crop([800, 600], (numpy.float32(0.0), - numpy.float32(0.1), - numpy.float32(0.0), - numpy.float32(0.4)), 3) - with self.assertRaises(Exception): - _test_crop([800, 600], (numpy.float32(0.0), - numpy.float32(0.01), - numpy.float32(0.0), - numpy.float32(0.01)), 3) - - def test_generate_crops_data_for_strange_resolutions(self): - # pylint: disable=R0914 - strange_res = [313, 317, 953, 967, 1949, 1951, 3319, 3323, 9949, 9967] - for l in range(0, 9): - res = (strange_res[l], strange_res[l + 1]) - for i in range(1, 10): - min_y, max_y = get_min_max_y(i, 9, res[1]) - min_y = numpy.float32(min_y) - max_y = numpy.float32(max_y) - crop_window = { - "outfilebasename": "basename", - "borders_x": [0.0, 1.0], - "borders_y": [min_y, max_y], - } - blender_reference_generator = BlenderReferenceGenerator() - crops_desc = blender_reference_generator\ - .generate_crops_data(res, crop_window, 3, "") - for j in range(0, 3): - assert crops_desc[j].pixel_region.left < crops_desc[ - j].pixel_region.right - assert crops_desc[j].pixel_region.top > crops_desc[ - j].pixel_region.bottom diff --git a/tests/apps/blender/verification/test_verification_queue.py b/tests/apps/blender/verification/test_verification_queue.py index 2d32fb2531..47a32932a3 100644 --- a/tests/apps/blender/verification/test_verification_queue.py +++ b/tests/apps/blender/verification/test_verification_queue.py @@ -5,7 +5,6 @@ from golem.core.common import timeout_to_deadline from golem.docker.task_thread import DockerTaskThread from apps.core.verification_queue import VerificationQueue -from apps.blender.blender_reference_generator import BlenderReferenceGenerator class TestVerificationQueue(unittest.TestCase): @@ -33,7 +32,6 @@ def test_timeout(): self.queue.submit( 
functools.partial(BlenderVerifier, - cropper_cls=BlenderReferenceGenerator, docker_task_cls=DockerTaskThread), "deadbeef", timeout_to_deadline(10), diff --git a/tests/apps/blender/verification/test_verificator_integration.py b/tests/apps/blender/verification/test_verificator_integration.py index 8592b77192..4d6599b10a 100644 --- a/tests/apps/blender/verification/test_verificator_integration.py +++ b/tests/apps/blender/verification/test_verificator_integration.py @@ -1,27 +1,21 @@ -import logging import os import time -from unittest import skip, mock +import pytest +from unittest import mock -from twisted.internet.defer import Deferred - -from apps.blender.blender_reference_generator import BlenderReferenceGenerator -from apps.blender.blenderenvironment import BlenderEnvironment from apps.blender.task.blenderrendertask import BlenderRenderTask -from golem.verificator.common.ci import ci_skip from golem.core.common import get_golem_path from golem.core.deferred import sync_wait from golem.docker.image import DockerImage from golem.docker.manager import DockerManager from golem.docker.task_thread import DockerTaskThread -from golem.environments.environmentsmanager import EnvironmentsManager from golem.task.localcomputer import ComputerAdapter from golem.testutils import TempDirFixture -@ci_skip +@pytest.mark.slow class TestVerificatorModuleIntegration(TempDirFixture): - TIMEOUT = 30 + TIMEOUT = 60 def setUp(self): # pylint: disable=R0915 @@ -32,9 +26,11 @@ def setUp(self): done_callback=mock.Mock(), work_dir=self.new_path, in_background=True) - self.blender_reference_generator = BlenderReferenceGenerator() - self.golem_dir = get_golem_path() - self.resources = ['tests/apps/blender/verification/test_data/bmw.blend'] + self.resources = [ + os.path.join( + get_golem_path(), + 'tests/apps/blender/verification/test_data/bmw.blend'), + ] self.computer = ComputerAdapter() self.subtask_info = dict() @@ -55,6 +51,7 @@ def setUp(self): 'borders_y':[0.0, 1.0] } ] + self.subtask_info['crop_window'] = [0.0, 1.0, 0.0, 1.0] self.subtask_info['node_id'] = 'deadbeef' self.subtask_info['subtask_id'] = '250771152547690738285326338136457465' self.subtask_info['all_frames'] = [1] @@ -92,165 +89,47 @@ def setUp(self): self.subtask_info['path_root'] self.subtask_info['ctd']['extra_data']['script_filepath'] = \ self.subtask_info['script_filepath'] - self.subtask_info['ctd']['short_description'] = '' self.subtask_info['ctd']['subtask_id'] = self.subtask_info['subtask_id'] - def test_bad_image(self): - - d = Deferred() - - def success(*args, **kwargs): - # pylint: disable=unused-argument - assert False - - def failure(*args, **kwargs): - # pylint: disable=unused-argument - d.callback(True) - + def _test_image(self, results, expected_result): verification_data = {} verification_data['subtask_info'] = self.subtask_info verification_data['results'] = [] + for result in results: + result_path = os.path.join(self.tempdir, result) + os.link( + os.path.join( + get_golem_path(), + 'tests/apps/blender/verification/test_data', + result, + ), + result_path, + ) + verification_data['results'].append(result_path) verification_data['reference_data'] = [] verification_data['resources'] = self.resources verification_data['paths'] = os.path.dirname(self.resources[0]) verifier = BlenderRenderTask.VERIFIER_CLASS(verification_data) - verifier.default_crops_number = 1 - verifier.current_results_files = ['tests/apps/blender/verification/' - 'test_data/very_bad_image.png'] + d = verifier._verify_with_reference(verification_data) - 
verifier.subtask_info = self.subtask_info - verifier.resources = self.resources + if expected_result: + sync_wait(d, TestVerificatorModuleIntegration.TIMEOUT) + else: + with self.assertRaisesRegex(Exception, 'result negative'): + sync_wait(d, TestVerificatorModuleIntegration.TIMEOUT) - finished = self.blender_reference_generator.render_crops( - self.resources, - self.subtask_info, - 1 - ) - - for deferred in finished: - deferred.addCallback(success) - deferred.addErrback(failure) - - sync_wait(d, TestVerificatorModuleIntegration.TIMEOUT) + def test_bad_image(self): + self._test_image(['very_bad_image.png'], False) def test_good_image(self): - d = Deferred() - - def success(*args, **kwargs): - # pylint: disable=unused-argument - d.callback(True) - - def failure(*args, **kwargs): - # pylint: disable=unused-argument - assert False - - verification_data = {} - verification_data['subtask_info'] = self.subtask_info - verification_data['results'] = [] - verification_data['reference_data'] = [] - verification_data['resources'] = self.resources - verification_data['paths'] = os.path.dirname(self.resources[0]) - - verifier = BlenderRenderTask.VERIFIER_CLASS(verification_data) - verifier.default_crops_number = 1 - verifier.current_results_files = \ - ['tests/apps/blender/verification/test_data/GolemTask_10001.png'] - - verifier.subtask_info = self.subtask_info - verifier.resources = self.resources - - finished = self.blender_reference_generator.render_crops( - self.resources, - self.subtask_info, - 1 - ) - - for deferred in finished: - deferred.addCallback(success) - deferred.addErrback(failure) - - sync_wait(d, TestVerificatorModuleIntegration.TIMEOUT) + self._test_image(['GolemTask_10001.png'], True) def test_subsampled_image(self): - d = Deferred() - - def success(*args, **kwargs): - # pylint: disable=unused-argument - assert False - - def failure(*args, **kwargs): - # pylint: disable=unused-argument - d.callback(True) - - verification_data = {} - verification_data['subtask_info'] = self.subtask_info - verification_data['results'] = [] - verification_data['reference_data'] = [] - verification_data['resources'] = self.resources - verification_data['paths'] = os.path.dirname(self.resources[0]) - - verifier = BlenderRenderTask.VERIFIER_CLASS(verification_data) - verifier.default_crops_number = 1 - verifier.current_results_files = \ - ['tests/apps/blender/verification/test_data/almost_good_image.png'] - - verifier.subtask_info = self.subtask_info - verifier.resources = self.resources - - finished = self.blender_reference_generator.render_crops( - self.resources, - self.subtask_info, - 1 - ) - - for deferred in finished: - deferred.addCallback(success) - deferred.addErrback(failure) - - sync_wait(d, TestVerificatorModuleIntegration.TIMEOUT) + self._test_image(['almost_good_image.png'], False) def test_multiple_frames_in_subtask(self): - d = Deferred() - self.subtask_info['all_frames'] = [1, 2] self.subtask_info['frames'] = [1, 2] self.subtask_info['ctd']['extra_data']['frames'] = [1, 2] - - def success(*args, **kwargs): - # pylint: disable=unused-argument - d.callback(True) - - def failure(*args, **kwargs): - # pylint: disable=unused-argument - assert False - - verification_data = {} - verification_data['subtask_info'] = self.subtask_info - verification_data['results'] = [] - verification_data['reference_data'] = [] - verification_data['resources'] = self.resources - verification_data["reference_generator"] = \ - self.blender_reference_generator - verification_data['paths'] = 
os.path.dirname(self.resources[0]) - - verifier = BlenderRenderTask.VERIFIER_CLASS(verification_data) - verifier.default_crops_number = 1 - verifier.current_results_files = [ - 'tests/apps/blender/verification/test_data/GolemTask_10001.png', - 'tests/apps/blender/verification/test_data/GolemTask_10002.png'] - - verifier.subtask_info = self.subtask_info - verifier.resources = self.resources - - finished = self.blender_reference_generator.render_crops( - self.resources, - self.subtask_info, - 1 - ) - - for deferred in finished: - deferred.addCallback(success) - deferred.addErrback(failure) - - sync_wait(d, TestVerificatorModuleIntegration.TIMEOUT) + self._test_image(['GolemTask_10001.png', 'GolemTask_10002.png'], True) diff --git a/tests/golem/verificator/test_blenderverifier.py b/tests/golem/verificator/test_blenderverifier.py index 9b32f49fab..8d88f319cc 100644 --- a/tests/golem/verificator/test_blenderverifier.py +++ b/tests/golem/verificator/test_blenderverifier.py @@ -4,141 +4,7 @@ from golem.testutils import TempDirFixture from golem.tools.assertlogs import LogTestCase from golem.verificator.blender_verifier import BlenderVerifier, logger -from golem.verificator.common.ci import ci_skip class TestBlenderVerifier(LogTestCase, TempDirFixture): - - def test_get_part_size_from_subtask_number(self): - subtask_info = { - "resolution": [800, 600], - "total_tasks": 20, - "start_task": 3, - } - - verification_data = {} - verification_data['subtask_info'] = subtask_info - verification_data['results'] = [] - verification_data['reference_data'] = [] - verification_data['resources'] = [] - - blender_verifier = BlenderVerifier(verification_data, - cropper_cls=mock.Mock(), - docker_task_cls=mock.Mock()) - assert blender_verifier._get_part_size_from_subtask_number(subtask_info) == 30 - subtask_info["total_tasks"] = 13 - subtask_info["start_task"] = 2 - assert blender_verifier._get_part_size_from_subtask_number(subtask_info) == 47 - subtask_info["start_task"] = 3 - assert blender_verifier._get_part_size_from_subtask_number(subtask_info) == 46 - subtask_info["start_task"] = 13 - assert blender_verifier._get_part_size_from_subtask_number(subtask_info) == 46 - - def test_get_part_size(self): - - crops = [ - { - "outfilebasename": 'test', - "borders_x": [0, 1], - "borders_y": [0.05, 1] - } - ] - subtask_info = { - "subtask_id": "deadbeef", - "use_frames": False, - "resolution": [800, 600], - "total_tasks": 20, - "start_task": 3, - "crops": crops - } - - verification_data = {} - verification_data['subtask_info'] = subtask_info - verification_data['results'] = [] - verification_data['reference_data'] = [] - verification_data['resources'] = [] - - blender_verifier = BlenderVerifier(verification_data, - cropper_cls=mock.Mock(), - docker_task_cls=mock.Mock()) - assert blender_verifier._get_part_size(subtask_info) == (800, 30) - subtask_info["use_frames"] = True - subtask_info["all_frames"] = list(range(40)) - subtask_info["crops"][0]['borders_x'] = [0, 1] - subtask_info["crops"][0]['borders_y'] = [0, 1] - assert blender_verifier._get_part_size(subtask_info) == (800, 600) - subtask_info["all_frames"] = list(range(10)) - subtask_info["crops"][0]['borders_x'] = [0, 1] - subtask_info["crops"][0]['borders_y'] = [0.5, 1] - assert blender_verifier._get_part_size(subtask_info) == (800, 300) - - def test_crop_render_failure(self): - verification_data = {} - verification_data['subtask_info'] = {} - verification_data['results'] = [] - verification_data['reference_data'] = [] - verification_data['resources'] = [] - - 
blender_verifier = BlenderVerifier(verification_data, - cropper_cls=mock.Mock(), - docker_task_cls=mock.Mock()) - blender_verifier.failure = lambda: None - - with self.assertLogs(logger, level="WARNING") as logs: - blender_verifier._crop_render_failure("There was a problem") - assert any("WARNING:apps.blender:Crop render for verification failure" - " 'There was a problem'" - in log for log in logs.output) - - @ci_skip - def test_crop_rendered(self): - crop_path = os.path.join(self.tempdir, str(0)) - - verification_data = {} - verification_data['subtask_info'] = {'subtask_id': 'deadbeef'} - verification_data['results'] = [] - verification_data['reference_data'] = [] - verification_data['resources'] = [] - - reference_generator = mock.MagicMock() - reference_generator.crop_counter = 3 - - docker_task_thread = mock.Mock() - docker_task_thread.return_value.output_dir_path = os.path.join( - self.tempdir, 'output') - docker_task_thread.specify_dir_mapping.return_value = \ - mock.Mock(resources=crop_path, temporary=self.tempdir) - - bv = BlenderVerifier(verification_data, - cropper_cls=reference_generator, - docker_task_cls=docker_task_thread) - bv.current_results_files = [os.path.join(self.tempdir, "none.png")] - open(bv.current_results_files[0], mode='a').close() - if not os.path.exists(crop_path): - os.mkdir(crop_path) - output_dir = os.path.join(crop_path, "output") - if not os.path.exists(output_dir): - os.mkdir(output_dir) - f = open(os.path.join(output_dir, "result_0.txt"), mode='a') - f.write("{") - f.write("\"MSE_canny\": 2032.03125,") - f.write("\"MSE_normal\": 1.171875,") - f.write("\"MSE_wavelet\": 5080.765625,") - f.write("\"SSIM_canny\": 0.9377418556022814,") - f.write("\"SSIM_normal\": 0.9948028194990917,") - f.write("\"SSIM_wavelet\": 0.7995332835184454,") - f.write("\"crop_resolution\": \"8x8\",") - f.write("\"imgCorr\": 0.7342643964262355") - f.write("}") - f.close() - verification_context = mock.MagicMock() - verification_context.get_crop_path = mock.MagicMock(return_value="0") - crop = mock.Mock() - crop.get_relative_top_left = mock.Mock(return_value=(3,5)) - verification_context.get_crop_with_id = mock.Mock(return_value=crop) - with self.assertLogs(logger, level="INFO") as logs: - bv._crop_rendered(({"data": ["def"]}, 2913, verification_context, 0)) - assert any("rendered for verification" - in log for log in logs.output) - assert any("2913" in log for log in logs.output) - + pass diff --git a/tests/golem/verificator/test_renderingverifier.py b/tests/golem/verificator/test_renderingverifier.py index c91dd3a305..506a0b8ce2 100644 --- a/tests/golem/verificator/test_renderingverifier.py +++ b/tests/golem/verificator/test_renderingverifier.py @@ -6,7 +6,6 @@ from golem.tools.assertlogs import LogTestCase from golem.verificator.rendering_verifier import ( RenderingVerifier, - logger, FrameRenderingVerifier, ) from golem.verificator.verifier import SubtaskVerificationState @@ -76,34 +75,6 @@ def test_simple_verification(self): self.last_verdict = rendering_verifier.verification_completed()[1] assert self.last_verdict == SubtaskVerificationState.VERIFIED - def test_get_part_img_size(self): - subtask_info = { - "res_x": 800, - "res_y": 600, - "total_tasks": 30, - "start_task": 3 - } - - verification_data = {'subtask_info': subtask_info, 'results': ["file1"], 'reference_data': [], 'resources': []} - - rendering_verifier = RenderingVerifier(verification_data) - - assert rendering_verifier._get_part_img_size(subtask_info) == (0, 40, 800, 60) - - subtask_info["total_tasks"] = 0 - with 
self.assertLogs(logger, level="WARNING"): - assert rendering_verifier._get_part_img_size(subtask_info) == (0, 0, 0, 0) - - subtask_info["total_tasks"] = 30 - subtask_info["start_task"] = 34 - with self.assertLogs(logger, level="WARNING"): - assert rendering_verifier._get_part_img_size(subtask_info) == (0, 0, 0, 0) - - subtask_info["total_tasks"] = 11 - subtask_info["res_y"] = 211 - subtask_info["start_task"] = 5 - assert rendering_verifier._get_part_img_size(subtask_info) == (0, 76, 800, 95) - class TestFrameRenderingVerifier(TempDirFixture): @@ -148,18 +119,3 @@ def test_simple_verification_frames(self): frame_rendering_verifier.simple_verification(verification_data) frame_rendering_verifier.verification_completed() assert frame_rendering_verifier.state == SubtaskVerificationState.WRONG_ANSWER - - def test_get_part_img_size(self): - verification_data = {'subtask_info': {}, 'results': [], 'reference_data': [], 'resources': []} - frame_rendering_verifier = FrameRenderingVerifier(verification_data) - subtask_info = { - "res_x": 600, - "res_y": 800, - "total_tasks": 20, - "all_frames": [5, 6, 7, 8, 9], - "start_task": 1, - "parts": 4, - "use_frames": True} - assert frame_rendering_verifier._get_part_img_size(subtask_info) == (1, 1, 599, 199) - subtask_info["use_frames"] = False - assert frame_rendering_verifier._get_part_img_size(subtask_info) == (0, 0, 600, 40) From a22401eda95b6d3c1d21afa99949e7ca51bc5191 Mon Sep 17 00:00:00 2001 From: Igor Adamski Date: Mon, 4 Feb 2019 10:55:32 +0100 Subject: [PATCH 2/7] test cleanup --- .../test_verificator_integration.py | 135 ------------------ .../golem/verificator/test_blenderverifier.py | 133 ++++++++++++++++- 2 files changed, 129 insertions(+), 139 deletions(-) delete mode 100644 tests/apps/blender/verification/test_verificator_integration.py diff --git a/tests/apps/blender/verification/test_verificator_integration.py b/tests/apps/blender/verification/test_verificator_integration.py deleted file mode 100644 index 4d6599b10a..0000000000 --- a/tests/apps/blender/verification/test_verificator_integration.py +++ /dev/null @@ -1,135 +0,0 @@ -import os -import time -import pytest -from unittest import mock - -from apps.blender.task.blenderrendertask import BlenderRenderTask -from golem.core.common import get_golem_path -from golem.core.deferred import sync_wait -from golem.docker.image import DockerImage -from golem.docker.manager import DockerManager -from golem.docker.task_thread import DockerTaskThread -from golem.task.localcomputer import ComputerAdapter -from golem.testutils import TempDirFixture - - -@pytest.mark.slow -class TestVerificatorModuleIntegration(TempDirFixture): - TIMEOUT = 60 - - def setUp(self): - # pylint: disable=R0915 - super().setUp() - dm = DockerTaskThread.docker_manager = DockerManager.install() - dm.update_config( - status_callback=mock.Mock(), - done_callback=mock.Mock(), - work_dir=self.new_path, - in_background=True) - self.resources = [ - os.path.join( - get_golem_path(), - 'tests/apps/blender/verification/test_data/bmw.blend'), - ] - self.computer = ComputerAdapter() - - self.subtask_info = dict() - self.subtask_info['scene_file'] = '/golem/resources/bmw.blend' - self.subtask_info['resolution'] = [150, 150] - self.subtask_info['use_compositing'] = False - self.subtask_info['samples'] = 35 - self.subtask_info['frames'] = [1] - self.subtask_info['output_format'] = 'PNG' - self.subtask_info['use_frames'] = False - self.subtask_info['start_task'] = 1 - self.subtask_info['total_tasks'] = 1 - self.subtask_info['crops'] = [ - { - 
'outfilebasename': - 'GolemTask_{}'.format(self.subtask_info['start_task']), - 'borders_x': [0.0, 1.0], - 'borders_y':[0.0, 1.0] - } - ] - self.subtask_info['crop_window'] = [0.0, 1.0, 0.0, 1.0] - self.subtask_info['node_id'] = 'deadbeef' - self.subtask_info['subtask_id'] = '250771152547690738285326338136457465' - self.subtask_info['all_frames'] = [1] - self.subtask_info['tmp_dir'] = self.tempdir - self.subtask_info['subtask_timeout'] = 600 - self.subtask_info['script_filepath'] = '/golem/scripts/job.py' - - self.subtask_info['path_root'] = os.path.dirname(self.resources[0]) - self.subtask_info['parts'] = 1 - self.subtask_info['owner'] = "deadbeef" - self.subtask_info['ctd'] = dict() - self.subtask_info['ctd']['deadline'] = time.time() + 3600 - self.subtask_info['ctd']['docker_images'] = [DockerImage( - 'golemfactory/blender', tag='1.8').to_dict()] - self.subtask_info['ctd']['extra_data'] = dict() - self.subtask_info['ctd']['extra_data']['scene_file'] = \ - self.subtask_info['scene_file'] - self.subtask_info['ctd']['extra_data']['resolution'] = \ - self.subtask_info['resolution'] - self.subtask_info['ctd']['extra_data']['use_compositing'] = \ - self.subtask_info['use_compositing'] - self.subtask_info['ctd']['extra_data']['samples'] = \ - self.subtask_info['samples'] - self.subtask_info['ctd']['extra_data']['frames'] = \ - self.subtask_info['frames'] - self.subtask_info['ctd']['extra_data']['output_format'] = \ - self.subtask_info['output_format'] - self.subtask_info['ctd']['extra_data']['start_task'] = \ - self.subtask_info['start_task'] - self.subtask_info['ctd']['extra_data']['total_tasks'] = \ - self.subtask_info['total_tasks'] - self.subtask_info['ctd']['extra_data']['crops'] = \ - self.subtask_info['crops'] - self.subtask_info['ctd']['extra_data']['path_root'] = \ - self.subtask_info['path_root'] - self.subtask_info['ctd']['extra_data']['script_filepath'] = \ - self.subtask_info['script_filepath'] - self.subtask_info['ctd']['subtask_id'] = self.subtask_info['subtask_id'] - - def _test_image(self, results, expected_result): - verification_data = {} - verification_data['subtask_info'] = self.subtask_info - verification_data['results'] = [] - for result in results: - result_path = os.path.join(self.tempdir, result) - os.link( - os.path.join( - get_golem_path(), - 'tests/apps/blender/verification/test_data', - result, - ), - result_path, - ) - verification_data['results'].append(result_path) - verification_data['reference_data'] = [] - verification_data['resources'] = self.resources - verification_data['paths'] = os.path.dirname(self.resources[0]) - - verifier = BlenderRenderTask.VERIFIER_CLASS(verification_data) - d = verifier._verify_with_reference(verification_data) - - if expected_result: - sync_wait(d, TestVerificatorModuleIntegration.TIMEOUT) - else: - with self.assertRaisesRegex(Exception, 'result negative'): - sync_wait(d, TestVerificatorModuleIntegration.TIMEOUT) - - def test_bad_image(self): - self._test_image(['very_bad_image.png'], False) - - def test_good_image(self): - self._test_image(['GolemTask_10001.png'], True) - - def test_subsampled_image(self): - self._test_image(['almost_good_image.png'], False) - - def test_multiple_frames_in_subtask(self): - self.subtask_info['all_frames'] = [1, 2] - self.subtask_info['frames'] = [1, 2] - self.subtask_info['ctd']['extra_data']['frames'] = [1, 2] - self._test_image(['GolemTask_10001.png', 'GolemTask_10002.png'], True) diff --git a/tests/golem/verificator/test_blenderverifier.py b/tests/golem/verificator/test_blenderverifier.py 
index 8d88f319cc..6a91a3e5c9 100644 --- a/tests/golem/verificator/test_blenderverifier.py +++ b/tests/golem/verificator/test_blenderverifier.py @@ -1,10 +1,135 @@ import os +import time +import pytest from unittest import mock +from golem.core.common import get_golem_path +from golem.core.deferred import sync_wait +from golem.docker.image import DockerImage +from golem.docker.manager import DockerManager +from golem.docker.task_thread import DockerTaskThread +from golem.task.localcomputer import ComputerAdapter from golem.testutils import TempDirFixture -from golem.tools.assertlogs import LogTestCase -from golem.verificator.blender_verifier import BlenderVerifier, logger +from golem.verificator.blender_verifier import BlenderVerifier -class TestBlenderVerifier(LogTestCase, TempDirFixture): - pass +@pytest.mark.slow +class TestBlenderVerifier(TempDirFixture): + TIMEOUT = 60 + + def setUp(self): + # pylint: disable=R0915 + super().setUp() + dm = DockerTaskThread.docker_manager = DockerManager.install() + dm.update_config( + status_callback=mock.Mock(), + done_callback=mock.Mock(), + work_dir=self.new_path, + in_background=True) + self.resources = [ + os.path.join( + get_golem_path(), + 'tests/apps/blender/verification/test_data/bmw.blend'), + ] + self.computer = ComputerAdapter() + + self.subtask_info = dict() + self.subtask_info['scene_file'] = '/golem/resources/bmw.blend' + self.subtask_info['resolution'] = [150, 150] + self.subtask_info['use_compositing'] = False + self.subtask_info['samples'] = 35 + self.subtask_info['frames'] = [1] + self.subtask_info['output_format'] = 'PNG' + self.subtask_info['use_frames'] = False + self.subtask_info['start_task'] = 1 + self.subtask_info['total_tasks'] = 1 + self.subtask_info['crops'] = [ + { + 'outfilebasename': + 'GolemTask_{}'.format(self.subtask_info['start_task']), + 'borders_x': [0.0, 1.0], + 'borders_y':[0.0, 1.0] + } + ] + self.subtask_info['crop_window'] = [0.0, 1.0, 0.0, 1.0] + self.subtask_info['node_id'] = 'deadbeef' + self.subtask_info['subtask_id'] = '250771152547690738285326338136457465' + self.subtask_info['all_frames'] = [1] + self.subtask_info['tmp_dir'] = self.tempdir + self.subtask_info['subtask_timeout'] = 600 + self.subtask_info['script_filepath'] = '/golem/scripts/job.py' + + self.subtask_info['path_root'] = os.path.dirname(self.resources[0]) + self.subtask_info['parts'] = 1 + self.subtask_info['owner'] = "deadbeef" + self.subtask_info['ctd'] = dict() + self.subtask_info['ctd']['deadline'] = time.time() + 3600 + self.subtask_info['ctd']['docker_images'] = [DockerImage( + 'golemfactory/blender', tag='1.8').to_dict()] + self.subtask_info['ctd']['extra_data'] = dict() + self.subtask_info['ctd']['extra_data']['scene_file'] = \ + self.subtask_info['scene_file'] + self.subtask_info['ctd']['extra_data']['resolution'] = \ + self.subtask_info['resolution'] + self.subtask_info['ctd']['extra_data']['use_compositing'] = \ + self.subtask_info['use_compositing'] + self.subtask_info['ctd']['extra_data']['samples'] = \ + self.subtask_info['samples'] + self.subtask_info['ctd']['extra_data']['frames'] = \ + self.subtask_info['frames'] + self.subtask_info['ctd']['extra_data']['output_format'] = \ + self.subtask_info['output_format'] + self.subtask_info['ctd']['extra_data']['start_task'] = \ + self.subtask_info['start_task'] + self.subtask_info['ctd']['extra_data']['total_tasks'] = \ + self.subtask_info['total_tasks'] + self.subtask_info['ctd']['extra_data']['crops'] = \ + self.subtask_info['crops'] + 
self.subtask_info['ctd']['extra_data']['path_root'] = \
+            self.subtask_info['path_root']
+        self.subtask_info['ctd']['extra_data']['script_filepath'] = \
+            self.subtask_info['script_filepath']
+        self.subtask_info['ctd']['subtask_id'] = self.subtask_info['subtask_id']
+
+    def _test_image(self, results, expected_result):
+        verification_data = {}
+        verification_data['subtask_info'] = self.subtask_info
+        verification_data['results'] = []
+        for result in results:
+            result_path = os.path.join(self.tempdir, result)
+            os.link(
+                os.path.join(
+                    get_golem_path(),
+                    'tests/apps/blender/verification/test_data',
+                    result,
+                ),
+                result_path,
+            )
+            verification_data['results'].append(result_path)
+        verification_data['reference_data'] = []
+        verification_data['resources'] = self.resources
+        verification_data['paths'] = os.path.dirname(self.resources[0])
+
+        verifier = BlenderVerifier(verification_data, DockerTaskThread)
+        d = verifier._verify_with_reference(verification_data)
+
+        if expected_result:
+            sync_wait(d, self.TIMEOUT)
+        else:
+            with self.assertRaisesRegex(Exception, 'result negative'):
+                sync_wait(d, self.TIMEOUT)
+
+    def test_bad_image(self):
+        self._test_image(['very_bad_image.png'], False)
+
+    def test_good_image(self):
+        self._test_image(['GolemTask_10001.png'], True)
+
+    def test_subsampled_image(self):
+        self._test_image(['almost_good_image.png'], False)
+
+    def test_multiple_frames_in_subtask(self):
+        self.subtask_info['all_frames'] = [1, 2]
+        self.subtask_info['frames'] = [1, 2]
+        self.subtask_info['ctd']['extra_data']['frames'] = [1, 2]
+        self._test_image(['GolemTask_10001.png', 'GolemTask_10002.png'], True)

From 15b14b555709861652e72e33d79f63c88adf742e Mon Sep 17 00:00:00 2001
From: Igor Adamski
Date: Mon, 4 Feb 2019 11:55:27 +0100
Subject: [PATCH 3/7] test failing docker

---
 golem/verificator/blender_verifier.py          | 17 +++++-------
 golem/verificator/rendering_verifier.py        |  7 -----
 .../blender/task/test_blenderrendertask.py     |  8 +++---
 .../golem/verificator/test_blenderverifier.py  | 27 +++++++++++++------
 4 files changed, 30 insertions(+), 29 deletions(-)

diff --git a/golem/verificator/blender_verifier.py b/golem/verificator/blender_verifier.py
index 084b3f19a7..791b97b9f8 100644
--- a/golem/verificator/blender_verifier.py
+++ b/golem/verificator/blender_verifier.py
@@ -1,6 +1,7 @@
-import logging
+from datetime import datetime
 from typing import Type

+import logging
 import numpy
 import os
 import json
@@ -10,7 +11,7 @@
 from .rendering_verifier import FrameRenderingVerifier
 from twisted.internet.defer import Deferred

-logger = logging.getLogger("apps.blender")
+logger = logging.getLogger(__name__)


 # FIXME #2086
@@ -37,8 +38,8 @@ def _get_part_size(self, subtask_info):
                 numpy.float32(subtask_info['resolution'][1]))))
         return subtask_info['resolution'][0], res_y

-    # pylint: disable-msg=too-many-arguments
-    def _verify_with_reference(self, verification_data):
+    def start_verification(self, verification_data):
+        self.time_started = datetime.utcnow()
         self.verification_data = verification_data

         try:
@@ -106,12 +107,8 @@ def failure(exc):
             timeout=self.timeout)

         def error(e):
-            logger.warning(
-                "Verification exception %s, accepting task as it's likely not "
-                "providers fault",
-                e,
-            )
-            self.finished.callback(True)
+            logger.warning("Verification process exception %s", e)
+            self.finished.errback(e)

         def callback(*_):
             with open(os.path.join(dir_mapping.output, 'verdict.json'), 'r') \
diff --git a/golem/verificator/rendering_verifier.py b/golem/verificator/rendering_verifier.py
index 8aa0a5f532..81bc8d2748 100644
--- a/golem/verificator/rendering_verifier.py
+++ b/golem/verificator/rendering_verifier.py
@@ -1,7 +1,4 @@
 import logging
-import math
-from datetime import datetime
-from collections import Callable
 from .core_verifier import CoreVerifier
 from .imgrepr import load_img
 from .verifier import SubtaskVerificationState
@@ -18,10 +15,6 @@ def __init__(self, verification_data):
         self.results = verification_data["results"]
         self.state = SubtaskVerificationState.WAITING

-    def start_verification(self, verification_data):
-        self.time_started = datetime.utcnow()
-        return self._verify_with_reference(verification_data)
-
     @staticmethod
     def check_size(file_, res_x, res_y):
         img = load_img(file_)
diff --git a/tests/apps/blender/task/test_blenderrendertask.py b/tests/apps/blender/task/test_blenderrendertask.py
index 0bb1848605..43f0a5d76e 100644
--- a/tests/apps/blender/task/test_blenderrendertask.py
+++ b/tests/apps/blender/task/test_blenderrendertask.py
@@ -150,8 +150,8 @@ def verification_finished1(verification_data):
                 SubtaskVerificationState.VERIFIED,
                 result)

-        with mock.patch('golem.verificator.rendering_verifier.'
-                        'RenderingVerifier.start_verification',
+        with mock.patch('golem.verificator.blender_verifier.'
+                        'BlenderVerifier.start_verification',
                         side_effect=verification_finished1):
            self.bt.computation_finished(
                extra_data3.ctd['subtask_id'],
@@ -181,8 +181,8 @@ def verification_finished2(verification_data):
        img.save(file2, "PNG")
        img.close()

-        with mock.patch('golem.verificator.rendering_verifier.'
-                        'RenderingVerifier.start_verification',
+        with mock.patch('golem.verificator.blender_verifier.'
+                        'BlenderVerifier.start_verification',
                         side_effect=verification_finished2):
            self.bt.computation_finished(
                extra_data4.ctd['subtask_id'],
diff --git a/tests/golem/verificator/test_blenderverifier.py b/tests/golem/verificator/test_blenderverifier.py
index 6a91a3e5c9..c14b1888d6 100644
--- a/tests/golem/verificator/test_blenderverifier.py
+++ b/tests/golem/verificator/test_blenderverifier.py
@@ -91,7 +91,7 @@ def setUp(self):
             self.subtask_info['script_filepath']
         self.subtask_info['ctd']['subtask_id'] = self.subtask_info['subtask_id']

-    def _test_image(self, results, expected_result):
+    def _test_image(self, results, exception_regex=None):
         verification_data = {}
         verification_data['subtask_info'] = self.subtask_info
         verification_data['results'] = []
@@ -111,25 +111,36 @@ def _test_image(self, results, expected_result):
         verification_data['paths'] = os.path.dirname(self.resources[0])

         verifier = BlenderVerifier(verification_data, DockerTaskThread)
-        d = verifier._verify_with_reference(verification_data)
+        d = verifier.start_verification(verification_data)

-        if expected_result:
+        if not exception_regex:
             sync_wait(d, self.TIMEOUT)
         else:
-            with self.assertRaisesRegex(Exception, 'result negative'):
+            with self.assertRaisesRegex(Exception, exception_regex):
                 sync_wait(d, self.TIMEOUT)

     def test_bad_image(self):
-        self._test_image(['very_bad_image.png'], False)
+        self._test_image(['very_bad_image.png'], 'Verification result negative')

     def test_good_image(self):
-        self._test_image(['GolemTask_10001.png'], True)
+        self._test_image(['GolemTask_10001.png'])

     def test_subsampled_image(self):
-        self._test_image(['almost_good_image.png'], False)
+        self._test_image(
+            ['almost_good_image.png'],
+            'Verification result negative',
+        )

     def test_multiple_frames_in_subtask(self):
         self.subtask_info['all_frames'] = [1, 2]
         self.subtask_info['frames'] = [1, 2]
         self.subtask_info['ctd']['extra_data']['frames'] = [1, 2]
-        self._test_image(['GolemTask_10001.png', 'GolemTask_10002.png'], True)
+        self._test_image(['GolemTask_10001.png', 'GolemTask_10002.png'])
+
+    def test_docker_error(self):
+        # Set an invalid param so that Docker computation fails inside
+        self.subtask_info['frames'] = None
+        self._test_image(
+            ['GolemTask_10001.png'],
+            'Subtask computation failed with exit code 1',
+        )

From 7c8425a572dad3a186fff7ad21c529fbbd1fdd1d Mon Sep 17 00:00:00 2001
From: "nieznany.sprawiciel"
Date: Thu, 7 Feb 2019 14:57:09 +0100
Subject: [PATCH 4/7] Update scripts to newer version

---
 .../img_metrics_calculator.py              | 156 ++++++++----------
 .../images/scripts_verifier/verificator.py |   6 +-
 2 files changed, 68 insertions(+), 94 deletions(-)

diff --git a/apps/blender/resources/images/scripts_verifier/img_metrics_calculator.py b/apps/blender/resources/images/scripts_verifier/img_metrics_calculator.py
index 789cd7d0fd..ec26a52802 100644
--- a/apps/blender/resources/images/scripts_verifier/img_metrics_calculator.py
+++ b/apps/blender/resources/images/scripts_verifier/img_metrics_calculator.py
@@ -1,43 +1,41 @@
 import os
 import sys
-import pickle
 from typing import Dict
-import numpy as np
 import OpenEXR
 from PIL import Image
 import decision_tree
-from img_format_converter import \
-    ConvertTGAToPNG, ConvertEXRToPNG
-from imgmetrics import \
-    ImgMetrics
+from img_format_converter import ConvertTGAToPNG, ConvertEXRToPNG
+from imgmetrics import ImgMetrics

-CROP_NAME = "/golem/output/scene_crop.png"
+CROP_NAME = "scene_crop.png"
 VERIFICATION_SUCCESS = "TRUE"
 VERIFICATION_FAIL = "FALSE"
 TREE_PATH = "/golem/scripts_verifier/tree35_[crr=87.71][frr=0.92].pkl"

-def compare_crop_window(cropped_img_path,
-                        rendered_scene_path,
-                        xres, yres,
-                        output_filename_path='metrics.txt'):
+
+def calculate_metrics(reference_img_path,
+                      result_img_path,
+                      xres,
+                      yres,
+                      metrics_output_filename='metrics.txt'):
     """
     This is the entry point for calculation of metrics between the
     rendered_scene and the sample(cropped_img) generated for comparison.
- :param cropped_img_path: - :param rendered_scene_path: + :param reference_img_path: + :param result_img_path: :param xres: x position of crop (left, top) :param yres: y position of crop (left, top) - :param output_filename_path: + :param metrics_output_filename: :return: """ cropped_img, scene_crops, rendered_scene = \ - _load_and_prepare_img_for_comparison( - cropped_img_path, - rendered_scene_path, - xres, yres) + _load_and_prepare_images_for_comparison(reference_img_path, + result_img_path, + xres, + yres) best_crop = None best_img_metrics = None @@ -47,7 +45,7 @@ def compare_crop_window(cropped_img_path, effective_metrics, classifier, labels, available_metrics = get_metrics() # First try not offset crop - + # TODO this shouldn't depend on the crops' ordering default_crop = scene_crops[0] default_metrics = compare_images(cropped_img, default_crop, available_metrics) try: @@ -58,9 +56,9 @@ def compare_crop_window(cropped_img_path, default_metrics['Label'] = VERIFICATION_FAIL if default_metrics['Label'] == VERIFICATION_SUCCESS: default_crop.save(CROP_NAME) - return ImgMetrics(default_metrics).write_to_file(output_filename_path) + return ImgMetrics(default_metrics).write_to_file(metrics_output_filename) else: - # Try offsete crops + # Try offset crops for crop in scene_crops[1:]: try: img_metrics = compare_images(cropped_img, crop, available_metrics) @@ -74,114 +72,88 @@ def compare_crop_window(cropped_img_path, break if best_crop and best_img_metrics: best_crop.save(CROP_NAME) - return ImgMetrics(best_img_metrics).write_to_file(output_filename_path) + return ImgMetrics(best_img_metrics).write_to_file(metrics_output_filename) else: # We didnt find any better match in offset crops, return the default one default_crop.save(CROP_NAME) - path_to_metrics = ImgMetrics(default_metrics).write_to_file(output_filename_path) + path_to_metrics = ImgMetrics(default_metrics).write_to_file(metrics_output_filename) return path_to_metrics - #This is unexpected but handle in case of errors + # This is unexpected but handle in case of errors stub_data = {element:-1 for element in get_labels_from_metrics(available_metrics)} stub_data['Label'] = VERIFICATION_FAIL - path_to_metrics = ImgMetrics(stub_data).write_to_file(output_filename_path) + path_to_metrics = ImgMetrics(stub_data).write_to_file(metrics_output_filename) return path_to_metrics + def load_classifier(): data = decision_tree.DecisionTree.load(TREE_PATH) - return data[0], data[1] -def classify_with_tree(metrics, classifier, feature_labels): +def classify_with_tree(metrics, classifier, feature_labels): features = dict() for label in feature_labels: features[label] = metrics[label] - results = classifier.classify_with_feature_vector(features, feature_labels) - return results[0].decode('utf-8') -def _load_and_prepare_img_for_comparison(cropped_img_path, - rendered_scene_path, - xres, yres): + +def _load_and_prepare_images_for_comparison(reference_img_path, + result_img_path, + xres, + yres): """ This function prepares (i.e. crops) the rendered_scene so that it will fit the sample(cropped_img) generated for comparison. 
- :param cropped_img_path: - :param rendered_scene_path: + :param reference_img_path: + :param result_img_path: :param xres: x position of crop (left, top) :param yres: y position of crop (left, top) :return: """ - rendered_scene = None - # if rendered scene has .exr format need to convert it for .png format - if os.path.splitext(rendered_scene_path)[1] == ".exr": - check_input = OpenEXR.InputFile(rendered_scene_path).header()[ - 'channels'] - if 'RenderLayer.Combined.R' in check_input: - sys.exit("There is no support for OpenEXR multilayer") - file_name = "/tmp/scene.png" - ConvertEXRToPNG(rendered_scene_path, file_name) - rendered_scene = Image.open(file_name) - elif os.path.splitext(rendered_scene_path)[1] == ".tga": - file_name = "/tmp/scene.png" - ConvertTGAToPNG(rendered_scene_path, file_name) - rendered_scene = Image.open(file_name) - else: - rendered_scene = Image.open(rendered_scene_path) - - cropped_img = Image.open(cropped_img_path) - (crop_width, crop_height) = cropped_img.size - + rendered_scene = convert_to_png_if_needed(result_img_path) + reference_img = convert_to_png_if_needed(reference_img_path) + (crop_width, crop_height) = reference_img.size crops = get_crops(rendered_scene, xres, yres, crop_width, crop_height) + return reference_img, crops, rendered_scene - return cropped_img, crops, rendered_scene - - -def get_crops(input, x, y, width, height): - crops = [] - - scene_crop = input.crop((x, y, x + width, y + height)) - - crops.append(scene_crop) - scene_crop_left = input.crop((x-1, y, x + width-1, y + height)) +def get_file_extension_lowercase(file_path): + return os.path.splitext(file_path)[1][1:].lower() - crops.append(scene_crop_left) - scene_crop_left_up = input.crop((x-1, y-1, x + width-1, y + height-1)) - - crops.append(scene_crop_left_up) - - scene_crop_up = input.crop((x, y-1, x + width, y + height-1)) - - crops.append(scene_crop_up) - - scene_crop_up_right = input.crop((x+1, y-1, x + width+1, y + height-1)) - - crops.append(scene_crop_up_right) - - scene_crop_right = input.crop((x+1, y, x + width+1, y + height)) - - crops.append(scene_crop_right) - - scene_crop_down_right = input.crop((x+1, y+1, x + width+1, y + height+1)) - - crops.append(scene_crop_down_right) - - scene_crop_down = input.crop((x, y+1, x + width, y + height+1)) - - crops.append(scene_crop_down) - - scene_crop_down_left = input.crop((x-1, y+1, x + width-1, y + height+1)) +def convert_to_png_if_needed(img_path): + extension = get_file_extension_lowercase(img_path) + name = os.path.basename(img_path) + file_name = os.path.join("/tmp/", name) + if extension == "exr": + channels = OpenEXR.InputFile(img_path).header()['channels'] + if 'RenderLayer.Combined.R' in channels: + sys.exit("There is no support for OpenEXR multilayer") + ConvertEXRToPNG(img_path, file_name) + elif extension == "tga": + ConvertTGAToPNG(img_path, file_name) + else: + file_name = img_path + return Image.open(file_name) - crops.append(scene_crop_down_left) +def get_crops(rendered_scene, x, y, width, height): + crops = [] + offsets = [0, 1, -1] + for x_offset in offsets: + for y_offset in offsets: + crop = rendered_scene.crop((x + x_offset, + y + y_offset, + x + width - x_offset, + y + height - y_offset)) + crops.append(crop) return crops + def get_metrics(): classifier, feature_labels = load_classifier() available_metrics = ImgMetrics.get_metric_classes() @@ -193,12 +165,14 @@ def get_metrics(): effective_metrics.append(metric) return effective_metrics, classifier, feature_labels, available_metrics + def 
get_labels_from_metrics(metrics): labels = [] for metric in metrics: labels.extend(metric.get_lables()) return labels + def compare_images(image_a, image_b, metrics) -> Dict: """ This the entry point for calculating metrics between image_a, image_b diff --git a/apps/blender/resources/images/scripts_verifier/verificator.py b/apps/blender/resources/images/scripts_verifier/verificator.py index f2d20ac383..fa03c9f6e0 100644 --- a/apps/blender/resources/images/scripts_verifier/verificator.py +++ b/apps/blender/resources/images/scripts_verifier/verificator.py @@ -4,7 +4,7 @@ import blender_render as blender from crop_generator import WORK_DIR, OUTPUT_DIR, SubImage, Region, PixelRegion, \ generate_single_random_crop_data, Crop -from img_metrics_calculator import compare_crop_window +from img_metrics_calculator import calculate_metrics def get_crop_with_id(id: int, crops: [List[Crop]]) -> Optional[Crop]: for crop in crops: @@ -78,10 +78,10 @@ def make_verdict( subtask_file_paths, crops, results ): for crop, subtask in zip(crop_data['results'], subtask_file_paths): crop_path = os.path.join(OUTPUT_DIR, crop) - results_path = compare_crop_window(crop_path, + results_path = calculate_metrics(crop_path, subtask, left, top, - output_filename_path=os.path.join(OUTPUT_DIR, crop_data['crop']['outfilebasename'] + "metrics.txt")) + metrics_output_filename=os.path.join(OUTPUT_DIR, crop_data['crop']['outfilebasename'] + "metrics.txt")) with open(results_path, 'r') as f: data = json.load(f) From 8fef6d32d35e8d072d722a96d83d944fe320ee1e Mon Sep 17 00:00:00 2001 From: "nieznany.sprawiciel" Date: Thu, 7 Feb 2019 15:10:31 +0100 Subject: [PATCH 5/7] Fix to pixel shifts --- .../scripts_verifier/img_metrics_calculator.py | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/apps/blender/resources/images/scripts_verifier/img_metrics_calculator.py b/apps/blender/resources/images/scripts_verifier/img_metrics_calculator.py index ec26a52802..d8f41d901f 100644 --- a/apps/blender/resources/images/scripts_verifier/img_metrics_calculator.py +++ b/apps/blender/resources/images/scripts_verifier/img_metrics_calculator.py @@ -1,3 +1,4 @@ +import itertools import os import sys from typing import Dict @@ -142,15 +143,11 @@ def convert_to_png_if_needed(img_path): def get_crops(rendered_scene, x, y, width, height): - crops = [] - offsets = [0, 1, -1] - for x_offset in offsets: - for y_offset in offsets: - crop = rendered_scene.crop((x + x_offset, - y + y_offset, - x + width - x_offset, - y + height - y_offset)) - crops.append(crop) + offsets = itertools.product([0, -1, 1], repeat=2) + crops = [rendered_scene.crop((x + x_offset, y + y_offset, + x + width + x_offset, + y + height + y_offset)) + for x_offset, y_offset in offsets] return crops From ae97391810c35ee80fbdc4d016275b4ef2412b5a Mon Sep 17 00:00:00 2001 From: Igor Adamski Date: Fri, 8 Feb 2019 16:10:47 +0100 Subject: [PATCH 6/7] Run docker tests on linux only --- tests/golem/verificator/test_blenderverifier.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/tests/golem/verificator/test_blenderverifier.py b/tests/golem/verificator/test_blenderverifier.py index c14b1888d6..48a428a8a9 100644 --- a/tests/golem/verificator/test_blenderverifier.py +++ b/tests/golem/verificator/test_blenderverifier.py @@ -3,7 +3,7 @@ import pytest from unittest import mock -from golem.core.common import get_golem_path +from golem.core.common import get_golem_path, is_linux from golem.core.deferred import sync_wait from golem.docker.image import 
DockerImage from golem.docker.manager import DockerManager @@ -14,8 +14,11 @@ @pytest.mark.slow +@pytest.mark.skipif( + not is_linux(), + reason='Docker is only available on Linux buildbots') class TestBlenderVerifier(TempDirFixture): - TIMEOUT = 60 + TIMEOUT = 120 def setUp(self): # pylint: disable=R0915 From 86e72e245ee3b3c553ea91de28224f892e3de692 Mon Sep 17 00:00:00 2001 From: Igor Adamski Date: Fri, 15 Feb 2019 15:06:57 +0100 Subject: [PATCH 7/7] freeze scikit-learn --- apps/blender/resources/images/scripts_verifier/requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/apps/blender/resources/images/scripts_verifier/requirements.txt b/apps/blender/resources/images/scripts_verifier/requirements.txt index 10d8498e88..e97f6cc23d 100644 --- a/apps/blender/resources/images/scripts_verifier/requirements.txt +++ b/apps/blender/resources/images/scripts_verifier/requirements.txt @@ -1,4 +1,5 @@ --extra-index-url https://builds.golem.network +scikit-learn==0.19.1 OpenEXR scipy six==1.5
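
A note on the error path introduced in PATCH 3/7: start_verification() now pushes Docker and verification failures through the Deferred's errback instead of swallowing them and firing callback(True), which is what lets test_docker_error assert on 'Subtask computation failed with exit code 1'. The snippet below is a minimal sketch of that Deferred contract only, assuming Twisted is installed; the print handlers are stand-ins for the real caller, not code from the patches.

# Minimal sketch (not from the patches) of the errback flow BlenderVerifier
# relies on after PATCH 3/7; the handlers below stand in for the caller,
# e.g. sync_wait() in the tests, which the tests expect to re-raise the error.
from twisted.internet.defer import Deferred
from twisted.python.failure import Failure

finished = Deferred()
finished.addCallbacks(
    lambda verdict: print('verification accepted:', verdict),
    lambda failure: print('verification failed:', failure.getErrorMessage()),
)

# Before the patch the exception was logged and callback(True) fired anyway;
# now the failure reaches whoever waits on the Deferred:
finished.errback(Failure(RuntimeError('Subtask computation failed with exit code 1')))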
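
The get_crops() rewrite in PATCH 4/7 and its sign fix in PATCH 5/7 are easiest to check with concrete numbers: adding the same one-pixel offset to both corners of the PIL crop box translates the window, while the earlier loop subtracted the offset from the right/bottom corner and so resized it instead. Below is a small sketch of the fixed box arithmetic, assuming nothing beyond the standard library; crop_boxes is a stand-in name and the tuples follow PIL.Image.crop's (left, upper, right, lower) convention.

# Sketch (not part of the patches) of the boxes get_crops() produces after
# PATCH 5/7. itertools.product([0, -1, 1], repeat=2) yields (0, 0) first, so
# the unshifted crop stays at index 0, which img_metrics_calculator.py tries
# first before falling back to the shifted ones.
import itertools


def crop_boxes(x, y, width, height):
    offsets = itertools.product([0, -1, 1], repeat=2)
    return [(x + dx, y + dy, x + width + dx, y + height + dy)
            for dx, dy in offsets]


for left, upper, right, lower in crop_boxes(x=10, y=20, width=8, height=8):
    # Every one of the nine boxes keeps the 8x8 size; only its position moves.
    assert (right - left, lower - upper) == (8, 8)
    print(left, upper, right, lower)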
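
PATCH 7/7 pins scikit-learn==0.19.1 in the verifier image. A plausible reason (an assumption on my part, the commit message does not say) is that the image ships a pre-trained classifier as a pickle (tree35_[crr=87.71][frr=0.92].pkl), and pickled scikit-learn estimators are not guaranteed to unpickle, or to behave identically, under a different release. The guard below is a hypothetical illustration of that constraint; it is not how decision_tree.DecisionTree.load works, and EXPECTED_SKLEARN merely mirrors the version frozen in requirements.txt.

# Hypothetical guard (not from the patches): refuse to unpickle a classifier
# under a scikit-learn release other than the one it was trained with.
import pickle

import sklearn

EXPECTED_SKLEARN = '0.19.1'  # the version pinned in requirements.txt


def load_pickled_classifier(path, expected_version=EXPECTED_SKLEARN):
    if sklearn.__version__ != expected_version:
        raise RuntimeError(
            'classifier was trained with scikit-learn %s but %s is installed'
            % (expected_version, sklearn.__version__))
    with open(path, 'rb') as handle:
        return pickle.load(handle)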