From 909dfad21dec204fd4bf8b115f8a7419cf4e0eb1 Mon Sep 17 00:00:00 2001
From: Andrea Nardi
Date: Tue, 6 Feb 2024 09:32:17 +0100
Subject: [PATCH] my changes

---
 .../genetic_algorithm/fitness_evaluator.py   |  3 +-
 .../classes/stages/InterCoreMappingStage.py  | 34 ++++++++++++++-----
 2 files changed, 27 insertions(+), 10 deletions(-)

diff --git a/stream/classes/opt/allocation/genetic_algorithm/fitness_evaluator.py b/stream/classes/opt/allocation/genetic_algorithm/fitness_evaluator.py
index 327ca38a..a5125f61 100644
--- a/stream/classes/opt/allocation/genetic_algorithm/fitness_evaluator.py
+++ b/stream/classes/opt/allocation/genetic_algorithm/fitness_evaluator.py
@@ -16,13 +16,14 @@ def __init__(
         workload: Workload | None = None,
         accelerator: Accelerator | None = None,
         node_hw_performances: dict[ComputationNode, dict[Core, CostModelEvaluation]] | None = None,
+        original_workload: Workload | None = None
     ) -> None:
         self.workload = workload
         self.accelerator = accelerator
         self.node_hw_performances = node_hw_performances
         # self.num_cores = len(inputs.accelerator.cores)
 
-    def get_fitness(self):
+    def get_fitness(self, core_allocations: list, return_scme=False):
         raise NotImplementedError
 
 
diff --git a/stream/classes/stages/InterCoreMappingStage.py b/stream/classes/stages/InterCoreMappingStage.py
index a64b3754..4c0b62a9 100644
--- a/stream/classes/stages/InterCoreMappingStage.py
+++ b/stream/classes/stages/InterCoreMappingStage.py
@@ -14,13 +14,15 @@
     GeneticAlgorithm,
 )
 from stream.classes.opt.allocation.genetic_algorithm.fitness_evaluator import (
-    StandardFitnessEvaluator,
+    StandardFitnessEvaluator, FitnessEvaluator
 )
 from stream.utils import get_too_large_operands
 from zigzag.workload.Workload import Workload
+from typing import Type, TypeVar
 
 logger = logging.getLogger(__name__)
+TFitnessEvaluator = TypeVar("TFitnessEvaluator", bound=FitnessEvaluator)
 
 
 class InterCoreMappingStage(Stage):
     """
@@ -44,6 +46,7 @@ def __init__(
         plot_full_schedule: bool = False,
         plot_data_transfer: bool = False,
         operands_to_prefetch: list[LayerOperand],
+        custom_fitness_evaluator: Type[TFitnessEvaluator] | None = None,
         **kwargs,
     ):
         """Initialize the InterCoreMappingStage.
@@ -68,6 +71,8 @@ def __init__(
         self.plot_data_transfer = plot_data_transfer
         self.operands_to_prefetch = operands_to_prefetch
         self.scheduling_order = kwargs.get("scheduling_order", None)
+        self.original_workload = kwargs["original_workload"]
+        self.custom_fitness_evaluator = custom_fitness_evaluator
 
         # Determine the set of all (layer, group) combinations to be allocated separately
         self.layer_groups: list[tuple[int, int]] = sorted(set((n.id, n.group) for n in self.workload.nodes()))
@@ -102,14 +107,25 @@ def __init__(
         self.set_hw_performance_non_flexible_nodes()
 
         # Initialize the fitness evaluator of different core allocations
-        self.fitness_evaluator = StandardFitnessEvaluator(
-            self.workload,
-            self.accelerator,
-            self.node_hw_performances,
-            self.layer_groups_flexible,
-            self.operands_to_prefetch,
-            self.scheduling_order,
-        )
+        if self.custom_fitness_evaluator is not None:
+            self.fitness_evaluator = self.custom_fitness_evaluator(
+                self.workload,
+                self.accelerator,
+                self.node_hw_performances,
+                self.layer_groups_flexible,
+                self.scheduling_order,
+                self.operands_to_prefetch,
+                self.original_workload,
+            )
+        else:
+            self.fitness_evaluator = StandardFitnessEvaluator(
+                self.workload,
+                self.accelerator,
+                self.node_hw_performances,
+                self.layer_groups_flexible,
+                self.operands_to_prefetch,
+                self.scheduling_order,
+            )
 
         # Extract the length of an individual.
         # This is the number of unique original nodes that have more than one possible core allocation
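
Usage note: a minimal sketch of how the custom_fitness_evaluator hook introduced by this patch could be used. Only FitnessEvaluator, InterCoreMappingStage, the get_fitness(core_allocations, return_scme) signature, and the positional argument order used when the stage instantiates the custom class come from the patch; the MyCustomFitnessEvaluator name, its attribute handling, and the placeholder objective are hypothetical.

from stream.classes.opt.allocation.genetic_algorithm.fitness_evaluator import FitnessEvaluator


class MyCustomFitnessEvaluator(FitnessEvaluator):
    """Hypothetical evaluator; scores a core allocation with a single objective."""

    def __init__(
        self,
        workload,
        accelerator,
        node_hw_performances,
        layer_groups_flexible,
        scheduling_order,
        operands_to_prefetch,
        original_workload,
    ) -> None:
        # Argument order matches how InterCoreMappingStage constructs the custom
        # evaluator in this patch. The base class stores workload, accelerator and
        # node_hw_performances; original_workload is forwarded to the new parameter.
        super().__init__(workload, accelerator, node_hw_performances, original_workload)
        self.layer_groups_flexible = layer_groups_flexible
        self.scheduling_order = scheduling_order
        self.operands_to_prefetch = operands_to_prefetch

    def get_fitness(self, core_allocations: list, return_scme=False):
        # Placeholder objective: a real implementation would schedule the workload
        # for the given core_allocations and derive fitness from the result.
        latency = 0
        if return_scme:
            return latency, None
        return latency

The class itself (not an instance) is then passed to the stage via custom_fitness_evaluator=MyCustomFitnessEvaluator, and original_workload must be supplied in the stage's kwargs, since the patched __init__ reads kwargs["original_workload"] unconditionally.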