From eeec23a46ea0699716ab256559a65bcd296a435b Mon Sep 17 00:00:00 2001 From: Mohse Morad Date: Thu, 7 Nov 2024 15:22:56 +0200 Subject: [PATCH] Support show-severity & show-cluster name in csv formatter --- robusta_krr/core/models/config.py | 5 +- robusta_krr/formatters/csv.py | 87 +++++++---- robusta_krr/main.py | 29 +++- tests/formatters/test_csv_formatter.py | 190 +++++++++++++++++++------ 4 files changed, 232 insertions(+), 79 deletions(-) diff --git a/robusta_krr/core/models/config.py b/robusta_krr/core/models/config.py index d7c92976..32241ed1 100644 --- a/robusta_krr/core/models/config.py +++ b/robusta_krr/core/models/config.py @@ -58,10 +58,11 @@ class Config(pd.BaseSettings): strategy: str log_to_stderr: bool width: Optional[int] = pd.Field(None, ge=1) + show_severity: bool = True # Output Settings file_output: Optional[str] = pd.Field(None) - file_output_dynamic = bool = pd.Field(False) + file_output_dynamic: bool = pd.Field(False) slack_output: Optional[str] = pd.Field(None) other_args: dict[str, Any] @@ -105,7 +106,7 @@ def validate_namespaces(cls, v: Union[list[str], Literal["*"]]) -> Union[list[st for val in v: if val.startswith("*"): raise ValueError("Namespace's values cannot start with an asterisk (*)") - + return [val.lower() for val in v] @pd.validator("resources", pre=True) diff --git a/robusta_krr/formatters/csv.py b/robusta_krr/formatters/csv.py index 812c35bd..a8106c45 100644 --- a/robusta_krr/formatters/csv.py +++ b/robusta_krr/formatters/csv.py @@ -1,14 +1,30 @@ -import itertools import csv -import logging import io +import itertools +import logging +from typing import Any from robusta_krr.core.abstract import formatters -from robusta_krr.core.models.allocations import RecommendationValue, format_recommendation_value, format_diff, NONE_LITERAL, NAN_LITERAL +from robusta_krr.core.models.allocations import NONE_LITERAL, format_diff, format_recommendation_value from robusta_krr.core.models.result import ResourceScan, ResourceType, Result logger = logging.getLogger("krr") + +NAMESPACE_HEADER = "Namespace" +NAME_HEADER = "Name" +PODS_HEADER = "Pods" +OLD_PODS_HEADER = "Old Pods" +TYPE_HEADER = "Type" +CONTAINER_HEADER = "Container" +CLUSTER_HEADER = "Cluster" +SEVERITY_HEADER = "Severity" + +RESOURCE_DIFF_HEADER = "{resource_name} Diff" +RESOURCE_REQUESTS_HEADER = "{resource_name} Requests" +RESOURCE_LIMITS_HEADER = "{resource_name} Limits" + + def _format_request_str(item: ResourceScan, resource: ResourceType, selector: str) -> str: allocated = getattr(item.object.allocations, selector)[resource] recommended = getattr(item.recommended, selector)[resource] @@ -20,12 +36,8 @@ def _format_request_str(item: ResourceScan, resource: ResourceType, selector: st if diff != "": diff = f"({diff}) " - return ( - diff - + format_recommendation_value(allocated) - + " -> " - + format_recommendation_value(recommended.value) - ) + return diff + format_recommendation_value(allocated) + " -> " + format_recommendation_value(recommended.value) + def _format_total_diff(item: ResourceScan, resource: ResourceType, pods_current: int) -> str: selector = "requests" @@ -34,43 +46,58 @@ def _format_total_diff(item: ResourceScan, resource: ResourceType, pods_current: return format_diff(allocated, recommended, selector, pods_current) + @formatters.register("csv") def csv_exporter(result: Result) -> str: # We need to order the resource columns so that they are in the format of Namespace,Name,Pods,Old Pods,Type,Container,CPU Diff,CPU Requests,CPU Limits,Memory Diff,Memory Requests,Memory Limits - 
resource_columns = [] + csv_columns = ["Namespace", "Name", "Pods", "Old Pods", "Type", "Container"] + + if result.config and result.config.show_cluster_name: + csv_columns.insert(0, "Cluster") + + if result.config and result.config.show_severity: + csv_columns.append("Severity") + for resource in ResourceType: - resource_columns.append(f"{resource.name} Diff") - resource_columns.append(f"{resource.name} Requests") - resource_columns.append(f"{resource.name} Limits") + csv_columns.append(RESOURCE_DIFF_HEADER.format(resource_name=resource.name)) + csv_columns.append(RESOURCE_REQUESTS_HEADER.format(resource_name=resource.name)) + csv_columns.append(RESOURCE_LIMITS_HEADER.format(resource_name=resource.name)) output = io.StringIO() - csv_writer = csv.writer(output) - csv_writer.writerow([ - "Namespace", "Name", "Pods", "Old Pods", "Type", "Container", - *resource_columns - ]) + csv_writer = csv.DictWriter(output, csv_columns, extrasaction="ignore") + csv_writer.writeheader() + # csv_writer.writerow(csv_columns) for _, group in itertools.groupby( enumerate(result.scans), key=lambda x: (x[1].object.cluster, x[1].object.namespace, x[1].object.name) ): group_items = list(group) - for j, (i, item) in enumerate(group_items): + for j, (_, item) in enumerate(group_items): full_info_row = j == 0 - row = [ - item.object.namespace if full_info_row else "", - item.object.name if full_info_row else "", - f"{item.object.current_pods_count}" if full_info_row else "", - f"{item.object.deleted_pods_count}" if full_info_row else "", - item.object.kind if full_info_row else "", - item.object.container, - ] + row: dict[str, Any] = { + NAMESPACE_HEADER: item.object.namespace if full_info_row else "", + NAME_HEADER: item.object.name if full_info_row else "", + PODS_HEADER: f"{item.object.current_pods_count}" if full_info_row else "", + OLD_PODS_HEADER: f"{item.object.deleted_pods_count}" if full_info_row else "", + TYPE_HEADER: item.object.kind if full_info_row else "", + CONTAINER_HEADER: item.object.container, + SEVERITY_HEADER: item.severity, + CLUSTER_HEADER: item.object.cluster, + } for resource in ResourceType: - row.append(_format_total_diff(item, resource, item.object.current_pods_count)) - row += [_format_request_str(item, resource, selector) for selector in ["requests", "limits"]] + row[RESOURCE_DIFF_HEADER.format(resource_name=resource.name)] = _format_total_diff( + item, resource, item.object.current_pods_count + ) + row[RESOURCE_REQUESTS_HEADER.format(resource_name=resource.name)] = _format_request_str( + item, resource, "requests" + ) + row[RESOURCE_LIMITS_HEADER.format(resource_name=resource.name)] = _format_request_str( + item, resource, "limits" + ) csv_writer.writerow(row) - + return output.getvalue() diff --git a/robusta_krr/main.py b/robusta_krr/main.py index dd9ee03b..9ac8666d 100644 --- a/robusta_krr/main.py +++ b/robusta_krr/main.py @@ -19,7 +19,12 @@ from robusta_krr.core.runner import Runner from robusta_krr.utils.version import get_version -app = typer.Typer(pretty_exceptions_show_locals=False, pretty_exceptions_short=True, no_args_is_help=True, help="IMPORTANT: Run `krr simple --help` to see all cli flags!") +app = typer.Typer( + pretty_exceptions_show_locals=False, + pretty_exceptions_short=True, + no_args_is_help=True, + help="IMPORTANT: Run `krr simple --help` to see all cli flags!", +) # NOTE: Disable insecure request warnings, as it might be expected to use self-signed certificates inside the cluster urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) @@ -216,7 
+221,16 @@ def run_strategy( rich_help_panel="Logging Settings", ), show_cluster_name: bool = typer.Option( - False, "--show-cluster-name", help="In table output, always show the cluster name even for a single cluster", rich_help_panel="Output Settings" + False, + "--show-cluster-name", + help="In table output, always show the cluster name even for a single cluster", + rich_help_panel="Output Settings", + ), + show_severity: bool = typer.Option( + True, + "--show-severity", + help="Whether to include the severity in the output or not", + rich_help_panel="Output Settings", ), verbose: bool = typer.Option( False, "--verbose", "-v", help="Enable verbose mode", rich_help_panel="Logging Settings" @@ -234,10 +248,16 @@ def run_strategy( rich_help_panel="Logging Settings", ), file_output: Optional[str] = typer.Option( - None, "--fileoutput", help="Filename to write output to (if not specified, file output is disabled)", rich_help_panel="Output Settings" + None, + "--fileoutput", + help="Filename to write output to (if not specified, file output is disabled)", + rich_help_panel="Output Settings", ), file_output_dynamic: bool = typer.Option( - False, "--fileoutput-dynamic", help="Ignore --fileoutput and write files to the current directory in the format krr-{datetime}.{format} (e.g. krr-20240518223924.csv)", rich_help_panel="Output Settings" + False, + "--fileoutput-dynamic", + help="Ignore --fileoutput and write files to the current directory in the format krr-{datetime}.{format} (e.g. krr-20240518223924.csv)", + rich_help_panel="Output Settings", ), slack_output: Optional[str] = typer.Option( None, @@ -284,6 +304,7 @@ def run_strategy( file_output=file_output, file_output_dynamic=file_output_dynamic, slack_output=slack_output, + show_severity=show_severity, strategy=_strategy_name, other_args=strategy_args, ) diff --git a/tests/formatters/test_csv_formatter.py b/tests/formatters/test_csv_formatter.py index bd8b48f9..43841d0c 100644 --- a/tests/formatters/test_csv_formatter.py +++ b/tests/formatters/test_csv_formatter.py @@ -1,9 +1,12 @@ +import csv +import io import json from typing import Any + +import pytest + from robusta_krr.core.models.result import Result from robusta_krr.formatters.csv import csv_exporter -import io -import csv RESULT = """ { @@ -32,8 +35,8 @@ "kind": "Deployment", "allocations": { "requests": { - "cpu": 1.0, - "memory": 1.0 + "cpu": "50m", + "memory": "2048Mi" }, "limits": { "cpu": 2.0, @@ -46,12 +49,12 @@ "recommended": { "requests": { "cpu": { - "value": "?", + "value": 0.0065, "severity": "UNKNOWN" }, "memory": { - "value": "?", - "severity": "UNKNOWN" + "value": 0.5, + "severity": "CRITICAL" } }, "limits": { @@ -60,8 +63,8 @@ "severity": "UNKNOWN" }, "memory": { - "value": "?", - "severity": "UNKNOWN" + "value": 0.5, + "severity": "CRITICAL" } }, "info": { @@ -69,7 +72,7 @@ "memory": "Not enough data" } }, - "severity": "UNKNOWN" + "severity": "CRITICAL" } ], "score": 100, @@ -138,48 +141,149 @@ "oom_memory_buffer_percentage": "25" }, "inside_cluster": false, - "file_output_dynamic": false, - "bool": false + "file_output_dynamic": false } } """ -def test_csv_headers() -> None: +def _load_result(override_config: dict[str, Any]) -> Result: res_data = json.loads(RESULT) + res_data["config"].update(override_config) result = Result(**res_data) - x = csv_exporter(result) - reader = csv.DictReader(io.StringIO(x)) + return result + + +@pytest.mark.parametrize( + "override_config, expected_headers", + [ + ( + {}, + [ + "Namespace", + "Name", + "Pods", + "Old Pods", + "Type", + 
"Container", + "Severity", + "CPU Diff", + "CPU Requests", + "CPU Limits", + "Memory Diff", + "Memory Requests", + "Memory Limits", + ], + ), + ( + {"show_severity": False}, + [ + "Namespace", + "Name", + "Pods", + "Old Pods", + "Type", + "Container", + "CPU Diff", + "CPU Requests", + "CPU Limits", + "Memory Diff", + "Memory Requests", + "Memory Limits", + ], + ), + ( + {"show_cluster_name": True}, + [ + "Cluster", + "Namespace", + "Name", + "Pods", + "Old Pods", + "Type", + "Container", + "Severity", + "CPU Diff", + "CPU Requests", + "CPU Limits", + "Memory Diff", + "Memory Requests", + "Memory Limits", + ], + ), + ], +) +def test_csv_headers(override_config: dict[str, Any], expected_headers: list[str]) -> None: + result = _load_result(override_config=override_config) + output = csv_exporter(result) + reader = csv.DictReader(io.StringIO(output)) - expected_headers: list[str] = [ - "Namespace", - "Name", - "Pods", - "Old Pods", - "Type", - "Container", - "CPU Diff", - "CPU Requests", - "CPU Limits", - "Memory Diff", - "Memory Requests", - "Memory Limits", - ] assert reader.fieldnames == expected_headers - expected_first_row: dict[str, str] = { - "Namespace": "default", - "Name": "mock-object-1", - "Pods": "2", - "Old Pods": "1", - "Type": "Deployment", - "Container": "mock-container-1", - "CPU Diff": "", - "CPU Requests": "1.0 -> ?", - "CPU Limits": "2.0 -> ?", - "Memory Diff": "", - "Memory Requests": "1.0 -> ?", - "Memory Limits": "2.0 -> ?", - } + +@pytest.mark.parametrize( + "override_config, expected_first_row", + [ + ( + {}, + { + "Namespace": "default", + "Name": "mock-object-1", + "Pods": "2", + "Old Pods": "1", + "Type": "Deployment", + "Container": "mock-container-1", + 'Severity': 'CRITICAL', + "CPU Diff": "-87m", + "CPU Requests": "(-43m) 50m -> 6m", + "CPU Limits": "2.0 -> ?", + "Memory Diff": "-4096Mi", + "Memory Requests": "(-2048Mi) 2048Mi -> 500m", + "Memory Limits": "2.0 -> 500m", + }, + ), + ( + {"show_severity": False}, + { + "Namespace": "default", + "Name": "mock-object-1", + "Pods": "2", + "Old Pods": "1", + "Type": "Deployment", + "Container": "mock-container-1", + "CPU Diff": "-87m", + "CPU Requests": "(-43m) 50m -> 6m", + "CPU Limits": "2.0 -> ?", + "Memory Diff": "-4096Mi", + "Memory Requests": "(-2048Mi) 2048Mi -> 500m", + "Memory Limits": "2.0 -> 500m", + }, + ), + ( + {"show_cluster_name": True}, + { + "Cluster": "mock-cluster", + "Namespace": "default", + "Name": "mock-object-1", + "Pods": "2", + "Old Pods": "1", + "Type": "Deployment", + "Container": "mock-container-1", + 'Severity': 'CRITICAL', + "CPU Diff": "-87m", + "CPU Requests": "(-43m) 50m -> 6m", + "CPU Limits": "2.0 -> ?", + "Memory Diff": "-4096Mi", + "Memory Requests": "(-2048Mi) 2048Mi -> 500m", + "Memory Limits": "2.0 -> 500m", + }, + ), + ], +) +def test_csv_row_value(override_config: dict[str, Any], expected_first_row: list[str]) -> None: + result = _load_result(override_config=override_config) + output = csv_exporter(result) + reader = csv.DictReader(io.StringIO(output)) + first_row: dict[str, Any] = next(reader) assert first_row == expected_first_row