diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 69e832126..d511f23dc 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -6,8 +6,8 @@ repos:
     hooks:
       - id: shed
   # Python Linting
-  - repo: https://gitlab.com/pycqa/flake8
-    rev: 3.9.2
+  - repo: https://github.com/pycqa/flake8
+    rev: 4.0.1
     hooks:
       - id: flake8
         additional_dependencies:
diff --git a/cubedash/_model.py b/cubedash/_model.py
index 23112320e..9ef06f220 100644
--- a/cubedash/_model.py
+++ b/cubedash/_model.py
@@ -295,7 +295,10 @@ def enable_prometheus():
     )
 
     metrics = GunicornInternalPrometheusMetrics(app, group_by="endpoint")
-    _LOG.info(f"Prometheus metrics enabled : {metrics}")
+    _LOG.info(
+        "Prometheus metrics enabled : {metrics}",
+        extra=dict(metrics=metrics)
+    )
 
 
 @app.before_first_request
diff --git a/cubedash/_utils.py b/cubedash/_utils.py
index 351bfdb96..a32ae90c9 100644
--- a/cubedash/_utils.py
+++ b/cubedash/_utils.py
@@ -407,7 +407,7 @@ def dataset_created(dataset: Dataset) -> Optional[datetime]:
         try:
             return default_utc(dc_utils.parse_time(value))
         except ValueError:
-            _LOG.warn("invalid_dataset.creation_dt", dataset_id=dataset.id, value=value)
+            _LOG.warning("invalid_dataset.creation_dt", dataset_id=dataset.id, value=value)
 
     return None
 
@@ -818,12 +818,12 @@ def dataset_shape(ds: Dataset) -> Tuple[Optional[Polygon], bool]:
         return None, False
 
     if extent is None:
-        log.warn("invalid_dataset.empty_extent")
+        log.warning("invalid_dataset.empty_extent")
         return None, False
 
     geom = shape(extent.to_crs(CRS(_TARGET_CRS)))
 
     if not geom.is_valid:
-        log.warn(
+        log.warning(
             "invalid_dataset.invalid_extent",
             reason_text=shapely.validation.explain_validity(geom),
         )
@@ -837,7 +837,7 @@ def dataset_shape(ds: Dataset) -> Tuple[Optional[Polygon], bool]:
             return clean, False
 
     if geom.is_empty:
-        _LOG.warn("invalid_dataset.empty_extent_geom", dataset_id=ds.id)
+        _LOG.warning("invalid_dataset.empty_extent_geom", dataset_id=ds.id)
         return None, False
 
     return geom, True
diff --git a/cubedash/generate.py b/cubedash/generate.py
index 5654fffbc..a9a807f34 100755
--- a/cubedash/generate.py
+++ b/cubedash/generate.py
@@ -131,7 +131,7 @@ def print_status(product_name=None, year=None, month=None, day=None, summary=Non
         log.warning("product.unsupported", reason=e.reason)
         return product_name, GenerateResult.UNSUPPORTED, None
     except Exception:
-        log.exception("product.error", exc_info=True)
+        log.exception("product.error")
         return product_name, GenerateResult.ERROR, None
     finally:
         store.index.close()
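Review note on the `generate.py` hunk above: `Logger.exception()` already logs at ERROR level with the active exception's traceback attached, so the explicit `exc_info=True` was redundant. A minimal stdlib sketch of the behaviour (the logger name and the stand-in error are illustrative only):

    import logging

    logging.basicConfig(level=logging.INFO)
    log = logging.getLogger("cubedash.generate")

    try:
        raise RuntimeError("summary generation failed")  # stand-in failure
    except RuntimeError:
        # .exception(...) is equivalent to .error(..., exc_info=True):
        # the traceback of the exception being handled is captured.
        log.exception("product.error")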
diff --git a/cubedash/summary/_model.py b/cubedash/summary/_model.py
index a39dbfb48..4bf015ac0 100644
--- a/cubedash/summary/_model.py
+++ b/cubedash/summary/_model.py
@@ -273,7 +273,7 @@ def footprint_srid(self):
         epsg = self.footprint_crs.lower()
 
         if not epsg.startswith("epsg:"):
-            _LOG.warn("unsupported.to_srid", crs=self.footprint_crs)
+            _LOG.warning("unsupported.to_srid", crs=self.footprint_crs)
             return None
         return int(epsg.split(":")[1])
 
@@ -319,12 +319,12 @@ def _create_unified_footprint(
             # avoid non-noded intersection.
             # TODO: does shapely have a snap-to-grid?
             try:
-                _LOG.warn("summary.footprint.invalid_union", exc_info=True)
+                _LOG.warning("summary.footprint.invalid_union", exc_info=True)
                 geometry_union = shapely.ops.unary_union(
                     [p.footprint_geometry.buffer(0.001) for p in with_valid_geometries]
                 )
             except ValueError:
-                _LOG.warn("summary.footprint.invalid_buffered_union", exc_info=True)
+                _LOG.warning("summary.footprint.invalid_buffered_union", exc_info=True)
                 # Attempt 3 at union: Recursive filter bad polygons first
                 polygonlist = _polygon_chain(with_valid_geometries)
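Review note on the `.warn` to `.warning` renames throughout this change: `Logger.warn()` has been a deprecated alias of `Logger.warning()` since Python 3.3, and newer Pythons emit a `DeprecationWarning` when it is called; structlog-style loggers like the ones in this codebase also expose `warning`, so the rename is mechanical. A quick stdlib demonstration (logger name is illustrative):

    import logging
    import warnings

    warnings.simplefilter("always", DeprecationWarning)
    logging.basicConfig(level=logging.WARNING)
    log = logging.getLogger("demo")

    log.warn("old spelling")     # emits DeprecationWarning: use warning() instead
    log.warning("new spelling")  # the supported form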
diff --git a/cubedash/summary/_schema.py b/cubedash/summary/_schema.py
index 9ec19684e..2bd6f5dd4 100644
--- a/cubedash/summary/_schema.py
+++ b/cubedash/summary/_schema.py
@@ -304,7 +304,7 @@ def update_schema(engine: Engine) -> Set[PleaseRefresh]:
     refresh = set()
 
     if not pg_column_exists(engine, f"{CUBEDASH_SCHEMA}.product", "fixed_metadata"):
-        _LOG.warn("schema.applying_update.add_fixed_metadata")
+        _LOG.warning("schema.applying_update.add_fixed_metadata")
         engine.execute(
             f"""
         alter table {CUBEDASH_SCHEMA}.product add column fixed_metadata jsonb
@@ -316,20 +316,20 @@ def update_schema(engine: Engine) -> Set[PleaseRefresh]:
         engine,
         f"{CUBEDASH_SCHEMA}.{_COLLECTION_ITEMS_INDEX.name}",
     ):
-        _LOG.warn("schema.applying_update.add_collection_items_idx")
+        _LOG.warning("schema.applying_update.add_collection_items_idx")
         _COLLECTION_ITEMS_INDEX.create(engine)
 
     if not pg_exists(
         engine,
         f"{CUBEDASH_SCHEMA}.{_ALL_COLLECTIONS_ORDER_INDEX.name}",
     ):
-        _LOG.warn("schema.applying_update.add_all_collections_idx")
+        _LOG.warning("schema.applying_update.add_all_collections_idx")
         _ALL_COLLECTIONS_ORDER_INDEX.create(engine)
 
     if not pg_column_exists(
         engine, f"{CUBEDASH_SCHEMA}.time_overview", "product_refresh_time"
     ):
-        _LOG.warn("schema.applying_update.add_refresh_time")
+        _LOG.warning("schema.applying_update.add_refresh_time")
         engine.execute(
             f"""
         alter table {CUBEDASH_SCHEMA}.time_overview
@@ -340,7 +340,7 @@ def update_schema(engine: Engine) -> Set[PleaseRefresh]:
     if not pg_column_exists(
         engine, f"{CUBEDASH_SCHEMA}.product", "last_successful_summary"
     ):
-        _LOG.warn("schema.applying_update.add_summary_success_time")
+        _LOG.warning("schema.applying_update.add_summary_success_time")
         engine.execute(
             f"""
         alter table {CUBEDASH_SCHEMA}.product
@@ -363,7 +363,7 @@ def check_or_update_odc_schema(engine: Engine):
     try:
         # We can try to install it ourselves if we have permission, using ODC's code.
         if not pg_column_exists(engine, ODC_DATASET.fullname, "updated"):
-            _LOG.warn("schema.applying_update.add_odc_change_triggers")
+            _LOG.warning("schema.applying_update.add_odc_change_triggers")
             _utils.install_timestamp_trigger(engine)
     except ProgrammingError as e:
         # We don't have permission.
@@ -389,14 +389,14 @@ def check_or_update_odc_schema(engine: Engine):
     if not pg_index_exists(
         engine, ODC_DATASET.schema, ODC_DATASET.name, "ix_dataset_added"
     ):
-        _LOG.warn("schema.applying_update.add_odc_added_index")
+        _LOG.warning("schema.applying_update.add_odc_added_index")
         statements.append(
             f"create index ix_dataset_added on {ODC_DATASET.fullname}(added desc);"
         )
     if not pg_index_exists(
         engine, ODC_DATASET.schema, ODC_DATASET.name, "ix_dataset_type_changed"
     ):
-        _LOG.warn("schema.applying_update.add_odc_changed_index")
+        _LOG.warning("schema.applying_update.add_odc_changed_index")
         statements.append(
             f"create index ix_dataset_type_changed on "
             f"{ODC_DATASET.fullname}(dataset_type_ref, greatest(added, updated, archived) desc);"
diff --git a/cubedash/summary/_stores.py b/cubedash/summary/_stores.py
index 389bf0dc3..cc5040943 100644
--- a/cubedash/summary/_stores.py
+++ b/cubedash/summary/_stores.py
@@ -761,7 +761,8 @@ def _get_linked_products(
         ).fetchone()
 
         _LOG.info(
-            f"product.links.{kind}",
+            "product.links.{kind}",
+            extra=dict(kind=kind),
             product=product.name,
             linked=linked_product_names,
             sample_percentage=round(sample_percentage, 2),
@@ -1468,7 +1469,7 @@ def refresh(
         # Then choose the whole time range of the product to generate.
         log.info("product.generate_whole_range")
 
         if force:
-            log.warn("forcing_refresh")
+            log.warning("forcing_refresh")
             # Regenerate the old months too, in case any have been deleted.
             old_months = self._already_summarised_months(product_name)
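Review note on the two `_LOG.info(...)` hunks (`_model.py` and `_stores.py`): moving the interpolated value out of the f-string keeps the log message constant, which is what log-aggregation tooling (and linters such as flake8-logging-format, if enabled here) prefer. One caveat worth flagging: with a plain stdlib logger, keys passed via `extra` become attributes on the `LogRecord` but are not substituted into the `{metrics}`/`{kind}` placeholders; a formatter (or a structlog processor) has to surface them. A minimal stdlib sketch, assuming a plain `logging.Logger` (handler setup and values are illustrative):

    import logging

    # Expose an `extra` key in the output via the formatter, not the message.
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter("%(message)s (metrics=%(metrics)s)"))

    log = logging.getLogger("cubedash")
    log.addHandler(handler)
    log.setLevel(logging.INFO)

    # `metrics` becomes a LogRecord attribute; the message string stays constant,
    # so identical events aggregate together regardless of the attached value.
    log.info("Prometheus metrics enabled", extra=dict(metrics="<registry>"))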