From 2289a737143005937dddf2292cc567764ef5c574 Mon Sep 17 00:00:00 2001 From: Anne Haley Date: Mon, 2 Mar 2026 13:07:35 -0500 Subject: [PATCH 1/2] fix: update error logging for analytics tasks --- .../tasks/analytics/create_road_network.py | 24 +- .../tasks/analytics/flood_network_failure.py | 204 ++++++++------- .../core/tasks/analytics/flood_simulation.py | 39 +-- .../tasks/analytics/geoai_segmentation.py | 205 ++++++++------- .../core/tasks/analytics/network_recovery.py | 246 +++++++++--------- 5 files changed, 376 insertions(+), 342 deletions(-) diff --git a/uvdat/core/tasks/analytics/create_road_network.py b/uvdat/core/tasks/analytics/create_road_network.py index d194f969..ad4dbeb9 100644 --- a/uvdat/core/tasks/analytics/create_road_network.py +++ b/uvdat/core/tasks/analytics/create_road_network.py @@ -62,17 +62,25 @@ def metadata_for_row(row): @shared_task def create_road_network(result_id): + import logging + import osmnx from xdg_base_dirs import xdg_cache_home + logger = logging.getLogger(__name__) osmnx.settings.cache_folder = xdg_cache_home() / "osmnx" - result = TaskResult.objects.get(id=result_id) - try: - location = result.inputs.get("location") - if location is None: - raise ValueError("location not provided") + # Input validation + location = result.inputs.get("location") + if location is None: + result.write_error("Location not provided") + + if result.error: + result.complete() + return + + try: result.write_status("Fetching road data via OSMnx...") roads = osmnx.graph_from_place(location, network_type="drive") road_nodes, road_edges = osmnx.graph_to_gdfs(roads) @@ -149,6 +157,8 @@ def create_road_network(result_id): vector_data.get_summary() result.outputs = {"roads": dataset.id} - except Exception as e: - result.error = str(e) + + except Exception: + logger.exception("Task failed") + result.error = "An error occurred during this task. See logs for details." 
result.complete() diff --git a/uvdat/core/tasks/analytics/flood_network_failure.py b/uvdat/core/tasks/analytics/flood_network_failure.py index 478c0296..d437f69d 100644 --- a/uvdat/core/tasks/analytics/flood_network_failure.py +++ b/uvdat/core/tasks/analytics/flood_network_failure.py @@ -57,111 +57,115 @@ def run_task(self, *, project, **inputs): @shared_task def flood_network_failure(result_id): + import logging + + logger = logging.getLogger(__name__) result = TaskResult.objects.get(id=result_id) + # Input validation + network = None + network_id = result.inputs.get("network") + if network_id is None: + result.write_error("Network not provided") + else: + try: + network = Network.objects.get(id=network_id) + except Network.DoesNotExist: + result.write_error("Network not found") + + flood_sim = None + flood_sim_id = result.inputs.get("flood_simulation") + if flood_sim_id is None: + result.write_error("Flood simulation not provided") + else: + try: + flood_sim = TaskResult.objects.get(id=flood_sim_id) + except TaskResult.DoesNotExist: + result.write_error("Flood simulation not found") + + tolerance = result.inputs.get("depth_tolerance_meters") + if tolerance is None: + result.write_error("Depth tolerance not provided") + else: + try: + tolerance = float(tolerance) + except (TypeError, ValueError): + result.write_error("Depth tolerance not valid") + if isinstance(tolerance, float) and tolerance <= 0: + result.write_error("Depth tolerance must be greater than 0") + + radius_meters = result.inputs.get("station_radius_meters") + if radius_meters is None: + result.write_error("Station radius not provided") + else: + try: + radius_meters = float(radius_meters) + except (TypeError, ValueError): + result.write_error("Station radius not valid") + if isinstance(radius_meters, float) and radius_meters < 10: + # data is at 10 meter resolution + result.write_error("Station radius must be greater than 10") + + if result.error: + result.complete() + return + try: - # Verify inputs - network = None - network_id = result.inputs.get("network") - if network_id is None: - 
result.write_error("Network not provided") - else: - try: - network = Network.objects.get(id=network_id) - except Network.DoesNotExist: - result.write_error("Network not found") - - flood_sim = None - flood_sim_id = result.inputs.get("flood_simulation") - if flood_sim_id is None: - result.write_error("Flood simulation not provided") - else: - try: - flood_sim = TaskResult.objects.get(id=flood_sim_id) - except TaskResult.DoesNotExist: - result.write_error("Flood simulation not found") - - tolerance = result.inputs.get("depth_tolerance_meters") - if tolerance is None: - result.write_error("Depth tolerance not provided") - else: - try: - tolerance = float(tolerance) - except ValueError: - result.write_error("Depth tolerance not valid") - if tolerance <= 0: - result.write_error("Depth tolerance must be greater than 0") - - radius_meters = result.inputs.get("station_radius_meters") - if radius_meters is None: - result.write_error("Station radius not provided") - else: - try: - radius_meters = float(radius_meters) - except ValueError: - result.write_error("Station radius not valid") - if radius_meters < 10: - # data is at 10 meter resolution - result.write_error("Station radius must be greater than 10") - - # Run task - if not result.error: - # Update name - result.name = ( - f"Failures for Network {network.id} with Flood Result {flood_sim.id}, " - f"{tolerance} Tolerance, {radius_meters} Radius" + # Update name + result.name = ( + f"Failures for Network {network.id} with Flood Result {flood_sim.id}, " + f"{tolerance} Tolerance, {radius_meters} Radius" + ) + result.save() + + n_nodes = network.nodes.count() + flood_dataset_id = flood_sim.outputs.get("flood") + flood_layer = Layer.objects.get(dataset__id=flood_dataset_id) + + # this uses radius_meters to get a rectangular region, not a circular one + def get_station_region(point): + earth_radius_meters = 6378000 + lat_delta = (radius_meters / earth_radius_meters) * (180 / math.pi) + lon_delta = ( + (radius_meters / 
earth_radius_meters) + * (180 / math.pi) + / math.cos(point.y * math.pi / 180) ) - result.save() - - n_nodes = network.nodes.count() - flood_dataset_id = flood_sim.outputs.get("flood") - flood_layer = Layer.objects.get(dataset__id=flood_dataset_id) - - # this uses radius_meters to get a rectangular region, not a circular one - def get_station_region(point): - earth_radius_meters = 6378000 - lat_delta = (radius_meters / earth_radius_meters) * (180 / math.pi) - lon_delta = ( - (radius_meters / earth_radius_meters) - * (180 / math.pi) - / math.cos(point.y * math.pi / 180) - ) - return { - "top": point.y + lat_delta, - "bottom": point.y - lat_delta, - "left": point.x - lon_delta, - "right": point.x + lon_delta, - "units": "EPSG:4326", - } - - # Precompute node regions - node_regions = { - node.id: get_station_region(node.location) for node in network.nodes.all() + return { + "top": point.y + lat_delta, + "bottom": point.y - lat_delta, + "left": point.x - lon_delta, + "right": point.x + lon_delta, + "units": "EPSG:4326", } - # Assume that all frames in flood_layer refer to frames of the same RasterData - raster = flood_layer.frames.first().raster - raster_path = utilities.field_file_to_local_path(raster.cloud_optimized_geotiff) - source = tilesource.get_tilesource_from_path(raster_path) - metadata = source.getMetadata() - - animation_results = {} - node_failures = [] - for frame in metadata.get("frames", []): - frame_index = frame.get("Index") - result.write_status( - f"Evaluating flood levels at {n_nodes} nodes for frame {frame_index}..." 
+ # Precompute node regions + node_regions = {node.id: get_station_region(node.location) for node in network.nodes.all()} + + # Assume that all frames in flood_layer refer to frames of the same RasterData + raster = flood_layer.frames.first().raster + raster_path = utilities.field_file_to_local_path(raster.cloud_optimized_geotiff) + source = tilesource.get_tilesource_from_path(raster_path) + metadata = source.getMetadata() + + animation_results = {} + node_failures = [] + for frame in metadata.get("frames", []): + frame_index = frame.get("Index") + result.write_status( + f"Evaluating flood levels at {n_nodes} nodes for frame {frame_index}..." + ) + for node_id, node_region in node_regions.items(): + region_data, _ = source.getRegion( + region=node_region, + frame=frame_index, + format="numpy", ) - for node_id, node_region in node_regions.items(): - region_data, _ = source.getRegion( - region=node_region, - frame=frame_index, - format="numpy", - ) - if node_id not in node_failures and np.any(np.where(region_data > tolerance)): - node_failures.append(node_id) - animation_results[frame_index] = node_failures.copy() - result.outputs = {"failures": animation_results} - except Exception as e: - result.error = str(e) + if node_id not in node_failures and np.any(np.where(region_data > tolerance)): + node_failures.append(node_id) + animation_results[frame_index] = node_failures.copy() + result.outputs = {"failures": animation_results} + except Exception: + logger.exception("Task failed") + result.error = "An error occurred during this task. See logs for details." 
result.complete() diff --git a/uvdat/core/tasks/analytics/flood_simulation.py b/uvdat/core/tasks/analytics/flood_simulation.py index 9a22815f..76c0597a 100644 --- a/uvdat/core/tasks/analytics/flood_simulation.py +++ b/uvdat/core/tasks/analytics/flood_simulation.py @@ -60,25 +60,30 @@ def run_task(self, *, project, **inputs): @shared_task def flood_simulation(result_id): + import logging from uvdat_flood_sim import run_sim, write_multiframe_geotiff + logger = logging.getLogger(__name__) result = TaskResult.objects.get(id=result_id) - try: - for input_key in [ - "initial_conditions_id", - "time_period", - "hydrograph", - "potential_evapotranspiration_percentile", - "soil_moisture_percentile", - "ground_water_percentile", - "annual_probability", - ]: - if result.inputs.get(input_key) is None: - result.write_error(f"{input_key} not provided") - result.complete() - return + # Input validation + for input_key in [ + "initial_conditions_id", + "time_period", + "hydrograph", + "potential_evapotranspiration_percentile", + "soil_moisture_percentile", + "ground_water_percentile", + "annual_probability", + ]: + if result.inputs.get(input_key) is None: + result.write_error(f"{input_key} not provided") + + if result.error: + result.complete() + return + try: result.write_status("Interpreting input values") initial_conditions_id = result.inputs.get("initial_conditions_id") time_period = result.inputs.get("time_period") @@ -209,6 +214,8 @@ def flood_simulation(result_id): ) result.outputs = {"flood": dataset.id} - except Exception as e: - result.error = str(e) + + except Exception: + logger.exception("Task failed") + result.error = "An error occurred during this task. See logs for details." 
result.complete() diff --git a/uvdat/core/tasks/analytics/geoai_segmentation.py b/uvdat/core/tasks/analytics/geoai_segmentation.py index 2bef8a60..9bb736c0 100644 --- a/uvdat/core/tasks/analytics/geoai_segmentation.py +++ b/uvdat/core/tasks/analytics/geoai_segmentation.py @@ -62,105 +62,112 @@ def run_task(self, *, project, **inputs): @shared_task def geoai_segmentation(result_id): + import logging + + logger = logging.getLogger(__name__) result = TaskResult.objects.get(id=result_id) + + # Input validation + imagery = None + imagery_id = result.inputs.get("aerial_imagery") + segmentation_prompt = result.inputs.get("segmentation_prompt") + tile_size = result.inputs.get("tile_size") + tile_overlap = result.inputs.get("tile_overlap") + threshold = result.inputs.get("threshold") + smoothing_sigma = result.inputs.get("smoothing_sigma") + if imagery_id is None: + result.write_error("Aerial imagery raster data not provided") + else: + try: + imagery = RasterData.objects.get(id=imagery_id) + except RasterData.DoesNotExist: + result.write_error("Aerial imagery raster data not found") + + if result.error: + result.complete() + return + try: - # Verify inputs - imagery = None - imagery_id = result.inputs.get("aerial_imagery") - segmentation_prompt = result.inputs.get("segmentation_prompt") - tile_size = result.inputs.get("tile_size") - tile_overlap = result.inputs.get("tile_overlap") - threshold = result.inputs.get("threshold") - smoothing_sigma = result.inputs.get("smoothing_sigma") - if imagery_id is None: - result.write_error("Aerial imagery raster data not provided") - else: - try: - imagery = RasterData.objects.get(id=imagery_id) - except RasterData.DoesNotExist: - result.write_error("Aerial imagery raster data not found") - - # Run task - if not result.error: - import geoai - - # Update name - result.name = f"Segmentation of {segmentation_prompt} in {imagery.name}" - result.save() - - result.write_status("Reading aerial imagery...") - imagery_path = 
utilities.field_file_to_local_path(imagery.cloud_optimized_geotiff) - segmentation_path = imagery_path.parent / "segmentation.tif" - mask_path = imagery_path.parent / f"{segmentation_prompt}_mask.tif" - - result.write_status("Loading GeoAI CLIPSegmentation model...") - segmenter = geoai.CLIPSegmentation(tile_size=tile_size, overlap=tile_overlap) - - result.write_status(f'Segmenting image with prompt "{segmentation_prompt}"...') - segmenter.segment_image( - imagery_path, - output_path=segmentation_path, - text_prompt=segmentation_prompt, - threshold=threshold, - smoothing_sigma=smoothing_sigma, - ) - - # Reformat data as binary mask - seg = large_image.open(segmentation_path) - sink = large_image.new() - region_size = 1000 - for iy in range(int(seg.sizeY / region_size)): - for ix in range(int(seg.sizeX / region_size)): - region = { - "top": iy * region_size, - "left": ix * region_size, - "bottom": (iy + 1) * region_size, - "right": (ix + 1) * region_size, - } - data, _ = seg.getRegion(region=region, format="numpy") - mask = (data[:, :, 0] > 0).astype(int) * 255 - sink.addTile(mask, x=region["left"], y=region["top"]) - - # Apply georeferencing to raster output - projection = "epsg:4326" - original = large_image.open(imagery_path) - source_bounds = original.getMetadata().get("sourceBounds") - crs_from = CRS(source_bounds.get("srs")) - crs_to = CRS(projection) - transformer = Transformer.from_crs(crs_from, crs_to) - p1 = transformer.transform(source_bounds["xmin"], source_bounds["ymax"]) - p2 = transformer.transform(source_bounds["xmax"], source_bounds["ymin"]) - gcps = [[p1[1], p1[0], 0, 0], [p2[1], p2[0], sink.sizeX, sink.sizeY]] - sink.projection = projection - sink.gcps = gcps - sink.write(mask_path) - - result.write_status("Saving results...") - dataset_name = f"Segmentation of {segmentation_prompt}" - existing_count = Dataset.objects.filter(name__contains=dataset_name).count() - if existing_count: - dataset_name += f" ({existing_count + 1})" - dataset = 
Dataset.objects.create( - name=dataset_name, - description="Segmentation generated by GeoAI from aerial imagery", - category="segmentation", - metadata={ - "creation_time": datetime.datetime.now(datetime.UTC).isoformat(), - "api": "https://opengeoai.org/geoai/?h=clipseg#geoai.geoai.CLIPSegmentation", - }, - ) - dataset.set_tags(["analytics", "segmentation", "imagery"]) - raster_file_item = FileItem.objects.create( - name=mask_path.name, - dataset=dataset, - file_type="tif", - file_size=mask_path.stat().st_size, - ) - with mask_path.open("rb") as f: - raster_file_item.file.save(mask_path, ContentFile(f.read())) - - dataset.spawn_conversion_task(asynchronous=False) - result.outputs = {"result": dataset.id} - except Exception as e: - result.error = str(e) + import geoai + + # Update name + result.name = f"Segmentation of {segmentation_prompt} in {imagery.name}" + result.save() + + result.write_status("Reading aerial imagery...") + imagery_path = utilities.field_file_to_local_path(imagery.cloud_optimized_geotiff) + segmentation_path = imagery_path.parent / "segmentation.tif" + mask_path = imagery_path.parent / f"{segmentation_prompt}_mask.tif" + + result.write_status("Loading GeoAI CLIPSegmentation model...") + segmenter = geoai.CLIPSegmentation(tile_size=tile_size, overlap=tile_overlap) + + result.write_status(f'Segmenting image with prompt "{segmentation_prompt}"...') + segmenter.segment_image( + imagery_path, + output_path=segmentation_path, + text_prompt=segmentation_prompt, + threshold=threshold, + smoothing_sigma=smoothing_sigma, + ) + + # Reformat data as binary mask + seg = large_image.open(segmentation_path) + sink = large_image.new() + region_size = 1000 + for iy in range(int(seg.sizeY / region_size)): + for ix in range(int(seg.sizeX / region_size)): + region = { + "top": iy * region_size, + "left": ix * region_size, + "bottom": (iy + 1) * region_size, + "right": (ix + 1) * region_size, + } + data, _ = seg.getRegion(region=region, format="numpy") + mask = 
(data[:, :, 0] > 0).astype(int) * 255 + sink.addTile(mask, x=region["left"], y=region["top"]) + + # Apply georeferencing to raster output + projection = "epsg:4326" + original = large_image.open(imagery_path) + source_bounds = original.getMetadata().get("sourceBounds") + crs_from = CRS(source_bounds.get("srs")) + crs_to = CRS(projection) + transformer = Transformer.from_crs(crs_from, crs_to) + p1 = transformer.transform(source_bounds["xmin"], source_bounds["ymax"]) + p2 = transformer.transform(source_bounds["xmax"], source_bounds["ymin"]) + gcps = [[p1[1], p1[0], 0, 0], [p2[1], p2[0], sink.sizeX, sink.sizeY]] + sink.projection = projection + sink.gcps = gcps + sink.write(mask_path) + + result.write_status("Saving results...") + dataset_name = f"Segmentation of {segmentation_prompt}" + existing_count = Dataset.objects.filter(name__contains=dataset_name).count() + if existing_count: + dataset_name += f" ({existing_count + 1})" + dataset = Dataset.objects.create( + name=dataset_name, + description="Segmentation generated by GeoAI from aerial imagery", + category="segmentation", + metadata={ + "creation_time": datetime.datetime.now(datetime.UTC).isoformat(), + "api": "https://opengeoai.org/geoai/?h=clipseg#geoai.geoai.CLIPSegmentation", + }, + ) + dataset.set_tags(["analytics", "segmentation", "imagery"]) + raster_file_item = FileItem.objects.create( + name=mask_path.name, + dataset=dataset, + file_type="tif", + file_size=mask_path.stat().st_size, + ) + with mask_path.open("rb") as f: + raster_file_item.file.save(mask_path, ContentFile(f.read())) + + dataset.spawn_conversion_task(asynchronous=False) + result.outputs = {"result": dataset.id} + except Exception: + logger.exception("Task failed") + result.error = "An error occurred during this task. See logs for details." 
result.complete() diff --git a/uvdat/core/tasks/analytics/network_recovery.py b/uvdat/core/tasks/analytics/network_recovery.py index 925296ad..4fb4835c 100644 --- a/uvdat/core/tasks/analytics/network_recovery.py +++ b/uvdat/core/tasks/analytics/network_recovery.py @@ -128,128 +128,134 @@ def sort_graph_centrality(g, measure): @shared_task def network_recovery(result_id): + import logging + + logger = logging.getLogger(__name__) result = TaskResult.objects.get(id=result_id) - try: - # Verify inputs - failure = None - failure_id = result.inputs.get("network_failure") - if failure_id is None: - result.write_error("Network failure result not provided") + # Input validation + failure = None + failure_id = result.inputs.get("network_failure") + if failure_id is None: + result.write_error("Network failure result not provided") + else: + try: + failure = TaskResult.objects.get(id=failure_id) + except TaskResult.DoesNotExist: + result.write_error("Network failure result not found") + + mode = result.inputs.get("recovery_mode") + if mode is None: + result.write_error("Recovery mode not provided") + elif mode not in RECOVERY_MODES: + result.write_error("Recovery mode not a valid option") + + if failure is not None: + network_id = failure.inputs.get("network") + if network_id is None: + result.write_error("Network not provided") else: try: - failure = TaskResult.objects.get(id=failure_id) - except TaskResult.DoesNotExist: - result.write_error("Network failure result not found") - - mode = result.inputs.get("recovery_mode") - if mode is None: - result.write_error("Recovery mode not provided") - elif mode not in RECOVERY_MODES: - result.write_error("Recovery mode not a valid option") - - if failure is not None: - network_id = failure.inputs.get("network") - if network_id is None: - result.write_error("Network not provided") - else: - try: - network = Network.objects.get(id=network_id) - except Network.DoesNotExist: - result.write_error("Network not found") - - # Run task - if 
not result.error: - # Update name - result.name = f"{mode.title()} Recovery from Failure Result {failure.id}" - result.save() - - result.write_status("Reading network failure state...") - node_failures = failure.outputs.get("failures") - frames = sorted(int(key) for key in node_failures) - last_frame_failures = node_failures[str(frames[-1])] - node_recoveries = last_frame_failures.copy() - graph = get_network_graph(network) - - result.write_status("Sorting failed nodes according to recovery mode...") - if mode == "random": - random.shuffle(node_recoveries) - else: - nodes_sorted, _edge_list = sort_graph_centrality(graph, mode) - node_recoveries.sort(key=nodes_sorted.index) - - recovery_timesteps = { - i: [n for n in last_frame_failures if n not in node_recoveries[:i]] - for i in range(len(node_recoveries) + 1) - } - - result.write_status("Creating GCC chart...") - timesteps = [] - n_deactivated_values = [] - gcc_values = [] - - def get_gcc(deactivated): - remaining_graph = graph.copy() - remaining_graph.remove_nodes_from(deactivated) - components = list(nx.connected_components(remaining_graph)) - return max(components, key=len) - - for i, nodes in enumerate(node_failures.values()): - timesteps.append(i) - n_deactivated_values.append(len(nodes)) - gcc_values.append(len(get_gcc(nodes))) - for i, nodes in enumerate(recovery_timesteps.values()): - timesteps.append(len(node_failures) + i) - n_deactivated_values.append(len(nodes)) - gcc_values.append(len(get_gcc(nodes))) - - chart, _ = Chart.objects.get_or_create( - name=f"Network GCC Changes for {mode.title()} Recovery After {failure.name}", - description=( - "Number of nodes in the network's greatest connected component " - "over time during network outages and recoveries" - ), - project=result.project, - ) - chart.metadata = { - "source": "Generated by Network Recovery Analysis Task", - "created": timezone.now().strftime("%d/%m/%Y %H:%M"), - "node_failures": node_failures, - "node_recoveries": recovery_timesteps, - } 
- chart.chart_data = { - "labels": timesteps, - "datasets": [ - { - "data": n_deactivated_values, - "label": "Deactivated Nodes", - "borderColor": "#ff0000", - "backgroundColor": "#ff0000", - }, - { - "data": gcc_values, - "label": "Greatest Connected Component", - "borderColor": "#0000ff", - "backgroundColor": "#0000ff", - }, - ], - } - chart.chart_options = { - "chart_title": "Greatest Connected Component versus Deactivated Nodes Over Time", - "x_title": "Timestep in Network Event", - "y_title": "Number of nodes", - } - chart.save() - - # resiliency score equals area under gcc curve with outages - # over area under gcc curve without outages - resiliency = sum(gcc_values) / (network.nodes.count() * len(gcc_values)) - - result.outputs = { - "recoveries": recovery_timesteps, - "gcc_chart": chart.id, - "resiliency_score": resiliency, - } - except Exception as e: - result.error = str(e) + network = Network.objects.get(id=network_id) + except Network.DoesNotExist: + result.write_error("Network not found") + + if result.error: + result.complete() + return + + try: + # Update name + result.name = f"{mode.title()} Recovery from Failure Result {failure.id}" + result.save() + + result.write_status("Reading network failure state...") + node_failures = failure.outputs.get("failures") + frames = sorted(int(key) for key in node_failures) + last_frame_failures = node_failures[str(frames[-1])] + node_recoveries = last_frame_failures.copy() + graph = get_network_graph(network) + + result.write_status("Sorting failed nodes according to recovery mode...") + if mode == "random": + random.shuffle(node_recoveries) + else: + nodes_sorted, _edge_list = sort_graph_centrality(graph, mode) + node_recoveries.sort(key=nodes_sorted.index) + + recovery_timesteps = { + i: [n for n in last_frame_failures if n not in node_recoveries[:i]] + for i in range(len(node_recoveries) + 1) + } + + result.write_status("Creating GCC chart...") + timesteps = [] + n_deactivated_values = [] + gcc_values = [] + + 
def get_gcc(deactivated): + remaining_graph = graph.copy() + remaining_graph.remove_nodes_from(deactivated) + components = list(nx.connected_components(remaining_graph)) + return max(components, key=len) + + for i, nodes in enumerate(node_failures.values()): + timesteps.append(i) + n_deactivated_values.append(len(nodes)) + gcc_values.append(len(get_gcc(nodes))) + for i, nodes in enumerate(recovery_timesteps.values()): + timesteps.append(len(node_failures) + i) + n_deactivated_values.append(len(nodes)) + gcc_values.append(len(get_gcc(nodes))) + + chart, _ = Chart.objects.get_or_create( + name=f"Network GCC Changes for {mode.title()} Recovery After {failure.name}", + description=( + "Number of nodes in the network's greatest connected component " + "over time during network outages and recoveries" + ), + project=result.project, + ) + chart.metadata = { + "source": "Generated by Network Recovery Analysis Task", + "created": timezone.now().strftime("%d/%m/%Y %H:%M"), + "node_failures": node_failures, + "node_recoveries": recovery_timesteps, + } + chart.chart_data = { + "labels": timesteps, + "datasets": [ + { + "data": n_deactivated_values, + "label": "Deactivated Nodes", + "borderColor": "#ff0000", + "backgroundColor": "#ff0000", + }, + { + "data": gcc_values, + "label": "Greatest Connected Component", + "borderColor": "#0000ff", + "backgroundColor": "#0000ff", + }, + ], + } + chart.chart_options = { + "chart_title": "Greatest Connected Component versus Deactivated Nodes Over Time", + "x_title": "Timestep in Network Event", + "y_title": "Number of nodes", + } + chart.save() + + # resiliency score equals area under gcc curve with outages + # over area under gcc curve without outages + resiliency = sum(gcc_values) / (network.nodes.count() * len(gcc_values)) + + result.outputs = { + "recoveries": recovery_timesteps, + "gcc_chart": chart.id, + "resiliency_score": resiliency, + } + except Exception: + logger.exception("Task failed") + result.error = "An error occurred during this task. 
See logs for details." result.complete() From db4962e097a70b9ed43a2c5222da01cbf8c9e8a3 Mon Sep 17 00:00:00 2001 From: Anne Haley Date: Mon, 2 Mar 2026 13:11:41 -0500 Subject: [PATCH 2/2] style: blank line between imports --- uvdat/core/tasks/analytics/flood_simulation.py | 1 + 1 file changed, 1 insertion(+) diff --git a/uvdat/core/tasks/analytics/flood_simulation.py b/uvdat/core/tasks/analytics/flood_simulation.py index 76c0597a..23f69caa 100644 --- a/uvdat/core/tasks/analytics/flood_simulation.py +++ b/uvdat/core/tasks/analytics/flood_simulation.py @@ -61,6 +61,7 @@ def run_task(self, *, project, **inputs): @shared_task def flood_simulation(result_id): import logging + from uvdat_flood_sim import run_sim, write_multiframe_geotiff logger = logging.getLogger(__name__)