From 631fd75cc0d71ba46f1e022ed04c5db0d15aa49a Mon Sep 17 00:00:00 2001 From: bhavinjp Date: Tue, 18 Feb 2025 16:40:46 -0600 Subject: [PATCH] Added Content Provenance notebook --- .../00-NovaCanvas-prerequisites.ipynb | 2 +- .../NovaCanvas/01-text-to-image.ipynb | 3 +- .../NovaCanvas/08-content-provenance.ipynb | 345 ++++++++++++++++++ .../workshop-sample/NovaCanvas/utils.py | 316 +++++++++++++++- 4 files changed, 663 insertions(+), 3 deletions(-) create mode 100644 multimodal-generation/workshop-sample/NovaCanvas/08-content-provenance.ipynb diff --git a/multimodal-generation/workshop-sample/NovaCanvas/00-NovaCanvas-prerequisites.ipynb b/multimodal-generation/workshop-sample/NovaCanvas/00-NovaCanvas-prerequisites.ipynb index 019c6067..ff1f9243 100644 --- a/multimodal-generation/workshop-sample/NovaCanvas/00-NovaCanvas-prerequisites.ipynb +++ b/multimodal-generation/workshop-sample/NovaCanvas/00-NovaCanvas-prerequisites.ipynb @@ -22,7 +22,7 @@ "metadata": {}, "outputs": [], "source": [ - "%pip install boto3 numpy pillow matplotlib --quiet" + "%pip install boto3 numpy pillow matplotlib c2pa-python --quiet" ] }, { diff --git a/multimodal-generation/workshop-sample/NovaCanvas/01-text-to-image.ipynb b/multimodal-generation/workshop-sample/NovaCanvas/01-text-to-image.ipynb index cc5dc476..208c87a5 100644 --- a/multimodal-generation/workshop-sample/NovaCanvas/01-text-to-image.ipynb +++ b/multimodal-generation/workshop-sample/NovaCanvas/01-text-to-image.ipynb @@ -35,7 +35,8 @@ "4. Background Removal\n", "5. Image Variation\n", "6. Image Conditioning\n", - "7. Color Conditioning\n" + "7. Color Conditioning\n", + "8. 
Content Provenance\n" ] }, { diff --git a/multimodal-generation/workshop-sample/NovaCanvas/08-content-provenance.ipynb b/multimodal-generation/workshop-sample/NovaCanvas/08-content-provenance.ipynb new file mode 100644 index 00000000..ff715322 --- /dev/null +++ b/multimodal-generation/workshop-sample/NovaCanvas/08-content-provenance.ipynb @@ -0,0 +1,345 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "83632d5f", + "metadata": {}, + "source": [ + "## Introduction to Content Provenance\n", + "\n", + "Content Credentials (C2PA) is a built-in feature of Amazon Nova Canvas that enables verification and tracking of AI-generated images. This capability adds cryptographically verifiable metadata to every generated image, documenting its origin, creation process, and any subsequent modifications. The metadata includes details such as generation timestamp, model information, and AWS's digital signature, enabling both automated and manual verification of authenticity.\n", + "\n", + "### Use Case\n", + "\n", + "OctankFashion wants to ensure transparency with their customers about their use of AI in their marketing materials. They need a reliable way to:\n", + "1. Prove the authenticity of their AI-generated product images\n", + "2. Track modifications made through their design workflow\n", + "3. Allow customers to verify the authenticity of marketing materials\n", + "4. Create a verification process for their creative team to ensure compliance\n", + "\n", + "To demonstrate these capabilities, we'll explore how Content Credentials are:\n", + "- Automatically embedded during image generation\n", + "- Preserved and updated through editing operations\n", + "- Verified using both programmatic tools and user-friendly interfaces\n" + ] + }, + { + "cell_type": "markdown", + "id": "7eac1c0f", + "metadata": {}, + "source": [ + "
\n", + "Prerequisites: Please run the prerequisites notebook 00-NovaCanvas-prerequisites.ipynb before proceeding.\n", + "
\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "80af40c9", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "import json\n", + "import base64\n", + "import boto3\n", + "from botocore.config import Config\n", + "from PIL import Image\n", + "from utils import (\n", + " plot_images,\n", + " save_binary_image,\n", + " display_image_with_metadata,\n", + " verify_c2pa_metadata,\n", + " track_edit_provenance\n", + ")\n", + "\n", + "bedrock_runtime_client = boto3.client(\n", + " \"bedrock-runtime\",\n", + " region_name=\"us-east-1\",\n", + " config=Config(\n", + " read_timeout=5 * 60\n", + " ),\n", + ")\n", + "image_generation_model_id = \"amazon.nova-canvas-v1:0\"\n", + "output_dir = \"output\"" + ] + }, + { + "cell_type": "markdown", + "id": "cb61977f", + "metadata": { + "tags": [] + }, + "source": [ + "#### Example 1: Generating Images with Provenance\n", + "\n", + "Amazon Nova Canvas automatically embeds Content Credentials metadata in every generated image. This metadata provides a cryptographically verifiable record that includes:\n", + "\n", + "- **Manifest Identifier**: A unique UUID for the manifest\n", + "- **Generator Information**: Details about Nova Canvas and AWS Bedrock\n", + "- **Creation Time**: Precise timestamp of generation\n", + "- **Digital Signature**: AWS's cryptographic signature\n", + "- **AI Generation Markers**: Explicit indication of AI generation\n", + "\n", + "The C2PA manifest includes several key components:\n", + "- `claim_generator`: Identifies the software/system that created the image\n", + "- `claim_generator_info`: Detailed information about the generation process\n", + "- `assertions`: List of claims about the image, including creation time and AI attribution\n", + "- `signature_info`: Cryptographic signature details from AWS\n", + "\n", + "This metadata can be verified in two ways:\n", + "1. Through the [Content Credentials Verify](https://contentcredentials.org/verify) website\n", + "2. 
Programmatically using the [C2PA library](https://opensource.contentauthenticity.org/docs/c2pa-python/) as demonstrated below" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6e9e5747", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# Generate a simple product image\n", + "text = \"A white t-shirt with a modern geometric pattern, professional product photography\"\n", + "\n", + "body = json.dumps({\n", + " \"taskType\": \"TEXT_IMAGE\",\n", + " \"textToImageParams\": {\"text\": text},\n", + " \"imageGenerationConfig\": {\n", + " \"numberOfImages\": 1,\n", + " \"width\": 1024,\n", + " \"height\": 1024,\n", + " \"cfgScale\": 6.5,\n", + " \"seed\": 42,\n", + " \"quality\": \"premium\"\n", + " }\n", + "})\n", + "\n", + "print(\"Generating image...\")\n", + "response = bedrock_runtime_client.invoke_model(\n", + " body=body,\n", + " modelId=image_generation_model_id,\n", + " accept=\"application/json\",\n", + " contentType=\"application/json\"\n", + ")\n", + "\n", + "response_body = json.loads(response.get(\"body\").read())\n", + "base64_image = response_body.get(\"images\")[0]\n", + "\n", + "# Save the image preserving C2PA metadata\n", + "image_path = f\"{output_dir}/08-provenance-original.png\"\n", + "save_binary_image(base64_image, image_path)\n", + "print(f\"\\nImage saved to: {image_path}\")\n", + "\n", + "# Display image and metadata using utils.plot_images\n", + "c2pa_info = display_image_with_metadata(image_path)" + ] + }, + { + "cell_type": "markdown", + "id": "19ecc82e", + "metadata": { + "tags": [] + }, + "source": [ + "### Example 2: Verifying Image Provenance\n", + "\n", + "Verification of Content Credentials is crucial for establishing trust in AI-generated content. This example demonstrates how to programmatically verify an image's authenticity using the C2PA Python library. 
The verification process includes:\n", + "\n", + "**Verification Components:**\n", + "- **Digital Signature Validation**: Verifies AWS's cryptographic signature\n", + "- **Manifest Integrity**: Ensures the manifest hasn't been tampered with\n", + "- **Creation Information**: Validates timestamp and generator details\n", + "- **AI Attribution**: Confirms AI generation markers\n", + "\n", + "**Key Metadata Fields:**\n", + "- `signature_info`: Contains the AWS digital signature and certificate information\n", + "- `actions`: Documents creation time and AI generation markers\n", + "- `claim_generator_info`: Provides details about the generation software\n", + "\n", + "The verification can be integrated into automated workflows where programmatic verification is needed, while the Content Credentials Verify website provides a user-friendly interface for manual verification.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e2ea365c", + "metadata": {}, + "outputs": [], + "source": [ + "# Verify an image with C2PA metadata\n", + "print(\"Verifying image provenance...\")\n", + "verification_result = verify_c2pa_metadata(f\"{output_dir}/08-provenance-original.png\")\n", + "\n", + "if verification_result['verified']:\n", + " print(\"\\nVerification Status: ✓ Image verified\")\n", + " print(\"\\nSource Information:\")\n", + " print(f\"Generator: {verification_result['source']['generator']}\")\n", + " print(\"\\nGenerator Details:\")\n", + " for info in verification_result['source']['generator_info']:\n", + " print(f\"- {info['name']}\" + (f\" v{info['version']}\" if 'version' in info else \"\"))\n", + "\n", + " print(\"\\nCreation Information:\")\n", + " print(f\"Action: {verification_result['creation']['action']}\")\n", + " print(f\"Time: {verification_result['creation']['time']}\")\n", + " print(f\"AI Generated: {'Yes' if verification_result['creation']['ai_generated'] else 'No'}\")\n", + "\n", + " print(\"\\nSignature Information:\")\n", + " 
sig_info = verification_result['signature']\n", + " print(f\"Issuer: {sig_info['issuer']}\")\n", + " print(f\"Algorithm: {sig_info['alg']}\")\n", + " print(f\"Time: {sig_info['time']}\")\n", + " print(f\"Certificate SN: {sig_info['cert_serial_number']}\")\n", + "\n", + " # Display the verified image\n", + " img = Image.open(f\"{output_dir}/08-provenance-original.png\")\n", + " plot_images([img], processed_title=\"Verified Image with C2PA Metadata\")\n", + "else:\n", + " print(\"\\nVerification Status: ✗ Image not verified\")\n", + " print(f\"Reason: {verification_result['reason']}\")" + ] + }, + { + "cell_type": "markdown", + "id": "80c0589e", + "metadata": { + "tags": [] + }, + "source": [ + "### Example 3: Maintaining Provenance Through Edits\n", + "\n", + "When editing images using Nova Canvas features like inpainting or outpainting, the Content Credentials are updated to reflect these modifications. Each edit operation generates a new manifest with updated metadata that documents the change while maintaining cryptographic verifiability.\n", + "\n", + "**Key Changes in Edit Manifests:**\n", + "- **New Manifest ID**: A unique UUID for the edited version\n", + "- **Updated Action**: Changes from `c2pa.created` to `c2pa.edited`\n", + "- **New Timestamp**: Records when the edit occurred\n", + "- **Preserved Generator Info**: Maintains the link to Nova Canvas as the editing tool\n", + "- **Updated Signature**: New cryptographic signature validating the edit\n", + "\n", + "**Key Fields for Edit Tracking:**\n", + "- `active_manifest`: Points to the most recent manifest\n", + "- `claim_generator`: Identifies Nova Canvas as the editing tool\n", + "- `actions`: Documents the edit operation with:\n", + " - Type: `c2pa.edited`\n", + " - Timestamp: When the edit occurred\n", + " - Software agent: Nova Canvas\n", + " - Digital source type: Confirms AI-based modification\n", + "\n", + "This example demonstrates how Nova Canvas maintains verifiable provenance through editing 
operations by creating new signed manifests that document each modification." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ad3dc304-cb45-4f11-bb81-0b7945608410", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# First, let's read our original image\n", + "with open(f\"{output_dir}/08-provenance-original.png\", \"rb\") as image_file:\n", + " original_image = base64.b64encode(image_file.read()).decode('utf8')\n", + "\n", + "# Perform an inpainting operation to add a logo\n", + "body = json.dumps({\n", + " \"taskType\": \"INPAINTING\",\n", + " \"inPaintingParams\": {\n", + " \"text\": \"Add a small company logo to the t-shirt\",\n", + " \"image\": original_image,\n", + " \"maskPrompt\": \"center of the t-shirt\",\n", + " },\n", + " \"imageGenerationConfig\": {\n", + " \"numberOfImages\": 1,\n", + " \"cfgScale\": 6.5,\n", + " \"seed\": 42,\n", + " \"quality\": \"premium\"\n", + " }\n", + "})\n", + "\n", + "print(\"Performing inpainting...\")\n", + "response = bedrock_runtime_client.invoke_model(\n", + " body=body,\n", + " modelId=image_generation_model_id,\n", + " accept=\"application/json\",\n", + " contentType=\"application/json\"\n", + ")\n", + "\n", + "response_body = json.loads(response.get(\"body\").read())\n", + "base64_image = response_body.get(\"images\")[0]\n", + "\n", + "# Save the edited image preserving C2PA metadata\n", + "edited_path = f\"{output_dir}/08-provenance-edited.png\"\n", + "save_binary_image(base64_image, edited_path)\n", + "print(f\"\\nEdited image saved to: {edited_path}\")\n", + "\n", + "# Compare provenance before and after editing\n", + "print(\"\\nComparing manifests before and after editing...\")\n", + "comparison = track_edit_provenance(\n", + " f\"{output_dir}/08-provenance-original.png\",\n", + " edited_path\n", + ")\n", + "\n", + "# Display key changes summary\n", + "print(\"\\nKey Changes Summary:\")\n", + "print(\"-\" * 50)\n", + "if 'error' not in comparison:\n", + " original_time = 
comparison['original']['creation_time']\n", + " edited_time = comparison['edited']['creation_time']\n", + " print(f\"Original Manifest ID: {comparison['original']['manifest_id']}\")\n", + " print(f\"Created: {original_time}\")\n", + " print(f\"\\nEdited Manifest ID: {comparison['edited']['manifest_id']}\")\n", + " print(f\"Modified: {edited_time}\")\n", + "else:\n", + " print(f\"Error: {comparison['error']}\")" + ] + }, + { + "cell_type": "markdown", + "id": "01fc6841", + "metadata": {}, + "source": [ + "## Take Away\n", + "\n", + "Content provenance is a crucial feature of Amazon Nova Canvas that enables transparency and trust in AI-generated images. Through this notebook, we've explored how to:\n", + "\n", + "1. Generate images with built-in provenance metadata\n", + "2. Verify the authenticity of Nova Canvas-generated images\n", + "3. Maintain provenance through editing operations\n", + "\n", + "This capability is particularly valuable for businesses like OctankFashion that want to be transparent about their use of AI in marketing materials while maintaining trust with their customers.\n", + "\n", + "Key points to remember:\n", + "- All Nova Canvas-generated images include provenance metadata by default\n", + "- Provenance information persists through supported editing operations\n", + "- Verification can be automated and integrated into existing workflows\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "conda_python3", + "language": "python", + "name": "conda_python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.16" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/multimodal-generation/workshop-sample/NovaCanvas/utils.py b/multimodal-generation/workshop-sample/NovaCanvas/utils.py index 3c182f94..c97246d0 100644 --- 
a/multimodal-generation/workshop-sample/NovaCanvas/utils.py +++ b/multimodal-generation/workshop-sample/NovaCanvas/utils.py @@ -1,9 +1,13 @@ import base64 import io - +import json import matplotlib.pyplot as plt import numpy as np +import textwrap from PIL import Image +from c2pa import Reader +from typing import Dict, Any, Optional, Tuple +from matplotlib.patches import Rectangle # Define function to save the output @@ -232,3 +236,313 @@ def plot_images_for_comparison( print(f"Prompt: {prompt}\n") plt.tight_layout() plt.show() + +def save_binary_image(base64_image: str, output_path: str) -> None: + """ + Save a base64 encoded image as binary file, preserving C2PA metadata. + + Args: + base64_image (str): Base64 encoded image data + output_path (str): Path where to save the image + """ + image_bytes = base64.b64decode(base64_image) + with open(output_path, 'wb') as f: + f.write(image_bytes) + +def extract_c2pa_metadata(image_path: str) -> Dict[str, Any]: + """ + Extract C2PA metadata from an image file. + + Args: + image_path (str): Path to the image file + + Returns: + dict: Dictionary containing manifest store and active manifest information + """ + try: + reader = Reader.from_file(image_path) + manifest_store = reader.json() + active_manifest = reader.get_active_manifest() + + return { + "manifest_store": json.loads(manifest_store), + "active_manifest": active_manifest + } + except Exception as e: + print(f"Error extracting C2PA metadata: {str(e)}") + + return {} + +def display_image_with_metadata(image_path: str, ref_image_path: Optional[str] = None) -> Dict[str, Any]: + """ + Display an image and its C2PA metadata using the utils.plot_images function. 
+ + Args: + image_path (str): Path to the image file + ref_image_path (str, optional): Path to a reference image for comparison + + Returns: + dict: C2PA metadata information + """ + # Extract C2PA metadata + c2pa_info = extract_c2pa_metadata(image_path) + + print("\nC2PA Manifest Store:") + print(json.dumps(c2pa_info.get("manifest_store", {}), indent=2)) + + # Open the image using PIL + image = Image.open(image_path) + + # Use plot_images from utils to display + plot_images([image], ref_image_path=ref_image_path, + original_title="Reference Image" if ref_image_path else None, + processed_title="Generated Image with C2PA Metadata") + + return c2pa_info + +def get_manifest_info(manifest_store: Dict[str, Any]) -> Dict[str, Any]: + """ + Extract key information from a manifest store. + + Args: + manifest_store (dict): The C2PA manifest store + + Returns: + dict: Dictionary containing key manifest information + """ + info = {} + + try: + active_manifest_id = manifest_store.get("active_manifest") + if not active_manifest_id: + return info + + active_manifest = manifest_store["manifests"][active_manifest_id] + + info["manifest_id"] = active_manifest_id + info["generator"] = active_manifest.get("claim_generator") + info["generator_info"] = active_manifest.get("claim_generator_info") + info["signature"] = active_manifest.get("signature_info") + + # Extract creation info from assertions + for assertion in active_manifest.get("assertions", []): + if assertion["label"] == "c2pa.actions": + actions = assertion["data"].get("actions", []) + for action in actions: + if action["action"] in ["c2pa.created", "c2pa.edited"]: + info["action"] = action["action"] + info["timestamp"] = action["when"] + info["software_agent"] = action.get("softwareAgent") + info["is_ai_generated"] = ( + action.get("digitalSourceType") == + "http://cv.iptc.org/newscodes/digitalsourcetype/trainedAlgorithmicMedia" + ) + break + except Exception as e: + print(f"Error extracting manifest info: {str(e)}") + + 
return info + +def verify_c2pa_metadata(image_path: str) -> Dict[str, Any]: + """ + Verify C2PA metadata in an image and return verification results. + + Args: + image_path (str): Path to the image file + + Returns: + dict: Verification results including source, creation, and signature info + """ + try: + # Extract C2PA metadata + c2pa_info = extract_c2pa_metadata(image_path) + manifest_store = c2pa_info.get("manifest_store", {}) + + # Get the active manifest + active_manifest_id = manifest_store.get("active_manifest") + if not active_manifest_id: + return { + 'verified': False, + 'reason': 'No active manifest found' + } + + # Get manifest info + manifest_info = get_manifest_info(manifest_store) + + verification_result = { + 'verified': True, + 'source': { + 'generator': manifest_info.get('generator'), + 'generator_info': manifest_info.get('generator_info') + }, + 'creation': { + 'time': manifest_info.get('timestamp'), + 'ai_generated': manifest_info.get('is_ai_generated'), + 'action': manifest_info.get('action') + }, + 'signature': manifest_info.get('signature'), + 'manifest_id': manifest_info.get('manifest_id') + } + + return verification_result + + except Exception as e: + return { + 'verified': False, + 'reason': f'Error verifying image: {str(e)}' + } + +def track_edit_provenance(original_path: str, edited_path: str, figsize: Tuple[int, int] = (22, 14)) -> Dict[str, Any]: + """ + Compare Content Credentials before and after editing and visualize the changes. 
+ + Args: + original_path (str): Path to the original image + edited_path (str): Path to the edited image + figsize (tuple): Figure size for the visualization + + Returns: + dict: Comparison information between original and edited manifests + """ + + try: + # Read manifests from both images + original_store = extract_c2pa_metadata(original_path)["manifest_store"] + edited_store = extract_c2pa_metadata(edited_path)["manifest_store"] + + # Create figure and axes with increased size for better readability + fig, (ax1, ax2) = plt.subplots(1, 2, figsize=figsize) + fig.patch.set_facecolor('white') + + # Helper function to format and identify lines to highlight + def process_json(json_obj: Dict[str, Any], is_edited: bool = False) -> Tuple[list, list]: + json_str = json.dumps(json_obj, indent=2) + # Wrap long lines for better readability + wrapped_lines = [] + for line in json_str.split('\n'): + if len(line) > 80: + # Preserve indentation for wrapped lines + indent = len(line) - len(line.lstrip()) + indent_str = ' ' * indent + wrapped = textwrap.wrap(line.lstrip(), width=80-indent, + subsequent_indent=indent_str + ' ') + wrapped_lines.extend(indent_str + l for l in wrapped) + else: + wrapped_lines.append(line) + + highlight_terms = [ + 'urn:uuid:', + '"action":', + '"when":', + '"time":', + '"instance_id":', + '"signature_info":' + ] + + highlight_indices = [] + for i, line in enumerate(wrapped_lines): + if is_edited: + if any(term in line for term in highlight_terms) or '"c2pa.edited"' in line: + highlight_indices.append(i) + else: + if any(term in line for term in highlight_terms) or '"c2pa.created"' in line: + highlight_indices.append(i) + + return wrapped_lines, highlight_indices + + # Process both manifests + original_lines, original_highlights = process_json(original_store) + edited_lines, edited_highlights = process_json(edited_store, is_edited=True) + + # Clear and set up axes + for ax, title in [(ax1, 'Original Manifest'), (ax2, 'Edited Manifest')]: + ax.clear() + 
ax.set_xticks([]) + ax.set_yticks([]) + ax.set_title(title, pad=20, fontsize=14, fontweight='bold') + + # Calculate text properties + font_size = 12 + line_height = 1.3 + text_left_margin = 0.05 + + # Display and highlight text for original manifest + for i, line in enumerate(original_lines): + y_pos = len(original_lines) - i - 1 + if i in original_highlights: + rect = Rectangle((-0.02, y_pos-0.2), 1.04, 1.0, + facecolor='lightblue', alpha=0.3, + transform=ax1.transData) + ax1.add_patch(rect) + ax1.text(text_left_margin, y_pos, line, + fontfamily='monospace', + fontsize=font_size, + ha='left', + va='center') + + # Display and highlight text for edited manifest + for i, line in enumerate(edited_lines): + y_pos = len(edited_lines) - i - 1 + if i in edited_highlights: + rect = Rectangle((-0.02, y_pos-0.2), 1.04, 1.0, + facecolor='yellow', alpha=0.3, + transform=ax2.transData) + ax2.add_patch(rect) + ax2.text(text_left_margin, y_pos, line, + fontfamily='monospace', + fontsize=font_size, + ha='left', + va='center') + + # Set axis limits with additional padding + max_lines = max(len(original_lines), len(edited_lines)) + for ax in [ax1, ax2]: + ax.set_ylim(-1, max_lines + 1) + ax.set_xlim(-0.1, 1.1) + for spine in ax.spines.values(): + spine.set_visible(False) + + # Adjust layout to prevent overlap + plt.tight_layout(pad=3.0) + plt.show() + + # Compare images visually + fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 8)) + fig.suptitle('Original vs Edited Image', fontsize=14) + + # Display original image + img1 = plt.imread(original_path) + ax1.imshow(img1) + ax1.set_title('Original Image') + ax1.axis('off') + + # Display edited image + img2 = plt.imread(edited_path) + ax2.imshow(img2) + ax2.set_title('Edited Image') + ax2.axis('off') + + plt.tight_layout() + plt.show() + + # Get manifest information for both images + original_info = get_manifest_info(original_store) + edited_info = get_manifest_info(edited_store) + + return { + 'original': { + 'manifest_id': 
original_info['manifest_id'], + 'creation_time': original_info['timestamp'], + 'manifest': original_store + }, + 'edited': { + 'manifest_id': edited_info['manifest_id'], + 'creation_time': edited_info['timestamp'], + 'manifest': edited_store + } + } + + except Exception as e: + return { + 'error': f'Error comparing provenance: {str(e)}' + } \ No newline at end of file