diff --git a/samples/scenarios/DocumentProcessingOnAKS/Client/Client.csproj b/samples/scenarios/DocumentProcessingOnAKS/Client/Client.csproj
new file mode 100644
index 0000000..6d355ca
--- /dev/null
+++ b/samples/scenarios/DocumentProcessingOnAKS/Client/Client.csproj
@@ -0,0 +1,16 @@
+
+
+
+ Exe
+ net10.0
+ enable
+ enable
+ b2f26a38-2066-4051-916a-b21af6b0f10e
+
+
+
+
+
+
+
+
diff --git a/samples/scenarios/DocumentProcessingOnAKS/Client/Dockerfile b/samples/scenarios/DocumentProcessingOnAKS/Client/Dockerfile
new file mode 100644
index 0000000..426cf67
--- /dev/null
+++ b/samples/scenarios/DocumentProcessingOnAKS/Client/Dockerfile
@@ -0,0 +1,19 @@
+FROM mcr.microsoft.com/dotnet/sdk:10.0 AS build
+WORKDIR /src
+
+# Copy csproj and restore dependencies
+COPY ["Client.csproj", "./"]
+RUN dotnet restore
+
+# Copy all files and build
+COPY . .
+RUN dotnet publish -c Release -o /app/publish
+
+# Build runtime image
+FROM mcr.microsoft.com/dotnet/runtime:10.0 AS final
+WORKDIR /app
+COPY --from=build /app/publish .
+
+EXPOSE 8080
+# Set the entrypoint
+ENTRYPOINT ["dotnet", "Client.dll"]
diff --git a/samples/scenarios/DocumentProcessingOnAKS/Client/Models/DocumentInfo.cs b/samples/scenarios/DocumentProcessingOnAKS/Client/Models/DocumentInfo.cs
new file mode 100644
index 0000000..8f020ee
--- /dev/null
+++ b/samples/scenarios/DocumentProcessingOnAKS/Client/Models/DocumentInfo.cs
@@ -0,0 +1,7 @@
+namespace DurableTaskOnAKS.Client.Models;
+
+/// <summary>
+/// A document submitted for processing.
+/// Shape must match the Worker's DocumentInfo for JSON round-tripping.
+/// </summary>
+public record DocumentInfo(string Id, string Title, string Content);
diff --git a/samples/scenarios/DocumentProcessingOnAKS/Client/Program.cs b/samples/scenarios/DocumentProcessingOnAKS/Client/Program.cs
new file mode 100644
index 0000000..81d0ece
--- /dev/null
+++ b/samples/scenarios/DocumentProcessingOnAKS/Client/Program.cs
@@ -0,0 +1,78 @@
+using DurableTaskOnAKS.Client.Models;
+using Microsoft.DurableTask.Client;
+using Microsoft.DurableTask.Client.AzureManaged;
+using Microsoft.Extensions.DependencyInjection;
+using Microsoft.Extensions.Hosting;
+
+// ---------------------------------------------------------------------------
+// Setup
+// ---------------------------------------------------------------------------
+
+string endpoint = Environment.GetEnvironmentVariable("ENDPOINT") ?? "http://localhost:8080";
+string taskHub = Environment.GetEnvironmentVariable("TASKHUB") ?? "default";
+string? clientId = Environment.GetEnvironmentVariable("AZURE_CLIENT_ID");
+
+var builder = Host.CreateApplicationBuilder();
+builder.Services.AddDurableTaskClient(b => b.UseDurableTaskScheduler(
+ BuildConnectionString(endpoint, taskHub, clientId)));
+
+using var host = builder.Build();
+await host.StartAsync();
+var client = host.Services.GetRequiredService<DurableTaskClient>();
+
+// ---------------------------------------------------------------------------
+// Sample documents
+// ---------------------------------------------------------------------------
+
+DocumentInfo[] docs =
+[
+ new("doc-1", "Cloud Migration Strategy",
+ "Plan to migrate on-prem workloads to Azure over 18 months."),
+ new("doc-2", "Quarterly Incident Report",
+ "Summary of production incidents and remediation steps for Q4."),
+ new("doc-3", "ML Model Evaluation",
+ "Transformer model achieved 94% accuracy on document classification."),
+];
+
+// ---------------------------------------------------------------------------
+// Submit and wait for each orchestration
+// ---------------------------------------------------------------------------
+
+Console.WriteLine($"Endpoint: {endpoint} | TaskHub: {taskHub}");
+Console.WriteLine($"Submitting {docs.Length} documents...\n");
+
+foreach (var doc in docs)
+{
+ string id = await client.ScheduleNewOrchestrationInstanceAsync(
+ "DocumentProcessingOrchestration", doc);
+
+ Console.WriteLine($" Scheduled [{id}] '{doc.Title}'");
+
+ var meta = await client.WaitForInstanceCompletionAsync(id, getInputsAndOutputs: true);
+
+ if (meta.RuntimeStatus == OrchestrationRuntimeStatus.Completed)
+        Console.WriteLine($"  -> {meta.ReadOutputAs<string>()}\n");
+ else
+ Console.WriteLine($" -> {meta.RuntimeStatus}: {meta.FailureDetails?.ErrorMessage}\n");
+}
+
+Console.WriteLine("Done.");
+
+// In non-interactive/container environments (including AKS), keep the process alive for log inspection; locally (interactive) just exit.
+if (!Environment.UserInteractive || Console.IsInputRedirected)
+ await Task.Delay(Timeout.InfiniteTimeSpan); // keep pod alive for log inspection
+
+// ---------------------------------------------------------------------------
+// Helpers
+// ---------------------------------------------------------------------------
+
+static string BuildConnectionString(string endpoint, string taskHub, string? clientId)
+{
+ string auth = endpoint.StartsWith("http://localhost")
+ ? "None"
+ : !string.IsNullOrEmpty(clientId)
+ ? $"ManagedIdentity;ClientID={clientId}"
+ : "DefaultAzure";
+
+ return $"Endpoint={endpoint};TaskHub={taskHub};Authentication={auth}";
+}
diff --git a/samples/scenarios/DocumentProcessingOnAKS/Client/manifests/deployment.tmpl.yaml b/samples/scenarios/DocumentProcessingOnAKS/Client/manifests/deployment.tmpl.yaml
new file mode 100644
index 0000000..266d505
--- /dev/null
+++ b/samples/scenarios/DocumentProcessingOnAKS/Client/manifests/deployment.tmpl.yaml
@@ -0,0 +1,46 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: client
+ namespace: default
+ annotations:
+ azure.workload.identity/client-id: {{ .Env.AZURE_USER_ASSIGNED_IDENTITY_CLIENT_ID }}
+ labels:
+ azure.workload.identity/use: "true"
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: client
+ namespace: default
+ labels:
+ app: client
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: client
+ template:
+ metadata:
+ labels:
+ app: client
+ azure.workload.identity/use: "true"
+ spec:
+ serviceAccountName: client
+ containers:
+ - name: client
+ image: {{ .Env.SERVICE_CLIENT_IMAGE_NAME }}
+ env:
+ - name: ENDPOINT
+ value: {{ .Env.DTS_ENDPOINT }}
+ - name: TASKHUB
+ value: {{ .Env.DTS_TASKHUB_NAME }}
+ - name: AZURE_CLIENT_ID
+ value: {{ .Env.AZURE_USER_ASSIGNED_IDENTITY_CLIENT_ID }}
+ resources:
+ requests:
+ cpu: 250m
+ memory: 256Mi
+ limits:
+ cpu: "1"
+ memory: 512Mi
diff --git a/samples/scenarios/DocumentProcessingOnAKS/DurableTaskOnAKS.sln b/samples/scenarios/DocumentProcessingOnAKS/DurableTaskOnAKS.sln
new file mode 100644
index 0000000..76a2127
--- /dev/null
+++ b/samples/scenarios/DocumentProcessingOnAKS/DurableTaskOnAKS.sln
@@ -0,0 +1,31 @@
+
+Microsoft Visual Studio Solution File, Format Version 12.00
+# Visual Studio Version 17
+VisualStudioVersion = 17.5.2.0
+MinimumVisualStudioVersion = 10.0.40219.1
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Worker", "Worker\Worker.csproj", "{69C79C25-F84F-479E-3E79-EC68C71F7F70}"
+EndProject
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Client", "Client\Client.csproj", "{AE7EEA74-2FE0-136F-D797-854FD87E022A}"
+EndProject
+Global
+ GlobalSection(SolutionConfigurationPlatforms) = preSolution
+ Debug|Any CPU = Debug|Any CPU
+ Release|Any CPU = Release|Any CPU
+ EndGlobalSection
+ GlobalSection(ProjectConfigurationPlatforms) = postSolution
+ {69C79C25-F84F-479E-3E79-EC68C71F7F70}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {69C79C25-F84F-479E-3E79-EC68C71F7F70}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {69C79C25-F84F-479E-3E79-EC68C71F7F70}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {69C79C25-F84F-479E-3E79-EC68C71F7F70}.Release|Any CPU.Build.0 = Release|Any CPU
+ {AE7EEA74-2FE0-136F-D797-854FD87E022A}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {AE7EEA74-2FE0-136F-D797-854FD87E022A}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {AE7EEA74-2FE0-136F-D797-854FD87E022A}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {AE7EEA74-2FE0-136F-D797-854FD87E022A}.Release|Any CPU.Build.0 = Release|Any CPU
+ EndGlobalSection
+ GlobalSection(SolutionProperties) = preSolution
+ HideSolutionNode = FALSE
+ EndGlobalSection
+ GlobalSection(ExtensibilityGlobals) = postSolution
+ SolutionGuid = {CE89663C-31F8-4259-9DC2-3C41C5629266}
+ EndGlobalSection
+EndGlobal
diff --git a/samples/scenarios/DocumentProcessingOnAKS/README.md b/samples/scenarios/DocumentProcessingOnAKS/README.md
new file mode 100644
index 0000000..957642c
--- /dev/null
+++ b/samples/scenarios/DocumentProcessingOnAKS/README.md
@@ -0,0 +1,185 @@
+# Durable Task Scheduler on AKS
+
+A document-processing sample built with the [.NET Durable Task SDK](https://www.nuget.org/packages/Microsoft.DurableTask.Client.AzureManaged) and [Azure Durable Task Scheduler](https://learn.microsoft.com/en-us/azure/durable-task-scheduler/), deployed to **Azure Kubernetes Service (AKS)** with `azd`.
+
+## How It Works
+
+The app has two components — a **Client** and a **Worker** — that communicate through the Durable Task Scheduler (DTS).
+
+The **Client** submits three sample documents and waits for each to be processed. The **Worker** hosts a `DocumentProcessingOrchestration` that runs a two-stage pipeline:
+
+```
+Client ──▶ DocumentProcessingOrchestration
+ │
+ ├─ 1. ValidateDocument (activity chaining)
+ │
+ ├─ 2. ClassifyDocument × 3 (fan-out / fan-in)
+ │ ├─ Sentiment
+ │ ├─ Topic
+ │ └─ Priority
+ │
+ └─ 3. Assemble result string ──▶ return to Client
+```
+
+**Key patterns demonstrated:**
+
+- **Activity chaining** — `ValidateDocument` must pass before classification begins.
+- **Fan-out / fan-in** — Three `ClassifyDocument` activities run in parallel (Sentiment, Topic, Priority) and the orchestration awaits all three.
+- **Client scheduling** — `ScheduleNewOrchestrationInstanceAsync` + `WaitForInstanceCompletionAsync` for fire-and-wait semantics.
+- **Workload Identity** — In AKS, pods authenticate to DTS using federated credentials on a user-assigned managed identity (no secrets stored).
+
+Both Client and Worker auto-detect the environment: locally they connect to `http://localhost:8080` with no auth; in AKS they use the DTS endpoint with managed-identity auth via the `ENDPOINT`, `TASKHUB`, and `AZURE_CLIENT_ID` environment variables injected by the Kubernetes manifests.
+
+## Project Structure
+
+```
+├── azure.yaml # azd service definitions (host: aks)
+├── scripts/acr-build.sh # Predeploy hook — builds images via ACR Tasks
+├── Client/
+│ ├── Program.cs # Submits 3 documents, waits for results
+│ ├── Models/DocumentInfo.cs # Input record
+│ ├── Dockerfile
+│ └── manifests/deployment.tmpl.yaml # K8s deployment + service account
+├── Worker/
+│ ├── Program.cs # Registers orchestrations & activities
+│ ├── Orchestrations/
+│ │ └── DocumentProcessingOrchestration.cs
+│ ├── Activities/
+│ │ ├── ValidateDocument.cs # Checks title + content (returns bool)
+│ │ └── ClassifyDocument.cs # Stub classifier (called 3× in parallel)
+│ ├── Models/DocumentModels.cs # DocumentInfo, ClassifyRequest, ClassificationResult
+│ ├── Dockerfile
+│ └── manifests/deployment.tmpl.yaml # K8s deployment (2 replicas) + service account
+└── infra/ # Bicep modules: AKS, ACR, VNet, DTS, identity, RBAC
+```
+
+## Prerequisites
+
+- [.NET 10 SDK](https://dotnet.microsoft.com/download/dotnet/10.0)
+- [Docker Desktop](https://www.docker.com/products/docker-desktop/) — for the local DTS emulator
+- [Azure CLI](https://docs.microsoft.com/cli/azure/install-azure-cli) & [Azure Developer CLI (`azd`)](https://learn.microsoft.com/en-us/azure/developer/azure-developer-cli/install-azd) — for Azure deployment
+- [kubectl](https://kubernetes.io/docs/tasks/tools/) — for verifying the AKS deployment
+
+## Run Locally
+
+### 1. Start the DTS emulator
+
+```bash
+docker run -d --name dts-emulator -p 8080:8080 -p 8082:8082 \
+ mcr.microsoft.com/dts/dts-emulator:latest
+```
+
+- Port **8080** — gRPC endpoint (used by Client and Worker)
+- Port **8082** — [Dashboard UI](http://localhost:8082) for inspecting orchestrations
+
+### 2. Build the solution
+
+```bash
+dotnet build DurableTaskOnAKS.sln
+```
+
+### 3. Start the Worker
+
+```bash
+cd Worker && dotnet run
+```
+
+Wait until you see `Sidecar work-item streaming connection established.`
+
+### 4. Start the Client (in a separate terminal)
+
+```bash
+cd Client && dotnet run
+```
+
+### 5. Expected output
+
+```
+Endpoint: http://localhost:8080 | TaskHub: default
+Submitting 3 documents...
+
+ Scheduled [abc123] 'Cloud Migration Strategy'
+ -> Processed 'Cloud Migration Strategy': Sentiment=Positive, Topic=Technology, Priority=Normal
+
+ Scheduled [def456] 'Quarterly Incident Report'
+ -> Processed 'Quarterly Incident Report': Sentiment=Positive, Topic=Technology, Priority=Normal
+
+ Scheduled [ghi789] 'ML Model Evaluation'
+ -> Processed 'ML Model Evaluation': Sentiment=Positive, Topic=Technology, Priority=Normal
+
+Done.
+```
+
+### 6. Clean up
+
+```bash
+docker stop dts-emulator && docker rm dts-emulator
+```
+
+## Deploy to Azure
+
+### 1. Provision and deploy
+
+```bash
+azd auth login && az login
+azd up
+```
+
+This provisions all required Azure resources via Bicep (~5–10 min):
+
+| Resource | Purpose |
+|----------|---------|
+| **AKS cluster** | Hosts Client (1 replica) and Worker (2 replicas) pods |
+| **Azure Container Registry** | Stores Docker images (built server-side via ACR Tasks) |
+| **Durable Task Scheduler** (Consumption SKU) | Managed orchestration backend |
+| **VNet** | Network isolation for AKS |
+| **User-assigned managed identity** + federated credentials | Workload Identity auth from pods to DTS |
+
+### 2. Verify the deployment
+
+```bash
+# Get AKS credentials
+az aks get-credentials --resource-group <resource-group> --name <aks-cluster-name>
+
+# Check pods are running (1 client + 2 workers)
+kubectl get pods
+
+# View client output (orchestration results)
+kubectl logs -l app=client --tail=30
+
+# View worker logs (activity execution)
+kubectl logs -l app=worker --tail=30
+```
+
+You can find the resource group and cluster names from `azd env get-values`.
+
+Expected client log output:
+
+```
+Submitting 3 documents...
+
+ Scheduled [...] 'Cloud Migration Strategy'
+ -> Processed 'Cloud Migration Strategy': Sentiment=Positive, Topic=Technology, Priority=Normal
+
+ Scheduled [...] 'Quarterly Incident Report'
+ -> Processed 'Quarterly Incident Report': Sentiment=Positive, Topic=Technology, Priority=Normal
+
+ Scheduled [...] 'ML Model Evaluation'
+ -> Processed 'ML Model Evaluation': Sentiment=Positive, Topic=Technology, Priority=Normal
+
+Done.
+```
+
+You can also view orchestration history in the **Azure Portal → Durable Task Scheduler → Task Hub** dashboard.
+
+## Cleanup
+
+```bash
+azd down
+```
+
+## Resources
+
+- [Durable Task Scheduler documentation](https://learn.microsoft.com/en-us/azure/durable-task-scheduler/)
+- [.NET Durable Task SDK (NuGet)](https://www.nuget.org/packages/Microsoft.DurableTask.Client.AzureManaged)
+- [AKS Workload Identity](https://learn.microsoft.com/en-us/azure/aks/workload-identity-overview)
diff --git a/samples/scenarios/DocumentProcessingOnAKS/Worker/Activities/ClassifyDocument.cs b/samples/scenarios/DocumentProcessingOnAKS/Worker/Activities/ClassifyDocument.cs
new file mode 100644
index 0000000..42c9b86
--- /dev/null
+++ b/samples/scenarios/DocumentProcessingOnAKS/Worker/Activities/ClassifyDocument.cs
@@ -0,0 +1,34 @@
+using Microsoft.DurableTask;
+using Microsoft.Extensions.Logging;
+using DurableTaskOnAKS.Models;
+
+namespace DurableTaskOnAKS;
+
+/// <summary>
+/// Classifies a document along one dimension (Sentiment, Topic, or Priority).
+/// Three instances run in parallel during the fan-out step.
+/// </summary>
+public class ClassifyDocument : TaskActivity<ClassifyRequest, ClassificationResult>
+{
+ private readonly ILogger _log;
+    public ClassifyDocument(ILogger<ClassifyDocument> log) => _log = log;
+
+    public override async Task<ClassificationResult> RunAsync(
+        TaskActivityContext context, ClassifyRequest req)
+ {
+ _log.LogInformation("Classifying '{Category}' for {Id}", req.Category, req.DocumentId);
+ await Task.Delay(200); // simulate calling a classification service
+
+ // Stub results — replace with real ML / API calls
+ var (label, confidence) = req.Category switch
+ {
+ "Sentiment" => ("Positive", 0.85),
+ "Topic" => ("Technology", 0.92),
+ "Priority" => ("Normal", 0.78),
+ _ => ("Unknown", 0.50),
+ };
+
+ _log.LogInformation("{Category} = {Label} ({Conf:P0})", req.Category, label, confidence);
+ return new ClassificationResult(req.Category, label, confidence);
+ }
+}
diff --git a/samples/scenarios/DocumentProcessingOnAKS/Worker/Activities/ValidateDocument.cs b/samples/scenarios/DocumentProcessingOnAKS/Worker/Activities/ValidateDocument.cs
new file mode 100644
index 0000000..cd6db4c
--- /dev/null
+++ b/samples/scenarios/DocumentProcessingOnAKS/Worker/Activities/ValidateDocument.cs
@@ -0,0 +1,25 @@
+using Microsoft.DurableTask;
+using Microsoft.Extensions.Logging;
+using DurableTaskOnAKS.Models;
+
+namespace DurableTaskOnAKS;
+
+/// <summary>Checks that a document has a title and non-empty content.</summary>
+public class ValidateDocument : TaskActivity<DocumentInfo, bool>
+{
+ private readonly ILogger _log;
+    public ValidateDocument(ILogger<ValidateDocument> log) => _log = log;
+
+    public override async Task<bool> RunAsync(TaskActivityContext context, DocumentInfo doc)
+ {
+ _log.LogInformation("Validating '{Title}'", doc.Title);
+ await Task.Delay(100); // simulate I/O
+
+ bool valid = !string.IsNullOrWhiteSpace(doc.Title)
+ && !string.IsNullOrWhiteSpace(doc.Content)
+ && doc.Content.Length <= 10_000;
+
+ _log.LogInformation("'{Title}' valid={Valid}", doc.Title, valid);
+ return valid;
+ }
+}
diff --git a/samples/scenarios/DocumentProcessingOnAKS/Worker/Dockerfile b/samples/scenarios/DocumentProcessingOnAKS/Worker/Dockerfile
new file mode 100644
index 0000000..34a0d66
--- /dev/null
+++ b/samples/scenarios/DocumentProcessingOnAKS/Worker/Dockerfile
@@ -0,0 +1,19 @@
+FROM mcr.microsoft.com/dotnet/sdk:10.0 AS build
+WORKDIR /src
+
+# Copy csproj and restore dependencies
+COPY ["Worker.csproj", "./"]
+RUN dotnet restore
+
+# Copy all files and build
+COPY . .
+RUN dotnet publish -c Release -o /app/publish
+
+# Build runtime image
+FROM mcr.microsoft.com/dotnet/runtime:10.0 AS final
+WORKDIR /app
+COPY --from=build /app/publish .
+
+EXPOSE 8080
+# Set the entrypoint
+ENTRYPOINT ["dotnet", "Worker.dll"]
diff --git a/samples/scenarios/DocumentProcessingOnAKS/Worker/Models/DocumentModels.cs b/samples/scenarios/DocumentProcessingOnAKS/Worker/Models/DocumentModels.cs
new file mode 100644
index 0000000..215b354
--- /dev/null
+++ b/samples/scenarios/DocumentProcessingOnAKS/Worker/Models/DocumentModels.cs
@@ -0,0 +1,10 @@
+namespace DurableTaskOnAKS.Models;
+
+/// <summary>A document submitted for processing.</summary>
+public record DocumentInfo(string Id, string Title, string Content);
+
+/// <summary>Input for the classification activity — one per dimension.</summary>
+public record ClassifyRequest(string DocumentId, string Content, string Category);
+
+/// <summary>Result from a single classification pass.</summary>
+public record ClassificationResult(string Category, string Label, double Confidence);
diff --git a/samples/scenarios/DocumentProcessingOnAKS/Worker/Orchestrations/DocumentProcessingOrchestration.cs b/samples/scenarios/DocumentProcessingOnAKS/Worker/Orchestrations/DocumentProcessingOrchestration.cs
new file mode 100644
index 0000000..fdc2e63
--- /dev/null
+++ b/samples/scenarios/DocumentProcessingOnAKS/Worker/Orchestrations/DocumentProcessingOrchestration.cs
@@ -0,0 +1,50 @@
+using Microsoft.DurableTask;
+using Microsoft.Extensions.Logging;
+using DurableTaskOnAKS.Models;
+
+namespace DurableTaskOnAKS;
+
+/// <summary>
+/// Demonstrates activity chaining and fan-out / fan-in.
+///
+/// 1. Validate — reject malformed documents (chaining)
+/// 2. Classify ×3 — sentiment, topic, priority (fan-out / fan-in)
+/// 3. Return — assembled result string
+/// </summary>
+public class DocumentProcessingOrchestration : TaskOrchestrator<DocumentInfo, string>
+{
+    public override async Task<string> RunAsync(
+        TaskOrchestrationContext context, DocumentInfo doc)
+ {
+        var log = context.CreateReplaySafeLogger<DocumentProcessingOrchestration>();
+ log.LogInformation("Processing '{Title}'", doc.Title);
+
+ // Step 1 — Validate (activity chaining)
+        bool isValid = await context.CallActivityAsync<bool>(
+            nameof(ValidateDocument), doc);
+
+ if (!isValid)
+ return $"Document '{doc.Title}' failed validation.";
+
+ // Step 2 — Fan-out: three classification tasks in parallel
+        var tasks = new[]
+        {
+            context.CallActivityAsync<ClassificationResult>(
+                nameof(ClassifyDocument), new ClassifyRequest(doc.Id, doc.Content, "Sentiment")),
+            context.CallActivityAsync<ClassificationResult>(
+                nameof(ClassifyDocument), new ClassifyRequest(doc.Id, doc.Content, "Topic")),
+            context.CallActivityAsync<ClassificationResult>(
+                nameof(ClassifyDocument), new ClassifyRequest(doc.Id, doc.Content, "Priority")),
+        };
+
+ // Fan-in: wait for all three to complete
+ ClassificationResult[] results = await Task.WhenAll(tasks);
+
+ // Assemble result
+ string labels = string.Join(", ", results.Select(r => $"{r.Category}={r.Label}"));
+ string result = $"Processed '{doc.Title}': {labels}";
+
+ log.LogInformation("{Result}", result);
+ return result;
+ }
+}
diff --git a/samples/scenarios/DocumentProcessingOnAKS/Worker/Program.cs b/samples/scenarios/DocumentProcessingOnAKS/Worker/Program.cs
new file mode 100644
index 0000000..0e12a86
--- /dev/null
+++ b/samples/scenarios/DocumentProcessingOnAKS/Worker/Program.cs
@@ -0,0 +1,55 @@
+using Microsoft.DurableTask.Worker;
+using Microsoft.DurableTask.Worker.AzureManaged;
+using Microsoft.Extensions.DependencyInjection;
+using Microsoft.Extensions.Hosting;
+using Microsoft.Extensions.Logging;
+using DurableTaskOnAKS;
+
+// ---------------------------------------------------------------------------
+// Configuration
+// ---------------------------------------------------------------------------
+
+var builder = Host.CreateApplicationBuilder();
+builder.Logging.AddConsole().SetMinimumLevel(LogLevel.Information);
+
+string endpoint = Environment.GetEnvironmentVariable("ENDPOINT") ?? "http://localhost:8080";
+string taskHub = Environment.GetEnvironmentVariable("TASKHUB") ?? "default";
+string? clientId = Environment.GetEnvironmentVariable("AZURE_CLIENT_ID");
+
+string connectionString = BuildConnectionString(endpoint, taskHub, clientId);
+
+builder.Services.AddDurableTaskWorker()
+ .AddTasks(r =>
+ {
+        r.AddOrchestrator<DocumentProcessingOrchestration>();
+        r.AddActivity<ValidateDocument>();
+        r.AddActivity<ClassifyDocument>();
+ })
+ .UseDurableTaskScheduler(connectionString);
+
+// ---------------------------------------------------------------------------
+// Run
+// ---------------------------------------------------------------------------
+
+var host = builder.Build();
+var log = host.Services.GetRequiredService<ILoggerFactory>().CreateLogger("Worker");
+
+log.LogInformation("Endpoint: {Endpoint} TaskHub: {TaskHub}", endpoint, taskHub);
+log.LogInformation("Starting worker...");
+
+await host.RunAsync();
+
+// ---------------------------------------------------------------------------
+// Helpers
+// ---------------------------------------------------------------------------
+
+static string BuildConnectionString(string endpoint, string taskHub, string? clientId)
+{
+ string auth = endpoint.StartsWith("http://localhost")
+ ? "None"
+ : !string.IsNullOrEmpty(clientId)
+ ? $"ManagedIdentity;ClientID={clientId}"
+ : "DefaultAzure";
+
+ return $"Endpoint={endpoint};TaskHub={taskHub};Authentication={auth}";
+}
diff --git a/samples/scenarios/DocumentProcessingOnAKS/Worker/Worker.csproj b/samples/scenarios/DocumentProcessingOnAKS/Worker/Worker.csproj
new file mode 100644
index 0000000..9e23839
--- /dev/null
+++ b/samples/scenarios/DocumentProcessingOnAKS/Worker/Worker.csproj
@@ -0,0 +1,17 @@
+
+
+
+ Exe
+ net10.0
+ enable
+ enable
+ 15db87b3-cffd-47da-a40c-a8a1f09ab2b7
+
+
+
+
+
+
+
+
+
diff --git a/samples/scenarios/DocumentProcessingOnAKS/Worker/manifests/deployment.tmpl.yaml b/samples/scenarios/DocumentProcessingOnAKS/Worker/manifests/deployment.tmpl.yaml
new file mode 100644
index 0000000..6b7e489
--- /dev/null
+++ b/samples/scenarios/DocumentProcessingOnAKS/Worker/manifests/deployment.tmpl.yaml
@@ -0,0 +1,46 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: worker
+ namespace: default
+ annotations:
+ azure.workload.identity/client-id: {{ .Env.AZURE_USER_ASSIGNED_IDENTITY_CLIENT_ID }}
+ labels:
+ azure.workload.identity/use: "true"
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: worker
+ namespace: default
+ labels:
+ app: worker
+spec:
+ replicas: 2
+ selector:
+ matchLabels:
+ app: worker
+ template:
+ metadata:
+ labels:
+ app: worker
+ azure.workload.identity/use: "true"
+ spec:
+ serviceAccountName: worker
+ containers:
+ - name: worker
+ image: {{ .Env.SERVICE_WORKER_IMAGE_NAME }}
+ env:
+ - name: ENDPOINT
+ value: {{ .Env.DTS_ENDPOINT }}
+ - name: TASKHUB
+ value: {{ .Env.DTS_TASKHUB_NAME }}
+ - name: AZURE_CLIENT_ID
+ value: {{ .Env.AZURE_USER_ASSIGNED_IDENTITY_CLIENT_ID }}
+ resources:
+ requests:
+ cpu: 250m
+ memory: 256Mi
+ limits:
+ cpu: "1"
+ memory: 512Mi
diff --git a/samples/scenarios/DocumentProcessingOnAKS/azure.yaml b/samples/scenarios/DocumentProcessingOnAKS/azure.yaml
new file mode 100644
index 0000000..8e7ebdc
--- /dev/null
+++ b/samples/scenarios/DocumentProcessingOnAKS/azure.yaml
@@ -0,0 +1,22 @@
+# yaml-language-server: $schema=https://raw.githubusercontent.com/Azure/azure-dev/main/schemas/v1.0/azure.yaml.json
+
+# Durable Task Scheduler on Azure Kubernetes Service
+# Adapted from the AutoscalingInACA sample to run on AKS (without autoscaling support).
+# Images are built server-side via ACR Tasks (az acr build) to avoid local Docker push issues.
+
+metadata:
+ template: durable-task-on-aks-dotnet
+name: durable-task-on-aks
+hooks:
+ predeploy:
+ shell: bash
+ run: ./scripts/acr-build.sh
+services:
+ client:
+ project: ./Client
+ language: csharp
+ host: aks
+ worker:
+ project: ./Worker
+ language: csharp
+ host: aks
diff --git a/samples/scenarios/DocumentProcessingOnAKS/infra/abbreviations.json b/samples/scenarios/DocumentProcessingOnAKS/infra/abbreviations.json
new file mode 100644
index 0000000..10424d3
--- /dev/null
+++ b/samples/scenarios/DocumentProcessingOnAKS/infra/abbreviations.json
@@ -0,0 +1,17 @@
+{
+ "analysisServicesServers": "as",
+ "apiManagementService": "apim-",
+ "appConfigurationStores": "appcs-",
+ "appManagedEnvironments": "cae-",
+ "appContainerApps": "ca-",
+ "authorizationPolicyDefinitions": "policy-",
+ "automationAutomationAccounts": "aa-",
+ "containerRegistryRegistries": "cr",
+ "containerServiceManagedClusters": "aks-",
+ "networkVirtualNetworks": "vnet-",
+ "networkNetworkSecurityGroups": "nsg-",
+ "managedIdentityUserAssignedIdentities": "id-",
+ "resourcesResourceGroups": "rg-",
+ "dts": "dts-",
+ "taskhub": "taskhub-"
+}
diff --git a/samples/scenarios/DocumentProcessingOnAKS/infra/app/dts.bicep b/samples/scenarios/DocumentProcessingOnAKS/infra/app/dts.bicep
new file mode 100644
index 0000000..2d0fec0
--- /dev/null
+++ b/samples/scenarios/DocumentProcessingOnAKS/infra/app/dts.bicep
@@ -0,0 +1,27 @@
+param ipAllowlist array
+param location string
+param tags object = {}
+param name string
+param taskhubname string
+param skuName string
+
+resource dts 'Microsoft.DurableTask/schedulers@2025-11-01' = {
+ location: location
+ tags: tags
+ name: name
+ properties: {
+ ipAllowlist: ipAllowlist
+ sku: {
+ name: skuName
+ }
+ }
+}
+
+resource taskhub 'Microsoft.DurableTask/schedulers/taskhubs@2025-11-01' = {
+ parent: dts
+ name: taskhubname
+}
+
+output dts_NAME string = dts.name
+output dts_URL string = dts.properties.endpoint
+output TASKHUB_NAME string = taskhub.name
diff --git a/samples/scenarios/DocumentProcessingOnAKS/infra/app/federated-identity.bicep b/samples/scenarios/DocumentProcessingOnAKS/infra/app/federated-identity.bicep
new file mode 100644
index 0000000..18cadea
--- /dev/null
+++ b/samples/scenarios/DocumentProcessingOnAKS/infra/app/federated-identity.bicep
@@ -0,0 +1,34 @@
+metadata description = 'Creates a federated identity credential for AKS workload identity.'
+
+@description('The name of the user-assigned managed identity')
+param identityName string
+
+@description('The name of the federated credential')
+param federatedCredentialName string
+
+@description('The OIDC issuer URL from the AKS cluster')
+param oidcIssuerUrl string
+
+@description('The Kubernetes namespace for the service account')
+param serviceAccountNamespace string = 'default'
+
+@description('The Kubernetes service account name')
+param serviceAccountName string
+
+resource identity 'Microsoft.ManagedIdentity/userAssignedIdentities@2023-01-31' existing = {
+ name: identityName
+}
+
+// Federated identity credential binds a Kubernetes service account to the
+// user-assigned managed identity, enabling pods to authenticate as that identity.
+resource federatedCredential 'Microsoft.ManagedIdentity/userAssignedIdentities/federatedIdentityCredentials@2024-11-30' = {
+ parent: identity
+ name: federatedCredentialName
+ properties: {
+ issuer: oidcIssuerUrl
+ subject: 'system:serviceaccount:${serviceAccountNamespace}:${serviceAccountName}'
+ audiences: [
+ 'api://AzureADTokenExchange'
+ ]
+ }
+}
diff --git a/samples/scenarios/DocumentProcessingOnAKS/infra/app/user-assigned-identity.bicep b/samples/scenarios/DocumentProcessingOnAKS/infra/app/user-assigned-identity.bicep
new file mode 100644
index 0000000..0583ab8
--- /dev/null
+++ b/samples/scenarios/DocumentProcessingOnAKS/infra/app/user-assigned-identity.bicep
@@ -0,0 +1,17 @@
+metadata description = 'Creates a Microsoft Entra user-assigned identity.'
+
+param name string
+param location string = resourceGroup().location
+param tags object = {}
+
+resource identity 'Microsoft.ManagedIdentity/userAssignedIdentities@2023-01-31' = {
+ name: name
+ location: location
+ tags: tags
+}
+
+output name string = identity.name
+output resourceId string = identity.id
+output principalId string = identity.properties.principalId
+output clientId string = identity.properties.clientId
+output tenantId string = identity.properties.tenantId
diff --git a/samples/scenarios/DocumentProcessingOnAKS/infra/core/host/aks-cluster.bicep b/samples/scenarios/DocumentProcessingOnAKS/infra/core/host/aks-cluster.bicep
new file mode 100644
index 0000000..d2a767e
--- /dev/null
+++ b/samples/scenarios/DocumentProcessingOnAKS/infra/core/host/aks-cluster.bicep
@@ -0,0 +1,94 @@
+metadata description = 'Creates an Azure Kubernetes Service (AKS) cluster.'
+
+@description('The name of the AKS cluster')
+param name string
+
+@description('The Azure region for the AKS cluster')
+param location string = resourceGroup().location
+
+@description('Tags to apply to the AKS cluster')
+param tags object = {}
+
+@description('The Kubernetes version for the AKS cluster')
+param kubernetesVersion string = '1.32'
+
+@description('The VM size for the default node pool')
+param agentVMSize string = 'standard_d4s_v5'
+
+@description('The number of nodes in the default node pool')
+param agentCount int = 2
+
+@description('The minimum number of nodes for autoscaling')
+param agentMinCount int = 1
+
+@description('The maximum number of nodes for autoscaling')
+param agentMaxCount int = 5
+
+@description('The subnet resource ID for the AKS nodes')
+param subnetId string = ''
+
+@description('The name of the container registry to attach')
+param containerRegistryName string = ''
+
+@description('Enable OIDC issuer for workload identity')
+param enableOidcIssuer bool = true
+
+@description('Enable workload identity')
+param enableWorkloadIdentity bool = true
+
+// AKS cluster with workload identity and OIDC issuer enabled
+resource aksCluster 'Microsoft.ContainerService/managedClusters@2024-09-01' = {
+ name: name
+ location: location
+ tags: tags
+ identity: {
+ type: 'SystemAssigned'
+ }
+ properties: {
+ kubernetesVersion: kubernetesVersion
+ dnsPrefix: name
+ enableRBAC: true
+ agentPoolProfiles: [
+ {
+ name: 'system'
+ count: agentCount
+ vmSize: agentVMSize
+ mode: 'System'
+ osType: 'Linux'
+ osSKU: 'AzureLinux'
+ enableAutoScaling: true
+ minCount: agentMinCount
+ maxCount: agentMaxCount
+ vnetSubnetID: !empty(subnetId) ? subnetId : null
+ }
+ ]
+ networkProfile: {
+ networkPlugin: 'azure'
+ networkPolicy: 'azure'
+ serviceCidr: '10.1.0.0/16'
+ dnsServiceIP: '10.1.0.10'
+ }
+ oidcIssuerProfile: {
+ enabled: enableOidcIssuer
+ }
+ securityProfile: {
+ workloadIdentity: {
+ enabled: enableWorkloadIdentity
+ }
+ }
+ }
+}
+
+// Grant AKS kubelet identity AcrPull access to the container registry
+module registryAccess '../security/registry-access.bicep' = if (!empty(containerRegistryName)) {
+ name: 'aks-registry-access'
+ params: {
+ containerRegistryName: containerRegistryName
+ principalId: aksCluster.properties.identityProfile.kubeletidentity.objectId
+ }
+}
+
+output clusterName string = aksCluster.name
+output clusterId string = aksCluster.id
+output oidcIssuerUrl string = aksCluster.properties.oidcIssuerProfile.issuerURL
+output kubeletIdentityObjectId string = aksCluster.properties.identityProfile.kubeletidentity.objectId
diff --git a/samples/scenarios/DocumentProcessingOnAKS/infra/core/host/container-registry.bicep b/samples/scenarios/DocumentProcessingOnAKS/infra/core/host/container-registry.bicep
new file mode 100644
index 0000000..4cd2445
--- /dev/null
+++ b/samples/scenarios/DocumentProcessingOnAKS/infra/core/host/container-registry.bicep
@@ -0,0 +1,59 @@
+metadata description = 'Creates an Azure Container Registry.'
+param name string
+param location string = resourceGroup().location
+param tags object = {}
+
+@description('Indicates whether admin user is enabled')
+param adminUserEnabled bool = false
+
+@description('Indicates whether anonymous pull is enabled')
+param anonymousPullEnabled bool = false
+
+@description('SKU settings')
+param sku object = {
+ name: 'Standard'
+}
+
+@description('The log analytics workspace ID used for logging and monitoring')
+param workspaceId string = ''
+
+resource containerRegistry 'Microsoft.ContainerRegistry/registries@2023-11-01-preview' = {
+ name: name
+ location: location
+ tags: tags
+ sku: sku
+ properties: {
+ adminUserEnabled: adminUserEnabled
+ anonymousPullEnabled: anonymousPullEnabled
+ publicNetworkAccess: 'Enabled'
+ }
+}
+
+resource diagnostics 'Microsoft.Insights/diagnosticSettings@2021-05-01-preview' = if (!empty(workspaceId)) {
+ name: 'registry-diagnostics'
+ scope: containerRegistry
+ properties: {
+ workspaceId: workspaceId
+ logs: [
+ {
+ category: 'ContainerRegistryRepositoryEvents'
+ enabled: true
+ }
+ {
+ category: 'ContainerRegistryLoginEvents'
+ enabled: true
+ }
+ ]
+ metrics: [
+ {
+ category: 'AllMetrics'
+ enabled: true
+ timeGrain: 'PT1M'
+ }
+ ]
+ }
+}
+
+output id string = containerRegistry.id
+output loginServer string = containerRegistry.properties.loginServer
+output name string = containerRegistry.name
diff --git a/samples/scenarios/DocumentProcessingOnAKS/infra/core/networking/vnet.bicep b/samples/scenarios/DocumentProcessingOnAKS/infra/core/networking/vnet.bicep
new file mode 100644
index 0000000..46764a8
--- /dev/null
+++ b/samples/scenarios/DocumentProcessingOnAKS/infra/core/networking/vnet.bicep
@@ -0,0 +1,40 @@
+@description('The name of the Virtual Network')
+param name string
+
+@description('The Azure region where the Virtual Network should exist')
+param location string = resourceGroup().location
+
+@description('Optional tags for the resources')
+param tags object = {}
+
+@description('The address prefixes of the Virtual Network')
+param addressPrefixes array = ['10.0.0.0/16']
+
+@description('The subnets to create in the Virtual Network')
+param subnets array = [
+ {
+ name: 'aks-subnet'
+ properties: {
+ addressPrefix: '10.0.0.0/21'
+ delegations: []
+ privateEndpointNetworkPolicies: 'Disabled'
+ privateLinkServiceNetworkPolicies: 'Enabled'
+ }
+ }
+]
+
+resource vnet 'Microsoft.Network/virtualNetworks@2023-11-01' = {
+ name: name
+ location: location
+ tags: tags
+ properties: {
+ addressSpace: {
+ addressPrefixes: addressPrefixes
+ }
+ subnets: subnets
+ }
+}
+
+output id string = vnet.id
+output name string = vnet.name
+output aksSubnetId string = resourceId('Microsoft.Network/virtualNetworks/subnets', name, 'aks-subnet')
diff --git a/samples/scenarios/DocumentProcessingOnAKS/infra/core/security/registry-access.bicep b/samples/scenarios/DocumentProcessingOnAKS/infra/core/security/registry-access.bicep
new file mode 100644
index 0000000..f977b9f
--- /dev/null
+++ b/samples/scenarios/DocumentProcessingOnAKS/infra/core/security/registry-access.bicep
@@ -0,0 +1,19 @@
+metadata description = 'Assigns ACR Pull permissions to access an Azure Container Registry.'
+param containerRegistryName string
+param principalId string
+
+var acrPullRole = subscriptionResourceId('Microsoft.Authorization/roleDefinitions', '7f951dda-4ed3-4680-a7ca-43fe172d538d')
+
+resource aksAcrPull 'Microsoft.Authorization/roleAssignments@2022-04-01' = {
+ scope: containerRegistry // Use when specifying a scope that is different than the deployment scope
+ name: guid(subscription().id, resourceGroup().id, principalId, acrPullRole)
+ properties: {
+ roleDefinitionId: acrPullRole
+ principalType: 'ServicePrincipal'
+ principalId: principalId
+ }
+}
+
+resource containerRegistry 'Microsoft.ContainerRegistry/registries@2023-11-01-preview' existing = {
+ name: containerRegistryName
+}
diff --git a/samples/scenarios/DocumentProcessingOnAKS/infra/core/security/role.bicep b/samples/scenarios/DocumentProcessingOnAKS/infra/core/security/role.bicep
new file mode 100644
index 0000000..0b30cfd
--- /dev/null
+++ b/samples/scenarios/DocumentProcessingOnAKS/infra/core/security/role.bicep
@@ -0,0 +1,21 @@
+metadata description = 'Creates a role assignment for a service principal.'
+param principalId string
+
+@allowed([
+ 'Device'
+ 'ForeignGroup'
+ 'Group'
+ 'ServicePrincipal'
+ 'User'
+])
+param principalType string = 'ServicePrincipal'
+param roleDefinitionId string
+
+resource role 'Microsoft.Authorization/roleAssignments@2022-04-01' = {
+ name: guid(subscription().id, resourceGroup().id, principalId, roleDefinitionId)
+ properties: {
+ principalId: principalId
+ principalType: principalType
+    roleDefinitionId: subscriptionResourceId('Microsoft.Authorization/roleDefinitions', roleDefinitionId)
+ }
+}
diff --git a/samples/scenarios/DocumentProcessingOnAKS/infra/main.bicep b/samples/scenarios/DocumentProcessingOnAKS/infra/main.bicep
new file mode 100644
index 0000000..9533e7c
--- /dev/null
+++ b/samples/scenarios/DocumentProcessingOnAKS/infra/main.bicep
@@ -0,0 +1,216 @@
+targetScope = 'subscription'
+
+// Main Bicep module to provision Azure resources for the Durable Task Scheduler on AKS.
+// Adapted from the AutoscalingInACA sample to deploy to Azure Kubernetes Service.
+// This deployment does NOT include autoscaling support (no KEDA custom scaler).
+
+@minLength(1)
+@maxLength(64)
+@description('Name of the environment which is used to generate a short unique hash used in all resources.')
+param environmentName string
+
+@minLength(1)
+@description('Primary location for all resources')
+param location string
+
+@description('Id of the user or app to assign application roles')
+param principalId string = ''
+
+// AKS parameters
+param aksClusterName string = ''
+param kubernetesVersion string = '1.32'
+param aksVmSize string = 'standard_d4s_v5'
+param aksNodeCount int = 2
+
+// Container registry parameters
+param containerRegistryName string = ''
+
+// Durable Task Scheduler parameters
+param dtsLocation string = location
+param dtsSkuName string = 'Consumption'
+param dtsName string = ''
+param taskHubName string = ''
+
+// Service names (must match azure.yaml service names)
+param clientsServiceName string = 'client'
+param workerServiceName string = 'worker'
+
+// Optional resource group name override
+param resourceGroupName string = ''
+
+var abbrs = loadJsonContent('./abbreviations.json')
+
+// Tags applied to all resources
+var tags = {
+ 'azd-env-name': environmentName
+}
+
+// Generate a unique token for resource naming
+// Unique token used to build globally-unique resource names
+var resourceToken = toLower(uniqueString(subscription().id, environmentName, location))
+
+// Organize resources in a resource group
+resource rg 'Microsoft.Resources/resourceGroups@2021-04-01' = {
+ name: !empty(resourceGroupName) ? resourceGroupName : '${abbrs.resourcesResourceGroups}${environmentName}'
+ location: location
+ tags: tags
+}
+
+// ============================
+// Identity
+// ============================
+
+// User-assigned managed identity for the workloads to authenticate to DTS
+module identity './app/user-assigned-identity.bicep' = {
+ scope: rg
+ params: {
+ name: 'dts-aks-identity'
+ }
+}
+
+// Grant the managed identity the Durable Task Scheduler Contributor role
+// Role ID: 0ad04412-c4d5-4796-b79c-f76d14c8d402
+module identityAssignDTS './core/security/role.bicep' = {
+ name: 'identityAssignDTS'
+ scope: rg
+ params: {
+ principalId: identity.outputs.principalId
+ roleDefinitionId: '0ad04412-c4d5-4796-b79c-f76d14c8d402'
+ principalType: 'ServicePrincipal'
+ }
+}
+
+// Grant the deploying user/principal the DTS role for dashboard access
+module identityAssignDTSDash './core/security/role.bicep' = if (!empty(principalId)) {
+ name: 'identityAssignDTSDash'
+ scope: rg
+ params: {
+ principalId: principalId
+ roleDefinitionId: '0ad04412-c4d5-4796-b79c-f76d14c8d402'
+ principalType: 'User'
+ }
+}
+
+// ============================
+// Networking
+// ============================
+
+module vnet './core/networking/vnet.bicep' = {
+ scope: rg
+ params: {
+ name: '${abbrs.networkVirtualNetworks}${resourceToken}'
+ location: location
+ tags: tags
+ }
+}
+
+// ============================
+// Container Registry
+// ============================
+
+module containerRegistry './core/host/container-registry.bicep' = {
+ name: 'container-registry'
+ scope: rg
+ params: {
+ name: !empty(containerRegistryName) ? containerRegistryName : '${abbrs.containerRegistryRegistries}${resourceToken}'
+ location: location
+ tags: tags
+ sku: {
+ name: 'Standard'
+ }
+ anonymousPullEnabled: false
+ }
+}
+
+// ============================
+// AKS Cluster
+// ============================
+
+module aksCluster './core/host/aks-cluster.bicep' = {
+ name: 'aks-cluster'
+ scope: rg
+ params: {
+ name: !empty(aksClusterName) ? aksClusterName : '${abbrs.containerServiceManagedClusters}${resourceToken}'
+ location: location
+ tags: tags
+ kubernetesVersion: kubernetesVersion
+ agentVMSize: aksVmSize
+ agentCount: aksNodeCount
+ subnetId: vnet.outputs.aksSubnetId
+ containerRegistryName: containerRegistry.outputs.name
+ }
+}
+
+// ============================
+// Workload Identity Federation
+// ============================
+
+// Create federated identity credential so that the Kubernetes service account
+// can authenticate as the user-assigned managed identity (for DTS access).
+module federatedIdentityClient './app/federated-identity.bicep' = {
+ name: 'federated-identity-client'
+ scope: rg
+ params: {
+ identityName: identity.outputs.name
+ federatedCredentialName: 'fed-${clientsServiceName}'
+ oidcIssuerUrl: aksCluster.outputs.oidcIssuerUrl
+ serviceAccountNamespace: 'default'
+ serviceAccountName: clientsServiceName
+ }
+}
+
+module federatedIdentityWorker './app/federated-identity.bicep' = {
+ name: 'federated-identity-worker'
+ scope: rg
+ dependsOn: [federatedIdentityClient] // Serialize to avoid concurrent FIC write errors on the same identity
+ params: {
+ identityName: identity.outputs.name
+ federatedCredentialName: 'fed-${workerServiceName}'
+ oidcIssuerUrl: aksCluster.outputs.oidcIssuerUrl
+ serviceAccountNamespace: 'default'
+ serviceAccountName: workerServiceName
+ }
+}
+
+// ============================
+// Durable Task Scheduler
+// ============================
+
+module dts './app/dts.bicep' = {
+ scope: rg
+ name: 'dtsResource'
+ params: {
+ name: !empty(dtsName) ? dtsName : '${abbrs.dts}${resourceToken}'
+ taskhubname: !empty(taskHubName) ? taskHubName : '${abbrs.taskhub}${resourceToken}'
+ location: dtsLocation
+ tags: tags
+    ipAllowlist: [
+      '0.0.0.0/0' // NOTE: allows all source IPs for sample simplicity — restrict this range in production
+    ]
+ skuName: dtsSkuName
+ }
+}
+
+// ============================
+// Outputs
+// ============================
+
+// Outputs are automatically saved in the local azd environment .env file.
+// To see these outputs, run `azd env get-values`.
+output AZURE_LOCATION string = location
+output AZURE_TENANT_ID string = tenant().tenantId
+
+// Container registry outputs
+output AZURE_CONTAINER_REGISTRY_ENDPOINT string = containerRegistry.outputs.loginServer
+output AZURE_CONTAINER_REGISTRY_NAME string = containerRegistry.outputs.name
+
+// AKS outputs (used by azd for deployment)
+output AZURE_AKS_CLUSTER_NAME string = aksCluster.outputs.clusterName
+
+// Identity outputs (used in Kubernetes manifests)
+output AZURE_USER_ASSIGNED_IDENTITY_NAME string = identity.outputs.name
+output AZURE_USER_ASSIGNED_IDENTITY_CLIENT_ID string = identity.outputs.clientId
+
+// DTS outputs (used in Kubernetes manifests via env substitution)
+output DTS_ENDPOINT string = dts.outputs.dts_URL
+output DTS_TASKHUB_NAME string = dts.outputs.TASKHUB_NAME
diff --git a/samples/scenarios/DocumentProcessingOnAKS/infra/main.parameters.json b/samples/scenarios/DocumentProcessingOnAKS/infra/main.parameters.json
new file mode 100644
index 0000000..c8d3453
--- /dev/null
+++ b/samples/scenarios/DocumentProcessingOnAKS/infra/main.parameters.json
@@ -0,0 +1,15 @@
+{
+ "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentParameters.json#",
+ "contentVersion": "1.0.0.0",
+ "parameters": {
+ "environmentName": {
+ "value": "${AZURE_ENV_NAME}"
+ },
+ "location": {
+ "value": "${AZURE_LOCATION}"
+ },
+ "principalId": {
+ "value": "${AZURE_PRINCIPAL_ID}"
+ }
+ }
+}
diff --git a/samples/scenarios/DocumentProcessingOnAKS/scripts/acr-build.sh b/samples/scenarios/DocumentProcessingOnAKS/scripts/acr-build.sh
new file mode 100755
index 0000000..009d819
--- /dev/null
+++ b/samples/scenarios/DocumentProcessingOnAKS/scripts/acr-build.sh
@@ -0,0 +1,34 @@
+#!/usr/bin/env bash
+# Builds Docker images server-side using ACR Tasks (az acr build).
+# This avoids local Docker networking issues and doesn't require Docker Desktop.
+# Called by azd as a predeploy hook.
+
+set -euo pipefail
+
+REGISTRY="${AZURE_CONTAINER_REGISTRY_NAME:?AZURE_CONTAINER_REGISTRY_NAME must be set}"
+REGISTRY_ENDPOINT="${AZURE_CONTAINER_REGISTRY_ENDPOINT:?AZURE_CONTAINER_REGISTRY_ENDPOINT must be set}"
+TAG="azd-deploy-$(date +%s)"
+
+build_and_set() {
+ local service_name="$1" # e.g. client, worker
+ local project_dir="$2" # e.g. ./Client
+    local image_repo="durable-task-on-aks/${service_name}-${AZURE_ENV_NAME:?AZURE_ENV_NAME must be set}"
+ local full_image="${REGISTRY_ENDPOINT}/${image_repo}:${TAG}"
+
+ echo "==> Building ${service_name} via ACR Tasks (${image_repo}:${TAG})..."
+ az acr build \
+ --registry "${REGISTRY}" \
+ --image "${image_repo}:${TAG}" \
+ "${project_dir}" \
+ --no-logs
+
+ # Persist the image name so the .tmpl.yaml manifest can reference it
+ local env_var="SERVICE_$(echo "${service_name}" | tr '[:lower:]' '[:upper:]')_IMAGE_NAME"
+ azd env set "${env_var}" "${full_image}"
+ echo "==> Set ${env_var}=${full_image}"
+}
+
+build_and_set "client" "./Client"
+build_and_set "worker" "./Worker"
+
+echo "==> All images built and pushed to ${REGISTRY_ENDPOINT}"
diff --git a/samples/scenarios/ECommerceOrder/README.md b/samples/scenarios/ECommerceOrder/README.md
deleted file mode 100644
index 4efef25..0000000
--- a/samples/scenarios/ECommerceOrder/README.md
+++ /dev/null
@@ -1,56 +0,0 @@
-# E-Commerce Order Processing
-
-Multi-language Scenario
-
-## Overview
-
-This scenario demonstrates a complete e-commerce order processing workflow using durable orchestrations with the Saga pattern for distributed transaction management.
-
-### Workflow
-
-```
-Customer places order
- │
- ├─→ 1. Validate Order
- │ └─→ Check inventory, validate payment method
- │
- ├─→ 2. Reserve Inventory
- │ └─→ Decrement stock (compensate: release inventory)
- │
- ├─→ 3. Process Payment
- │ └─→ Charge customer (compensate: refund payment)
- │
- ├─→ 4. Create Shipment
- │ └─→ Generate shipping label (compensate: cancel shipment)
- │
- └─→ 5. Send Confirmation
- └─→ Email + push notification
-```
-
-If any step fails, all previous steps are automatically compensated (rolled back) in reverse order.
-
-### Why Durable Execution?
-
-Without durable execution, a failure during payment processing could leave inventory reserved but never purchased, or a charge applied without a shipment created. Durable orchestrations ensure:
-
-- **Atomicity** — Either all steps complete or all are compensated
-- **Visibility** — Full execution history viewable in the dashboard
-- **Recoverability** — If the process crashes mid-flight, it resumes from the last checkpoint
-
-## Implementations
-
-| Language | Framework | Sample |
-|----------|-----------|--------|
-| .NET | Durable Functions | [Saga Pattern](../../durable-functions/dotnet/Saga/) |
-| .NET | Durable Functions | [Order Processor](../../durable-functions/dotnet/OrderProcessor/) |
-
-## Related Patterns
-
-- [Function Chaining](../../durable-task-sdks/dotnet/FunctionChaining/) — Sequential activity execution
-- [Human Interaction](../../durable-task-sdks/dotnet/HumanInteraction/) — Add approval steps to your workflow
-- [Saga Pattern documentation →](https://learn.microsoft.com/azure/architecture/reference-architectures/saga/saga)
-
-## Learn More
-
-- [Durable Task Scheduler Documentation](https://aka.ms/dts-documentation)
-- [Durable Functions Patterns](https://learn.microsoft.com/azure/azure-functions/durable/durable-functions-overview?tabs=csharp#application-patterns)