From b2199873ebeab1cbdfbfcc994d0fc31e8e6ab373 Mon Sep 17 00:00:00 2001 From: salaboy Date: Thu, 4 Dec 2025 14:52:40 +0100 Subject: [PATCH 01/18] initial workflow dashboard config Signed-off-by: salaboy --- .../workflows/patterns/pom.xml | 6 ++ .../wfp/DaprTestContainersConfig.java | 47 +++++++++- .../WorkflowDashboardContainer.java | 94 +++++++++++++++++++ 3 files changed, 142 insertions(+), 5 deletions(-) create mode 100644 testcontainers-dapr/src/main/java/io/dapr/testcontainers/WorkflowDashboardContainer.java diff --git a/spring-boot-examples/workflows/patterns/pom.xml b/spring-boot-examples/workflows/patterns/pom.xml index 5aa157ed3..4c5dfae72 100644 --- a/spring-boot-examples/workflows/patterns/pom.xml +++ b/spring-boot-examples/workflows/patterns/pom.xml @@ -47,6 +47,12 @@ microcks-testcontainers test + + org.testcontainers + testcontainers-postgresql + 2.0.1 + test + diff --git a/spring-boot-examples/workflows/patterns/src/test/java/io/dapr/springboot/examples/wfp/DaprTestContainersConfig.java b/spring-boot-examples/workflows/patterns/src/test/java/io/dapr/springboot/examples/wfp/DaprTestContainersConfig.java index a9ca48bfb..b49200fb4 100644 --- a/spring-boot-examples/workflows/patterns/src/test/java/io/dapr/springboot/examples/wfp/DaprTestContainersConfig.java +++ b/spring-boot-examples/workflows/patterns/src/test/java/io/dapr/springboot/examples/wfp/DaprTestContainersConfig.java @@ -13,9 +13,10 @@ package io.dapr.springboot.examples.wfp; + import io.dapr.testcontainers.Component; import io.dapr.testcontainers.DaprContainer; -import io.dapr.testcontainers.DaprLogLevel; +import io.dapr.testcontainers.WorkflowDashboardContainer; import io.github.microcks.testcontainers.MicrocksContainersEnsemble; import org.junit.runner.Description; import org.junit.runners.model.Statement; @@ -26,9 +27,12 @@ import org.springframework.test.context.DynamicPropertyRegistrar; import org.testcontainers.DockerClientFactory; import org.testcontainers.containers.Network; +import org.testcontainers.postgresql.PostgreSQLContainer; +import org.testcontainers.utility.DockerImageName; -import java.util.Collections; +import java.util.HashMap; import java.util.List; +import java.util.Map; import static io.dapr.testcontainers.DaprContainerConstants.DAPR_RUNTIME_IMAGE_TAG; @@ -45,19 +49,44 @@ @TestConfiguration(proxyBeanMethods = false) public class DaprTestContainersConfig { + Map postgreSQLDetails = new HashMap<>(); + + {{ + postgreSQLDetails.put("host", "postgresql"); + postgreSQLDetails.put("user", "postgres"); + postgreSQLDetails.put("password", "postgres"); + postgreSQLDetails.put("database", "dapr"); + postgreSQLDetails.put("port", "5432"); + postgreSQLDetails.put("actorStateStore", String.valueOf(true)); + + }} + + private Component stateStoreComponent = new Component("kvstore", + "state.postgresql", "v2", postgreSQLDetails); + @Bean @ServiceConnection - public DaprContainer daprContainer(Network network) { + public DaprContainer daprContainer(Network network, PostgreSQLContainer postgreSQLContainer) { return new DaprContainer(DAPR_RUNTIME_IMAGE_TAG) .withAppName("workflow-patterns-app") - .withComponent(new Component("kvstore", "state.in-memory", "v1", Collections.singletonMap("actorStateStore", String.valueOf(true)))) + .withComponent(stateStoreComponent) .withAppPort(8080) .withNetwork(network) .withAppHealthCheckPath("/actuator/health") - .withAppChannelAddress("host.testcontainers.internal"); + .withAppChannelAddress("host.testcontainers.internal") + .dependsOn(postgreSQLContainer); } + @Bean + public 
PostgreSQLContainer postgreSQLContainer(Network network) { + return new PostgreSQLContainer(DockerImageName.parse("postgres")) + .withNetworkAliases("postgresql") + .withDatabaseName("dapr") + .withUsername("postgres") + .withPassword("postgres") + .withNetwork(network); + } @Bean MicrocksContainersEnsemble microcksEnsemble(Network network) { @@ -66,6 +95,14 @@ MicrocksContainersEnsemble microcksEnsemble(Network network) { .withMainArtifacts("third-parties/remote-http-service.yaml"); } + @Bean + public WorkflowDashboardContainer workflowDashboard(Network network) { + return new WorkflowDashboardContainer(WorkflowDashboardContainer.getDefaultImageName()) + .withNetwork(network) + .withStateStoreComponent(stateStoreComponent) + .withExposedPorts(8080); + } + @Bean public DynamicPropertyRegistrar endpointsProperties(MicrocksContainersEnsemble ensemble) { // We need to replace the default endpoints with those provided by Microcks. diff --git a/testcontainers-dapr/src/main/java/io/dapr/testcontainers/WorkflowDashboardContainer.java b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/WorkflowDashboardContainer.java new file mode 100644 index 000000000..79097c3e3 --- /dev/null +++ b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/WorkflowDashboardContainer.java @@ -0,0 +1,94 @@ +/* + * Copyright 2024 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.testcontainers; + +import io.dapr.testcontainers.converter.ComponentYamlConverter; +import io.dapr.testcontainers.converter.YamlConverter; +import io.dapr.testcontainers.converter.YamlMapperFactory; +import org.testcontainers.containers.GenericContainer; +import org.testcontainers.images.builder.Transferable; +import org.testcontainers.utility.DockerImageName; +import org.yaml.snakeyaml.Yaml; + +/** + * Test container for Dapr Workflow Dashboard. + */ +public class WorkflowDashboardContainer extends GenericContainer { + + private static final Yaml YAML_MAPPER = YamlMapperFactory.create(); + private static final YamlConverter COMPONENT_CONVERTER = new ComponentYamlConverter(YAML_MAPPER); + private static final DockerImageName DEFAULT_IMAGE_NAME = DockerImageName + .parse("public.ecr.aws/diagrid-dev/diagrid-dashboard:latest"); + private int dashboardPort = 8080; + private Component stateStoreComponent; + + /** + * Creates a new Dapr Workflow Dashboard container. + * @param dockerImageName Docker image name. + */ + public WorkflowDashboardContainer(DockerImageName dockerImageName) { + super(dockerImageName); + dockerImageName.assertCompatibleWith(DEFAULT_IMAGE_NAME); + withExposedPorts(dashboardPort); + } + + public WorkflowDashboardContainer withStateStoreComponent(Component stateStoreComponent) { + this.stateStoreComponent = stateStoreComponent; + return this; + } + + /** + * Creates a new Dapr Workflow Dashboard container. + * @param image Docker image name.
+ */ + public WorkflowDashboardContainer(String image) { + this(DockerImageName.parse(image)); + } + + @Override + protected void configure() { + super.configure(); + if (stateStoreComponent != null) { + String componentYaml = COMPONENT_CONVERTER.convert(stateStoreComponent); + withCopyToContainer(Transferable.of(componentYaml), "/app/components/" + stateStoreComponent.getName() + ".yaml"); + } + + withEnv("COMPONENT_FILE", "/app/components/" + stateStoreComponent.getName() + ".yaml"); + + } + + public static DockerImageName getDefaultImageName() { + return DEFAULT_IMAGE_NAME; + } + + public WorkflowDashboardContainer withPort(Integer port) { + this.dashboardPort = port; + return this; + } + + public int getPort() { + return dashboardPort; + } + + // Required by spotbugs plugin + @Override + public boolean equals(Object o) { + return super.equals(o); + } + + @Override + public int hashCode() { + return super.hashCode(); + } +} From 429926b11a1157b42ee18a615e28397e4b9203d8 Mon Sep 17 00:00:00 2001 From: salaboy Date: Fri, 5 Dec 2025 09:23:16 +0100 Subject: [PATCH 02/18] adding test for dashboard container Signed-off-by: salaboy --- .../WorkflowDashboardContainer.java | 2 +- .../DaprWorkflowDashboardTest.java | 25 +++++++++++++++++++ 2 files changed, 26 insertions(+), 1 deletion(-) create mode 100644 testcontainers-dapr/src/test/java/io/dapr/testcontainers/DaprWorkflowDashboardTest.java diff --git a/testcontainers-dapr/src/main/java/io/dapr/testcontainers/WorkflowDashboardContainer.java b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/WorkflowDashboardContainer.java index 79097c3e3..d71d9974e 100644 --- a/testcontainers-dapr/src/main/java/io/dapr/testcontainers/WorkflowDashboardContainer.java +++ b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/WorkflowDashboardContainer.java @@ -28,7 +28,7 @@ public class WorkflowDashboardContainer extends GenericContainer COMPONENT_CONVERTER = new ComponentYamlConverter(YAML_MAPPER); - private static final DockerImageName DEFAULT_IMAGE_NAME = DockerImageName + public static final DockerImageName DEFAULT_IMAGE_NAME = DockerImageName .parse("public.ecr.aws/diagrid-dev/diagrid-dashboard:latest"); private int dashboardPort = 8080; private Component stateStoreComponent; diff --git a/testcontainers-dapr/src/test/java/io/dapr/testcontainers/DaprWorkflowDashboardTest.java b/testcontainers-dapr/src/test/java/io/dapr/testcontainers/DaprWorkflowDashboardTest.java new file mode 100644 index 000000000..72fb55fbd --- /dev/null +++ b/testcontainers-dapr/src/test/java/io/dapr/testcontainers/DaprWorkflowDashboardTest.java @@ -0,0 +1,25 @@ +package io.dapr.testcontainers; + +import org.junit.jupiter.api.Test; + +import java.util.Collections; + +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class DaprWorkflowDashboardTest { + + @Test + public void dashboardTest() { + Component stateStoreComponent = new Component("kvstore", + "state.in-memory", "v1", Collections.singletonMap("actorStateStore", "true")); + try (WorkflowDashboardContainer dashboard = + new WorkflowDashboardContainer(WorkflowDashboardContainer.DEFAULT_IMAGE_NAME) + .withStateStoreComponent(stateStoreComponent)) { + dashboard.configure(); + assertNotNull(dashboard.getEnvMap().get("COMPONENT_FILE")); + assertFalse(dashboard.getEnvMap().get("COMPONENT_FILE").isEmpty()); + } + } +} From 52a6c78b0f669f8577266eb2414e68dd1507df2f Mon Sep 17 00:00:00 2001 
From: salaboy Date: Fri, 5 Dec 2025 09:33:13 +0100 Subject: [PATCH 03/18] adding URL to output Signed-off-by: salaboy --- .../testcontainers/WorkflowDashboardContainer.java | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/testcontainers-dapr/src/main/java/io/dapr/testcontainers/WorkflowDashboardContainer.java b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/WorkflowDashboardContainer.java index d71d9974e..97783c1bb 100644 --- a/testcontainers-dapr/src/main/java/io/dapr/testcontainers/WorkflowDashboardContainer.java +++ b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/WorkflowDashboardContainer.java @@ -16,6 +16,8 @@ import io.dapr.testcontainers.converter.ComponentYamlConverter; import io.dapr.testcontainers.converter.YamlConverter; import io.dapr.testcontainers.converter.YamlMapperFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.testcontainers.containers.GenericContainer; import org.testcontainers.images.builder.Transferable; import org.testcontainers.utility.DockerImageName; @@ -25,7 +27,7 @@ * Test container for Dapr Workflow Dashboard. */ public class WorkflowDashboardContainer extends GenericContainer { - + private static final Logger LOGGER = LoggerFactory.getLogger(WorkflowDashboardContainer.class); private static final Yaml YAML_MAPPER = YamlMapperFactory.create(); private static final YamlConverter COMPONENT_CONVERTER = new ComponentYamlConverter(YAML_MAPPER); public static final DockerImageName DEFAULT_IMAGE_NAME = DockerImageName @@ -77,6 +79,14 @@ public WorkflowDashboardContainer withPort(Integer port) { return this; } + @Override + public void start() { + super.start(); + + LOGGER.info("Dapr Workflow Dashboard container started."); + LOGGER.info("Access the Dashboard at: http://localhost:{}", this.getMappedPort(dashboardPort)); + } + public int getPort() { return dashboardPort; } From fcfc69c0ca93ddda3fd04253d9be1e656763e152 Mon Sep 17 00:00:00 2001 From: artur-ciocanu Date: Fri, 5 Dec 2025 10:38:40 +0200 Subject: [PATCH 04/18] Adding a Flux based subscribeToEvents method (#1598) * Adding a Flux based subscribeToEvents method Signed-off-by: Artur Ciocanu * Simplify GRPC stream handling Signed-off-by: Artur Ciocanu * Simplify Javadoc Signed-off-by: Artur Ciocanu * Fix unit tests and simplify implementation Signed-off-by: Artur Ciocanu * Adding event subscriber stream observer to simplify subscription logic Signed-off-by: Artur Ciocanu * Use start() method to start stream subscription Signed-off-by: Artur Ciocanu * Add unit test for event suscriber observer Signed-off-by: Artur Ciocanu * Improve the tests a little bit Signed-off-by: Artur Ciocanu * Remove the unnecessary method Signed-off-by: Artur Ciocanu * Improve error handling and use CloudEvent wrapper Signed-off-by: Artur Ciocanu * Fix unit tests asserts Signed-off-by: Artur Ciocanu * Adjust Java examples for Subscriber Signed-off-by: Artur Ciocanu --------- Signed-off-by: Artur Ciocanu Signed-off-by: salaboy --- .../io/dapr/examples/pubsub/stream/README.md | 30 +- .../examples/pubsub/stream/Subscriber.java | 31 +- .../java/io/dapr/client/DaprClientImpl.java | 37 ++ .../io/dapr/client/DaprPreviewClient.java | 16 +- .../java/io/dapr/client/Subscription.java | 1 + .../io/dapr/client/SubscriptionListener.java | 1 + .../EventSubscriberStreamObserver.java | 223 ++++++++ .../client/DaprPreviewClientGrpcTest.java | 81 +++ .../EventSubscriberStreamObserverTest.java | 506 ++++++++++++++++++ 9 files changed, 887 insertions(+), 39 deletions(-) create 
mode 100644 sdk/src/main/java/io/dapr/internal/subscription/EventSubscriberStreamObserver.java create mode 100644 sdk/src/test/java/io/dapr/internal/subscription/EventSubscriberStreamObserverTest.java diff --git a/examples/src/main/java/io/dapr/examples/pubsub/stream/README.md b/examples/src/main/java/io/dapr/examples/pubsub/stream/README.md index d9d41b375..da3e4e248 100644 --- a/examples/src/main/java/io/dapr/examples/pubsub/stream/README.md +++ b/examples/src/main/java/io/dapr/examples/pubsub/stream/README.md @@ -49,7 +49,7 @@ The subscriber uses the `DaprPreviewClient` interface to use a new feature where The publisher is a simple Java application with a main method that uses the Dapr gRPC Client to publish 10 messages to a specific topic. -In the `Subscriber.java` file, you will find the `Subscriber` class, containing the main method. The main method declares a `DaprPreviewClient` using the `DaprClientBuilder` class. When invoking `subscribeToEvents`, the subscriber provides an implementation of the `SubscriptionListener` interface, receiving a `Subscription` object. The `Subscription` object implements the `Closeable` interface and the `close()` method must be used to stop the subscription. +In the `Subscriber.java` file, you will find the `Subscriber` class, containing the main method. The main method declares a `DaprPreviewClient` using the `DaprClientBuilder` class. When invoking `subscribeToEvents`, the method returns a `Flux<CloudEvent<T>>` that can be processed using reactive operators like `doOnNext()` for event handling and `doOnError()` for error handling. The example uses `blockLast()` to keep the subscriber running indefinitely. For production use cases requiring explicit subscription lifecycle control, you can use `.subscribe()`, which returns a `Disposable` that can be disposed via `disposable.dispose()`.
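As a rough sketch (assuming the same `client`, `PUBSUB_NAME`, and `topicName` used in the full example below), that `Disposable` variant could look like this:

```java
// Minimal sketch of explicit lifecycle control instead of blockLast().
// subscribe() returns a reactor.core.Disposable that cancels the stream when disposed.
Disposable subscription = client.subscribeToEvents(PUBSUB_NAME, topicName, TypeRef.STRING)
    .subscribe(
        event -> System.out.println("Subscriber got: " + event.getData()),
        throwable -> System.out.println("Subscriber got exception: " + throwable.getMessage()));

// ... later, when events are no longer needed:
subscription.dispose();
```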
```java public class Subscriber { @@ -59,25 +59,19 @@ public class Subscriber { public static void main(String[] args) throws Exception { String topicName = getTopicName(args); try (var client = new DaprClientBuilder().buildPreviewClient()) { - var subscription = client.subscribeToEvents( + // Subscribe to events using the Flux-based reactive API + // The stream will emit CloudEvent objects as they arrive + client.subscribeToEvents( PUBSUB_NAME, topicName, - new SubscriptionListener<>() { - - @Override - public Mono onEvent(CloudEvent event) { - System.out.println("Subscriber got: " + event.getData()); - return Mono.just(Status.SUCCESS); - } - - @Override - public void onError(RuntimeException exception) { - System.out.println("Subscriber got exception: " + exception.getMessage()); - } - }, - TypeRef.STRING); - - subscription.awaitTermination(); + TypeRef.STRING) + .doOnNext(event -> { + System.out.println("Subscriber got: " + event.getData()); + }) + .doOnError(throwable -> { + System.out.println("Subscriber got exception: " + throwable.getMessage()); + }) + .blockLast(); // Blocks indefinitely until the stream completes (keeps the subscriber running) } } diff --git a/examples/src/main/java/io/dapr/examples/pubsub/stream/Subscriber.java b/examples/src/main/java/io/dapr/examples/pubsub/stream/Subscriber.java index 31678dce0..763bb436c 100644 --- a/examples/src/main/java/io/dapr/examples/pubsub/stream/Subscriber.java +++ b/examples/src/main/java/io/dapr/examples/pubsub/stream/Subscriber.java @@ -14,10 +14,7 @@ package io.dapr.examples.pubsub.stream; import io.dapr.client.DaprClientBuilder; -import io.dapr.client.SubscriptionListener; -import io.dapr.client.domain.CloudEvent; import io.dapr.utils.TypeRef; -import reactor.core.publisher.Mono; /** * Subscriber using bi-directional gRPC streaming, which does not require an app port. 
@@ -44,25 +41,19 @@ public class Subscriber { public static void main(String[] args) throws Exception { String topicName = getTopicName(args); try (var client = new DaprClientBuilder().buildPreviewClient()) { - var subscription = client.subscribeToEvents( + // Subscribe to events using the Flux-based reactive API + // The stream will emit CloudEvent objects as they arrive + client.subscribeToEvents( PUBSUB_NAME, topicName, - new SubscriptionListener<>() { - - @Override - public Mono onEvent(CloudEvent event) { - System.out.println("Subscriber got: " + event.getData()); - return Mono.just(Status.SUCCESS); - } - - @Override - public void onError(RuntimeException exception) { - System.out.println("Subscriber got exception: " + exception.getMessage()); - } - }, - TypeRef.STRING); - - subscription.awaitTermination(); + TypeRef.STRING) + .doOnNext(event -> { + System.out.println("Subscriber got: " + event.getData()); + }) + .doOnError(throwable -> { + System.out.println("Subscriber got exception: " + throwable.getMessage()); + }) + .blockLast(); // Blocks indefinitely until the stream completes (keeps the subscriber running) } } diff --git a/sdk/src/main/java/io/dapr/client/DaprClientImpl.java b/sdk/src/main/java/io/dapr/client/DaprClientImpl.java index 012921a89..0dfb1b644 100644 --- a/sdk/src/main/java/io/dapr/client/DaprClientImpl.java +++ b/sdk/src/main/java/io/dapr/client/DaprClientImpl.java @@ -91,6 +91,7 @@ import io.dapr.internal.grpc.DaprClientGrpcInterceptors; import io.dapr.internal.resiliency.RetryPolicy; import io.dapr.internal.resiliency.TimeoutPolicy; +import io.dapr.internal.subscription.EventSubscriberStreamObserver; import io.dapr.serializer.DaprObjectSerializer; import io.dapr.serializer.DefaultObjectSerializer; import io.dapr.utils.DefaultContentTypeConverter; @@ -475,6 +476,42 @@ public Subscription subscribeToEvents( return buildSubscription(listener, type, request); } + /** + * {@inheritDoc} + */ + @Override + public Flux> subscribeToEvents(String pubsubName, String topic, TypeRef type) { + DaprProtos.SubscribeTopicEventsRequestInitialAlpha1 initialRequest = + DaprProtos.SubscribeTopicEventsRequestInitialAlpha1.newBuilder() + .setTopic(topic) + .setPubsubName(pubsubName) + .build(); + DaprProtos.SubscribeTopicEventsRequestAlpha1 request = + DaprProtos.SubscribeTopicEventsRequestAlpha1.newBuilder() + .setInitialRequest(initialRequest) + .build(); + + return Flux.create(sink -> { + DaprGrpc.DaprStub interceptedStub = this.grpcInterceptors.intercept(this.asyncStub); + EventSubscriberStreamObserver eventSubscriber = new EventSubscriberStreamObserver<>( + interceptedStub, + sink, + type, + this.objectSerializer + ); + StreamObserver requestStream = eventSubscriber.start(request); + + // Cleanup when Flux is cancelled or completed + sink.onDispose(() -> { + try { + requestStream.onCompleted(); + } catch (Exception e) { + logger.debug("Completing the subscription stream resulted in an error: {}", e.getMessage()); + } + }); + }, FluxSink.OverflowStrategy.BUFFER); + } + @Nonnull private Subscription buildSubscription( SubscriptionListener listener, diff --git a/sdk/src/main/java/io/dapr/client/DaprPreviewClient.java b/sdk/src/main/java/io/dapr/client/DaprPreviewClient.java index 92c6a61c3..545b8e5dc 100644 --- a/sdk/src/main/java/io/dapr/client/DaprPreviewClient.java +++ b/sdk/src/main/java/io/dapr/client/DaprPreviewClient.java @@ -17,6 +17,7 @@ import io.dapr.client.domain.BulkPublishRequest; import io.dapr.client.domain.BulkPublishResponse; import 
io.dapr.client.domain.BulkPublishResponseFailedEntry; +import io.dapr.client.domain.CloudEvent; import io.dapr.client.domain.ConversationRequest; import io.dapr.client.domain.ConversationRequestAlpha2; import io.dapr.client.domain.ConversationResponse; @@ -32,6 +33,7 @@ import io.dapr.client.domain.UnlockResponseStatus; import io.dapr.client.domain.query.Query; import io.dapr.utils.TypeRef; +import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; import java.util.List; @@ -271,12 +273,24 @@ Mono> publishEvents(String pubsubName, String topicNa * @param topic Name of the topic to subscribe to. * @param listener Callback methods to process events. * @param type Type for object deserialization. - * @return An active subscription. * @param Type of object deserialization. + * @return An active subscription. + * @deprecated Use {@link #subscribeToEvents(String, String, TypeRef)} instead for a more reactive approach. */ + @Deprecated Subscription subscribeToEvents( String pubsubName, String topic, SubscriptionListener listener, TypeRef type); + /** + * Subscribe to pubsub events via streaming using Project Reactor Flux. + * @param pubsubName Name of the pubsub component. + * @param topic Name of the topic to subscribe to. + * @param type Type for object deserialization. + * @return A Flux of CloudEvents containing deserialized event payloads and metadata. + * @param Type of the event payload. + */ + Flux> subscribeToEvents(String pubsubName, String topic, TypeRef type); + /** * Schedules a job using the provided job request details. * diff --git a/sdk/src/main/java/io/dapr/client/Subscription.java b/sdk/src/main/java/io/dapr/client/Subscription.java index 53e89e845..2cbd1e9b3 100644 --- a/sdk/src/main/java/io/dapr/client/Subscription.java +++ b/sdk/src/main/java/io/dapr/client/Subscription.java @@ -35,6 +35,7 @@ * Streaming subscription of events for Dapr's pubsub. * @param Application's object type. */ +@Deprecated public class Subscription implements Closeable { private final BlockingQueue ackQueue = new LinkedBlockingQueue<>(50); diff --git a/sdk/src/main/java/io/dapr/client/SubscriptionListener.java b/sdk/src/main/java/io/dapr/client/SubscriptionListener.java index 5a467d69f..c5420af60 100644 --- a/sdk/src/main/java/io/dapr/client/SubscriptionListener.java +++ b/sdk/src/main/java/io/dapr/client/SubscriptionListener.java @@ -20,6 +20,7 @@ * Callback interface to receive events from a streaming subscription of events. * @param Object type for deserialization. */ +@Deprecated public interface SubscriptionListener { /** diff --git a/sdk/src/main/java/io/dapr/internal/subscription/EventSubscriberStreamObserver.java b/sdk/src/main/java/io/dapr/internal/subscription/EventSubscriberStreamObserver.java new file mode 100644 index 000000000..56131882b --- /dev/null +++ b/sdk/src/main/java/io/dapr/internal/subscription/EventSubscriberStreamObserver.java @@ -0,0 +1,223 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package io.dapr.internal.subscription; + +import io.dapr.client.domain.CloudEvent; +import io.dapr.exceptions.DaprException; +import io.dapr.serializer.DaprObjectSerializer; +import io.dapr.utils.TypeRef; +import io.dapr.v1.DaprAppCallbackProtos; +import io.dapr.v1.DaprGrpc; +import io.dapr.v1.DaprProtos; +import io.grpc.stub.StreamObserver; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.FluxSink; + +import java.io.IOException; + +/** + * StreamObserver implementation for subscribing to Dapr pub/sub events. + * Thread Safety: This class relies on gRPC's StreamObserver contract, which guarantees that + * onNext(), onError(), and onCompleted() are never called concurrently and always from the + * same thread. Therefore, no additional synchronization is needed. + * + * @param The type of the event payload + */ +public class EventSubscriberStreamObserver implements StreamObserver { + + private static final Logger logger = LoggerFactory.getLogger(EventSubscriberStreamObserver.class); + + private final DaprGrpc.DaprStub stub; + private final FluxSink> sink; + private final TypeRef type; + private final DaprObjectSerializer objectSerializer; + + private StreamObserver requestStream; + + /** + * Creates a new EventSubscriberStreamObserver. + * + * @param stub The gRPC stub for making Dapr service calls + * @param sink The FluxSink to emit CloudEvents to + * @param type The TypeRef for deserializing event payloads + * @param objectSerializer The serializer to use for deserialization + */ + public EventSubscriberStreamObserver( + DaprGrpc.DaprStub stub, + FluxSink> sink, + TypeRef type, + DaprObjectSerializer objectSerializer) { + this.stub = stub; + this.sink = sink; + this.type = type; + this.objectSerializer = objectSerializer; + } + + /** Starts the subscription by sending the initial request. 
+ * + * @param request The subscription request + * @return The StreamObserver to send further requests (acknowledgments) + */ + public StreamObserver start( + DaprProtos.SubscribeTopicEventsRequestAlpha1 request + ) { + requestStream = stub.subscribeTopicEventsAlpha1(this); + + requestStream.onNext(request); + + return requestStream; + } + + @Override + public void onNext(DaprProtos.SubscribeTopicEventsResponseAlpha1 response) { + if (!isValidEventMessage(response)) { + return; + } + + DaprAppCallbackProtos.TopicEventRequest message = response.getEventMessage(); + String eventId = message.getId(); + + try { + T data = deserializeEventData(message); + CloudEvent cloudEvent = buildCloudEvent(message, data); + emitEventAndAcknowledge(cloudEvent, eventId); + } catch (IOException e) { + // Deserialization failure - send DROP ack + handleDeserializationError(eventId, e); + } catch (Exception e) { + // Processing failure - send RETRY ack + handleProcessingError(eventId, e); + } + } + + @Override + public void onError(Throwable throwable) { + sink.error(DaprException.propagate(throwable)); + } + + @Override + public void onCompleted() { + sink.complete(); + } + + private boolean isValidEventMessage(DaprProtos.SubscribeTopicEventsResponseAlpha1 response) { + if (response.getEventMessage() == null) { + logger.debug("Received response with null event message, skipping"); + return false; + } + + DaprAppCallbackProtos.TopicEventRequest message = response.getEventMessage(); + + if (message.getPubsubName() == null || message.getPubsubName().isEmpty()) { + logger.debug("Received event with empty pubsub name, skipping"); + return false; + } + + if (message.getId() == null || message.getId().isEmpty()) { + logger.debug("Received event with empty ID, skipping"); + return false; + } + + return true; + } + + private T deserializeEventData(DaprAppCallbackProtos.TopicEventRequest message) throws IOException { + if (type == null) { + logger.debug("Type is null, skipping deserialization for event ID: {}", message.getId()); + return null; + } + + return objectSerializer.deserialize(message.getData().toByteArray(), type); + } + + private CloudEvent buildCloudEvent(DaprAppCallbackProtos.TopicEventRequest message, T data) { + CloudEvent cloudEvent = new CloudEvent<>(); + + cloudEvent.setId(message.getId()); + cloudEvent.setType(message.getType()); + cloudEvent.setSpecversion(message.getSpecVersion()); + cloudEvent.setDatacontenttype(message.getDataContentType()); + cloudEvent.setTopic(message.getTopic()); + cloudEvent.setPubsubName(message.getPubsubName()); + cloudEvent.setData(data); + + return cloudEvent; + } + + private void emitEventAndAcknowledge(CloudEvent cloudEvent, String eventId) { + sink.next(cloudEvent); + + // Send SUCCESS acknowledgment + requestStream.onNext(buildSuccessAck(eventId)); + } + + private void handleDeserializationError(String eventId, IOException cause) { + logger.error("Deserialization failed for event ID: {}, sending DROP ack", eventId, cause); + + // Send DROP ack - cannot process malformed data + requestStream.onNext(buildDropAck(eventId)); + + // Propagate error to sink + sink.error(new DaprException("DESERIALIZATION_ERROR", + "Failed to deserialize event with ID: " + eventId, cause)); + } + + private void handleProcessingError(String eventId, Exception cause) { + logger.error("Processing error for event ID: {}, attempting to send RETRY ack", eventId, cause); + + try { + // Try to send RETRY acknowledgment + requestStream.onNext(buildRetryAck(eventId)); + } catch (Exception 
ackException) { + // Failed to send ack - this is critical + logger.error("Failed to send RETRY ack for event ID: {}", eventId, ackException); + sink.error(DaprException.propagate(ackException)); + + return; + } + + // Propagate the original processing error + sink.error(DaprException.propagate(cause)); + } + + private static DaprProtos.SubscribeTopicEventsRequestAlpha1 buildSuccessAck(String eventId) { + return buildAckRequest(eventId, DaprAppCallbackProtos.TopicEventResponse.TopicEventResponseStatus.SUCCESS); + } + + private static DaprProtos.SubscribeTopicEventsRequestAlpha1 buildRetryAck(String eventId) { + return buildAckRequest(eventId, DaprAppCallbackProtos.TopicEventResponse.TopicEventResponseStatus.RETRY); + } + + private static DaprProtos.SubscribeTopicEventsRequestAlpha1 buildDropAck(String eventId) { + return buildAckRequest(eventId, DaprAppCallbackProtos.TopicEventResponse.TopicEventResponseStatus.DROP); + } + + private static DaprProtos.SubscribeTopicEventsRequestAlpha1 buildAckRequest( + String eventId, + DaprAppCallbackProtos.TopicEventResponse.TopicEventResponseStatus status) { + DaprProtos.SubscribeTopicEventsRequestProcessedAlpha1 eventProcessed = + DaprProtos.SubscribeTopicEventsRequestProcessedAlpha1.newBuilder() + .setId(eventId) + .setStatus( + DaprAppCallbackProtos.TopicEventResponse.newBuilder() + .setStatus(status) + .build()) + .build(); + + return DaprProtos.SubscribeTopicEventsRequestAlpha1.newBuilder() + .setEventProcessed(eventProcessed) + .build(); + } +} diff --git a/sdk/src/test/java/io/dapr/client/DaprPreviewClientGrpcTest.java b/sdk/src/test/java/io/dapr/client/DaprPreviewClientGrpcTest.java index f7b5584cc..a42c4f946 100644 --- a/sdk/src/test/java/io/dapr/client/DaprPreviewClientGrpcTest.java +++ b/sdk/src/test/java/io/dapr/client/DaprPreviewClientGrpcTest.java @@ -586,6 +586,87 @@ public void onError(RuntimeException exception) { assertEquals(numErrors, errors.size()); } + @Test + public void subscribeEventFluxTest() throws Exception { + var numEvents = 100; + var pubsubName = "pubsubName"; + var topicName = "topicName"; + var data = "my message"; + var started = new Semaphore(0); + + doAnswer((Answer>) invocation -> { + StreamObserver observer = + (StreamObserver) invocation.getArguments()[0]; + + var emitterThread = new Thread(() -> { + try { + started.acquire(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + + observer.onNext(DaprProtos.SubscribeTopicEventsResponseAlpha1.getDefaultInstance()); + + for (int i = 0; i < numEvents; i++) { + DaprProtos.SubscribeTopicEventsResponseAlpha1 reponse = + DaprProtos.SubscribeTopicEventsResponseAlpha1.newBuilder() + .setEventMessage(DaprAppCallbackProtos.TopicEventRequest.newBuilder() + .setId(Integer.toString(i)) + .setPubsubName(pubsubName) + .setTopic(topicName) + .setData(ByteString.copyFromUtf8("\"" + data + "\"")) + .setDataContentType("application/json") + .build()) + .build(); + observer.onNext(reponse); + } + + observer.onCompleted(); + }); + + emitterThread.start(); + + return new StreamObserver<>() { + @Override + public void onNext(DaprProtos.SubscribeTopicEventsRequestAlpha1 subscribeTopicEventsRequestAlpha1) { + started.release(); + } + + @Override + public void onError(Throwable throwable) { + // No-op + } + + @Override + public void onCompleted() { + // No-op + } + }; + }).when(daprStub).subscribeTopicEventsAlpha1(any(StreamObserver.class)); + + final AtomicInteger eventCount = new AtomicInteger(0); + final Semaphore gotAll = new Semaphore(0); + var disposable = 
previewClient.subscribeToEvents(pubsubName, topicName, TypeRef.STRING) + .doOnNext(cloudEvent -> { + assertEquals(data, cloudEvent.getData()); + assertEquals(pubsubName, cloudEvent.getPubsubName()); + assertEquals(topicName, cloudEvent.getTopic()); + assertNotNull(cloudEvent.getId()); + + int count = eventCount.incrementAndGet(); + + if (count >= numEvents) { + gotAll.release(); + } + }) + .subscribe(); + + gotAll.acquire(); + disposable.dispose(); + + assertEquals(numEvents, eventCount.get()); + } + @Test public void converseShouldThrowIllegalArgumentExceptionWhenComponentNameIsNull() throws Exception { List inputs = new ArrayList<>(); diff --git a/sdk/src/test/java/io/dapr/internal/subscription/EventSubscriberStreamObserverTest.java b/sdk/src/test/java/io/dapr/internal/subscription/EventSubscriberStreamObserverTest.java new file mode 100644 index 000000000..7328f79e5 --- /dev/null +++ b/sdk/src/test/java/io/dapr/internal/subscription/EventSubscriberStreamObserverTest.java @@ -0,0 +1,506 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. +*/ + +package io.dapr.internal.subscription; + +import com.google.protobuf.ByteString; +import io.dapr.client.domain.CloudEvent; +import io.dapr.exceptions.DaprException; +import io.dapr.serializer.DaprObjectSerializer; +import io.dapr.serializer.DefaultObjectSerializer; +import io.dapr.utils.TypeRef; +import io.dapr.v1.DaprAppCallbackProtos; +import io.dapr.v1.DaprGrpc; +import io.dapr.v1.DaprProtos; +import io.grpc.stub.StreamObserver; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; +import org.mockito.ArgumentCaptor; +import reactor.core.publisher.Flux; +import reactor.test.StepVerifier; + +import java.io.IOException; +import java.util.List; +import java.util.stream.Collectors; + +import static org.junit.jupiter.api.Assertions.*; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.*; + +/** + * Unit tests for EventSubscriberStreamObserver. 
+ */ +class EventSubscriberStreamObserverTest { + + public static final String PUBSUB_NAME = "pubsub"; + public static final String TOPIC_NAME = "topic"; + private DaprGrpc.DaprStub mockStub; + private DaprObjectSerializer objectSerializer; + private StreamObserver mockRequestStream; + + @BeforeEach + @SuppressWarnings("unchecked") + void setUp() { + mockStub = mock(DaprGrpc.DaprStub.class); + objectSerializer = new DefaultObjectSerializer(); + mockRequestStream = mock(StreamObserver.class); + + when(mockStub.subscribeTopicEventsAlpha1(any())).thenReturn(mockRequestStream); + } + + @Test + @DisplayName("Should successfully process events and send SUCCESS acks") + void testSuccessfulEventProcessing() { + Flux> flux = Flux.create(sink -> { + EventSubscriberStreamObserver observer = new EventSubscriberStreamObserver<>( + mockStub, + sink, + TypeRef.STRING, + objectSerializer + ); + + // Start the subscription + DaprProtos.SubscribeTopicEventsRequestAlpha1 initialRequest = buildInitialRequest( + ); + observer.start(initialRequest); + + // Simulate receiving an event + DaprProtos.SubscribeTopicEventsResponseAlpha1 response = buildEventResponse( + "event-1", + "Hello World" + ); + observer.onNext(response); + + // Complete the stream + observer.onCompleted(); + }); + + StepVerifier.create(flux) + .assertNext(cloudEvent -> { + assertEquals("Hello World", cloudEvent.getData()); + assertEquals("event-1", cloudEvent.getId()); + assertEquals(PUBSUB_NAME, cloudEvent.getPubsubName()); + assertEquals(TOPIC_NAME, cloudEvent.getTopic()); + }) + .verifyComplete(); + + ArgumentCaptor requestCaptor = + ArgumentCaptor.forClass(DaprProtos.SubscribeTopicEventsRequestAlpha1.class); + + verify(mockRequestStream, times(2)).onNext(requestCaptor.capture()); + + List requests = requestCaptor.getAllValues(); + + assertEquals(2, requests.size()); + assertTrue(requests.get(0).hasInitialRequest()); + assertTrue(requests.get(1).hasEventProcessed()); + assertEquals("event-1", requests.get(1).getEventProcessed().getId()); + assertEquals( + DaprAppCallbackProtos.TopicEventResponse.TopicEventResponseStatus.SUCCESS, + requests.get(1).getEventProcessed().getStatus().getStatus() + ); + } + + @Test + @DisplayName("Should handle multiple consecutive events correctly") + void testMultipleEvents() { + Flux> flux = Flux.create(sink -> { + EventSubscriberStreamObserver observer = new EventSubscriberStreamObserver<>( + mockStub, + sink, + TypeRef.STRING, + objectSerializer + ); + + DaprProtos.SubscribeTopicEventsRequestAlpha1 initialRequest = buildInitialRequest( + ); + observer.start(initialRequest); + + observer.onNext(buildEventResponse("event-1", "Message 1")); + observer.onNext(buildEventResponse("event-2", "Message 2")); + observer.onNext(buildEventResponse("event-3", "Message 3")); + + observer.onCompleted(); + }); + + StepVerifier.create(flux) + .assertNext(cloudEvent -> { + assertEquals("Message 1", cloudEvent.getData()); + assertEquals("event-1", cloudEvent.getId()); + }) + .assertNext(cloudEvent -> { + assertEquals("Message 2", cloudEvent.getData()); + assertEquals("event-2", cloudEvent.getId()); + }) + .assertNext(cloudEvent -> { + assertEquals("Message 3", cloudEvent.getData()); + assertEquals("event-3", cloudEvent.getId()); + }) + .verifyComplete(); + + verify(mockRequestStream, times(4)).onNext(any()); + } + + @Test + @DisplayName("Should send DROP ack when deserialization fails") + void testDeserializationError() { + Flux> flux = Flux.create(sink -> { + EventSubscriberStreamObserver observer = new 
EventSubscriberStreamObserver<>( + mockStub, + sink, + TypeRef.STRING, + objectSerializer + ); + + DaprProtos.SubscribeTopicEventsRequestAlpha1 initialRequest = buildInitialRequest( + ); + observer.start(initialRequest); + + // Send an event with invalid data (can't deserialize to String) + DaprProtos.SubscribeTopicEventsResponseAlpha1 response = DaprProtos.SubscribeTopicEventsResponseAlpha1.newBuilder() + .setEventMessage( + DaprAppCallbackProtos.TopicEventRequest.newBuilder() + .setId("event-1") + .setPubsubName(PUBSUB_NAME) + .setTopic(TOPIC_NAME) + .setData(ByteString.copyFrom(new byte[]{(byte) 0xFF, (byte) 0xFE})) // Invalid UTF-8 + .build() + ) + .build(); + + observer.onNext(response); + }); + + StepVerifier.create(flux) + .expectErrorMatches(error -> + error instanceof DaprException + && error.getMessage().contains("DESERIALIZATION_ERROR") + && error.getMessage().contains("event-1")) + .verify(); + + ArgumentCaptor requestCaptor = + ArgumentCaptor.forClass(DaprProtos.SubscribeTopicEventsRequestAlpha1.class); + + verify(mockRequestStream, atLeast(2)).onNext(requestCaptor.capture()); + + List ackRequests = requestCaptor.getAllValues().stream() + .filter(DaprProtos.SubscribeTopicEventsRequestAlpha1::hasEventProcessed) + .collect(Collectors.toList()); + + assertEquals(1, ackRequests.size()); + assertEquals("event-1", ackRequests.get(0).getEventProcessed().getId()); + assertEquals( + DaprAppCallbackProtos.TopicEventResponse.TopicEventResponseStatus.DROP, + ackRequests.get(0).getEventProcessed().getStatus().getStatus() + ); + } + + @Test + @DisplayName("Should send RETRY ack when non-deserialization error occurs") + void testProcessingError() { + Flux> flux = Flux.create(sink -> { + EventSubscriberStreamObserver observer = new EventSubscriberStreamObserver<>( + mockStub, + sink, + TypeRef.STRING, + objectSerializer + ); + + DaprProtos.SubscribeTopicEventsRequestAlpha1 initialRequest = buildInitialRequest(); + observer.start(initialRequest); + + // Simulate a processing error by throwing during sink.next() + sink.onRequest(n -> { + throw new RuntimeException("Processing error"); + }); + + observer.onNext(buildEventResponse("event-1", "Hello")); + }); + + StepVerifier.create(flux) + .expectError(RuntimeException.class) + .verify(); + + // Note: When error occurs in onRequest callback (before processing), + // no ack is sent as the error happens before we can handle the event + verify(mockRequestStream, times(1)).onNext(any()); // Only initial request sent + } + + @Test + @DisplayName("Should propagate gRPC errors as DaprException") + void testGrpcError() { + Flux> flux = Flux.create(sink -> { + EventSubscriberStreamObserver observer = new EventSubscriberStreamObserver<>( + mockStub, + sink, + TypeRef.STRING, + objectSerializer + ); + + DaprProtos.SubscribeTopicEventsRequestAlpha1 initialRequest = buildInitialRequest(); + observer.start(initialRequest); + + // Simulate gRPC error + observer.onError(new RuntimeException("gRPC connection failed")); + }); + + StepVerifier.create(flux) + .expectError(DaprException.class) + .verify(); + } + + @Test + @DisplayName("Should handle null event messages gracefully without emitting events") + void testNullEventMessage() { + Flux> flux = Flux.create(sink -> { + EventSubscriberStreamObserver observer = new EventSubscriberStreamObserver<>( + mockStub, + sink, + TypeRef.STRING, + objectSerializer + ); + + DaprProtos.SubscribeTopicEventsRequestAlpha1 initialRequest = buildInitialRequest( + ); + observer.start(initialRequest); + + 
DaprProtos.SubscribeTopicEventsResponseAlpha1 response = DaprProtos.SubscribeTopicEventsResponseAlpha1.newBuilder() + .build(); + + observer.onNext(response); + observer.onCompleted(); + }); + + StepVerifier.create(flux) + .verifyComplete(); + + verify(mockRequestStream, times(1)).onNext(any()); + } + + @Test + @DisplayName("Should skip events with empty pubsub name") + void testEmptyPubsubName() { + Flux> flux = Flux.create(sink -> { + EventSubscriberStreamObserver observer = new EventSubscriberStreamObserver<>( + mockStub, + sink, + TypeRef.STRING, + objectSerializer + ); + + DaprProtos.SubscribeTopicEventsRequestAlpha1 initialRequest = buildInitialRequest( + ); + observer.start(initialRequest); + + DaprProtos.SubscribeTopicEventsResponseAlpha1 response = DaprProtos.SubscribeTopicEventsResponseAlpha1.newBuilder() + .setEventMessage( + DaprAppCallbackProtos.TopicEventRequest.newBuilder() + .setId("event-1") + .setPubsubName("") + .setTopic(TOPIC_NAME) + .setData(ByteString.copyFromUtf8("\"Hello\"")) + .build() + ) + .build(); + + observer.onNext(response); + observer.onCompleted(); + }); + + StepVerifier.create(flux) + .verifyComplete(); + + verify(mockRequestStream, times(1)).onNext(any()); + } + + @Test + @DisplayName("Should skip events with empty event ID") + void testEmptyEventId() { + Flux> flux = Flux.create(sink -> { + EventSubscriberStreamObserver observer = new EventSubscriberStreamObserver<>( + mockStub, + sink, + TypeRef.STRING, + objectSerializer + ); + + DaprProtos.SubscribeTopicEventsRequestAlpha1 initialRequest = buildInitialRequest( + ); + observer.start(initialRequest); + + DaprProtos.SubscribeTopicEventsResponseAlpha1 response = DaprProtos.SubscribeTopicEventsResponseAlpha1.newBuilder() + .setEventMessage( + DaprAppCallbackProtos.TopicEventRequest.newBuilder() + .setId("") + .setPubsubName(PUBSUB_NAME) + .setTopic(TOPIC_NAME) + .setData(ByteString.copyFromUtf8("\"Hello\"")) + .build() + ) + .build(); + + observer.onNext(response); + observer.onCompleted(); + }); + + StepVerifier.create(flux) + .verifyComplete(); + + verify(mockRequestStream, times(1)).onNext(any()); + } + + @Test + @DisplayName("Should handle null type parameter and emit CloudEvent with null data") + void testNullData() { + Flux> flux = Flux.create(sink -> { + EventSubscriberStreamObserver observer = new EventSubscriberStreamObserver<>( + mockStub, + sink, + null, // null type + objectSerializer + ); + + DaprProtos.SubscribeTopicEventsRequestAlpha1 initialRequest = buildInitialRequest( + ); + + observer.start(initialRequest); + observer.onNext(buildEventResponse("event-1", "Hello")); + observer.onCompleted(); + }); + + StepVerifier.create(flux) + .assertNext(cloudEvent -> { + assertNull(cloudEvent.getData()); + assertEquals("event-1", cloudEvent.getId()); + assertEquals(PUBSUB_NAME, cloudEvent.getPubsubName()); + assertEquals(TOPIC_NAME, cloudEvent.getTopic()); + }) + .verifyComplete(); + + verify(mockRequestStream, times(2)).onNext(any()); + } + + @Test + @DisplayName("Should deserialize and emit complex objects correctly") + void testComplexObjectSerialization() throws IOException { + TestEvent testEvent = new TestEvent("test-name", 42); + byte[] serializedEvent = objectSerializer.serialize(testEvent); + + Flux> flux = Flux.create(sink -> { + EventSubscriberStreamObserver observer = new EventSubscriberStreamObserver<>( + mockStub, + sink, + TypeRef.get(TestEvent.class), + objectSerializer + ); + + DaprProtos.SubscribeTopicEventsRequestAlpha1 initialRequest = buildInitialRequest( + ); + 
observer.start(initialRequest); + + DaprProtos.SubscribeTopicEventsResponseAlpha1 response = DaprProtos.SubscribeTopicEventsResponseAlpha1.newBuilder() + .setEventMessage( + DaprAppCallbackProtos.TopicEventRequest.newBuilder() + .setId("event-1") + .setPubsubName(PUBSUB_NAME) + .setTopic(TOPIC_NAME) + .setData(ByteString.copyFrom(serializedEvent)) + .build() + ) + .build(); + + observer.onNext(response); + observer.onCompleted(); + }); + + StepVerifier.create(flux) + .assertNext(cloudEvent -> { + TestEvent event = cloudEvent.getData(); + assertEquals("test-name", event.name); + assertEquals(42, event.value); + assertEquals("event-1", cloudEvent.getId()); + }) + .verifyComplete(); + } + + @Test + @DisplayName("Should propagate errors when ack sending fails") + void testErrorDuringSendingAck() { + doThrow(new RuntimeException("Failed to send ack")) + .when(mockRequestStream) + .onNext(argThat(DaprProtos.SubscribeTopicEventsRequestAlpha1::hasEventProcessed)); + + Flux> flux = Flux.create(sink -> { + EventSubscriberStreamObserver observer = new EventSubscriberStreamObserver<>( + mockStub, + sink, + TypeRef.STRING, + objectSerializer + ); + + DaprProtos.SubscribeTopicEventsRequestAlpha1 initialRequest = buildInitialRequest(); + observer.start(initialRequest); + + observer.onNext(buildEventResponse("event-1", "Hello")); + }); + + StepVerifier.create(flux) + .assertNext(cloudEvent -> assertEquals("Hello", cloudEvent.getData())) // Event is emitted before ack + .expectError(DaprException.class) // Then error when sending ack + .verify(); + } + + private DaprProtos.SubscribeTopicEventsRequestAlpha1 buildInitialRequest() { + return DaprProtos.SubscribeTopicEventsRequestAlpha1.newBuilder() + .setInitialRequest( + DaprProtos.SubscribeTopicEventsRequestInitialAlpha1.newBuilder() + .setPubsubName(PUBSUB_NAME) + .setTopic(TOPIC_NAME) + .build() + ) + .build(); + } + + private DaprProtos.SubscribeTopicEventsResponseAlpha1 buildEventResponse(String eventId, String data) { + + try { + byte[] serializedData = objectSerializer.serialize(data); + return DaprProtos.SubscribeTopicEventsResponseAlpha1.newBuilder() + .setEventMessage( + DaprAppCallbackProtos.TopicEventRequest.newBuilder() + .setId(eventId) + .setPubsubName(PUBSUB_NAME) + .setTopic(TOPIC_NAME) + .setData(ByteString.copyFrom(serializedData)) + .build() + ) + .build(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + public static class TestEvent { + public String name; + public int value; + + public TestEvent() { + } + + public TestEvent(String name, int value) { + this.name = name; + this.value = value; + } + } +} From 84ffabda8ec889a8d3e31e5673a822696f7a1221 Mon Sep 17 00:00:00 2001 From: salaboy Date: Fri, 5 Dec 2025 09:40:10 +0100 Subject: [PATCH 05/18] fixing configure() Signed-off-by: salaboy --- .../io/dapr/testcontainers/WorkflowDashboardContainer.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/testcontainers-dapr/src/main/java/io/dapr/testcontainers/WorkflowDashboardContainer.java b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/WorkflowDashboardContainer.java index 97783c1bb..d3616bad9 100644 --- a/testcontainers-dapr/src/main/java/io/dapr/testcontainers/WorkflowDashboardContainer.java +++ b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/WorkflowDashboardContainer.java @@ -64,10 +64,9 @@ protected void configure() { if (stateStoreComponent != null) { String componentYaml = COMPONENT_CONVERTER.convert(stateStoreComponent); 
withCopyToContainer(Transferable.of(componentYaml), "/app/components/" + stateStoreComponent.getName() + ".yaml"); + withEnv("COMPONENT_FILE", "/app/components/" + stateStoreComponent.getName() + ".yaml"); } - withEnv("COMPONENT_FILE", "/app/components/" + stateStoreComponent.getName() + ".yaml"); - } public static DockerImageName getDefaultImageName() { From 54858242f60c2603ddd1518d06424a9fff8cb719 Mon Sep 17 00:00:00 2001 From: Marc Duiker Date: Fri, 5 Dec 2025 15:07:50 +0100 Subject: [PATCH 06/18] Remove SDK docs due to migration to main Docs repo (#1593) * Remove SDK docs due to migration to main Docs repo Signed-off-by: Marc Duiker * Remove sed lines related to sdk docs Signed-off-by: Marc Duiker --------- Signed-off-by: Marc Duiker Co-authored-by: salaboy Signed-off-by: salaboy --- .github/scripts/update_docs.sh | 9 - daprdocs/README.md | 25 - .../java-contributing.md | 27 - daprdocs/content/en/java-sdk-docs/_index.md | 145 ---- .../en/java-sdk-docs/java-ai/_index.md | 7 - .../en/java-sdk-docs/java-ai/java-ai-howto.md | 105 --- .../en/java-sdk-docs/java-client/_index.md | 756 ------------------ .../java-sdk-docs/java-client/properties.md | 211 ----- .../en/java-sdk-docs/java-jobs/_index.md | 7 - .../java-jobs/java-jobs-howto.md | 164 ---- .../en/java-sdk-docs/java-workflow/_index.md | 7 - .../java-workflow/java-workflow-howto.md | 284 ------- .../en/java-sdk-docs/spring-boot/_index.md | 347 -------- 13 files changed, 2094 deletions(-) delete mode 100644 daprdocs/README.md delete mode 100644 daprdocs/content/en/java-sdk-contributing/java-contributing.md delete mode 100644 daprdocs/content/en/java-sdk-docs/_index.md delete mode 100644 daprdocs/content/en/java-sdk-docs/java-ai/_index.md delete mode 100644 daprdocs/content/en/java-sdk-docs/java-ai/java-ai-howto.md delete mode 100644 daprdocs/content/en/java-sdk-docs/java-client/_index.md delete mode 100644 daprdocs/content/en/java-sdk-docs/java-client/properties.md delete mode 100644 daprdocs/content/en/java-sdk-docs/java-jobs/_index.md delete mode 100644 daprdocs/content/en/java-sdk-docs/java-jobs/java-jobs-howto.md delete mode 100644 daprdocs/content/en/java-sdk-docs/java-workflow/_index.md delete mode 100644 daprdocs/content/en/java-sdk-docs/java-workflow/java-workflow-howto.md delete mode 100644 daprdocs/content/en/java-sdk-docs/spring-boot/_index.md diff --git a/.github/scripts/update_docs.sh b/.github/scripts/update_docs.sh index 21c6bddc7..011676508 100755 --- a/.github/scripts/update_docs.sh +++ b/.github/scripts/update_docs.sh @@ -11,21 +11,12 @@ DAPR_JAVA_SDK_ALPHA_VERSION=`echo $DAPR_JAVA_SDK_VERSION | sed 's/^[0-9]*\./0./' if [[ "$OSTYPE" == "darwin"* ]]; then sed -i bak "s/.*<\/version>\$/${DAPR_JAVA_SDK_VERSION}<\/version>/g" README.md sed -i bak "s/compile('io.dapr:\(.*\):.*')/compile('io.dapr:\\1:${DAPR_JAVA_SDK_VERSION}')/g" README.md - sed -i bak "s/.*<\/version>\$/${DAPR_JAVA_SDK_VERSION}<\/version>/g" daprdocs/content/en/java-sdk-docs/_index.md - sed -i bak "s/compile('io.dapr:\(.*\):.*')/compile('io.dapr:\\1:${DAPR_JAVA_SDK_VERSION}')/g" daprdocs/content/en/java-sdk-docs/_index.md - sed -i bak "s/.*<\/version>\$/${DAPR_JAVA_SDK_ALPHA_VERSION}<\/version>/g" daprdocs/content/en/java-sdk-docs/spring-boot/_index.md rm README.mdbak else sed -i "s/.*<\/version>\$/${DAPR_JAVA_SDK_VERSION}<\/version>/g" README.md sed -i "s/compile('io.dapr:\(.*\):.*')/compile('io.dapr:\\1:${DAPR_JAVA_SDK_VERSION}')/g" README.md - sed -i "s/.*<\/version>\$/${DAPR_JAVA_SDK_VERSION}<\/version>/g" daprdocs/content/en/java-sdk-docs/_index.md - 
sed -i "s/compile('io.dapr:\(.*\):.*')/compile('io.dapr:\\1:${DAPR_JAVA_SDK_VERSION}')/g" daprdocs/content/en/java-sdk-docs/_index.md - sed -i "s/.*<\/version>\$/${DAPR_JAVA_SDK_ALPHA_VERSION}<\/version>/g" daprdocs/content/en/java-sdk-docs/spring-boot/_index.md fi -rm -f daprdocs/content/en/java-sdk-docs/_index.mdbak || echo -rm -f daprdocs/content/en/java-sdk-docs/spring-boot/_index.md/_index.mdbak || echo - rm -rf docs ./mvnw -Dmaven.test.skip=true -Djacoco.skip=true clean install ./mvnw -Dmaven.test.skip=true -Djacoco.skip=true site-deploy diff --git a/daprdocs/README.md b/daprdocs/README.md deleted file mode 100644 index 1fe0b1234..000000000 --- a/daprdocs/README.md +++ /dev/null @@ -1,25 +0,0 @@ -# Dapr Java SDK documentation - -This page covers how the documentation is structured for the Dapr Java SDK - -## Dapr Docs - -All Dapr documentation is hosted at [docs.dapr.io](https://docs.dapr.io), including the docs for the [Java SDK](https://docs.dapr.io/developing-applications/sdks/java/). Head over there if you want to read the docs. - -### Java SDK docs source - -Although the docs site code and content is in the [docs repo](https://github.com/dapr/docs), the Java SDK content and images are within the `content` and `static` directories, respectively. - -This allows separation of roles and expertise between maintainers, and makes it easy to find the docs files you are looking for. - -## Writing Java SDK docs - -To get up and running to write Java SDK docs, visit the [docs repo](https://github.com/dapr/docs) to initialize your environment. It will clone both the docs repo and this repo, so you can make changes and see it rendered within the site instantly, as well as commit and PR into this repo. - -Make sure to read the [docs contributing guide](https://docs.dapr.io/contributing/contributing-docs/) for information on style/semantics/etc. - -## Docs architecture - -The docs site is built on [Hugo](https://gohugo.io), which lives in the docs repo. This repo is setup as a git submodule so that when the repo is cloned and initialized, the java-sdk repo, along with the docs, are cloned as well. - -Then, in the Hugo configuration file, the `daprdocs/content` and `daprdocs/static` directories are redirected to the `daprdocs/developing-applications/sdks/java` and `static/java` directories, respectively. Thus, all the content within this repo is folded into the main docs site. \ No newline at end of file diff --git a/daprdocs/content/en/java-sdk-contributing/java-contributing.md b/daprdocs/content/en/java-sdk-contributing/java-contributing.md deleted file mode 100644 index 03ba6d4e5..000000000 --- a/daprdocs/content/en/java-sdk-contributing/java-contributing.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -type: docs -title: "Contributing to the Java SDK" -linkTitle: "Java SDK" -weight: 3000 -description: Guidelines for contributing to the Dapr Java SDK ---- - -When contributing to the [Java SDK](https://github.com/dapr/java-sdk) the following rules and best-practices should be followed. - -## Examples - -The `examples` directory contains code samples for users to run to try out specific functionality of the various Java SDK packages and extensions. When writing new and updated samples keep in mind: - -- All examples should be runnable on Windows, Linux, and MacOS. While Java code is consistent among operating systems, any pre/post example commands should provide options through [tabpane]({{% ref "contributing-docs.md#tabbed-content" %}}) -- Contain steps to download/install any required pre-requisites. 
Someone coming in with a fresh OS install should be able to start on the example and complete it without an error. Links to external download pages are fine. - -## Docs - -The `daprdocs` directory contains the markdown files that are rendered into the [Dapr Docs](https://docs.dapr.io) website. When the documentation website is built, this repo is cloned and configured so that its contents are rendered with the docs content. When writing docs, keep in mind: - - - All rules in the [docs guide]({{% ref contributing-docs.md %}}) should be followed in addition to these. - - All files and directories should be prefixed with `java-` to ensure all file/directory names are globally unique across all Dapr documentation. - -## Github Dapr Bot Commands - -Checkout the [daprbot documentation](https://docs.dapr.io/contributing/daprbot/) for Github commands you can run in this repo for common tasks. For example, you can run the `/assign` (as a comment on an issue) to assign the issue to yourself. diff --git a/daprdocs/content/en/java-sdk-docs/_index.md b/daprdocs/content/en/java-sdk-docs/_index.md deleted file mode 100644 index d640101bc..000000000 --- a/daprdocs/content/en/java-sdk-docs/_index.md +++ /dev/null @@ -1,145 +0,0 @@ ---- -type: docs -title: "Dapr Java SDK" -linkTitle: "Java" -weight: 1000 -description: Java SDK packages for developing Dapr applications -cascade: - github_repo: https://github.com/dapr/java-sdk - github_subdir: daprdocs/content/en/java-sdk-docs - path_base_for_github_subdir: content/en/developing-applications/sdks/java/ - github_branch: master ---- - -Dapr offers a variety of packages to help with the development of Java applications. Using them you can create Java clients, servers, and virtual actors with Dapr. - -## Prerequisites - -- [Dapr CLI]({{% ref install-dapr-cli.md %}}) installed -- Initialized [Dapr environment]({{% ref install-dapr-selfhost.md %}}) -- JDK 11 or above - the published jars are compatible with Java 8: - - [AdoptOpenJDK 11 - LTS](https://adoptopenjdk.net/) - - [Oracle's JDK 15](https://www.oracle.com/java/technologies/javase-downloads.html) - - [Oracle's JDK 11 - LTS](https://www.oracle.com/java/technologies/javase-jdk11-downloads.html) - - [OpenJDK](https://openjdk.java.net/) -- Install one of the following build tools for Java: - - [Maven 3.x](https://maven.apache.org/install.html) - - [Gradle 6.x](https://gradle.org/install/) - -## Import Dapr's Java SDK - -Next, import the Java SDK packages to get started. Select your preferred build tool to learn how to import. - -{{< tabpane text=true >}} - -{{% tab header="Maven" %}} - - -For a Maven project, add the following to your `pom.xml` file: - -```xml - - ... - - ... - - - io.dapr - dapr-sdk - 1.16.0 - - - - io.dapr - dapr-sdk-actors - 1.16.0 - - - - io.dapr - dapr-sdk-springboot - 1.16.0 - - ... - - ... - -``` -{{% /tab %}} - -{{% tab header="Gradle" %}} - - -For a Gradle project, add the following to your `build.gradle` file: - -```java -dependencies { -... - // Dapr's core SDK with all features, except Actors. - compile('io.dapr:dapr-sdk:1.16.0') - // Dapr's SDK for Actors (optional). - compile('io.dapr:dapr-sdk-actors:1.16.0') - // Dapr's SDK integration with SpringBoot (optional). - compile('io.dapr:dapr-sdk-springboot:1.16.0') -} -``` - -{{% /tab %}} - -{{< /tabpane >}} - -If you are also using Spring Boot, you may run into a common issue where the `OkHttp` version that the Dapr SDK uses conflicts with the one specified in the Spring Boot _Bill of Materials_. 
- -You can fix this by specifying a compatible `OkHttp` version in your project to match the version that the Dapr SDK uses: - -```xml - - com.squareup.okhttp3 - okhttp - 1.16.0 - -``` - -## Try it out - -Put the Dapr Java SDK to the test. Walk through the Java quickstarts and tutorials to see Dapr in action: - -| SDK samples | Description | -| ----------- | ----------- | -| [Quickstarts]({{% ref quickstarts %}}) | Experience Dapr's API building blocks in just a few minutes using the Java SDK. | -| [SDK samples](https://github.com/dapr/java-sdk/tree/master/examples) | Clone the SDK repo to try out some examples and get started. | - -```java -import io.dapr.client.DaprClient; -import io.dapr.client.DaprClientBuilder; - -try (DaprClient client = (new DaprClientBuilder()).build()) { - // sending a class with message; BINDING_OPERATION="create" - client.invokeBinding(BINDING_NAME, BINDING_OPERATION, myClass).block(); - - // sending a plain string - client.invokeBinding(BINDING_NAME, BINDING_OPERATION, message).block(); -} -``` - -- For a full guide on output bindings visit [How-To: Output bindings]({{% ref howto-bindings.md %}}). -- Visit [Java SDK examples](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples/bindings/http) for code samples and instructions to try out output bindings. - -## Available packages - -
- **Client**: Create Java clients that interact with a Dapr sidecar and other Dapr applications.
- **Workflow**: Create and manage workflows that work with other Dapr APIs in Java.
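A minimal sketch of the entry points for the two packages above, assuming the `dapr-sdk` and `dapr-sdk-workflows` artifacts are on the classpath; `MyWorkflow` stands in for a hypothetical workflow class you have registered with the workflow runtime:

```java
import io.dapr.client.DaprClient;
import io.dapr.client.DaprClientBuilder;
import io.dapr.workflows.client.DaprWorkflowClient;

// Client package: connects to the Dapr sidecar (gRPC, default localhost:50001).
try (DaprClient client = new DaprClientBuilder().build()) {
  // Wait up to 10 seconds for the sidecar to report healthy before using components.
  client.waitForSidecar(10000).block();
}

// Workflow package: schedules and manages workflow instances.
try (DaprWorkflowClient workflowClient = new DaprWorkflowClient()) {
  // MyWorkflow is a placeholder for a workflow class registered with the workflow runtime.
  String instanceId = workflowClient.scheduleNewWorkflow(MyWorkflow.class, "input data");
  System.out.printf("Scheduled workflow instance: %s%n", instanceId);
}
```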
diff --git a/daprdocs/content/en/java-sdk-docs/java-ai/_index.md b/daprdocs/content/en/java-sdk-docs/java-ai/_index.md deleted file mode 100644 index 904edfc11..000000000 --- a/daprdocs/content/en/java-sdk-docs/java-ai/_index.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -type: docs -title: "AI" -linkTitle: "AI" -weight: 3000 -description: With the Dapr Conversation AI package, you can interact with the Dapr AI workloads from a Java application. To get started, walk through the [Dapr AI]({{% ref java-ai-howto.md %}}) how-to guide. ---- \ No newline at end of file diff --git a/daprdocs/content/en/java-sdk-docs/java-ai/java-ai-howto.md b/daprdocs/content/en/java-sdk-docs/java-ai/java-ai-howto.md deleted file mode 100644 index 39970d521..000000000 --- a/daprdocs/content/en/java-sdk-docs/java-ai/java-ai-howto.md +++ /dev/null @@ -1,105 +0,0 @@ ---- -type: docs -title: "How to: Author and manage Dapr Conversation AI in the Java SDK" -linkTitle: "How to: Author and manage Conversation AI" -weight: 20000 -description: How to get up and running with Conversation AI using the Dapr Java SDK ---- - -As part of this demonstration, we will look at how to use the Conversation API to converse with a Large Language Model (LLM). The API -will return the response from the LLM for the given prompt. With the [provided conversation ai example](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples/conversation), you will: - -- You will provide a prompt using the [Conversation AI example](https://github.com/dapr/java-sdk/blob/master/examples/src/main/java/io/dapr/examples/conversation/DemoConversationAI.java) -- Filter out Personally identifiable information (PII). - -This example uses the default configuration from `dapr init` in [self-hosted mode](https://github.com/dapr/cli#install-dapr-on-your-local-machine-self-hosted). - -## Prerequisites - -- [Dapr CLI and initialized environment](https://docs.dapr.io/getting-started). -- Java JDK 11 (or greater): - - [Oracle JDK](https://www.oracle.com/java/technologies/downloads), or - - OpenJDK -- [Apache Maven](https://maven.apache.org/install.html), version 3.x. -- [Docker Desktop](https://www.docker.com/products/docker-desktop) - -## Set up the environment - -Clone the [Java SDK repo](https://github.com/dapr/java-sdk) and navigate into it. - -```bash -git clone https://github.com/dapr/java-sdk.git -cd java-sdk -``` - -Run the following command to install the requirements for running the Conversation AI example with the Dapr Java SDK. - -```bash -mvn clean install -DskipTests -``` - -From the Java SDK root directory, navigate to the examples' directory. - -```bash -cd examples -``` - -Run the Dapr sidecar. - -```sh -dapr run --app-id conversationapp --dapr-grpc-port 51439 --dapr-http-port 3500 --app-port 8080 -``` - -> Now, Dapr is listening for HTTP requests at `http://localhost:3500` and gRPC requests at `http://localhost:51439`. - -## Send a prompt with Personally identifiable information (PII) to the Conversation AI API - -In the `DemoConversationAI` there are steps to send a prompt using the `converse` method under the `DaprPreviewClient`. - -```java -public class DemoConversationAI { - /** - * The main method to start the client. - * - * @param args Input arguments (unused). - */ - public static void main(String[] args) { - try (DaprPreviewClient client = new DaprClientBuilder().buildPreviewClient()) { - System.out.println("Sending the following input to LLM: Hello How are you? 
This is the my number 672-123-4567"); - - ConversationInput daprConversationInput = new ConversationInput("Hello How are you? " - + "This is the my number 672-123-4567"); - - // Component name is the name provided in the metadata block of the conversation.yaml file. - Mono responseMono = client.converse(new ConversationRequest("echo", - List.of(daprConversationInput)) - .setContextId("contextId") - .setScrubPii(true).setTemperature(1.1d)); - ConversationResponse response = responseMono.block(); - System.out.printf("Conversation output: %s", response.getConversationOutputs().get(0).getResult()); - } catch (Exception e) { - throw new RuntimeException(e); - } - } -} -``` - -Run the `DemoConversationAI` with the following command. - -```sh -java -jar target/dapr-java-sdk-examples-exec.jar io.dapr.examples.conversation.DemoConversationAI -``` - -### Sample output -``` -== APP == Conversation output: Hello How are you? This is the my number -``` - -As shown in the output, the number sent to the API is obfuscated and returned in the form of . -The example above uses an ["echo"](https://docs.dapr.io/developing-applications/building-blocks/conversation/howto-conversation-layer/#set-up-the-conversation-component) -component for testing, which simply returns the input message. -When integrated with LLMs like OpenAI or Claude, you’ll receive meaningful responses instead of echoed input. - -## Next steps -- [Learn more about Conversation AI]({{% ref conversation-overview.md %}}) -- [Conversation AI API reference]({{% ref conversation_api.md %}}) \ No newline at end of file diff --git a/daprdocs/content/en/java-sdk-docs/java-client/_index.md b/daprdocs/content/en/java-sdk-docs/java-client/_index.md deleted file mode 100644 index 5f33eb41e..000000000 --- a/daprdocs/content/en/java-sdk-docs/java-client/_index.md +++ /dev/null @@ -1,756 +0,0 @@ ---- -type: docs -title: "Getting started with the Dapr client Java SDK" -linkTitle: "Client" -weight: 3000 -description: How to get up and running with the Dapr Java SDK ---- - -The Dapr client package allows you to interact with other Dapr applications from a Java application. - -{{% alert title="Note" color="primary" %}} -If you haven't already, [try out one of the quickstarts]({{% ref quickstarts %}}) for a quick walk-through on how to use the Dapr Java SDK with an API building block. - -{{% /alert %}} - -## Prerequisites - -[Complete initial setup and import the Java SDK into your project]({{% ref java %}}) - -## Initializing the client -You can initialize a Dapr client as so: - -```java -DaprClient client = new DaprClientBuilder().build() -``` - -This will connect to the default Dapr gRPC endpoint `localhost:50001`. For information about configuring the client using environment variables and system properties, see [Properties]({{% ref properties.md %}}). - -#### Error Handling - -Initially, errors in Dapr followed the Standard gRPC error model. However, to provide more detailed and informative error -messages, in version 1.13 an enhanced error model has been introduced which aligns with the gRPC Richer error model. In -response, the Java SDK extended the DaprException to include the error details that were added in Dapr. - -Example of handling the DaprException and consuming the error details when using the Dapr Java SDK: - -```java -... 
- try { - client.publishEvent("unknown_pubsub", "mytopic", "mydata").block(); - } catch (DaprException exception) { - System.out.println("Dapr exception's error code: " + exception.getErrorCode()); - System.out.println("Dapr exception's message: " + exception.getMessage()); - // DaprException now contains `getStatusDetails()` to include more details about the error from Dapr runtime. - System.out.println("Dapr exception's reason: " + exception.getStatusDetails().get( - DaprErrorDetails.ErrorDetailType.ERROR_INFO, - "reason", - TypeRef.STRING)); - } -... -``` - -## Building blocks - -The Java SDK allows you to interface with all of the [Dapr building blocks]({{% ref building-blocks %}}). - -### Invoke a service - -```java -import io.dapr.client.DaprClient; -import io.dapr.client.DaprClientBuilder; - -try (DaprClient client = (new DaprClientBuilder()).build()) { - // invoke a 'GET' method (HTTP) skipping serialization: \say with a Mono return type - // for gRPC set HttpExtension.NONE parameters below - response = client.invokeMethod(SERVICE_TO_INVOKE, METHOD_TO_INVOKE, "{\"name\":\"World!\"}", HttpExtension.GET, byte[].class).block(); - - // invoke a 'POST' method (HTTP) skipping serialization: to \say with a Mono return type - response = client.invokeMethod(SERVICE_TO_INVOKE, METHOD_TO_INVOKE, "{\"id\":\"100\", \"FirstName\":\"Value\", \"LastName\":\"Value\"}", HttpExtension.POST, byte[].class).block(); - - System.out.println(new String(response)); - - // invoke a 'POST' method (HTTP) with serialization: \employees with a Mono return type - Employee newEmployee = new Employee("Nigel", "Guitarist"); - Employee employeeResponse = client.invokeMethod(SERVICE_TO_INVOKE, "employees", newEmployee, HttpExtension.POST, Employee.class).block(); -} -``` - -- For a full guide on service invocation visit [How-To: Invoke a service]({{% ref howto-invoke-discover-services.md %}}). -- Visit [Java SDK examples](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples/invoke) for code samples and instructions to try out service invocation - -### Save & get application state - -```java -import io.dapr.client.DaprClient; -import io.dapr.client.DaprClientBuilder; -import io.dapr.client.domain.State; -import reactor.core.publisher.Mono; - -try (DaprClient client = (new DaprClientBuilder()).build()) { - // Save state - client.saveState(STATE_STORE_NAME, FIRST_KEY_NAME, myClass).block(); - - // Get state - State retrievedMessage = client.getState(STATE_STORE_NAME, FIRST_KEY_NAME, MyClass.class).block(); - - // Delete state - client.deleteState(STATE_STORE_NAME, FIRST_KEY_NAME).block(); -} -``` - -- For a full list of state operations visit [How-To: Get & save state]({{% ref howto-get-save-state.md %}}). 
-- Visit [Java SDK examples](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples/state) for code samples and instructions to try out state management - -### Publish & subscribe to messages - -##### Publish messages - -```java -import io.dapr.client.DaprClient; -import io.dapr.client.DaprClientBuilder; -import io.dapr.client.domain.Metadata; -import static java.util.Collections.singletonMap; - -try (DaprClient client = (new DaprClientBuilder()).build()) { - client.publishEvent(PUBSUB_NAME, TOPIC_NAME, message, singletonMap(Metadata.TTL_IN_SECONDS, MESSAGE_TTL_IN_SECONDS)).block(); -} -``` - -##### Subscribe to messages - -```java -import com.fasterxml.jackson.databind.ObjectMapper; -import io.dapr.Topic; -import io.dapr.client.domain.BulkSubscribeAppResponse; -import io.dapr.client.domain.BulkSubscribeAppResponseEntry; -import io.dapr.client.domain.BulkSubscribeAppResponseStatus; -import io.dapr.client.domain.BulkSubscribeMessage; -import io.dapr.client.domain.BulkSubscribeMessageEntry; -import io.dapr.client.domain.CloudEvent; -import io.dapr.springboot.annotations.BulkSubscribe; -import org.springframework.web.bind.annotation.PostMapping; -import org.springframework.web.bind.annotation.RequestBody; -import org.springframework.web.bind.annotation.RestController; -import reactor.core.publisher.Mono; - -@RestController -public class SubscriberController { - - private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); - - @Topic(name = "testingtopic", pubsubName = "${myAppProperty:messagebus}") - @PostMapping(path = "/testingtopic") - public Mono handleMessage(@RequestBody(required = false) CloudEvent cloudEvent) { - return Mono.fromRunnable(() -> { - try { - System.out.println("Subscriber got: " + cloudEvent.getData()); - System.out.println("Subscriber got: " + OBJECT_MAPPER.writeValueAsString(cloudEvent)); - } catch (Exception e) { - throw new RuntimeException(e); - } - }); - } - - @Topic(name = "testingtopic", pubsubName = "${myAppProperty:messagebus}", - rule = @Rule(match = "event.type == 'myevent.v2'", priority = 1)) - @PostMapping(path = "/testingtopicV2") - public Mono handleMessageV2(@RequestBody(required = false) CloudEvent envelope) { - return Mono.fromRunnable(() -> { - try { - System.out.println("Subscriber got: " + cloudEvent.getData()); - System.out.println("Subscriber got: " + OBJECT_MAPPER.writeValueAsString(cloudEvent)); - } catch (Exception e) { - throw new RuntimeException(e); - } - }); - } - - @BulkSubscribe() - @Topic(name = "testingtopicbulk", pubsubName = "${myAppProperty:messagebus}") - @PostMapping(path = "/testingtopicbulk") - public Mono handleBulkMessage( - @RequestBody(required = false) BulkSubscribeMessage> bulkMessage) { - return Mono.fromCallable(() -> { - if (bulkMessage.getEntries().size() == 0) { - return new BulkSubscribeAppResponse(new ArrayList()); - } - - System.out.println("Bulk Subscriber received " + bulkMessage.getEntries().size() + " messages."); - - List entries = new ArrayList(); - for (BulkSubscribeMessageEntry entry : bulkMessage.getEntries()) { - try { - System.out.printf("Bulk Subscriber message has entry ID: %s\n", entry.getEntryId()); - CloudEvent cloudEvent = (CloudEvent) entry.getEvent(); - System.out.printf("Bulk Subscriber got: %s\n", cloudEvent.getData()); - entries.add(new BulkSubscribeAppResponseEntry(entry.getEntryId(), BulkSubscribeAppResponseStatus.SUCCESS)); - } catch (Exception e) { - e.printStackTrace(); - entries.add(new BulkSubscribeAppResponseEntry(entry.getEntryId(), 
BulkSubscribeAppResponseStatus.RETRY)); - } - } - return new BulkSubscribeAppResponse(entries); - }); - } -} -``` - -##### Bulk Publish Messages -> Note: API is in Alpha stage - - -```java -import io.dapr.client.DaprClientBuilder; -import io.dapr.client.DaprPreviewClient; -import io.dapr.client.domain.BulkPublishResponse; -import io.dapr.client.domain.BulkPublishResponseFailedEntry; -import java.util.ArrayList; -import java.util.List; -class Solution { - public void publishMessages() { - try (DaprPreviewClient client = (new DaprClientBuilder()).buildPreviewClient()) { - // Create a list of messages to publish - List messages = new ArrayList<>(); - for (int i = 0; i < NUM_MESSAGES; i++) { - String message = String.format("This is message #%d", i); - messages.add(message); - System.out.println("Going to publish message : " + message); - } - - // Publish list of messages using the bulk publish API - BulkPublishResponse res = client.publishEvents(PUBSUB_NAME, TOPIC_NAME, "text/plain", messages).block() - } - } -} -``` - -- For a full guide on publishing messages and subscribing to a topic [How-To: Publish & subscribe]({{% ref howto-publish-subscribe.md %}}). -- Visit [Java SDK examples](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples/pubsub/http) for code samples and instructions to try out pub/sub - -### Interact with output bindings - -```java -import io.dapr.client.DaprClient; -import io.dapr.client.DaprClientBuilder; - -try (DaprClient client = (new DaprClientBuilder()).build()) { - // sending a class with message; BINDING_OPERATION="create" - client.invokeBinding(BINDING_NAME, BINDING_OPERATION, myClass).block(); - - // sending a plain string - client.invokeBinding(BINDING_NAME, BINDING_OPERATION, message).block(); -} -``` - -- For a full guide on output bindings visit [How-To: Output bindings]({{% ref howto-bindings.md %}}). -- Visit [Java SDK examples](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples/bindings/http) for code samples and instructions to try out output bindings. - -### Interact with input bindings - -```java -import org.springframework.web.bind.annotation.*; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@RestController -@RequestMapping("/") -public class myClass { - private static final Logger log = LoggerFactory.getLogger(myClass); - @PostMapping(path = "/checkout") - public Mono getCheckout(@RequestBody(required = false) byte[] body) { - return Mono.fromRunnable(() -> - log.info("Received Message: " + new String(body))); - } -} -``` - -- For a full guide on input bindings, visit [How-To: Input bindings]({{% ref howto-triggers %}}). -- Visit [Java SDK examples](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples/bindings/http) for code samples and instructions to try out input bindings. - -### Retrieve secrets - -```java -import com.fasterxml.jackson.databind.ObjectMapper; -import io.dapr.client.DaprClient; -import io.dapr.client.DaprClientBuilder; -import java.util.Map; - -try (DaprClient client = (new DaprClientBuilder()).build()) { - Map secret = client.getSecret(SECRET_STORE_NAME, secretKey).block(); - System.out.println(JSON_SERIALIZER.writeValueAsString(secret)); -} -``` - -- For a full guide on secrets visit [How-To: Retrieve secrets]({{% ref howto-secrets.md %}}). 
-- Visit [Java SDK examples](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples/secrets) for code samples and instructions to try out retrieving secrets - -### Actors -An actor is an isolated, independent unit of compute and state with single-threaded execution. Dapr provides an actor implementation based on the [Virtual Actor pattern](https://www.microsoft.com/en-us/research/project/orleans-virtual-actors/), which provides a single-threaded programming model and where actors are garbage collected when not in use. With Dapr's implementaiton, you write your Dapr actors according to the Actor model, and Dapr leverages the scalability and reliability that the underlying platform provides. - -```java -import io.dapr.actors.ActorMethod; -import io.dapr.actors.ActorType; -import reactor.core.publisher.Mono; - -@ActorType(name = "DemoActor") -public interface DemoActor { - - void registerReminder(); - - @ActorMethod(name = "echo_message") - String say(String something); - - void clock(String message); - - @ActorMethod(returns = Integer.class) - Mono incrementAndGet(int delta); -} -``` - -- For a full guide on actors visit [How-To: Use virtual actors in Dapr]({{% ref howto-actors.md %}}). -- Visit [Java SDK examples](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples/actors) for code samples and instructions to try actors - -### Get & Subscribe to application configurations - -> Note this is a preview API and thus will only be accessible via the DaprPreviewClient interface and not the normal DaprClient interface - -```java -import io.dapr.client.DaprClientBuilder; -import io.dapr.client.DaprPreviewClient; -import io.dapr.client.domain.ConfigurationItem; -import io.dapr.client.domain.GetConfigurationRequest; -import io.dapr.client.domain.SubscribeConfigurationRequest; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Mono; - -try (DaprPreviewClient client = (new DaprClientBuilder()).buildPreviewClient()) { - // Get configuration for a single key - Mono item = client.getConfiguration(CONFIG_STORE_NAME, CONFIG_KEY).block(); - - // Get configurations for multiple keys - Mono> items = - client.getConfiguration(CONFIG_STORE_NAME, CONFIG_KEY_1, CONFIG_KEY_2); - - // Subscribe to configuration changes - Flux outFlux = client.subscribeConfiguration(CONFIG_STORE_NAME, CONFIG_KEY_1, CONFIG_KEY_2); - outFlux.subscribe(configItems -> configItems.forEach(...)); - - // Unsubscribe from configuration changes - Mono unsubscribe = client.unsubscribeConfiguration(SUBSCRIPTION_ID, CONFIG_STORE_NAME) -} -``` - -- For a full list of configuration operations visit [How-To: Manage configuration from a store]({{% ref howto-manage-configuration.md %}}). -- Visit [Java SDK examples](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples/configuration) for code samples and instructions to try out different configuration operations. 
- -### Query saved state - -> Note this is a preview API and thus will only be accessible via the DaprPreviewClient interface and not the normal DaprClient interface - -```java -import io.dapr.client.DaprClient; -import io.dapr.client.DaprClientBuilder; -import io.dapr.client.DaprPreviewClient; -import io.dapr.client.domain.QueryStateItem; -import io.dapr.client.domain.QueryStateRequest; -import io.dapr.client.domain.QueryStateResponse; -import io.dapr.client.domain.query.Query; -import io.dapr.client.domain.query.Sorting; -import io.dapr.client.domain.query.filters.EqFilter; - -try (DaprClient client = builder.build(); DaprPreviewClient previewClient = builder.buildPreviewClient()) { - String searchVal = args.length == 0 ? "searchValue" : args[0]; - - // Create JSON data - Listing first = new Listing(); - first.setPropertyType("apartment"); - first.setId("1000"); - ... - Listing second = new Listing(); - second.setPropertyType("row-house"); - second.setId("1002"); - ... - Listing third = new Listing(); - third.setPropertyType("apartment"); - third.setId("1003"); - ... - Listing fourth = new Listing(); - fourth.setPropertyType("apartment"); - fourth.setId("1001"); - ... - Map meta = new HashMap<>(); - meta.put("contentType", "application/json"); - - // Save state - SaveStateRequest request = new SaveStateRequest(STATE_STORE_NAME).setStates( - new State<>("1", first, null, meta, null), - new State<>("2", second, null, meta, null), - new State<>("3", third, null, meta, null), - new State<>("4", fourth, null, meta, null) - ); - client.saveBulkState(request).block(); - - - // Create query and query state request - - Query query = new Query() - .setFilter(new EqFilter<>("propertyType", "apartment")) - .setSort(Arrays.asList(new Sorting("id", Sorting.Order.DESC))); - QueryStateRequest request = new QueryStateRequest(STATE_STORE_NAME) - .setQuery(query); - - // Use preview client to call query state API - QueryStateResponse result = previewClient.queryState(request, MyData.class).block(); - - // View Query state response - System.out.println("Found " + result.getResults().size() + " items."); - for (QueryStateItem item : result.getResults()) { - System.out.println("Key: " + item.getKey()); - System.out.println("Data: " + item.getValue()); - } -} -``` -- For a full how-to on query state, visit [How-To: Query state]({{% ref howto-state-query-api.md %}}). -- Visit [Java SDK examples](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples/querystate) for complete code sample. - -### Distributed lock - -```java -package io.dapr.examples.lock.grpc; - -import io.dapr.client.DaprClientBuilder; -import io.dapr.client.DaprPreviewClient; -import io.dapr.client.domain.LockRequest; -import io.dapr.client.domain.UnlockRequest; -import io.dapr.client.domain.UnlockResponseStatus; -import reactor.core.publisher.Mono; - -public class DistributedLockGrpcClient { - private static final String LOCK_STORE_NAME = "lockstore"; - - /** - * Executes various methods to check the different apis. - * - * @param args arguments - * @throws Exception throws Exception - */ - public static void main(String[] args) throws Exception { - try (DaprPreviewClient client = (new DaprClientBuilder()).buildPreviewClient()) { - System.out.println("Using preview client..."); - tryLock(client); - unlock(client); - } - } - - /** - * Trying to get lock. 
- * - * @param client DaprPreviewClient object - */ - public static void tryLock(DaprPreviewClient client) { - System.out.println("*******trying to get a free distributed lock********"); - try { - LockRequest lockRequest = new LockRequest(LOCK_STORE_NAME, "resouce1", "owner1", 5); - Mono result = client.tryLock(lockRequest); - System.out.println("Lock result -> " + (Boolean.TRUE.equals(result.block()) ? "SUCCESS" : "FAIL")); - } catch (Exception ex) { - System.out.println(ex.getMessage()); - } - } - - /** - * Unlock a lock. - * - * @param client DaprPreviewClient object - */ - public static void unlock(DaprPreviewClient client) { - System.out.println("*******unlock a distributed lock********"); - try { - UnlockRequest unlockRequest = new UnlockRequest(LOCK_STORE_NAME, "resouce1", "owner1"); - Mono result = client.unlock(unlockRequest); - System.out.println("Unlock result ->" + result.block().name()); - } catch (Exception ex) { - System.out.println(ex.getMessage()); - } - } -} -``` - -- For a full how-to on distributed lock, visit [How-To: Use a Lock]({{% ref howto-use-distributed-lock.md %}}) -- Visit [Java SDK examples](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples/lock) for complete code sample. - -### Workflow - -```java -package io.dapr.examples.workflows; - -import io.dapr.workflows.client.DaprWorkflowClient; -import io.dapr.workflows.client.WorkflowState; - -import java.time.Duration; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -/** - * For setup instructions, see the README. - */ -public class DemoWorkflowClient { - - /** - * The main method. - * - * @param args Input arguments (unused). - * @throws InterruptedException If program has been interrupted. - */ - public static void main(String[] args) throws InterruptedException { - DaprWorkflowClient client = new DaprWorkflowClient(); - - try (client) { - String separatorStr = "*******"; - System.out.println(separatorStr); - String instanceId = client.scheduleNewWorkflow(DemoWorkflow.class, "input data"); - System.out.printf("Started new workflow instance with random ID: %s%n", instanceId); - - System.out.println(separatorStr); - System.out.println("**GetWorkflowMetadata:Running Workflow**"); - WorkflowState workflowMetadata = client.getWorkflowState(instanceId, true); - System.out.printf("Result: %s%n", workflowMetadata); - - System.out.println(separatorStr); - System.out.println("**WaitForWorkflowStart**"); - try { - WorkflowState waitForWorkflowStartResult = - client.waitForWorkflowStart(instanceId, Duration.ofSeconds(60), true); - System.out.printf("Result: %s%n", waitForWorkflowStartResult); - } catch (TimeoutException ex) { - System.out.printf("waitForWorkflowStart has an exception:%s%n", ex); - } - - System.out.println(separatorStr); - System.out.println("**SendExternalMessage**"); - client.raiseEvent(instanceId, "TestEvent", "TestEventPayload"); - - System.out.println(separatorStr); - System.out.println("** Registering parallel Events to be captured by allOf(t1,t2,t3) **"); - client.raiseEvent(instanceId, "event1", "TestEvent 1 Payload"); - client.raiseEvent(instanceId, "event2", "TestEvent 2 Payload"); - client.raiseEvent(instanceId, "event3", "TestEvent 3 Payload"); - System.out.printf("Events raised for workflow with instanceId: %s\n", instanceId); - - System.out.println(separatorStr); - System.out.println("** Registering Event to be captured by anyOf(t1,t2,t3) **"); - client.raiseEvent(instanceId, "e2", "event 2 Payload"); - 
System.out.printf("Event raised for workflow with instanceId: %s\n", instanceId); - - - System.out.println(separatorStr); - System.out.println("**waitForWorkflowCompletion**"); - try { - WorkflowState waitForWorkflowCompletionResult = - client.waitForWorkflowCompletion(instanceId, Duration.ofSeconds(60), true); - System.out.printf("Result: %s%n", waitForWorkflowCompletionResult); - } catch (TimeoutException ex) { - System.out.printf("waitForWorkflowCompletion has an exception:%s%n", ex); - } - - System.out.println(separatorStr); - System.out.println("**purgeWorkflow**"); - boolean purgeResult = client.purgeWorkflow(instanceId); - System.out.printf("purgeResult: %s%n", purgeResult); - - System.out.println(separatorStr); - System.out.println("**raiseEvent**"); - - String eventInstanceId = client.scheduleNewWorkflow(DemoWorkflow.class); - System.out.printf("Started new workflow instance with random ID: %s%n", eventInstanceId); - client.raiseEvent(eventInstanceId, "TestException", null); - System.out.printf("Event raised for workflow with instanceId: %s\n", eventInstanceId); - - System.out.println(separatorStr); - String instanceToTerminateId = "terminateMe"; - client.scheduleNewWorkflow(DemoWorkflow.class, null, instanceToTerminateId); - System.out.printf("Started new workflow instance with specified ID: %s%n", instanceToTerminateId); - - TimeUnit.SECONDS.sleep(5); - System.out.println("Terminate this workflow instance manually before the timeout is reached"); - client.terminateWorkflow(instanceToTerminateId, null); - System.out.println(separatorStr); - - String restartingInstanceId = "restarting"; - client.scheduleNewWorkflow(DemoWorkflow.class, null, restartingInstanceId); - System.out.printf("Started new workflow instance with ID: %s%n", restartingInstanceId); - System.out.println("Sleeping 30 seconds to restart the workflow"); - TimeUnit.SECONDS.sleep(30); - - System.out.println("**SendExternalMessage: RestartEvent**"); - client.raiseEvent(restartingInstanceId, "RestartEvent", "RestartEventPayload"); - - System.out.println("Sleeping 30 seconds to terminate the eternal workflow"); - TimeUnit.SECONDS.sleep(30); - client.terminateWorkflow(restartingInstanceId, null); - } - - System.out.println("Exiting DemoWorkflowClient."); - System.exit(0); - } -} -``` - -- For a full guide on workflows, visit: - - [How-To: Author workflows]({{% ref howto-author-workflow.md %}}). - - [How-To: Manage workflows]({{% ref howto-manage-workflow.md %}}). -- [Learn more about how to use workflows with the Java SDK]({{% ref java-workflow.md %}}). - -## Sidecar APIs - -#### Wait for sidecar -The `DaprClient` also provides a helper method to wait for the sidecar to become healthy (components only). When using -this method, be sure to specify a timeout in milliseconds and block() to wait for the result of a reactive operation. - -```java -// Wait for the Dapr sidecar to report healthy before attempting to use Dapr components. -try (DaprClient client = new DaprClientBuilder().build()) { - System.out.println("Waiting for Dapr sidecar ..."); - client.waitForSidecar(10000).block(); // Specify the timeout in milliseconds - System.out.println("Dapr sidecar is ready."); - ... -} - -// Perform Dapr component operations here i.e. fetching secrets or saving state. -``` - -### Shutdown the sidecar -```java -try (DaprClient client = new DaprClientBuilder().build()) { - logger.info("Sending shutdown request."); - client.shutdown().block(); - logger.info("Ensuring dapr has stopped."); - ... 
-} -``` - -Learn more about the [Dapr Java SDK packages available to add to your Java applications](https://dapr.github.io/java-sdk/). - -## Security - -### App API Token Authentication - -The building blocks like pubsub, input bindings, or jobs require Dapr to make incoming calls to your application, you can secure these requests using [Dapr App API Token Authentication]({{% ref app-api-token.md %}}). This ensures that only Dapr can invoke your application's endpoints. - -#### Understanding the two tokens - -Dapr uses two different tokens for securing communication. See [Properties]({{% ref properties.md %}}) for detailed information about both tokens: - -- **`DAPR_API_TOKEN`** (Your app → Dapr sidecar): Automatically handled by the Java SDK when using `DaprClient` -- **`APP_API_TOKEN`** (Dapr → Your app): Requires server-side validation in your application - -The examples below show how to implement server-side validation for `APP_API_TOKEN`. - -#### Implementing server-side token validation - -When using gRPC protocol, implement a server interceptor to capture the metadata. - -```java -import io.grpc.Context; -import io.grpc.Contexts; -import io.grpc.Metadata; -import io.grpc.ServerCall; -import io.grpc.ServerCallHandler; -import io.grpc.ServerInterceptor; - -public class SubscriberGrpcService extends AppCallbackGrpc.AppCallbackImplBase { - public static final Context.Key METADATA_KEY = Context.key("grpc-metadata"); - - // gRPC interceptor to capture metadata - public static class MetadataInterceptor implements ServerInterceptor { - @Override - public ServerCall.Listener interceptCall( - ServerCall call, - Metadata headers, - ServerCallHandler next) { - Context contextWithMetadata = Context.current().withValue(METADATA_KEY, headers); - return Contexts.interceptCall(contextWithMetadata, call, headers, next); - } - } - - // Your service methods go here... -} -``` - -Register the interceptor when building your gRPC server: - -```java -Server server = ServerBuilder.forPort(port) - .intercept(new SubscriberGrpcService.MetadataInterceptor()) - .addService(new SubscriberGrpcService()) - .build(); -server.start(); -``` - -Then, in your service methods, extract the token from metadata: - -```java -@Override -public void onTopicEvent(DaprAppCallbackProtos.TopicEventRequest request, - StreamObserver responseObserver) { - try { - // Extract metadata from context - Context context = Context.current(); - Metadata metadata = METADATA_KEY.get(context); - - if (metadata != null) { - String apiToken = metadata.get( - Metadata.Key.of("dapr-api-token", Metadata.ASCII_STRING_MARSHALLER)); - - // Validate token accordingly - } - - // Process the request - // ... 
- - } catch (Throwable e) { - responseObserver.onError(e); - } -} -``` - -#### Using with HTTP endpoints - -For HTTP-based endpoints, extract the token from the headers: - -```java -@RestController -public class SubscriberController { - - @PostMapping(path = "/endpoint") - public Mono handleRequest( - @RequestBody(required = false) byte[] body, - @RequestHeader Map headers) { - return Mono.fromRunnable(() -> { - try { - // Extract the token from headers - String apiToken = headers.get("dapr-api-token"); - - // Validate token accordingly - - // Process the request - } catch (Exception e) { - throw new RuntimeException(e); - } - }); - } -} -``` - -#### Examples - -For working examples with pubsub, bindings, and jobs: -- [PubSub with App API Token Authentication](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples/pubsub#app-api-token-authentication-optional) -- [Bindings with App API Token Authentication](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples/bindings/http#app-api-token-authentication-optional) -- [Jobs with App API Token Authentication](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples/jobs#app-api-token-authentication-optional) - -## Related links -- [Java SDK examples](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples) - -For a full list of SDK properties and how to configure them, visit [Properties]({{% ref properties.md %}}). diff --git a/daprdocs/content/en/java-sdk-docs/java-client/properties.md b/daprdocs/content/en/java-sdk-docs/java-client/properties.md deleted file mode 100644 index 87eb7a99c..000000000 --- a/daprdocs/content/en/java-sdk-docs/java-client/properties.md +++ /dev/null @@ -1,211 +0,0 @@ ---- -type: docs -title: "Properties" -linkTitle: "Properties" -weight: 3001 -description: SDK-wide properties for configuring the Dapr Java SDK using environment variables and system properties ---- - -# Properties - -The Dapr Java SDK provides a set of global properties that control the behavior of the SDK. These properties can be configured using environment variables or system properties. System properties can be set using the `-D` flag when running your Java application. - -These properties affect the entire SDK, including clients and runtime. They control aspects such as: -- Sidecar connectivity (endpoints, ports) -- Security settings (TLS, API tokens) -- Performance tuning (timeouts, connection pools) -- Protocol settings (gRPC, HTTP) -- String encoding - -## Environment Variables - -The following environment variables are available for configuring the Dapr Java SDK: - -### Sidecar Endpoints - -When these variables are set, the client will automatically use them to connect to the Dapr sidecar. 
- -| Environment Variable | Description | Default | -|---------------------|-------------|---------| -| `DAPR_GRPC_ENDPOINT` | The gRPC endpoint for the Dapr sidecar | `localhost:50001` | -| `DAPR_HTTP_ENDPOINT` | The HTTP endpoint for the Dapr sidecar | `localhost:3500` | -| `DAPR_GRPC_PORT` | The gRPC port for the Dapr sidecar (legacy, `DAPR_GRPC_ENDPOINT` takes precedence) | `50001` | -| `DAPR_HTTP_PORT` | The HTTP port for the Dapr sidecar (legacy, `DAPR_HTTP_ENDPOINT` takes precedence) | `3500` | - -### API Tokens - -Dapr supports two types of API tokens for securing communication: - -| Environment Variable | Description | Default | -|---------------------|-------------|---------| -| `DAPR_API_TOKEN` | API token for authenticating requests **from your app to the Dapr sidecar**. The Java SDK automatically includes this token in requests when using `DaprClient`. | `null` | -| `APP_API_TOKEN` | API token for authenticating requests **from Dapr to your app**. When set, Dapr includes this token in the `dapr-api-token` header/metadata when calling your application (for pubsub subscribers, input bindings, or job triggers). Your application must validate this token. | `null` | - -For implementation examples, see [App API Token Authentication]({{% ref java-client#app-api-token-authentication %}}). For more details, see [Dapr API token authentication](https://docs.dapr.io/operations/security/api-token/). - -### gRPC Configuration - -#### TLS Settings -For secure gRPC communication, you can configure TLS settings using the following environment variables: - -| Environment Variable | Description | Default | -|---------------------|-------------|---------| -| `DAPR_GRPC_TLS_INSECURE` | When set to "true", enables insecure TLS mode which still uses TLS but doesn't verify certificates. This uses InsecureTrustManagerFactory to trust all certificates. This should only be used for testing or in secure environments. | `false` | -| `DAPR_GRPC_TLS_CA_PATH` | Path to the CA certificate file. This is used for TLS connections to servers with self-signed certificates. | `null` | -| `DAPR_GRPC_TLS_CERT_PATH` | Path to the TLS certificate file for client authentication. | `null` | -| `DAPR_GRPC_TLS_KEY_PATH` | Path to the TLS private key file for client authentication. | `null` | - -#### Keepalive Settings -Configure gRPC keepalive behavior using these environment variables: - -| Environment Variable | Description | Default | -|---------------------|-------------|---------| -| `DAPR_GRPC_ENABLE_KEEP_ALIVE` | Whether to enable gRPC keepalive | `false` | -| `DAPR_GRPC_KEEP_ALIVE_TIME_SECONDS` | gRPC keepalive time in seconds | `10` | -| `DAPR_GRPC_KEEP_ALIVE_TIMEOUT_SECONDS` | gRPC keepalive timeout in seconds | `5` | -| `DAPR_GRPC_KEEP_ALIVE_WITHOUT_CALLS` | Whether to keep gRPC connection alive without calls | `true` | - -#### Inbound Message Settings -Configure gRPC inbound message settings using these environment variables: - -| Environment Variable | Description | Default | -|---------------------|-------------|---------| -| `DAPR_GRPC_MAX_INBOUND_MESSAGE_SIZE_BYTES` | Dapr's maximum inbound message size for gRPC in bytes. 
This value sets the maximum size of a gRPC message that can be received by the application | `4194304` | -| `DAPR_GRPC_MAX_INBOUND_METADATA_SIZE_BYTES` | Dapr's maximum inbound metadata size for gRPC in bytes | `8192` | - -### HTTP Client Configuration - -These properties control the behavior of the HTTP client used for communication with the Dapr sidecar: - -| Environment Variable | Description | Default | -|---------------------|-------------|---------| -| `DAPR_HTTP_CLIENT_READ_TIMEOUT_SECONDS` | Timeout in seconds for HTTP client read operations. This is the maximum time to wait for a response from the Dapr sidecar. | `60` | -| `DAPR_HTTP_CLIENT_MAX_REQUESTS` | Maximum number of concurrent HTTP requests that can be executed. Above this limit, requests will queue in memory waiting for running calls to complete. | `1024` | -| `DAPR_HTTP_CLIENT_MAX_IDLE_CONNECTIONS` | Maximum number of idle connections in the HTTP connection pool. This is the maximum number of connections that can remain idle in the pool. | `128` | - -### API Configuration - -These properties control the behavior of API calls made through the SDK: - -| Environment Variable | Description | Default | -|---------------------|-------------|---------| -| `DAPR_API_MAX_RETRIES` | Maximum number of retries for retriable exceptions when making API calls to the Dapr sidecar | `0` | -| `DAPR_API_TIMEOUT_MILLISECONDS` | Timeout in milliseconds for API calls to the Dapr sidecar. A value of 0 means no timeout. | `0` | - -### String Encoding - -| Environment Variable | Description | Default | -|---------------------|-------------|---------| -| `DAPR_STRING_CHARSET` | Character set used for string encoding/decoding in the SDK. Must be a valid Java charset name. | `UTF-8` | - -### System Properties - -All environment variables can be set as system properties using the `-D` flag. 
Here is the complete list of available system properties: - -| System Property | Description | Default | -|----------------|-------------|---------| -| `dapr.sidecar.ip` | IP address for the Dapr sidecar | `localhost` | -| `dapr.http.port` | HTTP port for the Dapr sidecar | `3500` | -| `dapr.grpc.port` | gRPC port for the Dapr sidecar | `50001` | -| `dapr.grpc.tls.cert.path` | Path to the gRPC TLS certificate | `null` | -| `dapr.grpc.tls.key.path` | Path to the gRPC TLS key | `null` | -| `dapr.grpc.tls.ca.path` | Path to the gRPC TLS CA certificate | `null` | -| `dapr.grpc.tls.insecure` | Whether to use insecure TLS mode | `false` | -| `dapr.grpc.endpoint` | gRPC endpoint for remote sidecar | `null` | -| `dapr.grpc.enable.keep.alive` | Whether to enable gRPC keepalive | `false` | -| `dapr.grpc.keep.alive.time.seconds` | gRPC keepalive time in seconds | `10` | -| `dapr.grpc.keep.alive.timeout.seconds` | gRPC keepalive timeout in seconds | `5` | -| `dapr.grpc.keep.alive.without.calls` | Whether to keep gRPC connection alive without calls | `true` | -| `dapr.http.endpoint` | HTTP endpoint for remote sidecar | `null` | -| `dapr.api.maxRetries` | Maximum number of retries for API calls | `0` | -| `dapr.api.timeoutMilliseconds` | Timeout for API calls in milliseconds | `0` | -| `dapr.api.token` | API token for authentication | `null` | -| `dapr.string.charset` | String encoding used in the SDK | `UTF-8` | -| `dapr.http.client.readTimeoutSeconds` | Timeout in seconds for HTTP client reads | `60` | -| `dapr.http.client.maxRequests` | Maximum number of concurrent HTTP requests | `1024` | -| `dapr.http.client.maxIdleConnections` | Maximum number of idle HTTP connections | `128` | - -## Property Resolution Order - -Properties are resolved in the following order: -1. Override values (if provided when creating a Properties instance) -2. System properties (set via `-D`) -3. Environment variables -4. Default values - -The SDK checks each source in order. If a value is invalid for the property type (e.g., non-numeric for a numeric property), the SDK will log a warning and try the next source. For example: - -```bash -# Invalid boolean value - will be ignored -java -Ddapr.grpc.enable.keep.alive=not-a-boolean -jar myapp.jar - -# Valid boolean value - will be used -export DAPR_GRPC_ENABLE_KEEP_ALIVE=false -``` - -In this case, the environment variable is used because the system property value is invalid. However, if both values are valid, the system property takes precedence: - -```bash -# Valid boolean value - will be used -java -Ddapr.grpc.enable.keep.alive=true -jar myapp.jar - -# Valid boolean value - will be ignored -export DAPR_GRPC_ENABLE_KEEP_ALIVE=false -``` - -Override values can be set using the `DaprClientBuilder` in two ways: - -1. Using individual property overrides (recommended for most cases): -```java -import io.dapr.config.Properties; - -// Set a single property override -DaprClient client = new DaprClientBuilder() - .withPropertyOverride(Properties.GRPC_ENABLE_KEEP_ALIVE, "true") - .build(); - -// Or set multiple property overrides -DaprClient client = new DaprClientBuilder() - .withPropertyOverride(Properties.GRPC_ENABLE_KEEP_ALIVE, "true") - .withPropertyOverride(Properties.HTTP_CLIENT_READ_TIMEOUT_SECONDS, "120") - .build(); -``` - -2. 
Using a Properties instance (useful when you have many properties to set at once): -```java -// Create a map of property overrides -Map overrides = new HashMap<>(); -overrides.put("dapr.grpc.enable.keep.alive", "true"); -overrides.put("dapr.http.client.readTimeoutSeconds", "120"); - -// Create a Properties instance with overrides -Properties properties = new Properties(overrides); - -// Use these properties when creating a client -DaprClient client = new DaprClientBuilder() - .withProperties(properties) - .build(); -``` - -For most use cases, you'll use system properties or environment variables. Override values are primarily used when you need different property values for different instances of the SDK in the same application. - -## Proxy Configuration - -You can configure proxy settings for your Java application using system properties. These are standard Java system properties that are part of Java's networking layer (`java.net` package), not specific to Dapr. They are used by Java's networking stack, including the HTTP client that Dapr's SDK uses. - -For detailed information about Java's proxy configuration, including all available properties and their usage, see the [Java Networking Properties documentation](https://docs.oracle.com/en/java/javase/11/docs/api/java.base/java/net/doc-files/net-properties.html). - - -For example, here's how to configure a proxy: - -```bash -# Configure HTTP proxy - replace with your actual proxy server details -java -Dhttp.proxyHost=your-proxy-server.com -Dhttp.proxyPort=8080 -jar myapp.jar - -# Configure HTTPS proxy - replace with your actual proxy server details -java -Dhttps.proxyHost=your-proxy-server.com -Dhttps.proxyPort=8443 -jar myapp.jar -``` - -Replace `your-proxy-server.com` with your actual proxy server hostname or IP address, and adjust the port numbers to match your proxy server configuration. - -These proxy settings will affect all HTTP/HTTPS connections made by your Java application, including connections to the Dapr sidecar. \ No newline at end of file diff --git a/daprdocs/content/en/java-sdk-docs/java-jobs/_index.md b/daprdocs/content/en/java-sdk-docs/java-jobs/_index.md deleted file mode 100644 index 9d017f777..000000000 --- a/daprdocs/content/en/java-sdk-docs/java-jobs/_index.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -type: docs -title: "Jobs" -linkTitle: "Jobs" -weight: 3000 -description: With the Dapr Jobs package, you can interact with the Dapr Jobs APIs from a Java application to trigger future operations to run according to a predefined schedule with an optional payload. To get started, walk through the [Dapr Jobs]({{% ref java-jobs-howto.md %}}) how-to guide. ---- diff --git a/daprdocs/content/en/java-sdk-docs/java-jobs/java-jobs-howto.md b/daprdocs/content/en/java-sdk-docs/java-jobs/java-jobs-howto.md deleted file mode 100644 index e7c634628..000000000 --- a/daprdocs/content/en/java-sdk-docs/java-jobs/java-jobs-howto.md +++ /dev/null @@ -1,164 +0,0 @@ ---- -type: docs -title: "How to: Author and manage Dapr Jobs in the Java SDK" -linkTitle: "How to: Author and manage Jobs" -weight: 20000 -description: How to get up and running with Jobs using the Dapr Java SDK ---- - -As part of this demonstration we will schedule a Dapr Job. The scheduled job will trigger an endpoint registered in the -same app. 
With the [provided jobs example](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples/jobs), you will: - -- Schedule a Job [Job scheduling example](https://github.com/dapr/java-sdk/blob/master/examples/src/main/java/io/dapr/examples/jobs/DemoJobsClient.java) -- Register an endpoint for the dapr sidecar to invoke at trigger time [Endpoint Registration](https://github.com/dapr/java-sdk/blob/master/examples/src/main/java/io/dapr/examples/jobs/DemoJobsSpringApplication.java) - -This example uses the default configuration from `dapr init` in [self-hosted mode](https://github.com/dapr/cli#install-dapr-on-your-local-machine-self-hosted). - -## Prerequisites - -- [Dapr CLI and initialized environment](https://docs.dapr.io/getting-started). -- Java JDK 11 (or greater): - - [Oracle JDK](https://www.oracle.com/java/technologies/downloads), or - - OpenJDK -- [Apache Maven](https://maven.apache.org/install.html), version 3.x. -- [Docker Desktop](https://www.docker.com/products/docker-desktop) - -## Set up the environment - -Clone the [Java SDK repo](https://github.com/dapr/java-sdk) and navigate into it. - -```bash -git clone https://github.com/dapr/java-sdk.git -cd java-sdk -``` - -Run the following command to install the requirements for running the jobs example with the Dapr Java SDK. - -```bash -mvn clean install -DskipTests -``` - -From the Java SDK root directory, navigate to the examples' directory. - -```bash -cd examples -``` - -Run the Dapr sidecar. - -```sh -dapr run --app-id jobsapp --dapr-grpc-port 51439 --dapr-http-port 3500 --app-port 8080 -``` - -> Now, Dapr is listening for HTTP requests at `http://localhost:3500` and internal Jobs gRPC requests at `http://localhost:51439`. - -## Schedule and Get a job - -In the `DemoJobsClient` there are steps to schedule a job. Calling `scheduleJob` using the `DaprPreviewClient` -will schedule a job with the Dapr Runtime. - -```java -public class DemoJobsClient { - - /** - * The main method of this app to schedule and get jobs. - */ - public static void main(String[] args) throws Exception { - try (DaprPreviewClient client = new DaprClientBuilder().withPropertyOverrides(overrides).buildPreviewClient()) { - - // Schedule a job. - System.out.println("**** Scheduling a Job with name dapr-jobs-1 *****"); - ScheduleJobRequest scheduleJobRequest = new ScheduleJobRequest("dapr-job-1", - JobSchedule.fromString("* * * * * *")).setData("Hello World!".getBytes()); - client.scheduleJob(scheduleJobRequest).block(); - - System.out.println("**** Scheduling job dapr-jobs-1 completed *****"); - } - } -} -``` - -Call `getJob` to retrieve the job details that were previously created and scheduled. -``` -client.getJob(new GetJobRequest("dapr-job-1")).block() -``` - -Run the `DemoJobsClient` with the following command. - -```sh -java -jar target/dapr-java-sdk-examples-exec.jar io.dapr.examples.jobs.DemoJobsClient -``` - -### Sample output -``` -**** Scheduling a Job with name dapr-jobs-1 ***** -**** Scheduling job dapr-jobs-1 completed ***** -**** Retrieving a Job with name dapr-jobs-1 ***** -``` - -## Set up an endpoint to be invoked when the job is triggered - -The `DemoJobsSpringApplication` class starts a Spring Boot application that registers the endpoints specified in the `JobsController` -This endpoint acts like a callback for the scheduled job requests. - -```java -@RestController -public class JobsController { - - /** - * Handles jobs callback from Dapr. - * - * @param jobName name of the job. 
- * @param payload data from the job if payload exists. - * @return Empty Mono. - */ - @PostMapping("/job/{jobName}") - public Mono handleJob(@PathVariable("jobName") String jobName, - @RequestBody(required = false) byte[] payload) { - System.out.println("Job Name: " + jobName); - System.out.println("Job Payload: " + new String(payload)); - - return Mono.empty(); - } -} -``` - -Parameters: - -* `jobName`: The name of the triggered job. -* `payload`: Optional payload data associated with the job (as a byte array). - -Run the Spring Boot application with the following command. - -```sh -java -jar target/dapr-java-sdk-examples-exec.jar io.dapr.examples.jobs.DemoJobsSpringApplication -``` - -### Sample output -``` -Job Name: dapr-job-1 -Job Payload: Hello World! -``` - -## Delete a scheduled job - -```java -public class DemoJobsClient { - - /** - * The main method of this app deletes a job that was previously scheduled. - */ - public static void main(String[] args) throws Exception { - try (DaprPreviewClient client = new DaprClientBuilder().buildPreviewClient()) { - - // Delete a job. - System.out.println("**** Delete a Job with name dapr-jobs-1 *****"); - client.deleteJob(new DeleteJobRequest("dapr-job-1")).block(); - } - } -} -``` - -## Next steps -- [Learn more about Jobs]({{% ref jobs-overview.md %}}) -- [Jobs API reference]({{% ref jobs_api.md %}}) \ No newline at end of file diff --git a/daprdocs/content/en/java-sdk-docs/java-workflow/_index.md b/daprdocs/content/en/java-sdk-docs/java-workflow/_index.md deleted file mode 100644 index ecfb7adeb..000000000 --- a/daprdocs/content/en/java-sdk-docs/java-workflow/_index.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -type: docs -title: "Workflow" -linkTitle: "Workflow" -weight: 3000 -description: How to get up and running with the Dapr Workflow extension ---- diff --git a/daprdocs/content/en/java-sdk-docs/java-workflow/java-workflow-howto.md b/daprdocs/content/en/java-sdk-docs/java-workflow/java-workflow-howto.md deleted file mode 100644 index 79c6e06d0..000000000 --- a/daprdocs/content/en/java-sdk-docs/java-workflow/java-workflow-howto.md +++ /dev/null @@ -1,284 +0,0 @@ ---- -type: docs -title: "How to: Author and manage Dapr Workflow in the Java SDK" -linkTitle: "How to: Author and manage workflows" -weight: 20000 -description: How to get up and running with workflows using the Dapr Java SDK ---- - -Let's create a Dapr workflow and invoke it using the console. With the [provided workflow example](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples/workflows), you will: - -- Execute the workflow instance using the [Java workflow worker](https://github.com/dapr/java-sdk/blob/master/examples/src/main/java/io/dapr/examples/workflows/DemoWorkflowWorker.java) -- Utilize the Java workflow client and API calls to [start and terminate workflow instances](https://github.com/dapr/java-sdk/blob/master/examples/src/main/java/io/dapr/examples/workflows/DemoWorkflowClient.java) - -This example uses the default configuration from `dapr init` in [self-hosted mode](https://github.com/dapr/cli#install-dapr-on-your-local-machine-self-hosted). - -## Prerequisites - -- [Dapr CLI and initialized environment](https://docs.dapr.io/getting-started). -- Java JDK 11 (or greater): - - [Oracle JDK](https://www.oracle.com/java/technologies/downloads), or - - OpenJDK -- [Apache Maven](https://maven.apache.org/install.html), version 3.x. 
- -- [Docker Desktop](https://www.docker.com/products/docker-desktop) - -- Verify you're using the latest proto bindings - -## Set up the environment - -Clone the Java SDK repo and navigate into it. - -```bash -git clone https://github.com/dapr/java-sdk.git -cd java-sdk -``` - -Run the following command to install the requirements for running this workflow sample with the Dapr Java SDK. - -```bash -mvn clean install -``` - -From the Java SDK root directory, navigate to the Dapr Workflow example. - -```bash -cd examples -``` - -## Run the `DemoWorkflowWorker` - -The `DemoWorkflowWorker` class registers an implementation of `DemoWorkflow` in Dapr's workflow runtime engine. In the `DemoWorkflowWorker.java` file, you can find the `DemoWorkflowWorker` class and the `main` method: - -```java -public class DemoWorkflowWorker { - - public static void main(String[] args) throws Exception { - // Register the Workflow with the runtime. - WorkflowRuntime.getInstance().registerWorkflow(DemoWorkflow.class); - System.out.println("Start workflow runtime"); - WorkflowRuntime.getInstance().startAndBlock(); - System.exit(0); - } -} -``` - -In the code above: -- `WorkflowRuntime.getInstance().registerWorkflow()` registers `DemoWorkflow` as a workflow in the Dapr Workflow runtime. -- `WorkflowRuntime.getInstance().start()` builds and starts the engine within the Dapr Workflow runtime. - -In the terminal, execute the following command to kick off the `DemoWorkflowWorker`: - -```sh -dapr run --app-id demoworkflowworker --resources-path ./components/workflows --dapr-grpc-port 50001 -- java -jar target/dapr-java-sdk-examples-exec.jar io.dapr.examples.workflows.DemoWorkflowWorker -``` - -**Expected output** - -``` -You're up and running! Both Dapr and your app logs will appear here. - -... - -== APP == Start workflow runtime -== APP == Sep 13, 2023 9:02:03 AM com.microsoft.durabletask.DurableTaskGrpcWorker startAndBlock -== APP == INFO: Durable Task worker is connecting to sidecar at 127.0.0.1:50001. -``` - -## Run the `DemoWorkflowClient` - -The `DemoWorkflowClient` starts instances of workflows that have been registered with Dapr. - -```java -public class DemoWorkflowClient { - - // ... 
- public static void main(String[] args) throws InterruptedException { - DaprWorkflowClient client = new DaprWorkflowClient(); - - try (client) { - String separatorStr = "*******"; - System.out.println(separatorStr); - String instanceId = client.scheduleNewWorkflow(DemoWorkflow.class, "input data"); - System.out.printf("Started new workflow instance with random ID: %s%n", instanceId); - - System.out.println(separatorStr); - System.out.println("**GetInstanceMetadata:Running Workflow**"); - WorkflowState workflowMetadata = client.getWorkflowState(instanceId, true); - System.out.printf("Result: %s%n", workflowMetadata); - - System.out.println(separatorStr); - System.out.println("**WaitForWorkflowStart**"); - try { - WorkflowState waitForWorkflowStartResult = - client.waitForWorkflowStart(instanceId, Duration.ofSeconds(60), true); - System.out.printf("Result: %s%n", waitForWorkflowStartResult); - } catch (TimeoutException ex) { - System.out.printf("waitForWorkflowStart has an exception:%s%n", ex); - } - - System.out.println(separatorStr); - System.out.println("**SendExternalMessage**"); - client.raiseEvent(instanceId, "TestEvent", "TestEventPayload"); - - System.out.println(separatorStr); - System.out.println("** Registering parallel Events to be captured by allOf(t1,t2,t3) **"); - client.raiseEvent(instanceId, "event1", "TestEvent 1 Payload"); - client.raiseEvent(instanceId, "event2", "TestEvent 2 Payload"); - client.raiseEvent(instanceId, "event3", "TestEvent 3 Payload"); - System.out.printf("Events raised for workflow with instanceId: %s\n", instanceId); - - System.out.println(separatorStr); - System.out.println("** Registering Event to be captured by anyOf(t1,t2,t3) **"); - client.raiseEvent(instanceId, "e2", "event 2 Payload"); - System.out.printf("Event raised for workflow with instanceId: %s\n", instanceId); - - - System.out.println(separatorStr); - System.out.println("**waitForWorkflowCompletion**"); - try { - WorkflowState waitForWorkflowCompletionResult = - client.waitForWorkflowCompletion(instanceId, Duration.ofSeconds(60), true); - System.out.printf("Result: %s%n", waitForWorkflowCompletionResult); - } catch (TimeoutException ex) { - System.out.printf("waitForWorkflowCompletion has an exception:%s%n", ex); - } - - System.out.println(separatorStr); - System.out.println("**purgeWorkflow**"); - boolean purgeResult = client.purgeWorkflow(instanceId); - System.out.printf("purgeResult: %s%n", purgeResult); - - System.out.println(separatorStr); - System.out.println("**raiseEvent**"); - - String eventInstanceId = client.scheduleNewWorkflow(DemoWorkflow.class); - System.out.printf("Started new workflow instance with random ID: %s%n", eventInstanceId); - client.raiseEvent(eventInstanceId, "TestException", null); - System.out.printf("Event raised for workflow with instanceId: %s\n", eventInstanceId); - - System.out.println(separatorStr); - String instanceToTerminateId = "terminateMe"; - client.scheduleNewWorkflow(DemoWorkflow.class, null, instanceToTerminateId); - System.out.printf("Started new workflow instance with specified ID: %s%n", instanceToTerminateId); - - TimeUnit.SECONDS.sleep(5); - System.out.println("Terminate this workflow instance manually before the timeout is reached"); - client.terminateWorkflow(instanceToTerminateId, null); - System.out.println(separatorStr); - - String restartingInstanceId = "restarting"; - client.scheduleNewWorkflow(DemoWorkflow.class, null, restartingInstanceId); - System.out.printf("Started new workflow instance with ID: %s%n", restartingInstanceId); - 
System.out.println("Sleeping 30 seconds to restart the workflow"); - TimeUnit.SECONDS.sleep(30); - - System.out.println("**SendExternalMessage: RestartEvent**"); - client.raiseEvent(restartingInstanceId, "RestartEvent", "RestartEventPayload"); - - System.out.println("Sleeping 30 seconds to terminate the eternal workflow"); - TimeUnit.SECONDS.sleep(30); - client.terminateWorkflow(restartingInstanceId, null); - } - - System.out.println("Exiting DemoWorkflowClient."); - System.exit(0); - } -} -``` - -In a second terminal window, start the workflow by running the following command: - -```sh -java -jar target/dapr-java-sdk-examples-exec.jar io.dapr.examples.workflows.DemoWorkflowClient -``` - -**Expected output** - -``` -******* -Started new workflow instance with random ID: 0b4cc0d5-413a-4c1c-816a-a71fa24740d4 -******* -**GetInstanceMetadata:Running Workflow** -Result: [Name: 'io.dapr.examples.workflows.DemoWorkflow', ID: '0b4cc0d5-413a-4c1c-816a-a71fa24740d4', RuntimeStatus: RUNNING, CreatedAt: 2023-09-13T13:02:30.547Z, LastUpdatedAt: 2023-09-13T13:02:30.699Z, Input: '"input data"', Output: ''] -******* -**WaitForWorkflowStart** -Result: [Name: 'io.dapr.examples.workflows.DemoWorkflow', ID: '0b4cc0d5-413a-4c1c-816a-a71fa24740d4', RuntimeStatus: RUNNING, CreatedAt: 2023-09-13T13:02:30.547Z, LastUpdatedAt: 2023-09-13T13:02:30.699Z, Input: '"input data"', Output: ''] -******* -**SendExternalMessage** -******* -** Registering parallel Events to be captured by allOf(t1,t2,t3) ** -Events raised for workflow with instanceId: 0b4cc0d5-413a-4c1c-816a-a71fa24740d4 -******* -** Registering Event to be captured by anyOf(t1,t2,t3) ** -Event raised for workflow with instanceId: 0b4cc0d5-413a-4c1c-816a-a71fa24740d4 -******* -**WaitForWorkflowCompletion** -Result: [Name: 'io.dapr.examples.workflows.DemoWorkflow', ID: '0b4cc0d5-413a-4c1c-816a-a71fa24740d4', RuntimeStatus: FAILED, CreatedAt: 2023-09-13T13:02:30.547Z, LastUpdatedAt: 2023-09-13T13:02:55.054Z, Input: '"input data"', Output: ''] -******* -**purgeWorkflow** -purgeResult: true -******* -**raiseEvent** -Started new workflow instance with random ID: 7707d141-ebd0-4e54-816e-703cb7a52747 -Event raised for workflow with instanceId: 7707d141-ebd0-4e54-816e-703cb7a52747 -******* -Started new workflow instance with specified ID: terminateMe -Terminate this workflow instance manually before the timeout is reached -******* -Started new workflow instance with ID: restarting -Sleeping 30 seconds to restart the workflow -**SendExternalMessage: RestartEvent** -Sleeping 30 seconds to terminate the eternal workflow -Exiting DemoWorkflowClient. -``` - -## What happened? - -1. When you ran `dapr run`, the workflow worker registered the workflow (`DemoWorkflow`) and its actvities to the Dapr Workflow engine. -1. When you ran `java`, the workflow client started the workflow instance with the following activities. You can follow along with the output in the terminal where you ran `dapr run`. - 1. The workflow is started, raises three parallel tasks, and waits for them to complete. - 1. The workflow client calls the activity and sends the "Hello Activity" message to the console. - 1. The workflow times out and is purged. - 1. The workflow client starts a new workflow instance with a random ID, uses another workflow instance called `terminateMe` to terminate it, and restarts it with the workflow called `restarting`. - 1. The worfklow client is then exited. 
- -## Next steps -- [Learn more about Dapr workflow]({{% ref workflow-overview.md %}}) -- [Workflow API reference]({{% ref workflow_api.md %}}) - -## Advanced features - -### Task Execution Keys - -Task execution keys are unique identifiers generated by the durabletask-java library. They are stored in the `WorkflowActivityContext` and can be used to track and manage the execution of workflow activities. They are particularly useful for: - -1. **Idempotency**: Ensuring activities are not executed multiple times for the same task -2. **State Management**: Tracking the state of activity execution -3. **Error Handling**: Managing retries and failures in a controlled manner - -Here's an example of how to use task execution keys in your workflow activities: - -```java -public class TaskExecutionKeyActivity implements WorkflowActivity { - @Override - public Object run(WorkflowActivityContext ctx) { - // Get the task execution key for this activity - String taskExecutionKey = ctx.getTaskExecutionKey(); - - // Use the key to implement idempotency or state management - // For example, check if this task has already been executed - if (isTaskAlreadyExecuted(taskExecutionKey)) { - return getPreviousResult(taskExecutionKey); - } - - // Execute the activity logic - Object result = executeActivityLogic(); - - // Store the result with the task execution key - storeResult(taskExecutionKey, result); - - return result; - } -} -``` diff --git a/daprdocs/content/en/java-sdk-docs/spring-boot/_index.md b/daprdocs/content/en/java-sdk-docs/spring-boot/_index.md deleted file mode 100644 index fcfaacd1a..000000000 --- a/daprdocs/content/en/java-sdk-docs/spring-boot/_index.md +++ /dev/null @@ -1,347 +0,0 @@ ---- -type: docs -title: "Getting started with the Dapr and Spring Boot" -linkTitle: "Spring Boot Integration" -weight: 4000 -description: How to get started with Dapr and Spring Boot ---- - -By combining Dapr and Spring Boot, we can create infrastructure independent Java applications that can be deployed across different environments, supporting a wide range of on-premises and cloud provider services. - -First, we will start with a simple integration covering the `DaprClient` and the [Testcontainers](https://testcontainers.com/) integration, to then use Spring and Spring Boot mechanisms and programming model to leverage the Dapr APIs under the hood. This helps teams to remove dependencies such as clients and drivers required to connect to environment-specific infrastructure (databases, key-value stores, message brokers, configuration/secret stores, etc) - -{{% alert title="Note" color="primary" %}} -The Spring Boot integration requires Spring Boot 3.x+ to work. This will not work with Spring Boot 2.x. -The Spring Boot integration remains in alpha. We need your help and feedback to graduate it. -Please join the [#java-sdk discord channel](https://discord.com/channels/778680217417809931/778749797242765342) discussion or open issues in the [dapr/java-sdk](https://github.com/dapr/java-sdk/issues). - -{{% /alert %}} - - -## Adding the Dapr and Spring Boot integration to your project - -If you already have a Spring Boot application, you can directly add the following dependencies to your project: - -``` - - io.dapr.spring - dapr-spring-boot-starter - 0.16.0 - - - io.dapr.spring - dapr-spring-boot-starter-test - 0.16.0 - test - -``` - -You can find the [latest released version here](https://central.sonatype.com/artifact/io.dapr.spring/dapr-spring-boot-starter). 
- -By adding these dependencies, you can: -- Autowire a `DaprClient` to use inside your applications -- Use the Spring Data and Messaging abstractions and programming model that uses the Dapr APIs under the hood -- Improve your inner-development loop by relying on [Testcontainers](https://testcontainers.com/) to bootstrap Dapr Control plane services and default components - -Once these dependencies are in your application, you can rely on Spring Boot autoconfiguration to autowire a `DaprClient` instance: - -```java -@Autowired -private DaprClient daprClient; - -``` - -This will connect to the default Dapr gRPC endpoint `localhost:50001`, requiring you to start Dapr outside of your application. - -{{% alert title="Note" color="primary" %}} -By default, the following properties are preconfigured for `DaprClient` and `DaprWorkflowClient`: -```properties -dapr.client.httpEndpoint=http://localhost -dapr.client.httpPort=3500 -dapr.client.grpcEndpoint=localhost -dapr.client.grpcPort=50001 -dapr.client.apiToken= -``` -These values are used by default, but you can override them in your `application.properties` file to suit your environment. Please note that both kebab case and camel case are supported. -{{% /alert %}} - -You can use the `DaprClient` to interact with the Dapr APIs anywhere in your application, for example from inside a REST endpoint: - -```java -@RestController -public class DemoRestController { - @Autowired - private DaprClient daprClient; - - @PostMapping("/store") - public void storeOrder(@RequestBody Order order){ - daprClient.saveState("kvstore", order.orderId(), order).block(); - } -} - -record Order(String orderId, Integer amount){} -``` - -If you want to avoid managing Dapr outside of your Spring Boot application, you can rely on [Testcontainers](https://testcontainers.com/) to bootstrap Dapr beside your application for development purposes. -To do this we can create a test configuration that uses `Testcontainers` to bootstrap all we need to develop our applications using the Dapr APIs. - -Using [Testcontainers](https://testcontainers.com/) and Dapr integrations, we let the `@TestConfiguration` bootstrap Dapr for our applications. -Notice that for this example, we are configuring Dapr with a Statestore component called `kvstore` that connects to an instance of `PostgreSQL` also bootstrapped by Testcontainers. - -```java -@TestConfiguration(proxyBeanMethods = false) -public class DaprTestContainersConfig { - @Bean - @ServiceConnection - public DaprContainer daprContainer(Network daprNetwork, PostgreSQLContainer postgreSQLContainer){ - - return new DaprContainer("daprio/daprd:1.16.0-rc.5") - .withAppName("producer-app") - .withNetwork(daprNetwork) - .withComponent(new Component("kvstore", "state.postgresql", "v1", STATE_STORE_PROPERTIES)) - .withComponent(new Component("kvbinding", "bindings.postgresql", "v1", BINDING_PROPERTIES)) - .dependsOn(postgreSQLContainer); - } -} -``` - -Inside the test classpath you can add a new Spring Boot Application that uses this configuration for tests: - -```java -@SpringBootApplication -public class TestProducerApplication { - - public static void main(String[] args) { - - SpringApplication - .from(ProducerApplication::main) - .with(DaprTestContainersConfig.class) - .run(args); - } - -} -``` - -Now you can start your application with: -```bash -mvn spring-boot:test-run -``` - -Running this command will start the application, using the provided test configuration that includes the Testcontainers and Dapr integration. 
In the logs you should be able to see that the `daprd` and the `placement` service containers were started for your application. - -Besides the previous configuration (`DaprTestContainersConfig`) your tests shouldn't be testing Dapr itself, just the REST endpoints that your application is exposing. - - -## Leveraging Spring & Spring Boot programming model with Dapr - -The Java SDK allows you to interface with all of the [Dapr building blocks]({{% ref building-blocks %}}). -But if you want to leverage the Spring and Spring Boot programming model you can use the `dapr-spring-boot-starter` integration. -This includes implementations of Spring Data (`KeyValueTemplate` and `CrudRepository`) as well as a `DaprMessagingTemplate` for producing and consuming messages -(similar to [Spring Kafka](https://spring.io/projects/spring-kafka), [Spring Pulsar](https://spring.io/projects/spring-pulsar) and [Spring AMQP for RabbitMQ](https://spring.io/projects/spring-amqp)) and Dapr workflows. - -## Using Spring Data `CrudRepository` and `KeyValueTemplate` - -You can use well known Spring Data constructs relying on a Dapr-based implementation. -With Dapr, you don't need to add any infrastructure-related driver or client, making your Spring application lighter and decoupled from the environment where it is running. - -Under the hood these implementations use the Dapr Statestore and Binding APIs. - -### Configuration parameters - -With Spring Data abstractions you can configure which statestore and bindings will be used by Dapr to connect to the available infrastructure. -This can be done by setting the following properties: - -```properties -dapr.statestore.name=kvstore -dapr.statestore.binding=kvbinding -``` - -Then you can `@Autowire` a `KeyValueTemplate` or a `CrudRepository` like this: - -```java -@RestController -@EnableDaprRepositories -public class OrdersRestController { - @Autowired - private OrderRepository repository; - - @PostMapping("/orders") - public void storeOrder(@RequestBody Order order){ - repository.save(order); - } - - @GetMapping("/orders") - public Iterable getAll(){ - return repository.findAll(); - } - - -} -``` - -Where `OrderRepository` is defined in an interface that extends the Spring Data `CrudRepository` interface: - -```java -public interface OrderRepository extends CrudRepository {} -``` - -Notice that the `@EnableDaprRepositories` annotation does all the magic of wiring the Dapr APIs under the `CrudRespository` interface. 
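
The snippets above only show the repository in use. The `KeyValueTemplate` mentioned at the start of this section can also be injected and used directly. The sketch below is not part of the original example project; it assumes the `Order` record and the `kvstore`/`kvbinding` configuration shown on this page, and that `Order` exposes `orderId` as its Spring Data id:

```java
@Service
public class OrderQueryService {

  @Autowired
  private DaprKeyValueTemplate keyValueTemplate;

  public Order saveAndReload(Order order) {
    // Writes through the Dapr statestore configured for this application
    keyValueTemplate.insert(order);

    // Reads the entity back by id; findById returns an Optional
    return keyValueTemplate.findById(order.orderId(), Order.class)
        .orElseThrow(() -> new IllegalStateException("Order not found: " + order.orderId()));
  }
}
```

Because the Dapr repositories are implemented on top of this template, data written through `OrderRepository` and through `DaprKeyValueTemplate` ends up in the same underlying state store.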
-Because Dapr allow users to interact with different StateStores from the same application, as a user you need to provide the following beans as a Spring Boot `@Configuration`: - -```java -@Configuration -@EnableConfigurationProperties({DaprStateStoreProperties.class}) -public class ProducerAppConfiguration { - - @Bean - public KeyValueAdapterResolver keyValueAdapterResolver(DaprClient daprClient, ObjectMapper mapper, DaprStateStoreProperties daprStatestoreProperties) { - String storeName = daprStatestoreProperties.getName(); - String bindingName = daprStatestoreProperties.getBinding(); - - return new DaprKeyValueAdapterResolver(daprClient, mapper, storeName, bindingName); - } - - @Bean - public DaprKeyValueTemplate daprKeyValueTemplate(KeyValueAdapterResolver keyValueAdapterResolver) { - return new DaprKeyValueTemplate(keyValueAdapterResolver); - } - -} -``` - -## Using Spring Messaging for producing and consuming events - -Similar to Spring Kafka, Spring Pulsar and Spring AMQP you can use the `DaprMessagingTemplate` to publish messages to the configured infrastructure. To consume messages you can use the `@Topic` annotation (soon to be renamed to `@DaprListener`). - -To publish events/messages you can `@Autowired` the `DaprMessagingTemplate` in your Spring application. -For this example we will be publishing `Order` events and we are sending messages to the topic named `topic`. - -```java -@Autowired -private DaprMessagingTemplate messagingTemplate; - -@PostMapping("/orders") -public void storeOrder(@RequestBody Order order){ - repository.save(order); - messagingTemplate.send("topic", order); -} - -``` - -Similarly to the `CrudRepository` we need to specify which PubSub broker do we want to use to publish and consume our messages. - -```properties -dapr.pubsub.name=pubsub -``` - -Because with Dapr you can connect to multiple PubSub brokers you need to provide the following bean to let Dapr know which PubSub broker your `DaprMessagingTemplate` will use: -```java -@Bean -public DaprMessagingTemplate messagingTemplate(DaprClient daprClient, - DaprPubSubProperties daprPubSubProperties) { - return new DaprMessagingTemplate<>(daprClient, daprPubSubProperties.getName()); -} -``` - -Finally, because Dapr PubSub requires a bidirectional connection between your application and Dapr you need to expand your Testcontainers configuration with a few parameters: - -```java -@Bean -@ServiceConnection -public DaprContainer daprContainer(Network daprNetwork, PostgreSQLContainer postgreSQLContainer, RabbitMQContainer rabbitMQContainer){ - - return new DaprContainer("daprio/daprd:1.16.0-rc.5") - .withAppName("producer-app") - .withNetwork(daprNetwork) - .withComponent(new Component("kvstore", "state.postgresql", "v1", STATE_STORE_PROPERTIES)) - .withComponent(new Component("kvbinding", "bindings.postgresql", "v1", BINDING_PROPERTIES)) - .withComponent(new Component("pubsub", "pubsub.rabbitmq", "v1", rabbitMqProperties)) - .withAppPort(8080) - .withAppChannelAddress("host.testcontainers.internal") - .dependsOn(rabbitMQContainer) - .dependsOn(postgreSQLContainer); -} -``` - -Now, in the Dapr configuration we have included a `pubsub` component that will connect to an instance of RabbitMQ started by Testcontainers. -We have also set two important parameters `.withAppPort(8080)` and `.withAppChannelAddress("host.testcontainers.internal")` which allows Dapr to -contact back to the application when a message is published in the broker. 
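
Note that `STATE_STORE_PROPERTIES`, `BINDING_PROPERTIES` and `rabbitMqProperties` are referenced in the Testcontainers configuration above but not defined on this page. They are plain metadata maps for the corresponding Dapr components. The values below are only a sketch; the metadata keys and connection strings are assumptions based on the `state.postgresql` and `pubsub.rabbitmq` component documentation and depend on the component versions you run:

```java
// Illustrative values only; adjust to the metadata supported by your component versions.
private static final Map<String, String> STATE_STORE_PROPERTIES = Map.of(
    "actorStateStore", String.valueOf(true),
    "connectionString",
    "host=postgresql user=postgres password=postgres port=5432 database=dapr connect_timeout=10");

private static final Map<String, String> rabbitMqProperties = Map.of(
    "connectionString", "amqp://guest:guest@rabbitmq:5672");
```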
- -To listen to events/messages you need to expose an endpoint in the application that will be responsible to receive the messages. -If you expose a REST endpoint you can use the `@Topic` annotation to let Dapr know where it needs to forward the events/messages too: - -```java -@PostMapping("subscribe") -@Topic(pubsubName = "pubsub", name = "topic") -public void subscribe(@RequestBody CloudEvent cloudEvent){ - events.add(cloudEvent); -} -``` - -Upon bootstrapping your application, Dapr will register the subscription to messages to be forwarded to the `subscribe` endpoint exposed by your application. - -If you are writing tests for these subscribers you need to ensure that Testcontainers knows that your application will be running on port 8080, -so containers started with Testcontainers know where your application is: - -```java -@BeforeAll -public static void setup(){ - org.testcontainers.Testcontainers.exposeHostPorts(8080); -} -``` - -You can check and run the [full example source code here](https://github.com/salaboy/dapr-spring-boot-docs-examples). - -## Using Dapr Workflows with Spring Boot - -Following the same approach that we used for Spring Data and Spring Messaging, the `dapr-spring-boot-starter` brings Dapr Workflow integration for Spring Boot users. - -To work with Dapr Workflows you need to define and implement your workflows using code. The Dapr Spring Boot Starter makes your life easier by managing `Workflow`s and `WorkflowActivity`s as Spring beans. - -In order to enable the automatic bean discovery you can annotate your `@SpringBootApplication` with the `@EnableDaprWorkflows` annotation: - -``` -@SpringBootApplication -@EnableDaprWorkflows -public class MySpringBootApplication {} -``` - -By adding this annotation, all the `WorkflowActivity`s will be automatically managed by Spring and registered to the workflow engine. - -By having all `WorkflowActivity`s as managed beans we can use Spring `@Autowired` mechanism to inject any bean that our workflow activity might need to implement its functionality, for example the `@RestTemplate`: - -``` -public class MyWorkflowActivity implements WorkflowActivity { - - @Autowired - private RestTemplate restTemplate; -``` - -You can also `@Autowired` the `DaprWorkflowClient` to create new instances of your workflows. - -``` -@Autowired -private DaprWorkflowClient daprWorkflowClient; -``` - -This enable applications to schedule new workflow instances and raise events. - -``` -String instanceId = daprWorkflowClient.scheduleNewWorkflow(MyWorkflow.class, payload); -``` - -and - -``` -daprWorkflowClient.raiseEvent(instanceId, "MyEvenet", event); -``` - -Check the [Dapr Workflow documentation](https://docs.dapr.io/developing-applications/building-blocks/workflow/workflow-overview/) for more information about how to work with Dapr Workflows. - - -## Next steps - -Learn more about the [Dapr Java SDK packages available to add to your Java applications](https://dapr.github.io/java-sdk/). 
- -## Related links -- [Java SDK examples](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples) From 5984c0f91bbccbf1195bc841f682bcbd16592e5c Mon Sep 17 00:00:00 2001 From: salaboy Date: Wed, 10 Dec 2025 15:23:58 +0100 Subject: [PATCH 07/18] fixing headers Signed-off-by: salaboy --- .../testcontainers/WorkflowDashboardContainer.java | 2 +- .../testcontainers/DaprWorkflowDashboardTest.java | 13 +++++++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/testcontainers-dapr/src/main/java/io/dapr/testcontainers/WorkflowDashboardContainer.java b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/WorkflowDashboardContainer.java index d3616bad9..9cb09710c 100644 --- a/testcontainers-dapr/src/main/java/io/dapr/testcontainers/WorkflowDashboardContainer.java +++ b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/WorkflowDashboardContainer.java @@ -1,5 +1,5 @@ /* - * Copyright 2024 The Dapr Authors + * Copyright 2025 The Dapr Authors * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at diff --git a/testcontainers-dapr/src/test/java/io/dapr/testcontainers/DaprWorkflowDashboardTest.java b/testcontainers-dapr/src/test/java/io/dapr/testcontainers/DaprWorkflowDashboardTest.java index 72fb55fbd..24799c727 100644 --- a/testcontainers-dapr/src/test/java/io/dapr/testcontainers/DaprWorkflowDashboardTest.java +++ b/testcontainers-dapr/src/test/java/io/dapr/testcontainers/DaprWorkflowDashboardTest.java @@ -1,3 +1,16 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. 
+*/ + package io.dapr.testcontainers; import org.junit.jupiter.api.Test; From 0df131b1a872d0bca964a09d58a33ee2733e4d22 Mon Sep 17 00:00:00 2001 From: salaboy Date: Wed, 10 Dec 2025 16:52:47 +0100 Subject: [PATCH 08/18] codecov token and new image Signed-off-by: salaboy --- .github/workflows/build.yml | 2 ++ .../io/dapr/testcontainers/WorkflowDashboardContainer.java | 2 +- .../java/io/dapr/testcontainers/DaprWorkflowDashboardTest.java | 3 +++ 3 files changed, 6 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 64f2b6dfd..b1fdcc46b 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -33,6 +33,8 @@ jobs: run: ./mvnw clean install -B -q -DskipITs=true - name: Codecov uses: codecov/codecov-action@v5.5.1 + with: + token: ${{ secrets.CODECOV_TOKEN }} - name: Upload test report for sdk uses: actions/upload-artifact@v5 with: diff --git a/testcontainers-dapr/src/main/java/io/dapr/testcontainers/WorkflowDashboardContainer.java b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/WorkflowDashboardContainer.java index 9cb09710c..5f77f6798 100644 --- a/testcontainers-dapr/src/main/java/io/dapr/testcontainers/WorkflowDashboardContainer.java +++ b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/WorkflowDashboardContainer.java @@ -31,7 +31,7 @@ public class WorkflowDashboardContainer extends GenericContainer COMPONENT_CONVERTER = new ComponentYamlConverter(YAML_MAPPER); public static final DockerImageName DEFAULT_IMAGE_NAME = DockerImageName - .parse("public.ecr.aws/diagrid-dev/diagrid-dashboard:latest"); + .parse("ghcr.io/diagridio/diagrid-dashboard:0.0.1"); private int dashboardPort = 8080; private Component stateStoreComponent; diff --git a/testcontainers-dapr/src/test/java/io/dapr/testcontainers/DaprWorkflowDashboardTest.java b/testcontainers-dapr/src/test/java/io/dapr/testcontainers/DaprWorkflowDashboardTest.java index 24799c727..5b914edcc 100644 --- a/testcontainers-dapr/src/test/java/io/dapr/testcontainers/DaprWorkflowDashboardTest.java +++ b/testcontainers-dapr/src/test/java/io/dapr/testcontainers/DaprWorkflowDashboardTest.java @@ -17,6 +17,7 @@ import java.util.Collections; +import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -33,6 +34,8 @@ public void dashboardTest() { dashboard.configure(); assertNotNull(dashboard.getEnvMap().get("COMPONENT_FILE")); assertFalse(dashboard.getEnvMap().get("COMPONENT_FILE").isEmpty()); + assertEquals(8080, dashboard.getPort()); + } } } From 1a5ed85607f4f80129d3f3e498f01f85dba1810e Mon Sep 17 00:00:00 2001 From: salaboy Date: Mon, 5 Jan 2026 12:04:37 +0100 Subject: [PATCH 09/18] Update testcontainers-dapr/src/main/java/io/dapr/testcontainers/WorkflowDashboardContainer.java Co-authored-by: Joe Bowbeer Signed-off-by: salaboy --- .../java/io/dapr/testcontainers/WorkflowDashboardContainer.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testcontainers-dapr/src/main/java/io/dapr/testcontainers/WorkflowDashboardContainer.java b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/WorkflowDashboardContainer.java index 5f77f6798..b59750aed 100644 --- a/testcontainers-dapr/src/main/java/io/dapr/testcontainers/WorkflowDashboardContainer.java +++ b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/WorkflowDashboardContainer.java @@ -51,7 +51,7 @@ public 
WorkflowDashboardContainer withStateStoreComponent(Component stateStoreCo } /** - * Creates a new Dapr schedulers container. + * Creates a new workflow dashboard container. * @param image Docker image name. */ public WorkflowDashboardContainer(String image) { From ff5e2f1787731594a8303fa00b2d9c4f3d9e6184 Mon Sep 17 00:00:00 2001 From: salaboy Date: Mon, 5 Jan 2026 12:04:43 +0100 Subject: [PATCH 10/18] Update testcontainers-dapr/src/main/java/io/dapr/testcontainers/WorkflowDashboardContainer.java Co-authored-by: Joe Bowbeer Signed-off-by: salaboy --- .../java/io/dapr/testcontainers/WorkflowDashboardContainer.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testcontainers-dapr/src/main/java/io/dapr/testcontainers/WorkflowDashboardContainer.java b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/WorkflowDashboardContainer.java index b59750aed..950b609b4 100644 --- a/testcontainers-dapr/src/main/java/io/dapr/testcontainers/WorkflowDashboardContainer.java +++ b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/WorkflowDashboardContainer.java @@ -36,7 +36,7 @@ public class WorkflowDashboardContainer extends GenericContainer Date: Sat, 13 Dec 2025 05:06:33 +0100 Subject: [PATCH 11/18] Bringing Durable Task Java as a Maven module inside the Java SDK (#1575) * fixing checkstyle and javadocs Signed-off-by: salaboy * Replace openjdk:17-jdk-slim to eclipse-temurin:17-jdk-jammy (#1574) Signed-off-by: Matheus Cruz Signed-off-by: salaboy * Align Java API with other languages (#1560) * Align Java API with other languages Signed-off-by: Matheus Cruz * Update documentation Signed-off-by: Matheus Cruz * Change return type of waitForWorkflowStart method Signed-off-by: artur-ciocanu --------- Signed-off-by: Matheus Cruz Signed-off-by: artur-ciocanu Co-authored-by: artur-ciocanu Signed-off-by: salaboy * use built in durable task Signed-off-by: salaboy * exclude jacoco rules for examples and durabletask-client Signed-off-by: salaboy * increasing timeout for IT Signed-off-by: salaboy * removing dt build from matrix Signed-off-by: salaboy * adding java to dt build Signed-off-by: salaboy * Fix dependencies multi app build and add proper test deps (#1572) * Force Jackson version to override the SB Jackson version Signed-off-by: Artur Ciocanu * Move all the Jackson deps to parent POM. Signed-off-by: Artur Ciocanu * Ensure app JAR build order Signed-off-by: Artur Ciocanu * Remove explicit Jackson from sdk-tests module. Signed-off-by: Artur Ciocanu * Make sure test is used for test dependencies. Signed-off-by: Artur Ciocanu * Remove extra Jackson modules. 
Signed-off-by: Artur Ciocanu --------- Signed-off-by: Artur Ciocanu Signed-off-by: salaboy * docs: add architecture diagram to README (#1549) * Preview New README * Preview New README 2 * Preview New README 3 * docs: add architecture diagram showing Java SDK interaction with Dapr runtime (close #<915>) * docs: add architecture diagram showing Java SDK interaction with Dapr runtime CORRECTION (close #<915>) * docs: add architecture diagram showing Java SDK interaction with Dapr runtime (close #<915>) * docs: add architecture diagram showing Java SDK interaction with Dapr runtime (close #<915>) --------- Co-authored-by: Siri Varma Vegiraju Co-authored-by: artur-ciocanu Co-authored-by: Cassie Coyle Signed-off-by: salaboy * Add statestore example with Outbox pattern (#1582) * Add statestore example with Outbox pattern Signed-off-by: Matheus Cruz * Clean events after each test Signed-off-by: Matheus Cruz * Add license header Signed-off-by: Matheus Cruz * Apply pull request suggestions Signed-off-by: Matheus Cruz --------- Signed-off-by: Matheus Cruz Co-authored-by: salaboy Signed-off-by: salaboy * adding new method signature plus test (#1570) * adding new method signature plus test Signed-off-by: salaboy * re adding imports Signed-off-by: salaboy * fixing style Signed-off-by: salaboy * checking empty metadata Signed-off-by: salaboy * copy meta for safety and check if key is present Signed-off-by: salaboy * Centralize Maven dependency version management (#1564) Signed-off-by: salaboy * Fix dependencies multi app build and add proper test deps (#1572) * Force Jackson version to override the SB Jackson version Signed-off-by: Artur Ciocanu * Move all the Jackson deps to parent POM. Signed-off-by: Artur Ciocanu * Ensure app JAR build order Signed-off-by: Artur Ciocanu * Remove explicit Jackson from sdk-tests module. Signed-off-by: Artur Ciocanu * Make sure test is used for test dependencies. Signed-off-by: Artur Ciocanu * Remove extra Jackson modules. Signed-off-by: Artur Ciocanu --------- Signed-off-by: Artur Ciocanu Signed-off-by: salaboy * reverting pom Signed-off-by: salaboy * fix codestyle Signed-off-by: salaboy * using metaCopy Signed-off-by: salaboy --------- Signed-off-by: salaboy Signed-off-by: Artur Ciocanu Co-authored-by: artur-ciocanu Signed-off-by: salaboy * Bump actions/upload-artifact from 4 to 5 (#1587) Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 4 to 5. - [Release notes](https://github.com/actions/upload-artifact/releases) - [Commits](https://github.com/actions/upload-artifact/compare/v4...v5) --- updated-dependencies: - dependency-name: actions/upload-artifact dependency-version: '5' dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: salaboy * Add gRPC support to Dapr testcontainer (#1586) * Add gRPC support to Dapr testcontainer Signed-off-by: wlfgang * Avoid using null to indicate default value Signed-off-by: wlfgang --------- Signed-off-by: wlfgang Co-authored-by: artur-ciocanu Co-authored-by: wlfgang Signed-off-by: salaboy * Use dependencies BOM and remove duplicates. 
(#1588) Signed-off-by: Artur Ciocanu Signed-off-by: salaboy * Examples + Docs for App API Token authentication for gRPC and HTTP (#1589) * example Signed-off-by: Cassandra Coyle * docs for example Signed-off-by: Cassandra Coyle --------- Signed-off-by: Cassandra Coyle Signed-off-by: salaboy * Another set of Maven version, properties and plugin improvements (#1596) Signed-off-by: salaboy * Adding a Flux based subscribeToEvents method (#1598) * Adding a Flux based subscribeToEvents method Signed-off-by: Artur Ciocanu * Simplify GRPC stream handling Signed-off-by: Artur Ciocanu * Simplify Javadoc Signed-off-by: Artur Ciocanu * Fix unit tests and simplify implementation Signed-off-by: Artur Ciocanu * Adding event subscriber stream observer to simplify subscription logic Signed-off-by: Artur Ciocanu * Use start() method to start stream subscription Signed-off-by: Artur Ciocanu * Add unit test for event suscriber observer Signed-off-by: Artur Ciocanu * Improve the tests a little bit Signed-off-by: Artur Ciocanu * Remove the unnecessary method Signed-off-by: Artur Ciocanu * Improve error handling and use CloudEvent wrapper Signed-off-by: Artur Ciocanu * Fix unit tests asserts Signed-off-by: Artur Ciocanu * Adjust Java examples for Subscriber Signed-off-by: Artur Ciocanu --------- Signed-off-by: Artur Ciocanu Signed-off-by: salaboy * Remove SDK docs due to migration to main Docs repo (#1593) * Remove SDK docs due to migration to main Docs repo Signed-off-by: Marc Duiker * Remove sed lines related to sdk docs Signed-off-by: Marc Duiker --------- Signed-off-by: Marc Duiker Co-authored-by: salaboy Signed-off-by: salaboy * cleaning up sdk version script Signed-off-by: salaboy --------- Signed-off-by: salaboy Signed-off-by: Matheus Cruz Signed-off-by: artur-ciocanu Signed-off-by: Artur Ciocanu Signed-off-by: dependabot[bot] Signed-off-by: wlfgang Signed-off-by: Cassandra Coyle Signed-off-by: Marc Duiker Co-authored-by: Matheus Cruz <56329339+mcruzdev@users.noreply.github.com> Co-authored-by: artur-ciocanu Co-authored-by: Raymundo Zamora Co-authored-by: Siri Varma Vegiraju Co-authored-by: Cassie Coyle Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: wlfgang <14792753+wlfgang@users.noreply.github.com> Co-authored-by: wlfgang Co-authored-by: Marc Duiker Signed-off-by: salaboy --- .github/scripts/update_sdk_version.sh | 18 +- .github/workflows/build.yml | 55 +- durabletask-client/pom.xml | 163 ++ .../CompositeTaskFailedException.java | 68 + .../io/dapr/durabletask/DataConverter.java | 88 + .../dapr/durabletask/DurableTaskClient.java | 346 ++++ .../durabletask/DurableTaskGrpcClient.java | 423 ++++ .../DurableTaskGrpcClientBuilder.java | 128 ++ .../durabletask/DurableTaskGrpcWorker.java | 328 +++ .../DurableTaskGrpcWorkerBuilder.java | 164 ++ .../io/dapr/durabletask/FailureDetails.java | 145 ++ .../java/io/dapr/durabletask/Helpers.java | 77 + .../durabletask/JacksonDataConverter.java | 58 + .../NewOrchestrationInstanceOptions.java | 147 ++ ...NonDeterministicOrchestratorException.java | 20 + .../durabletask/OrchestrationMetadata.java | 283 +++ .../dapr/durabletask/OrchestrationRunner.java | 169 ++ .../OrchestrationRuntimeStatus.java | 118 ++ .../durabletask/OrchestrationStatusQuery.java | 217 ++ .../OrchestrationStatusQueryResult.java | 53 + .../durabletask/OrchestratorFunction.java | 38 + .../durabletask/PurgeInstanceCriteria.java | 125 ++ .../java/io/dapr/durabletask/PurgeResult.java | 37 + .../io/dapr/durabletask/RetryContext.java | 79 + 
.../io/dapr/durabletask/RetryHandler.java | 31 + .../java/io/dapr/durabletask/RetryPolicy.java | 176 ++ .../main/java/io/dapr/durabletask/Task.java | 91 + .../io/dapr/durabletask/TaskActivity.java | 45 + .../dapr/durabletask/TaskActivityContext.java | 51 + .../durabletask/TaskActivityExecutor.java | 96 + .../dapr/durabletask/TaskActivityFactory.java | 33 + .../durabletask/TaskCanceledException.java | 26 + .../dapr/durabletask/TaskFailedException.java | 76 + .../java/io/dapr/durabletask/TaskOptions.java | 171 ++ .../dapr/durabletask/TaskOrchestration.java | 82 + .../durabletask/TaskOrchestrationContext.java | 598 ++++++ .../TaskOrchestrationExecutor.java | 1515 ++++++++++++++ .../durabletask/TaskOrchestrationFactory.java | 33 + .../durabletask/TaskOrchestratorResult.java | 40 + .../ContinueAsNewInterruption.java | 32 + .../OrchestratorBlockedException.java | 31 + .../dapr/durabletask/util/UuidGenerator.java | 63 + .../dapr/durabletask/DurableTaskClientIT.java | 1785 +++++++++++++++++ .../DurableTaskGrpcClientTlsTest.java | 342 ++++ .../io/dapr/durabletask/ErrorHandlingIT.java | 306 +++ .../dapr/durabletask/IntegrationTestBase.java | 91 + .../io/dapr/durabletask/TaskOptionsTest.java | 142 ++ pom.xml | 25 +- sdk-workflows/pom.xml | 22 +- .../runtime/DefaultWorkflowContext.java | 2 +- .../workflows/DefaultWorkflowContextTest.java | 2 +- 51 files changed, 9199 insertions(+), 55 deletions(-) create mode 100644 durabletask-client/pom.xml create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/CompositeTaskFailedException.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/DataConverter.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/DurableTaskClient.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/DurableTaskGrpcClient.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/DurableTaskGrpcClientBuilder.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/DurableTaskGrpcWorker.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/DurableTaskGrpcWorkerBuilder.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/FailureDetails.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/Helpers.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/JacksonDataConverter.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/NewOrchestrationInstanceOptions.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/NonDeterministicOrchestratorException.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/OrchestrationMetadata.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/OrchestrationRunner.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/OrchestrationRuntimeStatus.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/OrchestrationStatusQuery.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/OrchestrationStatusQueryResult.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/OrchestratorFunction.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/PurgeInstanceCriteria.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/PurgeResult.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/RetryContext.java create mode 
100644 durabletask-client/src/main/java/io/dapr/durabletask/RetryHandler.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/RetryPolicy.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/Task.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/TaskActivity.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/TaskActivityContext.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/TaskActivityExecutor.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/TaskActivityFactory.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/TaskCanceledException.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/TaskFailedException.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/TaskOptions.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/TaskOrchestration.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/TaskOrchestrationContext.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/TaskOrchestrationExecutor.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/TaskOrchestrationFactory.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/TaskOrchestratorResult.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/interruption/ContinueAsNewInterruption.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/interruption/OrchestratorBlockedException.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/util/UuidGenerator.java create mode 100644 durabletask-client/src/test/java/io/dapr/durabletask/DurableTaskClientIT.java create mode 100644 durabletask-client/src/test/java/io/dapr/durabletask/DurableTaskGrpcClientTlsTest.java create mode 100644 durabletask-client/src/test/java/io/dapr/durabletask/ErrorHandlingIT.java create mode 100644 durabletask-client/src/test/java/io/dapr/durabletask/IntegrationTestBase.java create mode 100644 durabletask-client/src/test/java/io/dapr/durabletask/TaskOptionsTest.java diff --git a/.github/scripts/update_sdk_version.sh b/.github/scripts/update_sdk_version.sh index f11dd3db6..0e5726ecc 100755 --- a/.github/scripts/update_sdk_version.sh +++ b/.github/scripts/update_sdk_version.sh @@ -8,27 +8,11 @@ DAPR_JAVA_SDK_VERSION=$1 # Replaces the SDK major version to 0 for alpha artifacts. 
DAPR_JAVA_SDK_ALPHA_VERSION=`echo $DAPR_JAVA_SDK_VERSION | sed 's/^[0-9]*\./0./'` -mvn versions:set -DnewVersion=$DAPR_JAVA_SDK_VERSION +mvn versions:set -DnewVersion=$DAPR_JAVA_SDK_VERSION -DprocessDependencies=true mvn versions:set-property -Dproperty=dapr.sdk.alpha.version -DnewVersion=$DAPR_JAVA_SDK_ALPHA_VERSION mvn versions:set-property -Dproperty=dapr.sdk.version -DnewVersion=$DAPR_JAVA_SDK_VERSION mvn versions:set-property -Dproperty=dapr.sdk.version -DnewVersion=$DAPR_JAVA_SDK_VERSION -f sdk-tests/pom.xml mvn versions:set-property -Dproperty=dapr.sdk.alpha.version -DnewVersion=$DAPR_JAVA_SDK_ALPHA_VERSION -f sdk-tests/pom.xml -################### -# Alpha artifacts # -################### - -# sdk-workflows -mvn versions:set -DnewVersion=$DAPR_JAVA_SDK_VERSION -f sdk-workflows/pom.xml - -# testcontainers-dapr -mvn versions:set -DnewVersion=$DAPR_JAVA_SDK_VERSION -f testcontainers-dapr/pom.xml - -# dapr-spring -mvn versions:set -DnewVersion=$DAPR_JAVA_SDK_VERSION -DprocessDependencies=true -f dapr-spring/pom.xml -mvn versions:set-property -Dproperty=dapr.spring.version -DnewVersion=$DAPR_JAVA_SDK_VERSION -f dapr-spring/pom.xml - -# spring-boot-examples -mvn versions:set -DnewVersion=$DAPR_JAVA_SDK_VERSION -f spring-boot-examples/pom.xml git clean -f diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index b1fdcc46b..47f26c9e2 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -46,10 +46,58 @@ jobs: name: report-dapr-java-sdk-actors-jdk${{ env.JDK_VER }} path: sdk-actors/target/jacoco-report/ + build-durabletask: + name: "Durable Task build & tests" + runs-on: ubuntu-latest + timeout-minutes: 30 + continue-on-error: false + env: + JDK_VER: 17 + steps: + - uses: actions/checkout@v5 + - name: Set up OpenJDK ${{ env.JDK_VER }} + uses: actions/setup-java@v4 + with: + distribution: 'temurin' + java-version: ${{ env.JDK_VER }} + - name: Checkout Durable Task Sidecar + uses: actions/checkout@v4 + with: + repository: dapr/durabletask-go + path: durabletask-sidecar + + # TODO: Move the sidecar into a central image repository + - name: Initialize Durable Task Sidecar + run: docker run -d --name durabletask-sidecar -p 4001:4001 --rm -i $(docker build -q ./durabletask-sidecar) + + - name: Display Durable Task Sidecar Logs + run: nohup docker logs --since=0 durabletask-sidecar > durabletask-sidecar.log 2>&1 & + + # wait for 10 seconds, so sidecar container can be fully up, this will avoid intermittent failing issues for integration tests causing by failed to connect to sidecar + - name: Wait for 10 seconds + run: sleep 10 + + - name: Integration Tests For Durable Tasks + run: ./mvnw -B -pl durabletask-client -Pintegration-tests dependency:copy-dependencies verify || echo "TEST_FAILED=true" >> $GITHUB_ENV + continue-on-error: true + + - name: Kill Durable Task Sidecar + run: docker kill durabletask-sidecar + + - name: Upload Durable Task Sidecar Logs + uses: actions/upload-artifact@v4 + with: + name: Durable Task Sidecar Logs + path: durabletask-sidecar.log + + - name: Fail the job if tests failed + if: env.TEST_FAILED == 'true' + run: exit 1 + build: name: "Build jdk:${{ matrix.java }} sb:${{ matrix.spring-boot-display-version }} exp:${{ matrix.experimental }}" runs-on: ubuntu-latest - timeout-minutes: 30 + timeout-minutes: 45 continue-on-error: ${{ matrix.experimental }} strategy: fail-fast: false @@ -151,7 +199,7 @@ jobs: run: ./mvnw clean install -B -q -DskipTests - name: Integration tests using spring boot version ${{ matrix.spring-boot-version 
}} id: integration_tests - run: PRODUCT_SPRING_BOOT_VERSION=${{ matrix.spring-boot-version }} ./mvnw -B -Pintegration-tests dependency:copy-dependencies verify + run: PRODUCT_SPRING_BOOT_VERSION=${{ matrix.spring-boot-version }} ./mvnw -B -pl !durabletask-client -Pintegration-tests dependency:copy-dependencies verify - name: Upload failsafe test report for sdk-tests on failure if: ${{ failure() && steps.integration_tests.conclusion == 'failure' }} uses: actions/upload-artifact@v5 @@ -165,9 +213,10 @@ jobs: name: surefire-report-sdk-tests-jdk${{ matrix.java }}-sb${{ matrix.spring-boot-version }} path: sdk-tests/target/surefire-reports + publish: runs-on: ubuntu-latest - needs: [ build, test ] + needs: [ build, test, build-durabletask ] timeout-minutes: 30 env: JDK_VER: 17 diff --git a/durabletask-client/pom.xml b/durabletask-client/pom.xml new file mode 100644 index 000000000..93bed3255 --- /dev/null +++ b/durabletask-client/pom.xml @@ -0,0 +1,163 @@ + + + 4.0.0 + + io.dapr + dapr-sdk-parent + 1.17.0-SNAPSHOT + + + durabletask-client + + + ${project.build.directory}/generated-sources + ${project.build.directory}/proto + + + + + javax.annotation + javax.annotation-api + provided + + + io.grpc + grpc-protobuf + + + io.grpc + grpc-stub + + + io.grpc + grpc-netty + + + com.google.protobuf + protobuf-java + + + com.fasterxml.jackson.core + jackson-core + + + com.fasterxml.jackson.core + jackson-databind + + + com.fasterxml.jackson.core + jackson-annotations + + + com.fasterxml.jackson.datatype + jackson-datatype-jsr310 + + + io.grpc + grpc-testing + test + + + org.junit.jupiter + junit-jupiter + test + + + org.testcontainers + testcontainers + + + + + + org.sonatype.plugins + nexus-staging-maven-plugin + + + org.apache.maven.plugins + maven-failsafe-plugin + + ${project.build.outputDirectory} + + + + com.googlecode.maven-download-plugin + download-maven-plugin + 1.6.0 + + + getDaprProto + initialize + + wget + + + true + ${durabletask.proto.url} + orchestrator_service.proto + ${protobuf.input.directory} + + + + + + org.xolstice.maven.plugins + protobuf-maven-plugin + 0.6.1 + + com.google.protobuf:protoc:${protobuf.version}:exe:${os.detected.classifier} + grpc-java + io.grpc:protoc-gen-grpc-java:${grpc.version}:exe:${os.detected.classifier} + ${protobuf.input.directory} + + + + + compile + compile-custom + + + + + + org.apache.maven.plugins + maven-source-plugin + 3.2.1 + + + attach-sources + + jar-no-fork + + + + + + org.apache.maven.plugins + maven-javadoc-plugin + 3.2.0 + + true + + + + attach-javadocs + + jar + + + + + + com.github.spotbugs + spotbugs-maven-plugin + + + true + + + + + diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/CompositeTaskFailedException.java b/durabletask-client/src/main/java/io/dapr/durabletask/CompositeTaskFailedException.java new file mode 100644 index 000000000..d57ea37d2 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/CompositeTaskFailedException.java @@ -0,0 +1,68 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +import java.util.ArrayList; +import java.util.List; + +/** + * Exception that gets thrown when multiple {@link Task}s for an activity or sub-orchestration fails with an + * unhandled exception. + * + *
<p>Detailed information associated with each task failure can be retrieved using the {@link #getExceptions()}
+ * method.</p>
+ */ +public class CompositeTaskFailedException extends RuntimeException { + private final List exceptions; + + CompositeTaskFailedException() { + this.exceptions = new ArrayList<>(); + } + + CompositeTaskFailedException(List exceptions) { + this.exceptions = exceptions; + } + + CompositeTaskFailedException(String message, List exceptions) { + super(message); + this.exceptions = exceptions; + } + + CompositeTaskFailedException(String message, Throwable cause, List exceptions) { + super(message, cause); + this.exceptions = exceptions; + } + + CompositeTaskFailedException(Throwable cause, List exceptions) { + super(cause); + this.exceptions = exceptions; + } + + CompositeTaskFailedException(String message, Throwable cause, boolean enableSuppression, boolean writableStackTrace, + List exceptions) { + super(message, cause, enableSuppression, writableStackTrace); + this.exceptions = exceptions; + } + + /** + * Gets a list of exceptions that occurred during execution of a group of {@link Task}. + * These exceptions include details of the task failure and exception information + * + * @return a list of exceptions + */ + public List getExceptions() { + return new ArrayList<>(this.exceptions); + } + +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/DataConverter.java b/durabletask-client/src/main/java/io/dapr/durabletask/DataConverter.java new file mode 100644 index 000000000..3c2dd7b7e --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/DataConverter.java @@ -0,0 +1,88 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +import com.google.protobuf.Timestamp; + +import javax.annotation.Nullable; +import java.time.Instant; +import java.time.temporal.ChronoUnit; + +/** + * Interface for serializing and deserializing data that gets passed to and from orchestrators and activities. + * + *
<p>Implementations of this abstract class are free to use any serialization method. Currently, only strings are
+ * supported as the serialized representation of data. Byte array payloads and streams are not supported by this
+ * abstraction. Note that these methods all accept null values, in which case the return value should also be null.</p>
+ */ +public interface DataConverter { + /** + * Serializes the input into a text representation. + * + * @param value the value to be serialized + * @return a serialized text representation of the value or null if the value is null + */ + @Nullable + String serialize(@Nullable Object value); + + /** + * Deserializes the given text data into an object of the specified type. + * + * @param data the text data to deserialize into an object + * @param target the target class to deserialize the input into + * @param the generic parameter type representing the target class to deserialize the input into + * @return a deserialized object of type T + * @throws DataConverterException if the text data cannot be deserialized + */ + @Nullable + T deserialize(@Nullable String data, Class target); + + // Data conversion errors are expected to be unrecoverable in most cases, hence an unchecked runtime exception + class DataConverterException extends RuntimeException { + public DataConverterException(String message, Throwable cause) { + super(message, cause); + } + } + + /** + * Convert from Timestamp to Instant. + * + * @param ts timestamp to convert + * @return instant + */ + static Instant getInstantFromTimestamp(Timestamp ts) { + if (ts == null) { + return null; + } + + // We don't include nanoseconds because of serialization round-trip issues + return Instant.ofEpochSecond(ts.getSeconds(), ts.getNanos()).truncatedTo(ChronoUnit.MILLIS); + } + + /** + * Convert from Instant to Timestamp. + * @param instant to convert + * @return timestamp + */ + static Timestamp getTimestampFromInstant(Instant instant) { + if (instant == null) { + return null; + } + + return Timestamp.newBuilder() + .setSeconds(instant.getEpochSecond()) + .setNanos(instant.getNano()) + .build(); + } +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/DurableTaskClient.java b/durabletask-client/src/main/java/io/dapr/durabletask/DurableTaskClient.java new file mode 100644 index 000000000..42a98dd55 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/DurableTaskClient.java @@ -0,0 +1,346 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +import javax.annotation.Nullable; +import java.time.Duration; +import java.util.concurrent.TimeoutException; + +/** + * Base class that defines client operations for managing orchestration instances. + * + *

Instances of this class can be used to start, query, raise events to, and terminate orchestration instances. + * In most cases, methods on this class accept an instance ID as a parameter, which identifies the orchestration + * instance.

+ * + *

At the time of writing, the most common implementation of this class is DurableTaskGrpcClient, + * which works by making gRPC calls to a remote service (e.g. a sidecar) that implements the operation behavior. To + * ensure any owned network resources are properly released, instances of this class should be closed when they are no + * longer needed.

+ * + *

Instances of this class are expected to be safe for multithreaded apps. You can therefore safely cache instances + * of this class and reuse them across multiple contexts. Caching these objects is useful to improve overall + * performance.
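For illustration, a typical pattern is to build one client at startup, cache it, and reuse it across requests; the orchestrator name and input below are placeholders, while the builder and method names come from this patch:

// Build once, reuse everywhere (the client is safe for multithreaded use).
DurableTaskClient client = new DurableTaskGrpcClientBuilder()
        .port(4001) // default sidecar port used by this patch
        .build();

// Schedule a new instance of a hypothetical orchestrator with a serializable input.
String instanceId = client.scheduleNewOrchestrationInstance("ProcessOrder", "order-123");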

+ */ +public abstract class DurableTaskClient implements AutoCloseable { + + /** + * Releases any network resources held by this object. + */ + @Override + public void close() { + // no default implementation + } + + /** + * Schedules a new orchestration instance with a random ID for execution. + * + * @param orchestratorName the name of the orchestrator to schedule + * @return the randomly-generated instance ID of the scheduled orchestration instance + */ + public String scheduleNewOrchestrationInstance(String orchestratorName) { + return this.scheduleNewOrchestrationInstance(orchestratorName, null, null); + } + + /** + * Schedules a new orchestration instance with a specified input and a random ID for execution. + * + * @param orchestratorName the name of the orchestrator to schedule + * @param input the input to pass to the scheduled orchestration instance. Must be serializable. + * @return the randomly-generated instance ID of the scheduled orchestration instance + */ + public String scheduleNewOrchestrationInstance(String orchestratorName, Object input) { + return this.scheduleNewOrchestrationInstance(orchestratorName, input, null); + } + + /** + * Schedules a new orchestration instance with a specified input and ID for execution. + * + * @param orchestratorName the name of the orchestrator to schedule + * @param input the input to pass to the scheduled orchestration instance. Must be serializable. + * @param instanceId the unique ID of the orchestration instance to schedule + * @return the instanceId parameter value + */ + public String scheduleNewOrchestrationInstance(String orchestratorName, Object input, String instanceId) { + NewOrchestrationInstanceOptions options = new NewOrchestrationInstanceOptions() + .setInput(input) + .setInstanceId(instanceId); + return this.scheduleNewOrchestrationInstance(orchestratorName, options); + } + + /** + * Schedules a new orchestration instance with a specified set of options for execution. + * + * @param orchestratorName the name of the orchestrator to schedule + * @param options the options for the new orchestration instance, including input, instance ID, etc. + * @return the ID of the scheduled orchestration instance, which was either provided in options + * or randomly generated + */ + public abstract String scheduleNewOrchestrationInstance( + String orchestratorName, + NewOrchestrationInstanceOptions options); + + /** + * Sends an event notification message to a waiting orchestration instance. + * + *

In order to handle the event, the target orchestration instance must be waiting for an event named + * eventName using the {@link TaskOrchestrationContext#waitForExternalEvent(String)} method. + * If the target orchestration instance is not yet waiting for an event named eventName, + * then the event will be saved in the orchestration instance state and dispatched immediately when the + * orchestrator calls {@link TaskOrchestrationContext#waitForExternalEvent(String)}. This event saving occurs even + * if the orchestrator has canceled its wait operation before the event was received.

+ * + *

Raised events for a completed or non-existent orchestration instance will be silently discarded.

+ * + * @param instanceId the ID of the orchestration instance that will handle the event + * @param eventName the case-insensitive name of the event + */ + public void raiseEvent(String instanceId, String eventName) { + this.raiseEvent(instanceId, eventName, null); + } + + /** + * Sends an event notification message with a payload to a waiting orchestration instance. + * + *

In order to handle the event, the target orchestration instance must be waiting for an event named + * eventName using the {@link TaskOrchestrationContext#waitForExternalEvent(String)} method. + * If the target orchestration instance is not yet waiting for an event named eventName, + * then the event will be saved in the orchestration instance state and dispatched immediately when the + * orchestrator calls {@link TaskOrchestrationContext#waitForExternalEvent(String)}. This event saving occurs even + * if the orchestrator has canceled its wait operation before the event was received.

+ * + *

Raised events for a completed or non-existent orchestration instance will be silently discarded.
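A usage sketch, assuming a DurableTaskClient named client and a running instance whose orchestrator awaits an event via waitForExternalEvent; the event name and payload are placeholders:

// The orchestrator side is assumed to call ctx.waitForExternalEvent("ApprovalReceived").
// The client signals that event, optionally carrying a serializable payload.
client.raiseEvent(instanceId, "ApprovalReceived", true);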

+ * + * @param instanceId the ID of the orchestration instance that will handle the event + * @param eventName the case-insensitive name of the event + * @param eventPayload the serializable data payload to include with the event + */ + public abstract void raiseEvent(String instanceId, String eventName, @Nullable Object eventPayload); + + /** + * Fetches orchestration instance metadata from the configured durable store. + * + * @param instanceId the unique ID of the orchestration instance to fetch + * @param getInputsAndOutputs true to fetch the orchestration instance's inputs, outputs, and custom + * status, or false to omit them + * @return a metadata record that describes the orchestration instance and its execution status, or + * a default instance if no such instance is found. Please refer to method + * {@link OrchestrationMetadata#isInstanceFound()} to check if an instance is found. + */ + @Nullable + public abstract OrchestrationMetadata getInstanceMetadata(String instanceId, boolean getInputsAndOutputs); + + /** + * Waits for an orchestration to start running and returns an {@link OrchestrationMetadata} object that contains + * metadata about the started instance. + * + *

A "started" orchestration instance is any instance not in the Pending state.

+ * + *

If an orchestration instance is already running when this method is called, the method will return immediately. + *

+ * + *

Note that this method overload will not fetch the orchestration's inputs, outputs, or custom status payloads. + *
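A sketch of waiting for startup, assuming an existing client and instance ID:

try {
    // Blocks until the instance leaves the Pending state, or the 30-second deadline passes.
    OrchestrationMetadata started = client.waitForInstanceStart(instanceId, Duration.ofSeconds(30));
} catch (TimeoutException e) {
    // The instance did not start within the timeout.
}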

+ * + * @param instanceId the unique ID of the orchestration instance to wait for + * @param timeout the amount of time to wait for the orchestration instance to start + * @return the orchestration instance metadata or null if no such instance is found + * @throws TimeoutException when the orchestration instance is not started within the specified amount of time + */ + @Nullable + public OrchestrationMetadata waitForInstanceStart(String instanceId, Duration timeout) throws TimeoutException { + return this.waitForInstanceStart(instanceId, timeout, false); + } + + /** + * Waits for an orchestration to start running and returns an {@link OrchestrationMetadata} object that contains + * metadata about the started instance and optionally its input, output, and custom status payloads. + * + *

A "started" orchestration instance is any instance not in the Pending state.

+ * + *

If an orchestration instance is already running when this method is called, the method will return immediately. + *

+ * + * @param instanceId the unique ID of the orchestration instance to wait for + * @param timeout the amount of time to wait for the orchestration instance to start + * @param getInputsAndOutputs true to fetch the orchestration instance's inputs, outputs, and custom + * status, or false to omit them + * @return the orchestration instance metadata or null if no such instance is found + * @throws TimeoutException when the orchestration instance is not started within the specified amount of time + */ + @Nullable + public abstract OrchestrationMetadata waitForInstanceStart( + String instanceId, + Duration timeout, + boolean getInputsAndOutputs) throws TimeoutException; + + /** + * Waits for an orchestration to complete and returns an {@link OrchestrationMetadata} object that contains + * metadata about the completed instance. + * + *

A "completed" orchestration instance is any instance in one of the terminal states, such as the
 * Completed, Failed, or Terminated states.

+ * + *

Orchestrations are long-running and could take hours, days, or months before completing. + * Orchestrations can also be eternal, in which case they'll never complete unless terminated. + * In such cases, this call may block indefinitely, so care must be taken to ensure appropriate timeouts are used. + *

+ * + *

If an orchestration instance is already complete when this method is called, the method will return immediately. + *
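A sketch of waiting for completion with inputs and outputs fetched, assuming an existing client and instance ID:

try {
    OrchestrationMetadata completed = client.waitForInstanceCompletion(instanceId, Duration.ofMinutes(5), true);
    if (completed != null && completed.isInstanceFound()) {
        // Inspect the terminal status and serialized input/output here.
    }
} catch (TimeoutException e) {
    // Still running (or eternal); consider a longer timeout or polling.
}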

+ * @param instanceId the unique ID of the orchestration instance to wait for + * @param timeout the amount of time to wait for the orchestration instance to complete + * @param getInputsAndOutputs true to fetch the orchestration instance's inputs, outputs, and custom + * status, or false to omit them + * @return the orchestration instance metadata or null if no such instance is found + * @throws TimeoutException when the orchestration instance is not completed within the specified amount of time + */ + @Nullable + public abstract OrchestrationMetadata waitForInstanceCompletion( + String instanceId, + Duration timeout, + boolean getInputsAndOutputs) throws TimeoutException; + + /** + * Terminates a running orchestration instance and updates its runtime status to Terminated. + * + *

This method internally enqueues a "terminate" message in the task hub. When the task hub worker processes + * this message, it will update the runtime status of the target instance to Terminated. + * You can use the {@link #waitForInstanceCompletion} to wait for the instance to reach the terminated state. + *

+ * + *

Terminating an orchestration instance has no effect on any in-flight activity function executions + * or sub-orchestrations that were started by the terminated instance. Those actions will continue to run + * without interruption. However, their results will be discarded. If you want to terminate sub-orchestrations, + * you must issue separate terminate commands for each sub-orchestration instance.

+ * + *

At the time of writing, there is no way to terminate an in-flight activity execution.

+ * + *

Attempting to terminate a completed or non-existent orchestration instance will fail silently.
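A sketch of terminating an instance and then observing the terminal state, assuming an existing client and instance ID; the output string is a placeholder:

client.terminate(instanceId, "cancelled by operator");
try {
    // Wait for the Terminated state to be recorded; in-flight activities keep running,
    // but their results will be discarded.
    client.waitForInstanceCompletion(instanceId, Duration.ofSeconds(30), false);
} catch (TimeoutException e) {
    // Termination was not observed within the timeout.
}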

+ * + * @param instanceId the unique ID of the orchestration instance to terminate + * @param output the optional output to set for the terminated orchestration instance. + * This value must be serializable. + */ + public abstract void terminate(String instanceId, @Nullable Object output); + + /** + * Fetches orchestration instance metadata from the configured durable store using a status query filter. + * + * @param query filter criteria that determines which orchestrations to fetch data for. + * @return the result of the query operation, including instance metadata and possibly a continuation token + */ + public abstract OrchestrationStatusQueryResult queryInstances(OrchestrationStatusQuery query); + + /** + * Initializes the target task hub data store. + * + *

This is an administrative operation that only needs to be done once for the lifetime of the task hub.

+ * + * @param recreateIfExists true to delete any existing task hub first; false to make this + * operation a no-op if the task hub data store already exists. Note that deleting a task + * hub will result in permanent data loss. Use this operation with care. + */ + public abstract void createTaskHub(boolean recreateIfExists); + + /** + * Permanently deletes the target task hub data store and any orchestration data it may contain. + * + *

This is an administrative operation that is irreversible. It should be used with great care.

+ */ + public abstract void deleteTaskHub(); + + /** + * Purges orchestration instance metadata from the durable store. + * + *

This method can be used to permanently delete orchestration metadata from the underlying storage provider, + * including any stored inputs, outputs, and orchestration history records. This is often useful for implementing + * data retention policies and for keeping storage costs minimal. Only orchestration instances in the + * Completed, Failed, or Terminated state can be purged.

+ * + *

If the target orchestration instance is not found in the data store, or if the instance is found but not in a + * terminal state, then the returned {@link PurgeResult} will report that zero instances were purged. + * Otherwise, the existing data will be purged and the returned {@link PurgeResult} will report that one instance + * was purged.
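A sketch of purging a single instance, assuming an existing client and instance ID; note that the getDeletedInstanceCount accessor on PurgeResult is an assumption, since only its constructor appears in this patch:

PurgeResult result = client.purgeInstance(instanceId);
if (result.getDeletedInstanceCount() == 0) { // accessor name assumed for illustration
    // Instance not found, or not yet in a terminal state.
}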

+ * + * @param instanceId the unique ID of the orchestration instance to purge + * @return the result of the purge operation, including the number of purged orchestration instances (0 or 1) + */ + public abstract PurgeResult purgeInstance(String instanceId); + + /** + * Purges orchestration instance metadata from the durable store using a filter that determines which instances to + * purge data for. + * + *

This method can be used to permanently delete orchestration metadata from the underlying storage provider, + * including any stored inputs, outputs, and orchestration history records. This is often useful for implementing + * data retention policies and for keeping storage costs minimal. Only orchestration instances in the + * Completed, Failed, or Terminated state can be purged.

+ * + *

Depending on the type of the durable store, purge operations that target multiple orchestration instances may + * take a long time to complete and be resource intensive. It may therefore be useful to break up purge operations + * into multiple method calls over a period of time and have them cover smaller time windows.
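A sketch of a windowed purge, assuming an existing client; the setter names on PurgeInstanceCriteria are assumptions (only the corresponding getters appear in this patch):

PurgeInstanceCriteria criteria = new PurgeInstanceCriteria();
criteria.setCreatedTimeFrom(Instant.now().minus(Duration.ofDays(60))); // setter names assumed
criteria.setCreatedTimeTo(Instant.now().minus(Duration.ofDays(30)));
try {
    PurgeResult result = client.purgeInstances(criteria);
} catch (TimeoutException e) {
    // Narrow the time window and retry in smaller batches.
}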

+ * + * @param purgeInstanceCriteria orchestration instance filter criteria used to determine which instances to purge + * @return the result of the purge operation, including the number of purged orchestration instances (0 or 1) + * @throws TimeoutException when purging instances is not completed within the specified amount of time. + * The default timeout for purging instances is 10 minutes + */ + public abstract PurgeResult purgeInstances(PurgeInstanceCriteria purgeInstanceCriteria) throws TimeoutException; + + /** + * Restarts an existing orchestration instance with the original input. + * + * @param instanceId the ID of the previously run orchestration instance to restart. + * @param restartWithNewInstanceId true to restart the orchestration instance with a new instance ID + * false to restart the orchestration instance with same instance ID + * @return the ID of the scheduled orchestration instance, which is either instanceId or randomly + * generated depending on the value of restartWithNewInstanceId + */ + public abstract String restartInstance(String instanceId, boolean restartWithNewInstanceId); + + /** + * Suspends a running orchestration instance. + * + * @param instanceId the ID of the orchestration instance to suspend + */ + public void suspendInstance(String instanceId) { + this.suspendInstance(instanceId, null); + } + + /** + * Suspends a running orchestration instance. + * + * @param instanceId the ID of the orchestration instance to suspend + * @param reason the reason for suspending the orchestration instance + */ + public abstract void suspendInstance(String instanceId, @Nullable String reason); + + /** + * Resumes a running orchestration instance. + * + * @param instanceId the ID of the orchestration instance to resume + */ + public void resumeInstance(String instanceId) { + this.resumeInstance(instanceId, null); + } + + /** + * Resumes a running orchestration instance. + * + * @param instanceId the ID of the orchestration instance to resume + * @param reason the reason for resuming the orchestration instance + */ + public abstract void resumeInstance(String instanceId, @Nullable String reason); +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/DurableTaskGrpcClient.java b/durabletask-client/src/main/java/io/dapr/durabletask/DurableTaskGrpcClient.java new file mode 100644 index 000000000..b0fa24a5e --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/DurableTaskGrpcClient.java @@ -0,0 +1,423 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package io.dapr.durabletask; + +import com.google.protobuf.StringValue; +import com.google.protobuf.Timestamp; +import io.dapr.durabletask.implementation.protobuf.OrchestratorService; +import io.dapr.durabletask.implementation.protobuf.TaskHubSidecarServiceGrpc; +import io.grpc.Channel; +import io.grpc.ChannelCredentials; +import io.grpc.Grpc; +import io.grpc.ManagedChannel; +import io.grpc.ManagedChannelBuilder; +import io.grpc.Status; +import io.grpc.StatusRuntimeException; +import io.grpc.TlsChannelCredentials; +import io.grpc.netty.GrpcSslContexts; +import io.grpc.netty.NettyChannelBuilder; +import io.netty.handler.ssl.util.InsecureTrustManagerFactory; + +import javax.annotation.Nullable; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.time.Duration; +import java.time.Instant; +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.logging.Logger; + +/** + * Durable Task client implementation that uses gRPC to connect to a remote "sidecar" process. + */ +public final class DurableTaskGrpcClient extends DurableTaskClient { + private static final int DEFAULT_PORT = 4001; + private static final Logger logger = Logger.getLogger(DurableTaskGrpcClient.class.getPackage().getName()); + private static final String GRPC_TLS_CA_PATH = "DAPR_GRPC_TLS_CA_PATH"; + private static final String GRPC_TLS_CERT_PATH = "DAPR_GRPC_TLS_CERT_PATH"; + private static final String GRPC_TLS_KEY_PATH = "DAPR_GRPC_TLS_KEY_PATH"; + private static final String GRPC_TLS_INSECURE = "DAPR_GRPC_TLS_INSECURE"; + + private final DataConverter dataConverter; + private final ManagedChannel managedSidecarChannel; + private final TaskHubSidecarServiceGrpc.TaskHubSidecarServiceBlockingStub sidecarClient; + + DurableTaskGrpcClient(DurableTaskGrpcClientBuilder builder) { + this.dataConverter = builder.dataConverter != null ? builder.dataConverter : new JacksonDataConverter(); + + Channel sidecarGrpcChannel; + if (builder.channel != null) { + // The caller is responsible for managing the channel lifetime + this.managedSidecarChannel = null; + sidecarGrpcChannel = builder.channel; + } else { + // Construct our own channel using localhost + a port number + int port = DEFAULT_PORT; + if (builder.port > 0) { + port = builder.port; + } + + String endpoint = "localhost:" + port; + ManagedChannelBuilder channelBuilder; + + // Get TLS configuration from builder or environment variables + String tlsCaPath = builder.tlsCaPath != null ? builder.tlsCaPath : System.getenv(GRPC_TLS_CA_PATH); + String tlsCertPath = builder.tlsCertPath != null ? builder.tlsCertPath : System.getenv(GRPC_TLS_CERT_PATH); + String tlsKeyPath = builder.tlsKeyPath != null ? 
builder.tlsKeyPath : System.getenv(GRPC_TLS_KEY_PATH); + boolean insecure = builder.insecure || Boolean.parseBoolean(System.getenv(GRPC_TLS_INSECURE)); + + if (insecure) { + // Insecure mode - uses TLS but doesn't verify certificates + try { + channelBuilder = NettyChannelBuilder.forTarget(endpoint) + .sslContext(GrpcSslContexts.forClient() + .trustManager(InsecureTrustManagerFactory.INSTANCE) + .build()); + } catch (Exception e) { + throw new RuntimeException("Failed to create insecure TLS credentials", e); + } + } else if (tlsCertPath != null && tlsKeyPath != null) { + // mTLS case - using client cert and key, with optional CA cert for server authentication + try ( + InputStream clientCertInputStream = new FileInputStream(tlsCertPath); + InputStream clientKeyInputStream = new FileInputStream(tlsKeyPath); + InputStream caCertInputStream = tlsCaPath != null ? new FileInputStream(tlsCaPath) : null + ) { + TlsChannelCredentials.Builder tlsBuilder = TlsChannelCredentials.newBuilder() + .keyManager(clientCertInputStream, clientKeyInputStream); // For client authentication + if (caCertInputStream != null) { + tlsBuilder.trustManager(caCertInputStream); // For server authentication + } + ChannelCredentials credentials = tlsBuilder.build(); + channelBuilder = Grpc.newChannelBuilder(endpoint, credentials); + } catch (IOException e) { + throw new RuntimeException("Failed to create mTLS credentials" + + (tlsCaPath != null ? " with CA cert" : ""), e); + } + } else if (tlsCaPath != null) { + // Simple TLS case - using CA cert only for server authentication + try (InputStream caCertInputStream = new FileInputStream(tlsCaPath)) { + ChannelCredentials credentials = TlsChannelCredentials.newBuilder() + .trustManager(caCertInputStream) + .build(); + channelBuilder = Grpc.newChannelBuilder(endpoint, credentials); + } catch (IOException e) { + throw new RuntimeException("Failed to create TLS credentials with CA cert", e); + } + } else { + // No TLS config provided, use plaintext + channelBuilder = ManagedChannelBuilder.forTarget(endpoint).usePlaintext(); + } + + // Need to keep track of this channel so we can dispose it on close() + this.managedSidecarChannel = channelBuilder.build(); + sidecarGrpcChannel = this.managedSidecarChannel; + } + + this.sidecarClient = TaskHubSidecarServiceGrpc.newBlockingStub(sidecarGrpcChannel); + } + + /** + * Closes the internally managed gRPC channel, if one exists. + * + *

This method is a no-op if this client object was created using a builder with a gRPC channel object explicitly + * configured.
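A sketch of the managed-channel case, where closing the client also releases the channel it created internally; the orchestrator name is a placeholder:

try (DurableTaskClient client = new DurableTaskGrpcClientBuilder().build()) {
    client.scheduleNewOrchestrationInstance("ProcessOrder");
} // close() shuts down the channel the builder created for this client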

+ */ + @Override + public void close() { + if (this.managedSidecarChannel != null) { + try { + this.managedSidecarChannel.shutdown().awaitTermination(5, TimeUnit.SECONDS); + } catch (InterruptedException e) { + // Best effort. Also note that AutoClose documentation recommends NOT having + // close() methods throw InterruptedException: + // https://docs.oracle.com/javase/7/docs/api/java/lang/AutoCloseable.html + } + } + } + + @Override + public String scheduleNewOrchestrationInstance( + String orchestratorName, + NewOrchestrationInstanceOptions options) { + if (orchestratorName == null || orchestratorName.length() == 0) { + throw new IllegalArgumentException("A non-empty orchestrator name must be specified."); + } + + Helpers.throwIfArgumentNull(options, "options"); + + OrchestratorService.CreateInstanceRequest.Builder builder = OrchestratorService.CreateInstanceRequest.newBuilder(); + builder.setName(orchestratorName); + + String instanceId = options.getInstanceId(); + if (instanceId == null) { + instanceId = UUID.randomUUID().toString(); + } + builder.setInstanceId(instanceId); + + String version = options.getVersion(); + if (version != null) { + builder.setVersion(StringValue.of(version)); + } + + Object input = options.getInput(); + if (input != null) { + String serializedInput = this.dataConverter.serialize(input); + builder.setInput(StringValue.of(serializedInput)); + } + + Instant startTime = options.getStartTime(); + if (startTime != null) { + Timestamp ts = DataConverter.getTimestampFromInstant(startTime); + builder.setScheduledStartTimestamp(ts); + } + + OrchestratorService.CreateInstanceRequest request = builder.build(); + OrchestratorService.CreateInstanceResponse response = this.sidecarClient.startInstance(request); + return response.getInstanceId(); + } + + @Override + public void raiseEvent(String instanceId, String eventName, Object eventPayload) { + Helpers.throwIfArgumentNull(instanceId, "instanceId"); + Helpers.throwIfArgumentNull(eventName, "eventName"); + + OrchestratorService.RaiseEventRequest.Builder builder = OrchestratorService.RaiseEventRequest.newBuilder() + .setInstanceId(instanceId) + .setName(eventName); + if (eventPayload != null) { + String serializedPayload = this.dataConverter.serialize(eventPayload); + builder.setInput(StringValue.of(serializedPayload)); + } + + OrchestratorService.RaiseEventRequest request = builder.build(); + this.sidecarClient.raiseEvent(request); + } + + @Override + public OrchestrationMetadata getInstanceMetadata(String instanceId, boolean getInputsAndOutputs) { + OrchestratorService.GetInstanceRequest request = OrchestratorService.GetInstanceRequest.newBuilder() + .setInstanceId(instanceId) + .setGetInputsAndOutputs(getInputsAndOutputs) + .build(); + OrchestratorService.GetInstanceResponse response = this.sidecarClient.getInstance(request); + return new OrchestrationMetadata(response, this.dataConverter, request.getGetInputsAndOutputs()); + } + + @Override + public OrchestrationMetadata waitForInstanceStart(String instanceId, Duration timeout, boolean getInputsAndOutputs) + throws TimeoutException { + OrchestratorService.GetInstanceRequest request = OrchestratorService.GetInstanceRequest.newBuilder() + .setInstanceId(instanceId) + .setGetInputsAndOutputs(getInputsAndOutputs) + .build(); + + if (timeout == null || timeout.isNegative() || timeout.isZero()) { + timeout = Duration.ofMinutes(10); + } + + TaskHubSidecarServiceGrpc.TaskHubSidecarServiceBlockingStub grpcClient = this.sidecarClient.withDeadlineAfter( + timeout.toMillis(), + 
TimeUnit.MILLISECONDS); + + OrchestratorService.GetInstanceResponse response; + try { + response = grpcClient.waitForInstanceStart(request); + } catch (StatusRuntimeException e) { + if (e.getStatus().getCode() == Status.Code.DEADLINE_EXCEEDED) { + throw new TimeoutException("Start orchestration timeout reached."); + } + throw e; + } + return new OrchestrationMetadata(response, this.dataConverter, request.getGetInputsAndOutputs()); + } + + @Override + public OrchestrationMetadata waitForInstanceCompletion(String instanceId, Duration timeout, + boolean getInputsAndOutputs) throws TimeoutException { + OrchestratorService.GetInstanceRequest request = OrchestratorService.GetInstanceRequest.newBuilder() + .setInstanceId(instanceId) + .setGetInputsAndOutputs(getInputsAndOutputs) + .build(); + + if (timeout == null || timeout.isNegative() || timeout.isZero()) { + timeout = Duration.ofMinutes(10); + } + + TaskHubSidecarServiceGrpc.TaskHubSidecarServiceBlockingStub grpcClient = this.sidecarClient.withDeadlineAfter( + timeout.toMillis(), + TimeUnit.MILLISECONDS); + + OrchestratorService.GetInstanceResponse response; + try { + response = grpcClient.waitForInstanceCompletion(request); + } catch (StatusRuntimeException e) { + if (e.getStatus().getCode() == Status.Code.DEADLINE_EXCEEDED) { + throw new TimeoutException("Orchestration instance completion timeout reached."); + } + throw e; + } + return new OrchestrationMetadata(response, this.dataConverter, request.getGetInputsAndOutputs()); + } + + @Override + public void terminate(String instanceId, @Nullable Object output) { + Helpers.throwIfArgumentNull(instanceId, "instanceId"); + String serializeOutput = this.dataConverter.serialize(output); + this.logger.fine(() -> String.format( + "Terminating instance %s and setting output to: %s", + instanceId, + serializeOutput != null ? 
serializeOutput : "(null)")); + OrchestratorService.TerminateRequest.Builder builder = OrchestratorService.TerminateRequest.newBuilder() + .setInstanceId(instanceId); + if (serializeOutput != null) { + builder.setOutput(StringValue.of(serializeOutput)); + } + this.sidecarClient.terminateInstance(builder.build()); + } + + @Override + public OrchestrationStatusQueryResult queryInstances(OrchestrationStatusQuery query) { + OrchestratorService.InstanceQuery.Builder instanceQueryBuilder = OrchestratorService.InstanceQuery.newBuilder(); + Optional.ofNullable(query.getCreatedTimeFrom()).ifPresent(createdTimeFrom -> + instanceQueryBuilder.setCreatedTimeFrom(DataConverter.getTimestampFromInstant(createdTimeFrom))); + Optional.ofNullable(query.getCreatedTimeTo()).ifPresent(createdTimeTo -> + instanceQueryBuilder.setCreatedTimeTo(DataConverter.getTimestampFromInstant(createdTimeTo))); + Optional.ofNullable(query.getContinuationToken()).ifPresent(token -> + instanceQueryBuilder.setContinuationToken(StringValue.of(token))); + Optional.ofNullable(query.getInstanceIdPrefix()).ifPresent(prefix -> + instanceQueryBuilder.setInstanceIdPrefix(StringValue.of(prefix))); + instanceQueryBuilder.setFetchInputsAndOutputs(query.isFetchInputsAndOutputs()); + instanceQueryBuilder.setMaxInstanceCount(query.getMaxInstanceCount()); + query.getRuntimeStatusList().forEach(runtimeStatus -> + Optional.ofNullable(runtimeStatus).ifPresent(status -> + instanceQueryBuilder.addRuntimeStatus(OrchestrationRuntimeStatus.toProtobuf(status)))); + query.getTaskHubNames().forEach(taskHubName -> Optional.ofNullable(taskHubName).ifPresent(name -> + instanceQueryBuilder.addTaskHubNames(StringValue.of(name)))); + OrchestratorService.QueryInstancesResponse queryInstancesResponse = this.sidecarClient + .queryInstances(OrchestratorService.QueryInstancesRequest.newBuilder().setQuery(instanceQueryBuilder).build()); + return toQueryResult(queryInstancesResponse, query.isFetchInputsAndOutputs()); + } + + private OrchestrationStatusQueryResult toQueryResult( + OrchestratorService.QueryInstancesResponse queryInstancesResponse, boolean fetchInputsAndOutputs) { + List metadataList = new ArrayList<>(); + queryInstancesResponse.getOrchestrationStateList().forEach(state -> { + metadataList.add(new OrchestrationMetadata(state, this.dataConverter, fetchInputsAndOutputs)); + }); + return new OrchestrationStatusQueryResult(metadataList, queryInstancesResponse.getContinuationToken().getValue()); + } + + @Override + public void createTaskHub(boolean recreateIfExists) { + this.sidecarClient.createTaskHub(OrchestratorService.CreateTaskHubRequest.newBuilder() + .setRecreateIfExists(recreateIfExists).build()); + } + + @Override + public void deleteTaskHub() { + this.sidecarClient.deleteTaskHub(OrchestratorService.DeleteTaskHubRequest.newBuilder().build()); + } + + @Override + public PurgeResult purgeInstance(String instanceId) { + OrchestratorService.PurgeInstancesRequest request = OrchestratorService.PurgeInstancesRequest.newBuilder() + .setInstanceId(instanceId) + .build(); + + OrchestratorService.PurgeInstancesResponse response = this.sidecarClient.purgeInstances(request); + return toPurgeResult(response); + } + + @Override + public PurgeResult purgeInstances(PurgeInstanceCriteria purgeInstanceCriteria) throws TimeoutException { + OrchestratorService.PurgeInstanceFilter.Builder builder = OrchestratorService.PurgeInstanceFilter.newBuilder(); + builder.setCreatedTimeFrom(DataConverter.getTimestampFromInstant(purgeInstanceCriteria.getCreatedTimeFrom())); + 
Optional.ofNullable(purgeInstanceCriteria.getCreatedTimeTo()).ifPresent(createdTimeTo -> + builder.setCreatedTimeTo(DataConverter.getTimestampFromInstant(createdTimeTo))); + purgeInstanceCriteria.getRuntimeStatusList().forEach(runtimeStatus -> + Optional.ofNullable(runtimeStatus).ifPresent(status -> + builder.addRuntimeStatus(OrchestrationRuntimeStatus.toProtobuf(status)))); + + Duration timeout = purgeInstanceCriteria.getTimeout(); + if (timeout == null || timeout.isNegative() || timeout.isZero()) { + timeout = Duration.ofMinutes(4); + } + + TaskHubSidecarServiceGrpc.TaskHubSidecarServiceBlockingStub grpcClient = this.sidecarClient.withDeadlineAfter( + timeout.toMillis(), + TimeUnit.MILLISECONDS); + + OrchestratorService.PurgeInstancesResponse response; + try { + response = grpcClient.purgeInstances(OrchestratorService.PurgeInstancesRequest.newBuilder() + .setPurgeInstanceFilter(builder).build()); + return toPurgeResult(response); + } catch (StatusRuntimeException e) { + if (e.getStatus().getCode() == Status.Code.DEADLINE_EXCEEDED) { + String timeOutException = String.format("Purge instances timeout duration of %s reached.", timeout); + throw new TimeoutException(timeOutException); + } + throw e; + } + } + + @Override + public void suspendInstance(String instanceId, @Nullable String reason) { + OrchestratorService.SuspendRequest.Builder suspendRequestBuilder = OrchestratorService.SuspendRequest.newBuilder(); + suspendRequestBuilder.setInstanceId(instanceId); + if (reason != null) { + suspendRequestBuilder.setReason(StringValue.of(reason)); + } + this.sidecarClient.suspendInstance(suspendRequestBuilder.build()); + } + + @Override + public void resumeInstance(String instanceId, @Nullable String reason) { + OrchestratorService.ResumeRequest.Builder resumeRequestBuilder = OrchestratorService.ResumeRequest.newBuilder(); + resumeRequestBuilder.setInstanceId(instanceId); + if (reason != null) { + resumeRequestBuilder.setReason(StringValue.of(reason)); + } + this.sidecarClient.resumeInstance(resumeRequestBuilder.build()); + } + + @Override + public String restartInstance(String instanceId, boolean restartWithNewInstanceId) { + OrchestrationMetadata metadata = this.getInstanceMetadata(instanceId, true); + if (!metadata.isInstanceFound()) { + throw new IllegalArgumentException(new StringBuilder() + .append("An orchestration with instanceId ") + .append(instanceId) + .append(" was not found.").toString()); + } + + if (restartWithNewInstanceId) { + return this.scheduleNewOrchestrationInstance(metadata.getName(), + this.dataConverter.deserialize(metadata.getSerializedInput(), Object.class)); + } else { + return this.scheduleNewOrchestrationInstance(metadata.getName(), + this.dataConverter.deserialize(metadata.getSerializedInput(), Object.class), metadata.getInstanceId()); + } + } + + private PurgeResult toPurgeResult(OrchestratorService.PurgeInstancesResponse response) { + return new PurgeResult(response.getDeletedInstanceCount()); + } +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/DurableTaskGrpcClientBuilder.java b/durabletask-client/src/main/java/io/dapr/durabletask/DurableTaskGrpcClientBuilder.java new file mode 100644 index 000000000..f3ba1cd82 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/DurableTaskGrpcClientBuilder.java @@ -0,0 +1,128 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +import io.grpc.Channel; + +/** + * Builder class for constructing new {@link DurableTaskClient} objects that communicate with a sidecar process + * over gRPC. + */ +public final class DurableTaskGrpcClientBuilder { + DataConverter dataConverter; + int port; + Channel channel; + String tlsCaPath; + String tlsCertPath; + String tlsKeyPath; + boolean insecure; + + /** + * Sets the {@link DataConverter} to use for converting serializable data payloads. + * + * @param dataConverter the {@link DataConverter} to use for converting serializable data payloads + * @return this builder object + */ + public DurableTaskGrpcClientBuilder dataConverter(DataConverter dataConverter) { + this.dataConverter = dataConverter; + return this; + } + + /** + * Sets the gRPC channel to use for communicating with the sidecar process. + * + *

This builder method allows you to provide your own gRPC channel for communicating with the Durable Task sidecar + * endpoint. Channels provided using this method won't be closed when the client is closed. + * Rather, the caller remains responsible for shutting down the channel after disposing the client.

+ * + *

If not specified, a gRPC channel will be created automatically for each constructed + * {@link DurableTaskClient}.
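A sketch of supplying an externally managed channel; in this case closing the client leaves the channel untouched and the caller shuts it down:

ManagedChannel channel = ManagedChannelBuilder.forAddress("localhost", 4001)
        .usePlaintext()
        .build();
DurableTaskClient client = new DurableTaskGrpcClientBuilder()
        .grpcChannel(channel)
        .build();
// ... use the client ...
client.close();     // does not touch the externally supplied channel
channel.shutdown(); // the caller owns the channel's lifecycle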

+ * + * @param channel the gRPC channel to use + * @return this builder object + */ + public DurableTaskGrpcClientBuilder grpcChannel(Channel channel) { + this.channel = channel; + return this; + } + + /** + * Sets the gRPC endpoint port to connect to. If not specified, the default Durable Task port number will be used. + * + * @param port the gRPC endpoint port to connect to + * @return this builder object + */ + public DurableTaskGrpcClientBuilder port(int port) { + this.port = port; + return this; + } + + /** + * Sets the path to the TLS CA certificate file for server authentication. + * If not set, the system's default CA certificates will be used. + * + * @param tlsCaPath path to the TLS CA certificate file + * @return this builder object + */ + public DurableTaskGrpcClientBuilder tlsCaPath(String tlsCaPath) { + this.tlsCaPath = tlsCaPath; + return this; + } + + /** + * Sets the path to the TLS client certificate file for client authentication. + * This is used for mTLS (mutual TLS) connections. + * + * @param tlsCertPath path to the TLS client certificate file + * @return this builder object + */ + public DurableTaskGrpcClientBuilder tlsCertPath(String tlsCertPath) { + this.tlsCertPath = tlsCertPath; + return this; + } + + /** + * Sets the path to the TLS client key file for client authentication. + * This is used for mTLS (mutual TLS) connections. + * + * @param tlsKeyPath path to the TLS client key file + * @return this builder object + */ + public DurableTaskGrpcClientBuilder tlsKeyPath(String tlsKeyPath) { + this.tlsKeyPath = tlsKeyPath; + return this; + } + + /** + * Sets whether to use insecure (plaintext) mode for gRPC communication. + * When set to true, TLS will be disabled and communication will be unencrypted. + * This should only be used for development/testing. + * + * @param insecure whether to use insecure mode + * @return this builder object + */ + public DurableTaskGrpcClientBuilder insecure(boolean insecure) { + this.insecure = insecure; + return this; + } + + /** + * Initializes a new {@link DurableTaskClient} object with the settings specified in the current builder object. + * + * @return a new {@link DurableTaskClient} object + */ + public DurableTaskClient build() { + return new DurableTaskGrpcClient(this); + } +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/DurableTaskGrpcWorker.java b/durabletask-client/src/main/java/io/dapr/durabletask/DurableTaskGrpcWorker.java new file mode 100644 index 000000000..eb3be6bb9 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/DurableTaskGrpcWorker.java @@ -0,0 +1,328 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package io.dapr.durabletask; + +import com.google.protobuf.StringValue; +import io.dapr.durabletask.implementation.protobuf.OrchestratorService; +import io.dapr.durabletask.implementation.protobuf.OrchestratorService.TaskFailureDetails; +import io.dapr.durabletask.implementation.protobuf.TaskHubSidecarServiceGrpc; +import io.grpc.Channel; +import io.grpc.ManagedChannel; +import io.grpc.ManagedChannelBuilder; +import io.grpc.Status; +import io.grpc.StatusRuntimeException; + +import java.time.Duration; +import java.util.HashMap; +import java.util.Iterator; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.logging.Level; +import java.util.logging.Logger; + +/** + * Task hub worker that connects to a sidecar process over gRPC to execute + * orchestrator and activity events. + */ +public final class DurableTaskGrpcWorker implements AutoCloseable { + + private static final int DEFAULT_PORT = 4001; + private static final Logger logger = Logger.getLogger(DurableTaskGrpcWorker.class.getPackage().getName()); + private static final Duration DEFAULT_MAXIMUM_TIMER_INTERVAL = Duration.ofDays(3); + + private final HashMap orchestrationFactories = new HashMap<>(); + private final HashMap activityFactories = new HashMap<>(); + + private final ManagedChannel managedSidecarChannel; + private final DataConverter dataConverter; + private final Duration maximumTimerInterval; + private final ExecutorService workerPool; + private final String appId; // App ID for cross-app routing + + private final TaskHubSidecarServiceGrpc.TaskHubSidecarServiceBlockingStub sidecarClient; + private final boolean isExecutorServiceManaged; + private volatile boolean isNormalShutdown = false; + private Thread workerThread; + + DurableTaskGrpcWorker(DurableTaskGrpcWorkerBuilder builder) { + this.orchestrationFactories.putAll(builder.orchestrationFactories); + this.activityFactories.putAll(builder.activityFactories); + this.appId = builder.appId; + + Channel sidecarGrpcChannel; + if (builder.channel != null) { + // The caller is responsible for managing the channel lifetime + this.managedSidecarChannel = null; + sidecarGrpcChannel = builder.channel; + } else { + // Construct our own channel using localhost + a port number + int port = DEFAULT_PORT; + if (builder.port > 0) { + port = builder.port; + } + + // Need to keep track of this channel so we can dispose it on close() + this.managedSidecarChannel = ManagedChannelBuilder + .forAddress("localhost", port) + .usePlaintext() + .build(); + sidecarGrpcChannel = this.managedSidecarChannel; + } + + this.sidecarClient = TaskHubSidecarServiceGrpc.newBlockingStub(sidecarGrpcChannel); + this.dataConverter = builder.dataConverter != null ? builder.dataConverter : new JacksonDataConverter(); + this.maximumTimerInterval = builder.maximumTimerInterval != null ? builder.maximumTimerInterval + : DEFAULT_MAXIMUM_TIMER_INTERVAL; + this.workerPool = builder.executorService != null ? builder.executorService : Executors.newCachedThreadPool(); + this.isExecutorServiceManaged = builder.executorService == null; + } + + /** + * Establishes a gRPC connection to the sidecar and starts processing work-items + * in the background. + * + *

This method retries continuously to establish a connection to the sidecar. If + * a connection fails, + * a warning log message will be written and a new connection attempt will be + * made. This process + * continues until either a connection succeeds or the process receives an + * interrupt signal.
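A sketch of wiring up and starting a worker; myOrchestrationFactory and myActivityFactory stand in for application-provided TaskOrchestrationFactory and TaskActivityFactory implementations:

DurableTaskGrpcWorker worker = new DurableTaskGrpcWorkerBuilder()
        .port(4001)
        .addOrchestration(myOrchestrationFactory) // placeholder factory
        .addActivity(myActivityFactory)           // placeholder factory
        .build();
worker.start(); // connects to the sidecar and processes work-items on a background thread
// ...
worker.close(); // interrupts the worker thread and releases managed resources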

+ */ + public void start() { + this.workerThread = new Thread(this::startAndBlock); + this.workerThread.start(); + } + + /** + * Closes the internally managed gRPC channel and executor service, if one + * exists. + * + *

Only the internally managed gRPC channel and executor service are closed. If either was
 * supplied by the caller, it remains the caller's responsibility to shut it down.

+ * + */ + public void close() { + this.workerThread.interrupt(); + this.isNormalShutdown = true; + this.shutDownWorkerPool(); + this.closeSideCarChannel(); + } + + /** + * Establishes a gRPC connection to the sidecar and starts processing work-items + * on the current thread. + * This method call blocks indefinitely, or until the current thread is + * interrupted. + * + *

You can alternatively use the {@link #start} method to run orchestration
 * processing in a background thread.

+ * + *

This method retries continuously to establish a connection to the sidecar. If + * a connection fails, + * a warning log message will be written and a new connection attempt will be + * made. This process + * continues until either a connection succeeds or the process receives an + * interrupt signal.

+ */ + public void startAndBlock() { + logger.log(Level.INFO, "Durable Task worker is connecting to sidecar at {0}.", this.getSidecarAddress()); + + TaskOrchestrationExecutor taskOrchestrationExecutor = new TaskOrchestrationExecutor( + this.orchestrationFactories, + this.dataConverter, + this.maximumTimerInterval, + logger, + this.appId); + TaskActivityExecutor taskActivityExecutor = new TaskActivityExecutor( + this.activityFactories, + this.dataConverter, + logger); + + while (true) { + try { + OrchestratorService.GetWorkItemsRequest getWorkItemsRequest = OrchestratorService.GetWorkItemsRequest + .newBuilder().build(); + Iterator workItemStream = this.sidecarClient.getWorkItems(getWorkItemsRequest); + while (workItemStream.hasNext()) { + OrchestratorService.WorkItem workItem = workItemStream.next(); + OrchestratorService.WorkItem.RequestCase requestType = workItem.getRequestCase(); + if (requestType == OrchestratorService.WorkItem.RequestCase.ORCHESTRATORREQUEST) { + OrchestratorService.OrchestratorRequest orchestratorRequest = workItem.getOrchestratorRequest(); + logger.log(Level.FINEST, + String.format("Processing orchestrator request for instance: {0}", + orchestratorRequest.getInstanceId())); + + // TODO: Error handling + this.workerPool.submit(() -> { + TaskOrchestratorResult taskOrchestratorResult = taskOrchestrationExecutor.execute( + orchestratorRequest.getPastEventsList(), + orchestratorRequest.getNewEventsList()); + + OrchestratorService.OrchestratorResponse response = OrchestratorService.OrchestratorResponse.newBuilder() + .setInstanceId(orchestratorRequest.getInstanceId()) + .addAllActions(taskOrchestratorResult.getActions()) + .setCustomStatus(StringValue.of(taskOrchestratorResult.getCustomStatus())) + .setCompletionToken(workItem.getCompletionToken()) + .build(); + + try { + this.sidecarClient.completeOrchestratorTask(response); + logger.log(Level.FINEST, + "Completed orchestrator request for instance: {0}", + orchestratorRequest.getInstanceId()); + } catch (StatusRuntimeException e) { + if (e.getStatus().getCode() == Status.Code.UNAVAILABLE) { + logger.log(Level.WARNING, + "The sidecar at address {0} is unavailable while completing the orchestrator task.", + this.getSidecarAddress()); + } else if (e.getStatus().getCode() == Status.Code.CANCELLED) { + logger.log(Level.WARNING, + "Durable Task worker has disconnected from {0} while completing the orchestrator task.", + this.getSidecarAddress()); + } else { + logger.log(Level.WARNING, + "Unexpected failure completing the orchestrator task at {0}.", + this.getSidecarAddress()); + } + } + }); + } else if (requestType == OrchestratorService.WorkItem.RequestCase.ACTIVITYREQUEST) { + OrchestratorService.ActivityRequest activityRequest = workItem.getActivityRequest(); + logger.log(Level.FINEST, + String.format("Processing activity request: %s for instance: %s}", + activityRequest.getName(), + activityRequest.getOrchestrationInstance().getInstanceId())); + + // TODO: Error handling + this.workerPool.submit(() -> { + String output = null; + TaskFailureDetails failureDetails = null; + try { + output = taskActivityExecutor.execute( + activityRequest.getName(), + activityRequest.getInput().getValue(), + activityRequest.getTaskExecutionId(), + activityRequest.getTaskId()); + } catch (Throwable e) { + failureDetails = TaskFailureDetails.newBuilder() + .setErrorType(e.getClass().getName()) + .setErrorMessage(e.getMessage()) + .setStackTrace(StringValue.of(FailureDetails.getFullStackTrace(e))) + .build(); + } + + 
OrchestratorService.ActivityResponse.Builder responseBuilder = OrchestratorService.ActivityResponse + .newBuilder() + .setInstanceId(activityRequest.getOrchestrationInstance().getInstanceId()) + .setTaskId(activityRequest.getTaskId()) + .setCompletionToken(workItem.getCompletionToken()); + + if (output != null) { + responseBuilder.setResult(StringValue.of(output)); + } + + if (failureDetails != null) { + responseBuilder.setFailureDetails(failureDetails); + } + + try { + this.sidecarClient.completeActivityTask(responseBuilder.build()); + } catch (StatusRuntimeException e) { + if (e.getStatus().getCode() == Status.Code.UNAVAILABLE) { + logger.log(Level.WARNING, + "The sidecar at address {0} is unavailable while completing the activity task.", + this.getSidecarAddress()); + } else if (e.getStatus().getCode() == Status.Code.CANCELLED) { + logger.log(Level.WARNING, + "Durable Task worker has disconnected from {0} while completing the activity task.", + this.getSidecarAddress()); + } else { + logger.log(Level.WARNING, "Unexpected failure completing the activity task at {0}.", + this.getSidecarAddress()); + } + } + }); + } else if (requestType == OrchestratorService.WorkItem.RequestCase.HEALTHPING) { + // No-op + } else { + logger.log(Level.WARNING, + "Received and dropped an unknown '{0}' work-item from the sidecar.", + requestType); + } + } + } catch (StatusRuntimeException e) { + if (e.getStatus().getCode() == Status.Code.UNAVAILABLE) { + logger.log(Level.INFO, "The sidecar at address {0} is unavailable. Will continue retrying.", + this.getSidecarAddress()); + } else if (e.getStatus().getCode() == Status.Code.CANCELLED) { + logger.log(Level.INFO, "Durable Task worker has disconnected from {0}.", this.getSidecarAddress()); + } else { + logger.log(Level.WARNING, + String.format("Unexpected failure connecting to %s", this.getSidecarAddress()), e); + } + + // Retry after 5 seconds + try { + Thread.sleep(5000); + } catch (InterruptedException ex) { + break; + } + } + } + } + + /** + * Stops the current worker's listen loop, preventing any new orchestrator or + * activity events from being processed. + */ + public void stop() { + this.close(); + } + + private void closeSideCarChannel() { + if (this.managedSidecarChannel != null) { + try { + this.managedSidecarChannel.shutdownNow().awaitTermination(5, TimeUnit.SECONDS); + } catch (InterruptedException e) { + // Best effort. Also note that AutoClose documentation recommends NOT having + // close() methods throw InterruptedException: + // https://docs.oracle.com/javase/7/docs/api/java/lang/AutoCloseable.html + } + } + } + + private void shutDownWorkerPool() { + if (this.isExecutorServiceManaged) { + if (!this.isNormalShutdown) { + logger.log(Level.WARNING, + "ExecutorService shutdown initiated unexpectedly. 
No new tasks will be accepted"); + } + + this.workerPool.shutdown(); + try { + if (!this.workerPool.awaitTermination(60, TimeUnit.SECONDS)) { + this.workerPool.shutdownNow(); + } + } catch (InterruptedException ex) { + Thread.currentThread().interrupt(); + } + } + } + + private String getSidecarAddress() { + return this.sidecarClient.getChannel().authority(); + } +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/DurableTaskGrpcWorkerBuilder.java b/durabletask-client/src/main/java/io/dapr/durabletask/DurableTaskGrpcWorkerBuilder.java new file mode 100644 index 000000000..0d3ebf227 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/DurableTaskGrpcWorkerBuilder.java @@ -0,0 +1,164 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +import io.grpc.Channel; + +import java.time.Duration; +import java.util.HashMap; +import java.util.concurrent.ExecutorService; + +/** + * Builder object for constructing customized {@link DurableTaskGrpcWorker} instances. + * + */ +public final class DurableTaskGrpcWorkerBuilder { + final HashMap orchestrationFactories = new HashMap<>(); + final HashMap activityFactories = new HashMap<>(); + int port; + Channel channel; + DataConverter dataConverter; + Duration maximumTimerInterval; + ExecutorService executorService; + String appId; // App ID for cross-app routing + + /** + * Adds an orchestration factory to be used by the constructed {@link DurableTaskGrpcWorker}. + * + * @param factory an orchestration factory to be used by the constructed {@link DurableTaskGrpcWorker} + * @return this builder object + */ + public DurableTaskGrpcWorkerBuilder addOrchestration(TaskOrchestrationFactory factory) { + String key = factory.getName(); + if (key == null || key.length() == 0) { + throw new IllegalArgumentException("A non-empty task orchestration name is required."); + } + + if (this.orchestrationFactories.containsKey(key)) { + throw new IllegalArgumentException( + String.format("A task orchestration factory named %s is already registered.", key)); + } + + this.orchestrationFactories.put(key, factory); + return this; + } + + /** + * Adds an activity factory to be used by the constructed {@link DurableTaskGrpcWorker}. + * + * @param factory an activity factory to be used by the constructed {@link DurableTaskGrpcWorker} + * @return this builder object + */ + public DurableTaskGrpcWorkerBuilder addActivity(TaskActivityFactory factory) { + // TODO: Input validation + String key = factory.getName(); + if (key == null || key.length() == 0) { + throw new IllegalArgumentException("A non-empty task activity name is required."); + } + + if (this.activityFactories.containsKey(key)) { + throw new IllegalArgumentException( + String.format("A task activity factory named %s is already registered.", key)); + } + + this.activityFactories.put(key, factory); + return this; + } + + /** + * Sets the gRPC channel to use for communicating with the sidecar process. + * + *

This builder method allows you to provide your own gRPC channel for communicating with the Durable Task sidecar + * endpoint. Channels provided using this method won't be closed when the worker is closed. + * Rather, the caller remains responsible for shutting down the channel after disposing the worker.

+ * + *

If not specified, a gRPC channel will be created automatically for each constructed + * {@link DurableTaskGrpcWorker}.

+ * + * @param channel the gRPC channel to use + * @return this builder object + */ + public DurableTaskGrpcWorkerBuilder grpcChannel(Channel channel) { + this.channel = channel; + return this; + } + + /** + * Sets the gRPC endpoint port to connect to. If not specified, the default Durable Task port number will be used. + * + * @param port the gRPC endpoint port to connect to + * @return this builder object + */ + public DurableTaskGrpcWorkerBuilder port(int port) { + this.port = port; + return this; + } + + /** + * Sets the {@link DataConverter} to use for converting serializable data payloads. + * + * @param dataConverter the {@link DataConverter} to use for converting serializable data payloads + * @return this builder object + */ + public DurableTaskGrpcWorkerBuilder dataConverter(DataConverter dataConverter) { + this.dataConverter = dataConverter; + return this; + } + + /** + * Sets the maximum timer interval. If not specified, the default maximum timer interval duration will be used. + * The default maximum timer interval duration is 3 days. + * + * @param maximumTimerInterval the maximum timer interval + * @return this builder object + */ + public DurableTaskGrpcWorkerBuilder maximumTimerInterval(Duration maximumTimerInterval) { + this.maximumTimerInterval = maximumTimerInterval; + return this; + } + + /** + * Sets the executor service that will be used to execute threads. + * + * @param executorService {@link ExecutorService}. + * @return this builder object. + */ + public DurableTaskGrpcWorkerBuilder withExecutorService(ExecutorService executorService) { + this.executorService = executorService; + return this; + } + + /** + * Sets the app ID for cross-app workflow routing. + * + *

This app ID is used to identify this worker in cross-app routing scenarios. + * It should match the app ID configured in the Dapr sidecar.
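As a hedged example, the value passed here would typically mirror the sidecar's app-id; the name below is a placeholder:

    DurableTaskGrpcWorker worker = new DurableTaskGrpcWorkerBuilder()
        .appId("my-app")   // placeholder; should match the Dapr sidecar's --app-id
        .build();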

+ * + * @param appId the app ID for this worker + * @return this builder object + */ + public DurableTaskGrpcWorkerBuilder appId(String appId) { + this.appId = appId; + return this; + } + + /** + * Initializes a new {@link DurableTaskGrpcWorker} object with the settings specified in the current builder object. + * + * @return a new {@link DurableTaskGrpcWorker} object + */ + public DurableTaskGrpcWorker build() { + return new DurableTaskGrpcWorker(this); + } +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/FailureDetails.java b/durabletask-client/src/main/java/io/dapr/durabletask/FailureDetails.java new file mode 100644 index 000000000..f5d9d834e --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/FailureDetails.java @@ -0,0 +1,145 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +import com.google.protobuf.StringValue; +import io.dapr.durabletask.implementation.protobuf.OrchestratorService.TaskFailureDetails; + +import javax.annotation.Nonnull; +import javax.annotation.Nullable; + +/** + * Class that represents the details of a task failure. + * + *

In most cases, failures are caused by unhandled exceptions in activity or orchestrator code, in which case + * instances of this class will expose the details of the exception. However, it's also possible that other types + * of errors could result in task failures, in which case there may not be any exception-specific information.
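A sketch of how these details might be inspected from client code, assuming a DurableTaskClient named client and a waitForInstanceCompletion(instanceId, timeout, getInputsAndOutputs) shape like the one referenced elsewhere in this change:

    OrchestrationMetadata metadata = client.waitForInstanceCompletion(instanceId, Duration.ofSeconds(30), true);
    if (metadata.getRuntimeStatus() == OrchestrationRuntimeStatus.FAILED) {
        FailureDetails failure = metadata.getFailureDetails();
        // Branch on the original exception type when it can be loaded locally.
        if (failure.isCausedBy(IllegalArgumentException.class)) {
            System.err.println("Bad input: " + failure.getErrorMessage());
        } else {
            System.err.println(failure.getErrorType() + ": " + failure.getErrorMessage());
        }
    }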

+ */ +public final class FailureDetails { + private final String errorType; + private final String errorMessage; + private final String stackTrace; + private final boolean isNonRetriable; + + FailureDetails( + String errorType, + @Nullable String errorMessage, + @Nullable String errorDetails, + boolean isNonRetriable) { + this.errorType = errorType; + this.stackTrace = errorDetails; + + // Error message can be null for things like NullPointerException but the gRPC contract doesn't allow null + this.errorMessage = errorMessage != null ? errorMessage : ""; + this.isNonRetriable = isNonRetriable; + } + + FailureDetails(Exception exception) { + this(exception.getClass().getName(), exception.getMessage(), getFullStackTrace(exception), false); + } + + FailureDetails(TaskFailureDetails proto) { + this(proto.getErrorType(), + proto.getErrorMessage(), + proto.getStackTrace().getValue(), + proto.getIsNonRetriable()); + } + + /** + * Gets the exception class name if the failure was caused by an unhandled exception. Otherwise, gets a symbolic + * name that describes the general type of error that was encountered. + * + * @return the error type as a {@code String} value + */ + @Nonnull + public String getErrorType() { + return this.errorType; + } + + /** + * Gets a summary description of the error that caused this failure. If the failure was caused by an exception, the + * exception message is returned. + * + * @return a summary description of the error + */ + @Nonnull + public String getErrorMessage() { + return this.errorMessage; + } + + /** + * Gets the stack trace of the exception that caused this failure, or {@code null} if the failure was caused by + * a non-exception error. + * + * @return the stack trace of the failure exception or {@code null} if the failure was not caused by an exception + */ + @Nullable + public String getStackTrace() { + return this.stackTrace; + } + + /** + * Returns {@code true} if the failure doesn't permit retries, otherwise {@code false}. + * + * @return {@code true} if the failure doesn't permit retries, otherwise {@code false}. + */ + public boolean isNonRetriable() { + return this.isNonRetriable; + } + + /** + * Returns {@code true} if the task failure was provided by the specified exception type, otherwise {@code false}. + * + *

This method allows checking if a task failed due to a specific exception type by attempting to load the class + * specified in {@link #getErrorType()}. If the exception class cannot be loaded for any reason, this method will + * return {@code false}. Base types are supported by this method, as shown in the following example:

+ *
{@code
+   * boolean isRuntimeException = failureDetails.isCausedBy(RuntimeException.class);
+   * }
+ * + * @param exceptionClass the class representing the exception type to test + * @return {@code true} if the task failure was provided by the specified exception type, otherwise {@code false} + */ + public boolean isCausedBy(Class exceptionClass) { + String actualClassName = this.getErrorType(); + try { + // Try using reflection to load the failure's class type and see if it's a subtype of the specified + // exception. For example, this should always succeed if exceptionClass is System.Exception. + Class actualExceptionClass = Class.forName(actualClassName); + return exceptionClass.isAssignableFrom(actualExceptionClass); + } catch (ClassNotFoundException ex) { + // Can't load the class and thus can't tell if it's related + return false; + } + } + + static String getFullStackTrace(Throwable e) { + StackTraceElement[] elements = e.getStackTrace(); + + // Plan for 256 characters per stack frame (which is likely on the high-end) + StringBuilder sb = new StringBuilder(elements.length * 256); + for (StackTraceElement element : elements) { + sb.append("\tat ").append(element.toString()).append(System.lineSeparator()); + } + return sb.toString(); + } + + TaskFailureDetails toProto() { + return TaskFailureDetails.newBuilder() + .setErrorType(this.getErrorType()) + .setErrorMessage(this.getErrorMessage()) + .setStackTrace(StringValue.of(this.getStackTrace() != null ? this.getStackTrace() : "")) + .build(); + } +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/Helpers.java b/durabletask-client/src/main/java/io/dapr/durabletask/Helpers.java new file mode 100644 index 000000000..265bb0ab0 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/Helpers.java @@ -0,0 +1,77 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package io.dapr.durabletask; + +import javax.annotation.Nonnull; +import javax.annotation.Nullable; +import java.time.Duration; + +final class Helpers { + static final Duration maxDuration = Duration.ofSeconds(Long.MAX_VALUE, 999999999L); + + static @Nonnull V throwIfArgumentNull(@Nullable V argValue, String argName) { + if (argValue == null) { + throw new IllegalArgumentException("The argument '" + argName + "' was null."); + } + + return argValue; + } + + static @Nonnull String throwIfArgumentNullOrWhiteSpace(String argValue, String argName) { + throwIfArgumentNull(argValue, argName); + if (argValue.trim().length() == 0) { + throw new IllegalArgumentException("The argument '" + argName + "' was empty or contained only whitespace."); + } + + return argValue; + } + + static void throwIfOrchestratorComplete(boolean isComplete) { + if (isComplete) { + throw new IllegalStateException("The orchestrator has already completed"); + } + } + + static boolean isInfiniteTimeout(Duration timeout) { + return timeout == null || timeout.isNegative() || timeout.equals(maxDuration); + } + + static double powExact(double base, double exponent) throws ArithmeticException { + if (base == 0.0) { + return 0.0; + } + + double result = Math.pow(base, exponent); + + if (result == Double.POSITIVE_INFINITY) { + throw new ArithmeticException("Double overflow resulting in POSITIVE_INFINITY"); + } else if (result == Double.NEGATIVE_INFINITY) { + throw new ArithmeticException("Double overflow resulting in NEGATIVE_INFINITY"); + } else if (Double.compare(-0.0f, result) == 0) { + throw new ArithmeticException("Double overflow resulting in negative zero"); + } else if (Double.compare(+0.0f, result) == 0) { + throw new ArithmeticException("Double overflow resulting in positive zero"); + } + + return result; + } + + static boolean isNullOrEmpty(String s) { + return s == null || s.isEmpty(); + } + + // Cannot be instantiated + private Helpers() { + } +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/JacksonDataConverter.java b/durabletask-client/src/main/java/io/dapr/durabletask/JacksonDataConverter.java new file mode 100644 index 000000000..29912aa3f --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/JacksonDataConverter.java @@ -0,0 +1,58 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.json.JsonMapper; + +/** + * An implementation of {@link DataConverter} that uses Jackson APIs for data serialization. 
+ */ +public final class JacksonDataConverter implements DataConverter { + // Static singletons are recommended by the Jackson documentation + private static final ObjectMapper jsonObjectMapper = JsonMapper.builder() + .findAndAddModules() + .build(); + + @Override + public String serialize(Object value) { + if (value == null) { + return null; + } + + try { + return jsonObjectMapper.writeValueAsString(value); + } catch (JsonProcessingException e) { + throw new DataConverterException( + String.format("Failed to serialize argument of type '%s'. Detailed error message: %s", + value.getClass().getName(), e.getMessage()), + e); + } + } + + @Override + public T deserialize(String jsonText, Class targetType) { + if (jsonText == null || jsonText.length() == 0 || targetType == Void.class) { + return null; + } + + try { + return jsonObjectMapper.readValue(jsonText, targetType); + } catch (JsonProcessingException e) { + throw new DataConverterException(String.format("Failed to deserialize the JSON text to %s. " + + "Detailed error message: %s", targetType.getName(), e.getMessage()), e); + } + } +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/NewOrchestrationInstanceOptions.java b/durabletask-client/src/main/java/io/dapr/durabletask/NewOrchestrationInstanceOptions.java new file mode 100644 index 000000000..32639e41d --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/NewOrchestrationInstanceOptions.java @@ -0,0 +1,147 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +import java.time.Instant; + +/** + * Options for starting a new instance of an orchestration. + */ +public final class NewOrchestrationInstanceOptions { + private String version; + private String instanceId; + private Object input; + private Instant startTime; + private String appID; // Target app ID for cross-app workflow routing + + /** + * Default constructor for the {@link NewOrchestrationInstanceOptions} class. + */ + public NewOrchestrationInstanceOptions() { + } + + /** + * Sets the version of the orchestration to start. + * + * @param version the user-defined version of the orchestration + * @return this {@link NewOrchestrationInstanceOptions} object + */ + public NewOrchestrationInstanceOptions setVersion(String version) { + this.version = version; + return this; + } + + /** + * Sets the instance ID of the orchestration to start. + * If no instance ID is configured, the orchestration will be created with a randomly generated instance ID. + * + * @param instanceId the ID of the new orchestration instance + * @return this {@link NewOrchestrationInstanceOptions} object + */ + public NewOrchestrationInstanceOptions setInstanceId(String instanceId) { + this.instanceId = instanceId; + return this; + } + + /** + * Sets the input of the orchestration to start. 
+ * There are no restrictions on the type of inputs that can be used except that they must be serializable using + * the {@link DataConverter} that was configured for the {@link DurableTaskClient} at creation time. + * + * @param input the input of the new orchestration instance + * @return this {@link NewOrchestrationInstanceOptions} object + */ + public NewOrchestrationInstanceOptions setInput(Object input) { + this.input = input; + return this; + } + + /** + * Sets the start time of the new orchestration instance. + * By default, new orchestration instances start executing immediately. This method can be used + * to start them at a specific time in the future. + * + * @param startTime the start time of the new orchestration instance + * @return this {@link NewOrchestrationInstanceOptions} object + */ + public NewOrchestrationInstanceOptions setStartTime(Instant startTime) { + this.startTime = startTime; + return this; + } + + /** + * Sets the target app ID for cross-app workflow routing. + * + * @param appID the target app ID for cross-app routing + * @return this {@link NewOrchestrationInstanceOptions} object + */ + public NewOrchestrationInstanceOptions setAppID(String appID) { + this.appID = appID; + return this; + } + + /** + * Gets the user-specified version of the new orchestration. + * + * @return the user-specified version of the new orchestration. + */ + public String getVersion() { + return this.version; + } + + /** + * Gets the instance ID of the new orchestration. + * + * @return the instance ID of the new orchestration. + */ + public String getInstanceId() { + return this.instanceId; + } + + /** + * Gets the input of the new orchestration. + * + * @return the input of the new orchestration. + */ + public Object getInput() { + return this.input; + } + + /** + * Gets the configured start time of the new orchestration instance. + * + * @return the configured start time of the new orchestration instance. + */ + public Instant getStartTime() { + return this.startTime; + } + + /** + * Gets the configured target app ID for cross-app workflow routing. + * + * @return the configured target app ID + */ + public String getAppID() { + return this.appID; + } + + /** + * Checks if an app ID is configured for cross-app routing. + * + * @return true if an app ID is configured, false otherwise + */ + public boolean hasAppID() { + return this.appID != null && !this.appID.isEmpty(); + } +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/NonDeterministicOrchestratorException.java b/durabletask-client/src/main/java/io/dapr/durabletask/NonDeterministicOrchestratorException.java new file mode 100644 index 000000000..101e6bd04 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/NonDeterministicOrchestratorException.java @@ -0,0 +1,20 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package io.dapr.durabletask; + +final class NonDeterministicOrchestratorException extends RuntimeException { + public NonDeterministicOrchestratorException(String message) { + super(message); + } +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/OrchestrationMetadata.java b/durabletask-client/src/main/java/io/dapr/durabletask/OrchestrationMetadata.java new file mode 100644 index 000000000..a0565ba63 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/OrchestrationMetadata.java @@ -0,0 +1,283 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +import io.dapr.durabletask.implementation.protobuf.OrchestratorService; +import io.dapr.durabletask.implementation.protobuf.OrchestratorService.OrchestrationState; + +import java.time.Instant; + +import static io.dapr.durabletask.Helpers.isNullOrEmpty; + +/** + * Represents a snapshot of an orchestration instance's current state, including metadata. + * + *

Instances of this class are produced by methods in the {@link DurableTaskClient} class, such as + * {@link DurableTaskClient#getInstanceMetadata}, {@link DurableTaskClient#waitForInstanceStart} and + * {@link DurableTaskClient#waitForInstanceCompletion}.
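For example, assuming a DurableTaskClient named client and a getInstanceMetadata(instanceId, getInputsAndOutputs) shape, which this change only references, a status probe might look like:

    OrchestrationMetadata metadata = client.getInstanceMetadata(instanceId, true);
    if (!metadata.isInstanceFound()) {
        System.out.println("No instance with ID " + instanceId);
    } else if (metadata.isRunning()) {
        System.out.println("Still running; last updated at " + metadata.getLastUpdatedAt());
    } else if (metadata.isCompleted()) {
        System.out.println("Finished with status " + metadata.getRuntimeStatus());
    }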

+ */ +public final class OrchestrationMetadata { + private final DataConverter dataConverter; + private final boolean requestedInputsAndOutputs; + + private final String name; + private final String instanceId; + private final OrchestrationRuntimeStatus runtimeStatus; + private final Instant createdAt; + private final Instant lastUpdatedAt; + private final String serializedInput; + private final String serializedOutput; + private final String serializedCustomStatus; + private final FailureDetails failureDetails; + + OrchestrationMetadata( + OrchestratorService.GetInstanceResponse fetchResponse, + DataConverter dataConverter, + boolean requestedInputsAndOutputs) { + this(fetchResponse.getOrchestrationState(), dataConverter, requestedInputsAndOutputs); + } + + OrchestrationMetadata( + OrchestrationState state, + DataConverter dataConverter, + boolean requestedInputsAndOutputs) { + this.dataConverter = dataConverter; + this.requestedInputsAndOutputs = requestedInputsAndOutputs; + + this.name = state.getName(); + this.instanceId = state.getInstanceId(); + this.runtimeStatus = OrchestrationRuntimeStatus.fromProtobuf(state.getOrchestrationStatus()); + this.createdAt = DataConverter.getInstantFromTimestamp(state.getCreatedTimestamp()); + this.lastUpdatedAt = DataConverter.getInstantFromTimestamp(state.getLastUpdatedTimestamp()); + this.serializedInput = state.getInput().getValue(); + this.serializedOutput = state.getOutput().getValue(); + this.serializedCustomStatus = state.getCustomStatus().getValue(); + this.failureDetails = new FailureDetails(state.getFailureDetails()); + } + + /** + * Gets the name of the orchestration. + * + * @return the name of the orchestration + */ + public String getName() { + return this.name; + } + + /** + * Gets the unique ID of the orchestration instance. + * + * @return the unique ID of the orchestration instance + */ + public String getInstanceId() { + return this.instanceId; + } + + /** + * Gets the current runtime status of the orchestration instance at the time this object was fetched. + * + * @return the current runtime status of the orchestration instance at the time this object was fetched + */ + public OrchestrationRuntimeStatus getRuntimeStatus() { + return this.runtimeStatus; + } + + /** + * Gets the orchestration instance's creation time in UTC. + * + * @return the orchestration instance's creation time in UTC + */ + public Instant getCreatedAt() { + return this.createdAt; + } + + /** + * Gets the orchestration instance's last updated time in UTC. + * + * @return the orchestration instance's last updated time in UTC + */ + public Instant getLastUpdatedAt() { + return this.lastUpdatedAt; + } + + /** + * Gets the orchestration instance's serialized input, if any, as a string value. + * + * @return the orchestration instance's serialized input or {@code null} + */ + public String getSerializedInput() { + return this.serializedInput; + } + + /** + * Gets the orchestration instance's serialized output, if any, as a string value. + * + * @return the orchestration instance's serialized output or {@code null} + */ + public String getSerializedOutput() { + return this.serializedOutput; + } + + /** + * Gets the failure details, if any, for the failed orchestration instance. + * + *

This method returns data only if the orchestration is in the {@link OrchestrationRuntimeStatus#FAILED} state, + * and only if this instance metadata was fetched with the option to include output data.

+ * + * @return the failure details of the failed orchestration instance or {@code null} + */ + public FailureDetails getFailureDetails() { + return this.failureDetails; + } + + /** + * Gets a value indicating whether the orchestration instance was running at the time this object was fetched. + * + * @return {@code true} if the orchestration existed and was in a running state; otherwise {@code false} + */ + public boolean isRunning() { + return isInstanceFound() && this.runtimeStatus == OrchestrationRuntimeStatus.RUNNING; + } + + /** + * Gets a value indicating whether the orchestration instance was completed at the time this object was fetched. + * + *

An orchestration instance is considered completed when its runtime status value is + * {@link OrchestrationRuntimeStatus#COMPLETED}, {@link OrchestrationRuntimeStatus#FAILED}, or + * {@link OrchestrationRuntimeStatus#TERMINATED}.

+ * + * @return {@code true} if the orchestration was in a terminal state; otherwise {@code false} + */ + public boolean isCompleted() { + return + this.runtimeStatus == OrchestrationRuntimeStatus.COMPLETED + || this.runtimeStatus == OrchestrationRuntimeStatus.FAILED + || this.runtimeStatus == OrchestrationRuntimeStatus.TERMINATED; + } + + /** + * Deserializes the orchestration's input into an object of the specified type. + * + *

Deserialization is performed using the {@link DataConverter} that was configured on + * the {@link DurableTaskClient} object that created this orchestration metadata object.

+ * + * @param type the class associated with the type to deserialize the input data into + * @param <T> the type to deserialize the input data into + * @return the deserialized input value + * @throws IllegalStateException if the metadata was fetched without the option to read inputs and outputs + */ + public <T> T readInputAs(Class<T> type) { + return this.readPayloadAs(type, this.serializedInput); + } + + /** + * Deserializes the orchestration's output into an object of the specified type. + * + *

Deserialization is performed using the {@link DataConverter} that was configured on + * the {@link DurableTaskClient} object that created this orchestration metadata object.
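A small sketch, assuming the instance has completed, that the metadata was fetched with inputs and outputs included, and that the output deserializes to a String:

    OrchestrationMetadata metadata = client.waitForInstanceCompletion(instanceId, Duration.ofMinutes(1), true);
    String result = metadata.readOutputAs(String.class);   // null if the orchestration produced no output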

+ * + * @param type the class associated with the type to deserialize the output data into + * @param <T> the type to deserialize the output data into + * @return the deserialized output value + * @throws IllegalStateException if the metadata was fetched without the option to read inputs and outputs + */ + public <T> T readOutputAs(Class<T> type) { + return this.readPayloadAs(type, this.serializedOutput); + } + + /** + * Deserializes the orchestration's custom status into an object of the specified type. + * + *

Deserialization is performed using the {@link DataConverter} that was configured on + * the {@link DurableTaskClient} object that created this orchestration metadata object.

+ * + * @param type the class associated with the type to deserialize the custom status data into + * @param <T> the type to deserialize the custom status data into + * @return the deserialized custom status value + * @throws IllegalStateException if the metadata was fetched without the option to read inputs and outputs + */ + public <T> T readCustomStatusAs(Class<T> type) { + return this.readPayloadAs(type, this.serializedCustomStatus); + } + + /** + * Returns {@code true} if the orchestration has a non-empty custom status value; otherwise {@code false}. + * + *

This method will always return {@code false} if the metadata was fetched without the option to read inputs and + * outputs.

+ * + * @return {@code true} if the orchestration has a non-empty custom status value; otherwise {@code false} + */ + public boolean isCustomStatusFetched() { + return this.serializedCustomStatus != null && !this.serializedCustomStatus.isEmpty(); + } + + private T readPayloadAs(Class type, String payload) { + if (!this.requestedInputsAndOutputs) { + throw new IllegalStateException("This method can only be used when instance metadata is fetched with the option " + + "to include input and output data."); + } + + // Note that the Java gRPC implementation converts null protobuf strings into empty Java strings + if (payload == null || payload.isEmpty()) { + return null; + } + + return this.dataConverter.deserialize(payload, type); + } + + /** + * Generates a user-friendly string representation of the current metadata object. + * + * @return a user-friendly string representation of the current metadata object + */ + @Override + public String toString() { + String baseString = String.format( + "[Name: '%s', ID: '%s', RuntimeStatus: %s, CreatedAt: %s, LastUpdatedAt: %s", + this.name, + this.instanceId, + this.runtimeStatus, + this.createdAt, + this.lastUpdatedAt); + StringBuilder sb = new StringBuilder(baseString); + if (this.serializedInput != null) { + sb.append(", Input: '").append(getTrimmedPayload(this.serializedInput)).append('\''); + } + + if (this.serializedOutput != null) { + sb.append(", Output: '").append(getTrimmedPayload(this.serializedOutput)).append('\''); + } + + return sb.append(']').toString(); + } + + private static String getTrimmedPayload(String payload) { + int maxLength = 50; + if (payload.length() > maxLength) { + return payload.substring(0, maxLength) + "..."; + } + + return payload; + } + + /** + * Returns {@code true} if an orchestration instance with this ID was found; otherwise {@code false}. + * + * @return {@code true} if an orchestration instance with this ID was found; otherwise {@code false} + */ + public boolean isInstanceFound() { + return !(isNullOrEmpty(this.name) && isNullOrEmpty(this.instanceId)); + } +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/OrchestrationRunner.java b/durabletask-client/src/main/java/io/dapr/durabletask/OrchestrationRunner.java new file mode 100644 index 000000000..22b215460 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/OrchestrationRunner.java @@ -0,0 +1,169 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +import com.google.protobuf.InvalidProtocolBufferException; +import com.google.protobuf.StringValue; +import io.dapr.durabletask.implementation.protobuf.OrchestratorService; + +import java.time.Duration; +import java.util.Base64; +import java.util.HashMap; +import java.util.logging.Logger; + +/** + * Helper class for invoking orchestrations directly, without constructing a {@link DurableTaskGrpcWorker} object. + * + *

This static class can be used to execute orchestration logic directly. In order to use it for this purpose, the + * caller must provide orchestration state as serialized protobuf bytes.
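A minimal sketch of the intended flow, where base64Request is whatever payload the external engine handed to the application (for example via an HTTP trigger) and the greeting logic is purely illustrative:

    String base64Response = OrchestrationRunner.loadAndRun(base64Request,
        ctx -> "Hello, " + ctx.getInput(String.class) + "!");   // assumes the context exposes getInput
    // base64Response is returned to the engine that issued the request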

+ */ +public final class OrchestrationRunner { + private static final Logger logger = Logger.getLogger(OrchestrationRunner.class.getPackage().getName()); + private static final Duration DEFAULT_MAXIMUM_TIMER_INTERVAL = Duration.ofDays(3); + + private OrchestrationRunner() { + } + + /** + * Loads orchestration history from {@code base64EncodedOrchestratorRequest} and uses it to execute the + * orchestrator function code pointed to by {@code orchestratorFunc}. + * + * @param base64EncodedOrchestratorRequest the base64-encoded protobuf payload representing an orchestrator execution + * request + * @param orchestratorFunc a function that implements the orchestrator logic + * @param the type of the orchestrator function output, which must be serializable + * to JSON + * @return a base64-encoded protobuf payload of orchestrator actions to be interpreted by the external + * orchestration engine + * @throws IllegalArgumentException if either parameter is {@code null} or + * if {@code base64EncodedOrchestratorRequest} is not valid base64-encoded protobuf + */ + public static String loadAndRun( + String base64EncodedOrchestratorRequest, + OrchestratorFunction orchestratorFunc) { + // Example string: CiBhOTMyYjdiYWM5MmI0MDM5YjRkMTYxMDIwNzlmYTM1YSIaCP///////////wESCwi254qRBhDk+rgocgAicgj////// + // ///8BEgwIs+eKkQYQzMXjnQMaVwoLSGVsbG9DaXRpZXMSACJGCiBhOTMyYjdiYWM5MmI0MDM5YjRkMTYxMDIwNzlmYTM1YRIiCiA3ODEwOTA + // 2N2Q4Y2Q0ODg1YWU4NjQ0OTNlMmRlMGQ3OA== + byte[] decodedBytes = Base64.getDecoder().decode(base64EncodedOrchestratorRequest); + byte[] resultBytes = loadAndRun(decodedBytes, orchestratorFunc); + return Base64.getEncoder().encodeToString(resultBytes); + } + + /** + * Loads orchestration history from {@code orchestratorRequestBytes} and uses it to execute the + * orchestrator function code pointed to by {@code orchestratorFunc}. + * + * @param orchestratorRequestBytes the protobuf payload representing an orchestrator execution request + * @param orchestratorFunc a function that implements the orchestrator logic + * @param the type of the orchestrator function output, which must be serializable to JSON + * @return a protobuf-encoded payload of orchestrator actions to be interpreted by the external orchestration engine + * @throws IllegalArgumentException if either parameter is {@code null} or if {@code orchestratorRequestBytes} is + * not valid protobuf + */ + public static byte[] loadAndRun( + byte[] orchestratorRequestBytes, + OrchestratorFunction orchestratorFunc) { + if (orchestratorFunc == null) { + throw new IllegalArgumentException("orchestratorFunc must not be null"); + } + + // Wrap the provided lambda in an anonymous TaskOrchestration + TaskOrchestration orchestration = ctx -> { + R output = orchestratorFunc.apply(ctx); + ctx.complete(output); + }; + + return loadAndRun(orchestratorRequestBytes, orchestration); + } + + /** + * Loads orchestration history from {@code base64EncodedOrchestratorRequest} and uses it to execute the + * {@code orchestration}. 
+ * + * @param base64EncodedOrchestratorRequest the base64-encoded protobuf payload representing an orchestrator + * execution request + * @param orchestration the orchestration to execute + * @return a base64-encoded protobuf payload of orchestrator actions to be interpreted by the external + * orchestration engine + * @throws IllegalArgumentException if either parameter is {@code null} or + * if {@code base64EncodedOrchestratorRequest} is not valid base64-encoded protobuf + */ + public static String loadAndRun( + String base64EncodedOrchestratorRequest, + TaskOrchestration orchestration) { + byte[] decodedBytes = Base64.getDecoder().decode(base64EncodedOrchestratorRequest); + byte[] resultBytes = loadAndRun(decodedBytes, orchestration); + return Base64.getEncoder().encodeToString(resultBytes); + } + + /** + * Loads orchestration history from {@code orchestratorRequestBytes} and uses it to execute the + * {@code orchestration}. + * + * @param orchestratorRequestBytes the protobuf payload representing an orchestrator execution request + * @param orchestration the orchestration to execute + * @return a protobuf-encoded payload of orchestrator actions to be interpreted by the external orchestration engine + * @throws IllegalArgumentException if either parameter is {@code null} or if {@code orchestratorRequestBytes} + * is not valid protobuf + */ + public static byte[] loadAndRun(byte[] orchestratorRequestBytes, TaskOrchestration orchestration) { + if (orchestratorRequestBytes == null || orchestratorRequestBytes.length == 0) { + throw new IllegalArgumentException("triggerStateProtoBytes must not be null or empty"); + } + + if (orchestration == null) { + throw new IllegalArgumentException("orchestration must not be null"); + } + + OrchestratorService.OrchestratorRequest orchestratorRequest; + try { + orchestratorRequest = OrchestratorService.OrchestratorRequest.parseFrom(orchestratorRequestBytes); + } catch (InvalidProtocolBufferException e) { + throw new IllegalArgumentException("triggerStateProtoBytes was not valid protobuf", e); + } + + // Register the passed orchestration as the default ("*") orchestration + HashMap orchestrationFactories = new HashMap<>(); + orchestrationFactories.put("*", new TaskOrchestrationFactory() { + @Override + public String getName() { + return "*"; + } + + @Override + public TaskOrchestration create() { + return orchestration; + } + }); + + TaskOrchestrationExecutor taskOrchestrationExecutor = new TaskOrchestrationExecutor( + orchestrationFactories, + new JacksonDataConverter(), + DEFAULT_MAXIMUM_TIMER_INTERVAL, + logger, + null); // No app ID for static runner + + // TODO: Error handling + TaskOrchestratorResult taskOrchestratorResult = taskOrchestrationExecutor.execute( + orchestratorRequest.getPastEventsList(), + orchestratorRequest.getNewEventsList()); + + OrchestratorService.OrchestratorResponse response = OrchestratorService.OrchestratorResponse.newBuilder() + .setInstanceId(orchestratorRequest.getInstanceId()) + .addAllActions(taskOrchestratorResult.getActions()) + .setCustomStatus(StringValue.of(taskOrchestratorResult.getCustomStatus())) + .build(); + return response.toByteArray(); + } +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/OrchestrationRuntimeStatus.java b/durabletask-client/src/main/java/io/dapr/durabletask/OrchestrationRuntimeStatus.java new file mode 100644 index 000000000..1bdd33ab3 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/OrchestrationRuntimeStatus.java @@ -0,0 +1,118 @@ +/* + * 
Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +import io.dapr.durabletask.implementation.protobuf.OrchestratorService; + +import static io.dapr.durabletask.implementation.protobuf.OrchestratorService.OrchestrationStatus.ORCHESTRATION_STATUS_CANCELED; +import static io.dapr.durabletask.implementation.protobuf.OrchestratorService.OrchestrationStatus.ORCHESTRATION_STATUS_COMPLETED; +import static io.dapr.durabletask.implementation.protobuf.OrchestratorService.OrchestrationStatus.ORCHESTRATION_STATUS_CONTINUED_AS_NEW; +import static io.dapr.durabletask.implementation.protobuf.OrchestratorService.OrchestrationStatus.ORCHESTRATION_STATUS_FAILED; +import static io.dapr.durabletask.implementation.protobuf.OrchestratorService.OrchestrationStatus.ORCHESTRATION_STATUS_PENDING; +import static io.dapr.durabletask.implementation.protobuf.OrchestratorService.OrchestrationStatus.ORCHESTRATION_STATUS_RUNNING; +import static io.dapr.durabletask.implementation.protobuf.OrchestratorService.OrchestrationStatus.ORCHESTRATION_STATUS_SUSPENDED; +import static io.dapr.durabletask.implementation.protobuf.OrchestratorService.OrchestrationStatus.ORCHESTRATION_STATUS_TERMINATED; + +/** + * Enum describing the runtime status of the orchestration. + */ +public enum OrchestrationRuntimeStatus { + /** + * The orchestration started running. + */ + RUNNING, + + /** + * The orchestration completed normally. + */ + COMPLETED, + + /** + * The orchestration is transitioning into a new instance. + * This status value is obsolete and exists only for compatibility reasons. + */ + CONTINUED_AS_NEW, + + /** + * The orchestration completed with an unhandled exception. + */ + FAILED, + + /** + * The orchestration canceled gracefully. + * The Canceled status is not currently used and exists only for compatibility reasons. + */ + CANCELED, + + /** + * The orchestration was abruptly terminated via a management API call. + */ + TERMINATED, + + /** + * The orchestration was scheduled but hasn't started running. + */ + PENDING, + + /** + * The orchestration is in a suspended state. 
+ */ + SUSPENDED; + + static OrchestrationRuntimeStatus fromProtobuf(OrchestratorService.OrchestrationStatus status) { + switch (status) { + case ORCHESTRATION_STATUS_RUNNING: + return RUNNING; + case ORCHESTRATION_STATUS_COMPLETED: + return COMPLETED; + case ORCHESTRATION_STATUS_CONTINUED_AS_NEW: + return CONTINUED_AS_NEW; + case ORCHESTRATION_STATUS_FAILED: + return FAILED; + case ORCHESTRATION_STATUS_CANCELED: + return CANCELED; + case ORCHESTRATION_STATUS_TERMINATED: + return TERMINATED; + case ORCHESTRATION_STATUS_PENDING: + return PENDING; + case ORCHESTRATION_STATUS_SUSPENDED: + return SUSPENDED; + default: + throw new IllegalArgumentException(String.format("Unknown status value: %s", status)); + } + } + + static OrchestratorService.OrchestrationStatus toProtobuf(OrchestrationRuntimeStatus status) { + switch (status) { + case RUNNING: + return ORCHESTRATION_STATUS_RUNNING; + case COMPLETED: + return ORCHESTRATION_STATUS_COMPLETED; + case CONTINUED_AS_NEW: + return ORCHESTRATION_STATUS_CONTINUED_AS_NEW; + case FAILED: + return ORCHESTRATION_STATUS_FAILED; + case CANCELED: + return ORCHESTRATION_STATUS_CANCELED; + case TERMINATED: + return ORCHESTRATION_STATUS_TERMINATED; + case PENDING: + return ORCHESTRATION_STATUS_PENDING; + case SUSPENDED: + return ORCHESTRATION_STATUS_SUSPENDED; + default: + throw new IllegalArgumentException(String.format("Unknown status value: %s", status)); + } + } +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/OrchestrationStatusQuery.java b/durabletask-client/src/main/java/io/dapr/durabletask/OrchestrationStatusQuery.java new file mode 100644 index 000000000..864fc37c8 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/OrchestrationStatusQuery.java @@ -0,0 +1,217 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +import javax.annotation.Nullable; +import java.time.Instant; +import java.util.ArrayList; +import java.util.List; + +/** + * Class used for constructing orchestration metadata queries. + */ +public final class OrchestrationStatusQuery { + private List runtimeStatusList = new ArrayList<>(); + private Instant createdTimeFrom; + private Instant createdTimeTo; + private List taskHubNames = new ArrayList<>(); + private int maxInstanceCount = 100; + private String continuationToken; + private String instanceIdPrefix; + private boolean fetchInputsAndOutputs; + + /** + * Sole constructor. + */ + public OrchestrationStatusQuery() { + } + + /** + * Sets the list of runtime status values to use as a filter. Only orchestration instances that have a matching + * runtime status will be returned. The default {@code null} value will disable runtime status filtering. 
+ * + * @param runtimeStatusList the list of runtime status values to use as a filter + * @return this query object + */ + public OrchestrationStatusQuery setRuntimeStatusList(@Nullable List runtimeStatusList) { + this.runtimeStatusList = runtimeStatusList; + return this; + } + + /** + * Include orchestration instances that were created after the specified instant. + * + * @param createdTimeFrom the minimum orchestration creation time to use as a filter or {@code null} to disable this + * filter + * @return this query object + */ + public OrchestrationStatusQuery setCreatedTimeFrom(@Nullable Instant createdTimeFrom) { + this.createdTimeFrom = createdTimeFrom; + return this; + } + + /** + * Include orchestration instances that were created before the specified instant. + * + * @param createdTimeTo the maximum orchestration creation time to use as a filter or {@code null} to disable this + * filter + * @return this query object + */ + public OrchestrationStatusQuery setCreatedTimeTo(@Nullable Instant createdTimeTo) { + this.createdTimeTo = createdTimeTo; + return this; + } + + /** + * Sets the maximum number of records that can be returned by the query. The default value is 100. + * + *

Requests may return fewer records than the specified page size, even when more records remain. + * Always check the continuation token to determine whether another page is available.

+ * + * @param maxInstanceCount the maximum number of orchestration metadata records to return + * @return this query object + */ + public OrchestrationStatusQuery setMaxInstanceCount(int maxInstanceCount) { + this.maxInstanceCount = maxInstanceCount; + return this; + } + + /** + * Include orchestration metadata records that have a matching task hub name. + * + * @param taskHubNames the task hub name to match or {@code null} to disable this filter + * @return this query object + */ + public OrchestrationStatusQuery setTaskHubNames(@Nullable List taskHubNames) { + this.taskHubNames = taskHubNames; + return this; + } + + /** + * Sets the continuation token used to continue paging through orchestration metadata results. + * + *

This should always be the continuation token value from the previous query's + * {@link OrchestrationStatusQueryResult} result.
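Assuming a DurableTaskClient named client and the queryInstances method referenced later in this change, a typical paging loop might look like this:

    OrchestrationStatusQuery query = new OrchestrationStatusQuery().setMaxInstanceCount(50);
    OrchestrationStatusQueryResult page;
    do {
        page = client.queryInstances(query);
        for (OrchestrationMetadata metadata : page.getOrchestrationState()) {
            System.out.println(metadata);
        }
        query.setContinuationToken(page.getContinuationToken());
    } while (page.getContinuationToken() != null);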

+ * + * @param continuationToken the continuation token from the previous query + * @return this query object + */ + public OrchestrationStatusQuery setContinuationToken(@Nullable String continuationToken) { + this.continuationToken = continuationToken; + return this; + } + + /** + * Include orchestration metadata records with the specified instance ID prefix. + * + *

For example, if there are three orchestration instances in the metadata store with IDs "Foo", "Bar", and "Baz", + * specifying a prefix value of "B" will exclude "Foo" since its ID doesn't start with "B".
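The prefix filter composes with the other filters on this class; a hedged sketch in which the "order-" prefix and the one-day window are illustrative values:

    OrchestrationStatusQuery query = new OrchestrationStatusQuery()
        .setInstanceIdPrefix("order-")
        .setRuntimeStatusList(Arrays.asList(OrchestrationRuntimeStatus.FAILED))   // java.util.Arrays
        .setCreatedTimeFrom(Instant.now().minus(Duration.ofDays(1)));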

+ * + * @param instanceIdPrefix the instance ID prefix filter value + * @return this query object + */ + public OrchestrationStatusQuery setInstanceIdPrefix(@Nullable String instanceIdPrefix) { + this.instanceIdPrefix = instanceIdPrefix; + return this; + } + + /** + * Sets whether to fetch orchestration inputs, outputs, and custom status values. The default value is {@code false}. + * + * @param fetchInputsAndOutputs {@code true} to fetch orchestration inputs, outputs, and custom status values, + * otherwise {@code false} + * @return this query object + */ + public OrchestrationStatusQuery setFetchInputsAndOutputs(boolean fetchInputsAndOutputs) { + this.fetchInputsAndOutputs = fetchInputsAndOutputs; + return this; + } + + /** + * Gets the configured runtime status filter or {@code null} if none was configured. + * + * @return the configured runtime status filter as a list of values or {@code null} if none was configured + */ + public List getRuntimeStatusList() { + return runtimeStatusList; + } + + /** + * Gets the configured minimum orchestration creation time or {@code null} if none was configured. + * + * @return the configured minimum orchestration creation time or {@code null} if none was configured + */ + @Nullable + public Instant getCreatedTimeFrom() { + return createdTimeFrom; + } + + /** + * Gets the configured maximum orchestration creation time or {@code null} if none was configured. + * + * @return the configured maximum orchestration creation time or {@code null} if none was configured + */ + @Nullable + public Instant getCreatedTimeTo() { + return createdTimeTo; + } + + /** + * Gets the configured maximum number of records that can be returned by the query. + * + * @return the configured maximum number of records that can be returned by the query + */ + public int getMaxInstanceCount() { + return maxInstanceCount; + } + + /** + * Gets the configured task hub names to match or {@code null} if none were configured. + * + * @return the configured task hub names to match or {@code null} if none were configured + */ + public List getTaskHubNames() { + return taskHubNames; + } + + /** + * Gets the configured continuation token value or {@code null} if none was configured. + * + * @return the configured continuation token value or {@code null} if none was configured + */ + @Nullable + public String getContinuationToken() { + return continuationToken; + } + + /** + * Gets the configured instance ID prefix filter value or {@code null} if none was configured. + * + * @return the configured instance ID prefix filter value or {@code null} if none was configured. + */ + @Nullable + public String getInstanceIdPrefix() { + return instanceIdPrefix; + } + + /** + * Gets the configured value that determines whether to fetch orchestration inputs, outputs, and custom status values. 
+ * + * @return the configured value that determines whether to fetch orchestration inputs, outputs, and custom + * status values + */ + public boolean isFetchInputsAndOutputs() { + return fetchInputsAndOutputs; + } +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/OrchestrationStatusQueryResult.java b/durabletask-client/src/main/java/io/dapr/durabletask/OrchestrationStatusQueryResult.java new file mode 100644 index 000000000..efb4908c1 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/OrchestrationStatusQueryResult.java @@ -0,0 +1,53 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +import javax.annotation.Nullable; +import java.util.List; + +/** + * Class representing the results of a filtered orchestration metadata query. + * + *

Orchestration metadata can be queried with filters using the {@link DurableTaskClient#queryInstances} method.

+ */ +public final class OrchestrationStatusQueryResult { + private final List orchestrationStates; + private final String continuationToken; + + OrchestrationStatusQueryResult(List orchestrationStates, @Nullable String continuationToken) { + this.orchestrationStates = orchestrationStates; + this.continuationToken = continuationToken; + } + + /** + * Gets the list of orchestration metadata records that matched the {@link DurableTaskClient#queryInstances} query. + * + * @return the list of orchestration metadata records that matched the {@link DurableTaskClient#queryInstances} query. + */ + public List getOrchestrationState() { + return this.orchestrationStates; + } + + /** + * Gets the continuation token to use with the next query or {@code null} if no more metadata records are found. + * + *

Note that a non-null value does not always mean that there are more metadata records that can be returned by a + * query.

+ * + * @return the continuation token to use with the next query or {@code null} if no more metadata records are found. + */ + public String getContinuationToken() { + return this.continuationToken; + } +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/OrchestratorFunction.java b/durabletask-client/src/main/java/io/dapr/durabletask/OrchestratorFunction.java new file mode 100644 index 000000000..a4d2f2f08 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/OrchestratorFunction.java @@ -0,0 +1,38 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +/** + * Functional interface for inline orchestrator functions. + * + *

See the description of {@link TaskOrchestration} for more information about how to correctly + * implement orchestrators.

+ * + * @param the type of the result returned by the function + */ +@FunctionalInterface +public interface OrchestratorFunction { + /** + * Executes an orchestrator function and returns a result to use as the orchestration output. + * + *

This functional interface is designed to support implementing orchestrators as lambda functions. It's intended + * to be very similar to {@link java.util.function.Function}, but with a signature that's specific to + * orchestrators.

+ * + * @param ctx the orchestration context, which provides access to additional context for the current orchestration + * execution + * @return the serializable output of the orchestrator function + */ + R apply(TaskOrchestrationContext ctx); +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/PurgeInstanceCriteria.java b/durabletask-client/src/main/java/io/dapr/durabletask/PurgeInstanceCriteria.java new file mode 100644 index 000000000..50260c1fc --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/PurgeInstanceCriteria.java @@ -0,0 +1,125 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +import javax.annotation.Nullable; +import java.time.Duration; +import java.time.Instant; +import java.util.ArrayList; +import java.util.List; + +/** + * Class used for constructing orchestration instance purge selection criteria. + */ +public final class PurgeInstanceCriteria { + + private Instant createdTimeFrom; + private Instant createdTimeTo; + private List runtimeStatusList = new ArrayList<>(); + private Duration timeout; + + /** + * Creates a new, default instance of the {@code PurgeInstanceCriteria} class. + */ + public PurgeInstanceCriteria() { + } + + /** + * Purge orchestration instances that were created after the specified instant. + * + * @param createdTimeFrom the minimum orchestration creation time to use as a selection criteria or {@code null} to + * disable this selection criteria + * @return this criteria object + */ + public PurgeInstanceCriteria setCreatedTimeFrom(Instant createdTimeFrom) { + this.createdTimeFrom = createdTimeFrom; + return this; + } + + /** + * Purge orchestration instances that were created before the specified instant. + * + * @param createdTimeTo the maximum orchestration creation time to use as a selection criteria or {@code null} to + * disable this selection criteria + * @return this criteria object + */ + public PurgeInstanceCriteria setCreatedTimeTo(Instant createdTimeTo) { + this.createdTimeTo = createdTimeTo; + return this; + } + + /** + * Sets the list of runtime status values to use as a selection criteria. Only orchestration instances that have a + * matching runtime status will be purged. An empty list is the same as selecting for all runtime status values. + * + * @param runtimeStatusList the list of runtime status values to use as a selection criteria + * @return this criteria object + */ + public PurgeInstanceCriteria setRuntimeStatusList(List runtimeStatusList) { + this.runtimeStatusList = runtimeStatusList; + return this; + } + + /** + * Sets a timeout duration for the purge operation. Setting to {@code null} will reset the timeout + * to be the default value. 
+ * + * @param timeout the amount of time to wait for the purge instance operation to complete + * @return this criteria object + */ + public PurgeInstanceCriteria setTimeout(Duration timeout) { + this.timeout = timeout; + return this; + } + + /** + * Gets the configured minimum orchestration creation time or {@code null} if none was configured. + * + * @return the configured minimum orchestration creation time or {@code null} if none was configured + */ + @Nullable + public Instant getCreatedTimeFrom() { + return this.createdTimeFrom; + } + + /** + * Gets the configured maximum orchestration creation time or {@code null} if none was configured. + * + * @return the configured maximum orchestration creation time or {@code null} if none was configured + */ + @Nullable + public Instant getCreatedTimeTo() { + return this.createdTimeTo; + } + + /** + * Gets the configured runtime status selection criteria. + * + * @return the configured runtime status filter as a list of values + */ + public List getRuntimeStatusList() { + return this.runtimeStatusList; + } + + /** + * Gets the configured timeout duration or {@code null} if none was configured. + * + * @return the configured timeout + */ + @Nullable + public Duration getTimeout() { + return this.timeout; + } + +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/PurgeResult.java b/durabletask-client/src/main/java/io/dapr/durabletask/PurgeResult.java new file mode 100644 index 000000000..8d3521866 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/PurgeResult.java @@ -0,0 +1,37 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +/** + * Class representing the results of an orchestration state purge operation. + * + *

Orchestration state can be purged using any of the {@link DurableTaskClient#purgeInstances} method overloads.

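As a rough usage sketch (not part of this patch): it assumes a DurableTaskClient overload that accepts a PurgeInstanceCriteria, that client and logger are already available, and the 30-day retention window and status filter are example values.

// Purge completed instances older than 30 days, waiting up to 5 minutes for the operation.
PurgeInstanceCriteria criteria = new PurgeInstanceCriteria()
    .setCreatedTimeTo(Instant.now().minus(Duration.ofDays(30)))
    .setRuntimeStatusList(Collections.singletonList(OrchestrationRuntimeStatus.COMPLETED))
    .setTimeout(Duration.ofMinutes(5));

PurgeResult result = client.purgeInstances(criteria);
logger.info("Purged " + result.getDeletedInstanceCount() + " orchestration instances");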
+ */ +public final class PurgeResult { + + private final int deletedInstanceCount; + + PurgeResult(int deletedInstanceCount) { + this.deletedInstanceCount = deletedInstanceCount; + } + + /** + * Gets the number of purged orchestration instances. + * + * @return the number of purged orchestration instances + */ + public int getDeletedInstanceCount() { + return this.deletedInstanceCount; + } +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/RetryContext.java b/durabletask-client/src/main/java/io/dapr/durabletask/RetryContext.java new file mode 100644 index 000000000..620e02c7d --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/RetryContext.java @@ -0,0 +1,79 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +import java.time.Duration; + +/** + * Context data that's provided to {@link RetryHandler} implementations. + */ +public final class RetryContext { + private final TaskOrchestrationContext orchestrationContext; + private final int lastAttemptNumber; + private final FailureDetails lastFailure; + private final Duration totalRetryTime; + + RetryContext( + TaskOrchestrationContext orchestrationContext, + int lastAttemptNumber, + FailureDetails lastFailure, + Duration totalRetryTime) { + this.orchestrationContext = orchestrationContext; + this.lastAttemptNumber = lastAttemptNumber; + this.lastFailure = lastFailure; + this.totalRetryTime = totalRetryTime; + } + + /** + * Gets the context of the current orchestration. + * + *

The orchestration context can be used in retry handlers to schedule timers (via the + * {@link TaskOrchestrationContext#createTimer} methods) for implementing delays between retries. It can also be + * used to implement time-based retry logic by using the {@link TaskOrchestrationContext#getCurrentInstant} method. + *

+ * + * @return the context of the parent orchestration + */ + public TaskOrchestrationContext getOrchestrationContext() { + return this.orchestrationContext; + } + + /** + * Gets the details of the previous task failure, including the exception type, message, and callstack. + * + * @return the details of the previous task failure + */ + public FailureDetails getLastFailure() { + return this.lastFailure; + } + + /** + * Gets the previous retry attempt number. This number starts at 1 and increments each time the retry handler + * is invoked for a particular task failure. + * + * @return the previous retry attempt number + */ + public int getLastAttemptNumber() { + return this.lastAttemptNumber; + } + + /** + * Gets the total amount of time spent in a retry loop for the current task. + * + * @return the total amount of time spent in a retry loop for the current task + */ + public Duration getTotalRetryTime() { + return this.totalRetryTime; + } +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/RetryHandler.java b/durabletask-client/src/main/java/io/dapr/durabletask/RetryHandler.java new file mode 100644 index 000000000..ad246a0c6 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/RetryHandler.java @@ -0,0 +1,31 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +/** + * Functional interface for implementing custom task retry handlers. + * + *

It's important to remember that retry handler code is an extension of the orchestrator code and must + * therefore comply with all the determinism requirements of orchestrator code.

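For illustration (not part of the patch), a handler honoring those determinism requirements might look like the sketch below; the attempt limit, time budget, and delay are example values:

// A deterministic retry handler: all waiting is done through durable timers on the
// orchestration context, never with Thread.sleep or wall-clock time.
RetryHandler stopAfterThreeAttempts = retryContext -> {
    if (retryContext.getLastAttemptNumber() >= 3
            || retryContext.getTotalRetryTime().compareTo(Duration.ofMinutes(5)) > 0) {
        return false; // give up
    }
    // Delay the next attempt with a durable timer so replays stay deterministic.
    retryContext.getOrchestrationContext().createTimer(Duration.ofSeconds(10)).await();
    return true; // try again
};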
+ */ +@FunctionalInterface +public interface RetryHandler { + /** + * Invokes the retry handler logic and returns a value indicating whether to continue retrying. + * + * @param context retry context that's updated between each retry attempt + * @return {@code true} to continue retrying or {@code false} to stop retrying. + */ + boolean handle(RetryContext context); +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/RetryPolicy.java b/durabletask-client/src/main/java/io/dapr/durabletask/RetryPolicy.java new file mode 100644 index 000000000..9efd912b1 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/RetryPolicy.java @@ -0,0 +1,176 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +import javax.annotation.Nullable; +import java.time.Duration; +import java.util.Objects; + +/** + * A declarative retry policy that can be configured for activity or sub-orchestration calls. + */ +public final class RetryPolicy { + + private int maxNumberOfAttempts; + private Duration firstRetryInterval; + private double backoffCoefficient = 1.0; + private Duration maxRetryInterval = Duration.ZERO; + private Duration retryTimeout = Duration.ZERO; + + /** + * Creates a new {@code RetryPolicy} object. + * + * @param maxNumberOfAttempts the maximum number of task invocation attempts; must be 1 or greater + * @param firstRetryInterval the amount of time to delay between the first and second attempt + * @throws IllegalArgumentException if {@code maxNumberOfAttempts} is zero or negative + */ + public RetryPolicy(int maxNumberOfAttempts, Duration firstRetryInterval) { + this.setMaxNumberOfAttempts(maxNumberOfAttempts); + this.setFirstRetryInterval(firstRetryInterval); + } + + /** + * Sets the maximum number of task invocation attempts; must be 1 or greater. + * + *

This value represents the number of times to attempt to execute the task. It does not represent + * the maximum number of times to retry the task. This is why the number must be 1 or greater.

+ * + * @param maxNumberOfAttempts the maximum number of attempts; must be 1 or greater + * @return this retry policy object + * @throws IllegalArgumentException if {@code maxNumberOfAttempts} is zero or negative + */ + public RetryPolicy setMaxNumberOfAttempts(int maxNumberOfAttempts) { + if (maxNumberOfAttempts <= 0) { + throw new IllegalArgumentException("The value for maxNumberOfAttempts must be greater than zero."); + } + this.maxNumberOfAttempts = maxNumberOfAttempts; + return this; + } + + /** + * Sets the amount of time to delay between the first and second attempt. + * + * @param firstRetryInterval the amount of time to delay between the first and second attempt + * @return this retry policy object + * @throws IllegalArgumentException if {@code firstRetryInterval} is {@code null}, zero, or negative. + */ + public RetryPolicy setFirstRetryInterval(Duration firstRetryInterval) { + if (firstRetryInterval == null) { + throw new IllegalArgumentException("firstRetryInterval cannot be null."); + } + if (firstRetryInterval.isZero() || firstRetryInterval.isNegative()) { + throw new IllegalArgumentException("The value for firstRetryInterval must be greater than zero."); + } + this.firstRetryInterval = firstRetryInterval; + return this; + } + + /** + * Sets the exponential backoff coefficient used to determine the delay between subsequent retries. + * Must be 1.0 or greater. + * + *

To avoid extremely long delays between retries, consider also specifying a maximum retry interval using the + * {@link #setMaxRetryInterval} method.

+ * + * @param backoffCoefficient the exponential backoff coefficient + * @return this retry policy object + * @throws IllegalArgumentException if {@code backoffCoefficient} is less than 1.0 + */ + public RetryPolicy setBackoffCoefficient(double backoffCoefficient) { + if (backoffCoefficient < 1.0) { + throw new IllegalArgumentException("The value for backoffCoefficient must be greater or equal to 1.0."); + } + this.backoffCoefficient = backoffCoefficient; + return this; + } + + /** + * Sets the maximum time to delay between attempts. + * + *

It's recommended to set a maximum retry interval whenever using a backoff coefficient that's greater than the + * default of 1.0.

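A usage sketch combining these settings with an activity call (illustrative only; ctx is the orchestrator's TaskOrchestrationContext, and the activity name, input, and limits are example values):

// Exponential backoff: 2s, 4s, 8s, ... capped at 1 minute, at most 5 attempts overall.
RetryPolicy retryPolicy = new RetryPolicy(5, Duration.ofSeconds(2))
    .setBackoffCoefficient(2.0)
    .setMaxRetryInterval(Duration.ofMinutes(1));

// Apply the policy to a single activity invocation from within an orchestrator.
String receipt = ctx.callActivity("ChargeCustomer", orderId,
    TaskOptions.withRetryPolicy(retryPolicy), String.class).await();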
+ * + * @param maxRetryInterval the maximum time to delay between attempts or {@code null} to remove the maximum retry + * interval + * @return this retry policy object + */ + public RetryPolicy setMaxRetryInterval(@Nullable Duration maxRetryInterval) { + if (maxRetryInterval != null && maxRetryInterval.compareTo(this.firstRetryInterval) < 0) { + throw new IllegalArgumentException("The value for maxRetryInterval must be greater than or equal to the value " + + "for firstRetryInterval."); + } + this.maxRetryInterval = maxRetryInterval; + return this; + } + + /** + * Sets the overall timeout for retries, regardless of the retry count. + * + * @param retryTimeout the overall timeout for retries + * @return this retry policy object + */ + public RetryPolicy setRetryTimeout(Duration retryTimeout) { + if (retryTimeout == null || retryTimeout.compareTo(this.firstRetryInterval) < 0) { + throw new IllegalArgumentException("The value for retryTimeout cannot be null and must be greater than or equal " + + "to the value for firstRetryInterval."); + } + this.retryTimeout = retryTimeout; + return this; + } + + /** + * Gets the configured maximum number of task invocation attempts. + * + * @return the configured maximum number of task invocation attempts. + */ + public int getMaxNumberOfAttempts() { + return this.maxNumberOfAttempts; + } + + /** + * Gets the configured amount of time to delay between the first and second attempt. + * + * @return the configured amount of time to delay between the first and second attempt + */ + public Duration getFirstRetryInterval() { + return this.firstRetryInterval; + } + + /** + * Gets the configured exponential backoff coefficient used to determine the delay between subsequent retries. + * + * @return the configured exponential backoff coefficient used to determine the delay between subsequent retries + */ + public double getBackoffCoefficient() { + return this.backoffCoefficient; + } + + /** + * Gets the configured maximum time to delay between attempts. + * + * @return the configured maximum time to delay between attempts + */ + public Duration getMaxRetryInterval() { + return this.maxRetryInterval; + } + + /** + * Gets the configured overall timeout for retries. + * + * @return the configured overall timeout for retries + */ + public Duration getRetryTimeout() { + return this.retryTimeout; + } +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/Task.java b/durabletask-client/src/main/java/io/dapr/durabletask/Task.java new file mode 100644 index 000000000..a3f331381 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/Task.java @@ -0,0 +1,91 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +import io.dapr.durabletask.interruption.OrchestratorBlockedException; + +import java.util.concurrent.CompletableFuture; +import java.util.function.Consumer; +import java.util.function.Function; + +/** + * Represents an asynchronous operation in a durable orchestration. 
+ * + *

{@code Task} instances are created by methods on the {@link TaskOrchestrationContext} class, which is available + * in {@link TaskOrchestration} implementations. For example, scheduling an activity will return a task.

+ *
+ * Task{@literal <}int{@literal >} activityTask = ctx.callActivity("MyActivity", someInput, int.class);
+ * 
+ *

Orchestrator code uses the {@link #await()} method to block on the completion of the task and retrieve the result. + * If the task is not yet complete, the {@code await()} method will throw an {@link OrchestratorBlockedException}, which + * pauses the orchestrator's execution so that it can save its progress into durable storage and schedule any + * outstanding work. When the task is complete, the orchestrator will run again from the beginning and the next time + * the task's {@code await()} method is called, the result will be returned, or a {@link TaskFailedException} will be + * thrown if the result of the task was an unhandled exception.

+ *

Note that orchestrator code must never catch {@code OrchestratorBlockedException} because doing so can cause the + * orchestration instance to get permanently stuck.

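For example (an illustrative sketch; the activity name and input are assumptions), a task result can also be transformed with thenApply before awaiting:

// Chain a transformation onto an activity result; nothing is observed until await().
Task<Integer> greetingLength = ctx.callActivity("GetGreeting", "Alice", String.class)
    .thenApply(String::length);

int length = greetingLength.await();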
+ * + * @param the return type of the task + */ +public abstract class Task { + final CompletableFuture future; + + Task(CompletableFuture future) { + this.future = future; + } + + /** + * Returns {@code true} if completed in any fashion: normally, with an exception, or via cancellation. + * + * @return {@code true} if completed, otherwise {@code false} + */ + public boolean isDone() { + return this.future.isDone(); + } + + /** + * Returns {@code true} if the task was cancelled. + * + * @return {@code true} if the task was cancelled, otherwise {@code false} + */ + public boolean isCancelled() { + return this.future.isCancelled(); + } + + /** + * Blocks the orchestrator until this task to complete, and then returns its result. + * + * @return the result of the task + */ + public abstract V await(); + + /** + * Returns a new {@link Task} that, when this Task completes normally, + * is executed with this Task's result as the argument to the supplied function. + * + * @param fn the function to use to compute the value of the returned Task + * @param the function's return type + * @return the new Task + */ + public abstract Task thenApply(Function fn); + + /** + * Returns a new {@link Task} that, when this Task completes normally, + * is executed with this Task's result as the argument to the supplied action. + * + * @param fn the function to use to compute the value of the returned Task + * @return the new Task + */ + public abstract Task thenAccept(Consumer fn); +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/TaskActivity.java b/durabletask-client/src/main/java/io/dapr/durabletask/TaskActivity.java new file mode 100644 index 000000000..27e4291e9 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/TaskActivity.java @@ -0,0 +1,45 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +/** + * Common interface for task activity implementations. + * + *

Activities are the basic unit of work in a durable task orchestration. Activities are the tasks that are
+ * orchestrated in the business process. For example, you might create an orchestrator to process an order. The tasks
+ * may involve checking the inventory, charging the customer, and creating a shipment. Each task would be a separate
+ * activity. These activities may be executed serially, in parallel, or some combination of both.

+ * + *

Unlike task orchestrators, activities aren't restricted in the type of work you can do in them. Activity functions + * are frequently used to make network calls or run CPU intensive operations. An activity can also return data back to + * the orchestrator function. The Durable Task runtime guarantees that each called activity function will be executed + * at least once during an orchestration's execution.

+ * + *

Because activities only guarantee at least once execution, it's recommended that activity logic be implemented as + * idempotent whenever possible.

+ * + *

Activities are scheduled by orchestrators using one of the {@link TaskOrchestrationContext#callActivity} method + * overloads.

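A minimal sketch of an activity implementation (illustrative; the OrderItem input type and the inventory lookup are hypothetical):

// TaskActivity is a functional interface, so an activity can be a simple lambda.
TaskActivity checkInventory = activityContext -> {
    OrderItem item = activityContext.getInput(OrderItem.class); // hypothetical input type
    // Idempotent work: querying stock twice for the same item is harmless.
    return inventoryClient.isInStock(item.getSku());            // hypothetical service call
};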
+ */ +@FunctionalInterface +public interface TaskActivity { + /** + * Executes the activity logic and returns a value which will be serialized and returned to the calling orchestrator. + * + * @param ctx provides information about the current activity execution, like the activity's name and the input + * data provided to it by the orchestrator. + * @return any serializable value to be returned to the calling orchestrator. + */ + Object run(TaskActivityContext ctx); +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/TaskActivityContext.java b/durabletask-client/src/main/java/io/dapr/durabletask/TaskActivityContext.java new file mode 100644 index 000000000..b2043b51e --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/TaskActivityContext.java @@ -0,0 +1,51 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +/** + * Interface that provides {@link TaskActivity} implementations with activity context, such as an activity's name and + * its input. + */ +public interface TaskActivityContext { + /** + * Gets the name of the current task activity. + * + * @return the name of the current task activity + */ + String getName(); + + /** + * Gets the deserialized activity input. + * + * @param targetType the {@link Class} object associated with {@code T} + * @param the target type to deserialize the input into + * @return the deserialized activity input value + */ + T getInput(Class targetType); + + + /** + * Gets the execution id of the current task activity. + * + * @return the execution id of the current task activity + */ + String getTaskExecutionId(); + + /** + * Gets the task id of the current task activity. + * + * @return the task id of the current task activity + */ + int getTaskId(); +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/TaskActivityExecutor.java b/durabletask-client/src/main/java/io/dapr/durabletask/TaskActivityExecutor.java new file mode 100644 index 000000000..a8ef6c67e --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/TaskActivityExecutor.java @@ -0,0 +1,96 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package io.dapr.durabletask; + +import java.util.HashMap; +import java.util.logging.Logger; + +final class TaskActivityExecutor { + private final HashMap activityFactories; + private final DataConverter dataConverter; + private final Logger logger; + + public TaskActivityExecutor( + HashMap activityFactories, + DataConverter dataConverter, + Logger logger) { + this.activityFactories = activityFactories; + this.dataConverter = dataConverter; + this.logger = logger; + } + + public String execute(String taskName, String input, String taskExecutionId, int taskId) throws Throwable { + TaskActivityFactory factory = this.activityFactories.get(taskName); + if (factory == null) { + throw new IllegalStateException( + String.format("No activity task named '%s' is registered.", taskName)); + } + + TaskActivity activity = factory.create(); + if (activity == null) { + throw new IllegalStateException( + String.format("The task factory '%s' returned a null TaskActivity object.", taskName)); + } + + TaskActivityContextImpl context = new TaskActivityContextImpl(taskName, input, taskExecutionId, taskId); + + // Unhandled exceptions are allowed to escape + Object output = activity.run(context); + if (output != null) { + return this.dataConverter.serialize(output); + } + + return null; + } + + private class TaskActivityContextImpl implements TaskActivityContext { + private final String name; + private final String rawInput; + private final String taskExecutionId; + private final int taskId; + + private final DataConverter dataConverter = TaskActivityExecutor.this.dataConverter; + + public TaskActivityContextImpl(String activityName, String rawInput, String taskExecutionId, int taskId) { + this.name = activityName; + this.rawInput = rawInput; + this.taskExecutionId = taskExecutionId; + this.taskId = taskId; + } + + @Override + public String getName() { + return this.name; + } + + @Override + public T getInput(Class targetType) { + if (this.rawInput == null) { + return null; + } + + return this.dataConverter.deserialize(this.rawInput, targetType); + } + + @Override + public String getTaskExecutionId() { + return this.taskExecutionId; + } + + @Override + public int getTaskId() { + return this.taskId; + } + } +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/TaskActivityFactory.java b/durabletask-client/src/main/java/io/dapr/durabletask/TaskActivityFactory.java new file mode 100644 index 000000000..e3ef45a95 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/TaskActivityFactory.java @@ -0,0 +1,33 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +/** + * Factory interface for producing {@link TaskActivity} implementations. + */ +public interface TaskActivityFactory { + /** + * Gets the name of the activity this factory creates. + * + * @return the name of the activity + */ + String getName(); + + /** + * Creates a new instance of {@link TaskActivity}. 
+ * + * @return the created activity instance + */ + TaskActivity create(); +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/TaskCanceledException.java b/durabletask-client/src/main/java/io/dapr/durabletask/TaskCanceledException.java new file mode 100644 index 000000000..5b79882ed --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/TaskCanceledException.java @@ -0,0 +1,26 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +//@TODO: This should inherit from Exception, not TaskFailedException + +/** + * Represents a task cancellation, either because of a timeout or because of an explicit cancellation operation. + */ +public final class TaskCanceledException extends TaskFailedException { + // Only intended to be created within this package + TaskCanceledException(String message, String taskName, int taskId) { + super(message, taskName, taskId, new FailureDetails(TaskCanceledException.class.getName(), message, "", true)); + } +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/TaskFailedException.java b/durabletask-client/src/main/java/io/dapr/durabletask/TaskFailedException.java new file mode 100644 index 000000000..377eecb42 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/TaskFailedException.java @@ -0,0 +1,76 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +/** + * Exception that gets thrown when awaiting a {@link Task} for an activity or sub-orchestration that fails with an + * unhandled exception. + *

Detailed information associated with a particular task failure can be retrieved + * using the {@link #getErrorDetails()} method.

+ */ +public class TaskFailedException extends RuntimeException { + private final FailureDetails details; + private final String taskName; + private final int taskId; + + TaskFailedException(String taskName, int taskId, FailureDetails details) { + this(getExceptionMessage(taskName, taskId, details), taskName, taskId, details); + } + + TaskFailedException(String message, String taskName, int taskId, FailureDetails details) { + super(message); + this.taskName = taskName; + this.taskId = taskId; + this.details = details; + } + + /** + * Gets the ID of the failed task. + * + *

Each durable task (activities, timers, sub-orchestrations, etc.) scheduled by a task orchestrator has an + * auto-incrementing ID associated with it. This ID is used to distinguish tasks from one another, even if, for + * example, they are tasks that call the same activity. This ID can therefore be used to more easily correlate a + * specific task failure to a specific task.

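As an illustration (not part of the patch; the activity names are examples), an orchestrator can catch this exception around an await and react to the failure details:

try {
    ctx.callActivity("ChargeCustomer", orderId, String.class).await();
} catch (TaskFailedException e) {
    // Compensate or report using the captured failure information.
    FailureDetails details = e.getErrorDetails();
    ctx.callActivity("NotifyFailure",
        e.getTaskName() + " (#" + e.getTaskId() + "): " + details.getErrorMessage()).await();
}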
+ * + * @return the ID of the failed task + */ + public int getTaskId() { + return this.taskId; + } + + /** + * Gets the name of the failed task. + * + * @return the name of the failed task + */ + public String getTaskName() { + return this.taskName; + } + + /** + * Gets the details of the task failure, including exception information. + * + * @return the details of the task failure + */ + public FailureDetails getErrorDetails() { + return this.details; + } + + private static String getExceptionMessage(String taskName, int taskId, FailureDetails details) { + return String.format("Task '%s' (#%d) failed with an unhandled exception: %s", + taskName, + taskId, + details.getErrorMessage()); + } +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/TaskOptions.java b/durabletask-client/src/main/java/io/dapr/durabletask/TaskOptions.java new file mode 100644 index 000000000..e23ee54b7 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/TaskOptions.java @@ -0,0 +1,171 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +/** + * Options that can be used to control the behavior of orchestrator and activity task execution. + */ +public final class TaskOptions { + private final RetryPolicy retryPolicy; + private final RetryHandler retryHandler; + private final String appID; + + private TaskOptions(RetryPolicy retryPolicy, RetryHandler retryHandler, String appID) { + this.retryPolicy = retryPolicy; + this.retryHandler = retryHandler; + this.appID = appID; + } + + /** + * Creates a new builder for {@code TaskOptions}. + * + * @return a new builder instance + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Creates a new {@code TaskOptions} object with default values. + * + * @return a new TaskOptions instance with no configuration + */ + public static TaskOptions create() { + return new Builder().build(); + } + + /** + * Creates a new {@code TaskOptions} object from a {@link RetryPolicy}. + * + * @param retryPolicy the retry policy to use in the new {@code TaskOptions} object. + * @return a new TaskOptions instance with the specified retry policy + */ + public static TaskOptions withRetryPolicy(RetryPolicy retryPolicy) { + return new Builder().retryPolicy(retryPolicy).build(); + } + + /** + * Creates a new {@code TaskOptions} object from a {@link RetryHandler}. + * + * @param retryHandler the retry handler to use in the new {@code TaskOptions} object. + * @return a new TaskOptions instance with the specified retry handler + */ + public static TaskOptions withRetryHandler(RetryHandler retryHandler) { + return new Builder().retryHandler(retryHandler).build(); + } + + /** + * Creates a new {@code TaskOptions} object with the specified app ID. 
+ * + * @param appID the app ID to use for cross-app workflow routing + * @return a new TaskOptions instance with the specified app ID + */ + public static TaskOptions withAppID(String appID) { + return new Builder().appID(appID).build(); + } + + boolean hasRetryPolicy() { + return this.retryPolicy != null; + } + + /** + * Gets the configured {@link RetryPolicy} value or {@code null} if none was configured. + * + * @return the configured retry policy + */ + public RetryPolicy getRetryPolicy() { + return this.retryPolicy; + } + + boolean hasRetryHandler() { + return this.retryHandler != null; + } + + /** + * Gets the configured {@link RetryHandler} value or {@code null} if none was configured. + * + * @return the configured retry handler. + */ + public RetryHandler getRetryHandler() { + return this.retryHandler; + } + + /** + * Gets the configured app ID value or {@code null} if none was configured. + * + * @return the configured app ID + */ + public String getAppID() { + return this.appID; + } + + boolean hasAppID() { + return this.appID != null && !this.appID.isEmpty(); + } + + /** + * Builder for creating {@code TaskOptions} instances. + */ + public static final class Builder { + private RetryPolicy retryPolicy; + private RetryHandler retryHandler; + private String appID; + + private Builder() { + // Private constructor -enforces using TaskOptions.builder() + } + + /** + * Sets the retry policy for the task options. + * + * @param retryPolicy the retry policy to use + * @return this builder instance for method chaining + */ + public Builder retryPolicy(RetryPolicy retryPolicy) { + this.retryPolicy = retryPolicy; + return this; + } + + /** + * Sets the retry handler for the task options. + * + * @param retryHandler the retry handler to use + * @return this builder instance for method chaining + */ + public Builder retryHandler(RetryHandler retryHandler) { + this.retryHandler = retryHandler; + return this; + } + + /** + * Sets the app ID for cross-app workflow routing. + * + * @param appID the app ID to use + * @return this builder instance for method chaining + */ + public Builder appID(String appID) { + this.appID = appID; + return this; + } + + /** + * Builds a new {@code TaskOptions} instance with the configured values. + * + * @return a new TaskOptions instance + */ + public TaskOptions build() { + return new TaskOptions(this.retryPolicy, this.retryHandler, this.appID); + } + } +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/TaskOrchestration.java b/durabletask-client/src/main/java/io/dapr/durabletask/TaskOrchestration.java new file mode 100644 index 000000000..893531377 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/TaskOrchestration.java @@ -0,0 +1,82 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +/** + * Common interface for task orchestrator implementations. + * + *

Task orchestrators describe how actions are executed and the order in which actions are executed. Orchestrators + * don't call into external services or do complex computation directly. Rather, they delegate these tasks to + * activities, which perform the actual work.

+ * + *

Orchestrators can be scheduled using the {@link DurableTaskClient#scheduleNewOrchestrationInstance} method + * overloads. Orchestrators can also invoke child orchestrators using the + * {@link TaskOrchestrationContext#callSubOrchestrator} method overloads.

+ * + *

Orchestrators may be replayed multiple times to rebuild their local state after being reloaded into memory. + * Orchestrator code must therefore be deterministic to ensure no unexpected side effects from execution + * replay. To account for this behavior, there are several coding constraints to be aware of:

+ *
+ * <ul>
+ *   <li>
+ *     An orchestrator must not generate random numbers or random UUIDs, get the current date, read environment
+ *     variables, or do anything else that might result in a different value if the code is replayed in the future.
+ *     Activities and built-in methods on the {@link TaskOrchestrationContext} parameter, like
+ *     {@link TaskOrchestrationContext#getCurrentInstant()}, can be used to work around these restrictions.
+ *   </li>
+ *   <li>
+ *     Orchestrator logic must be executed on the orchestrator thread. Creating new threads or scheduling callbacks
+ *     onto background threads is forbidden and may result in failures or other unexpected behavior.
+ *   </li>
+ *   <li>
+ *     Avoid infinite loops as they could cause the application to run out of memory. Instead, ensure that loops are
+ *     bounded or use {@link TaskOrchestrationContext#continueAsNew} to restart an orchestrator with a new input.
+ *   </li>
+ *   <li>
+ *     Avoid logging directly in the orchestrator code because log messages will be duplicated on each replay.
+ *     Instead, check the value of the {@link TaskOrchestrationContext#getIsReplaying} method and write log messages
+ *     only when it is {@code false}.
+ *   </li>
+ * </ul>

Orchestrator code is tightly coupled with its execution history so special care must be taken when making changes + * to orchestrator code. For example, adding or removing activity tasks to an orchestrator's code may cause a + * mismatch between code and history for in-flight orchestrations. To avoid potential issues related to orchestrator + * versioning, consider applying the following strategies:

+ *
+ * <ul>
+ *   <li>
+ *     Deploy multiple versions of applications side-by-side allowing new code to run independently of old code.
+ *   </li>
+ *   <li>
+ *     Rather than changing existing orchestrators, create new orchestrators that implement the modified behavior.
+ *   </li>
+ *   <li>
+ *     Ensure all in-flight orchestrations are complete before applying code changes to existing orchestrator code.
+ *   </li>
+ *   <li>
+ *     If possible, only make changes to orchestrator code that won't impact its history or execution path. For
+ *     example, renaming variables or adding log statements have no impact on an orchestrator's execution path and
+ *     are safe to apply to existing orchestrations.
+ *   </li>
+ * </ul>
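A small sketch of an orchestrator that respects these constraints (illustrative only; the activity names, input types, and order-processing flow are example values, not part of this patch):

TaskOrchestration processOrder = ctx -> {
    String orderId = ctx.getInput(String.class);

    // All external work is delegated to activities; the orchestrator stays deterministic.
    boolean inStock = ctx.callActivity("CheckInventory", orderId, Boolean.class).await();
    if (!inStock) {
        ctx.complete("out-of-stock");
        return;
    }

    String receipt = ctx.callActivity("ChargeCustomer", orderId, String.class).await();
    ctx.callActivity("CreateShipment", orderId).await();
    ctx.complete(receipt);
};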
+ */ +@FunctionalInterface +public interface TaskOrchestration { + /** + * Executes the orchestrator logic. + * + * @param ctx provides access to methods for scheduling durable tasks and getting information about the current + * orchestration instance. + */ + void run(TaskOrchestrationContext ctx); +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/TaskOrchestrationContext.java b/durabletask-client/src/main/java/io/dapr/durabletask/TaskOrchestrationContext.java new file mode 100644 index 000000000..df0c95ec8 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/TaskOrchestrationContext.java @@ -0,0 +1,598 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +import javax.annotation.Nullable; +import java.time.Duration; +import java.time.Instant; +import java.time.ZonedDateTime; +import java.util.Arrays; +import java.util.List; +import java.util.UUID; + +/** + * Used by orchestrators to perform actions such as scheduling tasks, durable timers, waiting for external events, + * and for getting basic information about the current orchestration. + */ +public interface TaskOrchestrationContext { + /** + * Gets the name of the current task orchestration. + * + * @return the name of the current task orchestration + */ + String getName(); + + /** + * Gets the deserialized input of the current task orchestration. + * + * @param targetType the {@link Class} object associated with {@code V} + * @param the expected type of the orchestrator input + * @return the deserialized input as an object of type {@code V} or {@code null} if no input was provided. + */ + V getInput(Class targetType); + + /** + * Gets the unique ID of the current orchestration instance. + * + * @return the unique ID of the current orchestration instance + */ + String getInstanceId(); + + /** + * Gets the app ID of the current orchestration instance, if available. + * This is used for cross-app workflow routing. + * + * @return the app ID of the current orchestration instance, or null if not available + */ + String getAppId(); + + /** + * Gets the current orchestration time in UTC. + * + * @return the current orchestration time in UTC + */ + Instant getCurrentInstant(); + + /** + * Gets a value indicating whether the orchestrator is currently replaying a previous execution. + * + *

Orchestrator functions are "replayed" after being unloaded from memory to reconstruct local variable state.
+   * During a replay, previously executed tasks will be completed automatically with previously seen values
+   * that are stored in the orchestration history. Once the orchestrator reaches the point in the orchestrator code
+   * where it's no longer replaying existing history, this method will return {@code false}.

+ * + *

You can use this method if you have logic that needs to run only when not replaying. For example, + * certain types of application logging may become too noisy when duplicated as part of replay. The + * application code could check to see whether the function is being replayed and then issue the log statements + * when this value is {@code false}.

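For instance (an illustrative sketch; the logger and orderId variables are assumed to exist in the orchestrator):

// Emit the log line only on the first, non-replayed execution of this code path.
if (!ctx.getIsReplaying()) {
    logger.info("Inventory check passed for order " + orderId);
}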
+ * + * @return {@code true} if the orchestrator is replaying, otherwise {@code false} + */ + boolean getIsReplaying(); + + /** + * Returns a new {@code Task} that is completed when all tasks in {@code tasks} completes. + * See {@link #allOf(Task[])} for more detailed information. + * + * @param tasks the list of {@code Task} objects + * @param the return type of the {@code Task} objects + * @return a new {@code Task} that is completed when any of the given {@code Task}s complete + * @see #allOf(Task[]) + */ + Task> allOf(List> tasks); + + // TODO: Update the description of allOf to be more specific about the exception behavior. + + // https://github.io.dapr.durabletask-java/issues/54 + + /** + * Returns a new {@code Task} that is completed when all the given {@code Task}s complete. If any of the given + * {@code Task}s complete with an exception, the returned {@code Task} will also complete with + * an {@link CompositeTaskFailedException} containing details of the first encountered failure. + * The value of the returned {@code Task} is an ordered list of + * the return values of the given tasks. If no tasks are provided, returns a {@code Task} completed with value + * {@code null}. + * + *

This method is useful for awaiting the completion of a set of independent tasks before continuing to the next
+   * step in the orchestration, as in the following example:
+   *
+   * <pre>{@code
+   * Task<String> t1 = ctx.callActivity("MyActivity", String.class);
+   * Task<String> t2 = ctx.callActivity("MyActivity", String.class);
+   * Task<String> t3 = ctx.callActivity("MyActivity", String.class);
+   *
+   * List<String> orderedResults = ctx.allOf(t1, t2, t3).await();
+   * }</pre>
+ * + *

Exceptions in any of the given tasks result in an unchecked {@link CompositeTaskFailedException}.
+   * This exception can be inspected to obtain failure details of individual {@link Task}s.
+   *
+   * <pre>{@code
+   * try {
+   *     List<String> orderedResults = ctx.allOf(t1, t2, t3).await();
+   * } catch (CompositeTaskFailedException e) {
+   *     List<Exception> exceptions = e.getExceptions();
+   * }
+   * }</pre>
+ * + * @param tasks the {@code Task}s + * @param the return type of the {@code Task} objects + * @return the values of the completed {@code Task} objects in the same order as the source list + */ + default Task> allOf(Task... tasks) { + return this.allOf(Arrays.asList(tasks)); + } + + /** + * Returns a new {@code Task} that is completed when any of the tasks in {@code tasks} completes. + * See {@link #anyOf(Task[])} for more detailed information. + * + * @param tasks the list of {@code Task} objects + * @return a new {@code Task} that is completed when any of the given {@code Task}s complete + * @see #anyOf(Task[]) + */ + Task> anyOf(List> tasks); + + /** + * Returns a new {@code Task} that is completed when any of the given {@code Task}s complete. The value of the + * new {@code Task} is a reference to the completed {@code Task} object. If no tasks are provided, returns a + * {@code Task} that never completes. + * + *

This method is useful for waiting on multiple concurrent tasks and performing a task-specific operation when the
+   * first task completes, as in the following example:
+   *
+   * <pre>{@code
+   * Task<Void> event1 = ctx.waitForExternalEvent("Event1");
+   * Task<Void> event2 = ctx.waitForExternalEvent("Event2");
+   * Task<Void> event3 = ctx.waitForExternalEvent("Event3");
+   *
+   * Task<?> winner = ctx.anyOf(event1, event2, event3).await();
+   * if (winner == event1) {
+   *     // ...
+   * } else if (winner == event2) {
+   *     // ...
+   * } else if (winner == event3) {
+   *     // ...
+   * }
+   * }</pre>
+ * + *

The {@code anyOf} method can also be used for implementing long-running timeouts, as in the following example:
+   *
+   * <pre>{@code
+   * Task<Void> activityTask = ctx.callActivity("SlowActivity");
+   * Task<Void> timeoutTask = ctx.createTimer(Duration.ofMinutes(30));
+   *
+   * Task<?> winner = ctx.anyOf(activityTask, timeoutTask).await();
+   * if (winner == activityTask) {
+   *     // completion case
+   * } else {
+   *     // timeout case
+   * }
+   * }</pre>
+ * + * @param tasks the list of {@code Task} objects + * @return a new {@code Task} that is completed when any of the given {@code Task}s complete + */ + default Task> anyOf(Task... tasks) { + return this.anyOf(Arrays.asList(tasks)); + } + + /** + * Creates a durable timer that expires after the specified delay. + * + *

Specifying a long delay (for example, a delay of a few days or more) may result in the creation of multiple, + * internally-managed durable timers. The orchestration code doesn't need to be aware of this behavior. However, + * it may be visible in framework logs and the stored history state.

+ * + * @param delay the amount of time before the timer should expire + * @return a new {@code Task} that completes after the specified delay + */ + Task createTimer(Duration delay); + + /** + * Creates a durable timer that expires after the specified timestamp with specific zone. + * + *

Specifying a long delay (for example, a delay of a few days or more) may result in the creation of multiple, + * internally-managed durable timers. The orchestration code doesn't need to be aware of this behavior. However, + * it may be visible in framework logs and the stored history state.

+ * + * @param zonedDateTime timestamp with specific zone when the timer should expire + * @return a new {@code Task} that completes after the specified delay + */ + Task createTimer(ZonedDateTime zonedDateTime); + + /** + * Transitions the orchestration into the {@link OrchestrationRuntimeStatus#COMPLETED} state with the given output. + * + * @param output the serializable output of the completed orchestration + */ + void complete(Object output); + + /** + * Asynchronously invokes an activity by name and with the specified input value and returns a new {@link Task} + * that completes when the activity completes. If the activity completes successfully, the returned {@code Task}'s + * value will be the activity's output. If the activity fails, the returned {@code Task} will complete exceptionally + * with a {@link TaskFailedException}. + * + *

Activities are the basic unit of work in a durable task orchestration. Unlike orchestrators, which are not + * allowed to do any I/O or call non-deterministic APIs, activities have no implementation restrictions.

+ * + *

An activity may execute on the local machine or on a remote machine. The exact behavior depends on the underlying
+   * storage provider, which is responsible for distributing tasks across machines. In general, you should never make
+   * any assumptions about where an activity will run. You should also assume at-least-once execution guarantees for
+   * activities, meaning that an activity may be executed twice if, for example, there is a process failure before
+   * the activity's result is saved into storage.

+ * + *

Both the inputs and outputs of activities are serialized and stored in durable storage. It's highly recommended
+   * to not include any sensitive data in activity inputs or outputs. It's also recommended to not use large payloads
+   * for activity inputs and outputs, which can result in expensive serialization and network utilization. For data
+   * that cannot be cheaply or safely persisted to storage, it's recommended to instead pass references
+   * (for example, a URL to a storage blob) to the data and have activities fetch the data directly as part of their
+   * implementation.

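A brief sketch of that recommendation (the URL layout, activity name, and reportId variable are illustrative assumptions):

// Pass a reference to the data instead of the (potentially large) report itself;
// the activity downloads the content as part of its own implementation.
String reportUrl = "https://storage.example.com/reports/" + reportId + ".json";
String summary = ctx.callActivity("SummarizeReport", reportUrl, String.class).await();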
+ * + * @param name the name of the activity to call + * @param input the serializable input to pass to the activity + * @param options additional options that control the execution and processing of the activity + * @param returnType the expected class type of the activity output + * @param the expected type of the activity output + * @return a new {@link Task} that completes when the activity completes or fails + */ + Task callActivity(String name, Object input, TaskOptions options, Class returnType); + + /** + * Asynchronously invokes an activity by name and returns a new {@link Task} that completes when the activity + * completes. See {@link #callActivity(String, Object, TaskOptions, Class)} for a complete description. + * + * @param name the name of the activity to call + * @return a new {@link Task} that completes when the activity completes or fails + * @see #callActivity(String, Object, TaskOptions, Class) + */ + default Task callActivity(String name) { + return this.callActivity(name, Void.class); + } + + /** + * Asynchronously invokes an activity by name and with the specified input value and returns a new {@link Task} + * that completes when the activity completes. See {@link #callActivity(String, Object, TaskOptions, Class)} for a + * complete description. + * + * @param name the name of the activity to call + * @param input the serializable input to pass to the activity + * @return a new {@link Task} that completes when the activity completes or fails + */ + default Task callActivity(String name, Object input) { + return this.callActivity(name, input, null, Void.class); + } + + /** + * Asynchronously invokes an activity by name and returns a new {@link Task} that completes when the activity + * completes. If the activity completes successfully, the returned {@code Task}'s value will be the activity's + * output. See {@link #callActivity(String, Object, TaskOptions, Class)} for a complete description. + * + * @param name the name of the activity to call + * @param returnType the expected class type of the activity output + * @param the expected type of the activity output + * @return a new {@link Task} that completes when the activity completes or fails + */ + default Task callActivity(String name, Class returnType) { + return this.callActivity(name, null, null, returnType); + } + + /** + * Asynchronously invokes an activity by name and with the specified input value and returns a new {@link Task} + * that completes when the activity completes.If the activity completes successfully, the returned {@code Task}'s + * value will be the activity's output. See {@link #callActivity(String, Object, TaskOptions, Class)} for a + * complete description. + * + * @param name the name of the activity to call + * @param input the serializable input to pass to the activity + * @param returnType the expected class type of the activity output + * @param the expected type of the activity output + * @return a new {@link Task} that completes when the activity completes or fails + */ + default Task callActivity(String name, Object input, Class returnType) { + return this.callActivity(name, input, null, returnType); + } + + /** + * Asynchronously invokes an activity by name and with the specified input value and returns a new {@link Task} + * that completes when the activity completes. See {@link #callActivity(String, Object, TaskOptions, Class)} for a + * complete description. 
+ * + * @param name the name of the activity to call + * @param input the serializable input to pass to the activity + * @param options additional options that control the execution and processing of the activity + * @return a new {@link Task} that completes when the activity completes or fails + */ + default Task callActivity(String name, Object input, TaskOptions options) { + return this.callActivity(name, input, options, Void.class); + } + + /** + * Restarts the orchestration with a new input and clears its history. See {@link #continueAsNew(Object, boolean)} + * for a full description. + * + * @param input the serializable input data to re-initialize the instance with + */ + default void continueAsNew(Object input) { + this.continueAsNew(input, true); + } + + /** + * Restarts the orchestration with a new input and clears its history. + * + *

This method is primarily designed for eternal orchestrations, which are orchestrations that + * may not ever complete. It works by restarting the orchestration, providing it with a new input, + * and truncating the existing orchestration history. It allows an orchestration to continue + * running indefinitely without having its history grow unbounded. The benefits of periodically + * truncating history include decreased memory usage, decreased storage volumes, and shorter orchestrator + * replays when rebuilding state.

+ * + *

The results of any incomplete tasks will be discarded when an orchestrator calls {@code continueAsNew}.
+   * For example, if a timer is scheduled and then {@code continueAsNew} is called before the timer fires, the timer
+   * event will be discarded. The only exception to this is external events. By default, if an external event is
+   * received by an orchestration but not yet processed, the event is saved in the orchestration state until it is
+   * received by a call to {@link #waitForExternalEvent}. These events will remain in memory
+   * even after an orchestrator restarts using {@code continueAsNew}. This behavior can be disabled by specifying
+   * {@code false} for the {@code preserveUnprocessedEvents} parameter value.

+ * + *

Orchestrator implementations should complete immediately after calling the {@code continueAsNew} method.

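An eternal-orchestration sketch (illustrative only; the activity name, the integer iteration counter, and the one-hour interval are example values):

TaskOrchestration periodicCleanup = ctx -> {
    int iteration = ctx.getInput(Integer.class);

    ctx.callActivity("RunCleanup", iteration).await();
    ctx.createTimer(Duration.ofHours(1)).await();

    // Restart with fresh history; unprocessed external events are carried over by default.
    ctx.continueAsNew(iteration + 1);
};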
+ * + * @param input the serializable input data to re-initialize the instance with + * @param preserveUnprocessedEvents {@code true} to push unprocessed external events into the new orchestration + * history, otherwise {@code false} + */ + void continueAsNew(Object input, boolean preserveUnprocessedEvents); + + /** + * Create a new Uuid that is safe for replay within an orchestration or operation. + * + *

The default implementation of this method creates a name-based Uuid + * using the algorithm from RFC 4122 §4.3. The name input used to generate + * this value is a combination of the orchestration instance ID and an + * internally managed sequence number. + *

+ * + * @return a deterministic Uuid + */ + default UUID newUuid() { + throw new RuntimeException("No implementation found."); + } + + /** + * Sends an external event to another orchestration instance. + * + * @param instanceID the unique ID of the receiving orchestration instance. + * @param eventName the name of the event to send + */ + default void sendEvent(String instanceID, String eventName) { + this.sendEvent(instanceID, eventName, null); + } + + /** + * Sends an external event to another orchestration instance. + * + * @param instanceId the unique ID of the receiving orchestration instance. + * @param eventName the name of the event to send + * @param eventData the payload of the event to send + */ + void sendEvent(String instanceId, String eventName, Object eventData); + + /** + * Asynchronously invokes another orchestrator as a sub-orchestration and returns a {@link Task} that completes + * when the sub-orchestration completes. + * + *

See {@link #callSubOrchestrator(String, Object, String, TaskOptions, Class)} for a full description.

+ * + * @param name the name of the orchestrator to invoke + * @return a new {@link Task} that completes when the sub-orchestration completes or fails + * @see #callSubOrchestrator(String, Object, String, TaskOptions, Class) + */ + default Task callSubOrchestrator(String name) { + return this.callSubOrchestrator(name, null); + } + + /** + * Asynchronously invokes another orchestrator as a sub-orchestration and returns a {@link Task} that completes + * when the sub-orchestration completes. + * + *

See {@link #callSubOrchestrator(String, Object, String, TaskOptions, Class)} for a full description.

+ * + * @param name the name of the orchestrator to invoke + * @param input the serializable input to send to the sub-orchestration + * @return a new {@link Task} that completes when the sub-orchestration completes or fails + */ + default Task callSubOrchestrator(String name, Object input) { + return this.callSubOrchestrator(name, input, null); + } + + /** + * Asynchronously invokes another orchestrator as a sub-orchestration and returns a {@link Task} that completes + * when the sub-orchestration completes. + * + *

See {@link #callSubOrchestrator(String, Object, String, TaskOptions, Class)} for a full description.

+ * + * @param name the name of the orchestrator to invoke + * @param input the serializable input to send to the sub-orchestration + * @param returnType the expected class type of the sub-orchestration output + * @param the expected type of the sub-orchestration output + * @return a new {@link Task} that completes when the sub-orchestration completes or fails + */ + default Task callSubOrchestrator(String name, Object input, Class returnType) { + return this.callSubOrchestrator(name, input, null, returnType); + } + + /** + * Asynchronously invokes another orchestrator as a sub-orchestration and returns a {@link Task} that completes + * when the sub-orchestration completes. + * + *

See {@link #callSubOrchestrator(String, Object, String, TaskOptions, Class)} for a full description.

+ * + * @param name the name of the orchestrator to invoke + * @param input the serializable input to send to the sub-orchestration + * @param instanceID the unique ID of the sub-orchestration + * @param returnType the expected class type of the sub-orchestration output + * @param the expected type of the sub-orchestration output + * @return a new {@link Task} that completes when the sub-orchestration completes or fails + */ + default Task callSubOrchestrator(String name, Object input, String instanceID, Class returnType) { + return this.callSubOrchestrator(name, input, instanceID, null, returnType); + } + + /** + * Asynchronously invokes another orchestrator as a sub-orchestration and returns a {@link Task} that completes + * when the sub-orchestration completes. + * + *

See {@link #callSubOrchestrator(String, Object, String, TaskOptions, Class)} for a full description.

+ * + * @param name the name of the orchestrator to invoke + * @param input the serializable input to send to the sub-orchestration + * @param instanceID the unique ID of the sub-orchestration + * @param options additional options that control the execution and processing of the activity + * @return a new {@link Task} that completes when the sub-orchestration completes or fails + */ + default Task callSubOrchestrator(String name, Object input, String instanceID, TaskOptions options) { + return this.callSubOrchestrator(name, input, instanceID, options, Void.class); + } + + /** + * Asynchronously invokes another orchestrator as a sub-orchestration and returns a {@link Task} that completes + * when the sub-orchestration completes. If the sub-orchestration completes successfully, the returned + * {@code Task}'s value will be the activity's output. If the sub-orchestration fails, the returned {@code Task} + * will complete exceptionally with a {@link TaskFailedException}. + * + *

A sub-orchestration has its own instance ID, history, and status that are independent of the parent orchestrator + * that started it. There are many advantages to breaking down large orchestrations into sub-orchestrations:

+ *
    + *
+ * <ul>
+ *   <li>Splitting large orchestrations into a series of smaller sub-orchestrations can make code more maintainable.</li>
+ *   <li>Distributing orchestration logic across multiple compute nodes concurrently is useful if
+ *       orchestration logic otherwise needs to coordinate a lot of tasks.</li>
+ *   <li>Memory usage and CPU overhead can be reduced by keeping the history of parent orchestrations smaller.</li>
+ * </ul>
+ * + *

The disadvantage is that there is overhead associated with starting a sub-orchestration and processing its + * output. This is typically only an issue for very small orchestrations.

+ * + *

Because sub-orchestrations are independent of their parents, terminating a parent orchestration does not affect + * any sub-orchestrations. Sub-orchestrations must be terminated independently using their unique instance ID, + * which is specified using the {@code instanceID} parameter.
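Illustrative sketch (editor's note, not part of this patch): invoking a child orchestrator with an explicit instance ID so it can be queried or terminated independently of the parent. "ProcessOrder", {@code order}, and {@code OrderResult} are hypothetical names.

    // Derive a deterministic child instance ID from the parent's ID.
    String childInstanceId = ctx.getInstanceId() + "-order-1";
    OrderResult result = ctx.callSubOrchestrator(
        "ProcessOrder",      // name of a registered child orchestrator (assumed)
        order,               // serializable input for the child
        childInstanceId,     // explicit sub-orchestration instance ID
        null,                // no TaskOptions: no retry policy, same app
        OrderResult.class).await();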

+ * + * @param name the name of the orchestrator to invoke + * @param input the serializable input to send to the sub-orchestration + * @param instanceID the unique ID of the sub-orchestration + * @param options additional options that control the execution and processing of the activity + * @param returnType the expected class type of the sub-orchestration output + * @param the expected type of the sub-orchestration output + * @return a new {@link Task} that completes when the sub-orchestration completes or fails + */ + Task callSubOrchestrator( + String name, + @Nullable Object input, + @Nullable String instanceID, + @Nullable TaskOptions options, + Class returnType); + + /** + * Waits for an event to be raised named {@code name} and returns a {@link Task} that completes when the event is + * received or is canceled when {@code timeout} expires. + * + *

External clients can raise events to a waiting orchestration instance using the + * {@link DurableTaskClient#raiseEvent} method.

+ * + *

If the current orchestration is not yet waiting for an event named {@code name}, then the event will be saved in + * the orchestration instance state and dispatched immediately when this method is called. This event saving occurs + * even if the current orchestrator cancels the wait operation before the event is received.

+ * + *

Orchestrators can wait for the same event name multiple times, so waiting for multiple events with the same name + * is allowed. Each external event received by an orchestrator will complete just one task returned by this method. + *
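Illustrative sketch (editor's note, not part of this patch): a human-approval step that waits up to 24 hours for an "Approval" event and escalates when the timeout expires. The activity names are hypothetical and the three-argument {@code callActivity} overload is assumed.

    try {
      Boolean approved = ctx.waitForExternalEvent("Approval", Duration.ofHours(24), Boolean.class).await();
      ctx.callActivity(Boolean.TRUE.equals(approved) ? "CompleteRequest" : "RejectRequest", null, Void.class).await();
    } catch (TaskCanceledException timedOut) {
      // The 24-hour timeout elapsed before the event arrived.
      ctx.callActivity("EscalateToManager", null, Void.class).await();
    }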

+ * + * @param name the case-insensitive name of the event to wait for + * @param timeout the amount of time to wait before canceling the returned {@code Task} + * @param dataType the expected class type of the event data payload + * @param the expected type of the event data payload + * @return a new {@link Task} that completes when the external event is received or when {@code timeout} expires + * @throws TaskCanceledException if the specified {@code timeout} value expires before the event is received + */ + Task waitForExternalEvent(String name, Duration timeout, Class dataType) throws TaskCanceledException; + + /** + * Waits for an event to be raised named {@code name} and returns a {@link Task} that completes when the event is + * received or is canceled when {@code timeout} expires. + * + *

See {@link #waitForExternalEvent(String, Duration, Class)} for a full description.

+ * + * @param name the case-insensitive name of the event to wait for + * @param timeout the amount of time to wait before canceling the returned {@code Task} + * @return a new {@link Task} that completes when the external event is received or when {@code timeout} expires + * @throws TaskCanceledException if the specified {@code timeout} value expires before the event is received + */ + default Task waitForExternalEvent(String name, Duration timeout) throws TaskCanceledException { + return this.waitForExternalEvent(name, timeout, Void.class); + } + + /** + * Waits for an event to be raised named {@code name} and returns a {@link Task} that completes when the event is + * received. + * + *

See {@link #waitForExternalEvent(String, Duration, Class)} for a full description.

+ * + * @param name the case-insensitive name of the event to wait for + * @return a new {@link Task} that completes when the external event is received + */ + default Task waitForExternalEvent(String name) { + return this.waitForExternalEvent(name, Void.class); + } + + /** + * Waits for an event to be raised named {@code name} and returns a {@link Task} that completes when the event is + * received. + * + *

See {@link #waitForExternalEvent(String, Duration, Class)} for a full description.

+ * + * @param name the case-insensitive name of the event to wait for + * @param dataType the expected class type of the event data payload + * @param the expected type of the event data payload + * @return a new {@link Task} that completes when the external event is received + */ + default Task waitForExternalEvent(String name, Class dataType) { + try { + return this.waitForExternalEvent(name, null, dataType); + } catch (TaskCanceledException e) { + // This should never happen because the timeout is unlimited (null) + throw new RuntimeException("An unexpected exception was thrown while waiting for an external event.", e); + } + } + + /** + * Assigns a custom status value to the current orchestration. + * + *

The {@code customStatus} value is serialized and stored in orchestration state and will be made available to the + * orchestration status query APIs, such as {@link DurableTaskClient#getInstanceMetadata}. The serialized value + * must not exceed 16 KB of UTF-16 encoded text.

+ * + *

Use {@link #clearCustomStatus()} to remove the custom status value from the orchestration state.
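Illustrative sketch (editor's note, not part of this patch): reporting progress to status queries while a multi-step orchestration runs. The activity names are hypothetical and the three-argument {@code callActivity} overload is assumed.

    ctx.setCustomStatus("step 1 of 2: exporting data");
    ctx.callActivity("ExportData", null, Void.class).await();
    ctx.setCustomStatus("step 2 of 2: uploading archive");
    ctx.callActivity("UploadArchive", null, Void.class).await();
    // Clear the status when done so queries no longer see stale progress.
    ctx.clearCustomStatus();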

+ * + * @param customStatus A serializable value to assign as the custom status value. + */ + void setCustomStatus(Object customStatus); + + /** + * Clears the orchestration's custom status. + */ + void clearCustomStatus(); +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/TaskOrchestrationExecutor.java b/durabletask-client/src/main/java/io/dapr/durabletask/TaskOrchestrationExecutor.java new file mode 100644 index 000000000..7a3436b03 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/TaskOrchestrationExecutor.java @@ -0,0 +1,1515 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +import com.google.protobuf.StringValue; +import com.google.protobuf.Timestamp; +import io.dapr.durabletask.implementation.protobuf.OrchestratorService; +import io.dapr.durabletask.implementation.protobuf.OrchestratorService.ScheduleTaskAction.Builder; +import io.dapr.durabletask.interruption.ContinueAsNewInterruption; +import io.dapr.durabletask.interruption.OrchestratorBlockedException; +import io.dapr.durabletask.util.UuidGenerator; + +import javax.annotation.Nullable; +import java.time.Duration; +import java.time.Instant; +import java.time.ZonedDateTime; +import java.util.ArrayDeque; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedHashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Queue; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.CancellationException; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.function.Consumer; +import java.util.function.Function; +import java.util.function.IntFunction; +import java.util.logging.Logger; + +final class TaskOrchestrationExecutor { + + private static final String EMPTY_STRING = ""; + private final HashMap orchestrationFactories; + private final DataConverter dataConverter; + private final Logger logger; + private final Duration maximumTimerInterval; + private final String appId; + + public TaskOrchestrationExecutor( + HashMap orchestrationFactories, + DataConverter dataConverter, + Duration maximumTimerInterval, + Logger logger, + String appId) { + this.orchestrationFactories = orchestrationFactories; + this.dataConverter = dataConverter; + this.maximumTimerInterval = maximumTimerInterval; + this.logger = logger; + this.appId = appId; // extracted from router + } + + public TaskOrchestratorResult execute(List pastEvents, + List newEvents) { + ContextImplTask context = new ContextImplTask(pastEvents, newEvents); + + boolean completed = false; + try { + // Play through the history events until either we've played through everything + // or we receive a yield signal + while (context.processNextEvent()) { + /* no method body */ + } + completed = true; + logger.finest("The orchestrator execution completed normally"); + } catch (OrchestratorBlockedException 
orchestratorBlockedException) { + logger.fine("The orchestrator has yielded and will await for new events."); + } catch (ContinueAsNewInterruption continueAsNewInterruption) { + logger.fine("The orchestrator has continued as new."); + context.complete(null); + } catch (Exception e) { + // The orchestrator threw an unhandled exception - fail it + // TODO: What's the right way to log this? + logger.warning("The orchestrator failed with an unhandled exception: " + e.toString()); + context.fail(new FailureDetails(e)); + } + + if ((context.continuedAsNew && !context.isComplete) || (completed && context.pendingActions.isEmpty() + && !context.waitingForEvents())) { + // There are no further actions for the orchestrator to take so auto-complete the orchestration. + context.complete(null); + } + + return new TaskOrchestratorResult(context.pendingActions.values(), context.getCustomStatus()); + } + + private class ContextImplTask implements TaskOrchestrationContext { + + private String orchestratorName; + private String rawInput; + private String instanceId; + private Instant currentInstant; + private boolean isComplete; + private boolean isSuspended; + private boolean isReplaying = true; + private int newUuidCounter; + private String appId; + + // LinkedHashMap to maintain insertion order when returning the list of pending actions + private final Map pendingActions = new LinkedHashMap<>(); + private final Map> openTasks = new HashMap<>(); + private final Map>> outstandingEvents = new LinkedHashMap<>(); + private final List unprocessedEvents = new LinkedList<>(); + private final Queue eventsWhileSuspended = new ArrayDeque<>(); + private final DataConverter dataConverter = TaskOrchestrationExecutor.this.dataConverter; + private final Duration maximumTimerInterval = TaskOrchestrationExecutor.this.maximumTimerInterval; + private final Logger logger = TaskOrchestrationExecutor.this.logger; + private final OrchestrationHistoryIterator historyEventPlayer; + private int sequenceNumber; + private boolean continuedAsNew; + private Object continuedAsNewInput; + private boolean preserveUnprocessedEvents; + private Object customStatus; + + public ContextImplTask(List pastEvents, + List newEvents) { + this.historyEventPlayer = new OrchestrationHistoryIterator(pastEvents, newEvents); + } + + @Override + public String getName() { + // TODO: Throw if name is null + return this.orchestratorName; + } + + private void setName(String name) { + // TODO: Throw if name is not null + this.orchestratorName = name; + } + + private void setInput(String rawInput) { + this.rawInput = rawInput; + } + + @Override + public T getInput(Class targetType) { + if (this.rawInput == null || this.rawInput.length() == 0) { + return null; + } + + return this.dataConverter.deserialize(this.rawInput, targetType); + } + + @Override + public String getInstanceId() { + // TODO: Throw if instance ID is null + return this.instanceId; + } + + private void setInstanceId(String instanceId) { + // TODO: Throw if instance ID is not null + this.instanceId = instanceId; + } + + @Override + public String getAppId() { + return this.appId; + } + + private void setAppId(String appId) { + this.appId = appId; + } + + @Override + public Instant getCurrentInstant() { + // TODO: Throw if instant is null + return this.currentInstant; + } + + private void setCurrentInstant(Instant instant) { + // This will be set multiple times as the orchestration progresses + this.currentInstant = instant; + } + + private String getCustomStatus() { + return this.customStatus != 
null ? this.dataConverter.serialize(this.customStatus) : EMPTY_STRING; + } + + @Override + public void setCustomStatus(Object customStatus) { + this.customStatus = customStatus; + } + + @Override + public void clearCustomStatus() { + this.setCustomStatus(null); + } + + @Override + public boolean getIsReplaying() { + return this.isReplaying; + } + + private void setDoneReplaying() { + this.isReplaying = false; + } + + public Task completedTask(V value) { + CompletableTask task = new CompletableTask<>(); + task.complete(value); + return task; + } + + @Override + public Task> allOf(List> tasks) { + Helpers.throwIfArgumentNull(tasks, "tasks"); + + CompletableFuture[] futures = tasks.stream() + .map(t -> t.future) + .toArray((IntFunction[]>) CompletableFuture[]::new); + + Function> resultPath = x -> { + List results = new ArrayList<>(futures.length); + + // All futures are expected to be completed at this point + for (CompletableFuture cf : futures) { + try { + results.add(cf.get()); + } catch (Exception ex) { + results.add(null); + } + } + return results; + }; + + Function> exceptionPath = throwable -> { + ArrayList exceptions = new ArrayList<>(futures.length); + for (CompletableFuture cf : futures) { + try { + cf.get(); + } catch (ExecutionException ex) { + exceptions.add((Exception) ex.getCause()); + } catch (Exception ex) { + exceptions.add(ex); + } + } + throw new CompositeTaskFailedException( + String.format( + "%d out of %d tasks failed with an exception. See the exceptions list for details.", + exceptions.size(), + futures.length), + exceptions); + }; + CompletableFuture> future = CompletableFuture.allOf(futures) + .thenApply(resultPath) + .exceptionally(exceptionPath); + + return new CompoundTask<>(tasks, future); + } + + @Override + public Task> anyOf(List> tasks) { + Helpers.throwIfArgumentNull(tasks, "tasks"); + + CompletableFuture[] futures = tasks.stream() + .map(t -> t.future) + .toArray((IntFunction[]>) CompletableFuture[]::new); + + CompletableFuture> future = CompletableFuture.anyOf(futures).thenApply(x -> { + // Return the first completed task in the list. Unlike the implementation in other languages, + // this might not necessarily be the first task that completed, so calling code shouldn't make + // assumptions about this. Note that changing this behavior later could be breaking. + for (Task task : tasks) { + if (task.isDone()) { + return task; + } + } + + // Should never get here + return completedTask(null); + }); + + return new CompoundTask(tasks, future); + } + + @Override + public Task callActivity( + String name, + @Nullable Object input, + @Nullable TaskOptions options, + Class returnType) { + Helpers.throwIfOrchestratorComplete(this.isComplete); + Helpers.throwIfArgumentNull(name, "name"); + Helpers.throwIfArgumentNull(returnType, "returnType"); + + if (input instanceof TaskOptions) { + throw new IllegalArgumentException("TaskOptions cannot be used as an input. 
" + + "Did you call the wrong method overload?"); + } + + String serializedInput = this.dataConverter.serialize(input); + Builder scheduleTaskBuilder = OrchestratorService.ScheduleTaskAction.newBuilder().setName(name) + .setTaskExecutionId(newUuid().toString()); + if (serializedInput != null) { + scheduleTaskBuilder.setInput(StringValue.of(serializedInput)); + } + + // Add router information for cross-app routing + // Router always has a source app ID from EXECUTIONSTARTED event + OrchestratorService.TaskRouter.Builder routerBuilder = OrchestratorService.TaskRouter.newBuilder() + .setSourceAppID(this.appId); + + // Add target app ID if specified in options + if (options != null && options.hasAppID()) { + String targetAppId = options.getAppID(); + OrchestratorService.TaskRouter router = OrchestratorService.TaskRouter.newBuilder() + .setSourceAppID(this.appId) + .setTargetAppID(targetAppId) + .build(); + scheduleTaskBuilder.setRouter(router); + this.logger.fine(() -> String.format( + "cross app routing detected: source=%s, target=%s", + this.appId, targetAppId)); + } + TaskFactory taskFactory = () -> { + int id = this.sequenceNumber++; + OrchestratorService.ScheduleTaskAction scheduleTaskAction = scheduleTaskBuilder.build(); + OrchestratorService.OrchestratorAction.Builder actionBuilder = OrchestratorService.OrchestratorAction + .newBuilder() + .setId(id) + .setScheduleTask(scheduleTaskBuilder); + if (options != null && options.hasAppID()) { + String targetAppId = options.getAppID(); + OrchestratorService.TaskRouter actionRouter = OrchestratorService.TaskRouter.newBuilder() + .setSourceAppID(this.appId) + .setTargetAppID(targetAppId) + .build(); + actionBuilder.setRouter(actionRouter); + } + this.pendingActions.put(id, actionBuilder.build()); + + if (!this.isReplaying) { + this.logger.fine(() -> String.format( + "%s: calling activity '%s' (#%d) with serialized input: %s", + this.instanceId, + name, + id, + serializedInput != null ? serializedInput : "(null)")); + } + + CompletableTask task = new CompletableTask<>(); + TaskRecord record = new TaskRecord<>(task, name, returnType); + this.openTasks.put(id, record); + return task; + }; + + return this.createAppropriateTask(taskFactory, options); + } + + @Override + public void continueAsNew(Object input, boolean preserveUnprocessedEvents) { + Helpers.throwIfOrchestratorComplete(this.isComplete); + + this.continuedAsNew = true; + this.continuedAsNewInput = input; + this.preserveUnprocessedEvents = preserveUnprocessedEvents; + + // The ContinueAsNewInterruption exception allows the orchestration to complete immediately and return back + // to the sidecar. + // We can send the current set of actions back to the worker and wait for new events to come in. + // This is *not* an exception - it's a normal part of orchestrator control flow. + throw new ContinueAsNewInterruption( + "The orchestrator invoked continueAsNew. 
This Throwable should never be caught by user code."); + } + + @Override + public UUID newUuid() { + final int version = 5; + final String hashV5 = "SHA-1"; + final String dnsNameSpace = "9e952958-5e33-4daf-827f-2fa12937b875"; + final String name = new StringBuilder(this.instanceId) + .append("-") + .append(this.currentInstant) + .append("-") + .append(this.newUuidCounter).toString(); + this.newUuidCounter++; + return UuidGenerator.generate(version, hashV5, UUID.fromString(dnsNameSpace), name); + } + + @Override + public void sendEvent(String instanceId, String eventName, Object eventData) { + Helpers.throwIfOrchestratorComplete(this.isComplete); + Helpers.throwIfArgumentNullOrWhiteSpace(instanceId, "instanceId"); + + int id = this.sequenceNumber++; + String serializedEventData = this.dataConverter.serialize(eventData); + OrchestratorService.OrchestrationInstance.Builder orchestrationInstanceBuilder = + OrchestratorService.OrchestrationInstance.newBuilder() + .setInstanceId(instanceId); + OrchestratorService.SendEventAction.Builder builder = OrchestratorService + .SendEventAction.newBuilder().setInstance(orchestrationInstanceBuilder) + .setName(eventName); + if (serializedEventData != null) { + builder.setData(StringValue.of(serializedEventData)); + } + OrchestratorService.OrchestratorAction.Builder actionBuilder = OrchestratorService.OrchestratorAction.newBuilder() + .setId(id) + .setSendEvent(builder); + + this.pendingActions.put(id, actionBuilder.build()); + + if (!this.isReplaying) { + this.logger.fine(() -> String.format( + "%s: sending event '%s' (#%d) with serialized event data: %s", + this.instanceId, + eventName, + id, + serializedEventData != null ? serializedEventData : "(null)")); + } + } + + @Override + public Task callSubOrchestrator( + String name, + @Nullable Object input, + @Nullable String instanceId, + @Nullable TaskOptions options, + Class returnType) { + Helpers.throwIfOrchestratorComplete(this.isComplete); + Helpers.throwIfArgumentNull(name, "name"); + Helpers.throwIfArgumentNull(returnType, "returnType"); + + if (input instanceof TaskOptions) { + throw new IllegalArgumentException("TaskOptions cannot be used as an input. " + + "Did you call the wrong method overload?"); + } + + String serializedInput = this.dataConverter.serialize(input); + OrchestratorService.CreateSubOrchestrationAction.Builder createSubOrchestrationActionBuilder = + OrchestratorService.CreateSubOrchestrationAction + .newBuilder().setName(name); + if (serializedInput != null) { + createSubOrchestrationActionBuilder.setInput(StringValue.of(serializedInput)); + } + + if (instanceId == null) { + instanceId = this.newUuid().toString(); + } + createSubOrchestrationActionBuilder.setInstanceId(instanceId); + + // TODO: @cicoyle - add suborchestration cross app logic here when its supported + TaskFactory taskFactory = () -> { + int id = this.sequenceNumber++; + this.pendingActions.put(id, OrchestratorService.OrchestratorAction.newBuilder() + .setId(id) + .setCreateSubOrchestration(createSubOrchestrationActionBuilder) + .build()); + + if (!this.isReplaying) { + this.logger.fine(() -> String.format( + "%s: calling sub-orchestration '%s' (#%d) with serialized input: %s", + this.instanceId, + name, + id, + serializedInput != null ? 
serializedInput : "(null)")); + } + + CompletableTask task = new CompletableTask<>(); + TaskRecord record = new TaskRecord<>(task, name, returnType); + this.openTasks.put(id, record); + return task; + }; + + return this.createAppropriateTask(taskFactory, options); + } + + private Task createAppropriateTask(TaskFactory taskFactory, TaskOptions options) { + // Retry policies and retry handlers will cause us to return a RetriableTask + if (options != null && (options.hasRetryPolicy() || options.hasRetryHandler())) { + return new RetriableTask(this, taskFactory, options.getRetryPolicy(), options.getRetryHandler()); + } else { + // Return a single vanilla task without any wrapper + return taskFactory.create(); + } + } + + public Task waitForExternalEvent(String name, Duration timeout, Class dataType) { + Helpers.throwIfOrchestratorComplete(this.isComplete); + Helpers.throwIfArgumentNull(name, "name"); + Helpers.throwIfArgumentNull(dataType, "dataType"); + + int id = this.sequenceNumber++; + + CompletableTask eventTask = new ExternalEventTask<>(name, id, timeout); + + // Check for a previously received event with the same name + for (OrchestratorService.HistoryEvent e : this.unprocessedEvents) { + OrchestratorService.EventRaisedEvent existing = e.getEventRaised(); + if (name.equalsIgnoreCase(existing.getName())) { + String rawEventData = existing.getInput().getValue(); + V data = this.dataConverter.deserialize(rawEventData, dataType); + eventTask.complete(data); + this.unprocessedEvents.remove(e); + return eventTask; + } + } + + boolean hasTimeout = !Helpers.isInfiniteTimeout(timeout); + + // Immediately cancel the task and return if the timeout is zero. + if (hasTimeout && timeout.isZero()) { + eventTask.cancel(); + return eventTask; + } + + // Add this task to the list of tasks waiting for an external event. + TaskRecord record = new TaskRecord<>(eventTask, name, dataType); + Queue> eventQueue = this.outstandingEvents.computeIfAbsent(name, k -> new LinkedList<>()); + eventQueue.add(record); + + // If a non-infinite timeout is specified, schedule an internal durable timer. + // If the timer expires and the external event task hasn't yet completed, we'll cancel the task. + if (hasTimeout) { + this.createTimer(timeout).future.thenRun(() -> { + if (!eventTask.isDone()) { + // Book-keeping - remove the task record for the canceled task + eventQueue.removeIf(t -> t.task == eventTask); + if (eventQueue.isEmpty()) { + this.outstandingEvents.remove(name); + } + + eventTask.cancel(); + } + }); + } + + return eventTask; + } + + private void handleTaskScheduled(OrchestratorService.HistoryEvent e) { + int taskId = e.getEventId(); + + OrchestratorService.TaskScheduledEvent taskScheduled = e.getTaskScheduled(); + + // The history shows that this orchestrator created a durable task in a previous execution. + // We can therefore remove it from the map of pending actions. If we can't find the pending + // action, then we assume a non-deterministic code violation in the orchestrator. + OrchestratorService.OrchestratorAction taskAction = this.pendingActions.remove(taskId); + if (taskAction == null) { + String message = String.format( + "Non-deterministic orchestrator detected: a history event scheduling an activity task with sequence " + + "ID %d and name '%s' was replayed but the current orchestrator implementation didn't actually " + + "schedule this task. 
Was a change made to the orchestrator code after this instance " + + "had already started running?", + taskId, + taskScheduled.getName()); + throw new NonDeterministicOrchestratorException(message); + } + } + + @SuppressWarnings("unchecked") + private void handleTaskCompleted(OrchestratorService.HistoryEvent e) { + OrchestratorService.TaskCompletedEvent completedEvent = e.getTaskCompleted(); + int taskId = completedEvent.getTaskScheduledId(); + TaskRecord record = this.openTasks.remove(taskId); + if (record == null) { + this.logger.warning("Discarding a potentially duplicate TaskCompleted event with ID = " + taskId); + return; + } + + String rawResult = completedEvent.getResult().getValue(); + + if (!this.isReplaying) { + // TODO: Structured logging + // TODO: Would it make more sense to put this log in the activity executor? + this.logger.fine(() -> String.format( + "%s: Activity '%s' (#%d) completed with serialized output: %s", + this.instanceId, + record.getTaskName(), + taskId, + rawResult != null ? rawResult : "(null)")); + + } + CompletableTask task = record.getTask(); + try { + Object result = this.dataConverter.deserialize(rawResult, record.getDataType()); + task.complete(result); + } catch (Exception ex) { + task.completeExceptionally(ex); + } + } + + private void handleTaskFailed(OrchestratorService.HistoryEvent e) { + OrchestratorService.TaskFailedEvent failedEvent = e.getTaskFailed(); + int taskId = failedEvent.getTaskScheduledId(); + TaskRecord record = this.openTasks.remove(taskId); + if (record == null) { + // TODO: Log a warning about a potential duplicate task completion event + return; + } + + FailureDetails details = new FailureDetails(failedEvent.getFailureDetails()); + + if (!this.isReplaying) { + // TODO: Log task failure, including the number of bytes in the result + } + + CompletableTask task = record.getTask(); + TaskFailedException exception = new TaskFailedException( + record.taskName, + taskId, + details); + task.completeExceptionally(exception); + } + + @SuppressWarnings("unchecked") + private void handleEventRaised(OrchestratorService.HistoryEvent e) { + OrchestratorService.EventRaisedEvent eventRaised = e.getEventRaised(); + String eventName = eventRaised.getName(); + + Queue> outstandingEventQueue = this.outstandingEvents.get(eventName); + if (outstandingEventQueue == null) { + // No code is waiting for this event. Buffer it in case user-code waits for it later. + this.unprocessedEvents.add(e); + return; + } + + // Signal the first waiter in the queue with this event payload. 
+ TaskRecord matchingTaskRecord = outstandingEventQueue.remove(); + if (outstandingEventQueue.isEmpty()) { + this.outstandingEvents.remove(eventName); + } + String rawResult = eventRaised.getInput().getValue(); + CompletableTask task = matchingTaskRecord.getTask(); + try { + Object result = this.dataConverter.deserialize( + rawResult, + matchingTaskRecord.getDataType()); + task.complete(result); + } catch (Exception ex) { + task.completeExceptionally(ex); + } + } + + private void handleEventWhileSuspended(OrchestratorService.HistoryEvent historyEvent) { + if (historyEvent.getEventTypeCase() != OrchestratorService.HistoryEvent.EventTypeCase.EXECUTIONSUSPENDED) { + eventsWhileSuspended.offer(historyEvent); + } + } + + private void handleExecutionSuspended(OrchestratorService.HistoryEvent historyEvent) { + this.isSuspended = true; + } + + private void handleExecutionResumed(OrchestratorService.HistoryEvent historyEvent) { + this.isSuspended = false; + while (!eventsWhileSuspended.isEmpty()) { + this.processEvent(eventsWhileSuspended.poll()); + } + } + + public Task createTimer(Duration duration) { + Helpers.throwIfOrchestratorComplete(this.isComplete); + Helpers.throwIfArgumentNull(duration, "duration"); + + Instant finalFireAt = this.currentInstant.plus(duration); + return createTimer(finalFireAt); + } + + @Override + public Task createTimer(ZonedDateTime zonedDateTime) { + Helpers.throwIfOrchestratorComplete(this.isComplete); + Helpers.throwIfArgumentNull(zonedDateTime, "zonedDateTime"); + + Instant finalFireAt = zonedDateTime.toInstant(); + return createTimer(finalFireAt); + } + + private Task createTimer(Instant finalFireAt) { + return new TimerTask(finalFireAt); + } + + private CompletableTask createInstantTimer(int id, Instant fireAt) { + Timestamp ts = DataConverter.getTimestampFromInstant(fireAt); + this.pendingActions.put(id, OrchestratorService.OrchestratorAction.newBuilder() + .setId(id) + .setCreateTimer(OrchestratorService.CreateTimerAction.newBuilder().setFireAt(ts)) + .build()); + + if (!this.isReplaying) { + logger.finer(() -> String.format("Creating Instant Timer with id: %s, fireAt: %s ", id, fireAt)); + } + + CompletableTask timerTask = new CompletableTask<>(); + TaskRecord record = new TaskRecord<>(timerTask, "(timer)", Void.class); + this.openTasks.put(id, record); + return timerTask; + } + + private void handleTimerCreated(OrchestratorService.HistoryEvent e) { + int timerEventId = e.getEventId(); + if (timerEventId == -100) { + // Infrastructure timer used by the dispatcher to break transactions into multiple batches + return; + } + + OrchestratorService.TimerCreatedEvent timerCreatedEvent = e.getTimerCreated(); + + // The history shows that this orchestrator created a durable timer in a previous execution. + // We can therefore remove it from the map of pending actions. If we can't find the pending + // action, then we assume a non-deterministic code violation in the orchestrator. + OrchestratorService.OrchestratorAction timerAction = this.pendingActions.remove(timerEventId); + if (timerAction == null) { + String message = String.format( + "Non-deterministic orchestrator detected: a history event creating a timer with ID %d and " + + "fire-at time %s was replayed but the current orchestrator implementation didn't actually create " + + "this timer. 
Was a change made to the orchestrator code after this instance " + + "had already started running?", + timerEventId, + DataConverter.getInstantFromTimestamp(timerCreatedEvent.getFireAt())); + throw new NonDeterministicOrchestratorException(message); + } + } + + public void handleTimerFired(OrchestratorService.HistoryEvent e) { + OrchestratorService.TimerFiredEvent timerFiredEvent = e.getTimerFired(); + int timerEventId = timerFiredEvent.getTimerId(); + TaskRecord record = this.openTasks.remove(timerEventId); + if (record == null) { + // TODO: Log a warning about a potential duplicate timer fired event + return; + } + + if (!this.isReplaying) { + this.logger.finer(() -> + String.format("Firing timer by completing task: %s expected fire at time: %s", timerEventId, + Instant.ofEpochSecond(timerFiredEvent.getFireAt().getSeconds(), + timerFiredEvent.getFireAt().getNanos()))); + } + + CompletableTask task = record.getTask(); + task.complete(null); + } + + private void handleSubOrchestrationCreated(OrchestratorService.HistoryEvent e) { + int taskId = e.getEventId(); + OrchestratorService.SubOrchestrationInstanceCreatedEvent subOrchestrationInstanceCreated = + e.getSubOrchestrationInstanceCreated(); + OrchestratorService.OrchestratorAction taskAction = this.pendingActions.remove(taskId); + if (taskAction == null) { + String message = String.format( + "Non-deterministic orchestrator detected: a history event scheduling an sub-orchestration task " + + "with sequence ID %d and name '%s' was replayed but the current orchestrator implementation didn't " + + "actually schedule this task. Was a change made to the orchestrator code after this instance had " + + "already started running?", + taskId, + subOrchestrationInstanceCreated.getName()); + throw new NonDeterministicOrchestratorException(message); + } + } + + private void handleSubOrchestrationCompleted(OrchestratorService.HistoryEvent e) { + OrchestratorService.SubOrchestrationInstanceCompletedEvent subOrchestrationInstanceCompletedEvent = + e.getSubOrchestrationInstanceCompleted(); + int taskId = subOrchestrationInstanceCompletedEvent.getTaskScheduledId(); + TaskRecord record = this.openTasks.remove(taskId); + if (record == null) { + this.logger.warning("Discarding a potentially duplicate SubOrchestrationInstanceCompleted " + + "event with ID = " + taskId); + return; + } + String rawResult = subOrchestrationInstanceCompletedEvent.getResult().getValue(); + + if (!this.isReplaying) { + // TODO: Structured logging + // TODO: Would it make more sense to put this log in the activity executor? + this.logger.fine(() -> String.format( + "%s: Sub-orchestrator '%s' (#%d) completed with serialized output: %s", + this.instanceId, + record.getTaskName(), + taskId, + rawResult != null ? 
rawResult : "(null)")); + + } + CompletableTask task = record.getTask(); + try { + Object result = this.dataConverter.deserialize(rawResult, record.getDataType()); + task.complete(result); + } catch (Exception ex) { + task.completeExceptionally(ex); + } + } + + private void handleSubOrchestrationFailed(OrchestratorService.HistoryEvent e) { + OrchestratorService.SubOrchestrationInstanceFailedEvent subOrchestrationInstanceFailedEvent = + e.getSubOrchestrationInstanceFailed(); + int taskId = subOrchestrationInstanceFailedEvent.getTaskScheduledId(); + TaskRecord record = this.openTasks.remove(taskId); + if (record == null) { + // TODO: Log a warning about a potential duplicate task completion event + return; + } + + FailureDetails details = new FailureDetails(subOrchestrationInstanceFailedEvent.getFailureDetails()); + + if (!this.isReplaying) { + // TODO: Log task failure, including the number of bytes in the result + } + + CompletableTask task = record.getTask(); + TaskFailedException exception = new TaskFailedException( + record.taskName, + taskId, + details); + task.completeExceptionally(exception); + } + + private void handleExecutionTerminated(OrchestratorService.HistoryEvent e) { + OrchestratorService.ExecutionTerminatedEvent executionTerminatedEvent = e.getExecutionTerminated(); + this.completeInternal(executionTerminatedEvent.getInput().getValue(), null, + OrchestratorService.OrchestrationStatus.ORCHESTRATION_STATUS_TERMINATED); + } + + @Override + public void complete(Object output) { + if (this.continuedAsNew) { + this.completeInternal(this.continuedAsNewInput, + OrchestratorService.OrchestrationStatus.ORCHESTRATION_STATUS_CONTINUED_AS_NEW); + } else { + this.completeInternal(output, OrchestratorService.OrchestrationStatus.ORCHESTRATION_STATUS_COMPLETED); + } + } + + public void fail(FailureDetails failureDetails) { + // TODO: How does a parent orchestration use the output to construct an exception? 
+ this.completeInternal(null, failureDetails, + OrchestratorService.OrchestrationStatus.ORCHESTRATION_STATUS_FAILED); + } + + private void completeInternal(Object output, OrchestratorService.OrchestrationStatus runtimeStatus) { + String resultAsJson = TaskOrchestrationExecutor.this.dataConverter.serialize(output); + this.completeInternal(resultAsJson, null, runtimeStatus); + } + + private void completeInternal( + @Nullable String rawOutput, + @Nullable FailureDetails failureDetails, + OrchestratorService.OrchestrationStatus runtimeStatus) { + Helpers.throwIfOrchestratorComplete(this.isComplete); + + + OrchestratorService.CompleteOrchestrationAction.Builder builder = OrchestratorService.CompleteOrchestrationAction + .newBuilder(); + builder.setOrchestrationStatus(runtimeStatus); + + if (rawOutput != null) { + builder.setResult(StringValue.of(rawOutput)); + } + + if (failureDetails != null) { + builder.setFailureDetails(failureDetails.toProto()); + } + + if (this.continuedAsNew && this.preserveUnprocessedEvents) { + addCarryoverEvents(builder); + } + + if (!this.isReplaying) { + // TODO: Log completion, including the number of bytes in the output + } + + int id = this.sequenceNumber++; + OrchestratorService.OrchestratorAction action = OrchestratorService.OrchestratorAction.newBuilder() + .setId(id) + .setCompleteOrchestration(builder.build()) + .build(); + this.pendingActions.put(id, action); + this.isComplete = true; + } + + private void addCarryoverEvents(OrchestratorService.CompleteOrchestrationAction.Builder builder) { + // Add historyEvent in the unprocessedEvents buffer + // Add historyEvent in the new event list that haven't been added to the buffer. + // We don't check the event in the pass event list to avoid duplicated events. + Set externalEvents = new HashSet<>(this.unprocessedEvents); + List newEvents = this.historyEventPlayer.getNewEvents(); + int currentHistoryIndex = this.historyEventPlayer.getCurrentHistoryIndex(); + + // Only add events that haven't been processed to the carryOverEvents + // currentHistoryIndex will point to the first unprocessed event + for (int i = currentHistoryIndex; i < newEvents.size(); i++) { + OrchestratorService.HistoryEvent historyEvent = newEvents.get(i); + if (historyEvent.getEventTypeCase() == OrchestratorService.HistoryEvent.EventTypeCase.EVENTRAISED) { + externalEvents.add(historyEvent); + } + } + + externalEvents.forEach(builder::addCarryoverEvents); + } + + private boolean waitingForEvents() { + return this.outstandingEvents.size() > 0; + } + + private boolean processNextEvent() { + return this.historyEventPlayer.moveNext(); + } + + private void processEvent(OrchestratorService.HistoryEvent e) { + boolean overrideSuspension = e.getEventTypeCase() + == OrchestratorService.HistoryEvent.EventTypeCase.EXECUTIONRESUMED + || e.getEventTypeCase() == OrchestratorService.HistoryEvent.EventTypeCase.EXECUTIONTERMINATED; + if (this.isSuspended && !overrideSuspension) { + this.handleEventWhileSuspended(e); + } else { + this.logger.fine(() -> this.instanceId + ": Processing event: " + e.getEventTypeCase()); + switch (e.getEventTypeCase()) { + case ORCHESTRATORSTARTED: + Instant instant = DataConverter.getInstantFromTimestamp(e.getTimestamp()); + this.setCurrentInstant(instant); + this.logger.fine(() -> this.instanceId + ": Workflow orchestrator started"); + break; + case ORCHESTRATORCOMPLETED: + // No action needed + this.logger.fine(() -> this.instanceId + ": Workflow orchestrator completed"); + break; + case EXECUTIONSTARTED: + 
OrchestratorService.ExecutionStartedEvent executionStarted = e.getExecutionStarted(); + this.setName(executionStarted.getName()); + this.setInput(executionStarted.getInput().getValue()); + this.setInstanceId(executionStarted.getOrchestrationInstance().getInstanceId()); + this.logger.fine(() -> this.instanceId + ": Workflow execution started"); + this.setAppId(e.getRouter().getSourceAppID()); + + // Create and invoke the workflow orchestrator + TaskOrchestrationFactory factory = TaskOrchestrationExecutor.this.orchestrationFactories + .get(executionStarted.getName()); + if (factory == null) { + // Try getting the default orchestrator + factory = TaskOrchestrationExecutor.this.orchestrationFactories.get("*"); + } + // TODO: Throw if the factory is null (orchestration by that name doesn't exist) + if (factory == null) { + throw new IllegalStateException("No factory found for orchestrator: " + executionStarted.getName()); + } + + TaskOrchestration orchestrator = factory.create(); + orchestrator.run(this); + break; + case EXECUTIONCOMPLETED: + this.logger.fine(() -> this.instanceId + ": Workflow execution completed"); + break; + case EXECUTIONTERMINATED: + this.handleExecutionTerminated(e); + break; + case TASKSCHEDULED: + this.handleTaskScheduled(e); + break; + case TASKCOMPLETED: + this.handleTaskCompleted(e); + break; + case TASKFAILED: + this.handleTaskFailed(e); + break; + case TIMERCREATED: + this.handleTimerCreated(e); + break; + case TIMERFIRED: + this.handleTimerFired(e); + break; + case SUBORCHESTRATIONINSTANCECREATED: + this.handleSubOrchestrationCreated(e); + break; + case SUBORCHESTRATIONINSTANCECOMPLETED: + this.handleSubOrchestrationCompleted(e); + break; + case SUBORCHESTRATIONINSTANCEFAILED: + this.handleSubOrchestrationFailed(e); + break; + case EVENTRAISED: + this.handleEventRaised(e); + break; + case EXECUTIONSUSPENDED: + this.handleExecutionSuspended(e); + break; + case EXECUTIONRESUMED: + this.handleExecutionResumed(e); + break; + default: + throw new IllegalStateException("Don't know how to handle history type " + e.getEventTypeCase()); + } + } + } + + private class TaskRecord { + private final CompletableTask task; + private final String taskName; + private final Class dataType; + + public TaskRecord(CompletableTask task, String taskName, Class dataType) { + this.task = task; + this.taskName = taskName; + this.dataType = dataType; + } + + public CompletableTask getTask() { + return this.task; + } + + public String getTaskName() { + return this.taskName; + } + + public Class getDataType() { + return this.dataType; + } + } + + private class OrchestrationHistoryIterator { + private final List pastEvents; + private final List newEvents; + + private List currentHistoryList; + private int currentHistoryIndex; + + public OrchestrationHistoryIterator(List pastEvents, + List newEvents) { + this.pastEvents = pastEvents; + this.newEvents = newEvents; + this.currentHistoryList = pastEvents; + } + + public boolean moveNext() { + if (this.currentHistoryList == pastEvents && this.currentHistoryIndex >= pastEvents.size()) { + // Move forward to the next list + this.currentHistoryList = this.newEvents; + this.currentHistoryIndex = 0; + + ContextImplTask.this.setDoneReplaying(); + } + + if (this.currentHistoryList == this.newEvents && this.currentHistoryIndex >= this.newEvents.size()) { + // We're done enumerating the history + return false; + } + + // Process the next event in the history + OrchestratorService.HistoryEvent next = this.currentHistoryList.get(this.currentHistoryIndex++); + 
ContextImplTask.this.processEvent(next); + return true; + } + + List getNewEvents() { + return this.newEvents; + } + + int getCurrentHistoryIndex() { + return this.currentHistoryIndex; + } + } + + private class TimerTask extends CompletableTask { + private Instant finalFireAt; + CompletableTask task; + + public TimerTask(Instant finalFireAt) { + super(); + CompletableTask firstTimer = createTimerTask(finalFireAt); + CompletableFuture timerChain = createTimerChain(finalFireAt, firstTimer.future); + this.task = new CompletableTask<>(timerChain); + this.finalFireAt = finalFireAt; + } + + // For a short timer (less than maximumTimerInterval), once the currentFuture completes, + // we must have reached finalFireAt, so we return and no more sub-timers are created. For a long timer + // (more than maximumTimerInterval), once a given currentFuture completes, we check if we have not yet + // reached finalFireAt. If that is the case, we create a new sub-timer task and make a recursive call on + // that new sub-timer task so that once it completes, another sub-timer task is created + // if necessary. Otherwise, we return and no more sub-timers are created. + private CompletableFuture createTimerChain(Instant finalFireAt, CompletableFuture currentFuture) { + return currentFuture.thenRun(() -> { + Instant currentInstsanceMinusNanos = currentInstant.minusNanos(currentInstant.getNano()); + Instant finalFireAtMinusNanos = finalFireAt.minusNanos(finalFireAt.getNano()); + if (currentInstsanceMinusNanos.compareTo(finalFireAtMinusNanos) >= 0) { + return; + } + Task nextTimer = createTimerTask(finalFireAt); + createTimerChain(finalFireAt, nextTimer.future); + }); + } + + private CompletableTask createTimerTask(Instant finalFireAt) { + CompletableTask nextTimer; + Duration remainingTime = Duration.between(currentInstant, finalFireAt); + if (remainingTime.compareTo(maximumTimerInterval) > 0) { + Instant nextFireAt = currentInstant.plus(maximumTimerInterval); + nextTimer = createInstantTimer(sequenceNumber++, nextFireAt); + } else { + nextTimer = createInstantTimer(sequenceNumber++, finalFireAt); + } + nextTimer.setParentTask(this); + return nextTimer; + } + + private void handleSubTimerSuccess() { + // check if it is the last timer + Instant currentInstantMinusNanos = currentInstant.minusNanos(currentInstant.getNano()); + Instant finalFireAtMinusNanos = finalFireAt.minusNanos(finalFireAt.getNano()); + if (currentInstantMinusNanos.compareTo(finalFireAtMinusNanos) >= 0) { + this.complete(null); + } + } + + @Override + public Void await() { + return this.task.await(); + } + + } + + private class ExternalEventTask extends CompletableTask { + private final String eventName; + private final Duration timeout; + private final int taskId; + + public ExternalEventTask(String eventName, int taskId, Duration timeout) { + this.eventName = eventName; + this.taskId = taskId; + this.timeout = timeout; + } + + // TODO: Shouldn't this be throws TaskCanceledException? 
+ @Override + protected void handleException(Throwable e) { + // Cancellation is caused by user-specified timeouts + if (e instanceof CancellationException) { + String message = String.format( + "Timeout of %s expired while waiting for an event named '%s' (ID = %d).", + this.timeout, + this.eventName, + this.taskId); + throw new TaskCanceledException(message, this.eventName, this.taskId); + } + + super.handleException(e); + } + } + + // Task implementation that implements a retry policy + private class RetriableTask extends CompletableTask { + private final RetryPolicy policy; + private final RetryHandler handler; + private final TaskOrchestrationContext context; + private final Instant firstAttempt; + private final TaskFactory taskFactory; + + private FailureDetails lastFailure; + private Duration totalRetryTime; + private Instant startTime; + private int attemptNumber; + private Task childTask; + + public RetriableTask(TaskOrchestrationContext context, TaskFactory taskFactory, RetryPolicy policy) { + this(context, taskFactory, policy, null); + } + + public RetriableTask(TaskOrchestrationContext context, TaskFactory taskFactory, RetryHandler handler) { + this(context, taskFactory, null, handler); + } + + private RetriableTask( + TaskOrchestrationContext context, + TaskFactory taskFactory, + @Nullable RetryPolicy retryPolicy, + @Nullable RetryHandler retryHandler) { + this.context = context; + this.taskFactory = taskFactory; + this.policy = retryPolicy; + this.handler = retryHandler; + this.firstAttempt = context.getCurrentInstant(); + this.totalRetryTime = Duration.ZERO; + this.createChildTask(taskFactory); + } + + // Every RetriableTask will have a CompletableTask as a child task. + private void createChildTask(TaskFactory taskFactory) { + CompletableTask childTask = (CompletableTask) taskFactory.create(); + this.setChildTask(childTask); + childTask.setParentTask(this); + } + + public void setChildTask(Task childTask) { + this.childTask = childTask; + } + + public Task getChildTask() { + return this.childTask; + } + + void handleChildSuccess(V result) { + this.complete(result); + } + + void handleChildException(Throwable ex) { + tryRetry((TaskFailedException) ex); + } + + void init() { + this.startTime = this.startTime == null ? this.context.getCurrentInstant() : this.startTime; + this.attemptNumber++; + } + + public void tryRetry(TaskFailedException ex) { + this.lastFailure = ex.getErrorDetails(); + if (!this.shouldRetry()) { + this.completeExceptionally(ex); + return; + } + + // Overflow/runaway retry protection + if (this.attemptNumber == Integer.MAX_VALUE) { + this.completeExceptionally(ex); + return; + } + + Duration delay = this.getNextDelay(); + if (!delay.isZero() && !delay.isNegative()) { + // Use a durable timer to create the delay between retries + this.context.createTimer(delay).await(); + } + + this.totalRetryTime = Duration.between(this.startTime, this.context.getCurrentInstant()); + this.createChildTask(this.taskFactory); + this.await(); + } + + @Override + public V await() { + this.init(); + // when awaiting the first child task, we will continue iterating over the history until a result is found + // for that task. If the result is an exception, the child task will invoke "handleChildException" on this + // object, which awaits a timer, *re-sets the current child task to correspond to a retry of this task*, + // and then awaits that child. + // This logic continues until either the operation succeeds, or are our retry quota is met. 
+ // At that point, we break the `await()` on the child task. + // Therefore, once we return from the following `await`, + // we just need to await again on the *current* child task to obtain the result of this task + try { + this.getChildTask().await(); + } catch (OrchestratorBlockedException ex) { + throw ex; + } catch (Exception ignored) { + // ignore the exception from previous child tasks. + // Only needs to return result from the last child task, which is on next line. + } + // Always return the last child task result. + return this.getChildTask().await(); + } + + private boolean shouldRetry() { + if (this.lastFailure.isNonRetriable()) { + logger.warning("Not performing any retries because the error is non retriable"); + + return false; + } + + if (this.policy == null && this.handler == null) { + // We should never get here, but if we do, returning false is the natural behavior. + return false; + } + + RetryContext retryContext = new RetryContext( + this.context, + this.attemptNumber, + this.lastFailure, + this.totalRetryTime); + + // These must default to true if not provided, so it is possible to use only one of them at a time + boolean shouldRetryBasedOnPolicy = this.policy != null ? this.shouldRetryBasedOnPolicy() : true; + boolean shouldRetryBasedOnHandler = this.handler != null ? this.handler.handle(retryContext) : true; + + // Only log when not replaying, so only the current attempt is logged and not all previous attempts. + if (!this.context.getIsReplaying()) { + if (this.policy != null) { + logger.fine(() -> String.format("shouldRetryBasedOnPolicy: %s", shouldRetryBasedOnPolicy)); + } + + if (this.handler != null) { + logger.fine(() -> String.format("shouldRetryBasedOnHandler: %s", shouldRetryBasedOnHandler)); + } + } + + return shouldRetryBasedOnPolicy && shouldRetryBasedOnHandler; + } + + private boolean shouldRetryBasedOnPolicy() { + // Only log when not replaying, so only the current attempt is logged and not all previous attempts. 
+ if (!this.context.getIsReplaying()) { + logger.fine(() -> String.format("Retry Policy: %d retries out of total %d performed ", this.attemptNumber, + this.policy.getMaxNumberOfAttempts())); + } + + if (this.attemptNumber >= this.policy.getMaxNumberOfAttempts()) { + // Max number of attempts exceeded + return false; + } + + // Duration.ZERO is interpreted as no maximum timeout + Duration retryTimeout = this.policy.getRetryTimeout(); + if (retryTimeout.compareTo(Duration.ZERO) > 0) { + Instant retryExpiration = this.firstAttempt.plus(retryTimeout); + if (this.context.getCurrentInstant().compareTo(retryExpiration) >= 0) { + // Max retry timeout exceeded + return false; + } + } + + // Keep retrying + return true; + } + + private Duration getNextDelay() { + if (this.policy != null) { + long maxDelayInMillis = this.policy.getMaxRetryInterval().toMillis(); + + long nextDelayInMillis; + try { + nextDelayInMillis = Math.multiplyExact( + this.policy.getFirstRetryInterval().toMillis(), + (long) Helpers.powExact(this.policy.getBackoffCoefficient(), this.attemptNumber)); + } catch (ArithmeticException overflowException) { + if (maxDelayInMillis > 0) { + return this.policy.getMaxRetryInterval(); + } else { + // If no maximum is specified, just throw + throw new ArithmeticException("The retry policy calculation resulted in an arithmetic " + + "overflow and no max retry interval was configured."); + } + } + + // NOTE: A max delay of zero or less is interpreted to mean no max delay + if (nextDelayInMillis > maxDelayInMillis && maxDelayInMillis > 0) { + return this.policy.getMaxRetryInterval(); + } else { + return Duration.ofMillis(nextDelayInMillis); + } + } + + // If there's no declarative retry policy defined, then the custom code retry handler + // is responsible for implementing any delays between retry attempts. + return Duration.ZERO; + } + } + + private class CompoundTask extends CompletableTask { + + List> subTasks; + + CompoundTask(List> subtasks, CompletableFuture future) { + super(future); + this.subTasks = subtasks; + } + + @Override + public U await() { + this.initSubTasks(); + return super.await(); + } + + private void initSubTasks() { + for (Task subTask : this.subTasks) { + if (subTask instanceof RetriableTask) { + ((RetriableTask) subTask).init(); + } + } + } + } + + private class CompletableTask extends Task { + private Task parentTask; + + public CompletableTask() { + this(new CompletableFuture<>()); + } + + CompletableTask(CompletableFuture future) { + super(future); + } + + public void setParentTask(Task parentTask) { + this.parentTask = parentTask; + } + + public Task getParentTask() { + return this.parentTask; + } + + @Override + public V await() { + do { + // If the future is done, return its value right away + if (this.future.isDone()) { + try { + return this.future.get(); + } catch (ExecutionException e) { + // rethrow if it's ContinueAsNewInterruption + if (e.getCause() instanceof ContinueAsNewInterruption) { + throw (ContinueAsNewInterruption) e.getCause(); + } + this.handleException(e.getCause()); + } catch (Exception e) { + this.handleException(e); + } + } + } while (processNextEvent()); + + // There's no more history left to replay and the current task is still not completed. This is normal. + // The OrchestratorBlockedException exception allows us to yield the current thread back to the executor so + // that we can send the current set of actions back to the worker and wait for new events to come in. 
+ // This is *not* an exception - it's a normal part of orchestrator control flow. + throw new OrchestratorBlockedException( + "The orchestrator is blocked and waiting for new inputs. " + + "This Throwable should never be caught by user code."); + } + + private boolean processNextEvent() { + try { + return ContextImplTask.this.processNextEvent(); + } catch (OrchestratorBlockedException | ContinueAsNewInterruption exception) { + throw exception; + } catch (Exception e) { + // ignore + // + // We ignore the exception. Any Durable Task exceptions thrown here can be obtained when calling + //{code#future.get()} in the implementation of 'await'. We defer to that loop to handle the exception. + // + } + // Any exception happen we return true so that we will enter to the do-while block for the last time. + return true; + } + + @Override + public CompletableTask thenApply(Function fn) { + CompletableFuture newFuture = this.future.thenApply(fn); + return new CompletableTask<>(newFuture); + } + + @Override + public Task thenAccept(Consumer fn) { + CompletableFuture newFuture = this.future.thenAccept(fn); + return new CompletableTask<>(newFuture); + } + + protected void handleException(Throwable e) { + if (e instanceof TaskFailedException) { + throw (TaskFailedException) e; + } + + if (e instanceof CompositeTaskFailedException) { + throw (CompositeTaskFailedException) e; + } + + if (e instanceof DataConverter.DataConverterException) { + throw (DataConverter.DataConverterException) e; + } + + throw new RuntimeException("Unexpected failure in the task execution", e); + } + + @Override + public boolean isDone() { + return this.future.isDone(); + } + + public boolean complete(V value) { + Task parentTask = this.getParentTask(); + boolean result = this.future.complete(value); + if (parentTask instanceof RetriableTask) { + // notify parent task + ((RetriableTask) parentTask).handleChildSuccess(value); + } + if (parentTask instanceof TimerTask) { + // notify parent task + ((TimerTask) parentTask).handleSubTimerSuccess(); + } + return result; + } + + private boolean cancel() { + return this.future.cancel(true); + } + + public boolean completeExceptionally(Throwable ex) { + Task parentTask = this.getParentTask(); + boolean result = this.future.completeExceptionally(ex); + if (parentTask instanceof RetriableTask) { + // notify parent task + ((RetriableTask) parentTask).handleChildException(ex); + } + return result; + } + } + } + + @FunctionalInterface + private interface TaskFactory { + Task create(); + } +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/TaskOrchestrationFactory.java b/durabletask-client/src/main/java/io/dapr/durabletask/TaskOrchestrationFactory.java new file mode 100644 index 000000000..274813b69 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/TaskOrchestrationFactory.java @@ -0,0 +1,33 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package io.dapr.durabletask; + +/** + * Factory interface for producing {@link TaskOrchestration} implementations. + */ +public interface TaskOrchestrationFactory { + /** + * Gets the name of the orchestration this factory creates. + * + * @return the name of the orchestration + */ + String getName(); + + /** + * Creates a new instance of {@link TaskOrchestration}. + * + * @return the created orchestration instance + */ + TaskOrchestration create(); +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/TaskOrchestratorResult.java b/durabletask-client/src/main/java/io/dapr/durabletask/TaskOrchestratorResult.java new file mode 100644 index 000000000..705a41d5c --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/TaskOrchestratorResult.java @@ -0,0 +1,40 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +import io.dapr.durabletask.implementation.protobuf.OrchestratorService; + +import java.util.Collection; +import java.util.Collections; + +final class TaskOrchestratorResult { + + private final Collection actions; + + private final String customStatus; + + public TaskOrchestratorResult(Collection actions, String customStatus) { + this.actions = Collections.unmodifiableCollection(actions); + ; + this.customStatus = customStatus; + } + + public Collection getActions() { + return this.actions; + } + + public String getCustomStatus() { + return this.customStatus; + } +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/interruption/ContinueAsNewInterruption.java b/durabletask-client/src/main/java/io/dapr/durabletask/interruption/ContinueAsNewInterruption.java new file mode 100644 index 000000000..e95c51157 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/interruption/ContinueAsNewInterruption.java @@ -0,0 +1,32 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask.interruption; + +import io.dapr.durabletask.TaskOrchestrationContext; + +/** + * Control flow {@code Throwable} class for orchestrator when invoke {@link TaskOrchestrationContext#continueAsNew}. + * This {@code Throwable} must never be caught by user + * code. + * + *

+ * <p>{@code ContinueAsNewInterruption} is thrown when an orchestrator calls
+ * {@link TaskOrchestrationContext#continueAsNew}.
+ * Catching {@code ContinueAsNewInterruption} in user code could prevent the orchestration from saving
+ * state and scheduling new tasks, resulting in the orchestration getting stuck.

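+ *
+ * <p>For example, an orchestration that processes one event per generation can await the event and then call
+ * {@code ctx.continueAsNew(nextInput)}; the resulting {@code ContinueAsNewInterruption} unwinds the orchestrator
+ * so the runtime can restart the instance with the new input, and user code should simply let it propagate.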
+ */ +public class ContinueAsNewInterruption extends RuntimeException { + public ContinueAsNewInterruption(String message) { + super(message); + } +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/interruption/OrchestratorBlockedException.java b/durabletask-client/src/main/java/io/dapr/durabletask/interruption/OrchestratorBlockedException.java new file mode 100644 index 000000000..7eff5248f --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/interruption/OrchestratorBlockedException.java @@ -0,0 +1,31 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask.interruption; + +import io.dapr.durabletask.Task; + +/** + * Control flow {@code Throwable} class for orchestrator functions. This {@code Throwable} must never be caught by user + * code. + * + *

+ * <p>{@code OrchestratorBlockedException} is thrown when an orchestrator calls {@link Task#await} on an uncompleted
+ * task. The purpose of throwing in this way is to halt execution of the orchestrator to save the current state and
+ * commit any side effects. Catching {@code OrchestratorBlockedException} in user code could prevent the orchestration
+ * from saving state and scheduling new tasks, resulting in the orchestration getting stuck.

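+ *
+ * <p>For example, awaiting {@code ctx.callActivity(...)} before the activity result is available throws this
+ * {@code Throwable}; the framework catches it, saves the current state and the actions scheduled so far, and
+ * resumes the orchestrator by replay when new events arrive, so user code must not swallow it in a broad
+ * {@code catch} block.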
+ */ +public final class OrchestratorBlockedException extends RuntimeException { + public OrchestratorBlockedException(String message) { + super(message); + } +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/util/UuidGenerator.java b/durabletask-client/src/main/java/io/dapr/durabletask/util/UuidGenerator.java new file mode 100644 index 000000000..a55ed5fb1 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/util/UuidGenerator.java @@ -0,0 +1,63 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask.util; + +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.util.UUID; + +/** + * Utility class for generating UUIDs. + * + */ +public class UuidGenerator { + + /** + * Generates a UUID. + * @param version for the UUID generation + * @param algorithm to be used + * @param namespace for the UUID generation + * @param name for the UUID generation + * @return the generated UUID + */ + public static UUID generate(int version, String algorithm, UUID namespace, String name) { + + MessageDigest hasher = hasher(algorithm); + + if (namespace != null) { + ByteBuffer ns = ByteBuffer.allocate(16); + ns.putLong(namespace.getMostSignificantBits()); + ns.putLong(namespace.getLeastSignificantBits()); + hasher.update(ns.array()); + } + + hasher.update(name.getBytes(StandardCharsets.UTF_8)); + ByteBuffer hash = ByteBuffer.wrap(hasher.digest()); + + final long msb = (hash.getLong() & 0xffffffffffff0fffL) | (version & 0x0f) << 12; + final long lsb = (hash.getLong() & 0x3fffffffffffffffL) | 0x8000000000000000L; + + return new UUID(msb, lsb); + } + + private static MessageDigest hasher(String algorithm) { + try { + return MessageDigest.getInstance(algorithm); + } catch (NoSuchAlgorithmException e) { + throw new RuntimeException(String.format("%s not supported.", algorithm)); + } + } +} diff --git a/durabletask-client/src/test/java/io/dapr/durabletask/DurableTaskClientIT.java b/durabletask-client/src/test/java/io/dapr/durabletask/DurableTaskClientIT.java new file mode 100644 index 000000000..85c7de0e4 --- /dev/null +++ b/durabletask-client/src/test/java/io/dapr/durabletask/DurableTaskClientIT.java @@ -0,0 +1,1785 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package io.dapr.durabletask; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import java.io.IOException; +import java.time.Duration; +import java.time.Instant; +import java.time.LocalDateTime; +import java.time.ZoneId; +import java.time.ZonedDateTime; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReferenceArray; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import java.util.stream.Stream; + +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; + +/** + * These integration tests are designed to exercise the core, high-level features of + * the Durable Task programming model. + *

+ * These tests currently require a sidecar process to be + * running on the local machine (the sidecar is what accepts the client operations and + * sends invocation instructions to the DurableTaskWorker). + */ +@Tag("integration") +public class DurableTaskClientIT extends IntegrationTestBase { + static final Duration defaultTimeout = Duration.ofSeconds(100); + // All tests that create a server should save it to this variable for proper shutdown + private DurableTaskGrpcWorker server; + + + @Test + void emptyOrchestration() throws TimeoutException { + final String orchestratorName = "EmptyOrchestration"; + final String input = "Hello " + Instant.now(); + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> ctx.complete(ctx.getInput(String.class))) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName, input); + OrchestrationMetadata instance = client.waitForInstanceCompletion( + instanceId, + defaultTimeout, + true); + + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + assertEquals(input, instance.readInputAs(String.class)); + assertEquals(input, instance.readOutputAs(String.class)); + } + } + + @Test + void singleTimer() throws IOException, TimeoutException { + final String orchestratorName = "SingleTimer"; + final Duration delay = Duration.ofSeconds(3); + AtomicReferenceArray timestamps = new AtomicReferenceArray<>(2); + AtomicInteger counter = new AtomicInteger(); + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + timestamps.set(counter.get(), LocalDateTime.now()); + counter.incrementAndGet(); + ctx.createTimer(delay).await(); + }) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName); + Duration timeout = delay.plus(defaultTimeout); + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, timeout, false); + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + + // Verify that the delay actually happened + long expectedCompletionSecond = instance.getCreatedAt().plus(delay).getEpochSecond(); + long actualCompletionSecond = instance.getLastUpdatedAt().getEpochSecond(); + assertTrue(expectedCompletionSecond <= actualCompletionSecond); + + // Verify that the correct number of timers were created + // This should yield 2 (first invocation + replay invocations for internal timers) + assertEquals(2, counter.get()); + + // Verify that each timer is the expected length + int[] secondsElapsed = new int[1]; + for (int i = 0; i < timestamps.length() - 1; i++) { + secondsElapsed[i] = timestamps.get(i + 1).getSecond() - timestamps.get(i).getSecond(); + } + assertEquals(3, secondsElapsed[0]); + + } + } + + + @Test + void loopWithTimer() throws IOException, TimeoutException { + final String orchestratorName = "LoopWithTimer"; + final Duration delay = Duration.ofSeconds(2); + AtomicReferenceArray timestamps = new AtomicReferenceArray<>(4); + AtomicInteger counter = new AtomicInteger(); + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + for (int i = 0; i < 3; i++) { + if (!ctx.getIsReplaying()) { + 
timestamps.set(counter.get(), LocalDateTime.now()); + counter.incrementAndGet(); + } + ctx.createTimer(delay).await(); + } + }) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName); + Duration timeout = delay.plus(defaultTimeout); + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, timeout, false); + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + + // Verify that the delay actually happened + long expectedCompletionSecond = instance.getCreatedAt().plus(delay).getEpochSecond(); + long actualCompletionSecond = instance.getLastUpdatedAt().getEpochSecond(); + assertTrue(expectedCompletionSecond <= actualCompletionSecond); + + // Verify that the correct number of timers were created + assertEquals(3, counter.get()); + + // Verify that each timer is the expected length + int[] secondsElapsed = new int[timestamps.length()]; + for (int i = 0; i < timestamps.length() - 1; i++) { + if (timestamps.get(i + 1) != null && timestamps.get(i) != null) { + secondsElapsed[i] = timestamps.get(i + 1).getSecond() - timestamps.get(i).getSecond(); + } else { + secondsElapsed[i] = -1; + } + } + assertEquals(2, secondsElapsed[0]); + assertEquals(2, secondsElapsed[1]); + assertEquals(-1, secondsElapsed[2]); + + + } + } + + @Test + void loopWithWaitForEvent() throws IOException, TimeoutException { + final String orchestratorName = "LoopWithTimer"; + final Duration delay = Duration.ofSeconds(2); + AtomicReferenceArray timestamps = new AtomicReferenceArray<>(4); + AtomicInteger counter = new AtomicInteger(); + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + for (int i = 0; i < 4; i++) { + try { + ctx.waitForExternalEvent("HELLO", delay).await(); + } catch (TaskCanceledException tce) { + if (!ctx.getIsReplaying()) { + timestamps.set(counter.get(), LocalDateTime.now()); + counter.incrementAndGet(); + } + + } + } + }) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName); + Duration timeout = delay.plus(defaultTimeout); + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, timeout, false); + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + + // Verify that the delay actually happened + long expectedCompletionSecond = instance.getCreatedAt().plus(delay).getEpochSecond(); + long actualCompletionSecond = instance.getLastUpdatedAt().getEpochSecond(); + assertTrue(expectedCompletionSecond <= actualCompletionSecond); + + // Verify that the correct number of timers were created + assertEquals(4, counter.get()); + + // Verify that each timer is the expected length + int[] secondsElapsed = new int[timestamps.length()]; + for (int i = 0; i < timestamps.length() - 1; i++) { + if (timestamps.get(i + 1) != null && timestamps.get(i) != null) { + secondsElapsed[i] = timestamps.get(i + 1).getSecond() - timestamps.get(i).getSecond(); + } else { + secondsElapsed[i] = -1; + } + } + assertEquals(2, secondsElapsed[0]); + assertEquals(2, secondsElapsed[1]); + assertEquals(2, secondsElapsed[2]); + assertEquals(0, secondsElapsed[3]); + + + } + } + + @Test + void longTimer() throws TimeoutException { + final String 
orchestratorName = "LongTimer"; + final Duration delay = Duration.ofSeconds(7); + AtomicInteger counter = new AtomicInteger(); + AtomicReferenceArray timestamps = new AtomicReferenceArray<>(4); + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + timestamps.set(counter.get(), LocalDateTime.now()); + counter.incrementAndGet(); + ctx.createTimer(delay).await(); + }) + .setMaximumTimerInterval(Duration.ofSeconds(3)) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName); + Duration timeout = delay.plus(defaultTimeout); + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, timeout, false); + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus(), + String.format("Orchestration failed with error: %s", instance.getFailureDetails().getErrorMessage())); + + // Verify that the delay actually happened + long expectedCompletionSecond = instance.getCreatedAt().plus(delay).getEpochSecond(); + long actualCompletionSecond = instance.getLastUpdatedAt().getEpochSecond(); + assertTrue(expectedCompletionSecond <= actualCompletionSecond); + + // Verify that the correct number of timers were created + // This should yield 4 (first invocation + replay invocations for internal timers 3s + 3s + 1s) + assertEquals(4, counter.get()); + + // Verify that each timer is the expected length + int[] secondsElapsed = new int[3]; + for (int i = 0; i < timestamps.length() - 1; i++) { + secondsElapsed[i] = timestamps.get(i + 1).getSecond() - timestamps.get(i).getSecond(); + } + assertEquals(secondsElapsed[0], 3); + assertEquals(secondsElapsed[1], 3); + assertEquals(secondsElapsed[2], 1); + } + } + + @Test + void longTimerNonblocking() throws TimeoutException { + final String orchestratorName = "ActivityAnyOf"; + final String externalEventActivityName = "externalEvent"; + final String externalEventWinner = "The external event completed first"; + final String timerEventWinner = "The timer event completed first"; + final Duration timerDuration = Duration.ofSeconds(20); + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + Task externalEvent = ctx.waitForExternalEvent(externalEventActivityName, String.class); + Task longTimer = ctx.createTimer(timerDuration); + Task winnerEvent = ctx.anyOf(externalEvent, longTimer).await(); + if (winnerEvent == externalEvent) { + ctx.complete(externalEventWinner); + } else { + ctx.complete(timerEventWinner); + } + }).setMaximumTimerInterval(Duration.ofSeconds(3)).buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName); + client.raiseEvent(instanceId, externalEventActivityName, "Hello world"); + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + + String output = instance.readOutputAs(String.class); + assertNotNull(output); + assertTrue(output.equals(externalEventWinner)); + + long createdTime = instance.getCreatedAt().getEpochSecond(); + long completedTime = instance.getLastUpdatedAt().getEpochSecond(); + // Timer did not block execution + assertTrue(completedTime - 
createdTime < 5); + } + } + + @Test + void longTimerNonblockingNoExternal() throws TimeoutException { + final String orchestratorName = "ActivityAnyOf"; + final String externalEventActivityName = "externalEvent"; + final String externalEventWinner = "The external event completed first"; + final String timerEventWinner = "The timer event completed first"; + final Duration timerDuration = Duration.ofSeconds(20); + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + Task externalEvent = ctx.waitForExternalEvent(externalEventActivityName, String.class); + Task longTimer = ctx.createTimer(timerDuration); + Task winnerEvent = ctx.anyOf(externalEvent, longTimer).await(); + if (winnerEvent == externalEvent) { + ctx.complete(externalEventWinner); + } else { + ctx.complete(timerEventWinner); + } + }).setMaximumTimerInterval(Duration.ofSeconds(3)).buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName); + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + + String output = instance.readOutputAs(String.class); + assertNotNull(output); + assertTrue(output.equals(timerEventWinner)); + + long expectedCompletionSecond = instance.getCreatedAt().plus(timerDuration).getEpochSecond(); + long actualCompletionSecond = instance.getLastUpdatedAt().getEpochSecond(); + assertTrue(expectedCompletionSecond <= actualCompletionSecond); + } + } + + + @Test + void longTimeStampTimer() throws TimeoutException { + final String orchestratorName = "LongTimeStampTimer"; + final Duration delay = Duration.ofSeconds(7); + final ZonedDateTime zonedDateTime = ZonedDateTime.of(LocalDateTime.now().plusSeconds(delay.getSeconds()), ZoneId.systemDefault()); + + AtomicInteger counter = new AtomicInteger(); + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + counter.incrementAndGet(); + ctx.createTimer(zonedDateTime).await(); + }) + .setMaximumTimerInterval(Duration.ofSeconds(3)) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName); + Duration timeout = delay.plus(defaultTimeout); + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, timeout, false); + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + + // Verify that the delay actually happened + long expectedCompletionSecond = zonedDateTime.toInstant().getEpochSecond(); + long actualCompletionSecond = instance.getLastUpdatedAt().getEpochSecond(); + assertTrue(expectedCompletionSecond <= actualCompletionSecond); + + // Verify that the correct number of timers were created + // This should yield 4 (first invocation + replay invocations for internal timers 3s + 3s + 2s) + // The timer can be created at 7s or 8s as clock is not precise, so we need to allow for that + assertTrue(counter.get() >= 4 && counter.get() <= 5); + } + } + + @Test + void singleTimeStampTimer() throws IOException, TimeoutException { + final String orchestratorName = "SingleTimeStampTimer"; + final Duration delay = Duration.ofSeconds(3); + final ZonedDateTime zonedDateTime = 
ZonedDateTime.of(LocalDateTime.now().plusSeconds(delay.getSeconds()), ZoneId.systemDefault()); + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> ctx.createTimer(zonedDateTime).await()) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName); + Duration timeout = delay.plus(defaultTimeout); + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, timeout, false); + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + + // Verify that the delay actually happened + long expectedCompletionSecond = zonedDateTime.toInstant().getEpochSecond(); + long actualCompletionSecond = instance.getLastUpdatedAt().getEpochSecond(); + assertTrue(expectedCompletionSecond <= actualCompletionSecond); + } + } + + + @Test + void singleTimeStampCreateTimer() throws IOException, TimeoutException { + final String orchestratorName = "SingleTimeStampTimer"; + final Duration delay = Duration.ofSeconds(3); + final ZonedDateTime zonedDateTime = ZonedDateTime.of(LocalDateTime.now().plusSeconds(delay.getSeconds()), ZoneId.systemDefault()); + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> ctx.createTimer(zonedDateTime).await()) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName); + Duration timeout = delay.plus(defaultTimeout); + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, timeout, false); + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + + // Verify that the delay actually happened + long expectedCompletionSecond = zonedDateTime.toInstant().getEpochSecond(); + long actualCompletionSecond = instance.getLastUpdatedAt().getEpochSecond(); + assertTrue(expectedCompletionSecond <= actualCompletionSecond); + } + } + + @Test + void isReplaying() throws IOException, InterruptedException, TimeoutException { + final String orchestratorName = "SingleTimer"; + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + ArrayList list = new ArrayList(); + list.add(ctx.getIsReplaying()); + ctx.createTimer(Duration.ofSeconds(0)).await(); + list.add(ctx.getIsReplaying()); + ctx.createTimer(Duration.ofSeconds(0)).await(); + list.add(ctx.getIsReplaying()); + ctx.complete(list); + }) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName); + OrchestrationMetadata instance = client.waitForInstanceCompletion( + instanceId, + defaultTimeout, + true); + + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + + // Verify that the orchestrator reported the correct isReplaying values. + // Note that only the values of the *final* replay are returned. 
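+ // On the final replay, the code before each already-fired timer runs with getIsReplaying() == true,
+ // and only the code after the last timer executes live, so the expected output is [true, true, false].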
+ List results = instance.readOutputAs(List.class); + assertEquals(3, results.size()); + assertTrue((Boolean) results.get(0)); + assertTrue((Boolean) results.get(1)); + assertFalse((Boolean) results.get(2)); + } + } + + @Test + void singleActivity() throws IOException, InterruptedException, TimeoutException { + final String orchestratorName = "SingleActivity"; + final String activityName = "Echo"; + final String input = Instant.now().toString(); + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + String activityInput = ctx.getInput(String.class); + String output = ctx.callActivity(activityName, activityInput, String.class).await(); + ctx.complete(output); + }) + .addActivity(activityName, ctx -> { + return String.format("Hello, %s!", ctx.getInput(String.class)); + }) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName, input); + OrchestrationMetadata instance = client.waitForInstanceCompletion( + instanceId, + defaultTimeout, + true); + + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + String output = instance.readOutputAs(String.class); + String expected = String.format("Hello, %s!", input); + assertEquals(expected, output); + } + } + + @Test + void currentDateTimeUtc() throws IOException, TimeoutException { + final String orchestratorName = "CurrentDateTimeUtc"; + final String echoActivityName = "Echo"; + + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + Instant currentInstant1 = ctx.getCurrentInstant(); + Instant originalInstant1 = ctx.callActivity(echoActivityName, currentInstant1, Instant.class).await(); + if (!currentInstant1.equals(originalInstant1)) { + ctx.complete(false); + return; + } + + Instant currentInstant2 = ctx.getCurrentInstant(); + Instant originalInstant2 = ctx.callActivity(echoActivityName, currentInstant2, Instant.class).await(); + if (!currentInstant2.equals(originalInstant2)) { + ctx.complete(false); + return; + } + + ctx.complete(!currentInstant1.equals(currentInstant2)); + }) + .addActivity(echoActivityName, ctx -> { + // Return the input back to the caller, regardless of its type + return ctx.getInput(Object.class); + }) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName); + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + assertTrue(instance.readOutputAs(boolean.class)); + } + } + + @Test + void activityChain() throws IOException, TimeoutException { + final String orchestratorName = "ActivityChain"; + final String plusOneActivityName = "PlusOne"; + + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + int value = ctx.getInput(int.class); + for (int i = 0; i < 10; i++) { + value = ctx.callActivity(plusOneActivityName, i, int.class).await(); + } + + ctx.complete(value); + }) + .addActivity(plusOneActivityName, ctx -> ctx.getInput(int.class) + 1) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = 
client.scheduleNewOrchestrationInstance(orchestratorName, 0); + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + assertEquals(10, instance.readOutputAs(int.class)); + } + } + + @Test + void subOrchestration() throws TimeoutException { + final String orchestratorName = "SubOrchestration"; + DurableTaskGrpcWorker worker = this.createWorkerBuilder().addOrchestrator(orchestratorName, ctx -> { + int result = 5; + int input = ctx.getInput(int.class); + if (input < 3) { + result += ctx.callSubOrchestrator(orchestratorName, input + 1, int.class).await(); + } + ctx.complete(result); + }).buildAndStart(); + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName, 1); + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + assertEquals(15, instance.readOutputAs(int.class)); + } + } + + @Test + void continueAsNew() throws TimeoutException { + final String orchestratorName = "continueAsNew"; + final Duration delay = Duration.ofSeconds(0); + DurableTaskGrpcWorker worker = this.createWorkerBuilder().addOrchestrator(orchestratorName, ctx -> { + int input = ctx.getInput(int.class); + if (input < 10) { + ctx.createTimer(delay).await(); + ctx.continueAsNew(input + 1); + } else { + ctx.complete(input); + } + }).buildAndStart(); + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName, 1); + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + assertEquals(10, instance.readOutputAs(int.class)); + } + } + + @Test + void continueAsNewWithExternalEvents() throws TimeoutException, InterruptedException { + final String orchestratorName = "continueAsNewWithExternalEvents"; + final String eventName = "MyEvent"; + final int expectedEventCount = 10; + final Duration delay = Duration.ofSeconds(0); + DurableTaskGrpcWorker worker = this.createWorkerBuilder().addOrchestrator(orchestratorName, ctx -> { + int receivedEventCount = ctx.getInput(int.class); + + if (receivedEventCount < expectedEventCount) { + ctx.waitForExternalEvent(eventName, int.class).await(); + ctx.continueAsNew(receivedEventCount + 1, true); + } else { + ctx.complete(receivedEventCount); + } + }).buildAndStart(); + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName, 0); + + for (int i = 0; i < expectedEventCount; i++) { + client.raiseEvent(instanceId, eventName, i); + } + + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + assertEquals(expectedEventCount, instance.readOutputAs(int.class)); + } + } + + @Test + void termination() throws TimeoutException { + final String orchestratorName = "Termination"; + final Duration delay = Duration.ofSeconds(3); + + 
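+ // The orchestrator below simply sleeps on a 3-second timer; the client terminates the instance
+ // right after scheduling it and then verifies the TERMINATED status and the caller-supplied output.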
DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> ctx.createTimer(delay).await()) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName); + String expectOutput = "I'll be back."; + client.terminate(instanceId, expectOutput); + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(instance); + assertEquals(instanceId, instance.getInstanceId()); + assertEquals(OrchestrationRuntimeStatus.TERMINATED, instance.getRuntimeStatus()); + assertEquals(expectOutput, instance.readOutputAs(String.class)); + } + } + + + @ParameterizedTest + @ValueSource(booleans = {true}) + void restartOrchestrationWithNewInstanceId(boolean restartWithNewInstanceId) throws TimeoutException { + final String orchestratorName = "restart"; + final Duration delay = Duration.ofSeconds(3); + + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> ctx.createTimer(delay).await()) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName, "RestartTest"); + client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + String newInstanceId = client.restartInstance(instanceId, restartWithNewInstanceId); + OrchestrationMetadata instance = client.waitForInstanceCompletion(newInstanceId, defaultTimeout, true); + + if (restartWithNewInstanceId) { + assertNotEquals(instanceId, newInstanceId); + } else { + assertEquals(instanceId, newInstanceId); + } + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + assertEquals("\"RestartTest\"", instance.getSerializedInput()); + } + } + + @Test + void restartOrchestrationThrowsException() { + final String orchestratorName = "restart"; + final Duration delay = Duration.ofSeconds(3); + final String nonExistentId = "123"; + + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> ctx.createTimer(delay).await()) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + client.scheduleNewOrchestrationInstance(orchestratorName, "RestartTest"); + + assertThrows( + IllegalArgumentException.class, + () -> client.restartInstance(nonExistentId, true) + ); + } + + } + + @Test + @Disabled("Test is disabled for investigation, fixing the test retry pattern exposed the failure") + void suspendResumeOrchestration() throws TimeoutException, InterruptedException { + final String orchestratorName = "suspend"; + final String eventName = "MyEvent"; + final String eventPayload = "testPayload"; + final Duration suspendTimeout = Duration.ofSeconds(5); + + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + String payload = ctx.waitForExternalEvent(eventName, String.class).await(); + ctx.complete(payload); + }) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName); + client.suspendInstance(instanceId); + OrchestrationMetadata instance = client.waitForInstanceStart(instanceId, defaultTimeout); + assertNotNull(instance); + 
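+ // The instance was suspended immediately after being scheduled, so once it has started it should
+ // report SUSPENDED and hold the raised event until the instance is resumed further below.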
assertEquals(OrchestrationRuntimeStatus.SUSPENDED, instance.getRuntimeStatus()); + + client.raiseEvent(instanceId, eventName, eventPayload); + + assertThrows( + TimeoutException.class, + () -> client.waitForInstanceCompletion(instanceId, suspendTimeout, false), + "Expected to throw TimeoutException, but it didn't" + ); + + String resumeReason = "Resume for testing."; + client.resumeInstance(instanceId, resumeReason); + instance = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(instance); + assertEquals(instanceId, instance.getInstanceId()); + assertEquals(eventPayload, instance.readOutputAs(String.class)); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + } + } + + @Test + @Disabled("Test is disabled for investigation)") + void terminateSuspendOrchestration() throws TimeoutException, InterruptedException { + final String orchestratorName = "suspendResume"; + final String eventName = "MyEvent"; + final String eventPayload = "testPayload"; + + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + String payload = ctx.waitForExternalEvent(eventName, String.class).await(); + ctx.complete(payload); + }) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName); + String suspendReason = "Suspend for testing."; + client.suspendInstance(instanceId, suspendReason); + client.terminate(instanceId, null); + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, defaultTimeout, false); + assertNotNull(instance); + assertEquals(instanceId, instance.getInstanceId()); + assertEquals(OrchestrationRuntimeStatus.TERMINATED, instance.getRuntimeStatus()); + } + } + + @Test + void activityFanOut() throws IOException, TimeoutException { + final String orchestratorName = "ActivityFanOut"; + final String activityName = "ToString"; + final int activityCount = 10; + + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + // Schedule each task to run in parallel + List> parallelTasks = IntStream.range(0, activityCount) + .mapToObj(i -> ctx.callActivity(activityName, i, String.class)) + .collect(Collectors.toList()); + + // Wait for all tasks to complete, then sort and reverse the results + List results = ctx.allOf(parallelTasks).await(); + Collections.sort(results); + Collections.reverse(results); + ctx.complete(results); + }) + .addActivity(activityName, ctx -> ctx.getInput(Object.class).toString()) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName, 0); + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + + List output = instance.readOutputAs(List.class); + assertNotNull(output); + assertEquals(activityCount, output.size()); + assertEquals(String.class, output.get(0).getClass()); + + // Expected: ["9", "8", "7", "6", "5", "4", "3", "2", "1", "0"] + for (int i = 0; i < activityCount; i++) { + String expected = String.valueOf(activityCount - i - 1); + assertEquals(expected, output.get(i).toString()); + } + } + } + + @Test + void externalEvents() throws IOException, 
TimeoutException { + final String orchestratorName = "ExternalEvents"; + final String eventName = "MyEvent"; + final int eventCount = 10; + + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + int i; + for (i = 0; i < eventCount; i++) { + // block until the event is received + int payload = ctx.waitForExternalEvent(eventName, int.class).await(); + if (payload != i) { + ctx.complete(-1); + return; + } + } + + ctx.complete(i); + }) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName); + + for (int i = 0; i < eventCount; i++) { + client.raiseEvent(instanceId, eventName, i); + } + + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + + int output = instance.readOutputAs(int.class); + assertEquals(eventCount, output); + } + } + + @ParameterizedTest + @ValueSource(booleans = {true, false}) + void externalEventsWithTimeouts(boolean raiseEvent) throws IOException, TimeoutException { + final String orchestratorName = "ExternalEventsWithTimeouts"; + final String eventName = "MyEvent"; + + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + try { + ctx.waitForExternalEvent(eventName, Duration.ofSeconds(3)).await(); + ctx.complete("received"); + } catch (TaskCanceledException e) { + ctx.complete(e.getMessage()); + } + }) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName); + + client.waitForInstanceStart(instanceId, defaultTimeout); + if (raiseEvent) { + client.raiseEvent(instanceId, eventName); + } + + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + + String output = instance.readOutputAs(String.class); + if (raiseEvent) { + assertEquals("received", output); + } else { + assertEquals("Timeout of PT3S expired while waiting for an event named '" + eventName + "' (ID = 0).", output); + } + } + } + + @Test + void setCustomStatus() throws TimeoutException { + final String orchestratorName = "SetCustomStatus"; + + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + ctx.setCustomStatus("Started!"); + Object customStatus = ctx.waitForExternalEvent("StatusEvent", Object.class).await(); + ctx.setCustomStatus(customStatus); + }) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName); + + OrchestrationMetadata metadata = client.waitForInstanceStart(instanceId, defaultTimeout, true); + assertNotNull(metadata); + assertEquals("Started!", metadata.readCustomStatusAs(String.class)); + + Map payload = new HashMap() {{ + put("Hello", 45); + }}; + client.raiseEvent(metadata.getInstanceId(), "StatusEvent", payload); + + metadata = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(metadata); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, metadata.getRuntimeStatus()); + 
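+ // The last value passed to ctx.setCustomStatus (the raised event payload) should now be reported
+ // as the instance's custom status.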
assertTrue(metadata.isCustomStatusFetched()); + assertEquals(payload, metadata.readCustomStatusAs(HashMap.class)); + } + } + + @Test + void clearCustomStatus() throws TimeoutException { + final String orchestratorName = "ClearCustomStatus"; + + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + ctx.setCustomStatus("Started!"); + ctx.waitForExternalEvent("StatusEvent").await(); + ctx.clearCustomStatus(); + }) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName); + + OrchestrationMetadata metadata = client.waitForInstanceStart(instanceId, defaultTimeout, true); + assertNotNull(metadata); + assertEquals("Started!", metadata.readCustomStatusAs(String.class)); + + client.raiseEvent(metadata.getInstanceId(), "StatusEvent"); + + metadata = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(metadata); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, metadata.getRuntimeStatus()); + assertFalse(metadata.isCustomStatusFetched()); + } + } + + // due to clock drift, client/worker and sidecar time are not exactly synchronized, this test needs to accommodate for client vs backend timestamps difference + @Test + @Disabled("Test is disabled for investigation, fixing the test retry pattern exposed the failure") + void multiInstanceQuery() throws TimeoutException { + final String plusOne = "plusOne"; + final String waitForEvent = "waitForEvent"; + final DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(plusOne, ctx -> { + int value = ctx.getInput(int.class); + for (int i = 0; i < 10; i++) { + value = ctx.callActivity(plusOne, value, int.class).await(); + } + ctx.complete(value); + }) + .addActivity(plusOne, ctx -> ctx.getInput(int.class) + 1) + .addOrchestrator(waitForEvent, ctx -> { + String name = ctx.getInput(String.class); + String output = ctx.waitForExternalEvent(name, String.class).await(); + ctx.complete(output); + }).buildAndStart(); + + try (worker; client) { + Instant startTime = Instant.now(); + String prefix = startTime.toString(); + + IntStream.range(0, 5).mapToObj(i -> { + String instanceId = String.format("%s.sequence.%d", prefix, i); + client.scheduleNewOrchestrationInstance(plusOne, 0, instanceId); + return instanceId; + }).collect(Collectors.toUnmodifiableList()).forEach(id -> { + try { + client.waitForInstanceCompletion(id, defaultTimeout, true); + } catch (TimeoutException e) { + e.printStackTrace(); + } + }); + + try { + Thread.sleep(2000); + } catch (InterruptedException e) { + } + + Instant sequencesFinishedTime = Instant.now(); + + IntStream.range(0, 5).mapToObj(i -> { + String instanceId = String.format("%s.waiter.%d", prefix, i); + client.scheduleNewOrchestrationInstance(waitForEvent, String.valueOf(i), instanceId); + return instanceId; + }).collect(Collectors.toUnmodifiableList()).forEach(id -> { + try { + client.waitForInstanceStart(id, defaultTimeout); + } catch (TimeoutException e) { + e.printStackTrace(); + } + }); + + // Create one query object and reuse it for multiple queries + OrchestrationStatusQuery query = new OrchestrationStatusQuery(); + OrchestrationStatusQueryResult result = null; + + // Return all instances + result = client.queryInstances(query); + assertEquals(10, result.getOrchestrationState().size()); + + // Test CreatedTimeTo 
filter + query.setCreatedTimeTo(startTime.minus(Duration.ofSeconds(1))); + result = client.queryInstances(query); + assertTrue(result.getOrchestrationState().isEmpty(), + "Result should be empty but found " + result.getOrchestrationState().size() + " instances: " + + "Start time: " + startTime + ", " + + result.getOrchestrationState().stream() + .map(state -> String.format("\nID: %s, Status: %s, Created: %s", + state.getInstanceId(), + state.getRuntimeStatus(), + state.getCreatedAt())) + .collect(Collectors.joining(", "))); + + query.setCreatedTimeTo(sequencesFinishedTime); + result = client.queryInstances(query); + // Verify all returned instances contain "sequence" in their IDs + assertEquals(5, result.getOrchestrationState().stream() + .filter(state -> state.getInstanceId().contains("sequence")) + .count(), + "Expected exactly 5 instances with 'sequence' in their IDs"); + + query.setCreatedTimeTo(Instant.now().plus(Duration.ofSeconds(1))); + result = client.queryInstances(query); + assertEquals(10, result.getOrchestrationState().size()); + + // Test CreatedTimeFrom filter + query.setCreatedTimeFrom(Instant.now().plus(Duration.ofSeconds(1))); + result = client.queryInstances(query); + assertTrue(result.getOrchestrationState().isEmpty()); + + query.setCreatedTimeFrom(sequencesFinishedTime.minus(Duration.ofSeconds(5))); + result = client.queryInstances(query); + assertEquals(5, result.getOrchestrationState().stream() + .filter(state -> state.getInstanceId().contains("sequence")) + .count(), + "Expected exactly 5 instances with 'sequence' in their IDs"); + + query.setCreatedTimeFrom(startTime.minus(Duration.ofSeconds(1))); + result = client.queryInstances(query); + assertEquals(10, result.getOrchestrationState().size()); + + // Test RuntimeStatus filter + HashSet statusFilters = Stream.of( + OrchestrationRuntimeStatus.PENDING, + OrchestrationRuntimeStatus.FAILED, + OrchestrationRuntimeStatus.TERMINATED + ).collect(Collectors.toCollection(HashSet::new)); + + query.setRuntimeStatusList(new ArrayList<>(statusFilters)); + result = client.queryInstances(query); + assertTrue(result.getOrchestrationState().isEmpty()); + + statusFilters.add(OrchestrationRuntimeStatus.RUNNING); + query.setRuntimeStatusList(new ArrayList<>(statusFilters)); + result = client.queryInstances(query); + assertEquals(5, result.getOrchestrationState().size()); + + statusFilters.add(OrchestrationRuntimeStatus.COMPLETED); + query.setRuntimeStatusList(new ArrayList<>(statusFilters)); + result = client.queryInstances(query); + assertEquals(10, result.getOrchestrationState().size()); + + statusFilters.remove(OrchestrationRuntimeStatus.RUNNING); + query.setRuntimeStatusList(new ArrayList<>(statusFilters)); + result = client.queryInstances(query); + assertEquals(5, result.getOrchestrationState().size()); + + statusFilters.clear(); + query.setRuntimeStatusList(new ArrayList<>(statusFilters)); + result = client.queryInstances(query); + assertEquals(10, result.getOrchestrationState().size()); + + // Test InstanceIdPrefix + query.setInstanceIdPrefix("Foo"); + result = client.queryInstances(query); + assertTrue(result.getOrchestrationState().isEmpty()); + + query.setInstanceIdPrefix(prefix); + result = client.queryInstances(query); + assertEquals(10, result.getOrchestrationState().size()); + + // Test PageSize and ContinuationToken + HashSet instanceIds = new HashSet<>(); + query.setMaxInstanceCount(0); + while (query.getMaxInstanceCount() < 10) { + query.setMaxInstanceCount(query.getMaxInstanceCount() + 1); + result = 
client.queryInstances(query); + int total = result.getOrchestrationState().size(); + assertEquals(query.getMaxInstanceCount(), total); + result.getOrchestrationState().forEach(state -> assertTrue(instanceIds.add(state.getInstanceId()))); + while (total < 10) { + query.setContinuationToken(result.getContinuationToken()); + result = client.queryInstances(query); + int count = result.getOrchestrationState().size(); + assertNotEquals(0, count); + assertTrue(count <= query.getMaxInstanceCount()); + total += count; + assertTrue(total <= 10); + result.getOrchestrationState().forEach(state -> assertTrue(instanceIds.add(state.getInstanceId()))); + } + query.setContinuationToken(null); + instanceIds.clear(); + } + + // Test ShowInput + query.setFetchInputsAndOutputs(true); + query.setCreatedTimeFrom(sequencesFinishedTime); + result = client.queryInstances(query); + result.getOrchestrationState().forEach(state -> assertNotNull(state.readInputAs(String.class))); + + query.setFetchInputsAndOutputs(false); + query.setCreatedTimeFrom(sequencesFinishedTime); + result = client.queryInstances(query); + result.getOrchestrationState().forEach(state -> assertThrows(IllegalStateException.class, () -> state.readInputAs(String.class))); + } + } + + @Test + void purgeInstanceId() throws TimeoutException { + final String orchestratorName = "PurgeInstance"; + final String plusOneActivityName = "PlusOne"; + + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + int value = ctx.getInput(int.class); + value = ctx.callActivity(plusOneActivityName, value, int.class).await(); + ctx.complete(value); + }) + .addActivity(plusOneActivityName, ctx -> ctx.getInput(int.class) + 1) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName, 0); + OrchestrationMetadata metadata = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(metadata); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, metadata.getRuntimeStatus()); + assertEquals(1, metadata.readOutputAs(int.class)); + + PurgeResult result = client.purgeInstance(instanceId); + assertEquals(1, result.getDeletedInstanceCount()); + + metadata = client.getInstanceMetadata(instanceId, true); + assertFalse(metadata.isInstanceFound()); + } + } + + @Test + @Disabled("Test is disabled as is not supported by the sidecar") + void purgeInstanceFilter() throws TimeoutException { + final String orchestratorName = "PurgeInstance"; + final String plusOne = "PlusOne"; + final String plusTwo = "PlusTwo"; + final String terminate = "Termination"; + + final Duration delay = Duration.ofSeconds(1); + + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + int value = ctx.getInput(int.class); + value = ctx.callActivity(plusOne, value, int.class).await(); + ctx.complete(value); + }) + .addActivity(plusOne, ctx -> ctx.getInput(int.class) + 1) + .addOrchestrator(plusOne, ctx -> { + int value = ctx.getInput(int.class); + value = ctx.callActivity(plusOne, value, int.class).await(); + ctx.complete(value); + }) + .addOrchestrator(plusTwo, ctx -> { + int value = ctx.getInput(int.class); + value = ctx.callActivity(plusTwo, value, int.class).await(); + ctx.complete(value); + }) + .addActivity(plusTwo, ctx -> ctx.getInput(int.class) + 2) + .addOrchestrator(terminate, ctx -> ctx.createTimer(delay).await()) + .buildAndStart(); + + 
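+ // The scenario below purges instances in stages: first by creation time alone, then by combining a
+ // time range with TERMINATED/COMPLETED status filters, checking the deleted-instance counts and that
+ // purged instances can no longer be found.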
DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + Instant startTime = Instant.now(); + + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName, 0); + OrchestrationMetadata metadata = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(metadata); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, metadata.getRuntimeStatus()); + assertEquals(1, metadata.readOutputAs(int.class)); + + // Test CreatedTimeFrom + PurgeInstanceCriteria criteria = new PurgeInstanceCriteria(); + criteria.setCreatedTimeFrom(startTime.minus(Duration.ofSeconds(1))); + + PurgeResult result = client.purgeInstances(criteria); + assertEquals(1, result.getDeletedInstanceCount()); + metadata = client.getInstanceMetadata(instanceId, true); + assertFalse(metadata.isInstanceFound()); + + // Test CreatedTimeTo + criteria.setCreatedTimeTo(Instant.now()); + + result = client.purgeInstances(criteria); + assertEquals(0, result.getDeletedInstanceCount()); + metadata = client.getInstanceMetadata(instanceId, true); + assertFalse(metadata.isInstanceFound()); + + // Test CreatedTimeFrom, CreatedTimeTo, and RuntimeStatus + String instanceId1 = client.scheduleNewOrchestrationInstance(plusOne, 0); + metadata = client.waitForInstanceCompletion(instanceId1, defaultTimeout, true); + assertNotNull(metadata); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, metadata.getRuntimeStatus()); + assertEquals(1, metadata.readOutputAs(int.class)); + + String instanceId2 = client.scheduleNewOrchestrationInstance(plusTwo, 10); + metadata = client.waitForInstanceCompletion(instanceId2, defaultTimeout, true); + assertNotNull(metadata); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, metadata.getRuntimeStatus()); + assertEquals(12, metadata.readOutputAs(int.class)); + + String instanceId3 = client.scheduleNewOrchestrationInstance(terminate); + client.terminate(instanceId3, terminate); + metadata = client.waitForInstanceCompletion(instanceId3, defaultTimeout, true); + assertNotNull(metadata); + assertEquals(OrchestrationRuntimeStatus.TERMINATED, metadata.getRuntimeStatus()); + assertEquals(terminate, metadata.readOutputAs(String.class)); + + HashSet runtimeStatusFilters = Stream.of( + OrchestrationRuntimeStatus.TERMINATED, + OrchestrationRuntimeStatus.COMPLETED + ).collect(Collectors.toCollection(HashSet::new)); + + criteria.setCreatedTimeTo(Instant.now()); + criteria.setRuntimeStatusList(new ArrayList<>(runtimeStatusFilters)); + result = client.purgeInstances(criteria); + + assertEquals(3, result.getDeletedInstanceCount()); + metadata = client.getInstanceMetadata(instanceId1, true); + assertFalse(metadata.isInstanceFound()); + metadata = client.getInstanceMetadata(instanceId2, true); + assertFalse(metadata.isInstanceFound()); + metadata = client.getInstanceMetadata(instanceId3, true); + assertFalse(metadata.isInstanceFound()); + } + } + + @Test + void purgeInstanceFilterTimeout() throws TimeoutException { + final String orchestratorName = "PurgeInstance"; + final String plusOne = "PlusOne"; + final String plusTwo = "PlusTwo"; + + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + int value = ctx.getInput(int.class); + value = ctx.callActivity(plusOne, value, int.class).await(); + ctx.complete(value); + }) + .addActivity(plusOne, ctx -> ctx.getInput(int.class) + 1) + .addOrchestrator(plusOne, ctx -> { + int value = ctx.getInput(int.class); + value = ctx.callActivity(plusOne, value, 
int.class).await(); + ctx.complete(value); + }) + .addOrchestrator(plusTwo, ctx -> { + int value = ctx.getInput(int.class); + value = ctx.callActivity(plusTwo, value, int.class).await(); + ctx.complete(value); + }) + .addActivity(plusTwo, ctx -> ctx.getInput(int.class) + 2) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + Instant startTime = Instant.now(); + + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName, 0); + OrchestrationMetadata metadata = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(metadata); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, metadata.getRuntimeStatus()); + assertEquals(1, metadata.readOutputAs(int.class)); + + String instanceId1 = client.scheduleNewOrchestrationInstance(plusOne, 0); + metadata = client.waitForInstanceCompletion(instanceId1, defaultTimeout, true); + assertNotNull(metadata); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, metadata.getRuntimeStatus()); + assertEquals(1, metadata.readOutputAs(int.class)); + + String instanceId2 = client.scheduleNewOrchestrationInstance(plusTwo, 10); + metadata = client.waitForInstanceCompletion(instanceId2, defaultTimeout, true); + assertNotNull(metadata); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, metadata.getRuntimeStatus()); + assertEquals(12, metadata.readOutputAs(int.class)); + + PurgeInstanceCriteria criteria = new PurgeInstanceCriteria(); + criteria.setCreatedTimeFrom(startTime); + criteria.setTimeout(Duration.ofNanos(1)); + + assertThrows(TimeoutException.class, () -> client.purgeInstances(criteria)); + } + } + + @Test + void waitForInstanceStartThrowsException() { + final String orchestratorName = "orchestratorName"; + + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + try { + // The orchestration remains in the "Pending" state until the first await statement + TimeUnit.SECONDS.sleep(5); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + }) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + var instanceId = UUID.randomUUID().toString(); + Thread thread = new Thread(() -> { + client.scheduleNewOrchestrationInstance(orchestratorName, null, instanceId); + }); + thread.start(); + + assertThrows(TimeoutException.class, () -> client.waitForInstanceStart(instanceId, Duration.ofSeconds(2))); + } + } + + @Test + void waitForInstanceCompletionThrowsException() { + final String orchestratorName = "orchestratorName"; + final String plusOneActivityName = "PlusOne"; + + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + int value = ctx.getInput(int.class); + value = ctx.callActivity(plusOneActivityName, value, int.class).await(); + ctx.complete(value); + }) + .addActivity(plusOneActivityName, ctx -> { + try { + // The orchestration is started but not completed within the orchestration completion timeout due the below activity delay + TimeUnit.SECONDS.sleep(5); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + return ctx.getInput(int.class) + 1; + }) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName, 0); + assertThrows(TimeoutException.class, () -> client.waitForInstanceCompletion(instanceId, 
Duration.ofSeconds(2), false)); + } + } + + @Test + void activityFanOutWithException() throws TimeoutException { + final String orchestratorName = "ActivityFanOut"; + final String activityName = "Divide"; + final int count = 10; + final String exceptionMessage = "2 out of 6 tasks failed with an exception. See the exceptions list for details."; + + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + // Schedule each task to run in parallel + List<Task<Integer>> parallelTasks = IntStream.of(1, 2, 0, 4, 0, 6) + .mapToObj(i -> ctx.callActivity(activityName, i, Integer.class)) + .collect(Collectors.toList()); + + // Wait for all tasks to complete + try { + List<Integer> results = ctx.allOf(parallelTasks).await(); + ctx.complete(results); + } catch (CompositeTaskFailedException e) { + assertNotNull(e); + assertEquals(2, e.getExceptions().size()); + assertEquals(TaskFailedException.class, e.getExceptions().get(0).getClass()); + assertEquals(TaskFailedException.class, e.getExceptions().get(1).getClass()); + // taskId in the exception below is based on parallelTasks input + assertEquals(getExceptionMessage(activityName, 2, "/ by zero"), e.getExceptions().get(0).getMessage()); + assertEquals(getExceptionMessage(activityName, 4, "/ by zero"), e.getExceptions().get(1).getMessage()); + throw e; + } + }) + .addActivity(activityName, ctx -> count / ctx.getInput(Integer.class)) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName, 0); + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.FAILED, instance.getRuntimeStatus()); + + List output = instance.readOutputAs(List.class); + assertNull(output); + + FailureDetails details = instance.getFailureDetails(); + assertNotNull(details); + assertEquals(exceptionMessage, details.getErrorMessage()); + assertEquals("io.dapr.durabletask.CompositeTaskFailedException", details.getErrorType()); + assertNotNull(details.getStackTrace()); + } + } + + private static String getExceptionMessage(String taskName, int expectedTaskId, String expectedExceptionMessage) { + return String.format( + "Task '%s' (#%d) failed with an unhandled exception: %s", + taskName, + expectedTaskId, + expectedExceptionMessage); + } + + @Test + void thenApply() throws IOException, InterruptedException, TimeoutException { + final String orchestratorName = "thenApplyActivity"; + final String activityName = "Echo"; + final String suffix = "-test"; + final String input = Instant.now().toString(); + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + String activityInput = ctx.getInput(String.class); + String output = ctx.callActivity(activityName, activityInput, String.class).thenApply(s -> s + suffix).await(); + ctx.complete(output); + }) + .addActivity(activityName, ctx -> { + return String.format("Hello, %s!", ctx.getInput(String.class)); + }) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName, input); + OrchestrationMetadata instance = client.waitForInstanceCompletion( + instanceId, + defaultTimeout, + true); + + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, 
instance.getRuntimeStatus()); + String output = instance.readOutputAs(String.class); + String expected = String.format("Hello, %s!%s", input, suffix); + assertEquals(expected, output); + } + } + + @Test + void externalEventThenAccept() throws InterruptedException, TimeoutException { + final String orchestratorName = "continueAsNewWithExternalEvents"; + final String eventName = "MyEvent"; + final int expectedEventCount = 10; + DurableTaskGrpcWorker worker = this.createWorkerBuilder().addOrchestrator(orchestratorName, ctx -> { + int receivedEventCount = ctx.getInput(int.class); + + if (receivedEventCount < expectedEventCount) { + ctx.waitForExternalEvent(eventName, int.class) + .thenAccept(s -> { + ctx.continueAsNew(receivedEventCount + 1); + return; + }) + .await(); + } else { + ctx.complete(receivedEventCount); + } + }).buildAndStart(); + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName, 0); + + for (int i = 0; i < expectedEventCount; i++) { + client.raiseEvent(instanceId, eventName, i); + } + + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + assertEquals(expectedEventCount, instance.readOutputAs(int.class)); + } + } + + @Test + void activityAllOf() throws IOException, TimeoutException { + final String orchestratorName = "ActivityAllOf"; + final String activityName = "ToString"; + final String retryActivityName = "RetryToString"; + final int activityMiddle = 5; + final int activityCount = 10; + final AtomicBoolean throwException = new AtomicBoolean(true); + final RetryPolicy retryPolicy = new RetryPolicy(2, Duration.ofSeconds(5)); + final TaskOptions taskOptions = TaskOptions.withRetryPolicy(retryPolicy); + + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + List<Task<String>> parallelTasks = IntStream.range(0, activityMiddle * 2) + .mapToObj(i -> { + if (i < activityMiddle) { + return ctx.callActivity(activityName, i, String.class); + } else { + return ctx.callActivity(retryActivityName, i, taskOptions, String.class); + } + }) + .collect(Collectors.toList()); + + // Wait for all tasks to complete, then sort and reverse the results + List<String> results = ctx.allOf(parallelTasks).await(); + Collections.sort(results); + Collections.reverse(results); + ctx.complete(results); + }) + .addActivity(activityName, ctx -> ctx.getInput(Object.class).toString()) + .addActivity(retryActivityName, ctx -> { + if (throwException.get()) { + throwException.compareAndSet(true, false); + throw new RuntimeException("test retry"); + } + return ctx.getInput(Object.class).toString(); + }) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName, 0); + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + + List output = instance.readOutputAs(List.class); + assertNotNull(output); + assertEquals(activityCount, output.size()); + assertEquals(String.class, output.get(0).getClass()); + + // Expected: ["9", "8", "7", "6", "5", "4", "3", "2", "1", "0"] + for (int i = 0; i < activityCount; i++) { + 
String expected = String.valueOf(activityCount - i - 1); + assertEquals(expected, output.get(i).toString()); + } + } + } + + @Test + void activityAllOfException() throws IOException, TimeoutException { + final String orchestratorName = "ActivityAllOf"; + final String activityName = "ToString"; + final String retryActivityName = "RetryToStringException"; + final String result = "test fail"; + final int activityMiddle = 5; + final RetryPolicy retryPolicy = new RetryPolicy(2, Duration.ofSeconds(5)); + final TaskOptions taskOptions = TaskOptions.withRetryPolicy(retryPolicy); + + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + List<Task<String>> parallelTasks = IntStream.range(0, activityMiddle * 2) + .mapToObj(i -> { + if (i < activityMiddle) { + return ctx.callActivity(activityName, i, String.class); + } else { + return ctx.callActivity(retryActivityName, i, taskOptions, String.class); + } + }) + .collect(Collectors.toList()); + + // Wait for all tasks to complete, then sort and reverse the results + try { + List<String> results = null; + results = ctx.allOf(parallelTasks).await(); + Collections.sort(results); + Collections.reverse(results); + ctx.complete(results); + } catch (CompositeTaskFailedException e) { + // Catch only this exception type to verify that the expected exception type is thrown. + for (Exception exception : e.getExceptions()) { + if (exception instanceof TaskFailedException) { + TaskFailedException taskFailedException = (TaskFailedException) exception; + System.out.println("Task: " + taskFailedException.getTaskName() + + " Failed for cause: " + taskFailedException.getErrorDetails().getErrorMessage()); + } + } + } + ctx.complete(result); + }) + .addActivity(activityName, ctx -> ctx.getInput(Object.class).toString()) + .addActivity(retryActivityName, ctx -> { + // always throw so that every retry attempt fails + throw new RuntimeException("test retry"); + }) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName, 0); + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + + String output = instance.readOutputAs(String.class); + assertNotNull(output); + assertEquals(String.class, output.getClass()); + assertEquals(result, output); + } + } + + @Test + void activityAnyOf() throws IOException, TimeoutException { + final String orchestratorName = "ActivityAnyOf"; + final String activityName = "ToString"; + final String retryActivityName = "RetryToString"; + final int activityMiddle = 5; + final int activityCount = 10; + final AtomicBoolean throwException = new AtomicBoolean(true); + final RetryPolicy retryPolicy = new RetryPolicy(2, Duration.ofSeconds(5)); + final TaskOptions taskOptions = TaskOptions.withRetryPolicy(retryPolicy); + + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + List<Task<String>> parallelTasks = IntStream.range(0, activityMiddle * 2) + .mapToObj(i -> { + if (i < activityMiddle) { + return ctx.callActivity(activityName, i, String.class); + } else { + return ctx.callActivity(retryActivityName, i, taskOptions, String.class); + } + }) + .collect(Collectors.toList()); + + String results = (String) ctx.anyOf(parallelTasks).await().await(); + ctx.complete(results); + }) + .addActivity(activityName, ctx -> 
ctx.getInput(Object.class).toString()) + .addActivity(retryActivityName, ctx -> { + if (throwException.get()) { + throwException.compareAndSet(true, false); + throw new RuntimeException("test retry"); + } + return ctx.getInput(Object.class).toString(); + }) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName, 0); + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + + String output = instance.readOutputAs(String.class); + assertNotNull(output); + assertTrue(Integer.parseInt(output) >= 0 && Integer.parseInt(output) < activityCount); + } + } + + @Test + public void newUUIDTest() { + String orchestratorName = "test-new-uuid"; + String echoActivityName = "Echo"; + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + // Test 1: Ensure two consecutively created UUIDs are not equal + UUID currentUUID0 = ctx.newUuid(); + UUID currentUUID1 = ctx.newUuid(); + if (currentUUID0.equals(currentUUID1)) { + ctx.complete(false); + } + + // Test 2: Ensure that the same UUID values are created on each replay + UUID originalUUID1 = ctx.callActivity(echoActivityName, currentUUID1, UUID.class).await(); + if (!currentUUID1.equals(originalUUID1)) { + ctx.complete(false); + } + + // Test 3: Ensure that the same UUID values are created on each replay even after an await + UUID currentUUID2 = ctx.newUuid(); + UUID originalUUID2 = ctx.callActivity(echoActivityName, currentUUID2, UUID.class).await(); + if (!currentUUID2.equals(originalUUID2)) { + ctx.complete(false); + } + + // Test 4: Finish confirming that every generated UUID is unique + if (currentUUID1.equals(currentUUID2)) ctx.complete(false); + else ctx.complete(true); + }) + .addActivity(echoActivityName, ctx -> { + System.out.println("##### echoActivityName: " + ctx.getInput(UUID.class)); + return ctx.getInput(UUID.class); + }) + .buildAndStart(); + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName); + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + assertTrue(instance.readOutputAs(boolean.class)); + } catch (TimeoutException e) { + throw new RuntimeException(e); + } + } + + + @Test + public void taskExecutionIdTest() { + var orchestratorName = "test-task-execution-id"; + var retryActivityName = "RetryN"; + final RetryPolicy retryPolicy = new RetryPolicy(4, Duration.ofSeconds(3)); + final TaskOptions taskOptions = TaskOptions.withRetryPolicy(retryPolicy); + + var execMap = new HashMap<String, Integer>(); + + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + ctx.callActivity(retryActivityName, null, taskOptions).await(); + ctx.callActivity(retryActivityName, null, taskOptions).await(); + ctx.complete(true); + }) + .addActivity(retryActivityName, ctx -> { + System.out.println("##### RetryN[executionId]: " + ctx.getTaskExecutionId()); + var c = execMap.get(ctx.getTaskExecutionId()); + if (c == null) { + c = 0; + } else { + c++; + } + + execMap.put(ctx.getTaskExecutionId(), c); + 
if (c < 2) { + throw new RuntimeException("test retry"); + } + return null; + }) + .buildAndStart(); + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName); + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + assertEquals(2, execMap.size()); + assertTrue(instance.readOutputAs(boolean.class)); + } catch (TimeoutException e) { + throw new RuntimeException(e); + } + + } + +} + + diff --git a/durabletask-client/src/test/java/io/dapr/durabletask/DurableTaskGrpcClientTlsTest.java b/durabletask-client/src/test/java/io/dapr/durabletask/DurableTaskGrpcClientTlsTest.java new file mode 100644 index 000000000..b60b26be7 --- /dev/null +++ b/durabletask-client/src/test/java/io/dapr/durabletask/DurableTaskGrpcClientTlsTest.java @@ -0,0 +1,342 @@ +///* +// * Copyright 2025 The Dapr Authors +// * Licensed under the Apache License, Version 2.0 (the "License"); +// * you may not use this file except in compliance with the License. +// * You may obtain a copy of the License at +// * http://www.apache.org/licenses/LICENSE-2.0 +// * Unless required by applicable law or agreed to in writing, software +// * distributed under the License is distributed on an "AS IS" BASIS, +// * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// * See the License for the specific language governing permissions and +//limitations under the License. +//*/ +//package io.dapr.durabletask; +// +//import org.junit.jupiter.api.AfterEach; +//import org.junit.jupiter.api.Test; +//import org.junit.jupiter.api.io.TempDir; +//import org.junit.jupiter.api.condition.EnabledOnOs; +//import org.junit.jupiter.api.condition.OS; +//import org.junit.jupiter.api.Assumptions; +// +//import java.io.File; +//import java.nio.file.Files; +//import java.nio.file.Path; +//import java.security.KeyPair; +//import java.security.KeyPairGenerator; +//import java.security.cert.X509Certificate; +//import java.util.Base64; +//import java.util.Date; +//import java.math.BigInteger; +// +//import org.bouncycastle.asn1.x500.X500Name; +//import org.bouncycastle.asn1.x509.SubjectPublicKeyInfo; +//import org.bouncycastle.cert.X509v3CertificateBuilder; +//import org.bouncycastle.cert.jcajce.JcaX509CertificateConverter; +//import org.bouncycastle.operator.ContentSigner; +//import org.bouncycastle.operator.jcajce.JcaContentSignerBuilder; +// +//import static org.junit.jupiter.api.Assertions.*; +// +//public class DurableTaskGrpcClientTlsTest { +// private static final int DEFAULT_PORT = 4001; +// private static final String DEFAULT_SIDECAR_IP = "127.0.0.1"; +// +// @TempDir +// Path tempDir; +// +// // Track the client for cleanup +// private DurableTaskGrpcClient client; +// +// @AfterEach +// void tearDown() throws Exception { +// if (client != null) { +// client.close(); +// client = null; +// } +// } +// +// // Helper method to generate a key pair for testing +// private static KeyPair generateKeyPair() throws Exception { +// KeyPairGenerator keyPairGenerator = KeyPairGenerator.getInstance("RSA"); +// keyPairGenerator.initialize(2048); +// return keyPairGenerator.generateKeyPair(); +// } +// +// // Helper method to generate a self-signed certificate +// private static X509Certificate generateCertificate(KeyPair keyPair) throws Exception { +// X500Name issuer = new 
X500Name("CN=Test Certificate"); +// X500Name subject = new X500Name("CN=Test Certificate"); +// Date notBefore = new Date(System.currentTimeMillis() - 24 * 60 * 60 * 1000); +// Date notAfter = new Date(System.currentTimeMillis() + 365 * 24 * 60 * 60 * 1000L); +// SubjectPublicKeyInfo publicKeyInfo = SubjectPublicKeyInfo.getInstance(keyPair.getPublic().getEncoded()); +// X509v3CertificateBuilder certBuilder = new X509v3CertificateBuilder( +// issuer, +// BigInteger.valueOf(System.currentTimeMillis()), +// notBefore, +// notAfter, +// subject, +// publicKeyInfo +// ); +// ContentSigner signer = new JcaContentSignerBuilder("SHA256withRSA").build(keyPair.getPrivate()); +// return new JcaX509CertificateConverter().getCertificate(certBuilder.build(signer)); +// } +// +// private static void writeCertificateToFile(X509Certificate cert, File file) throws Exception { +// String certPem = "-----BEGIN CERTIFICATE-----\n" + +// Base64.getEncoder().encodeToString(cert.getEncoded()) + +// "\n-----END CERTIFICATE-----"; +// Files.write(file.toPath(), certPem.getBytes()); +// } +// +// private static void writePrivateKeyToFile(KeyPair keyPair, File file) throws Exception { +// String keyPem = "-----BEGIN PRIVATE KEY-----\n" + +// Base64.getEncoder().encodeToString(keyPair.getPrivate().getEncoded()) + +// "\n-----END PRIVATE KEY-----"; +// Files.write(file.toPath(), keyPem.getBytes()); +// } +// +// @Test +// public void testBuildGrpcManagedChannelWithTls() throws Exception { +// // Generate test certificate and key +// KeyPair keyPair = generateKeyPair(); +// X509Certificate cert = generateCertificate(keyPair); +// +// File certFile = File.createTempFile("test-cert", ".pem"); +// File keyFile = File.createTempFile("test-key", ".pem"); +// try { +// writeCertificateToFile(cert, certFile); +// writePrivateKeyToFile(keyPair, keyFile); +// +// client = (DurableTaskGrpcClient) new DurableTaskGrpcClientBuilder() +// .tlsCertPath(certFile.getAbsolutePath()) +// .tlsKeyPath(keyFile.getAbsolutePath()) +// .build(); +// +// assertNotNull(client); +// // Note: We can't easily test the actual TLS configuration without a real server +// } finally { +// certFile.delete(); +// keyFile.delete(); +// } +// } +// +// @Test +// public void testBuildGrpcManagedChannelWithTlsAndEndpoint() throws Exception { +// // Generate test certificate and key +// KeyPair keyPair = generateKeyPair(); +// X509Certificate cert = generateCertificate(keyPair); +// +// File certFile = File.createTempFile("test-cert", ".pem"); +// File keyFile = File.createTempFile("test-key", ".pem"); +// try { +// writeCertificateToFile(cert, certFile); +// writePrivateKeyToFile(keyPair, keyFile); +// +// client = (DurableTaskGrpcClient) new DurableTaskGrpcClientBuilder() +// .tlsCertPath(certFile.getAbsolutePath()) +// .tlsKeyPath(keyFile.getAbsolutePath()) +// .port(443) +// .build(); +// +// assertNotNull(client); +// } finally { +// certFile.delete(); +// keyFile.delete(); +// } +// } +// +// @Test +// public void testBuildGrpcManagedChannelWithInvalidTlsCert() { +// assertThrows(RuntimeException.class, () -> { +// new DurableTaskGrpcClientBuilder() +// .tlsCertPath("/nonexistent/cert.pem") +// .tlsKeyPath("/nonexistent/key.pem") +// .build(); +// }); +// } +// +// @Test +// @EnabledOnOs({OS.LINUX, OS.MAC}) +// public void testBuildGrpcManagedChannelWithTlsAndUnixSocket() throws Exception { +// // Skip this test since Unix socket support is not implemented yet +// Assumptions.assumeTrue(false, "Unix socket support not implemented yet"); +// } +// +// 
@Test +// public void testBuildGrpcManagedChannelWithTlsAndDnsAuthority() throws Exception { +// // Generate test certificate and key +// KeyPair keyPair = generateKeyPair(); +// X509Certificate cert = generateCertificate(keyPair); +// +// File certFile = File.createTempFile("test-cert", ".pem"); +// File keyFile = File.createTempFile("test-key", ".pem"); +// try { +// writeCertificateToFile(cert, certFile); +// writePrivateKeyToFile(keyPair, keyFile); +// +// client = (DurableTaskGrpcClient) new DurableTaskGrpcClientBuilder() +// .tlsCertPath(certFile.getAbsolutePath()) +// .tlsKeyPath(keyFile.getAbsolutePath()) +// .port(443) +// .build(); +// +// assertNotNull(client); +// } finally { +// certFile.delete(); +// keyFile.delete(); +// } +// } +// +// @Test +// public void testBuildGrpcManagedChannelWithTlsAndCaCert() throws Exception { +// // Generate test CA certificate +// KeyPair caKeyPair = generateKeyPair(); +// X509Certificate caCert = generateCertificate(caKeyPair); +// +// File caCertFile = File.createTempFile("test-ca-cert", ".pem"); +// try { +// writeCertificateToFile(caCert, caCertFile); +// +// client = (DurableTaskGrpcClient) new DurableTaskGrpcClientBuilder() +// .tlsCaPath(caCertFile.getAbsolutePath()) +// .build(); +// +// assertNotNull(client); +// } finally { +// caCertFile.delete(); +// } +// } +// +// @Test +// public void testBuildGrpcManagedChannelWithTlsAndCaCertAndEndpoint() throws Exception { +// // Generate test CA certificate +// KeyPair caKeyPair = generateKeyPair(); +// X509Certificate caCert = generateCertificate(caKeyPair); +// +// File caCertFile = File.createTempFile("test-ca-cert", ".pem"); +// try { +// writeCertificateToFile(caCert, caCertFile); +// +// client = (DurableTaskGrpcClient) new DurableTaskGrpcClientBuilder() +// .tlsCaPath(caCertFile.getAbsolutePath()) +// .port(443) +// .build(); +// +// assertNotNull(client); +// } finally { +// caCertFile.delete(); +// } +// } +// +// @Test +// public void testBuildGrpcManagedChannelWithInvalidCaCert() { +// assertThrows(RuntimeException.class, () -> { +// new DurableTaskGrpcClientBuilder() +// .tlsCaPath("/nonexistent/ca.pem") +// .build(); +// }); +// } +// +// @Test +// public void testBuildGrpcManagedChannelWithMtlsAndCaCert() throws Exception { +// // Generate test certificates +// KeyPair caKeyPair = generateKeyPair(); +// X509Certificate caCert = generateCertificate(caKeyPair); +// KeyPair clientKeyPair = generateKeyPair(); +// X509Certificate clientCert = generateCertificate(clientKeyPair); +// +// File caCertFile = File.createTempFile("test-ca-cert", ".pem"); +// File clientCertFile = File.createTempFile("test-client-cert", ".pem"); +// File clientKeyFile = File.createTempFile("test-client-key", ".pem"); +// try { +// writeCertificateToFile(caCert, caCertFile); +// writeCertificateToFile(clientCert, clientCertFile); +// writePrivateKeyToFile(clientKeyPair, clientKeyFile); +// +// client = (DurableTaskGrpcClient) new DurableTaskGrpcClientBuilder() +// .tlsCaPath(caCertFile.getAbsolutePath()) +// .tlsCertPath(clientCertFile.getAbsolutePath()) +// .tlsKeyPath(clientKeyFile.getAbsolutePath()) +// .build(); +// +// assertNotNull(client); +// } finally { +// caCertFile.delete(); +// clientCertFile.delete(); +// clientKeyFile.delete(); +// } +// } +// +// @Test +// public void testBuildGrpcManagedChannelWithInsecureTls() throws Exception { +// client = (DurableTaskGrpcClient) new DurableTaskGrpcClientBuilder() +// .insecure(true) +// .port(443) +// .build(); +// +// assertNotNull(client); +// } +// +// 
@Test +// public void testBuildGrpcManagedChannelWithInsecureTlsAndMtls() throws Exception { +// // Generate test certificates +// KeyPair caKeyPair = generateKeyPair(); +// X509Certificate caCert = generateCertificate(caKeyPair); +// KeyPair clientKeyPair = generateKeyPair(); +// X509Certificate clientCert = generateCertificate(clientKeyPair); +// +// File caCertFile = File.createTempFile("test-ca-cert", ".pem"); +// File clientCertFile = File.createTempFile("test-client-cert", ".pem"); +// File clientKeyFile = File.createTempFile("test-client-key", ".pem"); +// try { +// writeCertificateToFile(caCert, caCertFile); +// writeCertificateToFile(clientCert, clientCertFile); +// writePrivateKeyToFile(clientKeyPair, clientKeyFile); +// +// client = (DurableTaskGrpcClient) new DurableTaskGrpcClientBuilder() +// .insecure(true) +// .tlsCaPath(caCertFile.getAbsolutePath()) +// .tlsCertPath(clientCertFile.getAbsolutePath()) +// .tlsKeyPath(clientKeyFile.getAbsolutePath()) +// .port(443) +// .build(); +// +// assertNotNull(client); +// } finally { +// caCertFile.delete(); +// clientCertFile.delete(); +// clientKeyFile.delete(); +// } +// } +// +// @Test +// public void testBuildGrpcManagedChannelWithInsecureTlsAndCustomEndpoint() throws Exception { +// client = (DurableTaskGrpcClient) new DurableTaskGrpcClientBuilder() +// .insecure(true) +// .port(443) +// .build(); +// +// assertNotNull(client); +// } +// +// @Test +// public void testBuildGrpcManagedChannelWithPlaintext() throws Exception { +// // No TLS config provided, should use plaintext +// client = (DurableTaskGrpcClient) new DurableTaskGrpcClientBuilder() +// .port(443) +// .build(); +// +// assertNotNull(client); +// } +// +// @Test +// public void testBuildGrpcManagedChannelWithPlaintextAndCustomEndpoint() throws Exception { +// // No TLS config provided, should use plaintext +// client = (DurableTaskGrpcClient) new DurableTaskGrpcClientBuilder() +// .port(50001) // Custom port +// .build(); +// +// assertNotNull(client); +// } +//} \ No newline at end of file diff --git a/durabletask-client/src/test/java/io/dapr/durabletask/ErrorHandlingIT.java b/durabletask-client/src/test/java/io/dapr/durabletask/ErrorHandlingIT.java new file mode 100644 index 000000000..f1c868f0a --- /dev/null +++ b/durabletask-client/src/test/java/io/dapr/durabletask/ErrorHandlingIT.java @@ -0,0 +1,306 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package io.dapr.durabletask; + +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; + +import java.time.Duration; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.junit.jupiter.api.Assertions.*; + +/** + * These integration tests are designed to exercise the core, high-level error-handling features of the Durable Task + * programming model. + *

+ * These tests currently require a sidecar process to be running on the local machine (the sidecar is what accepts the + * client operations and sends invocation instructions to the DurableTaskWorker). + */ +@Tag("integration") +public class ErrorHandlingIT extends IntegrationTestBase { + @Test + void orchestratorException() throws TimeoutException { + final String orchestratorName = "OrchestratorWithException"; + final String errorMessage = "Kah-BOOOOOM!!!"; + + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + throw new RuntimeException(errorMessage); + }) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName, 0); + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.FAILED, instance.getRuntimeStatus()); + + FailureDetails details = instance.getFailureDetails(); + assertNotNull(details); + assertEquals("java.lang.RuntimeException", details.getErrorType()); + assertTrue(details.getErrorMessage().contains(errorMessage)); + assertNotNull(details.getStackTrace()); + } + } + + @ParameterizedTest + @ValueSource(booleans = {true, false}) + void activityException(boolean handleException) throws TimeoutException { + final String orchestratorName = "OrchestratorWithActivityException"; + final String activityName = "Throw"; + final String errorMessage = "Kah-BOOOOOM!!!"; + + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + try { + ctx.callActivity(activityName).await(); + } catch (TaskFailedException ex) { + if (handleException) { + ctx.complete("handled"); + } else { + throw ex; + } + } + }) + .addActivity(activityName, ctx -> { + throw new RuntimeException(errorMessage); + }) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName, ""); + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(instance); + + if (handleException) { + String result = instance.readOutputAs(String.class); + assertNotNull(result); + assertEquals("handled", result); + } else { + assertEquals(OrchestrationRuntimeStatus.FAILED, instance.getRuntimeStatus()); + + FailureDetails details = instance.getFailureDetails(); + assertNotNull(details); + + String expectedMessage = String.format( + "Task '%s' (#0) failed with an unhandled exception: %s", + activityName, + errorMessage); + assertEquals(expectedMessage, details.getErrorMessage()); + assertEquals("io.dapr.durabletask.TaskFailedException", details.getErrorType()); + assertNotNull(details.getStackTrace()); + // CONSIDER: Additional validation of getErrorDetails? 
+ } + } + } + + @ParameterizedTest + @ValueSource(ints = {1, 2, 10}) + public void retryActivityFailures(int maxNumberOfAttempts) throws TimeoutException { + // There is one task for each activity call and one task between each retry + int expectedTaskCount = (maxNumberOfAttempts * 2) - 1; + this.retryOnFailuresCoreTest(maxNumberOfAttempts, expectedTaskCount, ctx -> { + RetryPolicy retryPolicy = getCommonRetryPolicy(maxNumberOfAttempts); + ctx.callActivity( + "BustedActivity", + null, + TaskOptions.withRetryPolicy(retryPolicy)).await(); + }); + } + + @ParameterizedTest + @ValueSource(ints = {1, 2, 10}) + public void retryActivityFailuresWithCustomLogic(int maxNumberOfAttempts) throws TimeoutException { + // This gets incremented every time the retry handler is invoked + AtomicInteger retryHandlerCalls = new AtomicInteger(); + + // Run the test and get back the details of the last failure + this.retryOnFailuresCoreTest(maxNumberOfAttempts, maxNumberOfAttempts, ctx -> { + RetryHandler retryHandler = getCommonRetryHandler(retryHandlerCalls, maxNumberOfAttempts); + TaskOptions options = TaskOptions.withRetryHandler(retryHandler); + ctx.callActivity("BustedActivity", null, options).await(); + }); + + // Assert that the retry handle got invoked the expected number of times + assertEquals(maxNumberOfAttempts, retryHandlerCalls.get()); + } + + @ParameterizedTest + @ValueSource(booleans = {true, false}) + void subOrchestrationException(boolean handleException) throws TimeoutException { + final String orchestratorName = "OrchestrationWithBustedSubOrchestrator"; + final String subOrchestratorName = "BustedSubOrchestrator"; + final String errorMessage = "Kah-BOOOOOM!!!"; + + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + try { + String result = ctx.callSubOrchestrator(subOrchestratorName, "", String.class).await(); + ctx.complete(result); + } catch (TaskFailedException ex) { + if (handleException) { + ctx.complete("handled"); + } else { + throw ex; + } + } + }) + .addOrchestrator(subOrchestratorName, ctx -> { + throw new RuntimeException(errorMessage); + }) + .buildAndStart(); + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName, 1); + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(instance); + if (handleException) { + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + String result = instance.readOutputAs(String.class); + assertNotNull(result); + assertEquals("handled", result); + } else { + assertEquals(OrchestrationRuntimeStatus.FAILED, instance.getRuntimeStatus()); + FailureDetails details = instance.getFailureDetails(); + assertNotNull(details); + String expectedMessage = String.format( + "Task '%s' (#0) failed with an unhandled exception: %s", + subOrchestratorName, + errorMessage); + assertEquals(expectedMessage, details.getErrorMessage()); + assertEquals("io.dapr.durabletask.TaskFailedException", details.getErrorType()); + assertNotNull(details.getStackTrace()); + // CONSIDER: Additional validation of getStackTrace? 
+ } + } + } + + @ParameterizedTest + @ValueSource(ints = {1, 2, 10}) + public void retrySubOrchestratorFailures(int maxNumberOfAttempts) throws TimeoutException { + // There is one task for each sub-orchestrator call and one task between each retry + int expectedTaskCount = (maxNumberOfAttempts * 2) - 1; + this.retryOnFailuresCoreTest(maxNumberOfAttempts, expectedTaskCount, ctx -> { + RetryPolicy retryPolicy = getCommonRetryPolicy(maxNumberOfAttempts); + ctx.callSubOrchestrator( + "BustedSubOrchestrator", + null, + null, + TaskOptions.withRetryPolicy(retryPolicy)).await(); + }); + } + + @ParameterizedTest + @ValueSource(ints = {1, 2, 10}) + public void retrySubOrchestrationFailuresWithCustomLogic(int maxNumberOfAttempts) throws TimeoutException { + // This gets incremented every time the retry handler is invoked + AtomicInteger retryHandlerCalls = new AtomicInteger(); + + // Run the test and get back the details of the last failure + this.retryOnFailuresCoreTest(maxNumberOfAttempts, maxNumberOfAttempts, ctx -> { + RetryHandler retryHandler = getCommonRetryHandler(retryHandlerCalls, maxNumberOfAttempts); + TaskOptions options = TaskOptions.withRetryHandler(retryHandler); + ctx.callSubOrchestrator("BustedSubOrchestrator", null, null, options).await(); + }); + + // Assert that the retry handle got invoked the expected number of times + assertEquals(maxNumberOfAttempts, retryHandlerCalls.get()); + } + + private static RetryPolicy getCommonRetryPolicy(int maxNumberOfAttempts) { + // Include a small delay between each retry to exercise the implicit timer path + return new RetryPolicy(maxNumberOfAttempts, Duration.ofMillis(1)); + } + + private static RetryHandler getCommonRetryHandler(AtomicInteger handlerInvocationCounter, int maxNumberOfAttempts) { + return ctx -> { + // Retry handlers get executed on the orchestrator thread and go through replay + if (!ctx.getOrchestrationContext().getIsReplaying()) { + handlerInvocationCounter.getAndIncrement(); + } + + // The isCausedBy() method is designed to handle exception inheritance + if (!ctx.getLastFailure().isCausedBy(Exception.class)) { + return false; + } + + // This is the actual exception type we care about + if (!ctx.getLastFailure().isCausedBy(RuntimeException.class)) { + return false; + } + + // Quit after N attempts + return ctx.getLastAttemptNumber() < maxNumberOfAttempts; + }; + } + + /** + * Shared logic for execution an orchestration with an activity that constantly fails. + * + * @param maxNumberOfAttempts The expected maximum number of activity execution attempts + * @param expectedTaskCount The expected number of tasks to be scheduled by the main orchestration. + * @param mainOrchestration The main orchestration implementation, which is expected to call either the + * "BustedActivity" activity or the "BustedSubOrchestrator" sub-orchestration. + * @return Returns the details of the last activity or sub-orchestration failure. + */ + private FailureDetails retryOnFailuresCoreTest( + int maxNumberOfAttempts, + int expectedTaskCount, + TaskOrchestration mainOrchestration) throws TimeoutException { + final String orchestratorName = "MainOrchestrator"; + + AtomicInteger actualAttemptCount = new AtomicInteger(); + + // The caller of this test provides the top-level orchestration implementation. This method provides both a + // failing sub-orchestration and a failing activity implementation for it to use. The expectation is that the + // main orchestration tries to invoke just one of them and is configured with retry configuration. 
+ AtomicBoolean isActivityPath = new AtomicBoolean(false); + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, mainOrchestration) + .addOrchestrator("BustedSubOrchestrator", ctx -> { + actualAttemptCount.getAndIncrement(); + throw new RuntimeException("Error #" + actualAttemptCount.get()); + }) + .addActivity("BustedActivity", ctx -> { + actualAttemptCount.getAndIncrement(); + isActivityPath.set(true); + throw new RuntimeException("Error #" + actualAttemptCount.get()); + }) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName, ""); + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.FAILED, instance.getRuntimeStatus()); + + // Make sure the exception details are still what we expect + FailureDetails details = instance.getFailureDetails(); + assertNotNull(details); + + // Confirm the number of attempts + assertEquals(maxNumberOfAttempts, actualAttemptCount.get()); + + return details; + } + } +} \ No newline at end of file diff --git a/durabletask-client/src/test/java/io/dapr/durabletask/IntegrationTestBase.java b/durabletask-client/src/test/java/io/dapr/durabletask/IntegrationTestBase.java new file mode 100644 index 000000000..bbfcde046 --- /dev/null +++ b/durabletask-client/src/test/java/io/dapr/durabletask/IntegrationTestBase.java @@ -0,0 +1,91 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package io.dapr.durabletask; + +import org.junit.jupiter.api.AfterEach; + +import java.time.Duration; + +public class IntegrationTestBase { + protected static final Duration defaultTimeout = Duration.ofSeconds(10); + + // All tests that create a server should save it to this variable for proper shutdown + private DurableTaskGrpcWorker server; + + @AfterEach + public void shutdown() { + if (this.server != null) { + this.server.stop(); + } + } + + + protected TestDurableTaskWorkerBuilder createWorkerBuilder() { + return new TestDurableTaskWorkerBuilder(); + } + + public class TestDurableTaskWorkerBuilder { + final DurableTaskGrpcWorkerBuilder innerBuilder; + + private TestDurableTaskWorkerBuilder() { + this.innerBuilder = new DurableTaskGrpcWorkerBuilder(); + } + + public DurableTaskGrpcWorker buildAndStart() { + DurableTaskGrpcWorker server = this.innerBuilder.build(); + IntegrationTestBase.this.server = server; + server.start(); + return server; + } + + public TestDurableTaskWorkerBuilder setMaximumTimerInterval(Duration maximumTimerInterval) { + this.innerBuilder.maximumTimerInterval(maximumTimerInterval); + return this; + } + + public TestDurableTaskWorkerBuilder addOrchestrator( + String name, + TaskOrchestration implementation) { + this.innerBuilder.addOrchestration(new TaskOrchestrationFactory() { + @Override + public String getName() { + return name; + } + + @Override + public TaskOrchestration create() { + return implementation; + } + }); + return this; + } + + public TestDurableTaskWorkerBuilder addActivity( + String name, + TaskActivity implementation) { + this.innerBuilder.addActivity(new TaskActivityFactory() { + @Override + public String getName() { + return name; + } + + @Override + public TaskActivity create() { + return implementation; + } + }); + return this; + } + } +} diff --git a/durabletask-client/src/test/java/io/dapr/durabletask/TaskOptionsTest.java b/durabletask-client/src/test/java/io/dapr/durabletask/TaskOptionsTest.java new file mode 100644 index 000000000..43fad5f52 --- /dev/null +++ b/durabletask-client/src/test/java/io/dapr/durabletask/TaskOptionsTest.java @@ -0,0 +1,142 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +import org.junit.jupiter.api.Test; + +import java.time.Duration; + +import static org.junit.jupiter.api.Assertions.*; + +/** + * Unit tests for TaskOptions with cross-app workflow support. 
+ */ +public class TaskOptionsTest { + + @Test + void taskOptionsWithAppID() { + TaskOptions options = TaskOptions.withAppID("app1"); + + assertTrue(options.hasAppID()); + assertEquals("app1", options.getAppID()); + assertFalse(options.hasRetryPolicy()); + assertFalse(options.hasRetryHandler()); + } + + @Test + void taskOptionsWithRetryPolicyAndAppID() { + RetryPolicy retryPolicy = new RetryPolicy(3, Duration.ofSeconds(1)); + TaskOptions options = TaskOptions.builder() + .retryPolicy(retryPolicy) + .appID("app2") + .build(); + + assertTrue(options.hasAppID()); + assertEquals("app2", options.getAppID()); + assertTrue(options.hasRetryPolicy()); + assertEquals(retryPolicy, options.getRetryPolicy()); + assertFalse(options.hasRetryHandler()); + } + + @Test + void taskOptionsWithRetryHandlerAndAppID() { + RetryHandler retryHandler = new RetryHandler() { + @Override + public boolean handle(RetryContext context) { + return context.getLastAttemptNumber() < 2; + } + }; + TaskOptions options = TaskOptions.builder() + .retryHandler(retryHandler) + .appID("app3") + .build(); + + assertTrue(options.hasAppID()); + assertEquals("app3", options.getAppID()); + assertFalse(options.hasRetryPolicy()); + assertTrue(options.hasRetryHandler()); + assertEquals(retryHandler, options.getRetryHandler()); + } + + @Test + void taskOptionsWithoutAppID() { + TaskOptions options = TaskOptions.create(); + + assertFalse(options.hasAppID()); + assertNull(options.getAppID()); + } + + @Test + void taskOptionsWithEmptyAppID() { + TaskOptions options = TaskOptions.withAppID(""); + + assertFalse(options.hasAppID()); + assertEquals("", options.getAppID()); + } + + @Test + void taskOptionsWithNullAppID() { + TaskOptions options = TaskOptions.builder().appID(null).build(); + + assertFalse(options.hasAppID()); + assertNull(options.getAppID()); + } + + @Test + void taskOptionsWithRetryPolicy() { + RetryPolicy retryPolicy = new RetryPolicy(5, Duration.ofMinutes(1)); + TaskOptions options = TaskOptions.withRetryPolicy(retryPolicy); + + assertTrue(options.hasRetryPolicy()); + assertEquals(retryPolicy, options.getRetryPolicy()); + assertFalse(options.hasRetryHandler()); + assertFalse(options.hasAppID()); + } + + @Test + void taskOptionsWithRetryHandler() { + RetryHandler retryHandler = new RetryHandler() { + @Override + public boolean handle(RetryContext context) { + return context.getLastAttemptNumber() < 3; + } + }; + TaskOptions options = TaskOptions.withRetryHandler(retryHandler); + + assertTrue(options.hasRetryHandler()); + assertEquals(retryHandler, options.getRetryHandler()); + assertFalse(options.hasRetryPolicy()); + assertFalse(options.hasAppID()); + } + + @Test + void taskOptionsWithBuilderChaining() { + RetryPolicy retryPolicy = new RetryPolicy(3, Duration.ofSeconds(1)); + RetryHandler retryHandler = context -> true; + + TaskOptions options = TaskOptions.builder() + .retryPolicy(retryPolicy) + .retryHandler(retryHandler) + .appID("test-app") + .build(); + + assertNotNull(options); + assertTrue(options.hasRetryPolicy()); + assertEquals(retryPolicy, options.getRetryPolicy()); + assertTrue(options.hasRetryHandler()); + assertEquals(retryHandler, options.getRetryHandler()); + assertTrue(options.hasAppID()); + assertEquals("test-app", options.getAppID()); + } +} \ No newline at end of file diff --git a/pom.xml b/pom.xml index b145738aa..37c6ecea7 100644 --- a/pom.xml +++ b/pom.xml @@ -17,6 +17,7 @@ 1.69.0 3.25.5 https://raw.githubusercontent.com/dapr/dapr/v1.16.0-rc.5/dapr/proto + 
https://raw.githubusercontent.com/dapr/durabletask-protobuf/main/protos/orchestrator_service.proto 1.17.0-SNAPSHOT 1.7.1 3.8.1 @@ -41,12 +42,7 @@ 11 11 true - - 2.16.1 + 2.16.2 true true ${maven.multiModuleProjectDirectory}/spotbugs-exclude.xml @@ -80,7 +76,6 @@ 5.7.0 1.7.0 3.5.12 - 1.5.10 2.2.2 2.0.9 3.11.2 @@ -359,6 +354,11 @@ junit-jupiter-engine ${junit-bom.version} + + org.junit.jupiter + junit-jupiter-engine + ${junit-bom.version} + org.junit.jupiter junit-jupiter-params @@ -369,11 +369,6 @@ reactor-core ${reactor.version} - - io.dapr - durabletask-client - ${durabletask-client.version} - com.redis testcontainers-redis @@ -496,6 +491,10 @@ check + + io/dapr/durabletask/**/* + io/dapr/springboot/examples/**/* + BUNDLE @@ -698,6 +697,7 @@ spring-boot-examples testcontainers-dapr + durabletask-client @@ -706,6 +706,7 @@ sdk-tests spring-boot-examples + durabletask-client diff --git a/sdk-workflows/pom.xml b/sdk-workflows/pom.xml index 1773d7fa0..7fd95807f 100644 --- a/sdk-workflows/pom.xml +++ b/sdk-workflows/pom.xml @@ -45,27 +45,7 @@ io.dapr durabletask-client - - - - com.fasterxml.jackson.core - jackson-core - - - com.fasterxml.jackson.core - jackson-databind - - - com.fasterxml.jackson.core - jackson-annotations - - - com.fasterxml.jackson.datatype - jackson-datatype-jsr310 + ${project.parent.version} diff --git a/sdk-workflows/src/main/java/io/dapr/workflows/runtime/DefaultWorkflowContext.java b/sdk-workflows/src/main/java/io/dapr/workflows/runtime/DefaultWorkflowContext.java index 067850c93..4ccf73e9b 100644 --- a/sdk-workflows/src/main/java/io/dapr/workflows/runtime/DefaultWorkflowContext.java +++ b/sdk-workflows/src/main/java/io/dapr/workflows/runtime/DefaultWorkflowContext.java @@ -246,7 +246,7 @@ public void continueAsNew(Object input, boolean preserveUnprocessedEvents) { */ @Override public UUID newUuid() { - return this.innerContext.newUUID(); + return this.innerContext.newUuid(); } private TaskOptions toTaskOptions(WorkflowTaskOptions options) { diff --git a/sdk-workflows/src/test/java/io/dapr/workflows/DefaultWorkflowContextTest.java b/sdk-workflows/src/test/java/io/dapr/workflows/DefaultWorkflowContextTest.java index b573e2611..b6ca38ecb 100644 --- a/sdk-workflows/src/test/java/io/dapr/workflows/DefaultWorkflowContextTest.java +++ b/sdk-workflows/src/test/java/io/dapr/workflows/DefaultWorkflowContextTest.java @@ -422,7 +422,7 @@ public void setCustomStatusWorkflow() { @Test public void newUuidTest() { context.newUuid(); - verify(mockInnerContext, times(1)).newUUID(); + verify(mockInnerContext, times(1)).newUuid(); } @Test From 0a3cb18556978469b9a891b7a3d65f1046344b9a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 16 Dec 2025 14:34:53 -0800 Subject: [PATCH 12/18] Bump codecov/codecov-action from 5.5.1 to 5.5.2 (#1607) Bumps [codecov/codecov-action](https://github.com/codecov/codecov-action) from 5.5.1 to 5.5.2. - [Release notes](https://github.com/codecov/codecov-action/releases) - [Changelog](https://github.com/codecov/codecov-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/codecov/codecov-action/compare/v5.5.1...v5.5.2) --- updated-dependencies: - dependency-name: codecov/codecov-action dependency-version: 5.5.2 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: salaboy --- .github/workflows/build.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 47f26c9e2..1252c515f 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -33,8 +33,6 @@ jobs: run: ./mvnw clean install -B -q -DskipITs=true - name: Codecov uses: codecov/codecov-action@v5.5.1 - with: - token: ${{ secrets.CODECOV_TOKEN }} - name: Upload test report for sdk uses: actions/upload-artifact@v5 with: From 3bc93fdd914075e05c703c01ee94b73feb205e5a Mon Sep 17 00:00:00 2001 From: artur-ciocanu Date: Sat, 3 Jan 2026 04:15:48 +0200 Subject: [PATCH 13/18] Create Dapr WaitStrategy to improve ITs ergonomics (#1609) * Create Dapr WaitStrategy to improve ITs ergonomics Signed-off-by: Artur Ciocanu * Improve unit tests naming and coverage Signed-off-by: Artur Ciocanu * Fix a potential NPE and remove extra "for..." methods for pubsub and topic. Signed-off-by: Artur Ciocanu * Fix NPE properly for actor match Signed-off-by: Artur Ciocanu * Fix NPE another potential NPEs. Signed-off-by: Artur Ciocanu * Rename to use wait strategy to use abstract prefix. Signed-off-by: Artur Ciocanu * Add more tests to cover null checks Signed-off-by: Artur Ciocanu * Fix pubsub outbox IT. Signed-off-by: Artur Ciocanu * Ignore pubsub outbox for now. Signed-off-by: Artur Ciocanu * Disable pubsub outbox for now. Signed-off-by: Artur Ciocanu --------- Signed-off-by: Artur Ciocanu Signed-off-by: salaboy --- .../pubsub/outbox/DaprPubSubOutboxIT.java | 20 +- .../outbox/ProductWebhookController.java | 11 +- testcontainers-dapr/pom.xml | 4 + .../strategy/AbstractDaprWaitStrategy.java | 143 +++++++++++++ .../wait/strategy/ActorWaitStrategy.java | 68 ++++++ .../wait/strategy/DaprWait.java | 99 +++++++++ .../strategy/SubscriptionWaitStrategy.java | 68 ++++++ .../wait/strategy/metadata/Actor.java | 41 ++++ .../wait/strategy/metadata/Component.java | 61 ++++++ .../wait/strategy/metadata/Metadata.java | 82 ++++++++ .../wait/strategy/metadata/Subscription.java | 107 ++++++++++ .../wait/strategy/ActorWaitStrategyTest.java | 142 +++++++++++++ .../wait/strategy/DaprWaitTest.java | 110 ++++++++++ .../SubscriptionWaitStrategyTest.java | 154 ++++++++++++++ .../wait/strategy/metadata/MetadataTest.java | 197 ++++++++++++++++++ 15 files changed, 1298 insertions(+), 9 deletions(-) create mode 100644 testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/AbstractDaprWaitStrategy.java create mode 100644 testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/ActorWaitStrategy.java create mode 100644 testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/DaprWait.java create mode 100644 testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/SubscriptionWaitStrategy.java create mode 100644 testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/metadata/Actor.java create mode 100644 testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/metadata/Component.java create mode 100644 testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/metadata/Metadata.java create mode 100644 testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/metadata/Subscription.java create mode 100644 testcontainers-dapr/src/test/java/io/dapr/testcontainers/wait/strategy/ActorWaitStrategyTest.java create mode 100644 
testcontainers-dapr/src/test/java/io/dapr/testcontainers/wait/strategy/DaprWaitTest.java create mode 100644 testcontainers-dapr/src/test/java/io/dapr/testcontainers/wait/strategy/SubscriptionWaitStrategyTest.java create mode 100644 testcontainers-dapr/src/test/java/io/dapr/testcontainers/wait/strategy/metadata/MetadataTest.java diff --git a/sdk-tests/src/test/java/io/dapr/it/testcontainers/pubsub/outbox/DaprPubSubOutboxIT.java b/sdk-tests/src/test/java/io/dapr/it/testcontainers/pubsub/outbox/DaprPubSubOutboxIT.java index 423ae05e5..2aef82f14 100644 --- a/sdk-tests/src/test/java/io/dapr/it/testcontainers/pubsub/outbox/DaprPubSubOutboxIT.java +++ b/sdk-tests/src/test/java/io/dapr/it/testcontainers/pubsub/outbox/DaprPubSubOutboxIT.java @@ -23,11 +23,14 @@ import io.dapr.testcontainers.DaprLogLevel; import org.assertj.core.api.Assertions; import org.awaitility.Awaitility; +import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.springframework.beans.factory.annotation.Autowired; import org.springframework.boot.test.context.SpringBootTest; import org.springframework.test.context.DynamicPropertyRegistry; import org.springframework.test.context.DynamicPropertySource; @@ -44,6 +47,7 @@ import static io.dapr.it.testcontainers.ContainerConstants.DAPR_RUNTIME_IMAGE_TAG; +@Disabled("Unclear why this test is failing intermittently in CI") @SpringBootTest( webEnvironment = SpringBootTest.WebEnvironment.DEFINED_PORT, classes = { @@ -81,6 +85,9 @@ public class DaprPubSubOutboxIT { .withAppChannelAddress("host.testcontainers.internal") .withAppPort(PORT); + @Autowired + private ProductWebhookController productWebhookController; + /** * Expose the Dapr ports to the host. 
* @@ -93,17 +100,18 @@ static void daprProperties(DynamicPropertyRegistry registry) { registry.add("server.port", () -> PORT); } - - @BeforeEach - public void setUp() { + @BeforeAll + public static void beforeAll(){ org.testcontainers.Testcontainers.exposeHostPorts(PORT); } + @BeforeEach + public void beforeEach() { + Wait.forLogMessage(APP_FOUND_MESSAGE_PATTERN, 1).waitUntilReady(DAPR_CONTAINER); + } @Test public void shouldPublishUsingOutbox() throws Exception { - Wait.forLogMessage(APP_FOUND_MESSAGE_PATTERN, 1).waitUntilReady(DAPR_CONTAINER); - try (DaprClient client = DaprClientFactory.createDaprClientBuilder(DAPR_CONTAINER).build()) { ExecuteStateTransactionRequest transactionRequest = new ExecuteStateTransactionRequest(STATE_STORE_NAME); @@ -123,7 +131,7 @@ public void shouldPublishUsingOutbox() throws Exception { Awaitility.await().atMost(Duration.ofSeconds(10)) .ignoreExceptions() - .untilAsserted(() -> Assertions.assertThat(ProductWebhookController.EVENT_LIST).isNotEmpty()); + .untilAsserted(() -> Assertions.assertThat(productWebhookController.getEventList()).isNotEmpty()); } } diff --git a/sdk-tests/src/test/java/io/dapr/it/testcontainers/pubsub/outbox/ProductWebhookController.java b/sdk-tests/src/test/java/io/dapr/it/testcontainers/pubsub/outbox/ProductWebhookController.java index 283dabf88..f35f335fe 100644 --- a/sdk-tests/src/test/java/io/dapr/it/testcontainers/pubsub/outbox/ProductWebhookController.java +++ b/sdk-tests/src/test/java/io/dapr/it/testcontainers/pubsub/outbox/ProductWebhookController.java @@ -26,12 +26,17 @@ @RequestMapping("/webhooks/products") public class ProductWebhookController { - public static final List> EVENT_LIST = new CopyOnWriteArrayList<>(); + public final List> events = new CopyOnWriteArrayList<>(); @PostMapping("/created") @Topic(name = "product.created", pubsubName = "pubsub") - public void handleEvent(@RequestBody CloudEvent cloudEvent) { + public void handleEvent(@RequestBody CloudEvent cloudEvent) { System.out.println("Received product.created event: " + cloudEvent.getData()); - EVENT_LIST.add(cloudEvent); + + events.add(cloudEvent); + } + + public List> getEventList() { + return events; } } diff --git a/testcontainers-dapr/pom.xml b/testcontainers-dapr/pom.xml index 786ec56a9..04d60ec32 100644 --- a/testcontainers-dapr/pom.xml +++ b/testcontainers-dapr/pom.xml @@ -33,6 +33,10 @@ org.testcontainers testcontainers + + com.fasterxml.jackson.core + jackson-databind + diff --git a/testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/AbstractDaprWaitStrategy.java b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/AbstractDaprWaitStrategy.java new file mode 100644 index 000000000..06d057149 --- /dev/null +++ b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/AbstractDaprWaitStrategy.java @@ -0,0 +1,143 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package io.dapr.testcontainers.wait.strategy; + +import com.fasterxml.jackson.databind.DeserializationFeature; +import com.fasterxml.jackson.databind.ObjectMapper; +import io.dapr.testcontainers.wait.strategy.metadata.Metadata; +import org.testcontainers.containers.ContainerLaunchException; +import org.testcontainers.containers.wait.strategy.AbstractWaitStrategy; +import org.testcontainers.shaded.org.awaitility.Awaitility; + +import java.io.IOException; +import java.net.HttpURLConnection; +import java.net.URL; +import java.time.Duration; +import java.util.concurrent.TimeUnit; +import java.util.function.Predicate; + +/** + * Base wait strategy for Dapr containers that polls the metadata endpoint. + * Subclasses implement specific conditions to wait for. + */ +public abstract class AbstractDaprWaitStrategy extends AbstractWaitStrategy { + + private static final int DAPR_HTTP_PORT = 3500; + private static final String METADATA_ENDPOINT = "/v1.0/metadata"; + private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper() + .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + + private Duration pollInterval = Duration.ofMillis(500); + + /** + * Sets the poll interval for checking the metadata endpoint. + * + * @param pollInterval the interval between polling attempts + * @return this strategy for chaining + */ + public AbstractDaprWaitStrategy withPollInterval(Duration pollInterval) { + this.pollInterval = pollInterval; + return this; + } + + @Override + protected void waitUntilReady() { + String host = waitStrategyTarget.getHost(); + Integer port = waitStrategyTarget.getMappedPort(DAPR_HTTP_PORT); + String metadataUrl = String.format("http://%s:%d%s", host, port, METADATA_ENDPOINT); + + try { + Awaitility.await() + .atMost(startupTimeout.getSeconds(), TimeUnit.SECONDS) + .pollInterval(pollInterval.toMillis(), TimeUnit.MILLISECONDS) + .ignoreExceptions() + .until(() -> checkCondition(metadataUrl)); + } catch (Exception e) { + throw new ContainerLaunchException( + String.format("Timed out waiting for Dapr condition: %s", getConditionDescription()), e); + } + } + + /** + * Checks if the wait condition is satisfied. + * + * @param metadataUrl the URL to the metadata endpoint + * @return true if the condition is met + * @throws IOException if there's an error fetching metadata + */ + protected boolean checkCondition(String metadataUrl) throws IOException { + Metadata metadata = fetchMetadata(metadataUrl); + return isConditionMet(metadata); + } + + /** + * Fetches metadata from the Dapr sidecar. + * + * @param metadataUrl the URL to fetch metadata from + * @return the parsed metadata + * @throws IOException if there's an error fetching or parsing + */ + protected Metadata fetchMetadata(String metadataUrl) throws IOException { + HttpURLConnection connection = (HttpURLConnection) new URL(metadataUrl).openConnection(); + connection.setRequestMethod("GET"); + connection.setConnectTimeout(1000); + connection.setReadTimeout(1000); + + try { + int responseCode = connection.getResponseCode(); + if (responseCode != 200) { + throw new IOException("Metadata endpoint returned status: " + responseCode); + } + return OBJECT_MAPPER.readValue(connection.getInputStream(), Metadata.class); + } finally { + connection.disconnect(); + } + } + + /** + * Checks if the specific wait condition is met based on the metadata. 
+ * + * @param metadata the current Dapr metadata + * @return true if the condition is satisfied + */ + protected abstract boolean isConditionMet(Metadata metadata); + + /** + * Returns a description of what this strategy is waiting for. + * + * @return a human-readable description of the condition + */ + protected abstract String getConditionDescription(); + + /** + * Creates a predicate-based wait strategy for custom conditions. + * + * @param predicate the predicate to test against metadata + * @param description a description of what the predicate checks + * @return a new wait strategy + */ + public static AbstractDaprWaitStrategy forCondition(Predicate predicate, String description) { + return new AbstractDaprWaitStrategy() { + @Override + protected boolean isConditionMet(Metadata metadata) { + return predicate.test(metadata); + } + + @Override + protected String getConditionDescription() { + return description; + } + }; + } +} diff --git a/testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/ActorWaitStrategy.java b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/ActorWaitStrategy.java new file mode 100644 index 000000000..188e3a281 --- /dev/null +++ b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/ActorWaitStrategy.java @@ -0,0 +1,68 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.testcontainers.wait.strategy; + +import io.dapr.testcontainers.wait.strategy.metadata.Actor; +import io.dapr.testcontainers.wait.strategy.metadata.Metadata; + +/** + * Wait strategy that waits for actors to be registered with Dapr. + */ +public class ActorWaitStrategy extends AbstractDaprWaitStrategy { + + private final String actorType; + + /** + * Creates a wait strategy that waits for any actor to be registered. + */ + public ActorWaitStrategy() { + this.actorType = null; + } + + /** + * Creates a wait strategy that waits for a specific actor type to be registered. 
+ * + * @param actorType the actor type to wait for + */ + public ActorWaitStrategy(String actorType) { + this.actorType = actorType; + } + + @Override + protected boolean isConditionMet(Metadata metadata) { + if (metadata == null) { + return false; + } + if (actorType == null) { + return !metadata.getActors().isEmpty(); + } + return metadata.getActors().stream() + .anyMatch(this::matchesActorType); + } + + private boolean matchesActorType(Actor actor) { + if (actor == null || actorType == null) { + return false; + } + return actorType.equals(actor.getType()); + } + + @Override + protected String getConditionDescription() { + if (actorType != null) { + return String.format("actor type '%s'", actorType); + } + return "any registered actors"; + } +} diff --git a/testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/DaprWait.java b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/DaprWait.java new file mode 100644 index 000000000..e11f70417 --- /dev/null +++ b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/DaprWait.java @@ -0,0 +1,99 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.testcontainers.wait.strategy; + +import io.dapr.testcontainers.wait.strategy.metadata.Metadata; + +import java.util.function.Predicate; + +/** + * Factory class for creating Dapr-specific wait strategies. + * + *

<p>This class provides static factory methods to create wait strategies
+ * that poll the Dapr metadata endpoint to determine when specific conditions are met.
+ * This is more reliable than log-based waiting strategies.</p>
+ *
+ * <p>Example usage:</p>
+ * <pre>{@code
+ * // Wait for a subscription to be registered
+ * DaprWait.forSubscription("pubsub", "my-topic")
+ *     .withStartupTimeout(Duration.ofSeconds(30))
+ *     .waitUntilReady(daprContainer);
+ *
+ * // Wait for any actors to be registered
+ * DaprWait.forActors()
+ *     .waitUntilReady(daprContainer);
+ *
+ * // Wait for a specific actor type
+ * DaprWait.forActorType("MyActor")
+ *     .waitUntilReady(daprContainer);
+ * }</pre>
+ * + * @see Dapr Metadata API + */ +public final class DaprWait { + + private DaprWait() { + // Utility class, no instantiation + } + + /** + * Creates a wait strategy that waits for a subscription to be registered. + * + * @param pubsubName the name of the pub/sub component (can be null to match any) + * @param topic the topic name to wait for (can be null to match any) + * @return a new subscription wait strategy + */ + public static SubscriptionWaitStrategy forSubscription(String pubsubName, String topic) { + return new SubscriptionWaitStrategy(pubsubName, topic); + } + + /** + * Creates a wait strategy that waits for any actors to be registered. + * + * @return a new actor wait strategy + */ + public static ActorWaitStrategy forActors() { + return new ActorWaitStrategy(); + } + + /** + * Creates a wait strategy that waits for a specific actor type to be registered. + * + * @param actorType the actor type to wait for + * @return a new actor wait strategy + */ + public static ActorWaitStrategy forActorType(String actorType) { + return new ActorWaitStrategy(actorType); + } + + /** + * Creates a wait strategy with a custom condition based on Dapr metadata. + * + *

<p>Example:</p>
+   * <pre>{@code
+   * DaprWait.forCondition(
+   *     metadata -> metadata.getComponents().size() >= 2,
+   *     "at least 2 components to be loaded"
+   * );
+   * }</pre>
+ * + * @param predicate the condition to check against the metadata + * @param description a human-readable description of the condition + * @return a new custom wait strategy + */ + public static AbstractDaprWaitStrategy forCondition(Predicate predicate, String description) { + return AbstractDaprWaitStrategy.forCondition(predicate, description); + } +} diff --git a/testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/SubscriptionWaitStrategy.java b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/SubscriptionWaitStrategy.java new file mode 100644 index 000000000..4fff91a63 --- /dev/null +++ b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/SubscriptionWaitStrategy.java @@ -0,0 +1,68 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.testcontainers.wait.strategy; + +import io.dapr.testcontainers.wait.strategy.metadata.Metadata; +import io.dapr.testcontainers.wait.strategy.metadata.Subscription; + +/** + * Wait strategy that waits for a specific subscription to be registered with Dapr. + */ +public class SubscriptionWaitStrategy extends AbstractDaprWaitStrategy { + + private final String pubsubName; + private final String topic; + + /** + * Creates a wait strategy for a specific subscription. 
+ * + * @param pubsubName the name of the pub/sub component + * @param topic the topic name to wait for + */ + public SubscriptionWaitStrategy(String pubsubName, String topic) { + this.pubsubName = pubsubName; + this.topic = topic; + } + + @Override + protected boolean isConditionMet(Metadata metadata) { + if (metadata == null) { + return false; + } + return metadata.getSubscriptions().stream() + .anyMatch(this::matchesSubscription); + } + + private boolean matchesSubscription(Subscription subscription) { + if (subscription == null) { + return false; + } + boolean pubsubMatches = pubsubName == null || pubsubName.equals(subscription.getPubsubname()); + boolean topicMatches = topic == null || topic.equals(subscription.getTopic()); + return pubsubMatches && topicMatches; + } + + @Override + protected String getConditionDescription() { + if (pubsubName != null && topic != null) { + return String.format("subscription for pubsub '%s' and topic '%s'", pubsubName, topic); + } else if (pubsubName != null) { + return String.format("subscription for pubsub '%s'", pubsubName); + } else if (topic != null) { + return String.format("subscription for topic '%s'", topic); + } else { + return "any subscription"; + } + } +} diff --git a/testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/metadata/Actor.java b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/metadata/Actor.java new file mode 100644 index 000000000..8a859151c --- /dev/null +++ b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/metadata/Actor.java @@ -0,0 +1,41 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.testcontainers.wait.strategy.metadata; + +/** + * Represents an actor entry from the Dapr metadata API response. + */ +public class Actor { + private String type; + private int count; + + public Actor() { + } + + public String getType() { + return type; + } + + public void setType(String type) { + this.type = type; + } + + public int getCount() { + return count; + } + + public void setCount(int count) { + this.count = count; + } +} diff --git a/testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/metadata/Component.java b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/metadata/Component.java new file mode 100644 index 000000000..08915b18b --- /dev/null +++ b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/metadata/Component.java @@ -0,0 +1,61 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.testcontainers.wait.strategy.metadata; + +import java.util.List; + +/** + * Represents a component entry from the Dapr metadata API response. + */ +public class Component { + private String name; + private String type; + private String version; + private List capabilities; + + public Component() { + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getType() { + return type; + } + + public void setType(String type) { + this.type = type; + } + + public String getVersion() { + return version; + } + + public void setVersion(String version) { + this.version = version; + } + + public List getCapabilities() { + return capabilities; + } + + public void setCapabilities(List capabilities) { + this.capabilities = capabilities; + } +} diff --git a/testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/metadata/Metadata.java b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/metadata/Metadata.java new file mode 100644 index 000000000..4ad8080d8 --- /dev/null +++ b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/metadata/Metadata.java @@ -0,0 +1,82 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.testcontainers.wait.strategy.metadata; + +import java.util.Collections; +import java.util.List; + +/** + * Represents the response from the Dapr metadata API (/v1.0/metadata). + * + * @see Dapr Metadata API + */ +public class Metadata { + private String id; + private String runtimeVersion; + private List enabledFeatures; + private List actors; + private List components; + private List subscriptions; + + public Metadata() { + } + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public String getRuntimeVersion() { + return runtimeVersion; + } + + public void setRuntimeVersion(String runtimeVersion) { + this.runtimeVersion = runtimeVersion; + } + + public List getEnabledFeatures() { + return enabledFeatures; + } + + public void setEnabledFeatures(List enabledFeatures) { + this.enabledFeatures = enabledFeatures; + } + + public List getActors() { + return actors != null ? actors : Collections.emptyList(); + } + + public void setActors(List actors) { + this.actors = actors; + } + + public List getComponents() { + return components != null ? components : Collections.emptyList(); + } + + public void setComponents(List components) { + this.components = components; + } + + public List getSubscriptions() { + return subscriptions != null ? 
subscriptions : Collections.emptyList(); + } + + public void setSubscriptions(List subscriptions) { + this.subscriptions = subscriptions; + } +} diff --git a/testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/metadata/Subscription.java b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/metadata/Subscription.java new file mode 100644 index 000000000..8d775b600 --- /dev/null +++ b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/metadata/Subscription.java @@ -0,0 +1,107 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.testcontainers.wait.strategy.metadata; + +import java.util.List; +import java.util.Map; + +/** + * Represents a subscription entry from the Dapr metadata API response. + */ +public class Subscription { + private String pubsubname; + private String topic; + private String deadLetterTopic; + private Map metadata; + private List rules; + private String type; + + public Subscription() { + } + + public String getPubsubname() { + return pubsubname; + } + + public void setPubsubname(String pubsubname) { + this.pubsubname = pubsubname; + } + + public String getTopic() { + return topic; + } + + public void setTopic(String topic) { + this.topic = topic; + } + + public String getDeadLetterTopic() { + return deadLetterTopic; + } + + public void setDeadLetterTopic(String deadLetterTopic) { + this.deadLetterTopic = deadLetterTopic; + } + + public Map getMetadata() { + return metadata; + } + + public void setMetadata(Map metadata) { + this.metadata = metadata; + } + + public List getRules() { + return rules; + } + + public void setRules(List rules) { + this.rules = rules; + } + + public String getType() { + return type; + } + + public void setType(String type) { + this.type = type; + } + + /** + * Represents a routing rule for a subscription. + */ + public static class Rule { + private String match; + private String path; + + public Rule() { + } + + public String getMatch() { + return match; + } + + public void setMatch(String match) { + this.match = match; + } + + public String getPath() { + return path; + } + + public void setPath(String path) { + this.path = path; + } + } +} diff --git a/testcontainers-dapr/src/test/java/io/dapr/testcontainers/wait/strategy/ActorWaitStrategyTest.java b/testcontainers-dapr/src/test/java/io/dapr/testcontainers/wait/strategy/ActorWaitStrategyTest.java new file mode 100644 index 000000000..d8ae653f7 --- /dev/null +++ b/testcontainers-dapr/src/test/java/io/dapr/testcontainers/wait/strategy/ActorWaitStrategyTest.java @@ -0,0 +1,142 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.testcontainers.wait.strategy; + +import io.dapr.testcontainers.wait.strategy.metadata.Actor; +import io.dapr.testcontainers.wait.strategy.metadata.Metadata; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; + +import java.util.Arrays; +import java.util.Collections; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; + +class ActorWaitStrategyTest { + + @Test + @DisplayName("Should match any actor when no specific type is specified") + void shouldMatchAnyActorWhenNoTypeSpecified() { + ActorWaitStrategy strategy = new ActorWaitStrategy(); + Metadata metadata = createMetadataWithActor("SomeActor"); + + assertTrue(strategy.isConditionMet(metadata)); + } + + @Test + @DisplayName("Should not match when no actors exist and no type is specified") + void shouldNotMatchWhenNoActorsAndNoTypeSpecified() { + ActorWaitStrategy strategy = new ActorWaitStrategy(); + Metadata metadata = new Metadata(); + + metadata.setActors(Collections.emptyList()); + + assertFalse(strategy.isConditionMet(metadata)); + } + + @Test + @DisplayName("Should match when specific actor type exists") + void shouldMatchSpecificActorType() { + ActorWaitStrategy strategy = new ActorWaitStrategy("MyActor"); + Metadata metadata = createMetadataWithActor("MyActor"); + + assertTrue(strategy.isConditionMet(metadata)); + } + + @Test + @DisplayName("Should not match when actor type differs from expected") + void shouldNotMatchWhenActorTypeDiffers() { + ActorWaitStrategy strategy = new ActorWaitStrategy("MyActor"); + Metadata metadata = createMetadataWithActor("OtherActor"); + + assertFalse(strategy.isConditionMet(metadata)); + } + + @Test + @DisplayName("Should not match when no actors exist but specific type is expected") + void shouldNotMatchWhenNoActorsAndTypeSpecified() { + ActorWaitStrategy strategy = new ActorWaitStrategy("MyActor"); + Metadata metadata = new Metadata(); + + metadata.setActors(Collections.emptyList()); + + assertFalse(strategy.isConditionMet(metadata)); + } + + @Test + @DisplayName("Should find matching actor among multiple registered actors") + void shouldFindMatchAmongMultipleActors() { + ActorWaitStrategy strategy = new ActorWaitStrategy("TargetActor"); + + Actor actor1 = createActor("FirstActor"); + Actor actor2 = createActor("TargetActor"); + Actor actor3 = createActor("ThirdActor"); + + Metadata metadata = new Metadata(); + metadata.setActors(Arrays.asList(actor1, actor2, actor3)); + + assertTrue(strategy.isConditionMet(metadata)); + } + + @Test + @DisplayName("Should provide correct human-readable condition description") + void shouldProvideCorrectDescription() { + ActorWaitStrategy anyActors = new ActorWaitStrategy(); + assertEquals("any registered actors", anyActors.getConditionDescription()); + + ActorWaitStrategy specificActor = new ActorWaitStrategy("MyActor"); + assertEquals("actor type 'MyActor'", specificActor.getConditionDescription()); + } + + @Test + @DisplayName("Should handle null actor in list without 
throwing NPE") + void shouldHandleNullActorInList() { + ActorWaitStrategy strategy = new ActorWaitStrategy("TargetActor"); + Metadata metadata = new Metadata(); + metadata.setActors(Arrays.asList(null, createActor("TargetActor"))); + + assertTrue(strategy.isConditionMet(metadata)); + } + + @Test + @DisplayName("Should return false when metadata is null") + void shouldReturnFalseWhenMetadataIsNull() { + ActorWaitStrategy strategy = new ActorWaitStrategy(); + + assertFalse(strategy.isConditionMet(null)); + } + + @Test + @DisplayName("Should return false when metadata is null and actor type is specified") + void shouldReturnFalseWhenMetadataIsNullAndActorTypeSpecified() { + ActorWaitStrategy strategy = new ActorWaitStrategy("MyActor"); + + assertFalse(strategy.isConditionMet(null)); + } + + private Metadata createMetadataWithActor(String actorType) { + Metadata metadata = new Metadata(); + metadata.setActors(Collections.singletonList(createActor(actorType))); + return metadata; + } + + private Actor createActor(String type) { + Actor actor = new Actor(); + actor.setType(type); + actor.setCount(1); + return actor; + } +} diff --git a/testcontainers-dapr/src/test/java/io/dapr/testcontainers/wait/strategy/DaprWaitTest.java b/testcontainers-dapr/src/test/java/io/dapr/testcontainers/wait/strategy/DaprWaitTest.java new file mode 100644 index 000000000..556f76cf7 --- /dev/null +++ b/testcontainers-dapr/src/test/java/io/dapr/testcontainers/wait/strategy/DaprWaitTest.java @@ -0,0 +1,110 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package io.dapr.testcontainers.wait.strategy; + +import io.dapr.testcontainers.wait.strategy.metadata.Component; +import io.dapr.testcontainers.wait.strategy.metadata.Metadata; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; + +import java.time.Duration; +import java.util.Arrays; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertInstanceOf; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + +class DaprWaitTest { + + @Test + @DisplayName("forSubscription should create SubscriptionWaitStrategy") + void forSubscriptionShouldCreateSubscriptionWaitStrategy() { + AbstractDaprWaitStrategy strategy = DaprWait.forSubscription("pubsub", "orders"); + + assertInstanceOf(SubscriptionWaitStrategy.class, strategy); + } + + @Test + @DisplayName("forSubscription with null topic should match any topic") + void forSubscriptionWithNullTopicShouldMatchAnyTopic() { + SubscriptionWaitStrategy strategy = DaprWait.forSubscription("pubsub", null); + + assertNotNull(strategy); + assertEquals("subscription for pubsub 'pubsub'", strategy.getConditionDescription()); + } + + @Test + @DisplayName("forSubscription with null pubsub should match any pubsub") + void forSubscriptionWithNullPubsubShouldMatchAnyPubsub() { + SubscriptionWaitStrategy strategy = DaprWait.forSubscription(null, "orders"); + + assertNotNull(strategy); + assertEquals("subscription for topic 'orders'", strategy.getConditionDescription()); + } + + @Test + @DisplayName("forActors should create ActorWaitStrategy for any actor") + void forActorsShouldCreateActorWaitStrategyForAnyActor() { + ActorWaitStrategy strategy = DaprWait.forActors(); + + assertNotNull(strategy); + assertEquals("any registered actors", strategy.getConditionDescription()); + } + + @Test + @DisplayName("forActorType should create ActorWaitStrategy for specific type") + void forActorTypeShouldCreateActorWaitStrategyForSpecificType() { + ActorWaitStrategy strategy = DaprWait.forActorType("MyActor"); + + assertNotNull(strategy); + assertEquals("actor type 'MyActor'", strategy.getConditionDescription()); + } + + @Test + @DisplayName("forCondition should create custom wait strategy with predicate") + void forConditionShouldCreateCustomWaitStrategy() { + AbstractDaprWaitStrategy strategy = DaprWait.forCondition( + metadata -> metadata.getComponents().size() >= 2, + "at least 2 components" + ); + + assertNotNull(strategy); + assertEquals("at least 2 components", strategy.getConditionDescription()); + + Metadata metadataWith2Components = new Metadata(); + Component comp1 = new Component(); + comp1.setName("comp1"); + Component comp2 = new Component(); + comp2.setName("comp2"); + metadataWith2Components.setComponents(Arrays.asList(comp1, comp2)); + + Metadata metadataWith1Component = new Metadata(); + metadataWith1Component.setComponents(Arrays.asList(comp1)); + + assertTrue(strategy.isConditionMet(metadataWith2Components)); + assertFalse(strategy.isConditionMet(metadataWith1Component)); + } + + @Test + @DisplayName("Strategy should support fluent configuration with poll interval and timeout") + void strategyShouldSupportFluentConfiguration() { + AbstractDaprWaitStrategy strategy = DaprWait.forSubscription("pubsub", "orders") + .withPollInterval(Duration.ofMillis(250)); + strategy.withStartupTimeout(Duration.ofSeconds(60)); + + assertNotNull(strategy); + } 
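+  // Illustrative usage sketch (hedged; "daprContainer" is an assumed variable name for any
+  // running DaprContainer, not something defined in this patch): the strategies exercised
+  // above against in-memory Metadata objects would be applied to a live sidecar in an
+  // integration test roughly as follows:
+  //
+  //   DaprWait.forSubscription("pubsub", "orders")
+  //       .withPollInterval(Duration.ofMillis(250))
+  //       .withStartupTimeout(Duration.ofSeconds(60))
+  //       .waitUntilReady(daprContainer);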
+} diff --git a/testcontainers-dapr/src/test/java/io/dapr/testcontainers/wait/strategy/SubscriptionWaitStrategyTest.java b/testcontainers-dapr/src/test/java/io/dapr/testcontainers/wait/strategy/SubscriptionWaitStrategyTest.java new file mode 100644 index 000000000..014c883c1 --- /dev/null +++ b/testcontainers-dapr/src/test/java/io/dapr/testcontainers/wait/strategy/SubscriptionWaitStrategyTest.java @@ -0,0 +1,154 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.testcontainers.wait.strategy; + +import io.dapr.testcontainers.wait.strategy.metadata.Metadata; +import io.dapr.testcontainers.wait.strategy.metadata.Subscription; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; + +import java.util.Arrays; +import java.util.Collections; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; + +class SubscriptionWaitStrategyTest { + + @Test + @DisplayName("Should match when pubsub and topic exactly match") + void shouldMatchExactSubscription() { + SubscriptionWaitStrategy strategy = new SubscriptionWaitStrategy("pubsub", "orders"); + Metadata metadata = createMetadataWithSubscription("pubsub", "orders"); + + assertTrue(strategy.isConditionMet(metadata)); + } + + @Test + @DisplayName("Should not match when pubsub name differs") + void shouldNotMatchWhenPubsubDiffers() { + SubscriptionWaitStrategy strategy = new SubscriptionWaitStrategy("pubsub", "orders"); + Metadata metadata = createMetadataWithSubscription("other-pubsub", "orders"); + + assertFalse(strategy.isConditionMet(metadata)); + } + + @Test + @DisplayName("Should not match when topic name differs") + void shouldNotMatchWhenTopicDiffers() { + SubscriptionWaitStrategy strategy = new SubscriptionWaitStrategy("pubsub", "orders"); + Metadata metadata = createMetadataWithSubscription("pubsub", "other-topic"); + + assertFalse(strategy.isConditionMet(metadata)); + } + + @Test + @DisplayName("Should not match when no subscriptions exist") + void shouldNotMatchWhenNoSubscriptions() { + SubscriptionWaitStrategy strategy = new SubscriptionWaitStrategy("pubsub", "orders"); + Metadata metadata = new Metadata(); + metadata.setSubscriptions(Collections.emptyList()); + + assertFalse(strategy.isConditionMet(metadata)); + } + + @Test + @DisplayName("Should match any topic when topic filter is null") + void shouldMatchAnyTopicWhenTopicIsNull() { + SubscriptionWaitStrategy strategy = new SubscriptionWaitStrategy("pubsub", null); + Metadata metadata = createMetadataWithSubscription("pubsub", "any-topic"); + + assertTrue(strategy.isConditionMet(metadata)); + } + + @Test + @DisplayName("Should match any pubsub when pubsub filter is null") + void shouldMatchAnyPubsubWhenPubsubIsNull() { + SubscriptionWaitStrategy strategy = new SubscriptionWaitStrategy(null, "orders"); + Metadata metadata = createMetadataWithSubscription("any-pubsub", "orders"); + + 
assertTrue(strategy.isConditionMet(metadata)); + } + + @Test + @DisplayName("Should match any subscription when both filters are null") + void shouldMatchAnySubscriptionWhenBothAreNull() { + SubscriptionWaitStrategy strategy = new SubscriptionWaitStrategy(null, null); + Metadata metadata = createMetadataWithSubscription("any-pubsub", "any-topic"); + + assertTrue(strategy.isConditionMet(metadata)); + } + + @Test + @DisplayName("Should find matching subscription among multiple subscriptions") + void shouldFindMatchAmongMultipleSubscriptions() { + SubscriptionWaitStrategy strategy = new SubscriptionWaitStrategy("pubsub", "orders"); + Subscription sub1 = createSubscription("other-pubsub", "other-topic"); + Subscription sub2 = createSubscription("pubsub", "orders"); + Subscription sub3 = createSubscription("another-pubsub", "another-topic"); + + Metadata metadata = new Metadata(); + metadata.setSubscriptions(Arrays.asList(sub1, sub2, sub3)); + + assertTrue(strategy.isConditionMet(metadata)); + } + + @Test + @DisplayName("Should provide correct human-readable condition description") + void shouldProvideCorrectDescription() { + SubscriptionWaitStrategy strategy = new SubscriptionWaitStrategy("pubsub", "orders"); + assertEquals("subscription for pubsub 'pubsub' and topic 'orders'", strategy.getConditionDescription()); + + SubscriptionWaitStrategy pubsubOnly = new SubscriptionWaitStrategy("pubsub", null); + assertEquals("subscription for pubsub 'pubsub'", pubsubOnly.getConditionDescription()); + + SubscriptionWaitStrategy topicOnly = new SubscriptionWaitStrategy(null, "orders"); + assertEquals("subscription for topic 'orders'", topicOnly.getConditionDescription()); + + SubscriptionWaitStrategy any = new SubscriptionWaitStrategy(null, null); + assertEquals("any subscription", any.getConditionDescription()); + } + + @Test + @DisplayName("Should return false when metadata is null") + void shouldReturnFalseWhenMetadataIsNull() { + SubscriptionWaitStrategy strategy = new SubscriptionWaitStrategy("pubsub", "orders"); + + assertFalse(strategy.isConditionMet(null)); + } + + @Test + @DisplayName("Should handle null subscription in list without throwing NPE") + void shouldHandleNullSubscriptionInList() { + SubscriptionWaitStrategy strategy = new SubscriptionWaitStrategy("pubsub", "orders"); + Metadata metadata = new Metadata(); + metadata.setSubscriptions(Arrays.asList(null, createSubscription("pubsub", "orders"))); + + assertTrue(strategy.isConditionMet(metadata)); + } + + private Metadata createMetadataWithSubscription(String pubsubName, String topic) { + Metadata metadata = new Metadata(); + metadata.setSubscriptions(Collections.singletonList(createSubscription(pubsubName, topic))); + return metadata; + } + + private Subscription createSubscription(String pubsubName, String topic) { + Subscription subscription = new Subscription(); + subscription.setPubsubname(pubsubName); + subscription.setTopic(topic); + return subscription; + } +} diff --git a/testcontainers-dapr/src/test/java/io/dapr/testcontainers/wait/strategy/metadata/MetadataTest.java b/testcontainers-dapr/src/test/java/io/dapr/testcontainers/wait/strategy/metadata/MetadataTest.java new file mode 100644 index 000000000..c7f7c579c --- /dev/null +++ b/testcontainers-dapr/src/test/java/io/dapr/testcontainers/wait/strategy/metadata/MetadataTest.java @@ -0,0 +1,197 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.testcontainers.wait.strategy.metadata; + +import com.fasterxml.jackson.databind.DeserializationFeature; +import com.fasterxml.jackson.databind.ObjectMapper; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; + +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + +class MetadataTest { + + private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper() + .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + + @Test + @DisplayName("Metadata should return empty list when actors is null") + void metadataShouldReturnEmptyListWhenActorsIsNull() { + Metadata metadata = new Metadata(); + + assertNotNull(metadata.getActors()); + assertTrue(metadata.getActors().isEmpty()); + } + + @Test + @DisplayName("Metadata should return empty list when components is null") + void metadataShouldReturnEmptyListWhenComponentsIsNull() { + Metadata metadata = new Metadata(); + + assertNotNull(metadata.getComponents()); + assertTrue(metadata.getComponents().isEmpty()); + } + + @Test + @DisplayName("Metadata should return empty list when subscriptions is null") + void metadataShouldReturnEmptyListWhenSubscriptionsIsNull() { + Metadata metadata = new Metadata(); + + assertNotNull(metadata.getSubscriptions()); + assertTrue(metadata.getSubscriptions().isEmpty()); + } + + @Test + @DisplayName("Metadata should store and retrieve all fields correctly") + void metadataShouldStoreAndRetrieveAllFields() { + Metadata metadata = new Metadata(); + metadata.setId("test-app"); + metadata.setRuntimeVersion("1.14.0"); + metadata.setEnabledFeatures(Arrays.asList("feature1", "feature2")); + + Actor actor = new Actor(); + actor.setType("MyActor"); + metadata.setActors(Collections.singletonList(actor)); + + Component component = new Component(); + component.setName("statestore"); + metadata.setComponents(Collections.singletonList(component)); + + Subscription subscription = new Subscription(); + subscription.setTopic("orders"); + metadata.setSubscriptions(Collections.singletonList(subscription)); + + assertEquals("test-app", metadata.getId()); + assertEquals("1.14.0", metadata.getRuntimeVersion()); + assertEquals(2, metadata.getEnabledFeatures().size()); + assertEquals(1, metadata.getActors().size()); + assertEquals(1, metadata.getComponents().size()); + assertEquals(1, metadata.getSubscriptions().size()); + } + + @Test + @DisplayName("Actor should store and retrieve all fields correctly") + void actorShouldStoreAndRetrieveAllFields() { + Actor actor = new Actor(); + actor.setType("OrderActor"); + actor.setCount(5); + + assertEquals("OrderActor", actor.getType()); + assertEquals(5, actor.getCount()); + } + + @Test + @DisplayName("Component should store and retrieve all fields correctly") + void componentShouldStoreAndRetrieveAllFields() { + Component component = new Component(); + component.setName("statestore"); + 
component.setType("state.redis"); + component.setVersion("v1"); + component.setCapabilities(Arrays.asList("ETAG", "TRANSACTIONAL")); + + assertEquals("statestore", component.getName()); + assertEquals("state.redis", component.getType()); + assertEquals("v1", component.getVersion()); + assertEquals(2, component.getCapabilities().size()); + assertTrue(component.getCapabilities().contains("ETAG")); + } + + @Test + @DisplayName("Subscription should store and retrieve all fields including rules") + void subscriptionShouldStoreAndRetrieveAllFields() { + Subscription subscription = new Subscription(); + subscription.setPubsubname("pubsub"); + subscription.setTopic("orders"); + subscription.setDeadLetterTopic("orders-dlq"); + subscription.setType("declarative"); + + Map meta = new HashMap<>(); + meta.put("key", "value"); + subscription.setMetadata(meta); + + Subscription.Rule rule = new Subscription.Rule(); + rule.setMatch("event.type == 'order'"); + rule.setPath("/orders"); + subscription.setRules(Collections.singletonList(rule)); + + assertEquals("pubsub", subscription.getPubsubname()); + assertEquals("orders", subscription.getTopic()); + assertEquals("orders-dlq", subscription.getDeadLetterTopic()); + assertEquals("declarative", subscription.getType()); + assertEquals("value", subscription.getMetadata().get("key")); + assertEquals(1, subscription.getRules().size()); + assertEquals("event.type == 'order'", subscription.getRules().get(0).getMatch()); + assertEquals("/orders", subscription.getRules().get(0).getPath()); + } + + @Test + @DisplayName("Should deserialize complete Dapr metadata JSON response") + void shouldDeserializeMetadataFromJson() throws Exception { + String json = "{" + + "\"id\": \"my-app\"," + + "\"runtimeVersion\": \"1.14.0\"," + + "\"enabledFeatures\": [\"ServiceInvocationStreaming\"]," + + "\"actors\": [{\"type\": \"OrderActor\", \"count\": 3}]," + + "\"components\": [{\"name\": \"statestore\", \"type\": \"state.redis\", \"version\": \"v1\", \"capabilities\": [\"ETAG\"]}]," + + "\"subscriptions\": [{" + + " \"pubsubname\": \"pubsub\"," + + " \"topic\": \"orders\"," + + " \"deadLetterTopic\": \"orders-dlq\"," + + " \"type\": \"programmatic\"," + + " \"rules\": [{\"match\": \"\", \"path\": \"/orders\"}]" + + "}]" + + "}"; + + Metadata metadata = OBJECT_MAPPER.readValue(json, Metadata.class); + + assertEquals("my-app", metadata.getId()); + assertEquals("1.14.0", metadata.getRuntimeVersion()); + assertEquals(1, metadata.getEnabledFeatures().size()); + + assertEquals(1, metadata.getActors().size()); + assertEquals("OrderActor", metadata.getActors().get(0).getType()); + assertEquals(3, metadata.getActors().get(0).getCount()); + + assertEquals(1, metadata.getComponents().size()); + assertEquals("statestore", metadata.getComponents().get(0).getName()); + assertEquals("state.redis", metadata.getComponents().get(0).getType()); + + assertEquals(1, metadata.getSubscriptions().size()); + assertEquals("pubsub", metadata.getSubscriptions().get(0).getPubsubname()); + assertEquals("orders", metadata.getSubscriptions().get(0).getTopic()); + assertEquals(1, metadata.getSubscriptions().get(0).getRules().size()); + } + + @Test + @DisplayName("Should ignore unknown fields when deserializing JSON") + void shouldDeserializeMetadataWithUnknownFields() throws Exception { + String json = "{" + + "\"id\": \"my-app\"," + + "\"unknownField\": \"should be ignored\"," + + "\"anotherUnknown\": {\"nested\": true}" + + "}"; + + Metadata metadata = OBJECT_MAPPER.readValue(json, Metadata.class); + + 
assertEquals("my-app", metadata.getId()); + assertTrue(metadata.getActors().isEmpty()); + } +} From 4917b8f046ab5280ca124d1dad3eea23aec3ddfa Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 2 Jan 2026 19:35:20 -0800 Subject: [PATCH 14/18] Bump actions/upload-artifact from 4 to 6 (#1606) Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 4 to 6. - [Release notes](https://github.com/actions/upload-artifact/releases) - [Commits](https://github.com/actions/upload-artifact/compare/v4...v6) --- updated-dependencies: - dependency-name: actions/upload-artifact dependency-version: '6' dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Cassie Coyle Co-authored-by: Dapr Bot <56698301+dapr-bot@users.noreply.github.com> Signed-off-by: salaboy --- .github/workflows/build.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 1252c515f..17c5c13b5 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -34,12 +34,12 @@ jobs: - name: Codecov uses: codecov/codecov-action@v5.5.1 - name: Upload test report for sdk - uses: actions/upload-artifact@v5 + uses: actions/upload-artifact@v6 with: name: test-dapr-java-sdk-jdk${{ env.JDK_VER }} path: sdk/target/jacoco-report/ - name: Upload test report for sdk-actors - uses: actions/upload-artifact@v5 + uses: actions/upload-artifact@v6 with: name: report-dapr-java-sdk-actors-jdk${{ env.JDK_VER }} path: sdk-actors/target/jacoco-report/ @@ -83,7 +83,7 @@ jobs: run: docker kill durabletask-sidecar - name: Upload Durable Task Sidecar Logs - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v6 with: name: Durable Task Sidecar Logs path: durabletask-sidecar.log @@ -200,13 +200,13 @@ jobs: run: PRODUCT_SPRING_BOOT_VERSION=${{ matrix.spring-boot-version }} ./mvnw -B -pl !durabletask-client -Pintegration-tests dependency:copy-dependencies verify - name: Upload failsafe test report for sdk-tests on failure if: ${{ failure() && steps.integration_tests.conclusion == 'failure' }} - uses: actions/upload-artifact@v5 + uses: actions/upload-artifact@v6 with: name: failsafe-report-sdk-tests-jdk${{ matrix.java }}-sb${{ matrix.spring-boot-version }} path: sdk-tests/target/failsafe-reports - name: Upload surefire test report for sdk-tests on failure if: ${{ failure() && steps.integration_tests.conclusion == 'failure' }} - uses: actions/upload-artifact@v5 + uses: actions/upload-artifact@v6 with: name: surefire-report-sdk-tests-jdk${{ matrix.java }}-sb${{ matrix.spring-boot-version }} path: sdk-tests/target/surefire-reports From af47956fb5cbfb1b9d14ad0ff5062728b9ceb323 Mon Sep 17 00:00:00 2001 From: salaboy Date: Mon, 5 Jan 2026 16:04:30 +0100 Subject: [PATCH 15/18] Jobs promotion to DaprClient (#1602) * job promotion to DaprClient Signed-off-by: salaboy * updating Jobs readme Signed-off-by: salaboy * fixing IT tests for Jobs Signed-off-by: salaboy * Remove SDK docs due to migration to main Docs repo (#1593) * Remove SDK docs due to migration to main Docs repo Signed-off-by: Marc Duiker * Remove sed lines related to sdk docs Signed-off-by: Marc Duiker --------- Signed-off-by: Marc Duiker Co-authored-by: salaboy Signed-off-by: salaboy * adding client config for sdk tests Signed-off-by: salaboy --------- Signed-off-by: 
salaboy Signed-off-by: Marc Duiker Co-authored-by: Marc Duiker Co-authored-by: Siri Varma Vegiraju Co-authored-by: Dapr Bot <56698301+dapr-bot@users.noreply.github.com> Signed-off-by: salaboy --- .../io/dapr/examples/jobs/DemoJobsClient.java | 3 +- .../main/java/io/dapr/examples/jobs/README.md | 4 +- .../DaprClientConfiguration.java | 41 ++ .../it/testcontainers/jobs/DaprJobsIT.java | 43 +- .../main/java/io/dapr/client/DaprClient.java | 36 + .../io/dapr/client/DaprPreviewClient.java | 32 - .../io/dapr/client/DaprClientGrpcTest.java | 647 +++++++++++++++++ .../client/DaprPreviewClientGrpcTest.java | 650 ------------------ 8 files changed, 749 insertions(+), 707 deletions(-) create mode 100644 sdk-tests/src/test/java/io/dapr/it/testcontainers/DaprClientConfiguration.java diff --git a/examples/src/main/java/io/dapr/examples/jobs/DemoJobsClient.java b/examples/src/main/java/io/dapr/examples/jobs/DemoJobsClient.java index 87ccf0801..ddc8ac78e 100644 --- a/examples/src/main/java/io/dapr/examples/jobs/DemoJobsClient.java +++ b/examples/src/main/java/io/dapr/examples/jobs/DemoJobsClient.java @@ -13,6 +13,7 @@ package io.dapr.examples.jobs; +import io.dapr.client.DaprClient; import io.dapr.client.DaprClientBuilder; import io.dapr.client.DaprPreviewClient; import io.dapr.client.domain.GetJobRequest; @@ -35,7 +36,7 @@ public static void main(String[] args) throws Exception { Properties.GRPC_PORT, "51439" ); - try (DaprPreviewClient client = new DaprClientBuilder().withPropertyOverrides(overrides).buildPreviewClient()) { + try (DaprClient client = new DaprClientBuilder().withPropertyOverrides(overrides).build()) { // Schedule a job. System.out.println("**** Scheduling a Job with name dapr-jobs-1 *****"); diff --git a/examples/src/main/java/io/dapr/examples/jobs/README.md b/examples/src/main/java/io/dapr/examples/jobs/README.md index 4b899ac4a..392c0969b 100644 --- a/examples/src/main/java/io/dapr/examples/jobs/README.md +++ b/examples/src/main/java/io/dapr/examples/jobs/README.md @@ -64,7 +64,7 @@ export DAPR_API_TOKEN="your-dapr-api-token" This example uses the Java SDK Dapr client in order to **Schedule and Get** Jobs. `DemoJobsClient.java` is the example class demonstrating these features. -Kindly check [DaprPreviewClient.java](https://github.com/dapr/java-sdk/blob/master/sdk/src/main/java/io/dapr/client/DaprPreviewClient.java) for a detailed description of the supported APIs. +Kindly check [DaprClient.java](https://github.com/dapr/java-sdk/blob/master/sdk/src/main/java/io/dapr/client/DaprClient.java) for a detailed description of the supported APIs. ```java public class DemoJobsClient { @@ -77,7 +77,7 @@ public class DemoJobsClient { Properties.GRPC_PORT, "51439" ); - try (DaprPreviewClient client = new DaprClientBuilder().withPropertyOverrides(overrides).buildPreviewClient()) { + try (DaprClient client = new DaprClientBuilder().withPropertyOverrides(overrides).build()) { // Schedule a job. ScheduleJobRequest scheduleJobRequest = new ScheduleJobRequest("dapr-job-1", diff --git a/sdk-tests/src/test/java/io/dapr/it/testcontainers/DaprClientConfiguration.java b/sdk-tests/src/test/java/io/dapr/it/testcontainers/DaprClientConfiguration.java new file mode 100644 index 000000000..80046d45e --- /dev/null +++ b/sdk-tests/src/test/java/io/dapr/it/testcontainers/DaprClientConfiguration.java @@ -0,0 +1,41 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.it.testcontainers; + +import io.dapr.client.DaprClient; +import io.dapr.client.DaprClientBuilder; +import io.dapr.client.DaprPreviewClient; +import io.dapr.config.Properties; +import io.dapr.config.Property; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +import java.util.Map; + +@Configuration +public class DaprClientConfiguration { + @Bean + public DaprClient daprClient( + @Value("${dapr.http.endpoint}") String daprHttpEndpoint, + @Value("${dapr.grpc.endpoint}") String daprGrpcEndpoint + ){ + Map, String> overrides = Map.of( + Properties.HTTP_ENDPOINT, daprHttpEndpoint, + Properties.GRPC_ENDPOINT, daprGrpcEndpoint + ); + + return new DaprClientBuilder().withPropertyOverrides(overrides).build(); + } +} diff --git a/sdk-tests/src/test/java/io/dapr/it/testcontainers/jobs/DaprJobsIT.java b/sdk-tests/src/test/java/io/dapr/it/testcontainers/jobs/DaprJobsIT.java index ac2b4a71b..b17c83413 100644 --- a/sdk-tests/src/test/java/io/dapr/it/testcontainers/jobs/DaprJobsIT.java +++ b/sdk-tests/src/test/java/io/dapr/it/testcontainers/jobs/DaprJobsIT.java @@ -13,7 +13,7 @@ package io.dapr.it.testcontainers.jobs; -import io.dapr.client.DaprPreviewClient; +import io.dapr.client.DaprClient; import io.dapr.client.domain.ConstantFailurePolicy; import io.dapr.client.domain.DeleteJobRequest; import io.dapr.client.domain.DropFailurePolicy; @@ -22,13 +22,12 @@ import io.dapr.client.domain.GetJobResponse; import io.dapr.client.domain.JobSchedule; import io.dapr.client.domain.ScheduleJobRequest; -import io.dapr.it.testcontainers.DaprPreviewClientConfiguration; +import io.dapr.it.testcontainers.DaprClientConfiguration; import io.dapr.testcontainers.DaprContainer; import io.dapr.testcontainers.DaprLogLevel; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; -import org.junit.runner.notification.Failure; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.boot.test.context.SpringBootTest; import org.springframework.boot.test.context.SpringBootTest.WebEnvironment; @@ -51,7 +50,7 @@ @SpringBootTest( webEnvironment = WebEnvironment.RANDOM_PORT, classes = { - DaprPreviewClientConfiguration.class, + DaprClientConfiguration.class, TestJobsApplication.class } ) @@ -85,7 +84,7 @@ static void daprProperties(DynamicPropertyRegistry registry) { } @Autowired - private DaprPreviewClient daprPreviewClient; + private DaprClient daprClient; @BeforeEach public void setUp(){ @@ -98,12 +97,12 @@ public void testJobScheduleCreationWithDueTime() { .withZone(ZoneOffset.UTC); Instant currentTime = Instant.now(); - daprPreviewClient.scheduleJob(new ScheduleJobRequest("Job", currentTime).setOverwrite(true)).block(); + daprClient.scheduleJob(new ScheduleJobRequest("Job", currentTime).setOverwrite(true)).block(); GetJobResponse getJobResponse = - daprPreviewClient.getJob(new GetJobRequest("Job")).block(); + daprClient.getJob(new GetJobRequest("Job")).block(); - daprPreviewClient.deleteJob(new 
DeleteJobRequest("Job")).block(); + daprClient.deleteJob(new DeleteJobRequest("Job")).block(); assertEquals(iso8601Formatter.format(currentTime), getJobResponse.getDueTime().toString()); assertEquals("Job", getJobResponse.getName()); @@ -115,13 +114,13 @@ public void testJobScheduleCreationWithSchedule() { .withZone(ZoneOffset.UTC); Instant currentTime = Instant.now(); - daprPreviewClient.scheduleJob(new ScheduleJobRequest("Job", JobSchedule.hourly()) + daprClient.scheduleJob(new ScheduleJobRequest("Job", JobSchedule.hourly()) .setDueTime(currentTime).setOverwrite(true)).block(); GetJobResponse getJobResponse = - daprPreviewClient.getJob(new GetJobRequest("Job")).block(); + daprClient.getJob(new GetJobRequest("Job")).block(); - daprPreviewClient.deleteJob(new DeleteJobRequest("Job")).block(); + daprClient.deleteJob(new DeleteJobRequest("Job")).block(); assertEquals(iso8601Formatter.format(currentTime), getJobResponse.getDueTime().toString()); assertEquals(JobSchedule.hourly().getExpression(), getJobResponse.getSchedule().getExpression()); @@ -136,7 +135,7 @@ public void testJobScheduleCreationWithAllParameters() { String cronExpression = "2 * 3 * * FRI"; - daprPreviewClient.scheduleJob(new ScheduleJobRequest("Job", currentTime) + daprClient.scheduleJob(new ScheduleJobRequest("Job", currentTime) .setTtl(currentTime.plus(2, ChronoUnit.HOURS)) .setData("Job data".getBytes()) .setRepeat(3) @@ -144,9 +143,9 @@ public void testJobScheduleCreationWithAllParameters() { .setSchedule(JobSchedule.fromString(cronExpression))).block(); GetJobResponse getJobResponse = - daprPreviewClient.getJob(new GetJobRequest("Job")).block(); + daprClient.getJob(new GetJobRequest("Job")).block(); - daprPreviewClient.deleteJob(new DeleteJobRequest("Job")).block(); + daprClient.deleteJob(new DeleteJobRequest("Job")).block(); assertEquals(iso8601Formatter.format(currentTime), getJobResponse.getDueTime().toString()); assertEquals("2 * 3 * * FRI", getJobResponse.getSchedule().getExpression()); @@ -165,7 +164,7 @@ public void testJobScheduleCreationWithDropFailurePolicy() { String cronExpression = "2 * 3 * * FRI"; - daprPreviewClient.scheduleJob(new ScheduleJobRequest("Job", currentTime) + daprClient.scheduleJob(new ScheduleJobRequest("Job", currentTime) .setTtl(currentTime.plus(2, ChronoUnit.HOURS)) .setData("Job data".getBytes()) .setRepeat(3) @@ -173,9 +172,9 @@ public void testJobScheduleCreationWithDropFailurePolicy() { .setSchedule(JobSchedule.fromString(cronExpression))).block(); GetJobResponse getJobResponse = - daprPreviewClient.getJob(new GetJobRequest("Job")).block(); + daprClient.getJob(new GetJobRequest("Job")).block(); - daprPreviewClient.deleteJob(new DeleteJobRequest("Job")).block(); + daprClient.deleteJob(new DeleteJobRequest("Job")).block(); assertEquals(FailurePolicyType.DROP, getJobResponse.getFailurePolicy().getFailurePolicyType()); } @@ -188,7 +187,7 @@ public void testJobScheduleCreationWithConstantFailurePolicy() { String cronExpression = "2 * 3 * * FRI"; - daprPreviewClient.scheduleJob(new ScheduleJobRequest("Job", currentTime) + daprClient.scheduleJob(new ScheduleJobRequest("Job", currentTime) .setTtl(currentTime.plus(2, ChronoUnit.HOURS)) .setData("Job data".getBytes()) .setRepeat(3) @@ -197,9 +196,9 @@ public void testJobScheduleCreationWithConstantFailurePolicy() { .setSchedule(JobSchedule.fromString(cronExpression))).block(); GetJobResponse getJobResponse = - daprPreviewClient.getJob(new GetJobRequest("Job")).block(); + daprClient.getJob(new GetJobRequest("Job")).block(); - 
daprPreviewClient.deleteJob(new DeleteJobRequest("Job")).block(); + daprClient.deleteJob(new DeleteJobRequest("Job")).block(); ConstantFailurePolicy jobFailurePolicyConstant = (ConstantFailurePolicy) getJobResponse.getFailurePolicy(); assertEquals(FailurePolicyType.CONSTANT, getJobResponse.getFailurePolicy().getFailurePolicyType()); @@ -214,13 +213,13 @@ public void testDeleteJobRequest() { String cronExpression = "2 * 3 * * FRI"; - daprPreviewClient.scheduleJob(new ScheduleJobRequest("Job", currentTime) + daprClient.scheduleJob(new ScheduleJobRequest("Job", currentTime) .setTtl(currentTime.plus(2, ChronoUnit.HOURS)) .setData("Job data".getBytes()) .setRepeat(3) .setOverwrite(true) .setSchedule(JobSchedule.fromString(cronExpression))).block(); - daprPreviewClient.deleteJob(new DeleteJobRequest("Job")).block(); + daprClient.deleteJob(new DeleteJobRequest("Job")).block(); } } diff --git a/sdk/src/main/java/io/dapr/client/DaprClient.java b/sdk/src/main/java/io/dapr/client/DaprClient.java index 6ac6086e7..f341344b3 100644 --- a/sdk/src/main/java/io/dapr/client/DaprClient.java +++ b/sdk/src/main/java/io/dapr/client/DaprClient.java @@ -15,11 +15,14 @@ import io.dapr.client.domain.ConfigurationItem; import io.dapr.client.domain.DaprMetadata; +import io.dapr.client.domain.DeleteJobRequest; import io.dapr.client.domain.DeleteStateRequest; import io.dapr.client.domain.ExecuteStateTransactionRequest; import io.dapr.client.domain.GetBulkSecretRequest; import io.dapr.client.domain.GetBulkStateRequest; import io.dapr.client.domain.GetConfigurationRequest; +import io.dapr.client.domain.GetJobRequest; +import io.dapr.client.domain.GetJobResponse; import io.dapr.client.domain.GetSecretRequest; import io.dapr.client.domain.GetStateRequest; import io.dapr.client.domain.HttpExtension; @@ -27,6 +30,7 @@ import io.dapr.client.domain.InvokeMethodRequest; import io.dapr.client.domain.PublishEventRequest; import io.dapr.client.domain.SaveStateRequest; +import io.dapr.client.domain.ScheduleJobRequest; import io.dapr.client.domain.State; import io.dapr.client.domain.StateOptions; import io.dapr.client.domain.SubscribeConfigurationRequest; @@ -702,6 +706,38 @@ Flux subscribeConfiguration(String storeName, Li */ Mono getMetadata(); + /** + * Schedules a job using the provided job request details. + * + * @param scheduleJobRequest The request containing the details of the job to schedule. + * Must include a name and optional schedule, data, and other related properties. + * @return A {@link Mono} that completes when the job scheduling operation is successful or raises an error. + * @throws IllegalArgumentException If the request or its required fields like name are null or empty. + */ + public Mono scheduleJob(ScheduleJobRequest scheduleJobRequest); + + /** + * Retrieves details of a specific job. + * + * @param getJobRequest The request containing the job name for which the details are to be fetched. + * The name property is mandatory. + * @return A {@link Mono} that emits the {@link GetJobResponse} containing job details or raises an + * error if the job is not found. + * @throws IllegalArgumentException If the request or its required fields like name are null or empty. + */ + + public Mono getJob(GetJobRequest getJobRequest); + + /** + * Deletes a job based on the given request. + * + * @param deleteJobRequest The request containing the job name to be deleted. + * The name property is mandatory. + * @return A {@link Mono} that completes when the job is successfully deleted or raises an error. 
+ * @throws IllegalArgumentException If the request or its required fields like name are null or empty. + */ + public Mono deleteJob(DeleteJobRequest deleteJobRequest); + /** * Gracefully shutdown the dapr runtime. * diff --git a/sdk/src/main/java/io/dapr/client/DaprPreviewClient.java b/sdk/src/main/java/io/dapr/client/DaprPreviewClient.java index 545b8e5dc..9d8192369 100644 --- a/sdk/src/main/java/io/dapr/client/DaprPreviewClient.java +++ b/sdk/src/main/java/io/dapr/client/DaprPreviewClient.java @@ -291,38 +291,6 @@ Subscription subscribeToEvents( */ Flux> subscribeToEvents(String pubsubName, String topic, TypeRef type); - /** - * Schedules a job using the provided job request details. - * - * @param scheduleJobRequest The request containing the details of the job to schedule. - * Must include a name and optional schedule, data, and other related properties. - * @return A {@link Mono} that completes when the job scheduling operation is successful or raises an error. - * @throws IllegalArgumentException If the request or its required fields like name are null or empty. - */ - public Mono scheduleJob(ScheduleJobRequest scheduleJobRequest); - - /** - * Retrieves details of a specific job. - * - * @param getJobRequest The request containing the job name for which the details are to be fetched. - * The name property is mandatory. - * @return A {@link Mono} that emits the {@link GetJobResponse} containing job details or raises an - * error if the job is not found. - * @throws IllegalArgumentException If the request or its required fields like name are null or empty. - */ - - public Mono getJob(GetJobRequest getJobRequest); - - /** - * Deletes a job based on the given request. - * - * @param deleteJobRequest The request containing the job name to be deleted. - * The name property is mandatory. - * @return A {@link Mono} that completes when the job is successfully deleted or raises an error. - * @throws IllegalArgumentException If the request or its required fields like name are null or empty. - */ - public Mono deleteJob(DeleteJobRequest deleteJobRequest); - /* * Converse with an LLM. 
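Editor's note: the job methods removed from DaprPreviewClient above keep exactly the same shape on DaprClient. A minimal usage sketch assembled only from the request/response types this series already exercises; the class name is hypothetical and sidecar endpoint configuration (for example withPropertyOverrides) is omitted, so treat the wiring as an assumption rather than the SDK's prescribed setup:

```java
import io.dapr.client.DaprClient;
import io.dapr.client.DaprClientBuilder;
import io.dapr.client.domain.DeleteJobRequest;
import io.dapr.client.domain.GetJobRequest;
import io.dapr.client.domain.GetJobResponse;
import io.dapr.client.domain.JobSchedule;
import io.dapr.client.domain.ScheduleJobRequest;

public class JobsApiSketch {
  public static void main(String[] args) throws Exception {
    // DaprClient is AutoCloseable; endpoint configuration is environment-specific and omitted here.
    try (DaprClient client = new DaprClientBuilder().build()) {
      // Schedule (or re-schedule, thanks to overwrite) an hourly job carrying a small payload.
      client.scheduleJob(new ScheduleJobRequest("cleanup", JobSchedule.hourly())
          .setData("payload".getBytes())
          .setOverwrite(true)).block();

      // Read the job back and inspect its schedule expression.
      GetJobResponse job = client.getJob(new GetJobRequest("cleanup")).block();
      System.out.println(job.getSchedule().getExpression());

      // Clean up.
      client.deleteJob(new DeleteJobRequest("cleanup")).block();
    }
  }
}
```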
* diff --git a/sdk/src/test/java/io/dapr/client/DaprClientGrpcTest.java b/sdk/src/test/java/io/dapr/client/DaprClientGrpcTest.java index 7ac6ab3cf..642c3b4f2 100644 --- a/sdk/src/test/java/io/dapr/client/DaprClientGrpcTest.java +++ b/sdk/src/test/java/io/dapr/client/DaprClientGrpcTest.java @@ -20,14 +20,21 @@ import io.dapr.client.domain.AppConnectionPropertiesMetadata; import io.dapr.client.domain.ComponentMetadata; import io.dapr.client.domain.ConfigurationItem; +import io.dapr.client.domain.ConstantFailurePolicy; import io.dapr.client.domain.DaprMetadata; +import io.dapr.client.domain.DeleteJobRequest; import io.dapr.client.domain.DeleteStateRequest; +import io.dapr.client.domain.DropFailurePolicy; import io.dapr.client.domain.ExecuteStateTransactionRequest; import io.dapr.client.domain.GetBulkStateRequest; +import io.dapr.client.domain.GetJobRequest; +import io.dapr.client.domain.GetJobResponse; import io.dapr.client.domain.GetStateRequest; import io.dapr.client.domain.InvokeBindingRequest; +import io.dapr.client.domain.JobSchedule; import io.dapr.client.domain.PublishEventRequest; import io.dapr.client.domain.RuleMetadata; +import io.dapr.client.domain.ScheduleJobRequest; import io.dapr.client.domain.State; import io.dapr.client.domain.StateOptions; import io.dapr.client.domain.SubscribeConfigurationResponse; @@ -53,17 +60,26 @@ import io.grpc.StatusRuntimeException; import io.grpc.protobuf.StatusProto; import io.grpc.stub.StreamObserver; +import org.junit.Assert; import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.mockito.ArgumentCaptor; import org.mockito.ArgumentMatcher; import org.mockito.ArgumentMatchers; +import org.mockito.Mockito; import org.mockito.stubbing.Answer; import reactor.core.publisher.Mono; import java.io.IOException; import java.nio.charset.StandardCharsets; +import java.time.Duration; +import java.time.Instant; +import java.time.OffsetDateTime; +import java.time.ZoneOffset; +import java.time.format.DateTimeFormatter; +import java.time.temporal.ChronoUnit; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -73,10 +89,12 @@ import java.util.Map; import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; import static io.dapr.utils.TestUtils.assertThrowsDaprException; import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -88,6 +106,7 @@ import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -2298,6 +2317,634 @@ public void getMetadataTest() { assertEquals(healthProperties.getHealthThreshold(), healthMetadata.getHealthThreshold()); } + @Test + public void scheduleJobShouldSucceedWhenAllFieldsArePresentInRequest() { + DateTimeFormatter iso8601Formatter = DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'") + .withZone(ZoneOffset.UTC); + + ScheduleJobRequest expectedScheduleJobRequest = new ScheduleJobRequest("testJob", + 
JobSchedule.fromString("*/5 * * * *")) + .setData("testData".getBytes()) + .setTtl(Instant.now().plus(1, ChronoUnit.DAYS)) + .setRepeat(5) + .setDueTime(Instant.now().plus(10, ChronoUnit.MINUTES)); + + doAnswer(invocation -> { + StreamObserver observer = invocation.getArgument(1); + observer.onCompleted(); // Simulate successful response + return null; + }).when(daprStub).scheduleJobAlpha1(any(DaprProtos.ScheduleJobRequest.class), any()); + + assertDoesNotThrow(() -> client.scheduleJob(expectedScheduleJobRequest).block()); + + ArgumentCaptor captor = + ArgumentCaptor.forClass(DaprProtos.ScheduleJobRequest.class); + + verify(daprStub, times(1)).scheduleJobAlpha1(captor.capture(), Mockito.any()); + DaprProtos.ScheduleJobRequest actualScheduleJobReq = captor.getValue(); + + assertEquals("testJob", actualScheduleJobReq.getJob().getName()); + assertEquals("testData", + new String(actualScheduleJobReq.getJob().getData().getValue().toByteArray(), StandardCharsets.UTF_8)); + assertEquals("*/5 * * * *", actualScheduleJobReq.getJob().getSchedule()); + assertEquals(iso8601Formatter.format(expectedScheduleJobRequest.getTtl()), actualScheduleJobReq.getJob().getTtl()); + assertEquals(expectedScheduleJobRequest.getRepeats(), actualScheduleJobReq.getJob().getRepeats()); + assertEquals(iso8601Formatter.format(expectedScheduleJobRequest.getDueTime()), actualScheduleJobReq.getJob().getDueTime()); + } + + @Test + public void scheduleJobShouldSucceedWhenRequiredFieldsNameAndDueTimeArePresentInRequest() { + DateTimeFormatter iso8601Formatter = DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'") + .withZone(ZoneOffset.UTC); + + doAnswer(invocation -> { + StreamObserver observer = invocation.getArgument(1); + observer.onCompleted(); // Simulate successful response + return null; + }).when(daprStub).scheduleJobAlpha1(any(DaprProtos.ScheduleJobRequest.class), any()); + + ScheduleJobRequest expectedScheduleJobRequest = + new ScheduleJobRequest("testJob", Instant.now().plus(10, ChronoUnit.MINUTES)); + assertDoesNotThrow(() -> client.scheduleJob(expectedScheduleJobRequest).block()); + + ArgumentCaptor captor = + ArgumentCaptor.forClass(DaprProtos.ScheduleJobRequest.class); + + verify(daprStub, times(1)).scheduleJobAlpha1(captor.capture(), Mockito.any()); + DaprProtos.ScheduleJobRequest actualScheduleJobRequest = captor.getValue(); + DaprProtos.Job job = actualScheduleJobRequest.getJob(); + assertEquals("testJob", job.getName()); + assertFalse(job.hasData()); + assertFalse(job.hasSchedule()); + assertEquals(0, job.getRepeats()); + assertFalse(job.hasTtl()); + assertEquals(iso8601Formatter.format(expectedScheduleJobRequest.getDueTime()), + actualScheduleJobRequest.getJob().getDueTime()); + } + + @Test + public void scheduleJobShouldSucceedWhenRequiredFieldsNameAndScheduleArePresentInRequest() { + DateTimeFormatter iso8601Formatter = DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'") + .withZone(ZoneOffset.UTC); + + doAnswer(invocation -> { + StreamObserver observer = invocation.getArgument(1); + observer.onCompleted(); // Simulate successful response + return null; + }).when(daprStub).scheduleJobAlpha1(any(DaprProtos.ScheduleJobRequest.class), any()); + + ScheduleJobRequest expectedScheduleJobRequest = new ScheduleJobRequest("testJob", + JobSchedule.fromString("* * * * * *")); + assertDoesNotThrow(() -> client.scheduleJob(expectedScheduleJobRequest).block()); + + ArgumentCaptor captor = + ArgumentCaptor.forClass(DaprProtos.ScheduleJobRequest.class); + + verify(daprStub, 
times(1)).scheduleJobAlpha1(captor.capture(), Mockito.any()); + DaprProtos.ScheduleJobRequest actualScheduleJobRequest = captor.getValue(); + DaprProtos.Job job = actualScheduleJobRequest.getJob(); + assertEquals("testJob", job.getName()); + assertFalse(job.hasData()); + assertEquals( "* * * * * *", job.getSchedule()); + assertEquals(0, job.getRepeats()); + assertFalse(job.hasTtl()); + } + + @Test + public void scheduleJobShouldThrowWhenRequestIsNull() { + IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> { + client.scheduleJob(null).block(); + }); + assertEquals("scheduleJobRequest cannot be null", exception.getMessage()); + } + + @Test + public void scheduleJobShouldThrowWhenInvalidRequest() { + ScheduleJobRequest scheduleJobRequest = new ScheduleJobRequest(null, Instant.now()); + IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> { + client.scheduleJob(scheduleJobRequest).block(); + }); + assertEquals("Name in the request cannot be null or empty", exception.getMessage()); + } + + @Test + public void scheduleJobShouldThrowWhenNameInRequestIsEmpty() { + ScheduleJobRequest scheduleJobRequest = new ScheduleJobRequest("", Instant.now()); + + IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> { + client.scheduleJob(scheduleJobRequest).block(); + }); + assertEquals("Name in the request cannot be null or empty", exception.getMessage()); + } + + @Test + public void scheduleJobShouldHavePolicyWhenPolicyIsSet() { + doAnswer(invocation -> { + StreamObserver observer = invocation.getArgument(1); + observer.onCompleted(); // Simulate successful response + return null; + }).when(daprStub).scheduleJobAlpha1(any(DaprProtos.ScheduleJobRequest.class), any()); + + ScheduleJobRequest expectedScheduleJobRequest = new ScheduleJobRequest("testJob", + JobSchedule.fromString("* * * * * *")) + .setFailurePolicy(new DropFailurePolicy()); + + client.scheduleJob(expectedScheduleJobRequest).block(); + + ArgumentCaptor captor = + ArgumentCaptor.forClass(DaprProtos.ScheduleJobRequest.class); + + verify(daprStub, times(1)).scheduleJobAlpha1(captor.capture(), Mockito.any()); + DaprProtos.ScheduleJobRequest actualScheduleJobRequest = captor.getValue(); + DaprProtos.Job job = actualScheduleJobRequest.getJob(); + assertEquals("testJob", job.getName()); + assertFalse(job.hasData()); + assertEquals( "* * * * * *", job.getSchedule()); + assertEquals(0, job.getRepeats()); + assertFalse(job.hasTtl()); + Assertions.assertTrue(job.hasFailurePolicy()); + } + + @Test + public void scheduleJobShouldHaveConstantPolicyWithMaxRetriesWhenConstantPolicyIsSetWithMaxRetries() { + doAnswer(invocation -> { + StreamObserver observer = invocation.getArgument(1); + observer.onCompleted(); // Simulate successful response + return null; + }).when(daprStub).scheduleJobAlpha1(any(DaprProtos.ScheduleJobRequest.class), any()); + + ScheduleJobRequest expectedScheduleJobRequest = new ScheduleJobRequest("testJob", + JobSchedule.fromString("* * * * * *")) + .setFailurePolicy(new ConstantFailurePolicy(2)); + + client.scheduleJob(expectedScheduleJobRequest).block(); + + ArgumentCaptor captor = + ArgumentCaptor.forClass(DaprProtos.ScheduleJobRequest.class); + + verify(daprStub, times(1)).scheduleJobAlpha1(captor.capture(), Mockito.any()); + DaprProtos.ScheduleJobRequest actualScheduleJobRequest = captor.getValue(); + DaprProtos.Job job = actualScheduleJobRequest.getJob(); + assertEquals("testJob", job.getName()); + assertFalse(job.hasData()); + 
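    // Editor's aside (sketch, not part of this patch): taken together, the failure-policy tests in
    // this file suggest two shapes a caller can attach when scheduling, e.g.
    //   new ScheduleJobRequest("job", JobSchedule.hourly())
    //       .setFailurePolicy(new ConstantFailurePolicy(Duration.of(2, ChronoUnit.SECONDS)).setMaxRetries(10));
    // for fixed-interval retries with a cap, or
    //   .setFailurePolicy(new DropFailurePolicy())
    // to give up after a failed trigger without retrying.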
assertEquals( "* * * * * *", job.getSchedule()); + assertEquals(0, job.getRepeats()); + assertFalse(job.hasTtl()); + Assertions.assertTrue(job.hasFailurePolicy()); + assertEquals(2, job.getFailurePolicy().getConstant().getMaxRetries()); + } + + @Test + public void scheduleJobShouldHaveConstantPolicyWithIntervalWhenConstantPolicyIsSetWithInterval() { + doAnswer(invocation -> { + StreamObserver observer = invocation.getArgument(1); + observer.onCompleted(); // Simulate successful response + return null; + }).when(daprStub).scheduleJobAlpha1(any(DaprProtos.ScheduleJobRequest.class), any()); + + ScheduleJobRequest expectedScheduleJobRequest = new ScheduleJobRequest("testJob", + JobSchedule.fromString("* * * * * *")) + .setFailurePolicy(new ConstantFailurePolicy(Duration.of(2, ChronoUnit.SECONDS))); + + client.scheduleJob(expectedScheduleJobRequest).block(); + + ArgumentCaptor captor = + ArgumentCaptor.forClass(DaprProtos.ScheduleJobRequest.class); + + verify(daprStub, times(1)).scheduleJobAlpha1(captor.capture(), Mockito.any()); + DaprProtos.ScheduleJobRequest actualScheduleJobRequest = captor.getValue(); + DaprProtos.Job job = actualScheduleJobRequest.getJob(); + assertEquals("testJob", job.getName()); + assertFalse(job.hasData()); + assertEquals( "* * * * * *", job.getSchedule()); + assertEquals(0, job.getRepeats()); + assertFalse(job.hasTtl()); + Assertions.assertTrue(job.hasFailurePolicy()); + assertEquals(Duration.of(2, ChronoUnit.SECONDS).getNano(), + job.getFailurePolicy().getConstant().getInterval().getNanos()); + } + + @Test + public void scheduleJobShouldHaveBothRetiresAndIntervalWhenConstantPolicyIsSetWithRetriesAndInterval() { + doAnswer(invocation -> { + StreamObserver observer = invocation.getArgument(1); + observer.onCompleted(); // Simulate successful response + return null; + }).when(daprStub).scheduleJobAlpha1(any(DaprProtos.ScheduleJobRequest.class), any()); + + ScheduleJobRequest expectedScheduleJobRequest = new ScheduleJobRequest("testJob", + JobSchedule.fromString("* * * * * *")) + .setFailurePolicy(new ConstantFailurePolicy(Duration.of(2, ChronoUnit.SECONDS)) + .setMaxRetries(10)); + + client.scheduleJob(expectedScheduleJobRequest).block(); + + ArgumentCaptor captor = + ArgumentCaptor.forClass(DaprProtos.ScheduleJobRequest.class); + + verify(daprStub, times(1)).scheduleJobAlpha1(captor.capture(), Mockito.any()); + DaprProtos.ScheduleJobRequest actualScheduleJobRequest = captor.getValue(); + DaprProtos.Job job = actualScheduleJobRequest.getJob(); + assertEquals("testJob", job.getName()); + assertFalse(job.hasData()); + assertEquals( "* * * * * *", job.getSchedule()); + assertEquals(0, job.getRepeats()); + assertFalse(job.hasTtl()); + Assertions.assertTrue(job.hasFailurePolicy()); + assertEquals(Duration.of(2, ChronoUnit.SECONDS).getNano(), + job.getFailurePolicy().getConstant().getInterval().getNanos()); + assertEquals(10, job.getFailurePolicy().getConstant().getMaxRetries()); + } + + @Test + public void scheduleJobShouldThrowWhenNameAlreadyExists() { + AtomicInteger callCount = new AtomicInteger(0); + + doAnswer(invocation -> { + StreamObserver observer = invocation.getArgument(1); + if (callCount.incrementAndGet() == 1) { + // First call succeeds + observer.onCompleted(); + } else { + // Second call fails with ALREADY_EXISTS + observer.onError(newStatusRuntimeException("ALREADY_EXISTS", "Job with name 'testJob' already exists")); + } + return null; + }).when(daprStub).scheduleJobAlpha1(any(DaprProtos.ScheduleJobRequest.class), any()); + + // First call should succeed + 
ScheduleJobRequest firstRequest = new ScheduleJobRequest("testJob", Instant.now()); + assertDoesNotThrow(() -> client.scheduleJob(firstRequest).block()); + + ArgumentCaptor captor = + ArgumentCaptor.forClass(DaprProtos.ScheduleJobRequest.class); + + verify(daprStub, times(1)).scheduleJobAlpha1(captor.capture(), Mockito.any()); + DaprProtos.ScheduleJobRequest actualScheduleJobRequest = captor.getValue(); + DaprProtos.Job job = actualScheduleJobRequest.getJob(); + assertEquals("testJob", job.getName()); + assertFalse(job.hasData()); + assertEquals(0, job.getRepeats()); + assertFalse(job.hasTtl()); + + // Second call with same name should fail + ScheduleJobRequest secondRequest = new ScheduleJobRequest("testJob", Instant.now()); + + assertThrowsDaprException( + ExecutionException.class, + "ALREADY_EXISTS", + "ALREADY_EXISTS: Job with name 'testJob' already exists", + () -> client.scheduleJob(secondRequest).block()); + } + + @Test + public void scheduleJobShouldSucceedWhenNameAlreadyExistsWithOverwrite() { + doAnswer(invocation -> { + StreamObserver observer = invocation.getArgument(1); + observer.onCompleted(); // Simulate successful response for both calls + return null; + }).when(daprStub).scheduleJobAlpha1(any(DaprProtos.ScheduleJobRequest.class), any()); + + // First call should succeed + ScheduleJobRequest firstRequest = new ScheduleJobRequest("testJob", Instant.now()); + assertDoesNotThrow(() -> client.scheduleJob(firstRequest).block()); + + // Second call with same name but overwrite=true should also succeed + ScheduleJobRequest secondRequest = new ScheduleJobRequest("testJob", Instant.now()) + .setOverwrite(true); + assertDoesNotThrow(() -> client.scheduleJob(secondRequest).block()); + + // Verify that both calls were made successfully + ArgumentCaptor captor = + ArgumentCaptor.forClass(DaprProtos.ScheduleJobRequest.class); + verify(daprStub, times(2)).scheduleJobAlpha1(captor.capture(), any()); + + // Verify the first call doesn't have overwrite set + DaprProtos.ScheduleJobRequest firstActualRequest = captor.getAllValues().get(0); + assertFalse(firstActualRequest.getOverwrite()); + assertEquals("testJob", firstActualRequest.getJob().getName()); + + // Verify the second call has overwrite set to true + DaprProtos.ScheduleJobRequest secondActualRequest = captor.getAllValues().get(1); + Assert.assertTrue(secondActualRequest.getOverwrite()); + assertEquals("testJob", secondActualRequest.getJob().getName()); + } + + @Test + public void getJobShouldReturnResponseWhenAllFieldsArePresentInRequest() { + DateTimeFormatter iso8601Formatter = DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'") + .withZone(ZoneOffset.UTC); + + GetJobRequest getJobRequest = new GetJobRequest("testJob"); + + DaprProtos.Job job = DaprProtos.Job.newBuilder() + .setName("testJob") + .setTtl(OffsetDateTime.now().format(iso8601Formatter)) + .setData(Any.newBuilder().setValue(ByteString.copyFrom("testData".getBytes())).build()) + .setSchedule("*/5 * * * *") + .setRepeats(5) + .setDueTime(iso8601Formatter.format(Instant.now().plus(10, ChronoUnit.MINUTES))) + .build(); + + doAnswer(invocation -> { + StreamObserver observer = invocation.getArgument(1); + observer.onNext(DaprProtos.GetJobResponse.newBuilder() + .setJob(job) + .build()); + observer.onCompleted(); + return null; + }).when(daprStub).getJobAlpha1(any(DaprProtos.GetJobRequest.class), any()); + + Mono resultMono = client.getJob(getJobRequest); + + GetJobResponse response = resultMono.block(); + assertNotNull(response); + assertEquals("testJob", 
response.getName()); + assertEquals("testData", new String(response.getData(), StandardCharsets.UTF_8)); + assertEquals("*/5 * * * *", response.getSchedule().getExpression()); + assertEquals(5, response.getRepeats()); + assertEquals(job.getTtl(), iso8601Formatter.format(response.getTtl())); + assertEquals(job.getDueTime(), iso8601Formatter.format(response.getDueTime())); + } + + @Test + public void getJobShouldReturnResponseWithScheduleSetWhenResponseHasSchedule() { + GetJobRequest getJobRequest = new GetJobRequest("testJob"); + + DaprProtos.Job job = DaprProtos.Job.newBuilder() + .setName("testJob") + .setSchedule("0 0 0 1 1 *") + .build(); + + doAnswer(invocation -> { + StreamObserver observer = invocation.getArgument(1); + observer.onNext(DaprProtos.GetJobResponse.newBuilder() + .setJob(job) + .build()); + observer.onCompleted(); + return null; + }).when(daprStub).getJobAlpha1(any(DaprProtos.GetJobRequest.class), any()); + + Mono resultMono = client.getJob(getJobRequest); + + GetJobResponse response = resultMono.block(); + assertNotNull(response); + assertEquals("testJob", response.getName()); + assertNull(response.getData()); + assertEquals("0 0 0 1 1 *", response.getSchedule().getExpression()); + assertNull(response.getRepeats()); + assertNull(response.getTtl()); + assertNull(response.getDueTime()); + } + + @Test + public void getJobShouldReturnResponseWithDueTimeSetWhenResponseHasDueTime() { + GetJobRequest getJobRequest = new GetJobRequest("testJob"); + + String datetime = OffsetDateTime.now().toString(); + DaprProtos.Job job = DaprProtos.Job.newBuilder() + .setName("testJob") + .setDueTime(datetime) + .build(); + + doAnswer(invocation -> { + StreamObserver observer = invocation.getArgument(1); + observer.onNext(DaprProtos.GetJobResponse.newBuilder() + .setJob(job) + .build()); + observer.onCompleted(); + return null; + }).when(daprStub).getJobAlpha1(any(DaprProtos.GetJobRequest.class), any()); + + Mono resultMono = client.getJob(getJobRequest); + + GetJobResponse response = resultMono.block(); + assertNotNull(response); + assertEquals("testJob", response.getName()); + assertNull(response.getData()); + assertNull(response.getSchedule()); + assertNull(response.getRepeats()); + assertNull(response.getTtl()); + assertEquals(job.getDueTime(), datetime); + } + + @Test + public void getJobShouldReturnResponseWithDropFailurePolicySet() { + GetJobRequest getJobRequest = new GetJobRequest("testJob"); + + String datetime = OffsetDateTime.now().toString(); + DaprProtos.Job job = DaprProtos.Job.newBuilder() + .setName("testJob") + .setDueTime(datetime) + .setFailurePolicy(CommonProtos.JobFailurePolicy.newBuilder() + .setDrop(CommonProtos.JobFailurePolicyDrop.newBuilder().build()).build()) + .build(); + + doAnswer(invocation -> { + StreamObserver observer = invocation.getArgument(1); + observer.onNext(DaprProtos.GetJobResponse.newBuilder() + .setJob(job) + .build()); + observer.onCompleted(); + return null; + }).when(daprStub).getJobAlpha1(any(DaprProtos.GetJobRequest.class), any()); + + Mono resultMono = client.getJob(getJobRequest); + + GetJobResponse response = resultMono.block(); + assertNotNull(response); + assertEquals("testJob", response.getName()); + assertNull(response.getData()); + assertNull(response.getSchedule()); + assertNull(response.getRepeats()); + assertNull(response.getTtl()); + assertEquals(job.getDueTime(), datetime); + Assert.assertTrue(job.hasFailurePolicy()); + Assert.assertTrue(job.getFailurePolicy().hasDrop()); + } + + @Test + public void 
getJobShouldReturnResponseWithConstantFailurePolicyAndMaxRetriesSet() { + GetJobRequest getJobRequest = new GetJobRequest("testJob"); + + String datetime = OffsetDateTime.now().toString(); + DaprProtos.Job job = DaprProtos.Job.newBuilder() + .setName("testJob") + .setDueTime(datetime) + .setFailurePolicy(CommonProtos.JobFailurePolicy.newBuilder() + .setConstant(CommonProtos.JobFailurePolicyConstant.newBuilder().setMaxRetries(2).build()).build()) + .build(); + + doAnswer(invocation -> { + StreamObserver observer = invocation.getArgument(1); + observer.onNext(DaprProtos.GetJobResponse.newBuilder() + .setJob(job) + .build()); + observer.onCompleted(); + return null; + }).when(daprStub).getJobAlpha1(any(DaprProtos.GetJobRequest.class), any()); + + Mono resultMono = client.getJob(getJobRequest); + + GetJobResponse response = resultMono.block(); + assertNotNull(response); + assertEquals("testJob", response.getName()); + assertNull(response.getData()); + assertNull(response.getSchedule()); + assertNull(response.getRepeats()); + assertNull(response.getTtl()); + assertEquals(job.getDueTime(), datetime); + Assert.assertTrue(job.hasFailurePolicy()); + Assert.assertTrue(job.getFailurePolicy().hasConstant()); + assertEquals(2, job.getFailurePolicy().getConstant().getMaxRetries()); + } + + @Test + public void getJobShouldReturnResponseWithConstantFailurePolicyAndIntervalSet() { + GetJobRequest getJobRequest = new GetJobRequest("testJob"); + + String datetime = OffsetDateTime.now().toString(); + DaprProtos.Job job = DaprProtos.Job.newBuilder() + .setName("testJob") + .setDueTime(datetime) + .setFailurePolicy(CommonProtos.JobFailurePolicy.newBuilder() + .setConstant(CommonProtos.JobFailurePolicyConstant.newBuilder() + .setInterval(com.google.protobuf.Duration.newBuilder().setNanos(5).build()).build()).build()) + .build(); + + doAnswer(invocation -> { + StreamObserver observer = invocation.getArgument(1); + observer.onNext(DaprProtos.GetJobResponse.newBuilder() + .setJob(job) + .build()); + observer.onCompleted(); + return null; + }).when(daprStub).getJobAlpha1(any(DaprProtos.GetJobRequest.class), any()); + + Mono resultMono = client.getJob(getJobRequest); + + GetJobResponse response = resultMono.block(); + assertNotNull(response); + assertEquals("testJob", response.getName()); + assertNull(response.getData()); + assertNull(response.getSchedule()); + assertNull(response.getRepeats()); + assertNull(response.getTtl()); + assertEquals(job.getDueTime(), datetime); + Assert.assertTrue(job.hasFailurePolicy()); + Assert.assertTrue(job.getFailurePolicy().hasConstant()); + assertEquals(5, job.getFailurePolicy().getConstant().getInterval().getNanos()); + } + + @Test + public void getJobShouldReturnResponseWithConstantFailurePolicyIntervalAndMaxRetriesSet() { + GetJobRequest getJobRequest = new GetJobRequest("testJob"); + + String datetime = OffsetDateTime.now().toString(); + DaprProtos.Job job = DaprProtos.Job.newBuilder() + .setName("testJob") + .setDueTime(datetime) + .setFailurePolicy(CommonProtos.JobFailurePolicy.newBuilder() + .setConstant(CommonProtos.JobFailurePolicyConstant.newBuilder() + .setMaxRetries(10) + .setInterval(com.google.protobuf.Duration.newBuilder().setNanos(5).build()).build()).build()) + .build(); + + doAnswer(invocation -> { + StreamObserver observer = invocation.getArgument(1); + observer.onNext(DaprProtos.GetJobResponse.newBuilder() + .setJob(job) + .build()); + observer.onCompleted(); + return null; + }).when(daprStub).getJobAlpha1(any(DaprProtos.GetJobRequest.class), any()); + + Mono 
resultMono = client.getJob(getJobRequest); + + GetJobResponse response = resultMono.block(); + assertNotNull(response); + assertEquals("testJob", response.getName()); + assertNull(response.getData()); + assertNull(response.getSchedule()); + assertNull(response.getRepeats()); + assertNull(response.getTtl()); + assertEquals(job.getDueTime(), datetime); + Assert.assertTrue(job.hasFailurePolicy()); + Assert.assertTrue(job.getFailurePolicy().hasConstant()); + assertEquals(10, job.getFailurePolicy().getConstant().getMaxRetries()); + assertEquals(5, job.getFailurePolicy().getConstant().getInterval().getNanos()); + } + + + @Test + public void getJobShouldThrowWhenRequestIsNull() { + IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> { + client.getJob(null).block(); + }); + assertEquals("getJobRequest cannot be null", exception.getMessage()); + } + + @Test + public void getJobShouldThrowWhenNameIsNullRequest() { + GetJobRequest getJobRequest = new GetJobRequest(null); + + IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> { + client.getJob(getJobRequest).block(); + }); + assertEquals("Name in the request cannot be null or empty", exception.getMessage()); + } + + @Test + public void getJobShouldThrowWhenNameIsEmptyRequest() { + GetJobRequest getJobRequest =new GetJobRequest("");; + + IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> { + client.getJob(getJobRequest).block(); + }); + assertEquals("Name in the request cannot be null or empty", exception.getMessage()); + } + + @Test + public void deleteJobShouldSucceedWhenValidRequest() { + DeleteJobRequest deleteJobRequest = new DeleteJobRequest("testJob"); + + doAnswer(invocation -> { + StreamObserver observer = invocation.getArgument(1); + observer.onCompleted(); // Simulate successful response + return null; + }).when(daprStub).deleteJobAlpha1(any(DaprProtos.DeleteJobRequest.class), any()); + + Mono resultMono = client.deleteJob(deleteJobRequest); + + assertDoesNotThrow(() -> resultMono.block()); + } + + @Test + public void deleteJobShouldThrowRequestIsNull() { + IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> { + client.deleteJob(null).block(); + }); + assertEquals("deleteJobRequest cannot be null", exception.getMessage()); + } + + @Test + public void deleteJobShouldThrowWhenNameIsNullRequest() { + DeleteJobRequest deleteJobRequest = new DeleteJobRequest(null); + IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> { + client.deleteJob(deleteJobRequest).block(); + }); + assertEquals("Name in the request cannot be null or empty", exception.getMessage()); + } + + @Test + public void deleteJobShouldThrowWhenNameIsEmptyRequest() { + DeleteJobRequest deleteJobRequest = new DeleteJobRequest(""); + IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> { + client.deleteJob(deleteJobRequest).block(); + }); + assertEquals("Name in the request cannot be null or empty", exception.getMessage()); + } + @Test public void getMetadataExceptionTest() { doAnswer((Answer) invocation -> { diff --git a/sdk/src/test/java/io/dapr/client/DaprPreviewClientGrpcTest.java b/sdk/src/test/java/io/dapr/client/DaprPreviewClientGrpcTest.java index a42c4f946..1566c7d2c 100644 --- a/sdk/src/test/java/io/dapr/client/DaprPreviewClientGrpcTest.java +++ b/sdk/src/test/java/io/dapr/client/DaprPreviewClientGrpcTest.java @@ -16,8 +16,6 @@ import 
com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.collect.Lists; -import com.google.protobuf.Any; import com.google.protobuf.ByteString; import io.dapr.client.domain.AssistantMessage; import io.dapr.client.domain.BulkPublishEntry; @@ -35,23 +33,13 @@ import io.dapr.client.domain.ConversationResultChoices; import io.dapr.client.domain.ConversationToolCalls; import io.dapr.client.domain.ConversationTools; -import io.dapr.client.domain.DeleteJobRequest; import io.dapr.client.domain.DeveloperMessage; -import io.dapr.client.domain.GetJobRequest; -import io.dapr.client.domain.GetJobResponse; -import io.dapr.client.domain.ConstantFailurePolicy; import io.dapr.client.domain.ConversationInput; import io.dapr.client.domain.ConversationRequest; import io.dapr.client.domain.ConversationResponse; -import io.dapr.client.domain.DeleteJobRequest; -import io.dapr.client.domain.DropFailurePolicy; -import io.dapr.client.domain.GetJobRequest; -import io.dapr.client.domain.GetJobResponse; -import io.dapr.client.domain.JobSchedule; import io.dapr.client.domain.QueryStateItem; import io.dapr.client.domain.QueryStateRequest; import io.dapr.client.domain.QueryStateResponse; -import io.dapr.client.domain.ScheduleJobRequest; import io.dapr.client.domain.SystemMessage; import io.dapr.client.domain.ToolMessage; import io.dapr.client.domain.UnlockResponseStatus; @@ -60,7 +48,6 @@ import io.dapr.serializer.DaprObjectSerializer; import io.dapr.serializer.DefaultObjectSerializer; import io.dapr.utils.TypeRef; -import io.dapr.v1.CommonProtos; import io.dapr.v1.DaprAppCallbackProtos; import io.dapr.v1.DaprGrpc; import io.dapr.v1.DaprProtos; @@ -78,13 +65,6 @@ import reactor.core.publisher.Mono; import java.io.IOException; -import java.nio.charset.StandardCharsets; -import java.time.Duration; -import java.time.Instant; -import java.time.OffsetDateTime; -import java.time.ZoneOffset; -import java.time.format.DateTimeFormatter; -import java.time.temporal.ChronoUnit; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -99,9 +79,7 @@ import static io.dapr.utils.TestUtils.assertThrowsDaprException; import static org.junit.Assert.assertTrue; -import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -802,634 +780,6 @@ public void converseShouldReturnConversationResponseWhenRequiredAndOptionalInput response.getConversationOutputs().get(0).getResult()); } - @Test - public void scheduleJobShouldSucceedWhenAllFieldsArePresentInRequest() { - DateTimeFormatter iso8601Formatter = DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'") - .withZone(ZoneOffset.UTC); - - ScheduleJobRequest expectedScheduleJobRequest = new ScheduleJobRequest("testJob", - JobSchedule.fromString("*/5 * * * *")) - .setData("testData".getBytes()) - .setTtl(Instant.now().plus(1, ChronoUnit.DAYS)) - .setRepeat(5) - .setDueTime(Instant.now().plus(10, ChronoUnit.MINUTES)); - - doAnswer(invocation -> { - StreamObserver observer = invocation.getArgument(1); - observer.onCompleted(); // Simulate successful response - return null; - }).when(daprStub).scheduleJobAlpha1(any(DaprProtos.ScheduleJobRequest.class), any()); - - assertDoesNotThrow(() -> 
previewClient.scheduleJob(expectedScheduleJobRequest).block()); - - ArgumentCaptor captor = - ArgumentCaptor.forClass(DaprProtos.ScheduleJobRequest.class); - - verify(daprStub, times(1)).scheduleJobAlpha1(captor.capture(), Mockito.any()); - DaprProtos.ScheduleJobRequest actualScheduleJobReq = captor.getValue(); - - assertEquals("testJob", actualScheduleJobReq.getJob().getName()); - assertEquals("testData", - new String(actualScheduleJobReq.getJob().getData().getValue().toByteArray(), StandardCharsets.UTF_8)); - assertEquals("*/5 * * * *", actualScheduleJobReq.getJob().getSchedule()); - assertEquals(iso8601Formatter.format(expectedScheduleJobRequest.getTtl()), actualScheduleJobReq.getJob().getTtl()); - assertEquals(expectedScheduleJobRequest.getRepeats(), actualScheduleJobReq.getJob().getRepeats()); - assertEquals(iso8601Formatter.format(expectedScheduleJobRequest.getDueTime()), actualScheduleJobReq.getJob().getDueTime()); - } - - @Test - public void scheduleJobShouldSucceedWhenRequiredFieldsNameAndDueTimeArePresentInRequest() { - DateTimeFormatter iso8601Formatter = DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'") - .withZone(ZoneOffset.UTC); - - doAnswer(invocation -> { - StreamObserver observer = invocation.getArgument(1); - observer.onCompleted(); // Simulate successful response - return null; - }).when(daprStub).scheduleJobAlpha1(any(DaprProtos.ScheduleJobRequest.class), any()); - - ScheduleJobRequest expectedScheduleJobRequest = - new ScheduleJobRequest("testJob", Instant.now().plus(10, ChronoUnit.MINUTES)); - assertDoesNotThrow(() -> previewClient.scheduleJob(expectedScheduleJobRequest).block()); - - ArgumentCaptor captor = - ArgumentCaptor.forClass(DaprProtos.ScheduleJobRequest.class); - - verify(daprStub, times(1)).scheduleJobAlpha1(captor.capture(), Mockito.any()); - DaprProtos.ScheduleJobRequest actualScheduleJobRequest = captor.getValue(); - DaprProtos.Job job = actualScheduleJobRequest.getJob(); - assertEquals("testJob", job.getName()); - assertFalse(job.hasData()); - assertFalse(job.hasSchedule()); - assertEquals(0, job.getRepeats()); - assertFalse(job.hasTtl()); - assertEquals(iso8601Formatter.format(expectedScheduleJobRequest.getDueTime()), - actualScheduleJobRequest.getJob().getDueTime()); - } - - @Test - public void scheduleJobShouldSucceedWhenRequiredFieldsNameAndScheduleArePresentInRequest() { - DateTimeFormatter iso8601Formatter = DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'") - .withZone(ZoneOffset.UTC); - - doAnswer(invocation -> { - StreamObserver observer = invocation.getArgument(1); - observer.onCompleted(); // Simulate successful response - return null; - }).when(daprStub).scheduleJobAlpha1(any(DaprProtos.ScheduleJobRequest.class), any()); - - ScheduleJobRequest expectedScheduleJobRequest = new ScheduleJobRequest("testJob", - JobSchedule.fromString("* * * * * *")); - assertDoesNotThrow(() -> previewClient.scheduleJob(expectedScheduleJobRequest).block()); - - ArgumentCaptor captor = - ArgumentCaptor.forClass(DaprProtos.ScheduleJobRequest.class); - - verify(daprStub, times(1)).scheduleJobAlpha1(captor.capture(), Mockito.any()); - DaprProtos.ScheduleJobRequest actualScheduleJobRequest = captor.getValue(); - DaprProtos.Job job = actualScheduleJobRequest.getJob(); - assertEquals("testJob", job.getName()); - assertFalse(job.hasData()); - assertEquals( "* * * * * *", job.getSchedule()); - assertEquals(0, job.getRepeats()); - assertFalse(job.hasTtl()); - } - - @Test - public void scheduleJobShouldThrowWhenRequestIsNull() { - 
IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> { - previewClient.scheduleJob(null).block(); - }); - assertEquals("scheduleJobRequest cannot be null", exception.getMessage()); - } - - @Test - public void scheduleJobShouldThrowWhenInvalidRequest() { - ScheduleJobRequest scheduleJobRequest = new ScheduleJobRequest(null, Instant.now()); - IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> { - previewClient.scheduleJob(scheduleJobRequest).block(); - }); - assertEquals("Name in the request cannot be null or empty", exception.getMessage()); - } - - @Test - public void scheduleJobShouldThrowWhenNameInRequestIsEmpty() { - ScheduleJobRequest scheduleJobRequest = new ScheduleJobRequest("", Instant.now()); - - IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> { - previewClient.scheduleJob(scheduleJobRequest).block(); - }); - assertEquals("Name in the request cannot be null or empty", exception.getMessage()); - } - - @Test - public void scheduleJobShouldHavePolicyWhenPolicyIsSet() { - doAnswer(invocation -> { - StreamObserver observer = invocation.getArgument(1); - observer.onCompleted(); // Simulate successful response - return null; - }).when(daprStub).scheduleJobAlpha1(any(DaprProtos.ScheduleJobRequest.class), any()); - - ScheduleJobRequest expectedScheduleJobRequest = new ScheduleJobRequest("testJob", - JobSchedule.fromString("* * * * * *")) - .setFailurePolicy(new DropFailurePolicy()); - - previewClient.scheduleJob(expectedScheduleJobRequest).block(); - - ArgumentCaptor captor = - ArgumentCaptor.forClass(DaprProtos.ScheduleJobRequest.class); - - verify(daprStub, times(1)).scheduleJobAlpha1(captor.capture(), Mockito.any()); - DaprProtos.ScheduleJobRequest actualScheduleJobRequest = captor.getValue(); - DaprProtos.Job job = actualScheduleJobRequest.getJob(); - assertEquals("testJob", job.getName()); - assertFalse(job.hasData()); - assertEquals( "* * * * * *", job.getSchedule()); - assertEquals(0, job.getRepeats()); - assertFalse(job.hasTtl()); - Assertions.assertTrue(job.hasFailurePolicy()); - } - - @Test - public void scheduleJobShouldHaveConstantPolicyWithMaxRetriesWhenConstantPolicyIsSetWithMaxRetries() { - doAnswer(invocation -> { - StreamObserver observer = invocation.getArgument(1); - observer.onCompleted(); // Simulate successful response - return null; - }).when(daprStub).scheduleJobAlpha1(any(DaprProtos.ScheduleJobRequest.class), any()); - - ScheduleJobRequest expectedScheduleJobRequest = new ScheduleJobRequest("testJob", - JobSchedule.fromString("* * * * * *")) - .setFailurePolicy(new ConstantFailurePolicy(2)); - - previewClient.scheduleJob(expectedScheduleJobRequest).block(); - - ArgumentCaptor captor = - ArgumentCaptor.forClass(DaprProtos.ScheduleJobRequest.class); - - verify(daprStub, times(1)).scheduleJobAlpha1(captor.capture(), Mockito.any()); - DaprProtos.ScheduleJobRequest actualScheduleJobRequest = captor.getValue(); - DaprProtos.Job job = actualScheduleJobRequest.getJob(); - assertEquals("testJob", job.getName()); - assertFalse(job.hasData()); - assertEquals( "* * * * * *", job.getSchedule()); - assertEquals(0, job.getRepeats()); - assertFalse(job.hasTtl()); - Assertions.assertTrue(job.hasFailurePolicy()); - assertEquals(2, job.getFailurePolicy().getConstant().getMaxRetries()); - } - - @Test - public void scheduleJobShouldHaveConstantPolicyWithIntervalWhenConstantPolicyIsSetWithInterval() { - doAnswer(invocation -> { - StreamObserver observer = 
invocation.getArgument(1); - observer.onCompleted(); // Simulate successful response - return null; - }).when(daprStub).scheduleJobAlpha1(any(DaprProtos.ScheduleJobRequest.class), any()); - - ScheduleJobRequest expectedScheduleJobRequest = new ScheduleJobRequest("testJob", - JobSchedule.fromString("* * * * * *")) - .setFailurePolicy(new ConstantFailurePolicy(Duration.of(2, ChronoUnit.SECONDS))); - - previewClient.scheduleJob(expectedScheduleJobRequest).block(); - - ArgumentCaptor captor = - ArgumentCaptor.forClass(DaprProtos.ScheduleJobRequest.class); - - verify(daprStub, times(1)).scheduleJobAlpha1(captor.capture(), Mockito.any()); - DaprProtos.ScheduleJobRequest actualScheduleJobRequest = captor.getValue(); - DaprProtos.Job job = actualScheduleJobRequest.getJob(); - assertEquals("testJob", job.getName()); - assertFalse(job.hasData()); - assertEquals( "* * * * * *", job.getSchedule()); - assertEquals(0, job.getRepeats()); - assertFalse(job.hasTtl()); - Assertions.assertTrue(job.hasFailurePolicy()); - assertEquals(Duration.of(2, ChronoUnit.SECONDS).getNano(), - job.getFailurePolicy().getConstant().getInterval().getNanos()); - } - - @Test - public void scheduleJobShouldHaveBothRetiresAndIntervalWhenConstantPolicyIsSetWithRetriesAndInterval() { - doAnswer(invocation -> { - StreamObserver observer = invocation.getArgument(1); - observer.onCompleted(); // Simulate successful response - return null; - }).when(daprStub).scheduleJobAlpha1(any(DaprProtos.ScheduleJobRequest.class), any()); - - ScheduleJobRequest expectedScheduleJobRequest = new ScheduleJobRequest("testJob", - JobSchedule.fromString("* * * * * *")) - .setFailurePolicy(new ConstantFailurePolicy(Duration.of(2, ChronoUnit.SECONDS)) - .setMaxRetries(10)); - - previewClient.scheduleJob(expectedScheduleJobRequest).block(); - - ArgumentCaptor captor = - ArgumentCaptor.forClass(DaprProtos.ScheduleJobRequest.class); - - verify(daprStub, times(1)).scheduleJobAlpha1(captor.capture(), Mockito.any()); - DaprProtos.ScheduleJobRequest actualScheduleJobRequest = captor.getValue(); - DaprProtos.Job job = actualScheduleJobRequest.getJob(); - assertEquals("testJob", job.getName()); - assertFalse(job.hasData()); - assertEquals( "* * * * * *", job.getSchedule()); - assertEquals(0, job.getRepeats()); - assertFalse(job.hasTtl()); - Assertions.assertTrue(job.hasFailurePolicy()); - assertEquals(Duration.of(2, ChronoUnit.SECONDS).getNano(), - job.getFailurePolicy().getConstant().getInterval().getNanos()); - assertEquals(10, job.getFailurePolicy().getConstant().getMaxRetries()); - } - - @Test - public void scheduleJobShouldThrowWhenNameAlreadyExists() { - AtomicInteger callCount = new AtomicInteger(0); - - doAnswer(invocation -> { - StreamObserver observer = invocation.getArgument(1); - if (callCount.incrementAndGet() == 1) { - // First call succeeds - observer.onCompleted(); - } else { - // Second call fails with ALREADY_EXISTS - observer.onError(newStatusRuntimeException("ALREADY_EXISTS", "Job with name 'testJob' already exists")); - } - return null; - }).when(daprStub).scheduleJobAlpha1(any(DaprProtos.ScheduleJobRequest.class), any()); - - // First call should succeed - ScheduleJobRequest firstRequest = new ScheduleJobRequest("testJob", Instant.now()); - assertDoesNotThrow(() -> previewClient.scheduleJob(firstRequest).block()); - - ArgumentCaptor captor = - ArgumentCaptor.forClass(DaprProtos.ScheduleJobRequest.class); - - verify(daprStub, times(1)).scheduleJobAlpha1(captor.capture(), Mockito.any()); - DaprProtos.ScheduleJobRequest actualScheduleJobRequest 
= captor.getValue(); - DaprProtos.Job job = actualScheduleJobRequest.getJob(); - assertEquals("testJob", job.getName()); - assertFalse(job.hasData()); - assertEquals(0, job.getRepeats()); - assertFalse(job.hasTtl()); - - // Second call with same name should fail - ScheduleJobRequest secondRequest = new ScheduleJobRequest("testJob", Instant.now()); - - assertThrowsDaprException( - ExecutionException.class, - "ALREADY_EXISTS", - "ALREADY_EXISTS: Job with name 'testJob' already exists", - () -> previewClient.scheduleJob(secondRequest).block()); - } - - @Test - public void scheduleJobShouldSucceedWhenNameAlreadyExistsWithOverwrite() { - doAnswer(invocation -> { - StreamObserver observer = invocation.getArgument(1); - observer.onCompleted(); // Simulate successful response for both calls - return null; - }).when(daprStub).scheduleJobAlpha1(any(DaprProtos.ScheduleJobRequest.class), any()); - - // First call should succeed - ScheduleJobRequest firstRequest = new ScheduleJobRequest("testJob", Instant.now()); - assertDoesNotThrow(() -> previewClient.scheduleJob(firstRequest).block()); - - // Second call with same name but overwrite=true should also succeed - ScheduleJobRequest secondRequest = new ScheduleJobRequest("testJob", Instant.now()) - .setOverwrite(true); - assertDoesNotThrow(() -> previewClient.scheduleJob(secondRequest).block()); - - // Verify that both calls were made successfully - ArgumentCaptor captor = - ArgumentCaptor.forClass(DaprProtos.ScheduleJobRequest.class); - verify(daprStub, times(2)).scheduleJobAlpha1(captor.capture(), any()); - - // Verify the first call doesn't have overwrite set - DaprProtos.ScheduleJobRequest firstActualRequest = captor.getAllValues().get(0); - assertFalse(firstActualRequest.getOverwrite()); - assertEquals("testJob", firstActualRequest.getJob().getName()); - - // Verify the second call has overwrite set to true - DaprProtos.ScheduleJobRequest secondActualRequest = captor.getAllValues().get(1); - assertTrue(secondActualRequest.getOverwrite()); - assertEquals("testJob", secondActualRequest.getJob().getName()); - } - - @Test - public void getJobShouldReturnResponseWhenAllFieldsArePresentInRequest() { - DateTimeFormatter iso8601Formatter = DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'") - .withZone(ZoneOffset.UTC); - - GetJobRequest getJobRequest = new GetJobRequest("testJob"); - - DaprProtos.Job job = DaprProtos.Job.newBuilder() - .setName("testJob") - .setTtl(OffsetDateTime.now().format(iso8601Formatter)) - .setData(Any.newBuilder().setValue(ByteString.copyFrom("testData".getBytes())).build()) - .setSchedule("*/5 * * * *") - .setRepeats(5) - .setDueTime(iso8601Formatter.format(Instant.now().plus(10, ChronoUnit.MINUTES))) - .build(); - - doAnswer(invocation -> { - StreamObserver observer = invocation.getArgument(1); - observer.onNext(DaprProtos.GetJobResponse.newBuilder() - .setJob(job) - .build()); - observer.onCompleted(); - return null; - }).when(daprStub).getJobAlpha1(any(DaprProtos.GetJobRequest.class), any()); - - Mono resultMono = previewClient.getJob(getJobRequest); - - GetJobResponse response = resultMono.block(); - assertNotNull(response); - assertEquals("testJob", response.getName()); - assertEquals("testData", new String(response.getData(), StandardCharsets.UTF_8)); - assertEquals("*/5 * * * *", response.getSchedule().getExpression()); - assertEquals(5, response.getRepeats()); - assertEquals(job.getTtl(), iso8601Formatter.format(response.getTtl())); - assertEquals(job.getDueTime(), iso8601Formatter.format(response.getDueTime())); - 
} - - @Test - public void getJobShouldReturnResponseWithScheduleSetWhenResponseHasSchedule() { - GetJobRequest getJobRequest = new GetJobRequest("testJob"); - - DaprProtos.Job job = DaprProtos.Job.newBuilder() - .setName("testJob") - .setSchedule("0 0 0 1 1 *") - .build(); - - doAnswer(invocation -> { - StreamObserver observer = invocation.getArgument(1); - observer.onNext(DaprProtos.GetJobResponse.newBuilder() - .setJob(job) - .build()); - observer.onCompleted(); - return null; - }).when(daprStub).getJobAlpha1(any(DaprProtos.GetJobRequest.class), any()); - - Mono resultMono = previewClient.getJob(getJobRequest); - - GetJobResponse response = resultMono.block(); - assertNotNull(response); - assertEquals("testJob", response.getName()); - assertNull(response.getData()); - assertEquals("0 0 0 1 1 *", response.getSchedule().getExpression()); - assertNull(response.getRepeats()); - assertNull(response.getTtl()); - assertNull(response.getDueTime()); - } - - @Test - public void getJobShouldReturnResponseWithDueTimeSetWhenResponseHasDueTime() { - GetJobRequest getJobRequest = new GetJobRequest("testJob"); - - String datetime = OffsetDateTime.now().toString(); - DaprProtos.Job job = DaprProtos.Job.newBuilder() - .setName("testJob") - .setDueTime(datetime) - .build(); - - doAnswer(invocation -> { - StreamObserver observer = invocation.getArgument(1); - observer.onNext(DaprProtos.GetJobResponse.newBuilder() - .setJob(job) - .build()); - observer.onCompleted(); - return null; - }).when(daprStub).getJobAlpha1(any(DaprProtos.GetJobRequest.class), any()); - - Mono resultMono = previewClient.getJob(getJobRequest); - - GetJobResponse response = resultMono.block(); - assertNotNull(response); - assertEquals("testJob", response.getName()); - assertNull(response.getData()); - assertNull(response.getSchedule()); - assertNull(response.getRepeats()); - assertNull(response.getTtl()); - assertEquals(job.getDueTime(), datetime); - } - - @Test - public void getJobShouldReturnResponseWithDropFailurePolicySet() { - GetJobRequest getJobRequest = new GetJobRequest("testJob"); - - String datetime = OffsetDateTime.now().toString(); - DaprProtos.Job job = DaprProtos.Job.newBuilder() - .setName("testJob") - .setDueTime(datetime) - .setFailurePolicy(CommonProtos.JobFailurePolicy.newBuilder() - .setDrop(CommonProtos.JobFailurePolicyDrop.newBuilder().build()).build()) - .build(); - - doAnswer(invocation -> { - StreamObserver observer = invocation.getArgument(1); - observer.onNext(DaprProtos.GetJobResponse.newBuilder() - .setJob(job) - .build()); - observer.onCompleted(); - return null; - }).when(daprStub).getJobAlpha1(any(DaprProtos.GetJobRequest.class), any()); - - Mono resultMono = previewClient.getJob(getJobRequest); - - GetJobResponse response = resultMono.block(); - assertNotNull(response); - assertEquals("testJob", response.getName()); - assertNull(response.getData()); - assertNull(response.getSchedule()); - assertNull(response.getRepeats()); - assertNull(response.getTtl()); - assertEquals(job.getDueTime(), datetime); - assertTrue(job.hasFailurePolicy()); - assertTrue(job.getFailurePolicy().hasDrop()); - } - - @Test - public void getJobShouldReturnResponseWithConstantFailurePolicyAndMaxRetriesSet() { - GetJobRequest getJobRequest = new GetJobRequest("testJob"); - - String datetime = OffsetDateTime.now().toString(); - DaprProtos.Job job = DaprProtos.Job.newBuilder() - .setName("testJob") - .setDueTime(datetime) - .setFailurePolicy(CommonProtos.JobFailurePolicy.newBuilder() - 
.setConstant(CommonProtos.JobFailurePolicyConstant.newBuilder().setMaxRetries(2).build()).build()) - .build(); - - doAnswer(invocation -> { - StreamObserver observer = invocation.getArgument(1); - observer.onNext(DaprProtos.GetJobResponse.newBuilder() - .setJob(job) - .build()); - observer.onCompleted(); - return null; - }).when(daprStub).getJobAlpha1(any(DaprProtos.GetJobRequest.class), any()); - - Mono resultMono = previewClient.getJob(getJobRequest); - - GetJobResponse response = resultMono.block(); - assertNotNull(response); - assertEquals("testJob", response.getName()); - assertNull(response.getData()); - assertNull(response.getSchedule()); - assertNull(response.getRepeats()); - assertNull(response.getTtl()); - assertEquals(job.getDueTime(), datetime); - assertTrue(job.hasFailurePolicy()); - assertTrue(job.getFailurePolicy().hasConstant()); - assertEquals(2, job.getFailurePolicy().getConstant().getMaxRetries()); - } - - @Test - public void getJobShouldReturnResponseWithConstantFailurePolicyAndIntervalSet() { - GetJobRequest getJobRequest = new GetJobRequest("testJob"); - - String datetime = OffsetDateTime.now().toString(); - DaprProtos.Job job = DaprProtos.Job.newBuilder() - .setName("testJob") - .setDueTime(datetime) - .setFailurePolicy(CommonProtos.JobFailurePolicy.newBuilder() - .setConstant(CommonProtos.JobFailurePolicyConstant.newBuilder() - .setInterval(com.google.protobuf.Duration.newBuilder().setNanos(5).build()).build()).build()) - .build(); - - doAnswer(invocation -> { - StreamObserver observer = invocation.getArgument(1); - observer.onNext(DaprProtos.GetJobResponse.newBuilder() - .setJob(job) - .build()); - observer.onCompleted(); - return null; - }).when(daprStub).getJobAlpha1(any(DaprProtos.GetJobRequest.class), any()); - - Mono resultMono = previewClient.getJob(getJobRequest); - - GetJobResponse response = resultMono.block(); - assertNotNull(response); - assertEquals("testJob", response.getName()); - assertNull(response.getData()); - assertNull(response.getSchedule()); - assertNull(response.getRepeats()); - assertNull(response.getTtl()); - assertEquals(job.getDueTime(), datetime); - assertTrue(job.hasFailurePolicy()); - assertTrue(job.getFailurePolicy().hasConstant()); - assertEquals(5, job.getFailurePolicy().getConstant().getInterval().getNanos()); - } - - @Test - public void getJobShouldReturnResponseWithConstantFailurePolicyIntervalAndMaxRetriesSet() { - GetJobRequest getJobRequest = new GetJobRequest("testJob"); - - String datetime = OffsetDateTime.now().toString(); - DaprProtos.Job job = DaprProtos.Job.newBuilder() - .setName("testJob") - .setDueTime(datetime) - .setFailurePolicy(CommonProtos.JobFailurePolicy.newBuilder() - .setConstant(CommonProtos.JobFailurePolicyConstant.newBuilder() - .setMaxRetries(10) - .setInterval(com.google.protobuf.Duration.newBuilder().setNanos(5).build()).build()).build()) - .build(); - - doAnswer(invocation -> { - StreamObserver observer = invocation.getArgument(1); - observer.onNext(DaprProtos.GetJobResponse.newBuilder() - .setJob(job) - .build()); - observer.onCompleted(); - return null; - }).when(daprStub).getJobAlpha1(any(DaprProtos.GetJobRequest.class), any()); - - Mono resultMono = previewClient.getJob(getJobRequest); - - GetJobResponse response = resultMono.block(); - assertNotNull(response); - assertEquals("testJob", response.getName()); - assertNull(response.getData()); - assertNull(response.getSchedule()); - assertNull(response.getRepeats()); - assertNull(response.getTtl()); - assertEquals(job.getDueTime(), datetime); - 
assertTrue(job.hasFailurePolicy()); - assertTrue(job.getFailurePolicy().hasConstant()); - assertEquals(10, job.getFailurePolicy().getConstant().getMaxRetries()); - assertEquals(5, job.getFailurePolicy().getConstant().getInterval().getNanos()); - } - - - @Test - public void getJobShouldThrowWhenRequestIsNull() { - IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> { - previewClient.getJob(null).block(); - }); - assertEquals("getJobRequest cannot be null", exception.getMessage()); - } - - @Test - public void getJobShouldThrowWhenNameIsNullRequest() { - GetJobRequest getJobRequest = new GetJobRequest(null); - - IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> { - previewClient.getJob(getJobRequest).block(); - }); - assertEquals("Name in the request cannot be null or empty", exception.getMessage()); - } - - @Test - public void getJobShouldThrowWhenNameIsEmptyRequest() { - GetJobRequest getJobRequest =new GetJobRequest("");; - - IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> { - previewClient.getJob(getJobRequest).block(); - }); - assertEquals("Name in the request cannot be null or empty", exception.getMessage()); - } - - @Test - public void deleteJobShouldSucceedWhenValidRequest() { - DeleteJobRequest deleteJobRequest = new DeleteJobRequest("testJob"); - - doAnswer(invocation -> { - StreamObserver observer = invocation.getArgument(1); - observer.onCompleted(); // Simulate successful response - return null; - }).when(daprStub).deleteJobAlpha1(any(DaprProtos.DeleteJobRequest.class), any()); - - Mono resultMono = previewClient.deleteJob(deleteJobRequest); - - assertDoesNotThrow(() -> resultMono.block()); - } - - @Test - public void deleteJobShouldThrowRequestIsNull() { - IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> { - previewClient.deleteJob(null).block(); - }); - assertEquals("deleteJobRequest cannot be null", exception.getMessage()); - } - - @Test - public void deleteJobShouldThrowWhenNameIsNullRequest() { - DeleteJobRequest deleteJobRequest = new DeleteJobRequest(null); - IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> { - previewClient.deleteJob(deleteJobRequest).block(); - }); - assertEquals("Name in the request cannot be null or empty", exception.getMessage()); - } - - @Test - public void deleteJobShouldThrowWhenNameIsEmptyRequest() { - DeleteJobRequest deleteJobRequest = new DeleteJobRequest(""); - IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> { - previewClient.deleteJob(deleteJobRequest).block(); - }); - assertEquals("Name in the request cannot be null or empty", exception.getMessage()); - } - @Test public void converseAlpha2ShouldThrowIllegalArgumentExceptionWhenNameIsNull() { List messages = new ArrayList<>(); From c4893756f4a5b21b266f29d68ff3124e619b31e8 Mon Sep 17 00:00:00 2001 From: Siri Varma Vegiraju Date: Tue, 6 Jan 2026 09:42:25 -0800 Subject: [PATCH 16/18] Add Cryptography APIs to the Java SDK (#1599) * Bringing Durable Task Java as a Maven module inside the Java SDK (#1575) * fixing checkstyle and javadocs Signed-off-by: salaboy * Replace openjdk:17-jdk-slim to eclipse-temurin:17-jdk-jammy (#1574) Signed-off-by: Matheus Cruz Signed-off-by: salaboy * Align Java API with other languages (#1560) * Align Java API with other languages Signed-off-by: Matheus Cruz * Update documentation Signed-off-by: Matheus Cruz * Change return type of 
waitForWorkflowStart method Signed-off-by: artur-ciocanu --------- Signed-off-by: Matheus Cruz Signed-off-by: artur-ciocanu Co-authored-by: artur-ciocanu Signed-off-by: salaboy * use built in durable task Signed-off-by: salaboy * exclude jacoco rules for examples and durabletask-client Signed-off-by: salaboy * increasing timeout for IT Signed-off-by: salaboy * removing dt build from matrix Signed-off-by: salaboy * adding java to dt build Signed-off-by: salaboy * Fix dependencies multi app build and add proper test deps (#1572) * Force Jackson version to override the SB Jackson version Signed-off-by: Artur Ciocanu * Move all the Jackson deps to parent POM. Signed-off-by: Artur Ciocanu * Ensure app JAR build order Signed-off-by: Artur Ciocanu * Remove explicit Jackson from sdk-tests module. Signed-off-by: Artur Ciocanu * Make sure test is used for test dependencies. Signed-off-by: Artur Ciocanu * Remove extra Jackson modules. Signed-off-by: Artur Ciocanu --------- Signed-off-by: Artur Ciocanu Signed-off-by: salaboy * docs: add architecture diagram to README (#1549) * Preview New README * Preview New README 2 * Preview New README 3 * docs: add architecture diagram showing Java SDK interaction with Dapr runtime (close #<915>) * docs: add architecture diagram showing Java SDK interaction with Dapr runtime CORRECTION (close #<915>) * docs: add architecture diagram showing Java SDK interaction with Dapr runtime (close #<915>) * docs: add architecture diagram showing Java SDK interaction with Dapr runtime (close #<915>) --------- Co-authored-by: Siri Varma Vegiraju Co-authored-by: artur-ciocanu Co-authored-by: Cassie Coyle Signed-off-by: salaboy * Add statestore example with Outbox pattern (#1582) * Add statestore example with Outbox pattern Signed-off-by: Matheus Cruz * Clean events after each test Signed-off-by: Matheus Cruz * Add license header Signed-off-by: Matheus Cruz * Apply pull request suggestions Signed-off-by: Matheus Cruz --------- Signed-off-by: Matheus Cruz Co-authored-by: salaboy Signed-off-by: salaboy * adding new method signature plus test (#1570) * adding new method signature plus test Signed-off-by: salaboy * re adding imports Signed-off-by: salaboy * fixing style Signed-off-by: salaboy * checking empty metadata Signed-off-by: salaboy * copy meta for safety and check if key is present Signed-off-by: salaboy * Centralize Maven dependency version management (#1564) Signed-off-by: salaboy * Fix dependencies multi app build and add proper test deps (#1572) * Force Jackson version to override the SB Jackson version Signed-off-by: Artur Ciocanu * Move all the Jackson deps to parent POM. Signed-off-by: Artur Ciocanu * Ensure app JAR build order Signed-off-by: Artur Ciocanu * Remove explicit Jackson from sdk-tests module. Signed-off-by: Artur Ciocanu * Make sure test is used for test dependencies. Signed-off-by: Artur Ciocanu * Remove extra Jackson modules. Signed-off-by: Artur Ciocanu --------- Signed-off-by: Artur Ciocanu Signed-off-by: salaboy * reverting pom Signed-off-by: salaboy * fix codestyle Signed-off-by: salaboy * using metaCopy Signed-off-by: salaboy --------- Signed-off-by: salaboy Signed-off-by: Artur Ciocanu Co-authored-by: artur-ciocanu Signed-off-by: salaboy * Bump actions/upload-artifact from 4 to 5 (#1587) Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 4 to 5. 
- [Release notes](https://github.com/actions/upload-artifact/releases) - [Commits](https://github.com/actions/upload-artifact/compare/v4...v5) --- updated-dependencies: - dependency-name: actions/upload-artifact dependency-version: '5' dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: salaboy * Add gRPC support to Dapr testcontainer (#1586) * Add gRPC support to Dapr testcontainer Signed-off-by: wlfgang * Avoid using null to indicate default value Signed-off-by: wlfgang --------- Signed-off-by: wlfgang Co-authored-by: artur-ciocanu Co-authored-by: wlfgang Signed-off-by: salaboy * Use dependencies BOM and remove duplicates. (#1588) Signed-off-by: Artur Ciocanu Signed-off-by: salaboy * Examples + Docs for App API Token authentication for gRPC and HTTP (#1589) * example Signed-off-by: Cassandra Coyle * docs for example Signed-off-by: Cassandra Coyle --------- Signed-off-by: Cassandra Coyle Signed-off-by: salaboy * Another set of Maven version, properties and plugin improvements (#1596) Signed-off-by: salaboy * Adding a Flux based subscribeToEvents method (#1598) * Adding a Flux based subscribeToEvents method Signed-off-by: Artur Ciocanu * Simplify GRPC stream handling Signed-off-by: Artur Ciocanu * Simplify Javadoc Signed-off-by: Artur Ciocanu * Fix unit tests and simplify implementation Signed-off-by: Artur Ciocanu * Adding event subscriber stream observer to simplify subscription logic Signed-off-by: Artur Ciocanu * Use start() method to start stream subscription Signed-off-by: Artur Ciocanu * Add unit test for event suscriber observer Signed-off-by: Artur Ciocanu * Improve the tests a little bit Signed-off-by: Artur Ciocanu * Remove the unnecessary method Signed-off-by: Artur Ciocanu * Improve error handling and use CloudEvent wrapper Signed-off-by: Artur Ciocanu * Fix unit tests asserts Signed-off-by: Artur Ciocanu * Adjust Java examples for Subscriber Signed-off-by: Artur Ciocanu --------- Signed-off-by: Artur Ciocanu Signed-off-by: salaboy * Remove SDK docs due to migration to main Docs repo (#1593) * Remove SDK docs due to migration to main Docs repo Signed-off-by: Marc Duiker * Remove sed lines related to sdk docs Signed-off-by: Marc Duiker --------- Signed-off-by: Marc Duiker Co-authored-by: salaboy Signed-off-by: salaboy * cleaning up sdk version script Signed-off-by: salaboy --------- Signed-off-by: salaboy Signed-off-by: Matheus Cruz Signed-off-by: artur-ciocanu Signed-off-by: Artur Ciocanu Signed-off-by: dependabot[bot] Signed-off-by: wlfgang Signed-off-by: Cassandra Coyle Signed-off-by: Marc Duiker Co-authored-by: Matheus Cruz <56329339+mcruzdev@users.noreply.github.com> Co-authored-by: artur-ciocanu Co-authored-by: Raymundo Zamora Co-authored-by: Siri Varma Vegiraju Co-authored-by: Cassie Coyle Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: wlfgang <14792753+wlfgang@users.noreply.github.com> Co-authored-by: wlfgang Co-authored-by: Marc Duiker Signed-off-by: siri-varma * Add Cryptography API support with encrypt/decrypt operations - Add EncryptRequestAlpha1 and DecryptRequestAlpha1 domain classes - Implement encrypt() and decrypt() methods in DaprPreviewClient - Add CryptoExample and StreamingCryptoExample with documentation - Add integration tests for crypto operations - Add localstorage crypto component configuration Signed-off-by: siri-varma * Add keys Signed-off-by: 
siri-varma * Update localstorage.yaml key path to relative Signed-off-by: Siri Varma Vegiraju * Fix things Signed-off-by: siri-varma * fix things Signed-off-by: siri-varma * Add missing imports Signed-off-by: siri-varma --------- Signed-off-by: salaboy Signed-off-by: Matheus Cruz Signed-off-by: artur-ciocanu Signed-off-by: Artur Ciocanu Signed-off-by: dependabot[bot] Signed-off-by: wlfgang Signed-off-by: Cassandra Coyle Signed-off-by: Marc Duiker Signed-off-by: siri-varma Signed-off-by: Siri Varma Vegiraju Co-authored-by: salaboy Co-authored-by: Matheus Cruz <56329339+mcruzdev@users.noreply.github.com> Co-authored-by: artur-ciocanu Co-authored-by: Raymundo Zamora Co-authored-by: Cassie Coyle Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: wlfgang <14792753+wlfgang@users.noreply.github.com> Co-authored-by: wlfgang Co-authored-by: Marc Duiker Signed-off-by: salaboy --- .github/workflows/validate.yml | 6 + examples/components/crypto/keys/.gitkeep | 0 examples/components/crypto/localstorage.yaml | 12 + .../dapr/examples/crypto/CryptoExample.java | 173 +++++ .../java/io/dapr/examples/crypto/README.md | 178 +++++ .../crypto/StreamingCryptoExample.java | 245 +++++++ .../crypto/DaprPreviewClientCryptoIT.java | 373 ++++++++++ .../java/io/dapr/client/DaprClientImpl.java | 194 +++++ .../io/dapr/client/DaprPreviewClient.java | 22 + .../client/domain/DecryptRequestAlpha1.java | 79 ++ .../client/domain/EncryptRequestAlpha1.java | 152 ++++ .../client/DaprPreviewClientGrpcTest.java | 673 ++++++++++++++++++ .../dapr/client/ProtobufValueHelperTest.java | 26 +- .../domain/DecryptRequestAlpha1Test.java | 346 +++++++++ .../domain/EncryptRequestAlpha1Test.java | 476 +++++++++++++ 15 files changed, 2942 insertions(+), 13 deletions(-) create mode 100644 examples/components/crypto/keys/.gitkeep create mode 100644 examples/components/crypto/localstorage.yaml create mode 100644 examples/src/main/java/io/dapr/examples/crypto/CryptoExample.java create mode 100644 examples/src/main/java/io/dapr/examples/crypto/README.md create mode 100644 examples/src/main/java/io/dapr/examples/crypto/StreamingCryptoExample.java create mode 100644 sdk-tests/src/test/java/io/dapr/it/testcontainers/crypto/DaprPreviewClientCryptoIT.java create mode 100644 sdk/src/main/java/io/dapr/client/domain/DecryptRequestAlpha1.java create mode 100644 sdk/src/main/java/io/dapr/client/domain/EncryptRequestAlpha1.java create mode 100644 sdk/src/test/java/io/dapr/client/domain/DecryptRequestAlpha1Test.java create mode 100644 sdk/src/test/java/io/dapr/client/domain/EncryptRequestAlpha1Test.java diff --git a/.github/workflows/validate.yml b/.github/workflows/validate.yml index 727f783df..43d19bcd3 100644 --- a/.github/workflows/validate.yml +++ b/.github/workflows/validate.yml @@ -113,6 +113,10 @@ jobs: run: sleep 30 && docker logs dapr_scheduler && nc -vz localhost 50006 - name: Install jars run: ./mvnw clean install -DskipTests -q + - name: Validate crypto example + working-directory: ./examples + run: | + mm.py ./src/main/java/io/dapr/examples/crypto/README.md - name: Validate workflows example working-directory: ./examples run: | @@ -186,3 +190,5 @@ jobs: run: | mm.py ./src/main/java/io/dapr/examples/pubsub/stream/README.md + + diff --git a/examples/components/crypto/keys/.gitkeep b/examples/components/crypto/keys/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/examples/components/crypto/localstorage.yaml b/examples/components/crypto/localstorage.yaml new file mode 100644 index 
000000000..7d7aa5d73 --- /dev/null +++ b/examples/components/crypto/localstorage.yaml @@ -0,0 +1,12 @@ +apiVersion: dapr.io/v1alpha1 +kind: Component +metadata: + name: localstoragecrypto +spec: + type: crypto.dapr.localstorage + version: v1 + metadata: + # Path to the directory containing keys (PEM files) + # This path is relative to the resources-path directory + - name: path + value: "./components/crypto/keys/" diff --git a/examples/src/main/java/io/dapr/examples/crypto/CryptoExample.java b/examples/src/main/java/io/dapr/examples/crypto/CryptoExample.java new file mode 100644 index 000000000..977ac76c3 --- /dev/null +++ b/examples/src/main/java/io/dapr/examples/crypto/CryptoExample.java @@ -0,0 +1,173 @@ +/* + * Copyright 2021 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.examples.crypto; + +import io.dapr.client.DaprClientBuilder; +import io.dapr.client.DaprPreviewClient; +import io.dapr.client.domain.DecryptRequestAlpha1; +import io.dapr.client.domain.EncryptRequestAlpha1; +import io.dapr.config.Properties; +import io.dapr.config.Property; +import reactor.core.publisher.Flux; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.security.KeyPair; +import java.security.KeyPairGenerator; +import java.security.NoSuchAlgorithmException; +import java.util.Base64; +import java.util.Map; + +/** + * CryptoExample demonstrates using the Dapr Cryptography building block + * to encrypt and decrypt data using a cryptography component. + * + *

<p>This example shows:
+ * <ul>
+ *   <li>Encrypting plaintext data with a specified key and algorithm</li>
+ *   <li>Decrypting ciphertext data back to plaintext</li>
+ *   <li>Automatic key generation if keys don't exist</li>
+ * </ul>
+ *
+ * <p>Prerequisites:
+ * <ul>
+ *   <li>Dapr installed and initialized</li>
+ *   <li>A cryptography component configured (e.g., local storage crypto)</li>
+ * </ul>
+ */ +public class CryptoExample { + + private static final String CRYPTO_COMPONENT_NAME = "localstoragecrypto"; + private static final String KEY_NAME = "rsa-private-key"; + private static final String KEY_WRAP_ALGORITHM = "RSA"; + private static final String KEYS_DIR = "components/crypto/keys"; + + /** + * The main method demonstrating encryption and decryption with Dapr. + * + * @param args Command line arguments (unused). + */ + public static void main(String[] args) throws Exception { + // Generate keys if they don't exist + generateKeysIfNeeded(); + + Map, String> overrides = Map.of( + Properties.HTTP_PORT, "3500", + Properties.GRPC_PORT, "50001" + ); + + try (DaprPreviewClient client = new DaprClientBuilder().withPropertyOverrides(overrides).buildPreviewClient()) { + + String originalMessage = "This is a secret message"; + byte[] plainText = originalMessage.getBytes(StandardCharsets.UTF_8); + + System.out.println("=== Dapr Cryptography Example ==="); + System.out.println("Original message: " + originalMessage); + System.out.println(); + + // Encrypt the message + System.out.println("Encrypting message..."); + EncryptRequestAlpha1 encryptRequest = new EncryptRequestAlpha1( + CRYPTO_COMPONENT_NAME, + Flux.just(plainText), + KEY_NAME, + KEY_WRAP_ALGORITHM + ); + + byte[] encryptedData = client.encrypt(encryptRequest) + .collectList() + .map(CryptoExample::combineChunks) + .block(); + + System.out.println("Encryption successful!"); + System.out.println("Encrypted data length: " + encryptedData.length + " bytes"); + System.out.println(); + + // Decrypt the message + System.out.println("Decrypting message..."); + DecryptRequestAlpha1 decryptRequest = new DecryptRequestAlpha1( + CRYPTO_COMPONENT_NAME, + Flux.just(encryptedData) + ); + + byte[] decryptedData = client.decrypt(decryptRequest) + .collectList() + .map(CryptoExample::combineChunks) + .block(); + + String decryptedMessage = new String(decryptedData, StandardCharsets.UTF_8); + System.out.println("Decryption successful!"); + System.out.println("Decrypted message: " + decryptedMessage); + System.out.println(); + + if (originalMessage.equals(decryptedMessage)) { + System.out.println("SUCCESS: The decrypted message matches the original."); + } else { + System.out.println("ERROR: The decrypted message does not match the original."); + } + + } catch (Exception e) { + System.err.println("Error during crypto operations: " + e.getMessage()); + throw new RuntimeException(e); + } + } + + /** + * Generates RSA key pair if the key file doesn't exist. 
+ */ + private static void generateKeysIfNeeded() throws NoSuchAlgorithmException, IOException { + Path keysDir = Paths.get(KEYS_DIR); + Path keyFile = keysDir.resolve(KEY_NAME + ".pem"); + + if (Files.exists(keyFile)) { + System.out.println("Using existing key: " + keyFile.toAbsolutePath()); + return; + } + + System.out.println("Generating RSA key pair..."); + Files.createDirectories(keysDir); + + KeyPairGenerator keyGen = KeyPairGenerator.getInstance("RSA"); + keyGen.initialize(4096); + KeyPair keyPair = keyGen.generateKeyPair(); + + String privateKeyPem = "-----BEGIN PRIVATE KEY-----\n" + + Base64.getMimeEncoder(64, "\n".getBytes()).encodeToString(keyPair.getPrivate().getEncoded()) + + "\n-----END PRIVATE KEY-----\n"; + + String publicKeyPem = "-----BEGIN PUBLIC KEY-----\n" + + Base64.getMimeEncoder(64, "\n".getBytes()).encodeToString(keyPair.getPublic().getEncoded()) + + "\n-----END PUBLIC KEY-----\n"; + + Files.writeString(keyFile, privateKeyPem + publicKeyPem); + System.out.println("Key generated: " + keyFile.toAbsolutePath()); + } + + /** + * Combines byte array chunks into a single byte array. + */ + private static byte[] combineChunks(java.util.List chunks) { + int totalSize = chunks.stream().mapToInt(chunk -> chunk.length).sum(); + byte[] result = new byte[totalSize]; + int pos = 0; + for (byte[] chunk : chunks) { + System.arraycopy(chunk, 0, result, pos, chunk.length); + pos += chunk.length; + } + return result; + } +} diff --git a/examples/src/main/java/io/dapr/examples/crypto/README.md b/examples/src/main/java/io/dapr/examples/crypto/README.md new file mode 100644 index 000000000..c9006df20 --- /dev/null +++ b/examples/src/main/java/io/dapr/examples/crypto/README.md @@ -0,0 +1,178 @@ +## Dapr Cryptography API Examples + +This example provides the different capabilities provided by Dapr Java SDK for Cryptography. For further information about Cryptography APIs please refer to [this link](https://docs.dapr.io/developing-applications/building-blocks/cryptography/cryptography-overview/) + +### Using the Cryptography API + +The Java SDK exposes several methods for this - +* `client.encrypt(...)` for encrypting data using a cryptography component. +* `client.decrypt(...)` for decrypting data using a cryptography component. + +## Pre-requisites + +* [Dapr CLI](https://docs.dapr.io/getting-started/install-dapr-cli/). +* Java JDK 11 (or greater): + * [Microsoft JDK 11](https://docs.microsoft.com/en-us/java/openjdk/download#openjdk-11) + * [Oracle JDK 11](https://www.oracle.com/technetwork/java/javase/downloads/index.html#JDK11) + * [OpenJDK 11](https://jdk.java.net/11/) +* [Apache Maven](https://maven.apache.org/install.html) version 3.x. + +### Checking out the code + +Clone this repository: + +```sh +git clone https://github.com/dapr/java-sdk.git +cd java-sdk +``` + +Then build the Maven project: + +```sh +# make sure you are in the `java-sdk` directory. +mvn install +``` + +Then get into the examples directory: + +```sh +cd examples +``` + +### Initialize Dapr + +Run `dapr init` to initialize Dapr in Self-Hosted Mode if it's not already initialized. + +### Running the Example + +This example uses the Java SDK Dapr client to **Encrypt and Decrypt** data. The example automatically generates RSA keys if they don't exist. + +#### Example 1: Basic Crypto Example + +`CryptoExample.java` demonstrates basic encryption and decryption of a simple message. 
+ +```java +public class CryptoExample { + private static final String CRYPTO_COMPONENT_NAME = "localstoragecrypto"; + private static final String KEY_NAME = "rsa-private-key"; + private static final String KEY_WRAP_ALGORITHM = "RSA"; + + public static void main(String[] args) { + try (DaprPreviewClient client = new DaprClientBuilder().buildPreviewClient()) { + + String originalMessage = "This is a secret message"; + byte[] plainText = originalMessage.getBytes(StandardCharsets.UTF_8); + + // Encrypt the message + EncryptRequestAlpha1 encryptRequest = new EncryptRequestAlpha1( + CRYPTO_COMPONENT_NAME, + Flux.just(plainText), + KEY_NAME, + KEY_WRAP_ALGORITHM + ); + + byte[] encryptedData = client.encrypt(encryptRequest) + .collectList() + .map(chunks -> /* combine chunks */) + .block(); + + // Decrypt the message + DecryptRequestAlpha1 decryptRequest = new DecryptRequestAlpha1( + CRYPTO_COMPONENT_NAME, + Flux.just(encryptedData) + ); + + byte[] decryptedData = client.decrypt(decryptRequest) + .collectList() + .map(chunks -> /* combine chunks */) + .block(); + } + } +} +``` + +Use the following command to run this example: + + + +```bash +mkdir -p ./components/crypto/keys && openssl genrsa -out ./components/crypto/keys/rsa-private-key.pem 4096 && openssl rsa -in ./components/crypto/keys/rsa-private-key.pem -pubout -out ./components/crypto/keys/rsa-private-key.pub.pem && cp ./components/crypto/keys/rsa-private-key.pem ./components/crypto/keys/rsa-private-key && echo "Keys generated successfully" +``` + + + + + +```bash +dapr run --resources-path ./components/crypto --app-id crypto-app --dapr-http-port 3500 --dapr-grpc-port 50001 -- java -jar target/dapr-java-sdk-examples-exec.jar io.dapr.examples.crypto.CryptoExample +``` + + + +#### Example 2: Streaming Crypto Example + +`StreamingCryptoExample.java` demonstrates advanced scenarios including: +- Multi-chunk data encryption +- Large data encryption (100KB+) +- Custom encryption ciphers + +```bash +dapr run --resources-path ./components/crypto --app-id crypto-app --dapr-http-port 3500 --dapr-grpc-port 50001 -- java -jar target/dapr-java-sdk-examples-exec.jar io.dapr.examples.crypto.StreamingCryptoExample +``` + +### Sample Output + +``` +=== Dapr Cryptography Example === +Original message: This is a secret message + +Encrypting message... +Encryption successful! +Encrypted data length: 512 bytes + +Decrypting message... +Decryption successful! +Decrypted message: This is a secret message + +SUCCESS: The decrypted message matches the original. 
+``` + +### Supported Key Wrap Algorithms + +The following key wrap algorithms are supported: +- `A256KW` (alias: `AES`) - AES key wrap +- `A128CBC`, `A192CBC`, `A256CBC` - AES CBC modes +- `RSA-OAEP-256` (alias: `RSA`) - RSA OAEP with SHA-256 + +### Supported Data Encryption Ciphers + +Optional data encryption ciphers: +- `aes-gcm` (default) - AES in GCM mode +- `chacha20-poly1305` - ChaCha20-Poly1305 cipher + +### Cleanup + +To stop the app, run (or press CTRL+C): + + + +```bash +dapr stop --app-id crypto-app +``` + + diff --git a/examples/src/main/java/io/dapr/examples/crypto/StreamingCryptoExample.java b/examples/src/main/java/io/dapr/examples/crypto/StreamingCryptoExample.java new file mode 100644 index 000000000..038cf8ad9 --- /dev/null +++ b/examples/src/main/java/io/dapr/examples/crypto/StreamingCryptoExample.java @@ -0,0 +1,245 @@ +/* + * Copyright 2021 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.examples.crypto; + +import io.dapr.client.DaprClientBuilder; +import io.dapr.client.DaprPreviewClient; +import io.dapr.client.domain.DecryptRequestAlpha1; +import io.dapr.client.domain.EncryptRequestAlpha1; +import io.dapr.config.Properties; +import io.dapr.config.Property; +import reactor.core.publisher.Flux; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.security.KeyPair; +import java.security.KeyPairGenerator; +import java.security.NoSuchAlgorithmException; +import java.util.Base64; +import java.util.Map; +import java.util.Random; + +/** + * StreamingCryptoExample demonstrates using the Dapr Cryptography building block + * with streaming data for handling large payloads efficiently. + * + *

<p>This example shows:
+ * <ul>
+ *   <li>Encrypting large data using streaming</li>
+ *   <li>Using optional parameters like data encryption cipher</li>
+ *   <li>Handling chunked data for encryption/decryption</li>
+ * </ul>
+ */ +public class StreamingCryptoExample { + + private static final String CRYPTO_COMPONENT_NAME = "localstoragecrypto"; + private static final String KEY_NAME = "rsa-private-key"; + private static final String KEY_WRAP_ALGORITHM = "RSA"; + private static final String KEYS_DIR = "components/crypto/keys"; + + /** + * The main method demonstrating streaming encryption and decryption with Dapr. + * + * @param args Command line arguments (unused). + */ + public static void main(String[] args) throws Exception { + // Generate keys if they don't exist + generateKeysIfNeeded(); + + Map, String> overrides = Map.of( + Properties.HTTP_PORT, "3500", + Properties.GRPC_PORT, "50001" + ); + + try (DaprPreviewClient client = new DaprClientBuilder().withPropertyOverrides(overrides).buildPreviewClient()) { + + System.out.println("=== Dapr Streaming Cryptography Example ==="); + System.out.println(); + + // Example 1: Streaming multiple chunks + System.out.println("--- Example 1: Multi-chunk Encryption ---"); + demonstrateChunkedEncryption(client); + System.out.println(); + + // Example 2: Large data encryption + System.out.println("--- Example 2: Large Data Encryption ---"); + demonstrateLargeDataEncryption(client); + System.out.println(); + + // Example 3: Custom encryption cipher + System.out.println("--- Example 3: Custom Encryption Cipher ---"); + demonstrateCustomCipher(client); + + } catch (Exception e) { + System.err.println("Error during crypto operations: " + e.getMessage()); + throw new RuntimeException(e); + } + } + + /** + * Generates RSA key pair if the key file doesn't exist. + */ + private static void generateKeysIfNeeded() throws NoSuchAlgorithmException, IOException { + Path keysDir = Paths.get(KEYS_DIR); + Path keyFile = keysDir.resolve(KEY_NAME + ".pem"); + + if (Files.exists(keyFile)) { + System.out.println("Using existing key: " + keyFile.toAbsolutePath()); + return; + } + + System.out.println("Generating RSA key pair..."); + Files.createDirectories(keysDir); + + KeyPairGenerator keyGen = KeyPairGenerator.getInstance("RSA"); + keyGen.initialize(4096); + KeyPair keyPair = keyGen.generateKeyPair(); + + String privateKeyPem = "-----BEGIN PRIVATE KEY-----\n" + + Base64.getMimeEncoder(64, "\n".getBytes()).encodeToString(keyPair.getPrivate().getEncoded()) + + "\n-----END PRIVATE KEY-----\n"; + + String publicKeyPem = "-----BEGIN PUBLIC KEY-----\n" + + Base64.getMimeEncoder(64, "\n".getBytes()).encodeToString(keyPair.getPublic().getEncoded()) + + "\n-----END PUBLIC KEY-----\n"; + + Files.writeString(keyFile, privateKeyPem + publicKeyPem); + System.out.println("Key generated: " + keyFile.toAbsolutePath()); + } + + /** + * Demonstrates encrypting data sent in multiple chunks. + */ + private static void demonstrateChunkedEncryption(DaprPreviewClient client) { + byte[] chunk1 = "First chunk of data. ".getBytes(StandardCharsets.UTF_8); + byte[] chunk2 = "Second chunk of data. 
".getBytes(StandardCharsets.UTF_8); + byte[] chunk3 = "Third and final chunk.".getBytes(StandardCharsets.UTF_8); + + byte[] fullData = new byte[chunk1.length + chunk2.length + chunk3.length]; + System.arraycopy(chunk1, 0, fullData, 0, chunk1.length); + System.arraycopy(chunk2, 0, fullData, chunk1.length, chunk2.length); + System.arraycopy(chunk3, 0, fullData, chunk1.length + chunk2.length, chunk3.length); + + System.out.println("Original data: " + new String(fullData, StandardCharsets.UTF_8)); + System.out.println("Sending as 3 chunks..."); + + EncryptRequestAlpha1 encryptRequest = new EncryptRequestAlpha1( + CRYPTO_COMPONENT_NAME, + Flux.just(chunk1, chunk2, chunk3), + KEY_NAME, + KEY_WRAP_ALGORITHM + ); + + byte[] encryptedData = collectBytes(client.encrypt(encryptRequest)); + System.out.println("Encrypted data size: " + encryptedData.length + " bytes"); + + DecryptRequestAlpha1 decryptRequest = new DecryptRequestAlpha1( + CRYPTO_COMPONENT_NAME, + Flux.just(encryptedData) + ); + + byte[] decryptedData = collectBytes(client.decrypt(decryptRequest)); + String decryptedMessage = new String(decryptedData, StandardCharsets.UTF_8); + System.out.println("Decrypted data: " + decryptedMessage); + System.out.println("Verification: " + (new String(fullData, StandardCharsets.UTF_8).equals(decryptedMessage) + ? "SUCCESS" : "FAILED")); + } + + /** + * Demonstrates encrypting a large data payload. + */ + private static void demonstrateLargeDataEncryption(DaprPreviewClient client) { + int size = 100 * 1024; + byte[] largeData = new byte[size]; + new Random().nextBytes(largeData); + + System.out.println("Original data size: " + size + " bytes (100KB)"); + + EncryptRequestAlpha1 encryptRequest = new EncryptRequestAlpha1( + CRYPTO_COMPONENT_NAME, + Flux.just(largeData), + KEY_NAME, + KEY_WRAP_ALGORITHM + ); + + long startTime = System.currentTimeMillis(); + byte[] encryptedData = collectBytes(client.encrypt(encryptRequest)); + long encryptTime = System.currentTimeMillis() - startTime; + System.out.println("Encrypted data size: " + encryptedData.length + " bytes (took " + encryptTime + "ms)"); + + DecryptRequestAlpha1 decryptRequest = new DecryptRequestAlpha1( + CRYPTO_COMPONENT_NAME, + Flux.just(encryptedData) + ); + + startTime = System.currentTimeMillis(); + byte[] decryptedData = collectBytes(client.decrypt(decryptRequest)); + long decryptTime = System.currentTimeMillis() - startTime; + System.out.println("Decrypted data size: " + decryptedData.length + " bytes (took " + decryptTime + "ms)"); + + boolean matches = java.util.Arrays.equals(largeData, decryptedData); + System.out.println("Verification: " + (matches ? "SUCCESS" : "FAILED")); + } + + /** + * Demonstrates using a custom data encryption cipher. 
+ */ + private static void demonstrateCustomCipher(DaprPreviewClient client) { + String message = "Message encrypted with custom cipher (aes-gcm)"; + byte[] plainText = message.getBytes(StandardCharsets.UTF_8); + + System.out.println("Original message: " + message); + + EncryptRequestAlpha1 encryptRequest = new EncryptRequestAlpha1( + CRYPTO_COMPONENT_NAME, + Flux.just(plainText), + KEY_NAME, + KEY_WRAP_ALGORITHM + ).setDataEncryptionCipher("aes-gcm"); + + byte[] encryptedData = collectBytes(client.encrypt(encryptRequest)); + System.out.println("Encrypted with aes-gcm cipher, size: " + encryptedData.length + " bytes"); + + DecryptRequestAlpha1 decryptRequest = new DecryptRequestAlpha1( + CRYPTO_COMPONENT_NAME, + Flux.just(encryptedData) + ); + + byte[] decryptedData = collectBytes(client.decrypt(decryptRequest)); + String decryptedMessage = new String(decryptedData, StandardCharsets.UTF_8); + System.out.println("Decrypted message: " + decryptedMessage); + System.out.println("Verification: " + (message.equals(decryptedMessage) ? "SUCCESS" : "FAILED")); + } + + /** + * Helper method to collect streaming bytes into a single byte array. + */ + private static byte[] collectBytes(Flux stream) { + return stream.collectList() + .map(chunks -> { + int totalSize = chunks.stream().mapToInt(chunk -> chunk.length).sum(); + byte[] result = new byte[totalSize]; + int pos = 0; + for (byte[] chunk : chunks) { + System.arraycopy(chunk, 0, result, pos, chunk.length); + pos += chunk.length; + } + return result; + }) + .block(); + } +} diff --git a/sdk-tests/src/test/java/io/dapr/it/testcontainers/crypto/DaprPreviewClientCryptoIT.java b/sdk-tests/src/test/java/io/dapr/it/testcontainers/crypto/DaprPreviewClientCryptoIT.java new file mode 100644 index 000000000..984be0297 --- /dev/null +++ b/sdk-tests/src/test/java/io/dapr/it/testcontainers/crypto/DaprPreviewClientCryptoIT.java @@ -0,0 +1,373 @@ +/* + * Copyright 2024 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package io.dapr.it.testcontainers.crypto; + +import io.dapr.client.DaprClientBuilder; +import io.dapr.client.DaprPreviewClient; +import io.dapr.client.domain.DecryptRequestAlpha1; +import io.dapr.client.domain.EncryptRequestAlpha1; +import io.dapr.config.Properties; +import io.dapr.testcontainers.Component; +import io.dapr.testcontainers.DaprContainer; +import io.dapr.testcontainers.MetadataEntry; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.testcontainers.containers.BindMode; +import org.testcontainers.junit.jupiter.Container; +import org.testcontainers.junit.jupiter.Testcontainers; +import reactor.core.publisher.Flux; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.security.KeyPair; +import java.security.KeyPairGenerator; +import java.security.NoSuchAlgorithmException; +import java.util.Base64; +import java.util.Collections; +import java.util.List; +import java.util.Random; + +import static io.dapr.it.testcontainers.ContainerConstants.DAPR_RUNTIME_IMAGE_TAG; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + +/** + * Integration tests for the Dapr Cryptography Alpha1 API. + */ +@Testcontainers +@Tag("testcontainers") +public class DaprPreviewClientCryptoIT { + + private static final String CRYPTO_COMPONENT_NAME = "localstoragecrypto"; + private static final String KEY_NAME = "testkey"; + private static final String CONTAINER_KEYS_PATH = "/keys"; + + private static Path tempKeysDir; + private static DaprPreviewClient daprPreviewClient; + + @Container + private static final DaprContainer DAPR_CONTAINER = createDaprContainer(); + + private static DaprContainer createDaprContainer() { + try { + // Create temporary directory for keys + tempKeysDir = Files.createTempDirectory("dapr-crypto-keys"); + + // Generate and save a test RSA key pair in PEM format + generateAndSaveRsaKeyPair(tempKeysDir); + + // Create the crypto component + Component cryptoComponent = new Component( + CRYPTO_COMPONENT_NAME, + "crypto.dapr.localstorage", + "v1", + List.of(new MetadataEntry("path", CONTAINER_KEYS_PATH)) + ); + + return new DaprContainer(DAPR_RUNTIME_IMAGE_TAG) + .withAppName("crypto-test-app") + .withComponent(cryptoComponent) + .withFileSystemBind(tempKeysDir.toString(), CONTAINER_KEYS_PATH, BindMode.READ_ONLY); + + } catch (Exception e) { + throw new RuntimeException("Failed to initialize test container", e); + } + } + + private static void generateAndSaveRsaKeyPair(Path keysDir) throws NoSuchAlgorithmException, IOException { + KeyPairGenerator keyGen = KeyPairGenerator.getInstance("RSA"); + keyGen.initialize(4096); + KeyPair keyPair = keyGen.generateKeyPair(); + + // Save the private key in PEM format + String privateKeyPem = "-----BEGIN PRIVATE KEY-----\n" + + Base64.getMimeEncoder(64, "\n".getBytes()).encodeToString(keyPair.getPrivate().getEncoded()) + + "\n-----END PRIVATE KEY-----\n"; + + // Save the public key in PEM format + String publicKeyPem = "-----BEGIN PUBLIC KEY-----\n" + + Base64.getMimeEncoder(64, "\n".getBytes()).encodeToString(keyPair.getPublic().getEncoded()) + + "\n-----END PUBLIC KEY-----\n"; + + // Combine both keys in one PEM file + String combinedPem = 
privateKeyPem + publicKeyPem; + + Path keyFile = keysDir.resolve(KEY_NAME); + Files.writeString(keyFile, combinedPem); + + // Make the key file and directory readable by all (needed for container access) + keyFile.toFile().setReadable(true, false); + keysDir.toFile().setReadable(true, false); + keysDir.toFile().setExecutable(true, false); + } + + @BeforeAll + static void setUp() { + daprPreviewClient = new DaprClientBuilder() + .withPropertyOverride(Properties.HTTP_ENDPOINT, DAPR_CONTAINER.getHttpEndpoint()) + .withPropertyOverride(Properties.GRPC_ENDPOINT, DAPR_CONTAINER.getGrpcEndpoint()) + .buildPreviewClient(); + } + + @AfterAll + static void tearDown() throws Exception { + if (daprPreviewClient != null) { + daprPreviewClient.close(); + } + // Clean up temp keys directory + if (tempKeysDir != null && Files.exists(tempKeysDir)) { + Files.walk(tempKeysDir) + .sorted((a, b) -> -a.compareTo(b)) + .forEach(path -> { + try { + Files.delete(path); + } catch (IOException e) { + // Ignore cleanup errors + } + }); + } + } + + @Test + public void testEncryptAndDecryptSmallData() { + String originalData = "Hello, World! This is a test message."; + byte[] plainText = originalData.getBytes(StandardCharsets.UTF_8); + + // Encrypt + EncryptRequestAlpha1 encryptRequest = new EncryptRequestAlpha1( + CRYPTO_COMPONENT_NAME, + Flux.just(plainText), + KEY_NAME, + "RSA-OAEP-256" + ); + + byte[] encryptedData = daprPreviewClient.encrypt(encryptRequest) + .collectList() + .map(chunks -> { + int totalSize = chunks.stream().mapToInt(chunk -> chunk.length).sum(); + byte[] result = new byte[totalSize]; + int pos = 0; + for (byte[] chunk : chunks) { + System.arraycopy(chunk, 0, result, pos, chunk.length); + pos += chunk.length; + } + return result; + }) + .block(); + + assertNotNull(encryptedData); + assertTrue(encryptedData.length > 0); + + // Decrypt + DecryptRequestAlpha1 decryptRequest = new DecryptRequestAlpha1( + CRYPTO_COMPONENT_NAME, + Flux.just(encryptedData) + ); + + byte[] decryptedData = daprPreviewClient.decrypt(decryptRequest) + .collectList() + .map(chunks -> { + int totalSize = chunks.stream().mapToInt(chunk -> chunk.length).sum(); + byte[] result = new byte[totalSize]; + int pos = 0; + for (byte[] chunk : chunks) { + System.arraycopy(chunk, 0, result, pos, chunk.length); + pos += chunk.length; + } + return result; + }) + .block(); + + assertNotNull(decryptedData); + assertArrayEquals(plainText, decryptedData); + assertEquals(originalData, new String(decryptedData, StandardCharsets.UTF_8)); + } + + @Test + public void testEncryptAndDecryptLargeData() { + // Generate a large data payload (1MB) + byte[] largeData = new byte[1024 * 1024]; + new Random().nextBytes(largeData); + + // Encrypt + EncryptRequestAlpha1 encryptRequest = new EncryptRequestAlpha1( + CRYPTO_COMPONENT_NAME, + Flux.just(largeData), + KEY_NAME, + "RSA-OAEP-256" + ); + + byte[] encryptedData = daprPreviewClient.encrypt(encryptRequest) + .collectList() + .map(chunks -> { + int totalSize = chunks.stream().mapToInt(chunk -> chunk.length).sum(); + byte[] result = new byte[totalSize]; + int pos = 0; + for (byte[] chunk : chunks) { + System.arraycopy(chunk, 0, result, pos, chunk.length); + pos += chunk.length; + } + return result; + }) + .block(); + + assertNotNull(encryptedData); + assertTrue(encryptedData.length > 0); + + // Decrypt + DecryptRequestAlpha1 decryptRequest = new DecryptRequestAlpha1( + CRYPTO_COMPONENT_NAME, + Flux.just(encryptedData) + ); + + byte[] decryptedData = daprPreviewClient.decrypt(decryptRequest) + .collectList() 
+ .map(chunks -> { + int totalSize = chunks.stream().mapToInt(chunk -> chunk.length).sum(); + byte[] result = new byte[totalSize]; + int pos = 0; + for (byte[] chunk : chunks) { + System.arraycopy(chunk, 0, result, pos, chunk.length); + pos += chunk.length; + } + return result; + }) + .block(); + + assertNotNull(decryptedData); + assertArrayEquals(largeData, decryptedData); + } + + @Test + public void testEncryptAndDecryptStreamedData() { + // Create chunked data to simulate streaming + byte[] chunk1 = "First chunk of data. ".getBytes(StandardCharsets.UTF_8); + byte[] chunk2 = "Second chunk of data. ".getBytes(StandardCharsets.UTF_8); + byte[] chunk3 = "Third and final chunk.".getBytes(StandardCharsets.UTF_8); + + // Combine for comparison later + byte[] fullData = new byte[chunk1.length + chunk2.length + chunk3.length]; + System.arraycopy(chunk1, 0, fullData, 0, chunk1.length); + System.arraycopy(chunk2, 0, fullData, chunk1.length, chunk2.length); + System.arraycopy(chunk3, 0, fullData, chunk1.length + chunk2.length, chunk3.length); + + // Encrypt with multiple chunks + EncryptRequestAlpha1 encryptRequest = new EncryptRequestAlpha1( + CRYPTO_COMPONENT_NAME, + Flux.just(chunk1, chunk2, chunk3), + KEY_NAME, + "RSA-OAEP-256" + ); + + byte[] encryptedData = daprPreviewClient.encrypt(encryptRequest) + .collectList() + .map(chunks -> { + int totalSize = chunks.stream().mapToInt(chunk -> chunk.length).sum(); + byte[] result = new byte[totalSize]; + int pos = 0; + for (byte[] chunk : chunks) { + System.arraycopy(chunk, 0, result, pos, chunk.length); + pos += chunk.length; + } + return result; + }) + .block(); + + assertNotNull(encryptedData); + assertTrue(encryptedData.length > 0); + + // Decrypt + DecryptRequestAlpha1 decryptRequest = new DecryptRequestAlpha1( + CRYPTO_COMPONENT_NAME, + Flux.just(encryptedData) + ); + + byte[] decryptedData = daprPreviewClient.decrypt(decryptRequest) + .collectList() + .map(chunks -> { + int totalSize = chunks.stream().mapToInt(chunk -> chunk.length).sum(); + byte[] result = new byte[totalSize]; + int pos = 0; + for (byte[] chunk : chunks) { + System.arraycopy(chunk, 0, result, pos, chunk.length); + pos += chunk.length; + } + return result; + }) + .block(); + + assertNotNull(decryptedData); + assertArrayEquals(fullData, decryptedData); + } + + @Test + public void testEncryptWithOptionalParameters() { + String originalData = "Test message with optional parameters."; + byte[] plainText = originalData.getBytes(StandardCharsets.UTF_8); + + // Encrypt with optional data encryption cipher + EncryptRequestAlpha1 encryptRequest = new EncryptRequestAlpha1( + CRYPTO_COMPONENT_NAME, + Flux.just(plainText), + KEY_NAME, + "RSA-OAEP-256" + ).setDataEncryptionCipher("aes-gcm"); + + byte[] encryptedData = daprPreviewClient.encrypt(encryptRequest) + .collectList() + .map(chunks -> { + int totalSize = chunks.stream().mapToInt(chunk -> chunk.length).sum(); + byte[] result = new byte[totalSize]; + int pos = 0; + for (byte[] chunk : chunks) { + System.arraycopy(chunk, 0, result, pos, chunk.length); + pos += chunk.length; + } + return result; + }) + .block(); + + assertNotNull(encryptedData); + assertTrue(encryptedData.length > 0); + + // Decrypt + DecryptRequestAlpha1 decryptRequest = new DecryptRequestAlpha1( + CRYPTO_COMPONENT_NAME, + Flux.just(encryptedData) + ); + + byte[] decryptedData = daprPreviewClient.decrypt(decryptRequest) + .collectList() + .map(chunks -> { + int totalSize = chunks.stream().mapToInt(chunk -> chunk.length).sum(); + byte[] result = new byte[totalSize]; + 
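+              // Reassemble the streamed response chunks into one contiguous byte array before comparing with the original payload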
int pos = 0; + for (byte[] chunk : chunks) { + System.arraycopy(chunk, 0, result, pos, chunk.length); + pos += chunk.length; + } + return result; + }) + .block(); + + assertNotNull(decryptedData); + assertArrayEquals(plainText, decryptedData); + } +} diff --git a/sdk/src/main/java/io/dapr/client/DaprClientImpl.java b/sdk/src/main/java/io/dapr/client/DaprClientImpl.java index 0dfb1b644..05b555b5e 100644 --- a/sdk/src/main/java/io/dapr/client/DaprClientImpl.java +++ b/sdk/src/main/java/io/dapr/client/DaprClientImpl.java @@ -48,9 +48,11 @@ import io.dapr.client.domain.ConversationTools; import io.dapr.client.domain.ConversationToolsFunction; import io.dapr.client.domain.DaprMetadata; +import io.dapr.client.domain.DecryptRequestAlpha1; import io.dapr.client.domain.DeleteJobRequest; import io.dapr.client.domain.DeleteStateRequest; import io.dapr.client.domain.DropFailurePolicy; +import io.dapr.client.domain.EncryptRequestAlpha1; import io.dapr.client.domain.ExecuteStateTransactionRequest; import io.dapr.client.domain.FailurePolicy; import io.dapr.client.domain.FailurePolicyType; @@ -2108,4 +2110,196 @@ private AppConnectionPropertiesHealthMetadata getAppConnectionPropertiesHealth( return new AppConnectionPropertiesHealthMetadata(healthCheckPath, healthProbeInterval, healthProbeTimeout, healthThreshold); } + + /** + * {@inheritDoc} + */ + @Override + public Flux encrypt(EncryptRequestAlpha1 request) { + try { + if (request == null) { + throw new IllegalArgumentException("EncryptRequestAlpha1 cannot be null."); + } + if (request.getComponentName() == null || request.getComponentName().trim().isEmpty()) { + throw new IllegalArgumentException("Component name cannot be null or empty."); + } + if (request.getKeyName() == null || request.getKeyName().trim().isEmpty()) { + throw new IllegalArgumentException("Key name cannot be null or empty."); + } + if (request.getKeyWrapAlgorithm() == null || request.getKeyWrapAlgorithm().trim().isEmpty()) { + throw new IllegalArgumentException("Key wrap algorithm cannot be null or empty."); + } + if (request.getPlainTextStream() == null) { + throw new IllegalArgumentException("Plaintext stream cannot be null."); + } + + return Flux.create(sink -> { + // Create response observer to receive encrypted data + final StreamObserver responseObserver = + new StreamObserver() { + @Override + public void onNext(DaprProtos.EncryptResponse response) { + if (response.hasPayload()) { + byte[] data = response.getPayload().getData().toByteArray(); + if (data.length > 0) { + sink.next(data); + } + } + } + + @Override + public void onError(Throwable t) { + sink.error(DaprException.propagate(new DaprException("ENCRYPT_ERROR", + "Error during encryption: " + t.getMessage(), t))); + } + + @Override + public void onCompleted() { + sink.complete(); + } + }; + + // Build options for the first message + DaprProtos.EncryptRequestOptions.Builder optionsBuilder = DaprProtos.EncryptRequestOptions.newBuilder() + .setComponentName(request.getComponentName()) + .setKeyName(request.getKeyName()) + .setKeyWrapAlgorithm(request.getKeyWrapAlgorithm()); + + if (request.getDataEncryptionCipher() != null && !request.getDataEncryptionCipher().isEmpty()) { + optionsBuilder.setDataEncryptionCipher(request.getDataEncryptionCipher()); + } + optionsBuilder.setOmitDecryptionKeyName(request.isOmitDecryptionKeyName()); + if (request.getDecryptionKeyName() != null && !request.getDecryptionKeyName().isEmpty()) { + optionsBuilder.setDecryptionKeyName(request.getDecryptionKeyName()); + } + + final 
DaprProtos.EncryptRequestOptions options = optionsBuilder.build(); + final long[] sequenceNumber = {0}; + final boolean[] firstMessage = {true}; + + // Get the request stream observer from gRPC + final StreamObserver requestObserver = + intercept(null, asyncStub).encryptAlpha1(responseObserver); + + // Subscribe to the plaintext stream and send chunks + request.getPlainTextStream() + .doOnNext(chunk -> { + DaprProtos.EncryptRequest.Builder reqBuilder = DaprProtos.EncryptRequest.newBuilder() + .setPayload(CommonProtos.StreamPayload.newBuilder() + .setData(ByteString.copyFrom(chunk)) + .setSeq(sequenceNumber[0]++) + .build()); + + // Include options only in the first message + if (firstMessage[0]) { + reqBuilder.setOptions(options); + firstMessage[0] = false; + } + + requestObserver.onNext(reqBuilder.build()); + }) + .doOnError(error -> { + requestObserver.onError(error); + sink.error(DaprException.propagate(new DaprException("ENCRYPT_ERROR", + "Error reading plaintext stream: " + error.getMessage(), error))); + }) + .doOnComplete(() -> { + requestObserver.onCompleted(); + }) + .subscribe(); + }); + } catch (Exception ex) { + return DaprException.wrapFlux(ex); + } + } + + /** + * {@inheritDoc} + */ + @Override + public Flux decrypt(DecryptRequestAlpha1 request) { + try { + if (request == null) { + throw new IllegalArgumentException("DecryptRequestAlpha1 cannot be null."); + } + if (request.getComponentName() == null || request.getComponentName().trim().isEmpty()) { + throw new IllegalArgumentException("Component name cannot be null or empty."); + } + if (request.getCipherTextStream() == null) { + throw new IllegalArgumentException("Ciphertext stream cannot be null."); + } + + return Flux.create(sink -> { + // Create response observer to receive decrypted data + final StreamObserver responseObserver = + new StreamObserver() { + @Override + public void onNext(DaprProtos.DecryptResponse response) { + if (response.hasPayload()) { + byte[] data = response.getPayload().getData().toByteArray(); + if (data.length > 0) { + sink.next(data); + } + } + } + + @Override + public void onError(Throwable t) { + sink.error(DaprException.propagate(new DaprException("DECRYPT_ERROR", + "Error during decryption: " + t.getMessage(), t))); + } + + @Override + public void onCompleted() { + sink.complete(); + } + }; + + // Build options for the first message + DaprProtos.DecryptRequestOptions.Builder optionsBuilder = DaprProtos.DecryptRequestOptions.newBuilder() + .setComponentName(request.getComponentName()); + + if (request.getKeyName() != null && !request.getKeyName().isEmpty()) { + optionsBuilder.setKeyName(request.getKeyName()); + } + + final DaprProtos.DecryptRequestOptions options = optionsBuilder.build(); + final long[] sequenceNumber = {0}; + final boolean[] firstMessage = {true}; + + // Get the request stream observer from gRPC + final StreamObserver requestObserver = + intercept(null, asyncStub).decryptAlpha1(responseObserver); + + // Subscribe to the ciphertext stream and send chunks + request.getCipherTextStream() + .doOnNext(chunk -> { + DaprProtos.DecryptRequest.Builder reqBuilder = DaprProtos.DecryptRequest.newBuilder() + .setPayload(CommonProtos.StreamPayload.newBuilder() + .setData(ByteString.copyFrom(chunk)) + .setSeq(sequenceNumber[0]++) + .build()); + + // Include options only in the first message + if (firstMessage[0]) { + reqBuilder.setOptions(options); + firstMessage[0] = false; + } + + requestObserver.onNext(reqBuilder.build()); + }) + .doOnError(error -> { + requestObserver.onError(error); + 
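+              // surface the ciphertext stream read failure to the subscriber as well as to the gRPC request stream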
sink.error(DaprException.propagate(new DaprException("DECRYPT_ERROR", + "Error reading ciphertext stream: " + error.getMessage(), error))); + }) + .doOnComplete(() -> { + requestObserver.onCompleted(); + }) + .subscribe(); + }); + } catch (Exception ex) { + return DaprException.wrapFlux(ex); + } + } } diff --git a/sdk/src/main/java/io/dapr/client/DaprPreviewClient.java b/sdk/src/main/java/io/dapr/client/DaprPreviewClient.java index 9d8192369..17ca13649 100644 --- a/sdk/src/main/java/io/dapr/client/DaprPreviewClient.java +++ b/sdk/src/main/java/io/dapr/client/DaprPreviewClient.java @@ -22,7 +22,9 @@ import io.dapr.client.domain.ConversationRequestAlpha2; import io.dapr.client.domain.ConversationResponse; import io.dapr.client.domain.ConversationResponseAlpha2; +import io.dapr.client.domain.DecryptRequestAlpha1; import io.dapr.client.domain.DeleteJobRequest; +import io.dapr.client.domain.EncryptRequestAlpha1; import io.dapr.client.domain.GetJobRequest; import io.dapr.client.domain.GetJobResponse; import io.dapr.client.domain.LockRequest; @@ -307,4 +309,24 @@ Subscription subscribeToEvents( * @return {@link ConversationResponseAlpha2}. */ public Mono converseAlpha2(ConversationRequestAlpha2 conversationRequestAlpha2); + + /** + * Encrypt data using the Dapr cryptography building block. + * This method uses streaming to handle large payloads efficiently. + * + * @param request The encryption request containing component name, key information, and plaintext stream. + * @return A Flux of encrypted byte arrays (ciphertext chunks). + * @throws IllegalArgumentException if required parameters are missing. + */ + Flux encrypt(EncryptRequestAlpha1 request); + + /** + * Decrypt data using the Dapr cryptography building block. + * This method uses streaming to handle large payloads efficiently. + * + * @param request The decryption request containing component name, optional key name, and ciphertext stream. + * @return A Flux of decrypted byte arrays (plaintext chunks). + * @throws IllegalArgumentException if required parameters are missing. + */ + Flux decrypt(DecryptRequestAlpha1 request); } diff --git a/sdk/src/main/java/io/dapr/client/domain/DecryptRequestAlpha1.java b/sdk/src/main/java/io/dapr/client/domain/DecryptRequestAlpha1.java new file mode 100644 index 000000000..61cf3206e --- /dev/null +++ b/sdk/src/main/java/io/dapr/client/domain/DecryptRequestAlpha1.java @@ -0,0 +1,79 @@ +/* + * Copyright 2024 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.client.domain; + +import reactor.core.publisher.Flux; + +/** + * Request to decrypt data using the Dapr Cryptography building block. + * Uses streaming to handle large payloads efficiently. + */ +public class DecryptRequestAlpha1 { + + private final String componentName; + private final Flux cipherTextStream; + private String keyName; + + /** + * Constructor for DecryptRequestAlpha1. + * + * @param componentName Name of the cryptography component. Required. 
+ * @param cipherTextStream Stream of ciphertext data to decrypt. Required. + */ + public DecryptRequestAlpha1(String componentName, Flux cipherTextStream) { + this.componentName = componentName; + this.cipherTextStream = cipherTextStream; + } + + /** + * Gets the cryptography component name. + * + * @return the component name + */ + public String getComponentName() { + return componentName; + } + + /** + * Gets the ciphertext data stream to decrypt. + * + * @return the ciphertext stream as Flux of byte arrays + */ + public Flux getCipherTextStream() { + return cipherTextStream; + } + + /** + * Gets the key name (or name/version) to use for decryption. + * + * @return the key name, or null if using the key embedded in the ciphertext + */ + public String getKeyName() { + return keyName; + } + + /** + * Sets the key name (or name/version) to decrypt the message. + * This overrides any key reference included in the message if present. + * This is required if the message doesn't include a key reference + * (i.e., was created with omitDecryptionKeyName set to true). + * + * @param keyName the key name to use for decryption + * @return this request instance for method chaining + */ + public DecryptRequestAlpha1 setKeyName(String keyName) { + this.keyName = keyName; + return this; + } +} diff --git a/sdk/src/main/java/io/dapr/client/domain/EncryptRequestAlpha1.java b/sdk/src/main/java/io/dapr/client/domain/EncryptRequestAlpha1.java new file mode 100644 index 000000000..bab82bc22 --- /dev/null +++ b/sdk/src/main/java/io/dapr/client/domain/EncryptRequestAlpha1.java @@ -0,0 +1,152 @@ +/* + * Copyright 2024 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.client.domain; + +import reactor.core.publisher.Flux; + +/** + * Request to encrypt data using the Dapr Cryptography building block. + * Uses streaming to handle large payloads efficiently. + */ +public class EncryptRequestAlpha1 { + + private final String componentName; + private final Flux plainTextStream; + private final String keyName; + private final String keyWrapAlgorithm; + private String dataEncryptionCipher; + private boolean omitDecryptionKeyName; + private String decryptionKeyName; + + /** + * Constructor for EncryptRequestAlpha1. + * + * @param componentName Name of the cryptography component. Required. + * @param plainTextStream Stream of plaintext data to encrypt. Required. + * @param keyName Name (or name/version) of the key to use for encryption. Required. + * @param keyWrapAlgorithm Key wrapping algorithm to use. Required. + * Supported options: A256KW (alias: AES), A128CBC, A192CBC, A256CBC, + * RSA-OAEP-256 (alias: RSA). + */ + public EncryptRequestAlpha1(String componentName, Flux plainTextStream, + String keyName, String keyWrapAlgorithm) { + this.componentName = componentName; + this.plainTextStream = plainTextStream; + this.keyName = keyName; + this.keyWrapAlgorithm = keyWrapAlgorithm; + } + + /** + * Gets the cryptography component name. 
+ * + * @return the component name + */ + public String getComponentName() { + return componentName; + } + + /** + * Gets the plaintext data stream to encrypt. + * + * @return the plaintext stream as Flux of byte arrays + */ + public Flux getPlainTextStream() { + return plainTextStream; + } + + /** + * Gets the key name (or name/version). + * + * @return the key name + */ + public String getKeyName() { + return keyName; + } + + /** + * Gets the key wrap algorithm. + * + * @return the key wrap algorithm + */ + public String getKeyWrapAlgorithm() { + return keyWrapAlgorithm; + } + + /** + * Gets the data encryption cipher. + * + * @return the data encryption cipher, or null if not set + */ + public String getDataEncryptionCipher() { + return dataEncryptionCipher; + } + + /** + * Sets the cipher used to encrypt data. + * Optional. Supported values: "aes-gcm" (default), "chacha20-poly1305". + * + * @param dataEncryptionCipher the cipher to use for data encryption + * @return this request instance for method chaining + */ + public EncryptRequestAlpha1 setDataEncryptionCipher(String dataEncryptionCipher) { + this.dataEncryptionCipher = dataEncryptionCipher; + return this; + } + + /** + * Checks if the decryption key name should be omitted from the encrypted document. + * + * @return true if the key name should be omitted + */ + public boolean isOmitDecryptionKeyName() { + return omitDecryptionKeyName; + } + + /** + * Sets whether to omit the decryption key name from the encrypted document. + * If true, calls to decrypt must provide a key reference (name or name/version). + * Defaults to false. + * + * @param omitDecryptionKeyName whether to omit the key name + * @return this request instance for method chaining + */ + public EncryptRequestAlpha1 setOmitDecryptionKeyName(boolean omitDecryptionKeyName) { + this.omitDecryptionKeyName = omitDecryptionKeyName; + return this; + } + + /** + * Gets the decryption key name to embed in the encrypted document. + * + * @return the decryption key name, or null if not set + */ + public String getDecryptionKeyName() { + return decryptionKeyName; + } + + /** + * Sets the key reference to embed in the encrypted document (name or name/version). + * This is helpful if the reference of the key used to decrypt the document is + * different from the one used to encrypt it. + * If unset, uses the reference of the key used to encrypt the document. + * This option is ignored if omitDecryptionKeyName is true. 
+ * + * @param decryptionKeyName the key name to embed for decryption + * @return this request instance for method chaining + */ + public EncryptRequestAlpha1 setDecryptionKeyName(String decryptionKeyName) { + this.decryptionKeyName = decryptionKeyName; + return this; + } +} diff --git a/sdk/src/test/java/io/dapr/client/DaprPreviewClientGrpcTest.java b/sdk/src/test/java/io/dapr/client/DaprPreviewClientGrpcTest.java index 1566c7d2c..af54b6490 100644 --- a/sdk/src/test/java/io/dapr/client/DaprPreviewClientGrpcTest.java +++ b/sdk/src/test/java/io/dapr/client/DaprPreviewClientGrpcTest.java @@ -33,10 +33,18 @@ import io.dapr.client.domain.ConversationResultChoices; import io.dapr.client.domain.ConversationToolCalls; import io.dapr.client.domain.ConversationTools; +import io.dapr.client.domain.DecryptRequestAlpha1; +import io.dapr.client.domain.DeleteJobRequest; import io.dapr.client.domain.DeveloperMessage; import io.dapr.client.domain.ConversationInput; import io.dapr.client.domain.ConversationRequest; import io.dapr.client.domain.ConversationResponse; +import io.dapr.client.domain.DeleteJobRequest; +import io.dapr.client.domain.DropFailurePolicy; +import io.dapr.client.domain.EncryptRequestAlpha1; +import io.dapr.client.domain.GetJobRequest; +import io.dapr.client.domain.GetJobResponse; +import io.dapr.client.domain.JobSchedule; import io.dapr.client.domain.QueryStateItem; import io.dapr.client.domain.QueryStateRequest; import io.dapr.client.domain.QueryStateResponse; @@ -48,6 +56,7 @@ import io.dapr.serializer.DaprObjectSerializer; import io.dapr.serializer.DefaultObjectSerializer; import io.dapr.utils.TypeRef; +import io.dapr.v1.CommonProtos; import io.dapr.v1.DaprAppCallbackProtos; import io.dapr.v1.DaprGrpc; import io.dapr.v1.DaprProtos; @@ -57,14 +66,17 @@ import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.DisplayName; import org.junit.jupiter.api.Test; import org.mockito.ArgumentCaptor; import org.mockito.ArgumentMatchers; import org.mockito.Mockito; import org.mockito.stubbing.Answer; +import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -79,6 +91,8 @@ import static io.dapr.utils.TestUtils.assertThrowsDaprException; import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; @@ -1276,4 +1290,663 @@ private DaprProtos.QueryStateItem buildQueryStateItem(QueryStateItem item) th private static StatusRuntimeException newStatusRuntimeException(String status, String message) { return new StatusRuntimeException(Status.fromCode(Status.Code.valueOf(status)).withDescription(message)); } + + // ==================== Encrypt Tests ==================== + + @Test + @DisplayName("encrypt should throw IllegalArgumentException when request is null") + public void encryptNullRequestTest() { + assertThrows(IllegalArgumentException.class, () -> { + previewClient.encrypt(null).blockFirst(); + }); + } + + @Test + @DisplayName("encrypt should throw IllegalArgumentException when component name is null") + public void 
encryptNullComponentNameTest() { + Flux plainTextStream = Flux.just("test data".getBytes(StandardCharsets.UTF_8)); + EncryptRequestAlpha1 request = new EncryptRequestAlpha1( + null, + plainTextStream, + "mykey", + "RSA-OAEP-256" + ); + + assertThrows(IllegalArgumentException.class, () -> { + previewClient.encrypt(request).blockFirst(); + }); + } + + @Test + @DisplayName("encrypt should throw IllegalArgumentException when component name is empty") + public void encryptEmptyComponentNameTest() { + Flux plainTextStream = Flux.just("test data".getBytes(StandardCharsets.UTF_8)); + EncryptRequestAlpha1 request = new EncryptRequestAlpha1( + "", + plainTextStream, + "mykey", + "RSA-OAEP-256" + ); + + assertThrows(IllegalArgumentException.class, () -> { + previewClient.encrypt(request).blockFirst(); + }); + } + + @Test + @DisplayName("encrypt should throw IllegalArgumentException when component name is whitespace only") + public void encryptWhitespaceComponentNameTest() { + Flux plainTextStream = Flux.just("test data".getBytes(StandardCharsets.UTF_8)); + EncryptRequestAlpha1 request = new EncryptRequestAlpha1( + " ", + plainTextStream, + "mykey", + "RSA-OAEP-256" + ); + + assertThrows(IllegalArgumentException.class, () -> { + previewClient.encrypt(request).blockFirst(); + }); + } + + @Test + @DisplayName("encrypt should throw IllegalArgumentException when key name is null") + public void encryptNullKeyNameTest() { + Flux plainTextStream = Flux.just("test data".getBytes(StandardCharsets.UTF_8)); + EncryptRequestAlpha1 request = new EncryptRequestAlpha1( + "mycomponent", + plainTextStream, + null, + "RSA-OAEP-256" + ); + + assertThrows(IllegalArgumentException.class, () -> { + previewClient.encrypt(request).blockFirst(); + }); + } + + @Test + @DisplayName("encrypt should throw IllegalArgumentException when key name is empty") + public void encryptEmptyKeyNameTest() { + Flux plainTextStream = Flux.just("test data".getBytes(StandardCharsets.UTF_8)); + EncryptRequestAlpha1 request = new EncryptRequestAlpha1( + "mycomponent", + plainTextStream, + "", + "RSA-OAEP-256" + ); + + assertThrows(IllegalArgumentException.class, () -> { + previewClient.encrypt(request).blockFirst(); + }); + } + + @Test + @DisplayName("encrypt should throw IllegalArgumentException when key wrap algorithm is null") + public void encryptNullKeyWrapAlgorithmTest() { + Flux plainTextStream = Flux.just("test data".getBytes(StandardCharsets.UTF_8)); + EncryptRequestAlpha1 request = new EncryptRequestAlpha1( + "mycomponent", + plainTextStream, + "mykey", + null + ); + + assertThrows(IllegalArgumentException.class, () -> { + previewClient.encrypt(request).blockFirst(); + }); + } + + @Test + @DisplayName("encrypt should throw IllegalArgumentException when key wrap algorithm is empty") + public void encryptEmptyKeyWrapAlgorithmTest() { + Flux plainTextStream = Flux.just("test data".getBytes(StandardCharsets.UTF_8)); + EncryptRequestAlpha1 request = new EncryptRequestAlpha1( + "mycomponent", + plainTextStream, + "mykey", + "" + ); + + assertThrows(IllegalArgumentException.class, () -> { + previewClient.encrypt(request).blockFirst(); + }); + } + + @Test + @DisplayName("encrypt should throw IllegalArgumentException when plaintext stream is null") + public void encryptNullPlaintextStreamTest() { + EncryptRequestAlpha1 request = new EncryptRequestAlpha1( + "mycomponent", + null, + "mykey", + "RSA-OAEP-256" + ); + + assertThrows(IllegalArgumentException.class, () -> { + previewClient.encrypt(request).blockFirst(); + }); + } + + @Test + 
@DisplayName("encrypt should successfully encrypt data with required fields") + public void encryptSuccessTest() { + byte[] plaintext = "Hello, World!".getBytes(StandardCharsets.UTF_8); + byte[] encryptedData = "encrypted-data".getBytes(StandardCharsets.UTF_8); + + doAnswer((Answer>) invocation -> { + StreamObserver responseObserver = + (StreamObserver) invocation.getArguments()[0]; + + // Simulate returning encrypted data + DaprProtos.EncryptResponse response = DaprProtos.EncryptResponse.newBuilder() + .setPayload(CommonProtos.StreamPayload.newBuilder() + .setData(ByteString.copyFrom(encryptedData)) + .setSeq(0) + .build()) + .build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + + return mock(StreamObserver.class); + }).when(daprStub).encryptAlpha1(any()); + + Flux plainTextStream = Flux.just(plaintext); + EncryptRequestAlpha1 request = new EncryptRequestAlpha1( + "mycomponent", + plainTextStream, + "mykey", + "RSA-OAEP-256" + ); + + List results = previewClient.encrypt(request).collectList().block(); + + assertNotNull(results); + assertEquals(1, results.size()); + assertArrayEquals(encryptedData, results.get(0)); + } + + @Test + @DisplayName("encrypt should handle multiple response chunks") + public void encryptMultipleChunksResponseTest() { + byte[] plaintext = "Hello, World!".getBytes(StandardCharsets.UTF_8); + byte[] chunk1 = "chunk1".getBytes(StandardCharsets.UTF_8); + byte[] chunk2 = "chunk2".getBytes(StandardCharsets.UTF_8); + byte[] chunk3 = "chunk3".getBytes(StandardCharsets.UTF_8); + + doAnswer((Answer>) invocation -> { + StreamObserver responseObserver = + (StreamObserver) invocation.getArguments()[0]; + + // Simulate returning multiple chunks + responseObserver.onNext(DaprProtos.EncryptResponse.newBuilder() + .setPayload(CommonProtos.StreamPayload.newBuilder() + .setData(ByteString.copyFrom(chunk1)) + .setSeq(0) + .build()) + .build()); + responseObserver.onNext(DaprProtos.EncryptResponse.newBuilder() + .setPayload(CommonProtos.StreamPayload.newBuilder() + .setData(ByteString.copyFrom(chunk2)) + .setSeq(1) + .build()) + .build()); + responseObserver.onNext(DaprProtos.EncryptResponse.newBuilder() + .setPayload(CommonProtos.StreamPayload.newBuilder() + .setData(ByteString.copyFrom(chunk3)) + .setSeq(2) + .build()) + .build()); + responseObserver.onCompleted(); + + return mock(StreamObserver.class); + }).when(daprStub).encryptAlpha1(any()); + + Flux plainTextStream = Flux.just(plaintext); + EncryptRequestAlpha1 request = new EncryptRequestAlpha1( + "mycomponent", + plainTextStream, + "mykey", + "RSA-OAEP-256" + ); + + List results = previewClient.encrypt(request).collectList().block(); + + assertNotNull(results); + assertEquals(3, results.size()); + assertArrayEquals(chunk1, results.get(0)); + assertArrayEquals(chunk2, results.get(1)); + assertArrayEquals(chunk3, results.get(2)); + } + + @Test + @DisplayName("encrypt should handle optional data encryption cipher") + public void encryptWithDataEncryptionCipherTest() { + byte[] plaintext = "Hello, World!".getBytes(StandardCharsets.UTF_8); + byte[] encryptedData = "encrypted-data".getBytes(StandardCharsets.UTF_8); + + doAnswer((Answer>) invocation -> { + StreamObserver responseObserver = + (StreamObserver) invocation.getArguments()[0]; + + DaprProtos.EncryptResponse response = DaprProtos.EncryptResponse.newBuilder() + .setPayload(CommonProtos.StreamPayload.newBuilder() + .setData(ByteString.copyFrom(encryptedData)) + .setSeq(0) + .build()) + .build(); + responseObserver.onNext(response); + 
responseObserver.onCompleted(); + + return mock(StreamObserver.class); + }).when(daprStub).encryptAlpha1(any()); + + Flux plainTextStream = Flux.just(plaintext); + EncryptRequestAlpha1 request = new EncryptRequestAlpha1( + "mycomponent", + plainTextStream, + "mykey", + "RSA-OAEP-256" + ).setDataEncryptionCipher("aes-gcm"); + + List results = previewClient.encrypt(request).collectList().block(); + + assertNotNull(results); + assertEquals(1, results.size()); + } + + @Test + @DisplayName("encrypt should handle omit decryption key name option") + public void encryptWithOmitDecryptionKeyNameTest() { + byte[] plaintext = "Hello, World!".getBytes(StandardCharsets.UTF_8); + byte[] encryptedData = "encrypted-data".getBytes(StandardCharsets.UTF_8); + + doAnswer((Answer>) invocation -> { + StreamObserver responseObserver = + (StreamObserver) invocation.getArguments()[0]; + + DaprProtos.EncryptResponse response = DaprProtos.EncryptResponse.newBuilder() + .setPayload(CommonProtos.StreamPayload.newBuilder() + .setData(ByteString.copyFrom(encryptedData)) + .setSeq(0) + .build()) + .build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + + return mock(StreamObserver.class); + }).when(daprStub).encryptAlpha1(any()); + + Flux plainTextStream = Flux.just(plaintext); + EncryptRequestAlpha1 request = new EncryptRequestAlpha1( + "mycomponent", + plainTextStream, + "mykey", + "RSA-OAEP-256" + ).setOmitDecryptionKeyName(true); + + List results = previewClient.encrypt(request).collectList().block(); + + assertNotNull(results); + assertEquals(1, results.size()); + } + + @Test + @DisplayName("encrypt should handle decryption key name option") + public void encryptWithDecryptionKeyNameTest() { + byte[] plaintext = "Hello, World!".getBytes(StandardCharsets.UTF_8); + byte[] encryptedData = "encrypted-data".getBytes(StandardCharsets.UTF_8); + + doAnswer((Answer>) invocation -> { + StreamObserver responseObserver = + (StreamObserver) invocation.getArguments()[0]; + + DaprProtos.EncryptResponse response = DaprProtos.EncryptResponse.newBuilder() + .setPayload(CommonProtos.StreamPayload.newBuilder() + .setData(ByteString.copyFrom(encryptedData)) + .setSeq(0) + .build()) + .build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + + return mock(StreamObserver.class); + }).when(daprStub).encryptAlpha1(any()); + + Flux plainTextStream = Flux.just(plaintext); + EncryptRequestAlpha1 request = new EncryptRequestAlpha1( + "mycomponent", + plainTextStream, + "mykey", + "RSA-OAEP-256" + ).setDecryptionKeyName("different-key"); + + List results = previewClient.encrypt(request).collectList().block(); + + assertNotNull(results); + assertEquals(1, results.size()); + } + + @Test + @DisplayName("encrypt should handle all optional fields") + public void encryptWithAllOptionalFieldsTest() { + byte[] plaintext = "Hello, World!".getBytes(StandardCharsets.UTF_8); + byte[] encryptedData = "encrypted-data".getBytes(StandardCharsets.UTF_8); + + doAnswer((Answer>) invocation -> { + StreamObserver responseObserver = + (StreamObserver) invocation.getArguments()[0]; + + DaprProtos.EncryptResponse response = DaprProtos.EncryptResponse.newBuilder() + .setPayload(CommonProtos.StreamPayload.newBuilder() + .setData(ByteString.copyFrom(encryptedData)) + .setSeq(0) + .build()) + .build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + + return mock(StreamObserver.class); + }).when(daprStub).encryptAlpha1(any()); + + Flux plainTextStream = Flux.just(plaintext); + EncryptRequestAlpha1 
request = new EncryptRequestAlpha1( + "mycomponent", + plainTextStream, + "mykey", + "RSA-OAEP-256" + ) + .setDataEncryptionCipher("chacha20-poly1305") + .setOmitDecryptionKeyName(true) + .setDecryptionKeyName("decrypt-key"); + + List results = previewClient.encrypt(request).collectList().block(); + + assertNotNull(results); + assertEquals(1, results.size()); + } + + @Test + @DisplayName("encrypt should filter empty data from response") + public void encryptFilterEmptyDataTest() { + byte[] plaintext = "Hello, World!".getBytes(StandardCharsets.UTF_8); + byte[] validData = "valid-data".getBytes(StandardCharsets.UTF_8); + + doAnswer((Answer>) invocation -> { + StreamObserver responseObserver = + (StreamObserver) invocation.getArguments()[0]; + + // Send empty data - should be filtered + responseObserver.onNext(DaprProtos.EncryptResponse.newBuilder() + .setPayload(CommonProtos.StreamPayload.newBuilder() + .setData(ByteString.EMPTY) + .setSeq(0) + .build()) + .build()); + + // Send valid data + responseObserver.onNext(DaprProtos.EncryptResponse.newBuilder() + .setPayload(CommonProtos.StreamPayload.newBuilder() + .setData(ByteString.copyFrom(validData)) + .setSeq(1) + .build()) + .build()); + + responseObserver.onCompleted(); + + return mock(StreamObserver.class); + }).when(daprStub).encryptAlpha1(any()); + + Flux plainTextStream = Flux.just(plaintext); + EncryptRequestAlpha1 request = new EncryptRequestAlpha1( + "mycomponent", + plainTextStream, + "mykey", + "RSA-OAEP-256" + ); + + List results = previewClient.encrypt(request).collectList().block(); + + assertNotNull(results); + assertEquals(1, results.size()); + assertArrayEquals(validData, results.get(0)); + } + + // ==================== Decrypt Tests ==================== + + @Test + @DisplayName("decrypt should throw IllegalArgumentException when request is null") + public void decryptNullRequestTest() { + assertThrows(IllegalArgumentException.class, () -> { + previewClient.decrypt(null).blockFirst(); + }); + } + + @Test + @DisplayName("decrypt should throw IllegalArgumentException when component name is null") + public void decryptNullComponentNameTest() { + Flux cipherTextStream = Flux.just("encrypted data".getBytes(StandardCharsets.UTF_8)); + DecryptRequestAlpha1 request = new DecryptRequestAlpha1(null, cipherTextStream); + + assertThrows(IllegalArgumentException.class, () -> { + previewClient.decrypt(request).blockFirst(); + }); + } + + @Test + @DisplayName("decrypt should throw IllegalArgumentException when component name is empty") + public void decryptEmptyComponentNameTest() { + Flux cipherTextStream = Flux.just("encrypted data".getBytes(StandardCharsets.UTF_8)); + DecryptRequestAlpha1 request = new DecryptRequestAlpha1("", cipherTextStream); + + assertThrows(IllegalArgumentException.class, () -> { + previewClient.decrypt(request).blockFirst(); + }); + } + + @Test + @DisplayName("decrypt should throw IllegalArgumentException when component name is whitespace only") + public void decryptWhitespaceComponentNameTest() { + Flux cipherTextStream = Flux.just("encrypted data".getBytes(StandardCharsets.UTF_8)); + DecryptRequestAlpha1 request = new DecryptRequestAlpha1(" ", cipherTextStream); + + assertThrows(IllegalArgumentException.class, () -> { + previewClient.decrypt(request).blockFirst(); + }); + } + + @Test + @DisplayName("decrypt should throw IllegalArgumentException when ciphertext stream is null") + public void decryptNullCiphertextStreamTest() { + DecryptRequestAlpha1 request = new DecryptRequestAlpha1("mycomponent", null); + + 
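+    // the null ciphertext stream should be rejected during request validation, before any gRPC call is made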
assertThrows(IllegalArgumentException.class, () -> { + previewClient.decrypt(request).blockFirst(); + }); + } + + @Test + @DisplayName("decrypt should successfully decrypt data with required fields") + public void decryptSuccessTest() { + byte[] ciphertext = "encrypted-data".getBytes(StandardCharsets.UTF_8); + byte[] decryptedData = "Hello, World!".getBytes(StandardCharsets.UTF_8); + + doAnswer((Answer>) invocation -> { + StreamObserver responseObserver = + (StreamObserver) invocation.getArguments()[0]; + + DaprProtos.DecryptResponse response = DaprProtos.DecryptResponse.newBuilder() + .setPayload(CommonProtos.StreamPayload.newBuilder() + .setData(ByteString.copyFrom(decryptedData)) + .setSeq(0) + .build()) + .build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + + return mock(StreamObserver.class); + }).when(daprStub).decryptAlpha1(any()); + + Flux cipherTextStream = Flux.just(ciphertext); + DecryptRequestAlpha1 request = new DecryptRequestAlpha1("mycomponent", cipherTextStream); + + List results = previewClient.decrypt(request).collectList().block(); + + assertNotNull(results); + assertEquals(1, results.size()); + assertArrayEquals(decryptedData, results.get(0)); + } + + @Test + @DisplayName("decrypt should handle multiple response chunks") + public void decryptMultipleChunksResponseTest() { + byte[] ciphertext = "encrypted-data".getBytes(StandardCharsets.UTF_8); + byte[] chunk1 = "chunk1".getBytes(StandardCharsets.UTF_8); + byte[] chunk2 = "chunk2".getBytes(StandardCharsets.UTF_8); + byte[] chunk3 = "chunk3".getBytes(StandardCharsets.UTF_8); + + doAnswer((Answer>) invocation -> { + StreamObserver responseObserver = + (StreamObserver) invocation.getArguments()[0]; + + responseObserver.onNext(DaprProtos.DecryptResponse.newBuilder() + .setPayload(CommonProtos.StreamPayload.newBuilder() + .setData(ByteString.copyFrom(chunk1)) + .setSeq(0) + .build()) + .build()); + responseObserver.onNext(DaprProtos.DecryptResponse.newBuilder() + .setPayload(CommonProtos.StreamPayload.newBuilder() + .setData(ByteString.copyFrom(chunk2)) + .setSeq(1) + .build()) + .build()); + responseObserver.onNext(DaprProtos.DecryptResponse.newBuilder() + .setPayload(CommonProtos.StreamPayload.newBuilder() + .setData(ByteString.copyFrom(chunk3)) + .setSeq(2) + .build()) + .build()); + responseObserver.onCompleted(); + + return mock(StreamObserver.class); + }).when(daprStub).decryptAlpha1(any()); + + Flux cipherTextStream = Flux.just(ciphertext); + DecryptRequestAlpha1 request = new DecryptRequestAlpha1("mycomponent", cipherTextStream); + + List results = previewClient.decrypt(request).collectList().block(); + + assertNotNull(results); + assertEquals(3, results.size()); + assertArrayEquals(chunk1, results.get(0)); + assertArrayEquals(chunk2, results.get(1)); + assertArrayEquals(chunk3, results.get(2)); + } + + @Test + @DisplayName("decrypt should handle optional key name") + public void decryptWithKeyNameTest() { + byte[] ciphertext = "encrypted-data".getBytes(StandardCharsets.UTF_8); + byte[] decryptedData = "Hello, World!".getBytes(StandardCharsets.UTF_8); + + doAnswer((Answer>) invocation -> { + StreamObserver responseObserver = + (StreamObserver) invocation.getArguments()[0]; + + DaprProtos.DecryptResponse response = DaprProtos.DecryptResponse.newBuilder() + .setPayload(CommonProtos.StreamPayload.newBuilder() + .setData(ByteString.copyFrom(decryptedData)) + .setSeq(0) + .build()) + .build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + + return 
mock(StreamObserver.class); + }).when(daprStub).decryptAlpha1(any()); + + Flux cipherTextStream = Flux.just(ciphertext); + DecryptRequestAlpha1 request = new DecryptRequestAlpha1("mycomponent", cipherTextStream) + .setKeyName("mykey"); + + List results = previewClient.decrypt(request).collectList().block(); + + assertNotNull(results); + assertEquals(1, results.size()); + assertArrayEquals(decryptedData, results.get(0)); + } + + @Test + @DisplayName("decrypt should filter empty data from response") + public void decryptFilterEmptyDataTest() { + byte[] ciphertext = "encrypted-data".getBytes(StandardCharsets.UTF_8); + byte[] validData = "valid-data".getBytes(StandardCharsets.UTF_8); + + doAnswer((Answer>) invocation -> { + StreamObserver responseObserver = + (StreamObserver) invocation.getArguments()[0]; + + // Send empty data - should be filtered + responseObserver.onNext(DaprProtos.DecryptResponse.newBuilder() + .setPayload(CommonProtos.StreamPayload.newBuilder() + .setData(ByteString.EMPTY) + .setSeq(0) + .build()) + .build()); + + // Send valid data + responseObserver.onNext(DaprProtos.DecryptResponse.newBuilder() + .setPayload(CommonProtos.StreamPayload.newBuilder() + .setData(ByteString.copyFrom(validData)) + .setSeq(1) + .build()) + .build()); + + responseObserver.onCompleted(); + + return mock(StreamObserver.class); + }).when(daprStub).decryptAlpha1(any()); + + Flux cipherTextStream = Flux.just(ciphertext); + DecryptRequestAlpha1 request = new DecryptRequestAlpha1("mycomponent", cipherTextStream); + + List results = previewClient.decrypt(request).collectList().block(); + + assertNotNull(results); + assertEquals(1, results.size()); + assertArrayEquals(validData, results.get(0)); + } + + @Test + @DisplayName("decrypt should handle key name with version") + public void decryptWithKeyNameVersionTest() { + byte[] ciphertext = "encrypted-data".getBytes(StandardCharsets.UTF_8); + byte[] decryptedData = "Hello, World!".getBytes(StandardCharsets.UTF_8); + + doAnswer((Answer>) invocation -> { + StreamObserver responseObserver = + (StreamObserver) invocation.getArguments()[0]; + + DaprProtos.DecryptResponse response = DaprProtos.DecryptResponse.newBuilder() + .setPayload(CommonProtos.StreamPayload.newBuilder() + .setData(ByteString.copyFrom(decryptedData)) + .setSeq(0) + .build()) + .build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + + return mock(StreamObserver.class); + }).when(daprStub).decryptAlpha1(any()); + + Flux cipherTextStream = Flux.just(ciphertext); + DecryptRequestAlpha1 request = new DecryptRequestAlpha1("mycomponent", cipherTextStream) + .setKeyName("mykey/v2"); + + List results = previewClient.decrypt(request).collectList().block(); + + assertNotNull(results); + assertEquals(1, results.size()); + } } diff --git a/sdk/src/test/java/io/dapr/client/ProtobufValueHelperTest.java b/sdk/src/test/java/io/dapr/client/ProtobufValueHelperTest.java index c345f34ff..c6bfa5eb2 100644 --- a/sdk/src/test/java/io/dapr/client/ProtobufValueHelperTest.java +++ b/sdk/src/test/java/io/dapr/client/ProtobufValueHelperTest.java @@ -353,49 +353,49 @@ public void testToProtobufValue_OpenAPIFunctionSchema() throws IOException { functionSchema.put("type", "function"); functionSchema.put("name", "get_horoscope"); functionSchema.put("description", "Get today's horoscope for an astrological sign."); - + Map parameters = new LinkedHashMap<>(); parameters.put("type", "object"); - + Map properties = new LinkedHashMap<>(); Map signProperty = new LinkedHashMap<>(); 
signProperty.put("type", "string"); signProperty.put("description", "An astrological sign like Taurus or Aquarius"); properties.put("sign", signProperty); - + parameters.put("properties", properties); parameters.put("required", Arrays.asList("sign")); - + functionSchema.put("parameters", parameters); - + Value result = ProtobufValueHelper.toProtobufValue(functionSchema); - + assertNotNull(result); assertTrue(result.hasStructValue()); Struct rootStruct = result.getStructValue(); - + // Verify root level fields assertEquals("function", rootStruct.getFieldsMap().get("type").getStringValue()); assertEquals("get_horoscope", rootStruct.getFieldsMap().get("name").getStringValue()); - assertEquals("Get today's horoscope for an astrological sign.", + assertEquals("Get today's horoscope for an astrological sign.", rootStruct.getFieldsMap().get("description").getStringValue()); - + // Verify parameters object assertTrue(rootStruct.getFieldsMap().get("parameters").hasStructValue()); Struct parametersStruct = rootStruct.getFieldsMap().get("parameters").getStructValue(); assertEquals("object", parametersStruct.getFieldsMap().get("type").getStringValue()); - + // Verify properties object assertTrue(parametersStruct.getFieldsMap().get("properties").hasStructValue()); Struct propertiesStruct = parametersStruct.getFieldsMap().get("properties").getStructValue(); - + // Verify sign property assertTrue(propertiesStruct.getFieldsMap().get("sign").hasStructValue()); Struct signStruct = propertiesStruct.getFieldsMap().get("sign").getStructValue(); assertEquals("string", signStruct.getFieldsMap().get("type").getStringValue()); - assertEquals("An astrological sign like Taurus or Aquarius", + assertEquals("An astrological sign like Taurus or Aquarius", signStruct.getFieldsMap().get("description").getStringValue()); - + // Verify required array assertTrue(parametersStruct.getFieldsMap().get("required").hasListValue()); ListValue requiredList = parametersStruct.getFieldsMap().get("required").getListValue(); diff --git a/sdk/src/test/java/io/dapr/client/domain/DecryptRequestAlpha1Test.java b/sdk/src/test/java/io/dapr/client/domain/DecryptRequestAlpha1Test.java new file mode 100644 index 000000000..499615a03 --- /dev/null +++ b/sdk/src/test/java/io/dapr/client/domain/DecryptRequestAlpha1Test.java @@ -0,0 +1,346 @@ +/* + * Copyright 2024 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package io.dapr.client.domain; + +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; +import reactor.core.publisher.Flux; +import reactor.test.StepVerifier; + +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; + +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertSame; + +public class DecryptRequestAlpha1Test { + + private static final String COMPONENT_NAME = "mycomponent"; + private static final String ENCRYPTED_DATA = "encrypted data"; + + @Test + @DisplayName("Constructor should set required fields correctly") + public void testConstructorWithRequiredFields() { + Flux cipherTextStream = Flux.just(ENCRYPTED_DATA.getBytes(StandardCharsets.UTF_8)); + + DecryptRequestAlpha1 request = new DecryptRequestAlpha1( + COMPONENT_NAME, + cipherTextStream + ); + + assertEquals(COMPONENT_NAME, request.getComponentName()); + assertNotNull(request.getCipherTextStream()); + assertNull(request.getKeyName()); + } + + @Test + @DisplayName("setKeyName should set key name correctly") + public void testFluentSetKeyName() { + Flux cipherTextStream = Flux.just(ENCRYPTED_DATA.getBytes(StandardCharsets.UTF_8)); + + DecryptRequestAlpha1 request = new DecryptRequestAlpha1( + COMPONENT_NAME, + cipherTextStream + ).setKeyName("mykey"); + + assertEquals("mykey", request.getKeyName()); + } + + @Test + @DisplayName("setKeyName should return same instance for method chaining") + public void testFluentSetterReturnsSameInstance() { + Flux cipherTextStream = Flux.just(ENCRYPTED_DATA.getBytes(StandardCharsets.UTF_8)); + + DecryptRequestAlpha1 request = new DecryptRequestAlpha1( + COMPONENT_NAME, + cipherTextStream + ); + + DecryptRequestAlpha1 sameRequest = request.setKeyName("mykey"); + assertSame(request, sameRequest); + } + + @Test + @DisplayName("Constructor should accept null component name") + public void testNullComponentName() { + Flux cipherTextStream = Flux.just(ENCRYPTED_DATA.getBytes(StandardCharsets.UTF_8)); + + DecryptRequestAlpha1 request = new DecryptRequestAlpha1( + null, + cipherTextStream + ); + + assertNull(request.getComponentName()); + } + + @Test + @DisplayName("Constructor should accept null ciphertext stream") + public void testNullCipherTextStream() { + DecryptRequestAlpha1 request = new DecryptRequestAlpha1( + COMPONENT_NAME, + null + ); + + assertNull(request.getCipherTextStream()); + } + + @Test + @DisplayName("Constructor should accept empty stream") + public void testEmptyStream() { + Flux emptyStream = Flux.empty(); + + DecryptRequestAlpha1 request = new DecryptRequestAlpha1( + COMPONENT_NAME, + emptyStream + ); + + assertNotNull(request.getCipherTextStream()); + StepVerifier.create(request.getCipherTextStream()) + .verifyComplete(); + } + + @Test + @DisplayName("Should handle multiple chunks in stream") + public void testMultipleChunksStream() { + byte[] chunk1 = "chunk1".getBytes(StandardCharsets.UTF_8); + byte[] chunk2 = "chunk2".getBytes(StandardCharsets.UTF_8); + byte[] chunk3 = "chunk3".getBytes(StandardCharsets.UTF_8); + + Flux multiChunkStream = Flux.just(chunk1, chunk2, chunk3); + + DecryptRequestAlpha1 request = new DecryptRequestAlpha1( + COMPONENT_NAME, + multiChunkStream + ); + + assertNotNull(request.getCipherTextStream()); + + List collectedChunks = new 
ArrayList<>(); + StepVerifier.create(request.getCipherTextStream()) + .recordWith(() -> collectedChunks) + .expectNextCount(3) + .verifyComplete(); + + assertEquals(3, collectedChunks.size()); + assertArrayEquals(chunk1, collectedChunks.get(0)); + assertArrayEquals(chunk2, collectedChunks.get(1)); + assertArrayEquals(chunk3, collectedChunks.get(2)); + } + + @Test + @DisplayName("setKeyName should accept null value") + public void testSetKeyNameNull() { + Flux cipherTextStream = Flux.just(ENCRYPTED_DATA.getBytes(StandardCharsets.UTF_8)); + + DecryptRequestAlpha1 request = new DecryptRequestAlpha1( + COMPONENT_NAME, + cipherTextStream + ) + .setKeyName("some-key") + .setKeyName(null); + + assertNull(request.getKeyName()); + } + + @Test + @DisplayName("Should handle key name with version") + public void testKeyNameWithVersion() { + Flux cipherTextStream = Flux.just(ENCRYPTED_DATA.getBytes(StandardCharsets.UTF_8)); + String keyNameWithVersion = "mykey/v1"; + + DecryptRequestAlpha1 request = new DecryptRequestAlpha1( + COMPONENT_NAME, + cipherTextStream + ).setKeyName(keyNameWithVersion); + + assertEquals(keyNameWithVersion, request.getKeyName()); + } + + @Test + @DisplayName("Should handle empty component name") + public void testEmptyComponentName() { + Flux cipherTextStream = Flux.just(ENCRYPTED_DATA.getBytes(StandardCharsets.UTF_8)); + + DecryptRequestAlpha1 request = new DecryptRequestAlpha1( + "", + cipherTextStream + ); + + assertEquals("", request.getComponentName()); + } + + @Test + @DisplayName("Should handle whitespace-only component name") + public void testWhitespaceComponentName() { + Flux cipherTextStream = Flux.just(ENCRYPTED_DATA.getBytes(StandardCharsets.UTF_8)); + + DecryptRequestAlpha1 request = new DecryptRequestAlpha1( + " ", + cipherTextStream + ); + + assertEquals(" ", request.getComponentName()); + } + + @Test + @DisplayName("Should handle empty key name") + public void testEmptyKeyName() { + Flux cipherTextStream = Flux.just(ENCRYPTED_DATA.getBytes(StandardCharsets.UTF_8)); + + DecryptRequestAlpha1 request = new DecryptRequestAlpha1( + COMPONENT_NAME, + cipherTextStream + ).setKeyName(""); + + assertEquals("", request.getKeyName()); + } + + @Test + @DisplayName("Should handle large data stream") + public void testLargeDataStream() { + byte[] largeChunk = new byte[1024 * 1024]; // 1MB chunk + for (int i = 0; i < largeChunk.length; i++) { + largeChunk[i] = (byte) (i % 256); + } + + Flux largeStream = Flux.just(largeChunk); + + DecryptRequestAlpha1 request = new DecryptRequestAlpha1( + COMPONENT_NAME, + largeStream + ); + + assertNotNull(request.getCipherTextStream()); + + StepVerifier.create(request.getCipherTextStream()) + .expectNextMatches(data -> data.length == 1024 * 1024) + .verifyComplete(); + } + + @Test + @DisplayName("Should handle empty byte array in stream") + public void testEmptyByteArrayInStream() { + byte[] emptyArray = new byte[0]; + + Flux stream = Flux.just(emptyArray); + + DecryptRequestAlpha1 request = new DecryptRequestAlpha1( + COMPONENT_NAME, + stream + ); + + assertNotNull(request.getCipherTextStream()); + + StepVerifier.create(request.getCipherTextStream()) + .expectNextMatches(data -> data.length == 0) + .verifyComplete(); + } + + @Test + @DisplayName("Should handle stream with binary data") + public void testStreamWithBinaryData() { + byte[] binaryData = new byte[] {0x00, 0x01, 0x02, (byte) 0xFF, (byte) 0xFE, (byte) 0xFD}; + + Flux stream = Flux.just(binaryData); + + DecryptRequestAlpha1 request = new DecryptRequestAlpha1( + COMPONENT_NAME, + 
stream + ); + + StepVerifier.create(request.getCipherTextStream()) + .expectNextMatches(data -> { + if (data.length != binaryData.length) return false; + for (int i = 0; i < data.length; i++) { + if (data[i] != binaryData[i]) return false; + } + return true; + }) + .verifyComplete(); + } + + @Test + @DisplayName("Complete decryption request with key name") + public void testCompleteConfiguration() { + Flux cipherTextStream = Flux.just(ENCRYPTED_DATA.getBytes(StandardCharsets.UTF_8)); + + DecryptRequestAlpha1 request = new DecryptRequestAlpha1( + COMPONENT_NAME, + cipherTextStream + ).setKeyName("decryption-key/v2"); + + assertEquals(COMPONENT_NAME, request.getComponentName()); + assertNotNull(request.getCipherTextStream()); + assertEquals("decryption-key/v2", request.getKeyName()); + } + + @Test + @DisplayName("Should handle multiple setKeyName calls") + public void testMultipleSetKeyNameCalls() { + Flux cipherTextStream = Flux.just(ENCRYPTED_DATA.getBytes(StandardCharsets.UTF_8)); + + DecryptRequestAlpha1 request = new DecryptRequestAlpha1( + COMPONENT_NAME, + cipherTextStream + ) + .setKeyName("key1") + .setKeyName("key2") + .setKeyName("key3"); + + assertEquals("key3", request.getKeyName()); + } + + @Test + @DisplayName("Should handle many chunks stream") + public void testManyChunksStream() { + int numberOfChunks = 100; + List chunks = new ArrayList<>(); + for (int i = 0; i < numberOfChunks; i++) { + chunks.add(("chunk" + i).getBytes(StandardCharsets.UTF_8)); + } + + Flux manyChunksStream = Flux.fromIterable(chunks); + + DecryptRequestAlpha1 request = new DecryptRequestAlpha1( + COMPONENT_NAME, + manyChunksStream + ); + + assertNotNull(request.getCipherTextStream()); + + StepVerifier.create(request.getCipherTextStream()) + .expectNextCount(numberOfChunks) + .verifyComplete(); + } + + @Test + @DisplayName("Should handle stream with special characters in data") + public void testStreamWithSpecialCharacters() { + String specialData = "特殊字符 🔓 データ"; + Flux stream = Flux.just(specialData.getBytes(StandardCharsets.UTF_8)); + + DecryptRequestAlpha1 request = new DecryptRequestAlpha1( + COMPONENT_NAME, + stream + ); + + StepVerifier.create(request.getCipherTextStream()) + .expectNextMatches(data -> new String(data, StandardCharsets.UTF_8).equals(specialData)) + .verifyComplete(); + } +} diff --git a/sdk/src/test/java/io/dapr/client/domain/EncryptRequestAlpha1Test.java b/sdk/src/test/java/io/dapr/client/domain/EncryptRequestAlpha1Test.java new file mode 100644 index 000000000..b4479ce94 --- /dev/null +++ b/sdk/src/test/java/io/dapr/client/domain/EncryptRequestAlpha1Test.java @@ -0,0 +1,476 @@ +/* + * Copyright 2024 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package io.dapr.client.domain; + +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; +import reactor.core.publisher.Flux; +import reactor.test.StepVerifier; + +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; + +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertSame; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class EncryptRequestAlpha1Test { + + private static final String COMPONENT_NAME = "mycomponent"; + private static final String KEY_NAME = "mykey"; + private static final String KEY_WRAP_ALGORITHM = "RSA-OAEP-256"; + private static final String TEST_DATA = "test data"; + + @Test + @DisplayName("Constructor should set all required fields correctly") + public void testConstructorWithRequiredFields() { + Flux plainTextStream = Flux.just(TEST_DATA.getBytes(StandardCharsets.UTF_8)); + + EncryptRequestAlpha1 request = new EncryptRequestAlpha1( + COMPONENT_NAME, + plainTextStream, + KEY_NAME, + KEY_WRAP_ALGORITHM + ); + + assertEquals(COMPONENT_NAME, request.getComponentName()); + assertNotNull(request.getPlainTextStream()); + assertEquals(KEY_NAME, request.getKeyName()); + assertEquals(KEY_WRAP_ALGORITHM, request.getKeyWrapAlgorithm()); + assertNull(request.getDataEncryptionCipher()); + assertFalse(request.isOmitDecryptionKeyName()); + assertNull(request.getDecryptionKeyName()); + } + + @Test + @DisplayName("Fluent setters should set optional fields correctly") + public void testFluentSetters() { + Flux plainTextStream = Flux.just(TEST_DATA.getBytes(StandardCharsets.UTF_8)); + + EncryptRequestAlpha1 request = new EncryptRequestAlpha1( + COMPONENT_NAME, + plainTextStream, + KEY_NAME, + KEY_WRAP_ALGORITHM + ) + .setDataEncryptionCipher("aes-gcm") + .setOmitDecryptionKeyName(true) + .setDecryptionKeyName("decrypt-key"); + + assertEquals("aes-gcm", request.getDataEncryptionCipher()); + assertTrue(request.isOmitDecryptionKeyName()); + assertEquals("decrypt-key", request.getDecryptionKeyName()); + } + + @Test + @DisplayName("Fluent setters should return same instance for method chaining") + public void testFluentSettersReturnSameInstance() { + Flux plainTextStream = Flux.just(TEST_DATA.getBytes(StandardCharsets.UTF_8)); + + EncryptRequestAlpha1 request = new EncryptRequestAlpha1( + COMPONENT_NAME, + plainTextStream, + KEY_NAME, + KEY_WRAP_ALGORITHM + ); + + EncryptRequestAlpha1 sameRequest = request.setDataEncryptionCipher("aes-gcm"); + assertSame(request, sameRequest); + + sameRequest = request.setOmitDecryptionKeyName(true); + assertSame(request, sameRequest); + + sameRequest = request.setDecryptionKeyName("decrypt-key"); + assertSame(request, sameRequest); + } + + @Test + @DisplayName("Constructor should accept null component name") + public void testNullComponentName() { + Flux plainTextStream = Flux.just(TEST_DATA.getBytes(StandardCharsets.UTF_8)); + + EncryptRequestAlpha1 request = new EncryptRequestAlpha1( + null, + plainTextStream, + KEY_NAME, + KEY_WRAP_ALGORITHM + ); + + assertNull(request.getComponentName()); + } + + @Test + @DisplayName("Constructor should accept 
null plaintext stream") + public void testNullPlainTextStream() { + EncryptRequestAlpha1 request = new EncryptRequestAlpha1( + COMPONENT_NAME, + null, + KEY_NAME, + KEY_WRAP_ALGORITHM + ); + + assertNull(request.getPlainTextStream()); + } + + @Test + @DisplayName("Constructor should accept null key name") + public void testNullKeyName() { + Flux plainTextStream = Flux.just(TEST_DATA.getBytes(StandardCharsets.UTF_8)); + + EncryptRequestAlpha1 request = new EncryptRequestAlpha1( + COMPONENT_NAME, + plainTextStream, + null, + KEY_WRAP_ALGORITHM + ); + + assertNull(request.getKeyName()); + } + + @Test + @DisplayName("Constructor should accept null key wrap algorithm") + public void testNullKeyWrapAlgorithm() { + Flux plainTextStream = Flux.just(TEST_DATA.getBytes(StandardCharsets.UTF_8)); + + EncryptRequestAlpha1 request = new EncryptRequestAlpha1( + COMPONENT_NAME, + plainTextStream, + KEY_NAME, + null + ); + + assertNull(request.getKeyWrapAlgorithm()); + } + + @Test + @DisplayName("Constructor should accept empty stream") + public void testEmptyStream() { + Flux emptyStream = Flux.empty(); + + EncryptRequestAlpha1 request = new EncryptRequestAlpha1( + COMPONENT_NAME, + emptyStream, + KEY_NAME, + KEY_WRAP_ALGORITHM + ); + + assertNotNull(request.getPlainTextStream()); + StepVerifier.create(request.getPlainTextStream()) + .verifyComplete(); + } + + @Test + @DisplayName("Should handle multiple chunks in stream") + public void testMultipleChunksStream() { + byte[] chunk1 = "chunk1".getBytes(StandardCharsets.UTF_8); + byte[] chunk2 = "chunk2".getBytes(StandardCharsets.UTF_8); + byte[] chunk3 = "chunk3".getBytes(StandardCharsets.UTF_8); + + Flux multiChunkStream = Flux.just(chunk1, chunk2, chunk3); + + EncryptRequestAlpha1 request = new EncryptRequestAlpha1( + COMPONENT_NAME, + multiChunkStream, + KEY_NAME, + "A256KW" + ); + + assertNotNull(request.getPlainTextStream()); + assertEquals("A256KW", request.getKeyWrapAlgorithm()); + + List collectedChunks = new ArrayList<>(); + StepVerifier.create(request.getPlainTextStream()) + .recordWith(() -> collectedChunks) + .expectNextCount(3) + .verifyComplete(); + + assertEquals(3, collectedChunks.size()); + assertArrayEquals(chunk1, collectedChunks.get(0)); + assertArrayEquals(chunk2, collectedChunks.get(1)); + assertArrayEquals(chunk3, collectedChunks.get(2)); + } + + @ParameterizedTest + @DisplayName("Should support various key wrap algorithms") + @ValueSource(strings = {"A256KW", "AES", "A128CBC", "A192CBC", "A256CBC", "RSA-OAEP-256", "RSA"}) + public void testVariousKeyWrapAlgorithms(String algorithm) { + Flux plainTextStream = Flux.just(TEST_DATA.getBytes(StandardCharsets.UTF_8)); + + EncryptRequestAlpha1 request = new EncryptRequestAlpha1( + COMPONENT_NAME, + plainTextStream, + KEY_NAME, + algorithm + ); + + assertEquals(algorithm, request.getKeyWrapAlgorithm()); + } + + @ParameterizedTest + @DisplayName("Should support various data encryption ciphers") + @ValueSource(strings = {"aes-gcm", "chacha20-poly1305"}) + public void testVariousDataEncryptionCiphers(String cipher) { + Flux plainTextStream = Flux.just(TEST_DATA.getBytes(StandardCharsets.UTF_8)); + + EncryptRequestAlpha1 request = new EncryptRequestAlpha1( + COMPONENT_NAME, + plainTextStream, + KEY_NAME, + KEY_WRAP_ALGORITHM + ).setDataEncryptionCipher(cipher); + + assertEquals(cipher, request.getDataEncryptionCipher()); + } + + @Test + @DisplayName("setDataEncryptionCipher should accept null value") + public void testSetDataEncryptionCipherNull() { + Flux plainTextStream = 
Flux.just(TEST_DATA.getBytes(StandardCharsets.UTF_8)); + + EncryptRequestAlpha1 request = new EncryptRequestAlpha1( + COMPONENT_NAME, + plainTextStream, + KEY_NAME, + KEY_WRAP_ALGORITHM + ) + .setDataEncryptionCipher("aes-gcm") + .setDataEncryptionCipher(null); + + assertNull(request.getDataEncryptionCipher()); + } + + @Test + @DisplayName("setDecryptionKeyName should accept null value") + public void testSetDecryptionKeyNameNull() { + Flux plainTextStream = Flux.just(TEST_DATA.getBytes(StandardCharsets.UTF_8)); + + EncryptRequestAlpha1 request = new EncryptRequestAlpha1( + COMPONENT_NAME, + plainTextStream, + KEY_NAME, + KEY_WRAP_ALGORITHM + ) + .setDecryptionKeyName("some-key") + .setDecryptionKeyName(null); + + assertNull(request.getDecryptionKeyName()); + } + + @Test + @DisplayName("setOmitDecryptionKeyName should toggle boolean value") + public void testSetOmitDecryptionKeyNameToggle() { + Flux plainTextStream = Flux.just(TEST_DATA.getBytes(StandardCharsets.UTF_8)); + + EncryptRequestAlpha1 request = new EncryptRequestAlpha1( + COMPONENT_NAME, + plainTextStream, + KEY_NAME, + KEY_WRAP_ALGORITHM + ); + + assertFalse(request.isOmitDecryptionKeyName()); + + request.setOmitDecryptionKeyName(true); + assertTrue(request.isOmitDecryptionKeyName()); + + request.setOmitDecryptionKeyName(false); + assertFalse(request.isOmitDecryptionKeyName()); + } + + @Test + @DisplayName("Should handle large data stream") + public void testLargeDataStream() { + byte[] largeChunk = new byte[1024 * 1024]; // 1MB chunk + for (int i = 0; i < largeChunk.length; i++) { + largeChunk[i] = (byte) (i % 256); + } + + Flux largeStream = Flux.just(largeChunk); + + EncryptRequestAlpha1 request = new EncryptRequestAlpha1( + COMPONENT_NAME, + largeStream, + KEY_NAME, + KEY_WRAP_ALGORITHM + ); + + assertNotNull(request.getPlainTextStream()); + + StepVerifier.create(request.getPlainTextStream()) + .expectNextMatches(data -> data.length == 1024 * 1024) + .verifyComplete(); + } + + @Test + @DisplayName("Should handle empty byte array in stream") + public void testEmptyByteArrayInStream() { + byte[] emptyArray = new byte[0]; + + Flux stream = Flux.just(emptyArray); + + EncryptRequestAlpha1 request = new EncryptRequestAlpha1( + COMPONENT_NAME, + stream, + KEY_NAME, + KEY_WRAP_ALGORITHM + ); + + assertNotNull(request.getPlainTextStream()); + + StepVerifier.create(request.getPlainTextStream()) + .expectNextMatches(data -> data.length == 0) + .verifyComplete(); + } + + @Test + @DisplayName("Should handle key name with version") + public void testKeyNameWithVersion() { + Flux plainTextStream = Flux.just(TEST_DATA.getBytes(StandardCharsets.UTF_8)); + String keyNameWithVersion = "mykey/v1"; + + EncryptRequestAlpha1 request = new EncryptRequestAlpha1( + COMPONENT_NAME, + plainTextStream, + keyNameWithVersion, + KEY_WRAP_ALGORITHM + ); + + assertEquals(keyNameWithVersion, request.getKeyName()); + } + + @Test + @DisplayName("Should handle decryption key name with version") + public void testDecryptionKeyNameWithVersion() { + Flux plainTextStream = Flux.just(TEST_DATA.getBytes(StandardCharsets.UTF_8)); + String decryptionKeyWithVersion = "decrypt-key/v2"; + + EncryptRequestAlpha1 request = new EncryptRequestAlpha1( + COMPONENT_NAME, + plainTextStream, + KEY_NAME, + KEY_WRAP_ALGORITHM + ).setDecryptionKeyName(decryptionKeyWithVersion); + + assertEquals(decryptionKeyWithVersion, request.getDecryptionKeyName()); + } + + @Test + @DisplayName("Should handle empty component name") + public void testEmptyComponentName() { + Flux plainTextStream = 
Flux.just(TEST_DATA.getBytes(StandardCharsets.UTF_8)); + + EncryptRequestAlpha1 request = new EncryptRequestAlpha1( + "", + plainTextStream, + KEY_NAME, + KEY_WRAP_ALGORITHM + ); + + assertEquals("", request.getComponentName()); + } + + @Test + @DisplayName("Should handle whitespace-only component name") + public void testWhitespaceComponentName() { + Flux plainTextStream = Flux.just(TEST_DATA.getBytes(StandardCharsets.UTF_8)); + + EncryptRequestAlpha1 request = new EncryptRequestAlpha1( + " ", + plainTextStream, + KEY_NAME, + KEY_WRAP_ALGORITHM + ); + + assertEquals(" ", request.getComponentName()); + } + + @Test + @DisplayName("Should handle empty key name") + public void testEmptyKeyName() { + Flux plainTextStream = Flux.just(TEST_DATA.getBytes(StandardCharsets.UTF_8)); + + EncryptRequestAlpha1 request = new EncryptRequestAlpha1( + COMPONENT_NAME, + plainTextStream, + "", + KEY_WRAP_ALGORITHM + ); + + assertEquals("", request.getKeyName()); + } + + @Test + @DisplayName("Should handle empty key wrap algorithm") + public void testEmptyKeyWrapAlgorithm() { + Flux plainTextStream = Flux.just(TEST_DATA.getBytes(StandardCharsets.UTF_8)); + + EncryptRequestAlpha1 request = new EncryptRequestAlpha1( + COMPONENT_NAME, + plainTextStream, + KEY_NAME, + "" + ); + + assertEquals("", request.getKeyWrapAlgorithm()); + } + + @Test + @DisplayName("Should handle stream with special characters in data") + public void testStreamWithSpecialCharacters() { + String specialData = "特殊字符 🔐 データ"; + Flux stream = Flux.just(specialData.getBytes(StandardCharsets.UTF_8)); + + EncryptRequestAlpha1 request = new EncryptRequestAlpha1( + COMPONENT_NAME, + stream, + KEY_NAME, + KEY_WRAP_ALGORITHM + ); + + StepVerifier.create(request.getPlainTextStream()) + .expectNextMatches(data -> new String(data, StandardCharsets.UTF_8).equals(specialData)) + .verifyComplete(); + } + + @Test + @DisplayName("Complete configuration with all optional fields") + public void testCompleteConfiguration() { + Flux plainTextStream = Flux.just(TEST_DATA.getBytes(StandardCharsets.UTF_8)); + + EncryptRequestAlpha1 request = new EncryptRequestAlpha1( + COMPONENT_NAME, + plainTextStream, + KEY_NAME, + KEY_WRAP_ALGORITHM + ) + .setDataEncryptionCipher("chacha20-poly1305") + .setOmitDecryptionKeyName(true) + .setDecryptionKeyName("different-key/v3"); + + assertEquals(COMPONENT_NAME, request.getComponentName()); + assertNotNull(request.getPlainTextStream()); + assertEquals(KEY_NAME, request.getKeyName()); + assertEquals(KEY_WRAP_ALGORITHM, request.getKeyWrapAlgorithm()); + assertEquals("chacha20-poly1305", request.getDataEncryptionCipher()); + assertTrue(request.isOmitDecryptionKeyName()); + assertEquals("different-key/v3", request.getDecryptionKeyName()); + } +} From eb9d7c9b7896b980f227d9b352819d7b372b73f1 Mon Sep 17 00:00:00 2001 From: artur-ciocanu Date: Tue, 6 Jan 2026 21:32:33 +0200 Subject: [PATCH 17/18] Adding DaprSpringBootTest and DaprSidecarContainer annotation for easier ITs authoring (#1610) * Adding DaprSpringBootTest and DaprSidecarContainer annotation for easier IT authoring. Signed-off-by: Artur Ciocanu # Conflicts: # testcontainers-dapr/pom.xml * Adding DaprSpringBootTest and DaprSidecarContainer annotation for easier IT authoring. Signed-off-by: Artur Ciocanu * Move all the helper Dapr SpringBoot annotations to tests, to avoid exposing it as public API Signed-off-by: Artur Ciocanu * Fix a few issues related to Dapr container usage in ITs. Signed-off-by: Artur Ciocanu * Addressing code review comments to ensure things are internal. 
Signed-off-by: Artur Ciocanu --------- Signed-off-by: Artur Ciocanu Co-authored-by: Cassie Coyle Signed-off-by: salaboy --- sdk-tests/pom.xml | 5 + .../testcontainers/actors/DaprActorsIT.java | 56 ++-------- .../internal/DaprContainerFactory.java | 64 +++++++++++ .../internal/DaprSidecarContainer.java | 68 ++++++++++++ .../DaprSpringBootContextInitializer.java | 104 ++++++++++++++++++ .../spring/DaprSpringBootExtension.java | 89 +++++++++++++++ .../internal/spring/DaprSpringBootTest.java | 80 ++++++++++++++ testcontainers-dapr/pom.xml | 23 ++-- 8 files changed, 435 insertions(+), 54 deletions(-) create mode 100644 sdk-tests/src/test/java/io/dapr/testcontainers/internal/DaprContainerFactory.java create mode 100644 sdk-tests/src/test/java/io/dapr/testcontainers/internal/DaprSidecarContainer.java create mode 100644 sdk-tests/src/test/java/io/dapr/testcontainers/internal/spring/DaprSpringBootContextInitializer.java create mode 100644 sdk-tests/src/test/java/io/dapr/testcontainers/internal/spring/DaprSpringBootExtension.java create mode 100644 sdk-tests/src/test/java/io/dapr/testcontainers/internal/spring/DaprSpringBootTest.java diff --git a/sdk-tests/pom.xml b/sdk-tests/pom.xml index 29d051135..f22e42a6c 100644 --- a/sdk-tests/pom.xml +++ b/sdk-tests/pom.xml @@ -143,6 +143,11 @@ org.testcontainers junit-jupiter + + io.dapr + testcontainers-dapr + test + org.springframework.data spring-data-keyvalue diff --git a/sdk-tests/src/test/java/io/dapr/it/testcontainers/actors/DaprActorsIT.java b/sdk-tests/src/test/java/io/dapr/it/testcontainers/actors/DaprActorsIT.java index ba8cec619..f0ff24d8f 100644 --- a/sdk-tests/src/test/java/io/dapr/it/testcontainers/actors/DaprActorsIT.java +++ b/sdk-tests/src/test/java/io/dapr/it/testcontainers/actors/DaprActorsIT.java @@ -20,64 +20,32 @@ import io.dapr.testcontainers.Component; import io.dapr.testcontainers.DaprContainer; import io.dapr.testcontainers.DaprLogLevel; +import io.dapr.testcontainers.internal.DaprContainerFactory; +import io.dapr.testcontainers.internal.DaprSidecarContainer; +import io.dapr.testcontainers.internal.spring.DaprSpringBootTest; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.boot.test.context.SpringBootTest; -import org.springframework.boot.test.context.SpringBootTest.WebEnvironment; -import org.springframework.test.context.DynamicPropertyRegistry; -import org.springframework.test.context.DynamicPropertySource; -import org.testcontainers.containers.Network; import org.testcontainers.containers.wait.strategy.Wait; -import org.testcontainers.junit.jupiter.Container; -import org.testcontainers.junit.jupiter.Testcontainers; import java.util.Map; -import java.util.Random; import java.util.UUID; -import static io.dapr.it.testcontainers.ContainerConstants.DAPR_RUNTIME_IMAGE_TAG; import static org.junit.jupiter.api.Assertions.assertEquals; -@SpringBootTest( - webEnvironment = WebEnvironment.RANDOM_PORT, - classes = { - TestActorsApplication.class, - TestDaprActorsConfiguration.class - } -) -@Testcontainers +@DaprSpringBootTest(classes = {TestActorsApplication.class, TestDaprActorsConfiguration.class}) @Tag("testcontainers") public class DaprActorsIT { - private static final Network DAPR_NETWORK = Network.newNetwork(); - private static final Random RANDOM = new Random(); - private static final int PORT = RANDOM.nextInt(1000) + 8000; private static final String ACTORS_MESSAGE_PATTERN = ".*Actor 
runtime started.*"; - @Container - private static final DaprContainer DAPR_CONTAINER = new DaprContainer(DAPR_RUNTIME_IMAGE_TAG) - .withAppName("actor-dapr-app") - .withNetwork(DAPR_NETWORK) - .withComponent(new Component("kvstore", "state.in-memory", "v1", - Map.of("actorStateStore", "true"))) - .withDaprLogLevel(DaprLogLevel.DEBUG) - .withLogConsumer(outputFrame -> System.out.println(outputFrame.getUtf8String())) - .withAppChannelAddress("host.testcontainers.internal") - .withAppPort(PORT); - - /** - * Expose the Dapr ports to the host. - * - * @param registry the dynamic property registry - */ - @DynamicPropertySource - static void daprProperties(DynamicPropertyRegistry registry) { - registry.add("dapr.http.endpoint", DAPR_CONTAINER::getHttpEndpoint); - registry.add("dapr.grpc.endpoint", DAPR_CONTAINER::getGrpcEndpoint); - registry.add("server.port", () -> PORT); - } + @DaprSidecarContainer + private static final DaprContainer DAPR_CONTAINER = DaprContainerFactory.createForSpringBootTest("actor-dapr-app") + .withComponent(new Component("kvstore", "state.in-memory", "v1", + Map.of("actorStateStore", "true"))) + .withDaprLogLevel(DaprLogLevel.DEBUG) + .withLogConsumer(outputFrame -> System.out.println(outputFrame.getUtf8String())); @Autowired private ActorClient daprActorClient; @@ -86,8 +54,8 @@ static void daprProperties(DynamicPropertyRegistry registry) { private ActorRuntime daprActorRuntime; @BeforeEach - public void setUp(){ - org.testcontainers.Testcontainers.exposeHostPorts(PORT); + public void setUp() { + org.testcontainers.Testcontainers.exposeHostPorts(DAPR_CONTAINER.getAppPort()); daprActorRuntime.registerActor(TestActorImpl.class); // Wait for actor runtime to start. diff --git a/sdk-tests/src/test/java/io/dapr/testcontainers/internal/DaprContainerFactory.java b/sdk-tests/src/test/java/io/dapr/testcontainers/internal/DaprContainerFactory.java new file mode 100644 index 000000000..d94e884ac --- /dev/null +++ b/sdk-tests/src/test/java/io/dapr/testcontainers/internal/DaprContainerFactory.java @@ -0,0 +1,64 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.testcontainers.internal; + +import io.dapr.testcontainers.DaprContainer; + +import java.io.IOException; +import java.net.ServerSocket; + +import static io.dapr.testcontainers.DaprContainerConstants.DAPR_RUNTIME_IMAGE_TAG; + +/** + * Factory for creating DaprContainer instances configured for Spring Boot integration tests. + * + *

<p>This class handles the common setup required for bidirectional communication
+ * between Spring Boot applications and the Dapr sidecar in test scenarios.</p>

+ */ +public final class DaprContainerFactory { + + private DaprContainerFactory() { + // Utility class + } + + /** + * Creates a DaprContainer pre-configured for Spring Boot integration tests. + * This factory method handles the common setup required for bidirectional + * communication between Spring Boot and the Dapr sidecar: + *
<ul>
+ *   <li>Allocates a free port for the Spring Boot application</li>
+ *   <li>Configures the app channel address for container-to-host communication</li>
+ * </ul>
+ * + * @param appName the Dapr application name + * @return a pre-configured DaprContainer for Spring Boot tests + */ + public static DaprContainer createForSpringBootTest(String appName) { + int port = allocateFreePort(); + + return new DaprContainer(DAPR_RUNTIME_IMAGE_TAG) + .withAppName(appName) + .withAppPort(port) + .withAppChannelAddress("host.testcontainers.internal"); + } + + private static int allocateFreePort() { + try (ServerSocket socket = new ServerSocket(0)) { + socket.setReuseAddress(true); + return socket.getLocalPort(); + } catch (IOException e) { + throw new IllegalStateException("Failed to allocate free port", e); + } + } +} diff --git a/sdk-tests/src/test/java/io/dapr/testcontainers/internal/DaprSidecarContainer.java b/sdk-tests/src/test/java/io/dapr/testcontainers/internal/DaprSidecarContainer.java new file mode 100644 index 000000000..12a06220e --- /dev/null +++ b/sdk-tests/src/test/java/io/dapr/testcontainers/internal/DaprSidecarContainer.java @@ -0,0 +1,68 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.testcontainers.internal; + +import io.dapr.testcontainers.internal.spring.DaprSpringBootTest; +import org.testcontainers.junit.jupiter.Container; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * Marks a static field containing a {@link io.dapr.testcontainers.DaprContainer} + * for automatic integration with Spring Boot tests. + * + *

<p>This annotation combines the Testcontainers {@link Container} annotation
+ * with Dapr-specific configuration. When used with {@link DaprSpringBootTest},
+ * it automatically:</p>
+ *
+ * <ul>
+ *   <li>Manages the container lifecycle via Testcontainers</li>
+ *   <li>Configures Spring properties (server.port, dapr.http.endpoint, dapr.grpc.endpoint)</li>
+ * </ul>
+ *
+ * <p>Important: For tests that require Dapr-to-app communication (like actor tests),
+ * you must call {@code Testcontainers.exposeHostPorts(container.getAppPort())}
+ * in your {@code @BeforeEach} method before registering actors or making Dapr calls.</p>
+ *
+ * <p>Example usage:</p>
+ *
+ * <pre>{@code
+ * @DaprSpringBootTest(classes = MyApplication.class)
+ * class MyDaprIT {
+ *
+ *     @DaprSidecarContainer
+ *     private static final DaprContainer DAPR = DaprContainerFactory.createForSpringBootTest("my-app")
+ *         .withComponent(new Component("statestore", "state.in-memory", "v1", Map.of()));
+ *
+ *     @BeforeEach
+ *     void setUp() {
+ *         Testcontainers.exposeHostPorts(DAPR.getAppPort());
+ *     }
+ *
+ *     @Test
+ *     void testSomething() {
+ *         // Your test code here
+ *     }
+ * }
+ * }</pre>
+ * + * @see DaprSpringBootTest + * @see io.dapr.testcontainers.DaprContainer#createForSpringBootTest(String) + */ +@Target(ElementType.FIELD) +@Retention(RetentionPolicy.RUNTIME) +@Container +public @interface DaprSidecarContainer { +} diff --git a/sdk-tests/src/test/java/io/dapr/testcontainers/internal/spring/DaprSpringBootContextInitializer.java b/sdk-tests/src/test/java/io/dapr/testcontainers/internal/spring/DaprSpringBootContextInitializer.java new file mode 100644 index 000000000..dc4de7a0f --- /dev/null +++ b/sdk-tests/src/test/java/io/dapr/testcontainers/internal/spring/DaprSpringBootContextInitializer.java @@ -0,0 +1,104 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.testcontainers.internal.spring; + +import io.dapr.testcontainers.DaprContainer; +import org.springframework.context.ApplicationContextInitializer; +import org.springframework.context.ConfigurableApplicationContext; +import org.springframework.core.env.MapPropertySource; + +import java.util.HashMap; +import java.util.Map; +import java.util.function.Supplier; + +/** + * Spring {@link ApplicationContextInitializer} that configures Dapr-related properties + * based on the {@link DaprContainer} registered by {@link DaprSpringBootExtension}. + * + *

<p>This initializer sets the following properties:</p>
+ *
+ * <ul>
+ *   <li>{@code server.port} - The port allocated for the Spring Boot application</li>
+ *   <li>{@code dapr.http.endpoint} - The HTTP endpoint of the Dapr sidecar</li>
+ *   <li>{@code dapr.grpc.endpoint} - The gRPC endpoint of the Dapr sidecar</li>
+ * </ul>
+ *
+ * <p>This initializer is automatically registered when using {@link DaprSpringBootTest}.</p>

+ */ +public class DaprSpringBootContextInitializer + implements ApplicationContextInitializer { + + private static final String PROPERTY_SOURCE_NAME = "daprTestcontainersProperties"; + + @Override + public void initialize(ConfigurableApplicationContext applicationContext) { + DaprContainer container = findContainer(); + + if (container == null) { + throw new IllegalStateException( + "No DaprContainer found in registry. Ensure you are using @DaprSpringBootTest " + + "with a @DaprSidecarContainer annotated field." + ); + } + + // Create a property source with lazy resolution for endpoints + // server.port can be resolved immediately since it's set at container creation time + // Dapr endpoints are resolved lazily since the container may not be started yet + applicationContext.getEnvironment().getPropertySources() + .addFirst(new DaprLazyPropertySource(PROPERTY_SOURCE_NAME, container)); + } + + private DaprContainer findContainer() { + // Return the first container in the registry + // In a test scenario, there should only be one test class running at a time + return DaprSpringBootExtension.CONTAINER_REGISTRY.values().stream() + .findFirst() + .orElse(null); + } + + /** + * Custom PropertySource that lazily resolves Dapr container endpoints. + * This allows the endpoints to be resolved after the container has started. + */ + private static class DaprLazyPropertySource extends MapPropertySource { + private final Map> lazyProperties; + + DaprLazyPropertySource(String name, DaprContainer container) { + super(name, new HashMap<>()); + + this.lazyProperties = new HashMap<>(); + lazyProperties.put("server.port", container::getAppPort); + lazyProperties.put("dapr.http.endpoint", container::getHttpEndpoint); + lazyProperties.put("dapr.grpc.endpoint", container::getGrpcEndpoint); + } + + @Override + public Object getProperty(String name) { + Supplier supplier = lazyProperties.get(name); + if (supplier != null) { + return supplier.get(); + } + return null; + } + + @Override + public boolean containsProperty(String name) { + return lazyProperties.containsKey(name); + } + + @Override + public String[] getPropertyNames() { + return lazyProperties.keySet().toArray(new String[0]); + } + } +} diff --git a/sdk-tests/src/test/java/io/dapr/testcontainers/internal/spring/DaprSpringBootExtension.java b/sdk-tests/src/test/java/io/dapr/testcontainers/internal/spring/DaprSpringBootExtension.java new file mode 100644 index 000000000..d258f13df --- /dev/null +++ b/sdk-tests/src/test/java/io/dapr/testcontainers/internal/spring/DaprSpringBootExtension.java @@ -0,0 +1,89 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package io.dapr.testcontainers.internal.spring; + +import io.dapr.testcontainers.DaprContainer; +import io.dapr.testcontainers.internal.DaprSidecarContainer; +import org.junit.jupiter.api.extension.BeforeAllCallback; +import org.junit.jupiter.api.extension.ExtensionContext; +import org.junit.platform.commons.support.AnnotationSupport; + +import java.lang.reflect.Field; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +/** + * JUnit 5 extension that handles Dapr container setup for Spring Boot tests. + * + *

<p>This extension:</p>
+ *
+ * <ul>
+ *   <li>Discovers fields annotated with {@link DaprSidecarContainer}</li>
+ *   <li>Registers the container for property injection by {@link DaprSpringBootContextInitializer}</li>
+ * </ul>
+ *
+ * <p>This extension is automatically registered when using {@link DaprSpringBootTest}.</p>

+ */ +public class DaprSpringBootExtension implements BeforeAllCallback { + + /** + * Registry of DaprContainers by test class. Used by {@link DaprSpringBootContextInitializer} + * to configure Spring properties. + */ + static final Map, DaprContainer> CONTAINER_REGISTRY = new ConcurrentHashMap<>(); + + @Override + public void beforeAll(ExtensionContext context) throws Exception { + Class testClass = context.getRequiredTestClass(); + + // Find fields annotated with @DaprSidecarContainer + List containerFields = AnnotationSupport.findAnnotatedFields( + testClass, + DaprSidecarContainer.class, + field -> DaprContainer.class.isAssignableFrom(field.getType()) + ); + + if (containerFields.isEmpty()) { + throw new IllegalStateException( + "No @DaprSidecarContainer annotated field of type DaprContainer found in " + testClass.getName() + + ". Add a static field like: @DaprSidecarContainer private static final DaprContainer DAPR = " + + "DaprContainer.createForSpringBootTest(\"my-app\");" + ); + } + + if (containerFields.size() > 1) { + throw new IllegalStateException( + "Multiple @DaprSidecarContainer annotated fields found in " + testClass.getName() + + ". Only one DaprContainer per test class is supported." + ); + } + + Field containerField = containerFields.get(0); + containerField.setAccessible(true); + + DaprContainer container = (DaprContainer) containerField.get(null); + + if (container == null) { + throw new IllegalStateException( + "@DaprSidecarContainer field '" + containerField.getName() + "' is null in " + testClass.getName() + ); + } + + // Register container for the context initializer + CONTAINER_REGISTRY.put(testClass, container); + + // Note: Testcontainers.exposeHostPorts() is NOT called here because of timing requirements. + // It must be called in @BeforeEach, after the container starts to ensure proper Dapr-to-app communication. + } +} diff --git a/sdk-tests/src/test/java/io/dapr/testcontainers/internal/spring/DaprSpringBootTest.java b/sdk-tests/src/test/java/io/dapr/testcontainers/internal/spring/DaprSpringBootTest.java new file mode 100644 index 000000000..846b4adc2 --- /dev/null +++ b/sdk-tests/src/test/java/io/dapr/testcontainers/internal/spring/DaprSpringBootTest.java @@ -0,0 +1,80 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package io.dapr.testcontainers.internal.spring; + +import io.dapr.testcontainers.internal.DaprSidecarContainer; +import org.junit.jupiter.api.extension.ExtendWith; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.boot.test.context.SpringBootTest.WebEnvironment; +import org.springframework.core.annotation.AliasFor; +import org.springframework.test.context.ContextConfiguration; +import org.testcontainers.junit.jupiter.Testcontainers; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * Composed annotation that combines {@link SpringBootTest}, {@link Testcontainers}, + * and the necessary extensions for Dapr integration testing. + * + *

<p>This annotation simplifies the setup of Spring Boot integration tests with Dapr
+ * by handling port allocation, property configuration, and container lifecycle automatically.</p>
+ *
+ * <p>Example usage:</p>
+ *
+ * <pre>{@code
+ * @DaprSpringBootTest(classes = MyApplication.class)
+ * class MyDaprIT {
+ *
+ *     @DaprSidecarContainer
+ *     private static final DaprContainer DAPR = DaprContainerFactory.createForSpringBootTest("my-app")
+ *         .withComponent(new Component("statestore", "state.in-memory", "v1", Map.of()));
+ *
+ *     @Test
+ *     void testSomething() {
+ *         // Your test code here
+ *     }
+ * }
+ * }</pre>
+ * + * @see DaprSidecarContainer + * @see io.dapr.testcontainers.DaprContainer#createForSpringBootTest(String) + */ +@Target(ElementType.TYPE) +@Retention(RetentionPolicy.RUNTIME) +@ExtendWith(DaprSpringBootExtension.class) // Must be first to register container before Spring starts +@Testcontainers // Starts containers via @Container/@DaprSidecarContainer +@ContextConfiguration(initializers = DaprSpringBootContextInitializer.class) +@SpringBootTest(webEnvironment = WebEnvironment.DEFINED_PORT) // Starts Spring context last +public @interface DaprSpringBootTest { + + /** + * The application classes to use for the test. + * Alias for {@link SpringBootTest#classes()}. + * + * @return the application classes + */ + @AliasFor(annotation = SpringBootTest.class, attribute = "classes") + Class[] classes() default {}; + + /** + * Additional properties to configure the test. + * Alias for {@link SpringBootTest#properties()}. + * + * @return additional properties + */ + @AliasFor(annotation = SpringBootTest.class, attribute = "properties") + String[] properties() default {}; +} diff --git a/testcontainers-dapr/pom.xml b/testcontainers-dapr/pom.xml index 04d60ec32..0bd315754 100644 --- a/testcontainers-dapr/pom.xml +++ b/testcontainers-dapr/pom.xml @@ -15,16 +15,7 @@ jar - - org.junit.jupiter - junit-jupiter - test - - - org.mockito - mockito-core - test - + org.yaml snakeyaml @@ -37,6 +28,18 @@ com.fasterxml.jackson.core jackson-databind + + + + org.junit.jupiter + junit-jupiter + test + + + org.mockito + mockito-core + test + From fa9084efad6f9bf6480e86a0fc30394c28c88a76 Mon Sep 17 00:00:00 2001 From: salaboy Date: Wed, 7 Jan 2026 10:55:35 +0100 Subject: [PATCH 18/18] adding constant for dashboard Signed-off-by: salaboy --- .github/workflows/build.yml | 4 +++- .../java/io/dapr/testcontainers/DaprContainerConstants.java | 2 ++ .../io/dapr/testcontainers/WorkflowDashboardContainer.java | 2 +- 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 17c5c13b5..d6fe51579 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -32,7 +32,9 @@ jobs: - name: Run tests run: ./mvnw clean install -B -q -DskipITs=true - name: Codecov - uses: codecov/codecov-action@v5.5.1 + uses: codecov/codecov-action@v5.5.2 + with: + token: ${{ secrets.CODECOV_TOKEN }} - name: Upload test report for sdk uses: actions/upload-artifact@v6 with: diff --git a/testcontainers-dapr/src/main/java/io/dapr/testcontainers/DaprContainerConstants.java b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/DaprContainerConstants.java index f32607336..2e2ccea79 100644 --- a/testcontainers-dapr/src/main/java/io/dapr/testcontainers/DaprContainerConstants.java +++ b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/DaprContainerConstants.java @@ -15,7 +15,9 @@ public interface DaprContainerConstants { String DAPR_VERSION = "1.16.0-rc.5"; + String DAPR_WORKFLOWS_DASHBOARD_VERSION = "0.0.1"; String DAPR_RUNTIME_IMAGE_TAG = "daprio/daprd:" + DAPR_VERSION; String DAPR_PLACEMENT_IMAGE_TAG = "daprio/placement:" + DAPR_VERSION; String DAPR_SCHEDULER_IMAGE_TAG = "daprio/scheduler:" + DAPR_VERSION; + String DAPR_WORKFLOWS_DASHBOARD = "ghcr.io/diagridio/diagrid-dashboard:" + DAPR_WORKFLOWS_DASHBOARD_VERSION; } diff --git a/testcontainers-dapr/src/main/java/io/dapr/testcontainers/WorkflowDashboardContainer.java b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/WorkflowDashboardContainer.java index 950b609b4..5138390a7 100644 --- 
a/testcontainers-dapr/src/main/java/io/dapr/testcontainers/WorkflowDashboardContainer.java +++ b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/WorkflowDashboardContainer.java @@ -31,7 +31,7 @@ public class WorkflowDashboardContainer extends GenericContainer COMPONENT_CONVERTER = new ComponentYamlConverter(YAML_MAPPER); public static final DockerImageName DEFAULT_IMAGE_NAME = DockerImageName - .parse("ghcr.io/diagridio/diagrid-dashboard:0.0.1"); + .parse(DaprContainerConstants.DAPR_WORKFLOWS_DASHBOARD); private int dashboardPort = 8080; private Component stateStoreComponent;
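
For reference, a minimal integration test that combines @DaprSpringBootTest, @DaprSidecarContainer and DaprContainerFactory could look like the sketch below. It is not part of the patches above: TestApplication, the test class name, and the app/component names are placeholders, and the body only mirrors the rewritten DaprActorsIT and the Javadoc examples.

package io.dapr.it.testcontainers;

import io.dapr.testcontainers.Component;
import io.dapr.testcontainers.DaprContainer;
import io.dapr.testcontainers.internal.DaprContainerFactory;
import io.dapr.testcontainers.internal.DaprSidecarContainer;
import io.dapr.testcontainers.internal.spring.DaprSpringBootTest;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

import java.util.Map;

// TestApplication is a placeholder; substitute the Spring Boot app under test.
@DaprSpringBootTest(classes = TestApplication.class)
class ExampleDaprIT {

  // The factory allocates a free app port and sets the app channel address,
  // so no @DynamicPropertySource or manual port handling is needed.
  @DaprSidecarContainer
  private static final DaprContainer DAPR = DaprContainerFactory.createForSpringBootTest("example-app")
      .withComponent(new Component("kvstore", "state.in-memory", "v1", Map.of("actorStateStore", "true")));

  @BeforeEach
  void setUp() {
    // Must run after the container has started so the sidecar can reach the app.
    org.testcontainers.Testcontainers.exposeHostPorts(DAPR.getAppPort());
  }

  @Test
  void contextStartsWithDaprSidecar() {
    // server.port, dapr.http.endpoint and dapr.grpc.endpoint are injected
    // by DaprSpringBootContextInitializer before the Spring context starts.
  }
}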