From efb742a4c0c222b0efcf2136b1dd382c751eb5aa Mon Sep 17 00:00:00 2001 From: Annie Liang Date: Wed, 4 Mar 2026 12:51:01 -0800 Subject: [PATCH 1/5] Refactor azure-cosmos-benchmark: move all configs from CLI to workload config JSON - Remove ~50 CLI parameters from Configuration.java; all workload/connection params now come from the workload config JSON file (TenantWorkloadConfig) - Configuration.java reduced from 1057 to 131 lines with only 6 CLI params: workloadConfig, cycles, settleTimeMs, gcBetweenCycles, suppressCleanup, help - Move metrics/reporting/result-upload configs into a nested 'metrics' section in the workload config JSON with 'resultUpload' and 'runMetadata' sub-objects - Remove all Graphite reporting support (use Application Insights instead) - Remove TenantWorkloadConfig.fromConfiguration() factory method - Make workload config file mandatory (-workloadConfig flag) - Rename -tenantsFile CLI flag to -workloadConfig - Update all consumer classes (29 files) to use TenantWorkloadConfig for workload params and BenchmarkConfig for global/reporting params Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- .../cosmos/benchmark/BenchmarkConfig.java | 127 ++- .../cosmos/benchmark/BenchmarkHelper.java | 7 +- .../benchmark/BenchmarkOrchestrator.java | 51 +- .../azure/cosmos/benchmark/Configuration.java | 950 +----------------- .../java/com/azure/cosmos/benchmark/Main.java | 93 +- .../benchmark/ScheduledReporterFactory.java | 36 +- .../azure/cosmos/benchmark/SyncBenchmark.java | 186 ++-- .../cosmos/benchmark/SyncReadBenchmark.java | 4 +- .../cosmos/benchmark/SyncWriteBenchmark.java | 12 +- .../benchmark/TenantWorkloadConfig.java | 153 +-- .../benchmark/ctl/AsyncCtlWorkload.java | 100 +- .../encryption/AsyncEncryptionBenchmark.java | 158 ++- .../AsyncEncryptionQueryBenchmark.java | 27 +- ...ncryptionQuerySinglePartitionMultiple.java | 7 +- .../AsyncEncryptionReadBenchmark.java | 11 +- .../AsyncEncryptionWriteBenchmark.java | 21 +- 
.../linkedin/AsyncClientFactory.java | 12 +- .../linkedin/CollectionResourceManager.java | 10 +- .../linkedin/CompositeReadTestRunner.java | 6 +- .../cosmos/benchmark/linkedin/DataLoader.java | 30 +- .../linkedin/DatabaseResourceManager.java | 6 +- .../benchmark/linkedin/GetTestRunner.java | 6 +- .../benchmark/linkedin/LICtlWorkload.java | 46 +- .../benchmark/linkedin/QueryTestRunner.java | 6 +- .../cosmos/benchmark/linkedin/TestRunner.java | 38 +- .../data/InvitationsEntityConfiguration.java | 4 +- .../ReadMyWritesConsistencyTest.java | 40 +- ...ntWorkloadConfigFromConfigurationTest.java | 147 --- .../azure/cosmos/benchmark/WorkflowTest.java | 132 ++- 29 files changed, 635 insertions(+), 1791 deletions(-) delete mode 100644 sdk/cosmos/azure-cosmos-benchmark/src/test/java/com/azure/cosmos/benchmark/TenantWorkloadConfigFromConfigurationTest.java diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/BenchmarkConfig.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/BenchmarkConfig.java index 4f2206988d30..a9e9d10ec3e8 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/BenchmarkConfig.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/BenchmarkConfig.java @@ -81,44 +81,19 @@ public static BenchmarkConfig fromConfiguration(Configuration cfg) throws IOExce } config.gcBetweenCycles = cfg.isGcBetweenCycles(); - config.enableJvmStats = cfg.isEnableJvmStats(); - config.enableNettyHttpMetrics = cfg.isEnableNettyHttpMetrics(); - - // Reporting - config.reportingDirectory = cfg.getReportingDirectory() != null - ? 
cfg.getReportingDirectory().getPath() : null; - config.printingInterval = cfg.getPrintingInterval(); - config.resultUploadEndpoint = cfg.getServiceEndpointForRunResultsUploadAccount(); - config.resultUploadKey = cfg.getMasterKeyForRunResultsUploadAccount(); - config.resultUploadDatabase = cfg.getResultUploadDatabase(); - config.resultUploadContainer = cfg.getResultUploadContainer(); - - // Run metadata - config.testVariationName = cfg.getTestVariationName(); - config.branchName = cfg.getBranchName(); - config.commitId = cfg.getCommitId(); - - // Tenants - String tenantsFile = cfg.getTenantsFile(); - if (tenantsFile != null && new File(tenantsFile).exists()) { - // tenants.json takes priority over CLI workload args (operation, concurrency, etc.) - logger.info("Loading tenant configs from {}. " + - "Workload parameters from tenants.json will take priority over CLI args.", tenantsFile); - config.tenantWorkloads = TenantWorkloadConfig.parseTenantsFile(new File(tenantsFile)); - - // Extract JVM-global system properties from globalDefaults - config.loadGlobalSystemPropertiesFromTenantsFile(new File(tenantsFile)); - } else { - // Single tenant from CLI args - use fromConfiguration() to copy ALL fields - config.tenantWorkloads = Collections.singletonList( - TenantWorkloadConfig.fromConfiguration(cfg)); - - // JVM-global system properties from CLI - config.isPartitionLevelCircuitBreakerEnabled = cfg.isPartitionLevelCircuitBreakerEnabled(); - config.isPerPartitionAutomaticFailoverRequired = cfg.isPerPartitionAutomaticFailoverRequired(); - config.minConnectionPoolSizePerEndpoint = cfg.getMinConnectionPoolSizePerEndpoint(); + + // Workload config - ALWAYS from config file + String workloadConfigPath = cfg.getWorkloadConfig(); + if (workloadConfigPath == null || !new File(workloadConfigPath).exists()) { + throw new IllegalArgumentException( + "A workload configuration file is required. Use -workloadConfig to specify the path." + + (workloadConfigPath != null ? 
" File not found: " + workloadConfigPath : "")); } + logger.info("Loading workload configs from {}.", workloadConfigPath); + config.tenantWorkloads = TenantWorkloadConfig.parseWorkloadConfig(new File(workloadConfigPath)); + config.loadGlobalSystemPropertiesFromWorkloadConfig(new File(workloadConfigPath)); + return config; } @@ -161,28 +136,76 @@ public String toString() { } /** - * Reads JVM-global system properties from the globalDefaults section of a tenants.json file. + * Reads JVM-global system properties from the globalDefaults section of the workload config file. * These properties are JVM-wide and cannot vary per tenant. */ - private void loadGlobalSystemPropertiesFromTenantsFile(File tenantsFile) throws IOException { + private void loadGlobalSystemPropertiesFromWorkloadConfig(File workloadConfigFile) throws IOException { ObjectMapper mapper = new ObjectMapper(); - JsonNode root = mapper.readTree(tenantsFile); + JsonNode root = mapper.readTree(workloadConfigFile); + + // JVM-global system properties from globalDefaults JsonNode defaults = root.get("globalDefaults"); - if (defaults == null || !defaults.isObject()) { - return; + if (defaults != null && defaults.isObject()) { + if (defaults.has("isPartitionLevelCircuitBreakerEnabled")) { + isPartitionLevelCircuitBreakerEnabled = + Boolean.parseBoolean(defaults.get("isPartitionLevelCircuitBreakerEnabled").asText()); + } + if (defaults.has("isPerPartitionAutomaticFailoverRequired")) { + isPerPartitionAutomaticFailoverRequired = + Boolean.parseBoolean(defaults.get("isPerPartitionAutomaticFailoverRequired").asText()); + } + if (defaults.has("minConnectionPoolSizePerEndpoint")) { + minConnectionPoolSizePerEndpoint = + Integer.parseInt(defaults.get("minConnectionPoolSizePerEndpoint").asText()); + } } - if (defaults.has("isPartitionLevelCircuitBreakerEnabled")) { - isPartitionLevelCircuitBreakerEnabled = - Boolean.parseBoolean(defaults.get("isPartitionLevelCircuitBreakerEnabled").asText()); - } - if 
(defaults.has("isPerPartitionAutomaticFailoverRequired")) { - isPerPartitionAutomaticFailoverRequired = - Boolean.parseBoolean(defaults.get("isPerPartitionAutomaticFailoverRequired").asText()); - } - if (defaults.has("minConnectionPoolSizePerEndpoint")) { - minConnectionPoolSizePerEndpoint = - Integer.parseInt(defaults.get("minConnectionPoolSizePerEndpoint").asText()); + // Metrics, reporting, and result upload from top-level "metrics" section + JsonNode metrics = root.get("metrics"); + if (metrics != null && metrics.isObject()) { + if (metrics.has("enableJvmStats")) { + enableJvmStats = Boolean.parseBoolean(metrics.get("enableJvmStats").asText()); + } + if (metrics.has("enableNettyHttpMetrics")) { + enableNettyHttpMetrics = Boolean.parseBoolean(metrics.get("enableNettyHttpMetrics").asText()); + } + if (metrics.has("printingInterval")) { + printingInterval = Integer.parseInt(metrics.get("printingInterval").asText()); + } + if (metrics.has("reportingDirectory")) { + reportingDirectory = metrics.get("reportingDirectory").asText(); + } + + // Result upload sub-section + JsonNode resultUpload = metrics.get("resultUpload"); + if (resultUpload != null && resultUpload.isObject()) { + if (resultUpload.has("serviceEndpoint")) { + resultUploadEndpoint = resultUpload.get("serviceEndpoint").asText(); + } + if (resultUpload.has("masterKey")) { + resultUploadKey = resultUpload.get("masterKey").asText(); + } + if (resultUpload.has("database")) { + resultUploadDatabase = resultUpload.get("database").asText(); + } + if (resultUpload.has("container")) { + resultUploadContainer = resultUpload.get("container").asText(); + } + } + + // Run metadata sub-section + JsonNode runMetadata = metrics.get("runMetadata"); + if (runMetadata != null && runMetadata.isObject()) { + if (runMetadata.has("testVariationName")) { + testVariationName = runMetadata.get("testVariationName").asText(); + } + if (runMetadata.has("branchName")) { + branchName = runMetadata.get("branchName").asText(); + } + if 
(runMetadata.has("commitId")) { + commitId = runMetadata.get("commitId").asText(); + } + } } } } diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/BenchmarkHelper.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/BenchmarkHelper.java index b4061b22f708..2bd8b5bd2b8a 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/BenchmarkHelper.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/BenchmarkHelper.java @@ -20,10 +20,9 @@ public static PojoizedJson generateDocument(String idString, String dataFieldVal return instance; } - public static boolean shouldContinue(long startTimeMillis, long iterationCount, Configuration configuration) { - - Duration maxDurationTime = configuration.getMaxRunningTimeDuration(); - int maxNumberOfOperations = configuration.getNumberOfOperations(); + public static boolean shouldContinue(long startTimeMillis, long iterationCount, TenantWorkloadConfig workloadConfig) { + Duration maxDurationTime = workloadConfig.getMaxRunningTimeDuration(); + int maxNumberOfOperations = workloadConfig.getNumberOfOperations(); if (maxDurationTime == null) { return iterationCount < maxNumberOfOperations; diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/BenchmarkOrchestrator.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/BenchmarkOrchestrator.java index 2f243629d336..1b26bbb01666 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/BenchmarkOrchestrator.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/BenchmarkOrchestrator.java @@ -354,12 +354,55 @@ private MeterRegistry buildCosmosMicrometerRegistry() { StringUtils.defaultString( com.google.common.base.Strings.emptyToNull( System.getenv("APPLICATIONINSIGHTS_CONNECTION_STRING")), null)); - if (instrumentationKey != null || 
appInsightsConnStr != null) { - Configuration tempCfg = new Configuration(); - return tempCfg.getAzureMonitorMeterRegistry(); + if (instrumentationKey == null && appInsightsConnStr == null) { + return null; } - return null; + java.time.Duration step = java.time.Duration.ofSeconds( + Integer.getInteger("azure.cosmos.monitoring.azureMonitor.step", 10)); + String testCategoryTag = System.getProperty("azure.cosmos.monitoring.azureMonitor.testCategory"); + boolean enabled = !Boolean.getBoolean("azure.cosmos.monitoring.azureMonitor.disabled"); + + final String connStr = appInsightsConnStr; + final String instrKey = instrumentationKey; + final io.micrometer.azuremonitor.AzureMonitorConfig amConfig = new io.micrometer.azuremonitor.AzureMonitorConfig() { + @Override + public String get(String key) { return null; } + + @Override + public String instrumentationKey() { + return connStr != null ? null : instrKey; + } + + @Override + public String connectionString() { return connStr; } + + @Override + public java.time.Duration step() { return step; } + + @Override + public boolean enabled() { return enabled; } + }; + + String roleName = System.getenv("APPLICATIONINSIGHTS_ROLE_NAME"); + if (roleName != null) { + com.microsoft.applicationinsights.TelemetryConfiguration.getActive().setRoleName(roleName); + } + + MeterRegistry registry = new io.micrometer.azuremonitor.AzureMonitorMeterRegistry( + amConfig, io.micrometer.core.instrument.Clock.SYSTEM); + java.util.List globalTags = new java.util.ArrayList<>(); + if (!com.google.common.base.Strings.isNullOrEmpty(testCategoryTag)) { + globalTags.add(io.micrometer.core.instrument.Tag.of("TestCategory", testCategoryTag)); + } + + String roleInstance = System.getenv("APPLICATIONINSIGHTS_ROLE_INSTANCE"); + if (roleName != null) { + globalTags.add(io.micrometer.core.instrument.Tag.of("cloud_RoleInstance", roleInstance)); + } + + registry.config().commonTags(globalTags); + return registry; } // ======== Global system properties ======== diff 
--git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/Configuration.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/Configuration.java index 940ca2e07b81..884e14fb641a 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/Configuration.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/Configuration.java @@ -3,118 +3,20 @@ package com.azure.cosmos.benchmark; -import com.azure.cosmos.ConnectionMode; -import com.azure.cosmos.ConsistencyLevel; import com.beust.jcommander.IStringConverter; import com.beust.jcommander.Parameter; -import com.beust.jcommander.ParameterException; -import com.google.common.base.Strings; -import com.google.common.net.HostAndPort; -import com.google.common.net.PercentEscaper; -import com.microsoft.applicationinsights.TelemetryConfiguration; -import io.micrometer.azuremonitor.AzureMonitorConfig; -import io.micrometer.azuremonitor.AzureMonitorMeterRegistry; -import io.micrometer.core.instrument.Clock; -import io.micrometer.core.instrument.MeterRegistry; -import io.micrometer.core.instrument.Tag; -import io.micrometer.core.instrument.config.NamingConvention; -import io.micrometer.core.lang.Nullable; -import io.micrometer.graphite.GraphiteConfig; -import io.micrometer.graphite.GraphiteMeterRegistry; -import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.builder.ToStringBuilder; import org.apache.commons.lang3.builder.ToStringStyle; -import java.io.File; -import java.net.InetAddress; -import java.net.UnknownHostException; -import java.time.Duration; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; import java.util.function.Function; public class Configuration { - public static final String SUCCESS_COUNTER_METER_NAME = "#Successful Operations"; - public static final String FAILURE_COUNTER_METER_NAME = "#Unsuccessful Operations"; - public static final String 
LATENCY_METER_NAME = "Latency"; - public final static String DEFAULT_PARTITION_KEY_PATH = "/pk"; - private final static int DEFAULT_GRAPHITE_SERVER_PORT = 2003; - private MeterRegistry azureMonitorMeterRegistry; - private MeterRegistry graphiteMeterRegistry; - - @Parameter(names = "-serviceEndpoint", description = "Service Endpoint") - private String serviceEndpoint; - - @Parameter(names = "-masterKey", description = "Master Key") - private String masterKey; - - @Parameter(names = "-serviceEndpointForResultsUploadAccount", description = "Service Endpoint for run results upload account") - private String serviceEndpointForRunResultsUploadAccount; - - @Parameter(names = "-masterKeyForResultsUploadAccount", description = "Master Key for run results upload account") - private String masterKeyForRunResultsUploadAccount; - - @Parameter(names = "-databaseId", description = "Database ID") - private String databaseId; - - @Parameter(names = "-collectionId", description = "Collection ID") - private String collectionId; - - @Parameter(names = "-useNameLink", description = "Use name Link") - private boolean useNameLink = false; - - @Parameter(names = "-documentDataFieldSize", description = "Length of a document data field in characters (16-bit)") - private int documentDataFieldSize = 20; - - @Parameter(names = "-documentDataFieldCount", description = "Number of data fields in document") - private int documentDataFieldCount = 5; - - @Parameter(names = "-maxConnectionPoolSize", description = "Max Connection Pool Size") - private Integer maxConnectionPoolSize = 1000; - - @Parameter(names = "-connectionSharingAcrossClientsEnabled", description = "Enable connection sharing across CosmosClient instances (Gateway mode). 
Reduces connection count for multi-tenant scenarios.") - private boolean connectionSharingAcrossClientsEnabled = false; - - @Parameter(names = "-diagnosticsThresholdDuration", description = "Latency threshold for printing diagnostics", converter = DurationConverter.class) - private Duration diagnosticsThresholdDuration = Duration.ofSeconds(60); - - @Parameter(names = "-disablePassingPartitionKeyAsOptionOnWrite", description = "Disables passing partition in request options for write operation;" + - " in this case, json will be parsed and partition key will be extracted (this requires more computational overhead).") - private boolean disablePassingPartitionKeyAsOptionOnWrite = false; - - @Parameter(names = "-consistencyLevel", description = "Consistency Level", converter = ConsistencyLevelConverter.class) - private ConsistencyLevel consistencyLevel = ConsistencyLevel.SESSION; - - @Parameter(names = "-connectionMode", description = "Connection Mode") - private ConnectionMode connectionMode = ConnectionMode.DIRECT; - - @Parameter(names = "-graphiteEndpoint", description = "Graphite endpoint") - private String graphiteEndpoint; - - @Parameter(names = "-enableJvmStats", description = "Enables JVM Stats") - private boolean enableJvmStats; - - @Parameter(names = "-enableNettyHttpMetrics", description = "Enables Reactor Netty HTTP client metrics (connection pool gauges via COSMOS.NETTY_HTTP_CLIENT_METRICS_ENABLED)") - private boolean enableNettyHttpMetrics; - - @Parameter(names = "-throughput", description = "provisioned throughput for test container") - private int throughput = 100000; - - @Parameter(names = "-numberOfCollectionForCtl", description = "Number of collections for ctl load") - private int numberOfCollectionForCtl = 4; - - @Parameter(names = "-readWriteQueryReadManyPct", description = "Comma separated read write query readMany workload percent") - private String readWriteQueryReadManyPct = "90,8,1,1"; - - @Parameter(names = "-manageDatabase", description = 
"Control switch for creating/deleting underlying database resource") - private boolean manageDatabase = false; @Parameter(names = "-suppressCleanup", description = "Skip deleting database/container on shutdown (for multi-cycle CHURN)") private boolean suppressCleanup = false; - @Parameter(names = "-tenantsFile", description = "Path to tenants.json for multi-tenant benchmarks") - private String tenantsFile; + @Parameter(names = "-workloadConfig", description = "Path to workload configuration JSON file") + private String workloadConfig; @Parameter(names = "-cycles", description = "Number of create/destroy cycles (1 = single run)") private int cycles = 1; @@ -125,179 +27,8 @@ public class Configuration { @Parameter(names = "-gcBetweenCycles", description = "Force GC during settle period between cycles") private boolean gcBetweenCycles = true; - - @Parameter(names = "-preferredRegionsList", description = "Comma separated preferred regions list") - private String preferredRegionsList; - - @Parameter(names = "-encryptedStringFieldCount", description = "Number of string field that need to be encrypted") - private int encryptedStringFieldCount = 1; - - @Parameter(names = "-encryptedLongFieldCount", description = "Number of long field that need to be encrypted") - private int encryptedLongFieldCount = 0; - - @Parameter(names = "-encryptedDoubleFieldCount", description = "Number of double field that need to be encrypted") - private int encryptedDoubleFieldCount = 0; - - @Parameter(names = "-encryptionEnabled", description = "Control switch to enable the encryption operation") - private boolean encryptionEnabled = false; - - @Parameter(names = "-defaultLog4jLoggerEnabled", description = "Control switch to enable the default log4j logger in 4.42 and above") - private String defaultLog4jLoggerEnabled = String.valueOf(false); - - - @Parameter(names = "-tupleSize", description = "Number of cosmos identity tuples to be queried using readMany") - private int tupleSize = 1; - - 
@Parameter(names = "-isProactiveConnectionManagementEnabled", description = "Mode which denotes whether connections are proactively established during warm up.") - private String isProactiveConnectionManagementEnabled = String.valueOf(false); - - @Parameter(names = "-isUseUnWarmedUpContainer", description = "Mode which denotes whether to use a container with no warmed up connections. NOTE: " + - "To be used when isProactiveConnectionManagementEnabled is set to false and isUseUnWarmedUpContainer is set to true") - private String isUseUnWarmedUpContainer = String.valueOf(false); - - @Parameter(names = "-proactiveConnectionRegionsCount", description = "Number of regions where endpoints are to be proactively connected to.") - private int proactiveConnectionRegionsCount = 1; - - @Parameter(names = "-minConnectionPoolSizePerEndpoint", description = "Minimum number of connections to establish per endpoint for proactive connection management") - private int minConnectionPoolSizePerEndpoint = 0; - - @Parameter(names = "-aggressiveWarmupDuration", description = "The duration for which proactive connections are aggressively established", converter = DurationConverter.class) - private Duration aggressiveWarmupDuration = Duration.ZERO; - - @Parameter(names = "-isRegionScopedSessionContainerEnabled", description = "A flag to denote whether region scoped session container is enabled") - private String isRegionScopedSessionContainerEnabled = String.valueOf(false); - - @Parameter(names = "isPartitionLevelCircuitBreakerEnabled", description = "A flag to denote whether partition level circuit breaker is enabled.") - private String isPartitionLevelCircuitBreakerEnabled = String.valueOf(true); - - @Parameter(names = "-aadLoginEndpoint", description = "AAD login endpoint for this configuration instance. 
Overrides COSMOS.AAD_LOGIN_ENDPOINT / COSMOS_AAD_LOGIN_ENDPOINT.") - private String aadLoginEndpoint; - - @Parameter(names = "-aadTenantId", description = "AAD tenant ID for this configuration instance. Overrides COSMOS.AAD_TENANT_ID / COSMOS_AAD_TENANT_ID.") - private String aadTenantId; - - @Parameter(names = "-aadManagedIdentityClientId", description = "AAD managed identity client ID for this configuration instance. Overrides COSMOS.AAD_MANAGED_IDENTITY_ID / COSMOS_AAD_MANAGED_IDENTITY_ID.") - private String aadManagedIdentityClientId; - - @Parameter(names = "-isManagedIdentityRequired", description = "A flag to denote whether benchmark-specific CosmosClient instance should use Managed Identity to authenticate.") - private String isManagedIdentityRequired = String.valueOf(false); - - // ── Multi-tenancy orchestrator flags (not CLI — set programmatically) ── - - - @Parameter(names = "-isPerPartitionAutomaticFailoverRequired", description = "A flag to denote whether per-partition automatic failover is required.") - private String isPerPartitionAutomaticFailoverRequired = String.valueOf(true); - - @Parameter(names = "-operation", description = "Type of Workload:\n" - + "\tReadThroughput- run a READ workload that prints only throughput *\n" - + "\tWriteThroughput - run a Write workload that prints only throughput\n" - + "\tReadLatency - run a READ workload that prints both throughput and latency *\n" - + "\tWriteLatency - run a Write workload that prints both throughput and latency\n" - + "\tQueryInClauseParallel - run a 'Select * from c where c.pk in (....)' workload that prints latency\n" - + "\tQueryCross - run a 'Select * from c where c._rid = SOME_RID' workload that prints throughput\n" - + "\tQuerySingle - run a 'Select * from c where c.pk = SOME_PK' workload that prints throughput\n" - + "\tQuerySingleMany - run a 'Select * from c where c.pk = \"pk\"' workload that prints throughput\n" - + "\tQueryParallel - run a 'Select * from c' workload that prints 
throughput\n" - + "\tQueryOrderby - run a 'Select * from c order by c._ts' workload that prints throughput\n" - + "\tQueryAggregate - run a 'Select value max(c._ts) from c' workload that prints throughput\n" - + "\tQueryAggregateTopOrderby - run a 'Select top 1 value count(c) from c order by c._ts' workload that prints throughput\n" - + "\tQueryTopOrderby - run a 'Select top 1000 * from c order by c._ts' workload that prints throughput\n" - + "\tMixed - runa workload of 90 reads, 9 writes and 1 QueryTopOrderby per 100 operations *\n" - + "\tReadMyWrites - run a workflow of writes followed by reads and queries attempting to read the write.*\n" - + "\tCtlWorkload - run a ctl workflow.*\n" - + "\tReadAllItemsOfLogicalPartition - run a workload that uses readAllItems for a logical partition and prints throughput\n" - + "\n\t* writes 10k documents initially, which are used in the reads" - + "\tLinkedInCtlWorkload - ctl for LinkedIn workload.*\n" - + "\tReadManyLatency - run a workload for readMany for a finite number of cosmos identity tuples that prints both throughput and latency*\n" - + "\tReadManyThroughput - run a workload for readMany for a finite no of cosmos identity tuples that prints throughput*\n", - converter = Operation.OperationTypeConverter.class) - private Operation operation = Operation.WriteThroughput; - - @Parameter(names = "-concurrency", description = "Degree of Concurrency in Inserting Documents." 
- + " If this value is not specified, the max connection pool size will be used as the concurrency level.") - private Integer concurrency; - - @Parameter(names = "-numberOfOperations", description = "Total NUMBER Of Documents To Insert") - private int numberOfOperations = 100000; - - public Boolean isManagedIdentityRequired() { - return Boolean.parseBoolean(this.isManagedIdentityRequired); - } - - public Boolean isPerPartitionAutomaticFailoverRequired() { - return Boolean.parseBoolean(this.isPerPartitionAutomaticFailoverRequired); - } - - static class DurationConverter implements IStringConverter { - @Override - public Duration convert(String value) { - if (value == null) { - return null; - } - - return Duration.parse(value); - } - } - - @Parameter(names = "-maxRunningTimeDuration", description = "Max Running Time Duration", converter = DurationConverter.class) - private Duration maxRunningTimeDuration; - - @Parameter(names = "-printingInterval", description = "Interval of time after which Metrics should be printed (seconds)") - private int printingInterval = 10; - - @Parameter(names = "-reportingDirectory", description = "Location of a directory to which metrics should be printed as comma-separated values") - private String reportingDirectory = null; - - @Parameter(names = "-numberOfPreCreatedDocuments", description = "Total NUMBER Of Documents To pre create for a read workload to use") - private int numberOfPreCreatedDocuments = 1000; - - @Parameter(names = "-sparsityWaitTime", description = "Sleep time before making each request. Default is no sleep time." - + " NOTE: For now only ReadLatency and ReadThroughput support this." 
- + " Format: A string representation of this duration using ISO-8601 seconds based representation, such as " - + "PT20.345S (20.345 seconds), PT15M (15 minutes)", converter = DurationConverter.class) - private Duration sparsityWaitTime = null; - - @Parameter(names = "-skipWarmUpOperations", description = "the number of operations to be skipped before starting perf numbers.") - private int skipWarmUpOperations = 0; - - @Parameter(names = "-useSync", description = "Uses Sync API") - private boolean useSync = false; - - @Parameter(names = "-contentResponseOnWriteEnabled", description = "if set to false, does not returns content response on document write operations") - private String contentResponseOnWriteEnabled = String.valueOf(true); - - @Parameter(names = "-bulkloadBatchSize", description = "Control the number of documents uploaded in each BulkExecutor load iteration (Only supported for the LinkedInCtlWorkload)") - private int bulkloadBatchSize = 200000; - - @Parameter(names = "-testScenario", description = "The test scenario (GET, QUERY) for the LinkedInCtlWorkload") - private String testScenario = "GET"; - - @Parameter(names = "-applicationName", description = "The application name suffix in the user agent header") - private String applicationName = ""; - - @Parameter(names = "-accountNameInGraphiteReporter", description = "if set, account name with be appended in graphite reporter") - private boolean accountNameInGraphiteReporter = false; - - @Parameter(names = "-pointLatencyThresholdMs", description = "Latency threshold for point operations") - private int pointLatencyThresholdMs = -1; - - @Parameter(names = "-nonPointLatencyThresholdMs", description = "Latency threshold for non-point operations") - private int nonPointLatencyThresholdMs = -1; - - @Parameter(names = "-testVariationName", description = "An identifier for the test variation") - private String testVariationName = ""; - - @Parameter(names = "-branchName", description = "The branch name form where 
the source code being tested was built") - private String branchName = ""; - - @Parameter(names = "-commitId", description = "A commit identifier showing the version of the source code being tested") - private String commitId = ""; - - @Parameter(names = "-resultUploadDatabase", description = "The name of the database into which to upload the results") - private String resultUploadDatabase = ""; - - @Parameter(names = "-resultUploadContainer", description = "AThe name of the container inot which to upload the results") - private String resultUploadContainer = ""; + @Parameter(names = {"-h", "-help", "--help"}, description = "Help", help = true) + private boolean help = false; public enum Environment { Daily, // This is the CTL environment where we run the workload for a fixed number of hours @@ -315,319 +46,16 @@ public Environment convert(String value) { } } - @Parameter(names = "-environment", description = "The CTL Environment we are validating the workload", - converter = Environment.EnvironmentConverter.class) - private Environment environment = Environment.Daily; - - @Parameter(names = {"-h", "-help", "--help"}, description = "Help", help = true) - private boolean help = false; - - // Operation enum extracted to standalone Operation.java - - private static ConsistencyLevel fromString(String code) { - for (ConsistencyLevel output : ConsistencyLevel.values()) { - if (output.toString().equalsIgnoreCase(code)) { - return output; - } - } - return null; - } - - static class ConsistencyLevelConverter implements IStringConverter { - - /* - * (non-Javadoc) - * - * @see com.beust.jcommander.IStringConverter#convert(java.lang.STRING) - */ - @Override - public ConsistencyLevel convert(String value) { - ConsistencyLevel ret = fromString(value); - if (ret == null) { - throw new ParameterException("Value " + value + " can not be converted to ClientType. 
" - + "Available values are: " + Arrays.toString(Operation.values())); - } - return ret; - } - } - - public int getSkipWarmUpOperations() { - return skipWarmUpOperations; - } - - public Duration getSparsityWaitTime() { - return sparsityWaitTime; - } - - public boolean isDisablePassingPartitionKeyAsOptionOnWrite() { - return disablePassingPartitionKeyAsOptionOnWrite; - } - - public boolean isSync() { - return useSync; - } - - public boolean isAccountNameInGraphiteReporter() { - return accountNameInGraphiteReporter; - } - - public Duration getMaxRunningTimeDuration() { - return maxRunningTimeDuration; - } - - public Operation getOperationType() { - return operation; - } - - public int getNumberOfOperations() { - return numberOfOperations; - } - - public int getThroughput() { - return throughput; - } - - public String getServiceEndpoint() { - return serviceEndpoint; - } - - public String getMasterKey() { - return masterKey; - } - - public String getServiceEndpointForRunResultsUploadAccount() { - return serviceEndpointForRunResultsUploadAccount; - } - - public String getMasterKeyForRunResultsUploadAccount() { - return masterKeyForRunResultsUploadAccount; - } - - public String getApplicationName() { - return applicationName; - } - - public void setApplicationName(String applicationName) { - this.applicationName = applicationName; - } - - public void setServiceEndpoint(String serviceEndpoint) { - this.serviceEndpoint = serviceEndpoint; - } - - public void setMasterKey(String masterKey) { - this.masterKey = masterKey; - } - - public void setDatabaseId(String databaseId) { - this.databaseId = databaseId; - } - - public void setCollectionId(String collectionId) { - this.collectionId = collectionId; - } - - public void setOperation(Operation operation) { - this.operation = operation; - } - - public void setOperationFromString(String operationName) { - Operation op = Operation.fromString(operationName); - if (op != null) { - this.operation = op; - } - } - - public void 
setConcurrency(int concurrency) { - this.concurrency = concurrency; - } - - public void setConnectionMode(ConnectionMode connectionMode) { - this.connectionMode = connectionMode; - } - - public void setMaxConnectionPoolSize(int maxConnectionPoolSize) { - this.maxConnectionPoolSize = maxConnectionPoolSize; - } - - public void setNumberOfOperations(int numberOfOperations) { - this.numberOfOperations = numberOfOperations; - } - - public void setNumberOfPreCreatedDocuments(int numberOfPreCreatedDocuments) { - this.numberOfPreCreatedDocuments = numberOfPreCreatedDocuments; - } - - public void setConsistencyLevel(ConsistencyLevel consistencyLevel) { - this.consistencyLevel = consistencyLevel; - } - - public void setThroughput(int throughput) { - this.throughput = throughput; - } - - public void setManageDatabase(boolean manageDatabase) { - this.manageDatabase = manageDatabase; - } - - public void setPreferredRegionsList(String preferredRegionsList) { - this.preferredRegionsList = preferredRegionsList; - } - - public void setSkipWarmUpOperations(int skipWarmUpOperations) { - this.skipWarmUpOperations = skipWarmUpOperations; - } - public boolean isHelp() { return help; } - public int getDocumentDataFieldSize() { - return documentDataFieldSize; - } - - public int getDocumentDataFieldCount() { - return documentDataFieldCount; - } - - public Integer getMaxConnectionPoolSize() { - return maxConnectionPoolSize; - } - - public boolean isConnectionSharingAcrossClientsEnabled() { - return connectionSharingAcrossClientsEnabled; - } - - public ConnectionMode getConnectionMode() { - return connectionMode; - } - - public ConsistencyLevel getConsistencyLevel() { - return consistencyLevel; - } - - public boolean isContentResponseOnWriteEnabled() { - return Boolean.parseBoolean(contentResponseOnWriteEnabled); - } - - public String getDatabaseId() { - return databaseId; - } - - public String getCollectionId() { - return collectionId; - } - - public int getNumberOfPreCreatedDocuments() { - 
return numberOfPreCreatedDocuments; - } - - public int getPrintingInterval() { - return printingInterval; - } - - public Duration getDiagnosticsThresholdDuration() { - return diagnosticsThresholdDuration; - } - - public File getReportingDirectory() { - return reportingDirectory == null ? null : new File(reportingDirectory); - } - - public int getConcurrency() { - if (this.concurrency != null) { - return concurrency; - } else { - return this.maxConnectionPoolSize; - } - } - - public boolean isUseNameLink() { - return useNameLink; - } - - public boolean isEnableJvmStats() { - return enableJvmStats; - } - - public boolean isEnableNettyHttpMetrics() { - return enableNettyHttpMetrics; - } - - public MeterRegistry getAzureMonitorMeterRegistry() { - String instrumentationKey = System.getProperty("azure.cosmos.monitoring.azureMonitor.instrumentationKey", - StringUtils.defaultString(Strings.emptyToNull( - System.getenv().get("AZURE_INSTRUMENTATION_KEY")), null)); - String connectionString = System.getProperty("applicationinsights.connection.string", - StringUtils.defaultString(Strings.emptyToNull( - System.getenv().get("APPLICATIONINSIGHTS_CONNECTION_STRING")), null)); - return instrumentationKey == null && connectionString == null - ? null - : this.azureMonitorMeterRegistry(connectionString, instrumentationKey); - } - - public MeterRegistry getGraphiteMeterRegistry() { - String serviceAddress = System.getProperty("azure.cosmos.monitoring.graphite.serviceAddress", - StringUtils.defaultString(Strings.emptyToNull( - System.getenv().get("GRAPHITE_SERVICE_ADDRESS")), null)); - return serviceAddress == null ? 
null : this.graphiteMeterRegistry(serviceAddress); - } - - public String getGraphiteEndpoint() { - if (graphiteEndpoint == null) { - return null; - } - - return StringUtils.substringBeforeLast(graphiteEndpoint, ":"); - } - - public int getGraphiteEndpointPort() { - if (graphiteEndpoint == null) { - return -1; - } - - String portAsString = Strings.emptyToNull(StringUtils.substringAfterLast(graphiteEndpoint, ":")); - if (portAsString == null) { - return DEFAULT_GRAPHITE_SERVER_PORT; - } else { - return Integer.parseInt(portAsString); - } - } - - public String getTestVariationName() { - return this.testVariationName; - } - - public String getBranchName() { - return this.branchName; - } - - public String getCommitId() { - return this.commitId; - } - - public int getNumberOfCollectionForCtl(){ - return this.numberOfCollectionForCtl; - } - - public String getReadWriteQueryReadManyPct() { - return this.readWriteQueryReadManyPct; - } - - public boolean shouldManageDatabase() { - return this.manageDatabase; - } - - public boolean isSuppressCleanup() { - return this.suppressCleanup; - } - - public void setSuppressCleanup(boolean suppressCleanup) { - this.suppressCleanup = suppressCleanup; + public boolean isGcBetweenCycles() { + return gcBetweenCycles; } - public String getTenantsFile() { - return tenantsFile; + public String getWorkloadConfig() { + return workloadConfig; } public int getCycles() { @@ -638,321 +66,18 @@ public long getSettleTimeMs() { return settleTimeMs; } - public boolean isGcBetweenCycles() { - return gcBetweenCycles; - } - - - public int getBulkloadBatchSize() { - return this.bulkloadBatchSize; - } - - public String getTestScenario() { - return this.testScenario; + public boolean isSuppressCleanup() { + return this.suppressCleanup; } - public Environment getEnvironment() { - return this.environment; + public void setSuppressCleanup(boolean suppressCleanup) { + this.suppressCleanup = suppressCleanup; } public String toString() { return 
ToStringBuilder.reflectionToString(this, ToStringStyle.MULTI_LINE_STYLE); } - public List getPreferredRegionsList() { - List preferredRegions = null; - if (StringUtils.isNotEmpty(preferredRegionsList)) { - String[] preferredArray = preferredRegionsList.split(","); - if (preferredArray != null && preferredArray.length > 0) { - preferredRegions = new ArrayList<>(Arrays.asList(preferredArray)); - } - } - return preferredRegions; - } - - public int getEncryptedStringFieldCount() { - return encryptedStringFieldCount; - } - - public int getEncryptedLongFieldCount() { - return encryptedLongFieldCount; - } - - public int getEncryptedDoubleFieldCount() { - return encryptedDoubleFieldCount; - } - - public boolean isEncryptionEnabled() { - return encryptionEnabled; - } - - public boolean isDefaultLog4jLoggerEnabled() { - return Boolean.parseBoolean(defaultLog4jLoggerEnabled); - } - - public Integer getTupleSize() { - return tupleSize; - } - - public Duration getPointOperationThreshold() { - if (this.pointLatencyThresholdMs < 0) { - return Duration.ofDays(300); - } - - return Duration.ofMillis(this.pointLatencyThresholdMs); - } - - public Duration getNonPointOperationThreshold() { - if (this.nonPointLatencyThresholdMs < 0) { - return Duration.ofDays(300); - } - - return Duration.ofMillis(this.nonPointLatencyThresholdMs); - } - - public boolean isProactiveConnectionManagementEnabled() { - return Boolean.parseBoolean(isProactiveConnectionManagementEnabled); - } - - public boolean isUseUnWarmedUpContainer() { - return Boolean.parseBoolean(isUseUnWarmedUpContainer); - } - - public Integer getProactiveConnectionRegionsCount() { - return proactiveConnectionRegionsCount; - } - - public Duration getAggressiveWarmupDuration() { - return aggressiveWarmupDuration; - } - - public Integer getMinConnectionPoolSizePerEndpoint() { - return minConnectionPoolSizePerEndpoint; - } - - public String getResultUploadDatabase() { - return Strings.emptyToNull(resultUploadDatabase); - } - - public 
String getResultUploadContainer() { - return Strings.emptyToNull(resultUploadContainer); - } - - public boolean isRegionScopedSessionContainerEnabled() { - return Boolean.parseBoolean(isRegionScopedSessionContainerEnabled); - } - - public boolean isPartitionLevelCircuitBreakerEnabled() { - return Boolean.parseBoolean(isPartitionLevelCircuitBreakerEnabled); - } - - public void tryGetValuesFromSystem() { - serviceEndpoint = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("SERVICE_END_POINT")), - serviceEndpoint); - - masterKey = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("MASTER_KEY")), masterKey); - - databaseId = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("DATABASE_ID")), databaseId); - - collectionId = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("COLLECTION_ID")), - collectionId); - - documentDataFieldSize = Integer.parseInt( - StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("DOCUMENT_DATA_FIELD_SIZE")), - Integer.toString(documentDataFieldSize))); - - maxConnectionPoolSize = Integer.parseInt( - StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("MAX_CONNECTION_POOL_SIZE")), - Integer.toString(maxConnectionPoolSize))); - - ConsistencyLevelConverter consistencyLevelConverter = new ConsistencyLevelConverter(); - consistencyLevel = consistencyLevelConverter.convert(StringUtils - .defaultString(Strings.emptyToNull(System.getenv().get("CONSISTENCY_LEVEL")), consistencyLevel.name())); - - Operation.OperationTypeConverter operationTypeConverter = new Operation.OperationTypeConverter(); - operation = operationTypeConverter.convert( - StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("OPERATION")), operation.name())); - - String concurrencyValue = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("CONCURRENCY")), - concurrency == null ? null : Integer.toString(concurrency)); - concurrency = concurrencyValue == null ? 
null : Integer.parseInt(concurrencyValue); - - String numberOfOperationsValue = StringUtils.defaultString( - Strings.emptyToNull(System.getenv().get("NUMBER_OF_OPERATIONS")), Integer.toString(numberOfOperations)); - numberOfOperations = Integer.parseInt(numberOfOperationsValue); - - String throughputValue = StringUtils.defaultString( - Strings.emptyToNull(System.getenv().get("THROUGHPUT")), Integer.toString(throughput)); - throughput = Integer.parseInt(throughputValue); - - preferredRegionsList = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get( - "PREFERRED_REGIONS_LIST")), preferredRegionsList); - - encryptedStringFieldCount = Integer.parseInt( - StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("ENCRYPTED_STRING_FIELD_COUNT")), - Integer.toString(encryptedStringFieldCount))); - - encryptedLongFieldCount = Integer.parseInt( - StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("ENCRYPTED_LONG_FIELD_COUNT")), - Integer.toString(encryptedLongFieldCount))); - - encryptedDoubleFieldCount = Integer.parseInt( - StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("ENCRYPTED_DOUBLE_FIELD_COUNT")), - Integer.toString(encryptedDoubleFieldCount))); - - encryptionEnabled = Boolean.parseBoolean(StringUtils.defaultString(Strings.emptyToNull(System.getenv().get( - "ENCRYPTED_ENABLED")), - Boolean.toString(encryptionEnabled))); - - tupleSize = Integer.parseInt( - StringUtils.defaultString(Strings.emptyToNull(System.getenv().get("COSMOS_IDENTITY_TUPLE_SIZE")), - Integer.toString(tupleSize))); - - testVariationName = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get( - "COSMOS_TEST_VARIATION_NAME")), testVariationName); - - branchName = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get( - "COSMOS_BRANCH_NAME")), branchName); - - commitId = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get( - "COSMOS_COMMIT_ID")), commitId); - - resultUploadDatabase = 
StringUtils.defaultString(Strings.emptyToNull(System.getenv().get( - "COSMOS_RESULT_UPLOAD_DATABASE")), resultUploadDatabase); - - resultUploadContainer = StringUtils.defaultString(Strings.emptyToNull(System.getenv().get( - "COSMOS_RESULT_UPLOAD_CONTAINER")), resultUploadContainer); - } - - private synchronized MeterRegistry azureMonitorMeterRegistry(String connectionString, String instrumentationKey) { - - if (this.azureMonitorMeterRegistry == null) { - - Duration step = Duration.ofSeconds(Integer.getInteger("azure.cosmos.monitoring.azureMonitor.step", this.printingInterval)); - String testCategoryTag = System.getProperty("azure.cosmos.monitoring.azureMonitor.testCategory"); - boolean enabled = !Boolean.getBoolean("azure.cosmos.monitoring.azureMonitor.disabled"); - - final AzureMonitorConfig config = new AzureMonitorConfig() { - - @Override - @Nullable - public String get(@Nullable String key) { - return null; - } - - @Override - @Nullable - public String instrumentationKey() { - return connectionString != null ? 
null : instrumentationKey; - } - - @Override - public String connectionString() { return connectionString; } - - - @Override - public Duration step() { - return step; - } - - @Override - public boolean enabled() { - return enabled; - } - }; - - String roleName = System.getenv("APPLICATIONINSIGHTS_ROLE_NAME"); - if (roleName != null) { - TelemetryConfiguration.getActive().setRoleName(roleName); - } - - this.azureMonitorMeterRegistry = new AzureMonitorMeterRegistry(config, Clock.SYSTEM); - List globalTags = new ArrayList<>(); - if (!Strings.isNullOrEmpty(testCategoryTag)) { - globalTags.add(Tag.of("TestCategory", testCategoryTag)); - } - - String roleInstance = System.getenv("APPLICATIONINSIGHTS_ROLE_INSTANCE"); - if (roleName != null) { - globalTags.add(Tag.of("cloud_RoleInstance", roleInstance)); - } - - this.azureMonitorMeterRegistry.config().commonTags(globalTags); - } - - return this.azureMonitorMeterRegistry; - } - - @SuppressWarnings("UnstableApiUsage") - private synchronized MeterRegistry graphiteMeterRegistry(String serviceAddress) { - - if (this.graphiteMeterRegistry == null) { - - HostAndPort address = HostAndPort.fromString(serviceAddress); - - String host = address.getHost(); - int port = address.getPortOrDefault(DEFAULT_GRAPHITE_SERVER_PORT); - boolean enabled = !Boolean.getBoolean("azure.cosmos.monitoring.graphite.disabled"); - Duration step = Duration.ofSeconds(Integer.getInteger("azure.cosmos.monitoring.graphite.step", this.printingInterval)); - - final GraphiteConfig config = new GraphiteConfig() { - - private String[] tagNames = { "source" }; - - @Override - @Nullable - public String get(@Nullable String key) { - return null; - } - - @Override - public boolean enabled() { - return enabled; - } - - @Override - @Nullable - public String host() { - return host; - } - - @Override - @Nullable - public int port() { - return port; - } - - @Override - @Nullable - public Duration step() { - return step; - } - - @Override - @Nullable - public String[] 
tagsAsPrefix() { - return this.tagNames; - } - }; - - this.graphiteMeterRegistry = new GraphiteMeterRegistry(config, Clock.SYSTEM); - String source; - - try { - PercentEscaper escaper = new PercentEscaper("_-", false); - source = escaper.escape(InetAddress.getLocalHost().getHostName()); - } catch (UnknownHostException error) { - source = "unknown-host"; - } - - this.graphiteMeterRegistry.config() - .namingConvention(NamingConvention.dot) - .commonTags("source", source); - } - - return this.graphiteMeterRegistry; - } - public static String getAadLoginUri() { return getOptionalConfigProperty( "AAD_LOGIN_ENDPOINT", @@ -968,57 +93,6 @@ public static String getAadTenantId() { return getOptionalConfigProperty("AAD_TENANT_ID", null, v -> v); } - /** - * Returns the AAD login endpoint for this configuration instance. - * Falls back to the static/system-property value if not set per-instance. - */ - public String getInstanceAadLoginEndpoint() { - return aadLoginEndpoint != null ? aadLoginEndpoint : getAadLoginUri(); - } - - /** - * Returns the AAD managed identity client ID for this configuration instance. - * Falls back to the static/system-property value if not set per-instance. - */ - public String getInstanceAadManagedIdentityClientId() { - return aadManagedIdentityClientId != null ? aadManagedIdentityClientId : getAadManagedIdentityId(); - } - - /** - * Returns the AAD tenant ID for this configuration instance. - * Falls back to the static/system-property value if not set per-instance. - */ - public String getInstanceAadTenantId() { - return aadTenantId != null ? aadTenantId : getAadTenantId(); - } - - /** - * Builds a {@link com.azure.core.credential.TokenCredential} based on this configuration instance's - * AAD settings. Each call returns a new credential, allowing per-tenant identity in multi-tenant benchmarks. - * - * @return a new TokenCredential configured with this instance's AAD login endpoint, tenant ID, - * and managed identity client ID. 
- */ - public com.azure.core.credential.TokenCredential buildTokenCredential() { - return new com.azure.identity.DefaultAzureCredentialBuilder() - .managedIdentityClientId(getInstanceAadManagedIdentityClientId()) - .authorityHost(getInstanceAadLoginEndpoint()) - .tenantId(getInstanceAadTenantId()) - .build(); - } - - public void setAadLoginEndpoint(String aadLoginEndpoint) { - this.aadLoginEndpoint = aadLoginEndpoint; - } - - public void setAadTenantId(String aadTenantId) { - this.aadTenantId = aadTenantId; - } - - public void setAadManagedIdentityClientId(String aadManagedIdentityClientId) { - this.aadManagedIdentityClientId = aadManagedIdentityClientId; - } - private static T getOptionalConfigProperty(String name, T defaultValue, Function conversion) { String textValue = getConfigPropertyOrNull(name); diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/Main.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/Main.java index 5748cf6c9c75..053d2d45bdff 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/Main.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/Main.java @@ -28,7 +28,6 @@ public static void main(String[] args) throws Exception { try { LOGGER.debug("Parsing the arguments ..."); Configuration cfg = new Configuration(); - cfg.tryGetValuesFromSystem(); JCommander jcommander = new JCommander(cfg, args); if (cfg.isHelp()) { @@ -36,19 +35,23 @@ public static void main(String[] args) throws Exception { return; } - validateConfiguration(cfg); + // Build BenchmarkConfig (requires workload config file) + BenchmarkConfig benchConfig = BenchmarkConfig.fromConfiguration(cfg); + TenantWorkloadConfig firstTenant = benchConfig.getTenantWorkloads().get(0); - if (cfg.isSync()) { - syncBenchmark(cfg); + validateConfiguration(firstTenant, cfg); + + if (firstTenant.isSync()) { + syncBenchmark(firstTenant, benchConfig); } else { - if 
(cfg.getOperationType().equals(CtlWorkload)) { - asyncCtlWorkload(cfg); - } else if (cfg.getOperationType().equals(LinkedInCtlWorkload)) { - linkedInCtlWorkload(cfg); - } else if (cfg.isEncryptionEnabled()) { - asyncEncryptionBenchmark(cfg); + if (firstTenant.getOperationType().equals(CtlWorkload)) { + asyncCtlWorkload(firstTenant, benchConfig); + } else if (firstTenant.getOperationType().equals(LinkedInCtlWorkload)) { + linkedInCtlWorkload(firstTenant, benchConfig); + } else if (firstTenant.isEncryptionEnabled()) { + asyncEncryptionBenchmark(firstTenant, benchConfig); } else { - asyncBenchmark(cfg); + asyncBenchmark(benchConfig); } } } catch (ParameterException e) { @@ -58,49 +61,46 @@ public static void main(String[] args) throws Exception { } } - private static void validateConfiguration(Configuration cfg) { - switch (cfg.getOperationType()) { + private static void validateConfiguration(TenantWorkloadConfig workloadCfg, Configuration cfg) { + switch (workloadCfg.getOperationType()) { case WriteLatency: case WriteThroughput: break; default: - if (!cfg.isContentResponseOnWriteEnabled()) { + if (!workloadCfg.isContentResponseOnWriteEnabled()) { throw new IllegalArgumentException("contentResponseOnWriteEnabled parameter can only be set to false " + "for write latency and write throughput operations"); } } - switch (cfg.getOperationType()) { + switch (workloadCfg.getOperationType()) { case ReadLatency: case ReadThroughput: break; default: - if (cfg.getSparsityWaitTime() != null) { - throw new IllegalArgumentException("sparsityWaitTime is not supported for " + cfg.getOperationType()); + if (workloadCfg.getSparsityWaitTime() != null) { + throw new IllegalArgumentException("sparsityWaitTime is not supported for " + workloadCfg.getOperationType()); } } } - private static void syncBenchmark(Configuration cfg) throws Exception { + private static void syncBenchmark(TenantWorkloadConfig workloadCfg, BenchmarkConfig benchConfig) throws Exception { LOGGER.info("Sync benchmark 
..."); SyncBenchmark benchmark = null; try { - switch (cfg.getOperationType()) { + switch (workloadCfg.getOperationType()) { case ReadThroughput: case ReadLatency: - benchmark = new SyncReadBenchmark(cfg); + benchmark = new SyncReadBenchmark(workloadCfg, benchConfig); break; - case WriteLatency: case WriteThroughput: - benchmark = new SyncWriteBenchmark(cfg); + benchmark = new SyncWriteBenchmark(workloadCfg, benchConfig); break; - default: - throw new RuntimeException(cfg.getOperationType() + " is not supported"); + throw new RuntimeException(workloadCfg.getOperationType() + " is not supported"); } - - LOGGER.info("Starting {}", cfg.getOperationType()); + LOGGER.info("Starting {}", workloadCfg.getOperationType()); benchmark.run(); } finally { if (benchmark != null) { @@ -111,48 +111,42 @@ private static void syncBenchmark(Configuration cfg) throws Exception { /** * Async benchmark path: builds BenchmarkConfig from CLI args and delegates to BenchmarkOrchestrator. - * Handles both single-tenant (CLI args) and multi-tenant (tenants.json) modes. + * Handles both single-tenant and multi-tenant modes via workload config file. 
*/ - private static void asyncBenchmark(Configuration cfg) throws Exception { - BenchmarkConfig benchConfig = BenchmarkConfig.fromConfiguration(cfg); + private static void asyncBenchmark(BenchmarkConfig benchConfig) throws Exception { LOGGER.info("Async benchmark via BenchmarkOrchestrator ({} tenants, {} cycles)...", benchConfig.getTenantWorkloads().size(), benchConfig.getCycles()); new BenchmarkOrchestrator().run(benchConfig); } - private static void asyncEncryptionBenchmark(Configuration cfg) throws Exception { + private static void asyncEncryptionBenchmark(TenantWorkloadConfig workloadCfg, BenchmarkConfig benchConfig) throws Exception { LOGGER.info("Async encryption benchmark ..."); AsyncEncryptionBenchmark benchmark = null; try { - switch (cfg.getOperationType()) { + switch (workloadCfg.getOperationType()) { case WriteThroughput: case WriteLatency: - benchmark = new AsyncEncryptionWriteBenchmark(cfg); + benchmark = new AsyncEncryptionWriteBenchmark(workloadCfg, benchConfig); break; - case ReadThroughput: case ReadLatency: - benchmark = new AsyncEncryptionReadBenchmark(cfg); + benchmark = new AsyncEncryptionReadBenchmark(workloadCfg, benchConfig); break; - case QueryCross: case QuerySingle: case QueryParallel: case QueryOrderby: case QueryTopOrderby: case QueryInClauseParallel: - benchmark = new AsyncEncryptionQueryBenchmark(cfg); + benchmark = new AsyncEncryptionQueryBenchmark(workloadCfg, benchConfig); break; - case QuerySingleMany: - benchmark = new AsyncEncryptionQuerySinglePartitionMultiple(cfg); + benchmark = new AsyncEncryptionQuerySinglePartitionMultiple(workloadCfg, benchConfig); break; - default: - throw new RuntimeException(cfg.getOperationType() + " is not supported"); + throw new RuntimeException(workloadCfg.getOperationType() + " is not supported"); } - - LOGGER.info("Starting {}", cfg.getOperationType()); + LOGGER.info("Starting {}", workloadCfg.getOperationType()); benchmark.run(); } finally { if (benchmark != null) { @@ -161,12 +155,12 @@ 
private static void asyncEncryptionBenchmark(Configuration cfg) throws Exception } } - private static void asyncCtlWorkload(Configuration cfg) throws Exception { + private static void asyncCtlWorkload(TenantWorkloadConfig workloadCfg, BenchmarkConfig benchConfig) throws Exception { LOGGER.info("Async ctl workload"); AsyncCtlWorkload benchmark = null; try { - benchmark = new AsyncCtlWorkload(cfg); - LOGGER.info("Starting {}", cfg.getOperationType()); + benchmark = new AsyncCtlWorkload(workloadCfg, benchConfig); + LOGGER.info("Starting {}", workloadCfg.getOperationType()); benchmark.run(); } finally { if (benchmark != null) { @@ -175,22 +169,19 @@ private static void asyncCtlWorkload(Configuration cfg) throws Exception { } } - private static void linkedInCtlWorkload(Configuration cfg) { + private static void linkedInCtlWorkload(TenantWorkloadConfig workloadCfg, BenchmarkConfig benchConfig) { LOGGER.info("Executing the LinkedIn ctl workload"); LICtlWorkload workload = null; try { - workload = new LICtlWorkload(cfg); - + workload = new LICtlWorkload(workloadCfg, benchConfig); LOGGER.info("Setting up the LinkedIn ctl workload"); workload.setup(); - LOGGER.info("Starting the LinkedIn ctl workload"); workload.run(); } catch (Exception e) { LOGGER.error("Exception received while executing the LinkedIn ctl workload", e); throw e; - } - finally { + } finally { Optional.ofNullable(workload) .ifPresent(LICtlWorkload::shutdown); } diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/ScheduledReporterFactory.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/ScheduledReporterFactory.java index 132709c615b7..09b9a8864145 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/ScheduledReporterFactory.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/ScheduledReporterFactory.java @@ -5,14 +5,9 @@ import com.codahale.metrics.ConsoleReporter; import 
com.codahale.metrics.CsvReporter; -import com.codahale.metrics.MetricFilter; import com.codahale.metrics.MetricRegistry; import com.codahale.metrics.ScheduledReporter; -import com.codahale.metrics.graphite.Graphite; -import com.codahale.metrics.graphite.GraphiteReporter; -import java.net.InetSocketAddress; -import java.net.URI; -import java.net.URISyntaxException; +import java.io.File; import java.util.concurrent.TimeUnit; @@ -22,38 +17,17 @@ private ScheduledReporterFactory() { } /** - * @param configuration CTL workload parameters + * @param benchConfig benchmark-level configuration (reporting directory) * @param metricsRegistry MetricRegistry instance for tracking various execution metrics * @return ScheduledReporter for reporting the captured metrics */ - public static ScheduledReporter create(final Configuration configuration, + public static ScheduledReporter create(final BenchmarkConfig benchConfig, final MetricRegistry metricsRegistry) { - if (configuration.getGraphiteEndpoint() != null) { - final Graphite graphite = new Graphite(new InetSocketAddress( - configuration.getGraphiteEndpoint(), - configuration.getGraphiteEndpointPort())); - - String graphiteReporterPrefix = configuration.getOperationType().name(); - if (configuration.isAccountNameInGraphiteReporter()) { - try { - URI uri = new URI(configuration.getServiceEndpoint()); - graphiteReporterPrefix = graphiteReporterPrefix + "-" + uri.getHost().substring(0, uri.getHost().indexOf(".")); - } catch (URISyntaxException e) { - // do nothing, graphiteReporterPrefix will be configuration.getOperationType().name() - } - } - - return GraphiteReporter.forRegistry(metricsRegistry) - .prefixedWith(graphiteReporterPrefix) - .convertDurationsTo(TimeUnit.MILLISECONDS) - .convertRatesTo(TimeUnit.SECONDS) - .filter(MetricFilter.ALL) - .build(graphite); - } else if (configuration.getReportingDirectory() != null) { + if (benchConfig.getReportingDirectory() != null) { return CsvReporter.forRegistry(metricsRegistry) 
.convertDurationsTo(TimeUnit.MILLISECONDS) .convertRatesTo(TimeUnit.SECONDS) - .build(configuration.getReportingDirectory()); + .build(new File(benchConfig.getReportingDirectory())); } else { return ConsoleReporter.forRegistry(metricsRegistry) .convertDurationsTo(TimeUnit.MILLISECONDS) diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/SyncBenchmark.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/SyncBenchmark.java index 422125b52226..df03d2441462 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/SyncBenchmark.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/SyncBenchmark.java @@ -4,7 +4,6 @@ package com.azure.cosmos.benchmark; import com.azure.core.credential.TokenCredential; -import com.azure.cosmos.BridgeInternal; import com.azure.cosmos.ConnectionMode; import com.azure.cosmos.CosmosClient; import com.azure.cosmos.CosmosClientBuilder; @@ -21,26 +20,21 @@ import com.azure.cosmos.models.CosmosItemResponse; import com.azure.cosmos.models.ThroughputProperties; import com.codahale.metrics.ConsoleReporter; +import com.codahale.metrics.CsvReporter; import com.codahale.metrics.Meter; -import com.codahale.metrics.MetricFilter; import com.codahale.metrics.MetricRegistry; import com.codahale.metrics.ScheduledReporter; import com.codahale.metrics.Timer; -import com.codahale.metrics.graphite.Graphite; -import com.codahale.metrics.graphite.GraphiteReporter; import com.codahale.metrics.jvm.CachedThreadStatesGaugeSet; import com.codahale.metrics.jvm.GarbageCollectorMetricSet; import com.codahale.metrics.jvm.MemoryUsageGaugeSet; -import io.micrometer.core.instrument.MeterRegistry; import org.apache.commons.lang3.RandomStringUtils; import org.apache.commons.lang3.StringUtils; import org.mpierce.metrics.reservoir.hdrhistogram.HdrHistogramResetOnSnapshotReservoir; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import 
java.net.InetSocketAddress; import java.util.ArrayList; -import java.util.Arrays; import java.util.List; import java.util.UUID; import java.util.concurrent.CompletableFuture; @@ -76,21 +70,12 @@ abstract class SyncBenchmark { CosmosDatabase cosmosDatabase; final String partitionKey; - final Configuration configuration; + final TenantWorkloadConfig workloadConfig; + final BenchmarkConfig benchConfig; final List docsToRead; final Semaphore concurrencyControlSemaphore; Timer latency; - private static final List CONFIGURED_HIGH_AVAILABILITY_SYSTEM_PROPERTIES = Arrays.asList( - "COSMOS.IS_PER_PARTITION_AUTOMATIC_FAILOVER_ENABLED", - "COSMOS.IS_SESSION_TOKEN_FALSE_PROGRESS_MERGE_ENABLED", - "COSMOS.E2E_TIMEOUT_ERROR_HIT_THRESHOLD_FOR_PPAF", - "COSMOS.E2E_TIMEOUT_ERROR_HIT_TIME_WINDOW_IN_SECONDS_FOR_PPAF", - "COSMOS.STALE_PARTITION_UNAVAILABILITY_REFRESH_INTERVAL_IN_SECONDS", - "COSMOS.ALLOWED_PARTITION_UNAVAILABILITY_DURATION_IN_SECONDS", - "COSMOS.PARTITION_LEVEL_CIRCUIT_BREAKER_CONFIG" // Implicitly set when COSMOS.IS_PER_PARTITION_AUTOMATIC_FAILOVER_ENABLED is set to true - ); - static abstract class ResultHandler implements BiFunction { ResultHandler() { } @@ -123,84 +108,64 @@ public T apply(T o, Throwable throwable) { } } - SyncBenchmark(Configuration cfg) throws Exception { - executorService = Executors.newFixedThreadPool(cfg.getConcurrency()); - configuration = cfg; + SyncBenchmark(TenantWorkloadConfig workloadCfg, BenchmarkConfig benchCfg) throws Exception { + executorService = Executors.newFixedThreadPool(workloadCfg.getConcurrency()); + workloadConfig = workloadCfg; + benchConfig = benchCfg; logger = LoggerFactory.getLogger(this.getClass()); - if (configuration.isPartitionLevelCircuitBreakerEnabled()) { - System.setProperty( - "COSMOS.PARTITION_LEVEL_CIRCUIT_BREAKER_CONFIG", - "{\"isPartitionLevelCircuitBreakerEnabled\": true, " - + "\"circuitBreakerType\": \"CONSECUTIVE_EXCEPTION_COUNT_BASED\"," - + "\"consecutiveExceptionCountToleratedForReads\": 10," - + 
"\"consecutiveExceptionCountToleratedForWrites\": 5," - + "}"); - - System.setProperty("COSMOS.STALE_PARTITION_UNAVAILABILITY_REFRESH_INTERVAL_IN_SECONDS", "60"); - System.setProperty("COSMOS.ALLOWED_PARTITION_UNAVAILABILITY_DURATION_IN_SECONDS", "30"); - } - - if (configuration.isPerPartitionAutomaticFailoverRequired()) { - System.setProperty( - "COSMOS.IS_PER_PARTITION_AUTOMATIC_FAILOVER_ENABLED", "true"); - System.setProperty("COSMOS.IS_SESSION_TOKEN_FALSE_PROGRESS_MERGE_ENABLED", "true"); - System.setProperty("COSMOS.E2E_TIMEOUT_ERROR_HIT_THRESHOLD_FOR_PPAF", "5"); - System.setProperty("COSMOS.E2E_TIMEOUT_ERROR_HIT_TIME_WINDOW_IN_SECONDS_FOR_PPAF", "120"); - } - - boolean isManagedIdentityRequired = configuration.isManagedIdentityRequired(); + boolean isManagedIdentityRequired = workloadCfg.isManagedIdentityRequired(); final TokenCredential credential = isManagedIdentityRequired - ? cfg.buildTokenCredential() + ? workloadCfg.buildTokenCredential() : null; CosmosClientBuilder benchmarkSpecificClientBuilder = isManagedIdentityRequired ? 
new CosmosClientBuilder() .credential(credential) : new CosmosClientBuilder() - .key(cfg.getMasterKey()); + .key(workloadCfg.getMasterKey()); CosmosClientBuilder resultUploadClientBuilder = new CosmosClientBuilder(); - benchmarkSpecificClientBuilder.preferredRegions(cfg.getPreferredRegionsList()) - .endpoint(cfg.getServiceEndpoint()) - .userAgentSuffix(configuration.getApplicationName()) - .consistencyLevel(cfg.getConsistencyLevel()) - .contentResponseOnWriteEnabled(cfg.isContentResponseOnWriteEnabled()); + benchmarkSpecificClientBuilder.preferredRegions(workloadCfg.getPreferredRegionsList()) + .endpoint(workloadCfg.getServiceEndpoint()) + .userAgentSuffix(workloadCfg.getApplicationName()) + .consistencyLevel(workloadCfg.getConsistencyLevel()) + .contentResponseOnWriteEnabled(workloadCfg.isContentResponseOnWriteEnabled()); clientBuilderAccessor - .setRegionScopedSessionCapturingEnabled(benchmarkSpecificClientBuilder, cfg.isRegionScopedSessionContainerEnabled()); + .setRegionScopedSessionCapturingEnabled(benchmarkSpecificClientBuilder, workloadCfg.isRegionScopedSessionContainerEnabled()); - if (cfg.getConnectionMode().equals(ConnectionMode.DIRECT)) { + if (workloadCfg.getConnectionMode().equals(ConnectionMode.DIRECT)) { benchmarkSpecificClientBuilder = benchmarkSpecificClientBuilder.directMode(DirectConnectionConfig.getDefaultConfig()); } else { GatewayConnectionConfig gatewayConnectionConfig = new GatewayConnectionConfig(); - gatewayConnectionConfig.setMaxConnectionPoolSize(cfg.getMaxConnectionPoolSize()); + gatewayConnectionConfig.setMaxConnectionPoolSize(workloadCfg.getMaxConnectionPoolSize()); benchmarkSpecificClientBuilder = benchmarkSpecificClientBuilder.gatewayMode(gatewayConnectionConfig); } CosmosClientTelemetryConfig telemetryConfig = new CosmosClientTelemetryConfig() .diagnosticsThresholds( new CosmosDiagnosticsThresholds() - .setPointOperationLatencyThreshold(cfg.getPointOperationThreshold()) - 
.setNonPointOperationLatencyThreshold(cfg.getNonPointOperationThreshold()) + .setPointOperationLatencyThreshold(workloadCfg.getPointOperationThreshold()) + .setNonPointOperationLatencyThreshold(workloadCfg.getNonPointOperationThreshold()) ); - if (configuration.isDefaultLog4jLoggerEnabled()) { + if (workloadCfg.isDefaultLog4jLoggerEnabled()) { telemetryConfig.diagnosticsHandler(CosmosDiagnosticsHandler.DEFAULT_LOGGING_HANDLER); } benchmarkWorkloadClient = benchmarkSpecificClientBuilder.buildClient(); this.resultUploaderClient = resultUploadClientBuilder - .endpoint(StringUtils.isNotEmpty(configuration.getServiceEndpointForRunResultsUploadAccount()) ? configuration.getServiceEndpointForRunResultsUploadAccount() : configuration.getServiceEndpoint()) - .key(StringUtils.isNotEmpty(configuration.getMasterKeyForRunResultsUploadAccount()) ? configuration.getMasterKeyForRunResultsUploadAccount() : configuration.getMasterKey()) + .endpoint(StringUtils.isNotEmpty(benchConfig.getResultUploadEndpoint()) ? benchConfig.getResultUploadEndpoint() : workloadCfg.getServiceEndpoint()) + .key(StringUtils.isNotEmpty(benchConfig.getResultUploadKey()) ? 
benchConfig.getResultUploadKey() : workloadCfg.getMasterKey()) .buildClient(); try { - cosmosDatabase = benchmarkWorkloadClient.getDatabase(this.configuration.getDatabaseId()); + cosmosDatabase = benchmarkWorkloadClient.getDatabase(workloadCfg.getDatabaseId()); cosmosDatabase.read(); - logger.info("Database {} is created for this test", this.configuration.getDatabaseId()); + logger.info("Database {} is created for this test", workloadCfg.getDatabaseId()); } catch (CosmosException e) { if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) { @@ -209,8 +174,8 @@ public T apply(T o, Throwable throwable) { "either pre-create a database and a container or use the management SDK."); } - benchmarkWorkloadClient.createDatabase(cfg.getDatabaseId()); - cosmosDatabase = benchmarkWorkloadClient.getDatabase(cfg.getDatabaseId()); + benchmarkWorkloadClient.createDatabase(workloadCfg.getDatabaseId()); + cosmosDatabase = benchmarkWorkloadClient.getDatabase(workloadCfg.getDatabaseId()); databaseCreated = true; } else { throw e; @@ -218,7 +183,7 @@ public T apply(T o, Throwable throwable) { } try { - cosmosContainer = cosmosDatabase.getContainer(this.configuration.getCollectionId()); + cosmosContainer = cosmosDatabase.getContainer(workloadCfg.getContainerId()); cosmosContainer.read(); } catch (CosmosException e) { if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) { @@ -228,11 +193,11 @@ public T apply(T o, Throwable throwable) { "either pre-create a database and a container or use the management SDK."); } - cosmosDatabase.createContainer(this.configuration.getCollectionId(), - Configuration.DEFAULT_PARTITION_KEY_PATH, - ThroughputProperties.createManualThroughput(this.configuration.getThroughput())); - cosmosContainer = cosmosDatabase.getContainer(this.configuration.getCollectionId()); - logger.info("Collection {} is created for this test", this.configuration.getCollectionId()); + cosmosDatabase.createContainer(workloadCfg.getContainerId(), + 
TenantWorkloadConfig.DEFAULT_PARTITION_KEY_PATH, + ThroughputProperties.createManualThroughput(workloadCfg.getThroughput())); + cosmosContainer = cosmosDatabase.getContainer(workloadCfg.getContainerId()); + logger.info("Collection {} is created for this test", workloadCfg.getContainerId()); // add some delay to allow container to be created across multiple regions // container creation across regions is an async operation @@ -252,20 +217,20 @@ public T apply(T o, Throwable throwable) { partitionKey = cosmosContainer.read().getProperties().getPartitionKeyDefinition() .getPaths().iterator().next().split("/")[1]; - concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency()); + concurrencyControlSemaphore = new Semaphore(workloadCfg.getConcurrency()); ArrayList> createDocumentFutureList = new ArrayList<>(); - if (configuration.getOperationType() != Operation.WriteLatency - && configuration.getOperationType() != Operation.WriteThroughput - && configuration.getOperationType() != Operation.ReadMyWrites) { - String dataFieldValue = RandomStringUtils.randomAlphabetic(cfg.getDocumentDataFieldSize()); - for (int i = 0; i < cfg.getNumberOfPreCreatedDocuments(); i++) { + if (workloadCfg.getOperationType() != Operation.WriteLatency + && workloadCfg.getOperationType() != Operation.WriteThroughput + && workloadCfg.getOperationType() != Operation.ReadMyWrites) { + String dataFieldValue = RandomStringUtils.randomAlphabetic(workloadCfg.getDocumentDataFieldSize()); + for (int i = 0; i < workloadCfg.getNumberOfPreCreatedDocuments(); i++) { String uuid = UUID.randomUUID().toString(); PojoizedJson newDoc = BenchmarkHelper.generateDocument(uuid, dataFieldValue, partitionKey, - configuration.getDocumentDataFieldCount()); + workloadCfg.getDocumentDataFieldCount()); CompletableFuture futureResult = CompletableFuture.supplyAsync(() -> { try { @@ -285,56 +250,39 @@ public T apply(T o, Throwable throwable) { docsToRead = createDocumentFutureList.stream().map(future -> 
getOrThrow(future)).collect(Collectors.toList()); init(); - if (configuration.isEnableJvmStats()) { + if (benchConfig.isEnableJvmStats()) { metricsRegistry.register("gc", new GarbageCollectorMetricSet()); metricsRegistry.register("threads", new CachedThreadStatesGaugeSet(10, TimeUnit.SECONDS)); metricsRegistry.register("memory", new MemoryUsageGaugeSet()); } - if (configuration.getGraphiteEndpoint() != null) { - final Graphite graphite = new Graphite(new InetSocketAddress(configuration.getGraphiteEndpoint(), configuration.getGraphiteEndpointPort())); - reporter = GraphiteReporter.forRegistry(metricsRegistry) - .prefixedWith(configuration.getOperationType().name()) - .convertRatesTo(TimeUnit.SECONDS) - .convertDurationsTo(TimeUnit.MILLISECONDS) - .filter(MetricFilter.ALL) - .build(graphite); + if (benchConfig.getReportingDirectory() != null) { + reporter = CsvReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS) + .convertDurationsTo(TimeUnit.MILLISECONDS).build(new java.io.File(benchConfig.getReportingDirectory())); } else { reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS) - .convertDurationsTo(TimeUnit.MILLISECONDS).build(); + .convertDurationsTo(TimeUnit.MILLISECONDS).build(); } - if (configuration.getResultUploadDatabase() != null && configuration.getResultUploadContainer() != null) { - String op = configuration.isSync() - ? "SYNC_" + configuration.getOperationType().name() - : configuration.getOperationType().name(); + if (benchConfig.getResultUploadDatabase() != null && benchConfig.getResultUploadContainer() != null) { + String op = workloadConfig.isSync() + ? 
"SYNC_" + workloadCfg.getOperationType().name() + : workloadCfg.getOperationType().name(); resultReporter = CosmosTotalResultReporter .forRegistry( metricsRegistry, - this.resultUploaderClient.getDatabase(configuration.getResultUploadDatabase()).getContainer(configuration.getResultUploadContainer()), + this.resultUploaderClient.getDatabase(benchConfig.getResultUploadDatabase()).getContainer(benchConfig.getResultUploadContainer()), op, - configuration.getTestVariationName(), - configuration.getBranchName(), - configuration.getCommitId(), - configuration.getConcurrency()) + benchConfig.getTestVariationName(), + benchConfig.getBranchName(), + benchConfig.getCommitId(), + workloadCfg.getConcurrency()) .convertRatesTo(TimeUnit.SECONDS) .convertDurationsTo(TimeUnit.MILLISECONDS).build(); } else { resultReporter = null; } - - MeterRegistry registry = configuration.getAzureMonitorMeterRegistry(); - - if (registry != null) { - BridgeInternal.monitorTelemetry(registry); - } - - registry = configuration.getGraphiteMeterRegistry(); - - if (registry != null) { - BridgeInternal.monitorTelemetry(registry); - } } protected void init() { @@ -342,16 +290,12 @@ protected void init() { void shutdown() { - for (String key : CONFIGURED_HIGH_AVAILABILITY_SYSTEM_PROPERTIES) { - System.clearProperty(key); - } - if (this.databaseCreated) { cosmosDatabase.delete(); - logger.info("Deleted temporary database {} created for this test", this.configuration.getDatabaseId()); + logger.info("Deleted temporary database {} created for this test", workloadConfig.getDatabaseId()); } else if (this.collectionCreated) { cosmosContainer.delete(); - logger.info("Deleted temporary collection {} created for this test", this.configuration.getCollectionId()); + logger.info("Deleted temporary collection {} created for this test", workloadConfig.getContainerId()); } resultUploaderClient.close(); @@ -369,10 +313,10 @@ protected void onError(Throwable throwable) { void run() throws Exception { - successMeter = 
metricsRegistry.meter(Configuration.SUCCESS_COUNTER_METER_NAME); - failureMeter = metricsRegistry.meter(Configuration.FAILURE_COUNTER_METER_NAME); + successMeter = metricsRegistry.meter(TenantWorkloadConfig.SUCCESS_COUNTER_METER_NAME); + failureMeter = metricsRegistry.meter(TenantWorkloadConfig.FAILURE_COUNTER_METER_NAME); - switch (configuration.getOperationType()) { + switch (workloadConfig.getOperationType()) { case ReadLatency: case WriteLatency: // TODO: support for other operationTypes will be added later @@ -386,22 +330,22 @@ void run() throws Exception { // case QueryAggregateTopOrderby: // case QueryTopOrderby: case Mixed: - latency = metricsRegistry.register(Configuration.LATENCY_METER_NAME, new Timer(new HdrHistogramResetOnSnapshotReservoir())); + latency = metricsRegistry.register(TenantWorkloadConfig.LATENCY_METER_NAME, new Timer(new HdrHistogramResetOnSnapshotReservoir())); break; default: break; } - reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS); + reporter.start(benchConfig.getPrintingInterval(), TimeUnit.SECONDS); if (resultReporter != null) { - resultReporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS); + resultReporter.start(benchConfig.getPrintingInterval(), TimeUnit.SECONDS); } long startTime = System.currentTimeMillis(); AtomicLong count = new AtomicLong(0); long i; - for ( i = 0; BenchmarkHelper.shouldContinue(startTime, i, configuration); i++) { + for ( i = 0; BenchmarkHelper.shouldContinue(startTime, i, workloadConfig); i++) { ResultHandler resultHandler = new ResultHandler() { @Override @@ -437,7 +381,7 @@ public T apply(T t, Throwable throwable) { concurrencyControlSemaphore.acquire(); final long cnt = i; - switch (configuration.getOperationType()) { + switch (workloadConfig.getOperationType()) { case ReadLatency: case WriteLatency: // TODO: support for other operation types will be added later @@ -482,7 +426,7 @@ public T apply(T t, Throwable throwable) { long endTime = System.currentTimeMillis(); 
logger.info("[{}] operations performed in [{}] seconds.", - configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000)); + workloadConfig.getNumberOfOperations(), (int) ((endTime - startTime) / 1000)); reporter.report(); reporter.close(); diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/SyncReadBenchmark.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/SyncReadBenchmark.java index 21ee8c8259d9..abd4c0ee5409 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/SyncReadBenchmark.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/SyncReadBenchmark.java @@ -10,8 +10,8 @@ class SyncReadBenchmark extends SyncBenchmark { - SyncReadBenchmark(Configuration cfg) throws Exception { - super(cfg); + SyncReadBenchmark(TenantWorkloadConfig workloadCfg, BenchmarkConfig benchCfg) throws Exception { + super(workloadCfg, benchCfg); } @Override diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/SyncWriteBenchmark.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/SyncWriteBenchmark.java index 700a00505bd6..55996540c5dc 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/SyncWriteBenchmark.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/SyncWriteBenchmark.java @@ -14,31 +14,31 @@ class SyncWriteBenchmark extends SyncBenchmark { private final String dataFieldValue; private final String uuid; - SyncWriteBenchmark(Configuration cfg) throws Exception { - super(cfg); + SyncWriteBenchmark(TenantWorkloadConfig workloadCfg, BenchmarkConfig benchCfg) throws Exception { + super(workloadCfg, benchCfg); uuid = UUID.randomUUID().toString(); dataFieldValue = - RandomStringUtils.randomAlphabetic(configuration.getDocumentDataFieldSize()); + 
RandomStringUtils.randomAlphabetic(workloadConfig.getDocumentDataFieldSize()); } @Override protected CosmosItemResponse performWorkload(long i) throws Exception { String id = uuid + i; CosmosItemResponse response; - if (configuration.isDisablePassingPartitionKeyAsOptionOnWrite()) { + if (workloadConfig.isDisablePassingPartitionKeyAsOptionOnWrite()) { // require parsing partition key from the doc return cosmosContainer.createItem(BenchmarkHelper.generateDocument(id, dataFieldValue, partitionKey, - configuration.getDocumentDataFieldCount())); + workloadConfig.getDocumentDataFieldCount())); } // more optimized for write as partition key is already passed as config return cosmosContainer.createItem(BenchmarkHelper.generateDocument(id, dataFieldValue, partitionKey, - configuration.getDocumentDataFieldCount()), + workloadConfig.getDocumentDataFieldCount()), new PartitionKey(id), null); } diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/TenantWorkloadConfig.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/TenantWorkloadConfig.java index 9df4ce80d658..b0c083069218 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/TenantWorkloadConfig.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/TenantWorkloadConfig.java @@ -137,6 +137,9 @@ public class TenantWorkloadConfig { @JsonProperty("maxRunningTimeDuration") private String maxRunningTimeDuration; + @JsonProperty("diagnosticsThresholdDuration") + private String diagnosticsThresholdDuration; + @JsonProperty("sparsityWaitTime") private String sparsityWaitTime; @@ -146,6 +149,36 @@ public class TenantWorkloadConfig { @JsonProperty("isUseUnWarmedUpContainer") private Boolean isUseUnWarmedUpContainer; + @JsonProperty("numberOfCollectionForCtl") + private Integer numberOfCollectionForCtl; + + @JsonProperty("readWriteQueryReadManyPct") + private String readWriteQueryReadManyPct; + + 
@JsonProperty("encryptedStringFieldCount") + private Integer encryptedStringFieldCount; + + @JsonProperty("encryptedLongFieldCount") + private Integer encryptedLongFieldCount; + + @JsonProperty("encryptedDoubleFieldCount") + private Integer encryptedDoubleFieldCount; + + @JsonProperty("encryptionEnabled") + private Boolean encryptionEnabled; + + @JsonProperty("bulkloadBatchSize") + private Integer bulkloadBatchSize; + + @JsonProperty("testScenario") + private String testScenario; + + @JsonProperty("environment") + private String environment; + + @JsonProperty("useSync") + private Boolean useSync; + @JsonProperty("proactiveConnectionRegionsCount") private Integer proactiveConnectionRegionsCount; @@ -241,6 +274,11 @@ public boolean isDefaultLog4jLoggerEnabled() { return isDefaultLog4jLoggerEnabled != null && isDefaultLog4jLoggerEnabled; } + public Duration getDiagnosticsThresholdDuration() { + if (diagnosticsThresholdDuration == null) return Duration.ofSeconds(60); + return Duration.parse(diagnosticsThresholdDuration); + } + public Duration getMaxRunningTimeDuration() { if (maxRunningTimeDuration == null) return null; return Duration.parse(maxRunningTimeDuration); @@ -268,6 +306,20 @@ public Duration getAggressiveWarmupDuration() { return Duration.parse(aggressiveWarmupDuration); } + public int getNumberOfCollectionForCtl() { return numberOfCollectionForCtl != null ? numberOfCollectionForCtl : 4; } + public String getReadWriteQueryReadManyPct() { return readWriteQueryReadManyPct != null ? readWriteQueryReadManyPct : "90,8,1,1"; } + public int getEncryptedStringFieldCount() { return encryptedStringFieldCount != null ? encryptedStringFieldCount : 1; } + public int getEncryptedLongFieldCount() { return encryptedLongFieldCount != null ? encryptedLongFieldCount : 0; } + public int getEncryptedDoubleFieldCount() { return encryptedDoubleFieldCount != null ? 
encryptedDoubleFieldCount : 0; } + public boolean isEncryptionEnabled() { return encryptionEnabled != null && encryptionEnabled; } + public int getBulkloadBatchSize() { return bulkloadBatchSize != null ? bulkloadBatchSize : 200000; } + public String getTestScenario() { return testScenario != null ? testScenario : "GET"; } + public Configuration.Environment getEnvironment() { + if (environment == null) return Configuration.Environment.Daily; + return Configuration.Environment.valueOf(environment); + } + public boolean isSync() { return useSync != null && useSync; } + public ConnectionMode getConnectionMode() { if (connectionMode == null) return ConnectionMode.DIRECT; return ConnectionMode.valueOf(connectionMode.toUpperCase()); @@ -412,6 +464,8 @@ private void applyField(String key, String value, boolean overwrite) { if (overwrite || isDefaultLog4jLoggerEnabled == null) isDefaultLog4jLoggerEnabled = Boolean.parseBoolean(value); break; case "maxRunningTimeDuration": if (overwrite || maxRunningTimeDuration == null) maxRunningTimeDuration = value; break; + case "diagnosticsThresholdDuration": + if (overwrite || diagnosticsThresholdDuration == null) diagnosticsThresholdDuration = value; break; case "sparsityWaitTime": if (overwrite || sparsityWaitTime == null) sparsityWaitTime = value; break; case "isProactiveConnectionManagementEnabled": @@ -434,6 +488,26 @@ private void applyField(String key, String value, boolean overwrite) { if (overwrite || preferredRegionsList == null) preferredRegionsList = value; break; case "manageDatabase": if (overwrite || manageDatabase == null) manageDatabase = Boolean.parseBoolean(value); break; + case "numberOfCollectionForCtl": + if (overwrite || numberOfCollectionForCtl == null) numberOfCollectionForCtl = Integer.parseInt(value); break; + case "readWriteQueryReadManyPct": + if (overwrite || readWriteQueryReadManyPct == null) readWriteQueryReadManyPct = value; break; + case "encryptedStringFieldCount": + if (overwrite || 
encryptedStringFieldCount == null) encryptedStringFieldCount = Integer.parseInt(value); break; + case "encryptedLongFieldCount": + if (overwrite || encryptedLongFieldCount == null) encryptedLongFieldCount = Integer.parseInt(value); break; + case "encryptedDoubleFieldCount": + if (overwrite || encryptedDoubleFieldCount == null) encryptedDoubleFieldCount = Integer.parseInt(value); break; + case "encryptionEnabled": + if (overwrite || encryptionEnabled == null) encryptionEnabled = Boolean.parseBoolean(value); break; + case "bulkloadBatchSize": + if (overwrite || bulkloadBatchSize == null) bulkloadBatchSize = Integer.parseInt(value); break; + case "testScenario": + if (overwrite || testScenario == null) testScenario = value; break; + case "environment": + if (overwrite || environment == null) environment = value; break; + case "useSync": + if (overwrite || useSync == null) useSync = Boolean.parseBoolean(value); break; // JVM-global properties (minConnectionPoolSizePerEndpoint, isPartitionLevelCircuitBreakerEnabled, // isPerPartitionAutomaticFailoverRequired) are handled in BenchmarkConfig, not per-tenant. case "minConnectionPoolSizePerEndpoint": @@ -449,81 +523,10 @@ private void applyField(String key, String value, boolean overwrite) { } } - // ======== Factory from Configuration (for tests and legacy paths) ======== - - /** - * Build a TenantWorkloadConfig from a legacy Configuration object. - * Used by tests and CLI paths that still parse via JCommander. 
- */ - public static TenantWorkloadConfig fromConfiguration(Configuration cfg) { - TenantWorkloadConfig t = new TenantWorkloadConfig(); - t.id = "cli-tenant"; - t.serviceEndpoint = cfg.getServiceEndpoint(); - t.masterKey = cfg.getMasterKey(); - t.databaseId = cfg.getDatabaseId(); - t.containerId = cfg.getCollectionId(); - t.operation = cfg.getOperationType().name(); - t.concurrency = cfg.getConcurrency(); - t.numberOfOperations = cfg.getNumberOfOperations(); - t.numberOfPreCreatedDocuments = cfg.getNumberOfPreCreatedDocuments(); - t.throughput = cfg.getThroughput(); - t.skipWarmUpOperations = cfg.getSkipWarmUpOperations(); - t.documentDataFieldSize = cfg.getDocumentDataFieldSize(); - t.documentDataFieldCount = cfg.getDocumentDataFieldCount(); - t.contentResponseOnWriteEnabled = cfg.isContentResponseOnWriteEnabled(); - t.disablePassingPartitionKeyAsOptionOnWrite = cfg.isDisablePassingPartitionKeyAsOptionOnWrite(); - t.useNameLink = cfg.isUseNameLink(); - t.connectionMode = cfg.getConnectionMode().name(); - t.consistencyLevel = cfg.getConsistencyLevel().name(); - t.maxConnectionPoolSize = cfg.getMaxConnectionPoolSize(); - t.connectionSharingAcrossClientsEnabled = cfg.isConnectionSharingAcrossClientsEnabled(); - t.manageDatabase = cfg.shouldManageDatabase(); - t.applicationName = cfg.getApplicationName(); - t.isManagedIdentityRequired = cfg.isManagedIdentityRequired(); - - // AAD auth - t.aadLoginEndpoint = cfg.getInstanceAadLoginEndpoint(); - t.aadTenantId = cfg.getInstanceAadTenantId(); - t.aadManagedIdentityClientId = cfg.getInstanceAadManagedIdentityClientId(); - - // Workload details - t.tupleSize = cfg.getTupleSize(); - if (cfg.getMaxRunningTimeDuration() != null) { - t.maxRunningTimeDuration = cfg.getMaxRunningTimeDuration().toString(); - } - if (cfg.getSparsityWaitTime() != null) { - t.sparsityWaitTime = cfg.getSparsityWaitTime().toString(); - } - - // Diagnostics thresholds - t.pointOperationLatencyThresholdMs = cfg.getPointOperationThreshold().toMillis() < 
Duration.ofDays(100).toMillis() - ? (int) cfg.getPointOperationThreshold().toMillis() : null; - t.nonPointOperationLatencyThresholdMs = cfg.getNonPointOperationThreshold().toMillis() < Duration.ofDays(100).toMillis() - ? (int) cfg.getNonPointOperationThreshold().toMillis() : null; - - // Feature flags - t.isRegionScopedSessionContainerEnabled = cfg.isRegionScopedSessionContainerEnabled(); - t.isDefaultLog4jLoggerEnabled = cfg.isDefaultLog4jLoggerEnabled(); - - // Proactive connection management - t.isProactiveConnectionManagementEnabled = cfg.isProactiveConnectionManagementEnabled(); - t.isUseUnWarmedUpContainer = cfg.isUseUnWarmedUpContainer(); - t.proactiveConnectionRegionsCount = cfg.getProactiveConnectionRegionsCount(); - if (cfg.getAggressiveWarmupDuration() != null) { - t.aggressiveWarmupDuration = cfg.getAggressiveWarmupDuration().toString(); - } - - // Connection - t.preferredRegionsList = cfg.getPreferredRegionsList() != null - ? String.join(",", cfg.getPreferredRegionsList()) : null; - - return t; - } - // ======== Static parsing ======== - public static List parseTenantsFile(File tenantsFile) throws IOException { - JsonNode root = OBJECT_MAPPER.readTree(tenantsFile); + public static List parseWorkloadConfig(File workloadConfigFile) throws IOException { + JsonNode root = OBJECT_MAPPER.readTree(workloadConfigFile); Map globalDefaults = new HashMap<>(); JsonNode defaultsNode = root.get("globalDefaults"); @@ -536,7 +539,7 @@ public static List parseTenantsFile(File tenantsFile) thro } if (!globalDefaults.isEmpty()) { - logger.info("tenants.json globalDefaults applied to all tenants (per-tenant values take priority): {}", + logger.info("globalDefaults applied to all tenants (per-tenant values take priority): {}", globalDefaults.keySet()); } @@ -552,7 +555,7 @@ public static List parseTenantsFile(File tenantsFile) thro } } - logger.info("Parsed {} tenants from {}", tenants.size(), tenantsFile.getName()); + logger.info("Parsed {} tenants from {}", 
tenants.size(), workloadConfigFile.getName()); return tenants; } diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/ctl/AsyncCtlWorkload.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/ctl/AsyncCtlWorkload.java index 70ab569cc1e1..cec1ed3239b7 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/ctl/AsyncCtlWorkload.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/ctl/AsyncCtlWorkload.java @@ -4,7 +4,6 @@ package com.azure.cosmos.benchmark.ctl; import com.azure.core.credential.TokenCredential; -import com.azure.cosmos.BridgeInternal; import com.azure.cosmos.ConnectionMode; import com.azure.cosmos.CosmosAsyncClient; import com.azure.cosmos.CosmosAsyncContainer; @@ -13,11 +12,12 @@ import com.azure.cosmos.CosmosException; import com.azure.cosmos.DirectConnectionConfig; import com.azure.cosmos.GatewayConnectionConfig; +import com.azure.cosmos.benchmark.BenchmarkConfig; import com.azure.cosmos.benchmark.BenchmarkHelper; import com.azure.cosmos.benchmark.BenchmarkRequestSubscriber; -import com.azure.cosmos.benchmark.Configuration; import com.azure.cosmos.benchmark.PojoizedJson; import com.azure.cosmos.benchmark.ScheduledReporterFactory; +import com.azure.cosmos.benchmark.TenantWorkloadConfig; import com.azure.cosmos.implementation.HttpConstants; import com.azure.cosmos.implementation.OperationType; import com.azure.cosmos.implementation.RequestOptions; @@ -32,7 +32,6 @@ import com.codahale.metrics.jvm.CachedThreadStatesGaugeSet; import com.codahale.metrics.jvm.GarbageCollectorMetricSet; import com.codahale.metrics.jvm.MemoryUsageGaugeSet; -import io.micrometer.core.instrument.MeterRegistry; import org.apache.commons.lang3.RandomStringUtils; import org.mpierce.metrics.reservoir.hdrhistogram.HdrHistogramResetOnSnapshotReservoir; import org.slf4j.Logger; @@ -61,7 +60,8 @@ public class AsyncCtlWorkload { private final MetricRegistry 
metricsRegistry = new MetricRegistry(); private final Logger logger; private final CosmosAsyncClient cosmosClient; - private final Configuration configuration; + private final TenantWorkloadConfig workloadConfig; + private final BenchmarkConfig benchConfig; private final Map> docsToRead = new HashMap<>(); private final Map> itemIdentityMap = new HashMap<>(); private final Semaphore concurrencyControlSemaphore; @@ -91,65 +91,55 @@ public class AsyncCtlWorkload { private int queryPct; private int readManyPct; - public AsyncCtlWorkload(Configuration cfg) { - final TokenCredential credential = cfg.isManagedIdentityRequired() - ? cfg.buildTokenCredential() + public AsyncCtlWorkload(TenantWorkloadConfig workloadCfg, BenchmarkConfig benchCfg) { + final TokenCredential credential = workloadCfg.isManagedIdentityRequired() + ? workloadCfg.buildTokenCredential() : null; - CosmosClientBuilder cosmosClientBuilder = cfg.isManagedIdentityRequired() ? + CosmosClientBuilder cosmosClientBuilder = workloadCfg.isManagedIdentityRequired() ? 
new CosmosClientBuilder().credential(credential) : - new CosmosClientBuilder().key(cfg.getMasterKey()); + new CosmosClientBuilder().key(workloadCfg.getMasterKey()); cosmosClientBuilder - .preferredRegions(cfg.getPreferredRegionsList()) - .endpoint(cfg.getServiceEndpoint()) - .consistencyLevel(cfg.getConsistencyLevel()) - .contentResponseOnWriteEnabled(cfg.isContentResponseOnWriteEnabled()); + .preferredRegions(workloadCfg.getPreferredRegionsList()) + .endpoint(workloadCfg.getServiceEndpoint()) + .consistencyLevel(workloadCfg.getConsistencyLevel()) + .contentResponseOnWriteEnabled(workloadCfg.isContentResponseOnWriteEnabled()); - if (cfg.getConnectionMode().equals(ConnectionMode.DIRECT)) { + if (workloadCfg.getConnectionMode().equals(ConnectionMode.DIRECT)) { cosmosClientBuilder = cosmosClientBuilder.directMode(DirectConnectionConfig.getDefaultConfig()); } else { GatewayConnectionConfig gatewayConnectionConfig = new GatewayConnectionConfig(); - gatewayConnectionConfig.setMaxConnectionPoolSize(cfg.getMaxConnectionPoolSize()); + gatewayConnectionConfig.setMaxConnectionPoolSize(workloadCfg.getMaxConnectionPoolSize()); cosmosClientBuilder = cosmosClientBuilder.gatewayMode(gatewayConnectionConfig); } cosmosClient = cosmosClientBuilder.buildAsyncClient(); - configuration = cfg; + workloadConfig = workloadCfg; + benchConfig = benchCfg; logger = LoggerFactory.getLogger(this.getClass()); - parsedReadWriteQueryReadManyPct(configuration.getReadWriteQueryReadManyPct()); + parsedReadWriteQueryReadManyPct(workloadConfig.getReadWriteQueryReadManyPct()); - createDatabaseAndContainers(configuration); + createDatabaseAndContainers(workloadCfg); partitionKey = containers.get(0).read().block().getProperties().getPartitionKeyDefinition() .getPaths().iterator().next().split("/")[1]; - concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency()); + concurrencyControlSemaphore = new Semaphore(workloadCfg.getConcurrency()); - logger.info("PRE-populating {} documents ....", 
cfg.getNumberOfPreCreatedDocuments()); - dataFieldValue = RandomStringUtils.randomAlphabetic(configuration.getDocumentDataFieldSize()); - createPrePopulatedDocs(configuration.getNumberOfPreCreatedDocuments()); + logger.info("PRE-populating {} documents ....", workloadCfg.getNumberOfPreCreatedDocuments()); + dataFieldValue = RandomStringUtils.randomAlphabetic(workloadConfig.getDocumentDataFieldSize()); + createPrePopulatedDocs(workloadConfig.getNumberOfPreCreatedDocuments()); createItemIdentityMap(docsToRead); - if (configuration.isEnableJvmStats()) { + if (benchConfig.isEnableJvmStats()) { metricsRegistry.register("gc", new GarbageCollectorMetricSet()); metricsRegistry.register("threads", new CachedThreadStatesGaugeSet(10, TimeUnit.SECONDS)); metricsRegistry.register("memory", new MemoryUsageGaugeSet()); } - reporter = ScheduledReporterFactory.create(cfg, metricsRegistry); + reporter = ScheduledReporterFactory.create(benchCfg, metricsRegistry); - MeterRegistry registry = configuration.getAzureMonitorMeterRegistry(); - - if (registry != null) { - BridgeInternal.monitorTelemetry(registry); - } - - registry = configuration.getGraphiteMeterRegistry(); - - if (registry != null) { - BridgeInternal.monitorTelemetry(registry); - } prefixUuidForCreate = UUID.randomUUID().toString(); random = new Random(); } @@ -157,7 +147,7 @@ public AsyncCtlWorkload(Configuration cfg) { public void shutdown() { if (this.databaseCreated) { cosmosAsyncDatabase.delete().block(); - logger.info("Deleted temporary database {} created for this test", this.configuration.getDatabaseId()); + logger.info("Deleted temporary database {} created for this test", this.workloadConfig.getDatabaseId()); } else if (containerToClearAfterTest.size() > 0) { for (String id : containerToClearAfterTest) { cosmosAsyncDatabase.getContainer(id).delete().block(); @@ -174,7 +164,7 @@ private void performWorkload(BaseSubscriber documentSubscriber, Operatio PojoizedJson data = 
BenchmarkHelper.generateDocument(prefixUuidForCreate + i, dataFieldValue, partitionKey, - configuration.getDocumentDataFieldCount()); + workloadConfig.getDocumentDataFieldCount()); obs = container.createItem(data).flux(); } else if (type.equals(OperationType.Query) && !isReadMany) { CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); @@ -214,21 +204,21 @@ public void run() throws Exception { queryLatency = metricsRegistry.register("Query Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); readManyLatency = metricsRegistry.register("Read Many Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); - reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS); + reporter.start(benchConfig.getPrintingInterval(), TimeUnit.SECONDS); long startTime = System.currentTimeMillis(); AtomicLong count = new AtomicLong(0); long i; int writeRange = readPct + writePct; int queryRange = readPct + writePct + queryPct; - for (i = 0; BenchmarkHelper.shouldContinue(startTime, i, configuration); i++) { + for (i = 0; BenchmarkHelper.shouldContinue(startTime, i, workloadConfig); i++) { int index = (int) i % 100; if (index < readPct) { BenchmarkRequestSubscriber readSubscriber = new BenchmarkRequestSubscriber<>(readSuccessMeter, readFailureMeter, concurrencyControlSemaphore, count, - configuration.getDiagnosticsThresholdDuration()); + workloadConfig.getDiagnosticsThresholdDuration()); readSubscriber.context = readLatency.time(); performWorkload(readSubscriber, OperationType.Read, i, false); } else if (index < writeRange) { @@ -236,7 +226,7 @@ public void run() throws Exception { writeFailureMeter, concurrencyControlSemaphore, count, - configuration.getDiagnosticsThresholdDuration()); + workloadConfig.getDiagnosticsThresholdDuration()); writeSubscriber.context = writeLatency.time(); performWorkload(writeSubscriber, OperationType.Create, i, false); @@ -245,7 +235,7 @@ public void run() throws Exception { queryFailureMeter, 
concurrencyControlSemaphore, count, - configuration.getDiagnosticsThresholdDuration()); + workloadConfig.getDiagnosticsThresholdDuration()); querySubscriber.context = queryLatency.time(); performWorkload(querySubscriber, OperationType.Query, i, false); } else { @@ -253,7 +243,7 @@ public void run() throws Exception { readManyFailureMeter, concurrencyControlSemaphore, count, - configuration.getDiagnosticsThresholdDuration()); + workloadConfig.getDiagnosticsThresholdDuration()); readManySubscriber.context = readManyLatency.time(); performWorkload(readManySubscriber, OperationType.Query, i, true); } @@ -267,7 +257,7 @@ public void run() throws Exception { long endTime = System.currentTimeMillis(); logger.info("[{}] operations performed in [{}] seconds.", - configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000)); + workloadConfig.getNumberOfOperations(), (int) ((endTime - startTime) / 1000)); reporter.report(); reporter.close(); @@ -303,7 +293,7 @@ private void createPrePopulatedDocs(int numberOfPreCreatedDocuments) { PojoizedJson newDoc = BenchmarkHelper.generateDocument(uId, dataFieldValue, partitionKey, - configuration.getDocumentDataFieldCount()); + workloadConfig.getDocumentDataFieldCount()); Flux obs = container.createItem(newDoc).map(resp -> { PojoizedJson x = @@ -337,22 +327,22 @@ private void createItemIdentityMap(Map> docsToRead) { .add(new CosmosItemIdentity(new PartitionKey(pojoizedJson.getId()), pojoizedJson.getId())))); } - private void createDatabaseAndContainers(Configuration cfg) { + private void createDatabaseAndContainers(TenantWorkloadConfig cfg) { try { - cosmosAsyncDatabase = cosmosClient.getDatabase(this.configuration.getDatabaseId()); + cosmosAsyncDatabase = cosmosClient.getDatabase(cfg.getDatabaseId()); cosmosAsyncDatabase.read().block(); } catch (CosmosException e) { if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) { - cosmosClient.createDatabase(cfg.getDatabaseId(), 
ThroughputProperties.createManualThroughput(this.configuration.getThroughput())).block(); + cosmosClient.createDatabase(cfg.getDatabaseId(), ThroughputProperties.createManualThroughput(cfg.getThroughput())).block(); cosmosAsyncDatabase = cosmosClient.getDatabase(cfg.getDatabaseId()); - logger.info("Database {} is created for this test", this.configuration.getDatabaseId()); + logger.info("Database {} is created for this test", cfg.getDatabaseId()); databaseCreated = true; } else { throw e; } } - int numberOfCollection = cfg.getNumberOfCollectionForCtl(); + int numberOfCollection = workloadConfig.getNumberOfCollectionForCtl(); if (numberOfCollection < 1) { numberOfCollection = 1; } @@ -360,7 +350,7 @@ private void createDatabaseAndContainers(Configuration cfg) { for (int i = 1; i <= numberOfCollection; i++) { try { CosmosAsyncContainer cosmosAsyncContainer = - cosmosAsyncDatabase.getContainer(this.configuration.getCollectionId() + "_" + i); + cosmosAsyncDatabase.getContainer(cfg.getContainerId() + "_" + i); cosmosAsyncContainer.read().block(); containers.add(cosmosAsyncContainer); @@ -368,14 +358,14 @@ private void createDatabaseAndContainers(Configuration cfg) { } catch (CosmosException e) { if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) { cosmosAsyncDatabase.createContainer( - this.configuration.getCollectionId() + "_" + i, - Configuration.DEFAULT_PARTITION_KEY_PATH + cfg.getContainerId() + "_" + i, + TenantWorkloadConfig.DEFAULT_PARTITION_KEY_PATH ).block(); CosmosAsyncContainer cosmosAsyncContainer = - cosmosAsyncDatabase.getContainer(this.configuration.getCollectionId() + "_" + i); + cosmosAsyncDatabase.getContainer(cfg.getContainerId() + "_" + i); logger.info("Collection {} is created for this test", - this.configuration.getCollectionId() + "_" + i); + cfg.getContainerId() + "_" + i); containers.add(cosmosAsyncContainer); containerToClearAfterTest.add(cosmosAsyncContainer.getId()); } else { diff --git 
a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/encryption/AsyncEncryptionBenchmark.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/encryption/AsyncEncryptionBenchmark.java index 9788c0a5ec82..21fe0a03854b 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/encryption/AsyncEncryptionBenchmark.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/encryption/AsyncEncryptionBenchmark.java @@ -4,7 +4,6 @@ package com.azure.cosmos.benchmark.encryption; import com.azure.core.credential.TokenCredential; -import com.azure.cosmos.BridgeInternal; import com.azure.cosmos.ConnectionMode; import com.azure.cosmos.CosmosAsyncClient; import com.azure.cosmos.CosmosAsyncContainer; @@ -13,9 +12,10 @@ import com.azure.cosmos.CosmosException; import com.azure.cosmos.DirectConnectionConfig; import com.azure.cosmos.GatewayConnectionConfig; +import com.azure.cosmos.benchmark.BenchmarkConfig; import com.azure.cosmos.benchmark.BenchmarkHelper; -import com.azure.cosmos.benchmark.Configuration; import com.azure.cosmos.benchmark.Operation; import com.azure.cosmos.benchmark.PojoizedJson; +import com.azure.cosmos.benchmark.TenantWorkloadConfig; import com.azure.cosmos.encryption.CosmosEncryptionAsyncClient; import com.azure.cosmos.encryption.CosmosEncryptionAsyncContainer; @@ -40,17 +40,13 @@ import com.azure.security.keyvault.keys.cryptography.models.EncryptionAlgorithm; import com.codahale.metrics.ConsoleReporter; import com.codahale.metrics.CsvReporter; import com.codahale.metrics.Meter; -import com.codahale.metrics.MetricFilter; import com.codahale.metrics.MetricRegistry; import com.codahale.metrics.ScheduledReporter; import com.codahale.metrics.Timer; -import com.codahale.metrics.graphite.Graphite; -import com.codahale.metrics.graphite.GraphiteReporter; import 
com.codahale.metrics.jvm.CachedThreadStatesGaugeSet; import com.codahale.metrics.jvm.GarbageCollectorMetricSet; import com.codahale.metrics.jvm.MemoryUsageGaugeSet; -import io.micrometer.core.instrument.MeterRegistry; import org.apache.commons.lang3.RandomStringUtils; import org.mpierce.metrics.reservoir.hdrhistogram.HdrHistogramResetOnSnapshotReservoir; import org.reactivestreams.Subscription; @@ -63,7 +61,6 @@ import java.io.IOException; import java.io.InputStream; -import java.net.InetSocketAddress; import java.time.Duration; import java.util.ArrayList; import java.util.List; @@ -92,7 +89,8 @@ public abstract class AsyncEncryptionBenchmark { final CosmosAsyncClient cosmosClient; final String partitionKey; - final Configuration configuration; + final BenchmarkConfig benchConfig; + final TenantWorkloadConfig workloadConfig; final List docsToRead; final Semaphore concurrencyControlSemaphore; Timer latency; @@ -112,57 +110,59 @@ public abstract class AsyncEncryptionBenchmark { private AtomicBoolean warmupMode = new AtomicBoolean(false); - AsyncEncryptionBenchmark(Configuration cfg) throws IOException { + AsyncEncryptionBenchmark(TenantWorkloadConfig workloadCfg, BenchmarkConfig benchCfg) throws IOException { + + workloadConfig = workloadCfg; - final TokenCredential credential = cfg.isManagedIdentityRequired() - ? cfg.buildTokenCredential() + final TokenCredential credential = workloadCfg.isManagedIdentityRequired() + ? workloadCfg.buildTokenCredential() : null; - CosmosClientBuilder cosmosClientBuilder = cfg.isManagedIdentityRequired() ? + CosmosClientBuilder cosmosClientBuilder = workloadCfg.isManagedIdentityRequired() ? 
new CosmosClientBuilder().credential(credential) : - new CosmosClientBuilder().key(cfg.getMasterKey()); + new CosmosClientBuilder().key(workloadCfg.getMasterKey()); cosmosClientBuilder - .preferredRegions(cfg.getPreferredRegionsList()) - .endpoint(cfg.getServiceEndpoint()) - .consistencyLevel(cfg.getConsistencyLevel()) - .contentResponseOnWriteEnabled(cfg.isContentResponseOnWriteEnabled()); + .preferredRegions(workloadCfg.getPreferredRegionsList()) + .endpoint(workloadCfg.getServiceEndpoint()) + .consistencyLevel(workloadCfg.getConsistencyLevel()) + .contentResponseOnWriteEnabled(workloadCfg.isContentResponseOnWriteEnabled()); - if (cfg.getConnectionMode().equals(ConnectionMode.DIRECT)) { + if (workloadCfg.getConnectionMode().equals(ConnectionMode.DIRECT)) { cosmosClientBuilder = cosmosClientBuilder.directMode(DirectConnectionConfig.getDefaultConfig()); } else { GatewayConnectionConfig gatewayConnectionConfig = new GatewayConnectionConfig(); - gatewayConnectionConfig.setMaxConnectionPoolSize(cfg.getMaxConnectionPoolSize()); + gatewayConnectionConfig.setMaxConnectionPoolSize(workloadCfg.getMaxConnectionPoolSize()); cosmosClientBuilder = cosmosClientBuilder.gatewayMode(gatewayConnectionConfig); } cosmosClient = cosmosClientBuilder.buildAsyncClient(); cosmosEncryptionAsyncClient = createEncryptionClientInstance(cosmosClient); - configuration = cfg; + benchConfig = benchCfg; logger = LoggerFactory.getLogger(this.getClass()); createEncryptionDatabaseAndContainer(); partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition() .getPaths().iterator().next().split("/")[1]; - concurrencyControlSemaphore = new Semaphore(cfg.getConcurrency()); + concurrencyControlSemaphore = new Semaphore(workloadCfg.getConcurrency()); ArrayList> createDocumentObservables = new ArrayList<>(); - if (configuration.getOperationType() != Operation.WriteLatency - && configuration.getOperationType() != Operation.WriteThroughput - && configuration.getOperationType() 
!= Operation.ReadMyWrites) { - logger.info("PRE-populating {} documents ....", cfg.getNumberOfPreCreatedDocuments()); - String dataFieldValue = RandomStringUtils.randomAlphabetic(cfg.getDocumentDataFieldSize()); - for (int i = 0; i < cfg.getNumberOfPreCreatedDocuments(); i++) { + if (workloadConfig.getOperationType() != Operation.WriteLatency + && workloadConfig.getOperationType() != Operation.WriteThroughput + && workloadConfig.getOperationType() != Operation.ReadMyWrites) { + logger.info("PRE-populating {} documents ....", workloadCfg.getNumberOfPreCreatedDocuments()); + String dataFieldValue = RandomStringUtils.randomAlphabetic(workloadCfg.getDocumentDataFieldSize()); + for (int i = 0; i < workloadCfg.getNumberOfPreCreatedDocuments(); i++) { String uuid = UUID.randomUUID().toString(); PojoizedJson newDoc = BenchmarkHelper.generateDocument(uuid, dataFieldValue, partitionKey, - configuration.getDocumentDataFieldCount()); - for (int j = 1; j <= cfg.getEncryptedStringFieldCount(); j++) { + workloadConfig.getDocumentDataFieldCount()); + for (int j = 1; j <= workloadCfg.getEncryptedStringFieldCount(); j++) { newDoc.setProperty(ENCRYPTED_STRING_FIELD + j, uuid); } - for (int j = 1; j <= cfg.getEncryptedLongFieldCount(); j++) { + for (int j = 1; j <= workloadCfg.getEncryptedLongFieldCount(); j++) { newDoc.setProperty(ENCRYPTED_LONG_FIELD + j, 1234l); } - for (int j = 1; j <= cfg.getEncryptedDoubleFieldCount(); j++) { + for (int j = 1; j <= workloadCfg.getEncryptedDoubleFieldCount(); j++) { newDoc.setProperty(ENCRYPTED_DOUBLE_FIELD + j, 1234.01d); } @@ -208,49 +208,27 @@ uuid, new PartitionKey(partitionKey), PojoizedJson.class) } docsToRead = Flux.merge(Flux.fromIterable(createDocumentObservables), 100).collectList().block(); - logger.info("Finished pre-populating {} documents", cfg.getNumberOfPreCreatedDocuments()); + logger.info("Finished pre-populating {} documents", workloadCfg.getNumberOfPreCreatedDocuments()); init(); - if (configuration.isEnableJvmStats()) { + if 
(benchConfig.isEnableJvmStats()) { metricsRegistry.register("gc", new GarbageCollectorMetricSet()); metricsRegistry.register("threads", new CachedThreadStatesGaugeSet(10, TimeUnit.SECONDS)); metricsRegistry.register("memory", new MemoryUsageGaugeSet()); } - if (configuration.getGraphiteEndpoint() != null) { - final Graphite graphite = new Graphite(new InetSocketAddress( - configuration.getGraphiteEndpoint(), - configuration.getGraphiteEndpointPort())); - reporter = GraphiteReporter.forRegistry(metricsRegistry) - .prefixedWith(configuration.getOperationType().name()) - .convertDurationsTo(TimeUnit.MILLISECONDS) - .convertRatesTo(TimeUnit.SECONDS) - .filter(MetricFilter.ALL) - .build(graphite); - } else if (configuration.getReportingDirectory() != null) { + if (benchConfig.getReportingDirectory() != null) { reporter = CsvReporter.forRegistry(metricsRegistry) .convertDurationsTo(TimeUnit.MILLISECONDS) .convertRatesTo(TimeUnit.SECONDS) - .build(configuration.getReportingDirectory()); + .build(new java.io.File(benchConfig.getReportingDirectory())); } else { reporter = ConsoleReporter.forRegistry(metricsRegistry) .convertDurationsTo(TimeUnit.MILLISECONDS) .convertRatesTo(TimeUnit.SECONDS) .build(); } - - MeterRegistry registry = configuration.getAzureMonitorMeterRegistry(); - - if (registry != null) { - BridgeInternal.monitorTelemetry(registry); - } - - registry = configuration.getGraphiteMeterRegistry(); - - if (registry != null) { - BridgeInternal.monitorTelemetry(registry); - } } protected void init() { @@ -259,10 +237,10 @@ protected void init() { public void shutdown() { if (this.databaseCreated) { cosmosAsyncDatabase.delete().block(); - logger.info("Deleted temporary database {} created for this test", this.configuration.getDatabaseId()); + logger.info("Deleted temporary database {} created for this test", this.workloadConfig.getDatabaseId()); } else if (this.collectionCreated) { cosmosAsyncContainer.delete().block(); - logger.info("Deleted temporary collection {} 
created for this test", this.configuration.getCollectionId()); + logger.info("Deleted temporary collection {} created for this test", this.workloadConfig.getContainerId()); } cosmosClient.close(); @@ -272,15 +250,15 @@ protected void onSuccess() { } protected void initializeMetersIfSkippedEnoughOperations(AtomicLong count) { - if (configuration.getSkipWarmUpOperations() > 0) { - if (count.get() >= configuration.getSkipWarmUpOperations()) { + if (workloadConfig.getSkipWarmUpOperations() > 0) { + if (count.get() >= workloadConfig.getSkipWarmUpOperations()) { if (warmupMode.get()) { synchronized (this) { if (warmupMode.get()) { logger.info("Warmup phase finished. Starting capturing perf numbers ...."); resetMeters(); initializeMeter(); - reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS); + reporter.start(benchConfig.getPrintingInterval(), TimeUnit.SECONDS); warmupMode.set(false); } } @@ -297,7 +275,7 @@ protected void onError(Throwable throwable) { private void resetMeters() { metricsRegistry.remove(SUCCESS_COUNTER_METER_NAME); metricsRegistry.remove(FAILURE_COUNTER_METER_NAME); - if (latencyAwareOperations(configuration.getOperationType())) { + if (latencyAwareOperations(workloadConfig.getOperationType())) { metricsRegistry.remove(LATENCY_METER_NAME); } } @@ -305,14 +283,14 @@ private void resetMeters() { private void initializeMeter() { successMeter = metricsRegistry.meter(SUCCESS_COUNTER_METER_NAME); failureMeter = metricsRegistry.meter(FAILURE_COUNTER_METER_NAME); - if (latencyAwareOperations(configuration.getOperationType())) { + if (latencyAwareOperations(workloadConfig.getOperationType())) { latency = metricsRegistry.register(LATENCY_METER_NAME, new Timer(new HdrHistogramResetOnSnapshotReservoir())); } } private boolean latencyAwareOperations(Operation operation) { - switch (configuration.getOperationType()) { + switch (workloadConfig.getOperationType()) { case ReadLatency: case WriteLatency: case QueryInClauseParallel: @@ -331,12 +309,12 @@ 
private boolean latencyAwareOperations(Operation operation) { public void run() throws Exception { initializeMeter(); - if (configuration.getSkipWarmUpOperations() > 0) { + if (workloadConfig.getSkipWarmUpOperations() > 0) { logger.info("Starting warm up phase. Executing {} operations to warm up ...", - configuration.getSkipWarmUpOperations()); + workloadConfig.getSkipWarmUpOperations()); warmupMode.set(true); } else { - reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS); + reporter.start(benchConfig.getPrintingInterval(), TimeUnit.SECONDS); } long startTime = System.currentTimeMillis(); @@ -344,7 +322,7 @@ public void run() throws Exception { AtomicLong count = new AtomicLong(0); long i; - for (i = 0; BenchmarkHelper.shouldContinue(startTime, i, configuration); i++) { + for (i = 0; BenchmarkHelper.shouldContinue(startTime, i, workloadConfig); i++) { BaseSubscriber baseSubscriber = new BaseSubscriber() { @Override @@ -403,16 +381,16 @@ protected void hookOnError(Throwable throwable) { long endTime = System.currentTimeMillis(); logger.info("[{}] operations performed in [{}] seconds.", - configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000)); + workloadConfig.getNumberOfOperations(), (int) ((endTime - startTime) / 1000)); reporter.report(); reporter.close(); } protected Mono sparsityMono(long i) { - Duration duration = configuration.getSparsityWaitTime(); + Duration duration = workloadConfig.getSparsityWaitTime(); if (duration != null && !duration.isZero()) { - if (configuration.getSkipWarmUpOperations() > i) { + if (workloadConfig.getSkipWarmUpOperations() > i) { // don't wait on the initial warm up time. 
return null; } @@ -473,14 +451,14 @@ private String getConfiguration(String key, Properties properties) { private void createEncryptionDatabaseAndContainer() { try { - cosmosAsyncDatabase = cosmosClient.getDatabase(this.configuration.getDatabaseId()); + cosmosAsyncDatabase = cosmosClient.getDatabase(this.workloadConfig.getDatabaseId()); cosmosAsyncDatabase.read().block(); FeedResponse keyFeedResponse = cosmosAsyncDatabase.readAllClientEncryptionKeys().byPage().blockFirst(); if (keyFeedResponse.getResults().size() < 1) { throw new IllegalArgumentException(String.format("database %s does not have any client encryption key" + " %s" + - "key", this.configuration.getDatabaseId(), dataEncryptionKeyId)); + "key", this.workloadConfig.getDatabaseId(), dataEncryptionKeyId)); } else { boolean containsDataEncryptionKeyId = false; for (CosmosClientEncryptionKeyProperties keyProperties : keyFeedResponse.getResults()) { @@ -492,17 +470,17 @@ private void createEncryptionDatabaseAndContainer() { if (!containsDataEncryptionKeyId) { throw new IllegalArgumentException(String.format("database %s does not have any client encryption" + " key %s" + - "key", this.configuration.getDatabaseId(), dataEncryptionKeyId)); + "key", this.workloadConfig.getDatabaseId(), dataEncryptionKeyId)); } } cosmosEncryptionAsyncDatabase = - cosmosEncryptionAsyncClient.getCosmosEncryptionAsyncDatabase(this.configuration.getDatabaseId()); + cosmosEncryptionAsyncClient.getCosmosEncryptionAsyncDatabase(this.workloadConfig.getDatabaseId()); } catch (CosmosException e) { if (e.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND) { - cosmosClient.createDatabase(configuration.getDatabaseId()).block(); - cosmosAsyncDatabase = cosmosClient.getDatabase(configuration.getDatabaseId()); + cosmosClient.createDatabase(workloadConfig.getDatabaseId()).block(); + cosmosAsyncDatabase = cosmosClient.getDatabase(workloadConfig.getDatabaseId()); cosmosEncryptionAsyncDatabase = - 
cosmosEncryptionAsyncClient.getCosmosEncryptionAsyncDatabase(this.configuration.getDatabaseId()); + cosmosEncryptionAsyncClient.getCosmosEncryptionAsyncDatabase(this.workloadConfig.getDatabaseId()); String masterKeyUrlFromConfig = getConfiguration("KeyVaultMasterKeyUrl", keyVaultProperties); if (StringUtils.isEmpty(masterKeyUrlFromConfig)) { throw new IllegalArgumentException("Please specify a valid MasterKeyUrl in the appSettings.json"); @@ -519,14 +497,14 @@ private void createEncryptionDatabaseAndContainer() { CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256.getName(), metadata).block().getProperties(); logger.info("Database {} is created for this test with client encryption key {}", - this.configuration.getDatabaseId(), dataEncryptionKeyId); + this.workloadConfig.getDatabaseId(), dataEncryptionKeyId); databaseCreated = true; } else { throw e; } } - cosmosAsyncContainer = cosmosAsyncDatabase.getContainer(this.configuration.getCollectionId()); + cosmosAsyncContainer = cosmosAsyncDatabase.getContainer(this.workloadConfig.getContainerId()); try { cosmosAsyncContainer.delete().block(); } catch (CosmosException ex) { @@ -534,7 +512,7 @@ private void createEncryptionDatabaseAndContainer() { } List encryptionPaths = new ArrayList<>(); - for (int i = 1; i <= configuration.getEncryptedStringFieldCount(); i++) { + for (int i = 1; i <= workloadConfig.getEncryptedStringFieldCount(); i++) { ClientEncryptionIncludedPath includedPath = new ClientEncryptionIncludedPath(); includedPath.setClientEncryptionKeyId(dataEncryptionKeyId); includedPath.setPath("/" + ENCRYPTED_STRING_FIELD + i); @@ -542,7 +520,7 @@ private void createEncryptionDatabaseAndContainer() { includedPath.setEncryptionAlgorithm(CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256.getName()); encryptionPaths.add(includedPath); } - for (int i = 1; i <= configuration.getEncryptedDoubleFieldCount(); i++) { + for (int i = 1; i <= workloadConfig.getEncryptedDoubleFieldCount(); i++) { 
ClientEncryptionIncludedPath includedPath = new ClientEncryptionIncludedPath(); includedPath.setClientEncryptionKeyId(dataEncryptionKeyId); includedPath.setPath("/" + ENCRYPTED_LONG_FIELD + i); @@ -550,7 +528,7 @@ private void createEncryptionDatabaseAndContainer() { includedPath.setEncryptionAlgorithm(CosmosEncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA256.getName()); encryptionPaths.add(includedPath); } - for (int i = 1; i <= configuration.getEncryptedLongFieldCount(); i++) { + for (int i = 1; i <= workloadConfig.getEncryptedLongFieldCount(); i++) { ClientEncryptionIncludedPath includedPath = new ClientEncryptionIncludedPath(); includedPath.setClientEncryptionKeyId(dataEncryptionKeyId); includedPath.setPath("/" + ENCRYPTED_DOUBLE_FIELD + i); @@ -560,18 +538,18 @@ private void createEncryptionDatabaseAndContainer() { } ClientEncryptionPolicy clientEncryptionPolicy = new ClientEncryptionPolicy(encryptionPaths); CosmosContainerProperties containerProperties = - new CosmosContainerProperties(this.configuration.getCollectionId(), - Configuration.DEFAULT_PARTITION_KEY_PATH); + new CosmosContainerProperties(this.workloadConfig.getContainerId(), + TenantWorkloadConfig.DEFAULT_PARTITION_KEY_PATH); containerProperties.setClientEncryptionPolicy(clientEncryptionPolicy); cosmosAsyncDatabase.createContainer(containerProperties, - ThroughputProperties.createManualThroughput(this.configuration.getThroughput()) + ThroughputProperties.createManualThroughput(this.workloadConfig.getThroughput()) ).block(); - cosmosAsyncContainer = cosmosAsyncDatabase.getContainer(this.configuration.getCollectionId()); + cosmosAsyncContainer = cosmosAsyncDatabase.getContainer(this.workloadConfig.getContainerId()); logger.info("Collection {} is created for this test with encryption paths", - this.configuration.getCollectionId()); + this.workloadConfig.getContainerId()); collectionCreated = true; cosmosEncryptionAsyncContainer = - 
cosmosEncryptionAsyncDatabase.getCosmosEncryptionAsyncContainer(this.configuration.getCollectionId()); + cosmosEncryptionAsyncDatabase.getCosmosEncryptionAsyncContainer(this.workloadConfig.getContainerId()); } } diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/encryption/AsyncEncryptionQueryBenchmark.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/encryption/AsyncEncryptionQueryBenchmark.java index 26efcaad8b4d..3d2c68423aa6 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/encryption/AsyncEncryptionQueryBenchmark.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/encryption/AsyncEncryptionQueryBenchmark.java @@ -3,9 +3,10 @@ package com.azure.cosmos.benchmark.encryption; -import com.azure.cosmos.benchmark.Configuration; +import com.azure.cosmos.benchmark.BenchmarkConfig; import com.azure.cosmos.benchmark.Operation; import com.azure.cosmos.benchmark.PojoizedJson; +import com.azure.cosmos.benchmark.TenantWorkloadConfig; import com.azure.cosmos.models.CosmosQueryRequestOptions; import com.azure.cosmos.models.FeedResponse; import com.azure.cosmos.models.PartitionKey; @@ -53,8 +54,8 @@ protected void hookOnError(Throwable throwable) { } } - public AsyncEncryptionQueryBenchmark(Configuration cfg) throws IOException { - super(cfg); + public AsyncEncryptionQueryBenchmark(TenantWorkloadConfig workloadCfg, BenchmarkConfig benchCfg) throws IOException { + super(workloadCfg, benchCfg); } @Override @@ -74,34 +75,34 @@ protected void performWorkload(BaseSubscriber> baseSu Random r = new Random(); CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); - if (configuration.getOperationType() == Operation.QueryCross) { + if (workloadConfig.getOperationType() == Operation.QueryCross) { - int index = r.nextInt(this.configuration.getNumberOfPreCreatedDocuments()); + int index = 
r.nextInt(this.workloadConfig.getNumberOfPreCreatedDocuments()); String sqlQuery = "Select * from c where c.id = \"" + docsToRead.get(index).getId() + "\""; obs = cosmosEncryptionAsyncContainer.queryItems(sqlQuery, options, PojoizedJson.class).byPage(); - } else if (configuration.getOperationType() == Operation.QuerySingle) { + } else if (workloadConfig.getOperationType() == Operation.QuerySingle) { - int index = r.nextInt(this.configuration.getNumberOfPreCreatedDocuments()); + int index = r.nextInt(this.workloadConfig.getNumberOfPreCreatedDocuments()); String pk = (String) docsToRead.get(index).getProperty(partitionKey); options.setPartitionKey(new PartitionKey(pk)); String sqlQuery = "Select * from c where c." + partitionKey + " = \"" + pk + "\""; obs = cosmosEncryptionAsyncContainer.queryItems(sqlQuery, options, PojoizedJson.class).byPage(); - } else if (configuration.getOperationType() == Operation.QueryParallel) { + } else if (workloadConfig.getOperationType() == Operation.QueryParallel) { String sqlQuery = "Select * from c"; obs = cosmosEncryptionAsyncContainer.queryItems(sqlQuery, options, PojoizedJson.class).byPage(10); - } else if (configuration.getOperationType() == Operation.QueryOrderby) { + } else if (workloadConfig.getOperationType() == Operation.QueryOrderby) { String sqlQuery = "Select * from c order by c._ts"; obs = cosmosEncryptionAsyncContainer.queryItems(sqlQuery, options, PojoizedJson.class).byPage(10); - } else if (configuration.getOperationType() == Operation.QueryTopOrderby) { + } else if (workloadConfig.getOperationType() == Operation.QueryTopOrderby) { String sqlQuery = "Select top 1000 * from c order by c._ts"; obs = cosmosEncryptionAsyncContainer.queryItems(sqlQuery, options, PojoizedJson.class).byPage(); - } else if (configuration.getOperationType() == Operation.ReadAllItemsOfLogicalPartition) { - throw new IllegalArgumentException("Unsupported Operation on encryption: " + configuration.getOperationType()); + } else if 
(workloadConfig.getOperationType() == Operation.ReadAllItemsOfLogicalPartition) { + throw new IllegalArgumentException("Unsupported Operation on encryption: " + workloadConfig.getOperationType()); } else { - throw new IllegalArgumentException("Unsupported Operation: " + configuration.getOperationType()); + throw new IllegalArgumentException("Unsupported Operation: " + workloadConfig.getOperationType()); } concurrencyControlSemaphore.acquire(); diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/encryption/AsyncEncryptionQuerySinglePartitionMultiple.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/encryption/AsyncEncryptionQuerySinglePartitionMultiple.java index e18cebba3dda..386e1d9eb3b0 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/encryption/AsyncEncryptionQuerySinglePartitionMultiple.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/encryption/AsyncEncryptionQuerySinglePartitionMultiple.java @@ -3,8 +3,9 @@ package com.azure.cosmos.benchmark.encryption; -import com.azure.cosmos.benchmark.Configuration; +import com.azure.cosmos.benchmark.BenchmarkConfig; import com.azure.cosmos.benchmark.PojoizedJson; +import com.azure.cosmos.benchmark.TenantWorkloadConfig; import com.azure.cosmos.models.CosmosQueryRequestOptions; import com.azure.cosmos.models.FeedResponse; import com.azure.cosmos.models.PartitionKey; @@ -20,8 +21,8 @@ public class AsyncEncryptionQuerySinglePartitionMultiple extends AsyncEncryption private CosmosQueryRequestOptions options; private int pageCount = 0; - public AsyncEncryptionQuerySinglePartitionMultiple(Configuration cfg) throws IOException { - super(cfg); + public AsyncEncryptionQuerySinglePartitionMultiple(TenantWorkloadConfig workloadCfg, BenchmarkConfig benchCfg) throws IOException { + super(workloadCfg, benchCfg); options = new CosmosQueryRequestOptions(); options.setPartitionKey(new 
PartitionKey("pk")); } diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/encryption/AsyncEncryptionReadBenchmark.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/encryption/AsyncEncryptionReadBenchmark.java index e5af86a99a20..28e5b9be5d6e 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/encryption/AsyncEncryptionReadBenchmark.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/encryption/AsyncEncryptionReadBenchmark.java @@ -3,8 +3,9 @@ package com.azure.cosmos.benchmark.encryption; -import com.azure.cosmos.benchmark.Configuration; +import com.azure.cosmos.benchmark.BenchmarkConfig; import com.azure.cosmos.benchmark.PojoizedJson; +import com.azure.cosmos.benchmark.TenantWorkloadConfig; import com.azure.cosmos.models.CosmosItemRequestOptions; import com.azure.cosmos.models.CosmosItemResponse; import com.azure.cosmos.models.PartitionKey; @@ -49,8 +50,8 @@ protected void hookOnError(Throwable throwable) { } } - public AsyncEncryptionReadBenchmark(Configuration cfg) throws IOException { - super(cfg); + public AsyncEncryptionReadBenchmark(TenantWorkloadConfig workloadCfg, BenchmarkConfig benchCfg) throws IOException { + super(workloadCfg, benchCfg); } @Override @@ -66,7 +67,7 @@ protected void performWorkload(BaseSubscriber baseSubscriber, long concurrencyControlSemaphore.acquire(); - switch (configuration.getOperationType()) { + switch (workloadConfig.getOperationType()) { case ReadThroughput: readThroughput(result, baseSubscriber, i); break; @@ -74,7 +75,7 @@ protected void performWorkload(BaseSubscriber baseSubscriber, long readLatency(result, baseSubscriber, i); break; default: - throw new IllegalArgumentException("invalid workload type " + configuration.getOperationType()); + throw new IllegalArgumentException("invalid workload type " + workloadConfig.getOperationType()); } } diff --git 
a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/encryption/AsyncEncryptionWriteBenchmark.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/encryption/AsyncEncryptionWriteBenchmark.java index 194096aeb756..6af727228af0 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/encryption/AsyncEncryptionWriteBenchmark.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/encryption/AsyncEncryptionWriteBenchmark.java @@ -3,10 +3,11 @@ package com.azure.cosmos.benchmark.encryption; +import com.azure.cosmos.benchmark.BenchmarkConfig; import com.azure.cosmos.benchmark.BenchmarkHelper; -import com.azure.cosmos.benchmark.Configuration; import com.azure.cosmos.benchmark.Operation; import com.azure.cosmos.benchmark.PojoizedJson; +import com.azure.cosmos.benchmark.TenantWorkloadConfig; import com.azure.cosmos.models.CosmosItemRequestOptions; import com.azure.cosmos.models.CosmosItemResponse; import com.azure.cosmos.models.PartitionKey; @@ -56,10 +57,10 @@ protected void hookOnError(Throwable throwable) { } } - public AsyncEncryptionWriteBenchmark(Configuration cfg) throws IOException { - super(cfg); + public AsyncEncryptionWriteBenchmark(TenantWorkloadConfig workloadCfg, BenchmarkConfig benchCfg) throws IOException { + super(workloadCfg, benchCfg); uuid = UUID.randomUUID().toString(); - dataFieldValue = RandomStringUtils.randomAlphabetic(configuration.getDocumentDataFieldSize()); + dataFieldValue = RandomStringUtils.randomAlphabetic(workloadConfig.getDocumentDataFieldSize()); } @Override @@ -69,17 +70,17 @@ protected void performWorkload(BaseSubscriber baseSubscriber PojoizedJson newDoc = BenchmarkHelper.generateDocument(id, dataFieldValue, partitionKey, - configuration.getDocumentDataFieldCount()); - for (int j = 1; j <= configuration.getEncryptedStringFieldCount(); j++) { + workloadConfig.getDocumentDataFieldCount()); + for (int j = 1; j <= 
workloadConfig.getEncryptedStringFieldCount(); j++) { newDoc.setProperty(ENCRYPTED_STRING_FIELD + j, uuid); } - for (int j = 1; j <= configuration.getEncryptedLongFieldCount(); j++) { + for (int j = 1; j <= workloadConfig.getEncryptedLongFieldCount(); j++) { newDoc.setProperty(ENCRYPTED_LONG_FIELD + j, 1234l); } - for (int j = 1; j <= configuration.getEncryptedDoubleFieldCount(); j++) { + for (int j = 1; j <= workloadConfig.getEncryptedDoubleFieldCount(); j++) { newDoc.setProperty(ENCRYPTED_DOUBLE_FIELD + j, 1234.01d); } - if (configuration.isDisablePassingPartitionKeyAsOptionOnWrite()) { + if (workloadConfig.isDisablePassingPartitionKeyAsOptionOnWrite()) { // require parsing partition key from the doc obs = cosmosEncryptionAsyncContainer.createItem(newDoc, new PartitionKey(id), new CosmosItemRequestOptions()); @@ -92,7 +93,7 @@ protected void performWorkload(BaseSubscriber baseSubscriber concurrencyControlSemaphore.acquire(); - if (configuration.getOperationType() == Operation.WriteThroughput) { + if (workloadConfig.getOperationType() == Operation.WriteThroughput) { obs.subscribeOn(Schedulers.parallel()).subscribe(baseSubscriber); } else { LatencySubscriber latencySubscriber = new LatencySubscriber<>(baseSubscriber); diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/AsyncClientFactory.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/AsyncClientFactory.java index 4b1fbc9b43df..eda0fdb20ade 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/AsyncClientFactory.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/AsyncClientFactory.java @@ -10,7 +10,7 @@ import com.azure.cosmos.DirectConnectionConfig; import com.azure.cosmos.GatewayConnectionConfig; import com.azure.cosmos.ThrottlingRetryOptions; -import com.azure.cosmos.benchmark.Configuration; +import 
com.azure.cosmos.benchmark.TenantWorkloadConfig; import com.google.common.base.Preconditions; import java.time.Duration; @@ -38,10 +38,10 @@ private AsyncClientFactory() { /** * Builds a Cosmos async client using the configuration options defined * - * @param cfg Configuration encapsulating options for configuring the AsyncClient - * @return CosmosAsyncClient initialized using the parameters in the Configuration + * @param cfg TenantWorkloadConfig encapsulating options for configuring the AsyncClient + * @return CosmosAsyncClient initialized using the parameters in the TenantWorkloadConfig */ - public static CosmosAsyncClient buildAsyncClient(final Configuration cfg) { + public static CosmosAsyncClient buildAsyncClient(final TenantWorkloadConfig cfg) { Preconditions.checkNotNull(cfg, "The Workload configuration defining the parameters can not be null"); final CosmosClientBuilder cosmosClientBuilder = new CosmosClientBuilder() .endpoint(cfg.getServiceEndpoint()) @@ -67,10 +67,10 @@ public static CosmosAsyncClient buildAsyncClient(final Configuration cfg) { * Builds a Cosmos async client used for bulk loading the data in the collection. The throttling * and the direct connection configs will be set differently for this. 
* - * @param cfg Configuration encapsulating options for configuring the Bulkload AsyncClient + * @param cfg TenantWorkloadConfig encapsulating options for configuring the Bulkload AsyncClient * @return CosmosAsyncClient for Bulk loading the data into the collection */ - public static CosmosAsyncClient buildBulkLoadAsyncClient(final Configuration cfg) { + public static CosmosAsyncClient buildBulkLoadAsyncClient(final TenantWorkloadConfig cfg) { Preconditions.checkNotNull(cfg, "The Workload configuration defining the parameters can not be null"); final CosmosClientBuilder cosmosClientBuilder = new CosmosClientBuilder() diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/CollectionResourceManager.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/CollectionResourceManager.java index fca7ac51ecd3..4748c95fd1b3 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/CollectionResourceManager.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/CollectionResourceManager.java @@ -7,7 +7,7 @@ import com.azure.cosmos.CosmosAsyncContainer; import com.azure.cosmos.CosmosAsyncDatabase; import com.azure.cosmos.CosmosException; -import com.azure.cosmos.benchmark.Configuration; +import com.azure.cosmos.benchmark.TenantWorkloadConfig; import com.azure.cosmos.benchmark.linkedin.data.CollectionAttributes; import com.azure.cosmos.benchmark.linkedin.data.EntityConfiguration; import com.azure.cosmos.benchmark.linkedin.impl.Constants; @@ -34,11 +34,11 @@ public class CollectionResourceManager implements ResourceManager { private static final Logger LOGGER = LoggerFactory.getLogger(CollectionResourceManager.class); private static final Duration RESOURCE_CRUD_WAIT_TIME = Duration.ofSeconds(30); - private final Configuration _configuration; + private final TenantWorkloadConfig _configuration; private final 
EntityConfiguration _entityConfiguration; private final CosmosAsyncClient _client; - public CollectionResourceManager(final Configuration configuration, + public CollectionResourceManager(final TenantWorkloadConfig configuration, final EntityConfiguration entityConfiguration, final CosmosAsyncClient client) { Preconditions.checkNotNull(configuration, @@ -54,7 +54,7 @@ public CollectionResourceManager(final Configuration configuration, @Override public void createResources() throws CosmosException { - final String containerName = _configuration.getCollectionId(); + final String containerName = _configuration.getContainerId(); final CosmosAsyncDatabase database = _client.getDatabase(_configuration.getDatabaseId()); final CollectionAttributes collectionAttributes = _entityConfiguration.collectionAttributes(); try { @@ -76,7 +76,7 @@ public void createResources() throws CosmosException { @Override public void deleteResources() { - LOGGER.info("The Collection {} will not be deleted.", _configuration.getCollectionId()); + LOGGER.info("The Collection {} will not be deleted.", _configuration.getContainerId()); } private Optional getContainerProperties(CosmosAsyncContainer container) { diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/CompositeReadTestRunner.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/CompositeReadTestRunner.java index 995f6b75e571..095a18935469 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/CompositeReadTestRunner.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/CompositeReadTestRunner.java @@ -4,7 +4,7 @@ package com.azure.cosmos.benchmark.linkedin; import com.azure.cosmos.CosmosAsyncClient; -import com.azure.cosmos.benchmark.Configuration; +import com.azure.cosmos.benchmark.TenantWorkloadConfig; import com.azure.cosmos.benchmark.linkedin.data.EntityConfiguration; 
import com.azure.cosmos.benchmark.linkedin.data.Key; import com.azure.cosmos.benchmark.linkedin.impl.exceptions.AccessorException; @@ -28,11 +28,11 @@ public class CompositeReadTestRunner extends TestRunner { */ private final Random _randomNumberGenerator; - CompositeReadTestRunner(final Configuration configuration, + CompositeReadTestRunner(final TenantWorkloadConfig workloadConfig, final CosmosAsyncClient client, final MetricRegistry metricsRegistry, final EntityConfiguration entityConfiguration) { - super(configuration, client, metricsRegistry, entityConfiguration); + super(workloadConfig, client, metricsRegistry, entityConfiguration); _queryGenerator = new QueryTestRunner.QueryGenerator(); _randomNumberGenerator = new Random(System.currentTimeMillis()); } diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/DataLoader.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/DataLoader.java index c9c789850797..3e49bf683c52 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/DataLoader.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/DataLoader.java @@ -6,7 +6,7 @@ import com.azure.cosmos.CosmosAsyncClient; import com.azure.cosmos.CosmosAsyncContainer; import com.azure.cosmos.CosmosAsyncDatabase; -import com.azure.cosmos.benchmark.Configuration; +import com.azure.cosmos.benchmark.TenantWorkloadConfig; import com.azure.cosmos.benchmark.linkedin.data.EntityConfiguration; import com.azure.cosmos.benchmark.linkedin.data.Key; import com.azure.cosmos.implementation.ImplementationBridgeHelpers; @@ -38,14 +38,14 @@ public class DataLoader { private static final String COUNT_ALL_QUERY = "SELECT COUNT(1) FROM c"; private static final String COUNT_ALL_QUERY_RESULT_FIELD = "$1"; - private final Configuration _configuration; + private final TenantWorkloadConfig _workloadConfig; private final 
EntityConfiguration _entityConfiguration; private final CosmosAsyncClient _client; - public DataLoader(final Configuration configuration, + public DataLoader(final TenantWorkloadConfig workloadConfig, final EntityConfiguration entityConfiguration, final CosmosAsyncClient client) { - _configuration = Preconditions.checkNotNull(configuration, + _workloadConfig = Preconditions.checkNotNull(workloadConfig, "The Workload configuration defining the parameters can not be null"); _entityConfiguration = Preconditions.checkNotNull(entityConfiguration, "The test entity configuration can not be null"); @@ -54,27 +54,27 @@ public DataLoader(final Configuration configuration, } public void loadData() { - final String containerName = _configuration.getCollectionId(); - final CosmosAsyncDatabase database = _client.getDatabase(_configuration.getDatabaseId()); + final String containerName = _workloadConfig.getContainerId(); + final CosmosAsyncDatabase database = _client.getDatabase(_workloadConfig.getDatabaseId()); final CosmosAsyncContainer container = database.getContainer(containerName); LOGGER.info("Check container {} for existing data", containerName); final int documentCount = getDocumentCount(container); - final int documentsToLoad = _configuration.getNumberOfPreCreatedDocuments() - documentCount; + final int documentsToLoad = _workloadConfig.getNumberOfPreCreatedDocuments() - documentCount; if (documentsToLoad <= 0) { LOGGER.info("Container {} already has the requisite number of documents: {} [desired: {}]", - containerName, documentCount, _configuration.getNumberOfPreCreatedDocuments()); + containerName, documentCount, _workloadConfig.getNumberOfPreCreatedDocuments()); return; } LOGGER.info("Starting batched data loading to load {} documents, with {} documents in each iteration", documentsToLoad, - _configuration.getBulkloadBatchSize()); + _workloadConfig.getBulkloadBatchSize()); final DataGenerationIterator dataGenerator = new 
DataGenerationIterator(_entityConfiguration.dataGenerator(), documentsToLoad, - _configuration.getBulkloadBatchSize()); + _workloadConfig.getBulkloadBatchSize()); while (dataGenerator.hasNext()) { final Map newDocuments = dataGenerator.next(); @@ -82,7 +82,7 @@ public void loadData() { newDocuments.clear(); } - validateDataCreation(_configuration.getNumberOfPreCreatedDocuments()); + validateDataCreation(_workloadConfig.getNumberOfPreCreatedDocuments()); } private void bulkCreateItems(final CosmosAsyncDatabase database, @@ -104,11 +104,11 @@ private void bulkCreateItems(final CosmosAsyncDatabase database, } private void validateDataCreation(int expectedSize) { - final String containerName = _configuration.getCollectionId(); - final CosmosAsyncDatabase database = _client.getDatabase(_configuration.getDatabaseId()); + final String containerName = _workloadConfig.getContainerId(); + final CosmosAsyncDatabase database = _client.getDatabase(_workloadConfig.getDatabaseId()); final CosmosAsyncContainer container = database.getContainer(containerName); LOGGER.info("Validating {} documents were loaded into [{}:{}]", - expectedSize, _configuration.getDatabaseId(), containerName); + expectedSize, _workloadConfig.getDatabaseId(), containerName); final int resultCount = getDocumentCount(container); if (resultCount < (expectedSize * 0.90)) { @@ -118,7 +118,7 @@ private void validateDataCreation(int expectedSize) { } LOGGER.info("Validated {} out of the {} expected documents were loaded into [{}:{}]", - resultCount, expectedSize, _configuration.getDatabaseId(), containerName); + resultCount, expectedSize, _workloadConfig.getDatabaseId(), containerName); } private int getDocumentCount(CosmosAsyncContainer container) { diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/DatabaseResourceManager.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/DatabaseResourceManager.java index 
23eed5900a47..887a6077918e 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/DatabaseResourceManager.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/DatabaseResourceManager.java @@ -6,7 +6,7 @@ import com.azure.cosmos.CosmosAsyncClient; import com.azure.cosmos.CosmosAsyncDatabase; import com.azure.cosmos.CosmosException; -import com.azure.cosmos.benchmark.Configuration; +import com.azure.cosmos.benchmark.TenantWorkloadConfig; import com.azure.cosmos.benchmark.linkedin.data.EntityConfiguration; import com.google.common.base.Preconditions; import java.time.Duration; @@ -24,11 +24,11 @@ public class DatabaseResourceManager implements ResourceManager { private static final Logger LOGGER = LoggerFactory.getLogger(DatabaseResourceManager.class); private static final Duration RESOURCE_CRUD_WAIT_TIME = Duration.ofSeconds(30); - private final Configuration _configuration; + private final TenantWorkloadConfig _configuration; private final CosmosAsyncClient _client; private final CollectionResourceManager _collectionResourceManager; - public DatabaseResourceManager(final Configuration configuration, + public DatabaseResourceManager(final TenantWorkloadConfig configuration, final EntityConfiguration entityConfiguration, final CosmosAsyncClient client) { Preconditions.checkNotNull(configuration, diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/GetTestRunner.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/GetTestRunner.java index f7960be00b31..7dfed6770765 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/GetTestRunner.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/GetTestRunner.java @@ -4,7 +4,7 @@ package com.azure.cosmos.benchmark.linkedin; import com.azure.cosmos.CosmosAsyncClient; -import 
com.azure.cosmos.benchmark.Configuration; +import com.azure.cosmos.benchmark.TenantWorkloadConfig; import com.azure.cosmos.benchmark.linkedin.data.EntityConfiguration; import com.azure.cosmos.benchmark.linkedin.data.Key; import com.azure.cosmos.benchmark.linkedin.impl.exceptions.AccessorException; @@ -25,11 +25,11 @@ public class GetTestRunner extends TestRunner { private static final Logger LOGGER = LoggerFactory.getLogger(GetTestRunner.class); - GetTestRunner(final Configuration configuration, + GetTestRunner(final TenantWorkloadConfig workloadConfig, final CosmosAsyncClient client, final MetricRegistry metricsRegistry, final EntityConfiguration entityConfiguration) { - super(configuration, client, metricsRegistry, entityConfiguration); + super(workloadConfig, client, metricsRegistry, entityConfiguration); } @Override diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/LICtlWorkload.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/LICtlWorkload.java index 513952cdb7d1..b511aba0e2e7 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/LICtlWorkload.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/LICtlWorkload.java @@ -5,8 +5,9 @@ import com.azure.cosmos.CosmosAsyncClient; import com.azure.cosmos.CosmosException; -import com.azure.cosmos.benchmark.Configuration; +import com.azure.cosmos.benchmark.BenchmarkConfig; import com.azure.cosmos.benchmark.ScheduledReporterFactory; +import com.azure.cosmos.benchmark.TenantWorkloadConfig; import com.azure.cosmos.benchmark.linkedin.data.EntityConfiguration; import com.azure.cosmos.benchmark.linkedin.data.InvitationsEntityConfiguration; import com.codahale.metrics.MetricRegistry; @@ -32,7 +33,8 @@ public enum Scenario { COMPOSITE_READ } - private final Configuration _configuration; + private final TenantWorkloadConfig _workloadConfig; + private final 
BenchmarkConfig _benchConfig; private final EntityConfiguration _entityConfiguration; private final CosmosAsyncClient _client; private final CosmosAsyncClient _bulkLoadClient; @@ -42,24 +44,26 @@ public enum Scenario { private final DataLoader _dataLoader; private final TestRunner _testRunner; - public LICtlWorkload(final Configuration configuration) { - Preconditions.checkNotNull(configuration, "The Workload configuration defining the parameters can not be null"); + public LICtlWorkload(final TenantWorkloadConfig workloadCfg, final BenchmarkConfig benchConfig) { + Preconditions.checkNotNull(workloadCfg, "The Workload configuration defining the parameters can not be null"); + Preconditions.checkNotNull(benchConfig, "The benchmark configuration defining the parameters can not be null"); - _configuration = configuration; - _entityConfiguration = new InvitationsEntityConfiguration(configuration); - _client = AsyncClientFactory.buildAsyncClient(configuration); - _bulkLoadClient = AsyncClientFactory.buildBulkLoadAsyncClient(configuration); + _workloadConfig = workloadCfg; + _benchConfig = benchConfig; + _entityConfiguration = new InvitationsEntityConfiguration(workloadCfg); + _client = AsyncClientFactory.buildAsyncClient(workloadCfg); + _bulkLoadClient = AsyncClientFactory.buildBulkLoadAsyncClient(workloadCfg); _metricsRegistry = new MetricRegistry(); - _reporter = ScheduledReporterFactory.create(_configuration, _metricsRegistry); - _resourceManager = _configuration.shouldManageDatabase() - ? new DatabaseResourceManager(_configuration, _entityConfiguration, _client) - : new CollectionResourceManager(_configuration, _entityConfiguration, _client); - _dataLoader = new DataLoader(_configuration, _entityConfiguration, _bulkLoadClient); - _testRunner = createTestRunner(_configuration); + _reporter = ScheduledReporterFactory.create(_benchConfig, _metricsRegistry); + _resourceManager = workloadCfg.shouldManageDatabase() + ? 
new DatabaseResourceManager(workloadCfg, _entityConfiguration, _client) + : new CollectionResourceManager(workloadCfg, _entityConfiguration, _client); + _dataLoader = new DataLoader(workloadCfg, _entityConfiguration, _bulkLoadClient); + _testRunner = createTestRunner(workloadCfg); } public void setup() throws CosmosException { - if (_configuration.isEnableJvmStats()) { + if (_benchConfig.isEnableJvmStats()) { LOGGER.info("Enabling JVM stats collection"); _metricsRegistry.register("gc", new GarbageCollectorMetricSet()); _metricsRegistry.register("threads", new CachedThreadStatesGaugeSet(10, TimeUnit.SECONDS)); @@ -80,7 +84,7 @@ public void setup() throws CosmosException { public void run() { LOGGER.info("Executing the CosmosDB test"); - _reporter.start(_configuration.getPrintingInterval(), TimeUnit.SECONDS); + _reporter.start(_benchConfig.getPrintingInterval(), TimeUnit.SECONDS); _testRunner.run(); @@ -97,16 +101,16 @@ public void shutdown() { _reporter.close(); } - private TestRunner createTestRunner(Configuration configuration) { - final Scenario scenario = Scenario.valueOf(configuration.getTestScenario()); + private TestRunner createTestRunner(TenantWorkloadConfig workloadCfg) { + final Scenario scenario = Scenario.valueOf(workloadCfg.getTestScenario()); switch (scenario) { case QUERY: - return new QueryTestRunner(_configuration, _client, _metricsRegistry, _entityConfiguration); + return new QueryTestRunner(workloadCfg, _client, _metricsRegistry, _entityConfiguration); case COMPOSITE_READ: - return new CompositeReadTestRunner(_configuration, _client, _metricsRegistry, _entityConfiguration); + return new CompositeReadTestRunner(workloadCfg, _client, _metricsRegistry, _entityConfiguration); case GET: default: - return new GetTestRunner(_configuration, _client, _metricsRegistry, _entityConfiguration); + return new GetTestRunner(workloadCfg, _client, _metricsRegistry, _entityConfiguration); } } } diff --git 
a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/QueryTestRunner.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/QueryTestRunner.java index e48e497cff50..36c8fd780ccd 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/QueryTestRunner.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/QueryTestRunner.java @@ -4,7 +4,7 @@ package com.azure.cosmos.benchmark.linkedin; import com.azure.cosmos.CosmosAsyncClient; -import com.azure.cosmos.benchmark.Configuration; +import com.azure.cosmos.benchmark.TenantWorkloadConfig; import com.azure.cosmos.benchmark.linkedin.data.EntityConfiguration; import com.azure.cosmos.benchmark.linkedin.data.Key; import com.azure.cosmos.benchmark.linkedin.impl.Constants; @@ -26,11 +26,11 @@ public class QueryTestRunner extends TestRunner { private final QueryGenerator _queryGenerator; - QueryTestRunner(final Configuration configuration, + QueryTestRunner(final TenantWorkloadConfig workloadConfig, final CosmosAsyncClient client, final MetricRegistry metricsRegistry, final EntityConfiguration entityConfiguration) { - super(configuration, client, metricsRegistry, entityConfiguration); + super(workloadConfig, client, metricsRegistry, entityConfiguration); _queryGenerator = new QueryGenerator(); } diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/TestRunner.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/TestRunner.java index 7d9c921acb1c..3dc51f24259d 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/TestRunner.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/TestRunner.java @@ -7,7 +7,7 @@ import com.azure.cosmos.CosmosAsyncContainer; import com.azure.cosmos.CosmosAsyncDatabase; import 
com.azure.cosmos.benchmark.BenchmarkHelper; -import com.azure.cosmos.benchmark.Configuration; +import com.azure.cosmos.benchmark.TenantWorkloadConfig; import com.azure.cosmos.benchmark.linkedin.data.EntityConfiguration; import com.azure.cosmos.benchmark.linkedin.data.Key; import com.azure.cosmos.benchmark.linkedin.data.KeyGenerator; @@ -47,7 +47,7 @@ public abstract class TestRunner { private static final Logger LOGGER = LoggerFactory.getLogger(TestRunner.class); private static final Duration TERMINATION_WAIT_DURATION = Duration.ofSeconds(60); - protected final Configuration _configuration; + protected final TenantWorkloadConfig _workloadConfig; protected final EntityConfiguration _entityConfiguration; protected final Accessor _accessor; protected final ExecutorService _executorService; @@ -55,11 +55,11 @@ public abstract class TestRunner { protected final AtomicLong _errorCount; private final Semaphore _semaphore; - TestRunner(final Configuration configuration, + TestRunner(final TenantWorkloadConfig workloadConfig, final CosmosAsyncClient client, final MetricRegistry metricsRegistry, final EntityConfiguration entityConfiguration) { - Preconditions.checkNotNull(configuration, + Preconditions.checkNotNull(workloadConfig, "The Workload configuration defining the parameters can not be null"); Preconditions.checkNotNull(client, "Need a non-null client for setting up the Database and containers for the test"); @@ -68,13 +68,13 @@ public abstract class TestRunner { Preconditions.checkNotNull(entityConfiguration, "The Test entity configuration can not be null"); - _configuration = configuration; + _workloadConfig = workloadConfig; _entityConfiguration = entityConfiguration; - _accessor = createAccessor(configuration, client, metricsRegistry); - _executorService = Executors.newFixedThreadPool(configuration.getConcurrency()); + _accessor = createAccessor(workloadConfig, client, metricsRegistry); + _executorService = 
Executors.newFixedThreadPool(workloadConfig.getConcurrency()); _successCount = new AtomicLong(0); _errorCount = new AtomicLong(0); - _semaphore = new Semaphore(configuration.getConcurrency()); + _semaphore = new Semaphore(workloadConfig.getConcurrency()); } public void init() { @@ -87,8 +87,8 @@ public void run() { KeyGenerator keyGenerator = getNewKeyGenerator(); final long runStartTime = System.currentTimeMillis(); long i = 0; - for (; BenchmarkHelper.shouldContinue(runStartTime, i, _configuration); i++) { - if (i > _configuration.getNumberOfPreCreatedDocuments()) { + for (; BenchmarkHelper.shouldContinue(runStartTime, i, _workloadConfig); i++) { + if (i > _workloadConfig.getNumberOfPreCreatedDocuments()) { keyGenerator = getNewKeyGenerator(); } final Key documentKey = keyGenerator.key(); @@ -140,28 +140,28 @@ private void runOperation(final Key key) { } } - private Accessor createAccessor(final Configuration configuration, + private Accessor createAccessor(final TenantWorkloadConfig workloadConfig, final CosmosAsyncClient client, final MetricRegistry metricsRegistry) { - final StaticDataLocator dataLocator = createDataLocator(configuration, client); + final StaticDataLocator dataLocator = createDataLocator(workloadConfig, client); final KeyExtractor keyExtractor = new KeyExtractorImpl(); final DocumentTransformer documentTransformer = new IdentityDocumentTransformer<>(); final Clock clock = Clock.systemUTC(); return new CosmosDBDataAccessor<>(dataLocator, keyExtractor, new ResponseHandler<>(documentTransformer, keyExtractor), - new MetricsFactory(metricsRegistry, clock, configuration.getEnvironment()), + new MetricsFactory(metricsRegistry, clock, workloadConfig.getEnvironment()), clock, new OperationsLogger(Duration.ofSeconds(10))); } - private StaticDataLocator createDataLocator(Configuration configuration, CosmosAsyncClient client) { - final CollectionKey collectionKey = new CollectionKey(configuration.getServiceEndpoint(), - configuration.getDatabaseId(), - 
configuration.getCollectionId()); - final CosmosAsyncDatabase database = client.getDatabase(configuration.getDatabaseId()); - final CosmosAsyncContainer container = database.getContainer(configuration.getCollectionId()); + private StaticDataLocator createDataLocator(TenantWorkloadConfig workloadConfig, CosmosAsyncClient client) { + final CollectionKey collectionKey = new CollectionKey(workloadConfig.getServiceEndpoint(), + workloadConfig.getDatabaseId(), + workloadConfig.getContainerId()); + final CosmosAsyncDatabase database = client.getDatabase(workloadConfig.getDatabaseId()); + final CosmosAsyncContainer container = database.getContainer(workloadConfig.getContainerId()); return new StaticDataLocator(collectionKey, container); } diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/data/InvitationsEntityConfiguration.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/data/InvitationsEntityConfiguration.java index 70a91a09c1a0..76998e666c6b 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/data/InvitationsEntityConfiguration.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/data/InvitationsEntityConfiguration.java @@ -3,7 +3,7 @@ package com.azure.cosmos.benchmark.linkedin.data; -import com.azure.cosmos.benchmark.Configuration; +import com.azure.cosmos.benchmark.TenantWorkloadConfig; import com.azure.cosmos.benchmark.linkedin.data.entity.InvitationDataGenerator; import com.azure.cosmos.benchmark.linkedin.data.entity.InvitationsCollectionAttributes; import com.azure.cosmos.benchmark.linkedin.data.entity.InvitationsKeyGenerator; @@ -20,7 +20,7 @@ public class InvitationsEntityConfiguration implements EntityConfiguration { private final DataGenerator _dataGenerator; private final CollectionAttributes _collectionAttributes; - public InvitationsEntityConfiguration(final Configuration 
configuration) { + public InvitationsEntityConfiguration(final TenantWorkloadConfig configuration) { Preconditions.checkNotNull(configuration, "The test configuration can not be null"); _keyGenerator = () -> new InvitationsKeyGenerator(configuration.getNumberOfPreCreatedDocuments()); _dataGenerator = new InvitationDataGenerator(_keyGenerator.get()); diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/test/java/com/azure/cosmos/benchmark/ReadMyWritesConsistencyTest.java b/sdk/cosmos/azure-cosmos-benchmark/src/test/java/com/azure/cosmos/benchmark/ReadMyWritesConsistencyTest.java index 690642513a88..15ac834b1543 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/test/java/com/azure/cosmos/benchmark/ReadMyWritesConsistencyTest.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/test/java/com/azure/cosmos/benchmark/ReadMyWritesConsistencyTest.java @@ -16,7 +16,6 @@ import com.azure.cosmos.models.IndexingPolicy; import com.azure.cosmos.models.PartitionKeyDefinition; import com.codahale.metrics.MetricRegistry; -import com.beust.jcommander.JCommander; import com.google.common.base.Strings; import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; @@ -100,37 +99,22 @@ public void readMyWrites(boolean useNameLink) throws Exception { int concurrency = 5; - String cmdFormat = "-serviceEndpoint %s -masterKey %s" + - " -databaseId %s" + - " -collectionId %s" + - " -consistencyLevel %s" + - " -concurrency %s" + - " -numberOfOperations %s" + - " -maxRunningTimeDuration %s" + - " -operation ReadMyWrites" + - " -connectionMode Direct" + - " -numberOfPreCreatedDocuments 100" + - " -printingInterval 60" + - "%s"; - - String cmd = String.format(cmdFormat, - TestConfigurations.HOST, - TestConfigurations.MASTER_KEY, - database.getId(), - collection.getId(), - desiredConsistency, - concurrency, - numberOfOperationsAsString, - maxRunningTime, - (useNameLink ? 
" -useNameLink" : "")); - - Configuration cfg = new Configuration(); - new JCommander(cfg, StringUtils.split(cmd)); + TenantWorkloadConfig cfg = new TenantWorkloadConfig(); + cfg.setServiceEndpoint(TestConfigurations.HOST); + cfg.setMasterKey(TestConfigurations.MASTER_KEY); + cfg.setDatabaseId(database.getId()); + cfg.setContainerId(collection.getId()); + cfg.setConsistencyLevel(desiredConsistency); + cfg.setConcurrency(concurrency); + cfg.setNumberOfOperations(Integer.parseInt(numberOfOperationsAsString)); + cfg.setOperation("ReadMyWrites"); + cfg.setConnectionMode("Direct"); + cfg.setNumberOfPreCreatedDocuments(100); AtomicInteger success = new AtomicInteger(); AtomicInteger error = new AtomicInteger(); - ReadMyWriteWorkflow wf = new ReadMyWriteWorkflow(TenantWorkloadConfig.fromConfiguration(cfg), new MetricRegistry()) { + ReadMyWriteWorkflow wf = new ReadMyWriteWorkflow(cfg, new MetricRegistry()) { @Override protected void onError(Throwable throwable) { logger.error("Error occurred in ReadMyWriteWorkflow", throwable); diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/test/java/com/azure/cosmos/benchmark/TenantWorkloadConfigFromConfigurationTest.java b/sdk/cosmos/azure-cosmos-benchmark/src/test/java/com/azure/cosmos/benchmark/TenantWorkloadConfigFromConfigurationTest.java deleted file mode 100644 index e3070e1ca9d8..000000000000 --- a/sdk/cosmos/azure-cosmos-benchmark/src/test/java/com/azure/cosmos/benchmark/TenantWorkloadConfigFromConfigurationTest.java +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -package com.azure.cosmos.benchmark; - -import com.beust.jcommander.Parameter; -import org.testng.annotations.Test; - -import java.lang.reflect.Field; -import java.lang.reflect.Method; -import java.util.Arrays; -import java.util.HashSet; -import java.util.Set; -import java.util.stream.Collectors; - -import static org.assertj.core.api.Assertions.assertThat; - -/** - * Ensures that {@link TenantWorkloadConfig#fromConfiguration(Configuration)} maps all - * workload-relevant fields from {@link Configuration}. When a new {@code @Parameter} field - * is added to {@code Configuration}, this test will fail unless the field is either: - *
    - *
  • mapped in {@code fromConfiguration()}, or
  • - *
  • added to the {@code EXCLUDED_FIELDS} allowlist with a justification.
  • - *
- */ -public class TenantWorkloadConfigFromConfigurationTest { - - /** - * Configuration fields intentionally NOT copied to TenantWorkloadConfig. - * Each entry has a comment explaining why it is excluded. - */ - private static final Set EXCLUDED_FIELDS = new HashSet<>(Arrays.asList( - // Lifecycle/orchestrator-level fields (handled in BenchmarkConfig, not per-tenant) - "tenantsFile", - "cycles", - "settleTimeMs", - "suppressCleanup", - "gcBetweenCycles", - "enableJvmStats", - "enableNettyHttpMetrics", - "printingInterval", - "numberOfAutoGeneratedKeysForReadBenchmark", - - // Result upload fields (handled in BenchmarkConfig) - "serviceEndpointForRunResultsUploadAccount", - "masterKeyForRunResultsUploadAccount", - "resultUploadDatabase", - "resultUploadContainer", - "testVariationName", - "branchName", - "commitId", - - // JVM-global system properties (handled in BenchmarkConfig) - "isPartitionLevelCircuitBreakerEnabled", - "isPerPartitionAutomaticFailoverRequired", - "minConnectionPoolSizePerEndpoint", - - // Azure Monitor config (handled at orchestrator level) - "azureMonitorMeterRegistry", - - // Reporting output (handled in BenchmarkConfig) - "reportingDirectory", - "graphiteEndpoint", - "graphiteEndpointPort", - "accountNameInGraphiteReporter", - - // Sync-only flag (multi-tenant benchmark is async-only) - "useSync", - - // LinkedIn-specific CTL fields (not applicable to multi-tenant benchmark) - "numberOfCollectionForCtl", - "readWriteQueryReadManyPct", - "bulkloadBatchSize", - "testScenario", - "encryptedStringFieldCount", - "encryptedLongFieldCount", - "encryptedDoubleFieldCount", - "encryptionEnabled", - "environment", - - // CLI help flag - "help", - - // Internal/infra fields - "metricsRegistry", - - // Diagnostics threshold (mapped via separate pointLatencyThresholdMs/nonPointLatencyThresholdMs) - "diagnosticsThresholdDuration" - )); - - @Test(groups = {"unit"}) - public void fromConfigurationShouldMapAllWorkloadFields() { - // Collect all @Parameter 
field names from Configuration - Set configFieldNames = new HashSet<>(); - for (Field field : Configuration.class.getDeclaredFields()) { - if (field.isAnnotationPresent(Parameter.class)) { - configFieldNames.add(field.getName()); - } - } - - // Get the source of fromConfiguration to find which fields are referenced - // We check that every Configuration @Parameter field is either mapped or excluded - Set unmappedFields = new HashSet<>(); - for (String fieldName : configFieldNames) { - if (!EXCLUDED_FIELDS.contains(fieldName) && !isMappedInFromConfiguration(fieldName)) { - unmappedFields.add(fieldName); - } - } - - assertThat(unmappedFields) - .as("Configuration @Parameter fields not mapped in TenantWorkloadConfig.fromConfiguration() " - + "and not in EXCLUDED_FIELDS allowlist. Either add mapping in fromConfiguration() " - + "or add to EXCLUDED_FIELDS with justification.") - .isEmpty(); - } - - /** - * Checks whether a Configuration field is referenced in fromConfiguration() by verifying - * that a corresponding getter is called (heuristic: a getter for the field exists in - * Configuration and TenantWorkloadConfig has a field with a matching name). 
- */ - private boolean isMappedInFromConfiguration(String configFieldName) { - // Check if TenantWorkloadConfig has a field with the same or similar name - Set tenantFieldNames = Arrays.stream(TenantWorkloadConfig.class.getDeclaredFields()) - .map(Field::getName) - .collect(Collectors.toSet()); - - // Direct name match - if (tenantFieldNames.contains(configFieldName)) { - return true; - } - - // Check common naming variations between Configuration and TenantWorkloadConfig - Set knownMappings = new HashSet<>(Arrays.asList( - "collectionId", // mapped to containerId - "operationType", // mapped via cfg.getOperationType().name() -> operation - "serviceEndpoint", // direct match - "masterKey", // direct match - "pointLatencyThresholdMs", // mapped to pointOperationLatencyThresholdMs - "nonPointLatencyThresholdMs", // mapped to nonPointOperationLatencyThresholdMs - "defaultLog4jLoggerEnabled" // mapped to isDefaultLog4jLoggerEnabled - )); - - return knownMappings.contains(configFieldName); - } -} diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/test/java/com/azure/cosmos/benchmark/WorkflowTest.java b/sdk/cosmos/azure-cosmos-benchmark/src/test/java/com/azure/cosmos/benchmark/WorkflowTest.java index f5fa349335b1..a4d6c4321238 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/test/java/com/azure/cosmos/benchmark/WorkflowTest.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/test/java/com/azure/cosmos/benchmark/WorkflowTest.java @@ -12,7 +12,6 @@ import com.azure.cosmos.models.IndexingPolicy; import com.azure.cosmos.models.PartitionKeyDefinition; import com.codahale.metrics.MetricRegistry; -import com.beust.jcommander.JCommander; import org.apache.commons.lang3.StringUtils; import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; @@ -49,26 +48,23 @@ public void readMyWritesCLI() throws Exception { @Test(dataProvider = "collectionLinkTypeArgProvider", groups = "fast", timeOut = TIMEOUT) public void readMyWrites(boolean useNameLink) throws 
Exception { int numberOfOperations = 123; - String cmdFormat = "-serviceEndpoint %s -masterKey %s" + - " -databaseId %s -collectionId %s" + - " -consistencyLevel SESSION -concurrency 2 -numberOfOperations %s" + - " -operation ReadMyWrites -connectionMode DIRECT -numberOfPreCreatedDocuments 100"; - - String cmd = String.format(cmdFormat, - TestConfigurations.HOST, - TestConfigurations.MASTER_KEY, - database.getId(), - collection.getId(), - numberOfOperations) - + (useNameLink ? " -useNameLink" : ""); - Configuration cfg = new Configuration(); - new JCommander(cfg, StringUtils.split(cmd)); + TenantWorkloadConfig cfg = new TenantWorkloadConfig(); + cfg.setServiceEndpoint(TestConfigurations.HOST); + cfg.setMasterKey(TestConfigurations.MASTER_KEY); + cfg.setDatabaseId(database.getId()); + cfg.setContainerId(collection.getId()); + cfg.setConsistencyLevel("SESSION"); + cfg.setConcurrency(2); + cfg.setNumberOfOperations(numberOfOperations); + cfg.setOperation("ReadMyWrites"); + cfg.setConnectionMode("DIRECT"); + cfg.setNumberOfPreCreatedDocuments(100); AtomicInteger success = new AtomicInteger(); AtomicInteger error = new AtomicInteger(); - ReadMyWriteWorkflow wf = new ReadMyWriteWorkflow(TenantWorkloadConfig.fromConfiguration(cfg), new MetricRegistry()) { + ReadMyWriteWorkflow wf = new ReadMyWriteWorkflow(cfg, new MetricRegistry()) { @Override protected void onError(Throwable throwable) { error.incrementAndGet(); @@ -105,26 +101,22 @@ public void writeLatencyCLI() throws Exception { @Test(dataProvider = "collectionLinkTypeArgProvider", groups = "fast", timeOut = TIMEOUT) public void writeLatency(boolean useNameLink) throws Exception { int numberOfOperations = 123; - String cmdFormat = "-serviceEndpoint %s -masterKey %s" + - " -databaseId %s -collectionId %s" + - " -consistencyLevel SESSION -concurrency 2 -numberOfOperations %s" + - " -operation WriteLatency -connectionMode DIRECT"; - String cmd = String.format(cmdFormat, - TestConfigurations.HOST, - 
TestConfigurations.MASTER_KEY, - database.getId(), - collection.getId(), - numberOfOperations) - + (useNameLink ? " -useNameLink" : ""); - - Configuration cfg = new Configuration(); - new JCommander(cfg, StringUtils.split(cmd)); + TenantWorkloadConfig cfg = new TenantWorkloadConfig(); + cfg.setServiceEndpoint(TestConfigurations.HOST); + cfg.setMasterKey(TestConfigurations.MASTER_KEY); + cfg.setDatabaseId(database.getId()); + cfg.setContainerId(collection.getId()); + cfg.setConsistencyLevel("SESSION"); + cfg.setConcurrency(2); + cfg.setNumberOfOperations(numberOfOperations); + cfg.setOperation("WriteLatency"); + cfg.setConnectionMode("DIRECT"); AtomicInteger success = new AtomicInteger(); AtomicInteger error = new AtomicInteger(); - AsyncWriteBenchmark wf = new AsyncWriteBenchmark(TenantWorkloadConfig.fromConfiguration(cfg), new MetricRegistry()) { + AsyncWriteBenchmark wf = new AsyncWriteBenchmark(cfg, new MetricRegistry()) { @Override protected void onError(Throwable throwable) { error.incrementAndGet(); @@ -146,26 +138,22 @@ protected void onSuccess() { @Test(dataProvider = "collectionLinkTypeArgProvider", groups = "fast", timeOut = TIMEOUT) public void writeThroughput(boolean useNameLink) throws Exception { int numberOfOperations = 123; - String cmdFormat = "-serviceEndpoint %s -masterKey %s" + - " -databaseId %s -collectionId %s" + - " -consistencyLevel SESSION -concurrency 2 -numberOfOperations %s" + - " -operation WriteThroughput -connectionMode DIRECT"; - String cmd = String.format(cmdFormat, - TestConfigurations.HOST, - TestConfigurations.MASTER_KEY, - database.getId(), - collection.getId(), - numberOfOperations) - + (useNameLink ? 
" -useNameLink" : ""); - - Configuration cfg = new Configuration(); - new JCommander(cfg, StringUtils.split(cmd)); + TenantWorkloadConfig cfg = new TenantWorkloadConfig(); + cfg.setServiceEndpoint(TestConfigurations.HOST); + cfg.setMasterKey(TestConfigurations.MASTER_KEY); + cfg.setDatabaseId(database.getId()); + cfg.setContainerId(collection.getId()); + cfg.setConsistencyLevel("SESSION"); + cfg.setConcurrency(2); + cfg.setNumberOfOperations(numberOfOperations); + cfg.setOperation("WriteThroughput"); + cfg.setConnectionMode("DIRECT"); AtomicInteger success = new AtomicInteger(); AtomicInteger error = new AtomicInteger(); - AsyncWriteBenchmark wf = new AsyncWriteBenchmark(TenantWorkloadConfig.fromConfiguration(cfg), new MetricRegistry()) { + AsyncWriteBenchmark wf = new AsyncWriteBenchmark(cfg, new MetricRegistry()) { @Override protected void onError(Throwable throwable) { error.incrementAndGet(); @@ -187,26 +175,22 @@ protected void onSuccess() { @Test(dataProvider = "collectionLinkTypeArgProvider", groups = "fast", timeOut = TIMEOUT) public void readLatency(boolean useNameLink) throws Exception { int numberOfOperations = 123; - String cmdFormat = "-serviceEndpoint %s -masterKey %s" + - " -databaseId %s -collectionId %s" + - " -consistencyLevel SESSION -concurrency 2 -numberOfOperations %s" + - " -operation ReadLatency -connectionMode DIRECT"; - - String cmd = String.format(cmdFormat, - TestConfigurations.HOST, - TestConfigurations.MASTER_KEY, - database.getId(), - collection.getId(), - numberOfOperations) - + (useNameLink ? 
" -useNameLink" : ""); - Configuration cfg = new Configuration(); - new JCommander(cfg, StringUtils.split(cmd)); + TenantWorkloadConfig cfg = new TenantWorkloadConfig(); + cfg.setServiceEndpoint(TestConfigurations.HOST); + cfg.setMasterKey(TestConfigurations.MASTER_KEY); + cfg.setDatabaseId(database.getId()); + cfg.setContainerId(collection.getId()); + cfg.setConsistencyLevel("SESSION"); + cfg.setConcurrency(2); + cfg.setNumberOfOperations(numberOfOperations); + cfg.setOperation("ReadLatency"); + cfg.setConnectionMode("DIRECT"); AtomicInteger success = new AtomicInteger(); AtomicInteger error = new AtomicInteger(); - AsyncReadBenchmark wf = new AsyncReadBenchmark(TenantWorkloadConfig.fromConfiguration(cfg), new MetricRegistry()) { + AsyncReadBenchmark wf = new AsyncReadBenchmark(cfg, new MetricRegistry()) { @Override protected void onError(Throwable throwable) { error.incrementAndGet(); @@ -228,26 +212,22 @@ protected void onSuccess() { @Test(dataProvider = "collectionLinkTypeArgProvider", groups = "fast", timeOut = TIMEOUT) public void readThroughput(boolean useNameLink) throws Exception { int numberOfOperations = 123; - String cmdFormat = "-serviceEndpoint %s -masterKey %s" + - " -databaseId %s -collectionId %s" + - " -consistencyLevel SESSION -concurrency 2 -numberOfOperations %s" + - " -operation ReadThroughput -connectionMode DIRECT"; - - String cmd = String.format(cmdFormat, - TestConfigurations.HOST, - TestConfigurations.MASTER_KEY, - database.getId(), - collection.getId(), - numberOfOperations) - + (useNameLink ? 
" -useNameLink" : ""); - Configuration cfg = new Configuration(); - new JCommander(cfg, StringUtils.split(cmd)); + TenantWorkloadConfig cfg = new TenantWorkloadConfig(); + cfg.setServiceEndpoint(TestConfigurations.HOST); + cfg.setMasterKey(TestConfigurations.MASTER_KEY); + cfg.setDatabaseId(database.getId()); + cfg.setContainerId(collection.getId()); + cfg.setConsistencyLevel("SESSION"); + cfg.setConcurrency(2); + cfg.setNumberOfOperations(numberOfOperations); + cfg.setOperation("ReadThroughput"); + cfg.setConnectionMode("DIRECT"); AtomicInteger success = new AtomicInteger(); AtomicInteger error = new AtomicInteger(); - AsyncReadBenchmark wf = new AsyncReadBenchmark(TenantWorkloadConfig.fromConfiguration(cfg), new MetricRegistry()) { + AsyncReadBenchmark wf = new AsyncReadBenchmark(cfg, new MetricRegistry()) { @Override protected void onError(Throwable throwable) { error.incrementAndGet(); From e57eeb3de57050a2fcd9caad5ec1af04464bfda7 Mon Sep 17 00:00:00 2001 From: Annie Liang Date: Wed, 4 Mar 2026 14:50:08 -0800 Subject: [PATCH 2/5] Unify all benchmarks under BenchmarkOrchestrator MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Route sync, CTL, encryption, and LinkedIn benchmarks through the orchestrator instead of having separate code paths in Main.java. 
- Add Benchmark interface (run + shutdown) implemented by all benchmark types - Refactor SyncBenchmark, AsyncCtlWorkload, AsyncEncryptionBenchmark, and LICtlWorkload to accept injected MetricRegistry (like AsyncBenchmark) - Remove self-managed reporter, result uploader, and JVM stats from each benchmark — orchestrator handles all infrastructure concerns - Expand orchestrator factory to dispatch based on operationType + flags (isSync, isEncryptionEnabled) - Simplify Main.java from 5 code paths to a single orchestrator call Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- .../cosmos/benchmark/AsyncBenchmark.java | 6 +- .../com/azure/cosmos/benchmark/Benchmark.java | 14 ++ .../benchmark/BenchmarkOrchestrator.java | 70 +++++++-- .../java/com/azure/cosmos/benchmark/Main.java | 134 +----------------- .../cosmos/benchmark/ReadMyWriteWorkflow.java | 2 +- .../azure/cosmos/benchmark/SyncBenchmark.java | 82 ++--------- .../cosmos/benchmark/SyncReadBenchmark.java | 6 +- .../cosmos/benchmark/SyncWriteBenchmark.java | 6 +- .../benchmark/ctl/AsyncCtlWorkload.java | 34 ++--- .../encryption/AsyncEncryptionBenchmark.java | 44 +----- .../AsyncEncryptionQueryBenchmark.java | 6 +- ...ncryptionQuerySinglePartitionMultiple.java | 6 +- .../AsyncEncryptionReadBenchmark.java | 6 +- .../AsyncEncryptionWriteBenchmark.java | 6 +- .../benchmark/linkedin/LICtlWorkload.java | 45 ++---- 15 files changed, 137 insertions(+), 330 deletions(-) create mode 100644 sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/Benchmark.java diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/AsyncBenchmark.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/AsyncBenchmark.java index aaf2c4f4cb8b..263930ce3576 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/AsyncBenchmark.java +++ 
b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/AsyncBenchmark.java @@ -45,7 +45,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; -abstract class AsyncBenchmark { +abstract class AsyncBenchmark implements Benchmark { private static final ImplementationBridgeHelpers.CosmosClientBuilderHelper.CosmosClientBuilderAccessor clientBuilderAccessor = ImplementationBridgeHelpers.CosmosClientBuilderHelper.getCosmosClientBuilderAccessor(); @@ -343,7 +343,7 @@ uuid, new PartitionKey(partitionKey), PojoizedJson.class) protected void init() { } - void shutdown() { + public void shutdown() { if (workloadConfig.isSuppressCleanup()) { logger.info("Skipping cleanup of database/container (suppressCleanup=true)"); } else if (this.databaseCreated) { @@ -420,7 +420,7 @@ private boolean latencyAwareOperations(Operation operation) { } } - void run() throws Exception { + public void run() throws Exception { initializeMeter(); if (workloadConfig.getSkipWarmUpOperations() > 0) { logger.info("Starting warm up phase. Executing {} operations to warm up ...", workloadConfig.getSkipWarmUpOperations()); diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/Benchmark.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/Benchmark.java new file mode 100644 index 000000000000..8cb53a609bbc --- /dev/null +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/Benchmark.java @@ -0,0 +1,14 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.cosmos.benchmark; + +/** + * Common contract for all benchmark workloads. + * Implementations are created by {@link BenchmarkOrchestrator} and participate + * in its lifecycle loop (create → run → shutdown → settle × N cycles). 
+ */ +public interface Benchmark { + void run() throws Exception; + void shutdown(); +} diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/BenchmarkOrchestrator.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/BenchmarkOrchestrator.java index 1b26bbb01666..dee395d632cb 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/BenchmarkOrchestrator.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/BenchmarkOrchestrator.java @@ -8,6 +8,12 @@ import com.codahale.metrics.ScheduledReporter; import com.azure.cosmos.CosmosClient; import com.azure.cosmos.CosmosClientBuilder; +import com.azure.cosmos.benchmark.ctl.AsyncCtlWorkload; +import com.azure.cosmos.benchmark.encryption.AsyncEncryptionQueryBenchmark; +import com.azure.cosmos.benchmark.encryption.AsyncEncryptionQuerySinglePartitionMultiple; +import com.azure.cosmos.benchmark.encryption.AsyncEncryptionReadBenchmark; +import com.azure.cosmos.benchmark.encryption.AsyncEncryptionWriteBenchmark; +import com.azure.cosmos.benchmark.linkedin.LICtlWorkload; import com.codahale.metrics.MetricRegistry; import com.codahale.metrics.jvm.CachedThreadStatesGaugeSet; import com.codahale.metrics.jvm.GarbageCollectorMetricSet; @@ -191,7 +197,7 @@ private void runLifecycleLoop(BenchmarkConfig config, MetricRegistry registry, logger.info("[LIFECYCLE] CYCLE_START cycle={} timestamp={}", cycle, Instant.now()); // 1. 
Create clients - List> benchmarks = createBenchmarks(config, registry); + List benchmarks = createBenchmarks(config, registry); reporter.report(); logger.info("[LIFECYCLE] POST_CREATE cycle={} clients={} timestamp={}", cycle, benchmarks.size(), Instant.now()); @@ -244,18 +250,18 @@ private void runLifecycleLoop(BenchmarkConfig config, MetricRegistry registry, totalCycles, durationSec, Instant.now()); } - private List> createBenchmarks(BenchmarkConfig config, MetricRegistry registry) { - List> benchmarks = new ArrayList<>(); + private List createBenchmarks(BenchmarkConfig config, MetricRegistry registry) throws Exception { + List benchmarks = new ArrayList<>(); for (TenantWorkloadConfig tenant : config.getTenantWorkloads()) { benchmarks.add(createBenchmarkForOperation(tenant, registry)); } return benchmarks; } - private void runWorkload(List> benchmarks, int cycle, ExecutorService executor) throws Exception { + private void runWorkload(List benchmarks, int cycle, ExecutorService executor) throws Exception { List> futures = new ArrayList<>(); final int currentCycle = cycle; - for (AsyncBenchmark benchmark : benchmarks) { + for (Benchmark benchmark : benchmarks) { futures.add(executor.submit(() -> { try { benchmark.run(); @@ -269,8 +275,8 @@ private void runWorkload(List> benchmarks, int cycle, Executor } } - private void shutdownBenchmarks(List> benchmarks, int cycle) { - for (AsyncBenchmark benchmark : benchmarks) { + private void shutdownBenchmarks(List benchmarks, int cycle) { + for (Benchmark benchmark : benchmarks) { try { benchmark.shutdown(); } catch (Exception e) { @@ -311,7 +317,55 @@ private void prepareTenants(BenchmarkConfig config) { // ======== Benchmark factory ======== - private AsyncBenchmark createBenchmarkForOperation(TenantWorkloadConfig cfg, MetricRegistry registry) { + private Benchmark createBenchmarkForOperation(TenantWorkloadConfig cfg, MetricRegistry registry) throws Exception { + // Sync benchmarks + if (cfg.isSync()) { + switch 
(cfg.getOperationType()) { + case ReadThroughput: + case ReadLatency: + return new SyncReadBenchmark(cfg, registry); + case WriteThroughput: + case WriteLatency: + return new SyncWriteBenchmark(cfg, registry); + default: + throw new IllegalArgumentException( + "Sync mode is not supported for operation: " + cfg.getOperationType()); + } + } + + // CTL workloads + if (cfg.getOperationType() == Operation.CtlWorkload) { + return new AsyncCtlWorkload(cfg, registry); + } + if (cfg.getOperationType() == Operation.LinkedInCtlWorkload) { + return new LICtlWorkload(cfg, registry); + } + + // Encryption benchmarks + if (cfg.isEncryptionEnabled()) { + switch (cfg.getOperationType()) { + case WriteThroughput: + case WriteLatency: + return new AsyncEncryptionWriteBenchmark(cfg, registry); + case ReadThroughput: + case ReadLatency: + return new AsyncEncryptionReadBenchmark(cfg, registry); + case QueryCross: + case QuerySingle: + case QueryParallel: + case QueryOrderby: + case QueryTopOrderby: + case QueryInClauseParallel: + return new AsyncEncryptionQueryBenchmark(cfg, registry); + case QuerySingleMany: + return new AsyncEncryptionQuerySinglePartitionMultiple(cfg, registry); + default: + throw new IllegalArgumentException( + "Encryption is not supported for operation: " + cfg.getOperationType()); + } + } + + // Default: async benchmarks switch (cfg.getOperationType()) { case ReadThroughput: case ReadLatency: diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/Main.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/Main.java index 053d2d45bdff..7a966a6ca36b 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/Main.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/Main.java @@ -3,23 +3,11 @@ package com.azure.cosmos.benchmark; -import com.azure.cosmos.benchmark.ctl.AsyncCtlWorkload; -import 
com.azure.cosmos.benchmark.encryption.AsyncEncryptionBenchmark; -import com.azure.cosmos.benchmark.encryption.AsyncEncryptionQueryBenchmark; -import com.azure.cosmos.benchmark.encryption.AsyncEncryptionQuerySinglePartitionMultiple; -import com.azure.cosmos.benchmark.encryption.AsyncEncryptionReadBenchmark; -import com.azure.cosmos.benchmark.encryption.AsyncEncryptionWriteBenchmark; -import com.azure.cosmos.benchmark.linkedin.LICtlWorkload; import com.beust.jcommander.JCommander; import com.beust.jcommander.ParameterException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.Optional; - -import static com.azure.cosmos.benchmark.Operation.CtlWorkload; -import static com.azure.cosmos.benchmark.Operation.LinkedInCtlWorkload; - public class Main { private final static Logger LOGGER = LoggerFactory.getLogger(Main.class); @@ -39,21 +27,9 @@ public static void main(String[] args) throws Exception { BenchmarkConfig benchConfig = BenchmarkConfig.fromConfiguration(cfg); TenantWorkloadConfig firstTenant = benchConfig.getTenantWorkloads().get(0); - validateConfiguration(firstTenant, cfg); + validateConfiguration(firstTenant); - if (firstTenant.isSync()) { - syncBenchmark(firstTenant, benchConfig); - } else { - if (firstTenant.getOperationType().equals(CtlWorkload)) { - asyncCtlWorkload(firstTenant, benchConfig); - } else if (firstTenant.getOperationType().equals(LinkedInCtlWorkload)) { - linkedInCtlWorkload(firstTenant, benchConfig); - } else if (firstTenant.isEncryptionEnabled()) { - asyncEncryptionBenchmark(firstTenant, benchConfig); - } else { - asyncBenchmark(benchConfig); - } - } + new BenchmarkOrchestrator().run(benchConfig); } catch (ParameterException e) { System.err.println("INVALID Usage: " + e.getMessage()); System.err.println("Try '-help' for more information."); @@ -61,7 +37,7 @@ public static void main(String[] args) throws Exception { } } - private static void validateConfiguration(TenantWorkloadConfig workloadCfg, Configuration cfg) { + 
private static void validateConfiguration(TenantWorkloadConfig workloadCfg) { switch (workloadCfg.getOperationType()) { case WriteLatency: case WriteThroughput: @@ -83,108 +59,4 @@ private static void validateConfiguration(TenantWorkloadConfig workloadCfg, Conf } } } - - private static void syncBenchmark(TenantWorkloadConfig workloadCfg, BenchmarkConfig benchConfig) throws Exception { - LOGGER.info("Sync benchmark ..."); - SyncBenchmark benchmark = null; - try { - switch (workloadCfg.getOperationType()) { - case ReadThroughput: - case ReadLatency: - benchmark = new SyncReadBenchmark(workloadCfg, benchConfig); - break; - case WriteLatency: - case WriteThroughput: - benchmark = new SyncWriteBenchmark(workloadCfg, benchConfig); - break; - default: - throw new RuntimeException(workloadCfg.getOperationType() + " is not supported"); - } - LOGGER.info("Starting {}", workloadCfg.getOperationType()); - benchmark.run(); - } finally { - if (benchmark != null) { - benchmark.shutdown(); - } - } - } - - /** - * Async benchmark path: builds BenchmarkConfig from CLI args and delegates to BenchmarkOrchestrator. - * Handles both single-tenant and multi-tenant modes via workload config file. 
- */ - private static void asyncBenchmark(BenchmarkConfig benchConfig) throws Exception { - LOGGER.info("Async benchmark via BenchmarkOrchestrator ({} tenants, {} cycles)...", - benchConfig.getTenantWorkloads().size(), benchConfig.getCycles()); - new BenchmarkOrchestrator().run(benchConfig); - } - - private static void asyncEncryptionBenchmark(TenantWorkloadConfig workloadCfg, BenchmarkConfig benchConfig) throws Exception { - LOGGER.info("Async encryption benchmark ..."); - AsyncEncryptionBenchmark benchmark = null; - try { - switch (workloadCfg.getOperationType()) { - case WriteThroughput: - case WriteLatency: - benchmark = new AsyncEncryptionWriteBenchmark(workloadCfg, benchConfig); - break; - case ReadThroughput: - case ReadLatency: - benchmark = new AsyncEncryptionReadBenchmark(workloadCfg, benchConfig); - break; - case QueryCross: - case QuerySingle: - case QueryParallel: - case QueryOrderby: - case QueryTopOrderby: - case QueryInClauseParallel: - benchmark = new AsyncEncryptionQueryBenchmark(workloadCfg, benchConfig); - break; - case QuerySingleMany: - benchmark = new AsyncEncryptionQuerySinglePartitionMultiple(workloadCfg, benchConfig); - break; - default: - throw new RuntimeException(workloadCfg.getOperationType() + " is not supported"); - } - LOGGER.info("Starting {}", workloadCfg.getOperationType()); - benchmark.run(); - } finally { - if (benchmark != null) { - benchmark.shutdown(); - } - } - } - - private static void asyncCtlWorkload(TenantWorkloadConfig workloadCfg, BenchmarkConfig benchConfig) throws Exception { - LOGGER.info("Async ctl workload"); - AsyncCtlWorkload benchmark = null; - try { - benchmark = new AsyncCtlWorkload(workloadCfg, benchConfig); - LOGGER.info("Starting {}", workloadCfg.getOperationType()); - benchmark.run(); - } finally { - if (benchmark != null) { - benchmark.shutdown(); - } - } - } - - private static void linkedInCtlWorkload(TenantWorkloadConfig workloadCfg, BenchmarkConfig benchConfig) { - LOGGER.info("Executing the LinkedIn 
ctl workload"); - LICtlWorkload workload = null; - try { - workload = new LICtlWorkload(workloadCfg, benchConfig); - LOGGER.info("Setting up the LinkedIn ctl workload"); - workload.setup(); - LOGGER.info("Starting the LinkedIn ctl workload"); - workload.run(); - } catch (Exception e) { - LOGGER.error("Exception received while executing the LinkedIn ctl workload", e); - throw e; - } finally { - Optional.ofNullable(workload) - .ifPresent(LICtlWorkload::shutdown); - } - LOGGER.info("Completed LinkedIn ctl workload execution"); - } } diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/ReadMyWriteWorkflow.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/ReadMyWriteWorkflow.java index 49feffb637b2..8a67caa1c6df 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/ReadMyWriteWorkflow.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/ReadMyWriteWorkflow.java @@ -501,7 +501,7 @@ protected String getDocumentLink(Document doc) { } @Override - void shutdown() { + public void shutdown() { if (this.client != null) { this.client.close(); } diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/SyncBenchmark.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/SyncBenchmark.java index df03d2441462..c7f6eb7d13aa 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/SyncBenchmark.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/SyncBenchmark.java @@ -19,17 +19,10 @@ import com.azure.cosmos.models.CosmosClientTelemetryConfig; import com.azure.cosmos.models.CosmosItemResponse; import com.azure.cosmos.models.ThroughputProperties; -import com.codahale.metrics.ConsoleReporter; -import com.codahale.metrics.CsvReporter; import com.codahale.metrics.Meter; import com.codahale.metrics.MetricRegistry; -import 
com.codahale.metrics.ScheduledReporter; import com.codahale.metrics.Timer; -import com.codahale.metrics.jvm.CachedThreadStatesGaugeSet; -import com.codahale.metrics.jvm.GarbageCollectorMetricSet; -import com.codahale.metrics.jvm.MemoryUsageGaugeSet; import org.apache.commons.lang3.RandomStringUtils; -import org.apache.commons.lang3.StringUtils; import org.mpierce.metrics.reservoir.hdrhistogram.HdrHistogramResetOnSnapshotReservoir; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -47,15 +40,12 @@ import java.util.function.BiFunction; import java.util.stream.Collectors; -abstract class SyncBenchmark { +abstract class SyncBenchmark implements Benchmark { private static final ImplementationBridgeHelpers.CosmosClientBuilderHelper.CosmosClientBuilderAccessor clientBuilderAccessor = ImplementationBridgeHelpers.CosmosClientBuilderHelper.getCosmosClientBuilderAccessor(); - private final MetricRegistry metricsRegistry = new MetricRegistry(); - private final ScheduledReporter reporter; - - private final ScheduledReporter resultReporter; + private final MetricRegistry metricsRegistry; private final ExecutorService executorService; private Meter successMeter; @@ -65,13 +55,11 @@ abstract class SyncBenchmark { final Logger logger; final CosmosClient benchmarkWorkloadClient; - final CosmosClient resultUploaderClient; CosmosContainer cosmosContainer; CosmosDatabase cosmosDatabase; final String partitionKey; final TenantWorkloadConfig workloadConfig; - final BenchmarkConfig benchConfig; final List docsToRead; final Semaphore concurrencyControlSemaphore; Timer latency; @@ -108,10 +96,10 @@ public T apply(T o, Throwable throwable) { } } - SyncBenchmark(TenantWorkloadConfig workloadCfg, BenchmarkConfig benchCfg) throws Exception { + SyncBenchmark(TenantWorkloadConfig workloadCfg, MetricRegistry sharedRegistry) throws Exception { executorService = Executors.newFixedThreadPool(workloadCfg.getConcurrency()); workloadConfig = workloadCfg; - benchConfig = benchCfg; + 
metricsRegistry = sharedRegistry; logger = LoggerFactory.getLogger(this.getClass()); boolean isManagedIdentityRequired = workloadCfg.isManagedIdentityRequired(); @@ -126,8 +114,6 @@ public T apply(T o, Throwable throwable) { new CosmosClientBuilder() .key(workloadCfg.getMasterKey()); - CosmosClientBuilder resultUploadClientBuilder = new CosmosClientBuilder(); - benchmarkSpecificClientBuilder.preferredRegions(workloadCfg.getPreferredRegionsList()) .endpoint(workloadCfg.getServiceEndpoint()) .userAgentSuffix(workloadCfg.getApplicationName()) @@ -157,10 +143,6 @@ public T apply(T o, Throwable throwable) { } benchmarkWorkloadClient = benchmarkSpecificClientBuilder.buildClient(); - this.resultUploaderClient = resultUploadClientBuilder - .endpoint(StringUtils.isNotEmpty(benchConfig.getResultUploadEndpoint()) ? benchConfig.getResultUploadEndpoint() : workloadCfg.getServiceEndpoint()) - .key(StringUtils.isNotEmpty(benchConfig.getResultUploadKey()) ? benchConfig.getResultUploadKey() : workloadCfg.getMasterKey()) - .buildClient(); try { cosmosDatabase = benchmarkWorkloadClient.getDatabase(workloadCfg.getDatabaseId()); @@ -249,48 +231,15 @@ public T apply(T o, Throwable throwable) { docsToRead = createDocumentFutureList.stream().map(future -> getOrThrow(future)).collect(Collectors.toList()); init(); - - if (benchConfig.isEnableJvmStats()) { - metricsRegistry.register("gc", new GarbageCollectorMetricSet()); - metricsRegistry.register("threads", new CachedThreadStatesGaugeSet(10, TimeUnit.SECONDS)); - metricsRegistry.register("memory", new MemoryUsageGaugeSet()); - } - - if (benchConfig.getReportingDirectory() != null) { - reporter = CsvReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS) - .convertDurationsTo(TimeUnit.MILLISECONDS).build(new java.io.File(benchConfig.getReportingDirectory())); - } else { - reporter = ConsoleReporter.forRegistry(metricsRegistry).convertRatesTo(TimeUnit.SECONDS) - .convertDurationsTo(TimeUnit.MILLISECONDS).build(); - } - - if 
(benchConfig.getResultUploadDatabase() != null && benchConfig.getResultUploadContainer() != null) { - String op = workloadConfig.isSync() - ? "SYNC_" + workloadCfg.getOperationType().name() - : workloadCfg.getOperationType().name(); - resultReporter = CosmosTotalResultReporter - .forRegistry( - metricsRegistry, - this.resultUploaderClient.getDatabase(benchConfig.getResultUploadDatabase()).getContainer(benchConfig.getResultUploadContainer()), - op, - benchConfig.getTestVariationName(), - benchConfig.getBranchName(), - benchConfig.getCommitId(), - workloadCfg.getConcurrency()) - .convertRatesTo(TimeUnit.SECONDS) - .convertDurationsTo(TimeUnit.MILLISECONDS).build(); - } else { - resultReporter = null; - } - } protected void init() { } - void shutdown() { - - if (this.databaseCreated) { + public void shutdown() { + if (workloadConfig.isSuppressCleanup()) { + logger.info("Skipping cleanup of database/container (suppressCleanup=true)"); + } else if (this.databaseCreated) { cosmosDatabase.delete(); logger.info("Deleted temporary database {} created for this test", workloadConfig.getDatabaseId()); } else if (this.collectionCreated) { @@ -298,7 +247,6 @@ void shutdown() { logger.info("Deleted temporary collection {} created for this test", workloadConfig.getContainerId()); } - resultUploaderClient.close(); benchmarkWorkloadClient.close(); executorService.shutdown(); } @@ -311,7 +259,7 @@ protected void onError(Throwable throwable) { protected abstract T performWorkload(long i) throws Exception; - void run() throws Exception { + public void run() throws Exception { successMeter = metricsRegistry.meter(TenantWorkloadConfig.SUCCESS_COUNTER_METER_NAME); failureMeter = metricsRegistry.meter(TenantWorkloadConfig.FAILURE_COUNTER_METER_NAME); @@ -336,10 +284,6 @@ void run() throws Exception { break; } - reporter.start(benchConfig.getPrintingInterval(), TimeUnit.SECONDS); - if (resultReporter != null) { - resultReporter.start(benchConfig.getPrintingInterval(), TimeUnit.SECONDS); - } 
long startTime = System.currentTimeMillis(); AtomicLong count = new AtomicLong(0); @@ -427,14 +371,6 @@ public T apply(T t, Throwable throwable) { long endTime = System.currentTimeMillis(); logger.info("[{}] operations performed in [{}] seconds.", workloadConfig.getNumberOfOperations(), (int) ((endTime - startTime) / 1000)); - - reporter.report(); - reporter.close(); - - if (resultReporter != null) { - resultReporter.report(); - resultReporter.close(); - } } RuntimeException propagate(Exception e) { diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/SyncReadBenchmark.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/SyncReadBenchmark.java index abd4c0ee5409..52a3a6819a2d 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/SyncReadBenchmark.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/SyncReadBenchmark.java @@ -8,10 +8,12 @@ import com.azure.cosmos.models.CosmosItemResponse; import com.azure.cosmos.models.PartitionKey; +import com.codahale.metrics.MetricRegistry; + class SyncReadBenchmark extends SyncBenchmark { - SyncReadBenchmark(TenantWorkloadConfig workloadCfg, BenchmarkConfig benchCfg) throws Exception { - super(workloadCfg, benchCfg); + SyncReadBenchmark(TenantWorkloadConfig workloadCfg, MetricRegistry sharedRegistry) throws Exception { + super(workloadCfg, sharedRegistry); } @Override diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/SyncWriteBenchmark.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/SyncWriteBenchmark.java index 55996540c5dc..d90a40200eed 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/SyncWriteBenchmark.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/SyncWriteBenchmark.java @@ -7,6 +7,8 @@ import com.azure.cosmos.models.PartitionKey; import 
org.apache.commons.lang3.RandomStringUtils; +import com.codahale.metrics.MetricRegistry; + import java.util.UUID; class SyncWriteBenchmark extends SyncBenchmark { @@ -14,8 +16,8 @@ class SyncWriteBenchmark extends SyncBenchmark { private final String dataFieldValue; private final String uuid; - SyncWriteBenchmark(TenantWorkloadConfig workloadCfg, BenchmarkConfig benchCfg) throws Exception { - super(workloadCfg, benchCfg); + SyncWriteBenchmark(TenantWorkloadConfig workloadCfg, MetricRegistry sharedRegistry) throws Exception { + super(workloadCfg, sharedRegistry); uuid = UUID.randomUUID().toString(); dataFieldValue = diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/ctl/AsyncCtlWorkload.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/ctl/AsyncCtlWorkload.java index cec1ed3239b7..6959ff042dec 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/ctl/AsyncCtlWorkload.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/ctl/AsyncCtlWorkload.java @@ -12,11 +12,10 @@ import com.azure.cosmos.CosmosException; import com.azure.cosmos.DirectConnectionConfig; import com.azure.cosmos.GatewayConnectionConfig; -import com.azure.cosmos.benchmark.BenchmarkConfig; +import com.azure.cosmos.benchmark.Benchmark; import com.azure.cosmos.benchmark.BenchmarkHelper; import com.azure.cosmos.benchmark.BenchmarkRequestSubscriber; import com.azure.cosmos.benchmark.PojoizedJson; -import com.azure.cosmos.benchmark.ScheduledReporterFactory; import com.azure.cosmos.benchmark.TenantWorkloadConfig; import com.azure.cosmos.implementation.HttpConstants; import com.azure.cosmos.implementation.OperationType; @@ -27,11 +26,7 @@ import com.azure.cosmos.models.ThroughputProperties; import com.codahale.metrics.Meter; import com.codahale.metrics.MetricRegistry; -import com.codahale.metrics.ScheduledReporter; import com.codahale.metrics.Timer; -import 
com.codahale.metrics.jvm.CachedThreadStatesGaugeSet; -import com.codahale.metrics.jvm.GarbageCollectorMetricSet; -import com.codahale.metrics.jvm.MemoryUsageGaugeSet; import org.apache.commons.lang3.RandomStringUtils; import org.mpierce.metrics.reservoir.hdrhistogram.HdrHistogramResetOnSnapshotReservoir; import org.slf4j.Logger; @@ -48,20 +43,18 @@ import java.util.Random; import java.util.UUID; import java.util.concurrent.Semaphore; -import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; -public class AsyncCtlWorkload { +public class AsyncCtlWorkload implements Benchmark { private final String PERCENT_PARSING_ERROR = "Unable to parse user provided readWriteQueryReadManyPct "; private final String prefixUuidForCreate; private final String dataFieldValue; private final String partitionKey; - private final MetricRegistry metricsRegistry = new MetricRegistry(); + private final MetricRegistry metricsRegistry; private final Logger logger; private final CosmosAsyncClient cosmosClient; private final TenantWorkloadConfig workloadConfig; - private final BenchmarkConfig benchConfig; private final Map> docsToRead = new HashMap<>(); private final Map> itemIdentityMap = new HashMap<>(); private final Semaphore concurrencyControlSemaphore; @@ -71,7 +64,6 @@ public class AsyncCtlWorkload { private Timer writeLatency; private Timer queryLatency; private Timer readManyLatency; - private ScheduledReporter reporter; private Meter readSuccessMeter; private Meter readFailureMeter; @@ -91,7 +83,7 @@ public class AsyncCtlWorkload { private int queryPct; private int readManyPct; - public AsyncCtlWorkload(TenantWorkloadConfig workloadCfg, BenchmarkConfig benchCfg) { + public AsyncCtlWorkload(TenantWorkloadConfig workloadCfg, MetricRegistry sharedRegistry) { final TokenCredential credential = workloadCfg.isManagedIdentityRequired() ? 
workloadCfg.buildTokenCredential() : null; @@ -115,7 +107,7 @@ public AsyncCtlWorkload(TenantWorkloadConfig workloadCfg, BenchmarkConfig benchC } cosmosClient = cosmosClientBuilder.buildAsyncClient(); workloadConfig = workloadCfg; - benchConfig = benchCfg; + metricsRegistry = sharedRegistry; logger = LoggerFactory.getLogger(this.getClass()); parsedReadWriteQueryReadManyPct(workloadConfig.getReadWriteQueryReadManyPct()); @@ -132,20 +124,14 @@ public AsyncCtlWorkload(TenantWorkloadConfig workloadCfg, BenchmarkConfig benchC createPrePopulatedDocs(workloadConfig.getNumberOfPreCreatedDocuments()); createItemIdentityMap(docsToRead); - if (benchConfig.isEnableJvmStats()) { - metricsRegistry.register("gc", new GarbageCollectorMetricSet()); - metricsRegistry.register("threads", new CachedThreadStatesGaugeSet(10, TimeUnit.SECONDS)); - metricsRegistry.register("memory", new MemoryUsageGaugeSet()); - } - - reporter = ScheduledReporterFactory.create(benchCfg, metricsRegistry); - prefixUuidForCreate = UUID.randomUUID().toString(); random = new Random(); } public void shutdown() { - if (this.databaseCreated) { + if (workloadConfig.isSuppressCleanup()) { + logger.info("Skipping cleanup of database/container (suppressCleanup=true)"); + } else if (this.databaseCreated) { cosmosAsyncDatabase.delete().block(); logger.info("Deleted temporary database {} created for this test", this.workloadConfig.getDatabaseId()); } else if (containerToClearAfterTest.size() > 0) { @@ -204,7 +190,6 @@ public void run() throws Exception { queryLatency = metricsRegistry.register("Query Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); readManyLatency = metricsRegistry.register("Read Many Latency", new Timer(new HdrHistogramResetOnSnapshotReservoir())); - reporter.start(benchConfig.getPrintingInterval(), TimeUnit.SECONDS); long startTime = System.currentTimeMillis(); AtomicLong count = new AtomicLong(0); @@ -258,9 +243,6 @@ public void run() throws Exception { long endTime = 
System.currentTimeMillis(); logger.info("[{}] operations performed in [{}] seconds.", workloadConfig.getNumberOfOperations(), (int) ((endTime - startTime) / 1000)); - - reporter.report(); - reporter.close(); } private void parsedReadWriteQueryReadManyPct(String readWriteQueryReadManyPct) { diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/encryption/AsyncEncryptionBenchmark.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/encryption/AsyncEncryptionBenchmark.java index 21fe0a03854b..aae7fc15d9b6 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/encryption/AsyncEncryptionBenchmark.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/encryption/AsyncEncryptionBenchmark.java @@ -12,7 +12,7 @@ import com.azure.cosmos.CosmosException; import com.azure.cosmos.DirectConnectionConfig; import com.azure.cosmos.GatewayConnectionConfig; -import com.azure.cosmos.benchmark.BenchmarkConfig; +import com.azure.cosmos.benchmark.Benchmark; import com.azure.cosmos.benchmark.BenchmarkHelper; import com.azure.cosmos.benchmark.Operation; import com.azure.cosmos.benchmark.TenantWorkloadConfig; @@ -38,17 +38,9 @@ import com.azure.identity.ClientSecretCredentialBuilder; import com.azure.security.keyvault.keys.cryptography.KeyEncryptionKeyClientBuilder; import com.azure.security.keyvault.keys.cryptography.models.EncryptionAlgorithm; -import com.codahale.metrics.ConsoleReporter; -import com.codahale.metrics.CsvReporter; -import com.codahale.metrics.ConsoleReporter; -import com.codahale.metrics.CsvReporter; import com.codahale.metrics.Meter; import com.codahale.metrics.MetricRegistry; -import com.codahale.metrics.ScheduledReporter; import com.codahale.metrics.Timer; -import com.codahale.metrics.jvm.CachedThreadStatesGaugeSet; -import com.codahale.metrics.jvm.GarbageCollectorMetricSet; -import com.codahale.metrics.jvm.MemoryUsageGaugeSet; import 
org.apache.commons.lang3.RandomStringUtils; import org.mpierce.metrics.reservoir.hdrhistogram.HdrHistogramResetOnSnapshotReservoir; import org.reactivestreams.Subscription; @@ -72,9 +64,8 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; -public abstract class AsyncEncryptionBenchmark { - private final MetricRegistry metricsRegistry = new MetricRegistry(); - private ScheduledReporter reporter; +public abstract class AsyncEncryptionBenchmark implements Benchmark { + private final MetricRegistry metricsRegistry; private volatile Meter successMeter; private volatile Meter failureMeter; @@ -89,7 +80,6 @@ public abstract class AsyncEncryptionBenchmark { final CosmosAsyncClient cosmosClient; final String partitionKey; - final BenchmarkConfig benchConfig; final TenantWorkloadConfig workloadConfig; final List docsToRead; final Semaphore concurrencyControlSemaphore; @@ -110,7 +100,7 @@ public abstract class AsyncEncryptionBenchmark { private AtomicBoolean warmupMode = new AtomicBoolean(false); - AsyncEncryptionBenchmark(TenantWorkloadConfig workloadCfg, BenchmarkConfig benchCfg) throws IOException { + AsyncEncryptionBenchmark(TenantWorkloadConfig workloadCfg, MetricRegistry sharedRegistry) throws IOException { workloadConfig = workloadCfg; @@ -137,7 +127,7 @@ public abstract class AsyncEncryptionBenchmark { } cosmosClient = cosmosClientBuilder.buildAsyncClient(); cosmosEncryptionAsyncClient = createEncryptionClientInstance(cosmosClient); - benchConfig = benchCfg; + metricsRegistry = sharedRegistry; logger = LoggerFactory.getLogger(this.getClass()); createEncryptionDatabaseAndContainer(); partitionKey = cosmosAsyncContainer.read().block().getProperties().getPartitionKeyDefinition() @@ -211,24 +201,6 @@ uuid, new PartitionKey(partitionKey), PojoizedJson.class) logger.info("Finished pre-populating {} documents", workloadCfg.getNumberOfPreCreatedDocuments()); init(); - - if (benchConfig.isEnableJvmStats()) { - 
metricsRegistry.register("gc", new GarbageCollectorMetricSet()); - metricsRegistry.register("threads", new CachedThreadStatesGaugeSet(10, TimeUnit.SECONDS)); - metricsRegistry.register("memory", new MemoryUsageGaugeSet()); - } - - if (benchConfig.getReportingDirectory() != null) { - reporter = CsvReporter.forRegistry(metricsRegistry) - .convertDurationsTo(TimeUnit.MILLISECONDS) - .convertRatesTo(TimeUnit.SECONDS) - .build(new java.io.File(benchConfig.getReportingDirectory())); - } else { - reporter = ConsoleReporter.forRegistry(metricsRegistry) - .convertDurationsTo(TimeUnit.MILLISECONDS) - .convertRatesTo(TimeUnit.SECONDS) - .build(); - } } protected void init() { @@ -258,7 +230,6 @@ protected void initializeMetersIfSkippedEnoughOperations(AtomicLong count) { logger.info("Warmup phase finished. Starting capturing perf numbers ...."); resetMeters(); initializeMeter(); - reporter.start(benchConfig.getPrintingInterval(), TimeUnit.SECONDS); warmupMode.set(false); } } @@ -313,8 +284,6 @@ public void run() throws Exception { logger.info("Starting warm up phase. 
Executing {} operations to warm up ...", workloadConfig.getSkipWarmUpOperations()); warmupMode.set(true); - } else { - reporter.start(benchConfig.getPrintingInterval(), TimeUnit.SECONDS); } long startTime = System.currentTimeMillis(); @@ -382,9 +351,6 @@ protected void hookOnError(Throwable throwable) { long endTime = System.currentTimeMillis(); logger.info("[{}] operations performed in [{}] seconds.", workloadConfig.getNumberOfOperations(), (int) ((endTime - startTime) / 1000)); - - reporter.report(); - reporter.close(); } protected Mono sparsityMono(long i) { diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/encryption/AsyncEncryptionQueryBenchmark.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/encryption/AsyncEncryptionQueryBenchmark.java index 3d2c68423aa6..c5ee726b6926 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/encryption/AsyncEncryptionQueryBenchmark.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/encryption/AsyncEncryptionQueryBenchmark.java @@ -3,13 +3,13 @@ package com.azure.cosmos.benchmark.encryption; -import com.azure.cosmos.benchmark.BenchmarkConfig; import com.azure.cosmos.benchmark.Operation; import com.azure.cosmos.benchmark.PojoizedJson; import com.azure.cosmos.benchmark.TenantWorkloadConfig; import com.azure.cosmos.models.CosmosQueryRequestOptions; import com.azure.cosmos.models.FeedResponse; import com.azure.cosmos.models.PartitionKey; +import com.codahale.metrics.MetricRegistry; import com.codahale.metrics.Timer; import org.reactivestreams.Subscription; import reactor.core.publisher.BaseSubscriber; @@ -54,8 +54,8 @@ protected void hookOnError(Throwable throwable) { } } - public AsyncEncryptionQueryBenchmark(TenantWorkloadConfig workloadCfg, BenchmarkConfig benchCfg) throws IOException { - super(workloadCfg, benchCfg); + public AsyncEncryptionQueryBenchmark(TenantWorkloadConfig workloadCfg, 
MetricRegistry sharedRegistry) throws IOException { + super(workloadCfg, sharedRegistry); } @Override diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/encryption/AsyncEncryptionQuerySinglePartitionMultiple.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/encryption/AsyncEncryptionQuerySinglePartitionMultiple.java index 386e1d9eb3b0..d6fd06b05d8f 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/encryption/AsyncEncryptionQuerySinglePartitionMultiple.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/encryption/AsyncEncryptionQuerySinglePartitionMultiple.java @@ -3,9 +3,9 @@ package com.azure.cosmos.benchmark.encryption; -import com.azure.cosmos.benchmark.BenchmarkConfig; import com.azure.cosmos.benchmark.PojoizedJson; import com.azure.cosmos.benchmark.TenantWorkloadConfig; +import com.codahale.metrics.MetricRegistry; import com.azure.cosmos.models.CosmosQueryRequestOptions; import com.azure.cosmos.models.FeedResponse; import com.azure.cosmos.models.PartitionKey; @@ -21,8 +21,8 @@ public class AsyncEncryptionQuerySinglePartitionMultiple extends AsyncEncryption private CosmosQueryRequestOptions options; private int pageCount = 0; - public AsyncEncryptionQuerySinglePartitionMultiple(TenantWorkloadConfig workloadCfg, BenchmarkConfig benchCfg) throws IOException { - super(workloadCfg, benchCfg); + public AsyncEncryptionQuerySinglePartitionMultiple(TenantWorkloadConfig workloadCfg, MetricRegistry sharedRegistry) throws IOException { + super(workloadCfg, sharedRegistry); options = new CosmosQueryRequestOptions(); options.setPartitionKey(new PartitionKey("pk")); } diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/encryption/AsyncEncryptionReadBenchmark.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/encryption/AsyncEncryptionReadBenchmark.java index 
28e5b9be5d6e..ad74e9fd9e45 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/encryption/AsyncEncryptionReadBenchmark.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/encryption/AsyncEncryptionReadBenchmark.java @@ -3,12 +3,12 @@ package com.azure.cosmos.benchmark.encryption; -import com.azure.cosmos.benchmark.BenchmarkConfig; import com.azure.cosmos.benchmark.PojoizedJson; import com.azure.cosmos.benchmark.TenantWorkloadConfig; import com.azure.cosmos.models.CosmosItemRequestOptions; import com.azure.cosmos.models.CosmosItemResponse; import com.azure.cosmos.models.PartitionKey; +import com.codahale.metrics.MetricRegistry; import com.codahale.metrics.Timer; import org.reactivestreams.Subscription; import reactor.core.publisher.BaseSubscriber; @@ -50,8 +50,8 @@ protected void hookOnError(Throwable throwable) { } } - public AsyncEncryptionReadBenchmark(TenantWorkloadConfig workloadCfg, BenchmarkConfig benchCfg) throws IOException { - super(workloadCfg, benchCfg); + public AsyncEncryptionReadBenchmark(TenantWorkloadConfig workloadCfg, MetricRegistry sharedRegistry) throws IOException { + super(workloadCfg, sharedRegistry); } @Override diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/encryption/AsyncEncryptionWriteBenchmark.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/encryption/AsyncEncryptionWriteBenchmark.java index 6af727228af0..3367c76c7c0f 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/encryption/AsyncEncryptionWriteBenchmark.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/encryption/AsyncEncryptionWriteBenchmark.java @@ -3,7 +3,6 @@ package com.azure.cosmos.benchmark.encryption; -import com.azure.cosmos.benchmark.BenchmarkConfig; import com.azure.cosmos.benchmark.BenchmarkHelper; import com.azure.cosmos.benchmark.Operation; import 
com.azure.cosmos.benchmark.PojoizedJson; @@ -11,6 +10,7 @@ import com.azure.cosmos.models.CosmosItemRequestOptions; import com.azure.cosmos.models.CosmosItemResponse; import com.azure.cosmos.models.PartitionKey; +import com.codahale.metrics.MetricRegistry; import com.codahale.metrics.Timer; import org.apache.commons.lang3.RandomStringUtils; import org.reactivestreams.Subscription; @@ -57,8 +57,8 @@ protected void hookOnError(Throwable throwable) { } } - public AsyncEncryptionWriteBenchmark(TenantWorkloadConfig workloadCfg, BenchmarkConfig benchCfg) throws IOException { - super(workloadCfg, benchCfg); + public AsyncEncryptionWriteBenchmark(TenantWorkloadConfig workloadCfg, MetricRegistry sharedRegistry) throws IOException { + super(workloadCfg, sharedRegistry); uuid = UUID.randomUUID().toString(); dataFieldValue = RandomStringUtils.randomAlphabetic(workloadConfig.getDocumentDataFieldSize()); } diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/LICtlWorkload.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/LICtlWorkload.java index b511aba0e2e7..de1a0cb8dfc2 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/LICtlWorkload.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/LICtlWorkload.java @@ -5,23 +5,17 @@ import com.azure.cosmos.CosmosAsyncClient; import com.azure.cosmos.CosmosException; -import com.azure.cosmos.benchmark.BenchmarkConfig; -import com.azure.cosmos.benchmark.ScheduledReporterFactory; +import com.azure.cosmos.benchmark.Benchmark; import com.azure.cosmos.benchmark.TenantWorkloadConfig; import com.azure.cosmos.benchmark.linkedin.data.EntityConfiguration; import com.azure.cosmos.benchmark.linkedin.data.InvitationsEntityConfiguration; import com.codahale.metrics.MetricRegistry; -import com.codahale.metrics.ScheduledReporter; -import 
com.codahale.metrics.jvm.CachedThreadStatesGaugeSet; -import com.codahale.metrics.jvm.GarbageCollectorMetricSet; -import com.codahale.metrics.jvm.MemoryUsageGaugeSet; import com.google.common.base.Preconditions; -import java.util.concurrent.TimeUnit; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -public class LICtlWorkload { +public class LICtlWorkload implements Benchmark { private static final Logger LOGGER = LoggerFactory.getLogger(LICtlWorkload.class); /** @@ -34,27 +28,23 @@ public enum Scenario { } private final TenantWorkloadConfig _workloadConfig; - private final BenchmarkConfig _benchConfig; private final EntityConfiguration _entityConfiguration; private final CosmosAsyncClient _client; private final CosmosAsyncClient _bulkLoadClient; private final MetricRegistry _metricsRegistry; - private final ScheduledReporter _reporter; private final ResourceManager _resourceManager; private final DataLoader _dataLoader; private final TestRunner _testRunner; - public LICtlWorkload(final TenantWorkloadConfig workloadCfg, final BenchmarkConfig benchConfig) { + public LICtlWorkload(final TenantWorkloadConfig workloadCfg, final MetricRegistry sharedRegistry) { Preconditions.checkNotNull(workloadCfg, "The Workload configuration defining the parameters can not be null"); - Preconditions.checkNotNull(benchConfig, "The benchmark configuration defining the parameters can not be null"); + Preconditions.checkNotNull(sharedRegistry, "The shared MetricRegistry can not be null"); _workloadConfig = workloadCfg; - _benchConfig = benchConfig; _entityConfiguration = new InvitationsEntityConfiguration(workloadCfg); _client = AsyncClientFactory.buildAsyncClient(workloadCfg); _bulkLoadClient = AsyncClientFactory.buildBulkLoadAsyncClient(workloadCfg); - _metricsRegistry = new MetricRegistry(); - _reporter = ScheduledReporterFactory.create(_benchConfig, _metricsRegistry); + _metricsRegistry = sharedRegistry; _resourceManager = workloadCfg.shouldManageDatabase() ? 
new DatabaseResourceManager(workloadCfg, _entityConfiguration, _client) : new CollectionResourceManager(workloadCfg, _entityConfiguration, _client); @@ -62,13 +52,8 @@ public LICtlWorkload(final TenantWorkloadConfig workloadCfg, final BenchmarkConf _testRunner = createTestRunner(workloadCfg); } - public void setup() throws CosmosException { - if (_benchConfig.isEnableJvmStats()) { - LOGGER.info("Enabling JVM stats collection"); - _metricsRegistry.register("gc", new GarbageCollectorMetricSet()); - _metricsRegistry.register("threads", new CachedThreadStatesGaugeSet(10, TimeUnit.SECONDS)); - _metricsRegistry.register("memory", new MemoryUsageGaugeSet()); - } + public void run() { + LOGGER.info("Setting up the LinkedIn ctl workload"); LOGGER.info("Creating resources"); _resourceManager.createResources(); @@ -80,25 +65,19 @@ public void setup() throws CosmosException { _bulkLoadClient.close(); _testRunner.init(); - } - public void run() { LOGGER.info("Executing the CosmosDB test"); - _reporter.start(_benchConfig.getPrintingInterval(), TimeUnit.SECONDS); - _testRunner.run(); - - _reporter.report(); } - /** - * Close all existing resources, from CosmosDB collections to open connections - */ public void shutdown() { _testRunner.cleanup(); - _resourceManager.deleteResources(); + if (_workloadConfig.isSuppressCleanup()) { + LOGGER.info("Skipping cleanup of resources (suppressCleanup=true)"); + } else { + _resourceManager.deleteResources(); + } _client.close(); - _reporter.close(); } private TestRunner createTestRunner(TenantWorkloadConfig workloadCfg) { From a486439264afe533d39bfcec9d6ef8d569428869 Mon Sep 17 00:00:00 2001 From: Annie Liang Date: Wed, 4 Mar 2026 15:03:49 -0800 Subject: [PATCH 3/5] Rename globalDefaults to tenantDefaults in workload config The section provides default values for tenant-level configuration, so tenantDefaults better reflects its purpose. 
Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- .../azure/cosmos/benchmark/BenchmarkConfig.java | 8 ++++---- .../cosmos/benchmark/TenantWorkloadConfig.java | 16 ++++++++-------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/BenchmarkConfig.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/BenchmarkConfig.java index a9e9d10ec3e8..edd8d2f14db6 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/BenchmarkConfig.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/BenchmarkConfig.java @@ -18,7 +18,7 @@ * Contains lifecycle params, reporting config, and fully-resolved tenant workloads. * *

Each {@link TenantWorkloadConfig} carries its complete effective config - * (account info + workload params), so no separate globalDefaults map is needed.

+ * (account info + workload params), so no separate tenantDefaults map is needed.

* *

When {@code cycles > 1}, sensible defaults are applied automatically * unless explicitly overridden (settleTimeMs=90s, suppressCleanup=true).

@@ -136,15 +136,15 @@ public String toString() { } /** - * Reads JVM-global system properties from the globalDefaults section of the workload config file. + * Reads JVM-global system properties from the tenantDefaults section of the workload config file. * These properties are JVM-wide and cannot vary per tenant. */ private void loadGlobalSystemPropertiesFromWorkloadConfig(File workloadConfigFile) throws IOException { ObjectMapper mapper = new ObjectMapper(); JsonNode root = mapper.readTree(workloadConfigFile); - // JVM-global system properties from globalDefaults - JsonNode defaults = root.get("globalDefaults"); + // JVM-global system properties from tenantDefaults + JsonNode defaults = root.get("tenantDefaults"); if (defaults != null && defaults.isObject()) { if (defaults.has("isPartitionLevelCircuitBreakerEnabled")) { isPartitionLevelCircuitBreakerEnabled = diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/TenantWorkloadConfig.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/TenantWorkloadConfig.java index b0c083069218..816f801f4ae3 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/TenantWorkloadConfig.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/TenantWorkloadConfig.java @@ -29,7 +29,7 @@ * Fully-resolved configuration for a single tenant workload. * Contains account connection info, AAD auth, workload params, and connection * settings. Each instance is the effective config after merging - * globalDefaults with per-tenant overrides at parse time. + * tenantDefaults with per-tenant overrides at parse time. * *

This is the single config object passed to {@link AsyncBenchmark} -- * no intermediate Configuration conversion needed.

@@ -528,19 +528,19 @@ private void applyField(String key, String value, boolean overwrite) { public static List parseWorkloadConfig(File workloadConfigFile) throws IOException { JsonNode root = OBJECT_MAPPER.readTree(workloadConfigFile); - Map globalDefaults = new HashMap<>(); - JsonNode defaultsNode = root.get("globalDefaults"); + Map tenantDefaults = new HashMap<>(); + JsonNode defaultsNode = root.get("tenantDefaults"); if (defaultsNode != null && defaultsNode.isObject()) { Iterator> fields = defaultsNode.fields(); while (fields.hasNext()) { Map.Entry entry = fields.next(); - globalDefaults.put(entry.getKey(), entry.getValue().asText()); + tenantDefaults.put(entry.getKey(), entry.getValue().asText()); } } - if (!globalDefaults.isEmpty()) { - logger.info("globalDefaults applied to all tenants (per-tenant values take priority): {}", - globalDefaults.keySet()); + if (!tenantDefaults.isEmpty()) { + logger.info("tenantDefaults applied to all tenants (per-tenant values take priority): {}", + tenantDefaults.keySet()); } List tenants = new ArrayList<>(); @@ -549,7 +549,7 @@ public static List parseWorkloadConfig(File workloadConfig if (tenantsNode != null && tenantsNode.isArray()) { for (JsonNode tenantNode : tenantsNode) { TenantWorkloadConfig tenant = OBJECT_MAPPER.treeToValue(tenantNode, TenantWorkloadConfig.class); - tenant.applyMap(globalDefaults, false); + tenant.applyMap(tenantDefaults, false); validateTenantConfig(tenant); tenants.add(tenant); } From 6bcf05607003f153cdec2e715eaa0be4744b8adb Mon Sep 17 00:00:00 2001 From: Annie Liang Date: Wed, 4 Mar 2026 15:18:28 -0800 Subject: [PATCH 4/5] Address PR review comments - Split loadGlobalSystemPropertiesFromWorkloadConfig into 4 focused methods: loadJvmSystemProperties, loadMetricsConfig, loadResultUploadConfig, loadRunMetadata - Add clarifying comment in Main.java explaining Configuration -> BenchmarkConfig -> BenchmarkOrchestrator flow Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- 
.../cosmos/benchmark/BenchmarkConfig.java | 99 ++++++++++++------- .../java/com/azure/cosmos/benchmark/Main.java | 5 +- 2 files changed, 69 insertions(+), 35 deletions(-) diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/BenchmarkConfig.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/BenchmarkConfig.java index edd8d2f14db6..73da0914e2cd 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/BenchmarkConfig.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/BenchmarkConfig.java @@ -91,8 +91,9 @@ public static BenchmarkConfig fromConfiguration(Configuration cfg) throws IOExce } logger.info("Loading workload configs from {}.", workloadConfigPath); - config.tenantWorkloads = TenantWorkloadConfig.parseWorkloadConfig(new File(workloadConfigPath)); - config.loadGlobalSystemPropertiesFromWorkloadConfig(new File(workloadConfigPath)); + File workloadFile = new File(workloadConfigPath); + config.tenantWorkloads = TenantWorkloadConfig.parseWorkloadConfig(workloadFile); + config.loadWorkloadConfigSections(workloadFile); return config; } @@ -136,14 +137,24 @@ public String toString() { } /** - * Reads JVM-global system properties from the tenantDefaults section of the workload config file. - * These properties are JVM-wide and cannot vary per tenant. + * Loads all non-tenant sections from the workload config file: + * JVM system properties, metrics config, result upload, and run metadata. 
*/ - private void loadGlobalSystemPropertiesFromWorkloadConfig(File workloadConfigFile) throws IOException { + private void loadWorkloadConfigSections(File workloadConfigFile) throws IOException { ObjectMapper mapper = new ObjectMapper(); JsonNode root = mapper.readTree(workloadConfigFile); - // JVM-global system properties from tenantDefaults + loadJvmSystemProperties(root); + loadMetricsConfig(root); + loadResultUploadConfig(root); + loadRunMetadata(root); + } + + /** + * JVM-global system properties from the tenantDefaults section. + * These are JVM-wide and cannot vary per tenant. + */ + private void loadJvmSystemProperties(JsonNode root) { JsonNode defaults = root.get("tenantDefaults"); if (defaults != null && defaults.isObject()) { if (defaults.has("isPartitionLevelCircuitBreakerEnabled")) { @@ -159,8 +170,12 @@ private void loadGlobalSystemPropertiesFromWorkloadConfig(File workloadConfigFil Integer.parseInt(defaults.get("minConnectionPoolSizePerEndpoint").asText()); } } + } - // Metrics, reporting, and result upload from top-level "metrics" section + /** + * Metrics and reporting settings from the top-level "metrics" section. 
+ */ + private void loadMetricsConfig(JsonNode root) { JsonNode metrics = root.get("metrics"); if (metrics != null && metrics.isObject()) { if (metrics.has("enableJvmStats")) { @@ -175,36 +190,52 @@ private void loadGlobalSystemPropertiesFromWorkloadConfig(File workloadConfigFil if (metrics.has("reportingDirectory")) { reportingDirectory = metrics.get("reportingDirectory").asText(); } + } + } - // Result upload sub-section - JsonNode resultUpload = metrics.get("resultUpload"); - if (resultUpload != null && resultUpload.isObject()) { - if (resultUpload.has("serviceEndpoint")) { - resultUploadEndpoint = resultUpload.get("serviceEndpoint").asText(); - } - if (resultUpload.has("masterKey")) { - resultUploadKey = resultUpload.get("masterKey").asText(); - } - if (resultUpload.has("database")) { - resultUploadDatabase = resultUpload.get("database").asText(); - } - if (resultUpload.has("container")) { - resultUploadContainer = resultUpload.get("container").asText(); - } + /** + * Result upload configuration from "metrics.resultUpload". 
+ */ + private void loadResultUploadConfig(JsonNode root) { + JsonNode metrics = root.get("metrics"); + if (metrics == null || !metrics.isObject()) { + return; + } + JsonNode resultUpload = metrics.get("resultUpload"); + if (resultUpload != null && resultUpload.isObject()) { + if (resultUpload.has("serviceEndpoint")) { + resultUploadEndpoint = resultUpload.get("serviceEndpoint").asText(); + } + if (resultUpload.has("masterKey")) { + resultUploadKey = resultUpload.get("masterKey").asText(); + } + if (resultUpload.has("database")) { + resultUploadDatabase = resultUpload.get("database").asText(); } + if (resultUpload.has("container")) { + resultUploadContainer = resultUpload.get("container").asText(); + } + } + } - // Run metadata sub-section - JsonNode runMetadata = metrics.get("runMetadata"); - if (runMetadata != null && runMetadata.isObject()) { - if (runMetadata.has("testVariationName")) { - testVariationName = runMetadata.get("testVariationName").asText(); - } - if (runMetadata.has("branchName")) { - branchName = runMetadata.get("branchName").asText(); - } - if (runMetadata.has("commitId")) { - commitId = runMetadata.get("commitId").asText(); - } + /** + * Run metadata from "metrics.runMetadata" (tagged on uploaded results). 
+ */ + private void loadRunMetadata(JsonNode root) { + JsonNode metrics = root.get("metrics"); + if (metrics == null || !metrics.isObject()) { + return; + } + JsonNode runMetadata = metrics.get("runMetadata"); + if (runMetadata != null && runMetadata.isObject()) { + if (runMetadata.has("testVariationName")) { + testVariationName = runMetadata.get("testVariationName").asText(); + } + if (runMetadata.has("branchName")) { + branchName = runMetadata.get("branchName").asText(); + } + if (runMetadata.has("commitId")) { + commitId = runMetadata.get("commitId").asText(); } } } diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/Main.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/Main.java index 7a966a6ca36b..556342a3e346 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/Main.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/Main.java @@ -23,7 +23,10 @@ public static void main(String[] args) throws Exception { return; } - // Build BenchmarkConfig (requires workload config file) + // Configuration holds only CLI lifecycle params (cycles, settleTimeMs, etc.). + // BenchmarkConfig consumes them and loads all workload config from the JSON file. + // BenchmarkOrchestrator handles dispatch for all benchmark types (async, sync, + // CTL, encryption, LinkedIn) based on operationType and flags in TenantWorkloadConfig. 
BenchmarkConfig benchConfig = BenchmarkConfig.fromConfiguration(cfg); TenantWorkloadConfig firstTenant = benchConfig.getTenantWorkloads().get(0); From e51835efa58efbbb2cd869683cfd7c2af8533808 Mon Sep 17 00:00:00 2001 From: Annie Liang Date: Wed, 4 Mar 2026 19:38:39 -0800 Subject: [PATCH 5/5] Address remaining PR review comments - Validate all tenants, not just the first one - Guard against empty tenants list with clear error message - Fix copy-paste bug: roleInstance null check used roleName instead - Move Environment enum from Configuration to TenantWorkloadConfig to remove coupling to CLI-only class Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- .../cosmos/benchmark/BenchmarkOrchestrator.java | 2 +- .../azure/cosmos/benchmark/Configuration.java | 17 ----------------- .../java/com/azure/cosmos/benchmark/Main.java | 10 ++++++++-- .../cosmos/benchmark/TenantWorkloadConfig.java | 14 +++++++++++--- .../linkedin/impl/metrics/MetricsFactory.java | 6 +++--- .../linkedin/impl/metrics/MetricsImpl.java | 4 ++-- 6 files changed, 25 insertions(+), 28 deletions(-) diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/BenchmarkOrchestrator.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/BenchmarkOrchestrator.java index dee395d632cb..dad9937b166d 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/BenchmarkOrchestrator.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/BenchmarkOrchestrator.java @@ -451,7 +451,7 @@ public String instrumentationKey() { } String roleInstance = System.getenv("APPLICATIONINSIGHTS_ROLE_INSTANCE"); - if (roleName != null) { + if (roleInstance != null) { globalTags.add(io.micrometer.core.instrument.Tag.of("cloud_RoleInstance", roleInstance)); } diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/Configuration.java 
b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/Configuration.java index 884e14fb641a..ef32a0d4d2f6 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/Configuration.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/Configuration.java @@ -3,7 +3,6 @@ package com.azure.cosmos.benchmark; -import com.beust.jcommander.IStringConverter; import com.beust.jcommander.Parameter; import org.apache.commons.lang3.builder.ToStringBuilder; import org.apache.commons.lang3.builder.ToStringStyle; @@ -30,22 +29,6 @@ public class Configuration { @Parameter(names = {"-h", "-help", "--help"}, description = "Help", help = true) private boolean help = false; - public enum Environment { - Daily, // This is the CTL environment where we run the workload for a fixed number of hours - Staging; // This is the CTL environment where the worload runs as a long running job - - static class EnvironmentConverter implements IStringConverter { - @Override - public Environment convert(String value) { - if (value == null) { - return Environment.Daily; - } - - return Environment.valueOf(value); - } - } - } - public boolean isHelp() { return help; } diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/Main.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/Main.java index 556342a3e346..fafd97fbc296 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/Main.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/Main.java @@ -28,9 +28,15 @@ public static void main(String[] args) throws Exception { // BenchmarkOrchestrator handles dispatch for all benchmark types (async, sync, // CTL, encryption, LinkedIn) based on operationType and flags in TenantWorkloadConfig. 
BenchmarkConfig benchConfig = BenchmarkConfig.fromConfiguration(cfg); - TenantWorkloadConfig firstTenant = benchConfig.getTenantWorkloads().get(0); - validateConfiguration(firstTenant); + if (benchConfig.getTenantWorkloads().isEmpty()) { + throw new IllegalArgumentException( + "No tenants defined in workload config. The 'tenants' array must contain at least one entry."); + } + + for (TenantWorkloadConfig tenant : benchConfig.getTenantWorkloads()) { + validateConfiguration(tenant); + } new BenchmarkOrchestrator().run(benchConfig); } catch (ParameterException e) { diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/TenantWorkloadConfig.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/TenantWorkloadConfig.java index 816f801f4ae3..6140df01d463 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/TenantWorkloadConfig.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/TenantWorkloadConfig.java @@ -37,6 +37,14 @@ @JsonIgnoreProperties(ignoreUnknown = true) public class TenantWorkloadConfig { + /** + * Benchmark execution environment. + */ + public enum Environment { + Daily, // CTL environment where we run the workload for a fixed number of hours + Staging // CTL environment where the workload runs as a long running job + } + private static final Logger logger = LoggerFactory.getLogger(TenantWorkloadConfig.class); private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); @@ -314,9 +322,9 @@ public Duration getAggressiveWarmupDuration() { public boolean isEncryptionEnabled() { return encryptionEnabled != null && encryptionEnabled; } public int getBulkloadBatchSize() { return bulkloadBatchSize != null ? bulkloadBatchSize : 200000; } public String getTestScenario() { return testScenario != null ? 
testScenario : "GET"; } - public Configuration.Environment getEnvironment() { - if (environment == null) return Configuration.Environment.Daily; - return Configuration.Environment.valueOf(environment); + public Environment getEnvironment() { + if (environment == null) return Environment.Daily; + return Environment.valueOf(environment); } public boolean isSync() { return useSync != null && useSync; } diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/impl/metrics/MetricsFactory.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/impl/metrics/MetricsFactory.java index 1398cbf83878..67604f6702a1 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/impl/metrics/MetricsFactory.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/impl/metrics/MetricsFactory.java @@ -3,7 +3,7 @@ package com.azure.cosmos.benchmark.linkedin.impl.metrics; -import com.azure.cosmos.benchmark.Configuration; +import com.azure.cosmos.benchmark.TenantWorkloadConfig; import com.azure.cosmos.benchmark.linkedin.impl.Metrics; import com.azure.cosmos.benchmark.linkedin.impl.models.CollectionKey; import com.codahale.metrics.MetricRegistry; @@ -20,7 +20,7 @@ public class MetricsFactory { private final MetricRegistry _metricsRegistry; private final Clock _clock; - private final Configuration.Environment _environment; + private final TenantWorkloadConfig.Environment _environment; // Local cache enables reusing the same Metric instance // {CollectionKey -> {OperationName -> Metrics} map} @@ -28,7 +28,7 @@ public class MetricsFactory { public MetricsFactory(final MetricRegistry metricsRegistry, final Clock clock, - final Configuration.Environment environment) { + final TenantWorkloadConfig.Environment environment) { Preconditions.checkNotNull(metricsRegistry, "The MetricsRegistry can not be null"); Preconditions.checkNotNull(clock, "Need a 
non-null Clock instance for latency tracking"); Preconditions.checkNotNull(environment, "Need a valid value for the CTL environment"); diff --git a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/impl/metrics/MetricsImpl.java b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/impl/metrics/MetricsImpl.java index 3267a50abd18..aad4b74b71e7 100644 --- a/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/impl/metrics/MetricsImpl.java +++ b/sdk/cosmos/azure-cosmos-benchmark/src/main/java/com/azure/cosmos/benchmark/linkedin/impl/metrics/MetricsImpl.java @@ -3,7 +3,7 @@ package com.azure.cosmos.benchmark.linkedin.impl.metrics; -import com.azure.cosmos.benchmark.Configuration; +import com.azure.cosmos.benchmark.TenantWorkloadConfig; import com.azure.cosmos.benchmark.linkedin.impl.Metrics; import com.azure.cosmos.benchmark.linkedin.impl.models.CollectionKey; import com.codahale.metrics.Meter; @@ -27,7 +27,7 @@ public MetricsImpl(final MetricRegistry metricsRegistry, final Clock clock, final CollectionKey collectionKey, final String operationName, - final Configuration.Environment environment) { + final TenantWorkloadConfig.Environment environment) { Preconditions.checkNotNull(metricsRegistry, "The MetricsRegistry can not be null"); Preconditions.checkNotNull(clock, "Need a non-null Clock instance for latency tracking"); Preconditions.checkNotNull(operationName, "The operation name can not be null");