diff --git a/doc/src/build/config/config.yaml b/doc/src/build/config/config.yaml
index b2debf2c9e..0792d7a1dc 100644
--- a/doc/src/build/config/config.yaml
+++ b/doc/src/build/config/config.yaml
@@ -120,6 +120,8 @@ option:
log-level-stderr: {type: string, required: false, command: {noop: {}}}
pg: {type: string, required: false, command: {noop: {}}}
pg-path: {type: string, required: false, command: {noop: {}}}
+ repo-azure-key: {type: string, required: false, command: {noop: {}}}
+ repo-azure-key-type: {type: string-id, default: shared, allow-list: [auto, shared, sas], command: {noop: {}}}
repo-type: {type: string, required: false, command: {noop: {}}}
repo: {type: string, required: false, command: {noop: {}}}
spool-path: {type: string, required: false, command: {noop: {}}}
diff --git a/doc/src/build/help/help.xml b/doc/src/build/help/help.xml
index c873d096bd..c14c799848 100644
--- a/doc/src/build/help/help.xml
+++ b/doc/src/build/help/help.xml
@@ -20,6 +20,8 @@
+
+
diff --git a/src/build/config/config.yaml b/src/build/config/config.yaml
index f3fd8ac8ec..e1fec0e28d 100644
--- a/src/build/config/config.yaml
+++ b/src/build/config/config.yaml
@@ -2183,13 +2183,24 @@ option:
default: blob.core.windows.net
repo-azure-key:
- inherit: repo-azure-account
+ section: global
+ group: repo
+ type: string
+ secure: true
+ command: repo-type
+ required: false
+ depend:
+ option: repo-azure-key-type
+ list:
+ - shared
+ - sas
repo-azure-key-type:
inherit: repo-azure-container
type: string-id
default: shared
allow-list:
+ - auto
- shared
- sas
diff --git a/src/build/help/help.xml b/src/build/help/help.xml
index d55a713c5a..7b223413ca 100644
--- a/src/build/help/help.xml
+++ b/src/build/help/help.xml
@@ -542,6 +542,7 @@
The following types are supported for authorization:
+ auto - Automatically authorize using Azure Managed Identities
shared - Shared key
sas - Shared access signature
diff --git a/src/common/debug.h b/src/common/debug.h
index ccc0a4ab7a..b907474c77 100644
--- a/src/common/debug.h
+++ b/src/common/debug.h
@@ -359,12 +359,12 @@ Ignore DEBUG_TEST_TRACE_MACRO if DEBUG is not defined because the underlying fun
#define FUNCTION_TEST_BEGIN() \
FUNCTION_TEST_MEM_CONTEXT_AUDIT_BEGIN(); \
- \
+ \
/* Ensure that FUNCTION_LOG_BEGIN() and FUNCTION_TEST_BEGIN() are not both used in a single function by declaring the */ \
/* same variable that FUNCTION_LOG_BEGIN() uses to track logging */ \
LogLevel FUNCTION_LOG_LEVEL(); \
(void)FUNCTION_LOG_LEVEL(); \
- \
+ \
/* Ensure that FUNCTION_TEST_RETURN*() is not used with FUNCTION_LOG_BEGIN*() by declaring a variable that will be */ \
/* referenced in FUNCTION_TEST_RETURN*() */ \
bool FUNCTION_TEST_BEGIN_exists; \
diff --git a/src/config/config.auto.h b/src/config/config.auto.h
index 5218b19439..ee44b628dd 100644
--- a/src/config/config.auto.h
+++ b/src/config/config.auto.h
@@ -238,6 +238,8 @@ Option value constants
#define CFGOPTVAL_REMOTE_TYPE_REPO STRID5("repo", 0x7c0b20)
#define CFGOPTVAL_REMOTE_TYPE_REPO_Z "repo"
+#define CFGOPTVAL_REPO_AZURE_KEY_TYPE_AUTO STRID5("auto", 0x7d2a10)
+#define CFGOPTVAL_REPO_AZURE_KEY_TYPE_AUTO_Z "auto"
#define CFGOPTVAL_REPO_AZURE_KEY_TYPE_SAS STRID5("sas", 0x4c330)
#define CFGOPTVAL_REPO_AZURE_KEY_TYPE_SAS_Z "sas"
#define CFGOPTVAL_REPO_AZURE_KEY_TYPE_SHARED STRID5("shared", 0x85905130)
diff --git a/src/config/parse.auto.c.inc b/src/config/parse.auto.c.inc
index 6517bab282..db49fe1faf 100644
--- a/src/config/parse.auto.c.inc
+++ b/src/config/parse.auto.c.inc
@@ -5367,7 +5367,7 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] =
PARSE_RULE_OPTION_NAME("repo-azure-key"), // opt/repo-azure-key
PARSE_RULE_OPTION_TYPE(String), // opt/repo-azure-key
PARSE_RULE_OPTION_RESET(true), // opt/repo-azure-key
- PARSE_RULE_OPTION_REQUIRED(true), // opt/repo-azure-key
+ PARSE_RULE_OPTION_REQUIRED(false), // opt/repo-azure-key
PARSE_RULE_OPTION_SECTION(Global), // opt/repo-azure-key
PARSE_RULE_OPTION_SECURE(true), // opt/repo-azure-key
PARSE_RULE_OPTION_GROUP_ID(Repo), // opt/repo-azure-key
@@ -5435,8 +5435,9 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] =
( // opt/repo-azure-key
PARSE_RULE_OPTIONAL_DEPEND // opt/repo-azure-key
( // opt/repo-azure-key
- PARSE_RULE_VAL_OPT(RepoType), // opt/repo-azure-key
- PARSE_RULE_VAL_STRID(Azure), // opt/repo-azure-key
+ PARSE_RULE_VAL_OPT(RepoAzureKeyType), // opt/repo-azure-key
+ PARSE_RULE_VAL_STRID(Shared), // opt/repo-azure-key
+ PARSE_RULE_VAL_STRID(Sas), // opt/repo-azure-key
), // opt/repo-azure-key
), // opt/repo-azure-key
), // opt/repo-azure-key
@@ -5520,6 +5521,7 @@ static const ParseRuleOption parseRuleOption[CFG_OPTION_TOTAL] =
// opt/repo-azure-key-type
PARSE_RULE_OPTIONAL_ALLOW_LIST // opt/repo-azure-key-type
( // opt/repo-azure-key-type
+ PARSE_RULE_VAL_STRID(Auto), // opt/repo-azure-key-type
PARSE_RULE_VAL_STRID(Shared), // opt/repo-azure-key-type
PARSE_RULE_VAL_STRID(Sas), // opt/repo-azure-key-type
), // opt/repo-azure-key-type
@@ -11721,7 +11723,6 @@ static const uint8_t optionResolveOrder[] =
cfgOptRepoAzureAccount, // opt-resolve-order
cfgOptRepoAzureContainer, // opt-resolve-order
cfgOptRepoAzureEndpoint, // opt-resolve-order
- cfgOptRepoAzureKey, // opt-resolve-order
cfgOptRepoAzureKeyType, // opt-resolve-order
cfgOptRepoAzureUriStyle, // opt-resolve-order
cfgOptRepoBlock, // opt-resolve-order
@@ -11775,6 +11776,7 @@ static const uint8_t optionResolveOrder[] =
cfgOptPgHostCmd, // opt-resolve-order
cfgOptPgHostKeyFile, // opt-resolve-order
cfgOptPgHostPort, // opt-resolve-order
+ cfgOptRepoAzureKey, // opt-resolve-order
cfgOptRepoGcsBucket, // opt-resolve-order
cfgOptRepoGcsEndpoint, // opt-resolve-order
cfgOptRepoGcsKey, // opt-resolve-order
diff --git a/src/config/parse.c b/src/config/parse.c
index cb01f6a79e..15934c1c81 100644
--- a/src/config/parse.c
+++ b/src/config/parse.c
@@ -2743,10 +2743,16 @@ cfgParse(const Storage *const storage, const unsigned int argListSize, const cha
// Else error if option is required and help was not requested
else if (!config->help)
{
- const bool required =
+ bool required =
cfgParseOptionalRule(&optionalRules, parseRuleOptionalTypeRequired, config->command, optionId) ?
optionalRules.required : ruleOption->required;
+ // If a dependency exists and is not valid, the option should not be required
+ // This handles cases where an option is only required when a dependency value is in a specific list
+ // Check dependId to ensure a dependency check was actually performed
+ if (required && dependResult.dependId != 0 && !dependResult.valid)
+ required = false;
+
if (required)
{
THROW_FMT(
@@ -2761,9 +2767,57 @@ cfgParse(const Storage *const storage, const unsigned int argListSize, const cha
if (optionGroup && configOptionValue->source != cfgSourceDefault)
optionGroupIndexKeep[optionGroupId][optionListIdx] = true;
}
- // Else apply the default for the unresolved dependency, if it exists
- else if (dependResult.defaultExists)
+ // Else dependency is not valid - check if option is required
+ else
{
+ // If option is not set, check if it's required
+ if ((!configOptionValue->set && !parseOptionValue->negate) || config->help)
+ {
+ // If the option has a default, only apply it if the dependency is valid
+ // If dependency is invalid, don't apply defaults as they may cause dependent options to be incorrectly
+ // required
+ if (cfgParseOptionalRule(&optionalRules, parseRuleOptionalTypeDefault, config->command, optionId) &&
+ (dependResult.dependId == 0 || dependResult.valid))
+ {
+ if (!configOptionValue->set)
+ {
+ configOptionValue->set = true;
+ configOptionValue->value = optionalRules.defaultValue;
+ configOptionValue->display = optionalRules.defaultRaw;
+ }
+
+ configOptionValue->defaultValue = optionalRules.defaultRaw;
+ }
+ // Else error if option is required and help was not requested
+ else if (!config->help)
+ {
+ bool required =
+ cfgParseOptionalRule(&optionalRules, parseRuleOptionalTypeRequired, config->command, optionId) ?
+ optionalRules.required : ruleOption->required;
+
+ // If a dependency exists and is not valid, the option should not be required
+ // This handles cases where an option is only required when a dependency value is in a specific list
+ if (required && dependResult.dependId != 0 && !dependResult.valid)
+ required = false;
+
+ if (required)
+ {
+ THROW_FMT(
+ OptionRequiredError, "%s command requires option: %s%s",
+ cfgParseCommandName(config->command), cfgParseOptionKeyIdxName(optionId, optionKeyIdx),
+ ruleOption->section == cfgSectionStanza ? "\nHINT: does this stanza exist?" : "");
+ }
+ }
+ }
+
+ // Apply the default for the unresolved dependency, if it exists
+ if (dependResult.defaultExists)
+ {
+ configOptionValue->set = true;
+ configOptionValue->value = dependResult.defaultValue;
+ configOptionValue->defaultValue = optionalRules.defaultRaw;
+ configOptionValue->display = optionalRules.defaultRaw;
+ }
// Fully reinitialize since it might have been left partially set if dependency was not resolved
*configOptionValue = (ConfigOptionValue)
{
diff --git a/src/storage/azure/helper.c b/src/storage/azure/helper.c
index 8b9488d43d..b29bb77e7f 100644
--- a/src/storage/azure/helper.c
+++ b/src/storage/azure/helper.c
@@ -52,7 +52,7 @@ storageAzureHelper(const unsigned int repoIdx, const bool write, StoragePathExpr
// Ensure the key is valid base64 when key type is shared
const StorageAzureKeyType keyType = (StorageAzureKeyType)cfgOptionIdxStrId(cfgOptRepoAzureKeyType, repoIdx);
- const String *const key = cfgOptionIdxStr(cfgOptRepoAzureKey, repoIdx);
+ const String *const key = cfgOptionIdxStrNull(cfgOptRepoAzureKey, repoIdx);
if (keyType == storageAzureKeyTypeShared)
{
diff --git a/src/storage/azure/storage.c b/src/storage/azure/storage.c
index 929371439a..452e16caf2 100644
--- a/src/storage/azure/storage.c
+++ b/src/storage/azure/storage.c
@@ -10,10 +10,12 @@ Azure Storage
#include "common/debug.h"
#include "common/io/http/client.h"
#include "common/io/http/common.h"
+#include "common/io/http/url.h"
#include "common/io/socket/client.h"
#include "common/io/tls/client.h"
#include "common/log.h"
#include "common/regExp.h"
+#include "common/type/json.h"
#include "common/type/object.h"
#include "common/type/xml.h"
#include "storage/azure/read.h"
@@ -24,7 +26,7 @@ Azure http headers
***********************************************************************************************************************************/
STRING_STATIC(AZURE_HEADER_TAGS, "x-ms-tags");
STRING_STATIC(AZURE_HEADER_VERSION_STR, "x-ms-version");
-STRING_STATIC(AZURE_HEADER_VERSION_VALUE_STR, "2021-06-08");
+STRING_STATIC(AZURE_HEADER_VERSION_VALUE_STR, "2024-08-04");
/***********************************************************************************************************************************
Azure query tokens
@@ -40,6 +42,8 @@ STRING_STATIC(AZURE_QUERY_SIG_STR, "sig");
STRING_STATIC(AZURE_QUERY_VALUE_LIST_STR, "list");
STRING_EXTERN(AZURE_QUERY_VALUE_CONTAINER_STR, AZURE_QUERY_VALUE_CONTAINER);
STRING_STATIC(AZURE_QUERY_VALUE_VERSIONS_STR, "versions");
+STRING_STATIC(AZURE_QUERY_API_VERSION, "api-version");
+STRING_STATIC(AZURE_QUERY_RESOURCE, "resource");
/***********************************************************************************************************************************
XML tags
@@ -54,6 +58,20 @@ STRING_STATIC(AZURE_XML_TAG_NAME_STR, "Name");
STRING_STATIC(AZURE_XML_TAG_PROPERTIES_STR, "Properties");
STRING_STATIC(AZURE_XML_TAG_VERSION_ID_STR, "VersionId");
+/***********************************************************************************************************************************
+Automatically get credentials via Azure Managed Identities
+
+Documentation for the response format is found at:
+https://learn.microsoft.com/en-us/entra/identity/managed-identities-azure-resources/how-to-use-vm-token#get-a-token-using-curl
+***********************************************************************************************************************************/
+STRING_STATIC(AZURE_CREDENTIAL_HOST_STR, "169.254.169.254");
+#define AZURE_CREDENTIAL_PORT 80
+#define AZURE_CREDENTIAL_PATH "/metadata/identity/oauth2/token"
+#define AZURE_CREDENTIAL_API_VERSION "2018-02-01"
+
+VARIANT_STRDEF_STATIC(AZURE_JSON_TAG_ACCESS_TOKEN_VAR, "access_token");
+VARIANT_STRDEF_STATIC(AZURE_JSON_TAG_EXPIRES_IN_VAR, "expires_in");
+
/***********************************************************************************************************************************
Object type
***********************************************************************************************************************************/
@@ -64,6 +82,7 @@ struct StorageAzure
StringList *headerRedactList; // List of headers to redact from logging
StringList *queryRedactList; // List of query keys to redact from logging
+ StorageAzureKeyType keyType; // Key type (e.g. storageAzureKeyTypeShared)
const String *container; // Container to store data in
const String *account; // Account
const Buffer *sharedKey; // Shared key
@@ -74,6 +93,12 @@ struct StorageAzure
const String *pathPrefix; // Account/container prefix
uint64_t fileId; // Id to used to make file block identifiers unique
+
+ // For Azure Managed Identities authentication
+ HttpClient *credHttpClient; // HTTP client to service credential requests
+ const String *credHost; // Credentials host
+ String *accessToken; // Access token
+ time_t accessTokenExpirationTime; // Time the access token expires
};
/***********************************************************************************************************************************
@@ -104,15 +129,25 @@ storageAzureAuth(
MEM_CONTEXT_TEMP_BEGIN()
{
- // Host header is required for both types of authentication
+ // Set required headers
httpHeaderPut(httpHeader, HTTP_HEADER_HOST_STR, this->host);
- // Shared key authentication
- if (this->sharedKey != NULL)
+ // Date header is required for shared key authentication (for signing)
+ if (this->keyType == storageAzureKeyTypeShared)
{
- // Set required headers
httpHeaderPut(httpHeader, HTTP_HEADER_DATE_STR, dateTime);
+ }
+
+ // Set version header (required for shared key and auto auth types, not for SAS)
+ if (this->keyType != storageAzureKeyTypeSas)
+ {
httpHeaderPut(httpHeader, AZURE_HEADER_VERSION_STR, AZURE_HEADER_VERSION_VALUE_STR);
+ }
+
+ // Shared key authentication
+ if (this->keyType == storageAzureKeyTypeShared)
+ {
+ ASSERT(this->sharedKey != NULL);
// Generate canonical headers
String *const headerCanonical = strNew();
@@ -176,9 +211,68 @@ storageAzureAuth(
"SharedKey %s:%s", strZ(this->account),
strZ(strNewEncode(encodingBase64, cryptoHmacOne(hashTypeSha256, this->sharedKey, BUFSTR(stringToSign))))));
}
+ else if (this->keyType == storageAzureKeyTypeAuto)
+ {
+ const time_t timeBegin = time(NULL);
+
+ if (timeBegin >= this->accessTokenExpirationTime)
+ {
+ // Retrieve the access token via the Managed Identities endpoint
+ HttpHeader *const authHeader = httpHeaderNew(NULL);
+ httpHeaderAdd(
+ authHeader, STRDEF("Metadata"), STRDEF("true"));
+ httpHeaderAdd(authHeader, HTTP_HEADER_HOST_STR, this->credHost);
+ httpHeaderAdd(authHeader, HTTP_HEADER_CONTENT_LENGTH_STR, ZERO_STR);
+
+ HttpQuery *const authQuery = httpQueryNewP();
+ httpQueryAdd(authQuery, AZURE_QUERY_API_VERSION, STRDEF(AZURE_CREDENTIAL_API_VERSION));
+ httpQueryAdd(authQuery, AZURE_QUERY_RESOURCE, strNewFmt("https://%s", strZ(this->host)));
+
+ HttpRequest *const request = httpRequestNewP(
+ this->credHttpClient, HTTP_VERB_GET_STR, STRDEF(AZURE_CREDENTIAL_PATH), .header = authHeader,
+ .query = authQuery);
+ HttpResponse *const response = httpRequestResponse(request, true);
+
+ // Set the access_token on success and store an expiration time when we should re-fetch it
+ if (httpResponseCodeOk(response))
+ {
+ // Get credentials and expiration from the JSON response
+ const KeyValue *const credential = varKv(jsonToVar(strNewBuf(httpResponseContent(response))));
+
+ const String *const accessToken = varStr(kvGet(credential, AZURE_JSON_TAG_ACCESS_TOKEN_VAR));
+ CHECK(FormatError, accessToken != NULL, "access token missing");
+
+ const Variant *const expiresInStr = kvGet(credential, AZURE_JSON_TAG_EXPIRES_IN_VAR);
+ CHECK(FormatError, expiresInStr != NULL, "expiry missing");
+
+ const time_t clientTimeoutPeriod = ((time_t)(httpClientTimeout(this->httpClient) / MSEC_PER_SEC * 2));
+ const time_t expiresIn = (time_t)varInt64Force(expiresInStr);
+
+ MEM_CONTEXT_OBJ_BEGIN(this)
+ {
+ this->accessToken = strDup(accessToken);
+ // Subtract http client timeout * 2 so the token does not expire in the middle of http retries
+ this->accessTokenExpirationTime = timeBegin + expiresIn - clientTimeoutPeriod;
+ }
+ MEM_CONTEXT_OBJ_END();
+ }
+ else
+ {
+ httpRequestError(request, response);
+ }
+ }
+
+ // Generate authorization header with Bearer prefix
+ const String *const accessTokenHeaderValue = strNewFmt("Bearer %s", strZ(this->accessToken));
+
+ // Add the authorization header
+ httpHeaderPut(httpHeader, HTTP_HEADER_AUTHORIZATION_STR, accessTokenHeaderValue);
+ }
// SAS authentication
else
+ {
httpQueryMerge(query, this->sasKey);
+ }
}
MEM_CONTEXT_TEMP_END();
@@ -793,7 +887,6 @@ storageAzureNew(
ASSERT(container != NULL);
ASSERT(account != NULL);
ASSERT(endpoint != NULL);
- ASSERT(key != NULL);
ASSERT(blockSize != 0);
OBJ_NEW_BEGIN(StorageAzure, .childQty = MEM_CONTEXT_QTY_MAX)
@@ -808,6 +901,8 @@ storageAzureNew(
.pathPrefix =
uriStyle == storageAzureUriStyleHost ?
strNewFmt("/%s", strZ(container)) : strNewFmt("/%s/%s", strZ(account), strZ(container)),
+ .keyType = keyType,
+ .accessTokenExpirationTime = 0,
};
// Create tag query string
@@ -818,11 +913,29 @@ storageAzureNew(
httpQueryFree(query);
}
- // Store shared key or parse sas query
- if (keyType == storageAzureKeyTypeShared)
- this->sharedKey = bufNewDecode(encodingBase64, key);
- else
- this->sasKey = httpQueryNewStr(key);
+ switch (keyType)
+ {
+ case storageAzureKeyTypeAuto:
+ {
+ this->credHost = AZURE_CREDENTIAL_HOST_STR;
+ this->credHttpClient = httpClientNew(
+ sckClientNew(this->credHost, AZURE_CREDENTIAL_PORT, timeout, timeout), timeout);
+ break;
+ }
+
+ // Store shared key or parse sas query
+ case storageAzureKeyTypeShared:
+ {
+ this->sharedKey = bufNewDecode(encodingBase64, key);
+ break;
+ }
+
+ case storageAzureKeyTypeSas:
+ {
+ this->sasKey = httpQueryNewStr(key);
+ break;
+ }
+ }
// Create the http client used to service requests
this->httpClient = httpClientNew(
diff --git a/src/storage/azure/storage.h b/src/storage/azure/storage.h
index 6335997b85..5114b70f17 100644
--- a/src/storage/azure/storage.h
+++ b/src/storage/azure/storage.h
@@ -16,6 +16,7 @@ Key type
***********************************************************************************************************************************/
typedef enum
{
+ storageAzureKeyTypeAuto = STRID5("auto", 0x7d2a10),
storageAzureKeyTypeShared = STRID5("shared", 0x85905130),
storageAzureKeyTypeSas = STRID5("sas", 0x4c330),
} StorageAzureKeyType;
diff --git a/test/azure/DOCKER_README.md b/test/azure/DOCKER_README.md
new file mode 100644
index 0000000000..452ded70bb
--- /dev/null
+++ b/test/azure/DOCKER_README.md
@@ -0,0 +1,188 @@
+# pgBackRest Docker Image - Azure Blob Storage
+
+Docker image with PostgreSQL 18 and pgBackRest configured for Azure Blob Storage backups. Supports Azure Managed Identity (AMI), SAS tokens, and shared key authentication.
+
+## Build
+
+```bash
+docker build -t pgbackrest-test .
+```
+
+## Run with Azure
+
+```bash
+docker run -d \
+ --name pgbackrest-demo \
+ -e POSTGRES_PASSWORD=secret \
+ -e AZURE_ACCOUNT=<storage-account> \
+ -e AZURE_CONTAINER=<container-name> \
+ -e AZURE_KEY="<sas-token-or-shared-key>" \
+ -e AZURE_KEY_TYPE=sas \
+ -e AZURE_REPO_PATH=/demo-repo \
+ -p 5432:5432 \
+ -v pgdata:/var/lib/postgresql/data \
+ -v pgrepo:/var/lib/pgbackrest \
+ pgbackrest-test
+```
+
+## Environment Variables
+
+**Required:**
+- `POSTGRES_PASSWORD` - PostgreSQL superuser password
+
+**Azure (Required for Azure backups):**
+- `AZURE_ACCOUNT` - Azure storage account name
+- `AZURE_CONTAINER` - Blob container name
+- `AZURE_KEY` - Authentication key (SAS token or shared key, not needed for Managed Identity)
+- `AZURE_KEY_TYPE` - Authentication method: `auto` (Managed Identity), `sas` (SAS Token), or `shared` (Shared Key)
+- `AZURE_REPO_PATH` - Path in Azure container (default: `/demo-repo`)
+
+## Authentication Methods
+
+### Managed Identity (`auto`) - Azure VMs/ACI/AKS
+No keys required. Most secure option for Azure environments.
+
+**Setup (one-time, requires Azure admin):**
+```bash
+# Enable Managed Identity on VM
+az vm identity assign \
+ --name <vm-name> \
+ --resource-group <resource-group>
+
+# Get Principal ID and grant Storage Blob Data Contributor role
+PRINCIPAL_ID=$(az vm identity show \
+ --name <vm-name> \
+ --resource-group <resource-group> \
+ --query principalId -o tsv)
+
+STORAGE_ACCOUNT_ID=$(az storage account show \
+ --name <storage-account> \
+ --resource-group <resource-group> \
+ --query id -o tsv)
+
+az role assignment create \
+ --assignee "$PRINCIPAL_ID" \
+ --role "Storage Blob Data Contributor" \
+ --scope "$STORAGE_ACCOUNT_ID"
+```
+
+**Usage:**
+```bash
+# On Azure VM
+docker run -d \
+ -e POSTGRES_PASSWORD=secret \
+ -e AZURE_ACCOUNT=<storage-account> \
+ -e AZURE_CONTAINER=<container-name> \
+ -e AZURE_KEY_TYPE=auto \
+ pgbackrest-test
+
+# Azure Container Instance (ACI)
+az container create \
+ --resource-group <resource-group> \
+ --name pgbackrest-demo \
+ --image pgbackrest-test \
+ --assign-identity \
+ --environment-variables \
+ POSTGRES_PASSWORD=secret \
+ AZURE_ACCOUNT=<storage-account> \
+ AZURE_CONTAINER=<container-name> \
+ AZURE_KEY_TYPE=auto \
+ --cpu 2 \
+ --memory 4 \
+ --ports 5432
+```
+
+### SAS Token (`sas`) - Recommended for local Docker
+```bash
+SAS_TOKEN=$(az storage container generate-sas \
+ --account-name <storage-account> \
+ --name <container-name> \
+ --permissions racwdl \
+ --expiry $(date -u -d '+7 days' +%Y-%m-%dT%H:%M:%SZ) \
+ --auth-mode login \
+ --as-user \
+ -o tsv)
+
+docker run -d \
+ -e POSTGRES_PASSWORD=secret \
+ -e AZURE_ACCOUNT=<storage-account> \
+ -e AZURE_CONTAINER=<container-name> \
+ -e AZURE_KEY="$SAS_TOKEN" \
+ -e AZURE_KEY_TYPE=sas \
+ pgbackrest-test
+```
+
+### Shared Key (`shared`)
+```bash
+STORAGE_KEY=$(az storage account keys list \
+ --account-name <storage-account> \
+ --resource-group <resource-group> \
+ --query "[0].value" -o tsv)
+
+docker run -d \
+ -e POSTGRES_PASSWORD=secret \
+ -e AZURE_ACCOUNT=<storage-account> \
+ -e AZURE_CONTAINER=<container-name> \
+ -e AZURE_KEY="$STORAGE_KEY" \
+ -e AZURE_KEY_TYPE=shared \
+ pgbackrest-test
+```
+
+## Usage
+
+```bash
+# Wait for PostgreSQL initialization (30-60 seconds)
+sleep 60
+
+# Create stanza (configures both local repo1 and Azure repo2)
+docker exec pgbackrest-demo pgbackrest --stanza=demo stanza-create
+
+# Backup to Azure (repo2)
+docker exec pgbackrest-demo pgbackrest --stanza=demo --repo=2 backup
+
+# View backup info
+docker exec pgbackrest-demo pgbackrest --stanza=demo info
+
+# Check connection to Azure
+docker exec pgbackrest-demo pgbackrest --stanza=demo check
+```
+
+## Troubleshooting
+
+**Check container logs:**
+```bash
+docker logs pgbackrest-demo
+```
+
+**Verify Azure configuration:**
+```bash
+docker exec pgbackrest-demo cat /etc/pgbackrest/pgbackrest.conf | grep repo2
+```
+
+**Test Managed Identity (on Azure VM):**
+```bash
+curl -H "Metadata:true" \
+ "http://169.254.169.254/metadata/identity/oauth2/token?api-version=2018-02-01&resource=https://storage.azure.com/"
+```
+
+**Verify Managed Identity role assignment:**
+```bash
+az role assignment list \
+ --scope "$STORAGE_ACCOUNT_ID" \
+ --assignee "$PRINCIPAL_ID" \
+ --output table
+```
+
+**Check Azure authentication errors:**
+```bash
+docker exec pgbackrest-demo cat /var/log/pgbackrest/pgbackrest.log
+```
+
+**Verify blob storage access:**
+```bash
+az storage blob list \
+ --account-name <storage-account> \
+ --container-name <container-name> \
+ --auth-mode login \
+ --output table
+```
diff --git a/test/azure/Dockerfile b/test/azure/Dockerfile
new file mode 100644
index 0000000000..19900f63c8
--- /dev/null
+++ b/test/azure/Dockerfile
@@ -0,0 +1,195 @@
+# ============================================================================
+# pgBackRest Docker Image
+# ============================================================================
+# Supports three deployment scenarios:
+# 1. Local backups only (Mac/Linux/Windows)
+# 2. Azure Blob Storage from local system (SAS Token or Shared Key)
+# 3. Azure Managed Identity (Azure VMs/Container Instances/AKS)
+#
+# See Azure configuration section below for details.
+# ============================================================================
+
+# Postgres base image (Debian-based, multi-arch, works on Mac)
+FROM postgres:18
+
+# Build args – official pgBackRest repo + main branch
+ARG PGBR_REPO="https://github.com/pgEdge/pgbackrest.git"
+ARG PGBR_BRANCH="azure-managed-identities"
+
+# ============================================================================
+# Azure Blob Storage Configuration (Optional)
+# ============================================================================
+# Azure configuration is done at runtime via environment variables for security.
+# See DOCKER_README.md for usage examples and authentication methods.
+#
+# All Azure credentials (keys, tokens) should be provided at runtime, not build time.
+# No build-time ARGs for sensitive data to avoid security warnings.
+# ============================================================================
+
+USER root
+
+# Install build deps for pgBackRest
+RUN apt-get update && \
+ DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
+ git \
+ ca-certificates \
+ meson \
+ ninja-build \
+ gcc \
+ g++ \
+ make \
+ pkg-config \
+ libpq-dev \
+ libssl-dev \
+ libxml2-dev \
+ liblz4-dev \
+ libzstd-dev \
+ libbz2-dev \
+ zlib1g-dev \
+ libyaml-dev \
+ libssh2-1-dev && \
+ rm -rf /var/lib/apt/lists/*
+
+WORKDIR /build
+
+# Clone pgBackRest main and build
+RUN git clone --branch "${PGBR_BRANCH}" --single-branch "${PGBR_REPO}" pgbackrest && \
+ meson setup /build/pgbackrest-build /build/pgbackrest --buildtype=release && \
+ ninja -C /build/pgbackrest-build && \
+ ninja -C /build/pgbackrest-build install
+
+# pgBackRest config
+RUN mkdir -p /etc/pgbackrest /var/lib/pgbackrest /var/log/pgbackrest && \
+ chown -R postgres:postgres /var/lib/pgbackrest /var/log/pgbackrest /etc/pgbackrest
+
+# Create base config (without Azure)
+# Azure configuration is done at runtime via environment variables
+RUN printf '%s\n' \
+ '[global]' \
+ 'repo1-path=/var/lib/pgbackrest' \
+ 'log-path=/var/log/pgbackrest' \
+ 'lock-path=/var/lib/pgbackrest' \
+ 'log-level-console=info' \
+ 'log-level-file=info' \
+ 'repo1-retention-full=2' \
+ '' \
+ '[demo]' \
+ 'pg1-path=/var/lib/postgresql/data' \
+ > /etc/pgbackrest/pgbackrest.conf && \
+ chown postgres:postgres /etc/pgbackrest/pgbackrest.conf && \
+ chmod 660 /etc/pgbackrest/pgbackrest.conf
+
+# Create script to configure Azure at runtime via environment variables
+RUN cat > /usr/local/bin/configure-azure.sh <<'SCRIPT_EOF'
+#!/bin/bash
+set -e
+if [ -n "$AZURE_ACCOUNT" ] && [ -n "$AZURE_CONTAINER" ]; then
+ # Check if key is required (not needed for auto/Managed Identity)
+ AZURE_KEY_TYPE=${AZURE_KEY_TYPE:-auto}
+ if [ "$AZURE_KEY_TYPE" != "auto" ] && [ -z "$AZURE_KEY" ]; then
+ echo "Error: AZURE_KEY is required for key type: ${AZURE_KEY_TYPE}"
+ exit 1
+ fi
+
+ # Remove existing Azure repo2 config if present (from repo2-type to next blank line or end)
+ awk '
+ /^\[/ { in_azure=0 }
+ /^repo2-type=azure/ { in_azure=1; next }
+ in_azure && /^repo2-/ { next }
+ in_azure && /^$/ { in_azure=0 }
+ !in_azure { print }
+ ' /etc/pgbackrest/pgbackrest.conf > /tmp/pgbackrest.conf.tmp
+ mv /tmp/pgbackrest.conf.tmp /etc/pgbackrest/pgbackrest.conf || true
+
+ # Add Azure config
+ AZURE_REPO_PATH=${AZURE_REPO_PATH:-/demo-repo}
+ printf '\n%s\n' \
+ 'repo2-type=azure' \
+ "repo2-azure-account=${AZURE_ACCOUNT}" \
+ "repo2-azure-container=${AZURE_CONTAINER}" \
+ "repo2-azure-key-type=${AZURE_KEY_TYPE}" \
+ "repo2-path=${AZURE_REPO_PATH}" \
+ 'repo2-retention-full=4' \
+ >> /etc/pgbackrest/pgbackrest.conf
+
+ # Add key only if not using auto (Managed Identity)
+ if [ "$AZURE_KEY_TYPE" != "auto" ] && [ -n "$AZURE_KEY" ]; then
+ echo "repo2-azure-key=${AZURE_KEY}" >> /etc/pgbackrest/pgbackrest.conf
+ fi
+
+ echo "Azure storage configured successfully"
+ echo "Account: ${AZURE_ACCOUNT}"
+ echo "Container: ${AZURE_CONTAINER}"
+ echo "Key type: ${AZURE_KEY_TYPE}"
+ if [ "$AZURE_KEY_TYPE" = "auto" ]; then
+ echo "Using Azure Managed Identity authentication"
+ fi
+else
+ echo "Azure credentials not provided. Skipping Azure configuration."
+fi
+SCRIPT_EOF
+RUN chmod +x /usr/local/bin/configure-azure.sh
+
+# Enable archive_mode + archive_command on first initdb
+# This script configures PostgreSQL for WAL archiving and optionally Azure storage
+RUN mkdir -p /docker-entrypoint-initdb.d && \
+ cat >/docker-entrypoint-initdb.d/pgbackrest-archive.sh <<'EOF'
+#!/bin/bash
+set -e
+
+# Configure Azure storage if environment variables are provided
+# Supports all three scenarios:
+# 1. No Azure vars = local backups only (repo1)
+# 2. Azure vars with shared/sas = Azure from local system (repo1 + repo2)
+# 3. Azure vars with auto = Azure Managed Identity (repo1 + repo2)
+if [ -n "$AZURE_ACCOUNT" ] && [ -n "$AZURE_CONTAINER" ]; then
+ /usr/local/bin/configure-azure.sh
+ echo "Azure Blob Storage (repo2) configured"
+else
+ echo "Azure not configured - using local backups only (repo1)"
+fi
+
+# Update pg1-path in pgBackRest config to use actual PGDATA path (PostgreSQL 18 compatibility)
+# PGDATA is set by the postgres image - use it directly
+if [ -n "$PGDATA" ]; then
+ PG_DATA_DIR="$PGDATA"
+ echo "Using PGDATA: $PG_DATA_DIR"
+else
+ # Fallback: try to find the data directory
+ if [ -d "/var/lib/postgresql/18/main" ]; then
+ PG_DATA_DIR="/var/lib/postgresql/18/main"
+ elif [ -d "/var/lib/postgresql/data" ]; then
+ PG_DATA_DIR="/var/lib/postgresql/data"
+ else
+ PG_DATA_DIR="/var/lib/postgresql/data"
+ fi
+ echo "Using detected path: $PG_DATA_DIR"
+fi
+
+# Update pgBackRest config with the correct path
+# Use a temp file approach to avoid permission issues with sed -i
+sed '/^pg1-path=/d' /etc/pgbackrest/pgbackrest.conf > /tmp/pgbackrest.conf.tmp
+if grep -q "^\[demo\]" /tmp/pgbackrest.conf.tmp; then
+ sed '/^\[demo\]/a pg1-path='"$PG_DATA_DIR" /tmp/pgbackrest.conf.tmp > /tmp/pgbackrest.conf.tmp2
+ mv /tmp/pgbackrest.conf.tmp2 /tmp/pgbackrest.conf.tmp
+else
+ echo "" >> /tmp/pgbackrest.conf.tmp
+ echo "[demo]" >> /tmp/pgbackrest.conf.tmp
+ echo "pg1-path=$PG_DATA_DIR" >> /tmp/pgbackrest.conf.tmp
+fi
+mv /tmp/pgbackrest.conf.tmp /etc/pgbackrest/pgbackrest.conf
+echo "Updated pgBackRest config: pg1-path=$PG_DATA_DIR"
+
+# Configure PostgreSQL for archiving (required for all scenarios)
+echo "archive_mode = on" >> "$PG_DATA_DIR/postgresql.conf"
+echo "archive_command = 'pgbackrest --stanza=demo archive-push %p'" >> "$PG_DATA_DIR/postgresql.conf"
+echo "archive_timeout = 60" >> "$PG_DATA_DIR/postgresql.conf"
+echo "wal_level = replica" >> "$PG_DATA_DIR/postgresql.conf"
+echo "max_wal_senders = 3" >> "$PG_DATA_DIR/postgresql.conf"
+echo "max_replication_slots = 3" >> "$PG_DATA_DIR/postgresql.conf"
+EOF
+RUN chmod +x /docker-entrypoint-initdb.d/pgbackrest-archive.sh
+
+USER postgres
+EXPOSE 5432
+# ENTRYPOINT and CMD come from postgres:18
diff --git a/test/azure/azure-pgbackrest.sh b/test/azure/azure-pgbackrest.sh
new file mode 100644
index 0000000000..4690929a5a
--- /dev/null
+++ b/test/azure/azure-pgbackrest.sh
@@ -0,0 +1,1259 @@
+#!/usr/bin/env bash
+# Master script for Azure pgBackRest operations
+# Handles SAS token generation, cleanup, and full backup/restore testing
+
+set -euo pipefail # strict mode: abort on errors, unset variables, and pipeline failures
+
+# Configuration - Set these environment variables or modify defaults
+AZURE_ACCOUNT="${AZURE_ACCOUNT:-your-storage-account}" # Azure storage account name
+AZURE_CONTAINER="${AZURE_CONTAINER:-your-container}" # blob container holding the pgBackRest repo
+RESOURCE_GROUP="${RESOURCE_GROUP:-your-resource-group}" # resource group owning the storage account
+IMAGE="${IMAGE:-pgbackrest-test}" # Docker image built/tested by this suite
+CONTAINER="${CONTAINER:-pgbr-test}" # name given to the test container instance
+
+# ANSI escape sequences used by the print_* status helpers
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+NC='\033[0m' # No Color
+
+# Functions
+print_header() { # print a framed section banner to stdout
+ printf '\n'
+ printf '%s\n' "=========================================="
+ printf '%s\n' "$1"
+ printf '%s\n' "=========================================="
+ printf '\n'
+}
+
+print_success() { # green check mark followed by the message; %b expands the color escapes like echo -e
+ printf '%b\n' "${GREEN}✓${NC} $1"
+}
+
+print_error() { # red cross mark followed by the message; %b expands the color escapes like echo -e
+ printf '%b\n' "${RED}✗${NC} $1"
+}
+
+print_warning() { # yellow warning sign followed by the message; %b expands the color escapes like echo -e
+ printf '%b\n' "${YELLOW}⚠${NC} $1"
+}
+
+# Function 1: Generate SAS Token (for Ubuntu machine)
+generate_sas_token() {
+ print_header "Generate SAS Token"
+
+ # Check if Azure CLI is available
+ AZ_CMD=""
+ if command -v az &> /dev/null; then
+ AZ_CMD="az"
+ elif [ -f "/usr/bin/az" ]; then
+ AZ_CMD="/usr/bin/az"
+ fi
+
+ if [ -z "$AZ_CMD" ]; then
+ print_error "Azure CLI (az) not found"
+ echo "To generate SAS token, install Azure CLI and use:"
+ echo " az storage container generate-sas \\"
+ echo " --account-name \\"
+ echo " --name \\"
+ echo " --permissions racwdl \\"
+ echo " --expiry \$(date -u -d '+7 days' +%Y-%m-%dT%H:%M:%SZ) \\"
+ echo " --auth-mode login \\"
+ echo " --as-user \\"
+ echo " -o tsv"
+ return 1
+ fi
+
+ # Validate configuration
+ if [ "$AZURE_ACCOUNT" = "your-storage-account" ] || [ "$AZURE_CONTAINER" = "your-container" ]; then
+ print_error "Please set AZURE_ACCOUNT and AZURE_CONTAINER environment variables"
+ echo "Example:"
+ echo " export AZURE_ACCOUNT=my-storage-account"
+ echo " export AZURE_CONTAINER=my-container"
+ return 1
+ fi
+
+ echo "Generating user delegation SAS token (valid for 7 days)..."
+ # Calculate expiry date (7 days from now) - handle both macOS and Linux
+ if date -u -v+7d +%Y-%m-%dT%H:%M:%SZ >/dev/null 2>&1; then
+ # macOS date command
+ EXPIRY=$(date -u -v+7d +%Y-%m-%dT%H:%M:%SZ)
+ elif date -u -d '+7 days' +%Y-%m-%dT%H:%M:%SZ >/dev/null 2>&1; then
+ # Linux date command
+ EXPIRY=$(date -u -d '+7 days' +%Y-%m-%dT%H:%M:%SZ)
+ else
+ print_error "Could not calculate expiry date"
+ return 1
+ fi
+
+ SAS_TOKEN=$($AZ_CMD storage container generate-sas \
+ --account-name "$AZURE_ACCOUNT" \
+ --name "$AZURE_CONTAINER" \
+ --permissions racwdl \
+ --expiry "$EXPIRY" \
+ --auth-mode login \
+ --as-user \
+ -o tsv)
+
+ if [ -z "$SAS_TOKEN" ]; then
+ print_error "Failed to generate SAS token"
+ return 1
+ fi
+
+ print_success "SAS token generated successfully"
+ echo ""
+ echo "Token:"
+ echo "$SAS_TOKEN"
+ echo ""
+ echo "Export it for use:"
+ echo " export AZURE_SAS_TOKEN=\"$SAS_TOKEN\""
+ echo ""
+ echo "Or save to file:"
+ echo " echo \"$SAS_TOKEN\" > /tmp/azure-sas-token.txt"
+
+ # Save to file for easy retrieval
+ echo "$SAS_TOKEN" > /tmp/azure-sas-token.txt
+ print_success "Token saved to /tmp/azure-sas-token.txt"
+
+ return 0
+}
+
+# Function 2: Cleanup Azure Storage
+cleanup_azure() {
+ local PREFIX="${1:-test-repo}"
+
+ print_header "Cleanup Azure Storage (prefix: $PREFIX/)"
+
+ # Check if Azure CLI is available
+ AZ_CMD=""
+ if command -v az &> /dev/null; then
+ AZ_CMD="az"
+ elif [ -f "/usr/bin/az" ]; then
+ AZ_CMD="/usr/bin/az"
+ fi
+
+ if [ -z "$AZ_CMD" ]; then
+ print_error "Azure CLI (az) not found"
+ return 1
+ fi
+
+ # Validate configuration
+ if [ "$AZURE_ACCOUNT" = "your-storage-account" ] || [ "$AZURE_CONTAINER" = "your-container" ]; then
+ print_error "Please set AZURE_ACCOUNT and AZURE_CONTAINER environment variables"
+ return 1
+ fi
+
+ echo "Listing blobs with prefix $PREFIX/..."
+ BLOBS=$($AZ_CMD storage blob list \
+ --account-name "$AZURE_ACCOUNT" \
+ --container-name "$AZURE_CONTAINER" \
+ --prefix "$PREFIX/" \
+ --auth-mode login \
+ --output tsv --query "[].name" 2>/dev/null || true)
+
+ if [ -z "$BLOBS" ]; then
+ print_success "No blobs found with prefix $PREFIX/"
+ return 0
+ fi
+
+ BLOB_COUNT=$(echo "$BLOBS" | wc -l)
+ echo "Found $BLOB_COUNT blob(s) to delete"
+ echo ""
+ echo "First 10 blobs:"
+ echo "$BLOBS" | head -10
+ if [ "$BLOB_COUNT" -gt 10 ]; then
+ echo "... and $((BLOB_COUNT - 10)) more"
+ fi
+ echo ""
+
+ read -p "Delete these blobs? (y/N): " -n 1 -r
+ echo ""
+
+ if [[ ! $REPLY =~ ^[Yy]$ ]]; then
+ print_warning "Cleanup cancelled"
+ return 0
+ fi
+
+ echo "Deleting blobs in parallel (10 at a time)..."
+ echo "$BLOBS" | xargs -P 10 -I {} $AZ_CMD storage blob delete \
+ --account-name "$AZURE_ACCOUNT" \
+ --container-name "$AZURE_CONTAINER" \
+ --name {} \
+ --auth-mode login \
+ --only-show-errors 2>/dev/null || true
+
+ print_success "Cleanup complete!"
+ return 0
+}
+
+# Function 3: Clean Docker containers and volumes
+cleanup_docker() {
+ print_header "Cleanup Docker Containers and Volumes"
+
+ echo "Stopping and removing container..."
+ docker stop "$CONTAINER" 2>/dev/null || true
+ docker rm "$CONTAINER" 2>/dev/null || true
+
+ echo "Removing volumes..."
+ docker volume rm pgdata pgrepo 2>/dev/null || true
+
+ # Check and free up port 5433 if in use
+ if lsof -ti:5433 >/dev/null 2>&1; then
+ echo "Port 5433 is in use, freeing it..."
+ lsof -ti:5433 | xargs kill -9 2>/dev/null || true
+ sleep 1
+ fi
+
+ print_success "Docker cleanup complete!"
+ return 0
+}
+
+# Function 3.5: Full cleanup including image rebuild
+cleanup_all_and_rebuild() {
+ print_header "Full Cleanup and Rebuild"
+
+ # Clean containers and volumes
+ cleanup_docker
+
+ echo "Removing old image..."
+ docker rmi "$IMAGE" 2>/dev/null || true
+
+ echo "Rebuilding Docker image..."
+ if docker build -t "$IMAGE" .; then
+ print_success "Image rebuilt successfully"
+ else
+ print_error "Image rebuild failed"
+ return 1
+ fi
+
+ # Ensure port is free
+ if lsof -ti:5433 >/dev/null 2>&1; then
+ echo "Port 5433 is in use, freeing it..."
+ lsof -ti:5433 | xargs kill -9 2>/dev/null || true
+ sleep 2
+ fi
+
+ print_success "Full cleanup and rebuild complete!"
+ return 0
+}
+
+# Helper function: Download Northwind database
+download_northwind() {
+ local container_name="$1"
+
+ print_header "Download Northwind Database"
+
+ # Check if Northwind SQL file already exists in container
+ if docker exec "$container_name" test -f /northwind.sql 2>/dev/null; then
+ print_success "Northwind SQL file already exists in container"
+ return 0
+ fi
+
+ # Try to download from common sources
+ echo "Downloading Northwind database..."
+
+ # Try multiple sources for Northwind database
+ NORTHWIND_URLS=(
+ "https://raw.githubusercontent.com/pthom/northwind_psql/master/northwind.sql"
+ "https://github.com/pthom/northwind_psql/raw/master/northwind.sql"
+ "https://raw.githubusercontent.com/jpwhite3/northwind-SQLite3/master/northwind.sql"
+ )
+
+ DOWNLOADED=false
+ for URL in "${NORTHWIND_URLS[@]}"; do
+ echo "Trying: $URL"
+ if curl -s -f -L "$URL" -o /tmp/northwind.sql 2>/dev/null && [ -s /tmp/northwind.sql ]; then
+ # Copy to container (as root, then fix permissions)
+ docker cp /tmp/northwind.sql "$container_name:/northwind.sql"
+ docker exec -u root "$container_name" chmod 644 /northwind.sql 2>/dev/null || true
+ rm -f /tmp/northwind.sql
+ DOWNLOADED=true
+ print_success "Northwind database downloaded successfully"
+ break
+ fi
+ done
+
+ if [ "$DOWNLOADED" = "false" ]; then
+ # Try alternative: create a simple Northwind-like database
+ print_warning "Could not download Northwind from online sources, creating a simple test database..."
+ # Create SQL file locally first, then copy to container
+ cat > /tmp/northwind.sql << 'NORTHWIND_EOF'
+-- Simple Northwind-like database for testing
+-- Note: Database should be created separately before running this script
+
+CREATE TABLE customers (
+ customer_id VARCHAR(5) PRIMARY KEY,
+ company_name VARCHAR(40) NOT NULL,
+ contact_name VARCHAR(30),
+ contact_title VARCHAR(30),
+ address VARCHAR(60),
+ city VARCHAR(15),
+ region VARCHAR(15),
+ postal_code VARCHAR(10),
+ country VARCHAR(15),
+ phone VARCHAR(24),
+ fax VARCHAR(24)
+);
+
+INSERT INTO customers VALUES
+ ('ALFKI', 'Alfreds Futterkiste', 'Maria Anders', 'Sales Representative', 'Obere Str. 57', 'Berlin', NULL, '12209', 'Germany', '030-0074321', '030-0076545'),
+ ('ANATR', 'Ana Trujillo Emparedados y helados', 'Ana Trujillo', 'Owner', 'Avda. de la Constitución 2222', 'México D.F.', NULL, '05021', 'Mexico', '(5) 555-4729', '(5) 555-3745'),
+ ('ANTON', 'Antonio Moreno Taquería', 'Antonio Moreno', 'Owner', 'Mataderos 2312', 'México D.F.', NULL, '05023', 'Mexico', '(5) 555-3932', NULL),
+ ('AROUT', 'Around the Horn', 'Thomas Hardy', 'Sales Representative', '120 Hanover Sq.', 'London', NULL, 'WA1 1DP', 'UK', '(171) 555-7788', '(171) 555-6750'),
+ ('BERGS', 'Berglunds snabbköp', 'Christina Berglund', 'Order Administrator', 'Berguvsvägen 8', 'Luleå', NULL, 'S-958 22', 'Sweden', '0921-12 34 65', '0921-12 34 67');
+NORTHWIND_EOF
+ docker cp /tmp/northwind.sql "$container_name:/northwind.sql"
+ docker exec -u root "$container_name" chmod 644 /northwind.sql 2>/dev/null || true
+ rm -f /tmp/northwind.sql
+ print_success "Created simple Northwind test database"
+ else
+ # Remove CREATE DATABASE statement from downloaded file if present (we create it separately)
+ docker exec "$container_name" bash -c 'sed -i "/^CREATE DATABASE/d; /^\\\\c/d" /northwind.sql 2>/dev/null || true'
+ fi
+
+ return 0
+}
+
+# Helper function: Create test data
+create_test_data() {
+ local container_name="$1"
+
+ print_header "Create Test Data"
+ docker exec "$container_name" \
+ psql -U postgres -d postgres -c "DROP TABLE IF EXISTS restore_test;" >/dev/null 2>&1 || true
+
+ docker exec "$container_name" \
+ psql -U postgres -d postgres -c "CREATE TABLE restore_test(id int primary key, note text);"
+
+ docker exec "$container_name" \
+ psql -U postgres -d postgres -c "SELECT * FROM restore_test;"
+
+ # Modify config
+ docker exec "$container_name" bash -lc 'echo "shared_buffers = 999MB" >> $PGDATA/postgresql.conf'
+ docker exec "$container_name" psql -U postgres -d postgres -c "SELECT pg_reload_conf();" >/dev/null
+ SHARED_BUFFERS_BEFORE=$(docker exec "$container_name" psql -U postgres -d postgres -t -c "SHOW shared_buffers;" | xargs)
+ echo "shared_buffers before backup: $SHARED_BUFFERS_BEFORE"
+
+ # Download and create Northwind database
+ download_northwind "$container_name"
+}
+
+# Helper function: Verify restore
+verify_restore() {
+ local container_name="$1"
+ local expected_customers="$2"
+
+ print_header "Verify Restore"
+ SHARED_BUFFERS_AFTER=$(docker exec "$container_name" psql -U postgres -d postgres -t -c "SHOW shared_buffers;" | xargs)
+ echo "shared_buffers after restore: $SHARED_BUFFERS_AFTER"
+
+ if [ "$SHARED_BUFFERS_AFTER" = "999MB" ]; then
+ print_success "shared_buffers restored correctly"
+ else
+ print_error "shared_buffers mismatch: expected 999MB, got $SHARED_BUFFERS_AFTER"
+ fi
+
+ # Check if Northwind database exists
+ if [ "$expected_customers" != "0" ]; then
+ # Wait a moment for database to be fully accessible
+ sleep 2
+ if docker exec "$container_name" psql -U postgres -d postgres -c "\l northwind" >/dev/null 2>&1; then
+ CUSTOMERS_COUNT_AFTER=$(docker exec "$container_name" \
+ psql -U postgres -d northwind -t -c "SELECT count(*) FROM customers;" 2>/dev/null | xargs || echo "0")
+ echo "Northwind customers count after restore: $CUSTOMERS_COUNT_AFTER"
+
+ if [ "$CUSTOMERS_COUNT_AFTER" = "$expected_customers" ]; then
+ print_success "Northwind database restored correctly ($CUSTOMERS_COUNT_AFTER customers)"
+ else
+ print_warning "Customer count mismatch: expected $expected_customers, got $CUSTOMERS_COUNT_AFTER (database may still be restoring)"
+ fi
+ else
+ print_warning "Northwind database not found after restore (may need to check manually)"
+ fi
+ else
+ echo "Northwind database not included in test (expected_customers=0)"
+ fi
+
+ # Check restore_test table
+ if docker exec "$container_name" psql -U postgres -d postgres -c "\d restore_test" >/dev/null 2>&1; then
+ print_success "restore_test table exists"
+ else
+ print_error "restore_test table not found"
+ fi
+}
+
+# Test 1: Local backups only
+test_local() {
+ print_header "TEST 1: Local Backups Only (repo1)"
+
+ # Clean Docker first
+ cleanup_docker
+
+ # Check port before starting
+ if lsof -ti:5433 >/dev/null 2>&1; then
+ print_warning "Port 5433 is in use, freeing it..."
+ lsof -ti:5433 | xargs kill -9 2>/dev/null || true
+ sleep 2
+ fi
+
+ # Start container (no Azure)
+ print_header "Start Container (Local Only)"
+ docker run -d \
+ --name "$CONTAINER" \
+ -e POSTGRES_PASSWORD=secret \
+ -p 5433:5432 \
+ -v pgdata:/var/lib/postgresql \
+ -v pgrepo:/var/lib/pgbackrest \
+ "$IMAGE"
+
+ print_success "Container started"
+
+ # Wait for PostgreSQL (with timeout)
+ print_header "Wait for PostgreSQL"
+ echo "Waiting for PostgreSQL to be ready (max 60 seconds)..."
+ TIMEOUT=60
+ ELAPSED=0
+ until docker exec "$CONTAINER" pg_isready -U postgres >/dev/null 2>&1; do
+ if [ $ELAPSED -ge $TIMEOUT ]; then
+ print_error "PostgreSQL failed to start within $TIMEOUT seconds"
+ docker logs "$CONTAINER" --tail 50
+ return 1
+ fi
+ echo -n "."
+ sleep 1
+ ELAPSED=$((ELAPSED + 1))
+ done
+ echo ""
+ print_success "PostgreSQL is ready (took ${ELAPSED}s)"
+
+ # Create test data
+ create_test_data "$CONTAINER"
+
+ # Create stanza
+ print_header "Create Stanza (Local)"
+ docker exec "$CONTAINER" pgbackrest --stanza=demo stanza-create
+
+ # Backup to local
+ print_header "Backup to Local (repo1)"
+ docker exec "$CONTAINER" pgbackrest --stanza=demo --repo=1 backup
+ docker exec "$CONTAINER" pgbackrest --stanza=demo info
+
+ BACKUP_LABEL_LOCAL=$(docker exec "$CONTAINER" pgbackrest --stanza=demo info | awk '/full backup:/ {print $3; exit}')
+ if [ -z "$BACKUP_LABEL_LOCAL" ]; then
+ print_error "Could not extract backup label from local"
+ return 1
+ fi
+ print_success "Local backup label: $BACKUP_LABEL_LOCAL"
+
+ # Simulate disaster
+ print_header "Simulate Disaster"
+ docker exec "$CONTAINER" psql -U postgres -d postgres -c "DROP TABLE restore_test;" >/dev/null
+ print_success "Test table dropped"
+
+ # Get the actual PostgreSQL data directory before stopping container
+ PGDATA_PATH=$(docker exec "$CONTAINER" bash -c 'psql -U postgres -d postgres -t -c "SHOW data_directory;" 2>/dev/null | xargs' || echo "")
+ if [ -z "$PGDATA_PATH" ]; then
+ # Fallback: try to find it from the config
+ PGDATA_PATH=$(docker exec "$CONTAINER" bash -c 'grep "pg1-path" /etc/pgbackrest/pgbackrest.conf | tail -1 | cut -d= -f2 | xargs' || echo "")
+ fi
+ if [ -z "$PGDATA_PATH" ]; then
+ # Fallback: try to find PG_VERSION file
+ PGDATA_PATH=$(docker exec "$CONTAINER" find /var/lib/postgresql -name "PG_VERSION" -type f 2>/dev/null | head -1 | xargs dirname 2>/dev/null || echo "")
+ fi
+ if [ -z "$PGDATA_PATH" ]; then
+ # Final fallback: use default
+ PGDATA_PATH="/var/lib/postgresql/data"
+ print_warning "Could not detect PostgreSQL data directory, using default: $PGDATA_PATH"
+ else
+ print_success "Detected PostgreSQL data directory: $PGDATA_PATH"
+ fi
+
+ # Stop container
+ docker stop "$CONTAINER"
+
+ # Restore from local
+ print_header "Restore from Local (repo1)"
+ echo "Restoring to PGDATA: $PGDATA_PATH"
+ docker run --rm \
+ --entrypoint bash \
+ -v pgdata:/var/lib/postgresql \
+ -v pgrepo:/var/lib/pgbackrest \
+ "$IMAGE" \
+ -lc "rm -rf \"$PGDATA_PATH\"/* && pgbackrest --stanza=demo restore --set='$BACKUP_LABEL_LOCAL' --type=immediate --pg1-path=\"$PGDATA_PATH\""
+
+ print_success "Restore complete"
+
+ # Start container
+ docker start "$CONTAINER"
+ TIMEOUT=30
+ ELAPSED=0
+ until docker exec "$CONTAINER" pg_isready -U postgres >/dev/null 2>&1; do
+ if [ $ELAPSED -ge $TIMEOUT ]; then
+ print_error "PostgreSQL failed to start after restore"
+ return 1
+ fi
+ sleep 1
+ ELAPSED=$((ELAPSED + 1))
+ done
+
+ # Wait a bit more for PostgreSQL to fully initialize
+ sleep 2
+
+ # Verify restore
+ if docker exec "$CONTAINER" psql -U postgres -d postgres -c "\d restore_test" >/dev/null 2>&1; then
+ print_success "Local restore verified - restore_test table exists"
+ else
+ print_warning "restore_test table not found, but checking if database is accessible..."
+ # Check if we can connect and query
+ if docker exec "$CONTAINER" psql -U postgres -d postgres -c "SELECT 1;" >/dev/null 2>&1; then
+ print_success "Database is accessible after restore"
+ # Try to see what tables exist
+ docker exec "$CONTAINER" psql -U postgres -d postgres -c "\dt" 2>&1 | head -10
+ else
+ print_error "Local restore failed - database not accessible"
+ fi
+ fi
+
+ print_success "TEST 1 Complete: Local backups working!"
+ return 0
+}
+
+# Test 2: Azure Blob Storage with SAS Token
+test_azure_blob() {
+ print_header "TEST 2: Azure Blob Storage (SAS Token)"
+
+ # Validate configuration
+ if [ "$AZURE_ACCOUNT" = "your-storage-account" ] || [ "$AZURE_CONTAINER" = "your-container" ]; then
+ print_error "Please set AZURE_ACCOUNT and AZURE_CONTAINER environment variables"
+ echo "Example:"
+ echo " export AZURE_ACCOUNT=my-storage-account"
+ echo " export AZURE_CONTAINER=my-container"
+ return 1
+ fi
+
+ # Get SAS token - try to generate if not available or expired
+ TOKEN_NEEDS_REGEN=false
+
+ if [ -z "${AZURE_SAS_TOKEN:-}" ]; then
+ if [ -f "/tmp/azure-sas-token.txt" ]; then
+ AZURE_SAS_TOKEN=$(cat /tmp/azure-sas-token.txt)
+ print_success "Loaded SAS token from /tmp/azure-sas-token.txt"
+ else
+ print_warning "AZURE_SAS_TOKEN not set and /tmp/azure-sas-token.txt not found"
+ TOKEN_NEEDS_REGEN=true
+ fi
+ fi
+
+ # Check if token is expired (always check, even if loaded from env var or file)
+ if [ -n "${AZURE_SAS_TOKEN:-}" ] && echo "$AZURE_SAS_TOKEN" | grep -q "se="; then
+ # Extract expiry date from token (format: se=2025-11-18T18:54:02Z or se=2025-11-18T18%3A54%3A02Z)
+ EXPIRY_RAW=$(echo "$AZURE_SAS_TOKEN" | sed -n 's/.*se=\([^&]*\).*/\1/p' | head -1)
+ if [ -n "$EXPIRY_RAW" ]; then
+ # URL decode the expiry date (%3A -> :)
+ EXPIRY=$(echo "$EXPIRY_RAW" | sed 's/%3A/:/g' | sed 's/%2D/-/g' | sed 's/%2B/+/g')
+ if [ -n "$EXPIRY" ]; then
+ # Try to parse expiry date (handle both Linux and macOS date commands)
+ EXPIRY_EPOCH=$(date -u -d "$EXPIRY" +%s 2>/dev/null || date -u -j -f "%Y-%m-%dT%H:%M:%SZ" "$EXPIRY" +%s 2>/dev/null || echo "0")
+ NOW_EPOCH=$(date -u +%s)
+
+ if [ "$EXPIRY_EPOCH" != "0" ] && [ "$EXPIRY_EPOCH" -lt "$NOW_EPOCH" ]; then
+ print_warning "SAS token is expired (expiry: $EXPIRY, now: $(date -u +%Y-%m-%dT%H:%M:%SZ))"
+ TOKEN_NEEDS_REGEN=true
+ elif [ "$EXPIRY_EPOCH" != "0" ]; then
+ print_success "SAS token is valid (expires: $EXPIRY)"
+ fi
+ fi
+ fi
+ fi
+
+ # Generate new token if needed
+ if [ "$TOKEN_NEEDS_REGEN" = "true" ]; then
+ print_warning "Attempting to generate a new SAS token..."
+ if generate_sas_token; then
+ AZURE_SAS_TOKEN=$(cat /tmp/azure-sas-token.txt)
+ print_success "Generated new SAS token (valid for 7 days)"
+ else
+ print_error "Could not generate SAS token. Skipping SAS Token test."
+ print_warning "You can manually generate a token with: ./azure-pgbackrest.sh generate-token"
+ return 0 # Skip this test but don't fail the whole suite
+ fi
+ fi
+
+ AZURE_KEY_TYPE="sas"
+ AZURE_REPO_PATH="/test-repo-blob-$(date +%Y%m%d-%H%M%S)"
+
+ print_success "Using repo path: $AZURE_REPO_PATH"
+
+ # Clean Docker first
+ cleanup_docker
+
+ # Check port before starting
+ if lsof -ti:5433 >/dev/null 2>&1; then
+ print_warning "Port 5433 is in use, freeing it..."
+ lsof -ti:5433 | xargs kill -9 2>/dev/null || true
+ sleep 2
+ fi
+
+ # Start container with Azure
+ print_header "Start Container with Azure Blob Storage (SAS Token)"
+ docker run -d \
+ --name "$CONTAINER" \
+ -e POSTGRES_PASSWORD=secret \
+ -e AZURE_ACCOUNT="$AZURE_ACCOUNT" \
+ -e AZURE_CONTAINER="$AZURE_CONTAINER" \
+ -e AZURE_KEY="$AZURE_SAS_TOKEN" \
+ -e AZURE_KEY_TYPE="$AZURE_KEY_TYPE" \
+ -e AZURE_REPO_PATH="$AZURE_REPO_PATH" \
+ -p 5433:5432 \
+ -v pgdata:/var/lib/postgresql \
+ -v pgrepo:/var/lib/pgbackrest \
+ "$IMAGE"
+
+ print_success "Container started"
+
+ # Wait for PostgreSQL (with timeout)
+ print_header "Wait for PostgreSQL"
+ echo "Waiting for PostgreSQL to be ready (max 60 seconds)..."
+ TIMEOUT=60
+ ELAPSED=0
+ until docker exec "$CONTAINER" pg_isready -U postgres >/dev/null 2>&1; do
+ if [ $ELAPSED -ge $TIMEOUT ]; then
+ print_error "PostgreSQL failed to start within $TIMEOUT seconds"
+ docker logs "$CONTAINER" --tail 50
+ return 1
+ fi
+ echo -n "."
+ sleep 1
+ ELAPSED=$((ELAPSED + 1))
+ done
+ echo ""
+ print_success "PostgreSQL is ready (took ${ELAPSED}s)"
+
+ # Create test data (this will also download Northwind if needed)
+ create_test_data "$CONTAINER"
+
+ # Create Northwind DB (download if needed, then create)
+ if docker exec "$CONTAINER" test -f /northwind.sql 2>/dev/null; then
+ print_header "Create Northwind Database"
+ docker exec "$CONTAINER" \
+ psql -U postgres -d postgres -c "DROP DATABASE IF EXISTS northwind;" >/dev/null 2>&1 || true
+ docker exec "$CONTAINER" \
+ psql -U postgres -d postgres -c "CREATE DATABASE northwind;" >/dev/null
+ docker exec "$CONTAINER" \
+ psql -U postgres -d northwind -f /northwind.sql >/dev/null 2>&1
+ CUSTOMERS_COUNT=$(docker exec "$CONTAINER" \
+ psql -U postgres -d northwind -t -c "SELECT count(*) FROM customers;" | xargs)
+ echo "Northwind customers count: $CUSTOMERS_COUNT"
+ else
+ # Try to download Northwind if not already downloaded
+ download_northwind "$CONTAINER"
+ if docker exec "$CONTAINER" test -f /northwind.sql 2>/dev/null; then
+ print_header "Create Northwind Database"
+ docker exec "$CONTAINER" \
+ psql -U postgres -d postgres -c "DROP DATABASE IF EXISTS northwind;" >/dev/null 2>&1 || true
+ docker exec "$CONTAINER" \
+ psql -U postgres -d postgres -c "CREATE DATABASE northwind;" >/dev/null
+ docker exec "$CONTAINER" \
+ psql -U postgres -d northwind -f /northwind.sql >/dev/null 2>&1
+ CUSTOMERS_COUNT=$(docker exec "$CONTAINER" \
+ psql -U postgres -d northwind -t -c "SELECT count(*) FROM customers;" | xargs)
+ echo "Northwind customers count: $CUSTOMERS_COUNT"
+ else
+ CUSTOMERS_COUNT="0"
+ print_warning "Could not download or create Northwind database, skipping Northwind test"
+ fi
+ fi
+
+ # Flush changes
+ docker exec "$CONTAINER" psql -U postgres -d postgres -c "CHECKPOINT;" >/dev/null
+ docker exec "$CONTAINER" psql -U postgres -d postgres -c "SELECT pg_switch_wal();" >/dev/null
+ sleep 5
+
+ # Clean local backups
+ docker exec "$CONTAINER" rm -rf /var/lib/pgbackrest/archive/* /var/lib/pgbackrest/backup/* 2>/dev/null || true
+
+ # Create stanza
+ print_header "Create Stanza (Azure Blob Storage)"
+ docker exec "$CONTAINER" pgbackrest --stanza=demo stanza-create
+
+ # Backup to Azure
+ print_header "Backup to Azure (repo2)"
+ docker exec "$CONTAINER" pgbackrest --stanza=demo --repo=2 backup
+ docker exec "$CONTAINER" pgbackrest --stanza=demo info
+
+ BACKUP_LABEL_AZURE=$(docker exec "$CONTAINER" pgbackrest --stanza=demo info | awk '/full backup:/ {print $3; exit}')
+ if [ -z "$BACKUP_LABEL_AZURE" ]; then
+ print_error "Could not extract backup label from Azure"
+ return 1
+ fi
+ print_success "Azure backup label: $BACKUP_LABEL_AZURE"
+
+ # Simulate disaster
+ print_header "Simulate Disaster"
+ docker exec "$CONTAINER" psql -U postgres -d postgres -c "DROP TABLE restore_test;" >/dev/null
+ if [ "$CUSTOMERS_COUNT" != "0" ]; then
+ docker exec "$CONTAINER" psql -U postgres -d postgres -c "DROP DATABASE northwind;" >/dev/null
+ fi
+ print_success "Test data dropped"
+
+ # Get the actual PostgreSQL data directory before stopping container
+ ACTUAL_DATA_DIR=$(docker exec "$CONTAINER" bash -c 'psql -U postgres -d postgres -t -c "SHOW data_directory;" 2>/dev/null | xargs' || echo "")
+ if [ -z "$ACTUAL_DATA_DIR" ]; then
+ # Fallback: try to find it from the config
+ ACTUAL_DATA_DIR=$(docker exec "$CONTAINER" bash -c 'grep "pg1-path" /etc/pgbackrest/pgbackrest.conf | tail -1 | cut -d= -f2 | xargs' || echo "")
+ fi
+ if [ -z "$ACTUAL_DATA_DIR" ]; then
+ # Final fallback: use default
+ ACTUAL_DATA_DIR="/var/lib/postgresql/data"
+ print_warning "Could not detect PostgreSQL data directory, using default: $ACTUAL_DATA_DIR"
+ else
+ print_success "Detected PostgreSQL data directory: $ACTUAL_DATA_DIR"
+ fi
+
+ # Stop container
+ docker stop "$CONTAINER"
+
+ # Restore from Azure
+ print_header "Restore from Azure (repo2)"
+ docker run --rm \
+ --entrypoint bash \
+ -e AZURE_ACCOUNT="$AZURE_ACCOUNT" \
+ -e AZURE_CONTAINER="$AZURE_CONTAINER" \
+ -e AZURE_KEY="$AZURE_SAS_TOKEN" \
+ -e AZURE_KEY_TYPE="$AZURE_KEY_TYPE" \
+ -e AZURE_REPO_PATH="$AZURE_REPO_PATH" \
+ -e ACTUAL_DATA_DIR="$ACTUAL_DATA_DIR" \
+ -v pgdata:/var/lib/postgresql \
+ -v pgrepo:/var/lib/pgbackrest \
+ "$IMAGE" \
+ -lc "/usr/local/bin/configure-azure.sh || true; \
+ DATA_DIR=\${ACTUAL_DATA_DIR:-/var/lib/postgresql/data}; \
+ echo \"Restoring to data directory: \$DATA_DIR\"; \
+ rm -rf \"\$DATA_DIR\"/* && \
+ pgbackrest --stanza=demo restore --set='$BACKUP_LABEL_AZURE' --type=immediate --pg1-path=\"\$DATA_DIR\""
+
+ print_success "Restore complete"
+
+ # Start container
+ docker start "$CONTAINER"
+ until docker exec "$CONTAINER" pg_isready -U postgres >/dev/null 2>&1; do
+ sleep 1
+ done
+
+ # Verify restore
+ verify_restore "$CONTAINER" "$CUSTOMERS_COUNT"
+
+ print_success "TEST 2 Complete: Azure Blob Storage (SAS Token) working!"
+ return 0
+}
+
+# Test 3: Azure Managed Identity
+test_azure_ami() {
+ print_header "TEST 3: Azure Managed Identity (AMI)"
+
+ # Validate configuration
+ if [ "$AZURE_ACCOUNT" = "your-storage-account" ] || [ "$AZURE_CONTAINER" = "your-container" ]; then
+ print_error "Please set AZURE_ACCOUNT and AZURE_CONTAINER environment variables"
+ return 1
+ fi
+
+ # Check if we're on Azure (Managed Identity only works on Azure)
+ if ! curl -s -H "Metadata:true" "http://169.254.169.254/metadata/instance?api-version=2021-02-01" >/dev/null 2>&1; then
+ print_warning "Not running on Azure VM - Managed Identity test will be skipped"
+ print_warning "Managed Identity only works on Azure VMs, Container Instances, or AKS"
+ return 0
+ fi
+
+ AZURE_KEY_TYPE="auto"
+ AZURE_REPO_PATH="/test-repo-ami-$(date +%Y%m%d-%H%M%S)"
+
+ print_success "Using repo path: $AZURE_REPO_PATH"
+
+ # Clean Docker first
+ cleanup_docker
+
+ # Check port before starting
+ if lsof -ti:5433 >/dev/null 2>&1; then
+ print_warning "Port 5433 is in use, freeing it..."
+ lsof -ti:5433 | xargs kill -9 2>/dev/null || true
+ sleep 2
+ fi
+
+ # Start container with Managed Identity
+ print_header "Start Container with Azure Managed Identity"
+ docker run -d \
+ --name "$CONTAINER" \
+ -e POSTGRES_PASSWORD=secret \
+ -e AZURE_ACCOUNT="$AZURE_ACCOUNT" \
+ -e AZURE_CONTAINER="$AZURE_CONTAINER" \
+ -e AZURE_KEY_TYPE="$AZURE_KEY_TYPE" \
+ -e AZURE_REPO_PATH="$AZURE_REPO_PATH" \
+ -p 5433:5432 \
+ -v pgdata:/var/lib/postgresql \
+ -v pgrepo:/var/lib/pgbackrest \
+ "$IMAGE"
+
+ print_success "Container started"
+
+ # Wait for PostgreSQL (with timeout)
+ print_header "Wait for PostgreSQL"
+ echo "Waiting for PostgreSQL to be ready (max 60 seconds)..."
+ TIMEOUT=60
+ ELAPSED=0
+ until docker exec "$CONTAINER" pg_isready -U postgres >/dev/null 2>&1; do
+ if [ $ELAPSED -ge $TIMEOUT ]; then
+ print_error "PostgreSQL failed to start within $TIMEOUT seconds"
+ docker logs "$CONTAINER" --tail 50
+ return 1
+ fi
+ echo -n "."
+ sleep 1
+ ELAPSED=$((ELAPSED + 1))
+ done
+ echo ""
+ print_success "PostgreSQL is ready (took ${ELAPSED}s)"
+
+ # Create test data (this will also download Northwind if needed)
+ create_test_data "$CONTAINER"
+
+ # Create Northwind DB (download if needed, then create)
+ if docker exec "$CONTAINER" test -f /northwind.sql 2>/dev/null; then
+ print_header "Create Northwind Database"
+ docker exec "$CONTAINER" \
+ psql -U postgres -d postgres -c "DROP DATABASE IF EXISTS northwind;" >/dev/null 2>&1 || true
+ docker exec "$CONTAINER" \
+ psql -U postgres -d postgres -c "CREATE DATABASE northwind;" >/dev/null
+ docker exec "$CONTAINER" \
+ psql -U postgres -d northwind -f /northwind.sql >/dev/null 2>&1
+ CUSTOMERS_COUNT=$(docker exec "$CONTAINER" \
+ psql -U postgres -d northwind -t -c "SELECT count(*) FROM customers;" | xargs)
+ echo "Northwind customers count: $CUSTOMERS_COUNT"
+ else
+ # Try to download Northwind if not already downloaded
+ download_northwind "$CONTAINER"
+ if docker exec "$CONTAINER" test -f /northwind.sql 2>/dev/null; then
+ print_header "Create Northwind Database"
+ docker exec "$CONTAINER" \
+ psql -U postgres -d postgres -c "DROP DATABASE IF EXISTS northwind;" >/dev/null 2>&1 || true
+ docker exec "$CONTAINER" \
+ psql -U postgres -d postgres -c "CREATE DATABASE northwind;" >/dev/null
+ docker exec "$CONTAINER" \
+ psql -U postgres -d northwind -f /northwind.sql >/dev/null 2>&1
+ CUSTOMERS_COUNT=$(docker exec "$CONTAINER" \
+ psql -U postgres -d northwind -t -c "SELECT count(*) FROM customers;" | xargs)
+ echo "Northwind customers count: $CUSTOMERS_COUNT"
+ else
+ CUSTOMERS_COUNT="0"
+ print_warning "Could not download or create Northwind database, skipping Northwind test"
+ fi
+ fi
+
+ # Flush changes
+ docker exec "$CONTAINER" psql -U postgres -d postgres -c "CHECKPOINT;" >/dev/null
+ docker exec "$CONTAINER" psql -U postgres -d postgres -c "SELECT pg_switch_wal();" >/dev/null
+ sleep 5
+
+ # Clean local backups
+ docker exec "$CONTAINER" rm -rf /var/lib/pgbackrest/archive/* /var/lib/pgbackrest/backup/* 2>/dev/null || true
+
+ # Debug: Check PostgreSQL data directory and pgBackRest config
+ print_header "Debug: Check PostgreSQL Paths"
+ echo "PGDATA environment:"
+ docker exec "$CONTAINER" bash -c 'echo "PGDATA=$PGDATA"'
+ echo ""
+ echo "PostgreSQL data directory locations:"
+ docker exec "$CONTAINER" bash -c 'ls -la /var/lib/postgresql/ 2>/dev/null || echo "Cannot list /var/lib/postgresql"'
+ echo ""
+ echo "pgBackRest config pg1-path:"
+ docker exec "$CONTAINER" bash -c 'grep "pg1-path" /etc/pgbackrest/pgbackrest.conf || echo "pg1-path not found in config"'
+ echo ""
+ echo "Actual PostgreSQL data directory (from postgres process):"
+ ACTUAL_DATA_DIR=$(docker exec "$CONTAINER" bash -c 'psql -U postgres -d postgres -t -c "SHOW data_directory;" 2>/dev/null | xargs' || echo "")
+ if [ -n "$ACTUAL_DATA_DIR" ]; then
+ echo "Found data directory: $ACTUAL_DATA_DIR"
+ echo ""
+ echo "Updating pgBackRest config with correct path..."
+ # Remove old pg1-path and add new one with correct directory
+ # Use /tmp for temp file and Python to avoid all permission issues
+ docker exec -e DATA_DIR="$ACTUAL_DATA_DIR" "$CONTAINER" bash -c "python3 <<'PYEOF'
+# Rewrite pgbackrest.conf so the [demo] stanza's pg1-path points at DATA_DIR.
+import os
+
+data_dir = os.environ['DATA_DIR']
+config_file = '/etc/pgbackrest/pgbackrest.conf'
+tmp_file = '/tmp/pgbackrest.conf.tmp'
+
+# Read current config
+with open(config_file, 'r') as f:
+ lines = f.readlines()
+
+# Process lines: remove old pg1-path, add new one after [demo]
+output = []
+in_demo = False
+pg1_added = False
+
+# Fix: iterate with enumerate; lines.index(line) returned the FIRST occurrence
+# of a duplicate line (e.g. a blank line), so the last-line check could misfire.
+for idx, line in enumerate(lines):
+ stripped = line.strip()
+
+ # Track when we enter [demo] section
+ if stripped == '[demo]':
+ in_demo = True
+ output.append(line)
+ continue
+
+ # Track when we leave [demo] section
+ if stripped.startswith('[') and stripped != '[demo]':
+ if in_demo and not pg1_added:
+ output.append('pg1-path=' + data_dir + '\n')
+ pg1_added = True
+ in_demo = False
+ output.append(line)
+ continue
+
+ # Skip old pg1-path lines
+ if stripped.startswith('pg1-path='):
+ continue
+
+ # Add pg1-path after [demo] when we hit first empty line or end
+ if in_demo and not pg1_added and (stripped == '' or idx == len(lines) - 1):
+ output.append('pg1-path=' + data_dir + '\n')
+ pg1_added = True
+
+ output.append(line)
+
+# If [demo] section exists but pg1-path was never added
+if in_demo and not pg1_added:
+ output.append('pg1-path=' + data_dir + '\n')
+
+# If [demo] section doesn't exist, add it
+if '[demo]' not in ''.join(output):
+ output.append('\n[demo]\n')
+ output.append('pg1-path=' + data_dir + '\n')
+
+# Write to temp file
+with open(tmp_file, 'w') as f:
+ f.writelines(output)
+
+# Copy back and set permissions (postgres-owned, group-readable)
+import shutil
+import pwd
+import grp
+
+shutil.copy(tmp_file, config_file)
+postgres_uid = pwd.getpwnam('postgres').pw_uid
+postgres_gid = grp.getgrnam('postgres').gr_gid
+os.chown(config_file, postgres_uid, postgres_gid)
+os.chmod(config_file, 0o640)
+os.remove(tmp_file)
+PYEOF
+"
+ echo "Updated config:"
+ docker exec "$CONTAINER" bash -c 'grep "pg1-path" /etc/pgbackrest/pgbackrest.conf'
+ else
+ print_warning "Could not determine PostgreSQL data directory, using default"
+ fi
+ echo ""
+
+ # Verify repo2 is configured
+ print_header "Verify Azure (repo2) Configuration"
+ if ! docker exec "$CONTAINER" grep -q "repo2-type=azure" /etc/pgbackrest/pgbackrest.conf; then
+ echo "repo2 not found in config, running configure-azure.sh..."
+ docker exec "$CONTAINER" bash -lc "/usr/local/bin/configure-azure.sh" || echo "configure-azure.sh returned error, will configure manually"
+
+ # Check if it worked
+ if ! docker exec "$CONTAINER" grep -q "repo2-type=azure" /etc/pgbackrest/pgbackrest.conf; then
+ echo "configure-azure.sh didn't add repo2, configuring manually..."
+ docker exec -e AZURE_ACCOUNT="$AZURE_ACCOUNT" -e AZURE_CONTAINER="$AZURE_CONTAINER" -e AZURE_KEY_TYPE="$AZURE_KEY_TYPE" -e AZURE_REPO_PATH="$AZURE_REPO_PATH" "$CONTAINER" bash -c "
+ AZURE_REPO_PATH=\${AZURE_REPO_PATH:-/demo-repo}
+ TMP_FILE=/tmp/pgbackrest_repo2.$$
+ cat /etc/pgbackrest/pgbackrest.conf > \$TMP_FILE
+ echo '' >> \$TMP_FILE
+ echo 'repo2-type=azure' >> \$TMP_FILE
+ echo \"repo2-azure-account=\${AZURE_ACCOUNT}\" >> \$TMP_FILE
+ echo \"repo2-azure-container=\${AZURE_CONTAINER}\" >> \$TMP_FILE
+ echo \"repo2-azure-key-type=\${AZURE_KEY_TYPE}\" >> \$TMP_FILE
+ echo \"repo2-path=\${AZURE_REPO_PATH}\" >> \$TMP_FILE
+ echo 'repo2-retention-full=4' >> \$TMP_FILE
+ cp \$TMP_FILE /etc/pgbackrest/pgbackrest.conf
+ chown postgres:postgres /etc/pgbackrest/pgbackrest.conf
+ chmod 640 /etc/pgbackrest/pgbackrest.conf
+ rm -f \$TMP_FILE
+ "
+ fi
+ echo "Config after configuration:"
+ docker exec "$CONTAINER" grep -E "repo2|azure" /etc/pgbackrest/pgbackrest.conf || echo "No repo2 config found"
+ else
+ echo "repo2 configuration found:"
+ docker exec "$CONTAINER" grep -E "repo2|azure" /etc/pgbackrest/pgbackrest.conf
+ fi
+ echo ""
+
+ # Ensure archiving is enabled before backups
+ print_header "Ensure PostgreSQL Archiving Enabled"
+ ARCHIVE_MODE=$(docker exec "$CONTAINER" psql -U postgres -At -c "show archive_mode")
+
+ if [ "$ARCHIVE_MODE" != "on" ]; then
+ echo "archive_mode is currently $ARCHIVE_MODE - enabling..."
+ docker exec "$CONTAINER" bash -lc '
+ set -e
+ PGDATA_DIR=${PGDATA:-/var/lib/postgresql/data}
+ {
+ echo ""
+ echo "# pgBackRest archiving configuration"
+ echo "archive_mode = on"
+ echo "archive_command = '\''pgbackrest --stanza=demo archive-push %p'\''"
+ echo "archive_timeout = 60"
+ echo "wal_level = replica"
+ echo "max_wal_senders = 3"
+ echo "max_replication_slots = 3"
+ } >> "$PGDATA_DIR/postgresql.conf"
+ '
+
+ docker restart "$CONTAINER" >/dev/null
+ echo "Waiting for PostgreSQL to restart with archiving enabled..."
+ until docker exec "$CONTAINER" pg_isready -U postgres >/dev/null 2>&1; do
+ sleep 1
+ done
+ else
+ echo "archive_mode already enabled"
+ fi
+ echo ""
+
+ # Create stanza (stanza-create creates on all configured repos)
+ print_header "Create Stanza (Azure Managed Identity)"
+ docker exec "$CONTAINER" pgbackrest --stanza=demo stanza-create
+
+ # Backup to Azure
+ print_header "Backup to Azure (repo2) with Managed Identity"
+ docker exec "$CONTAINER" pgbackrest --stanza=demo --repo=2 backup
+ docker exec "$CONTAINER" pgbackrest --stanza=demo info
+
+ BACKUP_LABEL_AMI=$(docker exec "$CONTAINER" pgbackrest --stanza=demo info | awk '/full backup:/ {print $3; exit}')
+ if [ -z "$BACKUP_LABEL_AMI" ]; then
+ print_error "Could not extract backup label from Azure"
+ return 1
+ fi
+ print_success "Azure Managed Identity backup label: $BACKUP_LABEL_AMI"
+
+ # Simulate disaster
+ print_header "Simulate Disaster"
+ docker exec "$CONTAINER" psql -U postgres -d postgres -c "DROP TABLE restore_test;" >/dev/null
+ if [ "$CUSTOMERS_COUNT" != "0" ]; then
+ docker exec "$CONTAINER" psql -U postgres -d postgres -c "DROP DATABASE northwind;" >/dev/null
+ fi
+ print_success "Test data dropped"
+
+ # Get the actual PostgreSQL data directory before stopping container
+ ACTUAL_DATA_DIR=$(docker exec "$CONTAINER" bash -c 'psql -U postgres -d postgres -t -c "SHOW data_directory;" 2>/dev/null | xargs' || echo "")
+ if [ -z "$ACTUAL_DATA_DIR" ]; then
+ # Fallback: try to find it from the config
+ ACTUAL_DATA_DIR=$(docker exec "$CONTAINER" bash -c 'grep "pg1-path" /etc/pgbackrest/pgbackrest.conf | tail -1 | cut -d= -f2 | xargs' || echo "")
+ fi
+ if [ -z "$ACTUAL_DATA_DIR" ]; then
+ # Final fallback: use default
+ ACTUAL_DATA_DIR="/var/lib/postgresql/data"
+ print_warning "Could not detect PostgreSQL data directory, using default: $ACTUAL_DATA_DIR"
+ else
+ print_success "Detected PostgreSQL data directory: $ACTUAL_DATA_DIR"
+ fi
+
+ # Stop container
+ docker stop "$CONTAINER"
+
+ # Restore from Azure
+ print_header "Restore from Azure (repo2) with Managed Identity"
+ docker run --rm \
+ --entrypoint bash \
+ -e AZURE_ACCOUNT="$AZURE_ACCOUNT" \
+ -e AZURE_CONTAINER="$AZURE_CONTAINER" \
+ -e AZURE_KEY_TYPE="$AZURE_KEY_TYPE" \
+ -e AZURE_REPO_PATH="$AZURE_REPO_PATH" \
+ -e ACTUAL_DATA_DIR="$ACTUAL_DATA_DIR" \
+ -v pgdata:/var/lib/postgresql \
+ -v pgrepo:/var/lib/pgbackrest \
+ "$IMAGE" \
+ -lc "/usr/local/bin/configure-azure.sh || true; \
+ if ! grep -q \"repo2-type=azure\" /etc/pgbackrest/pgbackrest.conf; then \
+ AZURE_REPO_PATH=\${AZURE_REPO_PATH:-/demo-repo}; \
+ TMP_FILE=/tmp/pgbackrest_repo2.\$\$; \
+ cat /etc/pgbackrest/pgbackrest.conf > \$TMP_FILE; \
+ echo '' >> \$TMP_FILE; \
+ echo 'repo2-type=azure' >> \$TMP_FILE; \
+ echo \"repo2-azure-account=\${AZURE_ACCOUNT}\" >> \$TMP_FILE; \
+ echo \"repo2-azure-container=\${AZURE_CONTAINER}\" >> \$TMP_FILE; \
+ echo \"repo2-azure-key-type=\${AZURE_KEY_TYPE}\" >> \$TMP_FILE; \
+ echo \"repo2-path=\${AZURE_REPO_PATH}\" >> \$TMP_FILE; \
+ echo 'repo2-retention-full=4' >> \$TMP_FILE; \
+ cp \$TMP_FILE /etc/pgbackrest/pgbackrest.conf; \
+ chown postgres:postgres /etc/pgbackrest/pgbackrest.conf; \
+ chmod 640 /etc/pgbackrest/pgbackrest.conf; \
+ rm -f \$TMP_FILE; \
+ fi; \
+ DATA_DIR=\${ACTUAL_DATA_DIR:-/var/lib/postgresql/data}; \
+ echo \"Restoring to data directory: \$DATA_DIR\"; \
+ rm -rf \"\$DATA_DIR\"/* && \
+ pgbackrest --repo=2 --stanza=demo restore --set='$BACKUP_LABEL_AMI' --type=immediate --pg1-path=\"\$DATA_DIR\""
+
+ print_success "Restore complete"
+
+ # Start container
+ docker start "$CONTAINER"
+ until docker exec "$CONTAINER" pg_isready -U postgres >/dev/null 2>&1; do
+ sleep 1
+ done
+
+ # Verify restore
+ verify_restore "$CONTAINER" "$CUSTOMERS_COUNT"
+
+ print_success "TEST 3 Complete: Azure Managed Identity working!"
+ return 0
+}
+
+# Function 4: Full Backup and Restore Test (runs all tests in sequence)
+# Runs local, Azure Blob (SAS), and Azure Managed Identity tests with Docker
+# cleanup between them. A local failure aborts with status 1; the Azure tests
+# are best-effort, and their real outcome is recorded so the final summary
+# reports honestly instead of unconditionally claiming success.
+run_full_test() {
+    print_header "Full Backup and Restore Test Suite"
+    echo "Running tests in order:"
+    echo "  1. Local backups (repo1)"
+    echo "  2. Clean everything"
+    echo "  3. Azure Blob Storage (SAS Token)"
+    echo "  4. Azure Managed Identity (AMI)"
+    echo ""
+
+    # Test 1: Local (mandatory -- abort the suite on failure)
+    if ! test_local; then
+        print_error "Local backup test failed"
+        return 1
+    fi
+
+    # Clean everything
+    print_header "Cleaning Everything Before Next Test"
+    cleanup_docker
+
+    # Test 2: Azure Blob Storage (best-effort; record outcome for summary)
+    AZURE_BLOB_OK=1
+    if ! test_azure_blob; then
+        AZURE_BLOB_OK=0
+        print_warning "Azure Blob Storage test failed or skipped (may need valid SAS token)"
+        print_warning "Continuing with next test..."
+    fi
+
+    # Clean everything
+    print_header "Cleaning Everything Before Next Test"
+    cleanup_docker
+
+    # Test 3: Azure Managed Identity (best-effort; record outcome for summary)
+    AZURE_AMI_OK=1
+    if ! test_azure_ami; then
+        AZURE_AMI_OK=0
+        print_warning "Azure Managed Identity test skipped or failed (may not be on Azure VM)"
+    fi
+
+    print_header "All Tests Complete!"
+    print_success "✓ Local backups: Working"
+    # Report the recorded results rather than assuming success (the previous
+    # version printed "Working" for Azure Blob even when the test failed, and
+    # re-probed the metadata endpoint instead of using the AMI test result).
+    if [ "$AZURE_BLOB_OK" = 1 ]; then
+        print_success "✓ Azure Blob Storage (SAS): Working"
+    else
+        print_warning "⚠ Azure Blob Storage (SAS): Failed or skipped"
+    fi
+    if [ "$AZURE_AMI_OK" = 1 ]; then
+        print_success "✓ Azure Managed Identity: Working"
+    else
+        print_warning "⚠ Azure Managed Identity: Skipped or failed (not on Azure VM?)"
+    fi
+
+    return 0
+}
+
+# Function 5: Show usage
+show_usage() {
+ # Emit CLI help text; the heredoc delimiter is unquoted, so $0 expands to
+ # the script name at runtime (the rest of the text contains no expansions).
+ cat << EOF
+Azure pgBackRest Master Script
+
+Usage: $0 [command]
+
+Commands:
+ generate-token Generate SAS token (run on Ubuntu machine)
+ cleanup-azure Clean up Azure storage blobs (run on Ubuntu machine)
+ cleanup-docker Clean up Docker containers and volumes
+ cleanup-all Full cleanup: remove containers, volumes, image, and rebuild
+ test Run full backup/restore test
+ test-ami Run only Azure Managed Identity test (requires Azure VM)
+ all Run everything: cleanup + test (requires SAS token)
+
+Examples:
+ # Generate SAS token (on Ubuntu)
+ ./azure-pgbackrest.sh generate-token
+
+ # Clean up old test backups (on Ubuntu)
+ ./azure-pgbackrest.sh cleanup-azure
+
+ # Run full test (requires AZURE_SAS_TOKEN or token in /tmp/azure-sas-token.txt)
+ export AZURE_SAS_TOKEN="your-token-here"
+ ./azure-pgbackrest.sh test
+
+ # Run only Azure Managed Identity test (requires Azure VM)
+ export AZURE_ACCOUNT="your-storage-account"
+ export AZURE_CONTAINER="your-container"
+ ./azure-pgbackrest.sh test-ami
+
+ # Run everything (on Ubuntu - generates token, cleans up, runs test)
+ ./azure-pgbackrest.sh all
+
+Environment Variables:
+ AZURE_SAS_TOKEN SAS token for Azure authentication (auto-loaded from /tmp/azure-sas-token.txt if not set)
+ AZURE_ACCOUNT Storage account name (REQUIRED - set this before running)
+ AZURE_CONTAINER Container name (REQUIRED - set this before running)
+ RESOURCE_GROUP Resource group name (optional, for some operations)
+ IMAGE Docker image name (default: pgbackrest-test)
+ CONTAINER Docker container name (default: pgbr-test)
+
+Configuration:
+ Before running, set your Azure configuration:
+ export AZURE_ACCOUNT="your-storage-account"
+ export AZURE_CONTAINER="your-container"
+ export RESOURCE_GROUP="your-resource-group" # Optional
+
+EOF
+}
+
+# Main script logic
+main() {
+ case "${1:-}" in
+ generate-token|token)
+ generate_sas_token
+ ;;
+ cleanup-azure|cleanup)
+ cleanup_azure "${2:-test-repo}"
+ ;;
+ cleanup-docker|docker-clean)
+ cleanup_docker
+ ;;
+ cleanup-all|rebuild)
+ cleanup_all_and_rebuild
+ ;;
+ test|run-test)
+ run_full_test
+ ;;
+ test-ami|ami)
+ test_azure_ami
+ ;;
+ all|everything)
+ print_header "Running Everything"
+ generate_sas_token || {
+ print_warning "Could not generate token, trying to use existing..."
+ if [ ! -f "/tmp/azure-sas-token.txt" ]; then
+ print_error "No SAS token available. Please set AZURE_SAS_TOKEN or run generate-token first"
+ exit 1
+ fi
+ }
+ export AZURE_SAS_TOKEN=$(cat /tmp/azure-sas-token.txt)
+ cleanup_azure "test-repo" || true
+ run_full_test
+ ;;
+ help|--help|-h|"")
+ show_usage
+ ;;
+ *)
+ print_error "Unknown command: $1"
+ echo ""
+ show_usage
+ exit 1
+ ;;
+ esac
+}
+
+# Entry point: forward all CLI arguments to the command dispatcher
+main "$@"
+
diff --git a/test/ci.pl b/test/ci.pl
index 8fa21918e8..0a863491e4 100755
--- a/test/ci.pl
+++ b/test/ci.pl
@@ -228,7 +228,7 @@ sub processEnd
processBegin(($strVm eq VM_NONE ? "no container" : $strVm) . ' test');
processExec(
- "${strTestExe} --gen-check --log-level-test-file=off --no-coverage-report --vm-max=2 --vm=${strVm}${strVmArchParam}" .
+ "${strTestExe} --gen-check --log-level-test-file=off --no-coverage --vm-max=2 --vm=${strVm}${strVmArchParam}" .
(@stryParam != 0 ? " --" . join(" --", @stryParam) : ''),
{bShowOutputAsync => true, bOutLogOnError => false});
processEnd();
diff --git a/test/define.yaml b/test/define.yaml
index eaf2a6bfc8..361bc547a6 100644
--- a/test/define.yaml
+++ b/test/define.yaml
@@ -582,7 +582,7 @@ unit:
# ----------------------------------------------------------------------------------------------------------------------------
- name: azure
- total: 3
+ total: 4
coverage:
- storage/azure/helper
diff --git a/test/src/build/config/config.yaml b/test/src/build/config/config.yaml
index fee5e0d02b..fada5f7c02 100644
--- a/test/src/build/config/config.yaml
+++ b/test/src/build/config/config.yaml
@@ -213,6 +213,8 @@ option:
log-level-stderr: {type: string, required: false, command: {noop: {}}}
pg: {type: string, required: false, command: {noop: {}}}
pg-path: {type: string, required: false, command: {noop: {}}}
+ repo-azure-key: {type: string, required: false, command: {noop: {}}}
+ repo-azure-key-type: {type: string-id, default: shared, allow-list: [auto, shared, sas], command: {noop: {}}}
repo-type: {type: string, required: false, command: {noop: {}}}
repo: {type: string, required: false, command: {noop: {}}}
spool-path: {type: string, required: false, command: {noop: {}}}
diff --git a/test/src/build/help/help.xml b/test/src/build/help/help.xml
index 65e273581b..c9c0ee7440 100644
--- a/test/src/build/help/help.xml
+++ b/test/src/build/help/help.xml
@@ -20,6 +20,8 @@
+
+
diff --git a/test/src/module/config/parseTest.c b/test/src/module/config/parseTest.c
index acdaf82469..fb9452f8dc 100644
--- a/test/src/module/config/parseTest.c
+++ b/test/src/module/config/parseTest.c
@@ -1189,6 +1189,23 @@ testRun(void)
cfgParseP(storageTest, strLstSize(argList), strLstPtr(argList), .noResetLogLevel = true), OptionRequiredError,
"backup command requires option: stanza");
+ // -------------------------------------------------------------------------------------------------------------------------
+ TEST_TITLE("Azure key not required when key-type is auto (Managed Identity)");
+
+ argList = strLstNew();
+ strLstAddZ(argList, TEST_BACKREST_EXE);
+ hrnCfgArgRawZ(argList, cfgOptPgPath, "/path/to/db");
+ hrnCfgArgRawZ(argList, cfgOptStanza, "db");
+ hrnCfgArgKeyRawZ(argList, cfgOptRepoType, 1, "azure");
+ hrnCfgArgKeyRawZ(argList, cfgOptRepoAzureContainer, 1, "container");
+ hrnCfgArgKeyRawStrId(argList, cfgOptRepoAzureKeyType, 1, strIdFromZ("auto"));
+ hrnCfgEnvKeyRawZ(cfgOptRepoAzureAccount, 1, "account");
+ strLstAddZ(argList, TEST_COMMAND_BACKUP);
+ // Should not throw OptionRequiredError for repo1-azure-key when key-type is auto
+ TEST_RESULT_VOID(
+ cfgParseP(storageTest, strLstSize(argList), strLstPtr(argList), .noResetLogLevel = true),
+ "Azure key not required when key-type is auto");
+
// -------------------------------------------------------------------------------------------------------------------------
TEST_TITLE("command-line option not allowed");
diff --git a/test/src/module/db/dbTest.c b/test/src/module/db/dbTest.c
index 39bd202eee..396f40bd23 100644
--- a/test/src/module/db/dbTest.c
+++ b/test/src/module/db/dbTest.c
@@ -17,9 +17,9 @@ Test Database
Macro to check that replay is making progress -- this does not seem useful enough to be included in the pq harness header
***********************************************************************************************************************************/
#define \
- HRN_PQ_SCRIPT_REPLAY_TARGET_REACHED_PROGRESS( \
- sessionParam, walNameParam, lsnNameParam, targetLsnParam, targetReachedParam, replayLsnParam, replayLastLsnParam, \
- replayProgressParam, sleepParam) \
+ HRN_PQ_SCRIPT_REPLAY_TARGET_REACHED_PROGRESS( \
+ sessionParam, walNameParam, lsnNameParam, targetLsnParam, targetReachedParam, replayLsnParam, replayLastLsnParam, \
+ replayProgressParam, sleepParam) \
{.session = sessionParam, \
.function = HRN_PQ_SENDQUERY, \
.param = \
@@ -44,9 +44,9 @@ Macro to check that replay is making progress -- this does not seem useful enoug
{.session = sessionParam, .function = HRN_PQ_GETRESULT, .resultNull = true}
#define \
- HRN_PQ_SCRIPT_REPLAY_TARGET_REACHED_PROGRESS_GE_10( \
- sessionParam, targetLsnParam, targetReachedParam, replayLsnParam, replayLastLsnParam, replayProgressParam, sleepParam) \
- HRN_PQ_SCRIPT_REPLAY_TARGET_REACHED_PROGRESS( \
+ HRN_PQ_SCRIPT_REPLAY_TARGET_REACHED_PROGRESS_GE_10( \
+ sessionParam, targetLsnParam, targetReachedParam, replayLsnParam, replayLastLsnParam, replayProgressParam, sleepParam) \
+ HRN_PQ_SCRIPT_REPLAY_TARGET_REACHED_PROGRESS( \
sessionParam, "wal", "lsn", targetLsnParam, targetReachedParam, replayLsnParam, replayLastLsnParam, replayProgressParam, \
sleepParam)
diff --git a/test/src/module/storage/azureTest.c b/test/src/module/storage/azureTest.c
index c609838ca6..2e46ff824d 100644
--- a/test/src/module/storage/azureTest.c
+++ b/test/src/module/storage/azureTest.c
@@ -3,6 +3,8 @@ Test Azure Storage
***********************************************************************************************************************************/
#include "common/io/fdRead.h"
#include "common/io/fdWrite.h"
+#include "common/io/http/client.h"
+#include "common/io/socket/client.h"
#include "storage/helper.h"
#include "common/harnessConfig.h"
@@ -101,7 +103,7 @@ testRequest(IoWrite *write, const char *verb, const char *path, TestRequestParam
// Add version
if (driver->sharedKey != NULL)
- strCatZ(request, "x-ms-version:2021-06-08\r\n");
+ strCatZ(request, "x-ms-version:2024-08-04\r\n");
// Complete headers
strCatZ(request, "\r\n");
@@ -383,6 +385,27 @@ testRun(void)
storageRepoGet(0, false), OptionInvalidValueError,
"invalid value for 'repo1-azure-key' option: base64 size 5 is not evenly divisible by 4\n"
"HINT: value must be valid base64 when 'repo1-azure-key-type = shared'.");
+
+ // -------------------------------------------------------------------------------------------------------------------------
+ TEST_TITLE("storage with auto key type (Managed Identity)");
+
+ argList = strLstNew();
+ hrnCfgArgRawZ(argList, cfgOptStanza, "test");
+ hrnCfgArgRawStrId(argList, cfgOptRepoType, STORAGE_AZURE_TYPE);
+ hrnCfgArgRawZ(argList, cfgOptRepoPath, "/repo");
+ hrnCfgArgRawZ(argList, cfgOptRepoAzureContainer, TEST_CONTAINER);
+ hrnCfgArgRawStrId(argList, cfgOptRepoAzureKeyType, storageAzureKeyTypeAuto);
+ hrnCfgEnvRawZ(cfgOptRepoAzureAccount, TEST_ACCOUNT);
+ HRN_CFG_LOAD(cfgCmdArchivePush, argList);
+
+ TEST_ASSIGN(storage, storageRepoGet(0, false), "get repo storage");
+ TEST_RESULT_STR_Z(storage->path, "/repo", "check path");
+ TEST_RESULT_STR(((StorageAzure *)storageDriver(storage))->account, TEST_ACCOUNT_STR, "check account");
+ TEST_RESULT_STR(((StorageAzure *)storageDriver(storage))->container, TEST_CONTAINER_STR, "check container");
+ TEST_RESULT_PTR(((StorageAzure *)storageDriver(storage))->sharedKey, NULL, "check shared key is null");
+ TEST_RESULT_PTR(((StorageAzure *)storageDriver(storage))->sasKey, NULL, "check sas key is null");
+ TEST_RESULT_PTR_NE(((StorageAzure *)storageDriver(storage))->credHttpClient, NULL, "check cred http client exists");
+ TEST_RESULT_STR_Z(((StorageAzure *)storageDriver(storage))->host, TEST_ACCOUNT ".blob.core.windows.net", "check host");
}
// *****************************************************************************************************************************
@@ -413,7 +436,7 @@ testRun(void)
TEST_RESULT_Z(
logBuf,
"{content-length: '0', host: 'account.blob.core.windows.net', date: 'Sun, 21 Jun 2020 12:46:19 GMT'"
- ", x-ms-version: '2021-06-08', authorization: 'SharedKey account:2HRoJbu+G0rqwMjG+6gsb8WWkVo9rJNrDywsrnkmQAE='}",
+ ", x-ms-version: '2024-08-04', authorization: 'SharedKey account:h9heYMD+ErrcIkJATG97G3L9gwom0TQYx/cEj4lAJG4='}",
"check headers");
// -------------------------------------------------------------------------------------------------------------------------
@@ -429,8 +452,8 @@ testRun(void)
TEST_RESULT_Z(
logBuf,
"{content-length: '44', content-md5: 'b64f49553d5c441652e95697a2c5949e', host: 'account.blob.core.windows.net'"
- ", date: 'Sun, 21 Jun 2020 12:46:19 GMT', x-ms-version: '2021-06-08'"
- ", authorization: 'SharedKey account:nuaRe9f/J91zHEE2x734ARyHJxd6Smju1j8qPrueE6o='}",
+ ", date: 'Sun, 21 Jun 2020 12:46:19 GMT', x-ms-version: '2024-08-04'"
+ ", authorization: 'SharedKey account:GrE62U88ziaAGq+chejwUKmaBOAsyj+QCjrykcE+O+c='}",
"check headers");
// -------------------------------------------------------------------------------------------------------------------------
@@ -451,6 +474,181 @@ testRun(void)
TEST_RESULT_VOID(FUNCTION_LOG_OBJECT_FORMAT(header, httpHeaderToLog, logBuf, sizeof(logBuf)), "httpHeaderToLog");
TEST_RESULT_Z(logBuf, "{content-length: '66', host: 'account.blob.core.usgovcloudapi.net'}", "check headers");
TEST_RESULT_STR_Z(httpQueryRenderP(query), "a=b&sig=key", "check query");
+
+ // -------------------------------------------------------------------------------------------------------------------------
+ TEST_TITLE("Managed Identity auth - initialization");
+
+ TEST_ASSIGN(
+ storage,
+ (StorageAzure *)storageDriver(
+ storageAzureNew(
+ STRDEF("/repo"), false, 0, NULL, TEST_CONTAINER_STR, TEST_ACCOUNT_STR, storageAzureKeyTypeAuto, NULL, 16, NULL,
+ STRDEF("blob.core.windows.net"), storageAzureUriStyleHost, 443, 1000, true, NULL, NULL)),
+ "new azure storage - auto key type");
+
+ TEST_RESULT_PTR_NE(storage->credHttpClient, NULL, "check cred http client exists");
+ TEST_RESULT_STR_Z(storage->credHost, "169.254.169.254", "check cred host");
+ TEST_RESULT_PTR(storage->sharedKey, NULL, "check shared key is null");
+ TEST_RESULT_PTR(storage->sasKey, NULL, "check sas key is null");
+ TEST_RESULT_PTR(storage->accessToken, NULL, "check access token is initially null");
+ TEST_RESULT_INT(storage->accessTokenExpirationTime, 0, "check access token expiration is initially 0");
+ }
+
+ // *****************************************************************************************************************************
+ if (testBegin("storageAzureAuth() - Managed Identity"))
+ {
+ HRN_FORK_BEGIN()
+ {
+ const unsigned int credPort = hrnServerPortNext();
+
+ HRN_FORK_CHILD_BEGIN(.prefix = "azure metadata server", .timeout = 5000)
+ {
+ TEST_RESULT_VOID(hrnServerRunP(HRN_FORK_CHILD_READ(), hrnServerProtocolSocket, credPort), "metadata server");
+ }
+ HRN_FORK_CHILD_END();
+
+ HRN_FORK_PARENT_BEGIN(.prefix = "azure client")
+ {
+ IoWrite *credService = hrnServerScriptBegin(HRN_FORK_PARENT_WRITE(0));
+ char logBuf[STACK_TRACE_PARAM_MAX];
+
+ // Create storage with auto key type
+ StorageAzure *storage = NULL;
+ TEST_ASSIGN(
+ storage,
+ (StorageAzure *)storageDriver(
+ storageAzureNew(
+ STRDEF("/repo"), false, 0, NULL, TEST_CONTAINER_STR, TEST_ACCOUNT_STR, storageAzureKeyTypeAuto, NULL,
+ 16, NULL, STRDEF("blob.core.windows.net"), storageAzureUriStyleHost, 443, 1000, true, NULL,
+ NULL)),
+ "new azure storage - auto key type");
+
+ // Override cred http client to point to our test server
+ // Note: In real usage, this would connect to 169.254.169.254:80, but for testing we use our mock server
+ // The old client will be freed when the storage object is freed
+ storage->credHttpClient = httpClientNew(sckClientNew(hrnServerHost(), credPort, 1000, 1000), 1000);
+ // Update credHost to match test server host since we're using a mock server
+ storage->credHost = hrnServerHost();
+
+ // -----------------------------------------------------------------------------------------------------------------
+ TEST_TITLE("Managed Identity auth - fetch token");
+
+ // Mock metadata endpoint response with access token
+ hrnServerScriptAccept(credService);
+ String *credRequest = strNew();
+ strCatZ(
+ credRequest,
+ "GET /metadata/identity/oauth2/token?api-version=2018-02-01&resource="
+ "https%3A%2F%2Faccount.blob.core.windows.net HTTP/1.1\r\n");
+ strCatFmt(credRequest, "user-agent:%s/%s\r\n", PROJECT_NAME, PROJECT_VERSION);
+ strCatFmt(credRequest, "Metadata:true\r\n");
+ strCatZ(credRequest, "content-length:0\r\n");
+ strCatFmt(credRequest, "host:%s\r\n", strZ(hrnServerHost()));
+ strCatZ(credRequest, "\r\n");
+ hrnServerScriptExpect(credService, credRequest);
+
+ // Response with access token (expires in 3600 seconds)
+ const String *tokenResponse = strNewFmt(
+ "HTTP/1.1 200 OK\r\n"
+ "content-type:application/json\r\n"
+ "content-length:%zu\r\n"
+ "\r\n"
+ "{\"access_token\":\"test-access-token-12345\",\"expires_in\":\"3600\"}",
+ sizeof("{\"access_token\":\"test-access-token-12345\",\"expires_in\":\"3600\"}") - 1);
+ hrnServerScriptReply(credService, tokenResponse);
+
+ // Set expiration time to 0 to force token fetch
+ storage->accessTokenExpirationTime = 0;
+
+ HttpHeader *header = httpHeaderAdd(httpHeaderNew(NULL), HTTP_HEADER_CONTENT_LENGTH_STR, ZERO_STR);
+ const String *dateTime = STRDEF("Sun, 21 Jun 2020 12:46:19 GMT");
+
+ TEST_RESULT_VOID(
+ storageAzureAuth(storage, HTTP_VERB_GET_STR, STRDEF("/path"), NULL, dateTime, header), "auth with token fetch");
+ TEST_RESULT_PTR_NE(storage->accessToken, NULL, "check access token was set");
+ TEST_RESULT_STR_Z(storage->accessToken, "test-access-token-12345", "check access token value");
+ TEST_RESULT_BOOL(storage->accessTokenExpirationTime > 0, true, "check expiration time was set");
+
+ TEST_RESULT_VOID(FUNCTION_LOG_OBJECT_FORMAT(header, httpHeaderToLog, logBuf, sizeof(logBuf)), "httpHeaderToLog");
+ TEST_RESULT_Z(
+ logBuf,
+ "{content-length: '0', host: 'account.blob.core.windows.net', x-ms-version: '2024-08-04'"
+ ", authorization: 'Bearer test-access-token-12345'}",
+ "check headers with bearer token");
+
+ // -----------------------------------------------------------------------------------------------------------------
+ TEST_TITLE("Managed Identity auth - use cached token");
+
+ // Clear the server script to ensure no new request is made
+ hrnServerScriptClose(credService);
+ hrnServerScriptAccept(credService);
+
+ // Set expiration time far in the future to use cached token
+ storage->accessTokenExpirationTime = time(NULL) + 3600;
+
+ header = httpHeaderAdd(httpHeaderNew(NULL), HTTP_HEADER_CONTENT_LENGTH_STR, ZERO_STR);
+
+ TEST_RESULT_VOID(
+ storageAzureAuth(storage, HTTP_VERB_GET_STR, STRDEF("/path"), NULL, dateTime, header),
+ "auth with cached token");
+ TEST_RESULT_VOID(FUNCTION_LOG_OBJECT_FORMAT(header, httpHeaderToLog, logBuf, sizeof(logBuf)), "httpHeaderToLog");
+ TEST_RESULT_Z(
+ logBuf,
+ "{content-length: '0', host: 'account.blob.core.windows.net', x-ms-version: '2024-08-04'"
+ ", authorization: 'Bearer test-access-token-12345'}",
+ "check headers with cached bearer token");
+
+ // -----------------------------------------------------------------------------------------------------------------
+ TEST_TITLE("Managed Identity auth - token fetch error");
+
+ hrnServerScriptClose(credService);
+ hrnServerScriptAccept(credService);
+
+ // Mock error response from metadata endpoint
+ credRequest = strNew();
+ strCatZ(
+ credRequest,
+ "GET /metadata/identity/oauth2/token?api-version=2018-02-01&resource="
+ "https%3A%2F%2Faccount.blob.core.windows.net HTTP/1.1\r\n");
+ strCatFmt(credRequest, "user-agent:%s/%s\r\n", PROJECT_NAME, PROJECT_VERSION);
+ strCatFmt(credRequest, "Metadata:true\r\n");
+ strCatZ(credRequest, "content-length:0\r\n");
+ strCatFmt(credRequest, "host:%s\r\n", strZ(hrnServerHost()));
+ strCatZ(credRequest, "\r\n");
+ hrnServerScriptExpect(credService, credRequest);
+
+ tokenResponse = strNewZ(
+ "HTTP/1.1 403 Forbidden\r\n"
+ "content-length:0\r\n"
+ "\r\n");
+ hrnServerScriptReply(credService, tokenResponse);
+ hrnServerScriptClose(credService);
+
+ // Set expiration time to 0 to force token fetch
+ storage->accessTokenExpirationTime = 0;
+ storage->accessToken = NULL;
+
+ header = httpHeaderAdd(httpHeaderNew(NULL), HTTP_HEADER_CONTENT_LENGTH_STR, ZERO_STR);
+
+ TEST_ERROR_FMT(
+ storageAzureAuth(storage, HTTP_VERB_GET_STR, STRDEF("/path"), NULL, dateTime, header), ProtocolError,
+ "HTTP request failed with 403 (Forbidden):\n"
+ "*** Path/Query ***:\n"
+ "GET /metadata/identity/oauth2/token?api-version=2018-02-01&resource="
+ "https%%3A%%2F%%2Faccount.blob.core.windows.net\n"
+ "*** Request Headers ***:\n"
+ "Metadata: true\n"
+ "content-length: 0\n"
+ "host: %s\n"
+ "*** Response Headers ***:\n"
+ "content-length: 0",
+ strZ(hrnServerHost()));
+
+ hrnServerScriptEnd(credService);
+ }
+ HRN_FORK_PARENT_END();
+ }
+ HRN_FORK_END();
}
// *****************************************************************************************************************************
@@ -602,7 +800,7 @@ testRun(void)
"content-length: 0\n"
"date: \n"
"host: %s\n"
- "x-ms-version: 2021-06-08\n"
+ "x-ms-version: 2024-08-04\n"
"*** Response Headers ***:\n"
"content-length: 7\n"
"*** Response Content ***:\n"
@@ -630,7 +828,7 @@ testRun(void)
"host: %s\n"
"x-ms-blob-type: BlockBlob\n"
"x-ms-tags: %%20Key%%202=%%20Value%%202&Key1=Value1\n"
- "x-ms-version: 2021-06-08",
+ "x-ms-version: 2024-08-04",
strZ(hrnServerHost()));
// -----------------------------------------------------------------------------------------------------------------