diff --git a/dotnet/src/Microsoft.Agents.AI.Abstractions/AIAgent.cs b/dotnet/src/Microsoft.Agents.AI.Abstractions/AIAgent.cs
index 6ebdfa7978..3431a4b52b 100644
--- a/dotnet/src/Microsoft.Agents.AI.Abstractions/AIAgent.cs
+++ b/dotnet/src/Microsoft.Agents.AI.Abstractions/AIAgent.cs
@@ -20,6 +20,19 @@ namespace Microsoft.Agents.AI;
/// serves as the foundational class for implementing AI agents that can participate in conversations
/// and process user requests. An agent instance may participate in multiple concurrent conversations, and each conversation
/// may involve multiple agents working together.
+///
+/// Security considerations: An <see cref="AIAgent"/> orchestrates data flow across trust boundaries —
+/// messages are sent to external AI services, context providers, chat history stores, and function tools. Agent Framework
+/// passes messages through as-is without validation or sanitization. Developers must be aware that:
+///
+/// - User-supplied messages may contain prompt injection attempts designed to manipulate LLM behavior.
+/// - LLM responses should be treated as untrusted output — they may contain hallucinations, malicious payloads (e.g., scripts, SQL),
+/// or content influenced by indirect prompt injection. Always validate and sanitize LLM output before rendering in HTML, executing as code,
+/// or using in database queries.
+/// - Messages with different roles carry different trust levels: system messages have the highest trust and must be developer-controlled;
+/// user, assistant, and tool messages should be treated as untrusted.
+///
+///
///
[DebuggerDisplay("{DebuggerDisplay,nq}")]
public abstract partial class AIAgent
@@ -165,6 +178,11 @@ public ValueTask CreateSessionAsync(CancellationToken cancellation
/// This method enables saving conversation sessions to persistent storage,
/// allowing conversations to resume across application restarts or be migrated between
/// different agent instances. Use <see cref="DeserializeSessionAsync"/> to restore the session.
+ ///
+ /// Security consideration: Serialized sessions may contain conversation content, session identifiers,
+ /// and other potentially sensitive data including PII. Ensure that serialized session data is stored securely with
+ /// appropriate access controls and encryption at rest.
+ ///
///
public ValueTask SerializeSessionAsync(AgentSession session, JsonSerializerOptions? jsonSerializerOptions = null, CancellationToken cancellationToken = default)
=> this.SerializeSessionCoreAsync(session, jsonSerializerOptions, cancellationToken);
@@ -194,6 +212,12 @@ public ValueTask SerializeSessionAsync(AgentSession session, JsonSe
/// This method enables restoration of conversation sessions from previously saved state,
/// allowing conversations to resume across application restarts or be migrated between
/// different agent instances.
+ ///
+ /// Security consideration: Restoring a session from an untrusted source is equivalent to accepting untrusted input.
+ /// Serialized sessions may contain conversation content, session identifiers, and potentially sensitive data. A compromised
+ /// storage backend could alter message roles to escalate trust, or inject adversarial content that influences LLM behavior.
+ /// Treat serialized session data as sensitive and ensure it is stored and transmitted securely.
+ ///
///
public ValueTask DeserializeSessionAsync(JsonElement serializedState, JsonSerializerOptions? jsonSerializerOptions = null, CancellationToken cancellationToken = default)
=> this.DeserializeSessionCoreAsync(serializedState, jsonSerializerOptions, cancellationToken);
@@ -301,6 +325,11 @@ public Task RunAsync(
/// The messages are processed in the order provided and become part of the conversation history.
/// The agent's response will also be added to the session if one is provided.
///
+ ///
+ /// Security consideration: Agent Framework does not validate or sanitize message content — it is passed through
+ /// to the underlying AI service as-is. If input messages include untrusted user content, developers should be aware of prompt injection risks.
+ /// System-role messages must be developer-controlled and should never contain end-user input.
+ ///
///
public Task RunAsync(
IEnumerable messages,
@@ -426,6 +455,11 @@ public IAsyncEnumerable RunStreamingAsync(
/// Each update represents a portion of the complete response, allowing consumers
/// to display partial results, implement progressive loading, or provide immediate feedback to users.
///
+ ///
+ /// Security consideration: Agent Framework does not validate or sanitize message content — it is passed through
+ /// to the underlying AI service as-is. If input messages include untrusted user content, developers should be aware of prompt injection risks.
+ /// System-role messages must be developer-controlled and should never contain end-user input.
+ ///
///
public async IAsyncEnumerable RunStreamingAsync(
IEnumerable messages,
diff --git a/dotnet/src/Microsoft.Agents.AI.Abstractions/AIContextProvider.cs b/dotnet/src/Microsoft.Agents.AI.Abstractions/AIContextProvider.cs
index 5ccf139363..9c1286c9b9 100644
--- a/dotnet/src/Microsoft.Agents.AI.Abstractions/AIContextProvider.cs
+++ b/dotnet/src/Microsoft.Agents.AI.Abstractions/AIContextProvider.cs
@@ -28,6 +28,14 @@ namespace Microsoft.Agents.AI;
/// to provide context, and optionally called at the end of invocation via
/// to process results.
///
+///
+/// Security considerations: Context providers may inject messages with any role, including system, which
+/// has the highest trust level and directly shapes LLM behavior. Developers must ensure that all providers attached to an agent
+/// are trusted. Agent Framework does not validate or filter the data returned by providers — it is accepted as-is and merged into
+/// the request context. If a provider retrieves data from an external source (e.g., a vector database or memory service), be aware
+/// that a compromised data source could introduce adversarial content designed to manipulate LLM behavior via indirect prompt injection.
+/// Implementers should validate and sanitize data retrieved from external sources before returning it.
+///
///
public abstract class AIContextProvider
{
@@ -96,6 +104,11 @@ protected AIContextProvider(
/// - Injecting contextual messages from conversation history
///
///
+ ///
+ /// Security consideration: Data retrieved from external sources (e.g., vector databases, memory services, or
+ /// knowledge bases) may contain adversarial content designed to influence LLM behavior via indirect prompt injection.
+ /// Implementers should validate data integrity and consider the trustworthiness of the data source.
+ ///
///
public ValueTask InvokingAsync(InvokingContext context, CancellationToken cancellationToken = default)
=> this.InvokingCoreAsync(Throw.IfNull(context), cancellationToken);
@@ -195,6 +208,11 @@ protected virtual async ValueTask InvokingCoreAsync(InvokingContext c
/// In contrast with , this method only returns additional context to be merged with the input,
/// while is responsible for returning the full merged for the invocation.
///
+ ///
+ /// Security consideration: Any messages, tools, or instructions returned by this method will be merged into the
+ /// AI request context. If data is retrieved from external or untrusted sources, implementers should validate and sanitize it
+ /// to prevent indirect prompt injection attacks.
+ ///
///
/// Contains the request context including the caller provided messages that will be used by the agent for this invocation.
/// The to monitor for cancellation requests. The default is .
@@ -299,6 +317,10 @@ protected virtual ValueTask InvokedCoreAsync(InvokedContext context, Cancellatio
///
/// The default implementation of <see cref="InvokedCoreAsync"/> only calls this method if the invocation succeeded.
///
+ ///
+ /// Security consideration: Messages being processed/stored may contain PII and sensitive conversation content.
+ /// Implementers should ensure appropriate encryption at rest and access controls for the storage backend.
+ ///
///
protected virtual ValueTask StoreAIContextAsync(InvokedContext context, CancellationToken cancellationToken = default) =>
default;
diff --git a/dotnet/src/Microsoft.Agents.AI.Abstractions/AgentSession.cs b/dotnet/src/Microsoft.Agents.AI.Abstractions/AgentSession.cs
index a154b0a9f5..1960a4ce06 100644
--- a/dotnet/src/Microsoft.Agents.AI.Abstractions/AgentSession.cs
+++ b/dotnet/src/Microsoft.Agents.AI.Abstractions/AgentSession.cs
@@ -42,6 +42,15 @@ namespace Microsoft.Agents.AI;
/// and the method
/// can be used to deserialize the session.
///
+///
+/// Security considerations: Serialized sessions may contain conversation content, session identifiers,
+/// and other potentially sensitive data including PII. Developers should:
+///
+/// - Treat serialized session data as sensitive and store it securely with appropriate access controls and encryption at rest.
+/// - Treat restoring a session from an untrusted source as equivalent to accepting untrusted input. A compromised storage backend
+/// could alter message roles to escalate trust, or inject adversarial content that influences LLM behavior.
+///
+///
///
///
///
@@ -67,6 +76,11 @@ protected AgentSession(AgentSessionStateBag stateBag)
///
/// Gets any arbitrary state associated with this session.
///
+ ///
+/// Data stored in the <see cref="StateBag"/> will be included when the session is serialized.
+ /// Avoid storing secrets, credentials, or highly sensitive data in the state bag without appropriate encryption,
+ /// as this data may be persisted to external storage.
+ ///
[JsonPropertyName("stateBag")]
public AgentSessionStateBag StateBag { get; protected set; } = new();
diff --git a/dotnet/src/Microsoft.Agents.AI.Abstractions/ChatHistoryProvider.cs b/dotnet/src/Microsoft.Agents.AI.Abstractions/ChatHistoryProvider.cs
index c7dfb4a233..f4f198df97 100644
--- a/dotnet/src/Microsoft.Agents.AI.Abstractions/ChatHistoryProvider.cs
+++ b/dotnet/src/Microsoft.Agents.AI.Abstractions/ChatHistoryProvider.cs
@@ -37,6 +37,14 @@ namespace Microsoft.Agents.AI;
/// A <see cref="ChatHistoryProvider"/> is only relevant for scenarios where the underlying AI service that the agent is using
/// does not use in-service chat history storage.
///
+///
+/// Security considerations: Agent Framework does not validate or filter the messages returned by the provider
+/// during load — they are accepted as-is and treated identically to user-supplied messages. Implementers must ensure that only
+/// trusted data is returned. If the underlying storage is compromised, adversarial content could influence LLM behavior via
+/// indirect prompt injection — for example, injected messages could alter the conversation context or impersonate different roles.
+/// Messages stored in chat history may contain PII and sensitive conversation content; implementers should consider encryption
+/// at rest and appropriate access controls for the storage backend.
+///
///
public abstract class ChatHistoryProvider
{
@@ -159,6 +167,11 @@ protected virtual async ValueTask> InvokingCoreAsync(In
/// Messages are returned in chronological order to maintain proper conversation flow and context for the agent.
/// The oldest messages appear first in the collection, followed by more recent messages.
///
+ ///
+ /// Security consideration: Messages loaded from storage should be treated with the same caution as user-supplied
+ /// messages. A compromised storage backend could alter message roles to escalate trust (e.g., changing user messages to
+ /// system messages) or inject adversarial content that influences LLM behavior.
+ ///
///
/// Contains the request context including the caller provided messages that will be used by the agent for this invocation.
/// The to monitor for cancellation requests. The default is .
@@ -273,6 +286,10 @@ protected virtual ValueTask InvokedCoreAsync(InvokedContext context, Cancellatio
///
/// The default implementation of <see cref="InvokedCoreAsync"/> only calls this method if the invocation succeeded.
///
+ ///
+ /// Security consideration: Messages being stored may contain PII and sensitive conversation content.
+ /// Implementers should ensure appropriate encryption at rest and access controls for the storage backend.
+ ///
///
protected virtual ValueTask StoreChatHistoryAsync(InvokedContext context, CancellationToken cancellationToken = default) =>
default;
diff --git a/dotnet/src/Microsoft.Agents.AI.CosmosNoSql/CosmosChatHistoryProvider.cs b/dotnet/src/Microsoft.Agents.AI.CosmosNoSql/CosmosChatHistoryProvider.cs
index c9238889c9..a8096b89c3 100644
--- a/dotnet/src/Microsoft.Agents.AI.CosmosNoSql/CosmosChatHistoryProvider.cs
+++ b/dotnet/src/Microsoft.Agents.AI.CosmosNoSql/CosmosChatHistoryProvider.cs
@@ -17,6 +17,24 @@ namespace Microsoft.Agents.AI;
///
/// Provides a Cosmos DB implementation of the abstract class.
///
+///
+///
+/// Security considerations:
+///
+/// - PII and sensitive data: Chat history stored in Cosmos DB may contain PII, sensitive conversation
+/// content, and system instructions. Ensure the Cosmos DB account is configured with appropriate access controls, encryption at rest,
+/// and network security (e.g., private endpoints, virtual network rules). The property can be used to
+/// automatically expire messages and limit data retention.
+/// - Compromised store risks: Agent Framework does not validate or filter messages loaded from the
+/// store — they are accepted as-is. If the Cosmos DB store is compromised, adversarial content could be injected into the conversation
+/// context, potentially influencing LLM behavior via indirect prompt injection. Altered message roles (e.g., changing user to
+/// system) could escalate trust levels.
+/// - Authentication: Agent Framework does not manage authentication or encryption for the Cosmos DB
+/// connection — these are the responsibility of the <see cref="CosmosClient"/> configuration. Use managed identity
+/// or token-based authentication where possible, and avoid embedding connection strings with keys in source code.
+///
+///
+///
[RequiresUnreferencedCode("The CosmosChatHistoryProvider uses JSON serialization which is incompatible with trimming.")]
[RequiresDynamicCode("The CosmosChatHistoryProvider uses JSON serialization which is incompatible with NativeAOT.")]
public sealed class CosmosChatHistoryProvider : ChatHistoryProvider, IDisposable
diff --git a/dotnet/src/Microsoft.Agents.AI.Mem0/Mem0Provider.cs b/dotnet/src/Microsoft.Agents.AI.Mem0/Mem0Provider.cs
index 678905e395..d7c54e2114 100644
--- a/dotnet/src/Microsoft.Agents.AI.Mem0/Mem0Provider.cs
+++ b/dotnet/src/Microsoft.Agents.AI.Mem0/Mem0Provider.cs
@@ -13,16 +13,38 @@
namespace Microsoft.Agents.AI.Mem0;
+#pragma warning disable IDE0001 // Simplify Names - Microsoft.Extensions.Logging.LogLevel.Trace doesn't get found in net472 when removing the namespace.
///
/// Provides a Mem0 backed that persists conversation messages as memories
/// and retrieves related memories to augment the agent invocation context.
///
///
+///
/// The provider stores user, assistant and system messages as Mem0 memories and retrieves relevant memories
/// for new invocations using a semantic search endpoint. Retrieved memories are injected as user messages
/// to the model, prefixed by a configurable context prompt.
+///
+///
+/// Security considerations:
+///
+/// - External service trust: This provider communicates with an external Mem0 service over HTTP.
+/// Agent Framework does not manage authentication, encryption, or connection details for this service — these are the responsibility
+/// of the supplied <see cref="HttpClient"/> configuration. Ensure the HTTP client is configured with appropriate authentication
+/// and uses HTTPS to protect data in transit.
+/// - PII and sensitive data: Conversation messages (including user inputs, LLM responses, and system
+/// instructions) are sent to the external Mem0 service for storage. These messages may contain PII or sensitive information.
+/// Ensure the Mem0 service is configured with appropriate data retention policies and access controls.
+/// - Indirect prompt injection: Memories retrieved from the Mem0 service are injected into the LLM
+/// context as user messages. If the memory store is compromised, adversarial content could influence LLM behavior. The data
+/// returned from the service is accepted as-is without validation or sanitization.
+/// - Trace logging: When <see cref="Microsoft.Extensions.Logging.LogLevel.Trace"/> is enabled,
+/// full memory content (including search queries and results) may be logged. This data may contain PII and should not be enabled
+/// in production environments.
+///
+///
///
public sealed class Mem0Provider : MessageAIContextProvider
+#pragma warning restore IDE0001 // Simplify Names
{
private const string DefaultContextPrompt = "## Memories\nConsider the following memories when answering user questions:";
diff --git a/dotnet/src/Microsoft.Agents.AI/ChatClient/ChatClientAgent.cs b/dotnet/src/Microsoft.Agents.AI/ChatClient/ChatClientAgent.cs
index e4b772160e..adb6eb9f83 100644
--- a/dotnet/src/Microsoft.Agents.AI/ChatClient/ChatClientAgent.cs
+++ b/dotnet/src/Microsoft.Agents.AI/ChatClient/ChatClientAgent.cs
@@ -17,6 +17,25 @@ namespace Microsoft.Agents.AI;
///
/// Provides an that delegates to an implementation.
///
+///
+///
+/// Security considerations: The <see cref="ChatClientAgent"/> orchestrates data flow across trust boundaries.
+/// The underlying AI service is an external endpoint and LLM responses should be treated as untrusted output. Developers should be aware of:
+///
+/// - Hallucination: LLMs may generate plausible-sounding but factually incorrect information.
+/// Do not treat LLM output as authoritative without verification.
+/// - Indirect prompt injection: Data retrieved by tools, AI context providers, or chat history providers may
+/// contain adversarial content designed to influence LLM behavior or exfiltrate data through tool calls.
+/// - Malicious payloads: LLM output may contain content that is harmful if rendered or executed without
+/// sanitization — for example, HTML/JavaScript for cross-site scripting, SQL for injection, or shell commands.
+/// - Tool invocation: By default, all tools provided to the agent are invoked without user approval.
+/// The AI selects which functions to call and with what arguments. Function arguments should be treated as untrusted input.
+/// Developers should require explicit approval for tools with side effects, data sensitivity, or irreversibility.
+///
+/// Developers should validate and sanitize LLM output before rendering it in HTML, executing it as code, using it in database queries,
+/// or passing it to any security-sensitive context. Apply defense-in-depth by combining tool approval requirements with output validation.
+///
+///
public sealed partial class ChatClientAgent : AIAgent
{
private readonly ChatClientAgentOptions? _agentOptions;
@@ -44,6 +63,9 @@ public sealed partial class ChatClientAgent : AIAgent
/// Optional collection of tools that the agent can invoke during conversations.
/// These tools augment any tools that may be provided to the agent via when
/// the agent is run.
+ /// By default, all provided tools are invoked without user approval. The AI selects which functions to call and chooses
+ /// the arguments — these arguments should be treated as untrusted input. Developers should require explicit approval
+ /// for tools that have side effects, access sensitive data, or perform irreversible operations.
///
///
/// Optional logger factory for creating loggers used by the agent and its components.
diff --git a/dotnet/src/Microsoft.Agents.AI/Memory/ChatHistoryMemoryProvider.cs b/dotnet/src/Microsoft.Agents.AI/Memory/ChatHistoryMemoryProvider.cs
index 0cc35fe85e..6881f7303f 100644
--- a/dotnet/src/Microsoft.Agents.AI/Memory/ChatHistoryMemoryProvider.cs
+++ b/dotnet/src/Microsoft.Agents.AI/Memory/ChatHistoryMemoryProvider.cs
@@ -13,6 +13,7 @@
namespace Microsoft.Agents.AI;
+#pragma warning disable IDE0001 // Simplify Names - Microsoft.Extensions.Logging.LogLevel.Trace doesn't get found in net472 when removing the namespace.
///
/// A context provider that stores all chat history in a vector store and is able to
/// retrieve related chat history later to augment the current conversation.
@@ -33,8 +34,25 @@ namespace Microsoft.Agents.AI;
/// exposes a function tool that the model can invoke to retrieve relevant memories on demand instead of
/// injecting them automatically on each invocation.
///
+///
+/// Security considerations:
+///
+/// - Indirect prompt injection: Messages retrieved from the vector store via semantic search
+/// are injected into the LLM context. If the vector store is compromised, adversarial content could influence LLM behavior.
+/// The data returned from the store is accepted as-is without validation or sanitization.
+/// - PII and sensitive data: Conversation messages (including user inputs and LLM responses)
+/// are stored as vectors in the underlying store. These messages may contain PII or sensitive information. Ensure the vector
+/// store is configured with appropriate access controls and encryption at rest.
+/// - On-demand search tool: When using the exposed search function tool,
+/// the AI model controls when and what to search for. The search query is AI-generated and should be treated as untrusted input
+/// by the vector store implementation.
+/// - Trace logging: When <see cref="Microsoft.Extensions.Logging.LogLevel.Trace"/> is enabled,
+/// full search queries and results may be logged. This data may contain PII.
+///
+///
///
public sealed class ChatHistoryMemoryProvider : MessageAIContextProvider, IDisposable
+#pragma warning restore IDE0001 // Simplify Names
{
private const string DefaultContextPrompt = "## Memories\nConsider the following memories when answering user questions:";
private const int DefaultMaxResults = 3;
diff --git a/dotnet/src/Microsoft.Agents.AI/OpenTelemetryAgent.cs b/dotnet/src/Microsoft.Agents.AI/OpenTelemetryAgent.cs
index 7ec8a53161..fd1c2fd7f5 100644
--- a/dotnet/src/Microsoft.Agents.AI/OpenTelemetryAgent.cs
+++ b/dotnet/src/Microsoft.Agents.AI/OpenTelemetryAgent.cs
@@ -70,6 +70,12 @@ public OpenTelemetryAgent(AIAgent innerAgent, string? sourceName = null) : base(
/// and outputs, such as message content, function call arguments, and function call results.
/// The default value can be overridden by setting the OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT
/// environment variable to "true". Explicitly setting this property will override the environment variable.
+ ///
+ /// Security consideration: When sensitive data capture is enabled, the full text of chat messages —
+ /// including user inputs, LLM responses, function call arguments, and function results — is emitted as telemetry.
+ /// This data may contain PII or other sensitive information. Ensure that your telemetry pipeline is configured
+ /// with appropriate access controls and data retention policies.
+ ///
///
public bool EnableSensitiveData
{
diff --git a/dotnet/src/Microsoft.Agents.AI/TextSearchProvider.cs b/dotnet/src/Microsoft.Agents.AI/TextSearchProvider.cs
index 11611f0f69..e389b02294 100644
--- a/dotnet/src/Microsoft.Agents.AI/TextSearchProvider.cs
+++ b/dotnet/src/Microsoft.Agents.AI/TextSearchProvider.cs
@@ -31,6 +31,18 @@ namespace Microsoft.Agents.AI;
/// to the current request messages when forming the search input. This can improve search relevance by providing
/// multi-turn context to the retrieval layer without permanently altering the conversation history.
///
+///
+/// Security considerations: Search results retrieved from external sources are injected into the LLM context and may
+/// contain adversarial content designed to manipulate LLM behavior via indirect prompt injection. Developers should be aware that:
+///
+/// - The search query may be constructed from user input or LLM-generated content, both of which are untrusted.
+/// Implementers of the search delegate should validate search inputs and apply appropriate access controls to search results.
+/// - Retrieved documents are formatted and injected as messages in the AI request context. If the external data source
+/// is compromised, adversarial content could influence the LLM's responses.
+/// - When using the on-demand search function tool, the AI model controls
+/// when and what to search for — the search query text is AI-generated and should be treated as untrusted input by the search implementation.
+///
+///
///
public sealed class TextSearchProvider : MessageAIContextProvider
{