From 6da595a9550a93787f226b758bc0b4f1f49fc82a Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Tue, 3 Mar 2026 10:57:42 +0100 Subject: [PATCH 01/10] Python: expand Foundry tool helpers across Azure clients Align hosted tool parameter typing for file_ids/vector_store_ids, split generic web search from Bing tool variants, add/restore test coverage, and update provider samples for direct client helper usage. Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- .../agent_framework_azure_ai/_client.py | 205 ++++++- .../azure-ai/tests/test_azure_ai_client.py | 158 ++++++ .../packages/core/agent_framework/_clients.py | 17 +- .../azure/_responses_client.py | 49 +- .../core/agent_framework/azure/_shared.py | 502 +++++++++++++++++- .../openai/_responses_client.py | 84 ++- .../azure/test_azure_responses_client.py | 100 ++++ .../openai/test_openai_responses_client.py | 44 ++ .../azure_ai/azure_ai_with_agent_to_agent.py | 15 +- .../azure_ai/azure_ai_with_azure_ai_search.py | 18 +- .../azure_ai_with_bing_custom_search.py | 15 +- .../azure_ai/azure_ai_with_bing_grounding.py | 14 +- .../azure_ai_with_browser_automation.py | 12 +- .../azure_ai/azure_ai_with_file_search.py | 2 +- .../azure_ai/azure_ai_with_memory_search.py | 13 +- .../azure_ai_with_microsoft_fabric.py | 14 +- .../azure_ai/azure_ai_with_openapi.py | 17 +- .../azure_ai/azure_ai_with_sharepoint.py | 14 +- .../providers/azure_openai/README.md | 6 + ...ure_responses_client_with_foundry_tools.py | 124 +++++ 20 files changed, 1276 insertions(+), 147 deletions(-) create mode 100644 python/samples/02-agents/providers/azure_openai/azure_responses_client_with_foundry_tools.py diff --git a/python/packages/azure-ai/agent_framework_azure_ai/_client.py b/python/packages/azure-ai/agent_framework_azure_ai/_client.py index 7c698847cc..092c7e6e10 100644 --- a/python/packages/azure-ai/agent_framework_azure_ai/_client.py +++ b/python/packages/azure-ai/agent_framework_azure_ai/_client.py @@ -31,12 +31,22 @@ from 
agent_framework._settings import load_settings from agent_framework._tools import ToolTypes from agent_framework.azure._entra_id_authentication import AzureCredentialTypes +from agent_framework.azure._shared import ( + create_bing_tool, + create_a2a_tool, + create_azure_ai_search_tool, + create_browser_automation_tool, + create_fabric_data_agent_tool, + create_memory_search_tool, + create_openapi_tool, + create_sharepoint_grounding_tool, + create_web_search_tool, +) from agent_framework.observability import ChatTelemetryLayer from agent_framework.openai import OpenAIResponsesOptions from agent_framework.openai._responses_client import RawOpenAIResponsesClient from azure.ai.projects.aio import AIProjectClient from azure.ai.projects.models import ( - ApproximateLocation, CodeInterpreterTool, CodeInterpreterToolAuto, ImageGenTool, @@ -827,17 +837,61 @@ def _enrich_update(update: ChatResponseUpdate) -> ChatResponseUpdate: # region Hosted Tool Factory Methods (Azure-specific overrides) + @staticmethod + def _normalize_hosted_ids( + value: str | Content | Sequence[str | Content] | None, + *, + expected_content_type: Literal["hosted_file", "hosted_vector_store"], + content_id_field: Literal["file_id", "vector_store_id"], + parameter_name: Literal["file_ids", "vector_store_ids"], + ) -> list[str] | None: + """Normalize string/Content id inputs with strict hosted content validation.""" + if value is None: + return None + + items: list[str | Content] + if isinstance(value, (str, Content)): + items = [value] + else: + items = list(value) + + normalized_ids: list[str] = [] + for item in items: + if isinstance(item, str): + normalized_ids.append(item) + continue + + if isinstance(item, Content): + if item.type != expected_content_type: + raise TypeError( + f"{parameter_name} accepts string IDs or Content of type {expected_content_type}." 
+ ) + content_id = getattr(item, content_id_field) + if not content_id: + raise ValueError( + f"{parameter_name} Content items must include '{content_id_field}'." + ) + normalized_ids.append(content_id) + continue + + raise TypeError( + f"{parameter_name} accepts string IDs or Content of type {expected_content_type}." + ) + + return normalized_ids + @staticmethod def get_code_interpreter_tool( # type: ignore[override] *, - file_ids: list[str] | None = None, + file_ids: str | Content | Sequence[str | Content] | None = None, container: Literal["auto"] | dict[str, Any] = "auto", **kwargs: Any, ) -> CodeInterpreterTool: """Create a code interpreter tool configuration for Azure AI Projects. Keyword Args: - file_ids: Optional list of file IDs to make available to the code interpreter. + file_ids: File IDs for the code interpreter. Accepts a string ID, hosted_file Content, + or a sequence containing either form. container: Container configuration. Use "auto" for automatic container management. Note: Custom container settings from this parameter are not used by Azure AI Projects; use file_ids instead. 
@@ -856,14 +910,21 @@ def get_code_interpreter_tool( # type: ignore[override] """ # Extract file_ids from container if provided as dict and file_ids not explicitly set if file_ids is None and isinstance(container, dict): - file_ids = container.get("file_ids") - tool_container = CodeInterpreterToolAuto(file_ids=file_ids if file_ids else None) + file_ids = cast("str | Content | Sequence[str | Content] | None", container.get("file_ids")) + + normalized_file_ids = RawAzureAIClient._normalize_hosted_ids( + file_ids, + expected_content_type="hosted_file", + content_id_field="file_id", + parameter_name="file_ids", + ) + tool_container = CodeInterpreterToolAuto(file_ids=normalized_file_ids if normalized_file_ids else None) return CodeInterpreterTool(container=tool_container, **kwargs) @staticmethod def get_file_search_tool( *, - vector_store_ids: list[str], + vector_store_ids: str | Content | Sequence[str | Content], max_num_results: int | None = None, ranking_options: dict[str, Any] | None = None, filters: dict[str, Any] | None = None, @@ -872,7 +933,8 @@ def get_file_search_tool( """Create a file search tool configuration for Azure AI Projects. Keyword Args: - vector_store_ids: List of vector store IDs to search. + vector_store_ids: Vector store IDs to search. Accepts a string ID, + hosted_vector_store Content, or a sequence containing either form. max_num_results: Maximum number of results to return (1-50). ranking_options: Ranking options for search results. filters: A filter to apply (ComparisonFilter or CompoundFilter). 
@@ -894,10 +956,16 @@ def get_file_search_tool( ) agent = ChatAgent(client, tools=[tool]) """ - if not vector_store_ids: + normalized_vector_store_ids = RawAzureAIClient._normalize_hosted_ids( + vector_store_ids, + expected_content_type="hosted_vector_store", + content_id_field="vector_store_id", + parameter_name="vector_store_ids", + ) + if not normalized_vector_store_ids: raise ValueError("File search tool requires 'vector_store_ids' to be specified.") return ProjectsFileSearchTool( - vector_store_ids=vector_store_ids, + vector_store_ids=normalized_vector_store_ids, max_num_results=max_num_results, ranking_options=ranking_options, # type: ignore[arg-type] filters=filters, # type: ignore[arg-type] @@ -911,7 +979,7 @@ def get_web_search_tool( # type: ignore[override] search_context_size: Literal["low", "medium", "high"] | None = None, **kwargs: Any, ) -> WebSearchPreviewTool: - """Create a web search preview tool configuration for Azure AI Projects. + """Create a generic web search preview tool configuration for Azure AI Projects. Keyword Args: user_location: Location context for search results. 
Dict with keys like @@ -937,17 +1005,55 @@ def get_web_search_tool( # type: ignore[override] search_context_size="high", ) """ - ws_tool = WebSearchPreviewTool(search_context_size=search_context_size, **kwargs) - - if user_location: - ws_tool.user_location = ApproximateLocation( - city=user_location.get("city"), - country=user_location.get("country"), - region=user_location.get("region"), - timezone=user_location.get("timezone"), - ) + return create_web_search_tool( + user_location=user_location, + search_context_size=search_context_size, + **kwargs, + ) - return ws_tool + @staticmethod + def get_bing_tool( + *, + variant: Literal[ + "grounding", + "custom_search", + ] = "grounding", + project_connection_id: str | None = None, + instance_name: str | None = None, + count: int | None = None, + market: str | None = None, + set_lang: str | None = None, + freshness: str | None = None, + **kwargs: Any, + ) -> dict[str, Any]: + """Create a Bing grounding/custom search tool configuration for Azure AI Projects. + + Keyword Args: + variant: Bing tool variant to create. + project_connection_id: Optional Foundry connection id for Bing variants. + instance_name: Optional Bing custom search instance name for custom variants. + count: Optional result count for Bing variants. + market: Optional market code for Bing variants. + set_lang: Optional language code for Bing variants. + freshness: Optional freshness filter for Bing variants. + **kwargs: Additional arguments for the selected Bing payload. + + Returns: + A Bing tool payload ready to pass to ChatAgent. + + Notes: + ``custom_search`` emits the ``bing_custom_search_preview`` payload schema. 
+ """ + return create_bing_tool( + variant=variant, + project_connection_id=project_connection_id, + instance_name=instance_name, + count=count, + market=market, + set_lang=set_lang, + freshness=freshness, + **kwargs, + ) @staticmethod def get_image_generation_tool( # type: ignore[override] @@ -1080,6 +1186,65 @@ def get_mcp_tool( return mcp + @staticmethod + def get_fabric_data_agent_tool(*, project_connection_id: str | None = None) -> dict[str, Any]: + """Create a Fabric Data Agent tool configuration for Azure AI Projects.""" + return create_fabric_data_agent_tool(project_connection_id=project_connection_id) + + @staticmethod + def get_sharepoint_grounding_tool(*, project_connection_id: str | None = None) -> dict[str, Any]: + """Create a SharePoint grounding tool configuration for Azure AI Projects.""" + return create_sharepoint_grounding_tool(project_connection_id=project_connection_id) + + @staticmethod + def get_azure_ai_search_tool( + *, + project_connection_id: str | None = None, + index_name: str | None = None, + query_type: str | None = None, + ) -> dict[str, Any]: + """Create an Azure AI Search tool configuration for Azure AI Projects.""" + return create_azure_ai_search_tool( + project_connection_id=project_connection_id, + index_name=index_name, + query_type=query_type, + ) + + @staticmethod + def get_browser_automation_tool(*, project_connection_id: str | None = None) -> dict[str, Any]: + """Create a browser automation tool configuration for Azure AI Projects.""" + return create_browser_automation_tool(project_connection_id=project_connection_id) + + @staticmethod + def get_openapi_tool( + *, + name: str, + spec: dict[str, Any], + description: str | None = None, + auth: dict[str, Any] | None = None, + ) -> dict[str, Any]: + """Create an OpenAPI tool configuration for Azure AI Projects.""" + return create_openapi_tool(name=name, spec=spec, description=description, auth=auth) + + @staticmethod + def get_a2a_tool( + *, + project_connection_id: str | None = 
None, + base_url: str | None = None, + ) -> dict[str, Any]: + """Create an A2A tool configuration for Azure AI Projects.""" + return create_a2a_tool(project_connection_id=project_connection_id, base_url=base_url) + + @staticmethod + def get_memory_search_tool( + *, + memory_store_name: str, + scope: str, + update_delay: int | None = None, + ) -> dict[str, Any]: + """Create a memory search tool configuration for Azure AI Projects.""" + return create_memory_search_tool(memory_store_name=memory_store_name, scope=scope, update_delay=update_delay) + # endregion @override diff --git a/python/packages/azure-ai/tests/test_azure_ai_client.py b/python/packages/azure-ai/tests/test_azure_ai_client.py index 4ec1b90971..f0ad36580f 100644 --- a/python/packages/azure-ai/tests/test_azure_ai_client.py +++ b/python/packages/azure-ai/tests/test_azure_ai_client.py @@ -51,6 +51,22 @@ or os.getenv("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") == "", reason="No real AZURE_AI_PROJECT_ENDPOINT or AZURE_AI_MODEL_DEPLOYMENT_NAME provided; skipping integration tests.", ) +skip_if_azure_ai_foundry_helper_integration_tests_disabled = pytest.mark.skipif( + any( + os.getenv(name, "") == "" + for name in ( + "FABRIC_PROJECT_CONNECTION_ID", + "SHAREPOINT_PROJECT_CONNECTION_ID", + "BING_CUSTOM_SEARCH_PROJECT_CONNECTION_ID", + "BING_CUSTOM_SEARCH_INSTANCE_NAME", + "AI_SEARCH_PROJECT_CONNECTION_ID", + "AI_SEARCH_INDEX_NAME", + "BROWSER_AUTOMATION_PROJECT_CONNECTION_ID", + "A2A_PROJECT_CONNECTION_ID", + ) + ), + reason="Required Foundry helper tool settings are missing; skipping integration smoke tests.", +) @pytest.fixture @@ -1583,6 +1599,76 @@ async def test_integration_web_search() -> None: assert response.text is not None +@pytest.mark.flaky +@pytest.mark.integration +@skip_if_azure_ai_integration_tests_disabled +@skip_if_azure_ai_foundry_helper_integration_tests_disabled +@pytest.mark.parametrize( + "tool_name", + [ + param("fabric_data_agent", id="fabric_data_agent"), + param("sharepoint_grounding", 
id="sharepoint_grounding"), + param("bing_custom_search", id="bing_custom_search"), + param("azure_ai_search", id="azure_ai_search"), + param("browser_automation", id="browser_automation"), + param("openapi", id="openapi"), + param("a2a", id="a2a"), + param("memory_search", id="memory_search"), + ], +) +async def test_integration_foundry_helper_tools_smoke(tool_name: str, client: AzureAIClient) -> None: + """Smoke test Foundry helper tools can be passed to Azure AI responses.""" + if tool_name == "fabric_data_agent": + tool = client.get_fabric_data_agent_tool() + elif tool_name == "sharepoint_grounding": + tool = client.get_sharepoint_grounding_tool() + elif tool_name == "bing_custom_search": + tool = client.get_bing_tool(variant="custom_search") + elif tool_name == "azure_ai_search": + tool = client.get_azure_ai_search_tool(query_type="simple") + elif tool_name == "browser_automation": + tool = client.get_browser_automation_tool() + elif tool_name == "openapi": + tool = client.get_openapi_tool( + name="status_api", + spec={ + "openapi": "3.0.0", + "info": {"title": "Status API", "version": "1.0.0"}, + "paths": { + "/status": { + "get": { + "operationId": "getStatus", + "responses": {"200": {"description": "OK"}}, + } + } + }, + }, + auth={"type": "anonymous"}, + ) + elif tool_name == "a2a": + tool = client.get_a2a_tool(base_url=os.getenv("A2A_ENDPOINT", "https://example.com")) + else: + tool = client.get_memory_search_tool(memory_store_name="agent-framework-memory-store", scope="test-scope") + + for streaming in [False, True]: + content = { + "messages": [Message(role="user", text="Say 'Hello World' briefly.")], + "options": { + "tool_choice": "none", + "tools": [tool], + }, + } + if streaming: + response = await client.get_response(stream=True, **content).get_final_response() + else: + response = await client.get_response(**content) + + assert response is not None + assert isinstance(response, ChatResponse) + assert response.text is not None + assert 
len(response.text) > 0 + + @pytest.mark.flaky @pytest.mark.integration @skip_if_azure_ai_integration_tests_disabled @@ -1678,6 +1764,17 @@ def test_get_code_interpreter_tool_basic() -> None: assert isinstance(tool, CodeInterpreterTool) +def test_get_code_interpreter_tool_with_scalar_file_ids_inputs() -> None: + """Test get_code_interpreter_tool accepts scalar string and hosted file Content.""" + string_tool = AzureAIClient.get_code_interpreter_tool(file_ids="file-123") + assert isinstance(string_tool, CodeInterpreterTool) + assert string_tool["container"]["file_ids"] == ["file-123"] + + content_tool = AzureAIClient.get_code_interpreter_tool(file_ids=Content.from_hosted_file(file_id="file-234")) + assert isinstance(content_tool, CodeInterpreterTool) + assert content_tool["container"]["file_ids"] == ["file-234"] + + def test_get_code_interpreter_tool_with_file_ids() -> None: """Test get_code_interpreter_tool with file_ids.""" tool = AzureAIClient.get_code_interpreter_tool(file_ids=["file-123", "file-456"]) @@ -1685,6 +1782,21 @@ def test_get_code_interpreter_tool_with_file_ids() -> None: assert tool["container"]["file_ids"] == ["file-123", "file-456"] +def test_get_code_interpreter_tool_with_file_content_and_string_id() -> None: + """Test get_code_interpreter_tool accepts hosted file Content and string ids.""" + tool = AzureAIClient.get_code_interpreter_tool( + file_ids=[Content.from_hosted_file(file_id="file-123"), "file-456"], + ) + assert isinstance(tool, CodeInterpreterTool) + assert tool["container"]["file_ids"] == ["file-123", "file-456"] + + +def test_get_code_interpreter_tool_rejects_non_hosted_file_content() -> None: + """Test get_code_interpreter_tool rejects unsupported Content types.""" + with pytest.raises(TypeError, match="hosted_file"): + AzureAIClient.get_code_interpreter_tool(file_ids=Content.from_text("not-a-file")) + + def test_get_file_search_tool_basic() -> None: """Test get_file_search_tool returns FileSearchTool.""" tool = 
AzureAIClient.get_file_search_tool(vector_store_ids=["vs-123"]) @@ -1692,6 +1804,34 @@ def test_get_file_search_tool_basic() -> None: assert tool["vector_store_ids"] == ["vs-123"] +def test_get_file_search_tool_with_scalar_vector_store_ids_inputs() -> None: + """Test get_file_search_tool accepts scalar string and hosted vector store Content.""" + string_tool = AzureAIClient.get_file_search_tool(vector_store_ids="vs-123") + assert isinstance(string_tool, FileSearchTool) + assert string_tool["vector_store_ids"] == ["vs-123"] + + content_tool = AzureAIClient.get_file_search_tool( + vector_store_ids=Content.from_hosted_vector_store(vector_store_id="vs-234"), + ) + assert isinstance(content_tool, FileSearchTool) + assert content_tool["vector_store_ids"] == ["vs-234"] + + +def test_get_file_search_tool_with_vector_store_content_and_string_id() -> None: + """Test get_file_search_tool accepts hosted vector store Content and string ids.""" + tool = AzureAIClient.get_file_search_tool( + vector_store_ids=[Content.from_hosted_vector_store(vector_store_id="vs-123"), "vs-456"], + ) + assert isinstance(tool, FileSearchTool) + assert tool["vector_store_ids"] == ["vs-123", "vs-456"] + + +def test_get_file_search_tool_rejects_non_hosted_vector_store_content() -> None: + """Test get_file_search_tool rejects unsupported Content types.""" + with pytest.raises(TypeError, match="hosted_vector_store"): + AzureAIClient.get_file_search_tool(vector_store_ids=Content.from_hosted_file(file_id="file-123")) + + def test_get_file_search_tool_with_options() -> None: """Test get_file_search_tool with max_num_results.""" tool = AzureAIClient.get_file_search_tool( @@ -1732,6 +1872,24 @@ def test_get_web_search_tool_with_search_context_size() -> None: assert tool.search_context_size == "high" +def test_get_bing_tool_grounding_variant() -> None: + """Test get_bing_tool with Bing grounding variant.""" + tool = AzureAIClient.get_bing_tool(variant="grounding", project_connection_id="conn-123") + assert 
tool["type"] == "bing_grounding" + assert tool["bing_grounding"]["search_configurations"][0]["project_connection_id"] == "conn-123" + + +def test_get_bing_tool_custom_search_variant() -> None: + """Test get_bing_tool with Bing custom search variant.""" + custom_tool = AzureAIClient.get_bing_tool( + variant="custom_search", + project_connection_id="conn-123", + instance_name="instance-1", + ) + assert custom_tool["type"] == "bing_custom_search_preview" + assert custom_tool["bing_custom_search_preview"]["search_configurations"][0]["instance_name"] == "instance-1" + + def test_get_mcp_tool_basic() -> None: """Test get_mcp_tool returns MCPTool.""" tool = AzureAIClient.get_mcp_tool(name="test_mcp", url="https://example.com") diff --git a/python/packages/core/agent_framework/_clients.py b/python/packages/core/agent_framework/_clients.py index 278657a154..5cbb594d73 100644 --- a/python/packages/core/agent_framework/_clients.py +++ b/python/packages/core/agent_framework/_clients.py @@ -35,6 +35,7 @@ from ._types import ( ChatResponse, ChatResponseUpdate, + Content, EmbeddingGenerationOptions, EmbeddingInputT, EmbeddingT, @@ -529,10 +530,16 @@ class SupportsCodeInterpreterTool(Protocol): """ @staticmethod - def get_code_interpreter_tool(**kwargs: Any) -> Any: + def get_code_interpreter_tool( + *, + file_ids: str | Content | Sequence[str | Content] | None = None, + **kwargs: Any, + ) -> Any: """Create a code interpreter tool configuration. Keyword Args: + file_ids: Optional file IDs to expose to code interpreter tools when supported. + Accepts string IDs, hosted_file Content, or a sequence containing either form. **kwargs: Provider-specific configuration options. 
Returns: @@ -650,10 +657,16 @@ class SupportsFileSearchTool(Protocol): """ @staticmethod - def get_file_search_tool(**kwargs: Any) -> Any: + def get_file_search_tool( + *, + vector_store_ids: str | Content | Sequence[str | Content] | None = None, + **kwargs: Any, + ) -> Any: """Create a file search tool configuration. Keyword Args: + vector_store_ids: Optional vector store IDs for file search when supported. + Accepts string IDs, hosted_vector_store Content, or a sequence containing either form. **kwargs: Provider-specific configuration options. Returns: diff --git a/python/packages/core/agent_framework/azure/_responses_client.py b/python/packages/core/agent_framework/azure/_responses_client.py index b5b0ca1b5e..ea86e78f74 100644 --- a/python/packages/core/agent_framework/azure/_responses_client.py +++ b/python/packages/core/agent_framework/azure/_responses_client.py @@ -21,6 +21,19 @@ AzureOpenAIConfigMixin, AzureOpenAISettings, _apply_azure_defaults, + create_a2a_tool, + create_azure_ai_search_tool, + create_bing_tool, + create_browser_automation_tool, + create_code_interpreter_tool, + create_fabric_data_agent_tool, + create_file_search_tool, + create_image_generation_tool, + create_mcp_tool, + create_memory_search_tool, + create_openapi_tool, + create_sharepoint_grounding_tool, + create_web_search_tool, ) if sys.version_info >= (3, 13): @@ -183,14 +196,6 @@ class MyOptions(AzureOpenAIResponsesOptions, total=False): if model_id := kwargs.pop("model_id", None) and not deployment_name: deployment_name = str(model_id) - # Project client path: create OpenAI client from an Azure AI Foundry project - if async_client is None and (project_client is not None or project_endpoint is not None): - async_client = self._create_client_from_project( - project_client=project_client, - project_endpoint=project_endpoint, - credential=credential, - ) - azure_openai_settings = load_settings( AzureOpenAISettings, env_prefix="AZURE_OPENAI_", @@ -203,6 +208,16 @@ class 
MyOptions(AzureOpenAIResponsesOptions, total=False): env_file_encoding=env_file_encoding, token_endpoint=token_endpoint, ) + is_project_mode = project_client is not None or project_endpoint is not None + + # Project client path: create OpenAI client from an Azure AI Foundry project + if async_client is None and is_project_mode: + async_client = self._create_client_from_project( + project_client=project_client, + project_endpoint=project_endpoint, + credential=credential, + ) + _apply_azure_defaults(azure_openai_settings, default_api_version="preview") # TODO(peterychang): This is a temporary hack to ensure that the base_url is set correctly # while this feature is in preview. @@ -235,6 +250,24 @@ class MyOptions(AzureOpenAIResponsesOptions, total=False): middleware=middleware, function_invocation_configuration=function_invocation_configuration, ) + if is_project_mode: + self._attach_project_tool_methods() + + def _attach_project_tool_methods(self) -> None: + """Attach project-mode hosted tool methods dynamically.""" + self.get_code_interpreter_tool = create_code_interpreter_tool + self.get_file_search_tool = create_file_search_tool + self.get_web_search_tool = create_web_search_tool + self.get_bing_tool = create_bing_tool + self.get_image_generation_tool = create_image_generation_tool + self.get_mcp_tool = create_mcp_tool + self.get_fabric_data_agent_tool = create_fabric_data_agent_tool + self.get_sharepoint_grounding_tool = create_sharepoint_grounding_tool + self.get_azure_ai_search_tool = create_azure_ai_search_tool + self.get_browser_automation_tool = create_browser_automation_tool + self.get_openapi_tool = create_openapi_tool + self.get_a2a_tool = create_a2a_tool + self.get_memory_search_tool = create_memory_search_tool @staticmethod def _create_client_from_project( diff --git a/python/packages/core/agent_framework/azure/_shared.py b/python/packages/core/agent_framework/azure/_shared.py index dce116a242..906d3e5062 100644 --- 
a/python/packages/core/agent_framework/azure/_shared.py +++ b/python/packages/core/agent_framework/azure/_shared.py @@ -4,15 +4,25 @@ import logging import sys -from collections.abc import Mapping +from collections.abc import Mapping, Sequence from copy import copy -from typing import Any, ClassVar, Final - +from typing import Any, ClassVar, Final, Literal, cast + +from azure.ai.projects.models import ( + ApproximateLocation, + CodeInterpreterTool, + CodeInterpreterToolAuto, + ImageGenTool, + MCPTool, + WebSearchPreviewTool, +) +from azure.ai.projects.models import FileSearchTool as ProjectsFileSearchTool from openai import AsyncOpenAI from openai.lib.azure import AsyncAzureOpenAI -from .._settings import SecretString +from .._settings import SecretString, load_settings from .._telemetry import APP_INFO, prepend_agent_framework_to_user_agent +from .._types import Content from ..openai._shared import OpenAIBase from ._entra_id_authentication import AzureCredentialTypes, AzureTokenProvider, resolve_credential_to_token_provider @@ -218,3 +228,487 @@ def __init__( self.default_headers = def_headers super().__init__(model_id=deployment_name, client=client, **kwargs) + + +class FoundryProjectSettings(TypedDict, total=False): + """Environment-backed Foundry project settings.""" + + project_endpoint: str | None + model_deployment_name: str | None + + +class FoundryToolSettings(TypedDict, total=False): + """Environment-backed Foundry tool settings.""" + + fabric_project_connection_id: str | None + sharepoint_project_connection_id: str | None + bing_project_connection_id: str | None + bing_custom_search_project_connection_id: str | None + bing_custom_search_instance_name: str | None + ai_search_project_connection_id: str | None + ai_search_index_name: str | None + browser_automation_project_connection_id: str | None + a2a_project_connection_id: str | None + a2a_endpoint: str | None + + +def load_foundry_project_settings( + *, + env_file_path: str | None = None, + 
env_file_encoding: str | None = None, +) -> FoundryProjectSettings: + """Load Foundry project settings from ``FOUNDRY_*`` environment variables. + + This resolves the following variables (or matching entries in ``env_file_path`` + when provided): + + - ``FOUNDRY_PROJECT_ENDPOINT`` + - ``FOUNDRY_MODEL_DEPLOYMENT_NAME`` + """ + return load_settings( + FoundryProjectSettings, + env_prefix="FOUNDRY_", + env_file_path=env_file_path, + env_file_encoding=env_file_encoding, + ) + + +def _load_foundry_tool_settings( + *, + env_file_path: str | None = None, + env_file_encoding: str | None = None, +) -> FoundryToolSettings: + """Load shared Foundry tool settings from environment variables. + + With an empty ``env_prefix``, ``load_settings`` reads these variable names + directly (or from ``env_file_path`` when provided): + + - ``FABRIC_PROJECT_CONNECTION_ID`` + - ``SHAREPOINT_PROJECT_CONNECTION_ID`` + - ``BING_PROJECT_CONNECTION_ID`` + - ``BING_CUSTOM_SEARCH_PROJECT_CONNECTION_ID`` + - ``BING_CUSTOM_SEARCH_INSTANCE_NAME`` + - ``AI_SEARCH_PROJECT_CONNECTION_ID`` + - ``AI_SEARCH_INDEX_NAME`` + - ``BROWSER_AUTOMATION_PROJECT_CONNECTION_ID`` + - ``A2A_PROJECT_CONNECTION_ID`` + - ``A2A_ENDPOINT`` + """ + return load_settings( + FoundryToolSettings, + env_prefix="", + env_file_path=env_file_path, + env_file_encoding=env_file_encoding, + ) + + +def _require_string(value: str | None, param_name: str) -> str: + if not value: + raise ValueError(f"'{param_name}' is required.") + return value + + +def _normalize_hosted_ids( + value: str | Content | Sequence[str | Content] | None, + *, + expected_content_type: Literal["hosted_file", "hosted_vector_store"], + content_id_field: Literal["file_id", "vector_store_id"], + parameter_name: Literal["file_ids", "vector_store_ids"], +) -> list[str] | None: + """Normalize string/Content id inputs with strict hosted content validation.""" + if value is None: + return None + + items: list[str | Content] + if isinstance(value, (str, Content)): + items = 
[value] + else: + items = list(value) + + normalized_ids: list[str] = [] + for item in items: + if isinstance(item, str): + normalized_ids.append(item) + continue + + if isinstance(item, Content): + if item.type != expected_content_type: + raise TypeError( + f"{parameter_name} accepts string IDs or Content of type {expected_content_type}." + ) + content_id = getattr(item, content_id_field) + if not content_id: + raise ValueError( + f"{parameter_name} Content items must include '{content_id_field}'." + ) + normalized_ids.append(content_id) + continue + + raise TypeError( + f"{parameter_name} accepts string IDs or Content of type {expected_content_type}." + ) + + return normalized_ids + + +def create_code_interpreter_tool( + *, + file_ids: str | Content | Sequence[str | Content] | None = None, + container: Literal["auto"] | dict[str, Any] = "auto", + **kwargs: Any, +) -> CodeInterpreterTool: + """Create a code interpreter tool configuration for Azure AI Projects. + + Keyword Args: + file_ids: File IDs for the code interpreter. Accepts a string ID, + hosted_file Content, or a sequence containing either form. + container: Existing container payload. + **kwargs: Additional arguments passed to the SDK CodeInterpreterTool constructor. 
+ """ + if file_ids is None and isinstance(container, dict): + file_ids = cast("str | Content | Sequence[str | Content] | None", container.get("file_ids")) + + normalized_file_ids = _normalize_hosted_ids( + file_ids, + expected_content_type="hosted_file", + content_id_field="file_id", + parameter_name="file_ids", + ) + + tool_container = CodeInterpreterToolAuto(file_ids=normalized_file_ids if normalized_file_ids else None) + return CodeInterpreterTool(container=tool_container, **kwargs) + + +def create_file_search_tool( + *, + vector_store_ids: str | Content | Sequence[str | Content] | None = None, + max_num_results: int | None = None, + ranking_options: dict[str, Any] | None = None, + filters: dict[str, Any] | None = None, + **kwargs: Any, +) -> ProjectsFileSearchTool: + """Create a file search tool configuration for Azure AI Projects. + + Keyword Args: + vector_store_ids: Vector store IDs to search. Accepts a string ID, + hosted_vector_store Content, or a sequence containing either form. + max_num_results: Maximum number of results to return (1-50). + ranking_options: Ranking options for search results. + filters: A filter to apply (ComparisonFilter or CompoundFilter). + **kwargs: Additional arguments passed to the SDK FileSearchTool constructor. 
+ """ + normalized_vector_store_ids = _normalize_hosted_ids( + vector_store_ids, + expected_content_type="hosted_vector_store", + content_id_field="vector_store_id", + parameter_name="vector_store_ids", + ) + + if not normalized_vector_store_ids: + raise ValueError("File search tool requires 'vector_store_ids' to be specified.") + + return ProjectsFileSearchTool( + vector_store_ids=normalized_vector_store_ids, + max_num_results=max_num_results, + ranking_options=ranking_options, # type: ignore[arg-type] + filters=filters, # type: ignore[arg-type] + **kwargs, + ) + +def create_web_search_tool( + *, + user_location: dict[str, str] | None = None, + search_context_size: Literal["low", "medium", "high"] | None = None, + **kwargs: Any, +) -> WebSearchPreviewTool: + """Create a generic web search preview tool. + + Keyword Args: + user_location: Location context for search results. + search_context_size: Search context size ("low", "medium", or "high"). + **kwargs: Additional arguments passed to ``WebSearchPreviewTool``. + """ + ws_tool = WebSearchPreviewTool(search_context_size=search_context_size, **kwargs) + if user_location: + ws_tool.user_location = ApproximateLocation( + city=user_location.get("city"), + country=user_location.get("country"), + region=user_location.get("region"), + timezone=user_location.get("timezone"), + ) + return ws_tool + + +def create_bing_tool( + *, + variant: Literal[ + "grounding", + "custom_search", + ] = "grounding", + project_connection_id: str | None = None, + instance_name: str | None = None, + count: int | None = None, + market: str | None = None, + set_lang: str | None = None, + freshness: str | None = None, + **kwargs: Any, +) -> dict[str, Any]: + """Create a Bing grounding/custom search tool. + + Environment-backed fallbacks (used when optional arguments are omitted): + + - For ``variant`` in ``{"grounding"}``: ``project_connection_id`` falls + back to ``BING_PROJECT_CONNECTION_ID``. 
+ - For ``variant`` in ``{"custom_search"}``: + ``project_connection_id`` falls back to + ``BING_CUSTOM_SEARCH_PROJECT_CONNECTION_ID`` and ``instance_name`` falls back + to ``BING_CUSTOM_SEARCH_INSTANCE_NAME``. + + Notes: + ``custom_search`` emits the ``bing_custom_search_preview`` tool payload, which is + the currently supported schema for Bing Custom Search in Foundry tools. + """ + if not project_connection_id: + settings = _load_foundry_tool_settings() + if variant == "custom_search": + project_connection_id = settings.get("bing_custom_search_project_connection_id") + instance_name = instance_name or settings.get("bing_custom_search_instance_name") + else: + project_connection_id = settings.get("bing_project_connection_id") + project_connection_id = _require_string(project_connection_id, "project_connection_id") + config: dict[str, Any] = {"project_connection_id": project_connection_id} + if count is not None: + config["count"] = count + if market: + config["market"] = market + if set_lang: + config["set_lang"] = set_lang + if freshness: + config["freshness"] = freshness + config.update(kwargs) + + if variant == "custom_search": + instance_name = _require_string(instance_name, "instance_name") + config["instance_name"] = instance_name + return { + "type": "bing_custom_search_preview", + "bing_custom_search_preview": {"search_configurations": [config]}, + } + + return { + "type": "bing_grounding", + "bing_grounding": {"search_configurations": [config]}, + } + + +def create_image_generation_tool( + *, + model: Literal["gpt-image-1"] | str | None = None, + size: Literal["1024x1024", "1024x1536", "1536x1024", "auto"] | None = None, + output_format: Literal["png", "webp", "jpeg"] | None = None, + quality: Literal["low", "medium", "high", "auto"] | None = None, + background: Literal["transparent", "opaque", "auto"] | None = None, + partial_images: int | None = None, + moderation: Literal["auto", "low"] | None = None, + output_compression: int | None = None, + 
**kwargs: Any, +) -> ImageGenTool: + """Create an image generation tool configuration for Azure AI Projects.""" + return ImageGenTool( # type: ignore[misc] + model=model, # type: ignore[arg-type] + size=size, + output_format=output_format, + quality=quality, + background=background, + partial_images=partial_images, + moderation=moderation, + output_compression=output_compression, + **kwargs, + ) + + +def create_mcp_tool( + *, + name: str, + url: str | None = None, + description: str | None = None, + approval_mode: Literal["always_require", "never_require"] | dict[str, list[str]] | None = None, + allowed_tools: list[str] | None = None, + headers: dict[str, str] | None = None, + project_connection_id: str | None = None, + **kwargs: Any, +) -> MCPTool: + """Create a hosted MCP tool configuration for Azure AI.""" + _require_string(name, "name") + mcp = MCPTool(server_label=name.replace(" ", "_"), server_url=url or "", **kwargs) + + if description: + mcp["server_description"] = description + + if project_connection_id: + mcp["project_connection_id"] = project_connection_id + elif headers: + mcp["headers"] = headers + + if allowed_tools: + mcp["allowed_tools"] = allowed_tools + + if approval_mode: + if isinstance(approval_mode, str): + mcp["require_approval"] = "always" if approval_mode == "always_require" else "never" + else: + if always_require := approval_mode.get("always_require_approval"): + mcp["require_approval"] = {"always": {"tool_names": always_require}} + if never_require := approval_mode.get("never_require_approval"): + mcp["require_approval"] = {"never": {"tool_names": never_require}} + + return mcp + + +def create_fabric_data_agent_tool(*, project_connection_id: str | None = None) -> dict[str, Any]: + """Create a Microsoft Fabric data agent tool payload. + + If ``project_connection_id`` is omitted, it falls back to + ``FABRIC_PROJECT_CONNECTION_ID``. 
+ """ + if not project_connection_id: + project_connection_id = _load_foundry_tool_settings().get("fabric_project_connection_id") + project_connection_id = _require_string(project_connection_id, "project_connection_id") + return { + "type": "fabric_dataagent_preview", + "fabric_dataagent_preview": { + "project_connections": [{"project_connection_id": project_connection_id}], + }, + } + + +def create_sharepoint_grounding_tool(*, project_connection_id: str | None = None) -> dict[str, Any]: + """Create a SharePoint grounding tool payload. + + If ``project_connection_id`` is omitted, it falls back to + ``SHAREPOINT_PROJECT_CONNECTION_ID``. + """ + if not project_connection_id: + project_connection_id = _load_foundry_tool_settings().get("sharepoint_project_connection_id") + project_connection_id = _require_string(project_connection_id, "project_connection_id") + return { + "type": "sharepoint_grounding_preview", + "sharepoint_grounding_preview": { + "project_connections": [{"project_connection_id": project_connection_id}], + }, + } + + +def create_azure_ai_search_tool( + *, + project_connection_id: str | None = None, + index_name: str | None = None, + query_type: str | None = None, + **kwargs: Any, +) -> dict[str, Any]: + """Create an Azure AI Search tool payload. + + Environment-backed fallbacks (used when optional arguments are omitted): + + - ``project_connection_id`` falls back to ``AI_SEARCH_PROJECT_CONNECTION_ID``. + - ``index_name`` falls back to ``AI_SEARCH_INDEX_NAME``. 
+ """ + if not project_connection_id or not index_name: + settings = _load_foundry_tool_settings() + project_connection_id = project_connection_id or settings.get("ai_search_project_connection_id") + index_name = index_name or settings.get("ai_search_index_name") + project_connection_id = _require_string(project_connection_id, "project_connection_id") + index_name = _require_string(index_name, "index_name") + index: dict[str, Any] = { + "project_connection_id": project_connection_id, + "index_name": index_name, + } + if query_type: + index["query_type"] = query_type + index.update(kwargs) + return { + "type": "azure_ai_search", + "azure_ai_search": {"indexes": [index]}, + } + + +def create_browser_automation_tool(*, project_connection_id: str | None = None) -> dict[str, Any]: + """Create a browser automation tool payload. + + If ``project_connection_id`` is omitted, it falls back to + ``BROWSER_AUTOMATION_PROJECT_CONNECTION_ID``. + """ + if not project_connection_id: + project_connection_id = _load_foundry_tool_settings().get("browser_automation_project_connection_id") + project_connection_id = _require_string(project_connection_id, "project_connection_id") + return { + "type": "browser_automation_preview", + "browser_automation_preview": { + "connection": {"project_connection_id": project_connection_id}, + }, + } + + +def create_openapi_tool( + *, + name: str, + spec: Mapping[str, Any], + description: str | None = None, + auth: Mapping[str, Any] | None = None, + **kwargs: Any, +) -> dict[str, Any]: + """Create an OpenAPI tool payload.""" + _require_string(name, "name") + config: dict[str, Any] = {"name": name, "spec": dict(spec)} + if description: + config["description"] = description + if auth: + config["auth"] = dict(auth) + config.update(kwargs) + return {"type": "openapi", "openapi": config} + + +def create_a2a_tool( + *, + project_connection_id: str | None = None, + base_url: str | None = None, + **kwargs: Any, +) -> dict[str, Any]: + """Create an A2A tool 
payload. + + Environment-backed fallbacks (used when optional arguments are omitted): + + - ``project_connection_id`` falls back to ``A2A_PROJECT_CONNECTION_ID``. + - ``base_url`` falls back to ``A2A_ENDPOINT``. + """ + if not project_connection_id or not base_url: + settings = _load_foundry_tool_settings() + project_connection_id = project_connection_id or settings.get("a2a_project_connection_id") + base_url = base_url or settings.get("a2a_endpoint") + project_connection_id = _require_string(project_connection_id, "project_connection_id") + result: dict[str, Any] = {"type": "a2a_preview", "project_connection_id": project_connection_id} + if base_url: + result["base_url"] = base_url + result.update(kwargs) + return result + + +def create_memory_search_tool( + *, + memory_store_name: str, + scope: str, + update_delay: int | None = None, + **kwargs: Any, +) -> dict[str, Any]: + """Create a memory search tool payload.""" + _require_string(memory_store_name, "memory_store_name") + _require_string(scope, "scope") + result: dict[str, Any] = { + "type": "memory_search", + "memory_store_name": memory_store_name, + "scope": scope, + } + if update_delay is not None: + result["update_delay"] = update_delay + result.update(kwargs) + return result diff --git a/python/packages/core/agent_framework/openai/_responses_client.py b/python/packages/core/agent_framework/openai/_responses_client.py index 5ba0bbc686..e13505f75c 100644 --- a/python/packages/core/agent_framework/openai/_responses_client.py +++ b/python/packages/core/agent_framework/openai/_responses_client.py @@ -464,16 +464,60 @@ def _prepare_tools_for_openai( # region Hosted Tool Factory Methods + @staticmethod + def _normalize_hosted_ids( + value: str | Content | Sequence[str | Content] | None, + *, + expected_content_type: Literal["hosted_file", "hosted_vector_store"], + content_id_field: Literal["file_id", "vector_store_id"], + parameter_name: Literal["file_ids", "vector_store_ids"], + ) -> list[str] | None: + 
"""Normalize string/Content id inputs with strict hosted content validation.""" + if value is None: + return None + + items: list[str | Content] + if isinstance(value, (str, Content)): + items = [value] + else: + items = list(value) + + normalized_ids: list[str] = [] + for item in items: + if isinstance(item, str): + normalized_ids.append(item) + continue + + if isinstance(item, Content): + if item.type != expected_content_type: + raise TypeError( + f"{parameter_name} accepts string IDs or Content of type {expected_content_type}." + ) + content_id = getattr(item, content_id_field) + if not content_id: + raise ValueError( + f"{parameter_name} Content items must include '{content_id_field}'." + ) + normalized_ids.append(content_id) + continue + + raise TypeError( + f"{parameter_name} accepts string IDs or Content of type {expected_content_type}." + ) + + return normalized_ids + @staticmethod def get_code_interpreter_tool( *, - file_ids: list[str] | None = None, + file_ids: str | Content | Sequence[str | Content] | None = None, container: Literal["auto"] | CodeInterpreterContainerCodeInterpreterToolAuto = "auto", ) -> Any: """Create a code interpreter tool configuration for the Responses API. Keyword Args: - file_ids: List of file IDs to make available to the code interpreter. + file_ids: File IDs for the code interpreter. Accepts a string ID, hosted_file Content, + or a sequence containing either form. container: Container configuration. Use "auto" for automatic container management, or provide a TypedDict with custom container settings. 
@@ -495,11 +539,24 @@ def get_code_interpreter_tool( agent = ChatAgent(client, tools=[tool]) """ container_config: CodeInterpreterContainerCodeInterpreterToolAuto = ( - container if isinstance(container, dict) else {"type": "auto"} + dict(container) if isinstance(container, dict) else {"type": "auto"} ) - if file_ids: - container_config["file_ids"] = file_ids + if file_ids is None and isinstance(container_config, dict): + file_ids = cast("str | Content | Sequence[str | Content] | None", container_config.get("file_ids")) + + normalized_file_ids = OpenAIResponsesClient._normalize_hosted_ids( + file_ids, + expected_content_type="hosted_file", + content_id_field="file_id", + parameter_name="file_ids", + ) + + if normalized_file_ids is not None: + if normalized_file_ids: + container_config["file_ids"] = normalized_file_ids + else: + container_config.pop("file_ids", None) return CodeInterpreter(type="code_interpreter", container=container_config) @@ -716,13 +773,14 @@ def get_mcp_tool( @staticmethod def get_file_search_tool( *, - vector_store_ids: list[str], + vector_store_ids: str | Content | Sequence[str | Content], max_num_results: int | None = None, ) -> Any: """Create a file search tool configuration for the Responses API. Keyword Args: - vector_store_ids: List of vector store IDs to search within. + vector_store_ids: Vector store IDs to search. Accepts a string ID, + hosted_vector_store Content, or a sequence containing either form. max_num_results: Maximum number of results to return. Defaults to 50 if not specified. 
Returns: @@ -746,9 +804,19 @@ def get_file_search_tool( agent = ChatAgent(client, tools=[tool]) """ + normalized_vector_store_ids = OpenAIResponsesClient._normalize_hosted_ids( + vector_store_ids, + expected_content_type="hosted_vector_store", + content_id_field="vector_store_id", + parameter_name="vector_store_ids", + ) + + if not normalized_vector_store_ids: + raise ValueError("File search tool requires 'vector_store_ids' to be specified.") + tool = FileSearchToolParam( type="file_search", - vector_store_ids=vector_store_ids, + vector_store_ids=normalized_vector_store_ids, ) if max_num_results is not None: diff --git a/python/packages/core/tests/azure/test_azure_responses_client.py b/python/packages/core/tests/azure/test_azure_responses_client.py index 4e9b25ca6a..a046a4b688 100644 --- a/python/packages/core/tests/azure/test_azure_responses_client.py +++ b/python/packages/core/tests/azure/test_azure_responses_client.py @@ -26,6 +26,19 @@ os.getenv("AZURE_OPENAI_ENDPOINT", "") in ("", "https://test-endpoint.com"), reason="No real AZURE_OPENAI_ENDPOINT provided; skipping integration tests.", ) +skip_if_azure_project_bing_custom_search_integration_tests_disabled = pytest.mark.skipif( + ( + os.getenv("FOUNDRY_PROJECT_ENDPOINT", "") == "" + and os.getenv("AZURE_AI_PROJECT_ENDPOINT", "") == "" + ) + or ( + os.getenv("FOUNDRY_MODEL_DEPLOYMENT_NAME", "") == "" + and os.getenv("AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME", "") == "" + ) + or os.getenv("BING_CUSTOM_SEARCH_PROJECT_CONNECTION_ID", "") == "" + or os.getenv("BING_CUSTOM_SEARCH_INSTANCE_NAME", "") == "", + reason="Missing Foundry project or Bing Custom Search settings; skipping project-mode integration test.", +) logger = logging.getLogger(__name__) @@ -107,6 +120,58 @@ def test_init_with_default_header(azure_openai_unit_test_env: dict[str, str]) -> assert azure_responses_client.client.default_headers[key] == value +def test_get_code_interpreter_tool_accepts_string_and_hosted_file_content_scalars() -> None: + 
"""Test AzureOpenAIResponsesClient code interpreter accepts scalar file_ids inputs.""" + string_tool = AzureOpenAIResponsesClient.get_code_interpreter_tool(file_ids="file-123") + assert string_tool["container"]["file_ids"] == ["file-123"] + + content_tool = AzureOpenAIResponsesClient.get_code_interpreter_tool( + file_ids=Content.from_hosted_file(file_id="file-234") + ) + assert content_tool["container"]["file_ids"] == ["file-234"] + + +def test_get_code_interpreter_tool_accepts_hosted_file_content_and_string_id() -> None: + """Test AzureOpenAIResponsesClient code interpreter tool accepts hosted file Content and string ids.""" + tool = AzureOpenAIResponsesClient.get_code_interpreter_tool( + file_ids=[Content.from_hosted_file(file_id="file-123"), "file-456"] + ) + + assert tool["container"]["file_ids"] == ["file-123", "file-456"] + + +def test_get_code_interpreter_tool_rejects_non_hosted_file_content() -> None: + """Test AzureOpenAIResponsesClient code interpreter tool rejects unsupported Content types.""" + with pytest.raises(TypeError, match="hosted_file"): + AzureOpenAIResponsesClient.get_code_interpreter_tool(file_ids=Content.from_text("not-a-file")) + + +def test_get_file_search_tool_accepts_string_and_hosted_vector_store_content_scalars() -> None: + """Test AzureOpenAIResponsesClient file search accepts scalar vector_store_ids inputs.""" + string_tool = AzureOpenAIResponsesClient.get_file_search_tool(vector_store_ids="vs-123") + assert string_tool["vector_store_ids"] == ["vs-123"] + + content_tool = AzureOpenAIResponsesClient.get_file_search_tool( + vector_store_ids=Content.from_hosted_vector_store(vector_store_id="vs-234") + ) + assert content_tool["vector_store_ids"] == ["vs-234"] + + +def test_get_file_search_tool_accepts_hosted_vector_store_content_and_string_id() -> None: + """Test AzureOpenAIResponsesClient file search tool accepts hosted vector store Content and string ids.""" + tool = AzureOpenAIResponsesClient.get_file_search_tool( + 
vector_store_ids=[Content.from_hosted_vector_store(vector_store_id="vs-123"), "vs-456"] + ) + + assert tool["vector_store_ids"] == ["vs-123", "vs-456"] + + +def test_get_file_search_tool_rejects_non_hosted_vector_store_content() -> None: + """Test AzureOpenAIResponsesClient file search tool rejects unsupported Content types.""" + with pytest.raises(TypeError, match="hosted_vector_store"): + AzureOpenAIResponsesClient.get_file_search_tool(vector_store_ids=Content.from_hosted_file(file_id="file-123")) + + @pytest.mark.parametrize("exclude_list", [["AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME"]], indirect=True) def test_init_with_empty_model_id(azure_openai_unit_test_env: dict[str, str]) -> None: with pytest.raises(ValueError): @@ -438,6 +503,41 @@ async def test_integration_web_search() -> None: assert response.text is not None +@pytest.mark.flaky +@pytest.mark.integration +@skip_if_azure_project_bing_custom_search_integration_tests_disabled +async def test_integration_project_mode_web_search_bing_custom_search() -> None: + project_endpoint = os.getenv("FOUNDRY_PROJECT_ENDPOINT") or os.getenv("AZURE_AI_PROJECT_ENDPOINT") + deployment_name = os.getenv("FOUNDRY_MODEL_DEPLOYMENT_NAME") or os.getenv("AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME") + assert project_endpoint is not None + assert deployment_name is not None + + client = AzureOpenAIResponsesClient( + project_endpoint=project_endpoint, + deployment_name=deployment_name, + credential=AzureCliCredential(), + ) + + for streaming in [False, True]: + content = { + "messages": [Message(role="user", text="What is GitHub Copilot? 
Use web search to answer briefly.")], + "options": { + "tool_choice": "none", + "tools": [client.get_bing_tool(variant="custom_search")], + }, + "stream": streaming, + } + if streaming: + response = await client.get_response(**content).get_final_response() + else: + response = await client.get_response(**content) + + assert response is not None + assert isinstance(response, ChatResponse) + assert response.text is not None + assert len(response.text) > 0 + + @pytest.mark.flaky @pytest.mark.integration @skip_if_azure_integration_tests_disabled diff --git a/python/packages/core/tests/openai/test_openai_responses_client.py b/python/packages/core/tests/openai/test_openai_responses_client.py index 7eaae1e776..4da8fd822c 100644 --- a/python/packages/core/tests/openai/test_openai_responses_client.py +++ b/python/packages/core/tests/openai/test_openai_responses_client.py @@ -244,6 +244,50 @@ async def test_web_search_tool_with_location() -> None: ) +def test_get_code_interpreter_tool_accepts_string_and_hosted_file_content() -> None: + """Test OpenAIResponsesClient code interpreter accepts scalar and list file_ids inputs.""" + scalar_string_tool = OpenAIResponsesClient.get_code_interpreter_tool(file_ids="file-123") + assert scalar_string_tool["container"]["file_ids"] == ["file-123"] + + scalar_content_tool = OpenAIResponsesClient.get_code_interpreter_tool( + file_ids=Content.from_hosted_file(file_id="file-234") + ) + assert scalar_content_tool["container"]["file_ids"] == ["file-234"] + + mixed_list_tool = OpenAIResponsesClient.get_code_interpreter_tool( + file_ids=[Content.from_hosted_file(file_id="file-345"), "file-456"] + ) + assert mixed_list_tool["container"]["file_ids"] == ["file-345", "file-456"] + + +def test_get_code_interpreter_tool_rejects_non_hosted_file_content() -> None: + """Test OpenAIResponsesClient code interpreter rejects unsupported Content types.""" + with pytest.raises(TypeError, match="hosted_file"): + 
OpenAIResponsesClient.get_code_interpreter_tool(file_ids=Content.from_text("not-a-file")) + + +def test_get_file_search_tool_accepts_string_and_hosted_vector_store_content() -> None: + """Test OpenAIResponsesClient file search accepts scalar and list vector_store_ids inputs.""" + scalar_string_tool = OpenAIResponsesClient.get_file_search_tool(vector_store_ids="vs-123") + assert scalar_string_tool["vector_store_ids"] == ["vs-123"] + + scalar_content_tool = OpenAIResponsesClient.get_file_search_tool( + vector_store_ids=Content.from_hosted_vector_store(vector_store_id="vs-234") + ) + assert scalar_content_tool["vector_store_ids"] == ["vs-234"] + + mixed_list_tool = OpenAIResponsesClient.get_file_search_tool( + vector_store_ids=[Content.from_hosted_vector_store(vector_store_id="vs-345"), "vs-456"] + ) + assert mixed_list_tool["vector_store_ids"] == ["vs-345", "vs-456"] + + +def test_get_file_search_tool_rejects_non_hosted_vector_store_content() -> None: + """Test OpenAIResponsesClient file search rejects unsupported Content types.""" + with pytest.raises(TypeError, match="hosted_vector_store"): + OpenAIResponsesClient.get_file_search_tool(vector_store_ids=Content.from_hosted_file(file_id="file-123")) + + async def test_code_interpreter_tool_variations() -> None: """Test HostedCodeInterpreterTool with and without file inputs.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") diff --git a/python/samples/02-agents/providers/azure_ai/azure_ai_with_agent_to_agent.py b/python/samples/02-agents/providers/azure_ai/azure_ai_with_agent_to_agent.py index 881b4a38f1..c25ce21e82 100644 --- a/python/samples/02-agents/providers/azure_ai/azure_ai_with_agent_to_agent.py +++ b/python/samples/02-agents/providers/azure_ai/azure_ai_with_agent_to_agent.py @@ -1,8 +1,7 @@ # Copyright (c) Microsoft. All rights reserved. 
import asyncio -import os -from agent_framework.azure import AzureAIProjectAgentProvider +from agent_framework.azure import AzureAIClient, AzureAIProjectAgentProvider from azure.identity.aio import AzureCliCredential from dotenv import load_dotenv @@ -25,16 +24,6 @@ async def main() -> None: - # Configure A2A tool with connection ID - a2a_tool = { - "type": "a2a_preview", - "project_connection_id": os.environ["A2A_PROJECT_CONNECTION_ID"], - } - - # If the connection is missing a target, we need to set the A2A endpoint URL - if os.environ.get("A2A_ENDPOINT"): - a2a_tool["base_url"] = os.environ["A2A_ENDPOINT"] - async with ( AzureCliCredential() as credential, AzureAIProjectAgentProvider(credential=credential) as provider, @@ -44,7 +33,7 @@ async def main() -> None: instructions="""You are a helpful assistant that can communicate with other agents. Use the A2A tool when you need to interact with other agents to complete tasks or gather information from specialized agents.""", - tools=a2a_tool, + tools=AzureAIClient.get_a2a_tool(), ) query = "What can the secondary agent do?" diff --git a/python/samples/02-agents/providers/azure_ai/azure_ai_with_azure_ai_search.py b/python/samples/02-agents/providers/azure_ai/azure_ai_with_azure_ai_search.py index 3e7ce71096..96c3b91be1 100644 --- a/python/samples/02-agents/providers/azure_ai/azure_ai_with_azure_ai_search.py +++ b/python/samples/02-agents/providers/azure_ai/azure_ai_with_azure_ai_search.py @@ -1,9 +1,8 @@ # Copyright (c) Microsoft. All rights reserved. import asyncio -import os from agent_framework import Annotation -from agent_framework.azure import AzureAIProjectAgentProvider +from agent_framework.azure import AzureAIClient, AzureAIProjectAgentProvider from azure.identity.aio import AzureCliCredential from dotenv import load_dotenv @@ -37,19 +36,8 @@ async def main() -> None: "You are a helpful agent that searches hotel information using Azure AI Search. 
" "Always use the search tool and index to find hotel data and provide accurate information." ), - tools={ - "type": "azure_ai_search", - "azure_ai_search": { - "indexes": [ - { - "project_connection_id": os.environ["AI_SEARCH_PROJECT_CONNECTION_ID"], - "index_name": os.environ["AI_SEARCH_INDEX_NAME"], - # For query_type=vector, ensure your index has a field with vectorized data. - "query_type": "simple", - } - ] - }, - }, + # For query_type=vector, ensure your index has a field with vectorized data. + tools=AzureAIClient.get_azure_ai_search_tool(query_type="simple"), ) query = ( diff --git a/python/samples/02-agents/providers/azure_ai/azure_ai_with_bing_custom_search.py b/python/samples/02-agents/providers/azure_ai/azure_ai_with_bing_custom_search.py index 123ee82431..3bf8b0b0b3 100644 --- a/python/samples/02-agents/providers/azure_ai/azure_ai_with_bing_custom_search.py +++ b/python/samples/02-agents/providers/azure_ai/azure_ai_with_bing_custom_search.py @@ -1,8 +1,7 @@ # Copyright (c) Microsoft. All rights reserved. import asyncio -import os -from agent_framework.azure import AzureAIProjectAgentProvider +from agent_framework.azure import AzureAIClient, AzureAIProjectAgentProvider from azure.identity.aio import AzureCliCredential from dotenv import load_dotenv @@ -31,17 +30,7 @@ async def main() -> None: name="MyCustomSearchAgent", instructions="""You are a helpful agent that can use Bing Custom Search tools to assist users. 
Use the available Bing Custom Search tools to answer questions and perform tasks.""", - tools={ - "type": "bing_custom_search_preview", - "bing_custom_search_preview": { - "search_configurations": [ - { - "project_connection_id": os.environ["BING_CUSTOM_SEARCH_PROJECT_CONNECTION_ID"], - "instance_name": os.environ["BING_CUSTOM_SEARCH_INSTANCE_NAME"], - } - ] - }, - }, + tools=AzureAIClient.get_bing_tool(variant="custom_search"), ) query = "Tell me more about foundry agent service" diff --git a/python/samples/02-agents/providers/azure_ai/azure_ai_with_bing_grounding.py b/python/samples/02-agents/providers/azure_ai/azure_ai_with_bing_grounding.py index e3a3e8330c..7ba859d416 100644 --- a/python/samples/02-agents/providers/azure_ai/azure_ai_with_bing_grounding.py +++ b/python/samples/02-agents/providers/azure_ai/azure_ai_with_bing_grounding.py @@ -1,8 +1,7 @@ # Copyright (c) Microsoft. All rights reserved. import asyncio -import os -from agent_framework.azure import AzureAIProjectAgentProvider +from agent_framework.azure import AzureAIClient, AzureAIProjectAgentProvider from azure.identity.aio import AzureCliCredential from dotenv import load_dotenv @@ -38,16 +37,7 @@ async def main() -> None: instructions="""You are a helpful assistant that can search the web for current information. Use the Bing search tool to find up-to-date information and provide accurate, well-sourced answers. Always cite your sources when possible.""", - tools={ - "type": "bing_grounding", - "bing_grounding": { - "search_configurations": [ - { - "project_connection_id": os.environ["BING_PROJECT_CONNECTION_ID"], - } - ] - }, - }, + tools=AzureAIClient.get_bing_tool(variant="grounding"), ) query = "What is today's date and weather in Seattle?" 
diff --git a/python/samples/02-agents/providers/azure_ai/azure_ai_with_browser_automation.py b/python/samples/02-agents/providers/azure_ai/azure_ai_with_browser_automation.py index 33cd302485..46188bc800 100644 --- a/python/samples/02-agents/providers/azure_ai/azure_ai_with_browser_automation.py +++ b/python/samples/02-agents/providers/azure_ai/azure_ai_with_browser_automation.py @@ -1,8 +1,7 @@ # Copyright (c) Microsoft. All rights reserved. import asyncio -import os -from agent_framework.azure import AzureAIProjectAgentProvider +from agent_framework.azure import AzureAIClient, AzureAIProjectAgentProvider from azure.identity.aio import AzureCliCredential from dotenv import load_dotenv @@ -32,14 +31,7 @@ async def main() -> None: instructions="""You are an Agent helping with browser automation tasks. You can answer questions, provide information, and assist with various tasks related to web browsing using the Browser Automation tool available to you.""", - tools={ - "type": "browser_automation_preview", - "browser_automation_preview": { - "connection": { - "project_connection_id": os.environ["BROWSER_AUTOMATION_PROJECT_CONNECTION_ID"], - } - }, - }, + tools=AzureAIClient.get_browser_automation_tool(), ) query = """Your goal is to report the percent of Microsoft year-to-date stock price change. diff --git a/python/samples/02-agents/providers/azure_ai/azure_ai_with_file_search.py b/python/samples/02-agents/providers/azure_ai/azure_ai_with_file_search.py index c15edd95dd..0d26cd398a 100644 --- a/python/samples/02-agents/providers/azure_ai/azure_ai_with_file_search.py +++ b/python/samples/02-agents/providers/azure_ai/azure_ai_with_file_search.py @@ -53,7 +53,7 @@ async def main() -> None: # 2. Create a file search tool client = AzureAIClient(project_client=project_client) - file_search_tool = client.get_file_search_tool(vector_store_ids=[vector_store.id]) + file_search_tool = client.get_file_search_tool(vector_store_ids=vector_store.id) # 3. 
Create an agent with file search capabilities using the provider agent = await provider.create_agent( diff --git a/python/samples/02-agents/providers/azure_ai/azure_ai_with_memory_search.py b/python/samples/02-agents/providers/azure_ai/azure_ai_with_memory_search.py index 2d1cb43c30..aaa97d4d8a 100644 --- a/python/samples/02-agents/providers/azure_ai/azure_ai_with_memory_search.py +++ b/python/samples/02-agents/providers/azure_ai/azure_ai_with_memory_search.py @@ -3,7 +3,7 @@ import os import uuid -from agent_framework.azure import AzureAIProjectAgentProvider +from agent_framework.azure import AzureAIClient, AzureAIProjectAgentProvider from azure.ai.projects.aio import AIProjectClient from azure.ai.projects.models import MemoryStoreDefaultDefinition, MemoryStoreDefaultOptions from azure.identity.aio import AzureCliCredential @@ -56,12 +56,11 @@ async def main() -> None: name="MyMemoryAgent", instructions="""You are a helpful assistant that remembers past conversations. Use the memory search tool to recall relevant information from previous interactions.""", - tools={ - "type": "memory_search", - "memory_store_name": memory_store.name, - "scope": "user_123", - "update_delay": 1, # Wait 1 second before updating memories (use higher value in production) - }, + tools=AzureAIClient.get_memory_search_tool( + memory_store_name=memory_store.name, + scope="user_123", + update_delay=1, # Wait 1 second before updating memories (use higher value in production) + ), ) # First interaction - establish some preferences diff --git a/python/samples/02-agents/providers/azure_ai/azure_ai_with_microsoft_fabric.py b/python/samples/02-agents/providers/azure_ai/azure_ai_with_microsoft_fabric.py index 531a18cb69..9702c9827f 100644 --- a/python/samples/02-agents/providers/azure_ai/azure_ai_with_microsoft_fabric.py +++ b/python/samples/02-agents/providers/azure_ai/azure_ai_with_microsoft_fabric.py @@ -1,8 +1,7 @@ # Copyright (c) Microsoft. All rights reserved. 
import asyncio -import os -from agent_framework.azure import AzureAIProjectAgentProvider +from agent_framework.azure import AzureAIClient, AzureAIProjectAgentProvider from azure.identity.aio import AzureCliCredential from dotenv import load_dotenv @@ -30,16 +29,7 @@ async def main() -> None: agent = await provider.create_agent( name="MyFabricAgent", instructions="You are a helpful assistant.", - tools={ - "type": "fabric_dataagent_preview", - "fabric_dataagent_preview": { - "project_connections": [ - { - "project_connection_id": os.environ["FABRIC_PROJECT_CONNECTION_ID"], - } - ] - }, - }, + tools=AzureAIClient.get_fabric_data_agent_tool(), ) query = "Tell me about sales records" diff --git a/python/samples/02-agents/providers/azure_ai/azure_ai_with_openapi.py b/python/samples/02-agents/providers/azure_ai/azure_ai_with_openapi.py index 2565c6ea23..6244aa4467 100644 --- a/python/samples/02-agents/providers/azure_ai/azure_ai_with_openapi.py +++ b/python/samples/02-agents/providers/azure_ai/azure_ai_with_openapi.py @@ -3,7 +3,7 @@ import json from pathlib import Path -from agent_framework.azure import AzureAIProjectAgentProvider +from agent_framework.azure import AzureAIClient, AzureAIProjectAgentProvider from azure.identity.aio import AzureCliCredential from dotenv import load_dotenv @@ -37,15 +37,12 @@ async def main() -> None: name="MyOpenAPIAgent", instructions="""You are a helpful assistant that can use country APIs to provide information. 
Use the available OpenAPI tools to answer questions about countries, currencies, and demographics.""", - tools={ - "type": "openapi", - "openapi": { - "name": "get_countries", - "spec": openapi_countries, - "description": "Retrieve information about countries by currency code", - "auth": {"type": "anonymous"}, - }, - }, + tools=AzureAIClient.get_openapi_tool( + name="get_countries", + spec=openapi_countries, + description="Retrieve information about countries by currency code", + auth={"type": "anonymous"}, + ), ) query = "What is the name and population of the country that uses currency with abbreviation THB?" diff --git a/python/samples/02-agents/providers/azure_ai/azure_ai_with_sharepoint.py b/python/samples/02-agents/providers/azure_ai/azure_ai_with_sharepoint.py index ce6bf85837..b5c88f9a2b 100644 --- a/python/samples/02-agents/providers/azure_ai/azure_ai_with_sharepoint.py +++ b/python/samples/02-agents/providers/azure_ai/azure_ai_with_sharepoint.py @@ -1,8 +1,7 @@ # Copyright (c) Microsoft. All rights reserved. import asyncio -import os -from agent_framework.azure import AzureAIProjectAgentProvider +from agent_framework.azure import AzureAIClient, AzureAIProjectAgentProvider from azure.identity.aio import AzureCliCredential from dotenv import load_dotenv @@ -31,16 +30,7 @@ async def main() -> None: name="MySharePointAgent", instructions="""You are a helpful agent that can use SharePoint tools to assist users. Use the available SharePoint tools to answer questions and perform tasks.""", - tools={ - "type": "sharepoint_grounding_preview", - "sharepoint_grounding_preview": { - "project_connections": [ - { - "project_connection_id": os.environ["SHAREPOINT_PROJECT_CONNECTION_ID"], - } - ] - }, - }, + tools=AzureAIClient.get_sharepoint_grounding_tool(), ) query = "What is Contoso whistleblower policy?" 
diff --git a/python/samples/02-agents/providers/azure_openai/README.md b/python/samples/02-agents/providers/azure_openai/README.md index 6971183ccf..427b2f5ca6 100644 --- a/python/samples/02-agents/providers/azure_openai/README.md +++ b/python/samples/02-agents/providers/azure_openai/README.md @@ -23,6 +23,7 @@ This folder contains examples demonstrating different ways to create and use age | [`azure_responses_client_with_explicit_settings.py`](azure_responses_client_with_explicit_settings.py) | Shows how to initialize an agent with a specific responses client, configuring settings explicitly including endpoint and deployment name. | | [`azure_responses_client_with_file_search.py`](azure_responses_client_with_file_search.py) | Demonstrates using `AzureOpenAIResponsesClient.get_file_search_tool()` with Azure OpenAI Responses Client for direct document-based question answering and information retrieval from vector stores. | | [`azure_responses_client_with_foundry.py`](azure_responses_client_with_foundry.py) | Shows how to create an agent using an Azure AI Foundry project endpoint instead of a direct Azure OpenAI endpoint. Requires the `azure-ai-projects` package. | +| [`azure_responses_client_with_foundry_tools.py`](azure_responses_client_with_foundry_tools.py) | Shows a single `Agent` in explicit Foundry project mode, configured with one combined inline tools list built directly via `client.get_..._tool(...)` helpers. The sample is intentionally non-defensive and instructs you to comment out tools you have not configured. | | [`azure_responses_client_with_function_tools.py`](azure_responses_client_with_function_tools.py) | Demonstrates how to use function tools with agents. Shows both agent-level tools (defined when creating the agent) and query-level tools (provided with specific queries). 
| | [`azure_responses_client_with_hosted_mcp.py`](azure_responses_client_with_hosted_mcp.py) | Shows how to integrate Azure OpenAI Responses Client with hosted Model Context Protocol (MCP) servers using `AzureOpenAIResponsesClient.get_mcp_tool()` for extended functionality. | | [`azure_responses_client_with_local_mcp.py`](azure_responses_client_with_local_mcp.py) | Shows how to integrate Azure OpenAI Responses Client with local Model Context Protocol (MCP) servers using MCPStreamableHTTPTool for extended functionality. | @@ -39,6 +40,11 @@ Make sure to set the following environment variables before running the examples For the Foundry project sample (`azure_responses_client_with_foundry.py`), also set: - `AZURE_AI_PROJECT_ENDPOINT`: Your Azure AI Foundry project endpoint +For the Foundry tools sample (`azure_responses_client_with_foundry_tools.py`), set: +- `AZURE_AI_PROJECT_ENDPOINT`: Your Azure AI Foundry project endpoint (read directly by the sample) +- `AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME`: The responses model deployment name (read directly by the sample) +- Tool-specific connection variables as needed (for example `FABRIC_PROJECT_CONNECTION_ID`, `BING_PROJECT_CONNECTION_ID`). + Optionally, you can set: - `AZURE_OPENAI_API_VERSION`: The API version to use (default is `2024-02-15-preview`) - `AZURE_OPENAI_API_KEY`: Your Azure OpenAI API key (if not using `AzureCliCredential`) diff --git a/python/samples/02-agents/providers/azure_openai/azure_responses_client_with_foundry_tools.py b/python/samples/02-agents/providers/azure_openai/azure_responses_client_with_foundry_tools.py new file mode 100644 index 0000000000..a1c11d11cb --- /dev/null +++ b/python/samples/02-agents/providers/azure_openai/azure_responses_client_with_foundry_tools.py @@ -0,0 +1,124 @@ +# Copyright (c) Microsoft. All rights reserved.
+ +import asyncio +import os +from typing import Any + +from agent_framework import Agent +from agent_framework.azure import AzureOpenAIResponsesClient +from azure.identity import AzureCliCredential +from dotenv import load_dotenv + +# Load environment variables from .env file +load_dotenv() + +""" +Azure OpenAI Responses Client with Foundry Tools Example. + +If an ``AzureOpenAIResponsesClient`` is initialized with a Foundry project endpoint and valid credentials, +it will automatically wire up Foundry-hosted tools that are available to the agent. +This sample demonstrates how to set up such a client and use it within an agent, along with a variety of Foundry tools. + +The same tools are available directly on the ``AzureAIClient`` as well, so this wiring is not unique to the responses client. + +Important: +- This sample is intentionally non-defensive and includes direct tool wiring. +- Comment out any tool entries you do not want to use, or whose required environment + variables/connections you have not configured yet. + +Required project settings: +- AZURE_AI_PROJECT_ENDPOINT +- AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME + +Tool-to-environment mapping used in this sample: +- client.get_file_search_tool(...): FILE_SEARCH_VECTOR_STORE_ID (explicitly read in code). +- client.get_bing_tool(variant="grounding"): BING_PROJECT_CONNECTION_ID. +- client.get_bing_tool(variant="custom_search"): + BING_CUSTOM_SEARCH_PROJECT_CONNECTION_ID and BING_CUSTOM_SEARCH_INSTANCE_NAME. +- client.get_fabric_data_agent_tool(): FABRIC_PROJECT_CONNECTION_ID. +- client.get_sharepoint_grounding_tool(): SHAREPOINT_PROJECT_CONNECTION_ID. +- client.get_azure_ai_search_tool(...): AI_SEARCH_PROJECT_CONNECTION_ID and AI_SEARCH_INDEX_NAME. +- client.get_browser_automation_tool(): BROWSER_AUTOMATION_PROJECT_CONNECTION_ID. +- client.get_a2a_tool(): A2A_PROJECT_CONNECTION_ID (optionally A2A_ENDPOINT for base_url).
+ +No additional environment settings are required for: +- client.get_code_interpreter_tool() +- client.get_web_search_tool(...) +- client.get_image_generation_tool(...) +- client.get_mcp_tool(...) +- client.get_openapi_tool(...) +- client.get_memory_search_tool(...) +""" + + +async def main() -> None: + print("=== Azure OpenAI Responses Client with Foundry Tools Example ===") + + project_endpoint = os.getenv("AZURE_AI_PROJECT_ENDPOINT") + deployment_name = os.getenv("AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME") + + client: Any = AzureOpenAIResponsesClient( + project_endpoint=project_endpoint, + deployment_name=deployment_name, + credential=AzureCliCredential(), + ) + + agent = Agent( + client=client, + instructions="You are a helpful assistant that can use Foundry-hosted tools when useful.", + tools=[ + client.get_code_interpreter_tool(), + client.get_web_search_tool( + user_location={"country": "US", "city": "Seattle"} + ), + client.get_image_generation_tool(), + client.get_mcp_tool( + name="Microsoft Learn MCP", + url="https://learn.microsoft.com/api/mcp", + approval_mode="never_require", + ), + client.get_openapi_tool( + name="get_countries", + spec={ + "openapi": "3.0.0", + "info": {"title": "Countries API", "version": "1.0.0"}, + "paths": { + "/countries": { + "get": { + "operationId": "listCountries", + "responses": {"200": {"description": "OK"}}, + } + } + }, + }, + description="Retrieve information about countries.", + auth={"type": "anonymous"}, + ), + client.get_memory_search_tool( + memory_store_name="agent-framework-memory-store", + scope="user_123", + update_delay=1, + ), + client.get_file_search_tool( + vector_store_ids=os.environ["FILE_SEARCH_VECTOR_STORE_ID"] + ), + client.get_bing_tool(variant="grounding"), + client.get_bing_tool(variant="custom_search"), + client.get_fabric_data_agent_tool(), + client.get_sharepoint_grounding_tool(), + client.get_azure_ai_search_tool(query_type="simple"), + client.get_browser_automation_tool(), + 
client.get_a2a_tool(), + ], + ) + + query = ( + "List the tool categories available to you and when each category is useful." + ) + print(f"User: {query}") + result = await agent.run(query) + print(f"Agent: {result}") + + +if __name__ == "__main__": + asyncio.run(main()) From e3438602ff70896411ee5f710649b9f8216f4c83 Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Tue, 3 Mar 2026 11:03:15 +0100 Subject: [PATCH 02/10] Python: consolidate AzureAI Foundry tool samples Add a single AzureAI sample named using_foundry_tools.py that demonstrates all Foundry-hosted tool helpers, update the Azure AI provider README to reference the consolidated sample, and remove the separate per-tool Foundry samples it replaces. Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- .../02-agents/providers/azure_ai/README.md | 10 +- .../azure_ai/azure_ai_with_agent_to_agent.py | 46 -------- .../azure_ai/azure_ai_with_azure_ai_search.py | 98 ---------------- .../azure_ai_with_bing_custom_search.py | 43 ------- .../azure_ai/azure_ai_with_bing_grounding.py | 50 -------- .../azure_ai_with_browser_automation.py | 50 -------- .../azure_ai/azure_ai_with_memory_search.py | 91 --------------- .../azure_ai_with_microsoft_fabric.py | 42 ------- .../azure_ai/azure_ai_with_openapi.py | 55 --------- .../azure_ai/azure_ai_with_sharepoint.py | 43 ------- .../providers/azure_ai/using_foundry_tools.py | 108 ++++++++++++++++++ 11 files changed, 109 insertions(+), 527 deletions(-) delete mode 100644 python/samples/02-agents/providers/azure_ai/azure_ai_with_agent_to_agent.py delete mode 100644 python/samples/02-agents/providers/azure_ai/azure_ai_with_azure_ai_search.py delete mode 100644 python/samples/02-agents/providers/azure_ai/azure_ai_with_bing_custom_search.py delete mode 100644 python/samples/02-agents/providers/azure_ai/azure_ai_with_bing_grounding.py delete mode 100644 python/samples/02-agents/providers/azure_ai/azure_ai_with_browser_automation.py delete mode 100644 
python/samples/02-agents/providers/azure_ai/azure_ai_with_memory_search.py delete mode 100644 python/samples/02-agents/providers/azure_ai/azure_ai_with_microsoft_fabric.py delete mode 100644 python/samples/02-agents/providers/azure_ai/azure_ai_with_openapi.py delete mode 100644 python/samples/02-agents/providers/azure_ai/azure_ai_with_sharepoint.py create mode 100644 python/samples/02-agents/providers/azure_ai/using_foundry_tools.py diff --git a/python/samples/02-agents/providers/azure_ai/README.md b/python/samples/02-agents/providers/azure_ai/README.md index d49147989f..48554a1b97 100644 --- a/python/samples/02-agents/providers/azure_ai/README.md +++ b/python/samples/02-agents/providers/azure_ai/README.md @@ -10,11 +10,7 @@ This folder contains examples demonstrating different ways to create and use age | [`azure_ai_provider_methods.py`](azure_ai_provider_methods.py) | Comprehensive guide to `AzureAIProjectAgentProvider` methods: `create_agent()` for creating new agents, `get_agent()` for retrieving existing agents (by name, reference, or details), and `as_agent()` for wrapping SDK objects without HTTP calls. | | [`azure_ai_use_latest_version.py`](azure_ai_use_latest_version.py) | Demonstrates how to reuse the latest version of an existing agent instead of creating a new agent version on each instantiation by using `provider.get_agent()` to retrieve the latest version. | | [`azure_ai_with_agent_as_tool.py`](azure_ai_with_agent_as_tool.py) | Shows how to use the agent-as-tool pattern with Azure AI agents, where one agent delegates work to specialized sub-agents wrapped as tools using `as_tool()`. Demonstrates hierarchical agent architectures. | -| [`azure_ai_with_agent_to_agent.py`](azure_ai_with_agent_to_agent.py) | Shows how to use Agent-to-Agent (A2A) capabilities with Azure AI agents to enable communication with other agents using the A2A protocol. Requires an A2A connection configured in your Azure AI project. 
| -| [`azure_ai_with_azure_ai_search.py`](azure_ai_with_azure_ai_search.py) | Shows how to use Azure AI Search with Azure AI agents to search through indexed data and answer user questions with proper citations. Requires an Azure AI Search connection and index configured in your Azure AI project. | -| [`azure_ai_with_bing_grounding.py`](azure_ai_with_bing_grounding.py) | Shows how to use Bing Grounding search with Azure AI agents to search the web for current information and provide grounded responses with citations. Requires a Bing connection configured in your Azure AI project. | -| [`azure_ai_with_bing_custom_search.py`](azure_ai_with_bing_custom_search.py) | Shows how to use Bing Custom Search with Azure AI agents to search custom search instances and provide responses with relevant results. Requires a Bing Custom Search connection and instance configured in your Azure AI project. | -| [`azure_ai_with_browser_automation.py`](azure_ai_with_browser_automation.py) | Shows how to use Browser Automation with Azure AI agents to perform automated web browsing tasks and provide responses based on web interactions. Requires a Browser Automation connection configured in your Azure AI project. | +| [`using_foundry_tools.py`](using_foundry_tools.py) | Consolidated sample that wires all Azure AI Foundry-hosted tool helpers directly from `AzureAIClient` in a single `Agent` (Bing, Fabric, SharePoint, Azure AI Search, Browser Automation, A2A, OpenAPI, memory, MCP, code interpreter, file search, web search, and image generation). | | [`azure_ai_with_code_interpreter.py`](azure_ai_with_code_interpreter.py) | Shows how to use `AzureAIClient.get_code_interpreter_tool()` with Azure AI agents to write and execute Python code for mathematical problem solving and data analysis. 
| | [`azure_ai_with_code_interpreter_file_generation.py`](azure_ai_with_code_interpreter_file_generation.py) | Shows how to retrieve file IDs from code interpreter generated files using both streaming and non-streaming approaches. | | [`azure_ai_with_code_interpreter_file_download.py`](azure_ai_with_code_interpreter_file_download.py) | Shows how to download files generated by code interpreter using the OpenAI containers API. | @@ -30,12 +26,8 @@ This folder contains examples demonstrating different ways to create and use age | [`azure_ai_with_runtime_json_schema.py`](azure_ai_with_runtime_json_schema.py) | Shows how to use structured outputs (response format) with Azure AI agents using a JSON schema to enforce specific response schemas. | | [`azure_ai_with_search_context_agentic.py`](../../context_providers/azure_ai_search/azure_ai_with_search_context_agentic.py) | Shows how to use AzureAISearchContextProvider with agentic mode. Uses Knowledge Bases for multi-hop reasoning across documents with query planning. Recommended for most scenarios - slightly slower with more token consumption for query planning, but more accurate results. | | [`azure_ai_with_search_context_semantic.py`](../../context_providers/azure_ai_search/azure_ai_with_search_context_semantic.py) | Shows how to use AzureAISearchContextProvider with semantic mode. Fast hybrid search with vector + keyword search and semantic ranking for RAG. Best for simple queries where speed is critical. | -| [`azure_ai_with_sharepoint.py`](azure_ai_with_sharepoint.py) | Shows how to use SharePoint grounding with Azure AI agents to search through SharePoint content and answer user questions with proper citations. Requires a SharePoint connection configured in your Azure AI project. 
| | [`azure_ai_with_session.py`](azure_ai_with_session.py) | Demonstrates session management with Azure AI agents, including automatic session creation for stateless conversations and explicit session management for maintaining conversation context across multiple interactions. | | [`azure_ai_with_image_generation.py`](azure_ai_with_image_generation.py) | Shows how to use `AzureAIClient.get_image_generation_tool()` with Azure AI agents to generate images based on text prompts. | -| [`azure_ai_with_memory_search.py`](azure_ai_with_memory_search.py) | Shows how to use memory search functionality with Azure AI agents for conversation persistence. Demonstrates creating memory stores and enabling agents to search through conversation history. | -| [`azure_ai_with_microsoft_fabric.py`](azure_ai_with_microsoft_fabric.py) | Shows how to use Microsoft Fabric with Azure AI agents to query Fabric data sources and provide responses based on data analysis. Requires a Microsoft Fabric connection configured in your Azure AI project. | -| [`azure_ai_with_openapi.py`](azure_ai_with_openapi.py) | Shows how to integrate OpenAPI specifications with Azure AI agents using dictionary-based tool configuration. Demonstrates using external REST APIs for dynamic data lookup. | | [`azure_ai_with_reasoning.py`](azure_ai_with_reasoning.py) | Shows how to enable reasoning for a model that supports it. | | [`azure_ai_with_web_search.py`](azure_ai_with_web_search.py) | Shows how to use `AzureAIClient.get_web_search_tool()` with Azure AI agents to perform web searches and retrieve up-to-date information from the internet. | diff --git a/python/samples/02-agents/providers/azure_ai/azure_ai_with_agent_to_agent.py b/python/samples/02-agents/providers/azure_ai/azure_ai_with_agent_to_agent.py deleted file mode 100644 index c25ce21e82..0000000000 --- a/python/samples/02-agents/providers/azure_ai/azure_ai_with_agent_to_agent.py +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright (c) Microsoft. 
All rights reserved. -import asyncio - -from agent_framework.azure import AzureAIClient, AzureAIProjectAgentProvider -from azure.identity.aio import AzureCliCredential -from dotenv import load_dotenv - -# Load environment variables from .env file -load_dotenv() - -""" -Azure AI Agent with Agent-to-Agent (A2A) Example - -This sample demonstrates usage of AzureAIProjectAgentProvider with Agent-to-Agent (A2A) capabilities -to enable communication with other agents using the A2A protocol. - -Prerequisites: -1. Set AZURE_AI_PROJECT_ENDPOINT and AZURE_AI_MODEL_DEPLOYMENT_NAME environment variables. -2. Ensure you have an A2A connection configured in your Azure AI project - and set A2A_PROJECT_CONNECTION_ID environment variable. -3. (Optional) A2A_ENDPOINT - If the connection is missing target (e.g., "Custom keys" type), - set the A2A endpoint URL directly. -""" - - -async def main() -> None: - async with ( - AzureCliCredential() as credential, - AzureAIProjectAgentProvider(credential=credential) as provider, - ): - agent = await provider.create_agent( - name="MyA2AAgent", - instructions="""You are a helpful assistant that can communicate with other agents. - Use the A2A tool when you need to interact with other agents to complete tasks - or gather information from specialized agents.""", - tools=AzureAIClient.get_a2a_tool(), - ) - - query = "What can the secondary agent do?" - print(f"User: {query}") - result = await agent.run(query) - print(f"Result: {result}\n") - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/python/samples/02-agents/providers/azure_ai/azure_ai_with_azure_ai_search.py b/python/samples/02-agents/providers/azure_ai/azure_ai_with_azure_ai_search.py deleted file mode 100644 index 96c3b91be1..0000000000 --- a/python/samples/02-agents/providers/azure_ai/azure_ai_with_azure_ai_search.py +++ /dev/null @@ -1,98 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. 
-import asyncio - -from agent_framework import Annotation -from agent_framework.azure import AzureAIClient, AzureAIProjectAgentProvider -from azure.identity.aio import AzureCliCredential -from dotenv import load_dotenv - -# Load environment variables from .env file -load_dotenv() - -""" -Azure AI Agent with Azure AI Search Example - -This sample demonstrates usage of AzureAIProjectAgentProvider with Azure AI Search -to search through indexed data and answer user questions about it. - -Citations from Azure AI Search are automatically enriched with document-specific -URLs (get_url) that can be used to retrieve the original documents. - -Prerequisites: -1. Set AZURE_AI_PROJECT_ENDPOINT and AZURE_AI_MODEL_DEPLOYMENT_NAME environment variables. -2. Ensure you have an Azure AI Search connection configured in your Azure AI project - and set AI_SEARCH_PROJECT_CONNECTION_ID and AI_SEARCH_INDEX_NAME environment variable. -""" - - -async def main() -> None: - async with ( - AzureCliCredential() as credential, - AzureAIProjectAgentProvider(credential=credential) as provider, - ): - agent = await provider.create_agent( - name="MySearchAgent", - instructions=( - "You are a helpful agent that searches hotel information using Azure AI Search. " - "Always use the search tool and index to find hotel data and provide accurate information." - ), - # For query_type=vector, ensure your index has a field with vectorized data. - tools=AzureAIClient.get_azure_ai_search_tool(query_type="simple"), - ) - - query = ( - "Use Azure AI search knowledge tool to find detailed information about a winter hotel." - " Use the search tool and index." 
# You can modify prompt to force tool usage - ) - print(f"User: {query}") - - # Non-streaming: get response with enriched citations - result = await agent.run(query) - print(f"Result: {result}\n") - - # Display citations with document-specific URLs - if result.messages: - citations: list[Annotation] = [] - for msg in result.messages: - for content in msg.contents: - if hasattr(content, "annotations") and content.annotations: - citations.extend(content.annotations) - - if citations: - print("Citations:") - for i, citation in enumerate(citations, 1): - url = citation.get("url", "N/A") - # get_url contains the document-specific REST API URL from Azure AI Search - get_url = (citation.get("additional_properties") or {}).get("get_url") - print(f" [{i}] {citation.get('title', 'N/A')}") - print(f" URL: {url}") - if get_url: - print(f" Document URL: {get_url}") - - # Streaming: collect citations from streamed response - print("\n--- Streaming ---") - print(f"User: {query}") - print("Agent: ", end="", flush=True) - streaming_citations: list[Annotation] = [] - async for chunk in agent.run(query, stream=True): - if chunk.text: - print(chunk.text, end="", flush=True) - for content in getattr(chunk, "contents", []): - annotations = getattr(content, "annotations", []) - if annotations: - streaming_citations.extend(annotations) - - print() - if streaming_citations: - print("\nStreaming Citations:") - for i, citation in enumerate(streaming_citations, 1): - url = citation.get("url", "N/A") - get_url = (citation.get("additional_properties") or {}).get("get_url") - print(f" [{i}] {citation.get('title', 'N/A')}") - print(f" URL: {url}") - if get_url: - print(f" Document URL: {get_url}") - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/python/samples/02-agents/providers/azure_ai/azure_ai_with_bing_custom_search.py b/python/samples/02-agents/providers/azure_ai/azure_ai_with_bing_custom_search.py deleted file mode 100644 index 3bf8b0b0b3..0000000000 --- 
a/python/samples/02-agents/providers/azure_ai/azure_ai_with_bing_custom_search.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. -import asyncio - -from agent_framework.azure import AzureAIClient, AzureAIProjectAgentProvider -from azure.identity.aio import AzureCliCredential -from dotenv import load_dotenv - -# Load environment variables from .env file -load_dotenv() - -""" -Azure AI Agent with Bing Custom Search Example - -This sample demonstrates usage of AzureAIProjectAgentProvider with Bing Custom Search -to search custom search instances and provide responses with relevant results. - -Prerequisites: -1. Set AZURE_AI_PROJECT_ENDPOINT and AZURE_AI_MODEL_DEPLOYMENT_NAME environment variables. -2. Ensure you have a Bing Custom Search connection configured in your Azure AI project - and set BING_CUSTOM_SEARCH_PROJECT_CONNECTION_ID and BING_CUSTOM_SEARCH_INSTANCE_NAME environment variables. -""" - - -async def main() -> None: - async with ( - AzureCliCredential() as credential, - AzureAIProjectAgentProvider(credential=credential) as provider, - ): - agent = await provider.create_agent( - name="MyCustomSearchAgent", - instructions="""You are a helpful agent that can use Bing Custom Search tools to assist users. - Use the available Bing Custom Search tools to answer questions and perform tasks.""", - tools=AzureAIClient.get_bing_tool(variant="custom_search"), - ) - - query = "Tell me more about foundry agent service" - print(f"User: {query}") - result = await agent.run(query) - print(f"Result: {result}\n") - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/python/samples/02-agents/providers/azure_ai/azure_ai_with_bing_grounding.py b/python/samples/02-agents/providers/azure_ai/azure_ai_with_bing_grounding.py deleted file mode 100644 index 7ba859d416..0000000000 --- a/python/samples/02-agents/providers/azure_ai/azure_ai_with_bing_grounding.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. 
-import asyncio - -from agent_framework.azure import AzureAIClient, AzureAIProjectAgentProvider -from azure.identity.aio import AzureCliCredential -from dotenv import load_dotenv - -# Load environment variables from .env file -load_dotenv() - -""" -Azure AI Agent with Bing Grounding Example - -This sample demonstrates usage of AzureAIProjectAgentProvider with Bing Grounding -to search the web for current information and provide grounded responses. - -Prerequisites: -1. Set AZURE_AI_PROJECT_ENDPOINT and AZURE_AI_MODEL_DEPLOYMENT_NAME environment variables. -2. Ensure you have a Bing connection configured in your Azure AI project - and set BING_PROJECT_CONNECTION_ID environment variable. - -To get your Bing connection ID: -- Go to Azure AI Foundry portal (https://ai.azure.com) -- Navigate to your project's "Connected resources" section -- Add a new connection for "Grounding with Bing Search" -- Copy the connection ID and set it as the BING_PROJECT_CONNECTION_ID environment variable -""" - - -async def main() -> None: - async with ( - AzureCliCredential() as credential, - AzureAIProjectAgentProvider(credential=credential) as provider, - ): - agent = await provider.create_agent( - name="MyBingGroundingAgent", - instructions="""You are a helpful assistant that can search the web for current information. - Use the Bing search tool to find up-to-date information and provide accurate, well-sourced answers. - Always cite your sources when possible.""", - tools=AzureAIClient.get_bing_tool(variant="grounding"), - ) - - query = "What is today's date and weather in Seattle?" 
- print(f"User: {query}") - result = await agent.run(query) - print(f"Result: {result}\n") - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/python/samples/02-agents/providers/azure_ai/azure_ai_with_browser_automation.py b/python/samples/02-agents/providers/azure_ai/azure_ai_with_browser_automation.py deleted file mode 100644 index 46188bc800..0000000000 --- a/python/samples/02-agents/providers/azure_ai/azure_ai_with_browser_automation.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. -import asyncio - -from agent_framework.azure import AzureAIClient, AzureAIProjectAgentProvider -from azure.identity.aio import AzureCliCredential -from dotenv import load_dotenv - -# Load environment variables from .env file -load_dotenv() - -""" -Azure AI Agent with Browser Automation Example - -This sample demonstrates usage of AzureAIProjectAgentProvider with Browser Automation -to perform automated web browsing tasks and provide responses based on web interactions. - -Prerequisites: -1. Set AZURE_AI_PROJECT_ENDPOINT and AZURE_AI_MODEL_DEPLOYMENT_NAME environment variables. -2. Ensure you have a Browser Automation connection configured in your Azure AI project - and set BROWSER_AUTOMATION_PROJECT_CONNECTION_ID environment variable. -""" - - -async def main() -> None: - async with ( - AzureCliCredential() as credential, - AzureAIProjectAgentProvider(credential=credential) as provider, - ): - agent = await provider.create_agent( - name="MyBrowserAutomationAgent", - instructions="""You are an Agent helping with browser automation tasks. - You can answer questions, provide information, and assist with various tasks - related to web browsing using the Browser Automation tool available to you.""", - tools=AzureAIClient.get_browser_automation_tool(), - ) - - query = """Your goal is to report the percent of Microsoft year-to-date stock price change. - To do that, go to the website finance.yahoo.com. 
- At the top of the page, you will find a search bar. - Enter the value 'MSFT', to get information about the Microsoft stock price. - At the top of the resulting page you will see a default chart of Microsoft stock price. - Click on 'YTD' at the top of that chart, and report the percent value that shows up just below it.""" - - print(f"User: {query}") - result = await agent.run(query) - print(f"Result: {result}\n") - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/python/samples/02-agents/providers/azure_ai/azure_ai_with_memory_search.py b/python/samples/02-agents/providers/azure_ai/azure_ai_with_memory_search.py deleted file mode 100644 index aaa97d4d8a..0000000000 --- a/python/samples/02-agents/providers/azure_ai/azure_ai_with_memory_search.py +++ /dev/null @@ -1,91 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. -import asyncio -import os -import uuid - -from agent_framework.azure import AzureAIClient, AzureAIProjectAgentProvider -from azure.ai.projects.aio import AIProjectClient -from azure.ai.projects.models import MemoryStoreDefaultDefinition, MemoryStoreDefaultOptions -from azure.identity.aio import AzureCliCredential -from dotenv import load_dotenv - -# Load environment variables from .env file -load_dotenv() - -""" -Azure AI Agent with Memory Search Example - -This sample demonstrates usage of AzureAIProjectAgentProvider with memory search capabilities -to retrieve relevant past user messages and maintain conversation context across sessions. -It shows explicit memory store creation using Azure AI Projects client and agent creation -using the Agent Framework. - -Prerequisites: -1. Set AZURE_AI_PROJECT_ENDPOINT and AZURE_AI_MODEL_DEPLOYMENT_NAME environment variables. -2. Set AZURE_AI_CHAT_MODEL_DEPLOYMENT_NAME for the memory chat model. -3. Set AZURE_AI_EMBEDDING_MODEL_DEPLOYMENT_NAME for the memory embedding model. -4. Deploy both a chat model (e.g. gpt-4.1) and an embedding model (e.g. text-embedding-3-small). 
-""" - - -async def main() -> None: - endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] - # Generate a unique memory store name to avoid conflicts - memory_store_name = f"agent_framework_memory_store_{uuid.uuid4().hex[:8]}" - - async with AzureCliCredential() as credential: - # Create the memory store using Azure AI Projects client - async with AIProjectClient(endpoint=endpoint, credential=credential) as project_client: - # Create a memory store using proper model classes - memory_store_definition = MemoryStoreDefaultDefinition( - chat_model=os.environ["AZURE_AI_CHAT_MODEL_DEPLOYMENT_NAME"], - embedding_model=os.environ["AZURE_AI_EMBEDDING_MODEL_DEPLOYMENT_NAME"], - options=MemoryStoreDefaultOptions(user_profile_enabled=True, chat_summary_enabled=True), - ) - - memory_store = await project_client.memory_stores.create( - name=memory_store_name, - description="Memory store for Agent Framework conversations", - definition=memory_store_definition, - ) - print(f"Created memory store: {memory_store.name} ({memory_store.id}): {memory_store.description}") - - # Then, create the agent using Agent Framework provider - async with AzureAIProjectAgentProvider(credential=credential) as provider: - agent = await provider.create_agent( - name="MyMemoryAgent", - instructions="""You are a helpful assistant that remembers past conversations. 
- Use the memory search tool to recall relevant information from previous interactions.""", - tools=AzureAIClient.get_memory_search_tool( - memory_store_name=memory_store.name, - scope="user_123", - update_delay=1, # Wait 1 second before updating memories (use higher value in production) - ), - ) - - # First interaction - establish some preferences - print("=== First conversation ===") - query1 = "I prefer dark roast coffee" - print(f"User: {query1}") - result1 = await agent.run(query1) - print(f"Agent: {result1}\n") - - # Wait for memories to be processed - print("Waiting for memories to be stored...") - await asyncio.sleep(5) # Reduced wait time for demo purposes - - # Second interaction - test memory recall - print("=== Second conversation ===") - query2 = "Please order my usual coffee" - print(f"User: {query2}") - result2 = await agent.run(query2) - print(f"Agent: {result2}\n") - - # Clean up - delete the memory store - async with AIProjectClient(endpoint=endpoint, credential=credential) as project_client: - await project_client.memory_stores.delete(memory_store_name) - print("Memory store deleted") - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/python/samples/02-agents/providers/azure_ai/azure_ai_with_microsoft_fabric.py b/python/samples/02-agents/providers/azure_ai/azure_ai_with_microsoft_fabric.py deleted file mode 100644 index 9702c9827f..0000000000 --- a/python/samples/02-agents/providers/azure_ai/azure_ai_with_microsoft_fabric.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. 
-import asyncio - -from agent_framework.azure import AzureAIClient, AzureAIProjectAgentProvider -from azure.identity.aio import AzureCliCredential -from dotenv import load_dotenv - -# Load environment variables from .env file -load_dotenv() - -""" -Azure AI Agent with Microsoft Fabric Example - -This sample demonstrates usage of AzureAIProjectAgentProvider with Microsoft Fabric -to query Fabric data sources and provide responses based on data analysis. - -Prerequisites: -1. Set AZURE_AI_PROJECT_ENDPOINT and AZURE_AI_MODEL_DEPLOYMENT_NAME environment variables. -2. Ensure you have a Microsoft Fabric connection configured in your Azure AI project - and set FABRIC_PROJECT_CONNECTION_ID environment variable. -""" - - -async def main() -> None: - async with ( - AzureCliCredential() as credential, - AzureAIProjectAgentProvider(credential=credential) as provider, - ): - agent = await provider.create_agent( - name="MyFabricAgent", - instructions="You are a helpful assistant.", - tools=AzureAIClient.get_fabric_data_agent_tool(), - ) - - query = "Tell me about sales records" - print(f"User: {query}") - result = await agent.run(query) - print(f"Result: {result}\n") - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/python/samples/02-agents/providers/azure_ai/azure_ai_with_openapi.py b/python/samples/02-agents/providers/azure_ai/azure_ai_with_openapi.py deleted file mode 100644 index 6244aa4467..0000000000 --- a/python/samples/02-agents/providers/azure_ai/azure_ai_with_openapi.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. 
-import asyncio -import json -from pathlib import Path - -from agent_framework.azure import AzureAIClient, AzureAIProjectAgentProvider -from azure.identity.aio import AzureCliCredential -from dotenv import load_dotenv - -# Load environment variables from .env file -load_dotenv() - -""" -Azure AI Agent with OpenAPI Tool Example - -This sample demonstrates usage of AzureAIProjectAgentProvider with OpenAPI tools -to call external APIs defined by OpenAPI specifications. - -Prerequisites: -1. Set AZURE_AI_PROJECT_ENDPOINT and AZURE_AI_MODEL_DEPLOYMENT_NAME environment variables. -2. The countries.json OpenAPI specification is included in the resources folder. -""" - - -async def main() -> None: - # Load the OpenAPI specification - resources_path = Path(__file__).parents[3] / "shared" / "resources" / "countries.json" - - with open(resources_path) as f: - openapi_countries = json.load(f) - - async with ( - AzureCliCredential() as credential, - AzureAIProjectAgentProvider(credential=credential) as provider, - ): - agent = await provider.create_agent( - name="MyOpenAPIAgent", - instructions="""You are a helpful assistant that can use country APIs to provide information. - Use the available OpenAPI tools to answer questions about countries, currencies, and demographics.""", - tools=AzureAIClient.get_openapi_tool( - name="get_countries", - spec=openapi_countries, - description="Retrieve information about countries by currency code", - auth={"type": "anonymous"}, - ), - ) - - query = "What is the name and population of the country that uses currency with abbreviation THB?" 
- print(f"User: {query}") - result = await agent.run(query) - print(f"Agent: {result}\n") - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/python/samples/02-agents/providers/azure_ai/azure_ai_with_sharepoint.py b/python/samples/02-agents/providers/azure_ai/azure_ai_with_sharepoint.py deleted file mode 100644 index b5c88f9a2b..0000000000 --- a/python/samples/02-agents/providers/azure_ai/azure_ai_with_sharepoint.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. -import asyncio - -from agent_framework.azure import AzureAIClient, AzureAIProjectAgentProvider -from azure.identity.aio import AzureCliCredential -from dotenv import load_dotenv - -# Load environment variables from .env file -load_dotenv() - -""" -Azure AI Agent with SharePoint Example - -This sample demonstrates usage of AzureAIProjectAgentProvider with SharePoint -to search through SharePoint content and answer user questions about it. - -Prerequisites: -1. Set AZURE_AI_PROJECT_ENDPOINT and AZURE_AI_MODEL_DEPLOYMENT_NAME environment variables. -2. Ensure you have a SharePoint connection configured in your Azure AI project - and set SHAREPOINT_PROJECT_CONNECTION_ID environment variable. -""" - - -async def main() -> None: - async with ( - AzureCliCredential() as credential, - AzureAIProjectAgentProvider(credential=credential) as provider, - ): - agent = await provider.create_agent( - name="MySharePointAgent", - instructions="""You are a helpful agent that can use SharePoint tools to assist users. - Use the available SharePoint tools to answer questions and perform tasks.""", - tools=AzureAIClient.get_sharepoint_grounding_tool(), - ) - - query = "What is Contoso whistleblower policy?" 
- print(f"User: {query}") - result = await agent.run(query) - print(f"Result: {result}\n") - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/python/samples/02-agents/providers/azure_ai/using_foundry_tools.py b/python/samples/02-agents/providers/azure_ai/using_foundry_tools.py new file mode 100644 index 0000000000..d5e028163b --- /dev/null +++ b/python/samples/02-agents/providers/azure_ai/using_foundry_tools.py @@ -0,0 +1,108 @@ +# Copyright (c) Microsoft. All rights reserved. + +import asyncio +import os + +from agent_framework import Agent +from agent_framework.azure import AzureAIClient +from azure.identity.aio import AzureCliCredential +from dotenv import load_dotenv + +# Load environment variables from .env file +load_dotenv() + +""" +Azure AI Client with Foundry Tools Example. + +Important: +- This sample is intentionally non-defensive and includes direct tool wiring. +- Comment out any tool entries you do not want to use, or whose required environment + variables/connections you have not configured yet. + +Required project settings: +- AZURE_AI_PROJECT_ENDPOINT +- AZURE_AI_MODEL_DEPLOYMENT_NAME + +Tool-to-environment mapping used in this sample: +- client.get_file_search_tool(...): FILE_SEARCH_VECTOR_STORE_ID (explicitly read in code). +- client.get_bing_tool(variant="grounding"): BING_PROJECT_CONNECTION_ID. +- client.get_bing_tool(variant="custom_search"): + BING_CUSTOM_SEARCH_PROJECT_CONNECTION_ID and BING_CUSTOM_SEARCH_INSTANCE_NAME. +- client.get_fabric_data_agent_tool(): FABRIC_PROJECT_CONNECTION_ID. +- client.get_sharepoint_grounding_tool(): SHAREPOINT_PROJECT_CONNECTION_ID. +- client.get_azure_ai_search_tool(...): AI_SEARCH_PROJECT_CONNECTION_ID and AI_SEARCH_INDEX_NAME. +- client.get_browser_automation_tool(): BROWSER_AUTOMATION_PROJECT_CONNECTION_ID. +- client.get_a2a_tool(): A2A_PROJECT_CONNECTION_ID (optionally A2A_ENDPOINT for base_url). 
+ +No additional environment settings are required for: +- client.get_code_interpreter_tool() +- client.get_web_search_tool(...) +- client.get_image_generation_tool(...) +- client.get_mcp_tool(...) +- client.get_openapi_tool(...) +- client.get_memory_search_tool(...) +""" + + +async def main() -> None: + print("=== Azure AI Client with Foundry Tools Example ===") + + async with AzureCliCredential() as credential: + client = AzureAIClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + model_deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=credential, + ) + + async with Agent( + client=client, + instructions="You are a helpful assistant that can use Foundry-hosted tools when useful.", + tools=[ + client.get_code_interpreter_tool(), + client.get_web_search_tool(user_location={"country": "US", "city": "Seattle"}), + client.get_image_generation_tool(), + client.get_mcp_tool( + name="Microsoft Learn MCP", + url="https://learn.microsoft.com/api/mcp", + approval_mode="never_require", + ), + client.get_openapi_tool( + name="get_countries", + spec={ + "openapi": "3.0.0", + "info": {"title": "Countries API", "version": "1.0.0"}, + "paths": { + "/countries": { + "get": { + "operationId": "listCountries", + "responses": {"200": {"description": "OK"}}, + } + } + }, + }, + description="Retrieve information about countries.", + auth={"type": "anonymous"}, + ), + client.get_memory_search_tool( + memory_store_name="agent-framework-memory-store", + scope="user_123", + update_delay=1, + ), + client.get_file_search_tool(vector_store_ids=os.environ["FILE_SEARCH_VECTOR_STORE_ID"]), + client.get_bing_tool(variant="grounding"), + client.get_bing_tool(variant="custom_search"), + client.get_fabric_data_agent_tool(), + client.get_sharepoint_grounding_tool(), + client.get_azure_ai_search_tool(query_type="simple"), + client.get_browser_automation_tool(), + client.get_a2a_tool(), + ], + ) as agent: + query = "List the tool categories available to you and 
when each category is useful." + print(f"User: {query}") + result = await agent.run(query) + print(f"Agent: {result}") + + +if __name__ == "__main__": + asyncio.run(main()) From 2e8e87f1e86e08f0cebb5dd5fa4afe3cf6af36c7 Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Tue, 3 Mar 2026 11:13:30 +0100 Subject: [PATCH 03/10] Python: use provider setup in consolidated AzureAI sample Update using_foundry_tools.py to initialize via AzureAIProjectAgentProvider while keeping AzureAIClient tool helper usage, and align README description. Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- .../02-agents/providers/azure_ai/README.md | 2 +- .../providers/azure_ai/using_foundry_tools.py | 33 +++++++++++-------- 2 files changed, 20 insertions(+), 15 deletions(-) diff --git a/python/samples/02-agents/providers/azure_ai/README.md b/python/samples/02-agents/providers/azure_ai/README.md index 48554a1b97..75c30ffcac 100644 --- a/python/samples/02-agents/providers/azure_ai/README.md +++ b/python/samples/02-agents/providers/azure_ai/README.md @@ -10,7 +10,7 @@ This folder contains examples demonstrating different ways to create and use age | [`azure_ai_provider_methods.py`](azure_ai_provider_methods.py) | Comprehensive guide to `AzureAIProjectAgentProvider` methods: `create_agent()` for creating new agents, `get_agent()` for retrieving existing agents (by name, reference, or details), and `as_agent()` for wrapping SDK objects without HTTP calls. | | [`azure_ai_use_latest_version.py`](azure_ai_use_latest_version.py) | Demonstrates how to reuse the latest version of an existing agent instead of creating a new agent version on each instantiation by using `provider.get_agent()` to retrieve the latest version. | | [`azure_ai_with_agent_as_tool.py`](azure_ai_with_agent_as_tool.py) | Shows how to use the agent-as-tool pattern with Azure AI agents, where one agent delegates work to specialized sub-agents wrapped as tools using `as_tool()`. 
Demonstrates hierarchical agent architectures. | -| [`using_foundry_tools.py`](using_foundry_tools.py) | Consolidated sample that wires all Azure AI Foundry-hosted tool helpers directly from `AzureAIClient` in a single `Agent` (Bing, Fabric, SharePoint, Azure AI Search, Browser Automation, A2A, OpenAPI, memory, MCP, code interpreter, file search, web search, and image generation). | +| [`using_foundry_tools.py`](using_foundry_tools.py) | Consolidated sample that uses `AzureAIProjectAgentProvider` for setup and wires all Azure AI Foundry-hosted tool helpers from `AzureAIClient` in a single agent (Bing, Fabric, SharePoint, Azure AI Search, Browser Automation, A2A, OpenAPI, memory, MCP, code interpreter, file search, web search, and image generation). | | [`azure_ai_with_code_interpreter.py`](azure_ai_with_code_interpreter.py) | Shows how to use `AzureAIClient.get_code_interpreter_tool()` with Azure AI agents to write and execute Python code for mathematical problem solving and data analysis. | | [`azure_ai_with_code_interpreter_file_generation.py`](azure_ai_with_code_interpreter_file_generation.py) | Shows how to retrieve file IDs from code interpreter generated files using both streaming and non-streaming approaches. | | [`azure_ai_with_code_interpreter_file_download.py`](azure_ai_with_code_interpreter_file_download.py) | Shows how to download files generated by code interpreter using the OpenAI containers API. 
| diff --git a/python/samples/02-agents/providers/azure_ai/using_foundry_tools.py b/python/samples/02-agents/providers/azure_ai/using_foundry_tools.py index d5e028163b..42587d3858 100644 --- a/python/samples/02-agents/providers/azure_ai/using_foundry_tools.py +++ b/python/samples/02-agents/providers/azure_ai/using_foundry_tools.py @@ -3,8 +3,7 @@ import asyncio import os -from agent_framework import Agent -from agent_framework.azure import AzureAIClient +from agent_framework.azure import AzureAIClient, AzureAIProjectAgentProvider from azure.identity.aio import AzureCliCredential from dotenv import load_dotenv @@ -14,6 +13,9 @@ """ Azure AI Client with Foundry Tools Example. +This sample uses ``AzureAIProjectAgentProvider`` for agent setup while sourcing +all Foundry tool configurations from ``AzureAIClient`` helper methods. + Important: - This sample is intentionally non-defensive and includes direct tool wiring. - Comment out any tool entries you do not want to use, or whose required environment @@ -47,15 +49,17 @@ async def main() -> None: print("=== Azure AI Client with Foundry Tools Example ===") - async with AzureCliCredential() as credential: - client = AzureAIClient( + async with ( + AzureCliCredential() as credential, + AzureAIProjectAgentProvider( project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], - model_deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], credential=credential, - ) - - async with Agent( - client=client, + ) as provider, + ): + client = AzureAIClient(credential=credential) + agent = await provider.create_agent( + name="FoundryToolsAgent", instructions="You are a helpful assistant that can use Foundry-hosted tools when useful.", tools=[ client.get_code_interpreter_tool(), @@ -97,11 +101,12 @@ async def main() -> None: client.get_browser_automation_tool(), client.get_a2a_tool(), ], - ) as agent: - query = "List the tool categories available to you and when each category 
is useful." - print(f"User: {query}") - result = await agent.run(query) - print(f"Agent: {result}") + ) + + query = "List the tool categories available to you and when each category is useful." + print(f"User: {query}") + result = await agent.run(query) + print(f"Agent: {result}") if __name__ == "__main__": From 6885dc8b26689ced9fab3f84565670f3f73b2e1c Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Tue, 3 Mar 2026 11:14:11 +0100 Subject: [PATCH 04/10] updated readme --- python/samples/02-agents/providers/azure_openai/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/samples/02-agents/providers/azure_openai/README.md b/python/samples/02-agents/providers/azure_openai/README.md index 427b2f5ca6..0cc83aa8d2 100644 --- a/python/samples/02-agents/providers/azure_openai/README.md +++ b/python/samples/02-agents/providers/azure_openai/README.md @@ -41,8 +41,8 @@ For the Foundry project sample (`azure_responses_client_with_foundry.py`), also - `AZURE_AI_PROJECT_ENDPOINT`: Your Azure AI Foundry project endpoint For the Foundry tools sample (`azure_responses_client_with_foundry_tools.py`), set: -- `FOUNDRY_PROJECT_ENDPOINT` (or `AZURE_AI_PROJECT_ENDPOINT`) -- `FOUNDRY_MODEL_DEPLOYMENT_NAME` (or `AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME`) +- `AZURE_AI_PROJECT_ENDPOINT` +- `AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME` - Tool-specific connection variables as needed (for example `FABRIC_PROJECT_CONNECTION_ID`, `BING_PROJECT_CONNECTION_ID`). 
Optionally, you can set: From 7ca17da60437aaf7718554466cf5cd6337aea653 Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Tue, 3 Mar 2026 11:25:19 +0100 Subject: [PATCH 05/10] Python: reuse shared hosted-id normalization and address CI flakes Remove RawAzureAIClient._normalize_hosted_ids and reuse agent_framework.azure._shared._normalize_hosted_ids, include the pending Azure responses model_id assignment fix and pyright config update, and replace a flaky external blog link in Azure AI Search README with stable Foundry IQ docs to reduce markdown link-check failures. Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- .../agent_framework_azure_ai/_client.py | 48 ++----------------- .../azure/_responses_client.py | 2 +- python/pyproject.toml | 4 +- .../azure_ai_search/README.md | 4 +- 4 files changed, 8 insertions(+), 50 deletions(-) diff --git a/python/packages/azure-ai/agent_framework_azure_ai/_client.py b/python/packages/azure-ai/agent_framework_azure_ai/_client.py index 092c7e6e10..b58eac0cca 100644 --- a/python/packages/azure-ai/agent_framework_azure_ai/_client.py +++ b/python/packages/azure-ai/agent_framework_azure_ai/_client.py @@ -32,6 +32,7 @@ from agent_framework._tools import ToolTypes from agent_framework.azure._entra_id_authentication import AzureCredentialTypes from agent_framework.azure._shared import ( + _normalize_hosted_ids, create_bing_tool, create_a2a_tool, create_azure_ai_search_tool, @@ -837,49 +838,6 @@ def _enrich_update(update: ChatResponseUpdate) -> ChatResponseUpdate: # region Hosted Tool Factory Methods (Azure-specific overrides) - @staticmethod - def _normalize_hosted_ids( - value: str | Content | Sequence[str | Content] | None, - *, - expected_content_type: Literal["hosted_file", "hosted_vector_store"], - content_id_field: Literal["file_id", "vector_store_id"], - parameter_name: Literal["file_ids", "vector_store_ids"], - ) -> list[str] | None: - """Normalize string/Content id inputs with strict hosted content 
validation.""" - if value is None: - return None - - items: list[str | Content] - if isinstance(value, (str, Content)): - items = [value] - else: - items = list(value) - - normalized_ids: list[str] = [] - for item in items: - if isinstance(item, str): - normalized_ids.append(item) - continue - - if isinstance(item, Content): - if item.type != expected_content_type: - raise TypeError( - f"{parameter_name} accepts string IDs or Content of type {expected_content_type}." - ) - content_id = getattr(item, content_id_field) - if not content_id: - raise ValueError( - f"{parameter_name} Content items must include '{content_id_field}'." - ) - normalized_ids.append(content_id) - continue - - raise TypeError( - f"{parameter_name} accepts string IDs or Content of type {expected_content_type}." - ) - - return normalized_ids - @staticmethod def get_code_interpreter_tool( # type: ignore[override] *, @@ -912,7 +870,7 @@ def get_code_interpreter_tool( # type: ignore[override] if file_ids is None and isinstance(container, dict): file_ids = cast("str | Content | Sequence[str | Content] | None", container.get("file_ids")) - normalized_file_ids = RawAzureAIClient._normalize_hosted_ids( + normalized_file_ids = _normalize_hosted_ids( file_ids, expected_content_type="hosted_file", content_id_field="file_id", @@ -956,7 +914,7 @@ def get_file_search_tool( ) agent = ChatAgent(client, tools=[tool]) """ - normalized_vector_store_ids = RawAzureAIClient._normalize_hosted_ids( + normalized_vector_store_ids = _normalize_hosted_ids( vector_store_ids, expected_content_type="hosted_vector_store", content_id_field="vector_store_id", diff --git a/python/packages/core/agent_framework/azure/_responses_client.py b/python/packages/core/agent_framework/azure/_responses_client.py index ea86e78f74..f34c73edc7 100644 --- a/python/packages/core/agent_framework/azure/_responses_client.py +++ b/python/packages/core/agent_framework/azure/_responses_client.py @@ -193,7 +193,7 @@ class 
MyOptions(AzureOpenAIResponsesOptions, total=False): client: AzureOpenAIResponsesClient[MyOptions] = AzureOpenAIResponsesClient() response = await client.get_response("Hello", options={"my_custom_option": "value"}) """ - if model_id := kwargs.pop("model_id", None) and not deployment_name: + if (model_id := kwargs.pop("model_id", None)) and not deployment_name: deployment_name = str(model_id) azure_openai_settings = load_settings( diff --git a/python/pyproject.toml b/python/pyproject.toml index e4e45f0290..dcb2876299 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -182,8 +182,8 @@ omit = [ ] [tool.pyright] -include = ["agent_framework*"] -exclude = ["**/tests/**", "**/.venv/**", "packages/devui/frontend/**"] +include = ["packages/agent_framework*"] +exclude = ["**/tests/**", "**/.venv/**", "packages/devui/frontend/**", "samples/**", "scripts/**"] typeCheckingMode = "strict" reportUnnecessaryIsInstance = false reportMissingTypeStubs = false diff --git a/python/samples/02-agents/context_providers/azure_ai_search/README.md b/python/samples/02-agents/context_providers/azure_ai_search/README.md index 49403d106c..e51e941836 100644 --- a/python/samples/02-agents/context_providers/azure_ai_search/README.md +++ b/python/samples/02-agents/context_providers/azure_ai_search/README.md @@ -8,7 +8,7 @@ This folder contains examples demonstrating how to use the Azure AI Search conte | File | Description | |------|-------------| -| [`azure_ai_with_search_context_agentic.py`](azure_ai_with_search_context_agentic.py) | **Agentic mode** (recommended for most scenarios): Uses Knowledge Bases in Azure AI Search for query planning and multi-hop reasoning. Provides more accurate results through intelligent retrieval with automatic query reformulation. Slightly slower with more token consumption for query planning. 
[Learn more](https://techcommunity.microsoft.com/blog/azure-ai-foundry-blog/foundry-iq-boost-response-relevance-by-36-with-agentic-retrieval/4470720) | +| [`azure_ai_with_search_context_agentic.py`](azure_ai_with_search_context_agentic.py) | **Agentic mode** (recommended for most scenarios): Uses Knowledge Bases in Azure AI Search for query planning and multi-hop reasoning. Provides more accurate results through intelligent retrieval with automatic query reformulation. Slightly slower with more token consumption for query planning. [Learn more](https://learn.microsoft.com/en-us/azure/foundry/agents/concepts/what-is-foundry-iq?view=foundry&tabs=portal) | | [`azure_ai_with_search_context_semantic.py`](azure_ai_with_search_context_semantic.py) | **Semantic mode** (fast queries): Fast hybrid search combining vector and keyword search with semantic ranking. Returns raw search results as context. Best for scenarios where speed is critical and simple retrieval is sufficient. | ## Installation @@ -261,4 +261,4 @@ async with Agent( - [RAG with Azure AI Search](https://learn.microsoft.com/azure/search/retrieval-augmented-generation-overview) - [Semantic Search in Azure AI Search](https://learn.microsoft.com/azure/search/semantic-search-overview) - [Knowledge Bases in Azure AI Search](https://learn.microsoft.com/azure/search/knowledge-store-concept-intro) -- [Agentic Retrieval Blog Post](https://techcommunity.microsoft.com/blog/azure-ai-foundry-blog/foundry-iq-boost-response-relevance-by-36-with-agentic-retrieval/4470720) +- [Foundry IQ Documentation](https://learn.microsoft.com/en-us/azure/foundry/agents/concepts/what-is-foundry-iq?view=foundry&tabs=portal) From 1ad188e3aef78fe6ac628c274d22e858fff17406 Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Tue, 3 Mar 2026 11:26:01 +0100 Subject: [PATCH 06/10] Python: broaden pyright include glob Adjust tool.pyright include from packages/agent_framework* to packages/**/agent_framework* to cover package layouts consistently. 
Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- python/pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/pyproject.toml b/python/pyproject.toml index dcb2876299..668aea73ff 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -182,7 +182,7 @@ omit = [ ] [tool.pyright] -include = ["packages/agent_framework*"] +include = ["packages/**/agent_framework*"] exclude = ["**/tests/**", "**/.venv/**", "packages/devui/frontend/**", "samples/**", "scripts/**"] typeCheckingMode = "strict" reportUnnecessaryIsInstance = false From 141d4a0f9a722c5e167d64e2977cc44550cb6d4d Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Tue, 3 Mar 2026 11:28:19 +0100 Subject: [PATCH 07/10] undo pyproject --- python/pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/pyproject.toml b/python/pyproject.toml index 668aea73ff..e4e45f0290 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -182,8 +182,8 @@ omit = [ ] [tool.pyright] -include = ["packages/**/agent_framework*"] -exclude = ["**/tests/**", "**/.venv/**", "packages/devui/frontend/**", "samples/**", "scripts/**"] +include = ["agent_framework*"] +exclude = ["**/tests/**", "**/.venv/**", "packages/devui/frontend/**"] typeCheckingMode = "strict" reportUnnecessaryIsInstance = false reportMissingTypeStubs = false From db54523d7fdc7cc3bfcddc39182883cf479b888b Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Tue, 3 Mar 2026 12:09:43 +0100 Subject: [PATCH 08/10] Fix core typing issues and expand azure shared tests Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- .../azure/_responses_client.py | 32 +-- .../core/agent_framework/azure/_shared.py | 6 +- .../openai/_responses_client.py | 11 +- .../core/tests/azure/test_azure_shared.py | 186 ++++++++++++++++++ 4 files changed, 209 insertions(+), 26 deletions(-) create mode 100644 python/packages/core/tests/azure/test_azure_shared.py diff --git 
a/python/packages/core/agent_framework/azure/_responses_client.py b/python/packages/core/agent_framework/azure/_responses_client.py index f34c73edc7..0e32538907 100644 --- a/python/packages/core/agent_framework/azure/_responses_client.py +++ b/python/packages/core/agent_framework/azure/_responses_client.py @@ -3,7 +3,7 @@ from __future__ import annotations import sys -from collections.abc import Mapping, Sequence +from collections.abc import Callable, Mapping, Sequence from typing import TYPE_CHECKING, Any, Generic from urllib.parse import urljoin, urlparse @@ -255,19 +255,23 @@ class MyOptions(AzureOpenAIResponsesOptions, total=False): def _attach_project_tool_methods(self) -> None: """Attach project-mode hosted tool methods dynamically.""" - self.get_code_interpreter_tool = create_code_interpreter_tool - self.get_file_search_tool = create_file_search_tool - self.get_web_search_tool = create_web_search_tool - self.get_bing_tool = create_bing_tool - self.get_image_generation_tool = create_image_generation_tool - self.get_mcp_tool = create_mcp_tool - self.get_fabric_data_agent_tool = create_fabric_data_agent_tool - self.get_sharepoint_grounding_tool = create_sharepoint_grounding_tool - self.get_azure_ai_search_tool = create_azure_ai_search_tool - self.get_browser_automation_tool = create_browser_automation_tool - self.get_openapi_tool = create_openapi_tool - self.get_a2a_tool = create_a2a_tool - self.get_memory_search_tool = create_memory_search_tool + tool_methods: dict[str, Callable[..., Any]] = { + "get_code_interpreter_tool": create_code_interpreter_tool, + "get_file_search_tool": create_file_search_tool, + "get_web_search_tool": create_web_search_tool, + "get_bing_tool": create_bing_tool, + "get_image_generation_tool": create_image_generation_tool, + "get_mcp_tool": create_mcp_tool, + "get_fabric_data_agent_tool": create_fabric_data_agent_tool, + "get_sharepoint_grounding_tool": create_sharepoint_grounding_tool, + "get_azure_ai_search_tool": 
create_azure_ai_search_tool, + "get_browser_automation_tool": create_browser_automation_tool, + "get_openapi_tool": create_openapi_tool, + "get_a2a_tool": create_a2a_tool, + "get_memory_search_tool": create_memory_search_tool, + } + for method_name, method in tool_methods.items(): + setattr(self, method_name, method) @staticmethod def _create_client_from_project( diff --git a/python/packages/core/agent_framework/azure/_shared.py b/python/packages/core/agent_framework/azure/_shared.py index 906d3e5062..a1c825014c 100644 --- a/python/packages/core/agent_framework/azure/_shared.py +++ b/python/packages/core/agent_framework/azure/_shared.py @@ -319,11 +319,7 @@ def _normalize_hosted_ids( if value is None: return None - items: list[str | Content] - if isinstance(value, (str, Content)): - items = [value] - else: - items = list(value) + items: list[str | Content] = [value] if isinstance(value, (str, Content)) else list(value) normalized_ids: list[str] = [] for item in items: diff --git a/python/packages/core/agent_framework/openai/_responses_client.py b/python/packages/core/agent_framework/openai/_responses_client.py index e13505f75c..cf0a331365 100644 --- a/python/packages/core/agent_framework/openai/_responses_client.py +++ b/python/packages/core/agent_framework/openai/_responses_client.py @@ -476,11 +476,7 @@ def _normalize_hosted_ids( if value is None: return None - items: list[str | Content] - if isinstance(value, (str, Content)): - items = [value] - else: - items = list(value) + items: list[str | Content] = [value] if isinstance(value, (str, Content)) else list(value) normalized_ids: list[str] = [] for item in items: @@ -538,8 +534,9 @@ def get_code_interpreter_tool( # Use with agent agent = ChatAgent(client, tools=[tool]) """ - container_config: CodeInterpreterContainerCodeInterpreterToolAuto = ( - dict(container) if isinstance(container, dict) else {"type": "auto"} + container_config = cast( + "CodeInterpreterContainerCodeInterpreterToolAuto", + dict(container) if 
isinstance(container, dict) else {"type": "auto"}, ) if file_ids is None and isinstance(container_config, dict): diff --git a/python/packages/core/tests/azure/test_azure_shared.py b/python/packages/core/tests/azure/test_azure_shared.py new file mode 100644 index 0000000000..02f5e89e2b --- /dev/null +++ b/python/packages/core/tests/azure/test_azure_shared.py @@ -0,0 +1,186 @@ +# Copyright (c) Microsoft. All rights reserved. + +import pytest + +from agent_framework import Content +from agent_framework.azure._shared import ( + _normalize_hosted_ids, + _require_string, + create_a2a_tool, + create_azure_ai_search_tool, + create_bing_tool, + create_browser_automation_tool, + create_code_interpreter_tool, + create_fabric_data_agent_tool, + create_file_search_tool, + create_image_generation_tool, + create_mcp_tool, + create_memory_search_tool, + create_openapi_tool, + create_sharepoint_grounding_tool, + create_web_search_tool, + load_foundry_project_settings, +) + + +def test_require_string_success_and_failure() -> None: + assert _require_string("value", "field") == "value" + with pytest.raises(ValueError, match="'field' is required."): + _require_string("", "field") + + +def test_normalize_hosted_ids_variants() -> None: + assert _normalize_hosted_ids( + "file-1", + expected_content_type="hosted_file", + content_id_field="file_id", + parameter_name="file_ids", + ) == ["file-1"] + assert _normalize_hosted_ids( + [Content.from_hosted_file(file_id="file-2"), "file-3"], + expected_content_type="hosted_file", + content_id_field="file_id", + parameter_name="file_ids", + ) == ["file-2", "file-3"] + + +def test_normalize_hosted_ids_invalid_content_type_raises() -> None: + with pytest.raises(TypeError, match="hosted_vector_store"): + _normalize_hosted_ids( + Content.from_hosted_file(file_id="file-1"), + expected_content_type="hosted_vector_store", + content_id_field="vector_store_id", + parameter_name="vector_store_ids", + ) + + +def 
test_create_code_interpreter_tool_normalizes_from_container() -> None: + tool = create_code_interpreter_tool(container={"file_ids": ["file-1", "file-2"]}) + assert tool["container"]["file_ids"] == ["file-1", "file-2"] + + +def test_create_file_search_tool_with_content_and_requires_ids() -> None: + tool = create_file_search_tool( + vector_store_ids=[Content.from_hosted_vector_store(vector_store_id="vs-1"), "vs-2"], + max_num_results=5, + ) + assert tool["vector_store_ids"] == ["vs-1", "vs-2"] + assert tool["max_num_results"] == 5 + + with pytest.raises(ValueError, match="vector_store_ids"): + create_file_search_tool(vector_store_ids=None) + + +def test_create_web_search_tool_with_location() -> None: + tool = create_web_search_tool(user_location={"city": "Seattle", "country": "US"}, search_context_size="high") + assert tool.search_context_size == "high" + assert tool.user_location is not None + assert tool.user_location.city == "Seattle" + + +def test_create_bing_tool_grounding_explicit_connection() -> None: + tool = create_bing_tool(variant="grounding", project_connection_id="conn-1", market="en-US") + config = tool["bing_grounding"]["search_configurations"][0] + assert tool["type"] == "bing_grounding" + assert config["project_connection_id"] == "conn-1" + assert config["market"] == "en-US" + + +def test_create_bing_tool_custom_search_from_env(monkeypatch: pytest.MonkeyPatch) -> None: + monkeypatch.setenv("BING_CUSTOM_SEARCH_PROJECT_CONNECTION_ID", "conn-custom") + monkeypatch.setenv("BING_CUSTOM_SEARCH_INSTANCE_NAME", "instance-1") + tool = create_bing_tool(variant="custom_search") + config = tool["bing_custom_search_preview"]["search_configurations"][0] + assert tool["type"] == "bing_custom_search_preview" + assert config["project_connection_id"] == "conn-custom" + assert config["instance_name"] == "instance-1" + + +def test_create_bing_tool_custom_search_requires_instance_name() -> None: + with pytest.raises(ValueError, match="'instance_name' is required."): + 
create_bing_tool(variant="custom_search", project_connection_id="conn-only") + + +def test_create_image_generation_tool_sets_values() -> None: + tool = create_image_generation_tool(model="gpt-image-1", size="1024x1024", output_format="png", quality="high") + assert tool["model"] == "gpt-image-1" + assert tool["size"] == "1024x1024" + assert tool["output_format"] == "png" + assert tool["quality"] == "high" + + +def test_create_mcp_tool_with_approval_modes() -> None: + tool = create_mcp_tool( + name="my mcp", + url="https://example.com", + approval_mode={"always_require_approval": ["danger"], "never_require_approval": ["safe"]}, + allowed_tools=["safe"], + headers={"Authorization": "Bearer token"}, + ) + assert tool["server_label"] == "my_mcp" + assert tool["allowed_tools"] == ["safe"] + assert "require_approval" in tool + + +def test_create_fabric_and_sharepoint_tools_from_env(monkeypatch: pytest.MonkeyPatch) -> None: + monkeypatch.setenv("FABRIC_PROJECT_CONNECTION_ID", "fabric-conn") + monkeypatch.setenv("SHAREPOINT_PROJECT_CONNECTION_ID", "sharepoint-conn") + assert ( + create_fabric_data_agent_tool()["fabric_dataagent_preview"]["project_connections"][0]["project_connection_id"] + == "fabric-conn" + ) + assert ( + create_sharepoint_grounding_tool()["sharepoint_grounding_preview"]["project_connections"][0][ + "project_connection_id" + ] + == "sharepoint-conn" + ) + + +def test_create_azure_ai_search_tool_from_env(monkeypatch: pytest.MonkeyPatch) -> None: + monkeypatch.setenv("AI_SEARCH_PROJECT_CONNECTION_ID", "search-conn") + monkeypatch.setenv("AI_SEARCH_INDEX_NAME", "index-1") + tool = create_azure_ai_search_tool(query_type="simple") + index = tool["azure_ai_search"]["indexes"][0] + assert tool["type"] == "azure_ai_search" + assert index["project_connection_id"] == "search-conn" + assert index["index_name"] == "index-1" + assert index["query_type"] == "simple" + + +def test_create_browser_automation_tool_from_env(monkeypatch: pytest.MonkeyPatch) -> None: + 
monkeypatch.setenv("BROWSER_AUTOMATION_PROJECT_CONNECTION_ID", "browser-conn") + tool = create_browser_automation_tool() + assert tool["browser_automation_preview"]["connection"]["project_connection_id"] == "browser-conn" + + +def test_create_openapi_tool_with_auth() -> None: + tool = create_openapi_tool( + name="status_api", + spec={"openapi": "3.0.0", "info": {"title": "Status", "version": "1.0.0"}, "paths": {}}, + description="Status endpoint", + auth={"type": "anonymous"}, + ) + assert tool["type"] == "openapi" + assert tool["openapi"]["name"] == "status_api" + assert tool["openapi"]["auth"]["type"] == "anonymous" + + +def test_create_a2a_tool_and_memory_search(monkeypatch: pytest.MonkeyPatch) -> None: + monkeypatch.setenv("A2A_PROJECT_CONNECTION_ID", "a2a-conn") + monkeypatch.setenv("A2A_ENDPOINT", "https://a2a.example.com") + a2a_tool = create_a2a_tool() + assert a2a_tool["project_connection_id"] == "a2a-conn" + assert a2a_tool["base_url"] == "https://a2a.example.com" + + memory_tool = create_memory_search_tool(memory_store_name="store-1", scope="scope-1", update_delay=5) + assert memory_tool["type"] == "memory_search" + assert memory_tool["update_delay"] == 5 + + +def test_load_foundry_project_settings(monkeypatch: pytest.MonkeyPatch) -> None: + monkeypatch.setenv("FOUNDRY_PROJECT_ENDPOINT", "https://project.example.com") + monkeypatch.setenv("FOUNDRY_MODEL_DEPLOYMENT_NAME", "gpt-4o") + settings = load_foundry_project_settings() + assert settings["project_endpoint"] == "https://project.example.com" + assert settings["model_deployment_name"] == "gpt-4o" From 022bb582f92ef55c6a83a6b08b97b7f6059d71ce Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Tue, 3 Mar 2026 14:17:31 +0100 Subject: [PATCH 09/10] Use per-test env skips for Foundry integrations Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- .../azure-ai/tests/test_azure_ai_client.py | 29 ++++++++----------- .../azure/test_azure_responses_client.py | 27 +++++++---------- 2 files 
changed, 23 insertions(+), 33 deletions(-) diff --git a/python/packages/azure-ai/tests/test_azure_ai_client.py b/python/packages/azure-ai/tests/test_azure_ai_client.py index f0ad36580f..d5c7d604b9 100644 --- a/python/packages/azure-ai/tests/test_azure_ai_client.py +++ b/python/packages/azure-ai/tests/test_azure_ai_client.py @@ -51,22 +51,6 @@ or os.getenv("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") == "", reason="No real AZURE_AI_PROJECT_ENDPOINT or AZURE_AI_MODEL_DEPLOYMENT_NAME provided; skipping integration tests.", ) -skip_if_azure_ai_foundry_helper_integration_tests_disabled = pytest.mark.skipif( - any( - os.getenv(name, "") == "" - for name in ( - "FABRIC_PROJECT_CONNECTION_ID", - "SHAREPOINT_PROJECT_CONNECTION_ID", - "BING_CUSTOM_SEARCH_PROJECT_CONNECTION_ID", - "BING_CUSTOM_SEARCH_INSTANCE_NAME", - "AI_SEARCH_PROJECT_CONNECTION_ID", - "AI_SEARCH_INDEX_NAME", - "BROWSER_AUTOMATION_PROJECT_CONNECTION_ID", - "A2A_PROJECT_CONNECTION_ID", - ) - ), - reason="Required Foundry helper tool settings are missing; skipping integration smoke tests.", -) @pytest.fixture @@ -1602,7 +1586,6 @@ async def test_integration_web_search() -> None: @pytest.mark.flaky @pytest.mark.integration @skip_if_azure_ai_integration_tests_disabled -@skip_if_azure_ai_foundry_helper_integration_tests_disabled @pytest.mark.parametrize( "tool_name", [ @@ -1618,6 +1601,18 @@ async def test_integration_web_search() -> None: ) async def test_integration_foundry_helper_tools_smoke(tool_name: str, client: AzureAIClient) -> None: """Smoke test Foundry helper tools can be passed to Azure AI responses.""" + required_env_vars: dict[str, tuple[str, ...]] = { + "fabric_data_agent": ("FABRIC_PROJECT_CONNECTION_ID",), + "sharepoint_grounding": ("SHAREPOINT_PROJECT_CONNECTION_ID",), + "bing_custom_search": ("BING_CUSTOM_SEARCH_PROJECT_CONNECTION_ID", "BING_CUSTOM_SEARCH_INSTANCE_NAME"), + "azure_ai_search": ("AI_SEARCH_PROJECT_CONNECTION_ID", "AI_SEARCH_INDEX_NAME"), + "browser_automation": 
("BROWSER_AUTOMATION_PROJECT_CONNECTION_ID",), + "a2a": ("A2A_PROJECT_CONNECTION_ID",), + } + missing_env_vars = [name for name in required_env_vars.get(tool_name, ()) if os.getenv(name, "") == ""] + if missing_env_vars: + pytest.skip(f"Missing required env vars for {tool_name}: {', '.join(missing_env_vars)}") + if tool_name == "fabric_data_agent": tool = client.get_fabric_data_agent_tool() elif tool_name == "sharepoint_grounding": diff --git a/python/packages/core/tests/azure/test_azure_responses_client.py b/python/packages/core/tests/azure/test_azure_responses_client.py index a046a4b688..6636589847 100644 --- a/python/packages/core/tests/azure/test_azure_responses_client.py +++ b/python/packages/core/tests/azure/test_azure_responses_client.py @@ -26,19 +26,6 @@ os.getenv("AZURE_OPENAI_ENDPOINT", "") in ("", "https://test-endpoint.com"), reason="No real AZURE_OPENAI_ENDPOINT provided; skipping integration tests.", ) -skip_if_azure_project_bing_custom_search_integration_tests_disabled = pytest.mark.skipif( - ( - os.getenv("FOUNDRY_PROJECT_ENDPOINT", "") == "" - and os.getenv("AZURE_AI_PROJECT_ENDPOINT", "") == "" - ) - or ( - os.getenv("FOUNDRY_MODEL_DEPLOYMENT_NAME", "") == "" - and os.getenv("AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME", "") == "" - ) - or os.getenv("BING_CUSTOM_SEARCH_PROJECT_CONNECTION_ID", "") == "" - or os.getenv("BING_CUSTOM_SEARCH_INSTANCE_NAME", "") == "", - reason="Missing Foundry project or Bing Custom Search settings; skipping project-mode integration test.", -) logger = logging.getLogger(__name__) @@ -505,12 +492,20 @@ async def test_integration_web_search() -> None: @pytest.mark.flaky @pytest.mark.integration -@skip_if_azure_project_bing_custom_search_integration_tests_disabled async def test_integration_project_mode_web_search_bing_custom_search() -> None: project_endpoint = os.getenv("FOUNDRY_PROJECT_ENDPOINT") or os.getenv("AZURE_AI_PROJECT_ENDPOINT") deployment_name = os.getenv("FOUNDRY_MODEL_DEPLOYMENT_NAME") or 
os.getenv("AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME") - assert project_endpoint is not None - assert deployment_name is not None + missing_env_vars: list[str] = [] + if not project_endpoint: + missing_env_vars.append("FOUNDRY_PROJECT_ENDPOINT or AZURE_AI_PROJECT_ENDPOINT") + if not deployment_name: + missing_env_vars.append("FOUNDRY_MODEL_DEPLOYMENT_NAME or AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME") + if os.getenv("BING_CUSTOM_SEARCH_PROJECT_CONNECTION_ID", "") == "": + missing_env_vars.append("BING_CUSTOM_SEARCH_PROJECT_CONNECTION_ID") + if os.getenv("BING_CUSTOM_SEARCH_INSTANCE_NAME", "") == "": + missing_env_vars.append("BING_CUSTOM_SEARCH_INSTANCE_NAME") + if missing_env_vars: + pytest.skip(f"Missing required env vars: {', '.join(missing_env_vars)}") client = AzureOpenAIResponsesClient( project_endpoint=project_endpoint, From 4e67c74f2fd1da069ca4d29d6964a5bf465fe31d Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Tue, 3 Mar 2026 21:01:55 +0100 Subject: [PATCH 10/10] Refactor Foundry responses parsing and memory tool surface Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- .../agent_framework_azure_ai/_client.py | 245 ++--------- .../azure-ai/tests/test_azure_ai_client.py | 31 +- .../core/agent_framework/azure/__init__.py | 2 + .../azure/_responses_client.py | 407 +++++++++++++++++- .../core/agent_framework/azure/_shared.py | 21 - .../openai/_responses_client.py | 11 +- .../azure/test_azure_responses_client.py | 277 +++++++++++- .../core/tests/azure/test_azure_shared.py | 7 +- .../02-agents/providers/azure_ai/README.md | 7 +- .../providers/azure_ai/using_foundry_tools.py | 12 +- .../providers/azure_openai/README.md | 3 + ...ure_responses_client_with_foundry_tools.py | 92 ++-- 12 files changed, 791 insertions(+), 324 deletions(-) diff --git a/python/packages/azure-ai/agent_framework_azure_ai/_client.py b/python/packages/azure-ai/agent_framework_azure_ai/_client.py index b58eac0cca..e6f774a462 100644 --- 
a/python/packages/azure-ai/agent_framework_azure_ai/_client.py +++ b/python/packages/azure-ai/agent_framework_azure_ai/_client.py @@ -4,48 +4,41 @@ import json import logging -import re import sys -from collections.abc import Awaitable, Callable, Mapping, Sequence +from collections.abc import Callable, Mapping, Sequence from contextlib import suppress from typing import Any, ClassVar, Generic, Literal, TypedDict, TypeVar, cast from agent_framework import ( AGENT_FRAMEWORK_USER_AGENT, Agent, - Annotation, BaseContextProvider, ChatAndFunctionMiddlewareTypes, ChatMiddlewareLayer, - ChatResponse, - ChatResponseUpdate, Content, FunctionInvocationConfiguration, FunctionInvocationLayer, FunctionTool, Message, MiddlewareTypes, - ResponseStream, - TextSpanRegion, ) from agent_framework._settings import load_settings from agent_framework._tools import ToolTypes from agent_framework.azure._entra_id_authentication import AzureCredentialTypes +from agent_framework.azure._responses_client import RawAzureOpenAIResponsesClient from agent_framework.azure._shared import ( _normalize_hosted_ids, - create_bing_tool, create_a2a_tool, create_azure_ai_search_tool, + create_bing_tool, create_browser_automation_tool, create_fabric_data_agent_tool, - create_memory_search_tool, create_openapi_tool, create_sharepoint_grounding_tool, create_web_search_tool, ) from agent_framework.observability import ChatTelemetryLayer from agent_framework.openai import OpenAIResponsesOptions -from agent_framework.openai._responses_client import RawOpenAIResponsesClient from azure.ai.projects.aio import AIProjectClient from azure.ai.projects.models import ( CodeInterpreterTool, @@ -97,10 +90,9 @@ class AzureAIProjectAgentOptions(OpenAIResponsesOptions, total=False): covariant=True, ) -_DOC_INDEX_PATTERN = re.compile(r"doc_(\d+)") - - -class RawAzureAIClient(RawOpenAIResponsesClient[AzureAIClientOptionsT], Generic[AzureAIClientOptionsT]): +class RawAzureAIClient( + 
RawAzureOpenAIResponsesClient[AzureAIClientOptionsT], Generic[AzureAIClientOptionsT] +): """Raw Azure AI client without middleware, telemetry, or function invocation layers. Warning: @@ -318,7 +310,11 @@ async def configure_azure_monitor( "Install it with: pip install azure-monitor-opentelemetry" ) from exc - from agent_framework.observability import create_metric_views, create_resource, enable_instrumentation + from agent_framework.observability import ( + create_metric_views, + create_resource, + enable_instrumentation, + ) # Create resource if not provided in kwargs if "resource" not in kwargs: @@ -636,206 +632,6 @@ def _update_agent_name_and_description(self, agent_name: str | None, description if description and not self.agent_description: self.agent_description = description - # region Azure AI Search Citation Enhancement - - def _extract_azure_search_urls(self, output_items: Any) -> list[str]: - """Extract document URLs from azure_ai_search_call_output items. - - Args: - output_items: The response output items to scan. - - Returns: - A flat list of get_urls from all azure_ai_search_call_output items. - """ - get_urls: list[str] = [] - for item in output_items: - if item.type != "azure_ai_search_call_output": - continue - output = item.output - if isinstance(output, str): - try: - output = json.loads(output) - except (json.JSONDecodeError, TypeError): - continue - if isinstance(output, list): - # Streaming "added" events send output as an empty list; skip. - continue - if output is not None: - urls = output.get("get_urls") if isinstance(output, dict) else output.get_urls - if urls and isinstance(urls, list): - get_urls.extend(urls) - return get_urls - - def _get_search_doc_url(self, citation_title: str | None, get_urls: list[str]) -> str | None: - """Map a citation title like 'doc_0' to its corresponding get_url. - - Args: - citation_title: The annotation title (e.g., "doc_0"). - get_urls: The list of document URLs from azure_ai_search_call_output. 
- - Returns: - The matching document URL if found, otherwise None. - """ - if not citation_title or not get_urls: - return None - match = _DOC_INDEX_PATTERN.search(citation_title) - if not match: - return None - doc_index = int(match.group(1)) - if 0 <= doc_index < len(get_urls): - return str(get_urls[doc_index]) - return None - - def _enrich_annotations_with_search_urls(self, contents: list[Content], get_urls: list[str]) -> None: - """Enrich citation annotations in contents with real document URLs from Azure AI Search. - - Looks for annotations with ``type == "citation"`` and a ``title`` matching ``doc_N``, - then adds the corresponding document URL from *get_urls* to ``additional_properties["get_url"]``. - - Args: - contents: The parsed content list from a ChatResponse or ChatResponseUpdate. - get_urls: Document URLs extracted from azure_ai_search_call_output. - """ - if not get_urls: - return - for content in contents: - if not content.annotations: - continue - for annotation in content.annotations: - if not isinstance(annotation, dict): - continue - if annotation.get("type") != "citation": - continue - title = annotation.get("title") - doc_url = self._get_search_doc_url(title, get_urls) - if doc_url: - annotation.setdefault("additional_properties", {})["get_url"] = doc_url - - def _build_url_citation_content( - self, annotation_data: dict[str, Any], get_urls: list[str], raw_event: Any - ) -> Content: - """Build a Content with a citation Annotation from a url_citation streaming event. - - The base class does not handle ``url_citation`` annotations in streaming, so this - method creates the appropriate framework content for them. - - Args: - annotation_data: The raw annotation dict from the streaming event. - get_urls: Captured document URLs for enrichment. - raw_event: The raw streaming event for raw_representation. - - Returns: - A Content object containing the citation annotation. 
- """ - ann_title = str(annotation_data.get("title") or "") - ann_url = str(annotation_data.get("url") or "") - ann_start = annotation_data.get("start_index") - ann_end = annotation_data.get("end_index") - - additional_props: dict[str, Any] = { - "annotation_index": raw_event.annotation_index, - } - doc_url = self._get_search_doc_url(ann_title, get_urls) - if doc_url: - additional_props["get_url"] = doc_url - - annotation_obj = Annotation( - type="citation", - title=ann_title, - url=ann_url, - additional_properties=additional_props, - raw_representation=annotation_data, - ) - if ann_start is not None and ann_end is not None: - annotation_obj["annotated_regions"] = [ - TextSpanRegion(type="text_span", start_index=ann_start, end_index=ann_end) - ] - - return Content.from_text(text="", annotations=[annotation_obj], raw_representation=raw_event) - - @override - def _inner_get_response( - self, - *, - messages: Sequence[Message], - options: Mapping[str, Any], - stream: bool = False, - **kwargs: Any, - ) -> Awaitable[ChatResponse] | ResponseStream[ChatResponseUpdate, ChatResponse]: - """Wrap base response to enrich Azure AI Search citation annotations. - - For non-streaming responses, the ``ChatResponse.raw_representation`` carries the - full response including ``azure_ai_search_call_output`` items. After the base class - parses the response, ``url_citation`` annotations are enriched with per-document URLs. - - For streaming responses, a transform hook is registered on the ``ResponseStream`` to - capture ``get_urls`` from search output events and enrich ``url_citation`` annotations - as they arrive. The captured URL state is local to the stream closure, so concurrent - streams do not interfere. 
- """ - if not stream: - - async def _enrich_response() -> ChatResponse: - response = await super(RawAzureAIClient, self)._inner_get_response( - messages=messages, options=options, stream=False, **kwargs - ) - get_urls = self._extract_azure_search_urls(response.raw_representation.output) # type: ignore[union-attr] - if get_urls: - for msg in response.messages: - self._enrich_annotations_with_search_urls(list(msg.contents or []), get_urls) - return response - - return _enrich_response() - - # Streaming: use a closure-local list so concurrent streams don't interfere - stream_result = super()._inner_get_response( # type: ignore[assignment] - messages=messages, options=options, stream=True, **kwargs - ) - search_get_urls: list[str] = [] - - def _enrich_update(update: ChatResponseUpdate) -> ChatResponseUpdate: - raw = update.raw_representation - if raw is None: - return update - event_type = raw.type - - # Capture get_urls from azure_ai_search_call_output items. - # Check both "added" and "done" events because the output data (including - # get_urls) may only be fully populated in the "done" event. 
- if event_type in ("response.output_item.added", "response.output_item.done"): - urls = self._extract_azure_search_urls([raw.item]) - if urls: - search_get_urls.extend(urls) - - # Handle url_citation annotations (not handled by the base class in streaming) - if event_type == "response.output_text.annotation.added": - ann = raw.annotation - if ann.get("type") == "url_citation": - citation_content = self._build_url_citation_content(ann, search_get_urls, raw) - contents_list = list(update.contents or []) - contents_list.append(citation_content) - return ChatResponseUpdate( - contents=contents_list, - conversation_id=update.conversation_id, - response_id=update.response_id, - role=update.role, - model_id=update.model_id, - continuation_token=update.continuation_token, - additional_properties=update.additional_properties, - raw_representation=update.raw_representation, - ) - - # Enrich any citation annotations already parsed by the base class - if update.contents and search_get_urls: - self._enrich_annotations_with_search_urls(list(update.contents), search_get_urls) - - return update - - stream_result.with_transform_hook(_enrich_update) # type: ignore[union-attr] - return stream_result - - # endregion - # region Hosted Tool Factory Methods (Azure-specific overrides) @staticmethod @@ -1200,8 +996,23 @@ def get_memory_search_tool( scope: str, update_delay: int | None = None, ) -> dict[str, Any]: - """Create a memory search tool configuration for Azure AI Projects.""" - return create_memory_search_tool(memory_store_name=memory_store_name, scope=scope, update_delay=update_delay) + """Create a memory search tool configuration for Azure AI Projects. + + Note: + Prefer ``FoundryMemoryProvider`` for agents that will primarily run locally. 
+ """ + if not memory_store_name: + raise ValueError("'memory_store_name' is required.") + if not scope: + raise ValueError("'scope' is required.") + result: dict[str, Any] = { + "type": "memory_search", + "memory_store_name": memory_store_name, + "scope": scope, + } + if update_delay is not None: + result["update_delay"] = update_delay + return result # endregion diff --git a/python/packages/azure-ai/tests/test_azure_ai_client.py b/python/packages/azure-ai/tests/test_azure_ai_client.py index d5c7d604b9..ddd1ea247b 100644 --- a/python/packages/azure-ai/tests/test_azure_ai_client.py +++ b/python/packages/azure-ai/tests/test_azure_ai_client.py @@ -25,6 +25,8 @@ ) from agent_framework._settings import load_settings from agent_framework.openai._responses_client import RawOpenAIResponsesClient +from agent_framework_azure_ai import AzureAIClient, AzureAISettings +from agent_framework_azure_ai._shared import from_azure_ai_tools from azure.ai.projects.aio import AIProjectClient from azure.ai.projects.models import ( ApproximateLocation, @@ -43,9 +45,6 @@ from pydantic import BaseModel, ConfigDict, Field from pytest import fixture, param -from agent_framework_azure_ai import AzureAIClient, AzureAISettings -from agent_framework_azure_ai._shared import from_azure_ai_tools - skip_if_azure_ai_integration_tests_disabled = pytest.mark.skipif( os.getenv("AZURE_AI_PROJECT_ENDPOINT", "") in ("", "https://test-project.cognitiveservices.azure.com/") or os.getenv("AZURE_AI_MODEL_DEPLOYMENT_NAME", "") == "", @@ -1537,6 +1536,20 @@ async def test_integration_agent_options( assert "seattle" in response_value["location"].lower() +def test_get_memory_search_tool_warns_and_returns_payload() -> None: + with pytest.warns(UserWarning, match="FoundryMemoryProvider"): + tool = AzureAIClient.get_memory_search_tool( + memory_store_name="agent-framework-memory-store", + scope="test-scope", + update_delay=1, + ) + + assert tool["type"] == "memory_search" + assert tool["memory_store_name"] == 
"agent-framework-memory-store" + assert tool["scope"] == "test-scope" + assert tool["update_delay"] == 1 + + @pytest.mark.flaky @pytest.mark.integration @skip_if_azure_ai_integration_tests_disabled @@ -1592,6 +1605,7 @@ async def test_integration_web_search() -> None: param("fabric_data_agent", id="fabric_data_agent"), param("sharepoint_grounding", id="sharepoint_grounding"), param("bing_custom_search", id="bing_custom_search"), + param("bing_grounding", id="bing_grounding"), param("azure_ai_search", id="azure_ai_search"), param("browser_automation", id="browser_automation"), param("openapi", id="openapi"), @@ -1604,7 +1618,14 @@ async def test_integration_foundry_helper_tools_smoke(tool_name: str, client: Az required_env_vars: dict[str, tuple[str, ...]] = { "fabric_data_agent": ("FABRIC_PROJECT_CONNECTION_ID",), "sharepoint_grounding": ("SHAREPOINT_PROJECT_CONNECTION_ID",), - "bing_custom_search": ("BING_CUSTOM_SEARCH_PROJECT_CONNECTION_ID", "BING_CUSTOM_SEARCH_INSTANCE_NAME"), + "bing_custom_search": ( + "BING_CUSTOM_SEARCH_PROJECT_CONNECTION_ID", + "BING_CUSTOM_SEARCH_INSTANCE_NAME", + ), + "bing_grounding": ( + "BING_GROUNDING_PROJECT_CONNECTION_ID", + "BING_GROUNDING_INSTANCE_NAME", + ), "azure_ai_search": ("AI_SEARCH_PROJECT_CONNECTION_ID", "AI_SEARCH_INDEX_NAME"), "browser_automation": ("BROWSER_AUTOMATION_PROJECT_CONNECTION_ID",), "a2a": ("A2A_PROJECT_CONNECTION_ID",), @@ -1619,6 +1640,8 @@ async def test_integration_foundry_helper_tools_smoke(tool_name: str, client: Az tool = client.get_sharepoint_grounding_tool() elif tool_name == "bing_custom_search": tool = client.get_bing_tool(variant="custom_search") + elif tool_name == "bing_grounding": + tool = client.get_bing_tool(variant="grounding") elif tool_name == "azure_ai_search": tool = client.get_azure_ai_search_tool(query_type="simple") elif tool_name == "browser_automation": diff --git a/python/packages/core/agent_framework/azure/__init__.py b/python/packages/core/agent_framework/azure/__init__.py index 
dcf9fc321e..ba26c87f94 100644 --- a/python/packages/core/agent_framework/azure/__init__.py +++ b/python/packages/core/agent_framework/azure/__init__.py @@ -10,6 +10,7 @@ - AzureAIAgentClient - AzureOpenAIChatClient - AzureOpenAIResponsesClient +- RawAzureOpenAIResponsesClient - AzureAISearchContextProvider - DurableAIAgent """ @@ -40,6 +41,7 @@ "AzureOpenAIEmbeddingClient": ("agent_framework.azure._embedding_client", "agent-framework-core"), "AzureOpenAIResponsesClient": ("agent_framework.azure._responses_client", "agent-framework-core"), "AzureOpenAIResponsesOptions": ("agent_framework.azure._responses_client", "agent-framework-core"), + "RawAzureOpenAIResponsesClient": ("agent_framework.azure._responses_client", "agent-framework-core"), "AzureOpenAISettings": ("agent_framework.azure._shared", "agent-framework-core"), "AzureUserSecurityContext": ("agent_framework.azure._chat_client", "agent-framework-core"), "DurableAIAgent": ("agent_framework_durabletask", "agent-framework-durabletask"), diff --git a/python/packages/core/agent_framework/azure/_responses_client.py b/python/packages/core/agent_framework/azure/_responses_client.py index 0e32538907..041f27d53d 100644 --- a/python/packages/core/agent_framework/azure/_responses_client.py +++ b/python/packages/core/agent_framework/azure/_responses_client.py @@ -2,9 +2,11 @@ from __future__ import annotations +import json +import re import sys -from collections.abc import Callable, Mapping, Sequence -from typing import TYPE_CHECKING, Any, Generic +from collections.abc import Awaitable, Callable, Mapping, Sequence +from typing import TYPE_CHECKING, Any, Generic, Literal, cast from urllib.parse import urljoin, urlparse from azure.ai.projects.aio import AIProjectClient @@ -14,6 +16,15 @@ from .._settings import load_settings from .._telemetry import AGENT_FRAMEWORK_USER_AGENT from .._tools import FunctionInvocationConfiguration, FunctionInvocationLayer +from .._types import ( + Annotation, + ChatResponse, + 
ChatResponseUpdate, + Content, + Message, + ResponseStream, + TextSpanRegion, +) from ..observability import ChatTelemetryLayer from ..openai._responses_client import RawOpenAIResponsesClient from ._entra_id_authentication import AzureCredentialTypes, AzureTokenProvider @@ -30,7 +41,6 @@ create_file_search_tool, create_image_generation_tool, create_mcp_tool, - create_memory_search_tool, create_openapi_tool, create_sharepoint_grounding_tool, create_web_search_tool, @@ -62,12 +72,320 @@ ) +class _AzureAIProjectSettings(TypedDict, total=False): + project_endpoint: str | None + model_deployment_name: str | None + + +_DOC_INDEX_PATTERN = re.compile(r"doc_(\d+)") + + +class RawAzureOpenAIResponsesClient( + RawOpenAIResponsesClient[AzureOpenAIResponsesOptionsT], + Generic[AzureOpenAIResponsesOptionsT], +): + """Raw Azure OpenAI responses client with Foundry and Azure AI parse adaptations.""" + + @staticmethod + def _parse_foundry_tool_output(value: Any) -> Any: + """Parse Foundry tool output payloads when represented as JSON strings.""" + if not isinstance(value, str): + return value + + stripped = value.strip() + if not stripped: + return None + + try: + return json.loads(stripped) + except json.JSONDecodeError: + return value + + def _parse_foundry_preview_item(self, item: Any) -> list[Content]: + """Parse Foundry preview tool output items into function call/result content.""" + item_type = getattr(item, "type", None) + if not isinstance(item_type, str): + return [] + + if item_type.endswith("_preview_call"): + call_id = getattr(item, "call_id", None) or getattr(item, "id", None) + if not call_id: + return [] + + tool_name = item_type.removesuffix("_call") + additional_properties: dict[str, Any] = { + "tool_type": item_type, + "tool_name": tool_name, + "item_id": getattr(item, "id", None), + "status": getattr(item, "status", None), + } + arguments = getattr(item, "arguments", None) + return [ + Content.from_function_call( + call_id=call_id, + name=tool_name, + 
arguments=arguments if arguments is not None else "", + additional_properties={ + k: v for k, v in additional_properties.items() if v is not None + }, + raw_representation=item, + ) + ] + + if item_type.endswith("_preview_call_output"): + call_id = getattr(item, "call_id", None) or getattr(item, "id", None) + if not call_id: + return [] + + tool_name = item_type.removesuffix("_call_output") + output: Any = getattr(item, "output", None) + if output is None: + output = getattr(item, "result", None) + if output is None: + output = getattr(item, "outputs", None) + + additional_properties = { + "tool_type": item_type, + "tool_name": tool_name, + "item_id": getattr(item, "id", None), + "status": getattr(item, "status", None), + } + return [ + Content.from_function_result( + call_id=call_id, + result=self._parse_foundry_tool_output(output), + additional_properties={ + k: v for k, v in additional_properties.items() if v is not None + }, + raw_representation=item, + ) + ] + + return [] + + @override + def _parse_response_from_openai( + self, response: Any, options: dict[str, Any] + ) -> ChatResponse: + parsed_response = super()._parse_response_from_openai( + response=response, options=options + ) + + foundry_contents: list[Content] = [] + for item in getattr(response, "output", []) or []: + foundry_contents.extend(self._parse_foundry_preview_item(item)) + + if not foundry_contents: + return parsed_response + + if parsed_response.messages: + existing_contents = list(parsed_response.messages[0].contents or []) + parsed_response.messages[0].contents = [ + *foundry_contents, + *existing_contents, + ] + else: + parsed_response.messages = [ + Message(role="assistant", contents=foundry_contents) + ] + + return parsed_response + + @override + def _parse_chunk_from_openai( + self, + event: Any, + options: dict[str, Any], + function_call_ids: dict[int, tuple[str, str]], + ) -> ChatResponseUpdate: + update = super()._parse_chunk_from_openai( + event=event, + options=options, + 
function_call_ids=function_call_ids, + ) + if getattr(event, "type", None) != "response.output_item.done": + return update + + foundry_contents = self._parse_foundry_preview_item( + getattr(event, "item", None) + ) + if foundry_contents: + update.contents = [*list(update.contents or []), *foundry_contents] + return update + + def _extract_azure_search_urls(self, output_items: Any) -> list[str]: + """Extract document URLs from azure_ai_search_call_output items.""" + get_urls: list[str] = [] + for item in output_items: + if item.type != "azure_ai_search_call_output": + continue + output = item.output + if isinstance(output, str): + try: + output = json.loads(output) + except (json.JSONDecodeError, TypeError): + continue + if isinstance(output, list): + continue + if output is not None: + urls = ( + output.get("get_urls") + if isinstance(output, dict) + else output.get_urls + ) + if urls and isinstance(urls, list): + get_urls.extend(urls) + return get_urls + + def _get_search_doc_url( + self, citation_title: str | None, get_urls: list[str] + ) -> str | None: + """Map a citation title like ``doc_0`` to its corresponding get_url.""" + if not citation_title or not get_urls: + return None + match = _DOC_INDEX_PATTERN.search(citation_title) + if not match: + return None + doc_index = int(match.group(1)) + if 0 <= doc_index < len(get_urls): + return str(get_urls[doc_index]) + return None + + def _enrich_annotations_with_search_urls( + self, contents: list[Content], get_urls: list[str] + ) -> None: + """Enrich citation annotations in contents with real document URLs from Azure AI Search.""" + if not get_urls: + return + for content in contents: + if not content.annotations: + continue + for annotation in content.annotations: + if not isinstance(annotation, dict): + continue + if annotation.get("type") != "citation": + continue + title = annotation.get("title") + doc_url = self._get_search_doc_url(title, get_urls) + if doc_url: + annotation.setdefault("additional_properties", 
{})["get_url"] = ( + doc_url + ) + + def _build_url_citation_content( + self, + annotation_data: dict[str, Any], + get_urls: list[str], + raw_event: Any, + ) -> Content: + """Build a citation ``Content`` from a ``url_citation`` streaming annotation event.""" + ann_title = str(annotation_data.get("title") or "") + ann_url = str(annotation_data.get("url") or "") + ann_start = annotation_data.get("start_index") + ann_end = annotation_data.get("end_index") + + additional_props: dict[str, Any] = { + "annotation_index": getattr(raw_event, "annotation_index", None), + } + doc_url = self._get_search_doc_url(ann_title, get_urls) + if doc_url: + additional_props["get_url"] = doc_url + + annotation_obj = Annotation( + type="citation", + title=ann_title, + url=ann_url, + additional_properties={ + k: v for k, v in additional_props.items() if v is not None + }, + raw_representation=annotation_data, + ) + if ann_start is not None and ann_end is not None: + annotation_obj["annotated_regions"] = [ + TextSpanRegion( + type="text_span", start_index=ann_start, end_index=ann_end + ) + ] + + return Content.from_text( + text="", annotations=[annotation_obj], raw_representation=raw_event + ) + + @override + def _inner_get_response( + self, + *, + messages: Sequence[Message], + options: Mapping[str, Any], + stream: bool = False, + **kwargs: Any, + ) -> Awaitable[ChatResponse] | ResponseStream[ChatResponseUpdate, ChatResponse]: + """Wrap base response to enrich Azure AI Search citation annotations.""" + if not stream: + + async def _enrich_response() -> ChatResponse: + response = await super( + RawAzureOpenAIResponsesClient, self + )._inner_get_response( + messages=messages, options=options, stream=False, **kwargs + ) + parsed_response = cast("ChatResponse", response) + raw_output = getattr(parsed_response.raw_representation, "output", None) + if raw_output: + get_urls = self._extract_azure_search_urls(raw_output) + if get_urls: + for msg in parsed_response.messages: + 
self._enrich_annotations_with_search_urls( + list(msg.contents or []), get_urls + ) + return parsed_response + + return _enrich_response() + + stream_result = super()._inner_get_response( # type: ignore[assignment] + messages=messages, options=options, stream=True, **kwargs + ) + search_get_urls: list[str] = [] + + def _enrich_update(update: ChatResponseUpdate) -> ChatResponseUpdate: + raw = update.raw_representation + if raw is None: + return update + event_type = raw.type + + if event_type in ( + "response.output_item.added", + "response.output_item.done", + ): + urls = self._extract_azure_search_urls([raw.item]) + if urls: + search_get_urls.extend(urls) + + if event_type == "response.output_text.annotation.added": + ann = raw.annotation + if isinstance(ann, dict) and ann.get("type") == "url_citation": + citation_content = self._build_url_citation_content( + ann, search_get_urls, raw + ) + update.contents = [*list(update.contents or []), citation_content] + + if update.contents and search_get_urls: + self._enrich_annotations_with_search_urls( + list(update.contents), search_get_urls + ) + + return update + + stream_result.with_transform_hook(_enrich_update) # type: ignore[union-attr] + return stream_result + + class AzureOpenAIResponsesClient( # type: ignore[misc] AzureOpenAIConfigMixin, ChatMiddlewareLayer[AzureOpenAIResponsesOptionsT], FunctionInvocationLayer[AzureOpenAIResponsesOptionsT], ChatTelemetryLayer[AzureOpenAIResponsesOptionsT], - RawOpenAIResponsesClient[AzureOpenAIResponsesOptionsT], + RawAzureOpenAIResponsesClient[AzureOpenAIResponsesOptionsT], Generic[AzureOpenAIResponsesOptionsT], ): """Azure Responses completion class with middleware, telemetry, and function invocation support.""" @@ -86,11 +404,13 @@ def __init__( async_client: AsyncOpenAI | None = None, project_client: Any | None = None, project_endpoint: str | None = None, + backend: Literal["azure_openai", "foundry"] | None = None, env_file_path: str | None = None, env_file_encoding: str | 
None = None, instruction_role: str | None = None, middleware: Sequence[MiddlewareTypes] | None = None, - function_invocation_configuration: FunctionInvocationConfiguration | None = None, + function_invocation_configuration: FunctionInvocationConfiguration + | None = None, **kwargs: Any, ) -> None: """Initialize an Azure OpenAI Responses client. @@ -109,6 +429,7 @@ def __init__( deployment_name: The deployment name. If provided, will override the value (responses_deployment_name) in the env vars or .env file. Can also be set via environment variable AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME. + In project mode, AZURE_AI_MODEL_DEPLOYMENT_NAME is also used as a fallback. endpoint: The deployment endpoint. If provided will override the value in the env vars or .env file. Can also be set via environment variable AZURE_OPENAI_ENDPOINT. @@ -133,6 +454,11 @@ def __init__( project_endpoint: The Azure AI Foundry project endpoint URL. When provided with ``credential``, an ``AIProjectClient`` will be created and used to obtain the OpenAI client. Requires the ``azure-ai-projects`` package. + backend: Backend mode for settings resolution. + Use ``"foundry"`` to load only ``AZURE_AI_*`` settings + (for example, ``AZURE_AI_PROJECT_ENDPOINT`` and ``AZURE_AI_MODEL_DEPLOYMENT_NAME``). + Use ``"azure_openai"`` to load ``AZURE_OPENAI_*`` settings. + When ``project_client`` or ``project_endpoint`` is provided, Foundry mode is inferred. env_file_path: Use the environment settings file as a fallback to using env vars. env_file_encoding: The encoding of the environment settings file, defaults to 'utf-8'. 
instruction_role: The role to use for 'instruction' messages, for example, summarization @@ -196,25 +522,55 @@ class MyOptions(AzureOpenAIResponsesOptions, total=False): if (model_id := kwargs.pop("model_id", None)) and not deployment_name: deployment_name = str(model_id) - azure_openai_settings = load_settings( - AzureOpenAISettings, - env_prefix="AZURE_OPENAI_", - api_key=api_key, - base_url=base_url, - endpoint=endpoint, - responses_deployment_name=deployment_name, - api_version=api_version, - env_file_path=env_file_path, - env_file_encoding=env_file_encoding, - token_endpoint=token_endpoint, + is_foundry_backend = ( + backend == "foundry" + or project_client is not None + or project_endpoint is not None ) - is_project_mode = project_client is not None or project_endpoint is not None + resolved_project_endpoint = project_endpoint + azure_openai_settings: AzureOpenAISettings + if is_foundry_backend: + azure_ai_project_settings = load_settings( + _AzureAIProjectSettings, + env_prefix="AZURE_AI_", + project_endpoint=project_endpoint, + model_deployment_name=deployment_name, + env_file_path=env_file_path, + env_file_encoding=env_file_encoding, + ) + resolved_project_endpoint = azure_ai_project_settings.get( + "project_endpoint" + ) + azure_openai_settings = { + "api_key": None, + "base_url": base_url, + "endpoint": endpoint, + "responses_deployment_name": azure_ai_project_settings.get( + "model_deployment_name" + ), + "api_version": api_version, + "token_endpoint": token_endpoint, + } + else: + azure_openai_settings = load_settings( + AzureOpenAISettings, + env_prefix="AZURE_OPENAI_", + api_key=api_key, + base_url=base_url, + endpoint=endpoint, + responses_deployment_name=deployment_name, + api_version=api_version, + env_file_path=env_file_path, + env_file_encoding=env_file_encoding, + token_endpoint=token_endpoint, + ) + # Project client path: create OpenAI client from an Azure AI Foundry project - if async_client is None and is_project_mode: + if async_client is 
None and is_foundry_backend: async_client = self._create_client_from_project( project_client=project_client, - project_endpoint=project_endpoint, + project_endpoint=resolved_project_endpoint, credential=credential, ) @@ -230,14 +586,20 @@ class MyOptions(AzureOpenAIResponsesOptions, total=False): ): azure_openai_settings["base_url"] = urljoin(str(azure_openai_settings["endpoint"]), "/openai/v1/") - if not azure_openai_settings["responses_deployment_name"]: + resolved_deployment_name = azure_openai_settings.get("responses_deployment_name") + if not resolved_deployment_name: + if is_foundry_backend: + raise ValueError( + "Azure OpenAI deployment name is required. Set via 'deployment_name' parameter " + "or 'AZURE_AI_MODEL_DEPLOYMENT_NAME' environment variable." + ) raise ValueError( "Azure OpenAI deployment name is required. Set via 'deployment_name' parameter " "or 'AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME' environment variable." ) super().__init__( - deployment_name=azure_openai_settings["responses_deployment_name"], + deployment_name=resolved_deployment_name, endpoint=azure_openai_settings["endpoint"], base_url=azure_openai_settings["base_url"], api_version=azure_openai_settings["api_version"], # type: ignore @@ -250,7 +612,7 @@ class MyOptions(AzureOpenAIResponsesOptions, total=False): middleware=middleware, function_invocation_configuration=function_invocation_configuration, ) - if is_project_mode: + if is_foundry_backend: self._attach_project_tool_methods() def _attach_project_tool_methods(self) -> None: @@ -268,7 +630,6 @@ def _attach_project_tool_methods(self) -> None: "get_browser_automation_tool": create_browser_automation_tool, "get_openapi_tool": create_openapi_tool, "get_a2a_tool": create_a2a_tool, - "get_memory_search_tool": create_memory_search_tool, } for method_name, method in tool_methods.items(): setattr(self, method_name, method) diff --git a/python/packages/core/agent_framework/azure/_shared.py 
b/python/packages/core/agent_framework/azure/_shared.py index a1c825014c..f1a445434d 100644 --- a/python/packages/core/agent_framework/azure/_shared.py +++ b/python/packages/core/agent_framework/azure/_shared.py @@ -687,24 +687,3 @@ def create_a2a_tool( result["base_url"] = base_url result.update(kwargs) return result - - -def create_memory_search_tool( - *, - memory_store_name: str, - scope: str, - update_delay: int | None = None, - **kwargs: Any, -) -> dict[str, Any]: - """Create a memory search tool payload.""" - _require_string(memory_store_name, "memory_store_name") - _require_string(scope, "scope") - result: dict[str, Any] = { - "type": "memory_search", - "memory_store_name": memory_store_name, - "scope": scope, - } - if update_delay is not None: - result["update_delay"] = update_delay - result.update(kwargs) - return result diff --git a/python/packages/core/agent_framework/openai/_responses_client.py b/python/packages/core/agent_framework/openai/_responses_client.py index cf0a331365..d0cbdc9720 100644 --- a/python/packages/core/agent_framework/openai/_responses_client.py +++ b/python/packages/core/agent_framework/openai/_responses_client.py @@ -14,7 +14,16 @@ ) from datetime import datetime, timezone from itertools import chain -from typing import TYPE_CHECKING, Any, ClassVar, Generic, Literal, NoReturn, TypedDict, cast +from typing import ( + TYPE_CHECKING, + Any, + ClassVar, + Generic, + Literal, + NoReturn, + TypedDict, + cast, +) from openai import AsyncOpenAI, BadRequestError from openai.types.responses.file_search_tool_param import FileSearchToolParam diff --git a/python/packages/core/tests/azure/test_azure_responses_client.py b/python/packages/core/tests/azure/test_azure_responses_client.py index 6636589847..ca08d7346f 100644 --- a/python/packages/core/tests/azure/test_azure_responses_client.py +++ b/python/packages/core/tests/azure/test_azure_responses_client.py @@ -216,6 +216,126 @@ def test_init_with_project_endpoint(azure_openai_unit_test_env: 
dict[str, str]) assert isinstance(azure_responses_client, SupportsChatGetResponse) +def test_project_mode_does_not_attach_memory_search_tool(azure_openai_unit_test_env: dict[str, str]) -> None: + """Test project mode does not expose get_memory_search_tool on AzureOpenAIResponsesClient.""" + from unittest.mock import patch + + from openai import AsyncOpenAI + + mock_openai_client = MagicMock(spec=AsyncOpenAI) + mock_openai_client.default_headers = {} + + with patch( + "agent_framework.azure._responses_client.AzureOpenAIResponsesClient._create_client_from_project", + return_value=mock_openai_client, + ): + azure_responses_client = AzureOpenAIResponsesClient( + project_endpoint="https://test-project.services.ai.azure.com", + deployment_name="gpt-4o", + credential=AzureCliCredential(), + ) + + assert not hasattr(azure_responses_client, "get_memory_search_tool") + + +@pytest.mark.parametrize( + "exclude_list,override_env_param_dict", + [(["AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME"], {"AZURE_AI_MODEL_DEPLOYMENT_NAME": "project-test-model"})], + indirect=True, +) +def test_init_with_project_endpoint_uses_azure_ai_model_deployment_name( + azure_openai_unit_test_env: dict[str, str], +) -> None: + """Test project mode fallback to AZURE_AI_MODEL_DEPLOYMENT_NAME.""" + from unittest.mock import patch + + from openai import AsyncOpenAI + + mock_openai_client = MagicMock(spec=AsyncOpenAI) + mock_openai_client.default_headers = {} + + with patch( + "agent_framework.azure._responses_client.AzureOpenAIResponsesClient._create_client_from_project", + return_value=mock_openai_client, + ): + azure_responses_client = AzureOpenAIResponsesClient( + project_endpoint="https://test-project.services.ai.azure.com", + credential=AzureCliCredential(), + ) + + assert azure_responses_client.model_id == azure_openai_unit_test_env["AZURE_AI_MODEL_DEPLOYMENT_NAME"] + assert azure_responses_client.client is mock_openai_client + + +@pytest.mark.parametrize( + "exclude_list,override_env_param_dict", + 
[(["AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME"], {"AZURE_AI_MODEL_DEPLOYMENT_NAME": "project-test-model"})], + indirect=True, +) +def test_init_without_project_mode_ignores_azure_ai_model_deployment_name( + azure_openai_unit_test_env: dict[str, str], +) -> None: + """Test AZURE_AI_MODEL_DEPLOYMENT_NAME is only used in project mode.""" + _ = azure_openai_unit_test_env + + with pytest.raises(ValueError, match="Azure OpenAI deployment name is required"): + AzureOpenAIResponsesClient() + + +@pytest.mark.parametrize( + "override_env_param_dict", + [ + { + "AZURE_AI_PROJECT_ENDPOINT": "https://env-project.services.ai.azure.com", + "AZURE_AI_MODEL_DEPLOYMENT_NAME": "env-foundry-model", + "AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME": "openai-model-ignored", + } + ], + indirect=True, +) +def test_init_with_foundry_backend_uses_only_azure_ai_env(azure_openai_unit_test_env: dict[str, str]) -> None: + """Test foundry backend uses AZURE_AI_* env values and ignores AZURE_OPENAI_* deployment.""" + from unittest.mock import patch + + from openai import AsyncOpenAI + + mock_openai_client = MagicMock(spec=AsyncOpenAI) + mock_openai_client.default_headers = {} + credential = AzureCliCredential() + + with patch( + "agent_framework.azure._responses_client.AzureOpenAIResponsesClient._create_client_from_project", + return_value=mock_openai_client, + ) as mock_create_client: + azure_responses_client = AzureOpenAIResponsesClient( + backend="foundry", + credential=credential, + ) + + assert azure_responses_client.model_id == "env-foundry-model" + assert azure_responses_client.client is mock_openai_client + mock_create_client.assert_called_once_with( + project_client=None, + project_endpoint=azure_openai_unit_test_env["AZURE_AI_PROJECT_ENDPOINT"], + credential=credential, + ) + + +@pytest.mark.parametrize("override_env_param_dict", [{"AZURE_AI_MODEL_DEPLOYMENT_NAME": ""}], indirect=True) +def test_init_with_foundry_backend_ignores_azure_openai_deployment_env( + azure_openai_unit_test_env: dict[str, 
str], +) -> None: + """Test foundry backend does not fallback to AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME.""" + _ = azure_openai_unit_test_env + mock_project_client = MagicMock() + + with pytest.raises(ValueError, match="AZURE_AI_MODEL_DEPLOYMENT_NAME"): + AzureOpenAIResponsesClient( + backend="foundry", + project_client=mock_project_client, + ) + + def test_create_client_from_project_with_project_client() -> None: """Test _create_client_from_project with an existing project client.""" from openai import AsyncOpenAI @@ -299,6 +419,91 @@ def test_serialize(azure_openai_unit_test_env: dict[str, str]) -> None: assert "User-Agent" not in dumped_settings["default_headers"] +def test_parse_response_with_browser_automation_preview_items(azure_openai_unit_test_env: dict[str, str]) -> None: + client = AzureOpenAIResponsesClient( + deployment_name=azure_openai_unit_test_env["AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME"] + ) + + mock_response = MagicMock() + mock_response.output_parsed = None + mock_response.metadata = {} + mock_response.usage = None + mock_response.id = "resp-id" + mock_response.model = "test-model" + mock_response.created_at = 1000000000 + mock_response.status = "completed" + + mock_call_item = MagicMock() + mock_call_item.type = "browser_automation_preview_call" + mock_call_item.id = "fc_123" + mock_call_item.call_id = "call_123" + mock_call_item.status = "completed" + mock_call_item.arguments = '{"query":"Open https://example.com and tell me the page title."}' + + mock_output_item = MagicMock() + mock_output_item.type = "browser_automation_preview_call_output" + mock_output_item.id = "fco_123" + mock_output_item.call_id = "call_123" + mock_output_item.status = "completed" + mock_output_item.output = '[{"step":"open"}]' + + mock_message_content = MagicMock() + mock_message_content.type = "output_text" + mock_message_content.text = "The page title is Example Domain." 
+ mock_message_content.annotations = [] + mock_message = MagicMock() + mock_message.type = "message" + mock_message.content = [mock_message_content] + + mock_response.output = [mock_call_item, mock_output_item, mock_message] + + response = client._parse_response_from_openai(mock_response, options={}) # type: ignore[arg-type] + + assert [content.type for content in response.messages[0].contents] == ["function_call", "function_result", "text"] + assert response.messages[0].contents[0].name == "browser_automation_preview" + assert response.messages[0].contents[0].call_id == "call_123" + assert response.messages[0].contents[1].call_id == "call_123" + assert response.messages[0].contents[1].result == [{"step": "open"}] + + +def test_parse_chunk_with_browser_automation_preview_item_done(azure_openai_unit_test_env: dict[str, str]) -> None: + client = AzureOpenAIResponsesClient( + deployment_name=azure_openai_unit_test_env["AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME"] + ) + + mock_call_event = MagicMock() + mock_call_event.type = "response.output_item.done" + mock_call_item = MagicMock() + mock_call_item.type = "browser_automation_preview_call" + mock_call_item.id = "fc_789" + mock_call_item.call_id = "call_789" + mock_call_item.status = "completed" + mock_call_item.arguments = '{"query":"Open https://example.com and report the page title."}' + mock_call_event.item = mock_call_item + + call_update = client._parse_chunk_from_openai(mock_call_event, options={}, function_call_ids={}) + assert len(call_update.contents) == 1 + assert call_update.contents[0].type == "function_call" + assert call_update.contents[0].name == "browser_automation_preview" + assert call_update.contents[0].call_id == "call_789" + + mock_output_event = MagicMock() + mock_output_event.type = "response.output_item.done" + mock_output_item = MagicMock() + mock_output_item.type = "browser_automation_preview_call_output" + mock_output_item.id = "fco_789" + mock_output_item.call_id = "call_789" + 
mock_output_item.status = "completed" + mock_output_item.output = "[]" + mock_output_event.item = mock_output_item + + result_update = client._parse_chunk_from_openai(mock_output_event, options={}, function_call_ids={}) + assert len(result_update.contents) == 1 + assert result_update.contents[0].type == "function_result" + assert result_update.contents[0].call_id == "call_789" + assert result_update.contents[0].result == [] + + # region Integration Tests @@ -494,12 +699,19 @@ async def test_integration_web_search() -> None: @pytest.mark.integration async def test_integration_project_mode_web_search_bing_custom_search() -> None: project_endpoint = os.getenv("FOUNDRY_PROJECT_ENDPOINT") or os.getenv("AZURE_AI_PROJECT_ENDPOINT") - deployment_name = os.getenv("FOUNDRY_MODEL_DEPLOYMENT_NAME") or os.getenv("AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME") + deployment_name = ( + os.getenv("FOUNDRY_MODEL_DEPLOYMENT_NAME") + or os.getenv("AZURE_AI_MODEL_DEPLOYMENT_NAME") + or os.getenv("AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME") + ) missing_env_vars: list[str] = [] if not project_endpoint: missing_env_vars.append("FOUNDRY_PROJECT_ENDPOINT or AZURE_AI_PROJECT_ENDPOINT") if not deployment_name: - missing_env_vars.append("FOUNDRY_MODEL_DEPLOYMENT_NAME or AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME") + missing_env_vars.append( + "FOUNDRY_MODEL_DEPLOYMENT_NAME, AZURE_AI_MODEL_DEPLOYMENT_NAME, " + "or AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME" + ) if os.getenv("BING_CUSTOM_SEARCH_PROJECT_CONNECTION_ID", "") == "": missing_env_vars.append("BING_CUSTOM_SEARCH_PROJECT_CONNECTION_ID") if os.getenv("BING_CUSTOM_SEARCH_INSTANCE_NAME", "") == "": @@ -533,6 +745,67 @@ async def test_integration_project_mode_web_search_bing_custom_search() -> None: assert len(response.text) > 0 +@pytest.mark.flaky +@pytest.mark.integration +async def test_integration_project_mode_browser_tool_captures_function_contents() -> None: + project_endpoint = os.getenv("FOUNDRY_PROJECT_ENDPOINT") or 
os.getenv("AZURE_AI_PROJECT_ENDPOINT") + deployment_name = ( + os.getenv("FOUNDRY_MODEL_DEPLOYMENT_NAME") + or os.getenv("AZURE_AI_MODEL_DEPLOYMENT_NAME") + or os.getenv("AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME") + ) + missing_env_vars: list[str] = [] + if not project_endpoint: + missing_env_vars.append("FOUNDRY_PROJECT_ENDPOINT or AZURE_AI_PROJECT_ENDPOINT") + if not deployment_name: + missing_env_vars.append( + "FOUNDRY_MODEL_DEPLOYMENT_NAME, AZURE_AI_MODEL_DEPLOYMENT_NAME, " + "or AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME" + ) + if os.getenv("BROWSER_AUTOMATION_PROJECT_CONNECTION_ID", "") == "": + missing_env_vars.append("BROWSER_AUTOMATION_PROJECT_CONNECTION_ID") + if missing_env_vars: + pytest.skip(f"Missing required env vars: {', '.join(missing_env_vars)}") + + client = AzureOpenAIResponsesClient( + project_endpoint=project_endpoint, + deployment_name=deployment_name, + credential=AzureCliCredential(), + ) + + for streaming in [False, True]: + content = { + "messages": [ + Message( + role="user", + text="Use the browser tool to open https://example.com and tell me the page title.", + ) + ], + "options": { + "tool_choice": "required", + "tools": [client.get_browser_automation_tool()], + }, + "stream": streaming, + } + if streaming: + response = await client.get_response(**content).get_final_response() + else: + response = await client.get_response(**content) + + assert response is not None + assert isinstance(response, ChatResponse) + assert response.text is not None + + browser_calls = [c for c in response.messages[0].contents if c.type == "function_call"] + browser_calls = [c for c in browser_calls if c.name == "browser_automation_preview"] + browser_results = [c for c in response.messages[0].contents if c.type == "function_result"] + assert browser_calls + assert browser_results + assert {result.call_id for result in browser_results if result.call_id} & { + call.call_id for call in browser_calls if call.call_id + } + + @pytest.mark.flaky 
@pytest.mark.integration @skip_if_azure_integration_tests_disabled diff --git a/python/packages/core/tests/azure/test_azure_shared.py b/python/packages/core/tests/azure/test_azure_shared.py index 02f5e89e2b..9435c8d987 100644 --- a/python/packages/core/tests/azure/test_azure_shared.py +++ b/python/packages/core/tests/azure/test_azure_shared.py @@ -15,7 +15,6 @@ create_file_search_tool, create_image_generation_tool, create_mcp_tool, - create_memory_search_tool, create_openapi_tool, create_sharepoint_grounding_tool, create_web_search_tool, @@ -166,17 +165,13 @@ def test_create_openapi_tool_with_auth() -> None: assert tool["openapi"]["auth"]["type"] == "anonymous" -def test_create_a2a_tool_and_memory_search(monkeypatch: pytest.MonkeyPatch) -> None: +def test_create_a2a_tool(monkeypatch: pytest.MonkeyPatch) -> None: monkeypatch.setenv("A2A_PROJECT_CONNECTION_ID", "a2a-conn") monkeypatch.setenv("A2A_ENDPOINT", "https://a2a.example.com") a2a_tool = create_a2a_tool() assert a2a_tool["project_connection_id"] == "a2a-conn" assert a2a_tool["base_url"] == "https://a2a.example.com" - memory_tool = create_memory_search_tool(memory_store_name="store-1", scope="scope-1", update_delay=5) - assert memory_tool["type"] == "memory_search" - assert memory_tool["update_delay"] == 5 - def test_load_foundry_project_settings(monkeypatch: pytest.MonkeyPatch) -> None: monkeypatch.setenv("FOUNDRY_PROJECT_ENDPOINT", "https://project.example.com") diff --git a/python/samples/02-agents/providers/azure_ai/README.md b/python/samples/02-agents/providers/azure_ai/README.md index 75c30ffcac..d1444befbb 100644 --- a/python/samples/02-agents/providers/azure_ai/README.md +++ b/python/samples/02-agents/providers/azure_ai/README.md @@ -10,7 +10,7 @@ This folder contains examples demonstrating different ways to create and use age | [`azure_ai_provider_methods.py`](azure_ai_provider_methods.py) | Comprehensive guide to `AzureAIProjectAgentProvider` methods: `create_agent()` for creating new agents, 
`get_agent()` for retrieving existing agents (by name, reference, or details), and `as_agent()` for wrapping SDK objects without HTTP calls. | | [`azure_ai_use_latest_version.py`](azure_ai_use_latest_version.py) | Demonstrates how to reuse the latest version of an existing agent instead of creating a new agent version on each instantiation by using `provider.get_agent()` to retrieve the latest version. | | [`azure_ai_with_agent_as_tool.py`](azure_ai_with_agent_as_tool.py) | Shows how to use the agent-as-tool pattern with Azure AI agents, where one agent delegates work to specialized sub-agents wrapped as tools using `as_tool()`. Demonstrates hierarchical agent architectures. | -| [`using_foundry_tools.py`](using_foundry_tools.py) | Consolidated sample that uses `AzureAIProjectAgentProvider` for setup and wires all Azure AI Foundry-hosted tool helpers from `AzureAIClient` in a single agent (Bing, Fabric, SharePoint, Azure AI Search, Browser Automation, A2A, OpenAPI, memory, MCP, code interpreter, file search, web search, and image generation). | +| [`using_foundry_tools.py`](using_foundry_tools.py) | Consolidated sample that uses `AzureAIProjectAgentProvider` for setup and wires Azure AI Foundry-hosted tool helpers from `AzureAIClient` in a single agent (Bing, Fabric, SharePoint, Azure AI Search, Browser Automation, A2A, OpenAPI, MCP, code interpreter, file search, web search, and image generation). | | [`azure_ai_with_code_interpreter.py`](azure_ai_with_code_interpreter.py) | Shows how to use `AzureAIClient.get_code_interpreter_tool()` with Azure AI agents to write and execute Python code for mathematical problem solving and data analysis. | | [`azure_ai_with_code_interpreter_file_generation.py`](azure_ai_with_code_interpreter_file_generation.py) | Shows how to retrieve file IDs from code interpreter generated files using both streaming and non-streaming approaches. 
| | [`azure_ai_with_code_interpreter_file_download.py`](azure_ai_with_code_interpreter_file_download.py) | Shows how to download files generated by code interpreter using the OpenAI containers API. | @@ -74,6 +74,11 @@ All examples use `AzureCliCredential` for authentication by default. Before runn Alternatively, you can replace `AzureCliCredential` with other authentication options like `DefaultAzureCredential` or environment-based credentials. +## Memory capabilities note + +For memory retrieval/update workflows, prefer `FoundryMemoryProvider` instead of tool-based memory search helpers. +See [`../../context_providers/azure_ai_foundry_memory.py`](../../context_providers/azure_ai_foundry_memory.py). + ## Running the Examples Each example can be run independently. Navigate to this directory and run any example: diff --git a/python/samples/02-agents/providers/azure_ai/using_foundry_tools.py b/python/samples/02-agents/providers/azure_ai/using_foundry_tools.py index 42587d3858..f7db20017d 100644 --- a/python/samples/02-agents/providers/azure_ai/using_foundry_tools.py +++ b/python/samples/02-agents/providers/azure_ai/using_foundry_tools.py @@ -42,7 +42,11 @@ - client.get_image_generation_tool(...) - client.get_mcp_tool(...) - client.get_openapi_tool(...) + +For Memory, we have two approaches: - client.get_memory_search_tool(...) 
+To run the memory service in Foundry within the Agent Framework code as a ContextProvider, see: +- samples/02-agents/context_providers/azure_ai_foundry_memory.py """ @@ -63,7 +67,9 @@ async def main() -> None: instructions="You are a helpful assistant that can use Foundry-hosted tools when useful.", tools=[ client.get_code_interpreter_tool(), - client.get_web_search_tool(user_location={"country": "US", "city": "Seattle"}), + client.get_web_search_tool( + user_location={"country": "US", "city": "Seattle"} + ), client.get_image_generation_tool(), client.get_mcp_tool( name="Microsoft Learn MCP", @@ -92,7 +98,9 @@ async def main() -> None: scope="user_123", update_delay=1, ), - client.get_file_search_tool(vector_store_ids=os.environ["FILE_SEARCH_VECTOR_STORE_ID"]), + client.get_file_search_tool( + vector_store_ids=os.environ["FILE_SEARCH_VECTOR_STORE_ID"] + ), client.get_bing_tool(variant="grounding"), client.get_bing_tool(variant="custom_search"), client.get_fabric_data_agent_tool(), diff --git a/python/samples/02-agents/providers/azure_openai/README.md b/python/samples/02-agents/providers/azure_openai/README.md index 0cc83aa8d2..c09f587c9a 100644 --- a/python/samples/02-agents/providers/azure_openai/README.md +++ b/python/samples/02-agents/providers/azure_openai/README.md @@ -45,6 +45,9 @@ For the Foundry tools sample (`azure_responses_client_with_foundry_tools.py`), s - `AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME` - Tool-specific connection variables as needed (for example `FABRIC_PROJECT_CONNECTION_ID`, `BING_PROJECT_CONNECTION_ID`). +For memory capabilities, prefer `FoundryMemoryProvider` in the Azure AI context provider sample: +[`../../context_providers/azure_ai_foundry_memory.py`](../../context_providers/azure_ai_foundry_memory.py). 
+ Optionally, you can set: - `AZURE_OPENAI_API_VERSION`: The API version to use (default is `2024-02-15-preview`) - `AZURE_OPENAI_API_KEY`: Your Azure OpenAI API key (if not using `AzureCliCredential`) diff --git a/python/samples/02-agents/providers/azure_openai/azure_responses_client_with_foundry_tools.py b/python/samples/02-agents/providers/azure_openai/azure_responses_client_with_foundry_tools.py index a1c11d11cb..0b7655328a 100644 --- a/python/samples/02-agents/providers/azure_openai/azure_responses_client_with_foundry_tools.py +++ b/python/samples/02-agents/providers/azure_openai/azure_responses_client_with_foundry_tools.py @@ -19,7 +19,8 @@ it will automatically wire up Foundry-hosted tools that are available to the agent. This sample demonstrates how to set up such a client and use it within an agent, along with a variety of Foundry tools. -The same tools are available directly on the ``AzureAIClient`` as well, so this wiring is not unique to the responses client. +The same tools are available directly on the ``AzureAIClient`` as well, +so this wiring is not unique to the responses client. Important: - This sample is intentionally non-defensive and includes direct tool wiring. @@ -47,7 +48,9 @@ - client.get_image_generation_tool(...) - client.get_mcp_tool(...) - client.get_openapi_tool(...) -- client.get_memory_search_tool(...) 
+ +For memory capabilities, prefer ``FoundryMemoryProvider`` from: +- samples/02-agents/context_providers/azure_ai_foundry_memory.py """ @@ -68,56 +71,51 @@ async def main() -> None: instructions="You are a helpful assistant that can use Foundry-hosted tools when useful.", tools=[ client.get_code_interpreter_tool(), - client.get_web_search_tool( - user_location={"country": "US", "city": "Seattle"} - ), + # client.get_web_search_tool( + # user_location={"country": "US", "city": "Seattle"} + # ), client.get_image_generation_tool(), - client.get_mcp_tool( - name="Microsoft Learn MCP", - url="https://learn.microsoft.com/api/mcp", - approval_mode="never_require", - ), - client.get_openapi_tool( - name="get_countries", - spec={ - "openapi": "3.0.0", - "info": {"title": "Countries API", "version": "1.0.0"}, - "paths": { - "/countries": { - "get": { - "operationId": "listCountries", - "responses": {"200": {"description": "OK"}}, - } - } - }, - }, - description="Retrieve information about countries.", - auth={"type": "anonymous"}, - ), - client.get_memory_search_tool( - memory_store_name="agent-framework-memory-store", - scope="user_123", - update_delay=1, - ), - client.get_file_search_tool( - vector_store_ids=os.environ["FILE_SEARCH_VECTOR_STORE_ID"] - ), - client.get_bing_tool(variant="grounding"), - client.get_bing_tool(variant="custom_search"), - client.get_fabric_data_agent_tool(), - client.get_sharepoint_grounding_tool(), - client.get_azure_ai_search_tool(query_type="simple"), + # client.get_mcp_tool( + # name="Microsoft Learn MCP", + # url="https://learn.microsoft.com/api/mcp", + # approval_mode="never_require", + # ), + # client.get_openapi_tool( + # name="get_countries", + # spec={ + # "openapi": "3.0.0", + # "info": {"title": "Countries API", "version": "1.0.0"}, + # "paths": { + # "/countries": { + # "get": { + # "operationId": "listCountries", + # "responses": {"200": {"description": "OK"}}, + # } + # } + # }, + # }, + # description="Retrieve information about 
countries.", + # auth={"type": "anonymous"}, + # ), + # client.get_file_search_tool( + # vector_store_ids=os.environ["FILE_SEARCH_VECTOR_STORE_ID"] + # ), + # client.get_bing_tool(variant="grounding"), + # client.get_bing_tool(variant="custom_search"), + # client.get_fabric_data_agent_tool(), + # client.get_sharepoint_grounding_tool(), + # client.get_azure_ai_search_tool(query_type="simple"), client.get_browser_automation_tool(), - client.get_a2a_tool(), + # client.get_a2a_tool(), ], ) + session = agent.create_session() - query = ( - "List the tool categories available to you and when each category is useful." - ) - print(f"User: {query}") - result = await agent.run(query) - print(f"Agent: {result}") + user_message = input("User: ") + while user_message.lower() not in {"exit", "quit"}: + result = await agent.run(user_message, session=session) + print(f"Agent: {result}") + user_message = input("User: ") if __name__ == "__main__":