From 4f6a4f2120805b0c923457f96fbcdcdc0ec0877f Mon Sep 17 00:00:00 2001
From: Christopher Mancini
Date: Thu, 29 Jan 2026 10:03:33 -0500
Subject: [PATCH 1/3] feat: expanded LLM report output

- include cluster information
- include failure excerpts from logs
- expand test coverage
- add integration tests for sending slack messages
---
 internal/reporter/README.md                | 156 +++++
 internal/reporter/slack.go                 | 295 ++++++--
 internal/reporter/slack_testoutput_test.go | 297 ++++++++
 internal/reporter/slack_workflow_test.go   | 426 ++++++++++++
 .../build-log.txt                          | 646 ++++++++++++++++++
 pkg/common/slack/client.go                 |   8 +-
 pkg/e2e/e2e.go                             |   7 +-
 pkg/e2e/e2e_test.go                        |  10 +-
 pkg/e2e/slack_integration_test.go          | 251 +++++++
 9 files changed, 2051 insertions(+), 45 deletions(-)
 create mode 100644 internal/reporter/slack_testoutput_test.go
 create mode 100644 internal/reporter/slack_workflow_test.go
 create mode 100644 internal/reporter/testdata/periodic-ci-openshift-osde2e-main-nightly-4.20-osd-aws/build-log.txt
 create mode 100644 pkg/e2e/slack_integration_test.go

diff --git a/internal/reporter/README.md b/internal/reporter/README.md
index 60064910e0..b38bf950e3 100644
--- a/internal/reporter/README.md
+++ b/internal/reporter/README.md
@@ -69,3 +69,159 @@ if len(reporters) > 0 {
 	}
 }
 ```
+
+## Slack Workflow Integration
+
+The Slack reporter sends test failure notifications through a **Slack Workflow** that creates threaded messages. This lets teams add the shared workflow to their channels and receive structured failure notifications.
+
+### How It Works
+
+The workflow creates three messages in a thread:
+
+1. **Initial Message** - Failure summary with cluster and test suite info
+2. **First Reply** - AI-powered analysis with root cause and recommendations
+3. **Second Reply** - Extracted test failure logs (only failure blocks, not full stdout)
+
+### Setup Instructions
+
+#### 1. Add Workflow to Your Slack Channel
+
+Each team adds the shared workflow to their channel:
+
+1. Open the workflow link: https://slack.com/shortcuts/Ft09RL7M2AMV/60f07b46919da20d103806a8f5bba094
+2. Click **Add to Slack**
+3. Select your destination channel
+4. **Copy the webhook URL** (starts with `https://hooks.slack.com/workflows/...`)
+
+#### 2. Get Your Channel ID
+
+The workflow requires a Slack **channel ID**, not a channel name.
+
+**To find your channel ID:**
+1. Right-click the channel name in Slack
+2. Select **View channel details**
+3. Scroll to the bottom and **copy the channel ID** (starts with `C`)
+
+**Example:** `C06HQR8HN0L`
+
+#### 3. Configure Pipeline
+
+Set these environment variables in your CI/CD pipeline or Vault:
+
+```bash
+LOG_ANALYSIS_SLACK_WEBHOOK=https://hooks.slack.com/workflows/T.../A.../...
+LOG_ANALYSIS_SLACK_CHANNEL=C06HQR8HN0L  # Channel ID, not #channel-name
+```
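+
+Before wiring the webhook into a pipeline, you can sanity-check it with a direct POST. The field names follow the Workflow Payload Structure documented below; the values here are placeholders:
+
+```bash
+# Expect the workflow to post a small thread in the configured channel
+curl -X POST "$LOG_ANALYSIS_SLACK_WEBHOOK" \
+  -H 'Content-Type: application/json' \
+  -d '{"channel": "C06HQR8HN0L", "summary": "webhook smoke test", "analysis": "manual check"}'
+```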
+
+#### 4. Enable in Config
+
+```yaml
+tests:
+  enableSlackNotify: true
+logAnalysis:
+  enableAnalysis: true
+```
+
+### Environment Variables
+
+| Variable | Required | Description |
+|----------|----------|-------------|
+| `LOG_ANALYSIS_SLACK_WEBHOOK` | Yes | Workflow webhook URL from step 1 |
+| `LOG_ANALYSIS_SLACK_CHANNEL` | Yes | Channel ID (starts with `C`) |
+
+### Message Format
+
+**Summary (Initial Message):**
+```
+:failed: Pipeline Failed at E2E Test
+
+====== ☸️ Cluster Information ======
+• Cluster ID: `abc-123`
+• Name: `my-cluster`
+• Version: `4.20`
+• Provider: `aws`
+• Expiration: `2026-01-28T10:00:00Z`
+
+====== 🧪 Test Suite Information ======
+• Image: `quay.io/openshift/osde2e-tests`
+• Commit: `abc123`
+• Environment: `stage`
+```
+
+**Analysis (First Reply):**
+```
+====== 🔍 Possible Cause ======
+<root cause from the AI analysis>
+
+====== 💡 Recommendations ======
+1. <recommendation>
+2. <recommendation>
+```
+
+**Extended Logs (Second Reply):**
+```
+Found 3 test failure(s):
+
+[FAILED] test description
+
+...
+```
+
+### Testing
+
+#### Unit Tests
+```bash
+# Run all reporter tests
+go test -v github.com/openshift/osde2e/internal/reporter
+
+# Run specific workflow tests
+go test -v -run TestSlackReporter_buildWorkflowPayload
+go test -v -run TestSlackReporter_extractFailureBlocks
+```
+
+#### Integration Test (with real Slack)
+```bash
+# Set environment variables
+export LOG_ANALYSIS_SLACK_WEBHOOK="https://hooks.slack.com/workflows/..."
+export LOG_ANALYSIS_SLACK_CHANNEL="C06HQR8HN0L"
+
+# Run integration test
+go test -v -run TestSlackReporter_Integration github.com/openshift/osde2e/pkg/e2e
+```
+
+**Note:** The integration test automatically skips when these environment variables are not set.
+
+### Workflow Payload Structure
+
+The reporter sends this JSON payload to the Slack Workflow:
+
+```json
+{
+  "channel": "C06HQR8HN0L",
+  "summary": "Pipeline Failed at E2E Test\n\n# Cluster Info...",
+  "analysis": "# Possible Cause\n...",
+  "extended_logs": "Found 3 test failure(s):\n...",
+  "image": "quay.io/openshift/osde2e:abc123",
+  "env": "stage",
+  "commit": "abc123"
+}
+```
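+
+The payload maps one-to-one onto the `WorkflowPayload` struct added in `slack.go`. As a rough sketch of the equivalent request (the real send goes through the `pkg/common/slack` client; `webhookURL` is assumed to hold the workflow URL):
+
+```go
+payload := WorkflowPayload{
+	Channel:  "C06HQR8HN0L",
+	Summary:  ":failed: Pipeline Failed at E2E Test\n...",
+	Analysis: "====== 🔍 Possible Cause ======\n...",
+	Env:      "stage",
+}
+body, err := json.Marshal(payload)
+if err != nil {
+	return err
+}
+// POST as JSON to the workflow webhook URL
+resp, err := http.Post(webhookURL, "application/json", bytes.NewReader(body))
+if err != nil {
+	return err
+}
+defer resp.Body.Close()
+```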
+
+### Troubleshooting
+
+**Workflow not posting threaded messages:**
+- Verify the webhook URL comes from the workflow (not a legacy incoming webhook)
+- Workflow URLs contain `/workflows/` in the path
+- Legacy incoming webhook URLs contain `/services/` instead
+
+**Channel not receiving messages:**
+- Ensure you're using the channel ID (starts with `C`), not the channel name
+- Channel IDs are case-sensitive
+
+**Missing fields in Slack message:**
+- Check that all required fields are present: `channel`, `summary`, `analysis`
+- Verify the environment variables are set correctly
+
+**Analysis too long:**
+- The workflow handles message splitting automatically
+- Payload limits: 30KB per field (enforced by code)
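+
+### Building the Configuration in Code
+
+A minimal sketch of how a caller inside this repository can assemble the notification configuration with the helpers added in this patch (`reportDir` is assumed to be the artifacts directory holding `test_output.txt` or `build-log.txt`):
+
+```go
+clusterInfo := &reporter.ClusterInfo{
+	ID:       "abc-123",
+	Name:     "my-cluster",
+	Version:  "4.20",
+	Provider: "aws",
+}
+// BuildNotificationConfig returns nil when either the webhook or the
+// channel is empty, which disables notifications.
+cfg := reporter.BuildNotificationConfig(
+	os.Getenv("LOG_ANALYSIS_SLACK_WEBHOOK"),
+	os.Getenv("LOG_ANALYSIS_SLACK_CHANNEL"),
+	clusterInfo,
+	reportDir,
+)
+```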
diff --git a/internal/reporter/slack.go b/internal/reporter/slack.go
index d0f4fc7606..61d20ee455 100644
--- a/internal/reporter/slack.go
+++ b/internal/reporter/slack.go
@@ -4,11 +4,29 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
+	"os"
+	"path/filepath"
 	"strings"
 
 	commonslack "github.com/openshift/osde2e/pkg/common/slack"
 )
 
+const (
+	// Slack workflow payload limit (conservative estimate).
+	// Slack workflows can handle much larger payloads than webhooks.
+	maxWorkflowFieldLength = 30000 // 30KB per field
+
+	// Test output truncation thresholds
+	fullOutputThreshold = 250
+	initialContextLines = 20
+	finalSummaryLines   = 80
+
+	// Failure block extraction
+	maxFailureBlocks    = 3
+	failureContextLines = 5
+	failureBlockLines   = 30
+)
+
 // SlackReporter implements Reporter interface for Slack webhook notifications
 type SlackReporter struct {
 	client *commonslack.Client
@@ -29,7 +47,7 @@ func (s *SlackReporter) Name() string {
 // Report sends the analysis result to Slack via webhook
 func (s *SlackReporter) Report(ctx context.Context, result *AnalysisResult, config *ReporterConfig) error {
 	if !config.Enabled {
-		return nil // Skip disabled reporters
+		return nil
 	}
 
 	webhookURL, ok := config.Settings["webhook_url"].(string)
@@ -37,63 +55,186 @@ func (s *SlackReporter) Report(ctx context.Context, result *AnalysisResult, conf
 		return fmt.Errorf("webhook_url is required and must be a string")
 	}
 
-	// Create simple message
-	message := s.formatMessage(result, config)
+	// Build workflow payload
+	payload := s.buildWorkflowPayload(result, config)
 
-	// Send to Slack using common package
-	if err := s.client.SendWebhook(ctx, webhookURL, message); err != nil {
+	// Send to Slack workflow webhook
+	if err := s.client.SendWebhook(ctx, webhookURL, payload); err != nil {
 		return fmt.Errorf("failed to send to Slack: %w", err)
 	}
 
 	return nil
 }
 
-// Message represents a simple Slack message payload
-type Message struct {
-	Analysis string `json:"analysis"`
-	Summary  string `json:"summary,omitempty"`
-	Channel  string `json:"channel,omitempty"`
+// WorkflowPayload represents the Slack workflow webhook payload
+type WorkflowPayload struct {
+	Channel      string `json:"channel"`                 // Required - Slack channel ID
+	Summary      string `json:"summary"`                 // Required - Initial message
+	Analysis     string `json:"analysis"`                // Required - AI analysis (posted as reply)
+	ExtendedLogs string `json:"extended_logs,omitempty"` // Optional - Test failures (posted as reply)
+	Image        string `json:"image,omitempty"`         // Optional - Test image
+	Env          string `json:"env,omitempty"`           // Optional - Environment
+	Commit       string `json:"commit,omitempty"`        // Optional - Commit hash
 }
 
-// formatMessage creates a simple text message for Slack
-func (s *SlackReporter) formatMessage(result *AnalysisResult, config *ReporterConfig) *Message {
-	// Create simple text message
-	statusEmoji := ":failed:"
-	summary := fmt.Sprintf("%s Pipeline Failed at E2E Test\n", statusEmoji)
-	text := ""
+// ClusterInfo holds cluster information for reporting
+type ClusterInfo struct {
+	ID            string
+	Name          string
+	Provider      string
+	Region        string
+	CloudProvider string
+	Version       string
+	Expiration    string
+}
+
+// buildWorkflowPayload constructs the JSON payload for the Slack Workflow
+func (s *SlackReporter) buildWorkflowPayload(result *AnalysisResult, config *ReporterConfig) *WorkflowPayload {
+	payload := &WorkflowPayload{}
+
+	// Required: channel ID
+	if channel, ok := config.Settings["channel"].(string); ok && channel != "" {
+		payload.Channel = channel
+	}
+
+	// Required: summary (initial message)
+	payload.Summary = s.buildSummaryField(config)
+
+	// Required: analysis (AI response)
+	payload.Analysis = s.buildAnalysisField(result)
+
+	// Optional: extended_logs (test failures)
+	if reportDir, ok := config.Settings["report_dir"].(string); ok && reportDir != "" {
+		if testOutput := s.readTestOutput(reportDir); testOutput != "" {
+			payload.ExtendedLogs = s.enforceFieldLimit(testOutput, maxWorkflowFieldLength)
+		} else {
+			// Fallback when the report directory yields no readable failure logs
+			payload.ExtendedLogs = "No test failure logs found in the report directory."
+		}
+	} else {
+		// Fallback when no report directory is configured
+		payload.ExtendedLogs = "Test output logs not available (no report directory configured)."
+	}
 
 	// Optional metadata
 	if image, ok := config.Settings["image"].(string); ok && image != "" {
-		imageInfo := strings.Split(image, ":")
-		image := imageInfo[0]
-		commit := imageInfo[1]
-		env := config.Settings["env"].(string)
-		summary += fmt.Sprintf("Test suite: %s \nCommit: %s \nEnvironment: %s\n", image, commit, env)
+		payload.Image = image
+		// Extract commit from image tag if present
+		parts := strings.Split(image, ":")
+		if len(parts) == 2 {
+			payload.Commit = parts[1]
+		}
 	}
 
-	// Try to parse and format JSON analysis
+	if env, ok := config.Settings["env"].(string); ok && env != "" {
+		payload.Env = env
+	}
+
+	return payload
+}
+
+// buildSummaryField creates the initial message content
+func (s *SlackReporter) buildSummaryField(config *ReporterConfig) string {
+	var builder strings.Builder
+
+	// Header
+	builder.WriteString(":failed: Pipeline Failed at E2E Test\n\n")
+
+	// Cluster info
+	builder.WriteString(s.buildClusterInfoSection(config))
+
+	// Test suite info
+	builder.WriteString(s.buildTestSuiteSection(config))
+
+	return s.enforceFieldLimit(builder.String(), maxWorkflowFieldLength)
+}
+
+// buildAnalysisField formats the AI analysis
+func (s *SlackReporter) buildAnalysisField(result *AnalysisResult) string {
+	var builder strings.Builder
+
+	// Format the analysis content (handles JSON parsing)
 	if formattedAnalysis := s.formatAnalysisContent(result.Content); formattedAnalysis != "" {
-		text += formattedAnalysis
-	} else {
-		text += fmt.Sprintf("Analysis:\n%s", result.Content)
+		builder.WriteString(formattedAnalysis)
+	} else if result.Content != "" {
+		builder.WriteString(result.Content)
 	}
+
+	// Add error if present
 	if result.Error != "" {
-		text += fmt.Sprintf("\n\n Error: %s", result.Error)
+		if builder.Len() > 0 {
+			builder.WriteString("\n\n")
+		}
+		builder.WriteString("====== ⚠️ Error ======\n")
+		builder.WriteString(result.Error)
 	}
-	message := &Message{
-		Summary:  summary,
-		Analysis: text,
+
+	return s.enforceFieldLimit(builder.String(), maxWorkflowFieldLength)
+}
+
+// buildClusterInfoSection creates the cluster information section
+func (s *SlackReporter) buildClusterInfoSection(config *ReporterConfig) string {
+	clusterInfo, ok := config.Settings["cluster_info"].(*ClusterInfo)
+	if !ok || clusterInfo == nil {
+		return ""
 	}
-	// Add channel if specified (for workflow webhooks)
-	if channel, ok := config.Settings["channel"].(string); ok && channel != "" {
-		message.Channel = channel
+
+	var builder strings.Builder
+	builder.WriteString("====== ☸️ Cluster Information ======\n")
+	builder.WriteString(fmt.Sprintf("• Cluster ID: `%s`\n", clusterInfo.ID))
+	if clusterInfo.Name != "" {
+		builder.WriteString(fmt.Sprintf("• Name: `%s`\n", clusterInfo.Name))
+	}
+	if clusterInfo.Version != "" {
+		builder.WriteString(fmt.Sprintf("• Version: `%s`\n", clusterInfo.Version))
 	}
+	if clusterInfo.Provider != "" {
+		builder.WriteString(fmt.Sprintf("• Provider: `%s`\n", clusterInfo.Provider))
+	}
+	if clusterInfo.Expiration != "" {
+		builder.WriteString(fmt.Sprintf("• Expiration: `%s`\n", clusterInfo.Expiration))
+	}
+	builder.WriteString("\n")
+
+	return builder.String()
+}
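+
+// Example (illustrative): with image "quay.io/openshift/osde2e-tests:abc123"
+// and env "stage", buildTestSuiteSection below renders:
+//
+//	====== 🧪 Test Suite Information ======
+//	• Image: `quay.io/openshift/osde2e-tests`
+//	• Commit: `abc123`
+//	• Environment: `stage`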
+
+// buildTestSuiteSection creates the test suite information section
+func (s *SlackReporter) buildTestSuiteSection(config *ReporterConfig) string {
+	image, ok := config.Settings["image"].(string)
+	if !ok || image == "" {
+		return ""
+	}
+
+	imageInfo := strings.Split(image, ":")
+	if len(imageInfo) < 2 {
+		return ""
+	}
+
+	var builder strings.Builder
+	builder.WriteString("====== 🧪 Test Suite Information ======\n")
+	builder.WriteString(fmt.Sprintf("• Image: `%s`\n", imageInfo[0]))
+	builder.WriteString(fmt.Sprintf("• Commit: `%s`\n", imageInfo[1]))
+	if env, ok := config.Settings["env"].(string); ok && env != "" {
+		builder.WriteString(fmt.Sprintf("• Environment: `%s`\n", env))
+	}
+	builder.WriteString("\n")
 
-	return message
+	return builder.String()
+}
+
+// enforceFieldLimit truncates a field to the maximum allowed length
+func (s *SlackReporter) enforceFieldLimit(content string, maxLength int) string {
+	if len(content) <= maxLength {
+		return content
+	}
+	// Truncate, reserving room for the truncation notice
+	truncated := content[:maxLength-100]
+	return truncated + "\n\n... (content truncated due to length)"
 }
 
 // formatAnalysisContent tries to parse JSON and format it nicely for Slack
 func (s *SlackReporter) formatAnalysisContent(content string) string {
-	// Look for JSON content in code blocks
 	lines := strings.Split(content, "\n")
 	var jsonContent strings.Builder
 	inJSONBlock := false
@@ -115,7 +256,6 @@ func (s *SlackReporter) formatAnalysisContent(content string) string {
 		return ""
 	}
 
-	// Parse JSON
 	var analysis map[string]interface{}
 	if err := json.Unmarshal([]byte(jsonContent.String()), &analysis); err != nil {
 		return ""
@@ -123,14 +263,12 @@ func (s *SlackReporter) formatAnalysisContent(content string) string {
 
 	var formatted strings.Builder
 
-	// Format root cause
 	if rootCause, ok := analysis["root_cause"].(string); ok && rootCause != "" {
 		formatted.WriteString("====== 🔍 Possible Cause ======\n")
 		formatted.WriteString(rootCause)
 		formatted.WriteString("\n\n")
 	}
 
-	// Format recommendations
 	if recommendations, ok := analysis["recommendations"].([]interface{}); ok && len(recommendations) > 0 {
 		formatted.WriteString("====== 💡 Recommendations ======\n")
 		for i, rec := range recommendations {
@@ -143,6 +281,85 @@ func (s *SlackReporter) formatAnalysisContent(content string) string {
 	return formatted.String()
 }
 
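+// Extended-log selection strategy (worked example): the 646-line
+// build-log.txt in testdata exceeds fullOutputThreshold (250 lines), so
+// only up to maxFailureBlocks (3) extracted [FAILED] blocks are sent; a
+// large log with no [FAILED] marker is reduced to its final
+// finalSummaryLines (80) lines instead.
+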
+// readTestOutput reads the test stdout from test_output.txt, test_output.log, or build-log.txt
+func (s *SlackReporter) readTestOutput(reportDir string) string {
+	for _, filename := range []string{"test_output.txt", "test_output.log", "build-log.txt"} {
+		filePath := filepath.Join(reportDir, filename)
+		if content, err := os.ReadFile(filepath.Clean(filePath)); err == nil {
+			lines := strings.Split(strings.TrimRight(string(content), "\n"), "\n")
+			totalLines := len(lines)
+
+			if totalLines <= fullOutputThreshold {
+				return string(content)
+			}
+
+			// For large logs, extract only the failure blocks
+			failureBlocks := s.extractFailureBlocks(lines, 0, totalLines)
+			if len(failureBlocks) > 0 {
+				var result strings.Builder
+				result.WriteString(fmt.Sprintf("Found %d test failure(s):\n\n", len(failureBlocks)))
+				for i, block := range failureBlocks {
+					if i > 0 {
+						result.WriteString("\n---\n\n")
+					}
+					result.WriteString(block)
+				}
+				return result.String()
+			}
+
+			// No failures found, return summary section
+			lastN := finalSummaryLines
+			var result strings.Builder
+			result.WriteString("No [FAILED] markers found. Showing final output:\n\n")
+			startIdx := totalLines - lastN
+			if startIdx < 0 {
+				startIdx = 0
+			}
+			for i := startIdx; i < totalLines; i++ {
+				result.WriteString(lines[i])
+				result.WriteString("\n")
+			}
+			return result.String()
+		}
+	}
+	return ""
+}
+
+// extractFailureBlocks finds [FAILED] test blocks and extracts them with context
+func (s *SlackReporter) extractFailureBlocks(lines []string, startIdx, endIdx int) []string {
+	var blocks []string
+
+	for i := startIdx; i < endIdx && len(blocks) < maxFailureBlocks; i++ {
+		if strings.Contains(lines[i], "[FAILED]") { // also matches "• [FAILED]" bullets
+			var block strings.Builder
+
+			start := i - failureContextLines
+			if start < startIdx {
+				start = startIdx
+			}
+
+			end := i + failureBlockLines
+			if end > endIdx {
+				end = endIdx
+			}
+
+			for j := start; j < end; j++ {
+				block.WriteString(lines[j])
+				if j < end-1 {
+					block.WriteString("\n")
+				}
+			}
+
+			blocks = append(blocks, block.String())
+
+			// Skip ahead to avoid overlapping blocks
+			i = end - 1
+		}
+	}
+
+	return blocks
+}
+
 // SlackReporterConfig creates a reporter config for Slack
 func SlackReporterConfig(webhookURL string, enabled bool) ReporterConfig {
 	return ReporterConfig{
@@ -155,13 +372,15 @@ func SlackReporterConfig(webhookURL string, enabled bool) ReporterConfig {
 }
 
 // BuildNotificationConfig creates notification configuration for log analysis.
-func BuildNotificationConfig(webhook string, channel string) *NotificationConfig {
+func BuildNotificationConfig(webhook string, channel string, clusterInfo interface{}, reportDir string) *NotificationConfig {
 	if webhook == "" || channel == "" {
 		return nil
 	}
 
 	slackConfig := SlackReporterConfig(webhook, true)
 	slackConfig.Settings["channel"] = channel
+	slackConfig.Settings["cluster_info"] = clusterInfo
+	slackConfig.Settings["report_dir"] = reportDir
 
 	return &NotificationConfig{
 		Enabled: true,
diff --git a/internal/reporter/slack_testoutput_test.go b/internal/reporter/slack_testoutput_test.go
new file mode 100644
index 0000000000..dd5faff130
--- /dev/null
+++ b/internal/reporter/slack_testoutput_test.go
@@ -0,0 +1,297 @@
+package reporter
+
+import (
+	"os"
+	"path/filepath"
+	"strings"
+	"testing"
+)
+
+func TestSlackReporter_readTestOutput(t *testing.T) {
+	reporter := NewSlackReporter()
+
+	t.Run("extracts failure blocks from real Prow data", func(t *testing.T) {
+		reportDir := "testdata/periodic-ci-openshift-osde2e-main-nightly-4.20-osd-aws"
+		result := reporter.readTestOutput(reportDir)
+
+		if result == "" {
+			t.Fatal("expected non-empty result from real test data")
+		}
+
+		// Should contain failure count
+		if !strings.Contains(result, "Found") && !strings.Contains(result, "test failure") {
+			t.Error("result should indicate test failures found")
+		}
+
+		// Should contain [FAILED] markers or indicate no failures
+		if !strings.Contains(result, "[FAILED]") && !strings.Contains(result, "No [FAILED] markers found") {
+			t.Error("result should contain failure markers or indicate none found")
+		}
+
+		t.Logf("Extracted test output (%d chars):\n%s", len(result), result[:min(500, len(result))])
+	})
+
+	t.Run("returns empty for non-existent directory", func(t *testing.T) {
+		result := reporter.readTestOutput("/nonexistent/directory")
+		if result != "" {
+			t.Errorf("expected empty string for non-existent directory, got: %s", result)
+		}
+	})
+
+	t.Run("handles small test output", func(t *testing.T) {
+		tmpDir := t.TempDir()
+		content := "line 1\nline 2\nline 3\n"
+		if err := os.WriteFile(filepath.Join(tmpDir, "test_output.txt"), 
[]byte(content), 0o644); err != nil { + t.Fatal(err) + } + + result := reporter.readTestOutput(tmpDir) + if result != content { + t.Errorf("expected full content for small file, got: %s", result) + } + }) + + t.Run("extracts failure blocks from synthetic data", func(t *testing.T) { + tmpDir := t.TempDir() + var content strings.Builder + for i := 1; i <= 500; i++ { + if i == 100 { + content.WriteString("Running test: authentication\n") + content.WriteString("[FAILED] authentication failed\n") + content.WriteString("Expected: true\n") + content.WriteString("Got: false\n") + } else if i == 300 { + content.WriteString("Running test: database connection\n") + content.WriteString("• [FAILED] connection timeout\n") + content.WriteString("Timeout after 30s\n") + } else { + content.WriteString("line " + string(rune('0'+i%10)) + "\n") + } + } + + if err := os.WriteFile(filepath.Join(tmpDir, "test_output.txt"), []byte(content.String()), 0o644); err != nil { + t.Fatal(err) + } + + result := reporter.readTestOutput(tmpDir) + + // Should extract both failures + if !strings.Contains(result, "[FAILED] authentication failed") { + t.Error("should contain first failure") + } + if !strings.Contains(result, "• [FAILED] connection timeout") { + t.Error("should contain second failure") + } + if !strings.Contains(result, "Found 2 test failure(s)") { + t.Error("should indicate 2 failures found") + } + }) +} + +func TestSlackReporter_extractFailureBlocks(t *testing.T) { + reporter := NewSlackReporter() + + t.Run("extracts single failure", func(t *testing.T) { + lines := []string{ + "line 1", + "line 2", + "[FAILED] test failed", + "error details", + "line 5", + } + + blocks := reporter.extractFailureBlocks(lines, 0, len(lines)) + + if len(blocks) != 1 { + t.Fatalf("expected 1 block, got %d", len(blocks)) + } + + if !strings.Contains(blocks[0], "[FAILED] test failed") { + t.Error("block should contain failure marker") + } + if !strings.Contains(blocks[0], "error details") { + t.Error("block should contain context after failure") + } + }) + + t.Run("extracts multiple failures", func(t *testing.T) { + lines := []string{ + "start", + "[FAILED] test 1", + "error 1", + "padding 1", "padding 2", "padding 3", "padding 4", "padding 5", + "padding 6", "padding 7", "padding 8", "padding 9", "padding 10", + "padding 11", "padding 12", "padding 13", "padding 14", "padding 15", + "padding 16", "padding 17", "padding 18", "padding 19", "padding 20", + "padding 21", "padding 22", "padding 23", "padding 24", "padding 25", + "padding 26", "padding 27", "padding 28", "padding 29", "padding 30", + "padding 31", "padding 32", "padding 33", "padding 34", "padding 35", + "[FAILED] test 2", + "error 2", + "end", + } + + blocks := reporter.extractFailureBlocks(lines, 0, len(lines)) + + if len(blocks) != 2 { + t.Fatalf("expected 2 blocks, got %d", len(blocks)) + } + + if !strings.Contains(blocks[0], "[FAILED] test 1") { + t.Error("first block should contain first failure") + } + if !strings.Contains(blocks[1], "[FAILED] test 2") { + t.Error("second block should contain second failure") + } + }) + + t.Run("limits to max failures", func(t *testing.T) { + lines := make([]string, 0) + for i := 0; i < 10; i++ { + lines = append(lines, "line before") + lines = append(lines, "[FAILED] test "+string(rune('0'+i))) + lines = append(lines, "line after") + } + + blocks := reporter.extractFailureBlocks(lines, 0, len(lines)) + + if len(blocks) > maxFailureBlocks { + t.Errorf("expected max %d blocks, got %d", maxFailureBlocks, len(blocks)) + } + }) + + 
t.Run("handles no failures", func(t *testing.T) { + lines := []string{"line 1", "line 2", "line 3"} + blocks := reporter.extractFailureBlocks(lines, 0, len(lines)) + + if len(blocks) != 0 { + t.Errorf("expected 0 blocks for no failures, got %d", len(blocks)) + } + }) +} + +func TestSlackReporter_buildClusterInfoSection(t *testing.T) { + reporter := NewSlackReporter() + + t.Run("builds complete cluster info", func(t *testing.T) { + clusterInfo := &ClusterInfo{ + ID: "cluster-abc", + Name: "production-cluster", + Version: "4.23", + Provider: "aws", + Expiration: "2026-03-01T00:00:00Z", + } + + config := &ReporterConfig{ + Settings: map[string]interface{}{ + "cluster_info": clusterInfo, + }, + } + + result := reporter.buildClusterInfoSection(config) + + expectedFields := []string{ + "====== ☸️ Cluster Information ======", + "cluster-abc", + "production-cluster", + "4.23", + "aws", + "2026-03-01T00:00:00Z", + } + + for _, field := range expectedFields { + if !strings.Contains(result, field) { + t.Errorf("cluster info should contain %q", field) + } + } + }) + + t.Run("handles minimal cluster info", func(t *testing.T) { + clusterInfo := &ClusterInfo{ + ID: "cluster-xyz", + } + + config := &ReporterConfig{ + Settings: map[string]interface{}{ + "cluster_info": clusterInfo, + }, + } + + result := reporter.buildClusterInfoSection(config) + + if !strings.Contains(result, "cluster-xyz") { + t.Error("should contain cluster ID") + } + }) + + t.Run("returns empty for nil cluster info", func(t *testing.T) { + config := &ReporterConfig{ + Settings: map[string]interface{}{}, + } + + result := reporter.buildClusterInfoSection(config) + + if result != "" { + t.Errorf("expected empty string for nil cluster info, got: %s", result) + } + }) +} + +func TestSlackReporter_buildTestSuiteSection(t *testing.T) { + reporter := NewSlackReporter() + + t.Run("builds test suite info", func(t *testing.T) { + config := &ReporterConfig{ + Settings: map[string]interface{}{ + "image": "quay.io/openshift/test:v2.0", + "env": "staging", + }, + } + + result := reporter.buildTestSuiteSection(config) + + if !strings.Contains(result, "quay.io/openshift/test") { + t.Error("should contain image name") + } + if !strings.Contains(result, "v2.0") { + t.Error("should contain commit/tag") + } + if !strings.Contains(result, "staging") { + t.Error("should contain environment") + } + }) + + t.Run("returns empty for missing image", func(t *testing.T) { + config := &ReporterConfig{ + Settings: map[string]interface{}{}, + } + + result := reporter.buildTestSuiteSection(config) + + if result != "" { + t.Errorf("expected empty string for missing image, got: %s", result) + } + }) + + t.Run("returns empty for invalid image format", func(t *testing.T) { + config := &ReporterConfig{ + Settings: map[string]interface{}{ + "image": "invalid-no-tag", + }, + } + + result := reporter.buildTestSuiteSection(config) + + if result != "" { + t.Errorf("expected empty string for invalid image format, got: %s", result) + } + }) +} + +// Helper function +func min(a, b int) int { + if a < b { + return a + } + return b +} diff --git a/internal/reporter/slack_workflow_test.go b/internal/reporter/slack_workflow_test.go new file mode 100644 index 0000000000..becdb6a5ea --- /dev/null +++ b/internal/reporter/slack_workflow_test.go @@ -0,0 +1,426 @@ +package reporter + +import ( + "context" + "encoding/json" + "io" + "net/http" + "net/http/httptest" + "testing" +) + +func TestSlackReporter_buildWorkflowPayload(t *testing.T) { + reporter := NewSlackReporter() + + result := 
&AnalysisResult{ + Content: `Here is the analysis + +` + "```json" + ` +{ + "root_cause": "Test failed due to timeout", + "recommendations": ["Increase timeout", "Check network"] +} +` + "```" + ` +`, + } + + clusterInfo := &ClusterInfo{ + ID: "test-123", + Name: "test-cluster", + Version: "4.20", + Provider: "aws", + Expiration: "2026-01-28T10:00:00Z", + } + + config := &ReporterConfig{ + Settings: map[string]interface{}{ + "webhook_url": "https://hooks.slack.com/test", + "channel": "C06HQR8HN0L", + "cluster_info": clusterInfo, + "image": "quay.io/test:abc123", + "env": "stage", + }, + } + + payload := reporter.buildWorkflowPayload(result, config) + + // Verify required fields + if payload.Channel != "C06HQR8HN0L" { + t.Errorf("expected channel C06HQR8HN0L, got %s", payload.Channel) + } + + if payload.Summary == "" { + t.Error("summary field is required but empty") + } + + if payload.Analysis == "" { + t.Error("analysis field is required but empty") + } + + // Verify summary contains cluster info + if !contains(payload.Summary, "test-123") { + t.Error("summary should contain cluster ID") + } + if !contains(payload.Summary, "4.20") { + t.Error("summary should contain version") + } + + // Verify analysis contains formatted content + if !contains(payload.Analysis, "====== 🔍 Possible Cause ======") { + t.Error("analysis should contain formatted root cause") + } + if !contains(payload.Analysis, "====== 💡 Recommendations ======") { + t.Error("analysis should contain formatted recommendations") + } + + // Verify optional fields + if payload.Image != "quay.io/test:abc123" { + t.Errorf("expected image quay.io/test:abc123, got %s", payload.Image) + } + + if payload.Commit != "abc123" { + t.Errorf("expected commit abc123, got %s", payload.Commit) + } + + if payload.Env != "stage" { + t.Errorf("expected env stage, got %s", payload.Env) + } +} + +func TestSlackReporter_Report_WorkflowFormat(t *testing.T) { + // Create a test server to capture the webhook payload + var capturedPayload WorkflowPayload + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + body, err := io.ReadAll(r.Body) + if err != nil { + t.Fatalf("failed to read request body: %v", err) + } + + if err := json.Unmarshal(body, &capturedPayload); err != nil { + t.Fatalf("failed to unmarshal payload: %v", err) + } + + w.WriteHeader(http.StatusOK) + })) + defer server.Close() + + reporter := NewSlackReporter() + result := &AnalysisResult{ + Content: "Analysis content here", + } + + clusterInfo := &ClusterInfo{ + ID: "test-456", + Name: "prod-cluster", + Version: "4.21", + } + + config := &ReporterConfig{ + Enabled: true, + Settings: map[string]interface{}{ + "webhook_url": server.URL, + "channel": "C123456", + "cluster_info": clusterInfo, + "image": "quay.io/openshift/test:v1.0", + "env": "production", + }, + } + + // Call Report + if err := reporter.Report(context.Background(), result, config); err != nil { + t.Fatalf("Report() failed: %v", err) + } + + // Verify the captured payload + if capturedPayload.Channel != "C123456" { + t.Errorf("expected channel C123456, got %s", capturedPayload.Channel) + } + + if capturedPayload.Summary == "" { + t.Error("summary should not be empty") + } + + if capturedPayload.Analysis == "" { + t.Error("analysis should not be empty") + } + + if !contains(capturedPayload.Summary, "test-456") { + t.Error("summary should contain cluster ID") + } + + if capturedPayload.Image != "quay.io/openshift/test:v1.0" { + t.Errorf("expected image quay.io/openshift/test:v1.0, got 
%s", capturedPayload.Image) + } + + if capturedPayload.Commit != "v1.0" { + t.Errorf("expected commit v1.0, got %s", capturedPayload.Commit) + } +} + +func TestSlackReporter_buildSummaryField(t *testing.T) { + reporter := NewSlackReporter() + + clusterInfo := &ClusterInfo{ + ID: "cluster-789", + Name: "my-test-cluster", + Version: "4.22", + Provider: "gcp", + Expiration: "2026-02-01T12:00:00Z", + } + + config := &ReporterConfig{ + Settings: map[string]interface{}{ + "cluster_info": clusterInfo, + "image": "quay.io/app:commit-xyz", + "env": "dev", + }, + } + + summary := reporter.buildSummaryField(config) + + // Check for header + if !contains(summary, ":failed:") { + t.Error("summary should contain failure emoji") + } + if !contains(summary, "Pipeline Failed") { + t.Error("summary should contain failure message") + } + + // Check for cluster info + if !contains(summary, "cluster-789") { + t.Error("summary should contain cluster ID") + } + if !contains(summary, "my-test-cluster") { + t.Error("summary should contain cluster name") + } + if !contains(summary, "4.22") { + t.Error("summary should contain version") + } + if !contains(summary, "gcp") { + t.Error("summary should contain provider") + } + + // Check for test suite info + if !contains(summary, "quay.io/app") { + t.Error("summary should contain test image") + } + if !contains(summary, "commit-xyz") { + t.Error("summary should contain commit") + } + if !contains(summary, "dev") { + t.Error("summary should contain environment") + } +} + +func TestSlackReporter_buildAnalysisField(t *testing.T) { + reporter := NewSlackReporter() + + tests := []struct { + name string + result *AnalysisResult + expectedContains []string + unexpectedContains []string + }{ + { + name: "formatted JSON analysis", + result: &AnalysisResult{ + Content: "```json\n{\"root_cause\": \"Network issue\", \"recommendations\": [\"Fix network\"]}\n```", + }, + expectedContains: []string{"====== 🔍 Possible Cause ======", "Network issue", "====== 💡 Recommendations ======", "Fix network"}, + }, + { + name: "plain text analysis", + result: &AnalysisResult{ + Content: "This is plain text analysis", + }, + expectedContains: []string{"This is plain text analysis"}, + }, + { + name: "analysis with error", + result: &AnalysisResult{ + Content: "Analysis content", + Error: "Something went wrong", + }, + expectedContains: []string{"Analysis content", "====== ⚠️ Error ======", "Something went wrong"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + analysis := reporter.buildAnalysisField(tt.result) + + for _, expected := range tt.expectedContains { + if !contains(analysis, expected) { + t.Errorf("analysis should contain %q", expected) + } + } + + for _, unexpected := range tt.unexpectedContains { + if contains(analysis, unexpected) { + t.Errorf("analysis should not contain %q", unexpected) + } + } + }) + } +} + +func TestSlackReporter_enforceFieldLimit(t *testing.T) { + reporter := NewSlackReporter() + + tests := []struct { + name string + content string + maxLength int + wantLen int + }{ + { + name: "content within limit", + content: "short content", + maxLength: 100, + wantLen: 13, + }, + { + name: "content exceeds limit", + content: string(make([]byte, 1000)), + maxLength: 500, + wantLen: 500, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := reporter.enforceFieldLimit(tt.content, tt.maxLength) + + if len(result) > tt.maxLength { + t.Errorf("result length %d exceeds max length %d", len(result), tt.maxLength) + 
} + + if tt.name == "content exceeds limit" { + if !contains(result, "truncated") { + t.Error("truncated content should contain notice") + } + } + }) + } +} + +func TestSlackReporter_ExtendedLogsFallback(t *testing.T) { + reporter := NewSlackReporter() + + t.Run("no report_dir returns fallback message", func(t *testing.T) { + result := &AnalysisResult{ + Content: "Test analysis", + } + + config := &ReporterConfig{ + Settings: map[string]interface{}{ + "webhook_url": "https://test.com", + "channel": "C123456", + // No report_dir + }, + } + + payload := reporter.buildWorkflowPayload(result, config) + + if payload.ExtendedLogs == "" { + t.Error("ExtendedLogs should not be empty when no report_dir") + } + + if !contains(payload.ExtendedLogs, "not available") { + t.Errorf("Expected fallback message, got: %s", payload.ExtendedLogs) + } + }) + + t.Run("empty report_dir returns fallback message", func(t *testing.T) { + result := &AnalysisResult{ + Content: "Test analysis", + } + + config := &ReporterConfig{ + Settings: map[string]interface{}{ + "webhook_url": "https://test.com", + "channel": "C123456", + "report_dir": "", // Empty string + }, + } + + payload := reporter.buildWorkflowPayload(result, config) + + if payload.ExtendedLogs == "" { + t.Error("ExtendedLogs should not be empty when report_dir is empty string") + } + + if !contains(payload.ExtendedLogs, "not available") { + t.Errorf("Expected fallback message, got: %s", payload.ExtendedLogs) + } + }) + + t.Run("nonexistent report_dir returns fallback message", func(t *testing.T) { + result := &AnalysisResult{ + Content: "Test analysis", + } + + config := &ReporterConfig{ + Settings: map[string]interface{}{ + "webhook_url": "https://test.com", + "channel": "C123456", + "report_dir": "/nonexistent/path", + }, + } + + payload := reporter.buildWorkflowPayload(result, config) + + if payload.ExtendedLogs == "" { + t.Error("ExtendedLogs should not be empty when report_dir doesn't exist") + } + + // When readTestOutput returns empty string, we get the "not found" fallback + if !contains(payload.ExtendedLogs, "No test failure logs found") { + t.Errorf("Expected fallback message, got: %s", payload.ExtendedLogs) + } + }) + + t.Run("valid report_dir with logs returns actual logs", func(t *testing.T) { + result := &AnalysisResult{ + Content: "Test analysis", + } + + config := &ReporterConfig{ + Settings: map[string]interface{}{ + "webhook_url": "https://test.com", + "channel": "C123456", + "report_dir": "testdata/periodic-ci-openshift-osde2e-main-nightly-4.20-osd-aws", + }, + } + + payload := reporter.buildWorkflowPayload(result, config) + + if payload.ExtendedLogs == "" { + t.Error("ExtendedLogs should not be empty when valid logs exist") + } + + // Should contain actual failure logs, not fallback message + if contains(payload.ExtendedLogs, "not available") { + t.Errorf("Should contain real logs, not fallback. 
Got: %s", payload.ExtendedLogs[:100]) + } + + // Should contain failure markers from real data + if !contains(payload.ExtendedLogs, "Found") && !contains(payload.ExtendedLogs, "test failure") { + t.Error("Should contain failure count or marker") + } + }) +} + +// Helper function +func contains(s, substr string) bool { + return len(s) > 0 && len(substr) > 0 && (s == substr || len(s) > len(substr) && hasSubstring(s, substr)) +} + +func hasSubstring(s, substr string) bool { + for i := 0; i <= len(s)-len(substr); i++ { + if s[i:i+len(substr)] == substr { + return true + } + } + return false +} diff --git a/internal/reporter/testdata/periodic-ci-openshift-osde2e-main-nightly-4.20-osd-aws/build-log.txt b/internal/reporter/testdata/periodic-ci-openshift-osde2e-main-nightly-4.20-osd-aws/build-log.txt new file mode 100644 index 0000000000..d2d5415600 --- /dev/null +++ b/internal/reporter/testdata/periodic-ci-openshift-osde2e-main-nightly-4.20-osd-aws/build-log.txt @@ -0,0 +1,646 @@ +I0119 09:38:16.741960 27 main.go:88] "configured logging" outputFile="/logs/artifacts/test_output.log" reportDir="/logs/artifacts" sharedDir="/tmp/secret" +2026/01/19 09:38:16 Will load config aws +2026/01/19 09:38:16 Will load config stage +2026/01/19 09:38:16 Will load config e2e-suite +2026/01/19 09:38:16 Found secret for key tests.slackWebhook. +2026/01/19 09:38:16 Found secret for key cad.pagerDutyRoutingKey. +2026/01/19 09:38:16 Found secret for key config.aws.account. +2026/01/19 09:38:16 Found secret for key config.aws.accessKey. +2026/01/19 09:38:16 Found secret for key config.aws.secretAccessKey. +2026/01/19 09:38:16 Found secret for key config.aws.region. +2026/01/19 09:38:16 Found secret for key gcp.credsJSON. +2026/01/19 09:38:16 Found secret for key ocm.token. +2026/01/19 09:38:16 Found secret for key ocm.clientID. +2026/01/19 09:38:16 Found secret for key ocm.clientSecret. +2026/01/19 09:38:16 Querying cluster versions endpoint. +2026/01/19 09:38:17 could not parse version 'aaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbb-imageset': invalid semantic version +2026/01/19 09:38:17 could not parse version 'aaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbb-imageset': invalid semantic version +2026/01/19 09:38:17 could not parse version 'dgoodwin1-imageset': invalid semantic version +2026/01/19 09:38:17 could not parse version 'dgoodwin-stg-imageset': invalid semantic version +2026/01/19 09:39:15 specific image value: "registry.build04.ci.openshift.org/ci-op-3cikwr1p/release@sha256:ab7400593fc9728c82045614be306dcc0794fcc36a8b38e7d1550eda7832d2dd" +2026/01/19 09:39:15 specific nightly value: "" +2026/01/19 09:39:15 PROW_JOB_ID: "4.20.0-0.nightly-2026-01-19-092932-openshift-dedicated-aws" +2026/01/19 09:39:15 Using version selector "triggered nightly" +2026/01/19 09:39:15 Looking to match 4.20.0-0.nightly-2026-01-19-092932-nightly +2026/01/19 09:39:15 Unable to find image using selector `triggered nightly`. Error: failed to find version "4.20.0-0.nightly-2026-01-19-092932-nightly" +2026/01/19 09:39:15 Waiting for CIS to sync with the Release Controller +2026/01/19 09:39:16 Querying cluster versions endpoint. 
+2026/01/19 09:39:16 could not parse version 'aaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbb-imageset': invalid semantic version +2026/01/19 09:39:16 could not parse version 'aaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbb-imageset': invalid semantic version +2026/01/19 09:39:16 could not parse version 'dgoodwin1-imageset': invalid semantic version +2026/01/19 09:39:16 could not parse version 'dgoodwin-stg-imageset': invalid semantic version +2026/01/19 09:40:16 specific image value: "registry.build04.ci.openshift.org/ci-op-3cikwr1p/release@sha256:ab7400593fc9728c82045614be306dcc0794fcc36a8b38e7d1550eda7832d2dd" +2026/01/19 09:40:16 specific nightly value: "" +2026/01/19 09:40:16 PROW_JOB_ID: "4.20.0-0.nightly-2026-01-19-092932-openshift-dedicated-aws" +2026/01/19 09:40:16 Using version selector "triggered nightly" +2026/01/19 09:40:16 Looking to match 4.20.0-0.nightly-2026-01-19-092932-nightly +2026/01/19 09:40:16 Unable to find image using selector `triggered nightly`. Error: failed to find version "4.20.0-0.nightly-2026-01-19-092932-nightly" +2026/01/19 09:40:16 Waiting for CIS to sync with the Release Controller +2026/01/19 09:40:16 Querying cluster versions endpoint. +2026/01/19 09:40:16 could not parse version 'aaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbb-imageset': invalid semantic version +2026/01/19 09:40:16 could not parse version 'aaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbb-imageset': invalid semantic version +2026/01/19 09:40:16 could not parse version 'dgoodwin1-imageset': invalid semantic version +2026/01/19 09:40:16 could not parse version 'dgoodwin-stg-imageset': invalid semantic version +2026/01/19 09:41:16 specific image value: "registry.build04.ci.openshift.org/ci-op-3cikwr1p/release@sha256:ab7400593fc9728c82045614be306dcc0794fcc36a8b38e7d1550eda7832d2dd" +2026/01/19 09:41:16 specific nightly value: "" +2026/01/19 09:41:16 PROW_JOB_ID: "4.20.0-0.nightly-2026-01-19-092932-openshift-dedicated-aws" +2026/01/19 09:41:16 Using version selector "triggered nightly" +2026/01/19 09:41:16 Looking to match 4.20.0-0.nightly-2026-01-19-092932-nightly +2026/01/19 09:41:16 Unable to find image using selector `triggered nightly`. Error: failed to find version "4.20.0-0.nightly-2026-01-19-092932-nightly" +2026/01/19 09:41:16 Waiting for CIS to sync with the Release Controller +2026/01/19 09:41:16 Querying cluster versions endpoint. 
+2026/01/19 09:41:16 could not parse version 'aaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbb-imageset': invalid semantic version +2026/01/19 09:41:16 could not parse version 'aaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbb-imageset': invalid semantic version +2026/01/19 09:41:16 could not parse version 'dgoodwin1-imageset': invalid semantic version +2026/01/19 09:41:16 could not parse version 'dgoodwin-stg-imageset': invalid semantic version +2026/01/19 09:42:16 specific image value: "registry.build04.ci.openshift.org/ci-op-3cikwr1p/release@sha256:ab7400593fc9728c82045614be306dcc0794fcc36a8b38e7d1550eda7832d2dd" +2026/01/19 09:42:16 specific nightly value: "" +2026/01/19 09:42:16 PROW_JOB_ID: "4.20.0-0.nightly-2026-01-19-092932-openshift-dedicated-aws" +2026/01/19 09:42:16 Using version selector "triggered nightly" +2026/01/19 09:42:16 Looking to match 4.20.0-0.nightly-2026-01-19-092932-nightly +2026/01/19 09:42:16 Unable to find image using selector `triggered nightly`. Error: failed to find version "4.20.0-0.nightly-2026-01-19-092932-nightly" +2026/01/19 09:42:16 Waiting for CIS to sync with the Release Controller +2026/01/19 09:42:16 Querying cluster versions endpoint. +2026/01/19 09:42:16 could not parse version 'aaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbb-imageset': invalid semantic version +2026/01/19 09:42:16 could not parse version 'aaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbb-imageset': invalid semantic version +2026/01/19 09:42:16 could not parse version 'dgoodwin1-imageset': invalid semantic version +2026/01/19 09:42:16 could not parse version 'dgoodwin-stg-imageset': invalid semantic version +2026/01/19 09:43:16 specific image value: "registry.build04.ci.openshift.org/ci-op-3cikwr1p/release@sha256:ab7400593fc9728c82045614be306dcc0794fcc36a8b38e7d1550eda7832d2dd" +2026/01/19 09:43:16 specific nightly value: "" +2026/01/19 09:43:16 PROW_JOB_ID: "4.20.0-0.nightly-2026-01-19-092932-openshift-dedicated-aws" +2026/01/19 09:43:16 Using version selector "triggered nightly" +2026/01/19 09:43:16 Looking to match 4.20.0-0.nightly-2026-01-19-092932-nightly +2026/01/19 09:43:16 Unable to find image using selector `triggered nightly`. Error: failed to find version "4.20.0-0.nightly-2026-01-19-092932-nightly" +2026/01/19 09:43:16 Waiting for CIS to sync with the Release Controller +2026/01/19 09:43:16 Querying cluster versions endpoint. 
+2026/01/19 09:43:16 could not parse version 'aaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbb-imageset': invalid semantic version +2026/01/19 09:43:16 could not parse version 'aaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbb-imageset': invalid semantic version +2026/01/19 09:43:16 could not parse version 'dgoodwin1-imageset': invalid semantic version +2026/01/19 09:43:16 could not parse version 'dgoodwin-stg-imageset': invalid semantic version +2026/01/19 09:44:15 specific image value: "registry.build04.ci.openshift.org/ci-op-3cikwr1p/release@sha256:ab7400593fc9728c82045614be306dcc0794fcc36a8b38e7d1550eda7832d2dd" +2026/01/19 09:44:15 specific nightly value: "" +2026/01/19 09:44:15 PROW_JOB_ID: "4.20.0-0.nightly-2026-01-19-092932-openshift-dedicated-aws" +2026/01/19 09:44:15 Using version selector "triggered nightly" +2026/01/19 09:44:15 Looking to match 4.20.0-0.nightly-2026-01-19-092932-nightly +2026/01/19 09:44:15 Unable to find image using selector `triggered nightly`. Error: failed to find version "4.20.0-0.nightly-2026-01-19-092932-nightly" +2026/01/19 09:44:15 Waiting for CIS to sync with the Release Controller +2026/01/19 09:44:16 Querying cluster versions endpoint. +2026/01/19 09:44:16 could not parse version 'aaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbb-imageset': invalid semantic version +2026/01/19 09:44:16 could not parse version 'aaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbb-imageset': invalid semantic version +2026/01/19 09:44:16 could not parse version 'dgoodwin1-imageset': invalid semantic version +2026/01/19 09:44:16 could not parse version 'dgoodwin-stg-imageset': invalid semantic version +2026/01/19 09:45:15 specific image value: "registry.build04.ci.openshift.org/ci-op-3cikwr1p/release@sha256:ab7400593fc9728c82045614be306dcc0794fcc36a8b38e7d1550eda7832d2dd" +2026/01/19 09:45:15 specific nightly value: "" +2026/01/19 09:45:15 PROW_JOB_ID: "4.20.0-0.nightly-2026-01-19-092932-openshift-dedicated-aws" +2026/01/19 09:45:15 Using version selector "triggered nightly" +2026/01/19 09:45:15 Looking to match 4.20.0-0.nightly-2026-01-19-092932-nightly +2026/01/19 09:45:15 Unable to find image using selector `triggered nightly`. Error: failed to find version "4.20.0-0.nightly-2026-01-19-092932-nightly" +2026/01/19 09:45:15 Waiting for CIS to sync with the Release Controller +2026/01/19 09:45:16 Querying cluster versions endpoint. 
+2026/01/19 09:45:16 could not parse version 'aaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbb-imageset': invalid semantic version +2026/01/19 09:45:16 could not parse version 'aaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbb-imageset': invalid semantic version +2026/01/19 09:45:16 could not parse version 'dgoodwin1-imageset': invalid semantic version +2026/01/19 09:45:16 could not parse version 'dgoodwin-stg-imageset': invalid semantic version +2026/01/19 09:46:15 specific image value: "registry.build04.ci.openshift.org/ci-op-3cikwr1p/release@sha256:ab7400593fc9728c82045614be306dcc0794fcc36a8b38e7d1550eda7832d2dd" +2026/01/19 09:46:15 specific nightly value: "" +2026/01/19 09:46:15 PROW_JOB_ID: "4.20.0-0.nightly-2026-01-19-092932-openshift-dedicated-aws" +2026/01/19 09:46:15 Using version selector "triggered nightly" +2026/01/19 09:46:15 Looking to match 4.20.0-0.nightly-2026-01-19-092932-nightly +2026/01/19 09:46:15 Unable to find image using selector `triggered nightly`. Error: failed to find version "4.20.0-0.nightly-2026-01-19-092932-nightly" +2026/01/19 09:46:15 Waiting for CIS to sync with the Release Controller +2026/01/19 09:46:16 Querying cluster versions endpoint. +2026/01/19 09:46:16 could not parse version 'aaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbb-imageset': invalid semantic version +2026/01/19 09:46:16 could not parse version 'aaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbb-imageset': invalid semantic version +2026/01/19 09:46:16 could not parse version 'dgoodwin1-imageset': invalid semantic version +2026/01/19 09:46:16 could not parse version 'dgoodwin-stg-imageset': invalid semantic version +2026/01/19 09:47:15 specific image value: "registry.build04.ci.openshift.org/ci-op-3cikwr1p/release@sha256:ab7400593fc9728c82045614be306dcc0794fcc36a8b38e7d1550eda7832d2dd" +2026/01/19 09:47:15 specific nightly value: "" +2026/01/19 09:47:15 PROW_JOB_ID: "4.20.0-0.nightly-2026-01-19-092932-openshift-dedicated-aws" +2026/01/19 09:47:15 Using version selector "triggered nightly" +2026/01/19 09:47:15 Looking to match 4.20.0-0.nightly-2026-01-19-092932-nightly +2026/01/19 09:47:15 Unable to find image using selector `triggered nightly`. Error: failed to find version "4.20.0-0.nightly-2026-01-19-092932-nightly" +2026/01/19 09:47:15 Waiting for CIS to sync with the Release Controller +2026/01/19 09:47:16 Querying cluster versions endpoint. 
+2026/01/19 09:47:16 could not parse version 'aaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbb-imageset': invalid semantic version +2026/01/19 09:47:16 could not parse version 'aaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbb-imageset': invalid semantic version +2026/01/19 09:47:16 could not parse version 'dgoodwin1-imageset': invalid semantic version +2026/01/19 09:47:16 could not parse version 'dgoodwin-stg-imageset': invalid semantic version +2026/01/19 09:48:14 specific image value: "registry.build04.ci.openshift.org/ci-op-3cikwr1p/release@sha256:ab7400593fc9728c82045614be306dcc0794fcc36a8b38e7d1550eda7832d2dd" +2026/01/19 09:48:14 specific nightly value: "" +2026/01/19 09:48:14 PROW_JOB_ID: "4.20.0-0.nightly-2026-01-19-092932-openshift-dedicated-aws" +2026/01/19 09:48:14 Using version selector "triggered nightly" +2026/01/19 09:48:14 Looking to match 4.20.0-0.nightly-2026-01-19-092932-nightly +2026/01/19 09:48:14 Unable to find image using selector `triggered nightly`. Error: failed to find version "4.20.0-0.nightly-2026-01-19-092932-nightly" +2026/01/19 09:48:14 Waiting for CIS to sync with the Release Controller +2026/01/19 09:48:16 Querying cluster versions endpoint. +2026/01/19 09:48:16 could not parse version 'aaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbb-imageset': invalid semantic version +2026/01/19 09:48:16 could not parse version 'aaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbb-imageset': invalid semantic version +2026/01/19 09:48:16 could not parse version 'dgoodwin1-imageset': invalid semantic version +2026/01/19 09:48:16 could not parse version 'dgoodwin-stg-imageset': invalid semantic version +2026/01/19 09:49:15 specific image value: "registry.build04.ci.openshift.org/ci-op-3cikwr1p/release@sha256:ab7400593fc9728c82045614be306dcc0794fcc36a8b38e7d1550eda7832d2dd" +2026/01/19 09:49:15 specific nightly value: "" +2026/01/19 09:49:15 PROW_JOB_ID: "4.20.0-0.nightly-2026-01-19-092932-openshift-dedicated-aws" +2026/01/19 09:49:15 Using version selector "triggered nightly" +2026/01/19 09:49:15 Looking to match 4.20.0-0.nightly-2026-01-19-092932-nightly +2026/01/19 09:49:15 Unable to find image using selector `triggered nightly`. Error: failed to find version "4.20.0-0.nightly-2026-01-19-092932-nightly" +2026/01/19 09:49:15 Waiting for CIS to sync with the Release Controller +2026/01/19 09:49:16 Querying cluster versions endpoint. 
+2026/01/19 09:49:16 could not parse version 'aaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbb-imageset': invalid semantic version +2026/01/19 09:49:16 could not parse version 'aaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbb-imageset': invalid semantic version +2026/01/19 09:49:16 could not parse version 'dgoodwin1-imageset': invalid semantic version +2026/01/19 09:49:16 could not parse version 'dgoodwin-stg-imageset': invalid semantic version +2026/01/19 09:50:16 specific image value: "registry.build04.ci.openshift.org/ci-op-3cikwr1p/release@sha256:ab7400593fc9728c82045614be306dcc0794fcc36a8b38e7d1550eda7832d2dd" +2026/01/19 09:50:16 specific nightly value: "" +2026/01/19 09:50:16 PROW_JOB_ID: "4.20.0-0.nightly-2026-01-19-092932-openshift-dedicated-aws" +2026/01/19 09:50:16 Using version selector "triggered nightly" +2026/01/19 09:50:16 Looking to match 4.20.0-0.nightly-2026-01-19-092932-nightly +2026/01/19 09:50:16 Unable to find image using selector `triggered nightly`. Error: failed to find version "4.20.0-0.nightly-2026-01-19-092932-nightly" +2026/01/19 09:50:16 Waiting for CIS to sync with the Release Controller +2026/01/19 09:50:16 Querying cluster versions endpoint. +2026/01/19 09:50:16 could not parse version 'aaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbb-imageset': invalid semantic version +2026/01/19 09:50:16 could not parse version 'aaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbb-imageset': invalid semantic version +2026/01/19 09:50:16 could not parse version 'dgoodwin1-imageset': invalid semantic version +2026/01/19 09:50:16 could not parse version 'dgoodwin-stg-imageset': invalid semantic version +2026/01/19 09:51:18 specific image value: "registry.build04.ci.openshift.org/ci-op-3cikwr1p/release@sha256:ab7400593fc9728c82045614be306dcc0794fcc36a8b38e7d1550eda7832d2dd" +2026/01/19 09:51:18 specific nightly value: "" +2026/01/19 09:51:18 PROW_JOB_ID: "4.20.0-0.nightly-2026-01-19-092932-openshift-dedicated-aws" +2026/01/19 09:51:18 Using version selector "triggered nightly" +2026/01/19 09:51:18 Looking to match 4.20.0-0.nightly-2026-01-19-092932-nightly +2026/01/19 09:51:18 Unable to find image using selector `triggered nightly`. Error: failed to find version "4.20.0-0.nightly-2026-01-19-092932-nightly" +2026/01/19 09:51:18 Waiting for CIS to sync with the Release Controller +2026/01/19 09:51:18 Querying cluster versions endpoint. 
+2026/01/19 09:51:18 could not parse version 'aaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbb-imageset': invalid semantic version +2026/01/19 09:51:18 could not parse version 'aaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbb-imageset': invalid semantic version +2026/01/19 09:51:18 could not parse version 'dgoodwin1-imageset': invalid semantic version +2026/01/19 09:51:18 could not parse version 'dgoodwin-stg-imageset': invalid semantic version +2026/01/19 09:52:17 specific image value: "registry.build04.ci.openshift.org/ci-op-3cikwr1p/release@sha256:ab7400593fc9728c82045614be306dcc0794fcc36a8b38e7d1550eda7832d2dd" +2026/01/19 09:52:17 specific nightly value: "" +2026/01/19 09:52:17 PROW_JOB_ID: "4.20.0-0.nightly-2026-01-19-092932-openshift-dedicated-aws" +2026/01/19 09:52:17 Using version selector "triggered nightly" +2026/01/19 09:52:17 Looking to match 4.20.0-0.nightly-2026-01-19-092932-nightly +2026/01/19 09:52:17 Unable to find image using selector `triggered nightly`. Error: failed to find version "4.20.0-0.nightly-2026-01-19-092932-nightly" +2026/01/19 09:52:17 Waiting for CIS to sync with the Release Controller +2026/01/19 09:52:17 Querying cluster versions endpoint. +2026/01/19 09:52:17 could not parse version 'aaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbb-imageset': invalid semantic version +2026/01/19 09:52:17 could not parse version 'aaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbb-imageset': invalid semantic version +2026/01/19 09:52:17 could not parse version 'dgoodwin1-imageset': invalid semantic version +2026/01/19 09:52:17 could not parse version 'dgoodwin-stg-imageset': invalid semantic version +2026/01/19 09:53:15 specific image value: "registry.build04.ci.openshift.org/ci-op-3cikwr1p/release@sha256:ab7400593fc9728c82045614be306dcc0794fcc36a8b38e7d1550eda7832d2dd" +2026/01/19 09:53:15 specific nightly value: "" +2026/01/19 09:53:15 PROW_JOB_ID: "4.20.0-0.nightly-2026-01-19-092932-openshift-dedicated-aws" +2026/01/19 09:53:15 Using version selector "triggered nightly" +2026/01/19 09:53:15 Looking to match 4.20.0-0.nightly-2026-01-19-092932-nightly +2026/01/19 09:53:15 Unable to find image using selector `triggered nightly`. Error: failed to find version "4.20.0-0.nightly-2026-01-19-092932-nightly" +2026/01/19 09:53:15 Waiting for CIS to sync with the Release Controller +2026/01/19 09:53:16 Querying cluster versions endpoint. 
+2026/01/19 09:53:16 could not parse version 'aaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbb-imageset': invalid semantic version +2026/01/19 09:53:16 could not parse version 'aaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbbaaaaaaaaaabbbbbbbbbb-imageset': invalid semantic version +2026/01/19 09:53:16 could not parse version 'dgoodwin1-imageset': invalid semantic version +2026/01/19 09:53:16 could not parse version 'dgoodwin-stg-imageset': invalid semantic version +2026/01/19 09:54:15 specific image value: "registry.build04.ci.openshift.org/ci-op-3cikwr1p/release@sha256:ab7400593fc9728c82045614be306dcc0794fcc36a8b38e7d1550eda7832d2dd" +2026/01/19 09:54:15 specific nightly value: "" +2026/01/19 09:54:15 PROW_JOB_ID: "4.20.0-0.nightly-2026-01-19-092932-openshift-dedicated-aws" +2026/01/19 09:54:15 Using version selector "triggered nightly" +2026/01/19 09:54:15 Looking to match 4.20.0-0.nightly-2026-01-19-092932-nightly +2026/01/19 09:54:15 Using the triggered nightly '4.20.0-0.nightly-2026-01-19-092932-nightly' +2026/01/19 09:54:15 No upgrade selector found. Not selecting an upgrade version. +2026/01/19 09:54:15 no clusterid found, provisioning cluster +2026/01/19 09:54:15 cluster name set to osde2e-3laaf +2026/01/19 09:54:15 Using flavour: osd-4 +2026/01/19 09:54:15 Using SKU: +2026/01/19 09:54:15 No SKU specified, will not check if enough quota is available. +2026/01/19 09:54:15 Image source not found: imageContentSources: +- mirrors: + - quay.io/openshift-release-dev/ocp-release + - pull.q1w2.quay.rhcloud.com/openshift-release-dev/ocp-release + source: quay.io/openshift-release-dev/ocp-release +- mirrors: + - quay.io/openshift-release-dev/ocp-v4.0-art-dev + - pull.q1w2.quay.rhcloud.com/openshift-release-dev/ocp-art-dev + source: quay.io/openshift-release-dev/ocp-v4.0-art-dev +- mirrors: + - quay.io/app-sre/managed-upgrade-operator + - pull.q1w2.quay.rhcloud.com/app-sre/managed-upgrade-operator + source: quay.io/app-sre/managed-upgrade-operator +- mirrors: + - quay.io/app-sre/managed-upgrade-operator-registry + - pull.q1w2.quay.rhcloud.com/app-sre/managed-upgrade-operator-registry + source: quay.io/app-sre/managed-upgrade-operator-registry +2026/01/19 09:54:15 Install config: + +2026/01/19 09:54:16 CLUSTER_ID set to 2ntr2hoo8487ite28bd98pg5ph0m04gf from OCM. +2026/01/19 09:54:16 clusterutil.go:168: Waiting 135 minutes for cluster to be ready... 
+2026/01/19 09:54:46 Successfully added property[Status] - waiting-for-ready +2026/01/19 09:54:47 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 09:55:16 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 09:55:46 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 09:56:16 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 09:56:46 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 09:57:16 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 09:57:46 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 09:58:16 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 09:58:46 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 09:59:16 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 09:59:46 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:00:16 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:00:46 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:01:16 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:01:46 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:02:16 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:02:46 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:03:16 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:03:46 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:04:16 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:04:46 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:05:16 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:05:46 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:06:16 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:06:46 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:07:16 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:07:46 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:08:16 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:08:46 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:09:16 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:09:46 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:10:16 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:10:46 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:11:16 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:11:46 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:12:16 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:12:46 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:13:16 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:13:46 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:14:16 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:14:46 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:15:16 
clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:15:46 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:16:16 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:16:46 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:17:16 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:17:46 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:18:16 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:18:46 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:19:16 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:19:46 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:20:16 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:20:46 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:21:16 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:21:46 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:22:16 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:22:46 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:23:16 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:23:46 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:24:16 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:24:46 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:25:16 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:25:46 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:26:16 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:26:46 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:27:16 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:27:46 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:28:16 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:28:46 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:29:16 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:29:46 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:30:16 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:30:46 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:31:16 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:31:46 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:32:16 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:32:46 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:33:16 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:33:46 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:34:16 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:34:46 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:35:16 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:35:46 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:36:16 clusterutil.go:207: 
cluster is not ready, state is: installing +2026/01/19 10:36:46 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:37:16 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:37:46 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:38:16 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:38:46 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:39:16 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:39:46 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:40:16 clusterutil.go:207: cluster is not ready, state is: installing +2026/01/19 10:40:46 Successfully added property[Status] - health-check +2026/01/19 10:40:46 Cluster status is ready +2026/01/19 10:40:46 Wrote cluster ID to shared dir: 2ntr2hoo8487ite28bd98pg5ph0m04gf +2026/01/19 10:40:47 Successfully added property[UpgradeVersion] - + "level"=0 "msg"="Cluster job finished successfully!" "job_name"="osd-cluster-ready" +2026/01/19 11:15:32 healthcheckjob.go:35: Healthcheck job passed +2026/01/19 11:15:33 Successfully added property[Status] - healthy +2026/01/19 11:15:33 Cluster is healthy and ready for testing +2026/01/19 11:15:35 Successfully retrieved kubeconfig from OCM. +2026/01/19 11:15:35 Passed kubeconfig to prow steps. +2026/01/19 11:15:35 CLUSTER_NAME set to osde2e-3laaf from OCM. +2026/01/19 11:15:35 CLUSTER_VERSION set to openshift-v4.20.0-0.nightly-2026-01-19-092932-nightly from OCM, for channel group nightly +2026/01/19 11:15:35 CLOUD_PROVIDER_ID set to aws from OCM. +2026/01/19 11:15:35 CLOUD_PROVIDER_REGION set to XXXXXXXXX from OCM. +2026/01/19 11:15:36 Running e2e tests... +[1768815496] OSD e2e suite - 67/90 specs SW0119 11:16:23.189480 27 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice +2026/01/19 11:16:23 Downloading collect-prometheus-err.txt +2026/01/19 11:16:23 Downloading collect-prometheus-out.txt +2026/01/19 11:16:24 Downloading prometheus.tar.gz +•PS••••SSSSSSS2026/01/19 11:19:25 Waiting 4h59m59.968253092s for builds-pruner-76rmx job to complete +•2026/01/19 11:19:30 Waiting 4h59m59.971311572s for deployments-pruner-qqxhw job to complete +•SS••••SS••S••••••••••••••••••••••••••••••••W0119 11:21:09.033922 27 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice +W0119 11:21:54.419684 27 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice +••••SSSSSSSSS••••••• +------------------------------ +• [FAILED] [50.649 seconds] +Ad Hoc Test Images execution [It] quay.io/redhat-services-prod/openshift/osd-metrics-exporter-e2e should pass [AdHocTestImages] +/go/src/github.com/openshift/osde2e/pkg/e2e/adhoctestimages/adhoctestimages.go:49 + + Timeline >> + "level"=0 "msg"="running test suite" "suite"="quay.io/redhat-services-prod/openshift/osd-metrics-exporter-e2e" "timeout"="30m0s" + executor "level"=0 "msg"="created namespace" "name"="osde2e-executor-cp39q" + executor "level"=0 "msg"="created service account" "name"="cluster-admin" + executor "level"=0 "msg"="created job" "name"="executor-pbf5j" + executor "level"=0 "msg"="waiting for suite to complete" + executor "level"=0 "msg"="e2e-suite has terminated" "state"={"terminated"="&ContainerStateTerminated{ExitCode:1,Signal:0,Reason:Error,Message:,StartedAt:2026-01-19 11:32:37 +0000 UTC,FinishedAt:2026-01-19 11:33:13 +0000 
UTC,ContainerID:cri-o://fe8b3ad57ca60745cdbf924ec56d44943ada7d80d9f2fc469641b01dc17a1a90,}"} + executor "level"=0 "msg"="fetching artifacts" + executor "level"=0 "msg"="processing test results" + executor "level"=0 "msg"="found junit files" "count"=1 + executor "level"=0 "msg"="processed test results" "total"=2 "passed"=1 "failed"=0 "skipped"=0 "errors"=1 + executor "level"=0 "msg"="Skipping cleanup" + [FAILED] in [It] - /go/src/github.com/openshift/osde2e/pkg/e2e/adhoctestimages/adhoctestimages.go:83 @ 01/19/26 11:33:24.872 + << Timeline + + [FAILED] failed test case: "[It] osd-metrics-exporter is exporting metrics" + Expected + : + [PANICKED] Test Panicked + In [It] at: /go/pkg/mod/github.com/onsi/gomega@v1.37.0/internal/async_assertion.go:333 @ 01/19/26 11:33:13.322 + + runtime error: index out of range [0] with length 0 + + Full Stack Trace + github.com/onsi/gomega/internal.(*AsyncAssertion).buildActualPoller.func3.1() + /go/pkg/mod/github.com/onsi/gomega@v1.37.0/internal/async_assertion.go:333 +0x186 + panic({0x286bb20?, 0xc000057740?}) + /usr/lib/golang/src/runtime/panic.go:792 +0x132 + github.com/openshift/osd-metrics-exporter/test/e2e.init.func1.3.1({0x7f92ada6c0a0?, 0xc000d24060?}) + /go/src/github.com/openshift/osd-metrics-exporter/test/e2e/osd_metrics_exporter_tests.go:131 +0xd4 + reflect.Value.call({0x2575ea0?, 0xc0005360e0?, 0x3?}, {0x2a6344c, 0x4}, {0xc000118ee8, 0x1, 0x90000000276dd20?}) + /usr/lib/golang/src/reflect/value.go:584 +0xca6 + reflect.Value.Call({0x2575ea0?, 0xc0005360e0?, 0x0?}, {0xc000118ee8?, 0xc00051ac00?, 0x2c38448?}) + /usr/lib/golang/src/reflect/value.go:368 +0xb9 + github.com/onsi/gomega/internal.(*AsyncAssertion).buildActualPoller.func3() + /go/pkg/mod/github.com/onsi/gomega@v1.37.0/internal/async_assertion.go:337 +0x11f + github.com/onsi/gomega/internal.(*AsyncAssertion).match(0xc0004313b0, {0x2e8d380, 0xc0005b4ba0}, 0x1, {0xc000d26e40, 0x1, 0x1}) + /go/pkg/mod/github.com/onsi/gomega@v1.37.0/internal/async_assertion.go:560 +0x7a2 + github.com/onsi/gomega/internal.(*AsyncAssertion).Should(0xc0004313b0, {0x2e8d380, 0xc0005b4ba0}, {0xc000d26e40, 0x1, 0x1}) + /go/pkg/mod/github.com/onsi/gomega@v1.37.0/internal/async_assertion.go:145 +0x85 + github.com/openshift/osd-metrics-exporter/test/e2e.init.func1.3({0x7f92ada6c0a0, 0xc000d24060}) + /go/src/github.com/openshift/osd-metrics-exporter/test/e2e/osd_metrics_exporter_tests.go:135 +0xa32 + github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func2({0x2ea8e70?, 0xc000d24060?}) + /go/pkg/mod/github.com/openshift/onsi-ginkgo/v2@v2.6.1-0.20241205171354-8006f302fd12/internal/node.go:465 +0x3e + github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3() + /go/pkg/mod/github.com/openshift/onsi-ginkgo/v2@v2.6.1-0.20241205171354-8006f302fd12/internal/suite.go:901 +0x7b + created by github.com/onsi/ginkgo/v2/internal.(*Suite).runNode in goroutine 11 + /go/pkg/mod/github.com/openshift/onsi-ginkgo/v2@v2.6.1-0.20241205171354-8006f302fd12/internal/suite.go:888 +0xd7b + + { + Message: "runtime error: index out of range [0] with length 0", + Type: "panicked", + Body: "[PANICKED] Test Panicked\nIn [It] at: /go/pkg/mod/github.com/onsi/gomega@v1.37.0/internal/async_assertion.go:333 @ 01/19/26 11:33:13.322\n\nruntime error: index out of range [0] with length 0\n\nFull Stack Trace\n github.com/onsi/gomega/internal.(*AsyncAssertion).buildActualPoller.func3.1()\n \t/go/pkg/mod/github.com/onsi/gomega@v1.37.0/internal/async_assertion.go:333 +0x186\n panic({0x286bb20?, 0xc000057740?})\n \t/usr/lib/golang/src/runtime/panic.go:792 
+0x132\n github.com/openshift/osd-metrics-exporter/test/e2e.init.func1.3.1({0x7f92ada6c0a0?, 0xc000d24060?})\n \t/go/src/github.com/openshift/osd-metrics-exporter/test/e2e/osd_metrics_exporter_tests.go:131 +0xd4\n reflect.Value.call({0x2575ea0?, 0xc0005360e0?, 0x3?}, {0x2a6344c, 0x4}, {0xc000118ee8, 0x1, 0x90000000276dd20?})\n \t/usr/lib/golang/src/reflect/value.go:584 +0xca6\n reflect.Value.Call({0x2575ea0?, 0xc0005360e0?, 0x0?}, {0xc000118ee8?, 0xc00051ac00?, 0x2c38448?})\n \t/usr/lib/golang/src/reflect/value.go:368 +0xb9\n github.com/onsi/gomega/internal.(*AsyncAssertion).buildActualPoller.func3()\n \t/go/pkg/mod/github.com/onsi/gomega@v1.37.0/internal/async_assertion.go:337 +0x11f\n github.com/onsi/gomega/internal.(*AsyncAssertion).match(0xc0004313b0, {0x2e8d380, 0xc0005b4ba0}, 0x1, {0xc000d26e40, 0x1, 0x1})\n \t/go/pkg/mod/github.com/onsi/gomega@v1.37.0/internal/async_assertion.go:560 +0x7a2\n github.com/onsi/gomega/internal.(*AsyncAssertion).Should(0xc0004313b0, {0x2e8d380, 0xc0005b4ba0}, {0xc000d26e40, 0x1, 0x1})\n \t/go/pkg/mod/github.com/onsi/gomega@v1.37.0/internal/async_assertion.go:145 +0x85\n github.com/openshift/osd-metrics-exporter/test/e2e.init.func1.3({0x7f92ada6c0a0, 0xc000d24060})\n \t/go/src/github.com/openshift/osd-metrics-exporter/test/e2e/osd_metrics_exporter_tests.go:135 +0xa32\n github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func2({0x2ea8e70?, 0xc000d24060?})\n \t/go/pkg/mod/github.com/openshift/onsi-ginkgo/v2@v2.6.1-0.20241205171354-8006f302fd12/internal/node.go:465 +0x3e\n github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func3()\n \t/go/pkg/mod/github.com/openshift/onsi-ginkgo/v2@v2.6.1-0.20241205171354-8006f302fd12/internal/suite.go:901 +0x7b\n created by github.com/onsi/ginkgo/v2/internal.(*Suite).runNode in goroutine 11\n \t/go/pkg/mod/github.com/openshift/onsi-ginkgo/v2@v2.6.1-0.20241205171354-8006f302fd12/internal/suite.go:888 +0xd7b\n", + } + to be nil + In [It] at: /go/src/github.com/openshift/osde2e/pkg/e2e/adhoctestimages/adhoctestimages.go:83 @ 01/19/26 11:33:24.872 +------------------------------ +••• +------------------------------ +• [FAILED] [30.566 seconds] +Ad Hoc Test Images execution [It] quay.io/redhat-services-prod/openshift/managed-cluster-validating-webhooks-e2e should pass [AdHocTestImages] +/go/src/github.com/openshift/osde2e/pkg/e2e/adhoctestimages/adhoctestimages.go:49 + + Timeline >> + "level"=0 "msg"="running test suite" "suite"="quay.io/redhat-services-prod/openshift/managed-cluster-validating-webhooks-e2e" "timeout"="30m0s" + executor "level"=0 "msg"="created namespace" "name"="osde2e-executor-8uhh1" + executor "level"=0 "msg"="created service account" "name"="cluster-admin" + executor "level"=0 "msg"="created job" "name"="executor-zw6ws" + executor "level"=0 "msg"="waiting for suite to complete" + executor "level"=0 "msg"="e2e-suite has terminated" "state"={"terminated"="&ContainerStateTerminated{ExitCode:1,Signal:0,Reason:Error,Message:,StartedAt:2026-01-19 11:35:09 +0000 UTC,FinishedAt:2026-01-19 11:35:30 +0000 UTC,ContainerID:cri-o://2dc8686451f954efec0ae5705ea1c6427e96902df499de5d2fed55fa5f17a1ec,}"} + executor "level"=0 "msg"="fetching artifacts" + executor "level"=0 "msg"="processing test results" + executor "level"=0 "msg"="found junit files" "count"=1 + executor "level"=0 "msg"="processed test results" "total"=24 "passed"=8 "failed"=1 "skipped"=15 "errors"=0 + executor "level"=0 "msg"="Skipping cleanup" + [FAILED] in [It] - /go/src/github.com/openshift/osde2e/pkg/e2e/adhoctestimages/adhoctestimages.go:83 @ 
01/19/26 11:35:37.292 + << Timeline + + [FAILED] failed test case: "[It] Managed Cluster Validating Webhooks sre-regular-user-validation only blocks configmap/user-ca-bundle changes" + Expected + : + [FAILED] Expected to create ConfigMap in test namespace + Unexpected error: + <*errors.StatusError | 0xc001124460>: + configmaps is forbidden: User "test-user@redhat.com" cannot create resource "configmaps" in API group "" in the namespace "osde2e-temp-ns" + { + ErrStatus: { + TypeMeta: {Kind: "", APIVersion: ""}, + ListMeta: { + SelfLink: "", + ResourceVersion: "", + Continue: "", + RemainingItemCount: nil, + }, + Status: "Failure", + Message: "configmaps is forbidden: User \"test-user@redhat.com\" cannot create resource \"configmaps\" in API group \"\" in the namespace \"osde2e-temp-ns\"", + Reason: "Forbidden", + Details: {Name: "", Group: "", Kind: "configmaps", UID: "", Causes: nil, RetryAfterSeconds: 0}, + Code: 403, + }, + } + occurred + In [It] at: /opt/app-root/src/test/e2e/validation_webhook_tests.go:303 @ 01/19/26 11:35:20.134 + + { + Message: "Expected to create ConfigMap in test namespace\nUnexpected error:\n <*errors.StatusError | 0xc001124460>: \n configmaps is forbidden: User \"test-user@redhat.com\" cannot create resource \"configmaps\" in API group \"\" in the namespace \"osde2e-temp-ns\"\n {\n ErrStatus: {\n TypeMeta: {Kind: \"\", APIVersion: \"\"},\n ListMeta: {\n SelfLink: \"\",\n ResourceVersion: \"\",\n Continue: \"\",\n RemainingItemCount: nil,\n },\n Status: \"Failure\",\n Message: \"configmaps is forbidden: User \\\"test-user@redhat.com\\\" cannot create resource \\\"configmaps\\\" in API group \\\"\\\" in the namespace \\\"osde2e-temp-ns\\\"\",\n Reason: \"Forbidden\",\n Details: {Name: \"\", Group: \"\", Kind: \"configmaps\", UID: \"\", Causes: nil, RetryAfterSeconds: 0},\n Code: 403,\n },\n }\noccurred", + Type: "failed", + Body: "[FAILED] Expected to create ConfigMap in test namespace\nUnexpected error:\n <*errors.StatusError | 0xc001124460>: \n configmaps is forbidden: User \"test-user@redhat.com\" cannot create resource \"configmaps\" in API group \"\" in the namespace \"osde2e-temp-ns\"\n {\n ErrStatus: {\n TypeMeta: {Kind: \"\", APIVersion: \"\"},\n ListMeta: {\n SelfLink: \"\",\n ResourceVersion: \"\",\n Continue: \"\",\n RemainingItemCount: nil,\n },\n Status: \"Failure\",\n Message: \"configmaps is forbidden: User \\\"test-user@redhat.com\\\" cannot create resource \\\"configmaps\\\" in API group \\\"\\\" in the namespace \\\"osde2e-temp-ns\\\"\",\n Reason: \"Forbidden\",\n Details: {Name: \"\", Group: \"\", Kind: \"configmaps\", UID: \"\", Causes: nil, RetryAfterSeconds: 0},\n Code: 403,\n },\n }\noccurred\nIn [It] at: /opt/app-root/src/test/e2e/validation_webhook_tests.go:303 @ 01/19/26 11:35:20.134\n", + } + to be nil + In [It] at: /go/src/github.com/openshift/osde2e/pkg/e2e/adhoctestimages/adhoctestimages.go:83 @ 01/19/26 11:35:37.292 +------------------------------ +••••• + +Summarizing 2 Failures: + [FAIL] Ad Hoc Test Images execution [It] quay.io/redhat-services-prod/openshift/osd-metrics-exporter-e2e should pass [AdHocTestImages] + /go/src/github.com/openshift/osde2e/pkg/e2e/adhoctestimages/adhoctestimages.go:83 + [FAIL] Ad Hoc Test Images execution [It] quay.io/redhat-services-prod/openshift/managed-cluster-validating-webhooks-e2e should pass [AdHocTestImages] + /go/src/github.com/openshift/osde2e/pkg/e2e/adhoctestimages/adhoctestimages.go:83 + +Ran 66 of 90 Specs in 1316.647 seconds +FAIL! 
-- 64 Passed | 2 Failed | 1 Pending | 23 Skipped +2026/01/19 11:37:40 Looking up job history from https://deck-ci.apps.ci.l2s4.p1.openshiftapps.com/job-history/gs/origin-ci-test/logs/periodic-ci-openshift-osde2e-main-nightly-4.20-osd-aws +2026/01/19 11:37:46 Grabbing diff from https://gcsweb-ci.apps.ci.l2s4.p1.openshiftapps.com/gcs/test-platform-results/logs/periodic-ci-openshift-osde2e-main-nightly-4.20-osd-aws/2013060602901041152/artifacts/osd-aws/osde2e-test/artifacts/install/dependencies.txt +2026/01/19 11:37:47 -MCC: 0a21f87e4b1eb3bad0e02ed4de46da54e563b6e7 +2026/01/19 11:37:47 +MCC: 694a27f91edef0a68290bf6cc50f8d54f376fa3d +2026/01/19 11:37:47 ----- +2026/01/19 11:37:47 Unknown/alertmanager : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:117beca810a6438cb30146aafb16c88a7fdeb0d2a595ce709d4df47d552115cf +2026/01/19 11:37:47 Unknown/collect-prometheus : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3b884829ee84fc173433b240a9d6c2fc231e84a3e6dacc70db893837f46f4b2f +2026/01/19 11:37:47 Unknown/config-reloader : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c42ed662d1c73a7e41c554aff6279d6460679f8e63fe1f58298d3d245f27b0e5 +2026/01/19 11:37:47 Unknown/create-firewall : quay.io/openshift-release-dev/ocp-release@sha256:0a4c44daf1666f069258aa983a66afa2f3998b78ced79faa6174e0a0f438f0a5 +2026/01/19 11:37:47 Unknown/dns : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:90c3e20d684e0b90706a8fabb9c9bda2929bb957ffe3e1641ea68fe9dc919cbb +2026/01/19 11:37:47 Unknown/dns-node-resolver : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e577dce25a9f06468c866700c8ff6d4122516ddbbd9995e5cf4ea1156d61988a +2026/01/19 11:37:47 Unknown/e2e-suite : quay.io/app-sre/managed-upgrade-operator-e2e +2026/01/19 11:37:47 Unknown/e2e-suite : quay.io/app-sre/must-gather-operator-e2e +2026/01/19 11:37:47 Unknown/e2e-suite : quay.io/app-sre/ocm-agent-e2e +2026/01/19 11:37:47 Unknown/e2e-suite : quay.io/app-sre/rbac-permissions-operator-e2e +2026/01/19 11:37:47 Unknown/e2e-suite : quay.io/app-sre/splunk-forwarder-operator-e2e +2026/01/19 11:37:47 Unknown/e2e-suite : quay.io/redhat-services-prod/openshift/aws-vpce-operator-e2e +2026/01/19 11:37:47 Unknown/e2e-suite : quay.io/redhat-services-prod/openshift/cloud-ingress-operator-e2e +2026/01/19 11:37:47 Unknown/e2e-suite : quay.io/redhat-services-prod/openshift/managed-cluster-validating-webhooks-e2e +2026/01/19 11:37:47 Unknown/e2e-suite : quay.io/redhat-services-prod/openshift/managed-node-metadata-operator-e2e +2026/01/19 11:37:47 Unknown/e2e-suite : quay.io/redhat-services-prod/openshift/ocm-agent-operator-e2e +2026/01/19 11:37:47 Unknown/e2e-suite : quay.io/redhat-services-prod/openshift/osd-metrics-exporter-e2e +2026/01/19 11:37:47 Unknown/e2e-suite : quay.io/redhat-services-prod/openshift/route-monitor-operator-e2e +2026/01/19 11:37:47 Unknown/exporter : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c92bfedc218768d9d1f78392b68b775022f89df537956b35b1e6feeab24042cd +2026/01/19 11:37:47 Unknown/extract : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7290800c1daf30f811a950110a15ab75cd366eba88327d15030dc2be641e1d73 +2026/01/19 11:37:47 Unknown/extract-content : registry.redhat.io/redhat/certified-operator-index:v4.20 +2026/01/19 11:37:47 Unknown/extract-content : registry.redhat.io/redhat/community-operator-index:v4.20 +2026/01/19 11:37:47 Unknown/extract-content : registry.redhat.io/redhat/redhat-marketplace-index:v4.20 +2026/01/19 11:37:47 Unknown/extract-content : registry.redhat.io/redhat/redhat-operator-index:v4.20 
+2026/01/19 11:37:47 Unknown/extractor : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1c466fb45bd3c74a418a8396a4c9921d21dda976dd74d68542a2bd4540e22a6e +2026/01/19 11:37:47 Unknown/kube-state-metrics : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:eeeeced0d5466346a4e55ae15ffaf94d1b536673c85e45bdd130018b7d026b31 +2026/01/19 11:37:47 Unknown/manager : quay.io/app-sre/addon-operator@sha256:e0f8f872f07e2256907001697ada472fbf86508b68b1adf3c158bc8da7dc7789 +2026/01/19 11:37:47 Unknown/manager : quay.io/app-sre/package-operator-manager@sha256:92513ffd0abbbd9a11754a93b6d2d7b3d0961a73012efe2985fa153647f19cc2 +2026/01/19 11:37:47 Unknown/manager : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f7d0d3af8ca46727b17eae293799af55706dedd1ff2e9ef0fa73f804fd7a93fa +2026/01/19 11:37:47 Unknown/metrics-server : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cffe1ed139c1ad34e99127684f184827b11c71df96350b52a69accb245f1b2af +2026/01/19 11:37:47 Unknown/monitoring-plugin : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:77288f24d51b6077469f02fe88c1316eab46f5cbf963a895f5c103e12282cdbf +2026/01/19 11:37:47 Unknown/networking-console-plugin : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:144be6c7447f775d70368a593faa4a06558619097b5c9162579c7bc0e58af2d4 +2026/01/19 11:37:47 Unknown/node-exporter : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5f0275578312c055162cdb4468f1f08c09f0776a4d44c5e1d8ee4e9cc56ec3eb +2026/01/19 11:37:47 Unknown/oadp-oadp-velero-plugin-for-aws-rhel8 : registry.redhat.io/oadp/oadp-velero-plugin-for-aws-rhel8@sha256:317149aaba6bbe1600330a381ba2f8a7c2aba36db4f7cbd68545e037cfeed9db +2026/01/19 11:37:47 Unknown/openshift-state-metrics : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b056c0bec20e7b9ed01e3d641c34b87ec81b84e64f8267880e548b3260ff8d29 +2026/01/19 11:37:47 Unknown/osd-cluster-ready : quay.io/redhat-services-prod/openshift/osd-cluster-ready@sha256:b31101e1483fdedab934c8a214fce74d94d3005510b3d7df247d618fa573de7e +2026/01/19 11:37:47 Unknown/osd-delete-ownerrefs-serviceaccounts : image-registry.openshift-image-registry.svc:5000/openshift/cli:latest +2026/01/19 11:37:47 Unknown/pause-for-artifacts : busybox:latest +2026/01/19 11:37:47 Unknown/prom-label-proxy : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2333e75896298787fb09e4381d8f640d8f445a5c30856634aa193624c74594d2 +2026/01/19 11:37:47 Unknown/prometheus : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:852562a69b65e84c454aec7dab14b3addb0df6ccc58133c593708457fc4934b7 +2026/01/19 11:37:47 Unknown/prometheus-operator : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b985fe559b00bd2ce208766ff7c152678f8748511f7ce08560579284a0c1b6e1 +2026/01/19 11:37:47 Unknown/prometheus-operator-admission-webhook : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5928b467e38cce9a970336428d3722f4ce77639e67a3a21b7b4fbf4bb2bf5f48 +2026/01/19 11:37:47 -Unknown/pull : quay.io/app-sre/deployment-validation-operator-bundle:g8e9ca69 +2026/01/19 11:37:47 +Unknown/pull : quay.io/app-sre/deployment-validation-operator-bundle:g13e8861 +2026/01/19 11:37:47 Unknown/registry : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:794f3e12469556df772c9409466e331e5cc6e2a58248ac68657575d78fec847f +2026/01/19 11:37:47 Unknown/registry-server : quay.io/app-sre/addon-operator-registry@sha256:df1139f752392b12e4ecac52f94e1183a69b91c813feb68658a2020badd4826c +2026/01/19 11:37:47 Unknown/registry-server : 
quay.io/app-sre/cloud-ingress-operator-registry@sha256:2aed8d3e3ce55868579da5d89eccdd7a1bc1735d84fa5ee870e03135a28323e0 +2026/01/19 11:37:47 Unknown/registry-server : quay.io/app-sre/configure-alertmanager-operator-registry@sha256:de622b81cd2e639e82429d1744505b66fff044e39234c98657d2d47732855f16 +2026/01/19 11:37:47 Unknown/registry-server : quay.io/app-sre/custom-domains-operator-registry@sha256:ab7dc9367721c5be83144544e8e8d2bb0f8a6ce846654ce41fefd2da3eb54476 +2026/01/19 11:37:47 -Unknown/registry-server : quay.io/app-sre/deployment-validation-operator-catalog@sha256:d732efeff08192a6e594c984d88a4be534ea3477bc08353c93263cb29c389a85 +2026/01/19 11:37:47 +Unknown/registry-server : quay.io/app-sre/deployment-validation-operator-catalog@sha256:3590f6fdb219e2155d3209b7ed9d0e7985f1ceb660520dc30e065fc080fcbb0b +2026/01/19 11:37:47 Unknown/registry-server : quay.io/app-sre/managed-node-metadata-operator-registry@sha256:a55ff458383da793ef897cb1cfcfce14fb41f049c195c8052d9ec2ab2e358b12 +2026/01/19 11:37:47 Unknown/registry-server : quay.io/app-sre/managed-upgrade-operator-registry@sha256:fd2864968cb9f47fc3df39f6f7ad90b8c4017f33e968ff6ae795c2275dfeab47 +2026/01/19 11:37:47 Unknown/registry-server : quay.io/app-sre/managed-velero-operator-registry@sha256:34cda25f7df8e181277468ae0a2acfc116e22d99bf9a506a29cea72725f2e53b +2026/01/19 11:37:47 Unknown/registry-server : quay.io/app-sre/must-gather-operator-registry@sha256:bbc87530533e44e5d8554e5d1ceae40ac633f2341f7bdcc953b1b3f16ca623cd +2026/01/19 11:37:47 Unknown/registry-server : quay.io/app-sre/ocm-agent-operator-registry@sha256:17e5d0e51818f585799242ef712933e46e7fb5c068e85756b13fbaf83df61799 +2026/01/19 11:37:47 Unknown/registry-server : quay.io/app-sre/osd-metrics-exporter-registry@sha256:b3837b561090dc9031b44ada02e4055ed59c3b6ffdc27f80fa400390a269b3a4 +2026/01/19 11:37:47 Unknown/registry-server : quay.io/app-sre/rbac-permissions-operator-registry@sha256:24198915f8517190fe6c501795aa09189b2959c85cbe811977fddcd739fdc9b6 +2026/01/19 11:37:47 Unknown/registry-server : quay.io/app-sre/route-monitor-operator-registry@sha256:24bce383bbe659571c30e67864cd25e7ba8a388c266d8ab0f1370bcc8d1e2f8f +2026/01/19 11:37:47 Unknown/registry-server : quay.io/app-sre/splunk-forwarder-operator-registry@sha256:489d4cfa73079b125df72fae9cc3a6eef343c4cc6d2f47ff6474aa7d9296f206 +2026/01/19 11:37:47 Unknown/router : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cbf62d739e5d9109b736a9b1689b15541929b0d77563ccf0c3a5bc90eda6d973 +2026/01/19 11:37:47 Unknown/serve-healthcheck-canary : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2eebce8fba164d649042a1f04dc5b7aef61bb696c00c43b65908efcfacc46cbf +2026/01/19 11:37:47 Unknown/telemeter-client : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2176c1fee072fecc19c765be4dc9340e4cb009360d2fd8204240652ecaeb73b7 +2026/01/19 11:37:47 Unknown/thanos-sidecar : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b4df16e8c76e8f0d1a81d1356a29dc15230c72dcb9a5dcc12931b8a565ecdeb3 +2026/01/19 11:37:47 Unknown/util : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:941c71daa16180c08c62cb401a0337fdb7931e0bbf363172723eede6439b6439 +2026/01/19 11:37:47 Unknown/velero : registry.redhat.io/oadp/oadp-velero-rhel8@sha256:035f48844600bd3beebd6740bf85cf54d98a9232f01c31621d4e995ff366690a +2026/01/19 11:37:47 Unknown/webhook : quay.io/app-sre/addon-operator-webhook@sha256:ac0a6a48350b3ae7ed66670f51210b067a983466084763232f6fc5a17536a01c +2026/01/19 11:37:47 audit-exporter/audit-exporter : 
quay.io/app-sre/splunk-audit-exporter@sha256:9f1d08ea66bda7dcaf49c56dc1456d11e6705a22045ca02ac59cd8b540cec9c9 +2026/01/19 11:37:47 authentication-operator/authentication-operator : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e0961ebf9e09b100ab7da477ad98bf48db105564f6dcfdcf2b7f326bc3edbd03 +2026/01/19 11:37:47 aws-cloud-controller-manager/cloud-controller-manager : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:94a949679080eb25f4a442ed5f5ea67daa06a85ac4f96cf943cd70194c0ae744 +2026/01/19 11:37:47 aws-ebs-csi-driver-controller/csi-attacher : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fbec5c24870ed6cd27985bed8371a888e9322898e5ae4d9989e099f05a1c6a23 +2026/01/19 11:37:47 aws-ebs-csi-driver-controller/csi-driver : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16eec5fa72b90a0210d17a4497e5f430e1461046890512fc480568fa54542a80 +2026/01/19 11:37:47 aws-ebs-csi-driver-controller/csi-liveness-probe : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:688fb2404fabe8055015a5750740f8c43a61dc3f96d0242b15345a54da695af1 +2026/01/19 11:37:47 aws-ebs-csi-driver-controller/csi-provisioner : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6dce12cdd98f375e8a7688f4938dd832e5f894c0ba816d4a5a8d6720ff975904 +2026/01/19 11:37:47 aws-ebs-csi-driver-controller/csi-resizer : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a3d4db4816440b5141aecead32da7cfa9eee4efb8a663c3f0d30a4e23d96d2bf +2026/01/19 11:37:47 aws-ebs-csi-driver-controller/csi-snapshotter : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:deb4bbeb305282d6d6f762ef9e6988b2cd74c4a48c45bcc639f218f06e2da941 +2026/01/19 11:37:47 aws-ebs-csi-driver-controller/init-aws-credentials-file : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e10d54774aee431303272e849ca1d84d199d742d2ceabafab773ccc2e299db9f +2026/01/19 11:37:47 aws-ebs-csi-driver-node/csi-node-driver-registrar : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2049797466a1536cbe9a72805e650920b4847965b8d55585545e9736385f262c +2026/01/19 11:37:47 aws-ebs-csi-driver-operator/aws-ebs-csi-driver-operator : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a127655796264964b0aeb27fd0cf27157c6d4702554655aa35018829c98878cb +2026/01/19 11:37:47 blackbox-exporter/blackbox-exporter : quay.io/prometheus/blackbox-exporter@sha256:b04a9fef4fa086a02fc7fcd8dcdbc4b7b35cc30cdee860fdc6a19dd8b208d63e +2026/01/19 11:37:47 cloud-credential-operator/cloud-credential-operator : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1b82b0c6c41d1226bbe63659ac7491f45bdea3de83d563e65451bb63848aa24 +2026/01/19 11:37:47 cloud-ingress-operator/cloud-ingress-operator : quay.io/app-sre/cloud-ingress-operator@sha256:92846b3246f41afb473f1a3d6fc8b3e8f06a3dd6a1497405c8e3d9a8ecc0484f +2026/01/19 11:37:47 cloud-manager-operator/cluster-cloud-controller-manager : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fa5cc6ccc9113ab5809d1c3203d585b8620671c91d3e8b2ca34a25da4f6305c +2026/01/19 11:37:47 cloud-manager-operator/kube-rbac-proxy : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0de83cf18d336c19ea214987e67e382cffab05022e12fce60349cc89644374b +2026/01/19 11:37:47 cloud-network-config-controller/controller : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:433e71999e326ef1d70ba325ed777289659ab8f2b1b659a6906aae5f4b511482 +2026/01/19 11:37:47 cluster-autoscaler-operator/cluster-autoscaler-operator : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b553d1f77711e80d30195893ed21fbad638f84dd049a34f83eb2400a0f79e704 +2026/01/19 11:37:47 
cluster-baremetal-operator/cluster-baremetal-operator : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:629d9904eead42901e6641296c332f8461f179624be37262e5360e719a2f9876 +2026/01/19 11:37:47 cluster-image-registry-operator/cluster-image-registry-operator : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d21a50efdfeb977ec3b52c82a910e31e90a2172c08afa958f8f031d736ac90f1 +2026/01/19 11:37:47 cluster-monitoring-operator/cluster-monitoring-operator : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bf063af3d63c18f387deef046b84edd219fde4594cb73257ac44ab602f718b8a +2026/01/19 11:37:47 cluster-node-tuning-operator/cluster-node-tuning-operator : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:49c9e6b171e348d9c1381db533c86583677acddd9c70a37508ab9854562f0f77 +2026/01/19 11:37:47 cluster-olm-operator/cluster-olm-operator : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1ce75bb705ce22107366132679b7b435eb0d2e5ca6096f5669ea5c4eba39da1a +2026/01/19 11:37:47 cluster-olm-operator/copy-operator-controller-manifests : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3f0a8a5999709f6e5fd01bc9c73c9aae75ea34e4eb919693b17ac272a2903b59 +2026/01/19 11:37:47 cluster-samples-operator/cluster-samples-operator : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a88df5e988177c695609c5e2c3907e649c76ae5655493fa2e373e5dee27a5787 +2026/01/19 11:37:47 cluster-storage-operator/cluster-storage-operator : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:724fcbb751a5b0dbe321f26f1faa369b8c11db774dd006a4bdd07b91c337282f +2026/01/19 11:37:47 -cluster-version-operator/cluster-version-operator : registry.ci.openshift.org/ocp/release@sha256:a6218a031404a9b90b1fd3c8116202dce70d8b1c9df45aa7c6804f85a5580112 +2026/01/19 11:37:47 +cluster-version-operator/cluster-version-operator : registry.ci.openshift.org/ocp/release@sha256:ab7400593fc9728c82045614be306dcc0794fcc36a8b38e7d1550eda7832d2dd +2026/01/19 11:37:47 configure-alertmanager-operator/configure-alertmanager-operator : quay.io/app-sre/configure-alertmanager-operator@sha256:851ffe0f0269d90e91c987eb5dc12bf9830ca9fa2d6dafe093a64c1fa9fbee8a +2026/01/19 11:37:47 console-operator/console-operator : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d0ad732112def4c292306e2b7caa8712d1d5ae274b12330fa33d3f4cd447f70d +2026/01/19 11:37:47 console/console : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d5331caff08680d54a839c9b057ac60ff5190473115b253cfbb8e8c08483778d +2026/01/19 11:37:47 console/download-server : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:11cc1a07e6e81c0ec7350b3f73a241268227884c9f25e6e7b5fadae3adda0641 +2026/01/19 11:37:47 control-plane-machine-set-operator/control-plane-machine-set-operator : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:868c9ecf78ac840a89bc1d26215c73f569b739d55e4f8c31485318f9cdb78d25 +2026/01/19 11:37:47 controller/machine-controller : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:edd3ad79d82cd5aa57213f3e2020b97525e5d70c8b0af34ac0187541d1fce0db +2026/01/19 11:37:47 controller/machineset-controller : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:736b58f15407884529ad2bdbf0291b47c954f418b702ac91cc886469df654aad +2026/01/19 11:37:47 csi-snapshot-controller-operator/csi-snapshot-controller-operator : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bc800ab3580aa0208848fefa70a039b63f51a2de06b1f60e44a41fe79a05a049 +2026/01/19 11:37:47 csi-snapshot-controller/snapshot-controller : 
quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06da8736fec692e52b7ac6ca0903e609f8c56c80ae052d9d7c3378160bd734d6 +2026/01/19 11:37:47 custom-domains-operator/custom-domains-operator : quay.io/app-sre/custom-domains-operator@sha256:ba5f518b82f871bcf5c6c58d32f1fee7b2fd97c3dfa5b84c69211e5249e9b393 +2026/01/19 11:37:47 -deployment-validation-operator/deployment-validation-operator : quay.io/app-sre/deployment-validation-operator:0.1.545-g8e9ca69 +2026/01/19 11:37:47 +deployment-validation-operator/deployment-validation-operator : quay.io/app-sre/deployment-validation-operator:0.1.546-g13e8861 +2026/01/19 11:37:47 dns-operator/dns-operator : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1d52b7d2a87c85312bded9db1a8f81f928b6f346f4dd906d3c208403f0f002af +2026/01/19 11:37:47 etcd-operator/etcd-operator : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:15f04c05213f230b93b63ac6ad35aabfee6d09e990166de1638b582088ad9af2 +2026/01/19 11:37:47 etcd/etcdctl : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:81078647a13d027b300d6958824ac091ddd3d91d5ec1a3dfbc80046e4fccc155 +2026/01/19 11:37:47 insights-operator/insights-operator : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d563f26ef566da43a1c8ab34d29852f477533686074d0adbd29bb98e51195571 +2026/01/19 11:37:47 kube-controller-manager-operator/kube-controller-manager-operator : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f96b1808afd9f4668b6ac008696616690a796ace4bdf46e4f2d2c3b46eaafc5e +2026/01/19 11:37:47 kube-controller-manager/cluster-policy-controller : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:845aa0726919c6280d01d1aa488a139444bf86198082e33f1da3ddb9d91aa3b7 +2026/01/19 11:37:47 kube-storage-version-migrator-operator/kube-storage-version-migrator-operator : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:152d1eb7c35baf2bf21c14332a130e7ac5735a89645f955945beda115a54a7ce +2026/01/19 11:37:47 machine-approver/machine-approver-controller : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2d454f6cdfa310ea8c032786a5dbd5f63cd70b7696b8dc7c2e18d2ffa2719aa3 +2026/01/19 11:37:47 machine-config-controller/machine-config-controller : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0d090b422424031f03ae0e94399b0a70e944a434f266d925e302b67e30577471 +2026/01/19 11:37:47 managed-node-metadata-operator/managed-node-metadata-operator : quay.io/app-sre/managed-node-metadata-operator:v0.1.311-g12d9b99 +2026/01/19 11:37:47 managed-upgrade-operator/managed-upgrade-operator : quay.io/app-sre/managed-upgrade-operator@sha256:c55d07c728004397f9798a102c18d716600b0a2677b23697742b924a58fe721f +2026/01/19 11:37:47 managed-velero-operator/managed-velero-operator : quay.io/app-sre/managed-velero-operator@sha256:4beeea713b6a13da907e98ca8bc5411f5039449d4309979c97ec1a14657b08d2 +2026/01/19 11:37:47 marketplace-operator/marketplace-operator : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b6602902d7ba0ef54550782ccb514bbe26215b7d0da7239276fe3c51ce6b8126 +2026/01/19 11:37:47 migrator/migrator : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7035421c98933b1006ada36c6d4be4ea2c84c8805c92063a5cfea283d5145a8c +2026/01/19 11:37:47 multus-additional-cni-plugins/bond-cni-plugin : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:454733fd181aace8613cae5f7cf9fcd2de5611445eeb0a78140e6ece467d123c +2026/01/19 11:37:47 multus-additional-cni-plugins/cni-plugins : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e0645fdd4a85c0bfda5083073b9fb5a0510dae53714eefeee2c6ba91826b831 +2026/01/19 11:37:47 
multus-additional-cni-plugins/egress-router-binary-copy : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a91e3de8081f371c9ff9f2f16619d915621264378f0c861e2ba1366f10053cb1 +2026/01/19 11:37:47 multus-additional-cni-plugins/routeoverride-cni : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5521bde2b6ea859e756b1e38fafd336461e31f852e9d702256343b5813bce97c +2026/01/19 11:37:47 multus-additional-cni-plugins/whereabouts-cni-bincopy : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b76ef910c8a1cb86aa50b11f659bb7dd777fb2f506ca92861d804f0234bcd832 +2026/01/19 11:37:47 multus-admission-controller/multus-admission-controller : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c745a7824df72a7a16acc1d384630cb7a5a317b2a4079ae053ecf4abce4785ea +2026/01/19 11:37:47 multus/kube-multus : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:78da13ad39dfe6aaaf55da62536d131bf1c2a769d6daa6e29d3b9e031c8a9003 +2026/01/19 11:37:47 must-gather-operator/must-gather-operator : quay.io/app-sre/must-gather-operator@sha256:f31b35dcbb5c4eb89214b1558c20db42d885c20da93234ae0407ec1d0fbb6a41 +2026/01/19 11:37:47 network-check-source/check-endpoints : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b5c764b41263998643ffda2148fdc0430f910b06a907a4101bf05a1582f27d15 +2026/01/19 11:37:47 network-metrics-daemon/network-metrics-daemon : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d9bfae1f007c39d04a73292907669a1c0eb274202324001dd9363a0586bef1bc +2026/01/19 11:37:47 network-node-identity/webhook : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d732505bbda1adfc700e58ea8b52de16c3e511aaeedc03c6d1b932057b9b3f49 +2026/01/19 11:37:47 oauth-openshift/oauth-openshift : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b74655c6c5b87e4acd3da9a59ba881f438e5d26b11208e6083dc2d7b594de000 +2026/01/19 11:37:47 ocm-agent-operator/ocm-agent-operator : quay.io/app-sre/ocm-agent-operator@sha256:d9ff1c08a9fbf8b408337248d53323156c6a1cc733c5dc48bfbf5b7d83506286 +2026/01/19 11:37:47 ocm-agent/ocm-agent : quay.io/app-sre/ocm-agent@sha256:ab03a47872fc3c15c7d38ce87ab7fe565ccb508ab0c9811c09f549eccc12547e +2026/01/19 11:37:47 openshift-apiserver-a/openshift-apiserver : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1635e15282fbc5014166f94bca3bfd66ea67c8a39498c82c3df3644cd4ed329 +2026/01/19 11:37:47 openshift-apiserver-a/openshift-apiserver-check-endpoints : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0554bda0a96d190367ed63955b942dd3552e1d90e8586dfd9f8879c964bf245d +2026/01/19 11:37:47 openshift-apiserver-operator/openshift-apiserver-operator : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:14203332e69dfb497d5c7cfa891e593c23cc4533026b810883075f4907ae5f5b +2026/01/19 11:37:47 openshift-config-operator/openshift-api : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dfef273bdf82465bc631b1a77b1dfeb5b72bae9fbb9ec5934663234d74a1fae2 +2026/01/19 11:37:47 openshift-config-operator/openshift-config-operator : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:674f91f59e682bba705485d65e9ebf56942283772439cbdb2399a756fa51dd78 +2026/01/19 11:37:47 openshift-controller-manager-a/controller-manager : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4d56d44948e48ebc0586cd8b06645d0c7bcb85b08b99fde5e7877b0ea3284e5f +2026/01/19 11:37:47 openshift-controller-manager-operator/openshift-controller-manager-operator : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6ac80947f89b414a228a5fa246952d7f0d2a365b865473996de6f8a59387bf65 +2026/01/19 11:37:47 openshift-kube-apiserver/kube-apiserver : 
quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fb1529a27e685fb1b5b13a5d1e935a27c9b9591e884ec8f6c22025208ec02596 +2026/01/19 11:37:47 openshift-kube-scheduler-operator/kube-scheduler-operator-container : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8da3ae0c230867d7c67107ad8fcd99839b6058f4e9ba9c94c5fc3a865bd20b9f +2026/01/19 11:37:47 openshift-oauth-apiserver/oauth-apiserver : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8ddd77f3c414e02cc89cf68a3ffe7dd44a259583e0daa338f0eb69de0c560176 +2026/01/19 11:37:47 osd-metrics-exporter/osd-metrics-exporter : quay.io/app-sre/osd-metrics-exporter@sha256:84b6bbcaae864048be38d474ca2cd936227dd347f0a0bca4bdfea957e05a72fc +2026/01/19 11:37:47 pod-identity-webhook/pod-identity-webhook : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e9c4c574b2fb985c848dbd2d9bba5f94c039ce574439840d77d7421552f2b69d +2026/01/19 11:37:47 rbac-permissions-operator/rbac-permissions-operator : quay.io/app-sre/rbac-permissions-operator@sha256:c1e95e5c455b126e4524488260ad3d0544f0e817615701d9f4e60cf16319fbb8 +2026/01/19 11:37:47 route-controller-manager/route-controller-manager : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c2ff51ed35c889dfdc665f42d758d0d33907e36ffb7c8328a967cd0a4ff52607 +2026/01/19 11:37:47 route-monitor-operator/manager : quay.io/app-sre/route-monitor-operator@sha256:b2f5af37992f344bcea08fdd8f601ab77d1796b2f0d8f9070a8d0f180cba4bd7 +2026/01/19 11:37:47 service-ca-operator/service-ca-operator : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c38fdb4e629c6abe3ec3202c84381d0d383cb706b82d485867e518bf96d2ae1e +2026/01/19 11:37:47 splunk-forwarder-operator/splunk-forwarder-operator : quay.io/app-sre/splunk-forwarder-operator@sha256:1c69c7af839433cf138a4fd421a1d362ecf8eb83507519449773fa31b917ad96 +2026/01/19 11:37:47 splunk-forwarder/splunk-uf : quay.io/app-sre/splunk-forwarder@sha256:e0f318f8570d568bb2033881df9a4e3132279d7325050acda6e92390f7a18941 +2026/01/19 11:37:47 validation-webhook/webhooks : quay.io/app-sre/managed-cluster-validating-webhooks@sha256:afc60a032eea7d347c656c2f6b22634f3f615142eccb9f4e2c3c5d160adc46e6 +2026/01/19 11:37:47 volume-data-source-validator/volume-data-source-validator : quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5e0deb66c1015d9e084c78a8a5cdfbf5e795aeaaa2dad96d2388f760d14c30dc +2026/01/19 11:37:47 Tests failed: tests failed +2026/01/19 11:37:48 Cluster state: ready, flavor: osd-4 +2026/01/19 11:37:49 Successfully added property[JobName] - +2026/01/19 11:37:49 Successfully added property[Availability] - used +2026/01/19 11:37:49 Successfully added property[Status] - completed-failing +2026/01/19 11:37:50 Successfully added property[JobID] - +2026/01/19 11:37:50 Setting expiration on prod clusters is not allowed. Skipping... +2026/01/19 11:37:50 Setting expiration on prod clusters is not allowed. Skipping... 
+2026/01/19 11:37:50 Cluster 2ntr2hoo8487ite28bd98pg5ph0m04gf preserved in environment stage diff --git a/pkg/common/slack/client.go b/pkg/common/slack/client.go index e53b27fee9..60da381532 100644 --- a/pkg/common/slack/client.go +++ b/pkg/common/slack/client.go @@ -66,7 +66,13 @@ func (c *Client) SendWebhook(ctx context.Context, webhookURL string, payload int // Check response status if resp.StatusCode != http.StatusOK { - return fmt.Errorf("slack webhook returned status %d: %s", resp.StatusCode, resp.Status) + // Read response body for debugging + bodyBytes := make([]byte, 1024) + n, _ := resp.Body.Read(bodyBytes) + bodyText := string(bodyBytes[:n]) + + return fmt.Errorf("slack webhook returned status %d: %s\nResponse body: %s\nPayload sent: %s", + resp.StatusCode, resp.Status, bodyText, string(jsonData)) } return nil diff --git a/pkg/e2e/e2e.go b/pkg/e2e/e2e.go index b523d62f7e..9c87ace018 100644 --- a/pkg/e2e/e2e.go +++ b/pkg/e2e/e2e.go @@ -199,7 +199,12 @@ func (o *E2EOrchestrator) AnalyzeLogs(ctx context.Context, testErr error) error Version: viper.GetString(config.Cluster.Version), } if viper.GetBool(config.Tests.EnableSlackNotify) { - notificationConfig = reporter.BuildNotificationConfig(viper.GetString(config.LogAnalysis.SlackWebhook), viper.GetString(config.LogAnalysis.SlackChannel)) + notificationConfig = reporter.BuildNotificationConfig( + viper.GetString(config.LogAnalysis.SlackWebhook), + viper.GetString(config.LogAnalysis.SlackChannel), + clusterInfo, + reportDir, + ) } engineConfig := &analysisengine.Config{ diff --git a/pkg/e2e/e2e_test.go b/pkg/e2e/e2e_test.go index 25b8de1fd2..87b7ced5e5 100644 --- a/pkg/e2e/e2e_test.go +++ b/pkg/e2e/e2e_test.go @@ -138,7 +138,7 @@ func TestE2EOrchestrator_Result(t *testing.T) { func TestBuildNotificationConfig_Disabled(t *testing.T) { setupTestConfig(t) - cfg := reporter.BuildNotificationConfig("", "") + cfg := reporter.BuildNotificationConfig("", "", nil, "") if cfg != nil { t.Error("Expected nil config when slack notifications disabled") @@ -148,7 +148,7 @@ func TestBuildNotificationConfig_Disabled(t *testing.T) { func TestBuildNotificationConfig_MissingCredentials(t *testing.T) { setupTestConfig(t) - cfg := reporter.BuildNotificationConfig("", "") + cfg := reporter.BuildNotificationConfig("", "", nil, "") if cfg != nil { t.Error("Expected nil config when webhook/channel missing") @@ -158,7 +158,7 @@ func TestBuildNotificationConfig_MissingCredentials(t *testing.T) { func TestBuildNotificationConfig_MissingWebhook(t *testing.T) { setupTestConfig(t) - cfg := reporter.BuildNotificationConfig("", "#test") + cfg := reporter.BuildNotificationConfig("", "#test", nil, "") if cfg != nil { t.Error("Expected nil config when webhook missing") @@ -168,7 +168,7 @@ func TestBuildNotificationConfig_MissingWebhook(t *testing.T) { func TestBuildNotificationConfig_MissingChannel(t *testing.T) { setupTestConfig(t) - cfg := reporter.BuildNotificationConfig("https://hooks.slack.com/test", "") + cfg := reporter.BuildNotificationConfig("https://hooks.slack.com/test", "", nil, "") if cfg != nil { t.Error("Expected nil config when channel missing") @@ -178,7 +178,7 @@ func TestBuildNotificationConfig_MissingChannel(t *testing.T) { func TestBuildNotificationConfig_Enabled(t *testing.T) { setupTestConfig(t) - cfg := reporter.BuildNotificationConfig("https://hooks.slack.com/test", "#test-channel") + cfg := reporter.BuildNotificationConfig("https://hooks.slack.com/test", "#test-channel", nil, "") if cfg == nil { t.Fatal("Expected non-nil notification config") 
diff --git a/pkg/e2e/slack_integration_test.go b/pkg/e2e/slack_integration_test.go new file mode 100644 index 0000000000..fce5284d43 --- /dev/null +++ b/pkg/e2e/slack_integration_test.go @@ -0,0 +1,251 @@ +package e2e + +import ( + "context" + "os" + "testing" + + "github.com/openshift/osde2e/internal/reporter" +) + +// Slack Integration Tests +// +// These tests send real messages to Slack to verify the workflow integration works correctly. +// +// SETUP: +// +// 1. Add the E2E Test Notifications workflow to your test channel: +// - Open: https://slack.com/shortcuts/Ft09RL7M2AMV/60f07b46919da20d103806a8f5bba094 +// - Click "Add to Slack" +// - Select your test channel +// - Copy the webhook URL (starts with https://hooks.slack.com/workflows/...) +// +// 2. Get your Slack channel ID (NOT the name): +// - Right-click your test channel in Slack +// - Select "View channel details" +// - Scroll to bottom and copy the channel ID (starts with C, e.g., C06HQR8HN0L) +// +// 3. Set environment variables: +// export LOG_ANALYSIS_SLACK_WEBHOOK="https://hooks.slack.com/workflows/..." +// export LOG_ANALYSIS_SLACK_CHANNEL="C06HQR8HN0L" +// +// RUNNING THE TESTS: +// +// # Run all integration tests +// dotenv run go test -v ./pkg/e2e -run ^TestSlackReporter_Integration +// +// # Run specific test +// dotenv run go test -v ./pkg/e2e -run ^TestSlackReporter_Integration$ +// +// # Tests automatically skip if env vars are not set +// +// WHAT TO EXPECT IN SLACK: +// +// Test 1 (Full): 3 threaded messages +// - Initial: Failure summary with cluster info and test suite info +// - Reply 1: AI analysis with root cause and recommendations +// - Reply 2: Extracted test failure logs from real Prow data +// +// Test 2 (Minimal): 3 threaded messages +// - Initial: Failure summary with minimal cluster info +// - Reply 1: Plain text analysis (no JSON formatting) +// - Reply 2: Fallback message (no logs configured) +// +// Test 3 (Error): 3 threaded messages +// - Initial: Failure summary with cluster info +// - Reply 1: Analysis with error section appended +// - Reply 2: Fallback message (no logs in config) +// +// TROUBLESHOOTING: +// +// - "400 Bad Request": Check that channel ID is correct (starts with C) +// - "invalid_workflow_input": Channel name used instead of ID +// - Messages not threaded: Wrong webhook type (must be workflow webhook) +// - "invalid_blocks" in Slack: Empty field (now fixed with fallback messages) + +// TestSlackReporter_Integration tests the Slack reporter with a real webhook. +// +// This test verifies that: +// 1. The workflow payload structure is correct +// 2. The webhook accepts the payload +// 3. Messages appear in the configured Slack channel +// +// Required environment variables: +// +// LOG_ANALYSIS_SLACK_WEBHOOK - Workflow webhook URL +// LOG_ANALYSIS_SLACK_CHANNEL - Channel ID (e.g., C06HQR8HN0L) +// +// To run: +// +// export LOG_ANALYSIS_SLACK_WEBHOOK="https://hooks.slack.com/workflows/..." 
+// export LOG_ANALYSIS_SLACK_CHANNEL="C06HQR8HN0L" +// go test -v -run TestSlackReporter_Integration github.com/openshift/osde2e/pkg/e2e +func TestSlackReporter_Integration(t *testing.T) { + webhookURL := os.Getenv("LOG_ANALYSIS_SLACK_WEBHOOK") + channelID := os.Getenv("LOG_ANALYSIS_SLACK_CHANNEL") + + if webhookURL == "" || channelID == "" { + t.Skip("Skipping integration test: LOG_ANALYSIS_SLACK_WEBHOOK or LOG_ANALYSIS_SLACK_CHANNEL not set") + } + + // Create test cluster info + clusterInfo := &reporter.ClusterInfo{ + ID: "test-cluster-123", + Name: "integration-test-cluster", + Version: "4.20", + Provider: "aws", + Expiration: "2026-02-01T00:00:00Z", + } + + // Create analysis result with JSON content + result := &reporter.AnalysisResult{ + Content: `Based on the test output, here is my analysis: + +` + "```json" + ` +{ + "root_cause": "Integration test: This is a test failure notification from the osde2e Slack reporter integration test", + "recommendations": [ + "This is a test message to verify Slack Workflow integration", + "Check that this message appears in the configured channel", + "Verify that analysis and logs appear as threaded replies" + ] +} +` + "```" + ` +`, + } + + // Create reporter config + config := &reporter.ReporterConfig{ + Type: "slack", + Enabled: true, + Settings: map[string]interface{}{ + "webhook_url": webhookURL, + "channel": channelID, + "cluster_info": clusterInfo, + "image": "quay.io/openshift/osde2e-tests:integration-test", + "env": "test", + "report_dir": "../../internal/reporter/testdata/periodic-ci-openshift-osde2e-main-nightly-4.20-osd-aws", + }, + } + + // Send notification + slackReporter := reporter.NewSlackReporter() + ctx := context.Background() + + // Debug: Show what we're sending + t.Log("=== SENDING PAYLOAD ===") + t.Logf("Webhook URL: %s", webhookURL[:50]+"...") + t.Logf("Channel ID: %s", channelID) + + err := slackReporter.Report(ctx, result, config) + if err != nil { + t.Logf("=== ERROR DETAILS ===") + t.Logf("Error: %v", err) + t.Fatalf("Failed to send Slack notification: %v", err) + } + + t.Log("✅ Integration test successful!") + t.Log("Check your Slack channel for the test message with threaded replies") + t.Logf("Channel: %s", channelID) + t.Log("Expected:") + t.Log(" 1. Initial message with cluster info and test suite info") + t.Log(" 2. First reply with AI analysis (root cause and recommendations)") + t.Log(" 3. Second reply with test failure logs (if testdata exists)") +} + +// TestSlackReporter_Integration_MinimalPayload tests with minimal required fields. +func TestSlackReporter_Integration_MinimalPayload(t *testing.T) { + webhookURL := os.Getenv("LOG_ANALYSIS_SLACK_WEBHOOK") + channelID := os.Getenv("LOG_ANALYSIS_SLACK_CHANNEL") + + if webhookURL == "" || channelID == "" { + t.Skip("Skipping integration test: LOG_ANALYSIS_SLACK_WEBHOOK or LOG_ANALYSIS_SLACK_CHANNEL not set") + } + + // Minimal cluster info + clusterInfo := &reporter.ClusterInfo{ + ID: "minimal-test-123", + } + + // Plain text analysis (no JSON) + result := &reporter.AnalysisResult{ + Content: "This is a minimal test with plain text analysis content. 
No JSON formatting.", + } + + // Minimal config + config := &reporter.ReporterConfig{ + Type: "slack", + Enabled: true, + Settings: map[string]interface{}{ + "webhook_url": webhookURL, + "channel": channelID, + "cluster_info": clusterInfo, + }, + } + + // Send notification + slackReporter := reporter.NewSlackReporter() + ctx := context.Background() + + err := slackReporter.Report(ctx, result, config) + if err != nil { + t.Fatalf("Failed to send minimal Slack notification: %v", err) + } + + t.Log("✅ Minimal payload test successful!") + t.Log("Check your Slack channel for the minimal test message") +} + +// TestSlackReporter_Integration_WithError tests error handling and display. +func TestSlackReporter_Integration_WithError(t *testing.T) { + webhookURL := os.Getenv("LOG_ANALYSIS_SLACK_WEBHOOK") + channelID := os.Getenv("LOG_ANALYSIS_SLACK_CHANNEL") + + if webhookURL == "" || channelID == "" { + t.Skip("Skipping integration test: LOG_ANALYSIS_SLACK_WEBHOOK or LOG_ANALYSIS_SLACK_CHANNEL not set") + } + + clusterInfo := &reporter.ClusterInfo{ + ID: "error-test-456", + Name: "error-handling-test", + Version: "4.21", + Provider: "gcp", + } + + // Analysis with error + result := &reporter.AnalysisResult{ + Content: `Test analysis content with an error condition. + +` + "```json" + ` +{ + "root_cause": "Simulated error in test execution", + "recommendations": ["Check error message below"] +} +` + "```" + ` +`, + Error: "Integration test: This is a simulated error message to verify error display in Slack", + } + + config := &reporter.ReporterConfig{ + Type: "slack", + Enabled: true, + Settings: map[string]interface{}{ + "webhook_url": webhookURL, + "channel": channelID, + "cluster_info": clusterInfo, + "image": "quay.io/openshift/osde2e-tests:error-test", + "env": "test", + }, + } + + slackReporter := reporter.NewSlackReporter() + ctx := context.Background() + + err := slackReporter.Report(ctx, result, config) + if err != nil { + t.Fatalf("Failed to send error Slack notification: %v", err) + } + + t.Log("✅ Error handling test successful!") + t.Log("Check your Slack channel - the analysis should include an 'Error' section") +} From c7e3a153559d46b92224b50c84d1128342af1062 Mon Sep 17 00:00:00 2001 From: Christopher Mancini Date: Fri, 30 Jan 2026 15:13:51 -0500 Subject: [PATCH 2/3] feat: rearrange order of messages --- internal/reporter/slack.go | 29 +++-- internal/reporter/slack_workflow_test.go | 141 +++++++++++++++++++---- pkg/e2e/slack_integration_test.go | 28 +++-- 3 files changed, 155 insertions(+), 43 deletions(-) diff --git a/internal/reporter/slack.go b/internal/reporter/slack.go index 61d20ee455..568307d990 100644 --- a/internal/reporter/slack.go +++ b/internal/reporter/slack.go @@ -68,13 +68,14 @@ func (s *SlackReporter) Report(ctx context.Context, result *AnalysisResult, conf // WorkflowPayload represents the Slack workflow webhook payload type WorkflowPayload struct { - Channel string `json:"channel"` // Required - Slack channel ID - Summary string `json:"summary"` // Required - Initial message - Analysis string `json:"analysis"` // Required - AI analysis (posted as reply) - ExtendedLogs string `json:"extended_logs,omitempty"` // Optional - Test failures (posted as reply) - Image string `json:"image,omitempty"` // Optional - Test image - Env string `json:"env,omitempty"` // Optional - Environment - Commit string `json:"commit,omitempty"` // Optional - Commit hash + Channel string `json:"channel"` // Required - Slack channel ID + Summary string `json:"summary"` // Required - Initial 
message (test suite info) + Analysis string `json:"analysis"` // Required - AI analysis (posted as reply 1) + ExtendedLogs string `json:"extended_logs,omitempty"` // Optional - Test failures (posted as reply 2) + ClusterDetails string `json:"cluster_details,omitempty"` // Optional - Cluster info for debugging (posted as reply 3) + Image string `json:"image,omitempty"` // Optional - Test image + Env string `json:"env,omitempty"` // Optional - Environment + Commit string `json:"commit,omitempty"` // Optional - Commit hash } // ClusterInfo holds cluster information for reporting @@ -116,6 +117,14 @@ func (s *SlackReporter) buildWorkflowPayload(result *AnalysisResult, config *Rep payload.ExtendedLogs = "Test output logs not available (no report directory configured)." } + // Optional: cluster_details (for debugging) + if clusterDetails := s.buildClusterInfoSection(config); clusterDetails != "" { + payload.ClusterDetails = clusterDetails + } else { + // Provide fallback when no cluster info is configured + payload.ClusterDetails = "Cluster information not available." + } + // Optional metadata if image, ok := config.Settings["image"].(string); ok && image != "" { payload.Image = image @@ -140,10 +149,7 @@ func (s *SlackReporter) buildSummaryField(config *ReporterConfig) string { // Header builder.WriteString(":failed: Pipeline Failed at E2E Test\n\n") - // Cluster info - builder.WriteString(s.buildClusterInfoSection(config)) - - // Test suite info + // Test suite info (what failed) builder.WriteString(s.buildTestSuiteSection(config)) return s.enforceFieldLimit(builder.String(), maxWorkflowFieldLength) @@ -297,6 +303,7 @@ func (s *SlackReporter) readTestOutput(reportDir string) string { failureBlocks := s.extractFailureBlocks(lines, 0, totalLines) if len(failureBlocks) > 0 { var result strings.Builder + result.WriteString("====== Log Extract ======\n") result.WriteString(fmt.Sprintf("Found %d test failure(s):\n\n", len(failureBlocks))) for i, block := range failureBlocks { if i > 0 { diff --git a/internal/reporter/slack_workflow_test.go b/internal/reporter/slack_workflow_test.go index becdb6a5ea..49669eef1a 100644 --- a/internal/reporter/slack_workflow_test.go +++ b/internal/reporter/slack_workflow_test.go @@ -57,12 +57,26 @@ func TestSlackReporter_buildWorkflowPayload(t *testing.T) { t.Error("analysis field is required but empty") } - // Verify summary contains cluster info - if !contains(payload.Summary, "test-123") { - t.Error("summary should contain cluster ID") + // Verify summary contains test suite info (what failed) + if !contains(payload.Summary, "quay.io/test") { + t.Error("summary should contain image name") } - if !contains(payload.Summary, "4.20") { - t.Error("summary should contain version") + if !contains(payload.Summary, "abc123") { + t.Error("summary should contain commit") + } + if !contains(payload.Summary, "stage") { + t.Error("summary should contain environment") + } + + // Verify cluster_details contains cluster info (for debugging) + if payload.ClusterDetails == "" { + t.Error("cluster_details should not be empty when cluster info is provided") + } + if !contains(payload.ClusterDetails, "test-123") { + t.Error("cluster_details should contain cluster ID") + } + if !contains(payload.ClusterDetails, "4.20") { + t.Error("cluster_details should contain version") } // Verify analysis contains formatted content @@ -144,8 +158,17 @@ func TestSlackReporter_Report_WorkflowFormat(t *testing.T) { t.Error("analysis should not be empty") } - if !contains(capturedPayload.Summary, 
"test-456") { - t.Error("summary should contain cluster ID") + // Verify cluster info is in cluster_details field (not summary) + if capturedPayload.ClusterDetails == "" { + t.Error("cluster_details should not be empty when cluster info is provided") + } + if !contains(capturedPayload.ClusterDetails, "test-456") { + t.Error("cluster_details should contain cluster ID") + } + + // Verify summary contains test suite info + if !contains(capturedPayload.Summary, "quay.io/openshift/test") { + t.Error("summary should contain test image") } if capturedPayload.Image != "quay.io/openshift/test:v1.0" { @@ -186,19 +209,8 @@ func TestSlackReporter_buildSummaryField(t *testing.T) { t.Error("summary should contain failure message") } - // Check for cluster info - if !contains(summary, "cluster-789") { - t.Error("summary should contain cluster ID") - } - if !contains(summary, "my-test-cluster") { - t.Error("summary should contain cluster name") - } - if !contains(summary, "4.22") { - t.Error("summary should contain version") - } - if !contains(summary, "gcp") { - t.Error("summary should contain provider") - } + // Summary should NOT contain cluster info (it's in cluster_details now) + // Summary should ONLY contain test suite info (what failed) // Check for test suite info if !contains(summary, "quay.io/app") { @@ -411,6 +423,95 @@ func TestSlackReporter_ExtendedLogsFallback(t *testing.T) { }) } +func TestSlackReporter_ClusterDetailsFallback(t *testing.T) { + reporter := NewSlackReporter() + + t.Run("no cluster_info returns fallback message", func(t *testing.T) { + result := &AnalysisResult{ + Content: "Test analysis", + } + + config := &ReporterConfig{ + Settings: map[string]interface{}{ + "webhook_url": "https://test.com", + "channel": "C123456", + // No cluster_info + }, + } + + payload := reporter.buildWorkflowPayload(result, config) + + if payload.ClusterDetails == "" { + t.Error("ClusterDetails should not be empty when no cluster_info") + } + + if !contains(payload.ClusterDetails, "not available") { + t.Errorf("Expected fallback message, got: %s", payload.ClusterDetails) + } + }) + + t.Run("nil cluster_info returns fallback message", func(t *testing.T) { + result := &AnalysisResult{ + Content: "Test analysis", + } + + config := &ReporterConfig{ + Settings: map[string]interface{}{ + "webhook_url": "https://test.com", + "channel": "C123456", + "cluster_info": nil, + }, + } + + payload := reporter.buildWorkflowPayload(result, config) + + if payload.ClusterDetails == "" { + t.Error("ClusterDetails should not be empty when cluster_info is nil") + } + + if !contains(payload.ClusterDetails, "not available") { + t.Errorf("Expected fallback message, got: %s", payload.ClusterDetails) + } + }) + + t.Run("valid cluster_info returns cluster details", func(t *testing.T) { + result := &AnalysisResult{ + Content: "Test analysis", + } + + clusterInfo := &ClusterInfo{ + ID: "test-cluster-123", + Name: "my-cluster", + Version: "4.20", + Provider: "aws", + } + + config := &ReporterConfig{ + Settings: map[string]interface{}{ + "webhook_url": "https://test.com", + "channel": "C123456", + "cluster_info": clusterInfo, + }, + } + + payload := reporter.buildWorkflowPayload(result, config) + + if payload.ClusterDetails == "" { + t.Error("ClusterDetails should not be empty when valid cluster_info exists") + } + + // Should contain actual cluster info, not fallback message + if contains(payload.ClusterDetails, "not available") { + t.Errorf("Should contain real cluster info, not fallback. 
Got: %s", payload.ClusterDetails) + } + + // Should contain cluster details + if !contains(payload.ClusterDetails, "test-cluster-123") { + t.Error("Should contain cluster ID") + } + }) +} + // Helper function func contains(s, substr string) bool { return len(s) > 0 && len(substr) > 0 && (s == substr || len(s) > len(substr) && hasSubstring(s, substr)) diff --git a/pkg/e2e/slack_integration_test.go b/pkg/e2e/slack_integration_test.go index fce5284d43..6ae759879e 100644 --- a/pkg/e2e/slack_integration_test.go +++ b/pkg/e2e/slack_integration_test.go @@ -41,20 +41,23 @@ import ( // // WHAT TO EXPECT IN SLACK: // -// Test 1 (Full): 3 threaded messages -// - Initial: Failure summary with cluster info and test suite info -// - Reply 1: AI analysis with root cause and recommendations -// - Reply 2: Extracted test failure logs from real Prow data -// -// Test 2 (Minimal): 3 threaded messages -// - Initial: Failure summary with minimal cluster info +// Test 1 (Full): 4 threaded messages +// - Initial: Test suite info (what failed) +// - Reply 1: AI analysis with root cause and recommendations (briefly why) +// - Reply 2: Extracted test failure logs from real Prow data (evidence) +// - Reply 3: Cluster information for debugging +// +// Test 2 (Minimal): 4 threaded messages +// - Initial: Test suite info (minimal) // - Reply 1: Plain text analysis (no JSON formatting) // - Reply 2: Fallback message (no logs configured) +// - Reply 3: Minimal cluster info (cluster ID only) // -// Test 3 (Error): 3 threaded messages -// - Initial: Failure summary with cluster info +// Test 3 (Error): 4 threaded messages +// - Initial: Test suite info // - Reply 1: Analysis with error section appended // - Reply 2: Fallback message (no logs in config) +// - Reply 3: Cluster information // // TROUBLESHOOTING: // @@ -148,9 +151,10 @@ func TestSlackReporter_Integration(t *testing.T) { t.Log("Check your Slack channel for the test message with threaded replies") t.Logf("Channel: %s", channelID) t.Log("Expected:") - t.Log(" 1. Initial message with cluster info and test suite info") - t.Log(" 2. First reply with AI analysis (root cause and recommendations)") - t.Log(" 3. Second reply with test failure logs (if testdata exists)") + t.Log(" 1. Initial message with test suite info (what failed)") + t.Log(" 2. First reply with AI analysis (briefly why)") + t.Log(" 3. Second reply with test failure logs (evidence)") + t.Log(" 4. Third reply with cluster information (for debugging)") } // TestSlackReporter_Integration_MinimalPayload tests with minimal required fields. From e1d1a5a9ba618ea9a2bf7468610ac19cea550bc5 Mon Sep 17 00:00:00 2001 From: Christopher Mancini Date: Tue, 3 Feb 2026 10:17:30 -0500 Subject: [PATCH 3/3] fix: use ERROR marker, consolidate code, improve READMEs --- README.md | 55 ++++++++ internal/aggregator/aggregator.go | 4 +- internal/reporter/README.md | 120 ++++++---------- internal/reporter/slack.go | 72 +++++----- internal/reporter/slack_testoutput_test.go | 129 +++++++++++++++++- .../{build-log.txt => test_output.log} | 0 pkg/common/util/util.go | 10 ++ pkg/common/util/util_test.go | 114 ++++++++++++++++ 8 files changed, 384 insertions(+), 120 deletions(-) rename internal/reporter/testdata/periodic-ci-openshift-osde2e-main-nightly-4.20-osd-aws/{build-log.txt => test_output.log} (100%) create mode 100644 pkg/common/util/util_test.go diff --git a/README.md b/README.md index 564269fed9..1c6e95d7ad 100644 --- a/README.md +++ b/README.md @@ -327,6 +327,61 @@ that ran and statistics about them (e.g. 
pass/fail, duration). These XML files w used by external applications to present metrics and data for others to see into. An example of this is they are used to present data in [TestGrid Dashboards][TestGrid Dashboard]. +## Slack Notifications + +OSDe2e can send AI-powered failure analysis to Slack when tests fail. Each test suite can notify a different Slack channel with failure details, analysis, and logs. + +### Setup + +**1. Add Workflow to Your Slack Channel** + +Each team adds the shared E2E Test Notifications workflow to their channel: + +1. Open the workflow: https://slack.com/shortcuts/Ft09RL7M2AMV/60f07b46919da20d103806a8f5bba094 +2. Click **Add to Slack** +3. Select your destination channel +4. Copy the webhook URL (starts with `https://hooks.slack.com/workflows/...`) + +**2. Get Your Channel ID** + +Right-click your channel → **View channel details** → copy the channel ID (starts with `C`, e.g., `C06HQR8HN0L`) + +**3. Configure Test Suites** + +Set `TEST_SUITES_YAML` with your test images, webhook URLs, and Slack channel IDs: + +```bash +export TEST_SUITES_YAML=' +- image: quay.io/openshift/osde2e-tests:latest + slackWebhook: https://hooks.slack.com/workflows/T.../A.../... + slackChannel: C06HQR8HN0L +- image: quay.io/openshift/custom-tests:v1.0 + slackWebhook: https://hooks.slack.com/workflows/T.../B.../... + slackChannel: C07ABC123XY +' +``` + +**4. Enable Notifications** + +Enable Slack notifications in your config: + +```yaml +tests: + enableSlackNotify: true +logAnalysis: + enableAnalysis: true +``` + +### What You'll Receive + +When tests fail, you'll get a threaded Slack message with: +1. **Main message**: Test suite info (what failed) +2. **Reply 1**: AI analysis (why it failed) +3. **Reply 2**: Test failure logs (evidence) +4. **Reply 3**: Cluster details (for debugging) + +For implementation details, see [internal/reporter/README.md](internal/reporter/README.md). + ## CI Jobs Periodic jobs are run daily validating Managed OpenShift clusters, using diff --git a/internal/aggregator/aggregator.go b/internal/aggregator/aggregator.go index 7ff5454005..2d763da664 100644 --- a/internal/aggregator/aggregator.go +++ b/internal/aggregator/aggregator.go @@ -13,6 +13,7 @@ import ( "github.com/go-logr/logr" "github.com/joshdk/go-junit" "github.com/openshift/osde2e/internal/sanitizer" + "github.com/openshift/osde2e/pkg/common/util" ) type Aggregator struct { @@ -277,8 +278,7 @@ func extractErrorsFromLogFile(logFile string) (string, error) { // use string builder to collect errors var errors strings.Builder for _, line := range lines { - // Check all case variants directly - fastest approach - if strings.Contains(line, "error") || strings.Contains(line, "Error") || strings.Contains(line, "ERROR") { + if util.ContainsErrorMarker(line) { errors.WriteString(line) errors.WriteString("\n") // Add newline separator } diff --git a/internal/reporter/README.md b/internal/reporter/README.md index b38bf950e3..d4683afec5 100644 --- a/internal/reporter/README.md +++ b/internal/reporter/README.md @@ -1,6 +1,8 @@ -# Reporter System +# Reporter System (Developer Documentation) -The reporter system handles notification delivery after LLM analysis completion, providing a flexible and extensible way to send analysis results to external systems. +The reporter system handles notification delivery after LLM analysis completion. This document covers the internal architecture and implementation details for developers working on the reporter system. 
+ +**For user setup instructions, see the [root README](../../README.md#slack-notifications).** ## Architecture Overview @@ -76,79 +78,28 @@ The Slack reporter sends test failure notifications using a **Slack Workflow** t ### How It Works -The workflow creates three messages in a thread: - -1. **Initial Message** - Failure summary with cluster and test suite info -2. **First Reply** - AI-powered analysis with root cause and recommendations -3. **Second Reply** - Extracted test failure logs (only failure blocks, not full stdout) - -### Setup Instructions - -#### 1. Add Workflow to Your Slack Channel - -Each team adds the shared workflow to their channel: - -1. Open the workflow link: https://slack.com/shortcuts/Ft09RL7M2AMV/60f07b46919da20d103806a8f5bba094 -2. Click **Add to Slack** -3. Select your destination channel -4. **Copy the webhook URL** (starts with `https://hooks.slack.com/workflows/...`) - -#### 2. Get Your Channel ID - -The workflow requires a Slack **channel ID** (not channel name). - -**To find your channel ID:** -1. Right-click the channel name in Slack -2. Select **View channel details** -3. Scroll to bottom and **copy the channel ID** (starts with `C`) - -**Example:** `C06HQR8HN0L` - -#### 3. Configure Pipeline - -Set these environment variables in your CI/CD pipeline or Vault: - -```bash -LOG_ANALYSIS_SLACK_WEBHOOK=https://hooks.slack.com/workflows/T.../A.../... -LOG_ANALYSIS_SLACK_CHANNEL=C06HQR8HN0L # Channel ID, not #channel-name -``` - -#### 4. Enable in Config - -```yaml -tests: - enableSlackNotify: true -logAnalysis: - enableAnalysis: true -``` +The workflow creates four messages in a thread: -### Environment Variables +1. **Initial Message** - Test suite information (what failed) +2. **First Reply** - AI-powered analysis with root cause and recommendations (briefly why) +3. **Second Reply** - Extracted test failure logs (evidence - only failure blocks, not full stdout) +4. **Third Reply** - Cluster information for debugging (least important - cluster is ephemeral) -| Variable | Required | Description | -|----------|----------|-------------| -| `LOG_ANALYSIS_SLACK_WEBHOOK` | Yes | Workflow webhook URL from step 1 | -| `LOG_ANALYSIS_SLACK_CHANNEL` | Yes | Channel ID (starts with `C`) | +**Note:** The code sends fallback messages (e.g., "Test output logs not available") when data is unavailable. This ensures the workflow is resilient to version drift between code and workflow changes. ### Message Format -**Summary (Initial Message):** +**Summary (Initial Message - What Failed):** ``` :failed: Pipeline Failed at E2E Test -====== ☸️ Cluster Information ====== -• Cluster ID: `abc-123` -• Name: `my-cluster` -• Version: `4.20` -• Provider: `aws` -• Expiration: `2026-01-28T10:00:00Z` - ====== 🧪 Test Suite Information ====== • Image: `quay.io/openshift/osde2e-tests` • Commit: `abc123` • Environment: `stage` ``` -**Analysis (First Reply):** +**Analysis (First Reply - Briefly Why):** ``` ====== 🔍 Possible Cause ====== @@ -158,7 +109,7 @@ logAnalysis: 2. ``` -**Extended Logs (Second Reply):** +**Extended Logs (Second Reply - Evidence):** ``` Found 3 test failure(s): @@ -167,6 +118,16 @@ Found 3 test failure(s): ... 
``` +**Cluster Details (Third Reply - For Debugging):** +``` +====== ☸️ Cluster Information ====== +• Cluster ID: `abc-123` +• Name: `my-cluster` +• Version: `4.20` +• Provider: `aws` +• Expiration: `2026-01-28T10:00:00Z` +``` + ### Testing #### Unit Tests @@ -198,30 +159,35 @@ The reporter sends this JSON payload to the Slack Workflow: ```json { "channel": "C06HQR8HN0L", - "summary": "Pipeline Failed at E2E Test\n\n# Cluster Info...", + "summary": "Pipeline Failed at E2E Test\n\n# Test Suite Info...", "analysis": "# Possible Cause\n...", "extended_logs": "Found 3 test failure(s):\n...", + "cluster_details": "# Cluster Information\nCluster ID: abc-123\n...", "image": "quay.io/openshift/osde2e:abc123", "env": "stage", "commit": "abc123" } ``` -### Troubleshooting +## Implementation Notes -**Workflow not posting threaded messages:** -- Verify webhook URL is from the workflow (not a legacy incoming webhook) -- Workflow URLs contain `/workflows/` in the path -- Legacy incoming webhook URLs contain `/services/` instead +**Workflow vs Legacy Webhooks:** +- Workflow webhooks use `/workflows/` in the URL path +- Legacy incoming webhooks use `/services/` instead +- The code uses workflow webhooks to support threaded messages -**Channel not receiving messages:** -- Ensure you're using the channel ID (starts with `C`), not channel name -- Channel ID is case-sensitive +**Payload Limits:** +- Maximum field length: 30KB per field (enforced by `maxWorkflowFieldLength` constant) +- Content exceeding limits is truncated with a notice +- Slack workflows handle much larger payloads than legacy webhooks -**Missing fields in Slack message:** -- Check that all required fields are present: `channel`, `summary`, `analysis` -- Verify environment variables are set correctly +**Fallback Behavior:** +- All optional fields provide fallback messages when data is unavailable +- This ensures resilience to version drift between code and workflow changes +- Required fields: `channel`, `summary`, `analysis` -**Analysis too long:** -- The workflow handles message splitting automatically -- Payload limits: 30KB per field (enforced by code) +**Log Extraction Strategy:** +- For logs ≤250 lines: return full content +- For logs >250 lines: extract up to 3 failure blocks (max 30 lines each) +- Failure detection: `[FAILED]` markers and `ERROR`/`Error`/`error` strings +- Block deduplication: skip-ahead logic prevents overlapping extractions diff --git a/internal/reporter/slack.go b/internal/reporter/slack.go index 568307d990..982942d431 100644 --- a/internal/reporter/slack.go +++ b/internal/reporter/slack.go @@ -9,6 +9,7 @@ import ( "strings" commonslack "github.com/openshift/osde2e/pkg/common/slack" + "github.com/openshift/osde2e/pkg/common/util" ) const ( @@ -287,57 +288,56 @@ func (s *SlackReporter) formatAnalysisContent(content string) string { return formatted.String() } -// readTestOutput reads the test stdout from test_output.txt, test_output.log, or build-log.txt +// readTestOutput reads the test stdout from test_output.log func (s *SlackReporter) readTestOutput(reportDir string) string { - for _, filename := range []string{"test_output.txt", "test_output.log", "build-log.txt"} { - filePath := filepath.Join(reportDir, filename) - if content, err := os.ReadFile(filepath.Clean(filePath)); err == nil { - lines := strings.Split(strings.TrimRight(string(content), "\n"), "\n") - totalLines := len(lines) - - if totalLines <= fullOutputThreshold { - return string(content) - } + filePath := filepath.Join(reportDir, "test_output.log") 
+ if content, err := os.ReadFile(filepath.Clean(filePath)); err == nil { + lines := strings.Split(strings.TrimRight(string(content), "\n"), "\n") + totalLines := len(lines) - // For large logs, extract only failure blocks - this is what matters - failureBlocks := s.extractFailureBlocks(lines, 0, totalLines) - if len(failureBlocks) > 0 { - var result strings.Builder - result.WriteString("====== Log Extract ======\n") - result.WriteString(fmt.Sprintf("Found %d test failure(s):\n\n", len(failureBlocks))) - for i, block := range failureBlocks { - if i > 0 { - result.WriteString("\n---\n\n") - } - result.WriteString(block) - } - return result.String() - } + if totalLines <= fullOutputThreshold { + return string(content) + } - // No failures found, return summary section - lastN := finalSummaryLines + // For large logs, extract only failure blocks - this is what matters + failureBlocks := s.extractFailureBlocks(lines, 0, totalLines) + if len(failureBlocks) > 0 { var result strings.Builder - result.WriteString("No [FAILED] markers found. Showing final output:\n\n") - startIdx := totalLines - lastN - if startIdx < 0 { - startIdx = 0 - } - for i := startIdx; i < totalLines; i++ { - result.WriteString(lines[i]) - result.WriteString("\n") + result.WriteString("====== Log Extract ======\n") + result.WriteString(fmt.Sprintf("Found %d test failure(s):\n\n", len(failureBlocks))) + for i, block := range failureBlocks { + if i > 0 { + result.WriteString("\n---\n\n") + } + result.WriteString(block) } return result.String() } + + // No failures or errors found, return summary section + lastN := finalSummaryLines + var result strings.Builder + result.WriteString("No [FAILED] or ERROR markers found. Showing final output:\n\n") + startIdx := totalLines - lastN + if startIdx < 0 { + startIdx = 0 + } + for i := startIdx; i < totalLines; i++ { + result.WriteString(lines[i]) + result.WriteString("\n") + } + return result.String() } return "" } -// extractFailureBlocks finds [FAILED] test blocks and extracts them with context +// extractFailureBlocks finds [FAILED] test blocks and ERROR lines, then extracts them with context func (s *SlackReporter) extractFailureBlocks(lines []string, startIdx, endIdx int) []string { var blocks []string for i := startIdx; i < endIdx && len(blocks) < maxFailureBlocks; i++ { - if strings.Contains(lines[i], "[FAILED]") || strings.Contains(lines[i], "• [FAILED]") { + line := lines[i] + if util.ContainsFailureMarker(line) || util.ContainsErrorMarker(line) { var block strings.Builder start := i - failureContextLines diff --git a/internal/reporter/slack_testoutput_test.go b/internal/reporter/slack_testoutput_test.go index dd5faff130..a4e1bf5356 100644 --- a/internal/reporter/slack_testoutput_test.go +++ b/internal/reporter/slack_testoutput_test.go @@ -23,9 +23,11 @@ func TestSlackReporter_readTestOutput(t *testing.T) { t.Error("result should indicate test failures found") } - // Should contain [FAILED] markers or indicate no failures - if !strings.Contains(result, "[FAILED]") && !strings.Contains(result, "No [FAILED] markers found") { - t.Error("result should contain failure markers or indicate none found") + // Should contain [FAILED]/ERROR/Error: markers or indicate none found + if !strings.Contains(result, "[FAILED]") && !strings.Contains(result, "ERROR") && + !strings.Contains(result, "Error:") && !strings.Contains(result, "error:") && + !strings.Contains(result, "No [FAILED] or ERROR markers found") { + t.Error("result should contain failure/error markers or indicate none found") } 
t.Logf("Extracted test output (%d chars):\n%s", len(result), result[:min(500, len(result))]) @@ -41,7 +43,7 @@ func TestSlackReporter_readTestOutput(t *testing.T) { t.Run("handles small test output", func(t *testing.T) { tmpDir := t.TempDir() content := "line 1\nline 2\nline 3\n" - if err := os.WriteFile(filepath.Join(tmpDir, "test_output.txt"), []byte(content), 0o644); err != nil { + if err := os.WriteFile(filepath.Join(tmpDir, "test_output.log"), []byte(content), 0o644); err != nil { t.Fatal(err) } @@ -69,7 +71,7 @@ func TestSlackReporter_readTestOutput(t *testing.T) { } } - if err := os.WriteFile(filepath.Join(tmpDir, "test_output.txt"), []byte(content.String()), 0o644); err != nil { + if err := os.WriteFile(filepath.Join(tmpDir, "test_output.log"), []byte(content.String()), 0o644); err != nil { t.Fatal(err) } @@ -168,6 +170,123 @@ func TestSlackReporter_extractFailureBlocks(t *testing.T) { t.Errorf("expected 0 blocks for no failures, got %d", len(blocks)) } }) + + t.Run("extracts ERROR markers", func(t *testing.T) { + lines := []string{ + "line 1", + "line 2", + "ERROR: connection failed", + "stack trace line 1", + "stack trace line 2", + "line 6", + } + + blocks := reporter.extractFailureBlocks(lines, 0, len(lines)) + + if len(blocks) != 1 { + t.Fatalf("expected 1 block, got %d", len(blocks)) + } + + if !strings.Contains(blocks[0], "ERROR: connection failed") { + t.Error("block should contain ERROR marker") + } + if !strings.Contains(blocks[0], "stack trace") { + t.Error("block should contain context after error") + } + }) + + t.Run("extracts mixed ERROR and error markers", func(t *testing.T) { + lines := []string{ + "start", + "ERROR: first error", + "details 1", + "padding 1", "padding 2", "padding 3", "padding 4", "padding 5", + "padding 6", "padding 7", "padding 8", "padding 9", "padding 10", + "padding 11", "padding 12", "padding 13", "padding 14", "padding 15", + "padding 16", "padding 17", "padding 18", "padding 19", "padding 20", + "padding 21", "padding 22", "padding 23", "padding 24", "padding 25", + "padding 26", "padding 27", "padding 28", "padding 29", "padding 30", + "padding 31", "padding 32", "padding 33", "padding 34", "padding 35", + "Error reading file", + "details 2", + "end", + } + + blocks := reporter.extractFailureBlocks(lines, 0, len(lines)) + + if len(blocks) != 2 { + t.Fatalf("expected 2 blocks, got %d", len(blocks)) + } + + if !strings.Contains(blocks[0], "ERROR: first error") { + t.Error("first block should contain ERROR marker") + } + if !strings.Contains(blocks[1], "Error reading file") { + t.Error("second block should contain Error marker") + } + }) + + t.Run("deduplicates [FAILED] and ERROR in same block", func(t *testing.T) { + lines := []string{ + "start", + "line 1", + "line 2", + "ERROR: connection failed", + "line 4", + "line 5", + "[FAILED] test failed", + "line 7", + "line 8", + "end", + } + + blocks := reporter.extractFailureBlocks(lines, 0, len(lines)) + + // Should only extract 1 block because [FAILED] is within 30 lines of ERROR + // The skip-ahead logic (i = end - 1) prevents extracting both + if len(blocks) != 1 { + t.Fatalf("expected 1 block (deduplicated), got %d", len(blocks)) + } + + // The first marker (ERROR) should be captured + if !strings.Contains(blocks[0], "ERROR: connection failed") { + t.Error("block should contain ERROR marker") + } + + // The second marker ([FAILED]) should also be in the same block (within context) + if !strings.Contains(blocks[0], "[FAILED] test failed") { + t.Error("block should contain [FAILED] marker 
within context") + } + }) + + t.Run("extracts separate blocks when markers are far apart", func(t *testing.T) { + lines := []string{ + "start", + "ERROR: first error", + "context line 1", + } + // Add 50 padding lines to separate the markers + for i := 0; i < 50; i++ { + lines = append(lines, "padding line") + } + lines = append(lines, "[FAILED] second failure") + lines = append(lines, "context line 2") + lines = append(lines, "end") + + blocks := reporter.extractFailureBlocks(lines, 0, len(lines)) + + // Should extract 2 blocks because markers are >30 lines apart + if len(blocks) != 2 { + t.Fatalf("expected 2 blocks, got %d", len(blocks)) + } + + if !strings.Contains(blocks[0], "ERROR: first error") { + t.Error("first block should contain ERROR marker") + } + if !strings.Contains(blocks[1], "[FAILED] second failure") { + t.Error("second block should contain [FAILED] marker") + } + }) } func TestSlackReporter_buildClusterInfoSection(t *testing.T) { diff --git a/internal/reporter/testdata/periodic-ci-openshift-osde2e-main-nightly-4.20-osd-aws/build-log.txt b/internal/reporter/testdata/periodic-ci-openshift-osde2e-main-nightly-4.20-osd-aws/test_output.log similarity index 100% rename from internal/reporter/testdata/periodic-ci-openshift-osde2e-main-nightly-4.20-osd-aws/build-log.txt rename to internal/reporter/testdata/periodic-ci-openshift-osde2e-main-nightly-4.20-osd-aws/test_output.log diff --git a/pkg/common/util/util.go b/pkg/common/util/util.go index 3dc07c0583..38b0fec052 100644 --- a/pkg/common/util/util.go +++ b/pkg/common/util/util.go @@ -32,3 +32,13 @@ func OpenshiftVersionToSemver(openshiftVersion string) (*semver.Version, error) func SemverToOpenshiftVersion(version *semver.Version) string { return VersionPrefix + version.String() } + +// ContainsErrorMarker checks if a line contains an error marker (error, Error, or ERROR) +func ContainsErrorMarker(line string) bool { + return strings.Contains(line, "error") || strings.Contains(line, "Error") || strings.Contains(line, "ERROR") +} + +// ContainsFailureMarker checks if a line contains a test failure marker +func ContainsFailureMarker(line string) bool { + return strings.Contains(line, "[FAILED]") || strings.Contains(line, "• [FAILED]") +} diff --git a/pkg/common/util/util_test.go b/pkg/common/util/util_test.go new file mode 100644 index 0000000000..0be03f8cf8 --- /dev/null +++ b/pkg/common/util/util_test.go @@ -0,0 +1,114 @@ +package util + +import "testing" + +func TestContainsErrorMarker(t *testing.T) { + tests := []struct { + name string + line string + expected bool + }{ + { + name: "contains ERROR uppercase", + line: "ERROR: connection failed", + expected: true, + }, + { + name: "contains Error mixed case", + line: "Error reading file", + expected: true, + }, + { + name: "contains error lowercase", + line: "error: invalid input", + expected: true, + }, + { + name: "contains error in middle of word", + line: "generator failed", + expected: false, // "generator" doesn't contain "error", "Error", or "ERROR" + }, + { + name: "contains error as substring", + line: "this is an error in the system", + expected: true, + }, + { + name: "no error marker", + line: "successful operation", + expected: false, + }, + { + name: "empty line", + line: "", + expected: false, + }, + { + name: "Error with colon", + line: "Error: connection timeout", + expected: true, + }, + { + name: "error with colon", + line: "2026/01/19 09:39:15 Unable to find image. 
error: failed to find version", + expected: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := ContainsErrorMarker(tt.line) + if result != tt.expected { + t.Errorf("ContainsErrorMarker(%q) = %v, expected %v", tt.line, result, tt.expected) + } + }) + } +} + +func TestContainsFailureMarker(t *testing.T) { + tests := []struct { + name string + line string + expected bool + }{ + { + name: "contains [FAILED]", + line: "[FAILED] test description", + expected: true, + }, + { + name: "contains bullet [FAILED]", + line: "• [FAILED] test description", + expected: true, + }, + { + name: "no failure marker", + line: "test passed successfully", + expected: false, + }, + { + name: "contains PASSED not FAILED", + line: "[PASSED] test description", + expected: false, + }, + { + name: "empty line", + line: "", + expected: false, + }, + { + name: "failed lowercase", + line: "test failed", + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := ContainsFailureMarker(tt.line) + if result != tt.expected { + t.Errorf("ContainsFailureMarker(%q) = %v, expected %v", tt.line, result, tt.expected) + } + }) + } +}