Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .github/workflows/build-and-push.yml
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,6 @@ jobs:
registry_username: ${{ secrets.QUAY_IMAGE_SCLORG_BUILDER_USERNAME }}
registry_token: ${{ secrets.QUAY_IMAGE_SCLORG_BUILDER_TOKEN }}
dockerfile: Dockerfile.daily-tests
tag: "0.8.0"
tag: "0.8.1"
image_name: "upstream-daily-tests"
quay_application_token: ${{ secrets.QUAY_IMAGE_SCLORG_UPDATE_DESC }}
8 changes: 4 additions & 4 deletions Dockerfile.daily-tests
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@ FROM quay.io/fedora/fedora:42

ENV SHARED_DIR="/var/ci-scripts" \
VERSION="42" \
RELEASE_UPSTREAM="0.8.0" \
RELEASE_UPSTREAM="0.8.1" \
UPSTREAM_TMT_REPO="https://github.com/sclorg/sclorg-testing-farm" \
UPSTREAM_TMT_DIR="sclorg-testing-farm" \
HOME="/home/nightly" \
Expand All @@ -21,9 +21,9 @@ LABEL summary="${SUMMARY}" \
RUN dnf install -y python3.13-pip git nss_wrapper && \
dnf clean all

COPY requirements.sh "${WORK_DIR}/requirements.sh"
RUN bash "${WORK_DIR}/requirements.sh"
RUN cd "${WORK_DIR}" && git clone "${UPSTREAM_TMT_REPO}" "${UPSTREAM_TMT_DIR}"
COPY requirements.sh requirements.txt "${WORK_DIR}"
RUN bash "${WORK_DIR}/requirements.sh" && pip3 install -r "${WORK_DIR}/requirements.txt"
# RUN cd "${WORK_DIR}" && git clone "${UPSTREAM_TMT_REPO}" "${UPSTREAM_TMT_DIR}"

COPY . /root/ci-scripts
WORKDIR "${HOME}"
Expand Down
2 changes: 1 addition & 1 deletion Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -7,4 +7,4 @@ shellcheck:
./run-shellcheck.sh `git ls-files *.sh`

build_images:
podman build -t quay.io/sclorg/upstream-daily-tests:0.8.0 -f Dockerfile.daily-tests .
podman build -t quay.io/sclorg/upstream-daily-tests:0.8.1 -f Dockerfile.daily-tests .
65 changes: 39 additions & 26 deletions daily_tests/daily_nightly_tests_report.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
#!/usr/bin/env python3
from email.utils import formatdate
import os
import smtplib
import sys
import argparse
import subprocess
Expand Down Expand Up @@ -123,6 +125,8 @@ def __init__(self):
self.sclorg_dir = SCLORG_DIR / self.date
self.add_email = []
self.full_success = False
self.smtp_port = 25
self.smtp_server = "smtp.redhat.com"
if self.args.upstream_tests:
self.available_test_case = TEST_UPSTREAM_CASES
else:
Expand Down Expand Up @@ -157,6 +161,7 @@ def return_plan_name(self, item) -> str:
)

def load_mails_from_environment(self):
print(os.environ)
if "DB_MAILS" in os.environ:
SCLORG_MAILS["mariadb-container"] = os.environ["DB_MAILS"].split(",")
SCLORG_MAILS["mysql-container"] = os.environ["DB_MAILS"].split(",")
Expand All @@ -169,10 +174,12 @@ def load_mails_from_environment(self):
SCLORG_MAILS["s2i-nodejs-container"] = os.environ["NODEJS_MAILS"].split(",")
if "PERL_MAILS" in os.environ:
SCLORG_MAILS["s2i-perl-container"] = os.environ["PERL_MAILS"].split(",")
if "UPSTREAM_TESTS_MAILS" in os.environ:
SCLORG_MAILS["upstream-tests"] = os.environ["UPSTREAM_TESTS_MAILS"].split(
","
)
if "UPSTREAM_MAILS" in os.environ:
SCLORG_MAILS["upstream-tests"] = os.environ["UPSTREAM_MAILS"].split(",")
if "SMTP_SERVER" in os.environ:
self.smtp_server = os.getenv("SMTP_SERVER", "smtp.redhat.com")
if "SMTP_PORT" in os.environ:
self.smtp_port = int(os.getenv("SMTP_PORT", 25))
if "DEFAULT_MAILS" in os.environ:
default_mails.extend(os.environ["DEFAULT_MAILS"].split(","))
self.send_email = os.environ.get("SEND_EMAIL", False)
Expand Down Expand Up @@ -243,6 +250,7 @@ def collect_data(self):
# Collect data to class dictionary
# self.data_dict['tmt'] item is used for Testing Farm errors per each OS and test case
# self.data_dict[test_case] contains failed logs for given test case. E.g. 'fedora-test'
print("=======Collecting data for all test cases=====")
self.data_dict["tmt"] = {
"logs": [],
"msg": [],
Expand All @@ -263,8 +271,9 @@ def collect_data(self):
print(f"The test case {path_dir} does not exist that is weird")
continue
plan_name = self.return_plan_name(plan)
print(f"Collecting data for test case {test_case} with plan {plan_name}")
print(f"Path for test case {test_case} is: {path_dir}")
print(
f"Path for test case {test_case} is: {path_dir} and plan name is: {plan_name}"
)
# It looks like TMT is still running for long time
if (path_dir / "tmt_running").exists():
print(f"tmt tests for case {test_case} is still running.")
Expand All @@ -278,26 +287,16 @@ def collect_data(self):
self.store_tmt_logs_to_dict(path_dir, test_case)
failed_tests = True
continue
data_dir = path_dir / "plans/nightly" / plan_name / "data"
print(f"Data dir for test case {test_case} is: {data_dir}")
if not data_dir.is_dir():
self.store_tmt_logs_to_dict(path_dir, test_case, not_exists=True)
self.send_file_to_pastebin(
log_path=self.sclorg_dir / f"{test_case}" / "log.txt",
log_name=f"{path_dir}/{test_case}.log.txt",
)
failed_tests = True
continue
results_dir = data_dir / "results"
print("Results dir is for failed_container: ", results_dir)
failed_containers = list(results_dir.rglob("*.log"))
data_dir = path_dir / "results"
print("Results dir is for failed_container: ", data_dir)
success_containers = list(path_dir.rglob("*.log"))
print("Success containers are: ", success_containers)
failed_containers = list(data_dir.rglob("*.log"))
print("Failed containers are: ", failed_containers)
if not failed_containers:
self.data_dict["SUCCESS"].append(test_case)
if self.args.upstream_tests:
success_logs = list(
(path_dir / "plans" / plan / "data").rglob("*.log")
)
success_logs = list((path_dir).rglob("*.log"))
self.data_dict["SUCCESS_DATA"].extend(
[(test_case, str(f), str(f.name)) for f in success_logs]
)
Expand All @@ -310,7 +309,10 @@ def collect_data(self):
]
if not failed_tests:
self.full_success = True
print(f"collect data: {self.data_dict}")
print("collected data are:")
import pprint

pprint.pprint(self.data_dict)

def generate_email_body(self):
if self.args.upstream_tests:
Expand Down Expand Up @@ -425,10 +427,21 @@ def send_emails(self):
self.mime_msg["From"] = send_from
self.mime_msg["To"] = ", ".join(send_to)
self.mime_msg["Subject"] = subject_msg
self.mime_msg["Date"] = formatdate(localtime=True)
print(f"Sending email with subject: '{subject_msg}' to: '{send_to}'")
print(f"Email body: {self.body}")
print(f"Message: {self.mime_msg}")
self.mime_msg.attach(MIMEText(self.body, "html"))
smtp = SMTP("smtp.redhat.com")
smtp.sendmail(send_from, send_to, self.mime_msg.as_string())
smtp.close()
try:
smtp = SMTP(self.smtp_server, int(self.smtp_port))
smtp.set_debuglevel(5)
smtp.sendmail(send_from, send_to, self.mime_msg.as_string())
except smtplib.SMTPRecipientsRefused as e:
print(f"Error sending email(SMTPRecipientsRefused): {e.strerror}")
except smtplib.SMTPException as e:
print(f"Error sending email(SMTPException): {e}")
finally:
smtp.close()
print("Sending email finished")


Expand Down
13 changes: 5 additions & 8 deletions daily_tests/daily_nightly_tests_report.sh
Original file line number Diff line number Diff line change
Expand Up @@ -4,13 +4,10 @@ set -x

cd /root/ci-scripts/daily_tests
CUR_DATE=$(date +%Y-%m-%d)
id

#find "/var/ci-scripts" -ctime +30 -type d -exec rm-rf {} \;
echo "Daily nightly reports log files every 10 minutes..."
postfix start &
ls -la $SHARED_DIR/${CUR_DATE}
find "${SHARED_DIR}/${CUR_DATE}" -type f -name "tmt_*"
echo "--------------------"
python3 ./daily_nightly_tests_report.py
echo "Bash arguments: $@"
# Let's sleep for 10 minutes
env
find "/var/ci-scripts/daily_reports_dir/${CUR_DATE}" -type f -name "tmt_*"
echo "--------------------"
python3 ./daily_nightly_tests_report.py "$1"
2 changes: 0 additions & 2 deletions daily_tests/daily_scl_tests.sh
Original file line number Diff line number Diff line change
Expand Up @@ -36,8 +36,6 @@ TMP_DIR="${TMT_PLAN_DATA}"
RESULT_DIR="${TMP_DIR}/results/"
KUBECONFIG=/root/.kube/config
KUBEPASSWD=/root/.kube/ocp-kube
PBINCLI=/usr/local/bin/pbincli
PBINCLI_OPTS="--server https://privatebin.corp.redhat.com --expire 1week --no-insecure-warning --no-check-certificate --format plaintext"

mkdir -p "${RESULT_DIR}"

Expand Down
198 changes: 198 additions & 0 deletions daily_tests/download_logs.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,198 @@
#!/usr/bin/env python3
import os
import time
import sys
import re
import argparse
import requests
import xmltodict
import urllib3

from datetime import date
from pathlib import Path

# Silence urllib3's InsecureRequestWarning: every download below is made with
# requests.get(..., verify=False) against the Testing Farm artifact hosts.
urllib3.disable_warnings()


# Container repositories whose per-container logs are fetched from the plan
# "data" (and "data/results") directory by download_container_logs().
CONTAINERS = [
    "httpd-container",
    "mariadb-container",
    "mysql-container",
    "nginx-container",
    "postgresql-container",
    "redis-container",
    "s2i-base-container",
    "s2i-nodejs-container",
    "s2i-perl-container",
    "s2i-php-container",
    "s2i-python-container",
    "s2i-ruby-container",
    "valkey-container",
    "varnish-container",
]

# Public artifact host, used for fedora/c9s/c10s targets (see get_xml_report).
REPORTS_PUBLIC_URL = "https://artifacts.dev.testing-farm.io"
# Internal artifact host, used for every other target.
REPORTS_PRIVATE_URL = "https://artifacts.osci.redhat.com/testing-farm"
# Root of the shared log tree. NOTE(review): os.getenv returns None when
# $SHARED_DIR is unset, and Path(LOG_DIR) would then raise — assumed set by
# the container environment; TODO confirm.
LOG_DIR = os.getenv("SHARED_DIR")


class TestingFarmLogDownloader:
"""
Download logs from Testing Farm and store them in the log directory.
"""

def __init__(self, log_file: str, target: str, test: str):
"""
Initialize the TestingFarmLogDownloader class.
"""
self.log_file: Path = Path(log_file)
self.target: str = target
self.test: str = test
self.request_id: str = None
self.xml_dict: dict = None
self.date = date.today().strftime("%Y-%m-%d")
self.data_dir_url_link: str = None
self.log_dir: Path = (
Path(LOG_DIR)
/ "daily_reports_dir"
/ self.date
/ f"{self.target}-{self.test}"
)

def get_request_id(self) -> bool:
"""
Get the request ID from the log file.
"""
with self.log_file.open() as f:
lines = f.readlines()

for line in lines:
if "api https://api.dev.testing-farm.io/v0.1/requests/" in line:
self.request_id = (
line.replace("api https://api.dev.testing-farm.io/v0.1/", "")
.split("/")[1]
.strip()
)
break

if not self.request_id:
print("Request ID not found in the log.")
return False

print(f"Request ID: {self.request_id}")
return True

def download_log(self, log_name_url: str, log_name: str = None) -> bool:
"""
Download a log from the Testing Farm.
"""
for _ in range(5):
file_name_url = log_name_url + "/" + log_name
print(f"Downloading log: {file_name_url}")
response = requests.get(file_name_url, verify=False)
if response.status_code == 200:
with (self.log_dir / log_name).open("wb") as f:
f.write(response.content)
return True
else:
print(f"Failed to download log: {response.status_code}")
time.sleep(3) # Wait before retrying
else:
print("Failed to download log after multiple attempts.")
return False

def download_tmt_logs(self):
"""
Download TMT logs from the Testing Farm.
"""
if not self.xml_dict:
print("XML report not found.")
return False
list_logs_to_download = ["tmt-verbose-log", "tmt-log"]
for log in self.xml_dict["testsuites"]["testsuite"]["logs"]["log"]:
if log["@name"] in list_logs_to_download:
self.download_log(log["@href"], log["@name"])
continue
if log["@name"] == "data":
self.data_dir_url_link = log["@href"]

def get_list_of_containers_logs(self, html_content: str):
"""
Get the list of failed containers from the HTML content.
"""
try:
list_of_failed_containers = []
for line in html_content.split("\n"):
if re.search(r"<a href=\"[a-zA-Z0-9.-]+\">", line):
list_of_failed_containers.append(
re.search(r"<a href=\"[a-zA-Z0-9.-]+\">", line).group(0)
)
return list_of_failed_containers
except Exception as e:
print(f"Failed to get list of failed containers: {e}")
return False

def download_container_logs(self, failed: bool = False) -> bool:
"""
Download the failed container logs from the Testing Farm.
"""
if not self.data_dir_url_link:
print("Data directory URL link not found.")
return False
url_link = self.data_dir_url_link
if failed:
url_link += "/results"

print(f"Data directory URL link: {url_link}")
response = requests.get(url_link, verify=False)
if response.status_code == 200:
print(response.text)
else:
print(f"Failed to download data/results directory: {response.status_code}")
return False
for cont in CONTAINERS:
self.download_log(f"{url_link}/{cont}.log", f"{cont}.log")
return True

def get_xml_report(self) -> bool:
"""
Get the XML report from the Testing Farm.
"""
if self.target in ["fedora", "c9s", "c10s"]:
xml_report_url = f"{REPORTS_PUBLIC_URL}/{self.request_id}/results.xml"
else:
xml_report_url = f"{REPORTS_PRIVATE_URL}/{self.request_id}/results.xml"
print(f"XML Report URL: {xml_report_url}")
for _ in range(5):
response = requests.get(xml_report_url, verify=False)
if response.status_code == 200:
self.xml_dict = xmltodict.parse(response.content)
break
else:
print(f"Failed to download XML report: {response.status_code}")
time.sleep(3) # Wait before retrying
else:
print("Failed to download XML report after multiple attempts.")
return False
return True


if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Download logs from Testing Farm.")
parser.add_argument("log_file", type=str, help="Path to the log file")
parser.add_argument("target", type=str, help="Target environment")
parser.add_argument("test", type=str, help="Test name")

args = parser.parse_args()

downloader = TestingFarmLogDownloader(args.log_file, args.target, args.test)
downloader.get_request_id()
if not downloader.request_id:
print("Cannot download logs without a valid request ID.")
sys.exit(1)
if not downloader.get_xml_report():
print("Failed to download XML report.")
sys.exit(1)
downloader.download_tmt_logs()
downloader.download_container_logs()
downloader.download_container_logs(failed=True)
Loading