diff --git a/.github/workflows/build-and-push.yml b/.github/workflows/build-and-push.yml
index 23b31e2..68be123 100644
--- a/.github/workflows/build-and-push.yml
+++ b/.github/workflows/build-and-push.yml
@@ -19,6 +19,6 @@ jobs:
registry_username: ${{ secrets.QUAY_IMAGE_SCLORG_BUILDER_USERNAME }}
registry_token: ${{ secrets.QUAY_IMAGE_SCLORG_BUILDER_TOKEN }}
dockerfile: Dockerfile.daily-tests
- tag: "0.8.0"
+ tag: "0.8.1"
image_name: "upstream-daily-tests"
quay_application_token: ${{ secrets.QUAY_IMAGE_SCLORG_UPDATE_DESC }}
diff --git a/Dockerfile.daily-tests b/Dockerfile.daily-tests
index c19fba8..a5a87da 100644
--- a/Dockerfile.daily-tests
+++ b/Dockerfile.daily-tests
@@ -2,7 +2,7 @@ FROM quay.io/fedora/fedora:42
ENV SHARED_DIR="/var/ci-scripts" \
VERSION="42" \
- RELEASE_UPSTREAM="0.8.0" \
+ RELEASE_UPSTREAM="0.8.1" \
UPSTREAM_TMT_REPO="https://github.com/sclorg/sclorg-testing-farm" \
UPSTREAM_TMT_DIR="sclorg-testing-farm" \
HOME="/home/nightly" \
@@ -21,9 +21,9 @@ LABEL summary="${SUMMARY}" \
RUN dnf install -y python3.13-pip git nss_wrapper && \
dnf clean all
-COPY requirements.sh "${WORK_DIR}/requirements.sh"
-RUN bash "${WORK_DIR}/requirements.sh"
-RUN cd "${WORK_DIR}" && git clone "${UPSTREAM_TMT_REPO}" "${UPSTREAM_TMT_DIR}"
+COPY requirements.sh requirements.txt "${WORK_DIR}/"
+RUN bash "${WORK_DIR}/requirements.sh" && pip3 install -r "${WORK_DIR}/requirements.txt"
+# RUN cd "${WORK_DIR}" && git clone "${UPSTREAM_TMT_REPO}" "${UPSTREAM_TMT_DIR}"
COPY . /root/ci-scripts
WORKDIR "${HOME}"
diff --git a/Makefile b/Makefile
index c763144..b2dc525 100644
--- a/Makefile
+++ b/Makefile
@@ -7,4 +7,4 @@ shellcheck:
./run-shellcheck.sh `git ls-files *.sh`
build_images:
- podman build -t quay.io/sclorg/upstream-daily-tests:0.8.0 -f Dockerfile.daily-tests .
+ podman build -t quay.io/sclorg/upstream-daily-tests:0.8.1 -f Dockerfile.daily-tests .
diff --git a/daily_tests/daily_nightly_tests_report.py b/daily_tests/daily_nightly_tests_report.py
index eff5554..d02262c 100755
--- a/daily_tests/daily_nightly_tests_report.py
+++ b/daily_tests/daily_nightly_tests_report.py
@@ -1,5 +1,7 @@
#!/usr/bin/env python3
+from email.utils import formatdate
import os
+import smtplib
import sys
import argparse
import subprocess
@@ -123,6 +125,8 @@ def __init__(self):
self.sclorg_dir = SCLORG_DIR / self.date
self.add_email = []
self.full_success = False
+ self.smtp_port = 25
+ self.smtp_server = "smtp.redhat.com"
if self.args.upstream_tests:
self.available_test_case = TEST_UPSTREAM_CASES
else:
@@ -157,6 +161,7 @@ def return_plan_name(self, item) -> str:
)
def load_mails_from_environment(self):
+        # NOTE(review): do not print os.environ here — it can contain SMTP/mail secrets
if "DB_MAILS" in os.environ:
SCLORG_MAILS["mariadb-container"] = os.environ["DB_MAILS"].split(",")
SCLORG_MAILS["mysql-container"] = os.environ["DB_MAILS"].split(",")
@@ -169,10 +174,12 @@ def load_mails_from_environment(self):
SCLORG_MAILS["s2i-nodejs-container"] = os.environ["NODEJS_MAILS"].split(",")
if "PERL_MAILS" in os.environ:
SCLORG_MAILS["s2i-perl-container"] = os.environ["PERL_MAILS"].split(",")
- if "UPSTREAM_TESTS_MAILS" in os.environ:
- SCLORG_MAILS["upstream-tests"] = os.environ["UPSTREAM_TESTS_MAILS"].split(
- ","
- )
+ if "UPSTREAM_MAILS" in os.environ:
+ SCLORG_MAILS["upstream-tests"] = os.environ["UPSTREAM_MAILS"].split(",")
+ if "SMTP_SERVER" in os.environ:
+ self.smtp_server = os.getenv("SMTP_SERVER", "smtp.redhat.com")
+ if "SMTP_PORT" in os.environ:
+ self.smtp_port = int(os.getenv("SMTP_PORT", 25))
if "DEFAULT_MAILS" in os.environ:
default_mails.extend(os.environ["DEFAULT_MAILS"].split(","))
self.send_email = os.environ.get("SEND_EMAIL", False)
@@ -243,6 +250,7 @@ def collect_data(self):
# Collect data to class dictionary
# self.data_dict['tmt'] item is used for Testing Farm errors per each OS and test case
# self.data_dict[test_case] contains failed logs for given test case. E.g. 'fedora-test'
+ print("=======Collecting data for all test cases=====")
self.data_dict["tmt"] = {
"logs": [],
"msg": [],
@@ -263,8 +271,9 @@ def collect_data(self):
print(f"The test case {path_dir} does not exist that is weird")
continue
plan_name = self.return_plan_name(plan)
- print(f"Collecting data for test case {test_case} with plan {plan_name}")
- print(f"Path for test case {test_case} is: {path_dir}")
+ print(
+ f"Path for test case {test_case} is: {path_dir} and plan name is: {plan_name}"
+ )
# It looks like TMT is still running for long time
if (path_dir / "tmt_running").exists():
print(f"tmt tests for case {test_case} is still running.")
@@ -278,26 +287,16 @@ def collect_data(self):
self.store_tmt_logs_to_dict(path_dir, test_case)
failed_tests = True
continue
- data_dir = path_dir / "plans/nightly" / plan_name / "data"
- print(f"Data dir for test case {test_case} is: {data_dir}")
- if not data_dir.is_dir():
- self.store_tmt_logs_to_dict(path_dir, test_case, not_exists=True)
- self.send_file_to_pastebin(
- log_path=self.sclorg_dir / f"{test_case}" / "log.txt",
- log_name=f"{path_dir}/{test_case}.log.txt",
- )
- failed_tests = True
- continue
- results_dir = data_dir / "results"
- print("Results dir is for failed_container: ", results_dir)
- failed_containers = list(results_dir.rglob("*.log"))
+ data_dir = path_dir / "results"
+ print("Results dir is for failed_container: ", data_dir)
+ success_containers = list(path_dir.rglob("*.log"))
+ print("Success containers are: ", success_containers)
+ failed_containers = list(data_dir.rglob("*.log"))
print("Failed containers are: ", failed_containers)
if not failed_containers:
self.data_dict["SUCCESS"].append(test_case)
if self.args.upstream_tests:
- success_logs = list(
- (path_dir / "plans" / plan / "data").rglob("*.log")
- )
+ success_logs = list((path_dir).rglob("*.log"))
self.data_dict["SUCCESS_DATA"].extend(
[(test_case, str(f), str(f.name)) for f in success_logs]
)
@@ -310,7 +309,10 @@ def collect_data(self):
]
if not failed_tests:
self.full_success = True
- print(f"collect data: {self.data_dict}")
+ print("collected data are:")
+ import pprint
+
+ pprint.pprint(self.data_dict)
def generate_email_body(self):
if self.args.upstream_tests:
@@ -425,10 +427,21 @@ def send_emails(self):
self.mime_msg["From"] = send_from
self.mime_msg["To"] = ", ".join(send_to)
self.mime_msg["Subject"] = subject_msg
+ self.mime_msg["Date"] = formatdate(localtime=True)
+ print(f"Sending email with subject: '{subject_msg}' to: '{send_to}'")
+ print(f"Email body: {self.body}")
+ print(f"Message: {self.mime_msg}")
self.mime_msg.attach(MIMEText(self.body, "html"))
- smtp = SMTP("smtp.redhat.com")
- smtp.sendmail(send_from, send_to, self.mime_msg.as_string())
- smtp.close()
+        try:
+            with SMTP(self.smtp_server, int(self.smtp_port)) as smtp:
+                smtp.set_debuglevel(5)
+                smtp.sendmail(send_from, send_to, self.mime_msg.as_string())
+        except smtplib.SMTPRecipientsRefused as e:
+            print(f"Error sending email(SMTPRecipientsRefused): {e.recipients}")
+        except smtplib.SMTPException as e:
+            print(f"Error sending email(SMTPException): {e}")
+        except OSError as e:
+            print(f"Error connecting to SMTP server({self.smtp_server}): {e}")
print("Sending email finished")
diff --git a/daily_tests/daily_nightly_tests_report.sh b/daily_tests/daily_nightly_tests_report.sh
index 4b41e10..9ed84bf 100755
--- a/daily_tests/daily_nightly_tests_report.sh
+++ b/daily_tests/daily_nightly_tests_report.sh
@@ -4,13 +4,10 @@ set -x
cd /root/ci-scripts/daily_tests
CUR_DATE=$(date +%Y-%m-%d)
-id
-#find "/var/ci-scripts" -ctime +30 -type d -exec rm-rf {} \;
-echo "Daily nightly reports log files every 10 minutes..."
-postfix start &
-ls -la $SHARED_DIR/${CUR_DATE}
-find "${SHARED_DIR}/${CUR_DATE}" -type f -name "tmt_*"
-echo "--------------------"
-python3 ./daily_nightly_tests_report.py
+echo "Bash arguments: $@"
# Let's sleep for 10 minutes
+# NOTE(review): 'env' dump removed — environment may contain SMTP credentials
+find "/var/ci-scripts/daily_reports_dir/${CUR_DATE}" -type f -name "tmt_*"
+echo "--------------------"
+python3 ./daily_nightly_tests_report.py "$1"
diff --git a/daily_tests/daily_scl_tests.sh b/daily_tests/daily_scl_tests.sh
index de0d85b..55a8e7c 100755
--- a/daily_tests/daily_scl_tests.sh
+++ b/daily_tests/daily_scl_tests.sh
@@ -36,8 +36,6 @@ TMP_DIR="${TMT_PLAN_DATA}"
RESULT_DIR="${TMP_DIR}/results/"
KUBECONFIG=/root/.kube/config
KUBEPASSWD=/root/.kube/ocp-kube
-PBINCLI=/usr/local/bin/pbincli
-PBINCLI_OPTS="--server https://privatebin.corp.redhat.com --expire 1week --no-insecure-warning --no-check-certificate --format plaintext"
mkdir -p "${RESULT_DIR}"
diff --git a/daily_tests/download_logs.py b/daily_tests/download_logs.py
new file mode 100755
index 0000000..2bd6fbb
--- /dev/null
+++ b/daily_tests/download_logs.py
@@ -0,0 +1,198 @@
+#!/usr/bin/env python3
+import os
+import time
+import sys
+import re
+import argparse
+import requests
+import xmltodict
+import urllib3
+
+from datetime import date
+from pathlib import Path
+
+urllib3.disable_warnings()
+
+
+CONTAINERS = [
+ "httpd-container",
+ "mariadb-container",
+ "mysql-container",
+ "nginx-container",
+ "postgresql-container",
+ "redis-container",
+ "s2i-base-container",
+ "s2i-nodejs-container",
+ "s2i-perl-container",
+ "s2i-php-container",
+ "s2i-python-container",
+ "s2i-ruby-container",
+ "valkey-container",
+ "varnish-container",
+]
+
+REPORTS_PUBLIC_URL = "https://artifacts.dev.testing-farm.io"
+REPORTS_PRIVATE_URL = "https://artifacts.osci.redhat.com/testing-farm"
+LOG_DIR = os.getenv("SHARED_DIR")
+
+
+class TestingFarmLogDownloader:
+ """
+ Download logs from Testing Farm and store them in the log directory.
+ """
+
+ def __init__(self, log_file: str, target: str, test: str):
+ """
+ Initialize the TestingFarmLogDownloader class.
+ """
+ self.log_file: Path = Path(log_file)
+ self.target: str = target
+ self.test: str = test
+ self.request_id: str = None
+ self.xml_dict: dict = None
+ self.date = date.today().strftime("%Y-%m-%d")
+ self.data_dir_url_link: str = None
+ self.log_dir: Path = (
+ Path(LOG_DIR)
+ / "daily_reports_dir"
+ / self.date
+ / f"{self.target}-{self.test}"
+ )
+
+ def get_request_id(self) -> bool:
+ """
+ Get the request ID from the log file.
+ """
+ with self.log_file.open() as f:
+ lines = f.readlines()
+
+ for line in lines:
+ if "api https://api.dev.testing-farm.io/v0.1/requests/" in line:
+ self.request_id = (
+ line.replace("api https://api.dev.testing-farm.io/v0.1/", "")
+ .split("/")[1]
+ .strip()
+ )
+ break
+
+ if not self.request_id:
+ print("Request ID not found in the log.")
+ return False
+
+ print(f"Request ID: {self.request_id}")
+ return True
+
+ def download_log(self, log_name_url: str, log_name: str = None) -> bool:
+ """
+ Download a log from the Testing Farm.
+ """
+ for _ in range(5):
+ file_name_url = log_name_url + "/" + log_name
+ print(f"Downloading log: {file_name_url}")
+ response = requests.get(file_name_url, verify=False)
+ if response.status_code == 200:
+ with (self.log_dir / log_name).open("wb") as f:
+ f.write(response.content)
+ return True
+ else:
+ print(f"Failed to download log: {response.status_code}")
+ time.sleep(3) # Wait before retrying
+ else:
+ print("Failed to download log after multiple attempts.")
+ return False
+
+ def download_tmt_logs(self):
+ """
+ Download TMT logs from the Testing Farm.
+ """
+ if not self.xml_dict:
+ print("XML report not found.")
+ return False
+ list_logs_to_download = ["tmt-verbose-log", "tmt-log"]
+ for log in self.xml_dict["testsuites"]["testsuite"]["logs"]["log"]:
+ if log["@name"] in list_logs_to_download:
+ self.download_log(log["@href"], log["@name"])
+ continue
+ if log["@name"] == "data":
+ self.data_dir_url_link = log["@href"]
+
+ def get_list_of_containers_logs(self, html_content: str):
+ """
+ Get the list of failed containers from the HTML content.
+ """
+ try:
+ list_of_failed_containers = []
+ for line in html_content.split("\n"):
+ if re.search(r"", line):
+ list_of_failed_containers.append(
+ re.search(r"", line).group(0)
+ )
+ return list_of_failed_containers
+ except Exception as e:
+ print(f"Failed to get list of failed containers: {e}")
+ return False
+
+ def download_container_logs(self, failed: bool = False) -> bool:
+ """
+ Download the failed container logs from the Testing Farm.
+ """
+ if not self.data_dir_url_link:
+ print("Data directory URL link not found.")
+ return False
+ url_link = self.data_dir_url_link
+ if failed:
+ url_link += "/results"
+
+ print(f"Data directory URL link: {url_link}")
+ response = requests.get(url_link, verify=False)
+ if response.status_code == 200:
+ print(response.text)
+ else:
+ print(f"Failed to download data/results directory: {response.status_code}")
+ return False
+ for cont in CONTAINERS:
+ self.download_log(f"{url_link}/{cont}.log", f"{cont}.log")
+ return True
+
+ def get_xml_report(self) -> bool:
+ """
+ Get the XML report from the Testing Farm.
+ """
+ if self.target in ["fedora", "c9s", "c10s"]:
+ xml_report_url = f"{REPORTS_PUBLIC_URL}/{self.request_id}/results.xml"
+ else:
+ xml_report_url = f"{REPORTS_PRIVATE_URL}/{self.request_id}/results.xml"
+ print(f"XML Report URL: {xml_report_url}")
+ for _ in range(5):
+ response = requests.get(xml_report_url, verify=False)
+ if response.status_code == 200:
+ self.xml_dict = xmltodict.parse(response.content)
+ break
+ else:
+ print(f"Failed to download XML report: {response.status_code}")
+ time.sleep(3) # Wait before retrying
+ else:
+ print("Failed to download XML report after multiple attempts.")
+ return False
+ return True
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description="Download logs from Testing Farm.")
+ parser.add_argument("log_file", type=str, help="Path to the log file")
+ parser.add_argument("target", type=str, help="Target environment")
+ parser.add_argument("test", type=str, help="Test name")
+
+ args = parser.parse_args()
+
+ downloader = TestingFarmLogDownloader(args.log_file, args.target, args.test)
+ downloader.get_request_id()
+ if not downloader.request_id:
+ print("Cannot download logs without a valid request ID.")
+ sys.exit(1)
+ if not downloader.get_xml_report():
+ print("Failed to download XML report.")
+ sys.exit(1)
+ downloader.download_tmt_logs()
+ downloader.download_container_logs()
+ downloader.download_container_logs(failed=True)
diff --git a/daily_tests/show_logs.py b/daily_tests/show_logs.py
index ddac3fe..81bf1db 100755
--- a/daily_tests/show_logs.py
+++ b/daily_tests/show_logs.py
@@ -69,25 +69,10 @@ def iter_results_in_directory(self):
print(f"{x}\n")
def return_failed_tests(self, directory, item) -> list:
- plan_name = "".join([x[1] for x in TEST_CASES if item.name.startswith(x[0])])
- dir_path = directory / f"plans/{plan_name}/data/results"
+ dir_path = directory / "results"
print(f"Looking for failed tests in directory: {dir_path}")
return list(dir_path.rglob("*.log"))
- def iter_over_executed_tests(self):
- """View all executed tests in the given directory."""
- for item in self.scl_tests_dir.iterdir():
- print(f"Inspecting item in '{self.scl_tests_dir}' directory: {item}")
- item_dir = self.scl_tests_dir / item.name
- if not item_dir.is_dir():
- continue
- failed_container_tests = self.return_failed_tests(item_dir, item)
- if not failed_container_tests:
- print(f"No container test failures found in {item}.")
- continue
- print(f"!!!!Failed container tests for {item.name}!!!!\n")
- print({"\n".join(failed_container_tests)})
-
def show_all_available_tests(self):
print("All previous available tests are:")
for item in DAILY_REPORTS_DIR.iterdir():
@@ -96,12 +81,6 @@ def show_all_available_tests(self):
def print_report(self):
print(f"Summary ({self.date}) of daily SCL tests reports:")
- if not self.scl_tests_dir.is_dir():
- print(
- f"The directory {self.scl_tests_dir} does not exist. Tests were not executed yet."
- )
- else:
- self.iter_over_executed_tests()
if not self.reports_dir.is_dir():
print(
f"The directory {self.reports_dir} does not exist. Tests were not finished yet."
diff --git a/daily_tests/tests/no_logs_in_results.txt b/daily_tests/tests/no_logs_in_results.txt
new file mode 100644
index 0000000..56942f4
--- /dev/null
+++ b/daily_tests/tests/no_logs_in_results.txt
@@ -0,0 +1,6 @@
+
+Index of /2647213f-ac56-4fbf-8482-adc7cfd93a5f/work-nightly-c10srmobb_4c/plans/nightly/nightly-c10s/data/results/
+
+Index of /2647213f-ac56-4fbf-8482-adc7cfd93a5f/work-nightly-c10srmobb_4c/plans/nightly/nightly-c10s/data/results/
../
+
+
diff --git a/daily_tests/tests/results.xml b/daily_tests/tests/results.xml
new file mode 100644
index 0000000..d02c66e
--- /dev/null
+++ b/daily_tests/tests/results.xml
@@ -0,0 +1,61 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/daily_tests/tests/test_show_logs.py b/daily_tests/tests/test_show_logs.py
index 9a97760..c32f048 100644
--- a/daily_tests/tests/test_show_logs.py
+++ b/daily_tests/tests/test_show_logs.py
@@ -53,17 +53,6 @@ def test_return_failed_tests_finds_logs(tmp_path):
assert log_file not in results
-def test_iter_over_executed_tests_no_failures(report_env, capsys):
- report, _, _ = report_env
- report.scl_tests_dir.mkdir(parents=True)
- (report.scl_tests_dir / "c9s-test").mkdir()
-
- report.iter_over_executed_tests()
- output = capsys.readouterr().out
-
- assert "No container test failures found in" in output
-
-
def test_show_all_available_tests_lists_dirs(report_env, capsys):
report, reports_dir, _ = report_env
reports_dir.mkdir()
@@ -85,7 +74,6 @@ def test_print_report_missing_directories(report_env, capsys):
report.print_report()
output = capsys.readouterr().out
- assert "Tests were not executed yet." in output
assert "Tests were not finished yet." in output
diff --git a/daily_tests/tests/tmt_log_output b/daily_tests/tests/tmt_log_output
new file mode 100644
index 0000000..8b07e6c
--- /dev/null
+++ b/daily_tests/tests/tmt_log_output
@@ -0,0 +1,9 @@
+Wed Feb 18 09:52:08 UTC 2026
+TARGET is: c10s and test is: tests
+📦 repository https://github.com/sclorg/sclorg-testing-farm ref main test-type fmf
+💻 CentOS-Stream-10 on x86_64
+🔎 api https://api.dev.testing-farm.io/v0.1/requests/062cb820-8519-4e7b-adf1-30e9f42d3c8f
+💡 waiting for request to finish, use ctrl+c to skip
+👶 request is waiting to be queued
+🚀 request is running
+🚢 artifacts https://artifacts.dev.testing-farm.io/062cb820-8519-4e7b-adf1-30e9f42d3c8f
diff --git a/requirements.txt b/requirements.txt
index b516b66..e66fbf4 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,3 +2,5 @@ requests
PyYAML
urllib3
GitPython
+slack_sdk
+xmltodict
diff --git a/run_nightly_tests.sh b/run_nightly_tests.sh
index 1314f32..9b712f9 100755
--- a/run_nightly_tests.sh
+++ b/run_nightly_tests.sh
@@ -15,10 +15,12 @@ else
exit 1
fi
+cd "/root/ci-scripts/"
+
# Local working directories
CUR_DATE=$(date +%Y-%m-%d)
WORK_DIR="${HOME}/ci-scripts"
-LOCAL_LOGS_DIR="${HOME}/logs/"
+LOCAL_LOGS_DIR="${HOME}/logs"
# Shared directories between runs
DAILY_REPORTS_DIR="${SHARED_DIR}/daily_reports_dir/${CUR_DATE}"
@@ -32,82 +34,90 @@ LOG_FILE="${LOCAL_LOGS_DIR}/${TARGET}-${TESTS}.log"
export USER_ID=$(id -u)
export GROUP_ID=$(id -g)
-
-function generate_passwd_file() {
- grep -v ^ci-scripts /etc/passwd > "$HOME/passwd"
- echo "ci-scripts:x:${USER_ID}:${GROUP_ID}:User for running ci-scripts:${HOME}:/bin/bash" >> "$HOME/passwd"
- export LD_PRELOAD=libnss_wrapper.so
- export NSS_WRAPPER_PASSWD=${HOME}/passwd
- export NSS_WRAPPER_GROUP=/etc/group
-}
-
+API_KEY="API_KEY_PRIVATE"
+# function generate_passwd_file() {
+# grep -v ^ci-scripts /etc/passwd > "$HOME/passwd"
+# echo "ci-scripts:x:${USER_ID}:${GROUP_ID}:User for running ci-scripts:${HOME}:/bin/bash" >> "$HOME/passwd"
+# export LD_PRELOAD=libnss_wrapper.so
+# export NSS_WRAPPER_PASSWD=${HOME}/passwd
+# export NSS_WRAPPER_GROUP=/etc/group
+# }
+BRANCH="master"
function prepare_environment() {
mkdir -p "${LOCAL_LOGS_DIR}"
mkdir -p "${WORK_DIR}"
mkdir -p "${DIR}"
- mkdir -p "${DAILY_REPORTS_TESTS_DIR}/plans/${TFT_PLAN}/data/results"
+ mkdir -p "${DAILY_REPORTS_TESTS_DIR}/results"
mkdir -p "${DAILY_SCLORG_TESTS_DIR}"
}
function get_compose() {
if [[ "$TARGET" == "rhel8" ]]; then
- COMPOSE="1MT-RHEL-8.10.0-updates"
- TMT_PLAN_DIR="$DOWNSTREAM_TMT_DIR"
+ # COMPOSE="1MT-RHEL-8.10.0-updates"
+ COMPOSE="RHEL-8.10.0-Nightly"
+ TMT_PLAN_URL="$DOWNSTREAM_TMT_REPO"
elif [[ "$TARGET" == "rhel9" ]]; then
- COMPOSE="1MT-RHEL-9.6.0-updates"
- TMT_PLAN_DIR="$DOWNSTREAM_TMT_DIR"
+ # COMPOSE="1MT-RHEL-9.6.0-updates"
+ COMPOSE="RHEL-9.6.0-Nightly"
+ TMT_PLAN_URL="$DOWNSTREAM_TMT_REPO"
elif [[ "$TARGET" == "rhel10" ]]; then
- COMPOSE="1MT-RHEL-10.0"
- TMT_PLAN_DIR="$DOWNSTREAM_TMT_DIR"
+ # COMPOSE="1MT-RHEL-10.0"
+ COMPOSE="RHEL-10-Nightly"
+ TMT_PLAN_URL="$DOWNSTREAM_TMT_REPO"
elif [[ "$TARGET" == "fedora" ]]; then
- COMPOSE="1MT-Fedora-${VERSION}"
- TMT_PLAN_DIR="$UPSTREAM_TMT_DIR"
+ # COMPOSE="1MT-Fedora-${VERSION}"
+ COMPOSE="Fedora-latest"
+ TMT_PLAN_URL="$UPSTREAM_TMT_REPO"
TFT_PLAN="nightly/nightly-fedora"
+ API_KEY="API_KEY_PUBLIC"
+ BRANCH="main"
elif [[ "$TARGET" == "c9s" ]]; then
- COMPOSE="1MT-CentOS-Stream-9"
- TMT_PLAN_DIR="$UPSTREAM_TMT_DIR"
+ # COMPOSE="1MT-CentOS-Stream-9"
+ COMPOSE="CentOS-Stream-9"
+ TMT_PLAN_URL="$UPSTREAM_TMT_REPO"
TFT_PLAN="nightly/nightly-c9s"
+ API_KEY="API_KEY_PUBLIC"
+ BRANCH="main"
elif [[ "$TARGET" == "c10s" ]]; then
- COMPOSE="1MT-CentOS-Stream-10"
- TMT_PLAN_DIR="$UPSTREAM_TMT_DIR"
+ # COMPOSE="1MT-CentOS-Stream-10"
+ COMPOSE="CentOS-Stream-10"
+ TMT_PLAN_URL="$UPSTREAM_TMT_REPO"
TFT_PLAN="nightly/nightly-c10s"
+ API_KEY="API_KEY_PUBLIC"
+ BRANCH="main"
else
echo "This target is not supported"
exit 1
fi
- COMPOSE=$(tmt -q run provision -h minute --list-images | grep $COMPOSE | head -n 1 | tr -d '[:space:]')
- export COMPOSE
+ # COMPOSE=$(tmt -q run provision -h minute --list-images | grep $COMPOSE | head -n 1 | tr -d '[:space:]')
+ # export COMPOSE
+ # export TMT_PLAN
}
function run_tests() {
- # -e CI=true is set for NodeJS Upstream tests
- ENV_VARIABLES="-e DEBUG=yes -e OS=$TARGET -e TEST=$TESTS"
- TMT_COMMAND="tmt run -v -v -d -d --all ${ENV_VARIABLES} --id ${DIR} plan --name $TFT_PLAN provision -v -v --how minute --auto-select-network --image ${COMPOSE}"
- echo "TMT command is: $TMT_COMMAND" | tee -a "${LOG_FILE}"
+    # NOTE(review): 'env' dump removed — environment may contain secrets
touch "${DAILY_SCLORG_TESTS_DIR}/tmt_running"
- set -o pipefail
- $TMT_COMMAND | tee -a "${LOG_FILE}"
+    # Extract the token without echoing the secret into the CI logs
+    export TESTING_FARM_API_TOKEN=$(grep "$API_KEY" "$HOME/fmf_data" | cut -d '=' -f2)
+ TESTING_FARM_CMD="testing-farm request --compose ${COMPOSE} -e OS=${TARGET} -e TEST=${TESTS} --git-url ${TMT_PLAN_URL} --git-ref ${BRANCH} --plan ${TFT_PLAN} --duration 240"
+ $TESTING_FARM_CMD | tee -a "${LOG_FILE}"
ret_code=$?
- set +o pipefail
rm -f "${DAILY_SCLORG_TESTS_DIR}/tmt_running"
+ # Let's sleep 5 minutes to let the testing farm to start the tests and generate some logs
+ sleep 300
if [[ $ret_code -ne 0 ]]; then
- echo "TMT command $TMT_COMMAND has failed."
+ echo "Testing Farm command $TESTING_FARM_CMD has failed."
touch "${DAILY_REPORTS_TESTS_DIR}/tmt_failed"
- else
- touch "${DAILY_REPORTS_TESTS_DIR}/tmt_success"
- fi
- if [[ -d "${DIR}/plans/${TFT_PLAN}/execute/date/guest/" ]]; then
- cp -rv "${DIR}/plans/${TFT_PLAN}/execute/date/guest/" "${DAILY_SCLORG_TESTS_DIR}/"
fi
- cp -r "${DIR}/*" "${DAILY_SCLORG_TESTS_DIR}/"
- cp "${LOG_FILE}" "${DAILY_SCLORG_TESTS_DIR}/log_${TARGET}_${TESTS}.txt"
- if [[ -d "${DIR}/plans/${TFT_PLAN}/data" ]]; then
- ls -laR "${DIR}/plans/${TFT_PLAN}/data/" > "$DAILY_SCLORG_TESTS_DIR/all_files_${TARGET}_${TESTS}.txt"
- cp -rv "${DIR}/plans/${TFT_PLAN}/data/results" "${DAILY_REPORTS_TESTS_DIR}/plans/${TFT_PLAN}/data/"
- cp -v "${DIR}/plans/${TFT_PLAN}/data/*.log" "${DAILY_REPORTS_TESTS_DIR}/plans/${TFT_PLAN}/data/"
+    # Quiet check: the testing-farm CLI prints "tests passed" on success
+    if grep -q "tests passed" "${LOG_FILE}"; then
+ touch "${DAILY_REPORTS_TESTS_DIR}/tmt_success"
+ else
+ touch "${DAILY_REPORTS_TESTS_DIR}/tmt_failed"
fi
- cp "${DIR}/log.txt" "${DAILY_REPORTS_TESTS_DIR}/"
+ cp "${LOG_FILE}" "${DAILY_REPORTS_TESTS_DIR}/testing_farm_${TARGET}_${TESTS}.txt"
+ python3 /root/ci-scripts/daily_tests/download_logs.py "${LOG_FILE}" "${TARGET}" "${TESTS}"
}
if [[ "$TESTS" != "test" ]] && [[ "$TESTS" != "test-pytest" ]] && [[ "$TESTS" != "test-upstream" ]] && [[ "$TESTS" != "test-openshift-pytest" ]] && [[ "$TESTS" != "test-openshift-4" ]]; then
@@ -118,19 +128,15 @@ fi
CWD=$(pwd)
cd "$HOME" || { echo "Could not switch to $HOME"; exit 1; }
-generate_passwd_file
+#generate_passwd_file
prepare_environment
get_compose
date > "${LOG_FILE}"
-curl --insecure -L https://url.corp.redhat.com/fmf-data > "/tmp/fmf_data"
-source "/tmp/fmf_data"
env
-echo "Switching to $WORK_DIR/$TMT_PLAN_DIR"
-cd "$WORK_DIR/$TMT_PLAN_DIR" || { echo "Could not switch to $WORK_DIR/$TMT_PLAN_DIR"; exit 1; }
echo "TARGET is: ${TARGET} and test is: ${TESTS}" | tee -a "${LOG_FILE}"
run_tests