From 63a3759eb93211a469948c441aaafcf12e7bc28b Mon Sep 17 00:00:00 2001 From: Igor Bezukh Date: Wed, 18 Dec 2024 14:31:23 +0200 Subject: [PATCH 1/3] add scripts and target for kubevirtci bump Signed-off-by: Igor Bezukh --- Makefile | 6 ++++- hack/bump-kubevirtci.sh | 10 ++++++++ hack/config.sh | 1 + hack/sync-kubevirtci.sh | 57 +++++++++++++++++++++++++++++++++++++++++ 4 files changed, 73 insertions(+), 1 deletion(-) create mode 100755 hack/bump-kubevirtci.sh create mode 100755 hack/config.sh create mode 100755 hack/sync-kubevirtci.sh diff --git a/Makefile b/Makefile index 306a9da1..f67855cd 100755 --- a/Makefile +++ b/Makefile @@ -21,7 +21,8 @@ goveralls \ release-description \ bazel-build-images push-images \ - fossa + fossa \ + bump-kubevirtci all: build build: wasp manifest-generator @@ -89,3 +90,6 @@ fmt: run: build sudo ./wasp + +bump-kubevirtci: + ./hack/bump-kubevirtci.sh \ No newline at end of file diff --git a/hack/bump-kubevirtci.sh b/hack/bump-kubevirtci.sh new file mode 100755 index 00000000..db3b7b6c --- /dev/null +++ b/hack/bump-kubevirtci.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +set -ex + +source $(dirname "$0")/config.sh + +val=$(curl -L https://storage.googleapis.com/kubevirt-prow/release/kubevirt/kubevirtci/latest) +sed -i "/^[[:blank:]]*kubevirtci_git_hash[[:blank:]]*=/s/=.*/=\"${val}\"/" hack/config.sh + +hack/sync-kubevirtci.sh diff --git a/hack/config.sh b/hack/config.sh new file mode 100755 index 00000000..0e334cc5 --- /dev/null +++ b/hack/config.sh @@ -0,0 +1 @@ +kubevirtci_git_hash="2412171619-fbd31717" \ No newline at end of file diff --git a/hack/sync-kubevirtci.sh b/hack/sync-kubevirtci.sh new file mode 100755 index 00000000..6bf0a47f --- /dev/null +++ b/hack/sync-kubevirtci.sh @@ -0,0 +1,57 @@ +#!/bin/bash + +set -ex + +# Required for kubevirtci_git_hash +source $(dirname "$0")/config.sh + +WASP_DIR="$( + cd "$(dirname "$BASH_SOURCE[0]")/../" + pwd +)" + +# update cluster-up if needed +version_file="kubevirtci/cluster-up/version.txt" 
+sha_file="kubevirtci/cluster-up-sha.txt" +download_cluster_up=true +function getClusterUpShasum() { + ( + cd ${WASP_DIR} + # We use LC_ALL=C to make sort canonical between machines, this is + # from sort man page [1]: + # ``` + # *** WARNING *** The locale specified by the environment affects sort + # order. Set LC_ALL=C to get the traditional sort order that uses + # native byte values. + # ``` + # [1] https://man7.org/linux/man-pages/man1/sort.1.html + find kubevirtci/cluster-up -type f | LC_ALL=C sort | xargs sha1sum | sha1sum | awk '{print $1}' + ) +} + +# check if we got a new cluster-up git commit hash +if [[ -f "${version_file}" ]] && [[ $(cat ${version_file}) == ${kubevirtci_git_hash} ]]; then + # check if files are modified + current_sha=$(getClusterUpShasum) + if [[ -f "${sha_file}" ]] && [[ $(cat ${sha_file}) == ${current_sha} ]]; then + echo "cluster-up is up to date and not modified" + download_cluster_up=false + else + echo "cluster-up was modified" + fi +else + echo "cluster-up git commit hash was updated" +fi +if [[ "$download_cluster_up" == true ]]; then + echo "downloading cluster-up" + rm -rf kubevirtci/cluster-up + ( + cd kubevirtci + curl --fail -L https://github.com/kubevirt/kubevirtci/archive/refs/tags/${kubevirtci_git_hash}.tar.gz | tar xz kubevirtci-${kubevirtci_git_hash}/cluster-up --strip-component 1 + ) + + echo ${kubevirtci_git_hash} >${version_file} + new_sha=$(getClusterUpShasum) + echo ${new_sha} >${sha_file} + echo "KUBEVIRTCI_TAG=${kubevirtci_git_hash}" >>kubevirtci/cluster-up/hack/common.sh +fi From 34e65110d24f0df2f225f6ff09e2493cdd5d41fd Mon Sep 17 00:00:00 2001 From: Igor Bezukh Date: Wed, 18 Dec 2024 14:37:28 +0200 Subject: [PATCH 2/3] initial from-scratch bump of kubevirtci Signed-off-by: Igor Bezukh --- kubevirtci/cluster-up-sha.txt | 1 + kubevirtci/cluster-up/.gitignore | 1 + kubevirtci/cluster-up/README.md | 36 ++ kubevirtci/cluster-up/check.sh | 60 +++ kubevirtci/cluster-up/cli.sh | 35 ++ 
kubevirtci/cluster-up/cluster/K8S.md | 103 +++++ .../cluster-up/cluster/K8S_AUTOMATION.md | 12 + .../cluster-up/cluster/K8S_DEV_GUIDE.md | 160 ++++++++ kubevirtci/cluster-up/cluster/README_VGPU.md | 44 +++ .../cluster/ephemeral-provider-common.sh | 259 +++++++++++++ .../cluster-up/cluster/external/README.md | 18 + .../cluster-up/cluster/external/provider.sh | 54 +++ .../cluster-up/cluster/k8s-1.29/provider.sh | 9 + .../cluster-up/cluster/k8s-1.30/provider.sh | 9 + .../cluster-up/cluster/k8s-1.31/provider.sh | 9 + .../cluster-up/cluster/k8s-1.32/provider.sh | 9 + .../cluster-up/cluster/k8s-provider-common.sh | 106 ++++++ .../cluster-up/cluster/kind-1.28/README.md | 44 +++ .../cluster/kind-1.28/conformance.json | 47 +++ kubevirtci/cluster-up/cluster/kind-1.28/image | 1 + .../cluster-up/cluster/kind-1.28/provider.sh | 53 +++ .../cluster-up/cluster/kind-1.28/version | 1 + .../kind-1.30-vgpu/config_vgpu_cluster.sh | 19 + .../cluster/kind-1.30-vgpu/conformance.json | 47 +++ .../cluster-up/cluster/kind-1.30-vgpu/image | 1 + .../cluster/kind-1.30-vgpu/provider.sh | 58 +++ .../cluster-up/cluster/kind-1.30-vgpu/version | 1 + .../cluster/kind-1.30-vgpu/vgpu-node/node.sh | 32 ++ .../cluster-up/cluster/kind-1.31/README.md | 44 +++ .../cluster/kind-1.31/conformance.json | 47 +++ kubevirtci/cluster-up/cluster/kind-1.31/image | 1 + .../cluster-up/cluster/kind-1.31/provider.sh | 53 +++ .../cluster-up/cluster/kind-1.31/version | 1 + .../cluster-up/cluster/kind-ovn/README.md | 29 ++ .../cluster/kind-ovn/install-ovn.sh | 47 +++ .../cluster-up/cluster/kind-ovn/provider.sh | 88 +++++ .../cluster-up/cluster/kind-sriov/README.md | 100 +++++ .../cluster/kind-sriov/TROUBLESHOOTING.md | 60 +++ .../kind-sriov/config_sriov_cluster.sh | 73 ++++ .../cluster/kind-sriov/conformance.json | 47 +++ .../cluster-up/cluster/kind-sriov/image | 1 + .../cluster-up/cluster/kind-sriov/provider.sh | 78 ++++ .../manifests/kustomization.yaml | 27 ++ .../manifests/multus/kustomization.yaml | 14 + 
.../manifests/multus/multus.yaml | 206 ++++++++++ .../manifests/multus/patch-args.yaml | 6 + .../manifests/patch-node-selector.yaml.in | 3 + .../patch-sriovdp-resource-prefix.yaml.in | 3 + .../manifests/sriov-cni-daemonset.yaml | 51 +++ .../sriov-components/manifests/sriov-ns.yaml | 4 + .../manifests/sriovdp-config.yaml.in | 17 + .../manifests/sriovdp-daemonset.yaml | 221 +++++++++++ .../sriov-components/sriov_components.sh | 212 +++++++++++ .../kind-sriov/sriov-node/configure_vfs.sh | 103 +++++ .../cluster/kind-sriov/sriov-node/node.sh | 124 +++++++ .../cluster/kind-sriov/sriovdp_setup.sh | 42 +++ .../cluster-up/cluster/kind-sriov/version | 1 + kubevirtci/cluster-up/cluster/kind/README.md | 13 + .../cluster-up/cluster/kind/bump-kind.sh | 47 +++ .../cluster/kind/check-cluster-up.sh | 83 +++++ kubevirtci/cluster-up/cluster/kind/common.sh | 351 ++++++++++++++++++ .../cluster/kind/configure-registry-proxy.sh | 40 ++ .../cluster/kind/manifests/kind.yaml | 8 + .../cluster/kind/manifests/local-volume.yaml | 130 +++++++ kubevirtci/cluster-up/down.sh | 12 + kubevirtci/cluster-up/hack/common.key | 27 ++ kubevirtci/cluster-up/hack/common.sh | 58 +++ kubevirtci/cluster-up/hack/config-default.sh | 4 + kubevirtci/cluster-up/hack/config.sh | 10 + kubevirtci/cluster-up/kubeconfig.sh | 31 ++ kubevirtci/cluster-up/kubectl.sh | 40 ++ kubevirtci/cluster-up/ssh.sh | 30 ++ kubevirtci/cluster-up/up.sh | 38 ++ kubevirtci/cluster-up/version.txt | 1 + kubevirtci/cluster-up/virtctl.sh | 52 +++ 75 files changed, 3907 insertions(+) create mode 100644 kubevirtci/cluster-up-sha.txt create mode 100644 kubevirtci/cluster-up/.gitignore create mode 100644 kubevirtci/cluster-up/README.md create mode 100755 kubevirtci/cluster-up/check.sh create mode 100755 kubevirtci/cluster-up/cli.sh create mode 100644 kubevirtci/cluster-up/cluster/K8S.md create mode 100644 kubevirtci/cluster-up/cluster/K8S_AUTOMATION.md create mode 100644 kubevirtci/cluster-up/cluster/K8S_DEV_GUIDE.md create mode 100644 
kubevirtci/cluster-up/cluster/README_VGPU.md create mode 100644 kubevirtci/cluster-up/cluster/ephemeral-provider-common.sh create mode 100644 kubevirtci/cluster-up/cluster/external/README.md create mode 100644 kubevirtci/cluster-up/cluster/external/provider.sh create mode 100644 kubevirtci/cluster-up/cluster/k8s-1.29/provider.sh create mode 100644 kubevirtci/cluster-up/cluster/k8s-1.30/provider.sh create mode 100644 kubevirtci/cluster-up/cluster/k8s-1.31/provider.sh create mode 100644 kubevirtci/cluster-up/cluster/k8s-1.32/provider.sh create mode 100644 kubevirtci/cluster-up/cluster/k8s-provider-common.sh create mode 100644 kubevirtci/cluster-up/cluster/kind-1.28/README.md create mode 100644 kubevirtci/cluster-up/cluster/kind-1.28/conformance.json create mode 100644 kubevirtci/cluster-up/cluster/kind-1.28/image create mode 100755 kubevirtci/cluster-up/cluster/kind-1.28/provider.sh create mode 100644 kubevirtci/cluster-up/cluster/kind-1.28/version create mode 100755 kubevirtci/cluster-up/cluster/kind-1.30-vgpu/config_vgpu_cluster.sh create mode 100644 kubevirtci/cluster-up/cluster/kind-1.30-vgpu/conformance.json create mode 100644 kubevirtci/cluster-up/cluster/kind-1.30-vgpu/image create mode 100755 kubevirtci/cluster-up/cluster/kind-1.30-vgpu/provider.sh create mode 100644 kubevirtci/cluster-up/cluster/kind-1.30-vgpu/version create mode 100644 kubevirtci/cluster-up/cluster/kind-1.30-vgpu/vgpu-node/node.sh create mode 100644 kubevirtci/cluster-up/cluster/kind-1.31/README.md create mode 100644 kubevirtci/cluster-up/cluster/kind-1.31/conformance.json create mode 100644 kubevirtci/cluster-up/cluster/kind-1.31/image create mode 100755 kubevirtci/cluster-up/cluster/kind-1.31/provider.sh create mode 100644 kubevirtci/cluster-up/cluster/kind-1.31/version create mode 100644 kubevirtci/cluster-up/cluster/kind-ovn/README.md create mode 100755 kubevirtci/cluster-up/cluster/kind-ovn/install-ovn.sh create mode 100755 kubevirtci/cluster-up/cluster/kind-ovn/provider.sh create mode 
100644 kubevirtci/cluster-up/cluster/kind-sriov/README.md create mode 100644 kubevirtci/cluster-up/cluster/kind-sriov/TROUBLESHOOTING.md create mode 100755 kubevirtci/cluster-up/cluster/kind-sriov/config_sriov_cluster.sh create mode 100644 kubevirtci/cluster-up/cluster/kind-sriov/conformance.json create mode 100644 kubevirtci/cluster-up/cluster/kind-sriov/image create mode 100755 kubevirtci/cluster-up/cluster/kind-sriov/provider.sh create mode 100644 kubevirtci/cluster-up/cluster/kind-sriov/sriov-components/manifests/kustomization.yaml create mode 100644 kubevirtci/cluster-up/cluster/kind-sriov/sriov-components/manifests/multus/kustomization.yaml create mode 100644 kubevirtci/cluster-up/cluster/kind-sriov/sriov-components/manifests/multus/multus.yaml create mode 100644 kubevirtci/cluster-up/cluster/kind-sriov/sriov-components/manifests/multus/patch-args.yaml create mode 100644 kubevirtci/cluster-up/cluster/kind-sriov/sriov-components/manifests/patch-node-selector.yaml.in create mode 100644 kubevirtci/cluster-up/cluster/kind-sriov/sriov-components/manifests/patch-sriovdp-resource-prefix.yaml.in create mode 100644 kubevirtci/cluster-up/cluster/kind-sriov/sriov-components/manifests/sriov-cni-daemonset.yaml create mode 100644 kubevirtci/cluster-up/cluster/kind-sriov/sriov-components/manifests/sriov-ns.yaml create mode 100644 kubevirtci/cluster-up/cluster/kind-sriov/sriov-components/manifests/sriovdp-config.yaml.in create mode 100644 kubevirtci/cluster-up/cluster/kind-sriov/sriov-components/manifests/sriovdp-daemonset.yaml create mode 100644 kubevirtci/cluster-up/cluster/kind-sriov/sriov-components/sriov_components.sh create mode 100755 kubevirtci/cluster-up/cluster/kind-sriov/sriov-node/configure_vfs.sh create mode 100644 kubevirtci/cluster-up/cluster/kind-sriov/sriov-node/node.sh create mode 100755 kubevirtci/cluster-up/cluster/kind-sriov/sriovdp_setup.sh create mode 100644 kubevirtci/cluster-up/cluster/kind-sriov/version create mode 100644 
kubevirtci/cluster-up/cluster/kind/README.md create mode 100755 kubevirtci/cluster-up/cluster/kind/bump-kind.sh create mode 100755 kubevirtci/cluster-up/cluster/kind/check-cluster-up.sh create mode 100755 kubevirtci/cluster-up/cluster/kind/common.sh create mode 100755 kubevirtci/cluster-up/cluster/kind/configure-registry-proxy.sh create mode 100644 kubevirtci/cluster-up/cluster/kind/manifests/kind.yaml create mode 100644 kubevirtci/cluster-up/cluster/kind/manifests/local-volume.yaml create mode 100755 kubevirtci/cluster-up/down.sh create mode 100644 kubevirtci/cluster-up/hack/common.key create mode 100644 kubevirtci/cluster-up/hack/common.sh create mode 100644 kubevirtci/cluster-up/hack/config-default.sh create mode 100644 kubevirtci/cluster-up/hack/config.sh create mode 100755 kubevirtci/cluster-up/kubeconfig.sh create mode 100755 kubevirtci/cluster-up/kubectl.sh create mode 100755 kubevirtci/cluster-up/ssh.sh create mode 100755 kubevirtci/cluster-up/up.sh create mode 100644 kubevirtci/cluster-up/version.txt create mode 100755 kubevirtci/cluster-up/virtctl.sh diff --git a/kubevirtci/cluster-up-sha.txt b/kubevirtci/cluster-up-sha.txt new file mode 100644 index 00000000..6110efb5 --- /dev/null +++ b/kubevirtci/cluster-up-sha.txt @@ -0,0 +1 @@ +c8e41b0449bf6b53024d0f1b6eb4f07a7795e9ee diff --git a/kubevirtci/cluster-up/.gitignore b/kubevirtci/cluster-up/.gitignore new file mode 100644 index 00000000..bdc1c789 --- /dev/null +++ b/kubevirtci/cluster-up/.gitignore @@ -0,0 +1 @@ +cluster/kind-k8s-sriov*/certcreator/*.cert diff --git a/kubevirtci/cluster-up/README.md b/kubevirtci/cluster-up/README.md new file mode 100644 index 00000000..a5c20347 --- /dev/null +++ b/kubevirtci/cluster-up/README.md @@ -0,0 +1,36 @@ +# cluster-up + +## Prerequisites: podman or docker + +cluster-up requires that either podman or docker be installed on the host. 
+ +If podman is being used, it is also necessary to enable podman socket with: + +``` +sudo systemctl enable podman.socket +sudo systemctl start podman.socket +``` + +for more information see: + +https://github.com/kubevirt/kubevirtci/blob/main/PODMAN.md + + +## How to use cluster-up + +This directory provides a wrapper around gocli. It can be vendored into other +git repos and integrated to provide in the kubevirt well-known cluster commands +like `make cluster-up` and `make cluster-down`. + +In order to properly use it, one has to vendor this folder from a git tag, +which can be found on the github release page. + +Then, before calling one of the make targets, the environment variable +`KUBEVIRTCI_TAG` must be exported and set to the tag which was used to vendor +kubevirtci. It allow the content to find the right `gocli` version. + +``` +export KUBEVIRTCI_TAG=`curl -L -Ss https://storage.googleapis.com/kubevirt-prow/release/kubevirt/kubevirtci/latest` +``` + +Find more kubevirtci tags at https://quay.io/repository/kubevirtci/gocli?tab=tags. diff --git a/kubevirtci/cluster-up/check.sh b/kubevirtci/cluster-up/check.sh new file mode 100755 index 00000000..b285e492 --- /dev/null +++ b/kubevirtci/cluster-up/check.sh @@ -0,0 +1,60 @@ +#!/usr/bin/env bash +# +# This file is part of the KubeVirt project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Copyright 2019 Red Hat, Inc. +# + +set -e +if [ ! 
-c /dev/kvm ]; then + echo "[ERR ] missing /dev/kvm" +else + echo "[ OK ] found /dev/kvm" +fi + +KVM_ARCH="" +KVM_NESTED="unknown" +KVM_HPAGE="unknown" +if [ -f "/sys/module/kvm_intel/parameters/nested" ]; then + KVM_NESTED=$( cat /sys/module/kvm_intel/parameters/nested ) + KVM_ARCH="intel" +elif [ -f "/sys/module/kvm_amd/parameters/nested" ]; then + KVM_NESTED=$( cat /sys/module/kvm_amd/parameters/nested ) + KVM_ARCH="amd" +elif [ -f "/sys/module/kvm/parameters/nested" ]; then + KVM_NESTED=$( cat /sys/module/kvm/parameters/nested ) + KVM_ARCH="s390x" + KVM_HPAGE=$( cat /sys/module/kvm/parameters/hpage ) +fi + +function is_enabled() { + if [ "$1" == "1" ]; then + return 0 + fi + if [ "$1" == "Y" ] || [ "$1" == "y" ]; then + return 0 + fi + return 1 +} + +if is_enabled "$KVM_NESTED"; then + echo "[ OK ] $KVM_ARCH nested virtualization enabled" +else + echo "[ERR ] $KVM_ARCH nested virtualization not enabled" +fi + +if is_enabled "$KVM_HPAGE" && [ "$(uname -m)" = "s390x" ]; then + echo "[ERR ] $KVM_HPAGE KVM hugepage enabled. It needs to be disabled while nested virtualization is enabled for s390x" +fi \ No newline at end of file diff --git a/kubevirtci/cluster-up/cli.sh b/kubevirtci/cluster-up/cli.sh new file mode 100755 index 00000000..4d0cbb3b --- /dev/null +++ b/kubevirtci/cluster-up/cli.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash +# +# This file is part of the KubeVirt project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Copyright 2019 Red Hat, Inc. 
+# + +set -e + +if [ -z "$KUBEVIRTCI_PATH" ]; then + KUBEVIRTCI_PATH="$( + cd "$(dirname "$BASH_SOURCE[0]")/" + echo "$(pwd)/" + )" +fi + +source ${KUBEVIRTCI_PATH}/hack/common.sh + +test -t 1 && USE_TTY="-it" +source ${KUBEVIRTCI_CLUSTER_PATH}/$KUBEVIRT_PROVIDER/provider.sh +source ${KUBEVIRTCI_PATH}/hack/config.sh + +${_cli} --prefix $provider_prefix "$@" diff --git a/kubevirtci/cluster-up/cluster/K8S.md b/kubevirtci/cluster-up/cluster/K8S.md new file mode 100644 index 00000000..847d4f19 --- /dev/null +++ b/kubevirtci/cluster-up/cluster/K8S.md @@ -0,0 +1,103 @@ +# Kubernetes 1.x in ephemeral containers + +Provides a pre-deployed Kubernetes with version 1.x purely in docker +containers with qemu. The provided VMs are completely ephemeral and are +recreated on every cluster restart. + +## Docker registry + +There's a docker registry available which is exposed at `localhost:5000`. + +## Choosing a cluster version + +The env variable `KUBEVIRT_PROVIDER` tells kubevirtci what cluster version to spin up. 
+ +```bash +export KUBEVIRT_PROVIDER=k8s-1.22 # choose kubevirtci provider version by subdirectory name +``` + +## Bringing the cluster up + +```bash +export KUBEVIRT_NUM_NODES=2 # control-plane + one node +make cluster-up +``` + +The cluster can be accessed as usual: + +```bash +$ cluster/kubectl.sh get nodes +NAME STATUS ROLES AGE VERSION +node01 NotReady control-plane 31s v1.22.1 +node02 NotReady 5s v1.22.1 +``` + +Note: for further configuration environment variables please see [cluster-up/hack/common.sh](../hack/common.sh) + +## Bringing the cluster up with cluster-network-addons-operator provisioned + +```bash +export KUBEVIRT_WITH_CNAO=true +make cluster-up +``` + +To get more info about CNAO you can check the github project documentation +here https://github.com/kubevirt/cluster-network-addons-operator + +## Bringing the cluster up with cgroup v2 + +```bash +export KUBEVIRT_CGROUPV2=true +make cluster-up +``` + +## Use slim provider (without pre-pulled images of the optional components such as CDI, CNAO etc) + +```bash +export KUBEVIRT_SLIM=true +make cluster-up +``` + +## Enabling IPv6 connectivity + +In order to be able to reach from the cluster to the host's IPv6 network, IPv6 +has to be enabled on your Docker. Add following to your +`/etc/docker/daemon.json` and restart docker service: + +```json +{ + "ipv6": true, + "fixed-cidr-v6": "2001:db8:1::/64" +} +``` + +```bash +systemctl restart docker +``` + +With an IPv6-connected host, you may want the pods to be able to reach the rest +of the IPv6 world, too. In order to allow that, enable IPv6 NAT on your host: + +```bash +ip6tables -t nat -A POSTROUTING -s 2001:db8:1::/64 -j MASQUERADE +``` + +## Bringing the cluster down + +```bash +make cluster-down +``` + +This destroys the whole cluster. Recreating the cluster is fast, since k8s is +already pre-deployed. The only state which is kept is the state of the local +docker registry. 
+ +## Destroying the docker registry state + +The docker registry survives a `make cluster-down`. It's state is stored in a +docker volume called `kubevirt_registry`. If the volume gets too big or the +volume contains corrupt data, it can be deleted with + +```bash +docker volume rm kubevirt_registry +``` diff --git a/kubevirtci/cluster-up/cluster/K8S_AUTOMATION.md b/kubevirtci/cluster-up/cluster/K8S_AUTOMATION.md new file mode 100644 index 00000000..5317edfa --- /dev/null +++ b/kubevirtci/cluster-up/cluster/K8S_AUTOMATION.md @@ -0,0 +1,12 @@ +# KubeVirtCI K8S providers update automation + +There exist automated steps for creating, updating and integrating k8s providers. These are all described as prow jobs in [project-infra](https://github.com/kubevirt/project-infra/). + +| Trigger | Job | Result | +| ----------- | ----------- | ----------- | +| release of a new kubernetes minor version | [`periodic-kubevirtci-cluster-minorversion-updater`](https://github.com/kubevirt/project-infra/search?q=periodic-kubevirtci-cluster-minorversion-updater) | Creates a new provider for that release | +| release of a new kubernetes minor version | [`periodic-kubevirtci-provider-presubmit-creator`](https://github.com/kubevirt/project-infra/search?q=periodic-kubevirtci-provider-presubmit-creator) | Creates a PR with a new check-provision job to enable testing of the new provider | +| release of a new kubernetes minor version | [`periodic-kubevirt-job-copier`](https://github.com/kubevirt/project-infra/search?q=periodic-kubevirt-job-copier) | Creates a PR with a new set of kubevirt sig jobs to enable testing of kubevirt with the new provider | +| release of new kubernetes patch version | [`periodic-kubevirtci-cluster-patchversion-updater`](https://github.com/kubevirt/project-infra/search?q=periodic-kubevirtci-cluster-patchversion-updater) | Creates a PR that updates the patch version for each KubeVirtCI k8s provider | +| merge to kubevirt/kubevirtci main branch | 
[`periodic-kubevirtci-bump-kubevirt`](https://github.com/kubevirt/project-infra/search?q=periodic-kubevirtci-bump-kubevirt) | Creates a PR to update KubeVirtCI in kubevirt/kubevirt | +| at the start of each month | [`periodic-kubevirt-presubmit-requirer`](https://github.com/kubevirt/project-infra/search?q=periodic-kubevirt-presubmit-requirer) | Checks always_run and optional states of latest kubevirt sig test jobs | diff --git a/kubevirtci/cluster-up/cluster/K8S_DEV_GUIDE.md b/kubevirtci/cluster-up/cluster/K8S_DEV_GUIDE.md new file mode 100644 index 00000000..d2360ade --- /dev/null +++ b/kubevirtci/cluster-up/cluster/K8S_DEV_GUIDE.md @@ -0,0 +1,160 @@ +# kubevirtci K8s provider dev guide. + +Note: in the following scenarios we are using `${KUBEVIRT_PROVIDER_VERSION}` as pointer to the `major.minor` k8s version we are using + +This then can map to any of these folders: +* `cluster-provision/k8s/${KUBEVIRT_PROVIDER_VERSION}` +* `cluster-up/cluster/k8s-${KUBEVIRT_PROVIDER_VERSION}` + +## Creating or updating a provider + +The purpose of kubevirtci is to create pre-provisioned K8s clusters as container images, +allowing people to easily run a K8s cluster. + +The target audience is developers of kubevirtci, who want to create a new provider, or to update an existing one. + +Please refer first to the following documents on how to run k8s-1.x:\ +[k8s-1.x cluster-up](./K8S.md) + +In this doc, we will go on what kubevirtci provider image consists of, what its inner architecture is, +flow of starting a pre-provisioned cluster, flow of creating a new provider, and how to create a new provider. + +A provider includes all the images (K8s base image, nodes OS image) and the scripts that allows it to start a +cluster offline, without downloading / installing / compiling new resources. +Deploying a cluster will create containers, which communicate with each other, in order to act as a K8s cluster. 
+It's a bit different from running bare-metal cluster where the nodes are physical machines or when the nodes are virtual machines on the host itself, +It gives us isolation advantage and state freezing of the needed components, allowing offline deploy, agnostic of the host OS, and installed packages. + +# Project structure +* cluster-provision folder - creating preprovisioned clusters. +* cluster-up folder - spinning up preprovisioned clusters. +* gocli - gocli is a binary that assist in provisioning and spinning up a cluster. sources of gocli are at cluster-provision/gocli. + +# K8s Deployment +Running `make cluster-up` will deploy a pre-provisioned cluster. +Upon finishing deployment of a K8s deploy, we will have 3 containers: +* k8s-${KUBEVIRT_PROVIDER_VERSION} vm container - a container that runs a qemu VM, which is the K8s node, in which the pods will run. +* Registry container - a shared image registry. +* k8s-${KUBEVIRT_PROVIDER_VERSION} dnsmasq container - a container that run dnsmasq, which gives dns and dhcp services. 
+ +The containers are running and look like this: +``` +[root@modi01 1.22.0]# docker ps +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +3589e85efc7d kubevirtci/k8s-1.22.0 "/bin/bash -c '/vm.s…" About an hour ago Up About an hour k8s-1.22.0-node01 +4742dc02add2 registry:2.7.1 "/entrypoint.sh /etc…" About an hour ago Up About an hour k8s-1.22.0-registry +13787e7d4ac9 kubevirtci/k8s-1.22.0 "/bin/bash -c /dnsma…" About an hour ago Up About an hour 127.0.0.1:8443->8443/tcp, 0.0.0.0:32794->2201/tcp, 0.0.0.0:32793->5000/tcp, 0.0.0.0:32792->5901/tcp, 0.0.0.0:32791->6443/tcp k8s-1.22.0-dnsmasq +``` + +Nodes: +``` +[root@modi01 kubevirtci]# oc get nodes +NAME STATUS ROLES AGE VERSION +node01 Ready control-plane 83m v1.22.0 +``` + +# Inner look of a deployed cluster +We can connect to the node of the cluster by: +``` +./cluster-up/ssh.sh node01 +``` + +List the pods +``` +[vagrant@node01 ~]$ sudo crictl pods +POD ID CREATED STATE NAME NAMESPACE ATTEMPT +403513878c8b7 10 minutes ago Ready coredns-6955765f44-m6ckl kube-system 4 +0c3e25e58b9d0 10 minutes ago Ready local-volume-provisioner-fkzgk default 4 +e6d96770770f4 10 minutes ago Ready coredns-6955765f44-mhfgg kube-system 4 +19ad529c78acc 10 minutes ago Ready kube-flannel-ds-amd64-mq5cx kube-system 0 +47acef4276900 10 minutes ago Ready kube-proxy-vtj59 kube-system 0 +df5863c55a52f 11 minutes ago Ready kube-scheduler-node01 kube-system 0 +ca0637d5ac82f 11 minutes ago Ready kube-apiserver-node01 kube-system 0 +f0d90506ce3b8 11 minutes ago Ready kube-controller-manager-node01 kube-system 0 +f873785341215 11 minutes ago Ready etcd-node01 kube-system 0 +``` + +Check kubelet service status +``` +[vagrant@node01 ~]$ systemctl status kubelet +● kubelet.service - kubelet: The Kubernetes Node Agent + Loaded: loaded (/usr/lib/systemd/system/kubelet.service; enabled; vendor preset: disabled) + Drop-In: /usr/lib/systemd/system/kubelet.service.d + └─10-kubeadm.conf + Active: active (running) since Wed 2020-01-15 13:39:54 UTC; 
11min ago + Docs: https://kubernetes.io/docs/ + Main PID: 4294 (kubelet) + CGroup: /system.slice/kubelet.service + ‣ 4294 /usr/bin/kubelet --bootstrap-kubeconfig=/etc/kubernetes/boo... +``` + +Connect to the container that runs the vm: +``` +CONTAINER=$(docker ps | grep vm | awk '{print $1}') +docker exec -it $CONTAINER bash +``` + +From within the container we can see there is a process of qemu which runs the node as a virtual machine. +``` +[root@855de8c8310f /]# ps -ef | grep qemu +root 1 0 36 13:39 ? 00:05:22 qemu-system-x86_64 -enable-kvm -drive format=qcow2,file=/var/run/disk/disk.qcow2,if=virtio,cache=unsafe -device virtio-net-pci,netdev=network0,mac=52:55:00:d1:55:01 -netdev tap,id=network0,ifname=tap01,script=no,downscript=no -device virtio-rng-pci -vnc :01 -cpu host -m 5120M -smp 5 -serial pty +``` + +# Flow of K8s provisioning ${KUBEVIRT_PROVIDER_VERSION} +`cluster-provision/k8s/${KUBEVIRT_PROVIDER_VERSION}/provision.sh` +* Runs the common cluster-provision/k8s/provision.sh. + * Runs cluster-provision/cli/cli (bash script). + * Creates a container for dnsmasq and runs dnsmasq.sh in it. + * Create a container, and runs vm.sh in it. + * Creates a vm using qemu, and checks its ready (according ssh). + * Runs cluster-provision/k8s/scripts/provision.sh in the container. + * Update docker trusted registries. + * Start kubelet service and K8s cluster. + * Enable ip routing. + * Apply additional manifests, such as flannel. + * Wait for pods to become ready. + * Pull needed images such as Ceph CSI. + * Create local volume directories. + * Shutdown the vm and commit its container. 
+ +# Flow of K8s cluster-up ${KUBEVIRT_PROVIDER_VERSION} +Run +``` +export KUBEVIRT_PROVIDER=k8s-${KUBEVIRT_PROVIDER_VERSION} +make cluster-up +``` +* Runs cluster-up/up.sh which sources the following: + * cluster-up/cluster/k8s-${KUBEVIRT_PROVIDER_VERSION}/provider.sh (selected according $KUBEVIRT_PROVIDER), which sources: + * cluster-up/cluster/k8s-provider-common.sh +* Runs `up` (which appears at cluster-up/cluster/k8s-provider-common.sh). +It Triggers `gocli run` - (cluster-provision/gocli/cmd/run.go) which create the following containers: + * Cluster container (that one with the vm from the provisioning, vm.sh is used with parameters here that starts an already created vm). + * Registry. + * Container for dnsmasq (provides dns, dhcp services). + +# Creating new K8s provider +Clone folders of k8s, folder name should be x/y as in the provider name x-y (ie. k8s-${KUBEVIRT_PROVIDER_VERSION}.0) and includes: +* cluster-provision/k8s/${KUBEVIRT_PROVIDER_VERSION}/provision.sh # used to create a new provider +* cluster-provision/k8s/${KUBEVIRT_PROVIDER_VERSION}/publish.sh # used to publish new provider +* cluster-up/cluster/k8s-${KUBEVIRT_PROVIDER_VERSION}/provider.sh # used by cluster-up +* cluster-up/cluster/k8s-${KUBEVIRT_PROVIDER_VERSION}/README.md + +# Example - Adding a new manifest to K8s +* First add the file at cluster-provision/manifests, this folder would be copied to /tmp in the container, +by cluster-provision/cli/cli as part of provision. +* Add this snippet at cluster-provision/k8s/scripts/provision.sh, before "Wait at least for 7 pods" line. +``` +custom_manifest="/tmp/custom_manifest.yaml" +kubectl --kubeconfig=/etc/kubernetes/admin.conf create -f "$custom_manifest" +``` +* Run ./cluster-provision/k8s/${KUBEVIRT_PROVIDER_VERSION}/provision.sh, it will create a new provision and test it. 
+ +# Manual steps for publishing a new provider + +The steps to create, test and integrate a new KubeVirtCI provider are [mostly automated](./K8S_AUTOMATION.md), but just in case you need to do it manually: + +* Run `./cluster-provision/k8s/${KUBEVIRT_PROVIDER_DIR}/publish.sh`, it will publish the new created image to quay.io +* Create a PR with the following files: + * The new manifest. + * Updated `cluster-provision/k8s/scripts/provision.sh` + * Updated `cluster-up/cluster/images.sh`. diff --git a/kubevirtci/cluster-up/cluster/README_VGPU.md b/kubevirtci/cluster-up/cluster/README_VGPU.md new file mode 100644 index 00000000..d262d747 --- /dev/null +++ b/kubevirtci/cluster-up/cluster/README_VGPU.md @@ -0,0 +1,44 @@ +# K8S with mdev support in a Kind cluster + +Provides a pre-deployed k8s cluster that runs using [kind](https://github.com/kubernetes-sigs/kind) The cluster is completely ephemeral and is recreated on every cluster restart. +The KubeVirt containers are built on the local machine and are then pushed to a registry which is exposed at +`localhost:5000`. + +## Bringing the cluster up + +The following needs to be executed as root. + +```bash +export KUBEVIRT_PROVIDER=kind-1.x-vgpu +make cluster-up +``` + +The cluster can be accessed as usual: + +```bash +$ cluster-up/kubectl.sh get nodes +NAME STATUS ROLES AGE VERSION +vgpu-control-plane Ready master 6m14s v1.x.y +``` + +## Bringing the cluster down + +```bash +make cluster-down +``` + +This destroys the whole cluster. + +## Setting a custom kind version + +In order to use a custom kind image / kind version, +export KIND_NODE_IMAGE, KIND_VERSION before running cluster-up. +For example in order to use kind 0.9.0 (which is based on k8s-1.19.1) use: +```bash +export KIND_NODE_IMAGE="kindest/node:v1.19.1@sha256:98cf5288864662e37115e362b23e4369c8c4a408f99cbc06e58ac30ddc721600" +export KIND_VERSION="0.9.0" +``` +This allows users to test or use custom images / different kind versions before making them official. 
+See https://github.com/kubernetes-sigs/kind/releases for details about node images according to the kind version. + +- In order to use `make cluster-down` please make sure the right `CLUSTER_NAME` is exported. diff --git a/kubevirtci/cluster-up/cluster/ephemeral-provider-common.sh b/kubevirtci/cluster-up/cluster/ephemeral-provider-common.sh new file mode 100644 index 00000000..d7d83f45 --- /dev/null +++ b/kubevirtci/cluster-up/cluster/ephemeral-provider-common.sh @@ -0,0 +1,259 @@ +#!/usr/bin/env bash + +set -e + +KUBEVIRT_WITH_ETC_IN_MEMORY=${KUBEVIRT_WITH_ETC_IN_MEMORY:-false} +KUBEVIRT_WITH_ETC_CAPACITY=${KUBEVIRT_WITH_ETC_CAPACITY:-none} +KUBEVIRT_DNS_HOST_PORT=${KUBEVIRT_DNS_HOST_PORT:-31111} + +export KUBEVIRTCI_PODMAN_SOCKET=${KUBEVIRTCI_PODMAN_SOCKET:-"/run/podman/podman.sock"} + +if [ -z "${KUBEVIRTCI_TAG}" ] && [ -z "${KUBEVIRTCI_GOCLI_CONTAINER}" ]; then + >&2 echo "FATAL: either KUBEVIRTCI_TAG or KUBEVIRTCI_GOCLI_CONTAINER must be set" + exit 1 +fi + +if [ -n "${KUBEVIRTCI_TAG}" ] && [ -n "${KUBEVIRTCI_GOCLI_CONTAINER}" ]; then + >&2 echo "WARNING: KUBEVIRTCI_GOCLI_CONTAINER is set and will take precedence over the also set KUBEVIRTCI_TAG" +fi + +detect_podman_socket() { + if curl --unix-socket "${KUBEVIRTCI_PODMAN_SOCKET}" http://d/v3.0.0/libpod/info >/dev/null 2>&1; then + echo "${KUBEVIRTCI_PODMAN_SOCKET}" + fi +} + +if [ "${KUBEVIRTCI_RUNTIME}" = "podman" ]; then + _cri_socket=$(detect_podman_socket) + _cri_bin="podman --remote --url=unix://$_cri_socket" +elif [ "${KUBEVIRTCI_RUNTIME}" = "docker" ]; then + _cri_bin=docker + _cri_socket="/var/run/docker.sock" +else + _cri_socket=$(detect_podman_socket) + if [ -n "$_cri_socket" ]; then + _cri_bin="podman --remote --url=unix://$_cri_socket" + >&2 echo "selecting podman as container runtime" + elif docker ps >/dev/null 2>&1; then + _cri_bin=docker + _cri_socket="/var/run/docker.sock" + >&2 echo "selecting docker as container runtime" + else + >&2 echo "no working container runtime found. 
Neither docker nor podman seems to work." + exit 1 + fi +fi + +_cli_container="${KUBEVIRTCI_GOCLI_CONTAINER:-quay.io/kubevirtci/gocli:${KUBEVIRTCI_TAG}}" +_cli="${_cri_bin} run --privileged --net=host --rm ${USE_TTY} -v ${_cri_socket}:/var/run/docker.sock" +# gocli will try to mount /lib/modules to make it accessible to dnsmasq in +# in case it exists +if [ -d /lib/modules ]; then + _cli="${_cli} -v /lib/modules/:/lib/modules/" +fi + +# Workaround https://github.com/containers/conmon/issues/315 by not dumping file content to stdout +if [[ ${_cri_bin} = podman* ]]; then + _cli="${_cli} -v ${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER:/kubevirtci_config" +fi + +_cli="${_cli} ${_cli_container}" + +function _main_ip() { + echo 127.0.0.1 +} + +function _port() { + # shellcheck disable=SC2154 + ${_cli} ports --prefix $provider_prefix "$@" +} + +function prepare_config() { + BASE_PATH=${KUBEVIRTCI_CONFIG_PATH:-$PWD} + cat >$BASE_PATH/$KUBEVIRT_PROVIDER/config-provider-$KUBEVIRT_PROVIDER.sh < "$PROVIDER_CONFIG_FILE_PATH" <> "$PROVIDER_CONFIG_FILE_PATH" + else + if which kubectl; then + echo "kubectl=$(which kubectl)" >> "$PROVIDER_CONFIG_FILE_PATH" + fi + fi + + + if which oc; then + echo "oc=$(which oc)" >> "$PROVIDER_CONFIG_FILE_PATH" + fi + +} + +# The external cluster is assumed to be up. Do a simple check +function up() { + prepare_config + if ! _kubectl version >/dev/null; then + echo -e "\n*** Unable to reach external cluster. 
Please check configuration ***" + echo -e "*** Type \"kubectl config view\" for current settings ***\n" + exit 1 + fi + echo "Cluster is up" +} + +function down() { + echo "Not supported by this provider" +} + diff --git a/kubevirtci/cluster-up/cluster/k8s-1.29/provider.sh b/kubevirtci/cluster-up/cluster/k8s-1.29/provider.sh new file mode 100644 index 00000000..e2bf40cd --- /dev/null +++ b/kubevirtci/cluster-up/cluster/k8s-1.29/provider.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash +set -e + +if [ "${KUBEVIRT_CGROUPV2}" == "true" ]; then + export KUBEVIRT_PROVIDER_EXTRA_ARGS="${KUBEVIRT_PROVIDER_EXTRA_ARGS} --kernel-args='systemd.unified_cgroup_hierarchy=1'" +fi + +# shellcheck disable=SC1090 +source "${KUBEVIRTCI_PATH}/cluster/k8s-provider-common.sh" diff --git a/kubevirtci/cluster-up/cluster/k8s-1.30/provider.sh b/kubevirtci/cluster-up/cluster/k8s-1.30/provider.sh new file mode 100644 index 00000000..e2bf40cd --- /dev/null +++ b/kubevirtci/cluster-up/cluster/k8s-1.30/provider.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash +set -e + +if [ "${KUBEVIRT_CGROUPV2}" == "true" ]; then + export KUBEVIRT_PROVIDER_EXTRA_ARGS="${KUBEVIRT_PROVIDER_EXTRA_ARGS} --kernel-args='systemd.unified_cgroup_hierarchy=1'" +fi + +# shellcheck disable=SC1090 +source "${KUBEVIRTCI_PATH}/cluster/k8s-provider-common.sh" diff --git a/kubevirtci/cluster-up/cluster/k8s-1.31/provider.sh b/kubevirtci/cluster-up/cluster/k8s-1.31/provider.sh new file mode 100644 index 00000000..e2bf40cd --- /dev/null +++ b/kubevirtci/cluster-up/cluster/k8s-1.31/provider.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash +set -e + +if [ "${KUBEVIRT_CGROUPV2}" == "true" ]; then + export KUBEVIRT_PROVIDER_EXTRA_ARGS="${KUBEVIRT_PROVIDER_EXTRA_ARGS} --kernel-args='systemd.unified_cgroup_hierarchy=1'" +fi + +# shellcheck disable=SC1090 +source "${KUBEVIRTCI_PATH}/cluster/k8s-provider-common.sh" diff --git a/kubevirtci/cluster-up/cluster/k8s-1.32/provider.sh b/kubevirtci/cluster-up/cluster/k8s-1.32/provider.sh new file mode 100644 index 
00000000..e2bf40cd --- /dev/null +++ b/kubevirtci/cluster-up/cluster/k8s-1.32/provider.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash +set -e + +if [ "${KUBEVIRT_CGROUPV2}" == "true" ]; then + export KUBEVIRT_PROVIDER_EXTRA_ARGS="${KUBEVIRT_PROVIDER_EXTRA_ARGS} --kernel-args='systemd.unified_cgroup_hierarchy=1'" +fi + +# shellcheck disable=SC1090 +source "${KUBEVIRTCI_PATH}/cluster/k8s-provider-common.sh" diff --git a/kubevirtci/cluster-up/cluster/k8s-provider-common.sh b/kubevirtci/cluster-up/cluster/k8s-provider-common.sh new file mode 100644 index 00000000..c0fcf02d --- /dev/null +++ b/kubevirtci/cluster-up/cluster/k8s-provider-common.sh @@ -0,0 +1,106 @@ +#!/usr/bin/env bash + +set -e + +# shellcheck source=cluster-up/cluster/ephemeral-provider-common.sh +source "${KUBEVIRTCI_PATH}/cluster/ephemeral-provider-common.sh" + + + +function deploy_kwok() { + if [[ ${KUBEVIRT_DEPLOY_KWOK} == "true" ]]; then + $kubectl create -f /opt/kwok/kwok.yaml + $kubectl create -f /opt/kwok/stage-fast.yaml + fi +} + +# copy_istio_cni_conf_files copy the generated Istio CNI net conf file +# (at '/etc/cni/multus/net.d/') to where Multus expect CNI net conf files ('/etc/cni/net.d/') +function copy_istio_cni_conf_files() { + if [ "$KUBEVIRT_DEPLOY_ISTIO" == "true" ] && [ "$KUBEVIRT_WITH_CNAO" == "true" ]; then + for nodeNum in $(seq -f "%02g" 1 $KUBEVIRT_NUM_NODES); do + $ssh node${nodeNum} -- "until ls /etc/cni/multus > /dev/null 2>&1; do sleep 1; done" + $ssh node${nodeNum} -- sudo cp -uv /etc/cni/multus/net.d/*istio*.conf /etc/cni/net.d/ + done + fi +} + +# configure Prometheus to select kubevirt prometheusrules +function configure_prometheus() { + if [[ $KUBEVIRT_DEPLOY_PROMETHEUS == "true" ]] && $kubectl get crd prometheuses.monitoring.coreos.com; then + _kubectl patch prometheus k8s -n monitoring --type='json' -p='[{"op": "replace", "path": "/spec/ruleSelector", "value":{}}, {"op": "replace", "path": "/spec/ruleNamespaceSelector", "value":{"matchLabels": {}}}]' + fi +} + + +function 
wait_for_kwok_ready() { + if [ "KUBEVIRT_DEPLOY_KWOK" == "true" ]; then + $kubectl wait deployment -n kube-system kwok-controller --for condition=Available --timeout=200s + fi +} + +function configure_cpu_manager() { + if [ ${KUBEVIRT_CPU_MANAGER_POLICY} == "static" ]; then + for node in $($kubectl get nodes -l "node-role.kubernetes.io/worker" --no-headers -o custom-columns=":metadata.name" | tr -d '\r'); do + # FIXME Replace with kubelet config drop ins once all providers are using k8s >= 1.28 + # https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/#kubelet-conf-d + $kubectl drain ${node} + $ssh ${node} -- sudo systemctl stop kubelet + # FIXME ${ssh} is broken when using HereDocs, fix and replace this mess if possible. + # https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/#configuration + $ssh ${node} -- "sudo rm -f /var/lib/kubelet/cpu_manager_state && sudo echo -e 'cpuManagerPolicy: static\nkubeReserved:\n cpu: \"1\"\n memory: \"1Gi\"\ncpuManagerPolicyOptions:\n full-pcpus-only: \"true\"' | sudo tee -a /var/lib/kubelet/config.yaml && sudo sed -i 's/cpuManagerReconcilePeriod\:\ 0s/cpuManagerReconcilePeriod\:\ 5s/g' /var/lib/kubelet/config.yaml" + $ssh ${node} -- sudo systemctl start kubelet + $kubectl label --overwrite node/${node} cpumanager=true + $kubectl uncordon ${node} + done + fi +} + +function up() { + params=$(_add_common_params) + if echo "$params" | grep -q ERROR; then + echo -e "$params" + exit 1 + fi + eval ${_cli:?} run $params + + ${_cli} scp --prefix $provider_prefix /etc/kubernetes/admin.conf - >${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/.kubeconfig + + # Set server and disable tls check + export KUBECONFIG=${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/.kubeconfig + kubectl config set-cluster kubernetes --server="https://$(_main_ip):$(_port k8s)" + kubectl config set-cluster kubernetes --insecure-skip-tls-verify=true + + ${_cli} scp --prefix ${provider_prefix:?} /usr/bin/kubectl - 
>${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/.kubectl + + chmod u+x ${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/.kubectl + + # Make sure that local config is correct + prepare_config + ssh="${_cli} --prefix $provider_prefix ssh" + kubectl="$ssh node01 -- sudo kubectl --kubeconfig=/etc/kubernetes/admin.conf" + + # For multinode cluster Label all the non control-plane nodes as workers, + # for one node cluster label control-plane with 'control-plane,worker' roles + if [ "$KUBEVIRT_NUM_NODES" -gt 1 ]; then + label="!node-role.kubernetes.io/control-plane" + else + label="node-role.kubernetes.io/control-plane" + fi + $kubectl label node -l $label node-role.kubernetes.io/worker='' + + configure_prometheus + configure_cpu_manager + + deploy_kwok + + until wait_for_kwok_ready; do + echo "Waiting for cluster components..." + sleep 5 + done + + # FIXME: remove 'copy_istio_cni_conf_files()' as soon as [1] and [2] are resolved + # [1] https://github.com/kubevirt/kubevirtci/issues/906 + # [2] https://github.com/k8snetworkplumbingwg/multus-cni/issues/982 + copy_istio_cni_conf_files +} diff --git a/kubevirtci/cluster-up/cluster/kind-1.28/README.md b/kubevirtci/cluster-up/cluster/kind-1.28/README.md new file mode 100644 index 00000000..bf48a0e4 --- /dev/null +++ b/kubevirtci/cluster-up/cluster/kind-1.28/README.md @@ -0,0 +1,44 @@ +# K8S with mdev support in a Kind cluster + +Provides a pre-deployed k8s cluster that runs using [kind](https://github.com/kubernetes-sigs/kind) The cluster is completely ephemeral and is recreated on every cluster restart. +The KubeVirt containers are built on the local machine and are then pushed to a registry which is exposed at +`localhost:5000`. + +## Bringing the cluster up + +The following needs to be executed as root. Please refer to the name of the directory to get the kind version. 
+ +```bash +export KUBEVIRT_PROVIDER=kind-x.yz +make cluster-up +``` + +The cluster can be accessed as usual: + +```bash +$ cluster-up/kubectl.sh get nodes +NAME STATUS ROLES AGE +kind-x.yz-control-plane Ready master 6m14s +``` + +## Bringing the cluster down + +```bash +make cluster-down +``` + +This destroys the whole cluster. + +## Setting a custom kind version + +In order to use a custom kind image / kind version, +export KIND_NODE_IMAGE, KIND_VERSION before running cluster-up. +For example in order to use kind 0.9.0 (which is based on k8s-1.19.1) use: +```bash +export KIND_NODE_IMAGE="kindest/node:v1.19.1@sha256:98cf5288864662e37115e362b23e4369c8c4a408f99cbc06e58ac30ddc721600" +export KIND_VERSION="0.9.0" +``` +This allows users to test or use custom images / different kind versions before making them official. +See https://github.com/kubernetes-sigs/kind/releases for details about node images according to the kind version. + +- In order to use `make cluster-down` please make sure the right `CLUSTER_NAME` is exported. 
diff --git a/kubevirtci/cluster-up/cluster/kind-1.28/conformance.json b/kubevirtci/cluster-up/cluster/kind-1.28/conformance.json new file mode 100644 index 00000000..2ff6e83a --- /dev/null +++ b/kubevirtci/cluster-up/cluster/kind-1.28/conformance.json @@ -0,0 +1,47 @@ +{ + "Description": "DEFAULT", + "UUID": "", + "Version": "v0.56.9", + "ResultsDir": "/tmp/sonobuoy/results", + "Resources": null, + "Filters": { + "Namespaces": ".*", + "LabelSelector": "" + }, + "Limits": { + "PodLogs": { + "Namespaces": "kube-system", + "SonobuoyNamespace": true, + "FieldSelectors": [], + "LabelSelector": "", + "Previous": false, + "SinceSeconds": null, + "SinceTime": null, + "Timestamps": false, + "TailLines": null, + "LimitBytes": null + } + }, + "QPS": 30, + "Burst": 50, + "Server": { + "bindaddress": "0.0.0.0", + "bindport": 8080, + "advertiseaddress": "", + "timeoutseconds": 21600 + }, + "Plugins": null, + "PluginSearchPath": [ + "./plugins.d", + "/etc/sonobuoy/plugins.d", + "~/sonobuoy/plugins.d" + ], + "Namespace": "sonobuoy", + "WorkerImage": "sonobuoy/sonobuoy:v0.56.9", + "ImagePullPolicy": "IfNotPresent", + "ImagePullSecrets": "", + "AggregatorPermissions": "clusterAdmin", + "ServiceAccountName": "sonobuoy-serviceaccount", + "ProgressUpdatesPort": "8099", + "SecurityContextMode": "nonroot" +} diff --git a/kubevirtci/cluster-up/cluster/kind-1.28/image b/kubevirtci/cluster-up/cluster/kind-1.28/image new file mode 100644 index 00000000..790a30ea --- /dev/null +++ b/kubevirtci/cluster-up/cluster/kind-1.28/image @@ -0,0 +1 @@ +kindest/node:v1.28.0@sha256:b7a4cad12c197af3ba43202d3efe03246b3f0793f162afb40a33c923952d5b31 diff --git a/kubevirtci/cluster-up/cluster/kind-1.28/provider.sh b/kubevirtci/cluster-up/cluster/kind-1.28/provider.sh new file mode 100755 index 00000000..0acd74b2 --- /dev/null +++ b/kubevirtci/cluster-up/cluster/kind-1.28/provider.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash + +set -e + +DEFAULT_CLUSTER_NAME="kind-1.28" +DEFAULT_HOST_PORT=5000 
+ALTERNATE_HOST_PORT=5001 +export CLUSTER_NAME=${CLUSTER_NAME:-$DEFAULT_CLUSTER_NAME} + +if [ $CLUSTER_NAME == $DEFAULT_CLUSTER_NAME ]; then + export HOST_PORT=$DEFAULT_HOST_PORT +else + export HOST_PORT=$ALTERNATE_HOST_PORT +fi + +function set_kind_params() { + version=$(cat "${KUBEVIRTCI_PATH}/cluster/$KUBEVIRT_PROVIDER/version") + export KIND_VERSION="${KIND_VERSION:-$version}" + + image=$(cat "${KUBEVIRTCI_PATH}/cluster/$KUBEVIRT_PROVIDER/image") + export KIND_NODE_IMAGE="${KIND_NODE_IMAGE:-$image}" +} + +function configure_registry_proxy() { + [ "$CI" != "true" ] && return + + echo "Configuring cluster nodes to work with CI mirror-proxy..." + + local -r ci_proxy_hostname="docker-mirror-proxy.kubevirt-prow.svc" + local -r kind_binary_path="${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/.kind" + local -r configure_registry_proxy_script="${KUBEVIRTCI_PATH}/cluster/kind/configure-registry-proxy.sh" + + KIND_BIN="$kind_binary_path" PROXY_HOSTNAME="$ci_proxy_hostname" $configure_registry_proxy_script +} + +function up() { + cp $KIND_MANIFESTS_DIR/kind.yaml ${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/kind.yaml + _add_kubeadm_cpu_manager_config_patch + _add_extra_mounts + export CONFIG_WORKER_CPU_MANAGER=true + kind_up + + configure_registry_proxy + + # remove the rancher.io kind default storageClass + _kubectl delete sc standard + + echo "$KUBEVIRT_PROVIDER cluster '$CLUSTER_NAME' is ready" +} + +set_kind_params + +source ${KUBEVIRTCI_PATH}/cluster/kind/common.sh diff --git a/kubevirtci/cluster-up/cluster/kind-1.28/version b/kubevirtci/cluster-up/cluster/kind-1.28/version new file mode 100644 index 00000000..5a03fb73 --- /dev/null +++ b/kubevirtci/cluster-up/cluster/kind-1.28/version @@ -0,0 +1 @@ +0.20.0 diff --git a/kubevirtci/cluster-up/cluster/kind-1.30-vgpu/config_vgpu_cluster.sh b/kubevirtci/cluster-up/cluster/kind-1.30-vgpu/config_vgpu_cluster.sh new file mode 100755 index 00000000..87087c6d --- /dev/null +++ 
b/kubevirtci/cluster-up/cluster/kind-1.30-vgpu/config_vgpu_cluster.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +[ $(id -u) -ne 0 ] && echo "FATAL: this script requires sudo privileges" >&2 && exit 1 + +set -xe + +SCRIPT_PATH=$(dirname "$(realpath "$0")") + +source ${SCRIPT_PATH}/vgpu-node/node.sh +echo "_kubectl: " ${_kubectl} +echo "KUBEVIRTCI_PATH: " ${KUBEVIRTCI_PATH} +source ${KUBEVIRTCI_PATH}/cluster/kind/common.sh +echo "_kubectl: " ${_kubectl} + +nodes=($(_kubectl get nodes -o custom-columns=:.metadata.name --no-headers)) +node::remount_sysfs "${nodes[*]}" +node::discover_host_gpus + +_kubectl get nodes diff --git a/kubevirtci/cluster-up/cluster/kind-1.30-vgpu/conformance.json b/kubevirtci/cluster-up/cluster/kind-1.30-vgpu/conformance.json new file mode 100644 index 00000000..2ff6e83a --- /dev/null +++ b/kubevirtci/cluster-up/cluster/kind-1.30-vgpu/conformance.json @@ -0,0 +1,47 @@ +{ + "Description": "DEFAULT", + "UUID": "", + "Version": "v0.56.9", + "ResultsDir": "/tmp/sonobuoy/results", + "Resources": null, + "Filters": { + "Namespaces": ".*", + "LabelSelector": "" + }, + "Limits": { + "PodLogs": { + "Namespaces": "kube-system", + "SonobuoyNamespace": true, + "FieldSelectors": [], + "LabelSelector": "", + "Previous": false, + "SinceSeconds": null, + "SinceTime": null, + "Timestamps": false, + "TailLines": null, + "LimitBytes": null + } + }, + "QPS": 30, + "Burst": 50, + "Server": { + "bindaddress": "0.0.0.0", + "bindport": 8080, + "advertiseaddress": "", + "timeoutseconds": 21600 + }, + "Plugins": null, + "PluginSearchPath": [ + "./plugins.d", + "/etc/sonobuoy/plugins.d", + "~/sonobuoy/plugins.d" + ], + "Namespace": "sonobuoy", + "WorkerImage": "sonobuoy/sonobuoy:v0.56.9", + "ImagePullPolicy": "IfNotPresent", + "ImagePullSecrets": "", + "AggregatorPermissions": "clusterAdmin", + "ServiceAccountName": "sonobuoy-serviceaccount", + "ProgressUpdatesPort": "8099", + "SecurityContextMode": "nonroot" +} diff --git a/kubevirtci/cluster-up/cluster/kind-1.30-vgpu/image 
b/kubevirtci/cluster-up/cluster/kind-1.30-vgpu/image new file mode 100644 index 00000000..babda2ac --- /dev/null +++ b/kubevirtci/cluster-up/cluster/kind-1.30-vgpu/image @@ -0,0 +1 @@ +kindest/node:v1.30.0@sha256:047357ac0cfea04663786a612ba1eaba9702bef25227a794b52890dd8bcd692e diff --git a/kubevirtci/cluster-up/cluster/kind-1.30-vgpu/provider.sh b/kubevirtci/cluster-up/cluster/kind-1.30-vgpu/provider.sh new file mode 100755 index 00000000..5767a919 --- /dev/null +++ b/kubevirtci/cluster-up/cluster/kind-1.30-vgpu/provider.sh @@ -0,0 +1,58 @@ +#!/usr/bin/env bash + +set -e + +DEFAULT_CLUSTER_NAME="vgpu" +DEFAULT_HOST_PORT=5000 +ALTERNATE_HOST_PORT=5001 +export CLUSTER_NAME=${CLUSTER_NAME:-$DEFAULT_CLUSTER_NAME} + +if [ $CLUSTER_NAME == $DEFAULT_CLUSTER_NAME ]; then + export HOST_PORT=$DEFAULT_HOST_PORT +else + export HOST_PORT=$ALTERNATE_HOST_PORT +fi + +function set_kind_params() { + version=$(cat "${KUBEVIRTCI_PATH}/cluster/$KUBEVIRT_PROVIDER/version") + export KIND_VERSION="${KIND_VERSION:-$version}" + + image=$(cat "${KUBEVIRTCI_PATH}/cluster/$KUBEVIRT_PROVIDER/image") + export KIND_NODE_IMAGE="${KIND_NODE_IMAGE:-$image}" +} + +function configure_registry_proxy() { + [ "$CI" != "true" ] && return + + echo "Configuring cluster nodes to work with CI mirror-proxy..." 
+ + local -r ci_proxy_hostname="docker-mirror-proxy.kubevirt-prow.svc" + local -r kind_binary_path="${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/.kind" + local -r configure_registry_proxy_script="${KUBEVIRTCI_PATH}/cluster/kind/configure-registry-proxy.sh" + + KIND_BIN="$kind_binary_path" PROXY_HOSTNAME="$ci_proxy_hostname" $configure_registry_proxy_script +} + +function up() { + # print hardware info for easier debugging based on logs + echo 'Available cards' + ${CRI_BIN} run --rm --cap-add=SYS_RAWIO quay.io/phoracek/lspci@sha256:0f3cacf7098202ef284308c64e3fc0ba441871a846022bb87d65ff130c79adb1 sh -c "lspci -k | grep -EA2 'VGA|3D'" + echo "" + + cp $KIND_MANIFESTS_DIR/kind.yaml ${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/kind.yaml + _add_extra_mounts + kind_up + + configure_registry_proxy + + # remove the rancher.io kind default storageClass + _kubectl delete sc standard + + ${KUBEVIRTCI_PATH}/cluster/$KUBEVIRT_PROVIDER/config_vgpu_cluster.sh + + echo "$KUBEVIRT_PROVIDER cluster '$CLUSTER_NAME' is ready" +} + +set_kind_params + +source ${KUBEVIRTCI_PATH}/cluster/kind/common.sh diff --git a/kubevirtci/cluster-up/cluster/kind-1.30-vgpu/version b/kubevirtci/cluster-up/cluster/kind-1.30-vgpu/version new file mode 100644 index 00000000..ca222b7c --- /dev/null +++ b/kubevirtci/cluster-up/cluster/kind-1.30-vgpu/version @@ -0,0 +1 @@ +0.23.0 diff --git a/kubevirtci/cluster-up/cluster/kind-1.30-vgpu/vgpu-node/node.sh b/kubevirtci/cluster-up/cluster/kind-1.30-vgpu/vgpu-node/node.sh new file mode 100644 index 00000000..4a69c54a --- /dev/null +++ b/kubevirtci/cluster-up/cluster/kind-1.30-vgpu/vgpu-node/node.sh @@ -0,0 +1,32 @@ +#!/bin/bash + +function node::discover_host_gpus() { + local -r gpu_types=( $(find /sys/class/mdev_bus/*/mdev_supported_types) ) + [ "${#gpu_types[@]}" -eq 0 ] && echo "FATAL: Could not find available GPUs on host" >&2 && return 1 + + local gpu_addr + local gpu_addresses=() + for path in "${gpu_types}"; do + gpu_addr="${gpu_types#/sys/class/mdev_bus/}" 
+ gpu_addr=${gpu_addr%/*} + + gpu_addresses+=( $gpu_addr ) + done + + echo "${gpu_addresses[@]}" +} + +function node::remount_sysfs() { + local -r nodes_array=($1) + local node_exec + + for node in "${nodes_array[@]}"; do + + # KIND mounts sysfs as read-only by default, remount as R/W" + node_exec="${CRI_BIN} exec $node" + $node_exec mount -o remount,rw /sys + $node_exec chmod 666 /dev/vfio/vfio + + done +} + diff --git a/kubevirtci/cluster-up/cluster/kind-1.31/README.md b/kubevirtci/cluster-up/cluster/kind-1.31/README.md new file mode 100644 index 00000000..bf48a0e4 --- /dev/null +++ b/kubevirtci/cluster-up/cluster/kind-1.31/README.md @@ -0,0 +1,44 @@ +# K8S with mdev support in a Kind cluster + +Provides a pre-deployed k8s cluster that runs using [kind](https://github.com/kubernetes-sigs/kind) The cluster is completely ephemeral and is recreated on every cluster restart. +The KubeVirt containers are built on the local machine and are then pushed to a registry which is exposed at +`localhost:5000`. + +## Bringing the cluster up + +The following needs to be executed as root. Please refer to the name of the directory to get the kind version. + +```bash +export KUBEVIRT_PROVIDER=kind-x.yz +make cluster-up +``` + +The cluster can be accessed as usual: + +```bash +$ cluster-up/kubectl.sh get nodes +NAME STATUS ROLES AGE +kind-x.yz-control-plane Ready master 6m14s +``` + +## Bringing the cluster down + +```bash +make cluster-down +``` + +This destroys the whole cluster. + +## Setting a custom kind version + +In order to use a custom kind image / kind version, +export KIND_NODE_IMAGE, KIND_VERSION before running cluster-up. +For example in order to use kind 0.9.0 (which is based on k8s-1.19.1) use: +```bash +export KIND_NODE_IMAGE="kindest/node:v1.19.1@sha256:98cf5288864662e37115e362b23e4369c8c4a408f99cbc06e58ac30ddc721600" +export KIND_VERSION="0.9.0" +``` +This allows users to test or use custom images / different kind versions before making them official. 
+See https://github.com/kubernetes-sigs/kind/releases for details about node images according to the kind version. + +- In order to use `make cluster-down` please make sure the right `CLUSTER_NAME` is exported. diff --git a/kubevirtci/cluster-up/cluster/kind-1.31/conformance.json b/kubevirtci/cluster-up/cluster/kind-1.31/conformance.json new file mode 100644 index 00000000..2ff6e83a --- /dev/null +++ b/kubevirtci/cluster-up/cluster/kind-1.31/conformance.json @@ -0,0 +1,47 @@ +{ + "Description": "DEFAULT", + "UUID": "", + "Version": "v0.56.9", + "ResultsDir": "/tmp/sonobuoy/results", + "Resources": null, + "Filters": { + "Namespaces": ".*", + "LabelSelector": "" + }, + "Limits": { + "PodLogs": { + "Namespaces": "kube-system", + "SonobuoyNamespace": true, + "FieldSelectors": [], + "LabelSelector": "", + "Previous": false, + "SinceSeconds": null, + "SinceTime": null, + "Timestamps": false, + "TailLines": null, + "LimitBytes": null + } + }, + "QPS": 30, + "Burst": 50, + "Server": { + "bindaddress": "0.0.0.0", + "bindport": 8080, + "advertiseaddress": "", + "timeoutseconds": 21600 + }, + "Plugins": null, + "PluginSearchPath": [ + "./plugins.d", + "/etc/sonobuoy/plugins.d", + "~/sonobuoy/plugins.d" + ], + "Namespace": "sonobuoy", + "WorkerImage": "sonobuoy/sonobuoy:v0.56.9", + "ImagePullPolicy": "IfNotPresent", + "ImagePullSecrets": "", + "AggregatorPermissions": "clusterAdmin", + "ServiceAccountName": "sonobuoy-serviceaccount", + "ProgressUpdatesPort": "8099", + "SecurityContextMode": "nonroot" +} diff --git a/kubevirtci/cluster-up/cluster/kind-1.31/image b/kubevirtci/cluster-up/cluster/kind-1.31/image new file mode 100644 index 00000000..92a8c448 --- /dev/null +++ b/kubevirtci/cluster-up/cluster/kind-1.31/image @@ -0,0 +1 @@ +kindest/node:v1.31.2@sha256:18fbefc20a7113353c7b75b5c869d7145a6abd6269154825872dc59c1329912e diff --git a/kubevirtci/cluster-up/cluster/kind-1.31/provider.sh b/kubevirtci/cluster-up/cluster/kind-1.31/provider.sh new file mode 100755 index 
00000000..3f10e2bc --- /dev/null +++ b/kubevirtci/cluster-up/cluster/kind-1.31/provider.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash + +set -e + +DEFAULT_CLUSTER_NAME="kind-1.31" +DEFAULT_HOST_PORT=5000 +ALTERNATE_HOST_PORT=5001 +export CLUSTER_NAME=${CLUSTER_NAME:-$DEFAULT_CLUSTER_NAME} + +if [ $CLUSTER_NAME == $DEFAULT_CLUSTER_NAME ]; then + export HOST_PORT=$DEFAULT_HOST_PORT +else + export HOST_PORT=$ALTERNATE_HOST_PORT +fi + +function set_kind_params() { + version=$(cat "${KUBEVIRTCI_PATH}/cluster/$KUBEVIRT_PROVIDER/version") + export KIND_VERSION="${KIND_VERSION:-$version}" + + image=$(cat "${KUBEVIRTCI_PATH}/cluster/$KUBEVIRT_PROVIDER/image") + export KIND_NODE_IMAGE="${KIND_NODE_IMAGE:-$image}" +} + +function configure_registry_proxy() { + [ "$CI" != "true" ] && return + + echo "Configuring cluster nodes to work with CI mirror-proxy..." + + local -r ci_proxy_hostname="docker-mirror-proxy.kubevirt-prow.svc" + local -r kind_binary_path="${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/.kind" + local -r configure_registry_proxy_script="${KUBEVIRTCI_PATH}/cluster/kind/configure-registry-proxy.sh" + + KIND_BIN="$kind_binary_path" PROXY_HOSTNAME="$ci_proxy_hostname" $configure_registry_proxy_script +} + +function up() { + cp $KIND_MANIFESTS_DIR/kind.yaml ${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/kind.yaml + _add_kubeadm_cpu_manager_config_patch + _add_extra_mounts + export CONFIG_WORKER_CPU_MANAGER=true + kind_up + + configure_registry_proxy + + # remove the rancher.io kind default storageClass + _kubectl delete sc standard + + echo "$KUBEVIRT_PROVIDER cluster '$CLUSTER_NAME' is ready" +} + +set_kind_params + +source ${KUBEVIRTCI_PATH}/cluster/kind/common.sh diff --git a/kubevirtci/cluster-up/cluster/kind-1.31/version b/kubevirtci/cluster-up/cluster/kind-1.31/version new file mode 100644 index 00000000..d21d277b --- /dev/null +++ b/kubevirtci/cluster-up/cluster/kind-1.31/version @@ -0,0 +1 @@ +0.25.0 diff --git a/kubevirtci/cluster-up/cluster/kind-ovn/README.md 
b/kubevirtci/cluster-up/cluster/kind-ovn/README.md new file mode 100644 index 00000000..11630ef8 --- /dev/null +++ b/kubevirtci/cluster-up/cluster/kind-ovn/README.md @@ -0,0 +1,29 @@ +# OVN K8S in a Kind cluster + +Provides a k8s cluster that runs using [KinD](https://github.com/kubernetes-sigs/kind) +The cluster is completely ephemeral and is recreated on every cluster restart. The KubeVirt containers are built on the +local machine and are then pushed to a registry which is exposed at +`localhost:5000`. + +## Bringing the cluster up + +```bash +export KUBEVIRT_PROVIDER=kind-ovn +make cluster-up +``` + +## Bringing the cluster down + +```bash +export KUBEVIRT_PROVIDER=kind-ovn +make cluster-down +``` + +## FAQ + +In case the cluster deployment fails, you need to make sure you have enough watches +add those to /etc/sysctl.conf, and apply it `sysctl -p /etc/sysctl.conf`. +``` +sysctl fs.inotify.max_user_watches=1048576 +sysctl fs.inotify.max_user_instances=512 +``` diff --git a/kubevirtci/cluster-up/cluster/kind-ovn/install-ovn.sh b/kubevirtci/cluster-up/cluster/kind-ovn/install-ovn.sh new file mode 100755 index 00000000..f4a448ed --- /dev/null +++ b/kubevirtci/cluster-up/cluster/kind-ovn/install-ovn.sh @@ -0,0 +1,47 @@ +#!/bin/bash -e +# +# This file is part of the KubeVirt project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Copyright 2024 Red Hat, Inc. 
+# + +export OVNK_COMMIT=c77ee8c38c6a6d9e55131a1272db5fad5b606e44 + +OVNK_REPO='https://github.com/ovn-org/ovn-kubernetes.git' +CLUSTER_PATH=${CLUSTER_PATH:-"${KUBEVIRTCI_CONFIG_PATH}/${KUBEVIRT_PROVIDER}/_ovnk"} + +function cluster::_get_repo() { + git --git-dir ${CLUSTER_PATH}/.git config --get remote.origin.url +} + +function cluster::_get_sha() { + git --git-dir ${CLUSTER_PATH}/.git rev-parse HEAD +} + +function cluster::install() { + if [ -d ${CLUSTER_PATH} ]; then + if [ $(cluster::_get_repo) != ${OVNK_REPO} -o $(cluster::_get_sha) != ${OVNK_COMMIT} ]; then + rm -rf ${CLUSTER_PATH} + fi + fi + + if [ ! -d ${CLUSTER_PATH} ]; then + git clone ${OVNK_REPO} ${CLUSTER_PATH} + ( + cd ${CLUSTER_PATH} + git checkout ${OVNK_COMMIT} + ) + fi +} diff --git a/kubevirtci/cluster-up/cluster/kind-ovn/provider.sh b/kubevirtci/cluster-up/cluster/kind-ovn/provider.sh new file mode 100755 index 00000000..8a350609 --- /dev/null +++ b/kubevirtci/cluster-up/cluster/kind-ovn/provider.sh @@ -0,0 +1,88 @@ +#!/bin/bash -ex +# This file is part of the KubeVirt project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Copyright 2024 Red Hat, Inc. 
+# + +KIND_VERSION=0.19.0 +export KIND_IMAGE=kindest/node +export K8S_VERSION=v1.28.0@sha256:dad5a6238c5e41d7cac405fae3b5eda2ad1de6f1190fa8bfc64ff5bb86173213 + +KIND_PATH=${KIND_PATH:-"${KUBEVIRTCI_CONFIG_PATH}/${KUBEVIRT_PROVIDER}/_kind"} +CLUSTER_PATH=${CLUSTER_PATH:-"${KUBEVIRTCI_CONFIG_PATH}/${KUBEVIRT_PROVIDER}/_ovnk"} +CLUSTER_NAME=${KUBEVIRT_PROVIDER} + +function calculate_mtu() { + overlay_overhead=58 + current_mtu=$(cat /sys/class/net/$(ip route | grep "default via" | head -1 | awk '{print $5}')/mtu) + expr $current_mtu - $overlay_overhead +} + +MTU=${MTU:-$(calculate_mtu)} + +PLATFORM=$(uname -m) +case ${PLATFORM} in +x86_64* | i?86_64* | amd64*) + ARCH="amd64" + ;; +aarch64* | arm64*) + ARCH="arm64" + ;; +*) + echo "invalid Arch, only support x86_64, aarch64" + exit 1 + ;; +esac + +function fetch_kind() { + mkdir -p $KIND_PATH + current_kind_version=$($KIND_PATH/kind --version |& awk '{print $3}') + if [[ $current_kind_version != $KIND_VERSION ]]; then + echo "Downloading kind v$KIND_VERSION" + curl -LSs https://github.com/kubernetes-sigs/kind/releases/download/v$KIND_VERSION/kind-linux-${ARCH} -o "$KIND_PATH/kind" + chmod +x "$KIND_PATH/kind" + fi + export PATH=$KIND_PATH:$PATH +} + +function prepare_config() { + echo "STEP: Prepare provider config" + cat >$KUBEVIRTCI_CONFIG_PATH/$KUBEVIRT_PROVIDER/config-provider-$KUBEVIRT_PROVIDER.sh <` the non used PFs, in order to prevent them from being allocated to + the current cluster. The user can list the PFs that should not be allocated to the current cluster, keeping in mind + that at least one (or 2 in case of migration), should not be listed, so they would be allocated for the current + cluster. Note: another reason to blacklist a PF, is in case its has a defect or should be kept for other operations ( + for example sniffing). + +- Clusters should be created one by another and not in parallel (to avoid races over SR-IOV PF's). +- The cluster names must be different. 
This can be achieved by setting `export CLUSTER_NAME=sriov2` on the 2nd cluster. + The default `CLUSTER_NAME` is `sriov`. The 2nd cluster registry would be exposed at `localhost:5001` automatically, + once the `CLUSTER_NAME` + is set to a non default value. +- Each cluster should be created on its own git clone folder, i.e: + `/root/project/kubevirtci1` + `/root/project/kubevirtci2` + In order to switch between them, change dir to that folder and set the env variables `KUBECONFIG` + and `KUBEVIRT_PROVIDER`. +- In case only one PF exists, for example if running on prow which will assign only one PF per job in its own DinD, + Kubevirtci is agnostic and nothing needs to be done, since all conditions above are met. +- Upper limit of the number of clusters that can be run on the same time equals number of PFs / number of PFs per + cluster, therefore, in case there is only one PF, only one cluster can be created. Locally the actual limit currently + supported is two clusters. +- In order to use `make cluster-down` please make sure the right `CLUSTER_NAME` is exported. diff --git a/kubevirtci/cluster-up/cluster/kind-sriov/TROUBLESHOOTING.md b/kubevirtci/cluster-up/cluster/kind-sriov/TROUBLESHOOTING.md new file mode 100644 index 00000000..7b699427 --- /dev/null +++ b/kubevirtci/cluster-up/cluster/kind-sriov/TROUBLESHOOTING.md @@ -0,0 +1,60 @@ +# How to troubleshoot a failing kind job + +If logging and output artifacts are not enough, there is a way to connect to a running CI pod and troubleshoot directly from there. + +## Pre-requisites + +- A working (enabled) account on the [CI cluster](shift.ovirt.org), specifically enabled to the `kubevirt-prow-jobs` project. +- The [mkpj tool](https://github.com/kubernetes/test-infra/tree/master/prow/cmd/mkpj) installed + +## Launching a custom job + +Through the `mkpj` tool, it's possible to craft a custom Prow Job that can be executed on the CI cluster. 
+
+Just `go get` it by running `go get k8s.io/test-infra/prow/cmd/mkpj`
+
+Then run the following command from a checkout of the [project-infra repo](https://github.com/kubevirt/project-infra):
+
+```bash
+mkpj --pull-number $KUBEVIRTPRNUMBER -job pull-kubevirt-e2e-kind-k8s-sriov-1.17.0 -job-config-path github/ci/prow/files/jobs/kubevirt/kubevirt-presubmits.yaml --config-path github/ci/prow/files/config.yaml > debugkind.yaml
+```
+
+You will end up having a ProwJob manifest in the `debugkind.yaml` file.
+
+It's strongly recommended to replace the job's name, as it will be easier to find and debug the related pod, by replacing `metadata.name` with something more recognizable.
+
+The $KUBEVIRTPRNUMBER can be an actual PR on the [kubevirt repo](https://github.com/kubevirt/kubevirt).
+
+In case we just want to debug the cluster provided by the CI, it's recommended to override the entry point, either in the test PR we are instrumenting (a good sample can be found [here](https://github.com/kubevirt/kubevirt/pull/3022)), or by overriding the entry point directly in the prow job's manifest.
+
+Remember that we want the cluster long living, so a long sleep must be provided as part of the entry point.
+
+Make sure you switch to the `kubevirt-prow-jobs` project, and apply the manifest:
+
+```bash
+ kubectl apply -f debugkind.yaml
+```
+
+You will end up with a ProwJob object, and a pod with the same name you gave to the ProwJob.
+
+Once the pod is up & running, connect to it via bash:
+
+```bash
+ kubectl exec -it debugprowjobpod bash
+```
+
+### Logistics
+
+Once you are in the pod, you'll be able to troubleshoot what's happening in the environment CI is running its tests.
+
+Run the following to bring up a [kind](https://github.com/kubernetes-sigs/kind) cluster with a single node setup and the SR-IOV operator already setup to go (if it wasn't already done by the job itself).
+ +```bash +KUBEVIRT_PROVIDER=kind-k8s-sriov-1.17.0 make cluster-up +``` + +The kubeconfig file will be available under `/root/.kube/kind-config-sriov`. + +The `kubectl` binary is already on board and in `$PATH`. + +The container acting as node is the one named `sriov-control-plane`. You can even see what's in there by running `docker exec -it sriov-control-plane bash`. diff --git a/kubevirtci/cluster-up/cluster/kind-sriov/config_sriov_cluster.sh b/kubevirtci/cluster-up/cluster/kind-sriov/config_sriov_cluster.sh new file mode 100755 index 00000000..af53bb91 --- /dev/null +++ b/kubevirtci/cluster-up/cluster/kind-sriov/config_sriov_cluster.sh @@ -0,0 +1,73 @@ +#!/bin/bash + +[ $(id -u) -ne 0 ] && echo "FATAL: this script requires sudo privileges" >&2 && exit 1 + +set -xe + +PF_COUNT_PER_NODE=${PF_COUNT_PER_NODE:-1} +[ $PF_COUNT_PER_NODE -le 0 ] && echo "FATAL: PF_COUNT_PER_NODE must be a positive integer" >&2 && exit 1 + +SCRIPT_PATH=$(dirname "$(realpath "$0")") + +source ${SCRIPT_PATH}/sriov-node/node.sh +source ${SCRIPT_PATH}/sriov-components/sriov_components.sh + +CONFIGURE_VFS_SCRIPT_PATH="$SCRIPT_PATH/sriov-node/configure_vfs.sh" + +SRIOV_COMPONENTS_NAMESPACE="sriov" +SRIOV_NODE_LABEL_KEY="sriov_capable" +SRIOV_NODE_LABEL_VALUE="true" +SRIOV_NODE_LABEL="$SRIOV_NODE_LABEL_KEY=$SRIOV_NODE_LABEL_VALUE" +SRIOVDP_RESOURCE_PREFIX="kubevirt.io" +SRIOVDP_RESOURCE_NAME="sriov_net" +VFS_DRIVER="vfio-pci" +VFS_DRIVER_KMODULE="vfio_pci" +VFS_COUNT="6" + +function validate_nodes_sriov_allocatable_resource() { + local -r resource_name="$SRIOVDP_RESOURCE_PREFIX/$SRIOVDP_RESOURCE_NAME" + local -r sriov_nodes=$(_kubectl get nodes -l $SRIOV_NODE_LABEL -o custom-columns=:.metadata.name --no-headers) + + local num_vfs + for sriov_node in $sriov_nodes; do + num_vfs=$(node::total_vfs_count "$sriov_node") + sriov_components::wait_allocatable_resource "$sriov_node" "$resource_name" "$num_vfs" + done +} + +worker_nodes=($(_kubectl get nodes -l node-role.kubernetes.io/worker -o 
custom-columns=:.metadata.name --no-headers)) +worker_nodes_count=${#worker_nodes[@]} +[ "$worker_nodes_count" -eq 0 ] && echo "FATAL: no worker nodes found" >&2 && exit 1 + +pfs_names=($(node::discover_host_pfs)) +pf_count="${#pfs_names[@]}" +[ "$pf_count" -eq 0 ] && echo "FATAL: Could not find available sriov PF's" >&2 && exit 1 + +total_pf_required=$((worker_nodes_count*PF_COUNT_PER_NODE)) +[ "$pf_count" -lt "$total_pf_required" ] && \ + echo "FATAL: there are not enough PF's on the host, try to reduce PF_COUNT_PER_NODE + Worker nodes count: $worker_nodes_count + PF per node count: $PF_COUNT_PER_NODE + Total PF count required: $total_pf_required" >&2 && exit 1 + +## Move SR-IOV Physical Functions to worker nodes +PFS_IN_USE="" +node::configure_sriov_pfs "${worker_nodes[*]}" "${pfs_names[*]}" "$PF_COUNT_PER_NODE" "PFS_IN_USE" + +## Create VFs and configure their drivers on each SR-IOV node +node::configure_sriov_vfs "${worker_nodes[*]}" "$VFS_DRIVER" "$VFS_DRIVER_KMODULE" "$VFS_COUNT" + +## Deploy Multus and SRIOV components +sriov_components::deploy_multus +sriov_components::deploy \ + "$PFS_IN_USE" \ + "$VFS_DRIVER" \ + "$SRIOVDP_RESOURCE_PREFIX" "$SRIOVDP_RESOURCE_NAME" \ + "$SRIOV_NODE_LABEL_KEY" "$SRIOV_NODE_LABEL_VALUE" + +# Verify that each sriov capable node has sriov VFs allocatable resource +validate_nodes_sriov_allocatable_resource +sriov_components::wait_pods_ready + +_kubectl get nodes +_kubectl get pods -n $SRIOV_COMPONENTS_NAMESPACE diff --git a/kubevirtci/cluster-up/cluster/kind-sriov/conformance.json b/kubevirtci/cluster-up/cluster/kind-sriov/conformance.json new file mode 100644 index 00000000..2ff6e83a --- /dev/null +++ b/kubevirtci/cluster-up/cluster/kind-sriov/conformance.json @@ -0,0 +1,47 @@ +{ + "Description": "DEFAULT", + "UUID": "", + "Version": "v0.56.9", + "ResultsDir": "/tmp/sonobuoy/results", + "Resources": null, + "Filters": { + "Namespaces": ".*", + "LabelSelector": "" + }, + "Limits": { + "PodLogs": { + "Namespaces": 
"kube-system", + "SonobuoyNamespace": true, + "FieldSelectors": [], + "LabelSelector": "", + "Previous": false, + "SinceSeconds": null, + "SinceTime": null, + "Timestamps": false, + "TailLines": null, + "LimitBytes": null + } + }, + "QPS": 30, + "Burst": 50, + "Server": { + "bindaddress": "0.0.0.0", + "bindport": 8080, + "advertiseaddress": "", + "timeoutseconds": 21600 + }, + "Plugins": null, + "PluginSearchPath": [ + "./plugins.d", + "/etc/sonobuoy/plugins.d", + "~/sonobuoy/plugins.d" + ], + "Namespace": "sonobuoy", + "WorkerImage": "sonobuoy/sonobuoy:v0.56.9", + "ImagePullPolicy": "IfNotPresent", + "ImagePullSecrets": "", + "AggregatorPermissions": "clusterAdmin", + "ServiceAccountName": "sonobuoy-serviceaccount", + "ProgressUpdatesPort": "8099", + "SecurityContextMode": "nonroot" +} diff --git a/kubevirtci/cluster-up/cluster/kind-sriov/image b/kubevirtci/cluster-up/cluster/kind-sriov/image new file mode 100644 index 00000000..92a8c448 --- /dev/null +++ b/kubevirtci/cluster-up/cluster/kind-sriov/image @@ -0,0 +1 @@ +kindest/node:v1.31.2@sha256:18fbefc20a7113353c7b75b5c869d7145a6abd6269154825872dc59c1329912e diff --git a/kubevirtci/cluster-up/cluster/kind-sriov/provider.sh b/kubevirtci/cluster-up/cluster/kind-sriov/provider.sh new file mode 100755 index 00000000..6a0f77b2 --- /dev/null +++ b/kubevirtci/cluster-up/cluster/kind-sriov/provider.sh @@ -0,0 +1,78 @@ +#!/usr/bin/env bash + +set -e + +DEFAULT_CLUSTER_NAME="sriov" +DEFAULT_HOST_PORT=5000 +ALTERNATE_HOST_PORT=5001 +export CLUSTER_NAME=${CLUSTER_NAME:-$DEFAULT_CLUSTER_NAME} + +if [ $CLUSTER_NAME == $DEFAULT_CLUSTER_NAME ]; then + export HOST_PORT=$DEFAULT_HOST_PORT +else + export HOST_PORT=$ALTERNATE_HOST_PORT +fi + +function print_available_nics() { + # print hardware info for easier debugging based on logs + echo 'Available NICs' + ${CRI_BIN} run --rm --cap-add=SYS_RAWIO quay.io/phoracek/lspci@sha256:0f3cacf7098202ef284308c64e3fc0ba441871a846022bb87d65ff130c79adb1 sh -c "lspci | egrep -i 
'network|ethernet'" + echo "" +} + +function set_kind_params() { + version=$(cat "${KUBEVIRTCI_PATH}/cluster/$KUBEVIRT_PROVIDER/version") + export KIND_VERSION="${KIND_VERSION:-$version}" + + image=$(cat "${KUBEVIRTCI_PATH}/cluster/$KUBEVIRT_PROVIDER/image") + export KIND_NODE_IMAGE="${KIND_NODE_IMAGE:-$image}" +} + +function print_sriov_data() { + nodes=$(_kubectl get nodes -o=custom-columns=:.metadata.name | awk NF) + for node in $nodes; do + if [[ ! "$node" =~ .*"control-plane".* ]]; then + echo "Node: $node" + echo "VFs:" + ${CRI_BIN} exec $node bash -c "ls -l /sys/class/net/*/device/virtfn*" + echo "PFs PCI Addresses:" + ${CRI_BIN} exec $node bash -c "grep PCI_SLOT_NAME /sys/class/net/*/device/uevent" + fi + done +} + +function configure_registry_proxy() { + [ "$CI" != "true" ] && return + + echo "Configuring cluster nodes to work with CI mirror-proxy..." + + local -r ci_proxy_hostname="docker-mirror-proxy.kubevirt-prow.svc" + local -r kind_binary_path="${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/.kind" + local -r configure_registry_proxy_script="${KUBEVIRTCI_PATH}/cluster/kind/configure-registry-proxy.sh" + + KIND_BIN="$kind_binary_path" PROXY_HOSTNAME="$ci_proxy_hostname" $configure_registry_proxy_script +} + +function deploy_sriov() { + print_available_nics + ${KUBEVIRTCI_PATH}/cluster/$KUBEVIRT_PROVIDER/config_sriov_cluster.sh + print_sriov_data +} + +function up() { + cp $KIND_MANIFESTS_DIR/kind.yaml ${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/kind.yaml + export CONFIG_WORKER_CPU_MANAGER=true + kind_up + + configure_registry_proxy + + # remove the rancher.io kind default storageClass + _kubectl delete sc standard + + deploy_sriov + echo "$KUBEVIRT_PROVIDER cluster '$CLUSTER_NAME' is ready" +} + +set_kind_params + +source ${KUBEVIRTCI_PATH}/cluster/kind/common.sh diff --git a/kubevirtci/cluster-up/cluster/kind-sriov/sriov-components/manifests/kustomization.yaml b/kubevirtci/cluster-up/cluster/kind-sriov/sriov-components/manifests/kustomization.yaml new 
file mode 100644 index 00000000..0c1caec1 --- /dev/null +++ b/kubevirtci/cluster-up/cluster/kind-sriov/sriov-components/manifests/kustomization.yaml @@ -0,0 +1,27 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: sriov +resources: +- sriov-ns.yaml +- sriov-cni-daemonset.yaml +- sriovdp-daemonset.yaml +- sriovdp-config.yaml +patchesJson6902: +- target: + group: apps + version: v1 + kind: DaemonSet + name: kube-sriov-cni-ds-amd64 + path: patch-node-selector.yaml +- target: + group: apps + version: v1 + kind: DaemonSet + name: kube-sriov-device-plugin-amd64 + path: patch-node-selector.yaml +- target: + group: apps + version: v1 + kind: DaemonSet + name: kube-sriov-device-plugin-amd64 + path: patch-sriovdp-resource-prefix.yaml diff --git a/kubevirtci/cluster-up/cluster/kind-sriov/sriov-components/manifests/multus/kustomization.yaml b/kubevirtci/cluster-up/cluster/kind-sriov/sriov-components/manifests/multus/kustomization.yaml new file mode 100644 index 00000000..65706170 --- /dev/null +++ b/kubevirtci/cluster-up/cluster/kind-sriov/sriov-components/manifests/multus/kustomization.yaml @@ -0,0 +1,14 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: +- multus.yaml +images: +- name: ghcr.io/k8snetworkplumbingwg/multus-cni + newTag: v3.8 +patchesJson6902: +- path: patch-args.yaml + target: + group: apps + version: v1 + kind: DaemonSet + name: kube-multus-ds diff --git a/kubevirtci/cluster-up/cluster/kind-sriov/sriov-components/manifests/multus/multus.yaml b/kubevirtci/cluster-up/cluster/kind-sriov/sriov-components/manifests/multus/multus.yaml new file mode 100644 index 00000000..4b6b950d --- /dev/null +++ b/kubevirtci/cluster-up/cluster/kind-sriov/sriov-components/manifests/multus/multus.yaml @@ -0,0 +1,206 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: network-attachment-definitions.k8s.cni.cncf.io +spec: + group: k8s.cni.cncf.io + scope: Namespaced + names: + 
plural: network-attachment-definitions + singular: network-attachment-definition + kind: NetworkAttachmentDefinition + shortNames: + - net-attach-def + versions: + - name: v1 + served: true + storage: true + schema: + openAPIV3Schema: + description: 'NetworkAttachmentDefinition is a CRD schema specified by the Network Plumbing + Working Group to express the intent for attaching pods to one or more logical or physical + networks. More information available at: https://github.com/k8snetworkplumbingwg/multi-net-spec' + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this represen + tation of an object. Servers should convert recognized schemas to the + latest internal value, and may reject unrecognized values. More info: + https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: 'NetworkAttachmentDefinition spec defines the desired state of a network attachment' + type: object + properties: + config: + description: 'NetworkAttachmentDefinition config is a JSON-formatted CNI configuration' + type: string +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: multus +rules: + - apiGroups: ["k8s.cni.cncf.io"] + resources: + - '*' + verbs: + - '*' + - apiGroups: + - "" + resources: + - pods + - pods/status + verbs: + - get + - update + - apiGroups: + - "" + - events.k8s.io + resources: + - events + verbs: + - create + - patch + - update +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: multus +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: multus +subjects: +- kind: ServiceAccount + name: multus + namespace: kube-system +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: multus + namespace: kube-system +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: multus-cni-config + namespace: kube-system + labels: + tier: node + app: multus +data: + # NOTE: If you'd prefer to manually apply a configuration file, you may create one here. + # In the case you'd like to customize the Multus installation, you should change the arguments to the Multus pod + # change the "args" line below from + # - "--multus-conf-file=auto" + # to: + # "--multus-conf-file=/tmp/multus-conf/70-multus.conf" + # Additionally -- you should ensure that the name "70-multus.conf" is the alphabetically first name in the + # /etc/cni/net.d/ directory on each node, otherwise, it will not be used by the Kubelet. 
+ cni-conf.json: | + { + "name": "multus-cni-network", + "type": "multus", + "capabilities": { + "portMappings": true + }, + "delegates": [ + { + "cniVersion": "0.3.1", + "name": "default-cni-network", + "plugins": [ + { + "type": "flannel", + "name": "flannel.1", + "delegate": { + "isDefaultGateway": true, + "hairpinMode": true + } + }, + { + "type": "portmap", + "capabilities": { + "portMappings": true + } + } + ] + } + ], + "kubeconfig": "/etc/cni/net.d/multus.d/multus.kubeconfig" + } +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kube-multus-ds + namespace: kube-system + labels: + tier: node + app: multus + name: multus +spec: + selector: + matchLabels: + name: multus + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + tier: node + app: multus + name: multus + spec: + hostNetwork: true + tolerations: + - operator: Exists + effect: NoSchedule + serviceAccountName: multus + containers: + - name: kube-multus + image: ghcr.io/k8snetworkplumbingwg/multus-cni:stable + command: ["/entrypoint.sh"] + args: + - "--multus-conf-file=auto" + - "--cni-version=0.3.1" + resources: + requests: + cpu: "100m" + memory: "50Mi" + limits: + cpu: "100m" + memory: "50Mi" + securityContext: + privileged: true + volumeMounts: + - name: cni + mountPath: /host/etc/cni/net.d + - name: cnibin + mountPath: /host/opt/cni/bin + - name: multus-cfg + mountPath: /tmp/multus-conf + terminationGracePeriodSeconds: 10 + volumes: + - name: cni + hostPath: + path: /etc/cni/net.d + - name: cnibin + hostPath: + path: /opt/cni/bin + - name: multus-cfg + configMap: + name: multus-cni-config + items: + - key: cni-conf.json + path: 70-multus.conf diff --git a/kubevirtci/cluster-up/cluster/kind-sriov/sriov-components/manifests/multus/patch-args.yaml b/kubevirtci/cluster-up/cluster/kind-sriov/sriov-components/manifests/multus/patch-args.yaml new file mode 100644 index 00000000..ea9cd109 --- /dev/null +++ 
b/kubevirtci/cluster-up/cluster/kind-sriov/sriov-components/manifests/multus/patch-args.yaml
@@ -0,0 +1,6 @@
+- op: add
+  path: /spec/template/spec/containers/0/args/-
+  value: "--multus-log-level=debug"
+- op: add
+  path: /spec/template/spec/containers/0/args/-
+  value: "--multus-log-file=/var/log/multus.log"
diff --git a/kubevirtci/cluster-up/cluster/kind-sriov/sriov-components/manifests/patch-node-selector.yaml.in b/kubevirtci/cluster-up/cluster/kind-sriov/sriov-components/manifests/patch-node-selector.yaml.in
new file mode 100644
index 00000000..0117c8cd
--- /dev/null
+++ b/kubevirtci/cluster-up/cluster/kind-sriov/sriov-components/manifests/patch-node-selector.yaml.in
@@ -0,0 +1,3 @@
+- op: add
+  path: /spec/template/spec/nodeSelector/$LABEL_KEY
+  value: "$LABEL_VALUE"
diff --git a/kubevirtci/cluster-up/cluster/kind-sriov/sriov-components/manifests/patch-sriovdp-resource-prefix.yaml.in b/kubevirtci/cluster-up/cluster/kind-sriov/sriov-components/manifests/patch-sriovdp-resource-prefix.yaml.in
new file mode 100644
index 00000000..563e606a
--- /dev/null
+++ b/kubevirtci/cluster-up/cluster/kind-sriov/sriov-components/manifests/patch-sriovdp-resource-prefix.yaml.in
@@ -0,0 +1,4 @@
+# RFC 6902: "-" appends to the args array; "-1" is not a valid index and kustomize rejects it.
+- op: add
+  path: /spec/template/spec/containers/0/args/-
+  value: --resource-prefix=$RESOURCE_PREFIX
diff --git a/kubevirtci/cluster-up/cluster/kind-sriov/sriov-components/manifests/sriov-cni-daemonset.yaml b/kubevirtci/cluster-up/cluster/kind-sriov/sriov-components/manifests/sriov-cni-daemonset.yaml
new file mode 100644
index 00000000..7d0150e3
--- /dev/null
+++ b/kubevirtci/cluster-up/cluster/kind-sriov/sriov-components/manifests/sriov-cni-daemonset.yaml
@@ -0,0 +1,51 @@
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: kube-sriov-cni-ds-amd64
+  namespace: kube-system
+  labels:
+    tier: node
+    app: sriov-cni
+spec:
+  selector:
+    matchLabels:
+      name: sriov-cni
+  template:
+    metadata:
+      labels:
+        name: sriov-cni
+        tier: node
+        app: sriov-cni
+    spec:
+      nodeSelector:
+        
kubernetes.io/arch: amd64 + tolerations: + - key: node-role.kubernetes.io/control-plane + operator: Exists + effect: NoSchedule + containers: + - name: kube-sriov-cni + image: ghcr.io/k8snetworkplumbingwg/sriov-cni:v2.7.0 + imagePullPolicy: IfNotPresent + securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: true + capabilities: + drop: + - ALL + resources: + requests: + cpu: "100m" + memory: "50Mi" + limits: + cpu: "100m" + memory: "50Mi" + volumeMounts: + - name: cnibin + mountPath: /host/opt/cni/bin + volumes: + - name: cnibin + hostPath: + path: /opt/cni/bin diff --git a/kubevirtci/cluster-up/cluster/kind-sriov/sriov-components/manifests/sriov-ns.yaml b/kubevirtci/cluster-up/cluster/kind-sriov/sriov-components/manifests/sriov-ns.yaml new file mode 100644 index 00000000..bfe55b30 --- /dev/null +++ b/kubevirtci/cluster-up/cluster/kind-sriov/sriov-components/manifests/sriov-ns.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: sriov diff --git a/kubevirtci/cluster-up/cluster/kind-sriov/sriov-components/manifests/sriovdp-config.yaml.in b/kubevirtci/cluster-up/cluster/kind-sriov/sriov-components/manifests/sriovdp-config.yaml.in new file mode 100644 index 00000000..5e978816 --- /dev/null +++ b/kubevirtci/cluster-up/cluster/kind-sriov/sriov-components/manifests/sriovdp-config.yaml.in @@ -0,0 +1,17 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: sriovdp-config + namespace: kube-system +data: + config.json: | + { + "resourceList": [{ + "resourceName": "$RESOURCE_NAME", + "selectors": { + "drivers": $DRIVERS, + "pfNames": $PF_NAMES + } + }] + } diff --git a/kubevirtci/cluster-up/cluster/kind-sriov/sriov-components/manifests/sriovdp-daemonset.yaml b/kubevirtci/cluster-up/cluster/kind-sriov/sriov-components/manifests/sriovdp-daemonset.yaml new file mode 100644 index 00000000..322a2239 --- /dev/null +++ b/kubevirtci/cluster-up/cluster/kind-sriov/sriov-components/manifests/sriovdp-daemonset.yaml @@ 
-0,0 +1,221 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: sriov-device-plugin + namespace: kube-system + +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kube-sriov-device-plugin-amd64 + namespace: kube-system + labels: + tier: node + app: sriovdp +spec: + selector: + matchLabels: + name: sriov-device-plugin + template: + metadata: + labels: + name: sriov-device-plugin + tier: node + app: sriovdp + spec: + hostNetwork: true + nodeSelector: + beta.kubernetes.io/arch: amd64 + tolerations: + - key: node-role.kubernetes.io/control-plane + operator: Exists + effect: NoSchedule + serviceAccountName: sriov-device-plugin + containers: + - name: kube-sriovdp + image: ghcr.io/k8snetworkplumbingwg/sriov-network-device-plugin:v3.4.0 + imagePullPolicy: IfNotPresent + args: + - --log-dir=sriovdp + - --log-level=10 + securityContext: + privileged: true + resources: + requests: + cpu: "250m" + memory: "40Mi" + limits: + cpu: 1 + memory: "200Mi" + volumeMounts: + - name: devicesock + mountPath: /var/lib/kubelet/ + readOnly: false + - name: log + mountPath: /var/log + - name: config-volume + mountPath: /etc/pcidp + - name: device-info + mountPath: /var/run/k8s.cni.cncf.io/devinfo/dp + volumes: + - name: devicesock + hostPath: + path: /var/lib/kubelet/ + - name: log + hostPath: + path: /var/log + - name: device-info + hostPath: + path: /var/run/k8s.cni.cncf.io/devinfo/dp + type: DirectoryOrCreate + - name: config-volume + configMap: + name: sriovdp-config + items: + - key: config.json + path: config.json + +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kube-sriov-device-plugin-ppc64le + namespace: kube-system + labels: + tier: node + app: sriovdp +spec: + selector: + matchLabels: + name: sriov-device-plugin + template: + metadata: + labels: + name: sriov-device-plugin + tier: node + app: sriovdp + spec: + hostNetwork: true + nodeSelector: + beta.kubernetes.io/arch: ppc64le + tolerations: + - key: node-role.kubernetes.io/control-plane + 
operator: Exists + effect: NoSchedule + serviceAccountName: sriov-device-plugin + containers: + - name: kube-sriovdp + image: ghcr.io/k8snetworkplumbingwg/sriov-network-device-plugin:latest-ppc64le + imagePullPolicy: IfNotPresent + args: + - --log-dir=sriovdp + - --log-level=10 + securityContext: + privileged: true + resources: + requests: + cpu: "250m" + memory: "40Mi" + limits: + cpu: 1 + memory: "200Mi" + volumeMounts: + - name: devicesock + mountPath: /var/lib/kubelet/ + readOnly: false + - name: log + mountPath: /var/log + - name: config-volume + mountPath: /etc/pcidp + - name: device-info + mountPath: /var/run/k8s.cni.cncf.io/devinfo/dp + volumes: + - name: devicesock + hostPath: + path: /var/lib/kubelet/ + - name: log + hostPath: + path: /var/log + - name: device-info + hostPath: + path: /var/run/k8s.cni.cncf.io/devinfo/dp + type: DirectoryOrCreate + - name: config-volume + configMap: + name: sriovdp-config + items: + - key: config.json + path: config.json +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kube-sriov-device-plugin-arm64 + namespace: kube-system + labels: + tier: node + app: sriovdp +spec: + selector: + matchLabels: + name: sriov-device-plugin + template: + metadata: + labels: + name: sriov-device-plugin + tier: node + app: sriovdp + spec: + hostNetwork: true + nodeSelector: + beta.kubernetes.io/arch: arm64 + tolerations: + - key: node-role.kubernetes.io/control-plane + operator: Exists + effect: NoSchedule + serviceAccountName: sriov-device-plugin + containers: + - name: kube-sriovdp + image: ghcr.io/k8snetworkplumbingwg/sriov-network-device-plugin:latest-arm64 + imagePullPolicy: IfNotPresent + args: + - --log-dir=sriovdp + - --log-level=10 + securityContext: + privileged: true + resources: + requests: + cpu: "250m" + memory: "40Mi" + limits: + cpu: 1 + memory: "200Mi" + volumeMounts: + - name: devicesock + mountPath: /var/lib/kubelet/ + readOnly: false + - name: log + mountPath: /var/log + - name: config-volume + mountPath: 
/etc/pcidp + - name: device-info + mountPath: /var/run/k8s.cni.cncf.io/devinfo/dp + volumes: + - name: devicesock + hostPath: + path: /var/lib/kubelet/ + - name: log + hostPath: + path: /var/log + - name: device-info + hostPath: + path: /var/run/k8s.cni.cncf.io/devinfo/dp + type: DirectoryOrCreate + - name: config-volume + configMap: + name: sriovdp-config + items: + - key: config.json + path: config.json diff --git a/kubevirtci/cluster-up/cluster/kind-sriov/sriov-components/sriov_components.sh b/kubevirtci/cluster-up/cluster/kind-sriov/sriov-components/sriov_components.sh new file mode 100644 index 00000000..23d87aee --- /dev/null +++ b/kubevirtci/cluster-up/cluster/kind-sriov/sriov-components/sriov_components.sh @@ -0,0 +1,212 @@ +#!/bin/bash + +MANIFESTS_DIR="${KUBEVIRTCI_PATH}/cluster/${KUBEVIRT_PROVIDER}/sriov-components/manifests" + +KUSTOMIZE_MULTUS_DIR="${MANIFESTS_DIR}/multus" +MULTUS_MANIFEST="${CUSTOM_MANIFESTS}/multus.yaml" + +CUSTOM_MANIFESTS="${KUBEVIRTCI_CONFIG_PATH}/${KUBEVIRT_PROVIDER}/manifests" +SRIOV_COMPONENTS_MANIFEST="${CUSTOM_MANIFESTS}/sriov-components.yaml" + +SRIOV_DEVICE_PLUGIN_CONFIG_TEMPLATE="${MANIFESTS_DIR}/sriovdp-config.yaml.in" +SRIOV_DEVICE_PLUGIN_CONFIG="${CUSTOM_MANIFESTS}/sriovdp-config.yaml" + +PATCH_SRIOVDP_RESOURCE_PREFIX_TEMPLATE="${MANIFESTS_DIR}/patch-sriovdp-resource-prefix.yaml.in" +PATCH_SRIOVDP_RESOURCE_PREFIX="${CUSTOM_MANIFESTS}/patch-sriovdp-resource-prefix.yaml" + +PATCH_NODE_SELECTOR_TEMPLATE="${MANIFESTS_DIR}/patch-node-selector.yaml.in" +PATCH_NODE_SELECTOR="${CUSTOM_MANIFESTS}/patch-node-selector.yaml" + +KUBECONFIG="${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/.kubeconfig" +KUBECTL="${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/.kubectl --kubeconfig=${KUBECONFIG}" + +function _kubectl() { + ${KUBECTL} "$@" +} + +function _retry() { + local -r tries=$1 + local -r wait_time=$2 + local -r action=$3 + local -r wait_message=$4 + local -r waiting_action=$5 + + eval $action + local return_code=$? 
+ for i in $(seq $tries); do + if [[ $return_code -ne 0 ]]; then + echo "[$i/$tries] $wait_message" + eval $waiting_action + sleep $wait_time + eval $action + return_code=$? + else + return 0 + fi + done + + return 1 +} + +function _check_all_pods_ready() { + all_pods_ready_condition=$(_kubectl get pods -A --no-headers -o custom-columns=':.status.conditions[?(@.type == "Ready")].status') + if [ "$?" -eq 0 ]; then + pods_not_ready_count=$(grep -cw False <<<"$all_pods_ready_condition") + if [ "$pods_not_ready_count" -eq 0 ]; then + return 0 + fi + fi + + return 1 +} + +# not using kubectl wait since with the sriov operator the pods get restarted a couple of times and this is +# more reliable +function sriov_components::wait_pods_ready() { + local -r tries=30 + local -r wait_time=10 + + local -r wait_message="Waiting for all pods to become ready.." + local -r error_message="Not all pods were ready after $(($tries * $wait_time)) seconds" + + local -r get_pods='_kubectl get pods --all-namespaces' + local -r action="_check_all_pods_ready" + + set +x + trap "set -x" RETURN + + if ! _retry "$tries" "$wait_time" "$action" "$wait_message" "$get_pods"; then + echo $error_message + return 1 + fi + + echo "all pods are ready" + return 0 +} + +function sriov_components::wait_allocatable_resource() { + local -r node=$1 + local resource_name=$2 + local -r expected_value=$3 + + local -r tries=48 + local -r wait_time=10 + + local -r wait_message="wait for $node node to have allocatable resource: $resource_name: $expected_value" + local -r error_message="node $node doesnt have allocatable resource $resource_name:$expected_value" + + # it is necessary to add '\' before '.' in the resource name. + resource_name=$(echo $resource_name | sed s/\\./\\\\\./g) + local -r action='_kubectl get node $node -ocustom-columns=:.status.allocatable.$resource_name --no-headers | grep -w $expected_value' + + if ! 
_retry $tries $wait_time "$action" "$wait_message"; then + echo $error_message + return 1 + fi + + return 0 +} + +function sriov_components::deploy_multus() { + _kubectl kustomize "$KUSTOMIZE_MULTUS_DIR" > "$MULTUS_MANIFEST" + + echo "Deploying Multus:" + cat "$MULTUS_MANIFEST" + + _kubectl apply -f "$MULTUS_MANIFEST" + + return 0 +} + +function sriov_components::deploy() { + local -r pf_names=$1 + local -r drivers=$2 + local -r resource_prefix=$3 + local -r resource_name=$4 + local -r label_key=$5 + local -r label_value=$6 + + _create_custom_manifests_dir + _prepare_node_selector_patch "$label_key" "$label_value" + _prepare_sriovdp_resource_prefix_patch "$resource_prefix" + _prepare_device_plugin_config \ + "$pf_names" \ + "$resource_name" \ + "$drivers" + _deploy_sriov_components + + return 0 +} + +function _create_custom_manifests_dir() { + mkdir -p "$CUSTOM_MANIFESTS" + + cp -f $(find "$MANIFESTS_DIR"/*.yaml) "$CUSTOM_MANIFESTS" + + return 0 +} + +function _prepare_node_selector_patch() { + local -r label_key=$1 + local -r label_value=$2 + + ( + export LABEL_KEY=$label_key + export LABEL_VALUE=$label_value + envsubst < "$PATCH_NODE_SELECTOR_TEMPLATE" > "$PATCH_NODE_SELECTOR" + ) +} + +function _prepare_sriovdp_resource_prefix_patch() { + local -r resource_prefix=$1 + + ( + export RESOURCE_PREFIX=$resource_prefix + envsubst < "$PATCH_SRIOVDP_RESOURCE_PREFIX_TEMPLATE" > "$PATCH_SRIOVDP_RESOURCE_PREFIX" + ) +} + +function _prepare_device_plugin_config() { + local -r pf_names=$1 + local -r resource_name=$2 + local -r drivers=$3 + + ( + export RESOURCE_NAME=$resource_name + export DRIVERS=$(_format_json_array "$drivers") + export PF_NAMES=$(_format_json_array "$pf_names") + envsubst < "$SRIOV_DEVICE_PLUGIN_CONFIG_TEMPLATE" > "$SRIOV_DEVICE_PLUGIN_CONFIG" + ) + + return 0 +} + +function _format_json_array() { + local -r string=$1 + + local json_array="$string" + # Replace all spaces with ",": aa bb -> aa","bb + local -r replace='","' + json_array="${json_array// 
/$replace}" + + # Add opening quotes for first element, and closing quotes for last element + # aa","bb -> "aa","bb" + json_array="\"${json_array}\"" + + # Add brackets: "aa","bb" -> ["aa","bb"] + json_array="[${json_array}]" + + echo "$json_array" +} + +function _deploy_sriov_components() { + _kubectl kustomize "$CUSTOM_MANIFESTS" >"$SRIOV_COMPONENTS_MANIFEST" + + echo "Deploying SRIOV components:" + cat "$SRIOV_COMPONENTS_MANIFEST" + + _kubectl apply -f "$SRIOV_COMPONENTS_MANIFEST" + + return 0 +} + diff --git a/kubevirtci/cluster-up/cluster/kind-sriov/sriov-node/configure_vfs.sh b/kubevirtci/cluster-up/cluster/kind-sriov/sriov-node/configure_vfs.sh new file mode 100755 index 00000000..0312a975 --- /dev/null +++ b/kubevirtci/cluster-up/cluster/kind-sriov/sriov-node/configure_vfs.sh @@ -0,0 +1,103 @@ +#! /bin/bash + +set -ex + +function configure_vf_driver() { + local -r vf_sys_device=$1 + local -r driver=$2 + + vf_pci_address=$(basename $vf_sys_device) + # Check if a VF is bound to a different driver + if [ -d "$vf_sys_device/driver" ]; then + vf_bus_pci_device_driver=$(readlink -e $vf_sys_device/driver) + vf_driver_name=$(basename $vf_bus_pci_device_driver) + + # Check if VF already configured with supported driver + if [[ $vf_driver_name == $driver ]]; then + return + else + echo "Unbind VF $vf_pci_address from $vf_driver_name driver" + echo "$vf_pci_address" >> "$vf_bus_pci_device_driver/unbind" + fi + fi + + echo "Bind VF $vf_pci_address to $driver driver" + echo "$driver" >> "$vf_sys_device/driver_override" + echo "$vf_pci_address" >> "/sys/bus/pci/drivers/$driver/bind" + echo "" >> "$vf_sys_device/driver_override" + + return 0 +} + +function create_vfs() { + local -r pf_net_device=$1 + local -r vfs_count=$2 + + local -r pf_name=$(basename $pf_net_device) + local -r pf_sys_device=$(readlink -e $pf_net_device) + + local -r sriov_totalvfs_content=$(cat $pf_sys_device/sriov_totalvfs) + [ $sriov_totalvfs_content -lt $vfs_count ] && \ + echo "FATAL: PF $pf_name, 
VF's count should be up to sriov_totalvfs: $sriov_totalvfs_content" >&2 && return 1
+
+    echo "Creating $vfs_count VFs on PF $pf_name "
+    echo 0 >> "$pf_sys_device/sriov_numvfs"
+    echo "$vfs_count" >> "$pf_sys_device/sriov_numvfs"
+    sleep 3
+
+    return 0
+}
+
+function validate_run_with_sudo() {
+    [ "$(id -u)" -ne 0 ] && echo "FATAL: This script requires sudo privileges" >&2 && return 1
+
+    return 0
+}
+
+function validate_sysfs_mount_as_rw() {
+    local -r sysfs_permissions=$(grep -Po 'sysfs.*\K(ro|rw)' /proc/mounts)
+    [ "$sysfs_permissions" != rw ] && echo "FATAL: sysfs is read-only, try to remount as RW" >&2 && return 1
+
+    return 0
+}
+
+function ensure_driver_is_loaded() {
+    local -r driver_name=$1
+    local -r module_name=$2
+
+    if ! grep "$module_name" /proc/modules; then
+        if ! modprobe "$driver_name"; then
+            echo "FATAL: failed to load $DRIVER kernel module $DRIVER_KMODULE" >&2 && return 1
+        fi
+    fi
+
+    return 0
+}
+
+DRIVER="${DRIVER:-vfio-pci}"
+DRIVER_KMODULE="${DRIVER_KMODULE:-vfio_pci}"
+VFS_COUNT=${VFS_COUNT:-6}
+
+[ $((VFS_COUNT)) -lt 1 ] && echo "INFO: VFS_COUNT is lower than 1, nothing to do..."
&& exit 0
+
+validate_run_with_sudo
+validate_sysfs_mount_as_rw
+ensure_driver_is_loaded $DRIVER $DRIVER_KMODULE
+
+sriov_pfs=( $(find /sys/class/net/*/device/sriov_numvfs) )
+[ "${#sriov_pfs[@]}" -eq 0 ] && echo "FATAL: Could not find available sriov PFs" >&2 && exit 1
+
+for pf_name in "${sriov_pfs[@]}"; do
+    pf_device=$(dirname "$pf_name")
+
+    echo "Create VF's"
+    create_vfs "$pf_device" "$VFS_COUNT"
+
+    echo "Configuring VF's drivers"
+    # /sys/class/net//device/virtfn*
+    vfs_sys_devices=$(readlink -e $pf_device/virtfn*)
+    for vf in $vfs_sys_devices; do
+        configure_vf_driver "$vf" $DRIVER
+        ls -l "$vf/driver"
+    done
+done
diff --git a/kubevirtci/cluster-up/cluster/kind-sriov/sriov-node/node.sh b/kubevirtci/cluster-up/cluster/kind-sriov/sriov-node/node.sh
new file mode 100644
index 00000000..8d1a997c
--- /dev/null
+++ b/kubevirtci/cluster-up/cluster/kind-sriov/sriov-node/node.sh
@@ -0,0 +1,124 @@
+#!/bin/bash
+
+SCRIPT_PATH=${SCRIPT_PATH:-$(dirname "$(realpath "$0")")}
+
+CONFIGURE_VFS_SCRIPT_PATH="${SCRIPT_PATH}/configure_vfs.sh"
+PFS_IN_USE=${PFS_IN_USE:-}
+
+function node::discover_host_pfs() {
+    local -r sriov_pfs=( $(find /sys/class/net/*/device/sriov_numvfs) )
+    [ "${#sriov_pfs[@]}" -eq 0 ] && echo "FATAL: Could not find available sriov PFs on host" >&2 && return 1
+
+    local pf_name
+    local pfs_names=()
+    for pf in "${sriov_pfs[@]}"; do
+        pf_name="${pf%%/device/*}"
+        pf_name="${pf_name##*/}"
+        if [ $(echo "${PF_BLACKLIST[@]}" | grep "${pf_name}") ]; then
+            continue
+        fi
+
+        pfs_names+=( $pf_name )
+    done
+
+    echo "${pfs_names[@]}"
+}
+
+# node::configure_sriov_pfs moves SR-IOV PFs to nodes netns.
+# It exports 'PFS_IN_USE' environment variable with a list
+# of SR-IOV PFs that have been moved to the nodes netns.
+function node::configure_sriov_pfs() {
+    local -r nodes_array=($1)
+    local -r pfs_names_array=($2)
+    local -r pf_count_per_node=$3
+    local -r pfs_in_use_var_name=$4
+
+    local pfs_to_move=()
+    local pfs_array_offset=0
+    local pfs_in_use=()
+    local node_exec
+
+    # 'iplink' learns which network namespaces there are by checking /var/run/netns
+    mkdir -p /var/run/netns
+    for node in "${nodes_array[@]}"; do
+        prepare_node_netns "$node"
+
+        ## Move PF's to node netns
+        # Slice '$pfs_names_array' to have unique slice for each node
+        # with '$pf_count_per_node' PF's names
+        pfs_to_move=( "${pfs_names_array[@]:$pfs_array_offset:$pf_count_per_node}" )
+        echo "Moving '${pfs_to_move[*]}' PF's to '$node' netns"
+        for pf_name in "${pfs_to_move[@]}"; do
+            move_pf_to_node_netns "$node" "$pf_name"
+        done
+        # Increment the offset for next slice
+        pfs_array_offset=$((pfs_array_offset + pf_count_per_node))
+        pfs_in_use+=( "${pfs_to_move[@]}" )
+
+        # KIND mounts sysfs as read-only by default, remount as R/W
+        node_exec="${CRI_BIN} exec $node"
+        $node_exec mount -o remount,rw /sys
+
+        ls_node_dev_vfio="${node_exec} ls -la -Z /dev/vfio"
+        $ls_node_dev_vfio
+        $node_exec chmod 0666 /dev/vfio/vfio
+        $ls_node_dev_vfio
+
+        _kubectl label node $node $SRIOV_NODE_LABEL
+    done
+
+    # Set new variable with the used PF names that will be consumed by the caller
+    eval $pfs_in_use_var_name="'${pfs_in_use[*]}'"
+}
+
+# node::configure_sriov_vfs creates SR-IOV VFs and configures their driver on each node.
+function node::configure_sriov_vfs() {
+    local -r nodes_array=($1)
+    local -r driver=$2
+    local -r driver_kmodule=$3
+    local -r vfs_count=$4
+
+    local -r config_vf_script=$(basename "$CONFIGURE_VFS_SCRIPT_PATH")
+
+    for node in "${nodes_array[@]}"; do
+        ${CRI_BIN} cp "$CONFIGURE_VFS_SCRIPT_PATH" "$node:/"
+        ${CRI_BIN} exec "$node" bash -c "DRIVER=$driver DRIVER_KMODULE=$driver_kmodule VFS_COUNT=$vfs_count ./$config_vf_script"
+        ${CRI_BIN} exec "$node" ls -la -Z /dev/vfio
+    done
+}
+
+function prepare_node_netns() {
+    local -r node_name=$1
+    local -r node_pid=$($CRI_BIN inspect -f '{{.State.Pid}}' "$node_name")
+
+    # Docker does not create the required symlink for a container netns
+    # it prevents iplink from learning that container netns.
+    # Thus it is necessary to create symlink between the current
+    # worker node (container) netns to /var/run/netns (consumed by iplink)
+    # Now the node container netns named with the node name will be visible.
+    ln -sf "/proc/$node_pid/ns/net" "/var/run/netns/$node_name"
+}
+
+function move_pf_to_node_netns() {
+    local -r node_name=$1
+    local -r pf_name=$2
+
+    # Move PF to node network-namespace
+    ip link set "$pf_name" netns "$node_name"
+    # Ensure current PF is up
+    ip netns exec "$node_name" ip link set up dev "$pf_name"
+    ip netns exec "$node_name" ip link show
+}
+
+function node::total_vfs_count() {
+    local -r node_name=$1
+    local -r node_pid=$($CRI_BIN inspect -f '{{.State.Pid}}' "$node_name")
+    local -r pfs_sriov_numvfs=( $(cat /proc/$node_pid/root/sys/class/net/*/device/sriov_numvfs) )
+    local total_vfs_on_node=0
+
+    for num_vfs in "${pfs_sriov_numvfs[@]}"; do
+        total_vfs_on_node=$((total_vfs_on_node + num_vfs))
+    done
+
+    echo "$total_vfs_on_node"
+}
diff --git a/kubevirtci/cluster-up/cluster/kind-sriov/sriovdp_setup.sh b/kubevirtci/cluster-up/cluster/kind-sriov/sriovdp_setup.sh
new file mode 100755
index 00000000..2eed8318
--- /dev/null
+++ b/kubevirtci/cluster-up/cluster/kind-sriov/sriovdp_setup.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+get_sriov_pci_root_addresses() {
+    for dir in $(find /sys/devices/ -name sriov_totalvfs -exec dirname {} \;); do
+        if [ $(cat $dir/sriov_numvfs) -gt 0 ]; then
+            # use perl because sed doesn't support non-greedy matching
+            basename $dir | perl -pe 's|(.*?:)(.*)|\2|'
+        fi
+    done
+}
+
+create_pci_string() {
+    local quoted_values=($(echo "${pci_addresses[@]}" | xargs printf "\"%s\" " ))
+    local quoted_as_string=${quoted_values[@]}
+    if [ "$quoted_as_string" = "\"\"" ]; then
+        pci_string=""
+    else
+        pci_string=${quoted_as_string// /, }
+    fi
}
+}
+
+sriov_device_plugin() {
+    pci_addresses=$(get_sriov_pci_root_addresses)
+    create_pci_string
+
+    cat <<EOF > /etc/pcidp/config.json
+{
+    "resourceList":
+    [
+        {
+            "resourceName": "sriov",
+            "rootDevices": [$pci_string],
+            "sriovMode": true,
+            "deviceType": "vfio"
+        }
+    ]
+}
+EOF
+}
+
+mkdir -p /etc/pcidp
+sriov_device_plugin
diff --git a/kubevirtci/cluster-up/cluster/kind-sriov/version b/kubevirtci/cluster-up/cluster/kind-sriov/version
new file mode 100644
index 00000000..d21d277b
--- /dev/null
+++ b/kubevirtci/cluster-up/cluster/kind-sriov/version
@@ -0,0 +1 @@
+0.25.0
diff --git a/kubevirtci/cluster-up/cluster/kind/README.md b/kubevirtci/cluster-up/cluster/kind/README.md
new file mode 100644
index 00000000..0b0eceb1
--- /dev/null
+++ b/kubevirtci/cluster-up/cluster/kind/README.md
@@ -0,0 +1,13 @@
+# K8S in a Kind cluster
+
+This folder serves as base to spin a k8s cluster up using [kind](https://github.com/kubernetes-sigs/kind) The cluster is completely ephemeral and is recreated on every cluster restart.
+The KubeVirt containers are built on the local machine and are then pushed to a registry which is exposed at
+`localhost:5000`.
+ +A kind cluster must specify: +* KIND_NODE_IMAGE referring the kind node image as one among those listed [here](https://hub.docker.com/r/kindest/node/tags) (please be aware that there might be compatibility issues between the kind executable and the node version) +* CLUSTER_NAME representing the cluster name + +The provider is supposed to copy a valid `kind.yaml` file under `${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/kind.yaml` + +Check [kind-k8s-1.19](../kind-k8s-1.19) or [kind-1.22-sriov](kind-1.22-sriov) as examples on how to implement a kind cluster provider. diff --git a/kubevirtci/cluster-up/cluster/kind/bump-kind.sh b/kubevirtci/cluster-up/cluster/kind/bump-kind.sh new file mode 100755 index 00000000..e85c075b --- /dev/null +++ b/kubevirtci/cluster-up/cluster/kind/bump-kind.sh @@ -0,0 +1,47 @@ +#!/bin/bash -e +# +# This file is part of the KubeVirt project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Copyright 2024 Red Hat, Inc. + + +# Usage ./hack/bump-kind.sh +# If no parameters beside provider, are used, it will take latest kind version, +# with k8s version according latest kubevirtci vm based provider. 
+# If only kind_version is used, it will take k8s version according latest kubevirtci vm based provider +# examples: ./hack/bump-kind.sh kind-sriov v0.19.0 +# ./hack/bump-kind.sh kind-sriov v0.19.0 1.28 +# +# Note: always takes the latest patch available +# +# https://github.com/kubernetes-sigs/kind/releases + +PROVIDER=${1:?"Error: Argument is missing"} +KIND_RELEASE=${2:-$(curl -s https://api.github.com/repos/kubernetes-sigs/kind/releases/latest | jq -r .tag_name)} +K8S_VERSION=${3:-$(find cluster-provision/k8s/* -maxdepth 0 -type d -printf '%f\n' | tail -1 | cut -d'-' -f1)} + +function main() { + image=$(curl -sL https://api.github.com/repos/kubernetes-sigs/kind/releases/tags/$KIND_RELEASE | jq -r '.body' | grep -E "$K8S_VERSION(\.[0-9])?:" | head -1 | awk '{print $3}' | tr -d \` | sed 's/\r//g') + if [[ $image == "" ]]; then + echo "ERROR: image not found for kind release $KIND_RELEASE, k8s version $K8S_VERSION" + exit 1 + fi + + echo $image > cluster-up/cluster/$PROVIDER/image + echo $KIND_RELEASE | cut -c2- > cluster-up/cluster/$PROVIDER/version + echo "Set $KIND_RELEASE, image: $image" +} + +main "$@" diff --git a/kubevirtci/cluster-up/cluster/kind/check-cluster-up.sh b/kubevirtci/cluster-up/cluster/kind/check-cluster-up.sh new file mode 100755 index 00000000..e6c889f2 --- /dev/null +++ b/kubevirtci/cluster-up/cluster/kind/check-cluster-up.sh @@ -0,0 +1,83 @@ +#!/bin/bash +# +# This file is part of the KubeVirt project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Copyright 2021 Red Hat, Inc.
+
+set -exuo pipefail
+
+SCRIPT_PATH=$(dirname "$(realpath "$0")")
+
+kubevirtci_path="$(realpath "${SCRIPT_PATH}/../../..")/"
+PROVIDER_PATH="${kubevirtci_path}/cluster-up/cluster/${KUBEVIRT_PROVIDER}"
+
+RUN_KUBEVIRT_CONFORMANCE=${RUN_KUBEVIRT_CONFORMANCE:-"false"}
+
+function detect_cri() {
+    if podman ps >/dev/null 2>&1; then echo podman; elif docker ps >/dev/null 2>&1; then echo docker; fi
+}
+
+export CRI_BIN=${CRI_BIN:-$(detect_cri)}
+
+(
+    cd $kubevirtci_path
+    kubectl="./cluster-up/kubectl.sh"
+    echo "Wait for pods to be ready.."
+    timeout 5m bash -c "until ${kubectl} wait --for=condition=Ready pod --timeout=30s --all -A; do sleep 1; done"
+    timeout 5m bash -c "until ${kubectl} wait --for=condition=Ready pod --timeout=30s -n kube-system --all; do sleep 1; done"
+    ${kubectl} get nodes
+    ${kubectl} get pods -A
+    echo ""
+
+    nodes=$(${kubectl} get nodes --no-headers | awk '{print $1}')
+    for node in $nodes; do
+        node_exec="${CRI_BIN} exec ${node}"
+        echo "[$node] network interfaces status:"
+        ${node_exec} ip a
+        echo ""
+        echo "[$node] route table:"
+        ${node_exec} ip r
+        echo ""
+        echo "[$node] hosts file:"
+        ${node_exec} cat /etc/hosts
+        echo ""
+        echo "[$node] resolve config:"
+        ${node_exec} cat /etc/resolv.conf
+        echo ""
+    done
+
+    if [ "$RUN_KUBEVIRT_CONFORMANCE" == "true" ]; then
+        nightly_build_base_url="https://storage.googleapis.com/kubevirt-prow/devel/nightly/release/kubevirt/kubevirt"
+        latest=$(curl -sL "${nightly_build_base_url}/latest")
+
+        echo "Deploy latest nightly build Kubevirt"
+        if [ "$(${kubectl} get kubevirts -n kubevirt kubevirt -ojsonpath='{.status.phase}')" != "Deployed" ]; then
+            ${kubectl} apply -f "${nightly_build_base_url}/${latest}/kubevirt-operator.yaml"
+            ${kubectl} apply -f "${nightly_build_base_url}/${latest}/kubevirt-cr.yaml"
+        fi
+        ${kubectl} wait -n kubevirt kv kubevirt --for
condition=Available --timeout 15m
+
+        echo "Run latest nightly build Kubevirt conformance tests"
+        kubevirt_plugin="--plugin ${nightly_build_base_url}/${latest}/conformance.yaml"
+        SONOBUOY_EXTRA_ARGS="${SONOBUOY_EXTRA_ARGS} ${kubevirt_plugin}"
+
+        commit=$(curl -sL "${nightly_build_base_url}/${latest}/commit")
+        commit="${commit:0:10}"
+        container_tag="--plugin-env kubevirt-conformance.CONTAINER_TAG=${latest}_${commit}"
+        SONOBUOY_EXTRA_ARGS="${SONOBUOY_EXTRA_ARGS} ${container_tag}"
+
+        hack/conformance.sh ${PROVIDER_PATH}/conformance.json
+    fi
+)
diff --git a/kubevirtci/cluster-up/cluster/kind/common.sh b/kubevirtci/cluster-up/cluster/kind/common.sh
new file mode 100755
index 00000000..e63614b9
--- /dev/null
+++ b/kubevirtci/cluster-up/cluster/kind/common.sh
@@ -0,0 +1,351 @@
+#!/usr/bin/env bash
+
+set -e
+
+function detect_cri() {
+    if podman ps >/dev/null 2>&1; then echo podman; elif docker ps >/dev/null 2>&1; then echo docker; fi
+}
+
+export CRI_BIN=${CRI_BIN:-$(detect_cri)}
+CONFIG_WORKER_CPU_MANAGER=${CONFIG_WORKER_CPU_MANAGER:-false}
+# only setup ipFamily when the environment variable is not empty
+# available value: ipv4, ipv6, dual
+IPFAMILY=${IPFAMILY}
+
+# check CPU arch
+PLATFORM=$(uname -m)
+case ${PLATFORM} in
+x86_64* | i?86_64* | amd64*)
+    ARCH="amd64"
+    ;;
+ppc64le)
+    ARCH="ppc64le"
+    ;;
+aarch64* | arm64*)
+    ARCH="arm64"
+    ;;
+*)
+    echo "invalid Arch, only support x86_64, ppc64le, aarch64"
+    exit 1
+    ;;
+esac
+
+NODE_CMD="${CRI_BIN} exec -it -d "
+export KIND_MANIFESTS_DIR="${KUBEVIRTCI_PATH}/cluster/kind/manifests"
+export KIND_NODE_CLI="${CRI_BIN} exec -it "
+export KUBEVIRTCI_PATH
+export KUBEVIRTCI_CONFIG_PATH
+KIND_DEFAULT_NETWORK="kind"
+
+KUBECTL="${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/.kubectl --kubeconfig=${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/.kubeconfig"
+
+REGISTRY_NAME=${CLUSTER_NAME}-registry
+
+MASTER_NODES_PATTERN="control-plane"
+WORKER_NODES_PATTERN="worker"
+
+KUBEVIRT_WITH_KIND_ETCD_IN_MEMORY=${KUBEVIRT_WITH_KIND_ETCD_IN_MEMORY:-"true"} +ETCD_IN_MEMORY_DATA_DIR="/tmp/kind-cluster-etcd" + +function _wait_kind_up { + echo "Waiting for kind to be ready ..." + if [[ $KUBEVIRT_PROVIDER =~ kind-.*1\.1.* ]]; then + selector="master" + else + selector="control-plane" + fi + while [ -z "$(${CRI_BIN} exec --privileged ${CLUSTER_NAME}-control-plane kubectl --kubeconfig=/etc/kubernetes/admin.conf get nodes --selector=node-role.kubernetes.io/${selector} -o=jsonpath='{.items..status.conditions[-1:].status}' | grep True)" ]; do + echo "Waiting for kind to be ready ..." + sleep 10 + done + echo "Waiting for dns to be ready ..." + _kubectl wait -n kube-system --timeout=12m --for=condition=Ready -l k8s-app=kube-dns pods +} + +function _wait_containers_ready { + echo "Waiting for all containers to become ready ..." + _kubectl wait --for=condition=Ready pod --all -n kube-system --timeout 12m +} + +function _fetch_kind() { + KIND="${KUBEVIRTCI_CONFIG_PATH}"/"$KUBEVIRT_PROVIDER"/.kind + current_kind_version=$($KIND --version |& awk '{print $3}') + if [[ $current_kind_version != $KIND_VERSION ]]; then + echo "Downloading kind v$KIND_VERSION" + curl -LSs https://github.com/kubernetes-sigs/kind/releases/download/v$KIND_VERSION/kind-linux-${ARCH} -o "$KIND" + chmod +x "$KIND" + fi +} + +function _configure-insecure-registry-and-reload() { + local cmd_context="${1}" # context to run command e.g. 
sudo, docker exec + ${cmd_context} "$(_insecure-registry-config-cmd)" + ${cmd_context} "$(_reload-containerd-daemon-cmd)" +} + +function _reload-containerd-daemon-cmd() { + echo "systemctl restart containerd" +} + +function _insecure-registry-config-cmd() { + echo "sed -i '/\[plugins.cri.registry.mirrors\]/a\ [plugins.cri.registry.mirrors.\"registry:5000\"]\n\ endpoint = [\"http://registry:5000\"]' /etc/containerd/config.toml" +} + +# this works since the nodes use the same names as containers +function _ssh_into_node() { + ${CRI_BIN} exec -it "$1" bash +} + +function _run_registry() { + local -r network=${1} + + until [ -z "$($CRI_BIN ps -a | grep $REGISTRY_NAME)" ]; do + ${CRI_BIN} stop $REGISTRY_NAME || true + ${CRI_BIN} rm $REGISTRY_NAME || true + sleep 5 + done + ${CRI_BIN} run -d --network=${network} -p $HOST_PORT:5000 --restart=always --name $REGISTRY_NAME quay.io/kubevirtci/library-registry:2.7.1 + +} + +function _configure_registry_on_node() { + local -r node=${1} + local -r network=${2} + + _configure-insecure-registry-and-reload "${NODE_CMD} ${node} bash -c" + ${NODE_CMD} ${node} sh -c "echo $(${CRI_BIN} inspect --format "{{.NetworkSettings.Networks.${network}.IPAddress }}" $REGISTRY_NAME)'\t'registry >> /etc/hosts" +} + +function _install_cnis { + _install_cni_plugins +} + +function _install_cni_plugins { + local CNI_VERSION="v0.8.5" + local CNI_ARCHIVE="cni-plugins-linux-${ARCH}-$CNI_VERSION.tgz" + local CNI_URL="https://github.com/containernetworking/plugins/releases/download/$CNI_VERSION/$CNI_ARCHIVE" + if [ ! 
-f ${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/$CNI_ARCHIVE ]; then + echo "Downloading $CNI_ARCHIVE" + curl -sSL -o ${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/$CNI_ARCHIVE $CNI_URL + fi + + for node in $(_get_nodes | awk '{print $1}'); do + ${CRI_BIN} cp "${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/$CNI_ARCHIVE" $node:/ + ${CRI_BIN} exec $node /bin/sh -c "tar xf $CNI_ARCHIVE -C /opt/cni/bin" + done +} + +function prepare_config() { + BASE_PATH=${KUBEVIRTCI_CONFIG_PATH:-$PWD} + cat >$BASE_PATH/$KUBEVIRT_PROVIDER/config-provider-$KUBEVIRT_PROVIDER.sh < ${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/.kubeconfig + + if ${CRI_BIN} exec ${CLUSTER_NAME}-control-plane ls /usr/bin/kubectl > /dev/null; then + kubectl_path=/usr/bin/kubectl + elif ${CRI_BIN} exec ${CLUSTER_NAME}-control-plane ls /bin/kubectl > /dev/null; then + kubectl_path=/bin/kubectl + else + echo "Error: kubectl not found on node, exiting" + exit 1 + fi + + ${CRI_BIN} cp ${CLUSTER_NAME}-control-plane:$kubectl_path ${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/.kubectl + + chmod u+x ${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/.kubectl + + if [ $KUBEVIRT_WITH_KIND_ETCD_IN_MEMORY == "true" ]; then + for node in $(_get_nodes | awk '{print $1}' | grep control-plane); do + echo "[$node] Checking KIND cluster etcd data is mounted to RAM: $ETCD_IN_MEMORY_DATA_DIR" + ${CRI_BIN} exec $node df -h $(dirname $ETCD_IN_MEMORY_DATA_DIR) | grep -P '(tmpfs|ramfs)' + [ $(echo $?) != 0 ] && echo "[$node] etcd data directory is not mounted to RAM" && return 1 + + ${CRI_BIN} exec $node du -h $ETCD_IN_MEMORY_DATA_DIR + [ $(echo $?) != 0 ] && echo "[$node] Failed to check etcd data directory" && return 1 + done + fi + + _install_cnis + + _wait_kind_up + _kubectl cluster-info + _fix_node_labels + + until _get_nodes + do + echo "Waiting for all nodes to become ready ..." 
+        sleep 10
+    done
+
+    # wait until k8s pods are running
+    while [ -n "$(_get_pods | grep -v Running)" ]; do
+        echo "Waiting for all pods to enter the Running state ..."
+        _get_pods | >&2 grep -v Running || true
+        sleep 10
+    done
+
+    _wait_containers_ready
+    _run_registry "$KIND_DEFAULT_NETWORK"
+
+    for node in $(_get_nodes | awk '{print $1}'); do
+        _configure_registry_on_node "$node" "$KIND_DEFAULT_NETWORK"
+        _configure_network "$node"
+    done
+    prepare_config
+}
+
+function _add_extra_mounts() {
+    cat <<EOF >> ${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/kind.yaml
+  extraMounts:
+  - containerPath: /var/log/audit
+    hostPath: /var/log/audit
+    readOnly: true
+EOF
+
+    if [[ "$KUBEVIRT_PROVIDER" =~ sriov.* || "$KUBEVIRT_PROVIDER" =~ vgpu.* ]]; then
+        cat <<EOF >> ${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/kind.yaml
+  - containerPath: /dev/vfio/
+    hostPath: /dev/vfio/
+EOF
+    fi
+}
+
+function _add_kubeadm_cpu_manager_config_patch() {
+    cat << EOF >> ${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/kind.yaml
+  kubeadmConfigPatches:
+  - |-
+    kind: JoinConfiguration
+    nodeRegistration:
+      kubeletExtraArgs:
+        "feature-gates": "CPUManager=true"
+        "cpu-manager-policy": "static"
+        "kube-reserved": "cpu=500m"
+        "system-reserved": "cpu=500m"
+EOF
+}
+
+function _add_workers() {
+    # appending eventual workers to the yaml
+    for ((n=0;n<$(($KUBEVIRT_NUM_NODES-1));n++)); do
+        cat << EOF >> ${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/kind.yaml
+- role: worker
+EOF
+        if [ $CONFIG_WORKER_CPU_MANAGER == true ]; then
+            _add_kubeadm_cpu_manager_config_patch
+        fi
+        _add_extra_mounts
+    done
+}
+
+function _add_kubeadm_config_patches() {
+    if [ $KUBEVIRT_WITH_KIND_ETCD_IN_MEMORY == "true" ]; then
+        cat <<EOF >> ${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/kind.yaml
+kubeadmConfigPatches:
+- |
+  kind: ClusterConfiguration
+  metadata:
+    name: config
+  etcd:
+    local:
+      dataDir: $ETCD_IN_MEMORY_DATA_DIR
+EOF
+        echo "KIND cluster etcd data will be mounted to RAM on kind nodes: $ETCD_IN_MEMORY_DATA_DIR"
+    fi
+}
+
+function _setup_ipfamily() {
+    if [ "$IPFAMILY" != "" ]; then
+        cat <<EOF >> ${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/kind.yaml
+networking:
+  ipFamily: $IPFAMILY
+EOF
+        echo "KIND cluster ip family has been set to $IPFAMILY"
+    fi
+}
+
+function _prepare_kind_config() {
+    _add_workers
+    _add_kubeadm_config_patches
+    _setup_ipfamily
+    echo "Final KIND config:"
+    cat ${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/kind.yaml
+}
+
+function kind_up() {
+    _fetch_kind
+    _prepare_kind_config
+    setup_kind
+}
+
+function _kubectl() {
+    ${KUBECTL} "$@"
+}
+
+function down() {
+    _fetch_kind
+    if [ -z "$($KIND get clusters | grep ${CLUSTER_NAME})" ]; then
+        return
+    fi
+
+    worker_nodes=$(_get_nodes | grep -i $WORKER_NODES_PATTERN | awk '{print $1}')
+    for worker_node in $worker_nodes; do
+        if ip netns exec $worker_node ip -details address | grep "vf 0" -B 2 > /dev/null; then
+            iface=$(ip netns exec $worker_node ip -details address | grep "vf 0" -B 2 | grep -E 'UP|DOWN' | awk -F": " '{print $2}')
+            ip netns exec $worker_node ip link set $iface netns 1 && echo "gracefully detached $iface from $worker_node"
+        fi
+    done
+
+    # On CI, avoid failing an entire test run just because of a deletion error
+    $KIND delete cluster --name=${CLUSTER_NAME} || [ "$CI" = "true" ]
+    ${CRI_BIN} rm -f $REGISTRY_NAME >> /dev/null
+    rm -f ${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/kind.yaml
+}
diff --git a/kubevirtci/cluster-up/cluster/kind/configure-registry-proxy.sh b/kubevirtci/cluster-up/cluster/kind/configure-registry-proxy.sh
new file mode 100755
index 00000000..5b1a5abd
--- /dev/null
+++ b/kubevirtci/cluster-up/cluster/kind/configure-registry-proxy.sh
@@ -0,0 +1,40 @@
+# source: https://github.com/rpardini/docker-registry-proxy#kind-cluster
+#
+# This script executes docker-registry-proxy cluster nodes
+# setup script on each cluster node.
+# Basically what the setup script does is loading the proxy certificate +# and set HTTP_PROXY and NO_PROXY env vars to enable direct communication +# between cluster components (e.g: pods, nodes and services). +# +# Args: +# KIND_BIN - KinD binary path. +# PROXY_HOSTNAME - docker-registry-proxy endpoint hostname. +# CLUSTER_NAME - KinD cluster name. +# +# Usage example: +# KIND_BIN="./kind" CLUSTER_NAME="test" PROXY_HOSTNAME="proxy.ci.com" \ +# ./configure-registry-proxy.sh +# + +#! /bin/bash + +set -ex + +CRI_BIN=${CRI_BIN:-docker} + +KIND_BIN="${KIND_BIN:-./kind}" +PROXY_HOSTNAME="${PROXY_HOSTNAME:-docker-registry-proxy}" +CLUSTER_NAME="${CLUSTER_NAME:-sriov}" + +SETUP_URL="http://${PROXY_HOSTNAME}:3128/setup/systemd" +pids="" +for node in $($KIND_BIN get nodes --name "$CLUSTER_NAME"); do + $CRI_BIN exec "$node" sh -c "\ + curl $SETUP_URL | \ + sed s/docker\.service/containerd\.service/g | \ + sed '/Environment/ s/$/ \"NO_PROXY=127.0.0.0\/8,10.0.0.0\/8,172.16.0.0\/12,192.168.0.0\/16\"/' | \ + bash" & + pids="$pids $!" 
+done +wait $pids + diff --git a/kubevirtci/cluster-up/cluster/kind/manifests/kind.yaml b/kubevirtci/cluster-up/cluster/kind/manifests/kind.yaml new file mode 100644 index 00000000..0e2ff6bc --- /dev/null +++ b/kubevirtci/cluster-up/cluster/kind/manifests/kind.yaml @@ -0,0 +1,8 @@ +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +containerdConfigPatches: +- |- + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."registry:5000"] + endpoint = ["http://registry:5000"] +nodes: +- role: control-plane diff --git a/kubevirtci/cluster-up/cluster/kind/manifests/local-volume.yaml b/kubevirtci/cluster-up/cluster/kind/manifests/local-volume.yaml new file mode 100644 index 00000000..b357d1c7 --- /dev/null +++ b/kubevirtci/cluster-up/cluster/kind/manifests/local-volume.yaml @@ -0,0 +1,130 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: local + annotations: + storageclass.kubernetes.io/is-default-class: "true" +provisioner: kubernetes.io/no-provisioner +volumeBindingMode: WaitForFirstConsumer +reclaimPolicy: Delete +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: local-storage-config +data: + storageClassMap: | + local: + hostDir: /mnt/local-storage/local + mountDir: /mnt/local-storage/local +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: local-storage-provisioner-pv-binding +subjects: +- kind: ServiceAccount + name: local-storage-admin + namespace: default +roleRef: + kind: ClusterRole + name: system:persistent-volume-provisioner + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: local-storage-provisioner-node-clusterrole +rules: +- apiGroups: [""] + resources: ["nodes"] + verbs: ["get"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: local-storage-provisioner-node-binding +subjects: +- kind: ServiceAccount + name: local-storage-admin + namespace: default +roleRef: + kind: 
ClusterRole + name: local-storage-provisioner-node-clusterrole + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: local-storage-provisioner-jobs-role +rules: +- apiGroups: + - 'batch' + resources: + - jobs + verbs: + - '*' +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: local-storage-provisioner-jobs-rolebinding +subjects: +- kind: ServiceAccount + name: local-storage-admin +roleRef: + kind: Role + name: local-storage-provisioner + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: local-storage-admin +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: local-volume-provisioner + labels: + app: local-volume-provisioner +spec: + selector: + matchLabels: + app: local-volume-provisioner + template: + metadata: + labels: + app: local-volume-provisioner + spec: + serviceAccountName: local-storage-admin + containers: + - image: "quay.io/external_storage/local-volume-provisioner:v2.3.4" + name: provisioner + securityContext: + privileged: true + env: + - name: MY_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: MY_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: JOB_CONTAINER_IMAGE + value: "quay.io/external_storage/local-volume-provisioner:v2.3.4" + volumeMounts: + - mountPath: /etc/provisioner/config + name: provisioner-config + readOnly: true + - mountPath: /mnt/local-storage + name: local-storage + mountPropagation: "HostToContainer" + volumes: + - name: provisioner-config + configMap: + name: local-storage-config + - name: local-storage + hostPath: + path: /mnt/local-storage diff --git a/kubevirtci/cluster-up/down.sh b/kubevirtci/cluster-up/down.sh new file mode 100755 index 00000000..aca97efa --- /dev/null +++ b/kubevirtci/cluster-up/down.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +if [ -z "$KUBEVIRTCI_PATH" ]; then + KUBEVIRTCI_PATH="$( + cd "$(dirname 
"$BASH_SOURCE[0]")/" + echo "$(pwd)/" + )" +fi + +source ${KUBEVIRTCI_PATH}hack/common.sh +source ${KUBEVIRTCI_CLUSTER_PATH}/$KUBEVIRT_PROVIDER/provider.sh +down diff --git a/kubevirtci/cluster-up/hack/common.key b/kubevirtci/cluster-up/hack/common.key new file mode 100644 index 00000000..7d6a0839 --- /dev/null +++ b/kubevirtci/cluster-up/hack/common.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEA6NF8iallvQVp22WDkTkyrtvp9eWW6A8YVr+kz4TjGYe7gHzI +w+niNltGEFHzD8+v1I2YJ6oXevct1YeS0o9HZyN1Q9qgCgzUFtdOKLv6IedplqoP +kcmF0aYet2PkEDo3MlTBckFXPITAMzF8dJSIFo9D8HfdOV0IAdx4O7PtixWKn5y2 +hMNG0zQPyUecp4pzC6kivAIhyfHilFR61RGL+GPXQ2MWZWFYbAGjyiYJnAmCP3NO +Td0jMZEnDkbUvxhMmBYSdETk1rRgm+R4LOzFUGaHqHDLKLX+FIPKcF96hrucXzcW +yLbIbEgE98OHlnVYCzRdK8jlqm8tehUc9c9WhQIBIwKCAQEA4iqWPJXtzZA68mKd +ELs4jJsdyky+ewdZeNds5tjcnHU5zUYE25K+ffJED9qUWICcLZDc81TGWjHyAqD1 +Bw7XpgUwFgeUJwUlzQurAv+/ySnxiwuaGJfhFM1CaQHzfXphgVml+fZUvnJUTvzf +TK2Lg6EdbUE9TarUlBf/xPfuEhMSlIE5keb/Zz3/LUlRg8yDqz5w+QWVJ4utnKnK +iqwZN0mwpwU7YSyJhlT4YV1F3n4YjLswM5wJs2oqm0jssQu/BT0tyEXNDYBLEF4A +sClaWuSJ2kjq7KhrrYXzagqhnSei9ODYFShJu8UWVec3Ihb5ZXlzO6vdNQ1J9Xsf +4m+2ywKBgQD6qFxx/Rv9CNN96l/4rb14HKirC2o/orApiHmHDsURs5rUKDx0f9iP +cXN7S1uePXuJRK/5hsubaOCx3Owd2u9gD6Oq0CsMkE4CUSiJcYrMANtx54cGH7Rk +EjFZxK8xAv1ldELEyxrFqkbE4BKd8QOt414qjvTGyAK+OLD3M2QdCQKBgQDtx8pN +CAxR7yhHbIWT1AH66+XWN8bXq7l3RO/ukeaci98JfkbkxURZhtxV/HHuvUhnPLdX +3TwygPBYZFNo4pzVEhzWoTtnEtrFueKxyc3+LjZpuo+mBlQ6ORtfgkr9gBVphXZG +YEzkCD3lVdl8L4cw9BVpKrJCs1c5taGjDgdInQKBgHm/fVvv96bJxc9x1tffXAcj +3OVdUN0UgXNCSaf/3A/phbeBQe9xS+3mpc4r6qvx+iy69mNBeNZ0xOitIjpjBo2+ +dBEjSBwLk5q5tJqHmy/jKMJL4n9ROlx93XS+njxgibTvU6Fp9w+NOFD/HvxB3Tcz +6+jJF85D5BNAG3DBMKBjAoGBAOAxZvgsKN+JuENXsST7F89Tck2iTcQIT8g5rwWC +P9Vt74yboe2kDT531w8+egz7nAmRBKNM751U/95P9t88EDacDI/Z2OwnuFQHCPDF +llYOUI+SpLJ6/vURRbHSnnn8a/XG+nzedGH5JGqEJNQsz+xT2axM0/W/CRknmGaJ +kda/AoGANWrLCz708y7VYgAtW2Uf1DPOIYMdvo6fxIB5i9ZfISgcJ/bbCUkFrhoH ++vq/5CIWxCPp0f85R4qxxQ5ihxJ0YDQT9Jpx4TMss4PSavPaBH3RXow5Ohe+bYoQ 
+NE5OgEXk2wVfZczCZpigBKbKZHNYcelXtTt/nP3rsCuGcM4h53s= +-----END RSA PRIVATE KEY----- diff --git a/kubevirtci/cluster-up/hack/common.sh b/kubevirtci/cluster-up/hack/common.sh new file mode 100644 index 00000000..5bbbf695 --- /dev/null +++ b/kubevirtci/cluster-up/hack/common.sh @@ -0,0 +1,58 @@ +#!/usr/bin/env bash + +if [ -z "$KUBEVIRTCI_PATH" ]; then + KUBEVIRTCI_PATH="$( + cd "$(dirname "$BASH_SOURCE[0]")/../" + echo "$(pwd)/" + )" +fi + + +if [ -z "$KUBEVIRTCI_CONFIG_PATH" ]; then + KUBEVIRTCI_CONFIG_PATH="$( + cd "$(dirname "$BASH_SOURCE[0]")/../../" + echo "$(pwd)/_ci-configs" + )" +fi + + +KUBEVIRTCI_CLUSTER_PATH=${KUBEVIRTCI_CLUSTER_PATH:-${KUBEVIRTCI_PATH}/cluster} +KUBEVIRT_PROVIDER=${KUBEVIRT_PROVIDER:-k8s-1.30} +KUBEVIRT_NUM_NODES=${KUBEVIRT_NUM_NODES:-1} +KUBEVIRT_NUM_NUMA_NODES=${KUBEVIRT_NUM_NUMA_NODES:-1} +KUBEVIRT_NUM_VCPU=${KUBEVIRT_NUM_VCPU:-6} +KUBEVIRT_MEMORY_SIZE=${KUBEVIRT_MEMORY_SIZE:-5120M} +KUBEVIRT_NUM_SECONDARY_NICS=${KUBEVIRT_NUM_SECONDARY_NICS:-0} +KUBEVIRT_DEPLOY_ISTIO=${KUBEVIRT_DEPLOY_ISTIO:-false} +KUBEVIRT_PSA=${KUBEVIRT_PSA:-false} +KUBEVIRT_SINGLE_STACK=${KUBEVIRT_SINGLE_STACK:-false} +KUBEVIRT_NO_ETCD_FSYNC=${KUBEVIRT_NO_ETCD_FSYNC:-false} +KUBEVIRT_ENABLE_AUDIT=${KUBEVIRT_ENABLE_AUDIT:-false} +KUBEVIRT_DEPLOY_NFS_CSI=${KUBEVIRT_DEPLOY_NFS_CSI:-false} +KUBEVIRT_DEPLOY_PROMETHEUS=${KUBEVIRT_DEPLOY_PROMETHEUS:-false} +KUBEVIRT_DEPLOY_PROMETHEUS_ALERTMANAGER=${KUBEVIRT_DEPLOY_PROMETHEUS_ALERTMANAGER-false} +KUBEVIRT_DEPLOY_GRAFANA=${KUBEVIRT_DEPLOY_GRAFANA:-false} +KUBEVIRT_CGROUPV2=${KUBEVIRT_CGROUPV2:-false} +KUBEVIRT_DEPLOY_CDI=${KUBEVIRT_DEPLOY_CDI:-false} +KUBEVIRT_DEPLOY_AAQ=${KUBEVIRT_DEPLOY_AAQ:-false} +KUBEVIRT_CUSTOM_AAQ_VERSION=${KUBEVIRT_CUSTOM_AAQ_VERSION} +KUBEVIRT_CUSTOM_CDI_VERSION=${KUBEVIRT_CUSTOM_CDI_VERSION} +KUBEVIRT_SWAP_ON=${KUBEVIRT_SWAP_ON:-false} +KUBEVIRT_KSM_ON=${KUBEVIRT_KSM_ON:-false} +KUBEVIRT_UNLIMITEDSWAP=${KUBEVIRT_UNLIMITEDSWAP:-false} 
+KUBEVIRT_CPU_MANAGER_POLICY=${KUBEVIRT_CPU_MANAGER_POLICY:-none} +KUBVIRT_WITH_CNAO_SKIP_CONFIG=${KUBVIRT_WITH_CNAO_SKIP_CONFIG:-false} + +# If on a developer setup, expose ocp on 8443, so that the openshift web console can be used (the port is important because of auth redirects) +# http and https ports are accessed by testing framework and should not be randomized +if [ -z "${JOB_NAME}" ]; then + KUBEVIRT_PROVIDER_EXTRA_ARGS="${KUBEVIRT_PROVIDER_EXTRA_ARGS} --ocp-port 8443" +fi + +#If run on jenkins, let us create isolated environments based on the job and +# the executor number +provider_prefix=${JOB_NAME:-${KUBEVIRT_PROVIDER}}${EXECUTOR_NUMBER} +job_prefix=${JOB_NAME:-kubevirt}${EXECUTOR_NUMBER} + +mkdir -p $KUBEVIRTCI_CONFIG_PATH/$KUBEVIRT_PROVIDER +KUBEVIRTCI_TAG=2412171619-fbd31717 diff --git a/kubevirtci/cluster-up/hack/config-default.sh b/kubevirtci/cluster-up/hack/config-default.sh new file mode 100644 index 00000000..81e13249 --- /dev/null +++ b/kubevirtci/cluster-up/hack/config-default.sh @@ -0,0 +1,4 @@ + +docker_prefix=${DOCKER_PREFIX:-kubevirt} +master_ip=192.168.200.2 +network_provider=flannel diff --git a/kubevirtci/cluster-up/hack/config.sh b/kubevirtci/cluster-up/hack/config.sh new file mode 100644 index 00000000..86af7051 --- /dev/null +++ b/kubevirtci/cluster-up/hack/config.sh @@ -0,0 +1,10 @@ +unset docker_prefix master_ip network_provider kubeconfig manifest_docker_prefix + +KUBEVIRT_PROVIDER=${KUBEVIRT_PROVIDER:-${PROVIDER}} + +source ${KUBEVIRTCI_PATH}hack/config-default.sh + +# Allow different providers to override default config values +test -f "${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/config-provider-${KUBEVIRT_PROVIDER}.sh" && source ${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/config-provider-${KUBEVIRT_PROVIDER}.sh + +export docker_prefix master_ip network_provider kubeconfig manifest_docker_prefix diff --git a/kubevirtci/cluster-up/kubeconfig.sh b/kubevirtci/cluster-up/kubeconfig.sh new file mode 100755 index 00000000..839d4cf1 
--- /dev/null +++ b/kubevirtci/cluster-up/kubeconfig.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash +# +# This file is part of the KubeVirt project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Copyright 2019 Red Hat, Inc. +# + +set -e + +if [ -z "$KUBEVIRTCI_PATH" ]; then + KUBEVIRTCI_PATH="$( + cd "$(dirname "$BASH_SOURCE[0]")/" + echo "$(pwd)/" + )" +fi + +source ${KUBEVIRTCI_PATH}/hack/common.sh + +echo "${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/.kubeconfig" diff --git a/kubevirtci/cluster-up/kubectl.sh b/kubevirtci/cluster-up/kubectl.sh new file mode 100755 index 00000000..b8e7a616 --- /dev/null +++ b/kubevirtci/cluster-up/kubectl.sh @@ -0,0 +1,40 @@ +#!/usr/bin/env bash +# +# This file is part of the KubeVirt project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Copyright 2017 Red Hat, Inc. 
+# + +set -e + +if [ -z "$KUBEVIRTCI_PATH" ]; then + KUBEVIRTCI_PATH="$( + cd "$(dirname "$BASH_SOURCE[0]")/" + echo "$(pwd)/" + )" +fi + +source ${KUBEVIRTCI_PATH}/hack/common.sh +source ${KUBEVIRTCI_CLUSTER_PATH}/$KUBEVIRT_PROVIDER/provider.sh +source ${KUBEVIRTCI_PATH}/hack/config.sh + +if [ "$1" == "console" ] || [ "$1" == "vnc" ] || [ "$1" == "start" ] || [ "$1" == "stop" ] || [ "$1" == "migrate" ]; then + ${KUBEVIRTCI_PATH}/virtctl.sh "$@" +elif [ "$1" == "virt" ]; then + shift + ${KUBEVIRTCI_PATH}/virtctl.sh "$@" +else + _kubectl "$@" +fi diff --git a/kubevirtci/cluster-up/ssh.sh b/kubevirtci/cluster-up/ssh.sh new file mode 100755 index 00000000..4c76f879 --- /dev/null +++ b/kubevirtci/cluster-up/ssh.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash +set -e + +if [ -z "$KUBEVIRTCI_PATH" ]; then + KUBEVIRTCI_PATH="$( + cd "$(dirname "$BASH_SOURCE[0]")/" + echo "$(pwd)/" + )" +fi + +test -t 1 && USE_TTY="-it" + +source ${KUBEVIRTCI_PATH}/hack/common.sh + +source ${KUBEVIRTCI_CLUSTER_PATH}/$KUBEVIRT_PROVIDER/provider.sh +source ${KUBEVIRTCI_PATH}/hack/config.sh + +node=$1 + +if [ -z "$node" ]; then + echo "node name required as argument" + echo "k8s example: ./ssh node01" + exit 1 +fi + +if [[ $KUBEVIRT_PROVIDER =~ kind.* ]] || [[ $KUBEVIRT_PROVIDER =~ k3d.* ]]; then + _ssh_into_node "$@" +else + ${_cli} --prefix $provider_prefix ssh "$@" +fi diff --git a/kubevirtci/cluster-up/up.sh b/kubevirtci/cluster-up/up.sh new file mode 100755 index 00000000..fa48f152 --- /dev/null +++ b/kubevirtci/cluster-up/up.sh @@ -0,0 +1,38 @@ +#!/usr/bin/env bash + +function validate_single_stack_ipv6() { + local kube_ns="kube-system" + local pod_label="calico-kube-controllers" + + echo "validating provider is single stack IPv6" + until _kubectl wait --for=condition=Ready pod --timeout=10s -n $kube_ns -lk8s-app=${pod_label}; do sleep 1; done > /dev/null 2>&1 + + local pod=$(_kubectl get pods -n ${kube_ns} -lk8s-app=${pod_label} -o=custom-columns=NAME:.metadata.name --no-headers) + local 
primary_ip=$(_kubectl get pod -n ${kube_ns} ${pod} -ojsonpath="{ @.status.podIP }") + + if [[ ! ${primary_ip} =~ fd00 ]]; then + echo "error: single stack primary ip ($primary_ip) is not IPv6 as expected" + exit 1 + fi + + if _kubectl get pod -n ${kube_ns} ${pod} -ojsonpath="{ @.status.podIPs[1] }" > /dev/null 2>&1; then + echo "error: single stack cluster expected" + exit 1 + fi +} + +if [ -z "$KUBEVIRTCI_PATH" ]; then + KUBEVIRTCI_PATH="$( + cd "$(dirname "$BASH_SOURCE[0]")/" + echo "$(pwd)/" + )" +fi + + +source ${KUBEVIRTCI_PATH}hack/common.sh +source ${KUBEVIRTCI_CLUSTER_PATH}/$KUBEVIRT_PROVIDER/provider.sh +up + +if [ ${KUBEVIRT_SINGLE_STACK} == true ]; then + validate_single_stack_ipv6 +fi diff --git a/kubevirtci/cluster-up/version.txt b/kubevirtci/cluster-up/version.txt new file mode 100644 index 00000000..b806a11a --- /dev/null +++ b/kubevirtci/cluster-up/version.txt @@ -0,0 +1 @@ +2412171619-fbd31717 diff --git a/kubevirtci/cluster-up/virtctl.sh b/kubevirtci/cluster-up/virtctl.sh new file mode 100755 index 00000000..1a65ab2e --- /dev/null +++ b/kubevirtci/cluster-up/virtctl.sh @@ -0,0 +1,52 @@ +#!/usr/bin/env bash +# +# This file is part of the KubeVirt project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Copyright 2018 Red Hat, Inc. +# + +set -e + +>&2 echo "WARNING: usage of '${BASH_SOURCE[0]}' is deprecated!" 
+>&2 echo " see: https://github.com/kubevirt/kubevirtci/issues/1277" + +if [ -z "$KUBEVIRTCI_PATH" ]; then + KUBEVIRTCI_PATH="$( + cd "$(dirname "$BASH_SOURCE[0]")/" + echo "$(pwd)/" + )" +fi + +source ${KUBEVIRTCI_PATH}/hack/common.sh +source ${KUBEVIRTCI_CLUSTER_PATH}/$KUBEVIRT_PROVIDER/provider.sh +source ${KUBEVIRTCI_PATH}/hack/config.sh + +CONFIG_ARGS= + +if [ -n "$kubeconfig" ]; then + CONFIG_ARGS="--kubeconfig=${kubeconfig}" +elif [ -n "$KUBECONFIG" ]; then + CONFIG_ARGS="--kubeconfig=${KUBECONFIG}" +fi + +KUBEVIRT_OUT_PATH=${KUBEVIRTCI_PATH}/../_out +if [ ! -d ${KUBEVIRT_OUT_PATH} ]; then + # see https://github.com/kubevirt/kubevirt/pull/12872 + >&2 echo "WARNING: $KUBEVIRT_OUT_PATH not found, falling back to parent" + KUBEVIRT_OUT_PATH=${KUBEVIRTCI_PATH}/../../_out + >&2 echo " $KUBEVIRT_OUT_PATH" +fi +${KUBEVIRT_OUT_PATH}/cmd/virtctl/virtctl $CONFIG_ARGS "$@" + From 3806687a03d3be82dd6fef1992d36991323b3bd1 Mon Sep 17 00:00:00 2001 From: Igor Bezukh Date: Wed, 18 Dec 2024 14:38:09 +0200 Subject: [PATCH 3/3] adding false positive for the common.key file in kci this file intedned for the ephemeral local cluster setup therefore it doesn't impose any security breach by exposing it in the repo. Signed-off-by: Igor Bezukh --- .gitleaks.toml | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 .gitleaks.toml diff --git a/.gitleaks.toml b/.gitleaks.toml new file mode 100644 index 00000000..74324d63 --- /dev/null +++ b/.gitleaks.toml @@ -0,0 +1,5 @@ +[allowlist] + description = "kubevirtci allowlist" + paths = [ + '''kubevirtci\/cluster-up\/hack\/common.key$''', + ]