remove unnecessary sudos

pull/217/head
Joël Séguillon 3 years ago
parent fdd86b29ba
commit 9377aa7c38
1 changed file with 38 additions and 38 deletions
.github/workflows/ci.yml

@@ -94,7 +94,7 @@ jobs:
- name: Create single node Cluster
run: |
-cat <<EOF | sudo kind create cluster -v7 --wait 1m --retain --config=-
+cat <<EOF | kind create cluster -v7 --wait 1m --retain --config=-
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
networking:
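The heredoc continues past this hunk; for orientation, a minimal single-node config of the same shape is sketched below. The networking values are illustrative assumptions, not the repository's actual settings.

```sh
# Sketch only: a minimal single-node kind config of the shape piped in above.
# The networking fields are assumptions; the real values lie outside this hunk.
cat <<EOF | kind create cluster -v7 --wait 1m --retain --config=-
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
networking:
  apiServerAddress: "127.0.0.1"   # assumed: bind the API server to localhost
  apiServerPort: 6443             # assumed: fixed port instead of a random one
nodes:
  - role: control-plane
EOF
```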
@@ -104,9 +104,9 @@ jobs:
- name: Wait and get Cluster status
run: |
# wait until network is ready
-sudo kubectl wait --for=condition=ready pods --namespace=kube-system -l k8s-app=kube-dns
-sudo kubectl get nodes -o wide
-sudo kubectl get pods -A
+kubectl wait --for=condition=ready pods --namespace=kube-system -l k8s-app=kube-dns
+kubectl get nodes -o wide
+kubectl get pods -A
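Why dropping sudo is safe here: kind writes the cluster credentials into the kubeconfig of the user who ran `kind create cluster`, so kubectl works unprivileged as long as it runs as that same user. A quick check, assuming the default cluster name `kind`:

```sh
# kind stores credentials in $HOME/.kube/config (or $KUBECONFIG) for the
# invoking user; under sudo, $HOME points at root's home, which is why the
# old sudo'd kubectl calls only worked against a sudo-created cluster.
kind get kubeconfig --name kind > /dev/null   # readable without sudo
kubectl config current-context                # expected: kind-kind
```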
- name: Install kubevirt packages requirements
run: |
@@ -127,9 +127,9 @@ jobs:
timeout_minutes: 5
max_attempts: 3
command: |
-sudo kubectl create -f https://github.com/kubevirt/kubevirt/releases/download/v0.36.0/kubevirt-operator.yaml
-sudo kubectl create configmap kubevirt-config -n kubevirt --from-literal debug.useEmulation=true
-sudo kubectl create -f https://github.com/kubevirt/kubevirt/releases/download/v0.36.0/kubevirt-cr.yaml
+kubectl create -f https://github.com/kubevirt/kubevirt/releases/download/v0.36.0/kubevirt-operator.yaml
+kubectl create configmap kubevirt-config -n kubevirt --from-literal debug.useEmulation=true
+kubectl create -f https://github.com/kubevirt/kubevirt/releases/download/v0.36.0/kubevirt-cr.yaml
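As an aside, instead of the per-component waits used further down, the KubeVirt docs also offer a single wait on the KubeVirt custom resource; a sketch:

```sh
# The operator flips the Available condition on the kubevirt CR once
# virt-api, virt-controller and virt-handler are all deployed.
kubectl -n kubevirt wait kv kubevirt --for=condition=Available --timeout=10m
```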
- name: Install virtctl
uses: nick-invision/retry@v2
@@ -140,7 +140,7 @@ jobs:
export ARCH=linux-amd64
curl -L -o virtctl https://github.com/kubevirt/kubevirt/releases/download/v0.36.0/virtctl-v0.36.0-linux-amd64
chmod +x virtctl
-sudo install virtctl /usr/local/bin
+install virtctl /usr/local/bin
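The unprivileged install works because /usr/local/bin is writable by the runner user on GitHub-hosted Ubuntu images; on hosts where it is not, a user-writable directory does the same job. A sketch, assuming ~/.local/bin is on PATH:

```sh
# Fallback when /usr/local/bin is root-owned: install into the user's bin.
install -D virtctl "$HOME/.local/bin/virtctl"
"$HOME/.local/bin/virtctl" --help > /dev/null   # smoke test
```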
- name: Build molecule test container
run: |
@@ -164,7 +164,7 @@ jobs:
docker build --build-arg PYTHON_BASE_IMAGE="${PYTHON_BASE_IMAGE}" --build-arg KUBERNETES_VERSION="${KUBERNETES_VERSION}" . -t molecule_kubevirt_runner:latest
# TODO: kind load is slow; set up a local registry instead, as described here https://kind.sigs.k8s.io/docs/user/local-registry/
-sudo kind load docker-image molecule_kubevirt_runner:latest
+kind load docker-image molecule_kubevirt_runner:latest
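The TODO above refers to kind's local-registry pattern; the push side could look roughly like this. The registry name and port follow the linked docs' example, and the cluster must also be created with the matching containerd registry config, omitted here:

```sh
# Sketch of the local-registry alternative to "kind load".
docker run -d --restart=always -p 127.0.0.1:5001:5000 --name kind-registry registry:2
docker tag molecule_kubevirt_runner:latest localhost:5001/molecule_kubevirt_runner:latest
docker push localhost:5001/molecule_kubevirt_runner:latest
```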
- name: Push molecule test image to Kind
uses: nick-invision/retry@v2
@@ -172,7 +172,7 @@ jobs:
timeout_minutes: 5
max_attempts: 3
command: |
-sudo kind load docker-image molecule_kubevirt_runner:latest
+kind load docker-image molecule_kubevirt_runner:latest
- name: Install kail
uses: nick-invision/retry@v2
@@ -182,22 +182,22 @@ jobs:
command: |
curl -SL https://github.com/boz/kail/releases/download/v0.15.0/kail_0.15.0_linux_amd64.tar.gz -o kail.tar.gz
tar xf kail.tar.gz
-sudo install kail /usr/local/bin
+install kail /usr/local/bin
- name: Wait and get kubevirt status
run: |
# wait until kubevirt components are ready
-sudo kubectl wait --for=condition=ready pods --namespace=kubevirt -l kubevirt.io=virt-operator
-sudo kubectl wait --for=condition=ready pods --namespace=kubevirt -l kubevirt.io=virt-api || true
-sudo kubectl wait --for=condition=ready pods --namespace=kubevirt -l kubevirt.io=virt-controller || true
-sudo kubectl wait --for=condition=ready pods --namespace=kubevirt -l kubevirt.io=virt-handler || true
-sudo kubectl get nodes -o wide
-sudo kubectl get pods -A
+kubectl wait --for=condition=ready pods --namespace=kubevirt -l kubevirt.io=virt-operator
+kubectl wait --for=condition=ready pods --namespace=kubevirt -l kubevirt.io=virt-api || true
+kubectl wait --for=condition=ready pods --namespace=kubevirt -l kubevirt.io=virt-controller || true
+kubectl wait --for=condition=ready pods --namespace=kubevirt -l kubevirt.io=virt-handler || true
+kubectl get nodes -o wide
+kubectl get pods -A
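The `|| true` guards are there because `kubectl wait` exits non-zero while the operator has not yet created the matching pods. A stricter variant, sketched here, polls for pod creation first so real readiness failures are not masked:

```sh
# Illustrative only: fail loudly instead of swallowing errors with "|| true".
for component in virt-api virt-controller virt-handler; do
  until kubectl get pods --namespace=kubevirt -l kubevirt.io="${component}" 2>/dev/null | grep -q "${component}"; do
    echo "Waiting for ${component} pods to be created..."; sleep 5
  done
  kubectl wait --for=condition=ready pods --namespace=kubevirt -l kubevirt.io="${component}" --timeout=10m
done
```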
- name: Prepare Job
run: |
# Service Account for Job
-cat <<EOF | sudo kubectl apply -f -
+cat <<EOF | kubectl apply -f -
---
apiVersion: v1
kind: ServiceAccount
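The manifest is cut off by the hunk; a hypothetical continuation is sketched below to show the shape such a Job account usually takes. Every name and rule here is an assumption, not the repository's actual RBAC:

```yaml
# Hypothetical continuation: ServiceAccount plus RBAC for the molecule Job.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: molecule          # assumed name
  namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: molecule
  namespace: default
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: admin             # assumed: broad rights for test convenience
subjects:
  - kind: ServiceAccount
    name: molecule
    namespace: default
```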
@@ -270,21 +270,21 @@ jobs:
- name: Launch background log scripts
run: |
# Kail monitors any logs from default namespace
-sudo kail -n default 2>&1 > /tmp/kail.log || true &
+kail -n default 2>&1 > /tmp/kail.log || true &
# Infinite loop waits for new VMs to come up and fetches their console logs
(
while true; do
-until sudo kubectl wait --for=condition=Ready pod -l kubevirt.io=virt-launcher --namespace default;
+until kubectl wait --for=condition=Ready pod -l kubevirt.io=virt-launcher --namespace default;
do echo "Still Waiting Pod to start..."; sleep 5;
done
LOGFILE="virtcl-console-$(date '+%Y-%m-%d-%H-%M-%S').log"
echo "Starting virtctl console" >> /tmp/${LOGFILE}
-sudo script -e -c "virtctl console instance" >> /tmp/${LOGFILE}
+script -e -c "virtctl console instance" >> /tmp/${LOGFILE}
done
) &
# Event router reports every event as a log line, a great help for troubleshooting since Kind exports the logs of all running Pods
-sudo kubectl create -f https://raw.githubusercontent.com/heptiolabs/eventrouter/master/yaml/eventrouter.yaml
+kubectl create -f https://raw.githubusercontent.com/heptiolabs/eventrouter/master/yaml/eventrouter.yaml
- name: Launch test
uses: nick-invision/retry@v2
@@ -292,14 +292,14 @@ jobs:
timeout_minutes: 25
max_attempts: 3
on_retry_command: |
-sudo kubectl delete pods --all || true
-sudo kubectl delete configmaps --all || true
-sudo kubectl delete job --all || true
+kubectl delete pods --all || true
+kubectl delete configmaps --all || true
+kubectl delete job --all || true
command: |
# Configmap will be waiting until it is deleted, telling one Pod ended the Job
-sudo kubectl create configmap molecule-job-running --from-literal status=Started
+kubectl create configmap molecule-job-running --from-literal status=Started
# Create Job
-cat <<EOF | sudo kubectl apply -f -
+cat <<EOF | kubectl apply -f -
---
apiVersion: batch/v1
kind: Job
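The Job spec also continues beyond the hunk. A hypothetical skeleton follows, showing how the Job can hand its result back through the two configmaps this workflow polls; the image name and the configmap names are grounded in the surrounding diff, everything else is assumed:

```yaml
# Hypothetical Job skeleton; serviceAccountName, command and trap logic are
# assumptions. The configmap names match the ones used in this workflow.
apiVersion: batch/v1
kind: Job
metadata:
  name: molecule
spec:
  backoffLimit: 0
  template:
    spec:
      serviceAccountName: molecule            # assumed name
      restartPolicy: Never
      containers:
        - name: molecule
          image: molecule_kubevirt_runner:latest
          imagePullPolicy: IfNotPresent       # image was loaded into kind above
          command: ["/bin/sh", "-c"]
          args:
            - |
              molecule test; rc=$?
              # Publish the exit code, then release the waiting workflow step
              kubectl create configmap molecule-result --from-literal exitCode="${rc}"
              kubectl delete configmap molecule-job-running
              exit "${rc}"
```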
@@ -329,16 +329,16 @@ jobs:
echo "Job launched"
# Wait for molecule Job Pod to start
-until (sudo kubectl get pods -l job-name=molecule | grep molecule); do echo "Wait molecule pod to start"; sleep 5; done
+until (kubectl get pods -l job-name=molecule | grep molecule); do echo "Wait molecule pod to start"; sleep 5; done
-sudo kubectl wait --for=condition=ready pods -l job-name=molecule --namespace default
+kubectl wait --for=condition=ready pods -l job-name=molecule --namespace default
echo "Molecule pod is now running, waiting..."
# Wait for molecule Job to delete the configmap, notifying that one Job Pod ran to the end, whatever the result
-sudo kubectl wait --for delete --timeout=20m configmap/molecule-job-running
+kubectl wait --for delete --timeout=20m configmap/molecule-job-running
# Get molecule tests results and exit accordingly
-MOLECULE_RESULT=$(sudo kubectl get configmap molecule-result -o "jsonpath={.data['exitCode']}")
+MOLECULE_RESULT=$(kubectl get configmap molecule-result -o "jsonpath={.data['exitCode']}")
if [ ! "${MOLECULE_RESULT}" == "0" ]; then echo "Test non ok : ${MOLECULE_RESULT}"; exit 1; fi
@@ -347,18 +347,18 @@ jobs:
run: |
mkdir -p ${LOG_DIR} || true
echo "** Events"
-sudo kubectl get events | tee ${LOG_DIR}/events.txt || true
+kubectl get events | tee ${LOG_DIR}/events.txt || true
echo "** Jobs"
-sudo kubectl describe jobs | tee ${LOG_DIR}/jobs.txt || true
+kubectl describe jobs | tee ${LOG_DIR}/jobs.txt || true
echo "** Configmap"
-sudo kubectl describe cm | tee ${LOG_DIR}/cm.txt || true
+kubectl describe cm | tee ${LOG_DIR}/cm.txt || true
echo "** Console log"
-sudo cat /tmp/virtcl-console*.log || true
+cat /tmp/virtcl-console*.log || true
-sudo cp /tmp/kail.log ${LOG_DIR} || true
-sudo cp /tmp/virtcl-console*.log ${LOG_DIR} || true
+cp /tmp/kail.log ${LOG_DIR} || true
+cp /tmp/virtcl-console*.log ${LOG_DIR} || true
sudo dmesg > ${LOG_DIR}/dmesg.txt || true
-sudo kind export logs ${LOG_DIR} || true
+kind export logs ${LOG_DIR} || true
sudo journalctl | cat > ${LOG_DIR}/journalctl.txt || true
sudo chown -R $USER:$USER ${LOG_DIR} || true
env:
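A few sudos survive this commit on purpose: dmesg, journalctl and the trailing chown in the log-collection step stay privileged. A quick way to see why, assuming stock Ubuntu runner defaults:

```sh
# Why those calls keep sudo:
sysctl kernel.dmesg_restrict   # 1 => unprivileged dmesg reads are blocked
id -nG "$USER"                 # full journal access needs systemd-journal/adm or root
# The chown hands root-owned files written by earlier sudo'd commands back to
# the runner user so the artifact-upload step can read them.
```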