remove unnecessary sudos

pull/217/head
Joël Séguillon 3 years ago
parent fdd86b29ba
commit 9377aa7c38
  1. 76
      .github/workflows/ci.yml

@ -94,7 +94,7 @@ jobs:
- name: Create single node Cluster - name: Create single node Cluster
run: | run: |
cat <<EOF | sudo kind create cluster -v7 --wait 1m --retain --config=- cat <<EOF | kind create cluster -v7 --wait 1m --retain --config=-
kind: Cluster kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4 apiVersion: kind.x-k8s.io/v1alpha4
networking: networking:
@ -104,9 +104,9 @@ jobs:
- name: Wait and get Cluster status - name: Wait and get Cluster status
run: | run: |
# wait network is ready # wait network is ready
sudo kubectl wait --for=condition=ready pods --namespace=kube-system -l k8s-app=kube-dns kubectl wait --for=condition=ready pods --namespace=kube-system -l k8s-app=kube-dns
sudo kubectl get nodes -o wide kubectl get nodes -o wide
sudo kubectl get pods -A kubectl get pods -A
- name: Install kubevirt packages requirements - name: Install kubevirt packages requirements
run: | run: |
@ -127,9 +127,9 @@ jobs:
timeout_minutes: 5 timeout_minutes: 5
max_attempts: 3 max_attempts: 3
command: | command: |
sudo kubectl create -f https://github.com/kubevirt/kubevirt/releases/download/v0.36.0/kubevirt-operator.yaml kubectl create -f https://github.com/kubevirt/kubevirt/releases/download/v0.36.0/kubevirt-operator.yaml
sudo kubectl create configmap kubevirt-config -n kubevirt --from-literal debug.useEmulation=true kubectl create configmap kubevirt-config -n kubevirt --from-literal debug.useEmulation=true
sudo kubectl create -f https://github.com/kubevirt/kubevirt/releases/download/v0.36.0/kubevirt-cr.yaml kubectl create -f https://github.com/kubevirt/kubevirt/releases/download/v0.36.0/kubevirt-cr.yaml
- name: Install virtcl - name: Install virtcl
uses: nick-invision/retry@v2 uses: nick-invision/retry@v2
@ -140,7 +140,7 @@ jobs:
export ARCH=linux-amd64 export ARCH=linux-amd64
curl -L -o virtctl https://github.com/kubevirt/kubevirt/releases/download/v0.36.0/virtctl-v0.36.0-linux-amd64 curl -L -o virtctl https://github.com/kubevirt/kubevirt/releases/download/v0.36.0/virtctl-v0.36.0-linux-amd64
chmod +x virtctl chmod +x virtctl
sudo install virtctl /usr/local/bin install virtctl /usr/local/bin
- name: Build molecule test container - name: Build molecule test container
run: | run: |
@ -164,7 +164,7 @@ jobs:
docker build --build-arg PYTHON_BASE_IMAGE="${PYTHON_BASE_IMAGE}" --build-arg KUBERNETES_VERSION="${KUBERNETES_VERSION}" . -t molecule_kubevirt_runner:latest docker build --build-arg PYTHON_BASE_IMAGE="${PYTHON_BASE_IMAGE}" --build-arg KUBERNETES_VERSION="${KUBERNETES_VERSION}" . -t molecule_kubevirt_runner:latest
# TODO : kind load is slow : set a private registry, as described here https://kind.sigs.k8s.io/docs/user/local-registry/ # TODO : kind load is slow : set a private registry, as described here https://kind.sigs.k8s.io/docs/user/local-registry/
sudo kind load docker-image molecule_kubevirt_runner:latest kind load docker-image molecule_kubevirt_runner:latest
- name: Push molecule test image to Kind - name: Push molecule test image to Kind
uses: nick-invision/retry@v2 uses: nick-invision/retry@v2
@ -172,7 +172,7 @@ jobs:
timeout_minutes: 5 timeout_minutes: 5
max_attempts: 3 max_attempts: 3
command: | command: |
sudo kind load docker-image molecule_kubevirt_runner:latest kind load docker-image molecule_kubevirt_runner:latest
- name: Install kail - name: Install kail
uses: nick-invision/retry@v2 uses: nick-invision/retry@v2
@ -182,22 +182,22 @@ jobs:
command: | command: |
curl -SL https://github.com/boz/kail/releases/download/v0.15.0/kail_0.15.0_linux_amd64.tar.gz -o kail.tar.gz curl -SL https://github.com/boz/kail/releases/download/v0.15.0/kail_0.15.0_linux_amd64.tar.gz -o kail.tar.gz
tar xf kail.tar.gz tar xf kail.tar.gz
sudo install kail /usr/local/bin install kail /usr/local/bin
- name: Wait and get kubevirt status - name: Wait and get kubevirt status
run: | run: |
# wait network is ready # wait network is ready
sudo kubectl wait --for=condition=ready pods --namespace=kubevirt -l kubevirt.io=virt-operator kubectl wait --for=condition=ready pods --namespace=kubevirt -l kubevirt.io=virt-operator
sudo kubectl wait --for=condition=ready pods --namespace=kubevirt -l kubevirt.io=virt-api || true kubectl wait --for=condition=ready pods --namespace=kubevirt -l kubevirt.io=virt-api || true
sudo kubectl wait --for=condition=ready pods --namespace=kubevirt -l kubevirt.io=virt-controller || true kubectl wait --for=condition=ready pods --namespace=kubevirt -l kubevirt.io=virt-controller || true
sudo kubectl wait --for=condition=ready pods --namespace=kubevirt -l kubevirt.io=virt-handler || true kubectl wait --for=condition=ready pods --namespace=kubevirt -l kubevirt.io=virt-handler || true
sudo kubectl get nodes -o wide kubectl get nodes -o wide
sudo kubectl get pods -A kubectl get pods -A
- name: Prepare Job - name: Prepare Job
run: | run: |
# Service Account for Job # Service Account for Job
cat <<EOF | sudo kubectl apply -f - cat <<EOF | kubectl apply -f -
--- ---
apiVersion: v1 apiVersion: v1
kind: ServiceAccount kind: ServiceAccount
@ -270,21 +270,21 @@ jobs:
- name: Launch background log scripts - name: Launch background log scripts
run: | run: |
# Kail monitors any logs from default namespace # Kail monitors any logs from default namespace
sudo kail -n default 2>&1 > /tmp/kail.log || true & kail -n default 2>&1 > /tmp/kail.log || true &
# Infinite script waits for new VMs up and fetch logs from console # Infinite script waits for new VMs up and fetch logs from console
( (
while true; do while true; do
until sudo kubectl wait --for=condition=Ready pod -l kubevirt.io=virt-launcher --namespace default; until kubectl wait --for=condition=Ready pod -l kubevirt.io=virt-launcher --namespace default;
do echo "Still Waiting Pod to start..."; sleep 5; do echo "Still Waiting Pod to start..."; sleep 5;
done done
LOGFILE="virtcl-console-$(date '+%Y-%m-%d-%H-%M-%S').log" LOGFILE="virtcl-console-$(date '+%Y-%m-%d-%H-%M-%S').log"
echo "Starting virtctl console" >> /tmp/${LOGFILE} echo "Starting virtctl console" >> /tmp/${LOGFILE}
sudo script -e -c "virtctl console instance" >> /tmp/${LOGFILE} script -e -c "virtctl console instance" >> /tmp/${LOGFILE}
done done
) & ) &
# Event router reports any event as log, a great help for troubleshooting since Kind exports all running Pods logs # Event router reports any event as log, a great help for troubleshooting since Kind exports all running Pods logs
sudo kubectl create -f https://raw.githubusercontent.com/heptiolabs/eventrouter/master/yaml/eventrouter.yaml kubectl create -f https://raw.githubusercontent.com/heptiolabs/eventrouter/master/yaml/eventrouter.yaml
- name: Launch test - name: Launch test
uses: nick-invision/retry@v2 uses: nick-invision/retry@v2
@ -292,14 +292,14 @@ jobs:
timeout_minutes: 25 timeout_minutes: 25
max_attempts: 3 max_attempts: 3
on_retry_command: | on_retry_command: |
sudo kubectl delete pods --all || true kubectl delete pods --all || true
sudo kubectl delete configmaps --all || true kubectl delete configmaps --all || true
sudo kubectl delete job --all || true kubectl delete job --all || true
command: | command: |
# Configmap will be waitinig untill it is deleted, telling one Pod ended the Job # Configmap will be waitinig untill it is deleted, telling one Pod ended the Job
sudo kubectl create configmap molecule-job-running --from-literal status=Started kubectl create configmap molecule-job-running --from-literal status=Started
# Create Job # Create Job
cat <<EOF | sudo kubectl apply -f - cat <<EOF | kubectl apply -f -
--- ---
apiVersion: batch/v1 apiVersion: batch/v1
kind: Job kind: Job
@ -329,16 +329,16 @@ jobs:
echo "Job launched" echo "Job launched"
# Wait for molecule Job Pod to start # Wait for molecule Job Pod to start
until (sudo kubectl get pods -l job-name=molecule | grep molecule); do echo "Wait molecule pod to start"; sleep 5; done until (kubectl get pods -l job-name=molecule | grep molecule); do echo "Wait molecule pod to start"; sleep 5; done
sudo kubectl wait --for=condition=ready pods -l job-name=molecule --namespace default kubectl wait --for=condition=ready pods -l job-name=molecule --namespace default
echo "Molecule pod is now running, waiting..." echo "Molecule pod is now running, waiting..."
# Wait for molecule Job to delete configmap, notifying one Job Pod ran till the end, whatever the result # Wait for molecule Job to delete configmap, notifying one Job Pod ran till the end, whatever the result
sudo kubectl wait --for delete --timeout=20m configmap/molecule-job-running kubectl wait --for delete --timeout=20m configmap/molecule-job-running
# Get molecule tests results and exit accordingly # Get molecule tests results and exit accordingly
MOLECULE_RESULT=$(sudo kubectl get configmap molecule-result -o "jsonpath={.data['exitCode']}") MOLECULE_RESULT=$(kubectl get configmap molecule-result -o "jsonpath={.data['exitCode']}")
if [ ! "${MOLECULE_RESULT}" == "0" ]; then echo "Test non ok : ${MOLECULE_RESULT}"; exit 1; fi if [ ! "${MOLECULE_RESULT}" == "0" ]; then echo "Test non ok : ${MOLECULE_RESULT}"; exit 1; fi
@ -347,18 +347,18 @@ jobs:
run: | run: |
mkdir -p ${LOG_DIR} || true mkdir -p ${LOG_DIR} || true
echo "** Events" echo "** Events"
sudo kubectl get events | tee ${LOG_DIR}/events.txt || true kubectl get events | tee ${LOG_DIR}/events.txt || true
echo "** Jobs" echo "** Jobs"
sudo kubectl describe jobs | tee ${LOG_DIR}/jobs.txt || true kubectl describe jobs | tee ${LOG_DIR}/jobs.txt || true
echo "** Configmap" echo "** Configmap"
sudo kubectl describe cm | tee ${LOG_DIR}/cm.txt || true kubectl describe cm | tee ${LOG_DIR}/cm.txt || true
echo "** Console log" echo "** Console log"
sudo cat /tmp/virtcl-console*.log || true cat /tmp/virtcl-console*.log || true
sudo cp /tmp/kail.log ${LOG_DIR} || true cp /tmp/kail.log ${LOG_DIR} || true
sudo cp /tmp/virtcl-console*.log ${LOG_DIR} || true cp /tmp/virtcl-console*.log ${LOG_DIR} || true
sudo dmesg > ${LOG_DIR}/dmesg.txt || true sudo dmesg > ${LOG_DIR}/dmesg.txt || true
sudo kind export logs ${LOG_DIR} || true kind export logs ${LOG_DIR} || true
sudo journalctl | cat > ${LOG_DIR}/journalctl.txt || true sudo journalctl | cat > ${LOG_DIR}/journalctl.txt || true
sudo chown -R $USER:$USER ${LOG_DIR} || true sudo chown -R $USER:$USER ${LOG_DIR} || true
env: env: