Step: ipi-conf-libvirt

This step configures an install-config.yaml file to deploy to the LIBVIRT project. An existing install-config.yaml file should already exist in the ${SHARED_DIR} directory. The file is modified to configure the cluster to run in the LIBVIRT project. The resulting file remains in the shared directory for future consumption.

Container image used for this step: libvirt-installer

libvirt-installer resolves to an image built or imported by the ci-operator configuration (documentation).

Environment

In addition to the default environment, the step exposes the following:

Variable Name Type Variable Content
ARCH Parameter[?]
BRANCH Parameter[?]
MASTER_REPLICAS Parameter[?] (default: 3)
WORKER_REPLICAS Parameter[?] (default: 2)

Source Code

  1
  2
  3
  4
  5
  6
  7
  8
  9
 10
 11
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
#!/bin/bash

# Strict mode: abort on use of unset variables, on any command failure,
# and on a failure in any stage of a pipeline.
set -o nounset
set -o errexit
set -o pipefail

# Persist a key/value pair into ${SHARED_DIR}/cluster-config.yaml so that
# later steps can consume it.
# Globals (read): SHARED_DIR
# Arguments:
#   $1 - yq path expression (key) to write
#   $2 - value to store under that key
function write_shared_dir() {
  local key="$1"
  local value="$2"
  # Quote every expansion so keys/values containing spaces or glob
  # characters survive intact (SC2086).
  yq write --inplace "${SHARED_DIR}/cluster-config.yaml" "${key}" "${value}"
}

# TO-DO Remove once Boskos Monitor Controller for MA CI merges
# Assume lease hasn't been cleaned
# Best-effort teardown of libvirt domains, storage pools, and networks left
# behind by a previous run of this lease, followed by a hard check that no
# conflicting resource remains.
# Globals (read): REMOTE_LIBVIRT_URI, LEASED_RESOURCE
# Exits 1 if conflicting resources survive the cleanup attempt.
function cleanup_leftover_resources() {
  local CONNECT="${REMOTE_LIBVIRT_URI}"

  # Remove conflicting domains. '|| true' keeps errexit from aborting on a
  # resource that disappears between listing and removal; the final check
  # below is authoritative.
  for DOMAIN in $(mock-nss.sh virsh -c "${CONNECT}" list --all --name | grep "${LEASED_RESOURCE}")
  do
    mock-nss.sh virsh -c "${CONNECT}" destroy "${DOMAIN}" || true
    mock-nss.sh virsh -c "${CONNECT}" undefine "${DOMAIN}" || true
  done

  # Remove conflicting pools
  for POOL in $(mock-nss.sh virsh -c "${CONNECT}" pool-list --all --name | grep "${LEASED_RESOURCE}")
  do
    mock-nss.sh virsh -c "${CONNECT}" pool-destroy "${POOL}" || true
    mock-nss.sh virsh -c "${CONNECT}" pool-delete "${POOL}" || true
    mock-nss.sh virsh -c "${CONNECT}" pool-undefine "${POOL}" || true
  done

  # Remove conflicting networks
  for NET in $(mock-nss.sh virsh -c "${CONNECT}" net-list --all --name | grep "${LEASED_RESOURCE}")
  do
    mock-nss.sh virsh -c "${CONNECT}" net-destroy "${NET}" || true
    mock-nss.sh virsh -c "${CONNECT}" net-undefine "${NET}" || true
  done

  # Detect anything that survived. grep exits non-zero on no match, so
  # '|| true' keeps a clean (empty) result from tripping errexit.
  CONFLICTING_DOMAINS=$(mock-nss.sh virsh -c "${CONNECT}" list --all --name | grep "${LEASED_RESOURCE}" || true)
  CONFLICTING_POOLS=$(mock-nss.sh virsh -c "${CONNECT}" pool-list --all --name | grep "${LEASED_RESOURCE}" || true)
  CONFLICTING_NETWORKS=$(mock-nss.sh virsh -c "${CONNECT}" net-list --all --name | grep "${LEASED_RESOURCE}" || true)
  # [[ -n ... ]] replaces the deprecated '[ ! -z ... ]' form.
  if [[ -n "${CONFLICTING_DOMAINS}" || -n "${CONFLICTING_POOLS}" || -n "${CONFLICTING_NETWORKS}" ]]; then
    echo "Could not ensure clean state for lease ${LEASED_RESOURCE}"
    echo "Conflicting domains: $CONFLICTING_DOMAINS"
    echo "Conflicting pools: $CONFLICTING_POOLS"
    echo "Conflicting networks: $CONFLICTING_NETWORKS"
    exit 1
  fi
}

# Ensure a lease was acquired. The ':-' guard is required: with
# 'set -o nounset', testing a completely unset variable would abort the
# script with an "unbound variable" error before this friendly message
# could ever print.
if [[ -z "${LEASED_RESOURCE:-}" ]]; then
  echo "Failed to acquire lease"
  exit 1
fi

# Ensure the release payload to install is known (same ':-' rationale).
if [[ -z "${RELEASE_IMAGE_LATEST:-}" ]]; then
  echo "RELEASE_IMAGE_LATEST is an empty string, exiting"
  exit 1
fi

# create a file for storing shared information between steps
touch "${SHARED_DIR}/cluster-config.yaml"

echo "Installing from initial release ${RELEASE_IMAGE_LATEST}"

# Log the installer version for debugging.
openshift-install version

CONFIG="${SHARED_DIR}/install-config.yaml"

# Map every possible lease name to its /24 subnet index on the host.
# TO_DO Remove CLUSTER SUBNET 126 after HA-Proxy changes on host
declare -A SUBNETS
  SUBNETS["${CLUSTER_TYPE}-0-0"]="126"
  SUBNETS["${CLUSTER_TYPE}-0-1"]="1"
  SUBNETS["${CLUSTER_TYPE}-0-2"]="2"
  SUBNETS["${CLUSTER_TYPE}-0-3"]="3"
  SUBNETS["${CLUSTER_TYPE}-0-4"]="4"
  SUBNETS["${CLUSTER_TYPE}-0-5"]="5"
  SUBNETS["${CLUSTER_TYPE}-1-0"]="126"
  SUBNETS["${CLUSTER_TYPE}-1-1"]="1"
  SUBNETS["${CLUSTER_TYPE}-1-2"]="2"
  SUBNETS["${CLUSTER_TYPE}-1-3"]="3"
  SUBNETS["${CLUSTER_TYPE}-1-4"]="4"
  SUBNETS["${CLUSTER_TYPE}-1-5"]="5"
  SUBNETS["${CLUSTER_TYPE}-2-0"]="126"
  SUBNETS["${CLUSTER_TYPE}-2-1"]="1"
  SUBNETS["${CLUSTER_TYPE}-2-2"]="2"
  SUBNETS["${CLUSTER_TYPE}-2-3"]="3"
  SUBNETS["${CLUSTER_TYPE}-2-4"]="4"
  SUBNETS["${CLUSTER_TYPE}-2-5"]="5"

# Get the cluster subnet for this lease, or default it to 126. The ':-'
# guards are essential: under 'set -o nounset', looking up a key that is
# not in the map would abort the script with an unbound-variable error,
# so the default branch below could never be reached.
# TO-DO default to 1 after HA-Proxy changes on host
CLUSTER_SUBNET="${SUBNETS[${LEASED_RESOURCE:-unmapped}]:-}"
if [[ -z "${CLUSTER_SUBNET}" ]]; then
  CLUSTER_SUBNET=126
fi

# Choose the remote libvirt hypervisor hostnames for the leased
# architecture. The s390x environment has only two hosts, so the third
# slot is left empty.
case "${ARCH}" in
  s390x)
    REMOTE_LIBVIRT_HOSTNAME=lnxocp01
    REMOTE_LIBVIRT_HOSTNAME_1=lnxocp02
    REMOTE_LIBVIRT_HOSTNAME_2=""
    ;;
  ppc64le)
    REMOTE_LIBVIRT_HOSTNAME=C155F2U33
    REMOTE_LIBVIRT_HOSTNAME_1=C155F2U31
    REMOTE_LIBVIRT_HOSTNAME_2=C155F2U35
    ;;
esac

# Map every possible lease name to the hypervisor that hosts it.
declare -A LIBVIRT_HOSTS
  LIBVIRT_HOSTS["${CLUSTER_TYPE}-0-0"]="${REMOTE_LIBVIRT_HOSTNAME}"
  LIBVIRT_HOSTS["${CLUSTER_TYPE}-0-1"]="${REMOTE_LIBVIRT_HOSTNAME}"
  LIBVIRT_HOSTS["${CLUSTER_TYPE}-0-2"]="${REMOTE_LIBVIRT_HOSTNAME}"
  LIBVIRT_HOSTS["${CLUSTER_TYPE}-0-3"]="${REMOTE_LIBVIRT_HOSTNAME}"
  LIBVIRT_HOSTS["${CLUSTER_TYPE}-0-4"]="${REMOTE_LIBVIRT_HOSTNAME}"
  LIBVIRT_HOSTS["${CLUSTER_TYPE}-0-5"]="${REMOTE_LIBVIRT_HOSTNAME}"
  LIBVIRT_HOSTS["${CLUSTER_TYPE}-1-0"]="${REMOTE_LIBVIRT_HOSTNAME_1}"
  LIBVIRT_HOSTS["${CLUSTER_TYPE}-1-1"]="${REMOTE_LIBVIRT_HOSTNAME_1}"
  LIBVIRT_HOSTS["${CLUSTER_TYPE}-1-2"]="${REMOTE_LIBVIRT_HOSTNAME_1}"
  LIBVIRT_HOSTS["${CLUSTER_TYPE}-1-3"]="${REMOTE_LIBVIRT_HOSTNAME_1}"
  LIBVIRT_HOSTS["${CLUSTER_TYPE}-1-4"]="${REMOTE_LIBVIRT_HOSTNAME_1}"
  LIBVIRT_HOSTS["${CLUSTER_TYPE}-1-5"]="${REMOTE_LIBVIRT_HOSTNAME_1}"
  LIBVIRT_HOSTS["${CLUSTER_TYPE}-2-0"]="${REMOTE_LIBVIRT_HOSTNAME_2}"
  LIBVIRT_HOSTS["${CLUSTER_TYPE}-2-1"]="${REMOTE_LIBVIRT_HOSTNAME_2}"
  LIBVIRT_HOSTS["${CLUSTER_TYPE}-2-2"]="${REMOTE_LIBVIRT_HOSTNAME_2}"
  LIBVIRT_HOSTS["${CLUSTER_TYPE}-2-3"]="${REMOTE_LIBVIRT_HOSTNAME_2}"
  LIBVIRT_HOSTS["${CLUSTER_TYPE}-2-4"]="${REMOTE_LIBVIRT_HOSTNAME_2}"
  LIBVIRT_HOSTS["${CLUSTER_TYPE}-2-5"]="${REMOTE_LIBVIRT_HOSTNAME_2}"

# Get the libvirt host for this lease, or default to the first host.
# Check the looked-up HOSTNAME (not the composed URI, which is never
# empty), and use ':-' guards so an unmapped lease key cannot abort the
# script under 'set -o nounset' before the fallback applies.
LIBVIRT_HOST="${LIBVIRT_HOSTS[${LEASED_RESOURCE:-unmapped}]:-}"
if [[ -z "${LIBVIRT_HOST}" ]]; then
  LIBVIRT_HOST="${REMOTE_LIBVIRT_HOSTNAME}"
fi
REMOTE_LIBVIRT_URI="qemu+tcp://${LIBVIRT_HOST}/system"
# Debug echo "Remote Libvirt=${REMOTE_LIBVIRT_URI}"
write_shared_dir REMOTE_LIBVIRT_URI "${REMOTE_LIBVIRT_URI}"

# Compose the unique cluster name from the lease and job hash, and share
# it with later steps.
CLUSTER_NAME="${LEASED_RESOURCE}-${JOB_NAME_HASH}"
write_shared_dir CLUSTER_NAME "${CLUSTER_NAME}"

# Test the remote connection (quoted so an odd URI cannot word-split).
mock-nss.sh virsh -c "${REMOTE_LIBVIRT_URI}" list

# in case the cluster deprovision failed in a previous run
cleanup_leftover_resources

BASE_DOMAIN="${LEASED_RESOURCE}.ci"

# Append the libvirt platform configuration to the shared install-config.
# The unquoted heredoc delimiter lets the embedded ${...} and $(...)
# substitutions (pull secret, ssh key, bridge-interface suffix) expand
# when the file is rendered. The redirect target is quoted (SC2086), and
# printf gets an explicit '%s' format so the lease name is treated as
# data, not as a format string (SC2059).
cat >> "${CONFIG}" << EOF
apiVersion: v1
baseDomain: ${BASE_DOMAIN}
metadata:
  name: ${CLUSTER_NAME}
controlPlane:
  architecture: ${ARCH}
  hyperthreading: Enabled
  name: master
  replicas: ${MASTER_REPLICAS}
networking:
  clusterNetwork:
  - cidr: 10.128.0.0/14
    hostPrefix: 23
  machineCIDR: 192.168.${CLUSTER_SUBNET}.0/24
  networkType: OpenShiftSDN
  serviceNetwork:
  - 172.30.0.0/16
compute:
- architecture: ${ARCH}
  hyperthreading: Enabled
  name: worker
  replicas: ${WORKER_REPLICAS}
platform:
  libvirt:
    URI: ${REMOTE_LIBVIRT_URI}
    network:
      dnsmasqOptions:
      - name: "address"
        value: "/.apps.${CLUSTER_NAME}.${BASE_DOMAIN}/192.168.${CLUSTER_SUBNET}.1"
      if: "br$(printf '%s' "${LEASED_RESOURCE}" | tail -c 3)"
pullSecret: >
  $(<"${CLUSTER_PROFILE_DIR}/pull-secret")
sshKey: |
  $(<"${CLUSTER_PROFILE_DIR}/ssh-publickey")
EOF

# DNS records for libvirt versions that don't support dnsmasq options
cat >> "${SHARED_DIR}/worker-hostrecords.xml" << EOF
<host ip='192.168.${CLUSTER_SUBNET}.1'>
  <hostname>alertmanager-main-openshift-monitoring.apps.${CLUSTER_NAME}.${BASE_DOMAIN}</hostname>
  <hostname>canary-openshift-ingress-canary.apps.${CLUSTER_NAME}.${BASE_DOMAIN}</hostname>
  <hostname>console-openshift-console.apps.${CLUSTER_NAME}.${BASE_DOMAIN}</hostname>
  <hostname>downloads-openshift-console.apps.${CLUSTER_NAME}.${BASE_DOMAIN}</hostname>
  <hostname>grafana-openshift-monitoring.apps.${CLUSTER_NAME}.${BASE_DOMAIN}</hostname>
  <hostname>oauth-openshift.apps.${CLUSTER_NAME}.${BASE_DOMAIN}</hostname>
  <hostname>prometheus-k8s-openshift-monitoring.apps.${CLUSTER_NAME}.${BASE_DOMAIN}</hostname>
  <hostname>test-disruption-openshift-image-registry.apps.${CLUSTER_NAME}.${BASE_DOMAIN}</hostname>
</host>
EOF

Properties

Property Value Description
Resource requests (cpu) 10m Used in .resources.requests of the pod running this step.
Resource requests (memory) 100Mi Used in .resources.requests of the pod running this step.

GitHub Link:

https://github.com/openshift/release/blob/master/ci-operator/step-registry/ipi/conf/libvirt/ipi-conf-libvirt-ref.yaml

Owners:

Approvers:

Source code for this page located on GitHub