#!/bin/sh

set -e

# Abort with a clear message if the configuration is unreadable: the
# script cannot do anything useful without it. (Previously this only
# printed the message and then fell through to sourcing the file anyway.)
if ! [ -r /etc/oci-poc/oci-poc.conf ] ; then
	echo "Cannot read /etc/oci-poc/oci-poc.conf" >&2
	exit 1
fi
. /etc/oci-poc/oci-poc.conf

# Current deployment IP of a machine, looked up by hostname in the
# ocicli machine list.
get_cur_ip () {
	ocicli -csv machine-list --filter hostname="${1}" | q -H -d, "SELECT Cur_ip FROM -"
}

# Address with usefor='machine' from a machine's per-host IP list.
get_machine_ip () {
	ocicli -csv machine-ip-list "${1}" | q -H -d, "SELECT ipaddr FROM - WHERE usefor='machine'"
}

CTRL1_IP=$(get_cur_ip cl1-controller-1.infomaniak.ch)
CTRL2_IP=$(get_cur_ip cl1-controller-2.infomaniak.ch)
CTRL3_IP=$(get_cur_ip cl1-controller-3.infomaniak.ch)
NET1_IP=$(get_cur_ip cl1-network-1.infomaniak.ch)
NET2_IP=$(get_cur_ip cl1-network-2.infomaniak.ch)

CL3_CTRL1_IP=$(get_machine_ip cl3-controller-1.infomaniak.ch)
CL3_CTRL2_IP=$(get_machine_ip cl3-controller-2.infomaniak.ch)
CL3_CTRL3_IP=$(get_machine_ip cl3-controller-3.infomaniak.ch)

sshi () {
	# Run a command on a remote host over ssh, with ssh-agent disabled
	# (SSH_AUTH_SOCK cleared) and host-key checking relaxed.
	# Usage: sshi [--host <host>] <command...>
	# Without --host the command runs on cl1 controller 1 (${CTRL1_IP}).
	local MYHOST
	if [ "${1}" = "--host" ] ; then
		MYHOST=${2}
		shift 2
	else
		MYHOST=${CTRL1_IP}
	fi
	# "$@" instead of bare $@: prevents local glob expansion and word
	# splitting of the remote command; ssh rejoins the words with spaces
	# on the wire, so the remote command string is unchanged.
	SSH_AUTH_SOCK= ssh -o StrictHostKeyChecking=no -o ConnectTimeout=5 "${MYHOST}" "$@"
}

scpi () {
	# Recursively copy a local path to root@HOST:DST, with ssh-agent
	# disabled and host-key checking relaxed.
	# $1 - remote host, $2 - local source path, $3 - remote destination
	local HOST SRC DST
	HOST="${1}"
	SRC="${2}"
	DST="${3}"
	# Quote the expansions so paths with spaces survive word splitting.
	SSH_AUTH_SOCK= scp -r -o StrictHostKeyChecking=no -o ConnectTimeout=5 "${SRC}" "root@${HOST}:${DST}"
}

ocicli_cluster_wait_for () {
	# Poll 'ocicli machine-list -a' every 2 seconds until the given CSV
	# column equals the expected value for the machine (matched by serial
	# OR hostname). Prints one dot per attempt; on timeout (1800 tries,
	# ~1 hour) the whole script exits 1.
	# $1 - machine serial or hostname
	# $2 - column to watch (e.g. status)
	# $3 - value to wait for
	local MACHINE_SERIAL=$1
	local FIELD=$2
	local VALUE=$3
	local CNT=1800
	local CURRENT
	# The query result is kept in a quoted variable: the original compared
	# an unquoted $(...) inside [ ], which breaks (word splitting / "too
	# many arguments") when the field value contains spaces. '|| true'
	# keeps a transient ocicli/q failure from killing us under 'set -e';
	# it simply counts as "not there yet".
	CURRENT=$(ocicli -csv machine-list -a | q -H -d, "SELECT ${FIELD} FROM - WHERE serial='${MACHINE_SERIAL}' OR hostname='${MACHINE_SERIAL}'" 2>/dev/null) || true
	while [ "${CNT}" -gt 0 ] && [ "${CURRENT}" != "${VALUE}" ] ; do
		# printf, not 'echo -n': -n is unspecified under #!/bin/sh.
		printf "."
		sleep 2
		CURRENT=$(ocicli -csv machine-list -a | q -H -d, "SELECT ${FIELD} FROM - WHERE serial='${MACHINE_SERIAL}' OR hostname='${MACHINE_SERIAL}'" 2>/dev/null) || true
		CNT=$((CNT - 1))
	done
	if [ "${CNT}" = 0 ] ; then
		echo "timed out."
		exit 1
	else
		echo "ok."
	fi
}

ocicli_cluster_wait_for_puppet () {
	# Wait until the "puppet" column of 'ocicli machine-list -s' reports
	# either "success" or "failure" for the machine (matched by serial OR
	# hostname), i.e. until the initial puppet run has finished. Prints a
	# dot per 2-second attempt; exits the script on timeout (6000 tries).
	# NOTE(review): "failure" also ends the wait and prints "ok." —
	# presumably later steps tolerate a failed run; confirm.
	# $1 - machine serial or hostname
	local MACHINE_SERIAL=$1
	local CNT=6000
	local PUPPET_STATUS
	# Declaration split from assignment so 'local' does not mask the
	# pipeline's exit status; '|| true' keeps a transient ocicli/q failure
	# from tripping 'set -e'.
	PUPPET_STATUS=$(ocicli -csv machine-list -s | q -H -d, "SELECT puppet FROM - WHERE serial='${MACHINE_SERIAL}' OR hostname='${MACHINE_SERIAL}'" 2>/dev/null) || true
	while [ "${CNT}" -gt 0 ] && [ "${PUPPET_STATUS}" != "success" ] && [ "${PUPPET_STATUS}" != "failure" ] ; do
		# printf, not 'echo -n': -n is unspecified under #!/bin/sh.
		printf "."
		sleep 2
		PUPPET_STATUS=$(ocicli -csv machine-list -s | q -H -d, "SELECT puppet FROM - WHERE serial='${MACHINE_SERIAL}' OR hostname='${MACHINE_SERIAL}'" 2>/dev/null) || true
		CNT=$((CNT - 1))
	done
	if [ "${CNT}" = 0 ] ; then
		echo "timed out."
		exit 1
	else
		echo "ok."
	fi
}


# Copy the configuration files that the provision scripts will need
echo "===> Copying ikvswitch.conf and oci-poc.conf to controller-1"
# sshi with no --host targets ${CTRL1_IP} (cl1 controller 1), the same
# machine the two scpi copies below push the files to.
sshi "mkdir -p /etc/ikvswitch ; mkdir -p /etc/oci-poc"
scpi $CTRL1_IP /etc/oci-poc/oci-poc.conf /etc/oci-poc
scpi $CTRL1_IP /etc/ikvswitch/ikvswitch.conf /etc/ikvswitch

echo "===> Restarting glance on all controllers for cluster1"
# NOTE(review): presumably needed so glance-api picks up configuration
# applied earlier in the deployment — confirm why a restart is required.
sshi --host ${CTRL1_IP} "systemctl restart glance-api"
sshi --host ${CTRL2_IP} "systemctl restart glance-api"
sshi --host ${CTRL3_IP} "systemctl restart glance-api"

echo "===> Provisioning a Debian image"
# We need --use-daily-image when using testing.
# USE_DAILY_IMAGE comes from /etc/oci-poc/oci-poc.conf sourced at the top.
if [ "${USE_DAILY_IMAGE}" = "yes" ] ; then
	sshi "oci-poc-provision-image --use-daily-image"
else
	sshi oci-poc-provision-image
fi

echo "===> Provisioning networks"
sshi oci-poc-provision-network

echo "===> Restarting l3 agent on network nodes"
sshi --host ${NET1_IP} "systemctl restart neutron-l3-agent.service"
sshi --host ${NET2_IP} "systemctl restart neutron-l3-agent.service"

echo "===> Provisioning flavors"
sshi oci-poc-provision-flavors

echo "===> Provisionning AZs"
sshi oci-poc-provision-az

echo "===> Designate setup"
echo "-> Getting admin project ID and settting it up as Designate managed resource tenant id"
# Run 'openstack project show' remotely under the admin credentials and
# capture the project UUID locally.
CL1_ADMIN_PROJECT_ID=$(sshi --host ${CTRL1_IP} ". /root/oci-openrc ; openstack project show admin --format value -c id")
ocicli cluster-set cl1 --designate-managed-resource-tenant-id ${CL1_ADMIN_PROJECT_ID} --designate-ns1 cl1-dns-1.infomaniak.ch --designate-ns2 cl1-dns-2.infomaniak.ch

echo "===> Provisioning Octavia"
echo "-> Octavia image"
sshi oci-poc-provision-octavia-image

echo "-> Octavia flavor"
sshi oci-poc-provision-octavia-flavor

echo "-> Octavia security groups"
sshi oci-poc-provision-octavia-secgroup
# Look up the IDs of the two security groups just created (queried under
# the octavia service credentials) and record them on the cluster.
LB_MGMT_SEC_GRP=$(sshi ". /root/octavia-openrc ; openstack security group list --format csv | q -H -d, \"SELECT ID FROM - WHERE Name='lb-mgmt-sec-grp'\"")
LB_HEALTH_MGR_SEC_GRP=$(sshi ". /root/octavia-openrc ; openstack security group list --format csv | q -H -d, \"SELECT ID FROM - WHERE Name='lb-health-mgr-sec-grp'\"")
ocicli cluster-set cl1 --amp-secgroup-list $LB_MGMT_SEC_GRP,$LB_HEALTH_MGR_SEC_GRP

echo "-> Octavia networking"
sshi oci-poc-provision-octavia-network
# Record the amphora management network ID on the cluster.
LB_MGMT_NET=$(sshi ". /root/oci-openrc ; openstack network list --format csv | q -H -d, \"SELECT ID FROM - WHERE Name='int-octavia1'\"")
ocicli cluster-set cl1 --amp-boot-network-list ${LB_MGMT_NET}

echo "-> Octavia ssh key"
sshi oci-poc-provision-octavia-ssh-key

echo "-> Setting-up Octavia auto-signed SSL certificate"
sshi oci-octavia-certs

echo "-> Running puppet on all 3 controllers to apply Octavia boot net and security groups"
# We do || true, because it may return 2 (with --detailed-exitcodes of puppet agent)
sshi --host ${CTRL1_IP} oci-puppet || true
sshi --host ${CTRL2_IP} oci-puppet || true
sshi --host ${CTRL3_IP} oci-puppet || true

echo "===> Setting-up cloudkitty pricing"
sshi oci-poc-provision-cloudkitty

echo "===***===> Starting provisionning of cluster3"
# Same glance restart as for cluster1, this time on the cl3 controllers.
sshi --host ${CL3_CTRL1_IP} "systemctl restart glance-api"
sshi --host ${CL3_CTRL2_IP} "systemctl restart glance-api"
sshi --host ${CL3_CTRL3_IP} "systemctl restart glance-api"

echo "===> Copying ikvswitch.conf and oci-poc.conf to controller-1"
sshi --host ${CL3_CTRL1_IP} "mkdir -p /etc/ikvswitch ; mkdir -p /etc/oci-poc"
scpi $CL3_CTRL1_IP /etc/oci-poc/oci-poc.conf /etc/oci-poc
scpi $CL3_CTRL1_IP /etc/ikvswitch/ikvswitch.conf /etc/ikvswitch

echo "===> Provisioning a Debian image"
# We need --use-daily-image when using testing.
if [ "${USE_DAILY_IMAGE}" = "yes" ] ; then
	sshi --host ${CL3_CTRL1_IP} "oci-poc-provision-image --use-daily-image"
else
	sshi --host ${CL3_CTRL1_IP} oci-poc-provision-image
fi

echo "===> Restarting l3 agent on controller nodes"
sshi --host ${CL3_CTRL1_IP} "systemctl restart neutron-l3-agent.service"
sshi --host ${CL3_CTRL2_IP} "systemctl restart neutron-l3-agent.service"
sshi --host ${CL3_CTRL3_IP} "systemctl restart neutron-l3-agent.service"

echo "===> Provisioning networks"
# NOTE(review): the mkdir runs on CL3 controller-3 but the two copies go
# to controller-1, and both the mkdir and the copies duplicate what was
# already done a few lines above — looks like a copy/paste slip; confirm
# whether controller-3 was the intended target for all three commands.
sshi --host ${CL3_CTRL3_IP} "mkdir -p /etc/ikvswitch ; mkdir -p /etc/oci-poc"
scpi $CL3_CTRL1_IP /etc/oci-poc/oci-poc.conf /etc/oci-poc
scpi $CL3_CTRL1_IP /etc/ikvswitch/ikvswitch.conf /etc/ikvswitch
sshi --host ${CL3_CTRL1_IP} oci-poc-provision-network-cl3

echo "===> Provisioning flavors"
sshi --host ${CL3_CTRL1_IP} oci-poc-provision-flavors

echo "===> Provision an ssh keypair"
# Registers the remote root's public key as a Nova keypair.
# NOTE(review): the keypair is named "cl1-root-key" although it is created
# on a cl3 controller — confirm the name is intentional.
sshi --host ${CL3_CTRL1_IP} ". oci-openrc ; openstack keypair create --fit-width --public-key .ssh/id_rsa.pub cl1-root-key"

echo "===***===> Your OpenStack cloud is ready, installing the puppet node"
echo "-> Installing tempest node OS"
ocicli machine-install-os cl1-tempest-1.infomaniak.ch
echo "-> Waiting for the tempest node to be installed"
# Both wait helpers accept a hostname (the query matches serial OR hostname).
ocicli_cluster_wait_for cl1-tempest-1.infomaniak.ch status installed
echo "-> Waiting for puppet to be run on cl1-tempest-1.infomaniak.ch"
ocicli_cluster_wait_for_puppet cl1-tempest-1.infomaniak.ch

echo "-> Copying debian image in the tempest node"
# This must be done *AFTER* cl1-tempest-1.infomaniak.ch is installed
TEMPEST1_IP=$(ocicli -csv machine-list --filter hostname=cl1-tempest-1.infomaniak.ch | q -H -d, "SELECT Cur_ip FROM -")

# Relay the qcow2 image controller-1 -> local -> tempest node.
# NOTE(review): this ssh call skips the StrictHostKeyChecking/ConnectTimeout
# options the sshi helper uses — confirm that is intentional.
IMAGE_FILENAME=$(ssh ${CTRL1_IP} 'ls *generic*.qcow2' 2>/dev/null)
SSH_AUTH_SOCK= scp -o StrictHostKeyChecking=no -o ConnectTimeout=5 root@${CTRL1_IP}:${IMAGE_FILENAME} ./debian.qcow2
SSH_AUTH_SOCK= scp -o StrictHostKeyChecking=no -o ConnectTimeout=5 debian.qcow2 root@${TEMPEST1_IP}:/root/debian.qcow2

echo "===***===> Making sure all compute 3 to 8 nodes are activated in cl1 (to be fixed?)"
sshi --host ${CTRL1_IP} "su nova -s /bin/sh -c 'nova-manage cell_v2 discover_hosts'"
# Re-enable the nova-compute services on compute nodes 3..8.
sshi --host ${CTRL1_IP} ". oci-openrc && openstack compute service set --enable cl1-compute-3.infomaniak.ch nova-compute ; openstack compute service set --enable cl1-compute-4.infomaniak.ch nova-compute ;openstack compute service set --enable cl1-compute-5.infomaniak.ch nova-compute ;openstack compute service set --enable cl1-compute-6.infomaniak.ch nova-compute ; openstack compute service set --enable cl1-compute-7.infomaniak.ch nova-compute ;openstack compute service set --enable cl1-compute-8.infomaniak.ch nova-compute"

echo "===***===> Copying Octavia's SSH key in the tempest server"
# This is needed by octavia_tempest_plugin.tests.act_stdby_scenario.v2.test_active_standby_iptables.ActiveStandbyIptablesScenarioTest
# Relay the key controller-1 -> local -> tempest node, mirroring the image
# copy above.
SSH_AUTH_SOCK= ssh -o StrictHostKeyChecking=no -o ConnectTimeout=5 root@${TEMPEST1_IP} "mkdir -p /etc/octavia/.ssh"
SSH_AUTH_SOCK= scp -o StrictHostKeyChecking=no -o ConnectTimeout=5 root@${CTRL1_IP}:/etc/octavia/.ssh/octavia_ssh_key .
SSH_AUTH_SOCK= scp -o StrictHostKeyChecking=no -o ConnectTimeout=5 octavia_ssh_key root@${TEMPEST1_IP}:/etc/octavia/.ssh

echo "===***===> All done, let's run tempest..."
# Push the tempest exclude list, then run the wrapper and keep a local
# copy of its output via tee on the remote side.
SSH_AUTH_SOCK= scp -o StrictHostKeyChecking=no -o ConnectTimeout=5 /etc/oci-poc/exclude.conf root@${TEMPEST1_IP}:/etc/tempest
ssh -o StrictHostKeyChecking=no -o ConnectTimeout=5 root@${TEMPEST1_IP} "cd /var/lib/tempest ; tempest_debian_shell_wrapper | tee oci-poc-tempest-run.txt"
