vmbuilder team mailing list archive

[Merge] lp:~adeuring/vmbuilder/jenkins_kvm-fix-precise-env.yaml into lp:vmbuilder

 

Abel Deuring has proposed merging lp:~adeuring/vmbuilder/jenkins_kvm-fix-precise-env.yaml into lp:vmbuilder.

Requested reviews:
  VMBuilder (vmbuilder)

For more details, see:
https://code.launchpad.net/~adeuring/vmbuilder/jenkins_kvm-fix-precise-env.yaml/+merge/228502

This branch should fix the wrong value of default-series for Vagrant/Juju Precise images.

The fix is obvious:

  [ "${suite}" != "precise" -o "${suite}" != "saucy" ]

is always true -- what we want instead is a test that succeeds only when suite equals neither precise nor saucy.
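
For illustration (a minimal sketch -- "precise" stands in for any value of ${suite}):

  suite="precise"
  # buggy test: always true, since every value differs from at least
  # one of "precise" and "saucy"
  [ "${suite}" != "precise" -o "${suite}" != "saucy" ] && echo "taken"

  # corrected test: succeeds only when suite is neither precise nor saucy
  [ "${suite}" != "precise" -a "${suite}" != "saucy" ] && echo "taken"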

(This is the kind of bug I wouldn't have caught if I had written the code myself ;)

-- 
https://code.launchpad.net/~adeuring/vmbuilder/jenkins_kvm-fix-precise-env.yaml/+merge/228502
Your team VMBuilder is requested to review the proposed merge of lp:~adeuring/vmbuilder/jenkins_kvm-fix-precise-env.yaml into lp:vmbuilder.
=== added file 'azure_config.sh'
--- azure_config.sh	1970-01-01 00:00:00 +0000
+++ azure_config.sh	2014-07-28 14:45:55 +0000
@@ -0,0 +1,84 @@
+#!/bin/bash
+
+# Load up some libraries
+my_dir="$( cd "$( dirname "$0" )" && pwd )"
+source "${my_dir}/functions/locker"
+source "${my_dir}/functions/common"
+source "${my_dir}/functions/retry"
+
+usage() {
+    cat <<EOM
+${0##*/} - Populates values in the build template.
+
+    Required:
+    --template      Template file
+    --serial        The build serial
+    --out           The output file
+    --tar           Name of tar file
+    --tar-d         Name of directory to tar up
+    --version       The version number of the distro
+    --proposed      Build against proposed
+EOM
+}
+
+# NOTE: usage() must be defined before the getopt call so the failure branch can use it
+short_opts="h"
+long_opts="out:,template:,serial:,tar:,tar-d:,version:,proposed"
+getopt_out=$(getopt --name "${0##*/}" \
+    --options "${short_opts}" --long "${long_opts}" -- "$@") &&
+    eval set -- "${getopt_out}" || { echo "BAD INVOCATION!"; usage; exit 1; }
+
+debug() { echo "${@}"; }
+serial=$(date +%Y%m%d)
+template_f="${0%/*}/templates/img-azure.tmpl"
+template_r="${0%/*}/img-azure-daily.tmpl"
+
+while [ $# -ne 0 ]; do
+  cur=${1}; next=${2};
+  case "$cur" in
+    --template)                 template_f=$2; shift;;
+    --serial)                   serial=$2; shift;;
+    --tar)                      tar_f=$2; shift;;
+    --tar-d)                    tar_d=$2; shift;;
+    --out)                      out_f=$2; shift;;
+    --version)                  version=$2; shift;;
+    --proposed)                 proposed="true"; shift;;
+    --) shift; break;;
+  esac
+  shift;
+done
+
+fail() { echo "${@}" 1>&2; exit 1;}
+fail_usage() { fail "Must define $@"; }
+
+# Create the template file for image conversion
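+# (placeholders: %S = build serial, %v = distro version, %P = proposed flag)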
+sed -e "s,%S,${serial},g" \
+    -e "s,%v,${version},g" \
+    -e "s,%P,${proposed:-false},g" \
+    ${template_f} > ${out_f} ||
+        fail "Unable to write template file"
+
+# Support per-suite addins
+set -x
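+# awk splits ${out_f} at each ADDIN_HERE marker: everything before the first
+# marker lands in template.txt; the marker line and what follows go to
+# template1.txt, template2.txt, ...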
+awk '/ADDIN_HERE/{n++}{print >"template" n ".txt" }' ${out_f}
+addin_name="${template_f//.tmpl/}-${version}-addin.tmpl"
+if [ -e "${addin_name}" ]; then
+    cat template.txt ${addin_name} template[1-9].txt > ${out_f}
+else
+    cat template.txt template[1-9].txt > ${out_f}
+fi
+set +x
+
+rm template*
+sed -e "s,ADDIN_HERE,# END Addins,g" -i  ${out_f}
+
+debug "=================================================="
+debug "Content of template:"
+cat ${out_f}
+debug "=================================================="
+
+if [ -n "${tar_d}" ]; then
+   tar -C "${tar_d}" -cf "${tar_f}" . &&
+        debug "TAR'd up ${tar_d}" ||
+        fail  "Failed to tar up ${tar_d}"
+fi
+exit 0

=== added file 'base_indicies.sh'
--- base_indicies.sh	1970-01-01 00:00:00 +0000
+++ base_indicies.sh	2014-07-28 14:45:55 +0000
@@ -0,0 +1,28 @@
+#!/bin/bash
+#
+# Simple job for creating indices
+suite="${1:-$SUITE}"
+serial="${2:-$SERIAL}"
+
+umask 022
+cronrun="/srv/builder/vmbuilder/bin/cronrun"
+
+# Override and set some home variables
+export HOME="/srv/builder/vmbuilder"
+export CDIMAGE_BIN="${HOME}/cdimage/bin"
+PUBLISH_SCRIPTS=${HOME}/ec2-publishing-scripts
+export CDIMAGE_ROOT="${HOME}/cdimage"
+export PATH="${PUBLISH_SCRIPTS}:${CDIMAGE_BIN}:${PATH}"
+
+fail() { echo "${@}" 1>&2; exit 1;}
+
+echo "Checksumming result directories"
+work_d="${WORKD:-/srv/ec2-images}/${suite}/${serial}"
+
+checksum-directory "${work_d}" &&
+    checksum-directory "${work_d}/unpacked" ||
+    fail "Failed to checksum result directories"
+
+update-build-indexes daily ${work_d} ${suite} ||
+    fail "Failed to make the indexes for ${work_d}"

=== added file 'build-juju-local.sh'
--- build-juju-local.sh	1970-01-01 00:00:00 +0000
+++ build-juju-local.sh	2014-07-28 14:45:55 +0000
@@ -0,0 +1,91 @@
+#!/bin/bash
+
+# Read in the common files
+myname=$(readlink -f ${0})
+mydir=$(dirname ${myname})
+mypdir=$(dirname ${mydir})
+
+# Scope stuff locally here
+# Create a temporary directory for the fun
+tmp_dir=$(mktemp -d builder.XXXXX --tmpdir=${TMPDIR:-/tmp})
+export TMPDIR=${tmp_dir}
+export WORKSPACE=${mydir}
+export HOME=${mydir}
+export LOCAL_BUILD=1
+
+clean() { [ -d ${tmp_dir} ] && rm -rf ${tmp_dir};
+          [ -d "${mydir}/Virtualbox VMS" ] && rm -rf "${mydir}/Virtualbox VMS";
+          exit "${@}"; }
+error() { echo "$@"; }
+debug() { error "$(date -R):" "$@"; }
+fail()  { debug "${1:-Something bad happened}"; clean 1; }
+
+# Fly with the safety on!
+trap fail EXIT
+trap fail SIGINT
+
+test_cmd_exists() {
+    which $1 >> /dev/null || fail "Command $1 does not exist! Please install $2"
+}
+
+[ "$(lsb_release -c -s)" != "trusty" ] &&
+    fail "This must be run on Ubuntu 14.04"
+
+test_cmd_exists qemu-nbd qemu-utils
+test_cmd_exists vboxmanage virtualbox
+test_cmd_exists bzr bzr
+test_cmd_exists sstream-query simplestreams
+
+# This defines what gets built
+build_for=${BUILD_FOR:-trusty:amd64 precise:amd64}
+
+for build in ${build_for[@]};
+do
+    suite=${build%%:*}
+    arch=${build##*:}
+    builder_img="${mydir}/${suite}-builder-${arch}.img"
+    results_d_arch="${mydir}/${suite}-${arch}"
+    built_img="${suite}-server-cloudimg-${arch}-juju-vagrant-disk1.img"
+
+    [ ! -e "${results_d_arch}" ] &&
+        mkdir -p "${results_d_arch}"
+
+    cmd=(
+       "${mydir}/standalone.sh"
+        "--cloud_cfg    ${mydir}/config/cloud-vps.cfg"
+        "--template     ${mydir}/templates/img-juju.tmpl"
+        "--suite        ${suite}"
+        "--arch         ${arch}"
+        "--use_img      ${builder_img}"
+        "--final_img    ${built_img}"
+        "--resize_final 40"
+        )
+
+    [ ! -e "${builder_img}" ] && cmd+=("--fetch_new")
+
+    [ -e "${results_d_arch}/${suite}-server-cloudimg-${arch}-juju-vagrant-disk1.img" ] ||
+        ( cd ${results_d_arch} && ${cmd[@]} )
+
+    # The following Vagrant-ifies the build
+    SUITE=${suite} \
+        ARCH_TYPE=${arch} \
+        SERIAL="current" \
+        SRV_D="${mydir}/${suite}-${arch}" \
+        WORKSPACE="${mydir}/${suite}-${arch}" \
+        ${mydir}/jenkins/CloudImages_Juju.sh
+
+    expected_box="${results_d_arch}/${suite}-server-cloudimg-${arch}-juju-vagrant-disk1.box"
+    [ -f "${expected_box}" ] || fail "unable to find ${expected_box}; build failed!"
+    results_out+=("${build} ${expected_box}")
+done
+
+# Clear the traps
+trap - EXIT
+trap - SIGINT
+
+debug "Results are in following locations"
+echo -e "${results_out[@]}"
+
+debug "Done with the build!"
+clean 0

=== added file 'builder_config.sh'
--- builder_config.sh	1970-01-01 00:00:00 +0000
+++ builder_config.sh	2014-07-28 14:45:55 +0000
@@ -0,0 +1,73 @@
+#!/bin/bash
+usage() {
+    cat <<EOM
+${0##*/} - Populates values in the build template.
+
+    Required:
+    --distro        Distro code name, i.e. precise
+    --arch          Arch, i.e. amd64, i386, armel, armhf
+    --template      Template file
+    --serial        The build serial
+    --out           The output file
+
+    Optional:
+    --bzr-automated-builds  bzr branch for automated ec2 builds
+    --bzr-pubscripts        bzr branch of EC2 Publishing Scripts
+    --bzr-livebuild         bzr branch of live-builder
+    --bzr-vmbuilder         bzr branch of vmbuilder
+EOM
+}
+
+# NOTE: usage() must be defined before the getopt call so the failure branch can use it
+short_opts="h"
+long_opts="distro:,arch:,build-type:,bzr-automated-builds:,bzr-pubscripts:,bzr-livebuild:,bzr-vmbuilder:,out:,template:,serial:"
+getopt_out=$(getopt --name "${0##*/}" \
+    --options "${short_opts}" --long "${long_opts}" -- "$@") &&
+    eval set -- "${getopt_out}" || { echo "BAD INVOCATION!"; usage; exit 1; }
+
+
+fail() { echo "${@}" 1>&2; exit 1;}
+
+serial=$(date +%Y%m%d)
+bzr_automated_builds="http://bazaar.launchpad.net/~ubuntu-on-ec2/vmbuilder/automated-ec2-builds";
+bzr_pubscripts="http://bazaar.launchpad.net/~ubuntu-on-ec2/ubuntu-on-ec2/ec2-publishing-scripts";
+bzr_livebuild="http://bazaar.launchpad.net/~ubuntu-on-ec2/live-build/cloud-images";
+bzr_vmbuilder="http://bazaar.launchpad.net/~ubuntu-on-ec2/vmbuilder/0.11a";
+template_f="${PWD}/img-build.tmpl"
+
+while [ $# -ne 0 ]; do
+  cur=${1}; next=${2};
+  case "$cur" in
+    --distro)                   distro=$2;  shift;;
+    --arch)                     arch=$2;    shift;;
+    --build-type)               build_type=$2;  shift;;
+    --bzr-automated-builds)     bzr_automated_builds=$2; shift;;
+    --bzr-pubscripts)           bzr_pubscripts=$2; shift;;
+    --bzr-livebuild)            bzr_livebuild=$2; shift;;
+    --bzr-vmbuilder)            bzr_vmbuilder=$2; shift;;
+    --template)                 template_f=$2; shift;;
+    --serial)                   serial=$2; shift;;
+    --out)                      out_f=$2; shift;;
+    --) shift; break;;
+  esac
+  shift;
+done
+
+fail_usage() { fail "Must define $@"; }
+
+[ -z "${distro}" ] && fail_usage "--distro"
+[ -z "${arch}" ] && fail_usage "--arch"
+[ -z "${build_type}" ] && fail_usage "--build-type"
+[ -z "${out_f}" ] && fail_usage "--out"
+
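+# Substitute the template placeholders: %d distro, %a arch, %b build type,
+# %S serial, and %A/%P/%L/%V the bzr branches configured above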
+sed -e "s,%d,${distro},g" \
+    -e "s,%a,${arch},g" \
+    -e "s,%b,${build_type},g" \
+    -e "s,%A,${bzr_automated_builds},g" \
+    -e "s,%P,${bzr_pubscripts},g" \
+    -e "s,%L,${bzr_livebuild},g" \
+    -e "s,%V,${bzr_vmbuilder},g" \
+    -e "s,%S,${serial},g" \
+    ${template_f} > ${out_f} ||
+        fail "Unable to write template file"
+
+exit 0

=== added directory 'config'
=== added file 'config/cloud-azure.cfg'
--- config/cloud-azure.cfg	1970-01-01 00:00:00 +0000
+++ config/cloud-azure.cfg	2014-07-28 14:45:55 +0000
@@ -0,0 +1,8 @@
+#cloud-config
+password: ubuntu
+chpasswd: { expire: False }
+ssh_pwauth: True
+packages:
+- pastebinit
+- zerofree
+- ubuntu-dev-tools

=== added file 'config/cloud-maas.cfg'
--- config/cloud-maas.cfg	1970-01-01 00:00:00 +0000
+++ config/cloud-maas.cfg	2014-07-28 14:45:55 +0000
@@ -0,0 +1,10 @@
+#cloud-config
+password: ubuntu
+packages:
+- bzr
+- kpartx
+- qemu-kvm
+- qemu-kvm-extras
+- qemu-kvm-extras-static
+- zerofree
+

=== added file 'config/cloud-maasv2.cfg'
--- config/cloud-maasv2.cfg	1970-01-01 00:00:00 +0000
+++ config/cloud-maasv2.cfg	2014-07-28 14:45:55 +0000
@@ -0,0 +1,9 @@
+#cloud-config
+#This is generic enough to build for both MAAS and general cloud images
+password: ubuntu
+packages:
+- bzr
+- qemu-utils
+- zerofree
+- gdisk
+- proot

=== added file 'config/cloud-precise.cfg'
--- config/cloud-precise.cfg	1970-01-01 00:00:00 +0000
+++ config/cloud-precise.cfg	2014-07-28 14:45:55 +0000
@@ -0,0 +1,20 @@
+#cloud-config
+password: ubuntu
+chpasswd: { expire: False }
+ssh_pwauth: True
+ssh_import_id: [utlemming, smoser, rcj]
+packages:
+- bzr
+- debootstrap
+- python-vm-builder
+- pastebinit
+- kpartx
+- qemu-kvm
+- qemu-kvm-extras
+- qemu-kvm-extras-static
+- debhelper
+- virtualbox
+- u-boot-tools
+- zerofree
+- gdisk
+- ubuntu-dev-tools

=== added file 'config/cloud-trusty-pp64el.cfg'
--- config/cloud-trusty-pp64el.cfg	1970-01-01 00:00:00 +0000
+++ config/cloud-trusty-pp64el.cfg	2014-07-28 14:45:55 +0000
@@ -0,0 +1,9 @@
+#cloud-config
+packages:
+- bzr
+- debootstrap
+- kpartx
+- debhelper
+- zerofree
+- gdisk
+- qemu-utils

=== added file 'config/cloud-trusty.cfg'
--- config/cloud-trusty.cfg	1970-01-01 00:00:00 +0000
+++ config/cloud-trusty.cfg	2014-07-28 14:45:55 +0000
@@ -0,0 +1,21 @@
+#cloud-config
+#This is generic enough to build for both MAAS and general cloud images
+password: ubuntu
+chpasswd: { expire: False }
+ssh_pwauth: True
+ssh_import_id: [utlemming, smoser, rcj]
+apt_sources:
+- source: deb $MIRROR $RELEASE multiverse
+packages:
+- bzr
+- debootstrap
+- kpartx
+- qemu-kvm
+- qemu-user-static
+- debhelper
+- virtualbox
+- zerofree
+- gdisk
+- proot
+- u-boot-tools
+- ubuntu-dev-tools

=== added file 'config/cloud-vps.cfg'
--- config/cloud-vps.cfg	1970-01-01 00:00:00 +0000
+++ config/cloud-vps.cfg	2014-07-28 14:45:55 +0000
@@ -0,0 +1,6 @@
+#cloud-config
+packages:
+- pastebinit
+- zerofree
+- btrfs-tools
+- ubuntu-dev-tools

=== added file 'config/cloud.cfg'
--- config/cloud.cfg	1970-01-01 00:00:00 +0000
+++ config/cloud.cfg	2014-07-28 14:45:55 +0000
@@ -0,0 +1,11 @@
+#cloud-config
+# Generic cloud-config for builder instance
+password: ubuntu
+chpasswd: { expire: False }
+ssh_pwauth: True
+apt_sources:
+- source: deb $MIRROR $RELEASE multiverse
+packages:
+- bzr
+- zerofree
+- gdisk

=== added file 'copy_to_final.sh'
--- copy_to_final.sh	1970-01-01 00:00:00 +0000
+++ copy_to_final.sh	2014-07-28 14:45:55 +0000
@@ -0,0 +1,34 @@
+#!/bin/bash
+#
+# copies the files to their staging location
+
+DISTRO="${1}"
+WORKSPACE="${2}"
+SERIAL="${3}"
+BTYPE="${4:-server}"
+TEST_BUILD="${5:-0}"
+SANDBOX_BUILD="${6:-0}"
+
+ROOT_D="${ROOT_D:-/srv/ec2-images}"
+base_d="${ROOT_D}/${DISTRO}/${SERIAL}"
+[ "${TEST_BUILD}" -eq 1 ] && base_d="${ROOT_D}/test_builds/${DISTRO}/${SERIAL}"
+[ "${SANDBOX_BUILD}" -eq 1 ] && base_d="${ROOT_D}/sandbox/${DISTRO}/${SERIAL}"
+[ "${BTYPE}" = "desktop" ] && base_d="${ROOT_D}/desktop/${DISTRO}/${SERIAL}"
+
+for roottar in $(find . -iname "*root.tar.gz"); do
+    echo "Generating file listing"
+
+    case ${roottar} in
+        *amd64*)    arch_name="amd64";;
+        *i386*)     arch_name="i386";;
+        *armel*)    arch_name="armel";;
+        *armhf*)    arch_name="armhf";;
+        *)          arch_name="unknown-$(date +%s)"
+    esac
+
+    tar -tzvf ${roottar} >> "${WORKSPACE}/file-list-${arch_name}.log" ||
+        echo "Non fatal error. Failed to gather file list for ${roottar}"
+done
+
+cp -au ${DISTRO}-*/* ${base_d} || exit 1
+exit 0

=== added file 'ec2_publisher.sh'
--- ec2_publisher.sh	1970-01-01 00:00:00 +0000
+++ ec2_publisher.sh	2014-07-28 14:45:55 +0000
@@ -0,0 +1,91 @@
+#!/bin/bash
+#
+# Simple execution wrapper for publishing images to EC2 from within Jenkins
+#
+suite="${1}"
+serial="${2}"
+btype="${3}"
+work_d="${4}"
+test_build="${5:-0}"
+sandbox_build="${6:-0}"
+allow_existing="${7:-0}"
+pub_type="daily"
+
+umask 022
+ec2_pub_scripts="${EC2_PUB_LOC:-${PWD}/ec2-publishing-scripts}"
+cronrun="/srv/builder/vmbuilder/bin/cronrun"
+
+# Override and set some home variables
+export HOME="/srv/builder/vmbuilder"
+EC2_DAILY=${HOME}/ec2-daily
+CDIMAGE_BIN="${HOME}/cdimage/bin"
+AUTO_BUILDS=${EC2_DAILY}/automated-ec2-builds
+PUBLISH_SCRIPTS=${HOME}/ec2-publishing-scripts
+XC2_PATH="${EC2_DAILY}/xc2"
+S3CMD_PATH="${EC2_DAILY}/s3cmd-0.9.9.91"
+MISC_PATH="${EC2_DAILY}/misc"
+VMBUILDER_PATH="${EC2_DAILY}/vmbuilder"
+( which euca-version > /dev/null 2>&1 ) || EUCA2OOLS_PATH="${EC2_DAILY}/euca2ools"
+BOTO_PATH="${EC2_DAILY}/boto-2.1.1"
+
+export EC2_AMITOOL_HOME="${EC2_DAILY}/ec2-ami-tools"
+export LIVE_BUILD_PATH="${EC2_DAILY}/live-build"
+MYPATH=${VMBUILDER_PATH}:${XC2_PATH}:${S3CMD_PATH}:${PUBLISH_SCRIPTS}:${AUTO_BUILDS}:${EC2_AMITOOL_HOME}/bin:$HOME/bin:${CDIMAGE_BIN}
+
+[ -n "${EUCA2OOLS_PATH}" ] && MYPATH="${MYPATH}:${EUCA2OOLS_PATH}/bin"
+
+export PYTHONPATH="${BOTO_PATH}:${EUCA2OOLS_PATH}"
+export PATH=${MYPATH}:/usr/bin:/usr/sbin:/usr/bin:/sbin:/bin
+export JAVA_HOME=/usr
+export START_D=${EC2_DAILY}
+export PUBLISH_BASE=/srv/ec2-images
+export XC2_RETRY_ON="Server.InternalError Read.timeout Server.Unavailable Unable.to.connect"
+
+export PATH="/srv/builder/vmbuilder/cdimage/bin:${ec2_pub_scripts}:${PATH}"
+
+fail() { echo "${@}" 1>&2; exit 1;}
+
+[ -e "${ec2_pub_scripts}" ] ||
+    fail "Please make sure that ec2-publishing-scripts is in the current path or define EC2_PUB_LOC"
+
+[ "$#" -eq 4 -o "$#" -eq 5 -o "$#" -eq 6 -o "$#" -eq 7 ] ||
+    fail "Incorrect number of parameters. Must invoke with: <suite> <serial> <build type> <directory>"
+
+[ "${test_build}" -eq 1 ] && {
+    echo "Build has been marked as a test build!";
+    echo "Publishing image to testing location";
+    pub_type="testing";
+}
+
+[ "${sandbox_build}" -eq 1 ] && {
+    echo "Build has been marked as a sandbox build!";
+    echo "Publishing image to Sandbox location";
+    pub_type="sandbox";
+}
+
+echo "Checksumming result directories"
+checksum-directory "${work_d}" &&
+    checksum-directory "${work_d}/unpacked" ||
+    fail "Failed to checksum result directories"
+
+echo "Publishing to EC2"
+pub_args=(--verbose)
+[ "${allow_existing}" -eq 1 ] && pub_args+=(--allow-existing)
+${cronrun} publish-build \
+    "${pub_args[@]}" \
+    "${suite}" \
+    "${btype}" \
+    "${pub_type}" \
+    "${work_d}" ||
+    fail "failed publish-build ${suite} ${btype} daily ${work_d}"
+
+# Update current
+base_d="${work_d%/*}"
+serial_d="${work_d##*/}"
+current_d="${base_d}/current"
+[ -e "${current_d}" ] && rm "${current_d}"
+( cd "${base_d}" && ln -s "${serial_d}" current ) ||
+    fail "failed to update current directory"
+
+exit 0
+

=== added directory 'functions'
=== added file 'functions/common'
--- functions/common	1970-01-01 00:00:00 +0000
+++ functions/common	2014-07-28 14:45:55 +0000
@@ -0,0 +1,15 @@
+# Common functions
+error() { echo "$@" 1>&2; }
+fail() { error "$@"; exit 1; }
+debug() { echo "$(date -R): $@" 1>&2; }
+
+# Look for common names
+[ -z "${kvm}" -a -n "${kvm_builder}" ] && kvm="${kvm_builder}"
+[ -z "${kvm_builder}" -a -n "${kvm}" ] && kvm_builder="${kvm}"
+
+[ -n "${kvm}" ] && scripts="${kvm}"
+[ -n "${kvm_builder}" ] && scripts="${kvm_builder}"
+
+export kvm="${scripts}"
+export kvm_builder="${scripts}"
+export scripts

=== added file 'functions/locker'
--- functions/locker	1970-01-01 00:00:00 +0000
+++ functions/locker	2014-07-28 14:45:55 +0000
@@ -0,0 +1,49 @@
+# This prevents concurrent commands from running.
+_script=$(readlink -f "${BASH_SOURCE[0]:?}")
+_my_dir=$(dirname "$_script")
+source "${_my_dir}/common"
+source "${_my_dir}/retry"
+
+cmd_lock() {
+        LOCKFILE="/tmp/wrapper-$(basename $1)"
+        LOCKFD=99
+
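+        # _lock composes the flock flag from its argument:
+        # x = exclusive, s = shared, u = unlock, n = non-blocking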
+        _lock()             { flock -$1 $LOCKFD; }
+        _no_more_locking()  { _lock u; _lock xn && rm -f $LOCKFILE; }
+        _prepare_locking()  { eval "exec $LOCKFD>\"$LOCKFILE\""; trap _no_more_locking EXIT; }
+
+        _prepare_locking
+
+        exlock_now()        { _lock xn; }  # obtain an exclusive lock immediately or fail
+        exlock()            { _lock x; }   # obtain an exclusive lock
+        shlock()            { _lock s; }   # obtain a shared lock
+        unlock()            { _lock u; }   # drop a lock
+
+        count=0
+        max_count=60
+
+        while (! exlock_now );
+        do
+            let wait_time=$RANDOM%30
+            error "Waiting ${wait_time} seconds due to concurrent ${1} command"
+            sleep ${wait_time}
+
+            count=$(expr ${count} + 1)
+
+            if [ ${count} -gt ${max_count} ]; then
+                echo "Max wait expired. Failing."
+                exit 1
+            fi
+        done
+
+        error "Executing command, lock is free for: ${@}"
+        "${@}"
+        unlock
+}
+
+_vbox_cmd() {
+    # Virtual box is a real pain. This function uses the locker function above to
+    # wrap up vboxmanage to prevent its stupid issues with concurrency. 
+    cmd_lock vboxmanage ${@} ||
+        fail "Failed to execute locked command: vboxmanage ${@}"
+}

=== added file 'functions/retry'
--- functions/retry	1970-01-01 00:00:00 +0000
+++ functions/retry	2014-07-28 14:45:55 +0000
@@ -0,0 +1,16 @@
+# Code for retrying commands
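+# retry <trycount> <sleep> <command ...>: run the command up to <trycount>
+# times, sleeping <sleep> seconds between failed attempts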
+
+retry() {
+    local trycount=${1} sleep=${2}
+    shift; shift;
+    local i=0 smsg=" sleeping ${sleep}: $*" ret=0
+    for((i=0;i<${trycount};i++)); do
+        "$@" && return 0
+        ret=$?
+        [ $(($i+1)) -eq ${trycount} ] && smsg=""
+        debug "Warning: cmd failed [try $(($i+1))/${trycount}].${smsg}"
+        sleep $sleep
+    done
+    return $ret
+}
+

=== added file 'get_serial.sh'
--- get_serial.sh	1970-01-01 00:00:00 +0000
+++ get_serial.sh	2014-07-28 14:45:55 +0000
@@ -0,0 +1,115 @@
+#!/bin/bash
+#
+# Determine the build serial and place files into the build serial location.
+# Also, handle the unlikely race condition in case multiple builders arrive
+# at the same point, and prevent races when populating the aggregate build
+# directory.
+
+# OUTPUT:
+# - serial.txt file in ${WORKSPACE}
+# - build_properties (OR ${BUILD_PROPERTIES}) file in ${PWD}
+# - build-info.txt in ${base_d}/unpacked (or ${base_nd}/unpacked)
+# NOTE: see code for how base_d and base_nd are computed
+
+DISTRO="${1}"
+WORKSPACE="${2}"
+BUILD_ID="${3}"
+BTYPE="${4:-server}"
+TEST_BUILD="${5:-0}"
+SANDBOX_BUILD="${6:-0}"
+ALLOW_EXISTING="${7:-1}"
+PUBLISH_IMAGE="${8:-0}"
+
+ROOT_D="${ROOT_D:-/srv/ec2-images}"
+base_d="${ROOT_D}/${DISTRO}"
+[ "${TEST_BUILD}" -eq 1 ] && base_d="${ROOT_D}/test_builds/${DISTRO}"
+[ "${SANDBOX_BUILD}" -eq 1 ] && base_d="${ROOT_D}/sandbox/${DISTRO}" && TEST_BUILD=0
+[ "${BTYPE}" = "desktop" ] && base_d="${ROOT_D}/desktop/${DISTRO}"
+
+let wait_time=$RANDOM%50
+sleep $wait_time # Make build collisions a bit harder
+
+make_meta() {
+   # Write the property file for publishing. This is used
+   # to trigger the EC2 publishing job
+   cat << EOF > ${BUILD_PROPERTIES:-build_properties}
+BUILD_TYPE=${BTYPE}
+SERIAL=${1##*/}
+SUITE=${DISTRO}
+TEST_BUILD=${TEST_BUILD}
+SANDBOX_BUILD=${SANDBOX_BUILD}
+PUBLISH_IMAGE=${PUBLISH_IMAGE}
+ALLOW_EXISTING=${ALLOW_EXISTING}
+EOF
+
+   # Write the build-info.txt file. This is used in
+   # the publishing process
+   [ -d "${1}/unpacked" ] || mkdir -p "${1}/unpacked"
+   cat << EOF > "${1}/unpacked/build-info.txt"
+serial=${1##*/}
+orig_prefix=${DISTRO}-${BTYPE}-cloudimg
+suite=${DISTRO}
+build_name=${BTYPE}
+EOF
+
+   exit 0
+}
+
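+# /tmp/${DISTRO}-${BUILD_ID} doubles as a lock and hand-off file: the first
+# builder to touch it creates the serial directory and records its path there;
+# any later builder waits here until that path (or serial.txt) appears.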
+stat /tmp/${DISTRO}-${BUILD_ID} > /dev/null 2>&1 && {
+   echo "Another builder is/has reserved this part of the build. Deferring..."
+   while [ -z "${destdir}" ]
+   do
+      sleep 5
+      finaldir=""
+
+      [ -e "${WORKSPACE}/serial.txt" ] && {
+         read serial < "${WORKSPACE}/serial.txt"
+         destdir="${base_d}/${serial}"
+      }
+
+      while read destdir
+      do
+         echo "Candidate serial found: ${destdir##*/}"
+         finaldir="${destdir}"
+      done < /tmp/${DISTRO}-${BUILD_ID}
+
+      [ -n "${finaldir}" ] && {
+         echo "Aggregation directory reported as ${finaldir}"
+         echo "${finaldir##*/}" > "${WORKSPACE}/serial.txt"
+         exit 0 
+      } || {
+         echo "destdir is not defined!" && exit 10
+      }
+
+   done
+}
+
+# if we get here, then we know that the build dir hasn't been created yet
+touch /tmp/${DISTRO}-$BUILD_ID
+base_d="${base_d}/$(date +%Y%m%d)"
+
+make_and_write() {
+   echo "Creating aggregation directory ${1}"
+   echo "${1##*/}" > "${WORKSPACE}/serial.txt"
+   mkdir -p "${1}" &&
+      echo "${1}" >> /tmp/${DISTRO}-$BUILD_ID ||
+      exit 10
+
+   # Copy stuff to where it should go
+   make_meta "${1}"
+}
+
+if [ ! -d "${base_d}" ]; then
+   make_and_write "${base_d}"
+else
+   for bs in {1..30}
+   do
+     base_nd="${base_d}.${bs}"
+     echo "Checking on directory ${base_nd}"
+     [ ! -d "${base_nd}" ] && make_and_write "${base_nd}"
+   done
+fi
+
+exit 0

=== added directory 'jenkins'
=== added file 'jenkins/CloudImage_CustomBuilder.sh'
--- jenkins/CloudImage_CustomBuilder.sh	1970-01-01 00:00:00 +0000
+++ jenkins/CloudImage_CustomBuilder.sh	2014-07-28 14:45:55 +0000
@@ -0,0 +1,73 @@
+#!/bin/bash
+
+echo "_____ENVIRONMENT_________________________"
+env
+echo "_____END ENVIRONMENT_____________________"
+
+build_config="${PWD}/${DISTRO}-build.sh"
+fail() { [ $# -eq 0 ] || echo "$@"; exit 1; }
+
+# Read in the common functions
+my_dir="$( cd "$( dirname "$0" )" && pwd )"
+base_dir=$(dirname ${my_dir})
+source "${base_dir}/functions/locker"
+source "${base_dir}/functions/common"
+source "${base_dir}/functions/retry"
+source ${my_dir}/build_lib.sh
+select_build_config
+
+# Get the serial number
+"${kvm_scripts}/get_serial.sh" "${DISTRO}" "${WORKSPACE}" "${BUILD_ID}" \
+         "${BTYPE}" "${TEST_BUILD}" "${SANDBOX_BUILD}" "${PUBLISH_IMAGE}"
+
+read SERIAL < serial.txt
+[ -z "${SERIAL}" ] && echo "NO SERIAL" && exit 10
+
+# Create the configurations
+"${kvm_scripts}/builder_config.sh" \
+     --distro "${DISTRO}" \
+     --build-type "${BTYPE}" \
+     --arch "${ARCH_TYPE}" \
+     --template kvm/templates/img-build.tmpl \
+     --serial "${SERIAL}" \
+     --bzr-automated-builds "${BZR_AUTOMATED_EC2_BUILDS}" \
+     --bzr-pubscripts "${BZR_EC2_PUBSCRIPTS}" \
+     --bzr-livebuild "${BZR_LIVEBUILD}" \
+     --bzr-vmbuilder "${BZR_VMBUILDER}" \
+     --out "${build_config}" ||
+        fail "Failed to configure instance configuration"
+
+# Exit after configuring for arm if so configured
+dist_ge() { [[ "$1" > "$2" || "$1" == "$2" ]]; }
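+# (string comparison is enough here: Ubuntu codenames sort alphabetically
+# in release order)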
+if [[ "${ARCH_TYPE}" =~ (arm|arm64|aarch64) ]]; then
+   echo "This is an ARM build. ARM rules will apply"
+   [ "${BUILD_ARM}" -eq 0 ] && exit 0
+   [ "${ARCH_TYPE}" == "arm64" ] && { dist_ge "${DISTRO}" trusty || exit 0; }
+fi
+
+# Launch the builder
+"${kvm_scripts}/launch_kvm.sh" \
+     --id "${BUILD_ID}" \
+     --user-data "${build_config}" \
+     --cloud-config kvm/config/${cloud_init_cfg} \
+     --img-url "${BUILDER_CLOUD_IMAGE}" \
+     --raw-disk "${WORKSPACE}/${DISTRO}.raw" \
+     --raw-size 20 ||
+        fail "KVM instance failed"
+
+tar -xvvf "${WORKSPACE}/${DISTRO}.raw" ||
+    fail "Result tar failed to unpack"
+
+rm "${WORKSPACE}/${DISTRO}.raw"
+
+"${kvm_scripts}/copy_to_final.sh" \
+     "${DISTRO}" "${WORKSPACE}" "${SERIAL}" "${BTYPE}" "${TEST_BUILD}" "${SANDBOX_BUILD}" ||
+    fail "Failed to place final files to destination"
+
+find . -iname "build_properties" | xargs -I FILE cp FILE /var/lib/jenkins/jobs/CloudImage_CustomBuilder/workspace/build_properties ||
+    fail "Unable to find build_properties file. Aborting"
+
+echo "PUBLISH_IMAGE=${PUBLISH_IMAGE}" >> /var/lib/jenkins/jobs/CloudImage_CustomBuilder/workspace/build_properties
+
+cat /var/lib/jenkins/jobs/CloudImage_CustomBuilder/workspace/build_properties ||
+    fail "Build properties file was not found."

=== added file 'jenkins/CloudImages_Azure.sh'
--- jenkins/CloudImages_Azure.sh	1970-01-01 00:00:00 +0000
+++ jenkins/CloudImages_Azure.sh	2014-07-28 14:45:55 +0000
@@ -0,0 +1,162 @@
+#!/bin/bash
+fail() { [ $# -eq 0 ] || echo "$@"; exit 1; }
+
+umask 022
+# Pre-setup: Read the build properties from the previous build
+# and discard what we don't want
+source build_properties
+
+# Load up some libraries
+my_dir="$( cd "$( dirname "$0" )" && pwd )"
+base_dir=$(dirname ${my_dir})
+source "${base_dir}/functions/locker"
+source "${base_dir}/functions/common"
+source "${base_dir}/functions/retry"
+
+debug() { echo "${@}"; }
+fail() { echo "${@}" 1>&2; exit 1;}
+
+
+# Shuffle stuff around
+[ -e build_properties ] && mv build_properties parent_build_properties
+[ -e build.log ] && mv build.log parent_build.log
+[ -e "${SUITE}-build.sh" ] && rm "${SUITE}-build.sh"
+
+echo "-------------------"
+echo " Distro: ${SUITE}"
+echo " Serial: ${SERIAL}"
+echo " Type:   ${BUILD_TYPE}"
+echo "-------------------"
+
+set -x
+# Variables
+disk_name="${SUITE}-server-cloudimg-amd64-disk1.img"
+raw_name="${PWD}/${SUITE}-server-cloudimg-amd64-disk1.raw"
+vhd_name="${PWD}/${SUITE}-server-cloudimg-amd64-disk1.vhd"
+disk_root="/srv/ec2-images/${SUITE}/${SERIAL}"
+raw_disk="${PWD}/results.raw"
+launch_config="${PWD}/launch_config.sh"
+register_config="${PWD}/register_config.sh"
+pkg_tar="${PWD}/pkg.tar"
+pkg_tar_d="${kvm_builder}/azure_pkgs"
+proposed="${PROPOSED:-false}"
+vhd_size=30
+
+# Convert image to a RAW disk to work with. The raw image is used
+# to populate the daily VHD in Azure
+debug "Converting QCow2 to Raw Disk"
+qemu-img \
+    convert -O raw \
+    "${disk_root}/${disk_name}" \
+    "${raw_name}" &&
+        debug "Converted QCow2 to Raw disk for manipulation" ||
+        fail  "Failed to convert QCow2 to Raw disk"
+
+config_opts=()
+config_opts+=(
+    --version $(${kvm_builder}/ubuntu-adj2version ${SUITE})
+    --serial "${SERIAL}"
+    --out "${launch_config}"
+    )
+
+# Turns on building from proposed
+[ "${proposed}" == "true" ] &&
+    config_opts+=(--proposed)
+
+# Setup the configuration
+${kvm_builder}/azure_config.sh \
+    ${config_opts[@]} ||
+        fail "Failed to configure instance runtime"
+
+# Fully populate the disk for 12.04
+root_size=2
+if [ "${SUITE}" == "precise" ]; then
+    root_size=29
+    truncate -s 29G "${raw_name}.pre-vhd" &&
+        debug "Resized 12.04 image to full size" ||
+        fail  "Failed to resize 12.04 to full size"
+fi
+
+# Launch KVM to do the work
+${kvm_builder}/launch_kvm.sh \
+    --id ${BUILD_ID} \
+    --user-data "${launch_config}" \
+    --cloud-config "${kvm_builder}/config/cloud-azure.cfg" \
+    --extra-disk "${raw_name}" \
+    --raw-disk "${WORKSPACE}/${SUITE}-output.raw" \
+    --raw-size ${root_size} \
+    --img-url /srv/builder/images/precise-builder-latest.img ||
+        fail "KVM instance failed to build image."
+
+rm "${WORKSPACE}/${SUITE}-output.raw"
+
+
+# Copy the raw image to make it ready for VHD production
+cp  --sparse=always "${raw_name}" "${raw_name}.pre-vhd" &&
+    debug "Copied raw image for VHD production" ||
+    fail "Failed to copy raw image to ${raw_name}.pre-vhd"
+
+# Resize the copied RAW image
+debug "Truncating image to ${vhd_size}G"
+truncate -s "${vhd_size}G" "${raw_name}.pre-vhd" &&
+    debug "Truncated image at ${vhd_size}G" ||
+    fail  "Failed to truncate disk image"
+
+# Convert to VHD first, step 1 of cheap hack
+# This is a cheap hack...half the time the next command
+# will fail with "VERR_INVALID_PARAMETER", so this is the,
+# er, workaround
+debug "Converting to VHD"
+_vbox_cmd convertfromraw --format VHD \
+    "${raw_name}.pre-vhd" \
+    "${vhd_name}.pre" &&
+    debug "Converted raw disk to VHD" ||
+    fail "Failed to convert raw image to VHD"
+
+# Clone the disk to fixed, VHD for Azure
+debug "Converting to VHD format from raw..."
+debug ".....this might take a while...."
+_vbox_cmd clonehd --format VHD --variant Fixed \
+    "${vhd_name}.pre" \
+    "${vhd_name}" &&
+    debug "Converted raw disk to VHD format using VirtualBox" ||
+    fail  "Failed to convert raw image to VHD disk!"
+
+# Remove the unneeded files
+rm "${vhd_name}.pre" "${raw_name}.pre-vhd"
+
+debug "Image Characteristics:"
+_vbox_cmd showhdinfo "${vhd_name}"
+
+
+debug "Raw image converted to VHD"
+
+# Compress the VHD with bzip2
+debug "Archiving the VHD image"
+pbzip2 -f "${vhd_name}" &&
+    debug "Created archive of the VHD image" ||
+    fail "Failed to compress image"
+
+exit 0
+
+# Log it
+cat << EOF > "${WORKSPACE}/register.txt"
+SUITE=${SUITE}
+SERIAL=${SERIAL}
+REGISTRATION=${upload_name}
+URL=http://ubuntupublishing.blob.core.windows.net/daily-vhd/${upload_name}
+BUILD_ID=${BUILD_ID}
+EOF
+
+
+[ "${proposed}" = "true" ] &&
+    stuff_f="${stuff_f//disk1/PPA-disk1}"
+
+#mv "${vhd_name}.bz2" "${stuff_f}" ||
+#    fail "Failed to move final image to final place"
+
+chown jenkins:serverteam-jenkins "${disk_root}/azure" ||
+    fail "Failed to assign ownership final image"
+
+chmod 0644 "${stuff_f}" ||
+    fail "Failed to assign permissions to final image"

=== added file 'jenkins/CloudImages_Base.sh'
--- jenkins/CloudImages_Base.sh	1970-01-01 00:00:00 +0000
+++ jenkins/CloudImages_Base.sh	2014-07-28 14:45:55 +0000
@@ -0,0 +1,70 @@
+#!/bin/bash
+
+build_config="${PWD}/${DISTRO}-build.sh"
+
+# Read in the common functions
+my_dir="$( cd "$( dirname "$0" )" && pwd )"
+base_dir=$(dirname ${my_dir})
+source "${base_dir}/functions/locker"
+source "${base_dir}/functions/common"
+source "${base_dir}/functions/retry"
+source ${my_dir}/build_lib.sh
+select_build_config
+
+# Only block for serial if serial is unknown
+[ -z "${SERIAL}" ] && {
+    # Get the serial number
+    retry 3 10 \
+        "${kvm_scripts}/get_serial.sh" \
+             "${DISTRO}" "${WORKSPACE}" "${BUILD_ID}" "${BTYPE}" 0 0 1 1 ||
+             fail "Failed to get serial for this build"
+
+    # Get the serial number
+    read SERIAL < serial.txt
+    [ -z "${SERIAL}" ] && echo "NO SERIAL" && exit 10
+}
+
+# Create the configurations
+"${kvm_scripts}/builder_config.sh" \
+     --distro "${DISTRO}" \
+     --build-type "${BTYPE}" \
+     --arch "${ARCH_TYPE}" \
+     --template ${base_dir}/templates/img-build.tmpl \
+     --serial "${SERIAL}" \
+     --out "${build_config}" ||
+        fail "Failed to configure instance configuration"
+
+# Exit after configuring for arm if so configured
+if [[ "${ARCH_TYPE}" =~ (arm|aarch64|arm64) ]]; then
+   echo "This is an ARM build. ARM rules will apply"
+   [ "${BUILD_ARM}" -eq 0 ] && exit 0
+fi
+
+# Launch the builder
+# Retry building the image twice, waiting five
+# minutes. This should buffer most failures caused
+# by bad mirrors.
+retry 2 300 \
+    "${kvm_scripts}/launch_kvm.sh" \
+         --id "${BUILD_ID}" \
+         --user-data "${build_config}" \
+         --cloud-config "${base_dir}/config/${cloud_init_cfg}" \
+         --img-url "${BUILDER_CLOUD_IMAGE}" \
+         --raw-disk "${WORKSPACE}/${DISTRO}.raw" \
+         --raw-size 20 ||
+         fail "KVM instance failed"
+
+tar -xvvf "${WORKSPACE}/${DISTRO}.raw" ||
+    fail "Result tar failed to unpack"
+
+rm "${WORKSPACE}/${DISTRO}.raw" ||
+    fail "Failed to remove unnecessary file"
+
+# Put the bits in place
+"${kvm_scripts}/copy_to_final.sh" \
+     "${DISTRO}" "${WORKSPACE}" "${SERIAL}" "${BTYPE}" "${TEST_BUILD}" "${SANDBOX_BUILD}" ||
+    fail "Failed to place final files to destination"
+
+# Copy the build properties into the workspace
+cp ${BUILD_PROPERTIES} .
+echo "ARCH=${ARCH_TYPE}" >> build_properties

=== added file 'jenkins/CloudImages_Base_Release_Delta.sh'
--- jenkins/CloudImages_Base_Release_Delta.sh	1970-01-01 00:00:00 +0000
+++ jenkins/CloudImages_Base_Release_Delta.sh	2014-07-28 14:45:55 +0000
@@ -0,0 +1,207 @@
+#!/bin/bash
+
+# Write the build properties file
+cat << EOF > "${WORKSPACE}/build_properties"
+SUITE=${SUITE}
+STREAM=${STREAM}
+SERIAL=${SERIAL}
+BUILD_TYPE=${BUILD_TYPE}
+
+EOF
+
+# Write the environment variables to the run file
+env > ${SUITE}.run
+
+fail() { echo "$@"; exit 1;}
+dist_ge() { [[ "$1" > "$2" || "$1" == "$2" ]]; }
+
+arches=(i386 amd64 armel armhf)
+exec_c="/srv/builder/vmbuilder/bin/cronrun"
+rel_base="/srv/ec2-images/releases/${SUITE}/release"
+rel_link=$(readlink ${rel_base})
+
+[ "${BUILD_TYPE}" = "desktop" ] &&
+    echo "Not valid for desktop builds" &&
+    exit 0
+
+# Find the existing manifest file
+old_manifest=$(find -L ${rel_base} -maxdepth 1 -iname '*amd64.manifest') ||
+    echo "Unable to find release manifest file"
+
+# Find the new manifest file
+new_manifest_d="/srv/ec2-images/${SUITE}/${SERIAL}"
+[ "${TEST_BUILD:-0}" -eq 1 ] && new_manifest_d="/srv/ec2-images/test_builds/${SUITE}/${SERIAL}"
+[ "${SANDBOX_BUILD:-0}" -eq 1 ] && new_manifest_d="/srv/ec2-images/sandbox/${SUITE}/${SERIAL}"
+new_manifest=$(find ${new_manifest_d} -maxdepth 1 -iname '*amd64.manifest') ||
+    fail "Unable to find new manifest file"
+
+# Find the previous serial if there was one
+previous_serial=$(find  /srv/ec2-images/${SUITE}/ -maxdepth 1 -type d |\
+    awk -F\/ '{print$NF}' | sort -rn | grep "." | grep -v "${SERIAL}" | head -n1) ||
+    echo "Unable to find prior daily manifest"
+
+previous_manifest=${new_manifest//$SERIAL/$previous_serial}
+
+# Generate the pure package diffs
+for arch in "${arches[@]}"
+do
+    nm=${new_manifest//amd64/$arch}
+    om=${old_manifest//amd64/$arch}
+    pm=${previous_manifest//amd64/$arch}
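+    # nm = new daily manifest, om = current release manifest,
+    # pm = previous daily manifest, each adjusted for ${arch}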
+
+    [ -e "${nm}" ] &&
+        cp "${nm}" "${WORKSPACE}/manifest-${arch}-daily-${SERIAL}.txt"
+
+    # Generate the diff from daily to release
+    if [ -e "${nm}" -a -e "${om}" ]; then
+        release_diff=${new_manifest##*/}
+        release_diff=${release_diff//.manifest/-$rel_link-to-daily_manifest.diff}
+        release_diff=${release_diff//amd64/$arch}
+        diff -u ${om} ${nm} > "${WORKSPACE}/${release_diff}"
+        cp ${om} "${WORKSPACE}/manifest-${arch}-release.txt"
+    fi
+
+    # Generate the diff from daily to old daily
+    if [ -e "${nm}" -a -e "${pm}" ]; then
+        daily_diff=${new_manifest##*/}
+        daily_diff=${daily_diff//.manifest/-$previous_serial-to-$SERIAL-manifest.diff}
+        daily_diff=${daily_diff//amd64/$arch}
+        diff -u ${pm} ${nm} > "${WORKSPACE}/${daily_diff}"
+        cp ${pm} "${WORKSPACE}/manifest-${arch}-previous_daily-${previous_serial}.txt"
+    fi
+done
+
+# Get the kernel differences
+if dist_ge ${SUITE} quantal; then
+    old_linux_kernel="$(awk '/linux-image.*generic/ {print$NF}' ${old_manifest})"
+    new_linux_kernel="$(awk '/linux-image.*generic/ {print$NF}' ${new_manifest})"
+else
+    old_linux_kernel="$(awk '/linux-image-virtual/ {print$NF}' ${old_manifest})"
+    new_linux_kernel="$(awk '/linux-image-virtual/ {print$NF}' ${new_manifest})"
+fi
+
+if [ "${old_linux_kernel}" != "${new_linux_kernel}" ]; then
+    echo "${new_linux_kernel}" > ${new_manifest_d}/unpacked/.trigger-test
+
+    cat << EOF > "${WORKSPACE}/${SUITE}-kernel-trigger"
+OLD: ${old_linux_kernel}
+NEW: ${new_linux_kernel}
+EOF
+
+    cp "${WORKSPACE}/${SUITE}-kernel-trigger" \
+       "${TRIGGER_LOCATION:-/srv/builder/triggers/kernel}/${PARENT_BUILDER_ID}.trigger"
+
+fi
+
+# Get differences in openssl
+old_openssl="$(awk '/^openssl/ {print$NF}' ${old_manifest})"
+new_openssl="$(awk '/^openssl/ {print$NF}' ${new_manifest})"
+
+if [ "${old_openssl}" != "${new_openssl}" ]; then
+    echo "${new_openssl}" >> ${new_manifest_d}/unpacked/.trigger-test
+
+    cat << EOF > "${WORKSPACE}/${SUITE}-openssl-trigger"
+OpenSSL
+OLD: ${old_openssl}
+NEW: ${new_openssl}
+EOF
+
+    cp "${WORKSPACE}/${SUITE}-openssl-trigger" \
+       "${TRIGGER_LOCATION:-/srv/builder/triggers/openssl}/${PARENT_BUILDER_ID}.trigger"
+fi
+
+
+# Copy the diffs into the current workspace
+cp ${WORKSPACE}/*.diff ${new_manifest_d}/unpacked
+
+# Generate the mfdiff between the dailies
+[ -e "${previous_manifest}" -a -e "${new_manifest}" ] &&
+    ${exec_c} mfdiff amd64 ${SUITE} ${previous_manifest} ${new_manifest} >\
+    "${WORKSPACE}/${SUITE}-daily.changelog"
+
+# Generate the diff between daily and the released image
+[ -e "${old_manifest}" -a -e "${new_manifest}" ] &&
+    ${exec_c} mfdiff amd64 ${SUITE} ${old_manifest} ${new_manifest} >\
+    "${WORKSPACE}/${SUITE}-${rel_link}-to-daily.changelog"
+
+# Copy the changelogs into the current workspace
+cp ${WORKSPACE}/*.changelog ${new_manifest_d}/unpacked
+
+# The rest of the operations are for released images only
+[ ! -e "${rel_base}" ] &&
+    echo "No current release, aborting comparison" &&
+    exit 0
+
+# Tar up the deltas
+tar -C ${WORKSPACE} -jcvf "${WORKSPACE}/${SUITE}-${SERIAL}.tar.bz2" \
+    *.changelog \
+    *.txt \
+    *.diff ||
+    fail "Failed to create tarball"
+
+# Start the email report work
+changed_pkgs=$(grep '=>' ${SUITE}-${rel_link}-to-daily.changelog | \
+    sed -e 's,====,,g' -e 's,^, *,g' | sort -k2)
+
+# Generate the email template
+VER=$(${kvm}/ubuntu-adj2version ${SUITE})
+
+case ${VER} in
+    *8.04*)  VER="${VER} LTS";
+             CODENAME="Hardy Heron";;
+    *10.04*) VER="${VER} LTS";
+             CODENAME="Lucid Lynx";;
+    *11.04*) CODENAME="Natty Narwhal";;
+    *11.10*) CODENAME="Oneiric Ocelot";;
+    *12.04*) VER="${VER} LTS";
+             CODENAME="Precise Pangolin";;
+    *12.10*) CODENAME="Quantal Quetzal";;
+    *13.04*) CODENAME="Raring Ringtail";;
+    *13.10*) CODENAME="Saucy Salamander";;
+    *14.04*) VER="${VER} LTS";
+             CODENAME="Trusty Tahr";;
+    *14.10*) CODENAME="Utopic Unicorn";;
+esac
+
+email_name="${WORKSPACE}/${SUITE}-release_announcement.email"
+cat << EOF > "${email_name}"
+SUBJECT: Refreshed Cloud Images of ${VER} (${CODENAME}) [${SERIAL}]
+TO: ec2ubuntu@xxxxxxxxxxxxxxxx; ubuntu-cloud@xxxxxxxxxxxxxxxx; ubuntu-cloud-announce@xxxxxxxxxxxxxxxx
+
+A new release of the Ubuntu Cloud Images for the stable Ubuntu release ${VER} (${CODENAME}) is available at [1]. These new images supersede the existing images [2]. Images are available for download or for immediate use on EC2 via published AMI ids. Users who wish to update their existing installations can do so with:
+   'sudo apt-get update && sudo apt-get dist-upgrade && sudo reboot'.
+
+EOF
+
+if [ -n "${old_linux_kernel}" -a -n "${new_linux_kernel}" ]; then
+    cat << EOF >> "${email_name}"
+The Linux kernel was updated from ${old_linux_kernel} [3] to ${new_linux_kernel} [4]
+
+EOF
+fi
+
+cat << EOF >> "${email_name}"
+The following packages have been updated. Please see the full changelogs
+for a complete listing of changes:
+${changed_pkgs}
+
+
+The following is a complete changelog for this image.
+$(cat ${SUITE}-${rel_link}-to-daily.changelog)
+
+--
+[1] http://cloud-images.ubuntu.com/releases/${SUITE}/release-${SERIAL}/
+[2] http://cloud-images.ubuntu.com/releases/${SUITE}/${rel_link}/
+EOF
+
+if [ -n "$old_linux_kernel" -a -n "${new_linux_kernel}" ]; then
+cat << EOF >> "${email_name}"
+[3] http://changelogs.ubuntu.com/changelogs/pool/main/l/linux/linux_${old_linux_kernel}/changelog
+[4] http://changelogs.ubuntu.com/changelogs/pool/main/l/linux/linux_${new_linux_kernel}/changelog
+EOF
+fi
+
+# Create release notes
+lnc=$(wc -l ${email_name} | awk '{print$1}')
+tail -n $((lnc - 3)) ${email_name} > "${WORKSPACE}/release_notes.txt"
+cp ${WORKSPACE}/release_notes.txt ${new_manifest_d}/unpacked

=== added file 'jenkins/CloudImages_IBM_SmartCloud.sh'
--- jenkins/CloudImages_IBM_SmartCloud.sh	1970-01-01 00:00:00 +0000
+++ jenkins/CloudImages_IBM_SmartCloud.sh	2014-07-28 14:45:55 +0000
@@ -0,0 +1,53 @@
+#!/bin/bash
+
+# Read in the common files
+source "${kvm}/functions/common"
+source "${kvm}/functions/retry"
+
+# Apply the build stuff
+find . -iname "*build_properties" | xargs -I FILE cp FILE .
+[ -e "build_properties" ] &&
+    source build_properties ||
+    fail "Failed to read build_properties. I don't know what I'm doing!"
+
+rm -f failed success
+
+# Copy the target disk image
+disk_orig="${SUITE}-server-cloudimg-${ARCH}-disk1.img"
+disk_cp="${disk_orig//cloudimg/cloudimg-sce}"
+disk_root="/srv/ec2-images/${SUITE}/${SERIAL:-current}"
+disk_working="${WORKSPACE}/${disk_cp}.tmp"
+disk_final="${WORKSPACE}/${disk_cp}"
+raw_f="${WORKSPACE}/raw_f-$(date +%s).img"
+
+cp "${disk_root}/${disk_orig}" "${disk_working}" ||
+    fail "Unable to copy ${disk_orig} from ${disk_root}"
+
+# Launch KVM to do the work
+${kvm}/launch_kvm.sh \
+        --id "${ARCH}-${BUILD_ID}" \
+        --user-data "${kvm}/config/cloud-vps.cfg" \
+        --cloud-config "${kvm}/templates/img-smartcloud.tmpl" \
+        --extra-disk "${disk_working}" \
+        --disk-gb 1 \
+        --raw-disk "${raw_f}" \
+        --raw-size 1 \
+        --img-url /srv/builder/images/precise-builder-latest.img ||
+            fail "KVM instance failed to build image."
+
+# Remove the results
+rm "${raw_f}" || /bin/true
+
+# Convert to raw disk
+qemu-img convert -O raw "${disk_working}" "${disk_final}" ||
+    fail "Failed to convert disk to raw format"
+
+# Remove working disk
+rm "${disk_working}" ||
+    /bin/true
+
+# Compress the disk
+gzip "${disk_final}" ||
+    fail "Failed to compress ${disk_final}"
+
+cp ${WORKSPACE}/packages.log ${disk_final}.packages

=== added file 'jenkins/CloudImages_Juju.sh'
--- jenkins/CloudImages_Juju.sh	1970-01-01 00:00:00 +0000
+++ jenkins/CloudImages_Juju.sh	2014-07-28 14:45:55 +0000
@@ -0,0 +1,196 @@
+#!/bin/bash
+
+# Read in the common files
+my_name=$(readlink -f ${0})
+my_dir=$(dirname ${my_name})
+my_pdir=$(dirname ${my_dir})
+
+# Source in the common functions
+source "${my_pdir}/functions/common"
+source "${my_pdir}/functions/retry"
+source "${my_pdir}/functions/locker"
+export HOME=${WORKSPACE}
+
+# Copy the target disk images
+ARCH_TYPE=${ARCH_TYPE:-$ARCH}
+disk_orig="${SUITE}-server-cloudimg-${ARCH_TYPE}-disk1.img"
+disk_cp="${disk_orig//$ARCH_TYPE/$ARCH_TYPE-juju-vagrant}"
+disk_root="${SRV_D:-/srv/ec2-images}/${SUITE}/${SERIAL:-current}"
+disk_working="${WORKSPACE}/${disk_cp}"
+final_disk="${WORKSPACE}/box-disk1.vdi"
+final_location="${SRV_D:-/srv/ec2-images}/vagrant/${SUITE}/${SERIAL}"
+box_name="${disk_working//.img/.box}"
+raw_f="${WORKSPACE}/raw_f-$(date +%s).img"
+build_host_suite=$(lsb_release -c -s)
+
+jenkins_build() {
+    [ -e "build_properties" ] &&
+        source build_properties ||
+        fail "Failed to read build_properties. I don't know what I'm doing!"
+
+    cp "${disk_root}/${disk_orig}" "${disk_working}" ||
+        fail "Unable to copy ${disk_orig} from ${disk_root}"
+
+    qemu-img resize ${disk_working} 40G
+
+    # Launch KVM to do the work
+    ${my_pdir}/launch_kvm.sh \
+            --id "${ARCH_TYPE}-${BUILD_ID}" \
+            --user-data "${my_pdir}/config/cloud-vps.cfg" \
+            --cloud-config "${my_pdir}/templates/img-juju.tmpl" \
+            --extra-disk "${disk_working}" \
+            --disk-gb 1 \
+            --raw-disk "${raw_f}" \
+            --raw-size 1 \
+            --img-url /srv/builder/images/precise-builder-latest.img ||
+                fail "KVM instance failed to build image."
+}
+
+# Assume that we're building in Jenkins unless otherwise stated.
+# This allows the standalone builder to be used for testing while still
+# running the finishing steps below.
+[ "${LOCAL_BUILD:-0}" -eq 1 ] || jenkins_build
+
+# Convert to raw, then to VDI.
+qemu-img convert -O raw ${disk_working} ${disk_working//.img/.raw}
+
+_vbox_cmd convertfromraw \
+    --format vdi \
+    ${disk_working//.img/.raw} ${final_disk}
+
+# Create the VM
+vmname="ubuntu-cloudimg-${SUITE}-juju-vagrant-${ARCH_TYPE}"
+_vbox_cmd modifyhd --compact ${final_disk}
+
+dist_v="Ubuntu"
+[ "${ARCH_TYPE}" = "amd64" ] && dist_v="Ubuntu_64"
+_vbox_cmd createvm \
+    --name ${vmname} \
+    --ostype ${dist_v} \
+    --register
+
+_vbox_cmd modifyvm ${vmname} \
+    --memory 2048 \
+    --boot1 disk \
+    --boot2 none \
+    --boot3 none \
+    --boot4 none \
+    --vram 12 \
+    --pae off \
+    --acpi on \
+    --ioapic on \
+    --rtcuseutc on \
+    --bioslogodisplaytime 0 \
+    --nic1 nat \
+    --nictype1 virtio
+
+if [ "${ARCH_TYPE}" = "i386" ]; then
+    _vbox_cmd modifyvm ${vmname} \
+        --ioapic off \
+        --pae on
+fi
+
+
+_vbox_cmd modifyvm ${vmname} --natpf1 "guestssh,tcp,,2222,,22"
+
+storage_cmd=(
+    _vbox_cmd storagectl "${vmname}"
+    --name "SATAController"
+    --add sata
+    --controller IntelAhci
+    --hostiocache on
+    )
+
+[ "${build_host_suite}" == "trusty" ] &&
+    storage_cmd+=(--portcount 1) ||
+    storage_cmd+=(--sataportcount 1)
+
+${storage_cmd[@]}
+
+_vbox_cmd storageattach ${vmname} \
+    --storagectl "SATAController" \
+    --port 0 \
+    --device 0 \
+    --type hdd \
+    --medium ${final_disk}
+
+# Set extra-data
+_vbox_cmd setextradata ${vmname} installdate ${SERIAL}
+_vbox_cmd setextradata ${vmname} supported false
+
+# Set the Guest information to get rid of error message
+[ -e vagrant_image.pkgs ] && {
+
+    vbox_version=""
+    while read -r line
+    do
+        line=( $(echo ${line}) )
+        [[ ${line[0]} =~ virtualbox-guest-utils ]] && vbox_version=${line[1]}
+    done < vagrant_image.pkgs
+    debug "Guest Additions version is ${vbox_version}"
+
+    # Set the revision to some arbitrary value
+    _vbox_cmd guestproperty set ${vmname} \
+        "/VirtualBox/GuestAdd/Revision" '8000'
+
+    # Set the Ubuntu packaged version correctly
+    _vbox_cmd guestproperty set ${vmname} \
+        "/VirtualBox/GuestAdd/VersionExt" \
+        "${vbox_version//-dfsg-*/_Ubuntu}"
+
+    # Set the version string appropriately
+    _vbox_cmd guestproperty set ${vmname} \
+        "/VirtualBox/GuestAdd/Version" \
+        "${vbox_version//-dfsg-*/}"
+}
+
+mkdir ${WORKSPACE}/box
+_vbox_cmd export ${vmname} --output ${WORKSPACE}/box/box.ovf
+
+# Create the Vagrant file
+#macaddr="02:$(openssl rand -hex 5)"
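+# Pull the NIC MAC address out of the exported OVF so the generated
+# Vagrantfile matches the box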
+macaddr=$(awk '-F"' '/<Adapter slot="0" enabled="true"/ {print$6}' ${WORKSPACE}/box/box.ovf)
+cat << EOF > ${WORKSPACE}/box/Vagrantfile
+\$script = <<SCRIPT
+bzr branch lp:jujuredirector/quickstart /tmp/jujuredir
+bash /tmp/jujuredir/setup-juju.sh
+SCRIPT
+
+Vagrant.configure("2") do |config|
+  # This Vagrantfile is auto-generated by 'vagrant package' to contain
+  # the MAC address of the box. Custom configuration should be placed in
+  # the actual 'Vagrantfile' in this box.
+
+  config.vm.base_mac = "${macaddr}"
+  config.vm.network :forwarded_port, guest: 22, host: 2122, host_ip: "127.0.0.1"
+  config.vm.network :forwarded_port, guest: 80, host: 6080, host_ip: "127.0.0.1"
+  config.vm.network :forwarded_port, guest: 8001, host: 8001, host_ip: "127.0.0.1"
+  config.vm.network "private_network", ip: "172.16.250.15"
+  config.vm.provision "shell", inline: \$script
+
+end
+
+# Load include vagrant file if it exists after the auto-generated
+# so it can override any of the settings
+include_vagrantfile = File.expand_path("../include/_Vagrantfile", __FILE__)
+load include_vagrantfile if File.exist?(include_vagrantfile)
+EOF
+
+# Now pack it all up....
+tar -C ${WORKSPACE}/box -Scvf ${box_name} box.ovf Vagrantfile box-disk1.vmdk ||
+        fail "Unable to create box file"
+
+# Some minor cleanup
+rm ${disk_working} ${disk_working//.img/.raw} || /bin/true
+rm -rf ${WORKSPACE}/box *.vdi
+[ -e "${raw_f}" ] && rm "${raw_f}"
+
+# Bail here if this is a local build
+[ "${LOCAL_BUILD:-0}" -eq 1 ] && exit 0
+
+# Put the box in place
+mkdir -p "${final_location}" ||
+    fail "Unable to create the vagrant image location"
+
+cp ${box_name} ${final_location} ||
+    fail "Failed to place vagrant image in final home"

=== added file 'jenkins/CloudImages_Update_Builder.sh'
--- jenkins/CloudImages_Update_Builder.sh	1970-01-01 00:00:00 +0000
+++ jenkins/CloudImages_Update_Builder.sh	2014-07-28 14:45:55 +0000
@@ -0,0 +1,50 @@
+#!/bin/bash
+
+# Read in the common files
+source "${kvm}/functions/common"
+source "${kvm}/functions/retry"
+
+# Apply the build stuff
+find . -iname "*build_properties" | xargs -I FILE cp FILE .
+[ -e "build_properties" ] &&
+    source build_properties ||
+    fail "Failed to read build_properties. I don't know what I'm doing!"
+
+[ -e failed ] && rm failed
+[ -e success ] && rm success
+
+# Copy the target disk image
+disk_orig="${SUITE}-server-cloudimg-${ARCH}-disk1.img"
+disk_cp="${disk_orig//cloudimg/cloudimg-builder-$SERIAL}"
+disk_root="/srv/ec2-images/${SUITE}/${SERIAL:-current}"
+disk_working="${WORKSPACE}/${disk_cp}"
+raw_f="${WORKSPACE}/raw_f-$(date +%s).img"
+
+cp "${disk_root}/${disk_orig}" "${disk_working}" ||
+    fail "Unable to copy ${disk_orig} from ${disk_root}"
+
+qemu-img resize "${disk_working}" 5G ||
+    fail "unable to resize disk"
+
+# Launch KVM to do the work
+${kvm}/launch_kvm.sh \
+        --id "${ARCH}-${BUILD_ID}" \
+        --user-data "${kvm}/config/cloud-vps.cfg" \
+        --cloud-config "${kvm}/templates/img-update.tmpl" \
+        --extra-disk "${disk_working}" \
+        --disk-gb 5 \
+        --raw-disk "${raw_f}" \
+        --raw-size 1 \
+        --img-url /srv/builder/images/precise-builder-latest.img ||
+            fail "KVM instance failed to build image."
+
+# Remove the results
+rm "${raw_f}" || /bin/true
+
+# Compress it down...
+mv "${disk_working}" "${disk_working}.new"
+qemu-img convert "${disk_working}.new" -c -O qcow2 "${disk_working}" ||
+    fail "Failed to create compressed image"
+
+rm "${disk_working}.new"
+

=== added file 'jenkins/CloudImages_VPS.sh'
--- jenkins/CloudImages_VPS.sh	1970-01-01 00:00:00 +0000
+++ jenkins/CloudImages_VPS.sh	2014-07-28 14:45:55 +0000
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+# Read in the common files
+source "${kvm}/functions/common"
+source "${kvm}/functions/retry"
+
+# Apply the build stuff
+find . -iname "*build_properties" | xargs -I FILE cp FILE .
+[ -e "build_properties" ] &&
+    source build_properties ||
+    fail "Failed to read build_properties. I don't know what I'm doing!"
+
+rm -f failed success
+
+# Copy the target disk image
+disk_orig="${SUITE}-server-cloudimg-${ARCH}-disk1.img"
+disk_cp="${disk_orig//cloudimg/cloudimg-vps}"
+disk_root="/srv/ec2-images/${SUITE}/${SERIAL:-current}"
+disk_working="${WORKSPACE}/${disk_cp}"
+raw_f="${WORKSPACE}/raw_f-$(date +%s).img"
+
+cp "${disk_root}/${disk_orig}" "${disk_working}" ||
+    fail "Unable to copy ${disk_orig} from ${disk_root}"
+
+# Launch KVM to do the work
+${kvm}/launch_kvm.sh \
+        --id "${ARCH}-${BUILD_ID}" \
+        --mem 1024 \
+        --user-data "${kvm}/config/cloud-vps.cfg" \
+        --cloud-config "${kvm}/templates/img-vps.tmpl" \
+        --extra-disk "${disk_working}" \
+        --disk-gb 1 \
+        --raw-disk "${raw_f}" \
+        --raw-size 1 \
+        --img-url /srv/builder/images/precise-builder-latest.img ||
+            fail "KVM instance failed to build image."
+
+# Remove the results
+rm "${raw_f}" || /bin/true

=== added file 'jenkins/CloudImages_Vagrant.sh'
--- jenkins/CloudImages_Vagrant.sh	1970-01-01 00:00:00 +0000
+++ jenkins/CloudImages_Vagrant.sh	2014-07-28 14:45:55 +0000
@@ -0,0 +1,204 @@
+#!/bin/bash
+
+# Read in the common files
+source "${kvm}/functions/common"
+source "${kvm}/functions/retry"
+source "${kvm}/functions/locker"
+export HOME=${WORKSPACE}
+
+# Apply the build stuff
+[ -e "build_properties" ] &&
+    source build_properties ||
+    fail "Failed to read build_properties. I don't know what I'm doing!"
+
+rm -f failed success
+
+# Copy the target disk image
+ARCH_TYPE=${ARCH_TYPE:-$ARCH}
+disk_orig="${SUITE}-server-cloudimg-${ARCH_TYPE}-disk1.img"
+disk_cp="${disk_orig//$ARCH_TYPE/$ARCH_TYPE-vagrant}"
+disk_root="/srv/ec2-images/${SUITE}/${SERIAL:-current}"
+disk_working="${WORKSPACE}/${disk_cp}"
+final_disk="${WORKSPACE}/box-disk1.vdi"
+final_location="/srv/ec2-images/vagrant/${SUITE}/${SERIAL}"
+box_name="${disk_working//.img/.box}"
+raw_f="${WORKSPACE}/raw_f-$(date +%s).img"
+
+[ -e "${final_location}/${box_name##*/}" -a "${REBUILD}" != "true" ] && exit 0
+
+cp "${disk_root}/${disk_orig}" "${disk_working}" ||
+    fail "Unable to copy ${disk_orig} from ${disk_root}"
+
+# Resize it to 4G, but not the full 40G because we want it sparse
+qemu-img resize ${disk_working} 4G
+
+# Launch KVM to do the work
+${kvm}/launch_kvm.sh \
+        --id "${ARCH_TYPE}-${BUILD_ID}" \
+        --user-data "${kvm}/config/cloud-vps.cfg" \
+        --cloud-config "${kvm}/templates/img-vagrant.tmpl" \
+        --extra-disk "${disk_working}" \
+        --disk-gb 1 \
+        --raw-disk "${raw_f}" \
+        --raw-size 1 \
+        --img-url /srv/builder/images/precise-builder-latest.img ||
+            fail "KVM instance failed to build image."
+
+# Convert to raw, then to VDI.
+qemu-img convert -O raw ${disk_working} ${disk_working//.img/.raw}
+truncate -s 40G ${disk_working//.img/.raw}
+
+_vbox_cmd convertfromraw \
+    --format vdi \
+    ${disk_working//.img/.raw} ${final_disk}
+
+# Create the VM
+vmname="ubuntu-cloudimg-${SUITE}-vagrant-${ARCH_TYPE}"
+_vbox_cmd modifyhd --compact ${final_disk}
+
+dist_v="Ubuntu"
+[ "${ARCH_TYPE}" = "amd64" ] && dist_v="Ubuntu_64"
+_vbox_cmd createvm \
+    --name ${vmname} \
+    --ostype ${dist_v} \
+    --register
+
+_vbox_cmd modifyvm ${vmname} \
+    --memory 512 \
+    --boot1 disk \
+    --boot2 none \
+    --boot3 none \
+    --boot4 none \
+    --vram 12 \
+    --pae off \
+    --acpi on \
+    --ioapic on \
+    --rtcuseutc on
+#    --natnet1 default \
+
+if [ "${ARCH_TYPE}" = "i386" ]; then
+    _vbox_cmd modifyvm ${vmname} \
+        --ioapic off \
+        --pae on
+fi
+
+
+_vbox_cmd modifyvm ${vmname} --natpf1 "guestssh,tcp,,2222,,22"
+
+_vbox_cmd storagectl "${vmname}" \
+    --name "SATAController" \
+    --add sata \
+    --controller IntelAhci \
+    --sataportcount 1 \
+    --hostiocache on
+
+_vbox_cmd storageattach ${vmname} \
+    --storagectl "SATAController" \
+    --port 0 \
+    --device 0 \
+    --type hdd \
+    --medium ${final_disk}
+
+# Set extra-data
+_vbox_cmd setextradata ${vmname} installdate ${SERIAL}
+_vbox_cmd setextradata ${vmname} supported false
+
+# Set the Guest information to get rid of error message
+[ -e vagrant_image.pkgs ] && {
+
+    vbox_version=""
+    while read -r line
+    do
+        line=( $(echo ${line}) )
+        [[ ${line[0]} =~ virtualbox-guest-utils ]] && vbox_version=${line[1]}
+    done < vagrant_image.pkgs
+    debug "Guest Additions version is ${vbox_version}"
+
+    # Set the revision to some arbitrary value
+    _vbox_cmd guestproperty set ${vmname} \
+        "/VirtualBox/GuestAdd/Revision" '8000'
+
+    # Set the Ubuntu packaged version correctly
+    _vbox_cmd guestproperty set ${vmname} \
+        "/VirtualBox/GuestAdd/VersionExt" \
+        "${vbox_version//-dfsg-*/_Ubuntu}"
+
+    # Set the version string appropriately
+    _vbox_cmd guestproperty set ${vmname} \
+        "/VirtualBox/GuestAdd/Version" \
+        "${vbox_version//-dfsg-*/}"
+}
+
+mkdir box
+_vbox_cmd export ${vmname} --output box/box.ovf
+
+# Create the Vagrant file
+#macaddr="02:$(openssl rand -hex 5)"
+macaddr=$(awk '-F"' '/<Adapter slot="0" enabled="true"/ {print$6}' ${WORKSPACE}/box/box.ovf)
+cat << EOF > ${WORKSPACE}/box/Vagrantfile
+Vagrant::Config.run do |config|
+  # This Vagrantfile is auto-generated by 'vagrant package' to contain
+  # the MAC address of the box. Custom configuration should be placed in
+  # the actual 'Vagrantfile' in this box.
+  config.vm.base_mac = "${macaddr}"
+end
+
+# Load include vagrant file if it exists after the auto-generated
+# so it can override any of the settings
+include_vagrantfile = File.expand_path("../include/_Vagrantfile", __FILE__)
+load include_vagrantfile if File.exist?(include_vagrantfile)
+EOF
+
+# Now pack it all up....
+tar -C ${WORKSPACE}/box -Scvf ${box_name} box.ovf Vagrantfile box-disk1.vmdk ||
+        fail "Unable to create box file"
+
+# Some minor cleanup
+rm ${disk_working} ${disk_working//.img/.raw} || /bin/true
+rm -rf ${WORKSPACE}/box *.vdi
+rm "${raw_f}" || /bin/true
+
+# Put the box in place
+mkdir -p "${final_location}" ||
+    fail "Unable to create the vagrant image location"
+
+cp ${box_name} ${final_location} ||
+    fail "Failed to place vagrant image in final home"
+
+# Update the current link
+current_l="/srv/ec2-images/vagrant/${SUITE}/current"
+[ -e "${current_l}" ] && rm "${current_l}"
+( cd "/srv/ec2-images/vagrant/${SUITE}" && ln -s "${SERIAL}" current )
+
+# Cleanup old builds
+builds=($(find /srv/ec2-images/vagrant/${SUITE} -mindepth 1 -maxdepth 1 -type d | sort -r))
+build_count="${#builds[@]}"
+
+echo "------------------------"
+echo "Clean-up for prior builds"
+echo "Found ${build_count} builds for consideration"
+
+for b in ${builds[@]}
+do
+    echo " - found build ${b}"
+done
+echo ""
+
+[ "${build_count}" -gt 4 ] && {
+    for item in $(seq 4 ${build_count})
+    do
+        [ -e "${builds[$item]}" ] && {
+            echo "Removing build ${builds[$item]} for deletion"
+            rm -rf ${builds[$item]} ||
+                echo "Failed to remove build ${builds[$item]}"
+        }
+    done
+
+    for item in $(seq 0 3)
+    do
+        [ -e "${builds[$item]}" ] &&
+            echo "Preserving build ${builds[$item]}"
+    done
+
+} || echo "No builds marked for removal"
+

=== added file 'jenkins/MAAS_Builder.sh'
--- jenkins/MAAS_Builder.sh	1970-01-01 00:00:00 +0000
+++ jenkins/MAAS_Builder.sh	2014-07-28 14:45:55 +0000
@@ -0,0 +1,171 @@
+#!/bin/bash
+set -x
+fail() { [ $# -eq 0 ] || echo "$@"; exit 1; }
+
+find . -iname "*build_properties" -exec cp {} . \; ||
+    echo "Unable to copy build properties, this might be v2"
+
+[ -z "${SERIAL}" -a -z "${SUITE}" -a -e "build_properties" ] && {
+    source build_properties ||
+    fail "Failed to read build_properties. I don't know what I'm doing!";
+}
+
+# Read in the common functions
+my_dir="$( cd "$( dirname "$0" )" && pwd )"
+base_dir=$(dirname ${my_dir})
+export PATH="${base_dir}:${my_dir}:${PATH}"
+source "${base_dir}/functions/locker"
+source "${base_dir}/functions/common"
+source "${base_dir}/functions/retry"
+source ${my_dir}/build_lib.sh
+select_build_config
+
+export WORKSPACE="${WORKSPACE:-$WORKSPACE_R}"
+out_f="${WORKSPACE}/maas-${SUITE}-${STREAM}-config.sh"
+raw_f="${WORKSPACE}/${SUITE}-output.raw"
+query_t="${WORKSPACE}/cloud-images-query.tar"
+base_name="${SUITE}-server-cloudimg"
+rel_base_name="ubuntu-$(ubuntu-adj2version ${SUITE})-${stream//-/}-server-cloudimg"
+
+export maas_branch="${MAAS_BRANCH:-http://bazaar.launchpad.net/~smoser/maas/maas.ubuntu.com.images-ephemeral}";
+
+case "${STREAM}" in
+    release)  build_f="/srv/ec2-images/releases/${SUITE}/release-${SERIAL}";
+              base_name=${rel_base_name};
+              out_d="/srv/maas-images/ephemeral/releases/${SUITE}/release-${SERIAL}"
+              ;;
+    daily)    build_f="/srv/ec2-images/${SUITE}/${SERIAL}";
+              out_d="/srv/maas-images/ephemeral/daily/${SUITE}/${SERIAL}";
+              ;;
+    alpha*|beta*)  build_f="/srv/ec2-images/releases/${SUITE}/${STREAM}";
+              base_name=${rel_base_name};
+              out_d="/srv/maas-images/ephemeral/release/${SUITE}/${STREAM}";
+              ;;
+    *)        fail "Unknown stream ${STREAM}.";;
+esac
+
+final_out_d="${out_d}"
+
+[ -e "${final_out_d}" -a "${REBUILD:-false}" = "false" ] &&
+    fail "Build already exists. Rebuild is set to false. Failing this build"
+
+# Tar up query for use in-image
+[ ! -e "${query_t}" ] && {
+    tar cvf ${query_t} \
+        ${QUERY_D:-/srv/ec2-images/query} \
+        ${build_f}  \
+        --exclude "*img" --exclude "*azure*" --exclude "*html" \
+        --exclude "*armel*" --exclude "*root.tar.gz" \
+        --exclude "*floppy" ||
+        fail "Failed to pack up build elements for MAAS builder"; }
+
+# Generate the template file
+ci_cfg="${kvm_builder}/config/cloud-maas.cfg"
+template="${kvm_builder}/templates/img-maas.tmpl"
+[ "${IS_MAAS_V2:-0}" -eq 1 ] && {
+     template="${kvm_builder}/templates/img-maasv2.tmpl"
+     ci_cfg="${kvm_builder}/config/cloud-maasv2.cfg"
+}
+
+maas_config.sh \
+    --distro "${SUITE}" \
+    --stream "${STREAM}" \
+    --template "${template}" \
+    --base-name "${base_name}" \
+    --local "${build_f}" \
+    --serial "${SERIAL}" \
+    --out "${out_f}" \
+    --out_d "${out_d}" ||
+    fail "Failed to configure KVM instance for building"
+
+[ -n "${cloud_init_cfg}" ] && ci_cfg="${kvm_builder}/config/${cloud_init_cfg}"
+
+# Launch KVM to do the work
+launch_kvm.sh \
+    --id ${BUILD_ID} \
+    --user-data "${out_f}" \
+    --cloud-config "${ci_cfg}" \
+    --extra-disk "${query_t}" \
+    --disk-gb 50 \
+    --raw-disk "${raw_f}" \
+    --raw-size 20 \
+    --img-url ${BUILDER_CLOUD_IMAGE} ||
+        fail "KVM instance failed to build image."
+
+# Extract the result set
+tar -xvvf "${raw_f}" ||
+    fail "Failed to extract information from instance"
+
+# Useful for off-host builds, like ppc64el. Just make sure that any off-host
+# builds are done before the on-host builds.
+[ "${BUILD_ONLY:-0}" -eq 1 ] && exit 0
+
+# Extracted results should be here
+[ ! -e "${WORKSPACE}/${out_d}" ] && fail "Expected result directory is missing: ${WORKSPACE}/${out_d}"
+
+# Checksum the results (and sign 'em)
+export CDIMAGE_ROOT="/srv/builder/vmbuilder/cdimage"
+/srv/builder/vmbuilder/bin/cronrun checksum-directory "${WORKSPACE}/${out_d}" ||
+    fail "Failed to create checksums and GPG signatures"
+
+set -x
+# Put the bits where they go...
+mkdir -p "${final_out_d}" &&
+    cp -a ${WORKSPACE}${out_d}/* "${final_out_d}" &&
+    echo "Copied bits to final location ${final_out_d}" ||
+    fail "Unable to copy build bits to final location"
+
+# Produce build-info
+cat << EOF > "${final_out_d}/build-info.txt"
+serial=${SERIAL}
+orig_prefix=${SUITE}-ephemeral-maas
+suite=${SUITE}
+build_name=ephemeral
+EOF
+
+# Clean up the dailies
+if [ "${STREAM}" = "daily" ]; then
+    base_d="${out_d%/*}"
+    builds=( $(find ${base_d} -maxdepth 1 -mindepth 1 -type d | sort -r) )
+    build_count=${#builds[@]}
+
+    # Delete all but the six most recent builds
+    if [ ${build_count} -gt 6 ]; then
+        for item in $(seq 6 ${build_count})
+        do
+            [ -e "${builds[$item]}" ] && {
+                rm -rf ${builds[$item]};
+                echo "Build ${SUITE} ${builds[$item]##*/} has been deleted";
+            }
+        done
+
+        for item in $(seq 0 5)
+        do
+                echo "Preserving ${SUITE} ${builds[$item]##*/}"
+        done
+    else
+        echo "No builds marked for deletion"
+    fi
+fi
+
+# Generate the Query2 tree
+src_tree="${WORKSPACE}/maas_src"
+bzr branch "${maas_branch}" "${src_tree}"
+${src_tree}/tree2query \
+    --commit-msg "Build ${BUILD_ID}" \
+    --namespace maas \
+    /srv/maas-images
+
+# Update current
+if [ "${STREAM}" = "daily" ]; then
+    cur_d="/srv/maas-images/ephemeral/daily/${SUITE}/current"
+    [ -e "${cur_d}" ] && rm "${cur_d}"
+    ln -s "${final_out_d}" "${cur_d}" ||
+        echo "Failed to update ${cur_d}"
+fi
+
+
+# Remove the results
+rm "${raw_f}" ||
+    fail "Failed to clean up files!"
+

=== added file 'jenkins/MAAS_Promotion.sh'
--- jenkins/MAAS_Promotion.sh	1970-01-01 00:00:00 +0000
+++ jenkins/MAAS_Promotion.sh	2014-07-28 14:45:55 +0000
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+fail() { [ $# -eq 0 ] || echo "$@"; exit 1; }
+
+if [ "${TAG}" == "release" ]; then
+    TAG="release-${SERIAL}"
+fi
+
+src_d="/srv/maas-images/ephemeral/daily/${SUITE}/${SERIAL}"
+final_out_d="/srv/maas-images/ephemeral/releases/${SUITE}/${TAG}"
+
+[ -e ${src_d} ] ||
+    fail "Source ${src_d} does not exist"
+
+[ -e ${final_out_d} ] &&
+    fail "Serial has already been promoted"
+
+mkdir -p "${final_out_d}" &&
+    rsync -a ${src_d}/ ${final_out_d} &&
+    echo "Copied bits to final location ${final_out_d}" ||
+    fail "Unable to copy build bits to final location"
+
+# Generate the Query2 tree
+export maas_branch="${MAAS_BRANCH:-http://bazaar.launchpad.net/~smoser/maas/maas.ubuntu.com.images-ephemeral}";
+src_tree="${WORKSPACE}/maas_src"
+bzr branch "${maas_branch}" "${src_tree}"
+${src_tree}/tree2query \
+    --commit-msg "Build ${BUILD_ID}" \
+    --namespace maas \
+    /srv/maas-images
+

=== added file 'jenkins/MAASv2_Builder.sh'
--- jenkins/MAASv2_Builder.sh	1970-01-01 00:00:00 +0000
+++ jenkins/MAASv2_Builder.sh	2014-07-28 14:45:55 +0000
@@ -0,0 +1,190 @@
+#!/bin/bash
+set -x
+fail() { [ $# -eq 0 ] || echo "$@"; exit 1; }
+
+find . -iname "*build_properties" -exec cp {} . \; ||
+    echo "Unable to copy build properties, this might be v2"
+
+[ -z "${SERIAL}" -a -z "${SUITE}" -a -e "build_properties" ] && {
+    source build_properties ||
+    fail "Failed to read build_properties. I don't know what I'm doing!";
+}
+
+STREAM="${STREAM:-daily}"
+# Read in the common functions
+my_dir="$( cd "$( dirname "$0" )" && pwd )"
+base_dir=$(dirname ${my_dir})
+export PATH="${base_dir}:${my_dir}:${PATH}"
+source "${base_dir}/functions/locker"
+source "${base_dir}/functions/common"
+source "${base_dir}/functions/retry"
+source ${my_dir}/build_lib.sh
+select_build_config
+
+export WORKSPACE="${WORKSPACE:-$WORKSPACE_R}"
+out_f="${WORKSPACE}/maas-${SUITE}-${STREAM}-config.sh"
+raw_f="${WORKSPACE}/${SUITE}-output.raw"
+query_t="${WORKSPACE}/cloud-images-query.tar"
+base_name="${SUITE}-server-cloudimg"
+rel_base_name="ubuntu-${VERSION:-$(ubuntu-adj2version ${SUITE})}-${stream//-/}-server-cloudimg"
+
+export maas_branch_v1="http://bazaar.launchpad.net/~smoser/maas/maas.ubuntu.com.images-ephemeral";
+export maas_branch="${MAAS_BRANCH:-$maas_branch_v1}"
+
+case "${STREAM}" in
+    release)  build_f="/srv/ec2-images/releases/${SUITE}/release-${SERIAL}";
+              base_name=${rel_base_name};
+              out_d="/srv/maas-images/ephemeral/releases/${SUITE}/release-${SERIAL}"
+              ;;
+    daily)    build_f="/srv/ec2-images/${SUITE}/${SERIAL}";
+              out_d="/srv/maas-images/ephemeral/daily/${SUITE}/${SERIAL}";
+              ;;
+    alpha*|beta*)  build_f="/srv/ec2-images/releases/${SUITE}/${STREAM}";
+              base_name=${rel_base_name};
+              out_d="/srv/maas-images/ephemeral/release/${SUITE}/${STREAM}";
+              ;;
+    *)        fail "Unknown stream ${STREAM}.";;
+esac
+
+final_out_d="${out_d}"
+
+[ -e "${final_out_d}" -a "${REBUILD:-false}" = "false" ] &&
+    fail "Build already exists. Rebuild is set to false. Failing this build"
+
+if [ ! -e "${query_t}" ]; then
+
+    if  [ "${MAASv2:-0}" -eq 1 ]; then
+        # MAAS v2 doesn't need this information
+        out_d="/tmp/maas_final"
+        touch ${WORKSPACE}/maasv2
+        tar cvf ${query_t} ${WORKSPACE}/maasv2
+
+        if  [ -e "${WORKSPACE}/tmp/maas-final" ]; then
+            tar cvf ${query_t} maas-final ||
+                fail "Failed to create tarball of MAAS images"
+        fi
+
+    else
+        # MAAS v1 need information
+        tar cvf ${query_t} \
+            ${QUERY_D:-/srv/ec2-images/query} \
+            ${build_f}  \
+            --exclude "*img" --exclude "*azure*" --exclude "*html" \
+            --exclude "*armel*" --exclude "*root.tar.gz" \
+            --exclude "*floppy" ||
+                fail "Failed to pack up build elements for MAAS builder";
+    fi
+fi
+
+# Select the right template
+tmpl="${kvm_builder}/templates/img-maas.tmpl"
+[ "${MAASv2:-0}" -eq 1 ] && tmpl="${tmpl//maas.tmpl/maasv2.tmpl}"
+
+# Construct the right template
+maas_config.sh \
+    --distro "${SUITE}" \
+    --stream "${STREAM}" \
+    --template "${tmpl}" \
+    --base-name "${base_name}" \
+    --local "${build_f}" \
+    --serial "${SERIAL}" \
+    --out "${out_f}" \
+    --maas-branch "${maas_branch}" \
+    --out_d "${out_d}" ||
+    fail "Failed to configure KVM instance for building"
+set +x
+
+ci_cfg="${kvm_builder}/config/cloud-maasv2.cfg"
+[ "$(uname -m)" == "ppc64" ] && ci_cfg="${kvm_builder}/config/cloud-trusty-pp64el.cfg"
+
+# Launch KVM to do the work
+launch_kvm.sh \
+    --id ${BUILD_ID} \
+    --user-data "${out_f}" \
+    --cloud-config "${ci_cfg}" \
+    --extra-disk "${query_t}" \
+    --disk-gb 50 \
+    --raw-disk "${raw_f}" \
+    --raw-size 20 \
+    --img-url ${BUILDER_CLOUD_IMAGE} ||
+        fail "KVM instance failed to build image."
+
+# Extract the result set
+tar -xvvf "${raw_f}" ||
+    fail "Failed to extract information from instance"
+
+# Useful for off-host builds, like ppc64el. Just make sure that any off-host
+# builds are done before the on-host builds.
+
+[ "${BUILD_ONLY:-0}" -eq 1 ] && exit 0
+[ "${MAASv2:-0}" -eq 1 ] && exit 0
+
+# Extracted results should be here
+[ ! -e "${WORKSPACE}/${out_d}" ] && fail "Expected result directory is missing: ${WORKSPACE}/${out_d}"
+
+# Checksum the results (and sign 'em)
+export CDIMAGE_ROOT="/srv/builder/vmbuilder/cdimage"
+/srv/builder/vmbuilder/bin/cronrun checksum-directory "${WORKSPACE}/${out_d}" ||
+    fail "Failed to create checksums and GPG signatures"
+
+set -x
+# Put the bits where they go...
+mkdir -p "${final_out_d}" &&
+    cp -a ${WORKSPACE}${out_d}/* "${final_out_d}" &&
+    echo "Copied bits to final location ${final_out_d}" ||
+    fail "Unable to copy build bits to final location"
+
+# Produce build-info
+cat << EOF > "${final_out_d}/build-info.txt"
+serial=${SERIAL}
+orig_prefix=${SUITE}-ephemeral-maas
+suite=${SUITE}
+build_name=ephemeral
+EOF
+
+# Clean up the dailies
+if [ "${STREAM}" = "daily" ]; then
+    base_d="${out_d%/*}"
+    builds=( $(find ${base_d} -maxdepth 1 -mindepth 1 -type d | sort -r) )
+    build_count=${#builds[@]}
+
+    # Delete all but the six most recent builds
+    if [ ${build_count} -gt 6 ]; then
+        for item in $(seq 6 ${build_count})
+        do
+            [ -e "${builds[$item]}" ] && {
+                rm -rf ${builds[$item]};
+                echo "Build ${SUITE} ${builds[$item]##*/} has been deleted";
+            }
+        done
+
+        for item in $(seq 0 5)
+        do
+                echo "Preserving ${SUITE} ${builds[$item]##*/}"
+        done
+    else
+        echo "No builds marked for deletion"
+    fi
+fi
+
+# Generate the Query2 tree
+src_tree="${WORKSPACE}/maas_src"
+bzr branch "${maas_branch_v1}" "${src_tree}"
+${src_tree}/tree2query \
+    --commit-msg "Build ${BUILD_ID}" \
+    --namespace maas \
+    /srv/maas-images
+
+# Update current
+if [ "${STREAM}" = "daily" ]; then
+    cur_d="/srv/maas-images/ephemeral/daily/${SUITE}/current"
+    [ -e "${cur_d}" ] && rm "${cur_d}"
+    ln -s "${final_out_d}" "${cur_d}" ||
+        echo "Failed to update ${cur_d}"
+fi
+
+
+# Remove the results
+rm "${raw_f}" ||
+    fail "Failed to clean up files!"
+

=== added file 'jenkins/Promote_Daily.sh'
--- jenkins/Promote_Daily.sh	1970-01-01 00:00:00 +0000
+++ jenkins/Promote_Daily.sh	2014-07-28 14:45:55 +0000
@@ -0,0 +1,39 @@
+#!/bin/bash
+echo "---------------------------------------------------"
+echo "Instructed to Promote Daily job:
+echo "       Suite: ${SUITE}"
+echo "      Serial: ${SERIAL}"
+echo "   Milestone: ${MILESTONE_LABEL}"
+echo "      Stream: ${BTYPE}"
+echo "      Public: ${MAKE_PUBLIC}"
+echo "  PrePublish: ${PREPUBLISH}"
+echo "
+echo "---------------------------------------------------"
+
+cat << EOF > "${WORKSPACE}/build_properties"
+SUITE=${SUITE}
+SERIAL=${SERIAL}
+MILESTONE=${MILESTONE_LABEL}
+STREAM=${BTYPE}
+PUBLIC=${MAKE_PUBLIC}
+PREPUBLISH=${PREPUBLISH}
+EOF
+
+export HOME="/srv/builder/vmbuilder"
+
+cmd=(
+   '/srv/builder/vmbuilder/bin/cronrun'
+   'promote-daily'
+   '--verbose'
+   '--allow-existing' )
+
+if [ "${PREPUBLISH}" == "true" ]; then
+    echo "Pre-publishing rules, will not make public"
+else
+    [ "${MAKE_PUBLIC}" == "true" ] && cmd+=('--make-public')
+fi
+
+cmd+=("${MILESTONE_LABEL}" "/srv/ec2-images/${BTYPE}/${SUITE}/${SERIAL}")
+
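+# For illustration, the assembled command looks roughly like this
+# (values are examples, not real build parameters):
+#   cronrun promote-daily --verbose --allow-existing --make-public \
+#       "Alpha 2" /srv/ec2-images/daily/trusty/20140728
+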
+echo "Executing commnad:"
+exec ${cmd[@]}

=== added file 'jenkins/Promote_MAAS_Daily.sh'
--- jenkins/Promote_MAAS_Daily.sh	1970-01-01 00:00:00 +0000
+++ jenkins/Promote_MAAS_Daily.sh	2014-07-28 14:45:55 +0000
@@ -0,0 +1,48 @@
+#!/bin/bash
+
+export maas_branch="${MAAS_BRANCH:-http://bazaar.launchpad.net/~smoser/maas/maas.ubuntu.com.images-ephemeral}";
+
+fail() { [ $# -eq 0 ] || echo "$@"; exit 1; }
+
+[ -z "${SERIAL}" ] && fail "Serial must be defined"
+[ -z "${SUITE}" ] && fail "Suite must be defined"
+[ -z "${MILESTONE}" ] && fail "Milestone must be defined"
+
+cp_d="/srv/maas-images/ephemeral/daily/${SUITE}/${SERIAL}"
+finald="/srv/maas-images/ephemeral/releases/${SUITE}"
+
+case "${MILESTONE}" in
+    release)    final_d="${finald}/release-${SERIAL}"
+    alpha|beta) final_d="${finald}/${milestone}"
+esac
+
+# Sanity check
+[ ! -e "${cp_d}" ] && fail "Serial ${SERIAL} for ${SUITE} does not exist"
+[ -e "${final_d}" ] && fail "Already released ${SERIAL} for ${SUITE} as ${MILESTONE}"
+
+# Make the home directory
+mkdir -p "${final_d}" ||
+    fail "Unable to create final destination"
+
+# Put the files in final destination
+cp -au ${cp_d}/* "${final_d}" ||
+    fail "Failed to copy source files for promotion"
+
+# Generate the Query2 tree
+src_tree="${WORKSPACE}/maas_src"
+bzr branch "${maas_branch}" "${src_tree}"
+${src_tree}/tree2query \
+    --commit-msg "Build ${BUILD_ID}" \
+    --namespace maas \
+    /srv/maas-images
+
+# Update the "release" link
+if [ "${MILESTONE}" = "release" ]; then
+    cur_d="/srv/maas-images/ephemeral/releases/${SUITE}/release"
+    [ -e "${cur_d}" ] && rm "${cur_d}"
+    ln -s "${final_d}" "${cur_d}" ||
+        echo "Failed to update ${cur_d}"
+fi
+
+# Sync the stuff
+KEY=maas /srv/builder/vmbuilder/bin/trigger-sync

=== added file 'jenkins/Publish_EC2.sh'
--- jenkins/Publish_EC2.sh	1970-01-01 00:00:00 +0000
+++ jenkins/Publish_EC2.sh	2014-07-28 14:45:55 +0000
@@ -0,0 +1,53 @@
+#!/bin/bash -x
+
+# Add in the retry stub
+source "${kvm}/functions/retry"
+source "${kvm}/functions/common"
+
+# Exit if trigger job does not want this published
+[ "${PUBLISH_IMAGE}" -eq 0 ] && exit 0
+
+# Set the build directories
+WORK_D="/srv/ec2-images/${BUILD_TYPE}/${SUITE}/${SERIAL}"
+[ "${TEST_BUILD}" -eq 1 ] && WORK_D="/srv/ec2-images/test_builds/${BUILD_TYPE}/${SUITE}/${SERIAL}"
+[ "${SANDBOX_BUILD}" -eq 1 ] && WORK_D="/srv/ec2-images/sandbox/${BUILD_TYPE}/${SUITE}/${SERIAL}"
+
+echo "Using ${WORK_D} as the directory"
+[ -e "${WORK_D}" ] || { echo "Working directory does not exist!"; exit 1; }
+
+ec2_pub="${PWD}/ec2-publishing-scripts"
+
+# Check out the scripts needed
+[ -e "${ec2_pub}" ] && rm -rf "${ec2_pub}"
+bzr branch "${EC2_PUB_SCRIPTS}" "${ec2_pub}"
+
+# Add some elements to the path
+export PATH="/srv/builder/vmbuilder/bin:/srv/builder/vmbuilder/ec2-daily/xc2:${PATH}"
+export HOME="/srv/builder/vmbuilder"
+export CDIMAGE_ROOT="/srv/builder/vmbuilder/cdimage"
+export EC2_PUB_LOC="${ec2_pub}"
+
+ec2publish() {
+    # Run the publisher job
+    ${kvm}/ec2_publisher.sh \
+           ${SUITE} \
+           ${SERIAL} \
+           ${BUILD_TYPE} \
+           ${WORK_D} \
+           ${TEST_BUILD} \
+           ${SANDBOX_BUILD} \
+           ${ALLOW_EXISTING}
+}
+
+# Retry the publishing up to 3 times
+retry 6 5 ec2publish ||
+    fail "Failed three attempts to publish EC2 images!"
+
+# Add the new daily to the tracker
+#exec_tracker=${ADD_TO_TRACKER:-0}
+#[ "${exec_tracker}" -eq 1 ] && {
+#    ${kvm}/tracker.sh daily ${SUITE} ${SERIAL} &&
+#         exit $? || fail "Unable to execute tracker!"
+#    }
+#
+#exit 0

=== added file 'jenkins/Publish_Results_to_Tracker.sh'
--- jenkins/Publish_Results_to_Tracker.sh	1970-01-01 00:00:00 +0000
+++ jenkins/Publish_Results_to_Tracker.sh	2014-07-28 14:45:55 +0000
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+# Environmental variables:
+#  HOST: the Jenkins host URL to poll from
+#  SUITE: Ubuntu codename
+#  MILESTONE: i.e. Alpha 2
+#  SERIAL: What is the build serial, i.e 20130213
+#  OUT: File to execute
+
+set -x
+
+# Setup the QA tracker code
+bzr branch http://bazaar.launchpad.net/~jibel/+junk/qatracker
+cd qatracker
+
+bzr branch http://bazaar.launchpad.net/~ubuntu-qa-website-devel/ubuntu-qa-website/python-qatracker
+ln -s python-qatracker/qatracker.py .
+export PATH="${PATH}:${WORKSPACE}/qatracker"
+
+# Get the actual working script
+${scripts}/tests/tracker.py \
+     --host ${HOST} \
+     --suite ${SUITE} \
+     --test ${TEST} \
+     --milestone "${MILESTONE}" \
+     --serial ${SERIAL} \
+     --out "${WORKSPACE}/script.sh"
+
+# Execute the script
+env API_USER="${API_USER}" \
+    API_KEY="${API_KEY}" \
+    bash ${WORKSPACE}/script.sh ||
+    exit 1 | tee publish.log

=== added file 'jenkins/README.txt'
--- jenkins/README.txt	1970-01-01 00:00:00 +0000
+++ jenkins/README.txt	2014-07-28 14:45:55 +0000
@@ -0,0 +1,1 @@
+This directory contains the jobs that Jenkins executes. Most of the jobs just set up an environment component and then call another script, usually one directory below.

=== added file 'jenkins/Test_Azure.sh'
--- jenkins/Test_Azure.sh	1970-01-01 00:00:00 +0000
+++ jenkins/Test_Azure.sh	2014-07-28 14:45:55 +0000
@@ -0,0 +1,17 @@
+#!/bin/bash
+fail() { [ $# -eq 0 ] || echo "$@"; exit 1; }
+
+umask 022
+set -x
+source watch_properties || fail "Failed to read watch properties"
+
+echo "-------------------"
+echo "Image for testing:"
+cat watch_properties
+echo "-------------------"
+
+
+my_dir="$( cd "$( dirname "$0" )" && pwd )"
+base_dir=$(dirname ${my_dir})
+
+${my_dir}/tests/azure.sh ${1}

=== added file 'jenkins/build_lib.sh'
--- jenkins/build_lib.sh	1970-01-01 00:00:00 +0000
+++ jenkins/build_lib.sh	2014-07-28 14:45:55 +0000
@@ -0,0 +1,49 @@
+#!/bin/bash
+
+# Read in the common functions
+my_dir="$( cd "$( dirname "$0" )" && pwd )"
+base_dir=$(dirname ${my_dir})
+export PATH="${base_dir}:${my_dir}:${PATH}"
+source "${base_dir}/functions/locker"
+source "${base_dir}/functions/common"
+source "${base_dir}/functions/retry"
+
+dist_ge() { [[ "$1" > "$2" || "$1" == "$2" ]]; }
+
+[ -z "${DISTRO}" -a -n "${SUITE}" ] && DISTRO="${SUITE}"
+
+select_build_config() {
+    # Use the latest 12.04 LTS image to do the build.
+    local precise_rel_img="http://cloud-images.ubuntu.com/releases/precise/release/ubuntu-12.04-server-cloudimg-amd64-disk1.img";
+    local trusty_rel_img="http://cloud-images.ubuntu.com/releases/trusty/release/ubuntu-14.04-server-cloudimg-amd64-uefi1.img";
+    local trusty_daily_img="http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-uefi1.img";
+    local use_trusty=0
+    export cloud_init_cfg="cloud-precise.cfg"
+
+    [ -z "${BUILDER_CLOUD_IMAGE}" ] && {
+        BUILDER_CLOUD_IMAGE="${precise_rel_img}"
+
+        use_trusty=${USE_TRUSTY:-0}
+        dist_ge "${SUITE}" trusty && use_trusty=1
+        [[ "${ARCH_TYPE}" =~ (ppc64el|arm) ]] && use_trusty=1
+        [ "${use_trusty:-0}" -eq 1 ] &&  {
+            echo "Using Trusty to complete this build"
+            BUILDER_CLOUD_IMAGE="${trusty_rel_img}"
+            export cloud_init_cfg="cloud-trusty.cfg"
+
+            # TEMPORARY until 14.04 is released; this makes sure that once
+            # trusty is released, the released images are used
+            (curl -I "${BUILDER_CLOUD_IMAGE}" | egrep "HTTP.*200 OK")  || \
+                BUILDER_CLOUD_IMAGE="${trusty_daily_img}"
+            export BUILDER_CLOUD_IMAGE
+        }
+    }
+
+    # For ppc64el, we must use the trusty ppc64el config
+    [ "${DISTRO}" == "trusty" -a "${ARCH_TYPE}" == "ppc64el" ] && {
+            export cloud_init_cfg="cloud-trusty-pp64el.cfg"
+            BUILDER_CLOUD_IMAGE="${BUILDER_CLOUD_IMAGE//amd64/ppc64el}"
+            export BUILDER_CLOUD_IMAGE="${BUILDER_CLOUD_IMAGE//uefi1/disk1}"
+        }
+    echo "Using ${BUILDER_CLOUD_IMAGE} to do the build"
+}

=== added file 'jenkins/env-test.sh'
--- jenkins/env-test.sh	1970-01-01 00:00:00 +0000
+++ jenkins/env-test.sh	2014-07-28 14:45:55 +0000
@@ -0,0 +1,2 @@
+#!/bin/bash
+env

=== added file 'launch_kvm.sh'
--- launch_kvm.sh	1970-01-01 00:00:00 +0000
+++ launch_kvm.sh	2014-07-28 14:45:55 +0000
@@ -0,0 +1,198 @@
+#!/bin/bash
+usage() {
+cat << EOF
+This program is a KVM wrapper for performing tasks inside a KVM Environment.
+Its primary goal is to help developers do dangerous tasks that their IS/IT
+department won't allow them to do on an existing machine.
+    --id <ARG>           The ID you want to use to identify the KVM image;
+                         this is used to name the image
+    --disk-gb <ARG>      Disk size you want to resize the image to.
+                         Default is to _add_ 15GB
+    --smp <ARG>          KVM SMP options, defaults to:
+                         ${smp_opt}
+    --mem <ARG>          How much RAM do you want to use
+    --user-data <ARG>    Cloud-Init user-data file
+    --cloud-config <ARG> Cloud-Init cloud-config file
+    --img-url <ARG>      Location of the image file.
+    --raw-disk <ARG>     Name of RAW disk to create and attach.
+    --raw-size <ARG>     Size of RAW disk in GB.
+    --extra-disk <ARG>   Add an extra disk, starting with /dev/vdd
+EOF
+exit 1
+}
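+
+# Example invocation (a sketch; file names are placeholders):
+#   ./launch_kvm.sh --id test-build \
+#       --user-data user-data.txt --cloud-config cloud.cfg \
+#       --raw-disk out.img --raw-size 2 \
+#       --img-url http://cloud-images.ubuntu.com/releases/precise/release/ubuntu-12.04-server-cloudimg-amd64-disk1.img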
+
+short_opts="h"
+long_opts="id:,ssh_port,disk-gb:,mem:,bzr-automated-ec2-builds:,cloud-config:,user-data:,kernel-url:,img-url:,raw-disk:,raw-size:,smp:,extra-disk:,help"
+getopt_out=$(getopt --name "${0##*/}" \
+    --options "${short_opts}" --long "${long_opts}" -- "$@") &&
+    eval set -- "${getopt_out}" ||
+    usage
+
+builder_id=$(uuidgen)
+uuid=${builder_id}
+bname="server"
+size_gb=15
+mem=512
+smp_opt="4"
+ud=""
+cloud_config=""
+img_loc="${BUILDER_CLOUD_IMAGE:-http://cloud-images.ubuntu.com/releases/precise/release/ubuntu-12.04-server-cloudimg-amd64-disk1.img}";
+KVM_PID=""
+
+while [ $# -ne 0 ]; do
+    cur=${1}; next=${2};
+    case "$cur" in
+    --id)                       id="$2"; shift;;
+    --disk-gb)                  size_gb="$2"; shift;;
+    --mem)                      mem="$2"; shift;;
+    --cloud-config)             ccloud="$2"; shift;;
+    --user-data)                ud="$2"; shift;;
+    --img-url)                  img_loc="$2"; shift;;
+    --raw-disk)                 raw_disk="$2"; shift;;
+    --raw-size)                 raw_size="$2"; shift;;
+    --smp)                      smp_opt="$2"; shift;;
+    --extra-disk)               [ -z "${extra_disk}" ] && extra_disk=$2 || extra_disk="${extra_disk} $2"; shift;;
+    -h|--help)                  usage; exit 0;;
+    --) shift; break;;
+  esac
+  shift;
+done
+
+work_d="$(mktemp -d /tmp/kvm-builder.XXXX)"
+kvm_pidfile="$(mktemp --tmpdir=${work_d})"
+
+error() { echo "$@" 1>&2; }
+cleanup() {
+        [ -n "${KVM_PID}" ] && kill -9 ${KVM_PID};
+        [ -n "${TAIL_PID}" ] && kill -9 ${TAIL_PID};
+        rm -rf "${work_d}";
+}
+fail() { error "$@"; cleanup;  exit 1; }
+debug() { error "$(date -R):" "$@"; }
+sysfail() { fail "Failure in commands detected; purging work directory"; }
+
+# Make sure that we kill everything
+trap sysfail SIGINT SIGTERM
+
+[ -z "${ud}" ] && fail "Must define user-data script via --user-data"
+[ -z "${ccloud}" ] && fail "Must define cloud-config script via --cloud-config"
+
+debug "Creating Cloud-Init configuration..."
+write-mime-multipart -o "${work_d}/user-data.txt" "${ccloud}" "${ud}" ||
+    fail "Unable to create user-data"
+
+echo "instance-id: $(uuidgen)" > "${work_d}/meta-data"
+echo "local-hostname: builder" >> "${work_d}/meta-data"
+
+debug "Creating Seed for Cloud-Init..."
+"${0%/*}/make-seed.sh" "${work_d}/seed.img" "${work_d}/user-data.txt" "${work_d}/meta-data" ||
+    fail "Failed to create Configruation ISO"
+
+# Place the image in place
+debug "Build image location is ${img_loc}"
+if [[ "${img_loc}" =~ "http" ]]; then
+    debug "Fetching cloud image from ${img_loc}"
+    curl -s -o "${work_d}/img-${builder_id}" "${img_loc}" ||
+        fail "Unable to fetch pristine image from '${img_loc}'"
+else
+    cp "${img_loc}" "${work_d}/img-${builder_id}" ||
+        fail "Unable to copy '${img_loc}'"
+fi
+
+debug "Adding ${size_gb}G to image size"
+qemu-img resize "${work_d}/img-${builder_id}" +"${size_gb}G" ||
+    fail "Unable to resize image to ${size_gb}G"
+
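+# The dd invocation below makes the raw disk a sparse file: it writes a
+# single 1k block just past the requested size, so a multi-GB disk
+# consumes almost no space until the guest actually fills it.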
+if [ -n "${raw_disk}" -a ! -e "${raw_disk}" ]; then
+    if [ -n "${raw_size}" ]; then
+        dd if=/dev/zero of=${raw_disk} bs=1k count=1 seek=$((${raw_size} * 1024000)) &&
+            debug "Create new raw disk" ||
+            fail "Unable to create raw disk"
+    else
+        fail "Undefined raw disk size"
+    fi
+else
+    debug "Using existing raw disk."
+fi
+
+
+debug "________________________________________________"
+debug "Launching instance..."
+kvm_cmd=(
+   ${QEMU_COMMAND:-kvm}
+   -name ${uuid}
+   -drive file=${work_d}/img-${builder_id},if=virtio,bus=0,cache=unsafe,unit=0
+   -drive file=${raw_disk},if=virtio,format=raw,bus=0,unit=1
+   -drive file=${work_d}/seed.img,if=virtio,media=cdrom,bus=0,cache=unsafe,unit=2
+   -net nic,model=virtio
+   -net user
+   -no-reboot
+   -display none
+   -daemonize
+   -serial file:${work_d}/console.log
+   -pidfile ${kvm_pidfile}
+   )
+kvm_cmd+=(${QEMU_ARGS[@]})
+
+# Arch-dependent settings
+if [[ "$(uname -p)" =~ "ppc64" ]]; then
+    # Use more memory for building on PPC64
+    kvm_cmd+=(-m 4G)
+else
+    kvm_cmd+=(-smp ${smp_opt} -m ${mem})
+fi
+
+# Allow for kernel and append
+[ -n "${QEMU_KERNEL}" ] &&
+     kvm_cmd+=(-kernel ${QEMU_KERNEL}
+               -append "earlyprintk root=/dev/vda1 console=hvc0"
+              )
+
+unit_c=3
+for disk in ${extra_disk}
+do
+    if [[ $(file ${disk}) =~ (disk|qcow|QCOW|vmdk|VMDK|vdi|VDI) ]]; then
+        debug "Adding extra disk $disk to KVM configuration"
+        kvm_cmd+=(-drive file=${disk},if=virtio,bus=1,unit=${unit_c})
+    else
+        debug "Adding extra disk ${disk} as a raw-formatted disk"
+        kvm_cmd+=(-drive file=${disk},if=virtio,format=raw,bus=1,unit=${unit_c})
+    fi
+    unit_c=$((unit_c+1))
+done
+
+debug "KVM command is: ${kvm_cmd[@]}"
+"${kvm_cmd[@]}" ||
+    fail "Failed to launch KVM image\n${kvm_out}"
+
+read KVM_PID < ${kvm_pidfile}
+debug "KVM PID is: ${KVM_PID}"
+
+tail -f "${work_d}/console.log" &
+TAIL_PID=$!
+
+while ps ${KVM_PID} > /dev/null 2>&1
+do
+    sleep 10
+done
+
+debug "________________________________________________"
+debug "KVM PID has ended. Work is done"
+kill -15 ${TAIL_PID}
+
+unset KVM_PID
+unset TAIL_PID
+
+[ -n "${raw_disk}" ] &&
+    debug "Extracting raw tarball" &&
+    { tar xvvf "${raw_disk}" || /bin/true; }
+
+[ ! -e success ] &&
+    fail "Tarball contents reported failure"
+
+cp "${work_d}/console.log" .
+
+# Wait for Cloud-Init to finish any work
+debug "Cleaning up..."
+cleanup
+exit 0

=== added file 'maas_config.sh'
--- maas_config.sh	1970-01-01 00:00:00 +0000
+++ maas_config.sh	2014-07-28 14:45:55 +0000
@@ -0,0 +1,75 @@
+#!/bin/bash
+short_opts="h"
+long_opts="distro:,stream:,maas-branch:,out:,template:,serial:,local:,base-name:,out_d:"
+getopt_out=$(getopt --name "${0##*/}" \
+    --options "${short_opts}" --long "${long_opts}" -- "$@") &&
+    eval set -- "${getopt_out}" || { echo "BAD INVOCATION!"; usage; exit 1; }
+
+usage() {
+    cat <<EOM
+${0##*/} - Populates values in the build template.
+
+    Required:
+    --distro        Distro code name, i.e. precise
+    --template      Template file
+    --stream        Stream, i.e. daily, release
+    --base-name     The name of the file to work on
+    --serial        The build serial
+    --out           The output file
+    --out_d         Where to stuff the output files
+
+    Optional:
+    --maas-branch   bzr branch for maas image code
+EOM
+}
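+
+# Example invocation (a sketch; paths and serial are placeholders):
+#   ./maas_config.sh --distro trusty --stream daily \
+#       --base-name trusty-server-cloudimg --serial 20140728 \
+#       --local /srv/ec2-images/trusty/20140728 \
+#       --template templates/img-maas.tmpl \
+#       --out maas-trusty-daily-config.sh \
+#       --out_d /srv/maas-images/ephemeral/daily/trusty/20140728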
+
+
+fail() { echo "${@}" 2>&1; exit 1;}
+
+serial="${serial:-$(date +%Y%m%d)}"
+maas_branch="${maas_branch:-http://bazaar.launchpad.net/~smoser/maas/maas.ubuntu.com.images-ephemeral}";
+template_f="${PWD}/img-maas.tmpl"
+
+while [ $# -ne 0 ]; do
+  cur=${1}; next=${2};
+  case "$cur" in
+    --distro)                   distro=$2;  shift;;
+    --stream)                   stream=$2;  shift;;
+    --local)                    local_d=$2; shift;;
+    --maas-branch)              maas_branch=$2; shift;;
+    --base-name)                base_name=$2; shift;;
+    --template)                 template_f=$2; shift;;
+    --out)                      out_f=$2; shift;;
+    --out_d)                    out_d=$2; shift;;
+    --) shift; break;;
+  esac
+  shift;
+done
+
+fail_usage() { fail "Must define $@"; }
+
+[ -z "${distro}" ] && fail_usage "--distro"
+[ -z "${stream}" ] && fail_usage "--stream"
+[ -z "${local_d}" ] && fail_usage "--local"
+[ -z "${out_f}" ] && fail_usage "--out"
+[ -z "${out_d}" ] && fail_usage "--out_d"
+[ -z "${base_name}" ] && fail_usage "--base-name"
+
+case "$distro" in
+   trusty) arches="${ARCH_TYPE:-i386 amd64 armhf}";
+           [[ "$(uname -m)" =~ ppc64 ]] && arches="ppc64el";;
+   *) arches="${ARCH_TYPE:-i386 amd64 armhf}";;
+esac
+
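+# Substitute the template placeholders: %d distro, %S stream, %M maas
+# branch, %D local dir, %B base name, %s serial, %O output dir, %A arches.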
+sed -e "s,%d,${distro},g" \
+    -e "s,%S,${stream},g" \
+    -e "s,%M,${maas_branch},g" \
+    -e "s,%D,${local_d},g" \
+    -e "s,%B,${base_name},g" \
+    -e "s,%s,${serial},g" \
+    -e "s,%O,${out_d},g" \
+    -e "s,%A,${arches},g" \
+    ${template_f} > ${out_f} ||
+        fail "Unable to write template file"
+
+exit 0

=== added file 'make-seed.sh'
--- make-seed.sh	1970-01-01 00:00:00 +0000
+++ make-seed.sh	2014-07-28 14:45:55 +0000
@@ -0,0 +1,147 @@
+#!/bin/bash
+
+VERBOSITY=0
+TEMP_D=""
+DEF_DISK_FORMAT="raw"
+DEF_FILESYSTEM="iso9660"
+
+error() { echo "$@" 1>&2; }
+errorp() { printf "$@" 1>&2; }
+fail() { [ $# -eq 0 ] || error "$@"; exit 1; }
+failp() { [ $# -eq 0 ] || errorp "$@"; exit 1; }
+
+Usage() {
+    cat <<EOF
+Usage: ${0##*/} [ options ] output user-data [meta-data]
+
+   Create a disk for cloud-init to utilize nocloud
+
+   options:
+     -h | --help            show usage
+     -d | --disk-format D   disk format to output. default: raw
+     -f | --filesystem  F   filesystem format (vfat or iso), default: iso9660
+
+     -i | --interfaces  F   write network interfaces file into metadata
+     -m | --dsmode      M   add 'dsmode' ('local' or 'net') to the metadata
+                            default in cloud-init is 'net', meaning network is
+                            required.
+
+   Example:
+    * cat my-user-data
+      #cloud-config
+      password: passw0rd
+      chpasswd: { expire: False }
+      ssh_pwauth: True
+    * echo "instance-id: \$(uuidgen || echo i-abcdefg)" > my-meta-data
+    * ${0##*/} my-seed.img my-user-data my-meta-data
+EOF
+}
+
+bad_Usage() { Usage 1>&2; [ $# -eq 0 ] || error "$@"; exit 1; }
+cleanup() {
+    [ -z "${TEMP_D}" -o ! -d "${TEMP_D}" ] || rm -Rf "${TEMP_D}"
+}
+
+debug() {
+    local level=${1}; shift;
+    [ "${level}" -gt "${VERBOSITY}" ] && return
+    error "${@}"
+}
+
+short_opts="hi:d:f:m:o:v"
+long_opts="disk-format:,dsmode:,filesystem:,help,interfaces:,output:,verbose"
+getopt_out=$(getopt --name "${0##*/}" \
+    --options "${short_opts}" --long "${long_opts}" -- "$@") &&
+    eval set -- "${getopt_out}" ||
+    bad_Usage
+
+## <<insert default variables here>>
+output=""
+userdata=""
+metadata=""
+filesystem=$DEF_FILESYSTEM
+diskformat=$DEF_DISK_FORMAT
+interfaces=_unset
+dsmode=""
+
+
+while [ $# -ne 0 ]; do
+    cur=${1}; next=${2};
+    case "$cur" in
+        -h|--help) Usage ; exit 0;;
+        -v|--verbose) VERBOSITY=$((${VERBOSITY}+1));;
+        -d|--disk-format) diskformat=$next; shift;;
+        -f|--filesystem) filesystem=$next; shift;;
+        -m|--dsmode) dsmode=$next; shift;;
+        -i|--interfaces) interfaces=$next; shift;;
+        --) shift; break;;
+    esac
+    shift;
+done
+
+## check arguments here
+## how many args do you expect?
+[ $# -ge 1 ] || bad_Usage "must provide output, userdata"
+[ $# -le 3 ] || bad_Usage "confused by additional args"
+
+output=$1
+userdata=$2
+metadata=$3
+
+[ -n "$metadata" -a "${interfaces}" != "_unset" ] &&
+    fail "metadata and --interfaces are incompatible"
+[ -n "$metadata" -a -n "$dsmode" ] &&
+    fail "metadata and dsmode are incompatible"
+[ "$interfaces" = "_unset" -o -r "$interfaces" ] ||
+    fail "$interfaces: not a readable file"
+
+TEMP_D=$(mktemp -d "${TMPDIR:-/tmp}/${0##*/}.XXXXXX") ||
+    fail "failed to make tempdir"
+trap cleanup EXIT
+
+if [ -n "$metadata" ]; then
+    cp "$metadata" "$TEMP_D/meta-data" || fail "$metadata: failed to copy"
+else
+    {
+    echo "instance-id: iid-local01"
+    [ -n "$dsmode" ] && echo "dsmode: $dsmode"
+    [ -n "$interfaces" ] && echo "interfaces: |" &&
+        sed 's,^,  ,' "$interfaces"
+    } > "$TEMP_D/meta-data"
+fi
+
+if [ "$userdata" = "-" ]; then
+    cat > "$TEMP_D/user-data" || fail "failed to read from stdin"
+else
+    cp "$userdata" "$TEMP_D/user-data" || fail "$userdata: failed to copy"
+fi
+
+## alternatively, create a vfat filesystem with same files
+img="$TEMP_D/seed.img"
+truncate --size 100K "$img" || fail "failed truncate image"
+
+case "$filesystem" in
+    iso9660|iso)
+        genisoimage  -output "$img" -volid cidata \
+            -joliet -rock "$TEMP_D/user-data" "$TEMP_D/meta-data" \
+            > "$TEMP_D/err" 2>&1 ||
+            { cat "$TEMP_D/err" 1>&2; fail "failed to genisoimage"; }
+        ;;
+    vfat)
+        mkfs.vfat -n cidata "$img" || fail "failed mkfs.vfat"
+        mcopy -oi "$img" "$TEMP_D/user-data" "$TEMP_D/meta-data" :: ||
+            fail "failed to copy user-data, meta-data to img"
+        ;;
+    *) fail "unknown filesystem $filesystem";;
+esac
+
+[ "$output" = "-" ] && output="$TEMP_D/final"
+qemu-img convert -f raw -O "$diskformat" "$img" "$output" ||
+    fail "failed to convert to disk format $diskformat"
+
+[ "$output" != "$TEMP_D/final" ] || { cat "$output" && output="-"; } ||
+    fail "failed to write to -"
+
+error "wrote ${output} with filesystem=$filesystem and diskformat=$diskformat"
+# vi: ts=4 noexpandtab
+

=== added file 'rss-cleanup.sh'
--- rss-cleanup.sh	1970-01-01 00:00:00 +0000
+++ rss-cleanup.sh	2014-07-28 14:45:55 +0000
@@ -0,0 +1,16 @@
+#!/bin/bash
+
+# Re-generate the RSS feeds after a cleanup operation
+# on the daily images
+
+PATH="$(dirname $0):${PATH}"
+rssexec="$(which rss-generate.sh)"
+suites=(lucid natty oneiric precise quantal)
+
+for suite in ${suites[@]}
+do
+    for release in release daily
+    do
+        ${rssexec} ${suite} ${release}
+    done
+done

=== added file 'rss-generate.sh'
--- rss-generate.sh	1970-01-01 00:00:00 +0000
+++ rss-generate.sh	2014-07-28 14:45:55 +0000
@@ -0,0 +1,103 @@
+#!/bin/bash
+
+fail() { echo "${@}"; exit 1;}
+
+rss_head() {
+cat <<EOF
+<rss version="2.0">
+<channel>
+<title></title>
+<link>http://cloud-images.ubuntu.com</link>
+<description>Ubuntu Cloud Images Feed for ${1}</description>
+<lastBuildDate>$(date | sed 's| |, |')</lastBuildDate>
+EOF
+}
+
+rss_item() {
+    ele_date=$(stat /srv/ec2-images/${2} | awk '/Modify/ {print$2" "$3}')
+    rss_date=$(date -d "${ele_date}" | sed 's| |, |')
+    unpacked_d="/srv/ec2-images/${2}/unpacked"
+    pub_d="/srv/ec2-images/${2}"
+
+    [ -e "/srv/ec2-images/${2}/HEADER.html" ] && { 
+        cat <<EOF
+<item>
+    <title>${1}</title>
+    <link>http://cloud-images.ubuntu.com/${2}</link>
+    <guid>http://cloud-images.ubuntu.com/${2}</guid>
+    <pubDate>${rss_date}</pubDate>
+EOF
+    } || return
+
+    [ -e "${pub_d}/published-ec2-daily.txt" ] &&
+        published="${pub_d}/published-ec2-daily.txt"
+
+    [ -e "${pub_d}/published-ec2-release.txt" ] &&
+        published="${pub_d}/published-ec2-release.txt"
+
+    amis=$(cat ${published} | grep -v kernel | awk '{print"<br>"$1"\t"$2"\t"$3"\t"$4}')
+
+    changelog=$(find ${unpacked_d}  -iname "${SUITE}-release*-to-daily.changelog" 2> /dev/null)
+
+    [ -n "${changelog}" ] &&
+        changelog_text=$(cat ${changelog} | sed ':a;N;$!ba;s|\n|<br>|g')
+
+    cat <<EOF
+    <description><![CDATA[<p>${1} Build
+EOF
+
+    [ -n "${amis}" ] &&
+    cat <<EOF
+<p>EC2 Publication Information:
+${amis}
+EOF
+
+    [ -n "${changelog_text}" ]  &&
+    cat <<EOF
+
+<p>Changelog from previous released image:
+${changelog_text}
+EOF
+
+    cat <<EOF
+    ]]></description>
+</item>
+EOF
+}
+
+rss_footer() {
+cat <<EOF
+</channel>
+</rss>
+EOF
+}
+PATH="$(dirname $0):${PATH}"
+SUITE="${SUITE:-$1}"
+MILESTONE="${MILESTONE:-$2}"
+
+srv_d="${SUITE}"
+adj2version=$(which ubuntu-adj2version)
+version=$(${adj2version} ${SUITE})
+
+[ "${MILESTONE}" != "daily" ] && srv_d="releases/${SUITE}"
+
+elements=()
+
+[ "${MILESTONE}" == "daily" ] && {
+    elements=($(find /srv/ec2-images/${srv_d} -maxdepth 1 -type d | awk -F/ '{print$5}' | grep . | sort)) ||
+    fail "Unable to determine serials for inclusion"
+    } || {
+    elements=($(find /srv/ec2-images/${srv_d} -maxdepth 1 -type d | awk -F/ '{print$6}' | grep . | sort)) ||
+    fail "Unable to determine serials for inclusion"
+    }
+
+[ "${#elements[@]}" -ge 1 ] ||
+    fail "Aborting! No directory contents available!"
+
+{
+    rss_head "${version} (${SUITE}) ${MILESTONE}"
+    for item in ${elements[@]}; do
+        rss_item "${item}" "${srv_d}/${item}"
+    done
+    rss_footer
+} > /srv/ec2-images/rss/${SUITE}-${MILESTONE}.xml

=== added file 'standalone.sh'
--- standalone.sh	1970-01-01 00:00:00 +0000
+++ standalone.sh	2014-07-28 14:45:55 +0000
@@ -0,0 +1,274 @@
+#!/bin/bash
+#
+# Copyright 2014, Ben Howard <ben.howard@xxxxxxxxxxxxx>
+# Copyright 2014, Canonical Group, Ltd
+#
+# This is a runner script emulating the Ubuntu Cloud Image
+# derivative builds.
+nbd_dev=""
+mnt_dev=""
+
+clean_nbd() {
+    [ -n "${mnt_dev}" ] && {
+        sudo umount -f "${mnt_dev}" && unset mnt_dev &&
+        debug "Unmounted ${mnt_dev}" ||
+        error "BAD! ${mnt_dev} may still be mounted!";
+    }
+    sleep 3
+    [ -n "${nbd_dev}" ] && {
+        sudo qemu-nbd -d "${nbd_dev}" &&
+        unset nbd_dev &&
+        debug "Removed device ${nbd_dev}" ||
+        error "BAD! ${nbd_dev} may still exist!";
+    }
+}
+clean() { [ -d ${tmp_dir} ] && rm -rf ${tmp_dir};
+          clean_nbd; exit "${@}"; }
+error() { echo "$@"; }
+fail() { debug "${1:-Something bad happend}"; clean 1; }
+debug() { error "$(date -R):" "$@"; }
+
+my_name=$(readlink -f ${0})
+my_dir=$(dirname ${my_name})
+
+usage() {
+cat << EOF
+This program is used to create derivative Ubuntu Cloud Images outside the
+Canonical build environement.
+
+This program must run on Ubuntu 12.04 or 14.04.
+
+    --suite:     the Ubuntu Code name to build against
+    --use_img:   use a local file instead of fetching a new image
+    --install:   install dependencies of this builder
+    --streams:   Simple streams URL to use (optional)
+    --template:  template to use, defaults to default.tmpl
+    --addin:     the name of the addin to the template file to
+                 use.
+    --arch:      arch to build, i.e. i386 or amd64, defaults to system
+    --reuse:     Reuse checkouts, etc. from <DIR>
+    --nocheckout: Don't checkout the latest bzr branch
+    --resize:    Resize root disk to xGB
+    --config:    Read values from configuration file
+    --final_img: Name of final image
+    --cloud_cfg: File of Cloud-init #cloud-config file
+    --fetch_new: Fetch new image if missing
+    --resize_final: Resize final image to xGB
+EOF
+exit 1
+}
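+
+# Example invocation (a sketch; file names are placeholders):
+#   ./standalone.sh --suite trusty --fetch_new \
+#       --template templates/default.tmpl \
+#       --addin templates/example-addin.tmpl \
+#       --cloud_cfg config/cloud-trusty.cfg \
+#       --final_img my-derivative.img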
+
+host_suite=$(lsb_release --codename --short)
+install_deps() {
+    local pkgs=(bzr qemu-kvm simplestreams)
+    [ "${host_suite}" == "precise" ] && {
+        sudo add-apt-repository ppa:ubuntu-cloud-archive/cloud-tools-next ||
+            fail "Failed to add require repo!"
+    }
+    debug "Installing dependencies"
+    sudo apt-get update
+    sudo apt-get -y install ${pkgs[@]}
+}
+
+short_opts="h"
+long_opts="suite:,use_img:,install,streams:,template:,addin:,arch:,resize:,config:,final_img:,cloud_cfg:,fetch_new,resize_final:"
+getopt_out=$(getopt --name "${0##*/}" \
+    --options "${short_opts}" --long "${long_opts}" -- "$@") &&
+    eval set -- "${getopt_out}" ||
+    usage
+
+suite="${suite:-trusty}"
+build_arch="${build_arch:-$(dpkg --print-architecture)}"
+use_img="${use_img}"
+main_template="${main_template:-templates/default.tmpl}"
+addin_template=""
+install="${install:-0}"
+streams="${streams:-https://cloud-images.ubuntu.com/releases/streams/v1/com.ubuntu.cloud:released:download.json}";
+resize="${resize:-0}"
+resize_final=""
+config=""
+cloud_cfg=""
+fetch_new=0
+
+while [ $# -ne 0 ]; do
+    cur=${1}; next=${2};
+
+    case "${cur}" in
+        --suite)        suite="${2}"; shift;;
+        --use_img)      use_img="${2}"; shift;;
+        --install)      install_deps; shift;;
+        --streams)      streams="${2}"; shift;;
+        --template)     main_template="${2}"; shift;;
+        --addin)        addin_template="${2}"; shift;;
+        --arch)         build_arch="${2}"; shift;;
+        --resize)       resize="${2}"; shift;;
+        --config)       config="${2}"; shift;;
+        --cloud_cfg)    cloud_cfg="${2}"; shift;;
+        --final_img)    final_img="${2}"; shift;;
+        --fetch_new)    fetch_new=1; shift;;
+        --resize_final)   resize_final="${2}"; shift;;
+        -h|--help)      usage; exit 0;;
+        --) shift; break;;
+    esac
+    shift;
+done
+
+# Fly with the safety on!
+trap fail EXIT
+trap fail SIGINT
+
+# Read in the config if it's there
+[ -n "${config}" -a -e "${config}" ] &&
+    . "${config}" &&
+    debug "Read in configuration from ${config}"
+
+[ -e "${cloud_cfg}" ] ||
+    fail "Unable to find cloud_cfg '${cloud_cfg}'"
+
+[ -e "${main_template}" ] ||
+    fail "Unable to find ${main_template}. You may need pass --template"
+main_template=$(readlink -f ${main_template})
+[ -n "${addin_template}" ] && addin_template=$(readlink -f ${addin_template})
+
+# Create a temporary directory for the fun
+tmp_dir=$(mktemp -d builder.XXXXX --tmpdir=${TMPDIR:-/tmp});
+
+# Fetch the file
+[ -z "${use_img}" -a -z "${suite}" ] &&
+    fail "Must define --use_img or --suite"
+
+[ ! -e "${use_img}" -a "${fetch_new:-0}" -eq 0 ] &&
+    fail "Image ${use_img} does not exist!"
+
+if [ ! -e "${use_img}" ]; then
+    debug "Getting location of the lastest image"
+    debug "   Looking for ${suite} / ${build_arch}"
+    stream_out=$(sstream-query ${streams} \
+                    release=${suite} arch=${build_arch} \
+                    item_name=disk1.img \
+                    --max=1 --output-format="%(item_url)s,%(sha256)s")
+    img_url=${stream_out%%,*}
+    sha256=${stream_out##*,}
+    use_img="${use_img:-$tmp_dir/$suite-$(date +%Y%m%d-%s).img}"
+    debug "Fetching image from ${img_url}"
+    debug "   Local file is ${use_img}"
+    wget -O "${use_img}" "${img_url}" &&
+         debug "Fetched imaged" ||
+         fail "Failed to fetch the image!"
+elif [ -e "${use_img}" ]; then
+    debug "Using image ${use_img}"
+fi
+
+# Set the default cloud_cfg if not set
+cloud_cfg=${cloud_cfg:-$(readlink -f config/cloud-$suite.cfg)}
+
+debug "Modifying upstream Cloud Image"
+
+# Put the final bits in place
+template_f="${tmp_dir}/new_template.txt"
+raw_f="${tmp_dir}/raw_f-$(date +%s).img"
+final_img="${final_img:-$PWD/final-$(date +%s).img}"
+attach_img="${tmp_dir}/$(uuidgen)-attach.img"
+
+# Just some information about the final image
+cp "${use_img}" "${attach_img}" ||
+    fail "failed to copy image to ${attach_img}"
+
+[ "${resize_final:-0}" -ne 0 ] && {
+    qemu-img resize "${attach_img}" "${resize_final}G" &&
+        debug "resized final disk to ${resize_final}G" ||
+        fail  "failed to resize final disk";
+}
+
+debug "Using ${attach_img} for working image"
+debug "Using ${final_img} as the final disk"
+
+# Create the template.
+cur_dir=${PWD}
+[ -n "${addin_template}" ] && {
+  cd ${tmp_dir};
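+  # The awk below splits the template at the ADDIN_HERE marker:
+  # everything before it goes to template.txt, the marker and the rest
+  # to template1.txt, so the addin can be spliced in between.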
+  awk '/ADDIN_HERE/{n++}{print >"template" n ".txt" }' ${main_template} ||
+        fail "failed to split template!";
+  cat template.txt ${addin_template} template1.txt > ${template_f};
+  sed -e "s,ADDIN_HERE,# END Addins,g" -i  ${template_f} ||
+        fail "Unable to finalize template!"
+  cd ${cur_dir}
+} || template_f="${main_template}"
+
+# We need to be safe with the image, so we will modify it
+debug "Modifying the builder image to make it work"
+debug "This requires root access"
+
+# We need nbd0
+(lsmod | grep nbd >> /dev/null) || {
+    debug "Adding kernel module nbd (cmd: sudo modprobe nbd)";
+    sudo modprobe nbd ||
+        fail "Failed to add nbd kernel module";
+}
+
+# Create a mod disk for working with
+mod_img="${tmp_dir}/mod_img.qcow2"
+cp "${use_img}" "${mod_img}" || fail "unable to copy ${use_img} to ${mod_img}"
+debug "Mounting builder image (cmd: sudo qemu-nbd -C /dev/nbd10 ${mod_img}"
+sudo qemu-nbd -c /dev/nbd10 ${mod_img} && {
+        nbd_dev="/dev/nbd10" && debug "Added nbd block /dev/nbd10"; } ||
+        fail "failed to setup nbd device!"
+
+# Now mount it
+[ -b "${nbd_dev}p1" ] && mnt_dev="${nbd_dev}p1" || mnt_dev="${nbd_dev}"
+
+# Give the filesystem a new label, because cloudimg-rootfs won't work
+# when we have two file systems with that
+new_label="bld-$(openssl rand -hex 3)"
+debug "Changing root label to ${new_label}"
+sudo tune2fs -L ${new_label} ${mnt_dev}
+
+nbd_mnt="${tmp_dir}/mnt"
+[ -d "${nbd_mnt}" ] || {
+    mkdir -p  "${nbd_mnt}" ||
+    fail "mount point for nbd device does not exist";
+}
+debug "Mounting ${mnt_dev} to ${nbd_mnt} (cmd: mount ${mnt_dev} ${nbd_mnt})"
+sudo mount ${mnt_dev} ${nbd_mnt} &&
+    debug "Mounted ${mnt_dev} to ${mnt_dev}" ||
+    fail  "Failed to mount ${mnt_dev}"
+
+# Now change some bits...
+debug "Changing label of builder root fs"
+sudo sed -i "s,cloudimg-rootfs,${new_label},g" \
+            "${nbd_mnt}/etc/fstab" \
+            "${nbd_mnt}/boot/grub/grub.cfg" \
+            "${nbd_mnt}/boot/grub/menu.lst" &&
+            debug "Modified image successfully" ||
+            fail  "Failed to modify image"
+
+# And unmount the stuff
+clean_nbd
+sleep 3
+
+# This is the command that will actually do the build
+mod_cmd=(
+    "${my_dir}/launch_kvm.sh"
+    "--user-data ${template_f}"
+    "--cloud-config ${cloud_cfg}"
+    "--disk-gb 5" "--mem 1024"
+    "--extra-disk $(readlink -f $attach_img)"
+    "--raw-disk $(readlink -f $raw_f)"
+    "--raw-size ${raw_size:-20}"
+    "--img-url $(readlink -f $mod_img)"
+)
+
+# Run the command
+debug "Command will be ${mod_cmd[@]}"
+${mod_cmd[@]} &&
+    debug "Finished with building image" ||
+    fail  "KVM instance indicates failure!"
+
+debug "Compressing ${final_img}"
+qemu-img convert -O qcow2 -c ${attach_img} ${final_img} ||
+    fail "failed to compress ${final_img}"
+
+# Clear the trap
+trap - EXIT SIGINT
+exit 0

=== added directory 'templates'
=== added file 'templates/default.tmpl'
--- templates/default.tmpl	1970-01-01 00:00:00 +0000
+++ templates/default.tmpl	2014-07-28 14:45:55 +0000
@@ -0,0 +1,283 @@
+#!/bin/bash
+# vi: ts=4 noexpandtab
+
+## Copyright (C) 2013-2014 Ben Howard <ben.howard@xxxxxxxxxxxxx>
+## Copyright (C) 2012-2014 Canonical Group, Ltd <www.canonical.com>
+## Date: 23 March 2012
+
+## Disks:
+### /dev/vdc is a CDROM for cloud-init
+### /dev/vdb is a raw device for outputting files via tar
+### /dev/vdd is the device that you want to operate on
+
+## This template is designed as a base for all templates moving
+## forward. Each cloud should be enabled as an addin.
+
+exec > >(tee /tmp/build.log) 2>&1
+
+CLOUD_IMG_STR="# CLOUD_IMG: This file was created/modified by the Cloud Image build process"
+
+log_gather() {
+	new_logs=()
+	mkdir /tmp/logs
+	for f in "${logs[@]}"; do
+		[ -e ${f} ] && cp -au ${f} /tmp/logs && new_logs+=(${f})
+	done
+	[ "${1}" -eq 0 ] && touch /tmp/logs/success && new_logs+=(success)
+	tar -C /tmp/logs -cvf /dev/vdb ${new_logs[@]}
+	sync
+}
+
+end() { log_gather ${1:-0}; sh -c 'sleep 15 && /sbin/poweroff' & exit ${1:-0}; }
+error() { echo "$@"; }
+fail() { debug $1; cp build.log builder.log; end 1; }
+debug() { error "$(date -R):" "$@"; }
+
+# files to backup from the image and restore when done with chroot
+file_list=("usr/sbin/policy-rc.d"  "etc/mtab")
+del_list=()
+
+# This is used to cleanly unmount stuff
+mounts=()
+
+logs=(packages.manifest builder.log /var/log)
+image_dir="$(mktemp -d /tmp/builder.XXXX)"
+mp="$(mktemp -d /tmp/builder.XXXX)"
+builder_files="$(mktemp -d /tmp/builder.XXXX)"
+
+# Detect whether /dev/vdd is partitioned or not
+vdd="/dev/vdd"
+partprobe ${vdd}
+lsblk ${vdd}
+if [ -b "${vdd}1" ]; then
+	vdd="${vdd}1"
+	blkid "${vdd}"
+fi
+debug "Using ${vdd} as root device"
+
+# Get the device UUID
+dev_uuid="$(blkid ${vdd} -o udev | awk '-F=' '/ID_FS_UUID=/ {print$2}')" &&
+   debug "Filesystem UUID ${loop_fs_uuid}" ||
+   fail  "Unable to get UUID for file system"
+
+operation_files() {
+	# This sets up the target filesystem in a way that prevents services
+	# from running. As long as the packages being installed ship
+	# LSB-compliant init scripts, it will work.
+
+	for i in $(seq 0 $((${#file_list[@]} - 1)))
+	do
+		f="${file_list[i]}"
+		mkdir -p "${image_dir}/orig/$(dirname ${f})" 2> /dev/null ||
+			fail "Unable to create backup directory of ${image_dir}/$(dirname ${f})"
+
+		cp -au "${mp}/${f}" "${image_dir}/orig/${f}" 2> /dev/null || {
+			error "${f} does not exist in image";
+			del_list=( "${del_list[@]}" "${f}" );
+			unset file_list[${i}];
+		}
+	done
+
+	cp /etc/mtab "${mp}/etc/mtab" &&
+		debug "Copied fake mtab over" ||
+		fail  "Unable to copy mtab in place"
+
+	cat > "${mp}/usr/sbin/policy-rc.d" << EOF
+#!/bin/sh
+echo "All runlevel operations denied by policy" >&2
+exit 101
+EOF
+	chmod 0755 "${mp}/usr/sbin/policy-rc.d" ||
+		fail "Unable to make policy-rc.d executable"
+
+	rm "${mp}/etc/resolv.conf" ||
+        fail "Unable to remove /etc/resolv.conf"
+
+    cp /etc/resolv.conf "${mp}/etc/resolv.conf" &&
+		debug "Placed resolv.conf in image" ||
+		fail  "Failed to place resolv.conf in image"
+
+    debug "Placed policy-rc.d to prevent init(.d) operations"
+}
+
+restore_files() {
+	# Restore files set aside during installation.
+
+	for f in "${file_list[@]}"
+	do
+		cp -au "${image_dir}/orig/${f}" "${mp}/${f}" &&
+			debug "Restored ${f}" ||
+			error "Unable to restore ${f}"
+	done
+
+	for f in "${del_list[@]}"
+	do
+		rm -rf "${mp}/${f}" &&
+			debug "Removed ${f}" ||
+			error "Unable to remove ${f}"
+	done
+
+	rm -rf "${image_dir}/orig" || true
+
+	rm "${mp}/etc/resolv.conf" ||
+		fail "Failed to remove /etc/resolv.conf"
+
+	chroot "${mp}" ln -snf ../run/resolvconf/resolv.conf /etc/resolv.conf
+}
+
+build_chroot() {
+	# This builds the chroot by:
+	#	1. Resizing the root file system
+	#	2. Mounting up the chroot
+	#	3. Mounting up devpts, sysfs and proc
+
+	if [ "${vdd}" == "/dev/vdd1" ]; then
+		growpart /dev/vdd 1 &&
+			debug "Expanded ${vdd}" ||
+			fail  "Failed to grow ${vdd}"
+	fi
+
+	e2fsck -f -y ${vdd} ||
+		fail "failed to check file system"
+
+	resize2fs ${vdd} ||
+		fail "failed to resize the root filesystem"
+
+	mount "${vdd}" "${mp}" &&
+		mounts+=("${mp}")
+		debug "Mounted root file system" ||
+		fail  "Unable to mount root file system"
+
+	mount --bind /dev "${mp}/dev" &&
+		mounts+=("${mp}/dev") &&
+		debug "Mounted bound dev under ${mp}" ||
+		fail  "Unable to mount dev under ${mp}"
+
+	mount devpts-live -t devpts "${mp}/dev/pts" &&
+		mounts+=("${mp}/dev/pts") &&
+		debug "Mounted devpts-live under ${mp}" ||
+		fail  "Error mounting devpts under ${mp}"
+
+	mount sysfs-live -t sysfs "${mp}/sys" &&
+		mounts+=("${mp}/sys") &&
+		debug "Mounted sysfs-live under ${mp}" ||
+		fail  "Failed to mount sysfs-live under ${mp}"
+
+	mount proc-live -t proc "${mp}/proc" &&
+		mounts+=("${mp}/proc") &&
+		debug "Mounted proc-live under ${mp}" ||
+		fail  "Failed to mount proc under ${mp}"
+
+	df -h "${mp}"
+	operation_files
+
+}
+
+tear_chroot() {
+	# Tear down the chroot
+
+	restore_files
+	sync
+	sleep 5
+
+	for (( idx=${#mounts[@]}-1 ; idx>=0 ; idx-- )) ; do
+		ump=${mounts[idx]}
+		umount -f "${ump}" &&
+			debug "Unmounted ${ump}" ||
+			fail  "Unable to dismount ${ump}"
+	done
+
+	# This is important... zerofree zeroes unused blocks so the image compresses smaller!
+	[ "${ZEROFREE:-1}" -eq 1 ]  && zerofree -v ${vdd}
+}
+
+
+xchroot() {
+	# This is a chroot helper that breaks the build if the chrooted command
+	# fails. Further, it sets the locale to "C" to prevent errors.
+	# WARNING: if your script requires a specific locale, this may not work.
+
+	# This conforms to the bitness of the guest, meaning that if your guest
+	# is Ubuntu i386, then the command will be run via 'linux32' to be safe.
+	_chroot=$(which chroot)
+    bitness=${bitness:-$(chroot "${mp}" getconf LONG_BIT)}
+    [ "${bitness}" == "32" ] && _chroot="linux32 ${_chroot}"
+    debug "Running chroot: ${@}"
+    ( DEBIAN_FRONTEND=noninteractive LANG=C LC_ALL=C ${_chroot} "${mp}" "${@}" ) &&
+        debug "   Command Successful" ||
+        { debug "   Command FAILED!"; sleep 60; fail "chrooted-command failed!"; }
+}
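+
+# A brief usage sketch for xchroot (the package name below is illustrative,
+# not part of this branch): run commands inside the mounted image; the build
+# aborts if any of them fails.
+#   xchroot apt-get -y update
+#   xchroot apt-get -y install some-package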
+
+fake_cloud_init() {
+	# If the cloud does not provide a meta-data service this should be run
+	# This will setup a nocloud datasource.
+
+	seed_d="${mp}/var/lib/cloud/seed/nocloud-net"
+	mkdir -p "${seed_d}"
+
+    cat << EOF > "${seed_d}/meta-data"
+instance_id: builder-cloud_img-%s-${dev_uuid}
+EOF
+
+	debug "Cloud image instance ID set to cloud_img-%s-${dev_uuid}"
+    touch "${seed_d}/user-data"
+}
+
+builder_install() {
+	apt-get -y update
+    apt-get -y install "${@}"
+    apt-get -y autoclean
+    apt-get -y clean
+}
+
+xchroot_install() {
+	xchroot apt-get -y update
+    xchroot apt-get -y install "${@}"
+    xchroot apt-get -y autoclean
+    xchroot apt-get -y clean
+}
+
+xchroot_cloudinit_cfg() {
+	# Reconfigure cloud-init
+	debug "Reconfiguring Cloud-init for DS ${@}"
+	printf "%s\t%s\t%s\t${@// /,}\n" \
+			cloud-init cloud-init/datasources multiselect | xchroot debconf-set-selections
+
+	cat <<EOF > "${mp}/etc/cloud/cloud.cfg.d/90_dpkg.cfg"
+${CLOUD_IMG_STR}
+# to update this file, run dpkg-reconfigure cloud-init
+datasource_list: [ ${@// /, } ]
+EOF
+
+}
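+# A brief usage sketch: the argument is a space-separated list of cloud-init
+# datasource names (the names here are illustrative), written into the image
+# as a comma-separated datasource_list:
+#   xchroot_cloudinit_cfg "NoCloud ConfigDrive"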
+
+build_chroot
+suite=$(chroot ${mp} lsb_release -c -s)
+debug "Suite is currently ${suite}"
+
+# dist_ge(dist)
+# return true if ${suite} is newer than or the same as dist. This can be used
+# to make decisions based on the version of Ubuntu, e.g. dist_ge "precise"
+dist_ge() { [[ "${suite}" > "$1" || "${suite}" == "$1" ]]; }
+
+# dist_le(dist)
+# return true if ${suite} is older than or the same as dist.
+dist_le() { [[ "${suite}" < "$1" || "${suite}" == "$1" ]]; }
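+
+# A brief usage sketch (the package name below is illustrative, not part of
+# this branch): pick packages based on the detected suite.
+#   if dist_ge "trusty"; then
+#       xchroot_install some-trusty-only-package
+#   fi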
+
+builder_install zerofree
+
+ADDIN_HERE
+
+# Get the package manifest
+chroot ${mp} dpkg-query -W | column -t > "/tmp/packages.manifest"
+
+
+# Cleanup
+if [ "${TEAR_CHROOT:-1}" -eq 1 ]; then
+	debug "Tearing stuff down";
+	tear_chroot
+else
+	debug "Not tearing down chroot per user instruction"
+fi
+
+# Finish
+end 0

=== added file 'templates/example-addin.tmpl'
--- templates/example-addin.tmpl	1970-01-01 00:00:00 +0000
+++ templates/example-addin.tmpl	2014-07-28 14:45:55 +0000
@@ -0,0 +1,139 @@
+## THIS is the actual logic of what will be done inside the image.
+##
+## The basic idea is that an authoritative cloud image will be modified in
+## a chroot. This addin file defines the logic for the things that you
+## want to modify, while leveraging common functions to do the work.
+##
+## Common functions:
+##   debug <ARGS>: Put a nice message in the logs.
+##       debug "hi! this is some text"
+##   fail <ARGS>: Fail the build with a message.
+##       fail "ugh, something bad happened"
+##   end <ARG>: End the build with optional argument
+##
+##   xchroot <ARG>: run chroot command within the target OS
+##       If the command exits with a non-zero status it will
+##       fail. Command will be run at the bitness of the target, i.e.
+##       i386 target will have command run as 32-bit.
+##           xchroot apt-get -y update
+##
+##   builder_install <args>: install packages into the builder
+##   xchroot_install <args>: install packages into the target
+##
+##   By default, the chroot will be built for you. If you don't want to
+##   have that done, make sure that "BUILD_CHROOT=0" is set.
+##   To manually build the chroot, run "build_chroot"
+##
+##   By default, the chroot will be torn down for you. If you don't want
+##   that, then make sure that "TEAR_CHROOT=0" is set.
+##   To manually tear down the chroot, run "tear_chroot".
+##
+##      If you manually setup the chroot by hand, you can still use
+##      "tear_chroot". Just make sure that you add your mounts to
+##      ${mounts}.
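+##      A minimal sketch of that (device and mountpoint are illustrative):
+##          mount /dev/vdd2 "${mp}/srv" && mounts+=("${mp}/srv")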
+##
+##   The ROOT partition of the guest will be mounted to "${mp}"
+##   LOGS will be tarred up to /dev/vdb and extracted once the build
+##   is done.
+##
+##   Disks:
+##      /dev/vdc is a CDROM for cloud-init
+##      /dev/vdb is a raw device for outputting files via tar
+##      /dev/vdd is the device that you want to operate on
+##
+##      By default, ${vdd} is set to the location of the root file system.
+##          If you are trying to do something exotic, then you may
+##          set ${vdd} to where the root filesystem is and then run
+##          build_chroot by hand.
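+##      A minimal sketch of that manual flow (the partition name is
+##      illustrative, not something this branch sets up):
+##          vdd=/dev/vdd2
+##          build_chroot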
+##
+##
+##   You can get the UUID of the root file system via variable ${dev_uuid}
+##   You can find out the Ubuntu code name via variable ${suite}
+##   Set "ZEROFREE=0" to prevent zerofree from zero'ing free space
+##   Set "TEAR_CHROOT=0" to prevent the chroot from being torn down
+##
+##   To add files to the tarball output, add them to the ${logs} array, i.e.
+##          logs+=(/foo/bar /bar/baz)
+##
+##   Okay, enough already. Some examples:
+##
+##   To install some programs:
+##       xchroot apt-get -y update
+##       xchroot apt-get -y install foo bar baz
+##       xchroot apt-get -y autoclean
+##       xchroot apt-get -y clean
+##
+##   Or shorthanded as:
+##       xchroot_install foo bar baz
+##
+##   To install a package into the builder (not the target):
+##       builder_install foo bar baz
+##
+##   To change Cloud-init default datasource:
+##       xchroot_cloudinit_cfg "CloudDrive OVF EC2"
+##       NOTE: this is the safe way to modify stuff
+##
+##   To change something on certain releases, you can use "dist_ge" or "dist_le"
+##       where the argument is the comparison code name.
+##           if dist_ge "precise"; then ... fi
+##           if dist_le "trusty"; then ... fi
+##
+##   To statically add Google DNS servers:
+##       cat << EOF > "${mp}/etc/resolvconf/resolv.conf.d/base"
+##       nameserver 8.8.8.8
+##       nameserver 8.8.4.4
+##       search google.com
+##
+## Simple example: This is a simple customization. It does some fairly
+##   common things: installs packages based on the suite, modifies the grub
+##                  settings, installs the LTS hardware enablement kernel on
+##                  12.04, makes sure a kernel module is loaded on boot and
+##                  updates the initramfs.
+##
+##    # install a package into the builder instance
+##    builder_install qemu-utils
+##
+##    if dist_le "precise"; then
+##        xchroot apt-add-repository -y ppa:foobar/baz
+##        xchroot_install foo bar baz
+##        xchroot_install linux-image-generic-lts-trusty
+##    elif [ dist_ge "quantal" -a dist_le "trusty" ]; then
+##        xchroot_install bar baz
+##    else
+##        xchroot_install baz
+##    fi
+##
+##    # This is how you update grub
+##    cat << EOF > ${mp}/etc/default/grub.d/50-cloudimg-settings.cfg
+##    # Windows Azure specific grub settings
+##    ${CLOUD_IMG_STR}
+##    # Set the default commandline
+##    GRUB_CMDLINE_LINUX_DEFAULT="console=tty1 console=ttyS0 earlyprintk=ttyS0 rootdelay=300"
+##    # Set the grub console type
+##    GRUB_TERMINAL=serial
+##    # Set the serial command
+##    GRUB_SERIAL_COMMAND="serial --speed=9600 --unit=0 --word=8 --parity=no --stop=1"
+##    # Set the recordfail timeout
+##    GRUB_RECORDFAIL_TIMEOUT=0
+##    # Do not wait on grub prompt
+##    GRUB_TIMEOUT=0
+##    EOF
+##    xchroot update-grub
+##
+##    cat << EOF > "${mp}/etc/modules"
+##
+##    # Cloud Vendor Awesome cloud needs this module
+##    raid456
+##    EOF
+##
+##    # Update the initramfs
+##    xchroot update-initramfs -u -k all
+
+
+echo "_____________________________________________"
+blkid
+lsblk
+debug "Custom stuff goes here"
+debug "Root is at: ${vdd}"
+xchroot_install bzr
+echo "_____________________________________________"

=== added file 'templates/img-azure-12.04-addin.tmpl'
--- templates/img-azure-12.04-addin.tmpl	1970-01-01 00:00:00 +0000
+++ templates/img-azure-12.04-addin.tmpl	2014-07-28 14:45:55 +0000
@@ -0,0 +1,40 @@
+# Config for 12.04
+CLOUD_IMG_STR="# CLOUD_IMG: This file was created/modified by the Cloud Image build process"
+debug "Modifying in-image settings for Azure"
+xchroot sed -i \
+    's,GRUB_CMDLINE_LINUX_DEFAULT=.*,GRUB_CMDLINE_LINUX_DEFAULT="console=tty1 console=ttyS0 earlyprintk=ttyS0 rootdelay=300",g' \
+    /etc/default/grub
+xchroot sed -i \
+    's,#GRUB_TERMINAL=console,GRUB_TERMINAL=serial,g' \
+    /etc/default/grub
+echo 'GRUB_SERIAL_COMMAND="serial --speed=9600 --unit=0 --word=8 --parity=no --stop=1"' >> ${mp}/etc/default/grub
+xchroot sed -i "1i${CLOUD_IMG_STR}" /etc/default/grub
+
+cat << EOF > "${mp}/etc/cloud/cloud.cfg.d/90-azure.cfg"
+${CLOUD_IMG_STR}
+system_info:
+   package_mirrors:
+     - arches: [i386, amd64]
+       failsafe:
+         primary: http://archive.ubuntu.com/ubuntu
+         security: http://security.ubuntu.com/ubuntu
+       search:
+         primary:
+           - http://azure.archive.ubuntu.com/ubuntu/
+         security: []
+     - arches: [armhf, armel, default]
+       failsafe:
+         primary: http://ports.ubuntu.com/ubuntu-ports
+         security: http://ports.ubuntu.com/ubuntu-ports
+
+EOF
+
+# Add in the client keep alive
+cat << EOF >> "${mp}/etc/ssh/sshd_config"
+
+${CLOUD_IMG_STR}
+ClientAliveInterval 120
+EOF
+
+
+

=== added file 'templates/img-azure-13.10-addin.tmpl'
--- templates/img-azure-13.10-addin.tmpl	1970-01-01 00:00:00 +0000
+++ templates/img-azure-13.10-addin.tmpl	2014-07-28 14:45:55 +0000
@@ -0,0 +1,42 @@
+# Insert the Saucy Config
+CLOUD_IMG_STR="# CLOUD_IMG: This file was created/modified by the Cloud Image build process"
+cat << EOF > ${mp}/etc/default/grub.d/50-cloudimg-settings.cfg
+# Windows Azure specific grub settings
+${CLOUD_IMG_STR}
+
+# Set the default commandline
+GRUB_CMDLINE_LINUX_DEFAULT="console=tty1 console=ttyS0 earlyprintk=ttyS0 rootdelay=300"
+
+# Set the grub console type
+GRUB_TERMINAL=serial
+
+# Set the serial command
+GRUB_SERIAL_COMMAND="serial --speed=9600 --unit=0 --word=8 --parity=no --stop=1"
+
+# Set the recordfail timeout
+GRUB_RECORDFAIL_TIMEOUT=0
+
+# Do not wait on grub prompt
+GRUB_TIMEOUT=0
+
+EOF
+
+
+cat << EOF > "${mp}/etc/cloud/cloud.cfg.d/90-azure.cfg"
+${CLOUD_IMG_STR}
+system_info:
+   package_mirrors:
+     - arches: [i386, amd64]
+       failsafe:
+         primary: http://archive.ubuntu.com/ubuntu
+         security: http://security.ubuntu.com/ubuntu
+       search:
+         primary:
+           - http://azure.archive.ubuntu.com/ubuntu/
+         security: []
+     - arches: [armhf, armel, default]
+       failsafe:
+         primary: http://ports.ubuntu.com/ubuntu-ports
+         security: http://ports.ubuntu.com/ubuntu-ports
+
+EOF

=== added file 'templates/img-azure-14.04-addin.tmpl'
--- templates/img-azure-14.04-addin.tmpl	1970-01-01 00:00:00 +0000
+++ templates/img-azure-14.04-addin.tmpl	2014-07-28 14:45:55 +0000
@@ -0,0 +1,49 @@
+# Insert the Trusty Config
+CLOUD_IMG_STR="# CLOUD_IMG: This file was created/modified by the Cloud Image build process"
+cat << EOF > ${mp}/etc/default/grub.d/50-cloudimg-settings.cfg
+# Windows Azure specific grub settings
+${CLOUD_IMG_STR}
+
+# Set the default commandline
+GRUB_CMDLINE_LINUX_DEFAULT="console=tty1 console=ttyS0 earlyprintk=ttyS0 rootdelay=300"
+
+# Set the grub console type
+GRUB_TERMINAL=serial
+
+# Set the serial command
+GRUB_SERIAL_COMMAND="serial --speed=9600 --unit=0 --word=8 --parity=no --stop=1"
+
+# Set the recordfail timeout
+GRUB_RECORDFAIL_TIMEOUT=0
+
+# Do not wait on grub prompt
+GRUB_TIMEOUT=0
+
+EOF
+
+# Add in the client keep alive
+cat << EOF >> "${mp}/etc/ssh/sshd_config"
+
+${CLOUD_IMG_STR}
+ClientAliveInterval 120
+EOF
+
+# Add the configuration for Cloud-init
+cat << EOF > "${mp}/etc/cloud/cloud.cfg.d/90-azure.cfg"
+${CLOUD_IMG_STR}
+system_info:
+   package_mirrors:
+     - arches: [i386, amd64]
+       failsafe:
+         primary: http://archive.ubuntu.com/ubuntu
+         security: http://security.ubuntu.com/ubuntu
+       search:
+         primary:
+           - http://azure.archive.ubuntu.com/ubuntu/
+         security: []
+     - arches: [armhf, armel, default]
+       failsafe:
+         primary: http://ports.ubuntu.com/ubuntu-ports
+         security: http://ports.ubuntu.com/ubuntu-ports
+
+EOF

=== added file 'templates/img-azure-14.10-addin.tmpl'
--- templates/img-azure-14.10-addin.tmpl	1970-01-01 00:00:00 +0000
+++ templates/img-azure-14.10-addin.tmpl	2014-07-28 14:45:55 +0000
@@ -0,0 +1,49 @@
+# Insert the Trusty Config
+CLOUD_IMG_STR="# CLOUD_IMG: This file was created/modified by the Cloud Image build process"
+cat << EOF > ${mp}/etc/default/grub.d/50-cloudimg-settings.cfg
+# Windows Azure specific grub settings
+${CLOUD_IMG_STR}
+
+# Set the default commandline
+GRUB_CMDLINE_LINUX_DEFAULT="console=tty1 console=ttyS0 earlyprintk=ttyS0 rootdelay=300"
+
+# Set the grub console type
+GRUB_TERMINAL=serial
+
+# Set the serial command
+GRUB_SERIAL_COMMAND="serial --speed=9600 --unit=0 --word=8 --parity=no --stop=1"
+
+# Set the recordfail timeout
+GRUB_RECORDFAIL_TIMEOUT=0
+
+# Do not wait on grub prompt
+GRUB_TIMEOUT=0
+
+EOF
+
+# Add in the client keep alive
+cat << EOF >> "${mp}/etc/ssh/sshd_config"
+
+${CLOUD_IMG_STR}
+ClientAliveInterval 120
+EOF
+
+# Add the configuration for Cloud-init
+cat << EOF > "${mp}/etc/cloud/cloud.cfg.d/90-azure.cfg"
+${CLOUD_IMG_STR}
+system_info:
+   package_mirrors:
+     - arches: [i386, amd64]
+       failsafe:
+         primary: http://archive.ubuntu.com/ubuntu
+         security: http://security.ubuntu.com/ubuntu
+       search:
+         primary:
+           - http://azure.archive.ubuntu.com/ubuntu/
+         security: []
+     - arches: [armhf, armel, default]
+       failsafe:
+         primary: http://ports.ubuntu.com/ubuntu-ports
+         security: http://ports.ubuntu.com/ubuntu-ports
+
+EOF

=== added file 'templates/img-azure.tmpl'
--- templates/img-azure.tmpl	1970-01-01 00:00:00 +0000
+++ templates/img-azure.tmpl	2014-07-28 14:45:55 +0000
@@ -0,0 +1,295 @@
+#!/bin/bash
+# vi: ts=4 noexpandtab
+
+## Copyright (C) 2012 Ben Howard <ben.howard@xxxxxxxxxxxxx>
+## Copyright (C) 2012 Canonical Group, Ltd <www.canonical.com>
+## Date: 23 March 2012
+
+## Macros
+#### %s will be replaced with the serial
+
+## Disks:
+### /dev/vdc is a CDROM for cloud-init
+### /dev/vdb is a raw device for outputting files via tar
+### /dev/vdd is the device that you want to operate on
+exec > >(tee build.log) 2>&1
+
+CLOUD_IMG_STR="# CLOUD_IMG: This file was created/modified by the Cloud Image build process"
+
+end() { shutdown -P now; exit ${1:-0}; }
+error() { echo "$@" 1>&2; echo "$@"; }
+fail() { debug $1; end 1; }
+debug() { error "$(date -R):" "$@"; }
+
+# files to backup from the image and restore when done with chroot
+file_list=("usr/sbin/policy-rc.d"  "etc/mtab")
+del_list=()
+
+image_dir="$(mktemp -d /tmp/azure.XXXX)"
+mp="$(mktemp -d /tmp/azure.XXXX)"
+azure_files="$(mktemp -d /tmp/azure.XXXX)"
+
+dev_uuid="$(blkid /dev/vdd1 -o udev | awk '-F=' '/ID_FS_UUID=/ {print$2}')" &&
+   debug "Filesystem UUID ${loop_fs_uuid}" ||
+   fail  "Unable to get UUID for file system"
+
+# dist_ge(dist1,dist2)
+# return true if dist1 is newer or the same as dist2
+dist_ge() { [[ "$1" > "$2" || "$1" == "$2" ]]; }
+
+# dist_le(dist1,dist2)
+# return true if dist1 is older than or the same as dist2
+dist_le() { [[ "$1" < "$2" || "$1" == "$2" ]]; }
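+
+# A brief usage sketch (assumes ${suite} has been detected from the image):
+#   if dist_ge "${suite}" "trusty"; then echo "trusty or newer"; fi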
+
+operation_files() {
+
+	for i in $(seq 0 $((${#file_list[@]} - 1)))
+	do
+		f="${file_list[i]}"
+		mkdir -p "${image_dir}/orig/$(dirname ${f})" 2> /dev/null ||
+			fail "Unable to create backup directory of ${image_dir}/$(dirname ${f})"
+
+		cp -au "${mp}/${f}" "${image_dir}/orig/${f}" 2> /dev/null || {
+			error "${f} does not exist in image";
+			del_list=( "${del_list[@]}" "${f}" );
+			unset file_list[${i}];
+		}
+	done
+
+	cp /etc/mtab "${mp}/etc/mtab" &&
+		debug "Copied fake mtab over" ||
+		fail  "Unable to copy mtab in place"
+
+	cat > "${mp}/usr/sbin/policy-rc.d" << EOF
+#!/bin/sh
+echo "All runlevel operations denied by policy" >&2
+exit 101
+EOF
+	chmod 0755 "${mp}/usr/sbin/policy-rc.d" ||
+		fail "Unable to make policy-rc.d executable"
+
+	rm "${mp}/etc/resolv.conf" ||
+        fail "Unable to remove /etc/resolv.conf"
+
+    cp /etc/resolv.conf "${mp}/etc/resolv.conf" &&
+		debug "Placed resolv.conf in image" ||
+		fail  "Failed to place resolv.conf in image"
+
+    debug "Placed policy-rc.d to prevent init(.d) operations"
+}
+
+restore_files() {
+	for f in "${file_list[@]}"
+	do
+		cp -au "${image_dir}/orig/${f}" "${mp}/${f}" &&
+			debug "Restored ${f}" ||
+			error "Unable to restore ${f}"
+	done
+
+	for f in "${del_list[@]}"
+	do
+		rm -rf "${mp}/${f}" &&
+			debug "Removed ${f}" ||
+			error "Unable to remove ${f}"
+	done
+
+	rm -rf "${image_dir}/orig" || true
+
+	rm "${mp}/etc/resolv.conf" ||
+		fail "Failed to remove /etc/resolv.conf"
+
+	chroot "${mp}" ln -snf ../run/resolvconf/resolv.conf /etc/resolv.conf
+}
+
+build_chroot() {
+	e2fsck -f -y /dev/vdd1 ||
+		fail "failed to check file system"
+
+	resize2fs /dev/vdd1 ||
+		fail "failed to resize the root filesystem"
+
+	mount /dev/vdd1 "${mp}" &&
+		debug "Mounted root file system" ||
+		fail  "Unable to mount root file system"
+
+	mount --bind /dev "${mp}/dev" &&
+		debug "Mounted bound dev under ${mp}" ||
+		fail  "Unable to mount dev under ${mp}"
+
+	mount devpts-live -t devpts "${mp}/dev/pts" &&
+		debug "Mounted devpts-live under ${mp}" ||
+		fail  "Error mounting devpts under ${mp}"
+
+	mount sysfs-live -t sysfs "${mp}/sys" &&
+		debug "Mounted sysfs-live under ${mp}" ||
+		fail  "Failed to mount sysfs-live under ${mp}"
+
+	mount proc-live -t proc "${mp}/proc" &&
+		debug "Mounted proc-live under ${mp}" ||
+		fail  "Failed to mount proc under ${mp}"
+
+	df -h "${mp}"
+	operation_files
+
+}
+
+tear_chroot() {
+	restore_files
+	sync
+	sleep 5
+
+	umount -f "${mp}/dev/pts" &&
+		debug "Unmounted /dev/pts in chroot" ||
+		fail "Unable to dismount /dev/pts in chroot"
+
+	umount -f "${mp}/dev" &&
+		debug "Unmounted /dev in chroot" ||
+		fail  "Unable to dismount /dev in chroot"
+
+	umount -f "${mp}/proc" &&
+		debug "Unmounted /proc in chroot" ||
+		fail "Unable to dismount /proc in chroot"
+
+	umount -f "${mp}/sys" &&
+		debug "Unmounted /sys in chroot" ||
+		fail "Unable to dismount /sys in chroot"
+
+    umount -f "${mp}" &&
+        debug "Umounted root" ||
+        fail "Unable to dismount root"
+
+	zerofree -v /dev/vdd1
+}
+
+
+xchroot() {
+	debug "Running chroot: ${@}"
+ 	( DEBIAN_FRONTEND=noninteractive LANG=C LC_ALL=C chroot "${mp}" "${@}" ) &&
+		debug "   Command Successfull" ||
+		{ debug "   Command FAILED!"; sleep 60; fail "chrooted-command failed!"; }
+}
+
+fake_cloud_init() {
+	seed_d="${mp}/var/lib/cloud/seed/nocloud-net"
+	mkdir -p "${seed_d}"
+
+    cat << EOF > "${seed_d}/meta-data"
+instance_id: azure-cloud_img-%s-${dev_uuid}
+EOF
+
+	debug "Cloud image instance ID set to azure-cloud_img-%s-${dev_uuid}"
+    touch "${seed_d}/user-data"
+}
+
+# Setup the environment
+build_chroot
+
+# Detect which suite we're working with
+suite="$(chroot ${mp} lsb_release -c -s)"
+debug "Suite is current ${suite}"
+
+# Installation of base packages
+pkgs=(linux-image-extra-virtual
+	  walinuxagent
+      linux-tools-common
+      hv-kvp-daemon-init)
+
+if dist_ge "${suite}" "utopic"; then
+	# hv-kvp-daemon-init is now in linux-cloud-tools
+	pkgs=(walinuxagent linux-cloud-tools-virtual)
+elif dist_ge "${suite}" "trusty"; then
+	# hv-kvp-daemon-init is a transitional package
+	pkgs=(walinuxagent linux-cloud-tools-virtual hv-kvp-daemon-init)
+elif dist_le "${suite}" "quantal"; then
+	pkgs+=(linux-backports-modules-hv-${suite}-virtual)
+fi
+
+# Test enablement for -proposed
+if [ "%P" = "true" ]; then
+	cat << EOF >> "${mp}/etc/apt/sources.list.d/proposed.list"
+${CLOUD_IMG_STR}
+# Test enablement for Windows Azure
+deb http://archive.ubuntu.com/ubuntu ${suite}-proposed main
+EOF
+fi
+
+# Make sure we get the latest version of cloud-init
+pkgs+=(cloud-init)
+
+# Installation of packages
+debug "Performing package operations"
+xchroot apt-get -y purge grub-legacy-ec2
+xchroot apt-get -y update
+xchroot apt-get -y install ${pkgs[@]}
+xchroot apt-get -y clean
+debug "Package operations complete"
+
+# Modify boot settings
+debug "Modifying in-image settings for Azure"
+xchroot sed -i "s,LABEL=cloudimg-rootfs,UUID=${dev_uuid},g" /etc/fstab
+xchroot sed -i "s|defaults|defaults,discard|g" /etc/fstab
+xchroot sed -i "1i${CLOUD_IMG_STR}" /etc/fstab
+xchroot sed -i 's,SHELL=/bin/sh,SHELL=/bin/bash,g' /etc/default/useradd
+xchroot sed -i "1i${CLOUD_IMG_STR}" /etc/default/useradd
+
+# Reconfigure initramfs
+debug "Reconfiguring initramfs"
+xchroot update-initramfs -u -k all
+
+# Addins if any, including Cloud-configs
+# BEGIN ADDINS
+ADDIN_HERE
+
+# Reconfigure grub, since the config is laid during the addin phase
+xchroot dpkg-reconfigure grub-pc
+xchroot update-grub
+
+# Azure requirements
+[ -e ${mp}/etc/resolvconf/resolv.conf.d/original ] &&
+	rm ${mp}/etc/resolvconf/resolv.conf.d/original
+
+[ -e ${mp}/etc/resolvconf/resolv.conf.d/tail ] &&
+	rm ${mp}/etc/resolvconf/resolv.conf.d/tail
+
+# Get the package manifest
+debug "Gathering package information"
+chroot ${mp} dpkg-query -W | column -t > "${PWD}/azure_image.pkgs"
+
+set -x
+# Reconfigure cloud-init
+debug "Reconfiguring Cloud-init"
+if [ "${suite}" == "quantal" ]; then
+	# Quantal uses the legacy method.
+	debug "Configured for legacy Cloud-init"
+	fake_cloud_init
+	printf "%s\t%s\t%s\tConfigDrive, NoCloud\n" \
+		cloud-init cloud-init/datasources multiselect | xchroot debconf-set-selections
+
+elif [ dist_ge "${suite}" "trusty" ]; then
+	debug "WALinuxAgent has packaged cloud-init configuration"
+
+else
+	debug "Configured for Azure supported Cloud-init"
+	printf "%s\t%s\t%s\tAzure\n" \
+		cloud-init cloud-init/datasources multiselect | xchroot debconf-set-selections
+
+    cat <<EOF > "${mp}/etc/cloud/cloud.cfg.d/90_dpkg.cfg"
+${CLOUD_IMG_STR}
+# to update this file, run dpkg-reconfigure cloud-init
+datasource_list: [ Azure ]
+EOF
+
+fi
+xchroot dpkg-reconfigure --frontend=noninteractive cloud-init
+
+set +x
+# Cleanup
+debug "Tearing stuff down"
+tear_chroot
+
+# Final work
+touch success
+tar -cvf /dev/vdb azure_image.pkgs success build.log
+
+# Finish
+end 0

=== added file 'templates/img-build.tmpl'
--- templates/img-build.tmpl	1970-01-01 00:00:00 +0000
+++ templates/img-build.tmpl	2014-07-28 14:45:55 +0000
@@ -0,0 +1,114 @@
+#!/bin/bash
+# Setup up the build environment.
+exec > >(tee /tmp/build.log) 2>&1
+echo "127.0.1.1 $HOSTNAME $HOSTNAME" >> /etc/hosts
+error() { echo "$@" 1>&2; }
+fail() {
+    [ $# -eq 0 ] || error "$@";
+    echo "Failed" > /tmp/failed;
+    tar -C /tmp -cvf /dev/vdb failed build.log;
+    shutdown -P now;
+    exit 1;
+}
+
+debug() { error "$(date -R):" "$@"; }
+sysfail() { fail "General failure!"; }
+
+trap sysfail SIGINT SIGTERM
+set -x
+distro="%d"
+arch="%a"
+build_name="%b"
+
+BZR_AUTOMATED_EC2_BUILDS="%A"
+BZR_EC2_PUBSCRIPTS="%P"
+BZR_LIVEBUILD="%L"
+BZR_VMBUILDER="%V"
+
+# Make sure that root is resized
+lsblk -f -r --out "MOUNTPOINT,KNAME,TYPE" | grep cloudimg-rootfs > /tmp/parts
+read mp label kname dtype < /tmp/parts
+case dtype in
+    disk)
+          part_number=$((${#kname} - 1))
+          growpart /dev/$kname ${part_number}
+          resize2fs /
+          ;;
+    part) resise2fs /
+          ;;
+esac
+
+cd /tmp
+
+# Set up the path
+AB_D="${PWD}/automated-ec2-builds"
+VB_D="${PWD}/vmbuilder-0.11"
+EC2PUB_D="${PWD}/ec2-publishing-scripts"
+LB_D="${PWD}/live-build"
+PATH="${AB_D}:${VB_D}:${EC2PUB_D}:$PATH"
+
+# Check out the builder tools
+bzr branch "${BZR_AUTOMATED_EC2_BUILDS}" "${AB_D}"
+bzr branch "${BZR_EC2_PUBSCRIPTS}" "${EC2PUB_D}"
+bzr branch "${BZR_LIVEBUILD}" "${LB_D}"
+bzr branch "${BZR_VMBUILDER}" "${VB_D}"
+
+# Set stuff up
+dest="${PWD}/${distro}-${arch}"
+img_base="${distro}-${arch}"
+description="${distro}-${arch}"
+partfile="${dest}/partfile"
+
+# Make sure the distro is supported
+debootstrap_parent_definition="/usr/share/debootstrap/scripts/gutsy"
+debootstrap_definition="/usr/share/debootstrap/scripts/${distro}"
+[ -e "${debootstrap_definition}" ] ||
+    ln -s "${debootstrap_parent_definition}" "${debootstrap_definition}" ||
+    fail "Unable to create debootstrap definition for ${distro}"
+
+mkdir -p "${dest}" ||
+    fail "Unable to create destination directory"
+
+export LIVE_BUILD_PATH="${LB_D}"
+
+# Get the tooling version information
+cur_d=${PWD}
+{
+for tool in ${AB_D} ${EC2PUB_D} ${LB_D} ${VB_D}
+do
+    debug "Gathering tooling information for ${tool}"
+    ( echo "====== ${tool##*/} [${tool}] ======";
+      cd "${tool}" &&
+        bzr info &&
+        bzr version-info &&
+        bzr log -p -r-1;
+      echo ""; ) >> "${dest}/tool-version-info.txt" 2>&1 ||
+            fail "Failed to gather tool version information on ${tool}"
+done
+}
+
+cd ${cur_d}
+serial="%S"
+
+if [[ "${arch}" =~ (i386|amd64) ]]; then
+    conf="${AB_D}/conf/${distro}-${build_name}"
+else
+    carch="${arch}"
+    case ${carch} in
+        armel|armhf) carch="arm";;
+        arm64) carch="arm64";;
+    esac
+    conf="${AB_D}/conf/${distro}-${carch}-${build_name}"
+fi
+
+source ${conf}.conf
+arches=${arch} ${AB_D}/build-ec2-image --serial=${serial} "${conf}.conf" "${dest}" ||
+    fail "Failed to build image!"
+
+echo "Taring up artifacts to /dev/vdb!"
+touch /tmp/success
+tar -C /tmp -cvf /dev/vdb "${distro}-${arch}" success build.log
+sync
+
+# Shut down without the ugly message
+sh -c 'sleep 15 && /sbin/poweroff' &

=== added file 'templates/img-juju-addin.tmpl'
--- templates/img-juju-addin.tmpl	1970-01-01 00:00:00 +0000
+++ templates/img-juju-addin.tmpl	2014-07-28 14:45:55 +0000
@@ -0,0 +1,237 @@
+# This is the Juju Vagrant addin.
+
+ZEROFREE=0
+tear_chroot
+ZEROFREE=1
+unset build_chroot
+
+build_chroot() {
+
+ 	parted /dev/vdd -s -- mkpart primary 8196M 40000M &&
+		debug "Created lxc btrfs volume" ||
+		fail "Failed to create btrfs volume"
+
+	parted /dev/vdd -s -- rm 1 &&
+		debug  "Removed default root partition" ||
+		fail "Failed to remove root partition"
+
+	parted /dev/vdd -s -- mkpart primary 1049K 8196M &&
+		debug "Re-added root partition to 8196M" ||
+		fail "Faild to re-add root partition"
+
+	parted /dev/vdd -s -- print ||
+		fail "Unable to print new device"
+
+	partprobe -s /dev/vdd ||
+		fail "Failed to re-read partition table"
+
+	e2fsck -y -f /dev/vdd1 &&
+		debug "Checked root device" ||
+		fail "Check of root device faileD"
+
+	resize2fs /dev/vdd1 &&
+		debug "Resized root device to 8G" ||
+		fail "Failed to extend root device"
+
+	mkfs.btrfs -L juju-lxcfs /dev/vdd2 &&
+		debug "Created new btrfs on /dev/vdd2" ||
+		fail "Failed to create btrfs on /dev/vdd2"
+
+    mount /dev/vdd1 "${mp}" &&
+		mounts+=("${mp}") &&
+        debug "Mounted root file system" ||
+        fail  "Unable to mount root file system"
+
+	mount /dev/vdd2  "${mp}/mnt" &&
+		debug "Mounted btrfs to ${mp}/mnt" ||
+		fail "Failed to mount juju-lxcfs"
+
+	btrfs subvolume create "${mp}/mnt/@lxc" &&
+		debug "Created btrfs subvolume @lxc" ||
+		fail "Failed to create @lxc on juju-lxcfs"
+
+	umount "${mp}/mnt" ||
+		fail "Failed to unmount btrfs volume"
+
+    mount --bind /dev "${mp}/dev" &&
+		mounts+=("${mp}/dev") &&
+        debug "Mounted bound dev under ${mp}" ||
+        fail  "Unable to mount dev under ${mp}"
+
+    mount devpts-live -t devpts "${mp}/dev/pts" &&
+		mounts+=("${mp}/dev/pts") &&
+        debug "Mounted devpts-live under ${mp}" ||
+        fail  "Error mounting devpts under ${mp}"
+
+    mount sysfs-live -t sysfs "${mp}/sys" &&
+		mounts+=("${mp}/sys") &&
+        debug "Mounted sysfs-live under ${mp}" ||
+        fail  "Failed to mount sysfs-live under ${mp}"
+
+    mount proc-live -t proc "${mp}/proc" &&
+		mounts+=("${mp}/proc") &&
+        debug "Mounted proc-live under ${mp}" ||
+        fail  "Failed to mount proc under ${mp}"
+
+    df -h "${mp}"
+    operation_files
+
+}
+
+build_chroot
+arch="$(chroot ${mp} getconf LONG_BIT)"
+
+# Populate fake cloud-init
+fake_cloud_init ${suite}
+
+# add btrfs to /etc/fstab
+cat << EOF >> "${mp}/etc/fstab"
+LABEL=juju-lxcfs	/var/lib/lxc	btrfs	defaults,subvol=@lxc,autodefrag,discard,compress=lzo 0 0
+EOF
+
+# Setup the vagrant user
+xchroot useradd -m vagrant -s /bin/bash
+echo "root:vagrant" | chroot ${mp} chpasswd
+echo "vagrant:vagrant" | chroot ${mp} chpasswd
+
+cat << EOF > ${mp}/etc/sudoers.d/vagrant
+vagrant ALL=(ALL) NOPASSWD:ALL
+EOF
+
+xchroot chmod 0440 /etc/sudoers.d/vagrant
+xchroot mkdir -p /home/vagrant/.ssh
+
+cat << EOF > ${mp}/home/vagrant/.ssh/authorized_keys
+ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA6NF8iallvQVp22WDkTkyrtvp9eWW6A8YVr+kz4TjGYe7gHzIw+niNltGEFHzD8+v1I2YJ6oXevct1YeS0o9HZyN1Q9qgCgzUFtdOKLv6IedplqoPkcmF0aYet2PkEDo3MlTBckFXPITAMzF8dJSIFo9D8HfdOV0IAdx4O7PtixWKn5y2hMNG0zQPyUecp4pzC6kivAIhyfHilFR61RGL+GPXQ2MWZWFYbAGjyiYJnAmCP3NOTd0jMZEnDkbUvxhMmBYSdETk1rRgm+R4LOzFUGaHqHDLKLX+FIPKcF96hrucXzcWyLbIbEgE98OHlnVYCzRdK8jlqm8tehUc9c9WhQ== vagrant insecure public key
+EOF
+
+
+cat << EOF >> ${mp}/etc/sysctl.conf
+EOF
+
+# Ownership issues
+xchroot chown -R vagrant: /home/vagrant/.ssh
+
+# Generate some keys
+xchroot openssl genrsa -out /home/vagrant/.ssh/id_rsa 2048
+xchroot chmod 0400 /home/vagrant/.ssh/id_rsa
+ssh-keygen -y -f ${mp}/home/vagrant/.ssh/id_rsa > ${mp}/home/vagrant/.ssh/id_rsa.pub
+rm "${mp}/home/vagrant/.ssh/.rnd"
+
+def_series="precise"
+def_series_v="12.04"
+[ "${suite}" != "precise" -o "${suite}" != "saucy" ] &&
+	def_series="trusty" && def_series_v="14.04"
+
+# Generate the Juju Configuration for the Ubuntu user
+vagrant_admin_secret=$(openssl rand -base64 6)
+xchroot mkdir -p /home/vagrant/.juju/local
+cat << EOF > "${mp}/home/vagrant/.juju/environments.yaml"
+default: local
+environments:
+  ## https://juju.ubuntu.com/get-started/local/
+  local:
+    type: local
+    admin-secret: ${vagrant_admin_secret}
+    storage-port: 62840
+    default-series: ${def_series}
+
+EOF
+
+# Give the host a hostname
+echo "vagrant-ubuntu-${suite}-${arch}" > ${mp}/etc/hostname
+
+# Make sure the users own their stuff
+xchroot chown -R vagrant: /home/vagrant
+
+# install support statement
+cat << EOF > "${mp}/etc/update-motd.d/98-cloudguest"
+#!/bin/sh
+#
+# Notification on support of vagrant images
+#
+echo ""
+echo "This is a customized Ubuntu Server Cloud Image for use Juju demonstration"
+echo "and Juju development within the Vagrant Development environment."
+echo ""
+echo "For more information about, see the following links:"
+echo "      Juju: https://juju.ubuntu.com/";
+echo "      Vagrant: htp://www.vagrantbox.es/"
+echo ""
+echo "Support notice: These images are provided as-is without warranty, support"
+echo "      or representation of any fitness whatsoever. These images are"
+echo "      are for demonstration and developement, but should not be used"
+echo "      for any production purpouses."
+echo ""
+echo "SECURITY NOTICE: These images contain the well-known Vagrant public"
+echo "      SSH key. This means that these images are insecure for SSH."
+echo ""
+echo "Juju GUI: To access the Juju GUI, point your browser at:"
+echo "      Username: admin"
+echo "      Password: ${vagrant_admin_secret}"
+EOF
+xchroot chmod 0755 /etc/update-motd.d/98-cloudguest
+xchroot rm /etc/update-motd.d/51-cloudguest
+
+# Set the package installation options
+pkgs=(virtualbox-guest-dkms
+    virtualbox-guest-utils
+    byobu
+    lxc
+    cgroup-lite
+    juju-core
+    bzr
+    git
+    mongodb-server
+	btrfs-tools
+	python-pip
+	libnss-myhostname
+	rsyslog-gnutls
+	juju-local
+    )
+
+if [ "${suite}" == "precise" ]; then
+	pkgs+=(linux-headers-generic-lts-raring
+           linux-generic-lts-raring)
+	xchroot add-apt-repository 'deb http://archive.ubuntu.com/ubuntu precise-backports main'
+fi
+
+# Package clean-ups
+rpkgs=(grub-legacy-ec2)
+if [ "${suite}" == "precise" ]; then
+    rpkgs=($(xchroot dpkg -l | awk '/linux-(image|headers).*virtual/ {print$2}'))
+    rpkgs+=(linux-image-virtual)
+fi
+
+# Show the boot console
+xchroot sed -i "s,console=ttyS0,console=tty0,g" /etc/default/grub
+
+xchroot add-apt-repository -y ppa:juju/stable
+xchroot apt-get -y update
+xchroot apt-get -y install ${pkgs[@]}
+xchroot apt-get -y purge "${rpkgs[@]}"
+xchroot apt-get -f install
+xchroot apt-get -y dist-upgrade
+xchroot apt-get -y clean
+xchroot apt-get -y autoclean
+xchroot apt-get -y autoremove
+xchroot apt-get -y update
+xchroot update-grub
+
+# Gets rid of an annoying message for shell provisioner
+xchroot sed -i "s|mesg n|tty -s \&\& mesg n|g" /root/.profile
+
+# Update the cloud-init configuration
+printf "%s\t%s\t%s\tNoCloud\n" \
+        cloud-init cloud-init/datasources multiselect | xchroot debconf-set-selections
+
+# populate the ${def_series} lxc cache
+arch_type="amd64"
+[ "${arch}" -eq 32 ] && arch_type="i386"
+
+mkdir -p "${mp}/var/cache/lxc/cloud-${def_series}"
+xchroot curl -o /var/cache/lxc/cloud-${def_series}/ubuntu-${def_series_v}-server-cloudimg-${arch_type}-root.tar.gz \
+   	http://cloud-images.ubuntu.com/releases/${def_series}/release/ubuntu-${def_series_v}-server-cloudimg-${arch_type}-root.tar.gz
+
+# Get the package list out
+chroot ${mp} dpkg-query -W | column -t > "${PWD}/vagrant_image.pkgs"

=== added file 'templates/img-juju.tmpl'
--- templates/img-juju.tmpl	1970-01-01 00:00:00 +0000
+++ templates/img-juju.tmpl	2014-07-28 14:45:55 +0000
@@ -0,0 +1,382 @@
+#!/bin/bash
+# vi: ts=4 noexpandtab
+
+## Copyright (C) 2012 Ben Howard <ben.howard@xxxxxxxxxxxxx>
+## Copyright (C) 2012 Canonical Group, Ltd <www.canonical.com>
+## Date: 23 March 2012
+
+exec > >(tee build.log) 2>&1
+end() { shutdown -P now; exit ${1:-0}; }
+error() { echo "$@" 1>&2; echo "$@"; }
+fail() { debug $1; end 1; }
+debug() { error "$(date -R):" "$@"; }
+
+# files to backup from the image and restore when done with chroot
+file_list=("usr/sbin/policy-rc.d"  "etc/mtab")
+del_list=()
+
+image_dir="$(mktemp -d /tmp/vagrant.XXXX)"
+mp="$(mktemp -d /tmp/vagrant.XXXX)"
+vagrant_files="$(mktemp -d /tmp/vagrant.XXXX)"
+
+dev_uuid="$(blkid /dev/vdd1 -o udev | awk '-F=' '/ID_FS_UUID=/ {print$2}')" &&
+   debug "Filesystem UUID ${loop_fs_uuid}" ||
+   fail  "Unable to get UUID for file system"
+
+operation_files() {
+
+    for i in $(seq 0 $((${#file_list[@]} - 1)))
+    do
+        f="${file_list[i]}"
+        mkdir -p "${image_dir}/orig/$(dirname ${f})" 2> /dev/null ||
+            fail "Unable to create backup directory of ${image_dir}/$(dirname ${f})"
+
+        cp -au "${mp}/${f}" "${image_dir}/orig/${f}" 2> /dev/null || {
+            error "${f} does not exist in image";
+            del_list=( "${del_list[@]}" "${f}" );
+            unset file_list[${i}];
+        }
+    done
+
+    cp /etc/mtab "${mp}/etc/mtab" &&
+        debug "Copied fake mtab over" ||
+        fail  "Unable to copy mtab in place"
+
+    cat > "${mp}/usr/sbin/policy-rc.d" << EOF
+#!/bin/sh
+echo "All runlevel operations denied by policy" >&2
+exit 101
+EOF
+    chmod 0755 "${mp}/usr/sbin/policy-rc.d" ||
+        fail "Unable to make policy-rc.d executable"
+
+    rm "${mp}/etc/resolv.conf" ||
+        fail "Unable to remove /etc/resolv.conf"
+
+    cp /etc/resolv.conf "${mp}/etc/resolv.conf" &&
+        debug "Placed resolv.conf in image" ||
+        fail  "Failed to place resolv.conf in image"
+
+    debug "Placed policy-rc.d to prevent init(.d) operations"
+}
+
+restore_files() {
+    for f in "${file_list[@]}"
+    do
+        cp -au "${image_dir}/orig/${f}" "${mp}/${f}" &&
+            debug "Restored ${f}" ||
+            error "Unable to restore ${f}"
+    done
+
+    for f in "${del_list[@]}"
+    do
+        rm -rf "${mp}/${f}" &&
+            debug "Removed ${f}" ||
+            error "Unable to remove ${f}"
+    done
+
+    rm -rf "${image_dir}/orig" || true
+
+    rm "${mp}/etc/resolv.conf" ||
+        fail "Failed to remove /etc/resolv.conf"
+
+    chroot "${mp}" ln -snf ../run/resolvconf/resolv.conf /etc/resolv.conf
+}
+
+build_chroot() {
+
+ 	parted /dev/vdd -s -- mkpart primary 8196M 40000M &&
+		debug "Created lxc btrfs volume" ||
+		fail "Failed to create btrfs volume"
+
+	parted /dev/vdd -s -- rm 1 &&
+		debug  "Removed default root partition" ||
+		fail "Failed to remove root partition"
+
+	parted /dev/vdd -s -- mkpart primary 1049K 8196M &&
+		debug "Re-added root partition to 8196M" ||
+		fail "Faild to re-add root partition"
+
+	parted /dev/vdd -s -- print ||
+		fail "Unable to print new device"
+
+	partprobe -s /dev/vdd ||
+		fail "Failed to re-read partition table"
+
+	e2fsck -y -f /dev/vdd1 &&
+		debug "Checked root device" ||
+		fail "Check of root device faileD"
+
+	resize2fs /dev/vdd1 &&
+		debug "Resized root device to 8G" ||
+		fail "Failed to extend root device"
+
+	mkfs.btrfs -L juju-lxcfs /dev/vdd2 &&
+		debug "Created new btrfs on /dev/vdd2" ||
+		fail "Failed to create btrfs on /dev/vdd2"
+
+    mount /dev/vdd1 "${mp}" &&
+        debug "Mounted root file system" ||
+        fail  "Unable to mount root file system"
+
+	mount /dev/vdd2  "${mp}/mnt" &&
+		debug "Mounted btrfs to ${mp}/mnt" ||
+		fail "Failed to mount juju-lxcfs"
+
+	btrfs subvolume create "${mp}/mnt/@lxc" &&
+		debug "Created btrfs subvolume @lxc" ||
+		fail "Failed to create @lxc on juju-lxcfs"
+
+	umount "${mp}/mnt" ||
+		fail "Failed to unmount btrfs volume"
+
+    mount --bind /dev "${mp}/dev" &&
+        debug "Mounted bound dev under ${mp}" ||
+        fail  "Unable to mount dev under ${mp}"
+
+    mount devpts-live -t devpts "${mp}/dev/pts" &&
+        debug "Mounted devpts-live under ${mp}" ||
+        fail  "Error mounting devpts under ${mp}"
+
+    mount sysfs-live -t sysfs "${mp}/sys" &&
+        debug "Mounted sysfs-live under ${mp}" ||
+        fail  "Failed to mount sysfs-live under ${mp}"
+
+    mount proc-live -t proc "${mp}/proc" &&
+        debug "Mounted proc-live under ${mp}" ||
+        fail  "Failed to mount proc under ${mp}"
+
+    df -h "${mp}"
+    operation_files
+
+}
+
+tear_chroot() {
+    restore_files
+    sync
+    sleep 5
+
+    umount -f "${mp}/dev/pts" &&
+        debug "Unmounted /dev/pts in chroot" ||
+        fail "Unable to dismount /dev/pts in chroot"
+
+    umount -f "${mp}/dev" &&
+        debug "Unmounted /dev in chroot" ||
+        fail  "Unable to dismount /dev in chroot"
+
+    umount -f "${mp}/proc" &&
+        debug "Unmounted /proc in chroot" ||
+        fail "Unable to dismount /proc in chroot"
+
+    umount -f "${mp}/sys" &&
+        debug "Unmounted /sys in chroot" ||
+        fail "Unable to dismount /sys in chroot"
+
+    umount -f "${mp}" &&
+        debug "Unmounted ${mp}" ||
+        fail "Unable to dismount root"
+
+    # Zero out the disk for better compression
+    zerofree /dev/vdd1
+}
+
+
+xchroot() {
+    _chroot="chroot"
+    bitness=${bitness:-$(chroot "${mp}" getconf LONG_BIT)}
+
+    [ "${bitness}" == "32" ] && _chroot="linux32 chroot"
+
+    debug "Running chroot: ${@}"
+    ( DEBIAN_FRONTEND=noninteractive LANG=C LC_ALL=C ${_chroot} "${mp}" "${@}" ) &&
+        debug "   Command Successfull" ||
+        { debug "   Command FAILED!"; sleep 60; fail "chrooted-command failed!"; }
+}
+
+fake_cloud_init() {
+    seed_d="${mp}/var/lib/cloud/seed/nocloud"
+    mkdir -p "${seed_d}"
+
+    cat << EOF > "${seed_d}/meta-data"
+instance_id: juju-cloud_img-${1}-${2}
+EOF
+
+    debug "Cloud image instance ID set to juju-cloud_img-${1}-${2}"
+    cat << EOF > "${seed_d}/user-data"
+#cloud-config
+output: {all: '| tee -a /var/log/cloud-init-output.log'}
+ssh_pwauth: True
+EOF
+}
+
+# Setup the environment
+build_chroot
+
+# Detect which suite we're working with
+suite="$(chroot ${mp} lsb_release -c -s)"
+arch="$(chroot ${mp} getconf LONG_BIT)"
+
+# Populate fake cloud-init
+fake_cloud_init ${suite} ${dev_uuid}
+
+# add btrfs to /etc/fstab
+cat << EOF >> "${mp}/etc/fstab"
+LABEL=juju-lxcfs	/var/lib/lxc	btrfs	defaults,subvol=@lxc,autodefrag,discard,compress=lzo 0 0
+EOF
+
+# Setup the vagrant user
+xchroot useradd -m vagrant -s /bin/bash
+echo "root:vagrant" | chroot ${mp} chpasswd
+echo "vagrant:vagrant" | chroot ${mp} chpasswd
+
+cat << EOF > ${mp}/etc/sudoers.d/vagrant
+vagrant ALL=(ALL) NOPASSWD:ALL
+EOF
+
+xchroot chmod 0440 /etc/sudoers.d/vagrant
+xchroot mkdir -p /home/vagrant/.ssh
+
+cat << EOF > ${mp}/home/vagrant/.ssh/authorized_keys
+ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA6NF8iallvQVp22WDkTkyrtvp9eWW6A8YVr+kz4TjGYe7gHzIw+niNltGEFHzD8+v1I2YJ6oXevct1YeS0o9HZyN1Q9qgCgzUFtdOKLv6IedplqoPkcmF0aYet2PkEDo3MlTBckFXPITAMzF8dJSIFo9D8HfdOV0IAdx4O7PtixWKn5y2hMNG0zQPyUecp4pzC6kivAIhyfHilFR61RGL+GPXQ2MWZWFYbAGjyiYJnAmCP3NOTd0jMZEnDkbUvxhMmBYSdETk1rRgm+R4LOzFUGaHqHDLKLX+FIPKcF96hrucXzcWyLbIbEgE98OHlnVYCzRdK8jlqm8tehUc9c9WhQ== vagrant insecure public key
+EOF
+
+
+cat << EOF >> ${mp}/etc/sysctl.conf
+EOF
+
+# Ownership issues
+xchroot chown -R vagrant: /home/vagrant/.ssh
+
+# Generate some keys
+xchroot openssl genrsa -out /home/vagrant/.ssh/id_rsa 2048
+xchroot chmod 0400 /home/vagrant/.ssh/id_rsa
+ssh-keygen -y -f ${mp}/home/vagrant/.ssh/id_rsa > ${mp}/home/vagrant/.ssh/id_rsa.pub
+rm "${mp}/home/vagrant/.ssh/.rnd"
+
+def_series="precise"
+def_series_v="12.04"
+[ "${suite}" != "precise" -a "${suite}" != "saucy" ] &&
+	def_series="trusty" && def_series_v="14.04"
+
+# Generate the Juju Configuration for the Ubuntu user
+vagrant_admin_secret=$(openssl rand -base64 6)
+xchroot mkdir -p /home/vagrant/.juju/local
+cat << EOF > "${mp}/home/vagrant/.juju/environments.yaml"
+default: local
+environments:
+  ## https://juju.ubuntu.com/get-started/local/
+  local:
+    type: local
+    admin-secret: ${vagrant_admin_secret}
+    storage-port: 62840
+    default-series: ${def_series}
+
+EOF
+
+# Give the host a hostname
+echo "vagrant-ubuntu-${suite}-${arch}" > ${mp}/etc/hostname
+
+# Make sure the users own their stuff
+xchroot chown -R vagrant: /home/vagrant
+
+# install support statement
+cat << EOF > "${mp}/etc/update-motd.d/98-cloudguest"
+#!/bin/sh
+#
+# Notification on support of vagrant images
+#
+echo ""
+echo "This is a customized Ubuntu Server Cloud Image for use Juju demonstration"
+echo "and Juju development within the Vagrant Development environment."
+echo ""
+echo "For more information about, see the following links:"
+echo "      Juju: https://juju.ubuntu.com/";
+echo "      Vagrant: htp://www.vagrantbox.es/"
+echo ""
+echo "Support notice: These images are provided as-is without warranty, support"
+echo "      or representation of any fitness whatsoever. These images are"
+echo "      are for demonstration and developement, but should not be used"
+echo "      for any production purpouses."
+echo ""
+echo "SECURITY NOTICE: These images contain the well-known Vagrant public"
+echo "      SSH key. This means that these images are insecure for SSH."
+echo ""
+echo "Juju GUI: To access the Juju GUI, point your browser at:"
+echo "      Username: admin"
+echo "      Password: ${vagrant_admin_secret}"
+EOF
+xchroot chmod 0755 /etc/update-motd.d/98-cloudguest
+xchroot rm /etc/update-motd.d/51-cloudguest
+
+# Set the package installation options
+pkgs=(virtualbox-guest-dkms
+    virtualbox-guest-utils
+    byobu
+    lxc
+    cgroup-lite
+    juju-core
+    bzr
+    git
+    mongodb-server
+	btrfs-tools
+	python-pip
+	libnss-myhostname
+	rsyslog-gnutls
+	juju-local
+    )
+
+if [ "${suite}" == "precise" ]; then
+	pkgs+=(linux-headers-generic-lts-raring
+           linux-generic-lts-raring)
+	xchroot add-apt-repository 'deb http://archive.ubuntu.com/ubuntu precise-backports main'
+fi
+
+# Package clean-ups
+rpkgs=(grub-legacy-ec2)
+if [ "${suite}" == "precise" ]; then
+    rpkgs=($(xchroot dpkg -l | awk '/linux-(image|headers).*virtual/ {print$2}'))
+    rpkgs+=(linux-image-virtual)
+fi
+
+# Show the boot console
+xchroot sed -i "s,console=ttyS0,console=tty0,g" /etc/default/grub
+
+xchroot add-apt-repository -y ppa:juju/stable
+xchroot apt-get -y update
+xchroot apt-get -y install ${pkgs[@]}
+xchroot apt-get -y purge "${rpkgs[@]}"
+xchroot apt-get -f install
+xchroot apt-get -y dist-upgrade
+xchroot apt-get -y clean
+xchroot apt-get -y autoclean
+xchroot apt-get -y autoremove
+xchroot apt-get -y update
+xchroot update-grub
+
+# Gets rid of an annoying message for shell provisioner
+xchroot sed -i "s|mesg n|tty -s \&\& mesg n|g" /root/.profile
+
+# Update the cloud-init configuration
+printf "%s\t%s\t%s\tNoCloud\n" \
+        cloud-init cloud-init/datasources multiselect | xchroot debconf-set-selections
+
+# populate the ${def_series} lxc cache
+arch_type="amd64"
+[ "${arch}" -eq 32 ] && arch_type="i386"
+
+mkdir -p "${mp}/var/cache/lxc/cloud-${def_series}"
+xchroot curl -o /var/cache/lxc/cloud-${def_series}/ubuntu-${def_series_v}-server-cloudimg-${arch_type}-root.tar.gz \
+   	http://cloud-images.ubuntu.com/releases/${def_series}/release/ubuntu-${def_series_v}-server-cloudimg-${arch_type}-root.tar.gz
+
+# Get the package list out
+chroot ${mp} dpkg-query -W | column -t > "${PWD}/vagrant_image.pkgs"
+
+# Cleanup
+tear_chroot
+
+# Final work
+touch success
+tar -cvf /dev/vdb success build.log vagrant_image.pkgs
+
+# Finish
+end 0

=== added file 'templates/img-maas.tmpl'
--- templates/img-maas.tmpl	1970-01-01 00:00:00 +0000
+++ templates/img-maas.tmpl	2014-07-28 14:45:55 +0000
@@ -0,0 +1,95 @@
+#!/bin/bash
+# Setup up the build environment.
+exec > >(tee /tmp/build.log) 2>&1
+echo "127.0.1.1 $HOSTNAME $HOSTNAME" >> /etc/hosts
+error() { echo "$@" 1>&2; }
+fail() {
+    [ $# -eq 0 ] || error "$@";
+    echo "Failed" > /tmp/failed;
+    tar -C /tmp -cvf /dev/vdb failed build.log;
+    shutdown -P now;
+    exit 1;
+}
+
+debug() { error "$(date -R):" "$@"; }
+sysfail() { fail "General failure!"; }
+
+trap sysfail SIGINT SIGTERM
+set -x
+
+# Variables from configuration step
+PACK_D="%D"
+BZR_MAAS="%M"
+suite="%d"
+stream="%S"
+serial="%s"
+
+# Set up the stuff
+export SRC_D="/tmp/maas"
+export START_D="/tmp/logs"
+export OUT_D="%O"
+
+# Make sure that the root disk is big enough
+# Work around for https://bugs.launchpad.net/ubuntu/+source/cloud-utils/+bug/1285197
+sudo cloud-init single --name growpart --frequency=always &&
+    sudo cloud-init single --name=resizefs --frequency=always
+df -h
+
+# Set up the code paths
+cd /tmp
+bzr branch "${BZR_MAAS}" "${SRC_D}" ||
+    fail "Failed to check out the BZR branch"
+mci2eph="${SRC_D}/maas-cloudimg2ephemeral"
+
+# Extract /dev/vdc (which has all the goodies including the image
+# for conversion)
+tar -C / -xvvf /dev/vdd
+
+# Make the dirs
+mkdir /tmp/maas_images
+mkdir "${START_D}"
+mkdir "${PUBLISH_BASE}"
+
+# Do the work
+ARCHES="%A"
+for arch in ${ARCHES:-i386 amd64 armhf}; do
+    prefix="${suite}-${stream}-maas-${arch}"
+    work_d="${OUT_D}/${arch}"
+    mkdir -p "${work_d}" ||
+        fail "Unable to create destination directory for ${arch}"
+
+    echo "Extracting tarball for ${prefix}"
+    tar -Sxvzf - -C "${work_d}" --wildcards "*.img" < "${PACK_D}/%B-${arch}.tar.gz" ||
+        fail "failed to extract tarball for ${prefix}"
+
+    img=$(echo "${work_d}/"*.img) && [ -f "$img" ] ||
+        fail "failed to find img in tarball at $r_url"
+
+    mv "${img}" "${work_d}/${prefix}.img" && img="${work_d}/${prefix}.img" ||
+        fail "failed to rename image"
+
+    echo "Converting ${arch} to ephemeral disk"
+
+    "$mci2eph" "$img" "${work_d}/${prefix}-vmlinuz" \
+        "${work_d}/${prefix}-initrd" "${work_d}/${prefix}.manifest" ||
+        fail "failed to turn $img into ephemeral"
+
+    echo "creating ${prefix}.tar.gz"
+    ( cd "$work_d" &&
+        tar -Scvzf - ${prefix}.img ${prefix}-vmlinuz ${prefix}-initrd ) \
+            > "$OUT_D/${prefix}.tar.gz" ||
+        fail "failed to make tar file"
+
+    mv "${work_d}/${prefix}.manifest" "${OUT_D}" ||
+        fail "failed to move manifest"
+
+    rm -Rf "${work_d}"
+    rm "${PACK_D}/${prefix}.tar.gz"
+done
+
+# Finish the work
+echo "Taring up artifacts to /dev/vdb!"
+touch /tmp/success
+tar -C /tmp -cvf /dev/vdb maas_images logs success build.log /var/log/* "${OUT_D}" >> /dev/null
+sync
+shutdown -P now

=== added file 'templates/img-maasv2.tmpl'
--- templates/img-maasv2.tmpl	1970-01-01 00:00:00 +0000
+++ templates/img-maasv2.tmpl	2014-07-28 14:45:55 +0000
@@ -0,0 +1,134 @@
+#!/bin/bash
+# Setup up the build environment.
+exec > >(tee /tmp/build.log) 2>&1
+echo "127.0.1.1 $HOSTNAME $HOSTNAME" >> /etc/hosts
+error() { echo "$@" 1>&2; }
+fail() {
+    [ $# -eq 0 ] || error "$@";
+    echo "Failed" > /tmp/failed;
+    tar -C /tmp -cvf /dev/vdb failed build.log;
+    shutdown -P now;
+    exit 1;
+}
+
+debug() { error "$(date -R):" "$@"; }
+sysfail() { fail "General failure!"; }
+
+trap sysfail SIGINT SIGTERM
+
+# Variables from configuration step
+PACK_D="%D"
+BZR_MAAS="%M"
+suite="%d"
+stream="%S"
+serial="%s"
+
+# Set up the stuff
+export SRC_D="/tmp/maas"
+export START_D="/tmp/logs"
+export OUT_D="%O"
+
+# Make sure that the root disk is big enough
+# Work around for https://bugs.launchpad.net/ubuntu/+source/cloud-utils/+bug/1285197
+sudo cloud-init single --name growpart --frequency=always &&
+    sudo cloud-init single --name=resizefs --frequency=always
+df -h
+
+
+# Set up the code paths
+cd /tmp
+bzr branch "${BZR_MAAS}" "${SRC_D}" ||
+    fail "Failed to check out the BZR branch"
+
+# Assume we are MAAS v1
+MAASv=1
+mci2eph="${SRC_D}/maas-cloudimg2ephemeral"
+
+# Setup for V2 if we see that we have a v2 tree
+[ -e "${SRC_D}/system-setup" -a -e "${SRC_D}/bin/meph2-cloudimg-sync" ] && {
+    echo "Running against version 2";
+    find "${SRC_D}"
+    bash -x "${SRC_D}/system-setup";
+    mci2eph="${SRC_D}/bin/meph2-cloudimg-sync"
+    OUT_D="/tmp/maas_final"
+    mkdir -p "${OUT_D}"
+    MAASv=2;
+}
+
+# Extract /dev/vdc (which has all the goodies including the image
+# for conversion)
+tar -C / -xvvf /dev/vdd
+
+# Make the dirs
+mkdir /tmp/maas_images
+mkdir "${START_D}"
+
+
+# Do the work
+ARCHES="%A"
+ARCHES=${ARCHES:-i386 amd64 armhf}
+
+debug "building maas ephemerals v${MAASv} suite=$suite arches=[${ARCHES}]"
+for arch in ${ARCHES}; do
+    prefix="${suite}-${stream}-maas-${arch}"
+    work_d="${OUT_D}/${arch}"
+    mkdir -p "${work_d}" ||
+        fail "Unable to create destination directory for ${arch}"
+
+    debug "beginning arch $arch"
+    if [ "${MAASv:-1}" -eq 1 ]; then
+        echo "Extracting tarball for ${prefix}"
+        tar -Sxvzf - -C "${work_d}" --wildcards "*.img" < "${PACK_D}/%B-${arch}.tar.gz" ||
+            fail "failed to extract tarball for ${prefix}"
+
+        img=$(echo "${work_d}/"*.img) && [ -f "$img" ] ||
+            fail "failed to find img in tarball at $r_url"
+
+        mv "${img}" "${work_d}/${prefix}.img" && img="${work_d}/${prefix}.img" ||
+            fail "failed to rename image"
+
+        echo "Converting ${arch} to ephemeral disk"
+
+       # v1 command
+        cmd=("$mci2eph"
+             "$img"
+             "${work_d}/${prefix}-vmlinuz"
+             "${work_d}/${prefix}-initrd"
+             "${work_d}/${prefix}.manifest"
+        )
+
+        ${cmd[@]} || fail "Failed to create MAAS image"
+
+        echo "creating ${prefix}.tar.gz"
+        ( cd "$work_d" &&
+            tar -Scvzf - . ) > "$OUT_D/${prefix}.tar.gz" ||
+            fail "failed to make tar file"
+
+        [ -e "${work_d}/${prefix}.manifest" -a "${MAASv:-1}" -eq 2 ] && {
+             mv "${work_d}/${prefix}.manifest" "${OUT_D}" ||
+                fail "failed to move manifest"
+        }
+
+        [ -e "${work_d}" ] && rm -Rf "${work_d}"
+        [ -e "${PACK_D}/${prefix}.tar.gz" ] &&  rm "${PACK_D}/${prefix}.tar.gz"
+
+    elif [ "${MAASv:-1}" -eq 2 ]; then
+        # v2 command
+        cmd=(env
+             PYTHONPATH=${SRC_D}
+             PATH=${SRC_D}/bin:$PATH
+             "$mci2eph" "-vvv" "${OUT_D}" "arch=${arch}" "release=${suite}"
+        )
+
+       debug "running" "${cmd[@]}"
+       "${cmd[@]}" || fail "Failed to create MAAS image"
+    fi
+
+done
+
+# Finish the work
+echo "Taring up artifacts to /dev/vdb!"
+touch /tmp/success
+tar -C /tmp -cvf /dev/vdb maas_images /var/log/* logs success build.log "${OUT_D}" >> /dev/null
+sync
+shutdown -P now

=== added file 'templates/img-smartcloud.tmpl'
--- templates/img-smartcloud.tmpl	1970-01-01 00:00:00 +0000
+++ templates/img-smartcloud.tmpl	2014-07-28 14:45:55 +0000
@@ -0,0 +1,113 @@
+#!/bin/bash
+# Disable the cloud-init functions.
+
+
+exec > >(tee /tmp/build-$(date +%s).log) 2>&1
+echo "127.0.1.1 $HOSTNAME $HOSTNAME" >> /etc/hosts
+error() { echo "$@" 1>&2; }
+fail() {
+    [ $# -eq 0 ] || error "$@";
+    echo "Failed" > /tmp/failed;
+    tar -C /tmp -cvf /dev/vdb failed build.log;
+    shutdown -P now;
+    exit 1;
+}
+
+debug() { error "$(date -R):" "$@"; }
+sysfail() { fail "General failure!"; }
+
+trap sysfail SIGINT SIGTERM
+set -x
+
+seed_d="/mnt/var/lib/cloud/seed/nocloud-net"
+rhostid=$(uuidgen | cut -c -8)
+
+# Mount the root filesystem on /dev/vdd1 and write stuff
+mount /dev/vdd1 /mnt ||
+    fail "Unable to mount root file system"
+
+mkdir -p "${seed_d}" ||
+    fail "Unable to create seed directory"
+
+
+# See whats mounted
+blkid
+df
+
+# Create the smartcloud users
+chroot /mnt groupadd -g 501 idcuser
+chroot /mnt useradd -m -u 500 -g 501 idcuser
+chroot /mnt passwd -l idcuser
+
+# Make idcuser sudo-less
+cat > /mnt/etc/sudoers.d/idcuser <<EOF
+# Added by build system
+idcuser ALL=(ALL) NOPASSWD:ALL
+EOF
+
+# Seed config files
+cat << EOF > ${seed_d}/meta-data
+instance-id: sc-${rhostid}
+EOF
+
+cat << EOF > ${seed_d}/user-data
+#cloud-config
+manage_etc_hosts: localhost
+apt_mirror: http://ibm.clouds.archive.ubuntu.com
+preserve_hostname: False
+datasource_list: ["NoCloud"]
+
+cloud_init_modules:
+ - bootcmd
+ - resizefs
+ - ca-certs
+ - rsyslog
+ - ssh
+
+cloud_config_modules:
+ - mounts
+ - locale
+ - apt-pipelining
+ - apt-update-upgrade
+ - landscape
+ - timezone
+ - puppet
+ - chef
+ - salt-minion
+ - mcollective
+ - disable-ec2-metadata
+ - runcmd
+ - byobu
+
+cloud_final_modules:
+ - scripts-per-once
+ - scripts-per-boot
+ - scripts-per-instance
+ - scripts-user
+ - keys-to-console
+ - phone-home
+ - final-message
+
+EOF
+
+
+find /mnt/var/lib/cloud/
+
+[ -e "${seed_d}/user-data" ] &&
+    cat ${seed_d}/user-data ||
+    fail "User-data was not written to disk"
+
+[ -e "${seed_d}/meta-data" ] &&
+    cat ${seed_d}/meta-data ||
+    fail "Meta-data was not written to disk"
+
+# Cleanup
+umount /dev/vdd1
+
+
+# Finish the work
+echo "Taring up artifacts to /dev/vdb!"
+touch /tmp/success
+tar -C /tmp -cvf /dev/vdb success build*.log >> /dev/null
+sync
+shutdown -P now

=== added file 'templates/img-update.tmpl'
--- templates/img-update.tmpl	1970-01-01 00:00:00 +0000
+++ templates/img-update.tmpl	2014-07-28 14:45:55 +0000
@@ -0,0 +1,264 @@
+#!/bin/bash
+# vi: ts=4 noexpandtab
+
+## Copyright (C) 2013 Ben Howard <ben.howard@xxxxxxxxxxxxx>
+## Copyright (C) 2013 Canonical Group, Ltd <www.canonical.com>
+## Date: 2013-12-17
+#
+# This creates an update builder image
+#
+
+## Macros
+#### %s will be replaced with the serial
+
+## Disks:
+### /dev/vdc is a CDROM for cloud-init
+### /dev/vdb is a raw device for outputting files via tar
+### /dev/vdd is the device that you want to operate on
+exec > >(tee build.log) 2>&1
+
+end() { shutdown -P now; exit ${1:-0}; }
+error() { echo "$@" 1>&2; echo "$@"; }
+fail() { debug $1; end 1; }
+debug() { error "$(date -R):" "$@"; }
+
+# files to backup from the image and restore when done with chroot
+file_list=("usr/sbin/policy-rc.d"  "etc/mtab")
+del_list=()
+
+image_dir="$(mktemp -d /tmp/builder.XXXX)"
+mp="$(mktemp -d /tmp/builder.XXXX)"
+builder_files="$(mktemp -d /tmp/azure.XXXX)"
+
+odev_uuid="$(blkid /dev/vdd1 -o udev | awk '-F=' '/ID_FS_UUID=/ {print$2}')" &&
+	debug "OLD Filesystem UUID ${odev_uuid}" ||
+	fail  "Unable to get UUID for file system"
+
+# Change the UUID of the builder to be unique to the builder
+tune2fs -U $(uuidgen) /dev/vdd1 || fail "failed to set unique uuid"
+
+dev_uuid="$(blkid /dev/vdd1 -o udev | awk '-F=' '/ID_FS_UUID=/ {print$2}')" &&
+   debug "Filesystem UUID ${dev_uuid}" ||
+   fail  "Unable to get UUID for file system"
+
+[ "${dev_uuid}" == "${odev_uuid}" ] && fail "device ID's are the same. This is bad"
+
+# dist_ge(dist1,dist2)
+# return true if dist1 is newer or the same as dist2
+dist_ge() { [[ "$1" > "$2" || "$1" == "$2" ]]; }
+
+# dist_le(dist1,dist2)
+# return true if dist1 is older than or the same as dist2
+dist_le() { [[ "$1" < "$2" || "$1" == "$2" ]]; }
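+# Usage sketch: codenames sort alphabetically, which tracks release order
+# until the alphabet wraps, so e.g. `dist_ge trusty precise` and
+# `dist_le lucid precise` both succeed, while `dist_ge lucid precise` fails.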
+
+operation_files() {
+
+	for i in $(seq 0 $((${#file_list[@]} - 1)))
+	do
+		f="${file_list[i]}"
+		mkdir -p "${image_dir}/orig/$(dirname ${f})" 2> /dev/null ||
+			fail "Unable to create backup directory of ${image_dir}/$(dirname ${f})"
+
+		cp -au "${mp}/${f}" "${image_dir}/orig/${f}" 2> /dev/null || {
+			error "${f} does not exist in image";
+			del_list=( "${del_list[@]}" "${f}" );
+			unset file_list[${i}];
+		}
+	done
+
+	cp /etc/mtab "${mp}/etc/mtab" &&
+		debug "Copied fake mtab over" ||
+		fail  "Unable to copy mtab in place"
+
+	cat > "${mp}/usr/sbin/policy-rc.d" << EOF
+#!/bin/sh
+echo "All runlevel operations denied by policy" >&2
+exit 101
+EOF
+	chmod 0755 "${mp}/usr/sbin/policy-rc.d" ||
+		fail "Unable to make policy-rc.d executable"
+
+	rm "${mp}/etc/resolv.conf" ||
+        fail "Unable to remove /etc/resolv.conf"
+
+    cp /etc/resolv.conf "${mp}/etc/resolv.conf" &&
+		debug "Placed resolv.conf in image" ||
+		fail  "Failed to place resolv.conf in image"
+
+    debug "Placed policy-rc.d to prevent init(.d) operations"
+}
+
+restore_files() {
+	for f in "${file_list[@]}"
+	do
+		cp -au "${image_dir}/orig/${f}" "${mp}/${f}" &&
+			debug "Restored ${f}" ||
+			error "Unable to restore ${f}"
+	done
+
+	for f in "${del_list[@]}"
+	do
+		rm -rf "${mp}/${f}" &&
+			debug "Removed ${f}" ||
+			error "Unable to remove ${f}"
+	done
+
+	rm -rf "${image_dir}/orig" || true
+
+	rm "${mp}/etc/resolv.conf" ||
+		fail "Failed to remove /etc/resolv.conf"
+
+	chroot "${mp}" ln -snf ../run/resolvconf/resolv.conf /etc/resolv.conf
+}
+
+build_chroot() {
+	growpart /dev/vdd 1 &&
+		debug "Extended /dev/vdd1 to use whole disk" ||
+		fail "Unable to grow /dev/vdd1 partition"
+
+	e2fsck -f -y /dev/vdd1  &&
+		debug "Checked /dev/vdd1 for resize" ||
+		fail "Failed during check of /dev/vdd1"
+
+	resize2fs /dev/vdd1 &&
+		debug "Resized /dev/vdd1" ||
+		fail "Failed to resize /dev/vdd1"
+
+	mount /dev/vdd1 "${mp}" &&
+		debug "Mounted root file system" ||
+		fail  "Unable to mount root file system"
+
+	mount --bind /dev "${mp}/dev" &&
+		debug "Mounted bound dev under ${mp}" ||
+		fail  "Unable to mount dev under ${mp}"
+
+	mount devpts-live -t devpts "${mp}/dev/pts" &&
+		debug "Mounted devpts-live under ${mp}" ||
+		fail  "Error mounting devpts under ${mp}"
+
+	mount sysfs-live -t sysfs "${mp}/sys" &&
+		debug "Mounted sysfs-live under ${mp}" ||
+		fail  "Failed to mount sysfs-live under ${mp}"
+
+	mount proc-live -t proc "${mp}/proc" &&
+		debug "Mounted proc-live under ${mp}" ||
+		fail  "Failed to mount proc under ${mp}"
+
+	df -h "${mp}"
+	operation_files
+
+}
+
+tear_chroot() {
+	restore_files
+	sync
+	sleep 5
+
+	umount -f "${mp}/dev/pts" &&
+		debug "Unmounted /dev/pts in chroot" ||
+		fail "Unable to dismount /dev/pts in chroot"
+
+	umount -f "${mp}/dev" &&
+		debug "Unmounted /dev in chroot" ||
+		fail  "Unable to dismount /dev in chroot"
+
+	umount -f -l "${mp}/proc" &&
+		debug "Unmounted /proc in chroot" ||
+		fail "Unable to dismount /proc in chroot"
+
+	umount -f "${mp}/sys" &&
+		debug "Unmounted /sys in chroot" ||
+		fail "Unable to dismount /sys in chroot"
+
+    umount -f "${mp}" &&
+        debug "Umounted root" ||
+        fail "Unable to dismount root"
+
+	zerofree -v /dev/vdd1
+}
+
+
+xchroot() {
+	debug "Running chroot: ${@}"
+ 	( DEBIAN_FRONTEND=noninteractive LANG=C LC_ALL=C chroot "${mp}" "${@}" ) &&
+		debug "   Command successful" ||
+		{ debug "   Command FAILED!"; sleep 60; fail "chrooted-command failed!"; }
+}
+
+# Setup the environment
+build_chroot
+
+# Detect which suite we're working with
+suite="$(chroot ${mp} lsb_release -c -s)"
+debug "Suite is current ${suite}"
+
+# Installation of base packages
+kheaders=$(xchroot dpkg-query -W | awk '/linux-image-3.*virtual/{print$1}' | sed "s|image|headers|g")
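+# e.g. a "linux-image-3.2.0-23-virtual" entry yields "linux-headers-3.2.0-23-virtual"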
+pkgs=(bzr debootstrap
+      python-vm-builder
+      kpartx
+      qemu-kvm
+      debhelper
+      virtualbox
+      u-boot-tools
+      zerofree
+      ${kheaders}
+      qemu-utils
+      ubuntu-dev-tools
+      dkms
+      git)
+
+if [ "${suite}" == "precise" ]; then
+	pkgs+=(linux-source-3.2.0 qemu-kvm-extras qemu-kvm-extras-static)
+else
+	pkgs+=(qemu-user-static qemu-user)
+fi
+
+# Installation of packages
+debug "Performing package operations"
+xchroot apt-get -y update
+xchroot apt-get -y install ${pkgs[@]}
+xchroot apt-get -y clean
+debug "Package operations complete"
+
+# Install ZFS
+debug "Installing ZFS"
+xchroot apt-add-repository -y ppa:zfs-native/stable
+xchroot apt-get update
+xchroot apt-get -y install spl
+xchroot dpkg-reconfigure spl
+xchroot apt-get -y install ubuntu-zfs
+xchroot dpkg-reconfigure dkms
+debug "Installed ZFS"
+
+# Modify boot settings
+debug "Modifying in-image settings for builder"
+xchroot sed -i "s,LABEL=cloudimg-rootfs,UUID=${dev_uuid},g" /etc/fstab
+
+# Reconfigure initramfs
+debug "Reconfiguring initramfs"
+xchroot update-initramfs -u -k all
+xchroot dpkg-reconfigure grub-pc
+xchroot update-grub
+
+# builder requirements
+[ -e ${mp}/etc/resolvconf/resolv.conf.d/original ] &&
+	rm ${mp}/etc/resolvconf/resolv.conf.d/original
+
+[ -e ${mp}/etc/resolvconf/resolv.conf.d/tail ] &&
+	rm ${mp}/etc/resolvconf/resolv.conf.d/tail
+
+# Get the package manifest
+debug "Gathering package information"
+chroot ${mp} dpkg-query -W | column -t > "${PWD}/builder_image.pkgs"
+
+# Cleanup
+debug "Tearing stuff down"
+tear_chroot
+
+# Final work
+touch success
+tar -cvf /dev/vdb builder_image.pkgs success build.log
+
+# Finish
+end 0

=== added file 'templates/img-vagrant.tmpl'
--- templates/img-vagrant.tmpl	1970-01-01 00:00:00 +0000
+++ templates/img-vagrant.tmpl	2014-07-28 14:45:55 +0000
@@ -0,0 +1,298 @@
+#!/bin/bash
+# vi: ts=4 noexpandtab
+
+## Copyright (C) 2012 Ben Howard <ben.howard@xxxxxxxxxxxxx>
+## Copyright (C) 2012 Canonical Group, Ltd <www.canonical.com>
+## Date: 23 March 2012
+
+CLOUD_IMG_STR="# CLOUD_IMG: This file was created/modified by the Cloud Image build process"
+
+exec > >(tee build.log) 2>&1
+end() { shutdown -P now; exit ${1:-0}; }
+error() { echo "$@" 1>&2; echo "$@"; }
+fail() { debug "$@"; end 1; }
+debug() { error "$(date -R):" "$@"; }
+
+# files to backup from the image and restore when done with chroot
+file_list=("usr/sbin/policy-rc.d"  "etc/mtab")
+del_list=()
+
+image_dir="$(mktemp -d /tmp/vagrant.XXXX)"
+mp="$(mktemp -d /tmp/vagrant.XXXX)"
+vagrant_files="$(mktemp -d /tmp/vagrant.XXXX)"
+
+dev_uuid="$(blkid /dev/vdd1 -o udev | awk '-F=' '/ID_FS_UUID=/ {print$2}')" &&
+   debug "Filesystem UUID ${loop_fs_uuid}" ||
+   fail  "Unable to get UUID for file system"
+
+operation_files() {
+
+	for i in $(seq 0 $((${#file_list[@]} - 1)))
+	do
+		f="${file_list[i]}"
+		mkdir -p "${image_dir}/orig/$(dirname ${f})" 2> /dev/null ||
+			fail "Unable to create backup directory of ${image_dir}/$(dirname ${f})"
+
+		cp -au "${mp}/${f}" "${image_dir}/orig/${f}" 2> /dev/null || {
+			error "${f} does not exist in image";
+			del_list=( "${del_list[@]}" "${f}" );
+			unset file_list[${i}];
+		}
+	done
+
+	cp /etc/mtab "${mp}/etc/mtab" &&
+		debug "Copied fake mtab over" ||
+		fail  "Unable to copy mtab in place"
+
+	cat > "${mp}/usr/sbin/policy-rc.d" << EOF
+#!/bin/sh
+echo "All runlevel operations denied by policy" >&2
+exit 101
+EOF
+	chmod 0755 "${mp}/usr/sbin/policy-rc.d" ||
+		fail "Unable to make policy-rc.d executable"
+
+	rm "${mp}/etc/resolv.conf" ||
+        fail "Unable to remove /etc/resolv.conf"
+
+    cp /etc/resolv.conf "${mp}/etc/resolv.conf" &&
+		debug "Placed resolv.conf in image" ||
+		fail  "Failed to place resolv.conf in image"
+
+    debug "Placed policy-rc.d to prevent init(.d) operations"
+}
+
+restore_files() {
+	for f in "${file_list[@]}"
+	do
+		cp -au "${image_dir}/orig/${f}" "${mp}/${f}" &&
+			debug "Restored ${f}" ||
+			error "Unable to restore ${f}"
+	done
+
+	for f in "${del_list[@]}"
+	do
+		rm -rf "${mp}/${f}" &&
+			debug "Removed ${f}" ||
+			error "Unable to remove ${f}"
+	done
+
+	rm -rf "${image_dir}/orig" || true
+
+	rm "${mp}/etc/resolv.conf" ||
+		fail "Failed to remove /etc/resolv.conf"
+
+	chroot "${mp}" ln -snf ../run/resolvconf/resolv.conf /etc/resolv.conf
+}
+
+build_chroot() {
+
+	growpart /dev/vdd 1 &&
+		debug "Partition 1 on /dev/vdd grown" ||
+		fail "Failed to grow partition"
+
+	e2fsck -y -f /dev/vdd1 &&
+		debug "Checking root file system" ||
+		fail "Unable to check root file system"
+
+	resize2fs -f /dev/vdd1 &&
+		debug "Resized /dev/vdd1" ||
+		fail "Unable to resize root file system"
+
+	mount /dev/vdd1 "${mp}" &&
+		debug "Mounted root file system" ||
+		fail  "Unable to mount root file system"
+
+	mount --bind /dev "${mp}/dev" &&
+		debug "Mounted bound dev under ${mp}" ||
+		fail  "Unable to mount dev under ${mp}"
+
+	mount devpts-live -t devpts "${mp}/dev/pts" &&
+		debug "Mounted devpts-live under ${mp}" ||
+		fail  "Error mounting devpts under ${mp}"
+
+	mount sysfs-live -t sysfs "${mp}/sys" &&
+		debug "Mounted sysfs-live under ${mp}" ||
+		fail  "Failed to mount sysfs-live under ${mp}"
+
+	mount proc-live -t proc "${mp}/proc" &&
+		debug "Mounted proc-live under ${mp}" ||
+		fail  "Failed to mount proc under ${mp}"
+
+	df -h "${mp}"
+	operation_files
+
+}
+
+tear_chroot() {
+	restore_files
+	sync
+	sleep 5
+
+	umount -f "${mp}/dev/pts" &&
+		debug "Unmounted /dev/pts in chroot" ||
+		fail "Unable to dismount /dev/pts in chroot"
+
+	umount -f "${mp}/dev" &&
+		debug "Unmounted /dev in chroot" ||
+		fail  "Unable to dismount /dev in chroot"
+
+	umount -f "${mp}/proc" &&
+		debug "Unmounted /proc in chroot" ||
+		fail "Unable to dismount /proc in chroot"
+
+	umount -f "${mp}/sys" &&
+		debug "Unmounted /sys in chroot" ||
+		fail "Unable to dismount /sys in chroot"
+
+	umount -f "${mp}" &&
+		debug "Unmounted ${mp}" ||
+		fail "Unable to dismount root"
+
+	# Zero out the disk for better compression
+	zerofree /dev/vdd1
+}
+
+
+xchroot() {
+	_chroot="chroot"
+	bitness=${bitness:-$(chroot "${mp}" getconf LONG_BIT)}
+
+	[ "${bitness}" == "32" ] && _chroot="linux32 chroot"
+
+	debug "Running chroot: ${@}"
+ 	( DEBIAN_FRONTEND=noninteractive LANG=C LC_ALL=C ${_chroot} "${mp}" "${@}" ) &&
+		debug "   Command successful" ||
+		{ debug "   Command FAILED!"; sleep 60; fail "chrooted-command failed!"; }
+}
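+# Note: on a 32-bit image `getconf LONG_BIT` reports 32, so commands are
+# wrapped as `linux32 chroot` to present a 32-bit personality to the guest.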
+
+fake_cloud_init() {
+	seed_d="${mp}/var/lib/cloud/seed/nocloud-net"
+	mkdir -p "${seed_d}"
+
+    cat << EOF > "${seed_d}/meta-data"
+instance-id: vagrant-cloud_img-${1}-${2}
+EOF
+
+	debug "Cloud image instance ID set to vagrant-cloud_img-${1}-${2}"
+	cat << EOF > "${seed_d}/user-data"
+#cloud-config
+output: {all: '| tee -a /var/log/cloud-init-output.log'}
+ssh_pwauth: True
+EOF
+}
+
+# Setup the environment
+build_chroot
+
+# Detect which suite we're working with
+suite="$(chroot ${mp} lsb_release -c -s)"
+arch="$(chroot ${mp} getconf LONG_BIT)"
+
+# Populate fake cloud-init
+fake_cloud_init ${suite} ${arch}
+
+# Setup the vagrant user
+xchroot useradd -m vagrant -s /bin/bash
+echo "root:vagrant" | chroot ${mp} chpasswd
+echo "vagrant:vagrant" | chroot ${mp} chpasswd
+
+cat << EOF > ${mp}/etc/sudoers.d/vagrant
+${CLOUD_IMG_STR}
+vagrant ALL=(ALL) NOPASSWD:ALL
+EOF
+
+xchroot chmod 0440 /etc/sudoers.d/vagrant
+xchroot mkdir -p /home/vagrant/.ssh
+
+cat << EOF > ${mp}/home/vagrant/.ssh/authorized_keys
+${CLOUD_IMG_STR}
+ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA6NF8iallvQVp22WDkTkyrtvp9eWW6A8YVr+kz4TjGYe7gHzIw+niNltGEFHzD8+v1I2YJ6oXevct1YeS0o9HZyN1Q9qgCgzUFtdOKLv6IedplqoPkcmF0aYet2PkEDo3MlTBckFXPITAMzF8dJSIFo9D8HfdOV0IAdx4O7PtixWKn5y2hMNG0zQPyUecp4pzC6kivAIhyfHilFR61RGL+GPXQ2MWZWFYbAGjyiYJnAmCP3NOTd0jMZEnDkbUvxhMmBYSdETk1rRgm+R4LOzFUGaHqHDLKLX+FIPKcF96hrucXzcWyLbIbEgE98OHlnVYCzRdK8jlqm8tehUc9c9WhQ== vagrant insecure public key
+EOF
+
+# Dirty, dirty hack, but it's Vagrant, so who cares?
+xchroot dpkg-reconfigure openssh-server
+
+xchroot chown -R vagrant: /home/vagrant/.ssh
+
+# Gets rid of an annoying message for shell provisioner
+xchroot sed -i "s|mesg n|tty -s \&\& mesg n|g" /root/.profile
+xchroot sed -i "1i${CLOUD_IMG_STR}" /root/.profile
+
+# Give the host a hostname
+echo "vagrant-ubuntu-${suite}-${arch}" > ${mp}/etc/hostname
+
+# install support statement
+cat << EOF >> "${mp}/etc/update-motd.d/98-cloudguest"
+#!/bin/sh
+#
+# Notification on support of vagrant images
+#
+echo ""
+echo "This is a customized Ubuntu Server Cloud Image for use within"
+echo "the Vagrant Development environment. See http://http://www.vagrantbox.es/";
+echo "for more information"
+echo ""
+echo "Support: Only the base packages installed from the main archive are supported by"
+echo "         Canonical. Virtualbox, Vagrant, and Chef are 'universe' packages."
+echo "         The interaction between these components and universe dependencies are"
+echo "         supported by the universe community. These images are not Canonical"
+echo "         supported and are intended only for development purposes."
+echo ""
+${CLOUD_IMG_STR}
+EOF
+
+# Install the VirtualBox components
+xchroot apt-get -y update
+
+# Set the package installation options
+pkgs=(virtualbox-guest-dkms
+      virtualbox-guest-utils
+      linux-headers-generic
+      nfs-common
+      python-apport
+      puppet
+      byobu
+      juju
+      ruby
+      rubygems
+      libnss-myhostname
+      )
+
+rpkgs=(grub-legacy-ec2)
+
+if [ "${suite}" == "precise" ]; then
+	pkgs+=(linux-headers-generic-lts-raring
+		   linux-generic-lts-raring)
+	rpkgs+=($(xchroot dpkg -l | awk '/linux-(image|headers).*virtual/ {print$2}'))
+	rpkgs+=(linux-image-virtual)
+elif [ "${suite}" == "trusty" ]; then
+	pkgs=(${pkgs[@]//rubygems/} chef)
+else
+	pkgs+=(chef)
+fi
+
+cat ${mp}/etc/apt/sources.list
+
+xchroot sed -i "s,ttyS0,tty0,g" /etc/default/grub
+xchroot apt-get -y install ${pkgs[@]}
+xchroot apt-get -y purge ${rpkgs[@]}
+xchroot apt-get -y -f install
+xchroot apt-get -y dist-upgrade
+xchroot apt-get -y clean
+xchroot apt-get -y autoclean
+xchroot apt-get -y autoremove
+xchroot apt-get -y update
+xchroot update-grub
+
+chroot ${mp} dpkg-query -W | column -t > "${PWD}/vagrant_image.pkgs"
+
+# Cleanup
+tear_chroot
+
+# Final work
+touch success
+tar -cvf /dev/vdb success build.log vagrant_image.pkgs
+
+# Finish
+end 0

=== added file 'templates/img-vps.tmpl'
--- templates/img-vps.tmpl	1970-01-01 00:00:00 +0000
+++ templates/img-vps.tmpl	2014-07-28 14:45:55 +0000
@@ -0,0 +1,67 @@
+#!/bin/bash
+# Disable the cloud-init functions by seeding a minimal NoCloud datasource.
+
+
+exec > >(tee /tmp/build-$(date +%s).log) 2>&1
+echo "127.0.1.1 $HOSTNAME $HOSTNAME" >> /etc/hosts
+error() { echo "$@" 1>&2; }
+fail() {
+    [ $# -eq 0 ] || error "$@";
+    echo "Failed" > /tmp/failed;
+    tar -C /tmp -cvf /dev/vdb failed build*.log;
+    shutdown -P now;
+    exit 1;
+}
+
+debug() { error "$(date -R):" "$@"; }
+sysfail() { fail "General failure!"; }
+
+trap sysfail SIGINT SIGTERM
+set -x
+
+seed_d="/mnt/var/lib/cloud/seed/nocloud-net"
+rhostid=$(uuidgen | cut -c -8)
+
+# Mount the root file system and write the seed files
+mount /dev/vdd1 /mnt ||
+    fail "Unable to mount root file system"
+
+mkdir -p "${seed_d}" ||
+    fail "Unable to create seed directory"
+
+
+# See what's mounted
+blkid
+df
+
+# Seed config files
+cat > ${seed_d}/meta-data <<EOF
+instance-id: vps-${rhostid}
+EOF
+
+cat > ${seed_d}/user-data <<EOF
+#cloud-config
+manage_etc_hosts: localhost
+EOF
+
+
+find /mnt/var/lib/cloud/
+
+[ -e "${seed_d}/user-data" ] &&
+    cat ${seed_d}/user-data ||
+    fail "User-data was not written to disk"
+
+[ -e "${seed_d}/meta-data" ] &&
+    cat ${seed_d}/meta-data ||
+    fail "Meta-data was not written to disk"
+
+# Cleanup
+umount /dev/vdd1
+
+
+# Finish the work
+echo "Taring up artifacts to /dev/vdb!"
+touch /tmp/success
+tar -C /tmp -cvf /dev/vdb maas_images success build*.log "${OUT_D}" >> /dev/null
+sync
+shutdown -P now

=== added directory 'tests'
=== added file 'tests/azure-node-settings-tool.py'
--- tests/azure-node-settings-tool.py	1970-01-01 00:00:00 +0000
+++ tests/azure-node-settings-tool.py	2014-07-28 14:45:55 +0000
@@ -0,0 +1,111 @@
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, either version 3 of the License, or
+#    (at your option) any later version.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+#    Copyright 2013, Ben Howard <ben.howard@xxxxxxxxxx>
+#
+import argparse
+import base64
+import json
+import shutil
+import os
+import os.path
+import sys
+import xml.etree.ElementTree as ET
+import xml.dom.minidom
+from OpenSSL.crypto import *
+
+parser = argparse.ArgumentParser()
+parser.add_argument('--file',
+        metavar='file',
+        type=str,
+        required=True,
+        help='Your .publishsettings file')
+parser.add_argument('--out',
+        help="Where to dump the stuff",
+        default=None,
+        required=True)
+parser.add_argument('--base64',
+        action="store_true",
+        default=False,
+        help="Read the file in as base64")
+
+args = parser.parse_args()
+
+tree = None
+
+# Allow for reading the settings as base64
+if args.base64:
+    decoded = None
+    with open(args.file) as f:
+        decoded = base64.b64decode(f.read())
+    tree = ET.fromstring(decoded)
+else:
+    tree = ET.parse(args.file)
+
+assert tree is not None
+
+pp = tree.find('PublishProfile')
+raw_cert, management_host, cert, subscription_id = None, None, None, None
+
+if pp.get('SchemaVersion') == "2.0":
+    subscription = pp.find('Subscription')
+    management_host = subscription.get('ServiceManagementUrl')
+    raw_cert = subscription.get('ManagementCertificate')
+    subscription_id = pp.find('Subscription').get('Id')
+
+else:
+    management_host = pp.get('Url')
+    raw_cert = pp.get('ManagementCertificate')
+    subscription_id = pp.find('Subscription').get('Id')
+
+cert = load_pkcs12(base64.decodestring(raw_cert))
+config_json = { 'endpoint': management_host,
+                'subscription': subscription_id }
+
+management_cert = "".join([
+                    dump_privatekey(FILETYPE_PEM, cert.get_privatekey()),
+                    dump_certificate(FILETYPE_PEM, cert.get_certificate()),
+                    ])
+
+# The cert is RSA, but for some reason it wasn't encoded properly. GAK
+management_cert = management_cert.replace('BEGIN PRIVATE', 'BEGIN RSA PRIVATE')
+management_cert = management_cert.replace('END PRIVATE', 'END RSA PRIVATE')
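+# i.e. "-----BEGIN PRIVATE KEY-----" becomes "-----BEGIN RSA PRIVATE KEY-----"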
+
+# Make sure the spot exists
+azure_out = str(os.path.abspath("%s/.azure" % args.out))
+if os.path.exists(azure_out) and \
+    not os.path.isdir(azure_out):
+    raise Exception("%s must not be a regular file" % azure_out)
+
+elif not os.path.exists(azure_out):
+    print "Putting the files in %s" % azure_out
+    os.makedirs(azure_out)
+
+# Write the cert path
+cert_path = "%s/managementCertificate.pem" % azure_out
+with open(cert_path, 'w') as f:
+    f.write(management_cert)
+
+# Write the config_json
+json_path = "%s/config.json" % azure_out
+with open(json_path, 'w') as f:
+    json.dump(config_json, f)
+
+# Copy the settings into place
+settings_path = "%s/publishSettings.xml" % azure_out
+if not os.path.exists(settings_path):
+    shutil.copy(args.file, settings_path)

=== added file 'tests/azure.sh'
--- tests/azure.sh	1970-01-01 00:00:00 +0000
+++ tests/azure.sh	2014-07-28 14:45:55 +0000
@@ -0,0 +1,274 @@
+#!/bin/bash
+retcode=0
+error() { echo "$@" 1>&2; }
+fail() { error "$@"; sleep 120; retcode=1; exit 1; }
+debug() { error "$(date -R):" "$@"; }
+# Look for the REGISTRATION record; it describes what to test,
+# so it is important that it is present.
+REGISTRATION_RECORD="${WORKSPACE}/build_properties"
+[ -z "${OVERRIDE}" ] && {
+    [ -e "${REGISTRATION_RECORD}" ] && {
+    source ${REGISTRATION_RECORD} ||
+        fail "Registration information is missing. Failing."
+    }
+}
+
+#Setup control variables
+test_suite=${1}
+def_user="ubutest"
+def_pass="$(openssl rand -base64 13 | awk '-F=' '{print$1}')#1"
+vm_name_base="test-$(openssl rand -hex 10)"
+running=()  # collection of images running
+
+#Reset some pathing
+export HOME="${WORKSPACE}"
+export PATH="${NODE_LIB}:${LOCAL_LIB}:${LOCAL_LOCAL}:${LOCAL_BIN}:${PATH}"
+my_dir="$( cd "$( dirname "$0" )" && pwd )"
+base_dir=$(dirname ${my_dir})
+
+#Sanity checking!
+[ -z "${SUITE}" ] && fail "Suite is undefined!"
+[ -z "${SERIAL}" ] && fail "Serial is undefined!"
+[ -z "${REGISTRATION}" ] && fail "Registration name is undefined"
+
+# The certificate should exist in the workspace, base64 encoded
+# The file should be at ${WORKSPACE}/cred.b64
+
+cleaner() {
+    [ -e ${WORKSPACE}/.azure ] && wipe -R ${WORKSPACE}/.azure
+    }
+
+trap cleaner EXIT
+
+# Unpack the security creds
+export HOME=${WORKSPACE}
+python ${my_dir}/azure-node-settings-tool.py \
+    --file ${WORKSPACE}/cred.b64 \
+    --out ${WORKSPACE} \
+    --base64 ||
+        fail "Failed to decode the credential file"
+
+#Output something pretty
+set +x
+cat <<EOM
+_________________________________________________________________
+
+*****************************************************************
+
+TESTING:
+    Suite: ${SUITE}
+    Serial: ${SERIAL}
+    Registration: ${REGISTRATION}
+
+Test: ${test_suite}
+
+*****************************************************************
+_________________________________________________________________
+EOM
+set -x
+
+#Retry command
+retry() {
+    local trycount=${1} sleep=${2}
+    shift; shift;
+    local i=0 smsg=" sleeping ${sleep}: $*" ret=0
+    for((i=0;i<${trycount};i++)); do
+        "$@" && return 0
+        ret=$?
+        [ $(($i+1)) -eq ${trycount} ] && smsg=""
+        debug "Warning: cmd failed [try $(($i+1))/${trycount}].${smsg}"
+        sleep $sleep
+    done
+    return $ret
+}
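+# Usage sketch: retry <tries> <sleep> <command...>, e.g.
+#   retry 3 30 azure vm delete -q ${vmname}
+# runs the delete up to 3 times, sleeping 30s between failed attempts.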
+
+#Kill instance
+terminate() {
+    local disk=$(azure vm disk list  2> /dev/null | grep ${1} | awk '{print$2}')
+
+    retry 3 30 azure vm delete -q ${1} ||
+        error "Failed to delete VM ${1}"
+
+    error "Sleeping for two minutes to allow disk to settle"
+    sleep 120
+
+    [ -n "${disk}" ] &&
+        retry 3 30 azure vm disk delete ${disk} ||
+            error "Failed to delete disk ${disk}"
+
+    azure service delete ${1} -q ||
+        error "Failed to delete hosted service"
+
+    #remove it from list of running images
+    running=(${running[@]//${1}/})
+}
+
+#Kill all instances
+terminate_all() {
+    for img in ${running[@]}
+    do
+        terminate ${img}
+    done
+}
+
+#Make sure any exit, kills all running instances
+trap terminate_all EXIT
+
+#Launch an instance
+run_instance() {
+    local vmname=${1}
+    local extras=(${@})
+    unset extras[0];
+
+    local run_cmd=(
+           azure
+           vm
+           create
+           ${vmname}
+           ${REGISTRATION}
+           --location 'North Europe'
+           --vm-size extrasmall
+           -v
+           -e 22
+           ${def_user}
+           ${def_pass}
+        )
+
+    [ -n "${extras[@]}" ] &&
+        run_cmd+=(${extras[@]})
+
+    "${run_cmd[@]}" || fail "Unable to launch instance"
+    running+=(${1})
+}
+
+check_instance() {
+    local status=$(azure vm show ${1} | awk '-F"' '/InstanceStatus/ {print$2}')
+    [ "${status}" = "ReadyRole" ] && return 0
+    [ "${status}" = "FailedProvisioning" ] && fail "Failed provisioning"
+    return 1
+}
+
+get_dns() {
+    dns=$(azure vm show ${1} |
+                 awk '-F"' '/DNSName/  {print$2}')
+
+    [ -n "${dns}" ] && echo "${dns}" ||
+        fail "Unable to determine DNS name for ${1}"
+}
+
+wait_for() {
+    local sleeptime=${1}
+    local max_count=${2}
+    local command=(${@})
+    unset command[0]; unset command[1];
+    local count=0
+
+    while sleep ${sleeptime}; do
+        ${command[@]} && return 0
+        count=$(expr ${count} + 1 )
+
+        if [ "${count}" -ge "${max_count}" ]; then
+            return 1
+        fi
+    done
+}
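+# Usage sketch: wait_for <sleep> <max-tries> <command...>, e.g.
+#   wait_for 30 60 check_instance ${vmname}
+# polls check_instance every 30s, giving up after 60 tries (~30 minutes).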
+
+
+# Test password-based SSH
+password_ssh() {
+    which sshpass > /dev/null 2>&1 ||
+        sudo apt-get -y install sshpass ||
+            fail "Required program, sshpass, is not installed"
+
+    # create instance
+    local vmname="${vm_name_base}-pass"
+    run_instance ${vmname}
+
+    wait_for 30 60 check_instance ${vmname} ||
+        fail "Instance failed to initialize in time"
+
+    local dnsname=$(get_dns ${vmname})
+    error "Sleeping for 3 mintues...to let things settle"
+    sleep 180
+
+    # Give ourselves sudo ability.....
+    # This is pretty much brute force...but it does test sudo
+    error "Clobbering sudo permissions via brute force..."
+    ${my_dir}/passless-sudoifer --host ${dnsname} --user ${def_user} \
+                                --passwd ${def_pass}
+
+    # Run Jenkins test runner
+    error "Running test suite..."
+    ${my_dir}/jenkins-ssh --host ${dnsname} --user ${def_user} \
+                          --passwd ${def_pass} --file ${my_dir}/run-azure.sh &&
+        error "Test suite passed successfully" ||
+        fail  "Test suite failed run"
+
+}
+
+# Test passwordless ssh
+key_ssh() {
+
+    # This doesn't work at this time
+    exit 0
+
+    # SSH key creation for Azure posted here:
+    # https://www.windowsazure.com/en-us/manage/linux/how-to-guides/ssh-into-linux/
+    openssl req \
+        -x509 \
+        -batch \
+        -nodes \
+        -days 365 \
+        -newkey rsa:2048 \
+        -keyout myPrivateKey.key \
+        -out myCert.pem
+    chmod 600 myPrivateKey.key
+    openssl  x509 -outform der -in myCert.pem -out cert.pem ||
+        fail "Failed to generate SSH certificate"
+
+    ssh-keygen -e -f myPrivateKey.key > openssh.pem ||
+        fail "Unable to create public SSH certificate"
+
+    # Launch the instance
+    vmname="${vm_name_base}-kssh"
+    vmargs="-t ${WORKSPACE}/cert.pem --no-ssh-password"
+    run_instance ${vmname} ${vmargs}
+
+    wait_for 15 60 check_instance ${vmname} ||
+        fail "Instance failed to initialize in time"
+
+    dnsname=$(get_dns ${vmname})
+
+    # Check if things are working
+    ssh_cmd="ssh -i ${WORKSPACE}/myPrivateKey.key -t -t ${def_user}@${dnsname}"
+
+    # Get the dmesg output
+    (${ssh_cmd} "dmesg") &&
+        error "Instance passed boot test" ||
+        fail  "Instance failed boot test"
+
+    # Get SSH commands
+    (${ssh_cmd} < ${WORKSPACE}/cmds.script) &&
+        error "Tests suite ran successfully" ||
+        fail  "Failed during test suite run"
+
+}
+
+# Look at image characteristics
+characteristics() {
+    exit 0
+}
+
+case ${1} in
+    Password-SSH)       password_ssh;;
+    Key-SSH)            key_ssh;;
+    Characteristics)    characteristics;;
+esac
+
+terminate_all
+
+# Unset trap
+trap - EXIT
+exit ${retcode}

=== added file 'tests/decider.py'
--- tests/decider.py	1970-01-01 00:00:00 +0000
+++ tests/decider.py	2014-07-28 14:45:55 +0000
@@ -0,0 +1,95 @@
+#!/usr/bin/python
+
+# The legacy Jenkins test is buggy due to the cloud being the cloud.
+# This is a test evaluator that determines whether or not to pass/fail
+# a jenkins build flow. 
+
+import argparse
+import json
+import urllib2
+import sys
+import time
+
+parser = argparse.ArgumentParser()
+parser.add_argument('--host',
+        action="store",
+        default="localhost:8080",
+        help="Host address to hit")
+parser.add_argument('--test',
+        action="store",
+        default=None,
+        help="Test suite to parse")
+parser.add_argument('--okay',
+        action="store",
+        type=int,
+        default=3,
+        help="How many failures are okay to pass test")
+
+opts = parser.parse_args()
+
+sys.stderr.write("Using host: %s\n" % opts.host)
+
+if not opts.test:
+    sys.stderr.write("Must define test with --test\n")
+    sys.exit(3)
+
+counts={}
+
+def check():
+    c = {}
+    r = {}
+    url = "http://%s/job/%s/api/json?pretty=true"; % (opts.host, opts.test)
+    response = urllib2.urlopen(url)
+    json_out = response.read()
+    json_d = json.loads(json_out)
+    results= json_d['activeConfigurations']
+
+    for result in results:
+        color = result['color']
+
+        if color not in c:
+            c[color] = 1
+        else:
+            c[color] += 1
+
+    print ""
+    print "Results from build are:"
+    print json.dumps(c, indent=4)
+    print ""
+
+    return c
+
+# Valid ball colors:
+# blue: Pass
+# grey: waiting
+# grey_anime: executing
+# red: Fail
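+#
+# A single activeConfigurations entry looks roughly like this
+# (hypothetical values):
+#   {"name": "REGION=us-east-1,ARCH=amd64", "color": "blue", "url": "..."}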
+
+counts = check()
+
+# Wait for test to complete
+while 'grey_anime' in counts:
+    print "Sleeping 5 minutes to wait for build completion"
+    time.sleep(300)
+    counts = check()
+
+# Populate the colors to be safe
+check_colors = "aborted", "grey", "red", "green"
+for color in check_colors:
+    if color not in counts:
+        counts[color] = 0
+
+
+# Check 'em
+if counts['red'] > opts.okay:
+    print "Failure rate of %s is above threshhold, Failing." % counts['red']
+    sys.exit(2)
+
+elif counts['aborted'] > 0:
+    print "This test looks aborted. Failing!"
+    sys.exit(2)
+
+else:
+    print "Failure rate is below fail threshold. Passing."
+    sys.exit(0)
+

=== added file 'tests/jenkins-ssh'
--- tests/jenkins-ssh	1970-01-01 00:00:00 +0000
+++ tests/jenkins-ssh	2014-07-28 14:45:55 +0000
@@ -0,0 +1,68 @@
+#!/usr/bin/python
+import argparse
+import os
+import paramiko
+import re
+import sys
+
+status_re="^status=\d$"
+
+parser = argparse.ArgumentParser(description='Run Azure tests')
+parser.add_argument('--user', default="ubutest",
+                    help="Username of the user")
+parser.add_argument('--passwd',
+                    help="Password of the user")
+parser.add_argument('--host', required=True,
+                    help="Hostname to test")
+parser.add_argument('--key', default=None,
+                    help="Key to use for authentication")
+parser.add_argument('--file', required=True,
+                    help="File to execute on on remote note")
+parser.add_argument('--port', default=22, type=int,
+                    help="Which SSH port to use")
+opts = parser.parse_args()
+
+if not opts.passwd and not opts.key:
+    raise Exception("Must define either --passwd or --key")
+
+if opts.key and not os.path.exists(opts.key):
+    raise IOError("SSH Key does not exist")
+
+if not os.path.exists(opts.file):
+    raise IOError("File %s does not exists" % opts.file)
+
+ssh = paramiko.SSHClient()
+ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+ssh.connect(opts.host, username=opts.user, password=opts.passwd,
+            key_filename=opts.key, port=opts.port)
+
+cmd = None
+with open(opts.file, 'r') as f:
+    cmd = f.read()
+
+if not cmd:
+    raise Exception("Nothing to run on remote host")
+
+stdin, stdout, stderr = ssh.exec_command(cmd)
+out = stdout.read()
+err = stderr.read()
+ssh.close()
+
+print "Standard out: \n%s" % out
+print "Standard err: \n%s" % err
+
+# Extract the status codes
+status_codes = []
+status_codes.extend(re.findall("status=\d", out))
+status_codes.extend(re.findall("status=\d", err))
+
+if len(status_codes) > 0:
+    try:
+        last_code = int((status_codes[-1]).split('=')[-1])
+        print "Reported status code is %s" % last_code
+        sys.exit(last_code)
+    except Exception as e:
+        print "Errored on parsing"
+        pass
+
+sys.exit(-1)

=== added file 'tests/passless-sudoifer'
--- tests/passless-sudoifer	1970-01-01 00:00:00 +0000
+++ tests/passless-sudoifer	2014-07-28 14:45:55 +0000
@@ -0,0 +1,57 @@
+#!/usr/bin/python
+# This gives us the ability to do SSH stuff
+import argparse
+import os
+import paramiko
+import re
+import sys
+
+status_re="^status=\d$"
+
+parser = argparse.ArgumentParser(description='Run Azure tests')
+parser.add_argument('--user', default="ubutest",
+                    help="Username of the user")
+parser.add_argument('--passwd',
+                    help="Password of the user")
+parser.add_argument('--host', required=True,
+                    help="Hostname to test")
+parser.add_argument('--key', default=None,
+                    help="Key to use for authentication")
+parser.add_argument('--port', default=22, type=int,
+                    help="Which SSH port to use")
+opts = parser.parse_args()
+
+if not opts.passwd and not opts.key:
+    raise Exception("Must define either --passwd or --key")
+
+if opts.key and not os.path.exists(opts.key):
+    raise IOError("SSH Key does not exist")
+
+ssh = paramiko.SSHClient()
+ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+ssh.connect(opts.host, username=opts.user, password=opts.passwd,
+            key_filename=opts.key, port=opts.port)
+
+sudo_clober = """cat <<EOF > /tmp/askpass.sh
+#!/bin/bash
+echo %s
+EOF
+
+chmod 0700 /tmp/askpass.sh
+export SUDO_ASKPASS=/tmp/askpass.sh
+sudo -A sed -i -e 's|%s.*|%s ALL=(ALL) NOPASSWD:ALL|g' \
+                  /etc/sudoers.d/*
+
+sudo rm /etc/sudoers.d/README
+sudo -A cat /etc/sudoers.d/*
+rm /tmp/askpass.sh
+""" % (opts.passwd, opts.user, opts.user)
+
+
+stdin, stdout, stderr = ssh.exec_command(sudo_clobber)
+print stdout.read()
+print stderr.read()
+ssh.close()

=== added file 'tests/run-azure.sh'
--- tests/run-azure.sh	1970-01-01 00:00:00 +0000
+++ tests/run-azure.sh	2014-07-28 14:45:55 +0000
@@ -0,0 +1,24 @@
+#!/bin/bash
+exec > >(tee test.log)
+echo "--------------------------------------"
+set -x
+dmesg
+sudo cat /var/log/cloud-init.log
+sudo cat /var/log/waagent.log
+sudo cat /var/lib/waagent/ovf-env.xml
+set +x
+echo "--------------------------------------"
+
+sudo apt-get -y update
+sudo apt-get -y install python-nose python-mocker bzr
+
+[ -e /tmp/jenkins_kvm ] && rm -rf /tmp/jenkins_kvm
+bzr branch http://bazaar.launchpad.net/~ubuntu-on-ec2/vmbuilder/jenkins_kvm \
+    /tmp/jenkins_kvm
+
+cd /tmp/jenkins_kvm/tests
+
+echo "--------------------------------------"
+nosetests test-azure.py
+echo "status=$?"
+echo "--------------------------------------"

=== added file 'tests/test-azure.py'
--- tests/test-azure.py	1970-01-01 00:00:00 +0000
+++ tests/test-azure.py	2014-07-28 14:45:55 +0000
@@ -0,0 +1,235 @@
+from mocker import MockerTestCase
+import lsb_release
+import os
+import re
+import logging
+import subprocess as subprc
+import shlex
+import traceback
+import yaml
+from cloudinit import util as ciutil
+
+# Setup the logger
+log = logging.getLogger('AzureTest')
+logging.basicConfig(format= \
+            '%(asctime)s  %(levelname)s - %(message)s')
+log.setLevel(logging.INFO)
+
+
+def run_cmd(cmd):
+    try:
+        out, _err = ciutil.subp(cmd)
+        return out, _err
+    except Exception as e:
+        log.critical("run_cmd: execution error")
+        log.critical(traceback.format_exc())
+        return None, e
+
+
+class TestConfig(MockerTestCase):
+    def setUp(self):
+        super(TestConfig, self).setUp()
+        self.ci_unsupported = ['quantal']
+        distinfo = lsb_release.get_distro_information()
+        self.suite = distinfo['CODENAME']
+
+    def is_cloud_init_supported(self):
+        if self.suite in self.ci_unsupported:
+            return False
+        return True
+
+    def test_suite(self):
+        """
+        Test that LSB functions work
+        """
+        log.info("Confirming that suite was detected")
+        assert self.suite is not None
+        log.info("Running test suite against %s" % self.suite)
+
+    def test_is_disk_big(self):
+        """
+        Confirm disk size is 30GB
+        """
+        cmd = ['df', '/']
+        out, _err = run_cmd(cmd)
+
+        log.info("Confirming root is proper size")
+        for line in out.splitlines():
+            _line = line.split()
+
+            if _line[-1] == "/":
+                log.info("Root disk size detected at %s" % _line[-2])
+
+                block_count = _line[1]
+                log.info("Root disk block count is %s" % block_count)
+
+                assert int(block_count) > 30000000
+
+    def test_ephemeral(self):
+        """
+        Test that ephemeral is mounted to the right place
+        """
+        mount = None
+        mt_pts = ["/mnt", "/mnt/resource"]
+        with open('/proc/mounts', 'r') as f:
+            for line in f.readlines():
+                dev, mnt, fs, _, _, _ = line.split()
+                if mnt in mt_pts:
+                    mount = mnt
+
+        assert mount is not None
+        if self.is_cloud_init_supported():
+            self.assertEquals(mount, "/mnt")
+        else:
+            self.assertEquals(mount, "/mnt/resource")
+
+
+    def test_sudo(self):
+        """
+        Test that passwordless sudo works for cloud-init supported releases
+        """
+        if not self.is_cloud_init_supported():
+            return
+
+        log.info("Testing ability to call sudo")
+        returncode = subprc.call(["/usr/bin/sudo", "/usr/bin/id", "-u"])
+
+        log.info("Got %s as the UID" % returncode)
+        assert int(returncode) == 0
+
+    def test_pkg_list(self):
+        """
+        Confirm that required packages are installed
+            Required Packages:
+                linux-image-extra-virtual
+                hv-kvp-daemon-init
+        """
+        log.info("Checking for dpkg output")
+        cmd = ['/usr/bin/dpkg-query', '-W']
+        out, _err = run_cmd(cmd)
+
+        log.info("Checking to make sure that output is good")
+        if not out:
+            log.critical(_err)
+            log.critical("dpkg-query failed")
+
+        assert out is not None
+
+        # Look for the kernel packages extra
+        packages = ['hv-kvp-daemon-init',
+                     'walinuxagent']
+
+        if self.suite == 'trusty':
+            packages.append('linux-cloud-tools-virtual')
+        else:
+            packages.append('linux-image-extra-virtual')
+
+        log.info("Checking on installed packages")
+        for pkg in packages:
+            if pkg in out:
+                log.info("Found package %s" % pkg)
+                found_k = True
+            else:
+                log.critical("package %s was not found" % pkg)
+                raise Exception("did not find kernel pkg %s" % pkg)
+
+    def test_archive(self):
+        """
+        Confirm archive configuration
+            Regular archive should have azure.archive.ubuntu.com/ubuntu
+            Security archive should have security.ubuntu.com/ubuntu
+        """
+        consider_re = re.compile("^deb.*")
+        sec_arch = "%s-security" % self.suite
+        archive_url = "http://azure.archive.ubuntu.com/ubuntu";
+        security_url = "http://security.ubuntu.com/ubuntu";
+
+        log.info("Parsing /etc/apt/sources.lists")
+        with open('/etc/apt/sources.list', 'r') as sources:
+            for line in sources.readlines():
+                if not consider_re.match(line):
+                    continue
+
+                _line = line.split()
+                if sec_arch in _line[2]:
+                    log.info("Found security archive at %s" % _line[1])
+                    assert security_url in _line[1]
+                else:
+                    log.info("Found archive at %s" % _line[1])
+                    assert archive_url in _line[1]
+
+        log.info("Finished archive parsing")
+
+    def test_cloud_init_datasources(self):
+        """
+        Confirm Azure datasource supported releases
+            Should not exist: /var/lib/cloud/seed/nocloud-net/user-data
+            Should not exist: /var/lib/cloud/seed/nocloud-net/meta-data
+            File attributes for /etc/cloud/cloud.cfg.d/90_dpkg.cfg
+                Datasource should HAVE Azure in list
+                Datasource should NOT HAVE NoCloud and ConfigDrive
+        """
+        if not self.is_cloud_init_supported():
+            log.info("Skipping test for cloud-init datasource")
+            return
+
+        log.info("Reading YAML of /etc/cloud/cloud.cfg.d/90_dpkg.cfg in")
+        cfg = None
+        with open('/etc/cloud/cloud.cfg.d/90_dpkg.cfg', 'r') as _cfg:
+            cfg = yaml.load(_cfg)
+
+
+        log.info("Checking that YAML read-in properly")
+        assert cfg is not None
+
+        log.info("Checking that datasource is a list")
+        assert isinstance(cfg['datasource_list'], list)
+
+        log.info("Checking that Azure is in datasource")
+        assert 'Azure' in cfg['datasource_list']
+
+        log.info("Making sure that legacy user-data is NOT present")
+        assert not os.path.exists('/var/lib/cloud/seed/nocloud-net/user-data')
+
+        log.info("Making sure that legacy meta-data is NOT present")
+        assert not os.path.exists('/var/lib/cloud/seed/nocloud-net/meta-data')
+
+        log.info("Tests passed for cloud-init datasource")
+
+    def test_non_cloud_init_datasource(self):
+        """
+        Confirm nocloud datasource releases
+            Should exist: /var/lib/cloud/seed/nocloud-net/user-data
+            Should exist: /var/lib/cloud/seed/nocloud-net/meta-data
+            File attributes for /etc/cloud/cloud.cfg.d/90_dpkg.cfg
+                Datasource should not have Azure in list
+                Datasource should have NoCloud and ConfigDrive
+        """
+        if self.is_cloud_init_supported():
+            return
+
+        log.info("Reading YAML of /etc/cloud/cloud.cfg.d/90_dpkg.cfg in")
+        cfg = None
+        with open('/etc/cloud/cloud.cfg.d/90_dpkg.cfg', 'r') as _cfg:
+            cfg = yaml.load(_cfg)
+
+        log.info("Testing that datasource YAML read-in")
+        assert cfg is not None
+
+        log.info("Making sure that datasource is a list")
+        assert isinstance(cfg['datasource_list'], list)
+
+        log.info("Making sure that Azure is NOT in datasource list")
+        assert 'Azure' not in cfg['datasource_list']
+
+        log.info("Making sure that ConfigDrive is in datasource")
+        assert 'ConfigDrive' in cfg['datasource_list']
+
+        log.info("Mkaing sure that NoCloud is in datasource")
+        assert 'NoCloud' in cfg['datasource_list']
+
+        log.info("Checking to make sure that legacy user-data is present")
+        assert os.path.exists('/var/lib/cloud/seed/nocloud-net/user-data')
+
+        log.info("Checking to make sure that legacy meta-data is present")
+        assert os.path.exists('/var/lib/cloud/seed/nocloud-net/meta-data')

=== added file 'tests/tracker.py'
--- tests/tracker.py	1970-01-01 00:00:00 +0000
+++ tests/tracker.py	2014-07-28 14:45:55 +0000
@@ -0,0 +1,182 @@
+#!/usr/bin/python
+
+#
+# This reads the Jenkins json results and computes commands to run
+# against the ISOTracker
+
+import argparse
+import json
+import urllib2
+import sys
+import time
+
+from pprint import pprint as pp
+
+parser = argparse.ArgumentParser()
+parser.add_argument('--host',
+        action="store",
+        default="localhost:8080",
+        help="Host address to hit")
+parser.add_argument('--test',
+        action="store",
+        default=None,
+        help="Test suite to parse")
+parser.add_argument('--suite',
+        action="store",
+        default="raring",
+        required=True,
+        help="Which suite are we working with")
+parser.add_argument('--milestone',
+        action="store",
+        default=None,
+        required=True,
+        help="Which milestone?")
+parser.add_argument('--out',
+        action="store",
+        default=None,
+        required=True,
+        help="Output file of commands")
+parser.add_argument('--serial',
+        action="store",
+        default=None,
+        required=True,
+        help="Serial to use")
+
+opts = parser.parse_args()
+
+sys.stderr.write("Using host: %s\n" % opts.host)
+
+if not opts.test:
+    sys.stderr.write("Must define test with --test\n")
+    sys.exit(3)
+
+
+# Translations for values.
+translations = {
+    'us-east-1': "US-East",
+    'ap-northeast-1': "Asia-Pacific-NorthEast",
+    'ap-southeast-1': "Asia-Pacific-SouthEast",
+    'ap-southeast-2': "Asia-Pacific-SouthEast-Australia",
+    'eu-west-1': "Europe",
+    'sa-east-1': "South-America-East-1",
+    'us-west-1': "US-West-1",
+    'us-west-2': "US-West-2",
+    'simple-user-data': "EC2 User Data",
+    'multi-instance': "EC2 Multiple Instances Run",
+    'all-types': "EC2 Multiple Instances Run",
+    'instance-store': "Instance",
+    'ebs': "EBS",
+    'hvm:EBS': "HVM",
+    'hvm:Instance': "HVM Instance",
+    }
+
+# Get the ami lookup
+url = "http://cloud-images.ubuntu.com/query/%s/server/daily.txt"; % \
+    opts.suite.lower()
+print "Fetching AMI listing..."
+response = urllib2.urlopen(url)
+ami_lookup = {}
+serial = None
+for line in str(response.read()).splitlines():
+
+    if len(line.split()) == 9:
+        suite, _, _, serial, disk, arch, region, ami, virt = \
+            line.split()
+    else:
+        suite, _, _, serial, disk, arch, region, ami, _, virt = \
+            line.split()
+
+    # Make sure we're reading the right file
+    if suite != opts.suite.lower():
+        raise Exception("Suite does not match suite query!")
+
+    # Make sure we're using the right serial
+    if opts.serial != serial:
+        continue
+
+    if region not in ami_lookup:
+        ami_lookup[region] = {}
+
+    if arch not in ami_lookup[region]:
+        ami_lookup[region][arch] = {}
+
+    if virt not in ami_lookup[region][arch]:
+        ami_lookup[region][arch][virt] = {}
+
+    if disk not in ami_lookup[region][arch][virt]:
+        ami_lookup[region][arch][virt][disk] = ami
+
+
+# Get the json for parsing
+url = "http://%s/job/%s/api/json?pretty=true"; % (opts.host, opts.test)
+print "Fetching results..."
+response = urllib2.urlopen(url)
+json_out = response.read()
+json_d = json.loads(json_out)
+results= json_d['activeConfigurations']
+
+pass_count, fail_count = 0, 0
+status = []
+for result in results:
+    r = {}
+    url = result['url']
+    r['URL'] = url
+
+    # Extract the elements
+    for item in (url.split('./'))[1].split(','):
+        k, v = item.split('=')
+        v = v.replace('/','')
+        r[k] = v
+
+    # Assume we're PV, unless otherwise stated
+    if 'VIRT_TYPE' not in r or r['VIRT_TYPE'] == 'paravirt':
+        r['VIRT_TYPE'] = 'paravirtual'
+
+    # Get the AMI id
+    ami = ami_lookup[r['REGION']][r['ARCH']][r['VIRT_TYPE']][r['STORAGE']]
+    r['AMI'] = ami
+    r['SERIAL'] = serial
+    r['STORAGE'] = translations[r['STORAGE']]
+    r['REGION'] = translations[r['REGION']]
+    r['MILESTONE'] = "%s %s" % (opts.suite.title(), opts.milestone.title())
+
+    if r['VIRT_TYPE'] == 'hvm':
+        r['STORAGE'] = translations['hvm:%s' % r['STORAGE']]
+
+    # Label the build
+    r['PRODUCT'] = '"Ubuntu Server EC2 %(STORAGE)s (%(REGION)s) %(ARCH)s"' \
+        % r
+
+    # Make a comment
+    r['COMMENT'] = \
+        "[ %(SERIAL)s %(ARCH)s/%(STORAGE)s/%(TEST)s/%(REGION)s ]" \
+        % r
+
+    # Translate the values
+    for k, v in r.iteritems():
+        if v in translations:
+            r[k] = translations[v]
+
+    if result['color'] == 'blue':
+        r['PASS'] = 'Passed'
+        pass_count += 1
+    else:
+        r['PASS'] = 'Failed'
+        fail_count += 1
+
+    status.append(r)
+
+print "Failed Count: %s" % fail_count
+print "Pass Count: %s" % pass_count
+
+with open(opts.out, 'w') as f:
+    for item in status:
+        cmd = \
+"""tracker_update_result -u ${API_USER} -p ${API_KEY} --debug """ \
+""" "--comment=%(COMMENT)s" "%(MILESTONE)s" """ \
+""" %(PRODUCT)s "%(TEST)s" %(AMI)s %(PASS)s\n""" % item
+        f.write(cmd)
+
+
+print "Wrote %s" % opts.out

=== added file 'tracker.sh'
--- tracker.sh	1970-01-01 00:00:00 +0000
+++ tracker.sh	2014-07-28 14:45:55 +0000
@@ -0,0 +1,24 @@
+#!/bin/bash
+# Register images to tracker
+milestone=${1}
+suite=${2}
+serial=${3}
+pub_tracker=0
+
+tracker_milestone=${milestone//-/ }
+tracker_suite=($(echo ${2} | fold -w1))
+tracker_suite[0]=$(echo ${tracker_suite[0]} | tr [:lower:] [:upper:])
+tracker_suite="$(echo ${tracker_suite[@]} | sed 's| ||g')"
+
+if [ "${tracker_milestone}" = "Daily" ]; then
+    [ "${distro}" = "precise" -o "${distro}" = "trusty" ] &&
+        pub_tracker=1
+fi
+
+[ "${tracker_milestone}" != "Daily" ] && pub_tracker=1
+
+[ "${pub_tracker}" -eq 1 ] && {
+    /srv/builder/tracker/ubuntu-archive-tools/post-amis-to-iso-tracker \
+        --milestone "${tracker_suite} ${tracker_milestone}" \
+        /srv/ec2-images/${suite}/${serial}/published-ec2-daily.txt || exit 1; }
+
+exit 0

=== added file 'tweet.sh'
--- tweet.sh	1970-01-01 00:00:00 +0000
+++ tweet.sh	2014-07-28 14:45:55 +0000
@@ -0,0 +1,44 @@
+#!/bin/bash
+set -x
+# Copy new config into place for Tweeting
+cat << EOF > ${WORKSPACE}/.twitter_oauth
+${TWITTER_API_KEYS}
+EOF
+
+# Only work against server builds
+[ "${BUILD_TYPE:-server}" == "desktop" ] && exit 0
+
+# Get some version strings out
+case ${SUITE} in
+    hardy)      ver="Ubuntu 8.04 LTS";;
+    lucid)      ver="Ubuntu 10.04 LTS";;
+    natty)      ver="Ubuntu 11.04";;
+    oneiric)    ver="Ubuntu 11.10";;
+    precise)    ver="Ubuntu 12.04 LTS";;
+    quantal)    ver="Ubuntu 12.10";;
+    raring)     ver="Ubuntu 13.04";;
+esac
+
+# Urls to look for stuff
+base_url="http://cloud-images.ubuntu.com";
+release_url="${base_url}/releases/${SUITE}/release-${SERIAL}"
+milestone_url="${base_url}/release/${SUITE}/${MILESTONE_LABEL}"
+daily_url="${base_url}/${SUITE}/${SERIAL}"
+
+# Default to daily builds
+MILESTONE_LABEL=${MILESTONE_LABEL:-daily}
+
+# What message to send
+if [ "${MILESTONE_LABEL}" == "release" ]; then
+    message="Updated Cloud Images for ${ver} published at @ ${release_url}"
+elif [ -n "${DEVELOPMENT_BUILD}" ]; then
+    message="New daily devel Cloud Image for ${ver} @ ${daily_url}"
+elif [ -n "${MILESTONE_LABEL}" -a "${MILESTONE_LABEL}" != "daily" ]; then
+    message="${MILESTONE_LABEL} release for ${ver} Cloud Images released @ ${release_url}"
+else
+    message="Daily stable-release for ${ver} published @ ${release_url}"
+fi
+
+# send the thing...
+export HOME=${WORKSPACE}
+twitter set "${message}"

=== added file 'ubuntu-adj2version'
--- ubuntu-adj2version	1970-01-01 00:00:00 +0000
+++ ubuntu-adj2version	2014-07-28 14:45:55 +0000
@@ -0,0 +1,53 @@
+#!/bin/sh
+# vi: ts=4 noexpandtab
+
+Usage() {
+   cat <<EOF
+Usage: ${0##*/} adjective
+  Output the version string for a given ubuntu adjective
+  Only pays attention to the first letter of the string, and as
+  such will fail to function after 'zippy zebra'
+
+  Example:
+  - $ ${0##*/} hardy 
+    8.04
+  - $ ${0##*/} PRETTY
+    12.04
+  - $ ${0##*/} yearning
+    16.10
+EOF
+}
+error() { echo "$@" 1>&2; }
+fail() { [ $# -eq 0 ] || error "$@"; exit 1; }
+
+ubuntu_adj2ver() {
+	# over engineered function, convert ubuntu adjective to version
+	# based on its first letter.
+	local letter1="" list=defghijklmnopqrstuvwxyz
+	local baseyear=6 year="" month="04" tmp="" relnum=""
+
+	[ "$1" = "warty" -o "$1" = "Warty" -o "$1" = "WARTY" ] &&
+		{ _RET="4.10"; return 0; }
+
+	letter1=$(echo "${1}" | sed 's,\(.\).*,\1,') || return 1
+	case "${letter1}" in
+		[A-Z]) letter1=$(echo "${letter1}" | tr "[:upper:]" "[:lower:]");;
+		[a-z]) :;;
+		*) return 1;
+	esac
+
+	tmp=${list%${letter1}*}
+	[ "${tmp}" != "${list}" ] || return 1;
+	relnum=${#tmp} # number of release starting with 6.04
+	year=$((${relnum}/2 + ${baseyear}))
+
+	# if this is an even numbered release, then it's in the 4th month;
+	# odd numbered are in the 10th.
+	[ $((${relnum} % 2)) -eq 0 ] || month="10"
+	_RET="${year}.${month}"
+}
+
+[ $# -eq 1 ] || { Usage 1>&2; fail "must give single adjective"; }
+[ "$1" = "--help" -o "$1" = "-h" ] && { Usage; exit; }
+
+ubuntu_adj2ver "$1" && echo ${_RET}

=== added file 'wait_package.sh'
--- wait_package.sh	1970-01-01 00:00:00 +0000
+++ wait_package.sh	2014-07-28 14:45:55 +0000
@@ -0,0 +1,27 @@
+#!/bin/bash
+suite=${suite:-$1}
+package=${2}
+version=${3}
+sleep_t=${4:-120}
+url="http://archive.ubuntu.com/ubuntu/dists/${suite}/main/binary-amd64/Packages.bz2";
+go=""
+
+while [ -z "${go}" ]
+do
+    echo "Checking package"
+    ver=$(curl -s ${url} | \
+            bunzip2 -c | egrep -A8 "Package.*${package}" | \
+            awk '/Version/ {print$2}')
+
+    [ "${ver}" = "${version}" -o -z "${version}" ] && {
+        echo "Found ${package} ${ver}";
+        go=1;
+        exit 0;
+    }
+
+    [ -z "${go}" ] && {
+        echo "....waiting another ${sleep_t} for ${package}";
+        sleep ${sleep_t}
+    }
+
+done

