cloud-init-dev team mailing list archive
Message #00176
[Merge] lp:~racb/cloud-init/cloud-fingerprint into lp:cloud-init
Robie Basak has proposed merging lp:~racb/cloud-init/cloud-fingerprint into lp:cloud-init.
Commit message:
Add new tool cloud-fingerprint, which automatically and securely imports public keys into ~/.ssh/known_hosts using cloud-init console output.
Requested reviews:
cloud init development team (cloud-init-dev)
For more details, see:
https://code.launchpad.net/~racb/cloud-init/cloud-fingerprint/+merge/140188
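The new tool provides three subcommands (grep, import, and fix; see the argparse setup in bin/cloud-fingerprint below). A minimal usage sketch, assuming a hypothetical instance i-12345678 reachable at ec2-host.example.com, with euca2ools supplying the console output:

  # print the fingerprint block scraped from the console output
  euca-get-console-output i-12345678 | cloud-fingerprint grep

  # verify the host key against the console fingerprints and, on a
  # match, update ~/.ssh/known_hosts
  euca-get-console-output i-12345678 | cloud-fingerprint fix ec2-host.example.com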
=== added file 'ChangeLog'
--- ChangeLog 1970-01-01 00:00:00 +0000
+++ ChangeLog 2012-12-17 12:41:23 +0000
@@ -0,0 +1,21 @@
+0.27
+  - add '--hook-img' flag to cloud-publish-image and pass that flag
+    through from cloud-publish-ubuntu and cloud-publish-tarball.
+0.26
+ - install ubuntu cloud image keyring into /usr/share/keyrings/
+ - ubuntu-cloudimg-query, ubuntu-ec2-run:
+ - be aware of m1.medium instance type
+ - support "amd64 on all sizes"
+ - be aware of hi1.4xlarge
+ - always pass block device mapping for sdb if instance type has it
+ rather than relying on ami registration.
+  - when downloading images, use wget --progress=dot:mega for less verbose output
+ - growpart:
+ - use 'sfdisk --no-reread' (LP: #942788)
+ - if sfdisk fails, send output to stderr
+ - cloud-publish-tarball: fix for tarballs without a ramdisk
+ - ubuntu-cloudimg-query: allow baseurl to be read from environment var
+ - growpart: support growpart of nbd devices (/dev/nbd[0-9]) and
+ /dev/loop devices.
+ - add cloud-localds utility
+  - ubuntu-cloudimg-query: add 'serial' to tokens available for substitution
=== renamed file 'ChangeLog' => 'ChangeLog.moved'
=== added file 'Makefile'
--- Makefile 1970-01-01 00:00:00 +0000
+++ Makefile 2012-12-17 12:41:23 +0000
@@ -0,0 +1,29 @@
+NAME = cloud-utils
+libdir = /usr/share/$(NAME)
+LIBDIR = $(DESTDIR)$(libdir)
+BINDIR = $(DESTDIR)/usr/bin
+MANDIR = $(DESTDIR)/usr/share/man/man1
+DOCDIR = $(DESTDIR)/usr/share/doc/$(NAME)
+KEYDIR = $(DESTDIR)/usr/share/keyrings
+
+binprogs := $(subst bin/,,$(wildcard bin/*))
+manpages := $(subst man/,,$(wildcard man/*.1))
+
+build: ubuntu-cloudimg-keyring.gpg
+ echo manpages=$(manpages)
+
+install:
+ mkdir -p "$(BINDIR)" "$(DOCDIR)" "$(MANDIR)" "$(KEYDIR)"
+ cd bin && install $(binprogs) "$(BINDIR)"
+ cd man && install $(manpages) "$(MANDIR)/" --mode=0644
+ install -m 0644 ubuntu-cloudimg-keyring.gpg $(KEYDIR)
+
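+# the keyring ships base64-encoded with leading '#' comment lines; strip
+# the comments and decode, removing the partial output on failure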
+ubuntu-cloudimg-keyring.gpg: ubuntu-cloudimg-keyring.gpg.b64
+ grep -v "^#" "$<" | base64 --decode > "$@" || { rm "$@"; exit 1; }
+
+clean:
+ :
+
+uninstall:
+ cd "$(BINDIR)" && rm -f $(binprogs) || :
+ cd "$(MANDIR)" && rm -f $(manpages) || :
=== renamed file 'Makefile' => 'Makefile.moved'
=== added directory 'bin'
=== renamed directory 'bin' => 'bin.moved'
=== added file 'bin/cloud-fingerprint'
--- bin/cloud-fingerprint 1970-01-01 00:00:00 +0000
+++ bin/cloud-fingerprint 2012-12-17 12:41:23 +0000
@@ -0,0 +1,164 @@
+#!/usr/bin/python3
+
+# Handle ~/.ssh/known_hosts for dynamic cloud instances
+#
+# Copyright (C) 2012 Canonical Ltd.
+#
+# Authors: Robie Basak <robie.basak@xxxxxxxxxxxxx>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, version 3 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import argparse
+import os
+import re
+import subprocess
+import sys
+import tempfile
+
+KNOWN_HOSTS_PATH = os.path.expanduser(
+    os.path.join('~', '.ssh', 'known_hosts'))
+
+EC2_START_MARKER = 'ec2: -----BEGIN SSH HOST KEY FINGERPRINTS-----'
+EC2_END_MARKER = 'ec2: -----END SSH HOST KEY FINGERPRINTS-----'
+EC2_PREFIX_TO_DROP = 'ec2: '
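+# MD5-style key fingerprint: 16 colon-separated pairs of lowercase hex digits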
+FINGERPRINT_CHECK = re.compile(r'^[a-f0-9]{2}(:[a-f0-9]{2}){15}$')
+
+
+def drop_prefix(prefix, line):
+ assert line.startswith(prefix)
+ return line[len(prefix):]
+
+
+def extract_console_fingerprint(console_output):
+ start = console_output.index(EC2_START_MARKER) + len(EC2_START_MARKER)
+ end = console_output.index(EC2_END_MARKER, start)
+
+    data = console_output[start:end].strip().splitlines()
+ return [drop_prefix(EC2_PREFIX_TO_DROP, line) for line in data]
+
+
+def fingerprint_appears_in_lines(fingerprint, lines):
+    line_fingerprint_candidates = [
+        tokens[1] for tokens in (line.split() for line in lines)
+        if len(tokens) > 1
+    ]
+    line_fingerprints = [
+        candidate for candidate in line_fingerprint_candidates
+        if FINGERPRINT_CHECK.match(candidate)
+    ]
+
+ return fingerprint in line_fingerprints
+
+
+def get_known_hosts(dns_name, ssh_proxy=None):
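+    '''Return a known_hosts line for dns_name using ssh-keyscan,
+    optionally running the scan from ssh_proxy over ssh.'''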
+ args = []
+ if ssh_proxy:
+ args.extend(['ssh', '-S', 'none', ssh_proxy])
+ args.extend(['ssh-keyscan', '-H', dns_name])
+ cmd = subprocess.Popen(
+ args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ result = cmd.communicate()
+ if cmd.returncode:
+ raise RuntimeError('ssh-keyscan returned %d: %s' %
+ (cmd.returncode, result[1]))
+ return result[0]
+
+
+def get_known_hosts_fingerprint(known_hosts):
+ '''Return the key fingerprint given a line from a known hosts file.'''
+ with tempfile.NamedTemporaryFile() as f:
+ f.write(known_hosts)
+ f.flush()
+ cmd = subprocess.Popen(['ssh-keygen', '-lf', f.name],
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ result = cmd.communicate()
+ if cmd.returncode:
+ raise RuntimeError('ssh-keygen returned %d: %s' %
+ (cmd.returncode, result[1]))
+ return result[0].decode('ascii').split()[1]
+
+
+def update_known_hosts(known_hosts_line, remove_hostname=None):
+ if remove_hostname and os.path.exists(KNOWN_HOSTS_PATH):
+ subprocess.check_call(['ssh-keygen', '-R', remove_hostname])
+ with open(KNOWN_HOSTS_PATH, 'ab') as f:
+ f.write(known_hosts_line)
+
+
+def import_(
+ fingerprint_lines, hostname, ssh_proxy=None, remove_old_entry=True,
+ check_only=False):
+ known_hosts_line = get_known_hosts(hostname, ssh_proxy)
+ known_hosts_fingerprint = get_known_hosts_fingerprint(known_hosts_line)
+ if fingerprint_appears_in_lines(
+ known_hosts_fingerprint, fingerprint_lines):
+ if not check_only:
+ remove_hostname = hostname if remove_old_entry else None
+ update_known_hosts(
+ known_hosts_line, remove_hostname=remove_hostname)
+ return True
+ else:
+ print(
+ "Fingerprint mismatch! known_hosts not updated.", file=sys.stderr)
+ return False
+
+
+def main_grep(args):
+ console_output = sys.stdin.read()
+ print(*extract_console_fingerprint(console_output), sep="\n")
+
+
+def main_import(args):
+ fingerprint_lines = sys.stdin.read().splitlines()
+ sys.exit(not import_(
+ fingerprint_lines=fingerprint_lines,
+ hostname=args.hostname,
+ ssh_proxy=args.ssh_proxy,
+ remove_old_entry=args.remove_old_entry,
+ check_only=args.check_only
+ ))
+
+
+def main_fix(args):
+ console_output = sys.stdin.read()
+ fingerprint_lines = extract_console_fingerprint(console_output)
+ sys.exit(not import_(
+ fingerprint_lines=fingerprint_lines,
+ hostname=args.hostname,
+ ssh_proxy=args.ssh_proxy
+ ))
+
+
+def main(args):
+ parser = argparse.ArgumentParser()
+ subparsers = parser.add_subparsers()
+ grep_subparser = subparsers.add_parser('grep')
+ grep_subparser.set_defaults(func=main_grep)
+ import_subparser = subparsers.add_parser('import')
+ import_subparser.set_defaults(func=main_import)
+ import_subparser.add_argument('--ssh-proxy')
+ import_subparser.add_argument(
+ '--remove-old-entry', action='store_true', default=True)
+ import_subparser.add_argument(
+ '--no-remove-old-entry', action='store_false', dest='remove_old_entry')
+ import_subparser.add_argument('--check-only', action='store_true')
+ import_subparser.add_argument('hostname')
+ fix_subparser = subparsers.add_parser('fix')
+ fix_subparser.set_defaults(func=main_fix)
+ fix_subparser.add_argument('--ssh-proxy')
+ fix_subparser.add_argument('hostname')
+ args = parser.parse_args(args)
+ args.func(args)
+
+
+if __name__ == '__main__':
+ main(sys.argv[1:])
=== added file 'bin/cloud-localds'
--- bin/cloud-localds 1970-01-01 00:00:00 +0000
+++ bin/cloud-localds 2012-12-17 12:41:23 +0000
@@ -0,0 +1,153 @@
+#!/bin/bash
+
+VERBOSITY=0
+TEMP_D=""
+DEF_DISK_FORMAT="raw"
+DEF_FILESYSTEM="iso9660"
+
+error() { echo "$@" 1>&2; }
+errorp() { printf "$@" 1>&2; }
+fail() { [ $# -eq 0 ] || error "$@"; exit 1; }
+failp() { [ $# -eq 0 ] || errorp "$@"; exit 1; }
+
+Usage() {
+ cat <<EOF
+Usage: ${0##*/} [ options ] output user-data [meta-data]
+
+   Create a disk for cloud-init's nocloud data source
+
+ options:
+ -h | --help show usage
+ -d | --disk-format D disk format to output. default: raw
+ -f | --filesystem F filesystem format (vfat or iso), default: iso9660
+
+ -i | --interfaces F write network interfaces file into metadata
+ -m | --dsmode M add 'dsmode' ('local' or 'net') to the metadata
+ default in cloud-init is 'net', meaning network is
+ required.
+
+ Example:
+ * cat my-user-data
+ #cloud-config
+ password: passw0rd
+ chpasswd: { expire: False }
+ ssh_pwauth: True
+ * echo "instance-id: \$(uuidgen || echo i-abcdefg)" > my-meta-data
+ * ${0##*/} my-seed.img my-user-data my-meta-data
+ * kvm -net nic -net user,hostfwd=tcp::2222-:22 \\
+ -drive file=disk1.img,if=virtio -drive file=my-seed.img,if=virtio
+ * ssh -p 2222 ubuntu@localhost
+EOF
+}
+
+bad_Usage() { Usage 1>&2; [ $# -eq 0 ] || error "$@"; exit 1; }
+cleanup() {
+ [ -z "${TEMP_D}" -o ! -d "${TEMP_D}" ] || rm -Rf "${TEMP_D}"
+}
+
+debug() {
+ local level=${1}; shift;
+ [ "${level}" -gt "${VERBOSITY}" ] && return
+ error "${@}"
+}
+
+short_opts="hi:d:f:m:o:v"
+long_opts="disk-format:,dsmode:,filesystem:,help,interfaces:,output:,verbose"
+getopt_out=$(getopt --name "${0##*/}" \
+ --options "${short_opts}" --long "${long_opts}" -- "$@") &&
+ eval set -- "${getopt_out}" ||
+ bad_Usage
+
+output=""
+userdata=""
+metadata=""
+filesystem=$DEF_FILESYSTEM
+diskformat=$DEF_DISK_FORMAT
+interfaces=_unset
+dsmode=""
+
+
+while [ $# -ne 0 ]; do
+ cur=${1}; next=${2};
+ case "$cur" in
+ -h|--help) Usage ; exit 0;;
+ -v|--verbose) VERBOSITY=$((${VERBOSITY}+1));;
+ -d|--disk-format) diskformat=$next; shift;;
+ -f|--filesystem) filesystem=$next; shift;;
+ -m|--dsmode) dsmode=$next; shift;;
+ -i|--interfaces) interfaces=$next; shift;;
+ --) shift; break;;
+ esac
+ shift;
+done
+
+[ $# -ge 2 ] || bad_Usage "must provide output, userdata"
+[ $# -le 3 ] || bad_Usage "confused by additional args"
+
+output=$1
+userdata=$2
+metadata=$3
+
+[ -n "$metadata" -a "${interfaces}" != "_unset" ] &&
+ fail "metadata and --interfaces are incompatible"
+[ -n "$metadata" -a -n "$dsmode" ] &&
+ fail "metadata and dsmode are incompatible"
+[ "$interfaces" = "_unset" -o -r "$interfaces" ] ||
+ fail "$interfaces: not a readable file"
+
+TEMP_D=$(mktemp -d "${TMPDIR:-/tmp}/${0##*/}.XXXXXX") ||
+ fail "failed to make tempdir"
+trap cleanup EXIT
+
+if [ -n "$metadata" ]; then
+ cp "$metadata" "$TEMP_D/meta-data" || fail "$metadata: failed to copy"
+else
+ iface_data=""
+ dsmode_data=""
+ [ "$interfaces" != "_unset" ] &&
+ iface_data=$(sed ':a;N;$!ba;s/\n/\\n/g' "$interfaces") &&
+ iface_data="\"interfaces\": '$iface_data'"
+ [ -n "$dsmode" ] && dsmode_data="\"dsmode\": \"$dsmode\""
+
+	# write json formatted meta-data (json is a subset of yaml)
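+	# e.g. with neither --interfaces nor --dsmode given, this writes:
+	#   {
+	#   "instance-id": "iid-local01"
+	#   }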
+ printf "{\n%s\n%s\n%s\n}" "\"instance-id\": \"iid-local01\"" \
+ "${iface_data}" "${dsmode_data}" > "${TEMP_D}/meta-data"
+fi
+
+if [ "$userdata" = "-" ]; then
+ cat > "$TEMP_D/user-data" || fail "failed to read from stdin"
+else
+ cp "$userdata" "$TEMP_D/user-data" || fail "$userdata: failed to copy"
+fi
+
+# create the filesystem image containing the user-data and meta-data files
+img="$TEMP_D/seed.img"
+truncate --size 100K "$img" || fail "failed truncate image"
+
+case "$filesystem" in
+ iso9660|iso)
+ genisoimage -output "$img" -volid cidata \
+ -joliet -rock "$TEMP_D/user-data" "$TEMP_D/meta-data" \
+ > "$TEMP_D/err" 2>&1 ||
+ { cat "$TEMP_D/err" 1>&2; fail "failed to genisoimage"; }
+ ;;
+ vfat)
+ mkfs.vfat -n cidata "$img" || fail "failed mkfs.vfat"
+ mcopy -oi "$img" "$TEMP_D/user-data" "$TEMP_D/meta-data" :: ||
+ fail "failed to copy user-data, meta-data to img"
+ ;;
+ *) fail "unknown filesystem $filesystem";;
+esac
+
+[ "$output" = "-" ] && output="$TEMP_D/final"
+qemu-img convert -f raw -O "$diskformat" "$img" "$output" ||
+ fail "failed to convert to disk format $diskformat"
+
+[ "$output" != "$TEMP_D/final" ] || { cat "$output" && output="-"; } ||
+ fail "failed to write to -"
+
+error "wrote ${output} with filesystem=$filesystem and diskformat=$diskformat"
+# vi: ts=4 noexpandtab
=== added file 'bin/cloud-publish-image'
--- bin/cloud-publish-image 1970-01-01 00:00:00 +0000
+++ bin/cloud-publish-image 2012-12-17 12:41:23 +0000
@@ -0,0 +1,542 @@
+#!/bin/bash
+# This script uses bash arrays; do not switch to /bin/sh
+#
+# cloud-publish-image - wrapper for cloud image publishing
+#
+# Copyright (C) 2010 Canonical Ltd.
+#
+# Authors: Scott Moser <smoser@xxxxxxxxxxxxx>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, version 3 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+EC2PRE=${EC2PRE:-euca-}
+TMPD=""
+RENAME_D=""
+VERBOSITY=0
+IMAGE_TYPES=( auto image kernel ramdisk vmlinuz initrd )
+
+error() { echo "$@" 1>&2; }
+errorp() { printf "$@" 1>&2; }
+fail() { [ $# -eq 0 ] || error "$@"; exit 1; }
+failp() { [ $# -eq 0 ] || errorp "$@"; exit 1; }
+
+Usage() {
+ cat <<EOF
+Usage: ${0##*/} [ options ] arch image bucket
+
+ arch : one of i386 or x86_64
+ image : the image to upload and register
+ bucket : bucket to publish image to
+
+ options:
+ -l|--add-launch ID : ID can be "all", or "none", or numerical ID
+ --dry-run : only report what would be done
+    --allow-existing         : if an image is already registered
+ simply report as if work was done
+ --hook-img EXE : invoke executable 'EXE' with full path to
+ downloaded disk image file
+ -o|--output <file> : write registered id and manifest to file
+ |--rename <publish_path> : publish to bucket/<publish_path>
+ default: bucket/<basename(image)>
+ -t|--type <type> : type is one of kernel/ramdisk/image
+ -v|--verbose : increase verbosity
+ --name <name> : register with '--name'.
+ default: publish_path
+
+ --save-downloaded : if the image is a url, save it to '.'
+
+ if type is 'image', then:
+ -k | --kernel k : use previously registered kernel with id 'k'
+ specify 'none' for no kernel
+ -K | --kernel-file f : bundle, upload, use file 'f' as kernel
+ -r | --ramdisk r : use previously registered ramdisk with id 'r'
+ specify 'none' for no ramdisk
+ -R | --ramdisk-file f : bundle, upload, use file 'f' as ramdisk
+ -B | --block-device-mapping m : specify block device mapping in bundle
+EOF
+}
+
+bad_Usage() { Usage 1>&2; [ $# -eq 0 ] || error "$@"; exit 1; }
+cleanup() {
+ local x=""
+ for x in "${RENAME_D}" "${TMPD}"; do
+ [ -z "${x}" -o ! -d "${x}" ] || rm -Rf "${x}"
+ done
+ return 0
+}
+
+debug() {
+ local level=${1}
+ shift;
+ [ "${level}" -ge "${VERBOSITY}" ] && return
+ error "$(date):" "${@}"
+}
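+# run dir pre msg cmd...: idempotent step runner; skips the step if
+# "dir/stamp.pre" exists, otherwise logs the command and its output to
+# "dir/pre.*" files and creates the stamp on success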
+run() {
+ local dir="${1}" pre=${2} msg=${3};
+ shift 3;
+ [ -e "${dir}/stamp.${pre}" ] &&
+ { debug 1 "skipping ${pre}"; return 0; }
+ debug 1 "${msg}"
+ echo "$@" > "${dir}/${pre}.cmd"
+ "$@" > "${dir}/${pre}.stdout" 2> "${dir}/${pre}.stderr" &&
+ : > "${dir}/stamp.${pre}" && return 0
+ local ret=$?
+ echo "failed: ${*}"
+ cat "${dir}/${pre}.stdout"
+ cat "${dir}/${pre}.stderr" 1>&2
+ return ${ret}
+}
+
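+# search_args needle args...: succeed if needle is among args, with _RET
+# set to its zero-based index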
+search_args() {
+ local x="" i=0 needle="$1"
+ shift;
+ for x in "${@}"; do
+ [ "${needle}" = "${x}" ] && { _RET=$i; return 0; }
+ i=$(($i+1))
+ done
+ return 1
+}
+
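+# checkstatus status...: return the count of non-zero statuses (0 = all passed)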
+checkstatus() {
+ local x="" i=0
+ for x in "$@"; do
+ [ "$x" = "0" ] || i=$(($i+1))
+ done
+ return $i
+}
+
+get_manifest_id() {
+ local tmpf="" out="" ret=1 m1="${1}" m2="${2}"
+ out=$(${EC2PRE}describe-images -o self |
+ awk '$3 ~ m1 || $3 ~ m2 { printf("%s\t%s\n",$2,$3); }' \
+ "m1=$m1" "m2=${m2:-^$}"
+ checkstatus ${PIPESTATUS[@]}) || return 1
+ _RET=${out}
+ return
+}
+get_image_type() {
+ local image=${1} file_out="" img_type=""
+ file_out=$(file --uncompress "${image}") || return 1;
+ case "${file_out}" in
+ *[lL]inux\ kernel*) img_type="kernel";;
+ *LSB\ executable*gzip*) img_type="kernel";;
+ *cpio\ archive*) img_type="ramdisk";;
+ *ext[234]\ file*|*boot\ sector*) img_type="image";;
+ *) error "unable to determine image type. pass --type"; return 1;;
+ esac
+ _RET=${img_type}
+ return 0
+}
+
+upload_register() {
+ local out=""
+ out=$(cloud-publish-image "${@}") || return
+ set -- ${out}
+ _RET=${1}
+}
+
+dl() {
+ # dl url, target, quiet
+ local url=${1} target=${2} quiet=${3:-1}
+ if [ -f "${url}" ]; then
+ [ "${target}" = "-" ] && { cat "$url"; return; }
+ cp "$url" "$target"
+ return
+ fi
+ local qflag="-q"
+ [ "$quiet" = "0" ] && qflag=""
+
+ wget $qflag --progress=dot:mega "$url" -O "$target" ||
+ return 1
+}
+
+dl_input_image() {
+ # this downloads an image if necessary and sets _RET to location of image
+ local input="$1" save_dir="${2:-.}" ret="" quiet=1
+ [ $VERBOSITY -ge 2 ] && quiet=0
+ case "$input" in
+ file://*)
+ ret="$save_dir/${input##*/}"
+ dl "${input#file://}" "$ret" $quiet || return $?;;
+ http://*|ftp://*|https://*)
+ ret="$save_dir/${input##*/}"
+ dl "$input" "$ret" $quiet || return $?
+ ;;
+ *) ret="$input";;
+ esac
+ _RET="$ret"
+}
+
+
+[ "${CLOUD_UTILS_WARN_UEC:-0}" = "0" ] && _n="${0##*/}" &&
+ [ "${_n#uec}" != "${_n}" ] && export CLOUD_UTILS_WARN_UEC=1 &&
+	error "WARNING: '${0##*/}' is now 'cloud${_n#uec}'. Please update your tools or docs"
+
+short_opts="B:h:k:K:l:no:r:R:t:vw:"
+long_opts="add-launch:,allow-existing,block-device-mapping:,dry-run,help,hook-img:,kernel:,kernel-file:,name:,output:,image-to-raw,ramdisk:,ramdisk-file:,rename:,save-downloaded,type:,verbose,working-dir:"
+getopt_out=$(getopt --name "${0##*/}" \
+ --options "${short_opts}" --long "${long_opts}" -- "$@") &&
+ eval set -- "${getopt_out}" ||
+ bad_Usage
+
+add_acl=""
+allow_existing=0
+arch=""
+bucket=""
+dry_run=0
+image=""
+img_type="image"
+kernel=""
+kernel_file=""
+output=""
+ramdisk=""
+ramdisk_file=""
+rename=""
+save_dl=0
+name=__unset__
+wdir_in=""
+dev_mapping=""
+image2raw=0
+raw_image=""
+hook_img=""
+
+while [ $# -ne 0 ]; do
+ cur=${1}; next=${2};
+ case "$cur" in
+ -d|--working-dir) wdir_in=${next}; shift;;
+ -h|--help) Usage; exit 0;;
+ --hook-img)
+ [ -z "${hook_img}" ] || bad_Usage "only one --hook-img supported";
+ [ -x "$next" ] || bad_Usage "--hook-img is not executable"
+ hook_img=$(readlink -f "$next") ||
+ bad_Usage "could not find full path to $next"
+ shift;;
+ -B|--block-device-mapping) dev_mapping=${next}; shift;;
+ -k|--kernel) kernel=${next}; shift;;
+ -K|--kernel-file) kernel_file=${next}; shift;;
+ -l|--add-launch)
+ if [ "${next}" = "none" ]; then
+ add_acl=""
+ else
+ user=${next//-/}; # just be nice and remove '-'
+ add_acl="${add_acl:+${add_acl} }${user}";
+ fi
+ shift;;
+ --name) name=${next}; shift;;
+ -o|--output) output="${next}"; shift;;
+ --image-to-raw) image2raw=1;;
+ -r|--ramdisk) ramdisk=${next}; shift;;
+ -R|--ramdisk-file) ramdisk_file=${next}; shift;;
+ -n|--dry-run) dry_run=1;;
+ --rename) rename=${next}; shift;;
+ --save-downloaded) save_dl=1;;
+ -t|--type)
+ img_type=${next};
+ search_args "${img_type}" "${IMAGE_TYPES[@]}" ||
+ bad_Usage "image type (${next}) not in ${IMAGE_TYPES[*]}"
+ shift;;
+ -v|--verbose) VERBOSITY=$((${VERBOSITY}+1));;
+ --allow-existing) allow_existing=1;;
+ --) shift; break;;
+ -*) bad_Usage "confused by ${cur}";;
+ esac
+ shift;
+done
+
+[ $# -lt 3 ] && bad_Usage "must provide arch, image, bucket"
+[ $# -gt 3 ] && bad_Usage "unexpected arguments: ${4}"
+arch="${1}"
+image="${2}"
+bucket="${3}"
+
+# remove any trailing slashes on bucket
+while [ "${bucket%/}" != "${bucket}" ]; do bucket=${bucket%/}; done
+
+[ "${arch}" = "amd64" ] && arch=x86_64
+
+[ "${img_type}" = "vmlinuz" ] && img_type="kernel"
+[ "${img_type}" = "initrd" ] && img_type="ramdisk"
+
+[ -n "${kernel_file}" -a -n "${kernel}" ] &&
+ bad_Usage "--kernel-file is incompatible with --kernel"
+[ -n "${ramdisk_file}" -a -n "${ramdisk}" ] &&
+ bad_Usage "--ramdisk-file is incompatible with --ramdisk"
+
+if [ -n "${wdir_in}" ]; then
+ [ -d "${wdir_in}" ] || fail "input working directory not a directory";
+ wdir=$(readlink -f "${wdir_in}") ||
+ fail "failed to realize ${wdir_in}"
+else
+ TMPD=$(mktemp -d ${TMPDIR:-/tmp}/${0##*/}.XXXXXX) ||
+ fail "failed to make tmpdir"
+ wdir="${TMPD}"
+fi
+trap cleanup EXIT
+
+if [ -n "$kernel" -a "$kernel" != "none" ]; then
+	# if kernel is given, check that its arch matches the register arch
+	aki_arch=""; ari_arch="";
+
+ [ "$ramdisk" = "none" ] && _ramdisk="" || _ramdisk="$ramdisk"
+
+ ${EC2PRE}describe-images "$kernel" $_ramdisk > "${TMPD}/kernel.info" ||
+ fail "failed to describe kernel ${kernel}"
+ aki_arch=$(awk '-F\t' '$1 == "IMAGE" && $2 == id { print $8 }' \
+ "id=$kernel" "$TMPD/kernel.info") && [ -n "$aki_arch" ] ||
+ fail "failed to get arch of $kernel"
+ if [ -n "$ramdisk" -a "$ramdisk" != "none" ]; then
+ ari_arch=$(awk '-F\t' '$1 == "IMAGE" && $2 == id { print $8 }' \
+ "id=$ramdisk" "$TMPD/kernel.info") && [ -n "$ari_arch" ] ||
+ fail "failed to get arch of $ramdisk"
+ fi
+
+ # if kernel and ramdisk are given, and arch=i386 kernel/ramdisk=x86_64,
+ # then assume loader kernel.
+ case "$arch:$aki_arch:$ari_arch" in
+ $arch:$arch:$arch|$arch:$arch:) : ;;
+ i386:x86_64:x86_64)
+ error "WARNING: assuming loader kernel ($kernel/$ramdisk arch=$aki_arch, provided arch=$arch)"
+ arch="x86_64";;
+ *) fail "arch $arch != kernel/ramdisk arch [$aki_arch/$ari_arch]";;
+ esac
+fi
+
+save_dir="${wdir}"
+[ $save_dl -eq 1 ] && save_dir=.
+
+dl_input_image "$image" "$save_dir" && image="$_RET" ||
+ fail "failed to download image $image to $save_dir"
+
+[ -z "$kernel_file" ] ||
+ { dl_input_image "$kernel_file" "$save_dir" && kernel_file="$_RET"; } ||
+ fail "failed to download kernel $kernel_file to $save_dir"
+
+[ -z "$ramdisk_file" ] ||
+ { dl_input_image "$ramdisk_file" "$save_dir" && ramdisk_file="$_RET"; } ||
+ fail "failed to download ramdisk $ramdisk_file to $save_dir"
+
+[ -f "${image}" ] || bad_Usage "${image}: image is not a file"
+
+[ -z "${kernel_file}" -o -f "${kernel_file}" ] ||
+ fail "${kernel_file} is not a file"
+[ -z "${ramdisk_file}" -o -f "${ramdisk_file}" ] ||
+ fail "${ramdisk_file} is not a file"
+
+if [ "${img_type}" = "auto" ]; then
+ get_image_type "${image}" ||
+ fail "failed to determine file type of ${image}"
+ img_type=${_RET}
+fi
+
+[ -n "${dev_mapping}" -a "${img_type}" != "image" ] &&
+ fail "-B/--block-device-mapping can only be specified for --type=image"
+
+[ -n "${rename}" ] || rename=${image##*/}
+
+if [ "${name}" = "__unset__" ]; then
+
+ # if user did not pass --name, try to figure out if register supports it
+ # we unfortunately can't assume that '--help' exits 0
+ ${EC2PRE}register --help > "${TMPD}/register-help.out" 2>&1
+ if grep -q -- "--name" "${TMPD}/register-help.out"; then
+ name="${bucket}/${rename}"
+ debug 1 "using ${name} for --name"
+ else
+ debug 1 "${EC2PRE}register seems not to support --name, not passing"
+ name=""
+ fi
+
+elif [ -z "${name}" -o "${name}" == "none" ]; then
+ # if user passed in '--name=""' or '--name=none", do not pass --name
+ name=""
+fi
+
+image_full=$(readlink -f "${image}") ||
+ fail "failed to get full path to ${image}"
+
+if [ -e "${wdir}/${rename}" ]; then
+ [ "${wdir}/${rename}" -ef "${image}" ] ||
+ fail "${wdir} already contains file named ${rename}"
+fi
+
+# bundle-kernel doesn't like for file to exist in destination-dir
+# so, create it one dir under there
+RENAME_D=$(mktemp -d "${wdir}/.rename.XXXXXX") &&
+ ln -s "${image_full}" "${RENAME_D}/${rename}" &&
+ rename_full="${RENAME_D}/${rename}" ||
+ fail "link failed: working-dir/rename/${rename} -> ${image_full}"
+
+reg_id=""
+
+manifest="${rename}.manifest.xml"
+
+# set up "pass through" args to go through to kernel/ramdisk publishing
+pthr=( )
+[ $VERBOSITY -eq 0 ] || pthr[${#pthr[@]}]="--verbose"
+[ ${allow_existing} -eq 0 ] || pthr[${#pthr[@]}]="--allow-existing"
+[ -z "${add_acl}" ] ||
+ { pthr[${#pthr[@]}]="--add-launch"; pthr[${#pthr[@]}]="${add_acl}"; }
+[ ${dry_run} -eq 0 ] || pthr[${#pthr[@]}]="--dry-run"
+
+if [ -n "${kernel_file}" ]; then
+ debug 1 "publishing kernel ${kernel_file}"
+ upload_register --type kernel "${pthr[@]}" \
+ "${arch}" "${kernel_file}" "${bucket}" ||
+ fail "failed to register ${kernel_file}"
+ kernel=${_RET}
+ debug 1 "kernel registered as ${kernel}"
+fi
+
+if [ -n "${ramdisk_file}" ]; then
+ debug 1 "publishing ramdisk ${ramdisk_file}"
+ upload_register --type ramdisk "${pthr[@]}" \
+ "${arch}" "${ramdisk_file}" "${bucket}" ||
+ fail "failed to register ${ramdisk_file}"
+ ramdisk=${_RET}
+ debug 1 "ramdisk registered as ${ramdisk}"
+fi
+
+if [ ${VERBOSITY} -ge 1 -o ${dry_run} -ne 0 ]; then
+ [ -n "${kernel}" ] && krd_fmt=" %s/%s" &&
+ krd_args=( "${kernel}" "${ramdisk:-none}" )
+ errorp "[%-6s] %s => %s/%s ${krd_fmt}\n" "${img_type}" \
+ "${image##*/}" "${bucket}" "${rename}" "${krd_args[@]}"
+ if [ ${dry_run} -ne 0 ]; then
+ case "${img_type}" in
+ kernel) pre="eki";;
+ ramdisk) pre="eri";;
+ image) pre="emi";;
+ esac
+ printf "%s\t%s\n" "${pre}-xxxxxxxx" "${bucket}/${rename##*/}"
+ exit
+ fi
+fi
+
+krd_args=( );
+[ -n "${kernel}" -a "${kernel}" != "none" ] &&
+ krd_args=( "${krd_args[@]}" "--kernel" "${kernel}" )
+[ -n "${ramdisk}" -a "${ramdisk}" != "none" ] &&
+ krd_args=( "${krd_args[@]}" "--ramdisk" "${ramdisk}" )
+
+if [ "${EC2PRE%ec2-}" != "${EC2PRE}" ]; then
+ req="EC2_CERT EC2_PRIVATE_KEY EC2_USER_ID EC2_ACCESS_KEY EC2_SECRET_KEY"
+ for env_name in ${req}; do
+ [ -n "${!env_name}" ] ||
+ fail "when using ec2- tools, you must set env: ${req}"
+ done
+ ex_bundle_args=( --cert "${EC2_CERT}"
+ --privatekey "${EC2_PRIVATE_KEY}"
+ --user "${EC2_USER_ID}" )
+ ex_upload_args=( --access-key "${EC2_ACCESS_KEY}"
+ --secret-key "${EC2_SECRET_KEY}" )
+
+fi
+
+debug 1 "checking for existing registered image at ${bucket}/${manifest}"
+get_manifest_id "^${bucket}/${manifest}" "/$name$" ||
+ fail "failed to check for existing manifest"
+if [ -n "${_RET}" ]; then
+ set -- ${_RET}
+ img_id=${1}; path=${2}
+ [ ${allow_existing} -eq 1 ] ||
+ fail "${path} already registered as ${img_id}"
+ debug 1 "using existing ${img_id} for ${bucket}/${manifest}"
+else
+ if [ $image2raw -eq 1 -a "$img_type" = "image" ]; then
+ # this is really here because of LP: #836759
+ # but could be useful elsewhere
+ qemu-img info "$image" > "${TMPD}/disk-info.out" ||
+ fail "failed to qemu-img info $image"
+ imgfmt=$(awk '-F:' '$1 == "file format" { sub(/ /,"",$2); print $2 }' \
+ "${TMPD}/disk-info.out")
+ if [ "$imgfmt" != "raw" ]; then
+ debug 1 "converting image to raw"
+ raw_image="${TMPD}/image.raw"
+ qemu-img convert -O raw "$image" "$raw_image" ||
+ fail "failed to convert image to raw"
+ image="$raw_image"
+ ln -sf "$raw_image" "$rename_full" ||
+ fail "symlink to raw image $raw_image failed"
+ else
+ debug 1 "disk is already raw format, not converting"
+ fi
+ fi
+
+ if [ -n "$hook_img" ]; then
+ debug 1 "image hook: $hook_img $rename_full"
+ "$hook_img" "$rename_full" ||
+ fail "image hook failed: $hook_img ${rename_full} failed"
+ fi
+
+ bundle_args=( "--image" "${rename_full}" )
+ [ -n "${dev_mapping}" ] &&
+ bundle_args[${#bundle_args[@]}]="--block-device-mapping=${dev_mapping}"
+
+ case "${img_type}" in
+ kernel|ramdisk)
+ bundle_args[${#bundle_args[@]}]="--${img_type}"
+ bundle_args[${#bundle_args[@]}]="true"
+ esac
+ run "${wdir}" "bundle" "bundling ${img_type} ${image}" \
+ ${EC2PRE}bundle-image --destination "${wdir}" --arch "${arch}" \
+ "${ex_bundle_args[@]}" \
+ "${bundle_args[@]}" "${krd_args[@]}" ||
+ fail "failed to bundle ${img_type} ${image}"
+
+ run "${wdir}" "upload" "upload ${bucket}/${manifest}" \
+ ${EC2PRE}upload-bundle --bucket "${bucket}" \
+ "${ex_upload_args[@]}" \
+ --manifest "${wdir}/${manifest}" ||
+ fail "failed to upload bundle to ${bucket}/${manifest}"
+
+ junk="" img_id="";
+ run "${wdir}" "register" "register ${bucket}/${manifest}" \
+ ${EC2PRE}register ${name:+--name "${name}"} \
+ "${ex_register_args[@]}" "${bucket}/${manifest}" &&
+ read junk img_id < "${wdir}/register.stdout" &&
+ [ "${img_id#???-}" != "${img_id}" ] || {
+		if get_manifest_id "${bucket}/${manifest}" "/${name}" &&
+			bad="${_RET}" && [ -n "${bad}" ]; then
+			set -- ${bad}
+			bad_id=${1}
+			error "un-registering invalid $bad"
+ ${EC2PRE}deregister "${bad_id}"
+ fi
+ fail "failed to register ${manifest}"
+ }
+
+ debug 1 "registered at ${bucket}/${manifest} as ${img_id}"
+
+fi
+debug 1 "${img_id} ${bucket}/${manifest}"
+
+if [ -z "${output}" -o "${output}" = "-" ]; then
+ printf "%s\t%s\n" "${img_id}" "${bucket}/${manifest}"
+else
+ printf "%s\t%s\n" "${img_id}" "${bucket}/${manifest}" >> "${output}"
+fi
+
+for user in ${add_acl}; do
+ run "${wdir}" "add_user.${user}" \
+ "add ${user} to ${manifest}" \
+ ${EC2PRE}modify-image-attribute \
+ --launch-permission --add "${user}" "${img_id}" ||
+ fail "failed to add launch permission for ${user} to ${img_id}"
+done
+
+exit 0
+
+# vi: ts=4 noexpandtab
=== added file 'bin/cloud-publish-tarball'
--- bin/cloud-publish-tarball 1970-01-01 00:00:00 +0000
+++ bin/cloud-publish-tarball 2012-12-17 12:41:23 +0000
@@ -0,0 +1,289 @@
+#!/bin/sh
+#
+# cloud-publish-tarball - wrapper for publishing cloud tarballs
+#
+# Copyright (C) 2010 Canonical Ltd.
+#
+# Authors: Scott Moser <smoser@xxxxxxxxxxxxx>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, version 3 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+EC2PRE=${EC2PRE:-euca-}
+TMPD=""
+VERBOSITY=1
+error() { echo "$@" 1>&2; }
+debug() {
+ [ ${VERBOSITY} -ge $1 ] || return 0;
+ shift
+ error "$@"
+}
+log() { debug "$1" "$(date): ====== $2 ======" ; }
+fail() { [ $# -eq 0 ] || error "$@"; exit 1; }
+cleanup() {
+ [ -n "${TMPD}" -a -d "${TMPD}" ] || return 0;
+ debug 2 "cleaning up ${TMPD}"
+ rm -Rf "${TMPD}";
+}
+bad_Usage() { Usage 1>&2; [ $# -eq 0 ] || error "$@"; exit 1; }
+Usage() {
+ cat <<EOF
+Usage: ${0##*/} [ options ] tarfile bucket [arch]
+
+   register a UEC tarball (i.e., per http://cloud-images.ubuntu.com)
+ if arch is not provided, a name-based attempt is made to guess
+ tarfile may be a url
+
+ options:
+ --hook-img e invoke 'e' with full path to downloaded disk image
+ -k | --kernel k use previously registered kernel with id 'k'
+ specify 'none' for no kernel
+ -K | --kernel-file f bundle, upload, use file 'f' as kernel
+ --rename-kernel k rename kernel to 'k' (applies to loader)
+ -r | --ramdisk r use previously registered ramdisk with id 'r'
+ specify 'none' for no ramdisk
+ -R | --ramdisk-file f bundle, upload, use file 'f' as ramdisk
+ --rename-ramdisk r rename ramdisk to 'r'
+ --resize s resize the partition image before uploading
+ 's' must be valid input to cloud-resize-image
+ --save-downloaded if the image is a url, save it to '.'
+ -q | --quiet be quiet, only output published ids
+ -l | --use-loader use the loader kernel rather than linux kernel
+ --rename-image f rename image to 'f' before publishing
+
+ Example:
+ - ${0##*/} lucid-cloud-i386.tar.gz my-lucid-bucket i386
+EOF
+}
+
+upload_register() {
+ local out="" ret=0
+ out=$(cloud-publish-image "${@}") || {
+ ret=$?
+ printf "%s" "${out}"
+ return $ret
+ }
+ set -- ${out}
+ _RET=${1}
+}
+
+dl() {
+ # dl url, target, quiet
+ local url=${1} target=${2} quiet=${3:-1}
+ if [ -f "${url}" ]; then
+ [ "${target}" = "-" ] && { cat "$url"; return; }
+ cp "$url" "$target"
+ return
+ fi
+ local qflag="-q"
+ [ "$quiet" = "0" ] && qflag=""
+
+ wget $qflag --progress=dot:mega "$url" -O "$target" ||
+ return 1
+}
+
+dl_input_image() {
+ # this downloads an image if necessary and sets _RET to location of image
+ local input="$1" save_dir="${2:-.}" ret="" quiet=0
+ [ $VERBOSITY -eq 0 ] && quiet=1 # this differs from cloud-publish-image
+ case "$input" in
+ file://*)
+ ret="$save_dir/${input##*/}"
+ dl "${input#file://}" "$ret" $quiet || return $?;;
+ http://*|ftp://*|https://*)
+ ret="$save_dir/${input##*/}"
+ dl "$input" "$ret" $quiet || return $?
+ ;;
+ *) ret="$input";;
+ esac
+ _RET="$ret"
+}
+
+[ "${CLOUD_UTILS_WARN_UEC:-0}" = "0" ] && _n="${0##*/}" && [ "${_n#uec}" != "${_n}" ] &&
+	error "WARNING: '${0##*/}' is now 'cloud${_n#uec}'. Please update your tools or docs" &&
+ export CLOUD_UTILS_WARN_UEC=1
+
+short_opts="hlk:K:qr:R:"
+long_opts="help,hook-img:,kernel:,kernel-file:,quiet,use-loader,ramdisk:,ramdisk-file:,rename-image:,rename-kernel:,rename-ramdisk:,resize:,save-downloaded"
+getopt_out=$(getopt --name "${0##*/}" --shell sh \
+ --options "${short_opts}" --long "${long_opts}" -- "$@") &&
+ eval set -- "${getopt_out}" ||
+ bad_Usage
+
+ramdisk=""
+kernel=""
+loader=""
+eki=""
+eri=""
+image=""
+emi=""
+resize=""
+use_loader=0
+rename_image=""
+rename_kernel=""
+rename_ramdisk=""
+save_dl=0
+hook_img=""
+
+while [ $# -ne 0 ]; do
+ cur=${1}; next=${2};
+ case "$cur" in
+ --) shift; break;;
+ -h|--help) Usage; exit 0;;
+ --hook-img)
+ [ -z "${hook_img}" ] || bad_Usage "only one --hook-img supported";
+ [ -x "$next" ] || bad_Usage "--hook-img is not executable"
+ hook_img=$(readlink -f "$next") ||
+ bad_Usage "could not find full path to $next"
+ shift;;
+ -k|--kernel) eki=${next}; shift;;
+ -K|--kernel-file)
+ [ -f "${next}" ] && kernel=$(readlink -f "${next}") ||
+ fail "failed to get path to ${next}"
+ shift;;
+ -q|--quiet) VERBOSITY=0;;
+ -r|--ramdisk) eri=${next}; shift;;
+ -R|--ramdisk-file)
+ [ -f "${next}" ] && ramdisk=$(readlink -f "${next}") ||
+ fail "failed to get path to ${next}"
+ shift;;
+ --rename-image) rename_image=${next}; shift;;
+ --rename-kernel) rename_kernel=${next}; shift;;
+ --rename-ramdisk) rename_ramdisk=${next}; shift;;
+ --save-downloaded) save_dl=1;;
+ --use-loader) use_loader=1;;
+ --resize) resize=${next}; shift;;
+ esac
+ shift;
+done
+
+tarball=${1}
+bucket=${2}
+arch=${3}
+
+[ $# -eq 3 -o $# -eq 2 ] || bad_Usage
+
+[ -n "${eki}" -a ${use_loader} -ne 0 ] &&
+ bad_Usage "--use-loader is incompatible with --kernel"
+
+if [ -z "${arch}" ]; then
+ case "${tarball}" in
+ *i386*) arch=i386;;
+ *amd64*|*x86_64*) arch=amd64;;
+ *) fail "unable to guess arch by tarball name. give 3rd arg";;
+ esac
+fi
+
+[ "$arch" = "amd64" ] && iarch="x86_64" || iarch="${arch}"
+
+# before extracting the tarball, try to verify that the environment
+# is set up, by invoking another euca command (LP: #526504)
+${EC2PRE}describe-images >/dev/null ||
+	fail "Unable to run ${EC2PRE}describe-images. Is environment for ${EC2PRE} set up?"
+
+utmp=${TEMPDIR:-${TMPDIR:-/tmp}}
+TMPD=$(mktemp -d "${utmp}/${0##*/}.XXXXXX") || fail "failed make temp"
+trap cleanup EXIT
+
+save_dir="${TMPD}"
+[ $save_dl -eq 1 ] && save_dir=$PWD
+
+dl_input_image "$tarball" "$save_dir" && tarball="$_RET" ||
+	fail "failed to download tarball $tarball to $save_dir"
+
+[ -f "${tarball}" ] && tbf=$(readlink -f "${tarball}") ||
+ fail "bad tarball: ${tarball}";
+
+start=$PWD
+
+cd "${TMPD}"
+
+log 1 "extracting image"
+tar -S -xvzf "${tbf}" >list.out || fail "failed extract ${tarball}";
+
+while read x; do
+ [ -f "${x}" ] || continue
+ case "$x" in
+ *vmlinuz*)
+ [ -z "${kernel}" -a -z "${eki}" ] && kernel=${x};;
+ *initrd*)
+ [ -z "${ramdisk}" -a -z "${eri}" ] && ramdisk=${x};;
+ *.img) image=${x};;
+ *-loader) [ -z "${loader}" ] && loader=${x};;
+ esac
+done < list.out
+
+[ -z "${image}" ] && fail "can't find image";
+
+[ -z "${loader}" -a ${use_loader} -eq 1 ] &&
+ fail "--use-loader specified, but no loader found in tarball"
+
+# if loader was found, and no kernel given (or found)
+# then set kernel to loader
+if [ -n "${loader}" ] &&
+ { [ ${use_loader} -eq 1 ] || [ -z "${kernel}" -a -z "${eki}" ]; } ; then
+ debug 1 "using loader ${loader##*/} as kernel"
+ kernel=${loader}
+fi
+
+[ -n "${kernel}" -o -n "${eki}" ] ||
+ bad_Usage "can't find kernel. specify '--kernel none' to register none";
+[ -n "${ramdisk}" -o -n "${eri}" ] || {
+ debug 1 "Warning: no ramdisk found, assuming '--ramdisk none'"
+ eri="none";
+}
+
+debug 1 "kernel : ${eki:-${kernel}}"
+debug 1 "ramdisk: ${eri:-${ramdisk}}"
+debug 1 "image : ${image##*/}"
+
+if [ -n "${resize}" ]; then
+ log 1 "resizing ${image##*/} to ${resize}"
+ out=$(resize-part-image "${image}" "${resize}" 2>&1) || {
+ error "${out}";
+ fail "failed to resize image file to ${resize}";
+ }
+fi
+
+if [ -n "${kernel}" ]; then
+ log 1 "bundle/upload kernel"
+ upload_register --type kernel \
+ ${rename_kernel:+"--rename=${rename_kernel}"} \
+ "${iarch}" "${kernel}" "${bucket}" ||
+ fail "failed to upload kernel"
+ eki=${_RET}
+fi
+
+if [ -n "${ramdisk}" ]; then
+ log 1 "bundle/upload ramdisk"
+ upload_register --type ramdisk \
+ ${rename_ramdisk:+"--rename=${rename_ramdisk}"} \
+ "${iarch}" "${ramdisk}" "${bucket}" ||
+ fail "failed ramdisk bundle/upload"
+ eri=${_RET}
+fi
+
+log 1 "bundle/upload image"
+upload_register --type image \
+ ${rename_image:+"--rename=${rename_image}"} \
+ ${hook_img:+"--hook-img=${hook_img}"} \
+ "${iarch}" "${image}" "${bucket}" \
+ --kernel "${eki}" --ramdisk "${eri}" ||
+ fail "failed bundle/upload/register of image"
+emi=${_RET}
+
+log 1 "done"
+printf 'emi="%s"; eri="%s"; eki="%s";\n' "${emi}" "${eri}" "${eki}"
+
+# vi: ts=4 noexpandtab
=== added file 'bin/cloud-publish-ubuntu'
--- bin/cloud-publish-ubuntu 1970-01-01 00:00:00 +0000
+++ bin/cloud-publish-ubuntu 2012-12-17 12:41:23 +0000
@@ -0,0 +1,169 @@
+#!/bin/bash
+# cloud-publish-ubuntu
+
+VERBOSITY=1
+TEMP_D=""
+
+error() { echo "$@" 1>&2; }
+errorp() { printf "$@" 1>&2; }
+fail() { [ $# -eq 0 ] || error "$@"; exit 1; }
+failp() { [ $# -eq 0 ] || errorp "$@"; exit 1; }
+
+Usage() {
+ cat <<EOF
+Usage: ${0##*/} [ options ] arch release bucket
+
+ import a cloud image to your local cloud.
+   The image is downloaded from the Ubuntu cloud-images site and
+   published to the configured cloud.
+
+ options:
+ --save-downloaded save the downloaded image in '.'
+ -q | --quiet be quiet, only output the AMI id registered
+ --disk use the disk image (default is partition)
+ --stream stream: 'released' or 'daily' (default: released)
+
+ the following options pass through to cloud-publish-tarball
+ or cloud-publish-image:
+
+ -l|--add-launch ID : ID can be "all", or "none", or numerical ID
+   --allow-existing : if an image is already registered
+ simply report as if work was done
+ --hook-img EXE : invoke executable 'EXE' with full path to
+ downloaded disk image file
+ -o|--output <file> : write registered id and manifest to file
+
+ --save-downloaded : if the image is a url, save it to '.'
+ --image-to-raw : convert disk format to raw before publishing
+
+   --use-loader : use the loader kernel rather than linux kernel
+ -k | --kernel k : use previously registered kernel with id 'k'
+ specify 'none' for no kernel
+ -K | --kernel-file f : bundle, upload, use file 'f' as kernel
+ -r | --ramdisk r : use previously registered ramdisk with id 'r'
+ specify 'none' for no ramdisk
+ -R | --ramdisk-file f : bundle, upload, use file 'f' as ramdisk
+ --resize s : resize the partition image before uploading
+ 's' must be valid input to cloud-resize-image
+ -B | --block-device-mapping m : specify block device mapping in bundle
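+
+   Example:
+   - ${0##*/} i386 lucid my-lucid-bucket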
+EOF
+}
+
+bad_Usage() { Usage 1>&2; [ $# -eq 0 ] || error "$@"; exit 1; }
+cleanup() {
+ [ -z "${TEMP_D}" -o ! -d "${TEMP_D}" ] || rm -Rf "${TEMP_D}"
+}
+
+debug() {
+ local level=${1}; shift;
+ [ "${level}" -ge "${VERBOSITY}" ] && return
+ error "${@}"
+}
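+# is_ptarg short long cur next: test whether option "cur" is one of the
+# pass-through options; on success _RET holds the option (plus its value,
+# if the option takes one)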
+is_ptarg() {
+ local short="$1" long=",$2," cur="$3" next="$4"
+ _RET=( )
+ case "$cur" in
+ --*)
+ flag=${cur#--}
+ if [ "${long#*,${flag}:,}" != "$long" ]; then
+ _RET=( "$cur" "$next" )
+ elif [ "${long#*,${flag},}" != "$long" ]; then
+ _RET=( "$cur" )
+ fi
+ ;;
+ -*)
+ flag=${cur#-}
+ if [ "${short#*${flag}:}" != "$short" ]; then
+ _RET=( "$cur" "$next" )
+ elif [ "${short#*${flag}}" != "$short" ]; then
+ _RET=( "$cur" )
+ fi
+ ;;
+ esac
+ [ "${#_RET[@]}" -ne 0 ]
+}
+
+pt_short_opts="B:h:k:K:l:r:R:"
+pt_long_opts="add-launch:,allow-existing,block-device-mapping:,hook-img:,image-to-raw,kernel:,kernel-file:,ramdisk:,ramdisk-file:,resize:,save-downloaded,use-loader"
+
+short_opts="${pt_short_opts}hno:qv"
+long_opts="${pt_long_opts},dry-run,disk,help,output:,quiet,stream:,verbose"
+
+getopt_out=$(getopt --name "${0##*/}" \
+ --options "${short_opts}" --long "${long_opts}" -- "$@") &&
+ eval set -- "${getopt_out}" ||
+ bad_Usage
+
+output=""
+dry_run=0
+save_dl=0
+arch=""
+release=""
+bucket=""
+pt_args=() # pass through arguments
+disk=0
+stream="released"
+vflags=""
+
+while [ $# -ne 0 ]; do
+ cur=${1}; next=${2};
+ case "$cur" in
+ --disk) disk=1;;
+ -h|--help) Usage ; exit 0;;
+ -o|--output) output=$next; shift;;
+ -n|--dry-run) dry_run=1;;
+	-q|--quiet) VERBOSITY=0; vflags="";;
+ --stream) stream="$next"; shift;;
+ -v|--verbose)
+ vflags="${vflags}v"
+ VERBOSITY=$((${VERBOSITY}+1));;
+ --) shift; break;;
+ -*|--*)
+ if is_ptarg "$pt_short_opts" "$pt_long_opts" "$cur" "$next"; then
+ pt_args=( "${pt_args[@]}" "${_RET[@]}" )
+ [ "${#_RET[@]}" -eq 2 ] && shift
+ else
+ fail "confused by ${cur}";
+ fi
+ ;;
+ esac
+ shift;
+done
+
+[ $# -eq 3 ] || bad_Usage "must provide arch, release, bucket"
+arch="$1"
+release="$2"
+bucket="$3"
+
+url=$(EC2_URL="" ubuntu-cloudimg-query "$arch" "$release" "$stream" \
+ --format="%{pubname} %{url}\n") ||
+ fail "ubuntu-cloudimg-query failed for $arch, $release, $stream"
+
+set -- ${url}
+pubname=${1}
+url=${2}
+cmd=( )
+
+if [ $disk -eq 1 ]; then
+ [ -n "$vflags" ] && pt_args[${#pt_args[@]}]="-$vflags"
+
+ url="${url%.tar.gz}-disk1.img"
+ cmd=( cloud-publish-image "${pt_args[@]}" --rename "$pubname"
+ "$arch" "$url" "$bucket" )
+else
+	[ ${VERBOSITY} -eq 0 ] && pt_args[${#pt_args[@]}]="--quiet"
+
+ cmd=( cloud-publish-tarball "${pt_args[@]}"
+ --rename-image "$pubname"
+ "$url" "$bucket" "$arch" )
+fi
+
+if [ $dry_run -eq 1 ]; then
+ error "${cmd[@]}"
+else
+ debug 1 "running:" "${cmd[@]}"
+ "${cmd[@]}"
+fi
+
+# vi: ts=4 noexpandtab
=== added file 'bin/cloud-run-instances'
--- bin/cloud-run-instances 1970-01-01 00:00:00 +0000
+++ bin/cloud-run-instances 2012-12-17 12:41:23 +0000
@@ -0,0 +1,715 @@
+#!/usr/bin/python
+#
+# Copyright (C) 2010 Canonical Ltd.
+#
+# Authors: Dustin Kirkland <kirkland@xxxxxxxxxxxxx>
+# Scott Moser <scott.moser@xxxxxxxxxxxxx>
+# Clint Byrum <clint.byrum@xxxxxxxxxxxxx>
+# Tom Ellis <tom.ellis@xxxxxxxxxxxxx>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, version 3 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+import os
+import string
+import sys
+import signal
+import re
+import base64
+from optparse import OptionParser
+from socket import getaddrinfo
+import time
+import logging
+from paramiko import SSHClient, AutoAddPolicy, AuthenticationException
+import paramiko
+from subprocess import Popen, PIPE
+
+finished = "FINISHED"
+
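+# cloud-config user-data template: runs ssh-import-id (falling back to the
+# older ssh-import-lp-id) as the ubuntu user to import SSH keys for the
+# given Launchpad ids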
+CC_IMPORT_SSH = """#cloud-config
+runcmd:
+ - [ sudo, -Hu, ubuntu, sh, '-c',
+ "c=ssh-import-id; which $c >/dev/null || c=ssh-import-lp-id; $c $1",
+ "--", "%s" ]
+"""
+
+
+class SafeConnectException(Exception):
+ pass
+
+
+class Instance(object):
+ pass
+
+
+class TemporaryMissingHostKeyPolicy(AutoAddPolicy):
+ """ does not save to known_hosts, but does save the keys in an array """
+ def __init__(self):
+ self._keys = []
+ AutoAddPolicy.__init__(self)
+
+ def missing_host_key(self, client, hostname, key):
+ self._keys.append(key)
+
+ def getKeys(self):
+ return self._keys
+
+
+class PermanentMissingHostKeyPolicy(TemporaryMissingHostKeyPolicy):
+    """ also has the behavior of the parent AutoAddPolicy """
+    def missing_host_key(self, client, hostname, key):
+ self._keys.append(key)
+ AutoAddPolicy.missing_host_key(self, client, hostname, key)
+
+
+class ConsoleFingerprintScanner(object):
+ def __init__(self, instance_id, hostname, provider, options, sleeptime=30):
+ self.state = "working"
+ self.instance_id = instance_id
+ self.hostname = hostname
+ self.provider = provider
+ self.sleeptime = sleeptime
+ self.fingerprint = None
+ self.options = options
+ self.logger = logging.getLogger('console-scanner(%s)' % instance_id)
+
+ def scan(self):
+ self.logger.debug('scraping fingerprints for instance_id = %s',
+ self.instance_id)
+ try:
+ while self.fingerprint is None:
+ console_data = self.get_console_output()
+ self.fingerprint = self.get_fingerprints_in_console_data(
+ console_data)
+ if self.fingerprint is not None:
+ self.fingerprint = (int(self.fingerprint[0]),
+ self.fingerprint[1], self.fingerprint[3])
+ else:
+ self.logger.debug('sleeping %d seconds',
+ self.options.sleep_time)
+ time.sleep(self.options.sleep_time)
+ except None:
+ pass
+ return self.fingerprint
+
+ def get_console_output(self):
+ cmd = '%s-get-console-output' % self.provider
+ args = [cmd]
+ args.append(self.instance_id)
+
+ self.logger.debug('running %s', args)
+ rconsole = Popen(args, stdout=PIPE)
+
+ ret = []
+ try:
+ for line in rconsole.stdout:
+ ret.append(line.strip())
+ finally:
+ cmdout = rconsole.wait()
+
+ if bool(cmdout):
+            raise Exception('%s failed with return code = %d' %
+                            (cmd, cmdout))
+
+ return ret
+
+ def get_fingerprints_in_console_data(self, output):
+ # return an empty list on "no keys found"
+ # return a list of key fingerprint data on success
+ # where each key fingerprint data is an array like:
+ # (2048 c7:c8:1d:0f:d9:....0a:8a:fe localhost (RSA))
+ begin_marker = "-----BEGIN SSH HOST KEY FINGERPRINTS----"
+ end_marker = "----END SSH HOST KEY FINGERPRINTS-----"
+ i = 0
+ while i < len(output):
+ if output[i].find(begin_marker) > -1:
+ while i < len(output) and output[i].find(end_marker) == -1:
+ self.logger.debug(output[i].strip())
+ toks = output[i].split(" ")
+ self.logger.debug(toks)
+ if len(toks) == 5:
+ # rip off "ec2:"
+ toks = toks[1:]
+ if len(toks) == 4 and toks[3] == "(RSA)":
+ self.logger.debug('found %s on line %d', toks, i)
+                        return toks
+ i = i + 1
+ break
+ i = i + 1
+ self.logger.debug(
+ 'did not find any fingerprints in output! (lines=%d)', i)
+ return None
+
+
+class SshKeyScanner(object):
+ def __init__(self, instance_id, hostname, options, sleeptime=30):
+ self.state = "working"
+ self.instance_id = instance_id
+ self.hostname = hostname
+ self.sleeptime = sleeptime
+ self.fingerprint = None
+ self.keys = None
+ self.options = options
+ self.port = 22
+ self.logger = logging.getLogger('ssh-key-scanner(%s)' % instance_id)
+ self.client = None
+ self.connected = False
+
+ def scan(self):
+ self.logger.debug('getting fingerprints for %s', self.hostname)
+ try:
+ fingerprints = self.get_fingerprints_for_host()
+ self.logger.debug('fingerprints = %s', fingerprints)
+ if (len(fingerprints) > 0):
+ self.state = "finished"
+ self.fingerprint = fingerprints[0]
+ except None:
+ pass
+ return self.fingerprint
+
+ def get_fingerprints_for_host(self):
+ # return an empty list on "no keys found"
+ # return a list of key fingerprint data on success
+ # where each key fingerprint data is an array like:
+ # (2048 c7:c8:1d:0f:d9:..:6f:0a:8a:fe localhost (RSA))
+
+ # use paramiko here
+ self.client = SSHClient()
+ client = self.client
+ client.set_log_channel('ssh-key-scanner(%s)' % self.instance_id)
+
+ if self.options.known_hosts is not None:
+            # Use the permanent policy so the keys are saved; otherwise
+            # that step would be skipped in AutoAddPolicy.missing_host_key.
+            policy = PermanentMissingHostKeyPolicy()
+ for path in self.options.known_hosts:
+ if not os.path.isfile(path):
+ # if the file doesn't exist, then
+ # create it empty
+ fp = open(path, "w")
+ fp.close()
+ client.load_host_keys(path)
+ else:
+ policy = TemporaryMissingHostKeyPolicy()
+ client.set_missing_host_key_policy(policy)
+
+ pkey = None
+ if self.options.privkey is not None:
+ # TODO support password protected key file
+ pkey = paramiko.RSAKey.from_private_key_file(self.options.privkey)
+
+ retries = 0
+
+ allkeys = []
+
+ while 1:
+ try:
+ client.connect(self.hostname, self.port,
+ username=self.options.ssh_user, pkey=pkey)
+ self.connected = True
+ break
+ except AuthenticationException as (message):
+ self.logger.warning('auth failed (non fatal) %s', message)
+ break
+ except Exception as (e):
+ retries += 1
+ if retries > 5:
+ raise Exception('gave up after retrying ssh %d times' %
+ retries)
+ self.logger.info(e)
+ self.logger.debug('retry #%d... sleeping %d seconds..',
+ retries, self.options.sleep_time)
+ time.sleep(self.options.sleep_time)
+
+ rlist = []
+
+ allkeys.extend(policy.getKeys())
+ allkeys.append(client.get_transport().get_remote_server_key())
+
+ for key in allkeys:
+
+ if type(key) == paramiko.RSAKey or type(key) == paramiko.PKey:
+ keytype = '(RSA)'
+ elif type(key) == paramiko.DSSKey:
+ keytype = '(DSA)'
+ else:
+ raise Exception('Cannot handle type %s == %s' %
+ (type(key).__name__, key))
+
+ fp = key.get_fingerprint().encode("hex")
+ fp = ':'.join(re.findall('..', fp))
+ rlist.append((key.get_bits(), fp, keytype))
+
+ return rlist
+
+ def run_commands(self):
+ if (self.options.ssh_run_cmd is not None and
+ len(self.options.ssh_run_cmd)):
+ if not self.connected:
+ self.logger.critical('cannot run command, ssh did not connect')
+ sys.exit(1)
+ ecmd = ' '.join(self.options.ssh_run_cmd)
+ self.logger.debug('running %s', ecmd)
+ inouterr = self.client.exec_command(ecmd)
+ try:
+ for line in inouterr[1]:
+ print line,
+ except:
+ pass
+ try:
+ for line in inouterr[2]:
+                    print >> sys.stderr, line,
+ except:
+ pass
+
+ if self.connected:
+ self.client.close()
+ self.connected = False
+
+
+def get_auto_instance_type(ami_id, provider):
+ cmd = '%s-describe-images' % provider
+ args = [cmd, ami_id]
+ logging.debug('running %s', args)
+ rimages = Popen(args, stdout=PIPE)
+ deftype = {'i386': 'm1.small', 'x86_64': 'm1.large'}
+
+ try:
+ for line in rimages.stdout:
+            # Just in case there are %'s, don't confuse logging
+ # XXX print these out instead
+ logging.debug(line.replace('%', '%%').strip())
+ parts = line.split("\t")
+ if parts[0] == 'IMAGE':
+ itype = parts[7]
+ if itype in deftype:
+ logging.info('auto instance type = %s', deftype[itype])
+ return deftype[itype]
+ finally:
+ rcode = rimages.wait()
+
+ logging.warning('ami not found, returning default m1.small')
+ return("m1.small")
+
+
+def timeout_handler(signum, frame):
+ logging.critical('timeout reached, exiting')
+ sys.exit(1)
+
+
+def handle_runargs(option, opt_str, value, parser):
+ delim = getattr(parser.values, "runargs_delim", None)
+ cur = getattr(parser.values, "runargs", [])
+ if cur is None:
+ cur = []
+ cur.extend(value.split(delim))
+ setattr(parser.values, "runargs", cur)
+ return
+
+
+def main():
+ parser = OptionParser(
+ usage="usage: %prog [options] ids|(-- raw args for provider scripts)")
+ parser.add_option("-t", "--instance-type", dest="inst_type",
+ help="instance type", metavar="TYPE",
+ default="auto")
+ parser.add_option("-k", "--key", dest="keypair_name",
+ help="keypair name", metavar="TYPE",
+ default="auto")
+ parser.add_option("-n", "--instance-count", dest="count",
+ help="instance count", metavar="TYPE", type="int",
+ default=1)
+ parser.add_option("", "--ssh-privkey", dest="privkey",
+ help="private key to connect with (ssh -i)", metavar="id_rsa",
+ default=None)
+ parser.add_option("", "--ssh-pubkey", dest="pubkey",
+        help="public key to insert into image", metavar="id_rsa.pub",
+ default=None)
+ parser.add_option("", "--ssh-run-cmd", dest="ssh_run_cmd",
+ action="append", nargs=0,
+ help="run this command when ssh'ing", default=None)
+ parser.add_option("", "--ssh-user", dest="ssh_user",
+ help="connect with ssh as user", default=None)
+ parser.add_option("", "--associate-ip", dest="ip",
+ help="associate elastic IP with instance", metavar="IP_ADDR",
+ default=None)
+ parser.add_option("", "--attach-volume", dest="vol",
+ help="attach EBS volume with instance", metavar="VOLUME_ID",
+ default=None)
+ parser.add_option("", "--known-hosts", dest="known_hosts", action="append",
+ metavar="KnownHosts", default=None,
+ help="write host keys to specified known_hosts file. "
+ "Specify multiple times to read keys from multiple files "
+ "(only updates last one)")
+ parser.add_option("-l", "--launchpad-id", dest="launchpad_id",
+ action="append", metavar="lpid", default=None,
+ help="launchpad ids to pull SSH keys from "
+ "(multiple times adds to the list)")
+ parser.add_option("-i", "--instance-ids", dest="instance_ids",
+ action="store_true", default=False,
+ help="expect instance ids instead of ami ids,"
+ "skips -run-instances")
+ parser.add_option("", "--all-instances", dest="all_instances",
+ action="store_true", default=False,
+ help="query all instances already defined "
+ "(running/pending/terminated/etc)")
+ parser.add_option("", "--run-args", dest="runargs", action="callback",
+ callback=handle_runargs, type="string",
+ help="pass option through to run-instances")
+ parser.add_option("", "--run-args-delim", dest="runargs_delim",
+ help="split run-args options with delimiter",
+ default=None)
+ parser.add_option("", "--verify-ssh", dest="verify_ssh",
+ action="store_true",
+ help="verify SSH keys against console output (implies --wait-for=ssh)",
+ default=False)
+ parser.add_option("", "--wait-for", dest="wait_for",
+ help="wait for one of: ssh , running", default=None)
+ parser.add_option("-p", "--provider", dest="provider",
+ help="either euca or ec2", default=None)
+ parser.add_option("-v", "--verbose", action="count", dest="loglevel",
+ help="increase logging level", default=3)
+ parser.add_option("-q", "--quiet", action="store_true", dest="quiet",
+ help="produce no output or error messages", default=False)
+ parser.add_option("", "--sleep-time", dest="sleep_time",
+ help="seconds to sleep between polling", default=2)
+ parser.add_option("", "--teardown", dest="teardown", action="store_true",
+ help="terminate instances at the end", default=False)
+
+ (options, args) = parser.parse_args()
+
+ if (os.path.basename(sys.argv[0]).startswith("uec") and
+ os.getenv("CLOUD_UTILS_WARN_UEC", "0") == "0"):
+ sys.stderr.write("WARNING: '%s' is now 'cloud-run-instances'. %s\n" %
+ (os.path.basename(sys.argv[0]), "Please update tools or docs"))
+
+ if len(args) < 1 and not options.all_instances:
+        parser.error('you must pass at least one AMI ID')
+
+ # loglevel should be *reduced* every time -v is passed,
+ # see logging docs for more
+ if options.quiet:
+ sys.stderr = open('/dev/null', 'w')
+ sys.stdout = sys.stderr
+ else:
+ loglevel = 6 - options.loglevel
+ if loglevel < 1:
+ loglevel = 1
+ # logging module levels are 0,10,20,30 ...
+ loglevel = loglevel * 10
+
+ logging.basicConfig(level=loglevel,
+ format="%(asctime)s %(name)s/%(levelname)s: %(message)s",
+ stream=sys.stderr)
+
+ logging.debug("loglevel = %d", loglevel)
+
+ provider = options.provider
+ if options.provider is None:
+ provider = os.getenv('EC2PRE', 'euca')
+
+ if options.ssh_run_cmd == [()]:
+ options.ssh_run_cmd = args
+
+ if options.known_hosts is None:
+ options.known_hosts = [os.path.expanduser('~/.ssh/known_hosts')]
+
+ if options.known_hosts is not None and len(options.known_hosts):
+ path = None
+ for path in options.known_hosts:
+ if not os.access(path, os.R_OK):
+ logging.warning('known_hosts file %s is not readable!', path)
+ # paramiko writes to the last one
+ if not os.access(path, os.W_OK):
+ logging.critical('known_hosts file %s is not writable!', path)
+
+ logging.debug("provider = %s", provider)
+
+ logging.debug("instance type is %s", options.inst_type)
+
+ if options.instance_ids or options.all_instances:
+
+ if options.all_instances:
+ pending_instance_ids = ['']
+ else:
+ pending_instance_ids = args
+
+ else:
+
+ if len(args) < 1:
+ raise Exception('you must pass at least one AMI ID')
+
+ ami_id = args[0]
+ del(args[0])
+
+ logging.debug("ami_id = %s", ami_id)
+
+ if options.inst_type == "auto":
+ options.inst_type = get_auto_instance_type(ami_id, provider)
+
+ pending_instance_ids = []
+
+ cmd = '%s-run-instances' % provider
+
+ run_inst_args = [cmd]
+
+ # these variables pass through to run-instances
+ run_inst_pt = {
+ "instance-count": options.count,
+ "instance-type": options.inst_type,
+ "key": options.keypair_name,
+ }
+
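+    # illustrative: with --instance-count=2, --instance-type=m1.small and a
+    # keypair named "mykey", the loop below appends
+    #   --instance-count=2 --instance-type=m1.small --key=mykey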
+    for key, val in run_inst_pt.iteritems():
+        if val is not None and val != "":
+            run_inst_args.append("--%s=%s" % (key, val))
+
+ if options.launchpad_id:
+ run_inst_args.append('--user-data')
+ run_inst_args.append(CC_IMPORT_SSH %
+ ' '.join(options.launchpad_id))
+
+ if options.runargs is not None:
+ run_inst_args.extend(options.runargs)
+
+ run_inst_args.append(ami_id)
+
+ # run-instances with pass through args
+ logging.debug("executing %s", run_inst_args)
+ logging.info("starting instances with ami_id = %s", ami_id)
+
+ rinstances = Popen(run_inst_args, stdout=PIPE)
+ #INSTANCE i-32697259 ami-2d4aa444 pending\
+ # 0 m1.small 2010-06-18T18:28:21+0000\
+ # us-east-1b aki-754aa41c \
+ # monitoring-disabled instance-store
+ try:
+ for line in rinstances.stdout:
+            # Just in case there are %'s, don't confuse logging
+ # XXX print these out instead
+ logging.debug(line.replace('%', '%%').strip())
+ parts = line.split("\t")
+ if parts[0] == 'INSTANCE':
+ pending_instance_ids.append(parts[1])
+ finally:
+ rcode = rinstances.wait()
+
+ logging.debug("command returned %d", rcode)
+ logging.info("instances started: %s", pending_instance_ids)
+
+ if bool(rcode):
+ raise Exception('%s failed' % cmd)
+
+ if len(pending_instance_ids) < 1:
+ raise Exception('no instances were started!')
+
+ cmd = '%s-describe-instances' % provider
+
+ instances = []
+
+ timeout_date = time.time() + 600
+
+ signal.signal(signal.SIGALRM, timeout_handler)
+ signal.alarm(600)
+
+ logging.debug("timeout at %s", time.ctime(timeout_date))
+
+ # We must wait for ssh to run commands
+ if options.verify_ssh and not options.wait_for == 'ssh':
+ logging.info('--verify-ssh implies --wait-for=ssh')
+ options.wait_for = 'ssh'
+
+ if options.ssh_run_cmd and not options.wait_for == 'ssh':
+ logging.info('--ssh-run-cmd implies --wait-for=ssh')
+ options.wait_for = 'ssh'
+
+ while len(pending_instance_ids):
+ new_pending_instance_ids = []
+ describe_inst_args = [cmd]
+
+        # remove '' entries, which confuse the underlying commands
+        pids = []
+        for iid in pending_instance_ids:
+            if len(iid):
+                pids.append(iid)
+        if len(pids):
+            describe_inst_args.extend(pids)
+
+ logging.debug('running %s', describe_inst_args)
+ rdescribe = Popen(describe_inst_args, stdout=PIPE)
+ try:
+ for line in rdescribe.stdout:
+ logging.debug(line.replace('%', '%%').strip())
+ parts = line.split("\t")
+ if parts[0] == 'INSTANCE':
+ iid = parts[1]
+ istatus = parts[5]
+ if istatus == 'terminated':
+ logging.debug('%s is terminated, ignoring...', iid)
+ elif istatus != 'running' and options.wait_for:
+ logging.warning('%s is %s', iid, istatus)
+ new_pending_instance_ids.append(iid)
+ elif istatus != 'running' and options.vol:
+ logging.warning('%s is %s', iid, istatus)
+ new_pending_instance_ids.append(iid)
+ else:
+ logging.info("%s %s", iid, istatus)
+ inst = Instance()
+ inst.id = iid
+ inst.hostname = parts[3]
+ inst.output = line
+ instances.append(inst)
+ finally:
+ rcode = rdescribe.wait()
+
+ pending_instance_ids = new_pending_instance_ids
+
+ logging.debug("command returned %d", rcode)
+ logging.debug("pending instances: %s", pending_instance_ids)
+
+ if bool(rcode):
+ raise Exception('%s failed' % cmd)
+
+ if len(pending_instance_ids):
+ logging.debug('sleeping %d seconds', options.sleep_time)
+ time.sleep(options.sleep_time)
+
+ if options.ip:
+ ips = options.ip.split(',')
+ if len(ips) < len(instances):
+ logging.warning(
+ 'only %d ips given, some instances will not get an ip',
+ len(ips))
+ elif len(ips) > len(instances):
+ logging.warning('%d ips given, some ips will not be associated',
+ len(ips))
+
+ rcmds = []
+ ips.reverse()
+ for inst in instances:
+ cmd = '%s-associate-address' % provider
+ if len(ips) < 1:
+ break
+ ip = ips.pop()
+ aargs = [cmd, '-i', inst.id, ip]
+ logging.debug('running %s', aargs)
+ rassociate = Popen(aargs, stdout=PIPE)
+ rcmds.append(rassociate)
+ for rcmd in rcmds:
+        # drain each command's stdout and log it
+ try:
+ for line in rcmd.stdout:
+ logging.info(line)
+ finally:
+ ret = rcmd.wait()
+ if bool(ret):
+ logging.debug('associate-ip returned %d', ret)
+
+ if options.vol:
+ # as you can start multiple instances, support multiple vols like ips,
+ # instead of multiple volumes on one instance
+ vols = options.vol.split(',')
+ if len(vols) < len(instances):
+ logging.warning('only %d volumes given, some instances will not'
+ ' get a volume attached', len(vols))
+ elif len(vols) > len(instances):
+ logging.warning(
+ '%d volumes given, some volumes will not be associated',
+ len(vols))
+
+ rcmds = []
+ vols.reverse()
+ for inst in instances:
+ # instance needs to be 'running' not 'pending' before attaching
+ # volume, otherwise it fails
+ logging.info('waiting for instance to run')
+ cmd = '%s-attach-volume' % provider
+ if len(vols) < 1:
+ break
+ vol = vols.pop()
+ dev = '/dev/sdb'
+ args = [cmd, '-i', inst.id, '-d', dev, vol]
+ logging.debug('running %s', args)
+ logging.info("attaching volume with id = %s to instance id = %s",
+ vol, inst.id)
+ rattach = Popen(args, stdout=PIPE)
+ rcmds.append(rattach)
+ for rcmd in rcmds:
+        # drain each command's stdout and log it
+ try:
+ for line in rcmd.stdout:
+ logging.info(line)
+ finally:
+ ret = rcmd.wait()
+ if bool(ret):
+ logging.debug('attach-volume returned %d', ret)
+
+ if options.wait_for == 'ssh':
+ logging.info('waiting for ssh access')
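+        # one child per instance: each child scans the host key, optionally
+        # verifies it against the console fingerprint, and exits non-zero
+        # on any failure (such instances are dropped from the final list)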
+ for inst in instances:
+ pid = os.fork()
+ if pid == 0:
+ ssh_key_scan = SshKeyScanner(inst.id, inst.hostname, options)
+ ssh_fingerprint = ssh_key_scan.scan()
+ if options.verify_ssh:
+ # For ec2, it can take 3.5 minutes or more to get console
+ # output, do this last, and only if we have to.
+ cons_fp_scan = ConsoleFingerprintScanner(inst.id,
+ inst.hostname, provider, options)
+ console_fingerprint = cons_fp_scan.scan()
+
+ if console_fingerprint == ssh_fingerprint:
+ logging.debug('fingerprint match made for iid = %s',
+ inst.id)
+ else:
+ fmt = 'fingerprints do not match for iid = %s'
+ raise Exception(fmt % inst.id)
+ ssh_key_scan.run_commands()
+ raise SystemExit
+ else:
+ logging.debug('child pid for %s is %d', inst.id, pid)
+ inst.child = pid
+ logging.info('Waiting for %d children', len(instances))
+ final_instances = []
+
+ for inst in instances:
+ try:
+ (pid, status) = os.waitpid(inst.child, 0)
+        except OSError:
+            logging.critical('%s - %d doesn\'t exist anymore?', inst.id,
+                inst.child)
+            continue
+        logging.debug('%d returned status %d', pid, status)
+ if not bool(status):
+ final_instances.append(inst)
+ instances = final_instances
+
+ """ If we reach here, all has happened in the expected manner so
+ we should produce the expected output which is instance-id\\tip\\n """
+
+ final_instance_ids = []
+ for inst in instances:
+ final_instance_ids.append(inst.id)
+
+ if options.teardown:
+ terminate = ['%s-terminate-instances' % provider]
+ terminate.extend(final_instance_ids)
+ logging.debug('running %s', terminate)
+ logging.info('terminating instances...')
+ rterm = Popen(terminate, stdout=sys.stderr, stderr=sys.stderr)
+ rterm.wait()
+
+
+if __name__ == "__main__":
+ main()
+
+# vi: ts=4 expandtab
=== added file 'bin/ec2metadata'
--- bin/ec2metadata 1970-01-01 00:00:00 +0000
+++ bin/ec2metadata 2012-12-17 12:41:23 +0000
@@ -0,0 +1,225 @@
+#!/usr/bin/python
+#
+# Query and display EC2 metadata related to the AMI instance
+# Copyright (c) 2009 Canonical Ltd. (Canonical Contributor Agreement 2.5)
+#
+# Author: Alon Swartz <alon@xxxxxxxxxxxxxxxx>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+instdata_host = "169.254.169.254"
+instdata_ver = "2009-04-04"
+instdata_url = "http://%s/%s" % (instdata_host, instdata_ver)
+
+__doc__ = """
+Query and display EC2 metadata.
+
+If no options are provided, all metadata values will be displayed
+
+Options:
+ -h --help show this help
+
+ --kernel-id display the kernel id
+ --ramdisk-id display the ramdisk id
+ --reservation-id display the reservation id
+
+ --ami-id display the ami id
+ --ami-launch-index display the ami launch index
+ --ami-manifest-path display the ami manifest path
+ --ancestor-ami-ids display the ami ancestor id
+ --product-codes display the ami associated product codes
+ --availability-zone display the ami placement zone
+
+ --instance-id display the instance id
+ --instance-type display the instance type
+
+ --local-hostname display the local hostname
+ --public-hostname display the public hostname
+
+ --local-ipv4 display the local ipv4 ip address
+ --public-ipv4 display the public ipv4 ip address
+
+ --block-device-mapping display the block device id
+ --security-groups display the security groups
+
+ --mac display the instance mac address
+ --profile display the instance profile
+ --instance-action display the instance-action
+
+ --public-keys display the openssh public keys
+ --user-data display the user data (not actually metadata)
+
+ -u | --url URL use URL (default: %s)
+
+""" % instdata_url
+
+import sys
+import time
+import getopt
+import urllib2
+import socket
+import urlparse
+
+METAOPTS = ['ami-id', 'ami-launch-index', 'ami-manifest-path',
+ 'ancestor-ami-ids', 'availability-zone', 'block-device-mapping',
+ 'instance-action', 'instance-id', 'instance-type',
+ 'local-hostname', 'local-ipv4', 'kernel-id', 'mac',
+ 'profile', 'product-codes', 'public-hostname', 'public-ipv4',
+    'public-keys', 'ramdisk-id', 'reservation-id', 'security-groups',
+ 'user-data']
+
+
+class Error(Exception):
+ pass
+
+
+class EC2Metadata:
+ """Class for querying metadata from EC2"""
+
+ def __init__(self, burl=instdata_url):
+ self.burl = burl
+
+ s = urlparse.urlsplit(burl)
+ addr = s.netloc.split(":")[0]
+ port = s.port
+ if s.port is None:
+ port = 80
+ if not self._test_connectivity(addr, port):
+ raise Error("could not establish connection to: %s:%s" %
+ (addr, port))
+
+ @staticmethod
+ def _test_connectivity(addr, port):
+ for i in range(6):
+ s = socket.socket()
+ try:
+ s.connect((addr, port))
+ s.close()
+ return True
+ except socket.error, e:
+ time.sleep(1)
+
+ return False
+
+ def _get(self, uri):
+ url = "%s/%s" % (self.burl, uri)
+ try:
+ resp = urllib2.urlopen(urllib2.Request(url))
+ value = resp.read()
+ except urllib2.HTTPError as e:
+ if e.code == 404:
+ return None
+ # Eucalyptus may raise a 500 (Internal Server Error)
+ if e.code == 500:
+ return None
+ raise
+
+ return value
+
+ def get(self, metaopt):
+ """return value of metaopt"""
+
+ if metaopt not in METAOPTS:
+ raise Error('unknown metaopt', metaopt, METAOPTS)
+
+ if metaopt == 'availability-zone':
+ return self._get('meta-data/placement/availability-zone')
+
+ if metaopt == 'public-keys':
+ data = self._get('meta-data/public-keys')
+ if data is None:
+ return None
+
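+            # meta-data/public-keys lists one entry per line in the form
+            # "0=mykey"; keep only the numeric index of each entry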
+ keyids = [line.split('=')[0] for line in data.splitlines()]
+
+ public_keys = []
+ for keyid in keyids:
+ uri = 'meta-data/public-keys/%d/openssh-key' % int(keyid)
+ public_keys.append(self._get(uri).rstrip())
+
+ return public_keys
+
+ if metaopt == 'user-data':
+ return self._get('user-data')
+
+ return self._get('meta-data/' + metaopt)
+
+
+def get(metaopt):
+ """primitive: return value of metaopt"""
+
+ m = EC2Metadata()
+ return m.get(metaopt)
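+
+# behaviour sketch (values illustrative):
+#   get('instance-id')  ->  'i-32697259'
+#   get('public-keys')  ->  ['ssh-rsa AAAA... mykey']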
+
+
+def display(metaopts, burl, prefix=False):
+ """primitive: display metaopts (list) values with optional prefix"""
+
+ m = EC2Metadata(burl)
+ for metaopt in metaopts:
+ value = m.get(metaopt)
+ if not value:
+ value = "unavailable"
+
+ if prefix:
+ print "%s: %s" % (metaopt, value)
+ else:
+ print value
+
+
+def usage(s=None):
+ """display usage and exit"""
+
+ if s:
+ print >> sys.stderr, "Error:", s
+ print >> sys.stderr, "Syntax: %s [options]" % sys.argv[0]
+ print >> sys.stderr, __doc__
+ sys.exit(1)
+
+
+def main():
+ """handle cli options"""
+
+ try:
+ getopt_metaopts = METAOPTS[:]
+ getopt_metaopts.append('help')
+ getopt_metaopts.append('url=')
+ opts, args = getopt.gnu_getopt(sys.argv[1:], "hu:", getopt_metaopts)
+ except getopt.GetoptError, e:
+ usage(e)
+
+ burl = instdata_url
+
+ metaopts = []
+ prefix = False
+ for opt, val in opts:
+ if opt in ('-h', '--help'):
+ usage()
+ if opt in ('-u', '--url'):
+ burl = val
+ continue
+
+ metaopts.append(opt.replace('--', ''))
+
+ if len(metaopts) == 0:
+ prefix = True
+ metaopts = METAOPTS
+
+ display(metaopts, burl, prefix)
+
+
+if __name__ == "__main__":
+ main()
+
+# vi: ts=4 expandtab
=== added file 'bin/growpart'
--- bin/growpart 1970-01-01 00:00:00 +0000
+++ bin/growpart 2012-12-17 12:41:23 +0000
@@ -0,0 +1,214 @@
+#!/bin/sh
+# Copyright (C) 2011 Canonical Ltd.
+#
+# Authors: Scott Moser <smoser@xxxxxxxxxxxxx>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, version 3 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# the fudge factor. if it's within this many 512 byte sectors, don't bother
+FUDGE=${GROWPART_FUDGE:-$((20*1024))}
+TEMP_D=""
+RESTORE_FROM=""
+VERBOSITY=0
+DISK=""
+
+error() { echo "$@" 1>&2; }
+fail() { [ $# -eq 0 ] || echo "FAILED:" "$@"; exit 1; }
+nochange() { echo "NOCHANGE:" "$@"; exit 0; }
+changed() { echo "CHANGED:" "$@"; exit 0; }
+cleanup() {
+ if [ -n "${RESTORE_FROM}" ]; then
+ error "***** WARNING: Resize failed, attempting to revert ******"
+ if sfdisk --no-reread "${DISK}" ${CHS} -I "${RESTORE_FROM}"; then
+ error "***** Appears to have gone OK ****"
+ else
+ error "***** FAILED! or original partition table looked like: ****"
+ cat "${RESTORE_HUMAN}" 1>&2
+ fi
+ fi
+ [ -z "${TEMP_D}" -o ! -d "${TEMP_D}" ] || rm -Rf "${TEMP_D}"
+}
+debug() {
+ local level=${1}; shift;
+ [ "${level}" -gt "${VERBOSITY}" ] && return
+ error "${@}"
+}
+mktemp_d() {
+	# just a mktemp -d that doesn't need mktemp if it's not there.
+ _RET=$(mktemp -d "${TMPDIR:-/tmp}/${0##*/}.XXXXXX" 2>/dev/null) && return
+ _RET=$(umask 077 && t="${TMPDIR:-/tmp}/${0##*/}.$$" && mkdir "${t}" &&
+ echo "${t}")
+ return
+}
+
+Usage() {
+ cat <<EOF
+${0##*/} disk partition
+ rewrite partition table so that partition takes up all the space it can
+ options:
+    -h | --help       print Usage and exit
+ --fudge F if part could be resized, but change would be
+ less than 'F', do not resize (default: ${FUDGE})
+ -N | --dry-run only report what would be done, show new 'sfdisk -d'
+ -v | --verbose increase verbosity / debug
+
+ Example:
+ - ${0##*/} /dev/sda 1
+ Resize partition 1 on /dev/sda
+EOF
+}
+bad_Usage() { Usage 1>&2; error "$@"; exit 1; }
+
+#short_opts="hNv"
+#long_opts="help,dry-run,fudge:,verbose"
+#getopt_out=$(getopt --name "${0##*/}" \
+# --options "${short_opts}" --long "${long_opts}" -- "$@") &&
+# eval set -- "${getopt_out}" ||
+# bad_Usage
+
+dry_run=0
+fudge=${FUDGE}
+disk=""
+part=""
+while [ $# -ne 0 ]; do
+ cur=${1}; next=${2};
+ case "$cur" in
+ -h|--help) Usage; exit 0;;
+ --fudge) fudge=${next}; shift;;
+ --dry-run) dry_run=1;;
+ -v|--verbose) VERBOSITY=$(($VERBOSITY+1));;
+ --) shift; break;;
+ -*) fail "unknown option ${cur}";;
+ *)
+ if [ -z "${disk}" ]; then
+ disk=${cur};
+ else
+ [ -z "${part}" ] || fail "confused by arg ${cur}"
+ part=${cur};
+ fi
+ ;;
+ esac
+ shift;
+done
+
+[ -n "${disk}" ] || bad_Usage "must supply disk and partition-number"
+[ -n "${part}" ] || bad_Usage "must supply partition-number"
+
+[ -e "${disk}" ] || { fail "${disk}: does not exist"; }
+
+[ "${part#*[!0-9]}" = "${part}" ] || fail "partition-number must be a number"
+
+mktemp_d && TEMP_D="${_RET}" || fail "failed to make temp dir"
+trap cleanup EXIT
+
+change_out=${TEMP_D}/change.out
+dump_out=${TEMP_D}/dump.out
+new_out=${TEMP_D}/new.out
+dump_mod=${TEMP_D}/dump.mod
+tmp="${TEMP_D}/tmp.out"
+err="${TEMP_D}/err.out"
+orig_bin="${TEMP_D}/orig.save"
+RESTORE_HUMAN="${TEMP_D}/recovery"
+
+# --show-pt-geometry outputs something like
+# /dev/sda: 164352 cylinders, 4 heads, 32 sectors/track
+sfdisk "${disk}" --show-pt-geometry > "${tmp}" 2>"${err}" &&
+ read _devc cyl _word1 heads _word2 sectors _word3 < "${tmp}" &&
+ CHS="-C $cyl -H $heads -S $sectors" ||
+ fail "failed to get CHS from ${disk}"
+
+tot=$((${cyl}*${heads}*${sectors}))
+
+debug 1 "geometry is $CHS. total size=${tot}"
+sfdisk ${CHS} -uS -d "${disk}" > "${dump_out}" 2>"${err}" ||
+ fail "failed to dump sfdisk info for ${disk}"
+
+{
+echo "## sfdisk ${CHS} -uS -d ${disk}";
+cat "${dump_out}"
+} > "${RESTORE_HUMAN}"
+[ $? -eq 0 ] || fail "failed to save sfdisk -d output"
+
+sed -e 's/,//g; s/start=/start /; s/size=/size /' "${dump_out}" > "${dump_mod}"
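+# stripping '=' and ',' turns a ${dump_out} line such as
+#   /dev/sda1 : start= 16065, size= 208782, Id=83
+# into a form where awk can read $4 as the start and $6 as the size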
+
+dpart="${disk}${part}" # disk and partition number
+if [ -b "${disk}p${part}" -a "${disk%[0-9]}" != "${disk}" ]; then
+ # for block devices that end in a number (/dev/nbd0)
+ # the partition is "<name>p<partition_number>" (/dev/nbd0p1)
+ dpart="${disk}p${part}"
+elif [ "${disk#/dev/loop[0-9]}" != "${disk}" ]; then
+	# for /dev/loop devices, sfdisk output will also be in the
+	# <name>p<number> format, even though no such device node exists.
+ dpart="${disk}p${part}"
+fi
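+# e.g. disk=/dev/sda part=1 gives dpart=/dev/sda1, while disk=/dev/nbd0
+# part=1 gives dpart=/dev/nbd0p1 (when that partition node exists)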
+
+pt_start=$(awk '$1 == pt { print $4 }' "pt=${dpart}" < "${dump_mod}") &&
+ pt_size=$(awk '$1 == pt { print $6 }' "pt=${dpart}" < "${dump_mod}") &&
+ [ -n "${pt_start}" -a -n "${pt_size}" ] &&
+ pt_end=$((${pt_size}+${pt_start})) ||
+ fail "failed to get start and end for ${dpart} in ${disk}"
+
+# find the minimal starting location that is >= pt_end
+max_end=$(awk '
+ $3 == "start" { if($4 >= pt_end && $4 < min) { min = $4 } }
+ END { printf("%s\n",min); }' \
+ min=${tot} pt_end=${pt_end} "${dump_mod}") &&
+ [ -n "${max_end}" ] ||
+ fail "failed to get max_end for partition ${part}"
+
+debug 1 "max_end=${max_end} tot=${tot} pt_end=${pt_end} pt_start=${pt_start} pt_size=${pt_size}"
+[ $((${pt_end})) -eq ${max_end} ] &&
+ nochange "partition ${part} is size ${pt_size}. it cannot be grown"
+[ $((${pt_end}+${fudge})) -gt ${max_end} ] &&
+ nochange "partition ${part} could only be grown by $((${max_end}-${pt_end})) [fudge=${fudge}]"
+
+# now, change the size for this partition in ${dump_out} to be the
+# new size
+new_size=$((${max_end}-${pt_start}))
+sed "\|^${dpart} |s/${pt_size},/${new_size},/" "${dump_out}" > "${new_out}" ||
+ fail "failed to change size in output"
+
+change_info="partition=${part} start=${pt_start} old: size=${pt_size} end=${pt_end} new: size=${new_size},end=${max_end}"
+if [ $dry_run -ne 0 ]; then
+ echo "CHANGE: ${change_info}"
+ {
+ echo "# === old sfdisk -d ==="
+ cat "${dump_out}"
+ echo "# === new sfdisk -d ==="
+ cat "${new_out}"
+ } 1>&2
+ exit 0
+fi
+
+sfdisk --no-reread "${disk}" ${CHS} --force -O "${orig_bin}" \
+ < "${new_out}" > "${change_out}" 2>&1 || {
+ DISK=${disk}; RESTORE_FROM="${orig_bin}";
+ error "attempt to resize ${disk} failed. sfdisk output below:"
+ sed 's,^,| ,' "${change_out}" 1>&2
+ fail "failed to resize"
+}
+changed "${change_info}"
+
+# dump_out looks something like:
+## partition table of /tmp/out.img
+#unit: sectors
+#
+#/tmp/out.img1 : start= 1, size= 48194, Id=83
+#/tmp/out.img2 : start= 48195, size= 963900, Id=83
+#/tmp/out.img3 : start= 1012095, size= 305235, Id=82
+#/tmp/out.img4 : start= 1317330, size= 771120, Id= 5
+#/tmp/out.img5 : start= 1317331, size= 642599, Id=83
+#/tmp/out.img6 : start= 1959931, size= 48194, Id=83
+#/tmp/out.img7 : start= 2008126, size= 80324, Id=83
+
+# vi: ts=4 noexpandtab
=== added file 'bin/resize-part-image'
--- bin/resize-part-image 1970-01-01 00:00:00 +0000
+++ bin/resize-part-image 2012-12-17 12:41:23 +0000
@@ -0,0 +1,152 @@
+#!/bin/sh
+#
+# cloud-resize-image - resize a cloud image
+#
+# Copyright (C) 2010 Canonical Ltd.
+#
+# Authors: Scott Moser <smoser@xxxxxxxxxxxxx>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, version 3 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+Usage() {
+ cat <<EOF
+Usage: ${0##*/} [ options ] image size [output]
+ Resize a partition image and contained filesystem to a new size.
+ if output is given, do not modify 'image', but create new file 'output'
+
+ New size is specified per resize2fs(8), e.g. "1G" for 1 gigabyte
+
+ options:
+ -v | --verbose show command output
+EOF
+ return 0
+}
+
+error() { echo "$@" 1>&2; }
+fail() { [ $# -eq 0 ] || error "$@"; exit 1; }
+
+human2bytes() {
+ # converts size suitable for input to resize2fs to bytes
+ # s:512 byte sectors, K:kilobytes, M:megabytes, G:gigabytes
+ # none: block size of the image
+ local input=${1} defunit=${2:-1024}
+ local unit count;
+ case "$input" in
+ *s) count=${input%s}; unit=512;;
+ *K) count=${input%K}; unit=1024;;
+ *M) count=${input%M}; unit=$((1024*1024));;
+ *G) count=${input%G}; unit=$((1024*1024*1024));;
+ *) count=${input} ; unit=${2:-1024};;
+ esac
+ _RET=$((${count}*${unit}))
+}
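+# e.g. human2bytes "1M" sets _RET=1048576; a bare number is multiplied by
+# the default unit (1024), so human2bytes "4" sets _RET=4096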
+
+xtruncate() {
+ if which truncate >/dev/null 2>&1; then
+ truncate "${@}"
+ else
+ local size=${1} file=${2} blk=""
+ size=${size#--size=}
+		# this is a poor man's truncate supporting whatever human2bytes supports
+ human2bytes "${size}" && blk=$((${_RET}/512)) &&
+ dd if=/dev/zero of="${file}" obs=512 seek=${blk} count=0 2>/dev/null
+ fi
+}
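+# e.g. "xtruncate --size=1G disk.img" extends disk.img to 1G without
+# allocating blocks for the added length (sparse where possible)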
+
+runcmd() {
+ local output=$1
+ shift;
+ if [ "$output" = "0" ]; then
+ local out="" ret=0;
+ out=$("${@}" 2>&1) || { ret=$?; error "${out}"; return $ret; }
+ else
+ "$@"
+ fi
+}
+
+[ "$1" = "-h" -o "$1" = "--help" ] && { Usage; exit 0; }
+
+verbose=0
+[ "$1" = "-v" -o "$1" = "--verbose" ] &&
+ { verbose=1; shift; }
+
+[ "${CLOUD_UTILS_WARN_RESIZE:-0}" = "0" ] && _n="${0##*/}" &&
+ [ "${_n#uec}" != "${_n}" ] && export CLOUD_UTILS_WARN_RESIZE=1 &&
+ error "WARNING: uec-resize-image is now 'resize-part-image'. Please update your tools or docs."
+
+[ $# -eq 3 -o $# -eq 2 ] || { Usage 1>&2; exit 1; }
+
+old="$1"
+size="$2"
+new="${3:-${old}}"
+
+[ -f "${old}" ] || fail "${old}: does not exist"
+
+human2bytes "${size}" && new_size=${_RET} ||
+ fail "failed to understand ${size}"
+
+if [ ! "${old}" -ef "${new}" ]; then
+ file_out=$(file "${old}") || fail "failed to read ${old} with 'file'"
+ case "${file_out}" in
+ *gzip\ compressed*)
+ file_out_z=$(file -z "${old}")
+ case "${file_out_z}" in
+ *tar\ archive*)
+ : > "${new}" && newd=$(dirname "${new}") ||
+ fail "failed to get full path for ${new}"
+ tmpd=$(mktemp -d "${newd}/.${0##*/}.XXXXXX") &&
+ ( cd "${tmpd}" && tar -S --wildcards -xzf - "*.img" &&
+ mv *.img "../${new}" ) < "${old}" || {
+ rm -Rf "${tmpd}";
+ fail "failed to extract image from ${old}"
+ }
+ rm -Rf "${tmpd}"
+ ;;
+ *)
+ zcat -f "$old" | cp --sparse=always /dev/stdin "$new";;
+ esac
+ ;;
+ *) cp --sparse=always "${old}" "${new}";;
+ esac
+ [ $? -eq 0 ] || fail "failed to cp ${old} -> ${new}"
+else
+ # if old=new (in place), it must be a simple image file
+ case "${old}" in
+ *.gz) fail "refusing work in place compressed or archive file: ${old}";;
+ esac
+fi
+
+ls_out=$(ls -l "${new}") &&
+ old_size=$(echo "${ls_out}" | awk '{print $5}') ||
+	fail "failed to get size of ${new}"
+
+runcmd "${verbose}" e2fsck -fp "${new}" ||
+ fail "failed to fsck ${new}"
+
+if [ "${old_size}" -lt "${new_size}" ]; then
+ xtruncate "--size=$size" "$new" || fail "failed to change size of ${new}"
+fi
+
+runcmd "${verbose}" resize2fs "$new" "$size" ||
+ fail "failed to resize ${new} -> ${size}"
+
+if [ "${old_size}" -gt "${new_size}" ]; then
+ xtruncate "--size=$size" "$new" || fail "failed to change size of ${new}"
+fi
+
+echo "resized ${new} to ${size}"
+
+exit 0
+
+# vi: ts=4 noexpandtab
=== added file 'bin/ubuntu-cloudimg-query'
--- bin/ubuntu-cloudimg-query 1970-01-01 00:00:00 +0000
+++ bin/ubuntu-cloudimg-query 2012-12-17 12:41:23 +0000
@@ -0,0 +1,296 @@
+#!/bin/sh
+
+VERBOSITY=0
+TEMP_D=""
+NAME="ubuntu-cloudimg-query"
+DOT_D="$HOME/.$NAME"
+CACHE_D="$HOME/.cache/$NAME"
+cachelife=86400
+
+error() { echo "$@" 1>&2; }
+errorp() { printf "$@" 1>&2; }
+fail() { [ $# -eq 0 ] || error "$@"; exit 1; }
+failp() { [ $# -eq 0 ] || errorp "$@"; exit 1; }
+
+Usage() {
+ cat <<EOF
+Usage: ${0##*/} [ options ] criteria
+
+ Get the latest Ubuntu ami meeting certain criteria
+
+ options:
+ -o | --output FILE output to file rather than stdout
+ -f | --format format change output to 'format'.
+ default: '%{ami}\n'
+
+ Examples:
+ - get the latest ami matching default criteria for release 'n'
+ $ ${0##*/} -v n
+ us-east-1/ebs/ubuntu-natty-11.04-amd64-server-20110426
+ ami-1aad5273
+   - get an i386 instance-store image in us-west-1
+ $ ${0##*/} lucid i386 instance us-west-1
+ ami-73c69436
+ - get the latest daily build of the devel release in eu-west-1
+ $ EC2_REGION=eu-west-1 ${0##*/} daily amd64 ebs o
+
+EOF
+}
+
+bad_Usage() { Usage 1>&2; [ $# -eq 0 ] || error "$@"; exit 1; }
+cleanup() {
+ [ -z "${TEMP_D}" -o ! -d "${TEMP_D}" ] || rm -Rf "${TEMP_D}"
+}
+
+cache_valid() {
+ local file="$1" date="$2"
+ [ -n "$file" -a -e "$file" ] || return 1
+ touch --date "${date}" "${TEMP_D}/ts"
+ [ "$file" -nt "$TEMP_D/ts" ]
+}
+
+dlcache() {
+ local url="$1" out="$2" cfilename="$3" age="$4"
+ local cachef="${CACHE_D}/$cfilename"
+ local timeout="now - $age seconds"
+ [ -n "$cfilename" ] || cachef=""
+ if cache_valid "$cachef" "$timeout"; then
+ cp -a "$cachef" "$out"
+ return
+ fi
+ wget -q "${url}" -O "${out}" || return 1
+ { [ -z "$cachef" ] || cp "${out}" "${cachef}"; } ||
+ return 1
+}
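+# e.g. dlcache "$burl/daily.latest.txt" "$TEMP_D/out" daily.latest.txt 86400
+# reuses the copy under ~/.cache/ubuntu-cloudimg-query if it is newer than
+# one day, and downloads and re-caches it otherwise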
+
+debug() {
+ local level=${1}; shift;
+ [ "${level}" -gt "${VERBOSITY}" ] && return
+ error "${@}"
+}
+
+isrel() {
+ local cand="$1" url="$2" out="$3" cache="$4" age="$5"
+ local o="" f=""
+ for f in "$out" "$CACHE_D/$cache"; do
+ [ -f "${f}" ] &&
+ o=$(awk '-F\t' '$1 ~ r { print $1; exit(0); }' "r=^$cand" "$f") &&
+ [ -n "$o" ] && _RET="$o" && return 0
+ done
+ dlcache "$url" "$out" "$cache" "$age" &&
+ o=$(awk '-F\t' '$1 ~ r { print $1; exit(0); }' "r=^$cand" "$out") &&
+ [ -n "$o" ] && _RET="$o" && return 0
+ return 1
+}
+subst() {
+ local cur="$1"; shift;
+ while [ $# -ne 0 ]; do
+ while [ "${cur#*${1}}" != "${cur}" ]; do
+ cur="${cur%%${1}*}${2}${cur#*${1}}"
+ done
+ shift 2
+ done
+ _RET=${cur}
+}
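+# e.g. subst '%{ami} in %{region}' '%{ami}' ami-123 '%{region}' us-east-1
+# sets _RET to "ami-123 in us-east-1"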
+in_args() {
+ # is $1 in $2....
+ local needle="$1" hay=""
+ shift;
+ for hay in "$@"; do
+ [ "$hay" = "$needle" ] && return 0
+ done
+ return 1
+}
+
+getreleases() {
+ # get the list of releases, return it in _RET
+ local releases="" r=""
+ releases="hardy karmic lucid maverick natty oneiric precise quantal";
+ if command -v "ubuntu-distro-info" >/dev/null; then
+ local all_rels="" seen_lucid=false
+ all_rels=$(ubuntu-distro-info --all) ||
+ { error "'ubuntu-distro-info --all' failed"; return 1; }
+ releases="hardy"
+ for r in $all_rels; do
+ if $seen_lucid || [ "$r" = "lucid" ]; then
+ seen_lucid=true;
+ releases="${releases} $r"
+ fi
+ done
+ fi
+ _RET="$releases"
+}
+
+short_opts="f:ho:v"
+long_opts="format:,help,no-cache,output:,verbose"
+getopt_out=$(getopt --name "${0##*/}" \
+ --options "${short_opts}" --long "${long_opts}" -- "$@") &&
+ eval set -- "${getopt_out}" ||
+ bad_Usage
+
+## <<insert default variables here>>
+output="-"
+format='%{ami}\n'
+burl="${UBUNTU_CLOUDIMG_QUERY_BASEURL:-https://cloud-images.ubuntu.com/query}"
+store="ebs"
+region_default="${EC2_REGION:-us-east-1}"
+release="lucid"
+arch="amd64"
+stream="released"
+bname="server"
+itype=""
+ptype="paravirtual"
+poss_release=""
+itypes=""
+itypes_i386="m1.small c1.medium m1.medium"
+itypes_amd64="${itypes_i386} m1.large m1.xlarge m2.xlarge m2.2xlarge m2.4xlarge c1.xlarge"
+itypes_hvm="cc1.4xlarge cg1.4xlarge cc2.8xlarge hi1.4xlarge"
+
+while [ $# -ne 0 ]; do
+ cur=${1}; next=${2};
+ case "$cur" in
+ -h|--help) Usage ; exit 0;;
+ -f|--format) format=${2}; shift;;
+ -o|--output) output=${2}; shift;;
+ -v|--verbose) VERBOSITY=$((${VERBOSITY}+1));;
+ --no-cache) cachelife=0;;
+ --) shift; break;;
+ esac
+ shift;
+done
+
+getreleases || fail "failed to get releases"
+releases="${_RET}"
+
+for i in "$@"; do
+ in_args "$i" $releases && r_rel=$i && continue
+ case $i in
+ rel*) stream="released";;
+ daily) stream=${i};;
+ server|desktop) bname=${i};;
+ i386|amd64|x86_64) arch=${i}; [ "${i}" = "x86_64" ] && arch="amd64";;
+ *-*-[0-9]) region=${i};;
+ ebs) store="$i";;
+ instance|instance-store) store="instance-store";;
+ hvm) ptype="hvm";;
+ para|paravirtual) ptype="paravirtual";;
+ c[cg][1-9].*|hi1.*)
+ ptype="hvm";
+ itype="$i";
+ arch=amd64;;
+ [a-z][1-9].[0-9a-z]*|c[cg][1-9].*)
+ itype="$i";
+ case "${i}" in
+ t1.micro) store=ebs;; # t1.micro only supports ebs
+ esac
+ ;;
+ http://*|https://*) burl=${i};;
+ [hklmnopqrstuvwxyz])
+ [ -z "$p_rel" ] || fail "found 2 unknown args: $p_rel, $i";
+ p_rel=$i;;
+ *) fail "confused by argument: ${i}";;
+ esac
+done
+
+TEMP_D=$(mktemp -d "${TMPDIR:-/tmp}/${0##*/}.XXXXXX") ||
+ fail "failed to make tempdir"
+trap cleanup EXIT
+
+{ [ -d "${CACHE_D}" ] || mkdir -p "${CACHE_D}"; } ||
+ fail "failed to create ${CACHE_D}"
+
+daily_latest="${TEMP_D}/daily.latest.txt"
+release_latest="${TEMP_D}/released.latest.txt"
+
+if [ -n "$p_rel" ]; then
+ [ -z "$r_rel" ] || fail "unknown arg ${p_rel}"
+ url="${burl}/daily.latest.txt"
+ isrel "$p_rel" "$url" "${daily_latest}" "daily.latest.txt" $cachelife &&
+ r_rel="${_RET}" || fail "bad input $p_rel"
+fi
+[ -n "$r_rel" ] && release=$r_rel
+
+if [ -z "${region}" ]; then
+ if [ -n "${EC2_URL}" ]; then
+ case "${EC2_URL#*://}" in
+ *-*-[0-9].ec2.amazonaws.com*)
+ region=${EC2_URL#*://};
+ region=${region%%.*};;
+ ec2.amazonaws.com/*) region=us-east-1;;
+ *) region=${region_default};;
+ esac
+ else
+ region="${region_default}"
+ fi
+fi
+
+ec2_curf="${TEMP_D}/${release}.${bname}.${stream}.current.txt"
+ec2_url="${burl}/${release}/${bname}/${stream}.current.txt"
+dl_curf="${TEMP_D}/${release}.${bname}.${stream}-dl.current.txt"
+dl_url="${burl}/${release}/${bname}/${stream}-dl.current.txt"
+
+dlcache "${dl_url}" "${dl_curf}" "${dl_curf##*/}" $cachelife ||
+ fail "failed to get ${dl_url}"
+
+out=$(awk '-F\t' \
+ '$1 == release && $2 == bname && $5 == arch { print $4, $6, $7 }' \
+ "release=$release" "bname=$bname" "arch=$arch" "${dl_curf}") &&
+ [ -n "$out" ] || fail "failed find entry in ${dl_url}"
+set -- ${out}; serial=$1; dlpath=$2; pubname=$3
+url="${burl%/query}/${dlpath}"
+
+prefix="${store}"
+[ "${ptype}" = "hvm" ] && prefix="hvm"
+dlcache "${ec2_url}" "${ec2_curf}" "${ec2_curf##*/}" $cachelife ||
+ fail "failed to get ${ec2_url}"
+ami=$(awk '-F\t' \
+ '$1 == release && $2 == bname && $5 == store &&
+ $6 == arch && $7 == region && $11 == ptype { print $8 }' \
+ "release=$release" "bname=${bname}" \
+ "store=$store" "arch=$arch" "region=$region" "ptype=$ptype" \
+ "${ec2_curf}") && [ -n "$ami" ] || fail "failed to find ami"
+
+case "$arch:$store:$ptype" in
+ *:hvm) itypes_all="${itypes_hvm}";;
+ i386:*) itypes_all="${itypes_i386}";;
+ amd64:*) itypes_all="${itypes_amd64}";;
+esac
+[ "$store" = "ebs" -a "$ptype" != "hvm" ] && itypes_all="t1.micro $itypes_all"
+itypes=""
+for x in ${itype} ${itypes_all}; do
+ case ",$itypes," in
+ *,$x,*) continue;;
+ esac
+ itypes="${itypes},${x}"
+done
+itypes=${itypes#,}
+itype=${itypes%%,*}
+
+xarch=${arch}
+[ "$xarch" = "amd64" ] && xarch="x86_64"
+
+CR="
+"
+TAB=" "
+subst "$format" \
+ '\\n' "$CR" '\\t' "$TAB" \
+ '%{ami}' "$ami" \
+ '%{arch}' "$arch" '%{bname}' "$bname" '%{dlpath}' "$dlpath" \
+ '%{ptype}' "$ptype" '%{pubname}' "$pubname" '%{region}' "$region" \
+ '%{release}' "$release" '%{store}' "$store" '%{stream}' "$stream" \
+ '%{url}' "$url" \
+ '%{xarch}' "$xarch" '%{itype}' "${itype}" '%{itypes}' "$itypes" \
+ '%{serial}' "$serial" \
+ '%{summary}' "${region}/${prefix}/${pubname}"
+
+out=${_RET}
+[ -n "${out}" ] || fail "no ami found matching criteria"
+
+debug 1 "${region}/${prefix}/${pubname}"
+if [ -n "${output}" -a "${output}" != "-" ]; then
+ echo -n "$out" > "$output"
+else
+ echo -n "$out"
+fi
+exit
+# vi: ts=4 noexpandtab
=== added file 'bin/ubuntu-ec2-run'
--- bin/ubuntu-ec2-run 1970-01-01 00:00:00 +0000
+++ bin/ubuntu-ec2-run 2012-12-17 12:41:23 +0000
@@ -0,0 +1,222 @@
+#!/usr/bin/python
+#
+# ubuntu-ec2-run: ec2-run-instances that support human readable
+# aliases for AMI's
+#
+# Copyright (C) 2011 Dustin Kirkland <kirkland@xxxxxxxxxx>
+#
+# Authors: Dustin Kirkland <kirkland@xxxxxxxxxx>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, version 3 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+USAGE = """
+Usage: ubuntu-ec2-run [ options ] arguments
+
+ Run an ec2 instance of Ubuntu.
+
+ options:
+ --dry-run: only report what would be done
+
+ All non-understood options are passed through to $EC2_PRE-run-instances
+
+ ubuntu-ec2-run passes the following arguments to cloud-image-query
+ in order to select an AMI to run. Defaults are marked with a '*':
+
+ releases: lucid* maverick natty oneiric precise quantal
+ stream: release* daily
+ arch: amd64*, x86_64, i386
+ store: ebs*, instance-store, instance
+ pvtype: pv*, hvm, paravirtual
+
+  Note that --instance-type/-t will modify arch appropriately
+
+ Example:
+ * ubuntu-ec2-run oneiric daily --dry-run
+ # us-east-1/ebs/ubuntu-oneiric-daily-amd64-server-20110902
+ ec2-run-instances --instance-type=t1.micro ami-0ba16262
+  * EC2_PRE=euca- ubuntu-ec2-run oneiric daily --dry-run
+    # us-east-1/ebs/ubuntu-oneiric-daily-amd64-server-20110902
+    euca-run-instances --instance-type=t1.micro ami-0ba16262
+ * ubuntu-ec2-run oneiric hvm --dry-run
+ # us-east-1/hvm/ubuntu-oneiric-11.10-beta1-amd64-server-20110831
+    ec2-run-instances --instance-type=cc1.4xlarge \\
+ --block-device-mapping /dev/sdb=ephemeral0 \\
+ --block-device-mapping /dev/sdc=ephemeral1 ami-b79754de
+ * ubuntu-ec2-run --region us-west-1 --instance-type \\
+ m1.small oneiric instance --dry-run
+ # us-west-1/instance-store/ubuntu-oneiric-11.10-beta1-i386-server-20110831
+ ec2-run-instances --region us-west-1 --instance-type m1.small ami-39bfe27c
+"""
+
+import os
+import string
+import subprocess
+import sys
+import urllib2
+
+# This could/should use `distro-info --supported`
+aliases = [
+ "amd64", "x86_64", "i386",
+ "server", "desktop",
+ "release", "daily",
+ "ebs", "instance-store", "instance",
+ "hvm", "paravirtual", "pv",
+]
+
+
+def get_argopt(args, optnames):
+ ret = None
+ i = 0
+ while i < len(args):
+ cur = args[i]
+ for opt in optnames:
+ if opt.startswith("--"):
+ if cur == opt:
+ ret = args[i + 1]
+ i = i + 1
+ break
+ elif cur.startswith("%s=" % opt):
+ ret = args[i].split("=")[1]
+ break
+ else:
+ if args[i] == opt:
+ ret = args[i + 1]
+ i = i + 1
+ break
+ i = i + 1
+ return ret
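+
+# illustrative: get_argopt(["-t", "m1.small"], ("--instance-type", "-t"))
+# returns "m1.small"; both "--opt value" and "--opt=value" are recognized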
+
+
+def get_block_device_mappings(itype):
+ # cleaned from http://aws.amazon.com/ec2/instance-types/
+ # t1.micro 0 # m1.large 850 # cg1.4xlarge 1690
+ # m1.small 160 # m2.2xlarge 850 # m1.xlarge 1690
+ # c1.medium 350 # c1.xlarge 1690 # m2.4xlarge 1690
+ # m1.medium 410 # cc1.4xlarge 1690 # hi1.4xlarge 2048
+ # m2.xlarge 420 # cc1.4xlarge 1690 # cc2.8xlarge 3370
+ bdmaps = []
+ if itype in ("t1.micro", "m1.small", "c1.medium"):
+ pass # the first one is always attached. ephemeral0=sda2
+ elif itype in ("m2.xlarge", "m1.medium"):
+ bdmaps = ["/dev/sdb=ephemeral0"]
+ elif (itype in ("m1.large", "m2.2xlarge", "hi1.4xlarge") or
+ itype.startswith("cg1.") or itype.startswith("cc1.")):
+ bdmaps = ["/dev/sdb=ephemeral0", "/dev/sdc=ephemeral1"]
+ elif (itype in ("m1.xlarge", "m2.4xlarge", "c1.xlarge") or
+ itype.startswith("cc2.8xlarge")):
+ bdmaps = ["sdb=ephemeral0", "sdc=ephemeral1",
+ "sdd=ephemeral2", "sde=ephemeral3"]
+ args = []
+ for m in bdmaps:
+ args.extend(("--block-device-mapping", m,))
+ return(args)
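+
+# e.g. get_block_device_mappings("m1.large") returns
+#   ["--block-device-mapping", "/dev/sdb=ephemeral0",
+#    "--block-device-mapping", "/dev/sdc=ephemeral1"]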
+
+if "--help" in sys.argv or "-h" in sys.argv:
+ sys.stdout.write(USAGE)
+ sys.exit(0)
+
+if len(sys.argv) == 1:
+ sys.stderr.write(USAGE)
+ sys.exit(1)
+
+pre = "ec2-"
+for name in ("EC2_PRE", "EC2PRE"):
+ if name in os.environ:
+ pre = os.environ[name]
+
+# if the prefix is something like "myec2 "
+# then assume that 'myec2' is a command itself
+if pre.strip() == pre:
+ ri_cmd = ["%srun-instances" % pre]
+else:
+ ri_cmd = [pre.strip(), "run-instances"]
+
+query_cmd = ["ubuntu-cloudimg-query",
+ "--format=%{ami}\n%{itype}\n%{summary}\n%{store}\n"]
+
+
+# Get the list of releases. If they have 'ubuntu-distro-info', then use that
+# otherwise, fall back to our builtin list of releases
+try:
+ out = subprocess.check_output(["ubuntu-distro-info", "--all"])
+ all_rels = out.strip().split("\n")
+ releases = []
+ seen_lucid = False
+ for r in all_rels:
+ if seen_lucid or r == "lucid":
+ seen_lucid = True
+ releases.append(r)
+except OSError as e:
+ releases = ["lucid", "maverick", "natty", "oneiric", "precise", "quantal"]
+
+
+# each arg_group is a list of arguments and a boolean that indicates
+# if the value of that argument should be passed to query_cmd
+# ec2-run-instances default instance-type is m1.small
+arg_groups = (
+ (("--region",), True),
+ (("--instance-type", "-t"), True),
+ (("--block-device-mapping", "-b"), False),
+)
+
+flags = {}
+for opts, passthrough in arg_groups:
+ arg_value = get_argopt(sys.argv, opts)
+ if arg_value is not None and passthrough:
+ query_cmd.append(arg_value)
+ flags[opts[0]] = arg_value
+
+dry_run = False
+
+for arg in sys.argv[1:]:
+ if arg in aliases or arg in releases:
+ query_cmd.append(arg)
+ elif arg == "--dry-run":
+ dry_run = True
+ else:
+ ri_cmd.append(arg)
+
+cmd = ""
+for i in query_cmd:
+ cmd += " '%s'" % i.replace("\n", "\\n")
+cmd = cmd[1:]
+
+try:
+ (ami, itype, summary, store, endl) = \
+ subprocess.check_output(query_cmd).split("\n")
+ if endl.strip():
+ sys.stderr.write("Unexpected output of command:\n %s" % cmd)
+except subprocess.CalledProcessError as e:
+ sys.stderr.write("Failed. The following command returned failure:\n")
+ sys.stderr.write(" %s\n" % cmd)
+ sys.exit(1)
+except OSError as e:
+ sys.stderr.write("You do not have '%s' in your path\n" % query_cmd[0])
+ sys.exit(1)
+
+if flags.get("--instance-type", None) is None:
+ ri_cmd.append("--instance-type=%s" % itype)
+
+if store == "ebs" and flags.get("--block-device-mapping", None) is None:
+ ri_cmd.extend(get_block_device_mappings(itype))
+
+ri_cmd.append(ami)
+
+sys.stderr.write("# %s\n" % summary)
+if dry_run:
+ print ' '.join(ri_cmd)
+else:
+ os.execvp(ri_cmd[0], ri_cmd)
+###############################################################################
+
+# vi: ts=4 expandtab
=== added file 'bin/write-mime-multipart'
--- bin/write-mime-multipart 1970-01-01 00:00:00 +0000
+++ bin/write-mime-multipart 2012-12-17 12:41:23 +0000
@@ -0,0 +1,116 @@
+#!/usr/bin/python
+# largely taken from python examples
+# http://docs.python.org/library/email-examples.html
+
+import os
+import sys
+import smtplib
+# For guessing MIME type based on file name extension
+import mimetypes
+
+from email import encoders
+from email.message import Message
+from email.mime.base import MIMEBase
+from email.mime.multipart import MIMEMultipart
+from email.mime.text import MIMEText
+from optparse import OptionParser
+import gzip
+
+COMMASPACE = ', '
+
+starts_with_mappings = {
+ '#include': 'text/x-include-url',
+ '#!': 'text/x-shellscript',
+ '#cloud-config': 'text/cloud-config',
+ '#cloud-config-archive': 'text/cloud-config-archive',
+ '#upstart-job': 'text/upstart-job',
+ '#part-handler': 'text/part-handler',
+ '#cloud-boothook': 'text/cloud-boothook'
+}
+
+
+def get_type(fname, deftype):
+ f = file(fname, "rb")
+ line = f.readline()
+ f.close()
+ rtype = deftype
+
+ # slist is sorted longest first
+ slist = sorted(starts_with_mappings.keys(), key=lambda e: 0 - len(e))
+ for sstr in slist:
+ if line.startswith(sstr):
+ rtype = starts_with_mappings[sstr]
+ break
+ return(rtype)
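+
+# e.g. a file whose first line begins "#cloud-config" maps to
+# "text/cloud-config"; unrecognized content falls back to deftype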
+
+
+def main():
+ outer = MIMEMultipart()
+ #outer['Subject'] = 'Contents of directory %s' % os.path.abspath(directory)
+ #outer['To'] = COMMASPACE.join(opts.recipients)
+ #outer['From'] = opts.sender
+ #outer.preamble = 'You will not see this in a MIME-aware mail reader.\n'
+
+ parser = OptionParser()
+
+ parser.add_option("-o", "--output", dest="output",
+ help="write output to FILE [default %default]", metavar="FILE",
+ default="-")
+ parser.add_option("-z", "--gzip", dest="compress", action="store_true",
+ help="compress output", default=False)
+ parser.add_option("-d", "--default", dest="deftype",
+ help="default mime type [default %default]", default="text/plain")
+ parser.add_option("--delim", dest="delim",
+ help="delimiter [default %default]", default=":")
+
+ (options, args) = parser.parse_args()
+
+    if len(args) < 1:
+        parser.error("Must give a file list; see '--help'")
+
+ for arg in args:
+ t = arg.split(options.delim, 1)
+ path = t[0]
+ if len(t) > 1:
+ mtype = t[1]
+ else:
+ mtype = get_type(path, options.deftype)
+
+ maintype, subtype = mtype.split('/', 1)
+ if maintype == 'text':
+ fp = open(path)
+ # Note: we should handle calculating the charset
+ msg = MIMEText(fp.read(), _subtype=subtype)
+ fp.close()
+ else:
+ fp = open(path, 'rb')
+ msg = MIMEBase(maintype, subtype)
+ msg.set_payload(fp.read())
+ fp.close()
+ # Encode the payload using Base64
+ encoders.encode_base64(msg)
+
+ # Set the filename parameter
+ msg.add_header('Content-Disposition', 'attachment',
+ filename=os.path.basename(path))
+
+ outer.attach(msg)
+
+    if options.output == "-":
+ ofile = sys.stdout
+ else:
+ ofile = file(options.output, "wb")
+
+ if options.compress:
+ gfile = gzip.GzipFile(fileobj=ofile, filename=options.output)
+ gfile.write(outer.as_string())
+ gfile.close()
+ else:
+ ofile.write(outer.as_string())
+
+ ofile.close()
+
+if __name__ == '__main__':
+ main()
+
+# vi: ts=4 expandtab
=== added directory 'debian'
=== added file 'debian/changelog'
--- debian/changelog 1970-01-01 00:00:00 +0000
+++ debian/changelog 2012-12-17 12:41:23 +0000
@@ -0,0 +1,29 @@
+cloud-utils (0.27~bzrREVNO~trunk-1) UNRELEASED; urgency=low
+
+ * add '--hook-img' flag to cloud-publish-image and passthrough that
+ flag from cloud-publish-ubuntu and cloud-publish-tarball.
+
+ -- Scott Moser <smoser@xxxxxxxxxx> Mon, 01 Oct 2012 15:11:50 -0400
+
+cloud-utils (0.26~bzr200~trunk-1) quantal; urgency=low
+
+ * Upstream trunk build
+ * install ubuntu cloud image keyring into /usr/share/keyrings/
+ * ubuntu-cloudimg-query, ubuntu-ec2-run:
+ - be aware of m1.medium instance type
+ - support "amd64 on all sizes"
+ - be aware of hi1.4xlarge
+ - always pass block device mapping for sdb if instance type has it
+ rather than relying on ami registration.
+ * when downloading images use wget dot:mega for less verbose
+ * growpart:
+ - use 'sfdisk --no-reread' (LP: #942788)
+ - if sfdisk fails, send output to stderr
+ * cloud-publish-tarball: fix for tarballs without a ramdisk
+ * ubuntu-cloudimg-query: allow baseurl to be read from environment var
+ * growpart: support growpart of nbd devices (/dev/nbd[0-9]) and
+ /dev/loop devices.
+ * add cloud-localds utility
+ * ubuntu-cloudimg-query: add 'serial' to tokens available for substitution
+
+ -- Scott Moser <smoser@xxxxxxxxxx> Mon, 01 Oct 2012 15:10:45 -0400
=== added file 'debian/compat'
--- debian/compat 1970-01-01 00:00:00 +0000
+++ debian/compat 2012-12-17 12:41:23 +0000
@@ -0,0 +1,1 @@
+7
=== added file 'debian/control'
--- debian/control 1970-01-01 00:00:00 +0000
+++ debian/control 2012-12-17 12:41:23 +0000
@@ -0,0 +1,36 @@
+Source: cloud-utils
+Section: admin
+Priority: extra
+Maintainer: Scott Moser <smoser@xxxxxxxxxx>
+Build-Depends: cdbs, debhelper (>= 7), python-all (>= 2.6)
+XS-Python-Version: >= 2.6
+Standards-Version: 3.9.2
+
+Package: cloud-utils
+Architecture: all
+Depends: ca-certificates,
+ e2fsprogs (>=1.4),
+ euca2ools,
+ file,
+ genisoimage,
+ python,
+ python-paramiko,
+ python-yaml,
+ python3,
+ util-linux (>= 2.17.2),
+ wget,
+ ${misc:Depends}
+Recommends: distro-info, python-distro-info
+Suggests: mtools, openssh-client
+Description: cloud image management utilities
+ This package provides a useful set of utilities for managing cloud
+ instances and images.
+ .
+ The euca2ools package (a dependency of cloud-utils) provides an
+ Amazon EC2 API compatible set of utilities for bundling kernels,
+ ramdisks, and root filesystems, and uploading them to either EC2
+ or UEC.
+ .
+ The tasks associated with image bundling are often tedious and
+ repetitive. The cloud-utils package provides several scripts
+ that wrap the complicated tasks with a much simpler interface.
=== added file 'debian/copyright'
--- debian/copyright 1970-01-01 00:00:00 +0000
+++ debian/copyright 2012-12-17 12:41:23 +0000
@@ -0,0 +1,21 @@
+Format-Specification: http://svn.debian.org/wsvn/dep/web/deps/dep5.mdwn?op=file&rev=135
+Name: cloud-utils
+Maintainer: Scott Moser <scott.moser@xxxxxxxxxxxxx>
+Source: https://code.launchpad.net/~ubuntu-on-ec2/ubuntu-on-ec2/uec-tools
+
+Copyright: 2010, Canonical Ltd.
+License: GPL-3
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 3, as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+ The complete text of the GPL version 3 can be seen in
+ /usr/share/common-licenses/GPL-3.
=== added file 'debian/rules'
--- debian/rules 1970-01-01 00:00:00 +0000
+++ debian/rules 2012-12-17 12:41:23 +0000
@@ -0,0 +1,16 @@
+#!/usr/bin/make -f
+
+include /usr/share/cdbs/1/rules/debhelper.mk
+include /usr/share/cdbs/1/class/makefile.mk
+
+OUT_D = $(CURDIR)/debian/cloud-utils
+DEB_MAKE_INSTALL_TARGET = install PREFIX=/usr DESTDIR=$(OUT_D)
+
+binary-post-install/cloud-utils::
+ ln -s cloud-publish-tarball $(OUT_D)/usr/bin/uec-publish-tarball
+ ln -s cloud-publish-image $(OUT_D)/usr/bin/uec-publish-image
+ ln -s resize-part-image $(OUT_D)/usr/bin/uec-resize-image
+ ln -s cloud-publish-tarball.1 $(OUT_D)/usr/share/man/man1/uec-publish-tarball.1
+ ln -s cloud-publish-image.1 $(OUT_D)/usr/share/man/man1/uec-publish-image.1
+ ln -s resize-part-image.1 $(OUT_D)/usr/share/man/man1/uec-resize-image.1
+ dh_python2
=== added file 'debian/watch'
--- debian/watch 1970-01-01 00:00:00 +0000
+++ debian/watch 2012-12-17 12:41:23 +0000
@@ -0,0 +1,2 @@
+version=3
+https://launchpad.net/cloud-utils/+download/ http://launchpad.net/cloud-utils/.*/cloud-utils-([\d\.]+)\.tar\.gz
=== added directory 'man'
=== added file 'man/cloud-fingerprint.1'
--- man/cloud-fingerprint.1 1970-01-01 00:00:00 +0000
+++ man/cloud-fingerprint.1 2012-12-17 12:41:23 +0000
@@ -0,0 +1,131 @@
+.TH cloud\-fingerprint 1 "17 Dec 2012" cloud\-utils cloud\-utils
+
+.SH NAME
+cloud-fingerprint \- manage \f[CI]~/.ssh/known_hosts\fR for ephemeral
+instances
+
+.SH SYNOPSIS
+
+.SY cloud\-fingerprint
+.B fix
+.OP \-\-ssh\-proxy ssh_proxy
+.OP \-\-no\-remove\-old\-entry
+.OP \-\-check\-only
+.I hostname
+.YS
+
+.SY cloud\-fingerprint
+.B grep
+.YS
+.SY cloud\-fingerprint
+.B import
+.OP \-\-ssh\-proxy ssh_proxy
+.OP \-\-no\-remove\-old\-entry
+.OP \-\-check\-only
+.I hostname
+.YS
+
+.SH DESCRIPTION
+\fBcloud\-fingerprint\fR securely translates fingerprints printed in instance
+console output to verified entries in \f[CI]~/.ssh/known_hosts\fR.
+
+.TP
+\fB-h\fR | \fB--help\fR
+Show usage message.
+
+.TP
+.BI \-\-ssh\-proxy \0ssh\-proxy
+Call
+.B ssh\-keyscan(1)
+via
+.I ssh\-proxy
+instead of calling it directly.
+
+.B ssh\-keyscan(1)
+is used to obtain the host's public key for fingerprint verification, but this
+will not work correctly if you do not have direct access to the host. Instead,
+use this option to define which machine can access the host directly.
+
+This option is useful for hosts which you access using a Proxy\:Command
+directive in your ssh configuration.
+
+.TP
+.I \-\-check\-only
+Just verify the fingerprint; do not add or remove entries in
+\f[CI]~/.ssh/known_hosts\fR. This is useful for testing and verification only.
+Relying on the result after
+.B cloud-fingerprint
+exits is prone to a race condition and is not secure.
+
+.TP
+.I \-\-no\-remove\-old\-entry
+Do not remove old entries from \f[CI]~/.ssh/known_hosts\fR.
+
+.TP
+.B
+fix
+Do everything. Read console output from \fIstdin\fR, parse it for
+.B ssh-keygen(1)
+output as marked by \fBcloud-init\fR, ask the host for its public key, verify
+it against the fingerprint from the console output, and add the public key to
+\f[CI]~/.ssh/known_hosts\fR.
+
+This is currently a shortcut to run \fBgrep\fR and then \fBimport\fR, but may
+change in the future. It is intended for interactive use only. Scripts should call
+.B grep
+and
+.B import
+directly as required.
+
+.TP
+.B
+grep
+Read
+.B ssh-keygen(1)
+output marked by
+.B cloud-init
+from
+.I stdin
+and write the original
+.B ssh-keygen(1)
+output to \fIstdout\fR.
+
+.TP
+.B
+import
+Read
+.B ssh-keygen(1)
+output from \fIstdin\fR, ask the host for its public key, verify it against the
+fingerprint from the console output, and add the public key to
+\f[CI]~/.ssh/known_hosts\fR.
+
+.P
+.I hostname
+should match the hostname component of your subsequent
+.B ssh(1)
+command. It must be resolvable by \fBssh-keyscan(1)\fR.
+
+.SH EXAMPLES
+Use the console output of instance \fIi\-1234\fR and set up
+\f[CI]~/.ssh/known_hosts\fR so that the following \fBssh\fR command works
+securely without prompting:
+
+.EX
+.RS
+$ \fBeuca\-get\-console\-output\fR \fIi\-1234\fR | \\
+ \fBcloud\-fingerprint fix\fR \fItest\-server.openstack\fR
+$ \fBssh\fR \fIubuntu@test-server.openstack\fR
+.EE
+.RE
+
+.SH SEE ALSO
+euca-get-console-output(1), ssh(1), ssh-keygen(1), ssh-keyscan(1), sshd(8)
+
+.SH AUTHOR
+This manpage and the utility were written by Robie Basak
+<robie.basak@xxxxxxxxxxxxx>. Permission is granted to copy, distribute and/or
+modify this document under the terms of the GNU General Public License, Version
+3 published by the Free Software Foundation.
+
+On Debian systems, the complete text of the GNU General Public License can be
+found in /usr/share/common-licenses/GPL.
=== added file 'man/cloud-publish-image.1'
--- man/cloud-publish-image.1 1970-01-01 00:00:00 +0000
+++ man/cloud-publish-image.1 2012-12-17 12:41:23 +0000
@@ -0,0 +1,71 @@
+.TH cloud\-publish\-image 1 "17 Feb 2010" cloud\-utils "cloud\-utils"
+.SH NAME
+cloud\-publish\-image \- publish a cloud image
+
+.SH SYNOPSIS
+.BI "cloud\-publish\-image [OPTIONS] ARCH IMAGE BUCKET
+
+.SH OPTIONS
+.TP
+.B -l|--add-launch <user_id>
+user_id can be "all", or "none"
+.TP
+.B --dry-run
+only report what would be done
+.TP
+.B --allow-existing
+if an image is already registered, simply report as if work was done
+.TP
+.B -o|--output <file>
+write registered id and manifest to file
+.TP
+.B --rename <publish_path>
+publish to bucket/<publish_path>, default: bucket/<basename(image)>
+.TP
+.B -t|--type <type>
+type is one of kernel/ramdisk/image/auto.
+if type is 'image', then:
+ -k | --kernel k : use previously registered kernel with id 'k'
+ specify 'none' for no kernel
+ -K | --kernel-file f : bundle, upload, use file 'f' as kernel
+ -r | --ramdisk r : use previously registered ramdisk with id 'r'
+ specify 'none' for no ramdisk
+ -R | --ramdisk-file f : bundle, upload, use file 'f' as ramdisk
+ -B | --block-device-mapping m : specify block device mapping in bundle
+
+.TP
+.B --save-downloaded d
+save the download image to directory 'd' (applicable only if TARBALL is an URL)
+.TP
+.B -v|--verbose
+increase verbosity
+
+.TP
+.B --name <name>
+register image with the given name (default: basename of publish_path)
+
+.SH ARGUMENTS
+.TP
+.B ARCH
+Target architecture, one of i386 or x86_64
+.TP
+.B IMAGE
+Target image to upload and register. If this is a URL, it will be downloaded.
+.TP
+.B BUCKET
+Target bucket to publish the image to
+
+.SH ENVIRONMENT
+Behavior of this program can be modified by environment variables as described below:
+
+.TP
+.B EC2PRE
+Underlying tools will be invoked using this prefix. The default is 'euca-', which results in using tools like 'euca-register' and 'euca-bundle-image'. To use the ec2-api-tools or ec2-ami-tools, set EC2PRE='ec2-'
+
+.SH DESCRIPTION
+Publish an image to a cloud
+
+.SH AUTHOR
+This manpage was written by Dustin Kirkland <kirkland@xxxxxxxxxxxxx> for Ubuntu systems (but may be used by others). Permission is granted to copy, distribute and/or modify this document under the terms of the GNU General Public License, Version 3 published by the Free Software Foundation.
+
+On Debian systems, the complete text of the GNU General Public License can be found in /usr/share/common-licenses/GPL.
=== added file 'man/cloud-publish-tarball.1'
--- man/cloud-publish-tarball.1 1970-01-01 00:00:00 +0000
+++ man/cloud-publish-tarball.1 2012-12-17 12:41:23 +0000
@@ -0,0 +1,64 @@
+.TH cloud\-publish\-tarball 1 "17 Feb 2010" cloud\-utils "cloud\-utils"
+.SH NAME
+cloud\-publish\-tarball \- publish a cloud archive
+
+.SH SYNOPSIS
+.BI "cloud\-publish\-tarball [OPTIONS] TARFILE BUCKET [ARCH]"
+
+.SH OPTIONS
+.TP
+.B -k | --kernel k
+Use previously registered kernel with id 'k'; specify 'none' for no kernel
+.TP
+.B -K | --kernel-file f
+Bundle, upload, use file 'f' as kernel
+.TP
+.B -q | --quiet
+Be quiet, only output produced image ids
+.TP
+.B -r | --ramdisk r
+Use previously registered ramdisk with id 'r'; specify 'none' for no ramdisk
+.TP
+.B -R | --ramdisk-file f
+Bundle, upload, use file 'f' as ramdisk
+.TP
+.B --rename-image i
+rename the image file before publishing (publish to <bucket>/i)
+.TP
+.B --rename-kernel k
+rename the kernel file before publishing (publish to <bucket>/k)
+.TP
+.B --rename-ramdisk r
+rename the ramdisk file before publishing (publish to <bucket>/r)
+.TP
+.B --save-downloaded d
+save the download image to directory 'd' (applicable only if TARBALL is an URL)
+
+.SH ARGUMENTS
+.TP
+.B TARFILE
+Target archive. This may be a URL.
+.TP
+.B BUCKET
+Target bucket
+.TP
+.B ARCH
+Image architecture; if not provided, an attempt is made to guess it from the file name
+
+.SH ENVIRONMENT
+Behavior of this program can be modified by environment variables as described below:
+
+.TP
+.B EC2PRE
+Underlying tools will be invoked using this prefix. The default is 'euca-', which results in using tools like 'euca-register' and 'euca-bundle-image'. To use the ec2-api-tools or ec2-ami-tools, set EC2PRE='ec2-'
+
+.SH DESCRIPTION
+Register an Ubuntu Cloud image tarball, as published at http://cloud-images.ubuntu.com
+
+.SH EXAMPLES
+ cloud\-publish\-tarball lucid-cloud-i386.tar.gz my-lucid-bucket i386
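+
+TARFILE may also be given as a URL (the path shown here is hypothetical),
+in which case it is downloaded before publishing:
+ cloud\-publish\-tarball http://cloud-images.ubuntu.com/path/to/lucid-cloud-i386.tar.gz my-lucid-bucket i386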
+
+.SH AUTHOR
+This manpage was written by Dustin Kirkland <kirkland@xxxxxxxxxxxxx> for Ubuntu systems (but may be used by others). Permission is granted to copy, distribute and/or modify this document under the terms of the GNU General Public License, Version 3 published by the Free Software Foundation.
+
+On Debian systems, the complete text of the GNU General Public License can be found in /usr/share/common-licenses/GPL.
=== added file 'man/cloud-run-instances.1'
--- man/cloud-run-instances.1 1970-01-01 00:00:00 +0000
+++ man/cloud-run-instances.1 2012-12-17 12:41:23 +0000
@@ -0,0 +1,24 @@
+.TH cloud-run-instances 1 "24 Mar 2010" cloud-utils "cloud-utils"
+.SH NAME
+\fBcloud-run-instances\fP - wrapper for euca-run-instances that supports an option for injecting public ssh keys retrievable from \fILaunchpad.net\fP
+
+.SH SYNOPSIS
+.BI "cloud-run-instances [-l|--launchpad-id lp_id_1,lp_id_2,lp_id_3] [euca-run-instances options]
+
+.SH DESCRIPTION
+This program is a wrapper script for \fBeuca-run-instances\fP(1) that takes one additional option, \fB-l|--launchpad-id\fP.
+
+With this option, a user can specify a comma-separated list of \fILaunchpad.net\fP usernames.
+
+Once the instance is booted, the cloud-init boot script will retrieve the public ssh keys of the specified users from Launchpad.net using \fBssh-import-lp-id\fP(1).
+
+All other options besides \fB-l|--launchpad-id\fP are simply passed on to \fBeuca-run-instances\fP(1).
+
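+.SH EXAMPLES
+A hypothetical invocation that imports the public ssh keys of two
+Launchpad users into the new instance (the image id is a placeholder):
+ cloud\-run\-instances --launchpad-id kirkland,smoser ami-abc12345
+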
+.SH SEE ALSO
+\fBeuca-run-instances\fP(1), \fBssh-import-lp-id\fP(1)
+
+.SH AUTHOR
+This manpage and the utility were written by Dustin Kirkland <kirkland@xxxxxxxxxxxxx> for Ubuntu systems (but may be used by others). Permission is granted to copy, distribute and/or modify this document under the terms of the GNU General Public License, Version 3 published by the Free Software Foundation.
+
+On Debian systems, the complete text of the GNU General Public License can be found in /usr/share/common-licenses/GPL.
+
=== added file 'man/growpart.1'
--- man/growpart.1 1970-01-01 00:00:00 +0000
+++ man/growpart.1 2012-12-17 12:41:23 +0000
@@ -0,0 +1,56 @@
+.TH growpart 1 "25 Feb 2011" cloud\-utils "cloud\-utils"
+.SH NAME
+growpart \- extend a partition in a partition table to fill available space
+
+.SH SYNOPSIS
+.BI "growpart [OPTIONS] DISK PARTITION\-NUMBER"
+
+.nf
+growpart partition
+   rewrite partition table so that partition takes up all the space it can
+   options:
+    -h | --help       print usage and exit
+    --fudge F         if part could be resized, but change would be
+                      less than 'F', do not resize (default: 20480)
+    -N | --dry-run    only report what would be done, show new 'sfdisk -d'
+    -v | --verbose    increase verbosity / debug
+.fi
+
+.SH OPTIONS
+.TP
+.B -h | --help
+Show usage and exit
+.TP
+.B -N | --dry-run
+Only report what would be done
+.TP
+.B --fudge COUNT
+Only modify the partition table if the given partition would grow by more than COUNT 512-byte sectors. The default value is 20480 (20480 x 512 bytes = 10 MiB), meaning no change is made unless more than 10 MiB of space would be gained.
+.TP
+.B -v | --verbose
+Give more information to stderr.
+
+.SH ARGUMENTS
+.TP
+.B DISK
+The device or disk image to operate on
+.TP
+.B PARTITION\-NUMBER
+The number of the partition to resize (counting from 1)
+
+.SH DESCRIPTION
+Rewrite the partition table of a disk or disk image so that the given partition takes up as much space as it can. After running, the partition will end at the end of the disk, or at the beginning of the next partition.
+
+.SH EXAMPLES
+.TP
+Extend partition 1 in /dev/sda to fill empty space until the end of the disk or the next partition
+ growpart /dev/sda 1
+.TP
+Extend partition 2 in disk image my.image.
+ growpart my.image 2
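+.TP
+Preview the resize of partition 1 on /dev/sda without applying it, via the documented \-\-dry\-run option
+ growpart --dry-run /dev/sda 1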
+
+.SH AUTHOR
+This manpage was written by Scott Moser <smoser@xxxxxxxxxxxxx> for Ubuntu systems (but may be used by others). Permission is granted to copy, distribute and/or modify this document under the terms of the GNU General Public License, Version 3 published by the Free Software Foundation.
+
+On Debian systems, the complete text of the GNU General Public License can be found in /usr/share/common-licenses/GPL.
=== added file 'man/resize-part-image.1'
--- man/resize-part-image.1 1970-01-01 00:00:00 +0000
+++ man/resize-part-image.1 2012-12-17 12:41:23 +0000
@@ -0,0 +1,33 @@
+.TH resize\-part\-image 1 "17 Feb 2010" cloud\-utils "cloud\-utils"
+.SH NAME
+resize\-part\-image \- resize a partition image
+
+.SH SYNOPSIS
+.BI "resize\-part\-image [ options ] IMAGE SIZE [OUTPUT]"
+
+.SH OPTIONS
+.TP
+.B -v | --verbose
+show output of resize and fsck commands
+
+.SH ARGUMENTS
+.TP
+.B IMAGE
+Target file for resizing
+.TP
+.B SIZE
+New size is specified per resize2fs(8), e.g. "1G" for 1 gigabyte
+.TP
+.B [OUTPUT]
+If an OUTPUT filename is given, do not modify IMAGE; instead create the new file OUTPUT
+
+.SH DESCRIPTION
+Resize a partition image to a new size.
+
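+.SH EXAMPLES
+Resize my.img to 1 gigabyte in place, or (second, illustrative form) leave
+my.img untouched and write the resized copy to new.img:
+ resize\-part\-image my.img 1G
+ resize\-part\-image my.img 1G new.img
+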
+.SH "SEE ALSO"
+.PD 0
+.TP
+\fBresize2fs\fP(8)
+
+.SH AUTHOR
+This manpage was written by Dustin Kirkland <kirkland@xxxxxxxxxxxxx> for Ubuntu systems (but may be used by others). Permission is granted to copy, distribute and/or modify this document under the terms of the GNU General Public License, Version 3 published by the Free Software Foundation.
+
+On Debian systems, the complete text of the GNU General Public License can be found in /usr/share/common-licenses/GPL.
=== added file 'man/write-mime-multipart.1'
--- man/write-mime-multipart.1 1970-01-01 00:00:00 +0000
+++ man/write-mime-multipart.1 2012-12-17 12:41:23 +0000
@@ -0,0 +1,37 @@
+.TH write-mime-multipart 1 "11 Jan 2011" cloud-utils "cloud-utils"
+.SH NAME
+\fBwrite-mime-multipart\fP - utility for creating mime-multipart files, typically for use as user data with cloud-init.
+
+.SH SYNOPSIS
+.BI "write-mime-multipart [options]
+
+.SH DESCRIPTION
+This program helps create data that can be consumed by cloud-init, which reads mime multipart content as user-data.
+
+.SH OPTIONS
+.TP
+.B -h | --help
+Show usage message
+
+.TP
+.B -o | --output FILE
+write output to FILE [default is stdout]
+
+.TP
+.B -z | --gzip
+compress output with gzip
+
+.TP
+.B -d | --default DEFTYPE
+assume mime-type DEFTYPE for any input file that does not specify one [default is 'text/plain']
+
+.TP
+.B --delim DELIM
+use delimiter DELIM [default is ':']
+
+.SH EXAMPLES
+ write\-mime\-multipart --gzip --output=combined-userdata.txt boothook.txt:text/cloud-boothook include-file.txt:text/x-include-url
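+
+A hypothetical variant using \-\-delim to change the separator between
+filename and mime-type (here ';'):
+ write\-mime\-multipart --delim ';' --output=combined-userdata.txt 'boothook.txt;text/cloud-boothook'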
+
+.SH AUTHOR
+This manpage and the utility were written by Scott Moser <scott.moser@xxxxxxxxxxxxx>. Permission is granted to copy, distribute and/or modify this document under the terms of the GNU General Public License, Version 3 published by the Free Software Foundation.
+
+On Debian systems, the complete text of the GNU General Public License can be found in /usr/share/common-licenses/GPL.
=== added directory 'tools'
=== renamed directory 'tools' => 'tools.moved'
=== added file 'tools/build-deb'
--- tools/build-deb 1970-01-01 00:00:00 +0000
+++ tools/build-deb 2012-12-17 12:41:23 +0000
@@ -0,0 +1,77 @@
+#!/bin/sh
+
+TEMP_D=""
+
+fail() { echo "$@" 1>&2; exit 1; }
+cleanup() {
+ [ -z "$TEMP_D" ] || rm -Rf "$TEMP_D"
+}
+
+if [ "$1" = "-h" -o "$1" = "--help" ]; then
+ cat <<EOF
+Usage: ${0##*/}
+ build a deb of cloud-utils directory
+ any options are passed straight through to debuild
+
+ Example:
+ * ${0##*/} -us -uc
+
+  It's not significantly different from what you'd get by modifying
+  the debian/changelog to contain the current revno and then running
+  'debuild --no-tgz-check'.
+EOF
+exit
+fi
+
+bname=${0##*/}
+
+start_d=$PWD
+top_d=$(cd "$(dirname "${0}")"/.. && pwd)
+
+# grab the first line of the changelog
+line1=$(head -n 1 "${top_d}/debian/changelog")
+# extract the version string from between the parentheses,
+# resulting in something like: 0.25~trunk~bzrREVNO-1
+clogver_o=$(echo "$line1" | sed 's,.*(\([^)]*\)).*,\1,')
+
+revno=$(bzr revno) || fail "failed to get revno"
+clogver=$(echo "$clogver_o" | sed "s,REVNO,$revno,")
+
+# the upstream version is the changelog version with the trailing
+# '-N' debian revision removed
+uver=${clogver%-[0-9]}
+
+TEMP_D=$(mktemp -d "${TMPDIR:-/tmp}/${bname}.XXXXXX")
+
+trap cleanup EXIT
+
+echo "building upstream version $uver, debian ver=${clogver##*-}"
+
+dir="cloud-utils-$uver"
+cp -a "${top_d}" "${TEMP_D}/$dir" ||
+ fail "failed to copy ${top_d}"
+
+cd "$TEMP_D"
+
+sed -i "s,REVNO,$revno," "$dir/debian/changelog" ||
+ fail "failed to replace REVNO in debian/changelog"
+
+( cd "${dir}/debian" &&
+ rm -Rf *debhelper* *.substvars cloud-utils/ files stamp-* ) ||
+ fail "failed to clean out debian dir"
+
+tarball="cloud-utils_$uver.orig.tar.gz"
+tar -czf "$tarball" "$dir" ||
+ fail "failed to create ${tarball} from $dir in tempdir"
+
+echo "created cloud-utils_$uver.orig.tar.gz"
+
+cd "$dir"
+debuild "$@" || fail "debuild failed"
+
+cd "$TEMP_D"
+for f in *; do
+ [ -f "$f" ] || continue
+ cp "$f" "$start_d/" || fail "failed copy $f"
+ echo "wrote $f"
+done
+exit
=== added file 'tools/make-dist-tarball'
--- tools/make-dist-tarball 1970-01-01 00:00:00 +0000
+++ tools/make-dist-tarball 2012-12-17 12:41:23 +0000
@@ -0,0 +1,27 @@
+#!/bin/sh
+
+name="cloud-utils"
+Usage() {
+ cat <<EOF
+Usage: ${0##*/} version
+ make a tarball of 'version'
+ must be in a bzr directory, and 'version' must be a tag
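+  Example: ${0##*/} 0.27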
+
+EOF
+}
+
+topdir=$PWD
+tag=${1}
+
+[ -n "$tag" ] || { Usage 1>&2 ; exit 1; }
+
+tmpd=$(mktemp -d)
+trap "rm -Rf '${tmpd}'" 0
+
+out=${topdir}/$name-${tag}.tar.gz
+
+cd "${tmpd}" &&
+   bzr branch -r "tag:${tag}" "${topdir}" "./${name}-${tag}" &&
+   rm -rf "${name}-${tag}/debian" &&
+   tar czf "${out}" "${name}-${tag}/" --exclude "${name}-${tag}/.bzr" &&
+   echo "wrote ${out}"
=== added file 'ubuntu-cloudimg-keyring.gpg.b64'
--- ubuntu-cloudimg-keyring.gpg.b64 1970-01-01 00:00:00 +0000
+++ ubuntu-cloudimg-keyring.gpg.b64 2012-12-17 12:41:23 +0000
@@ -0,0 +1,24 @@
+# This is a keyring containing the public key for the cloud images
+# pub 4096R/7DB87C81 2009-09-15
+# uid UEC Image Automatic Signing Key <cdimage@xxxxxxxxxx>
+mQINBEqwKTUBEAC8V01JGfeYVVlwlcr0dmwF8n+We/lbxwArjR/gZlH7/MJEZnALQHUrDTpD3Skf
+bsjQgeNt8eS3Jyzoc2r3t2nos4rXPH4kIzAvtqslz6Ns4ZYjoHVkVC2oV8vYbxER+3/lDjTWVII7
+omtDVvqH33QlqYZ8+bQbs21lZb2ROJIQCiH0YzaqYR0I2SEykBL873V0ygdyW/mCMwniXTLUyGAU
+V4/28NOzw/6LGvJElJe4UqwQxl/aXtPIJjPka8LA8+nDi5/u6WEgDWgBhLEHvQG1BNdttm3WCjbu
+4zS3mNfNBidTamZfOaMJUZVYxhOB5kNQqyR4eYqFK/U+305eLrZ05ocadsmcQWkHQVbgt+g4yyFN
+l56N5AirkFjVtfArkUJfINGgJ7gkSeyqTJK24f33vsIpPwRQ5eFn7H4PwGc0Piym73YLJnlR94LN
+EG0ceOJ7u1r+WuaesIj+lKIZsG/rRLf7besaMCCtPcimVgEAmBoIdpTpdP3aa54w/dvfSwW47mGY
+14G5PBk/0MDy2Y5HOeXat3RXpGZZFh7zbwSQ93RhYH3bNPNd5lMu3ZRkYX19FWxoLCi5lx4K3flY
+hiolZ5i4KxJCoGRobsKjm74Xv2QlvCXYyAk5BnAQCsu5hKZ1sOhQADCcKz1Zbg8JRc3vmelaJ/VF
+vHTzs4hJTUvOowARAQABtDRVRUMgSW1hZ2UgQXV0b21hdGljIFNpZ25pbmcgS2V5IDxjZGltYWdl
+QHVidW50dS5jb20+iQI3BBMBAgAhAhsDAh4BAheABQJKsColBQsJCAcDBRUKCQgLBRYCAwEAAAoJ
+EBpdbEx9uHyBLicP/jXjfLhs255oT8gmvBXS9WDGSdpPiaMxd0CHEyHuT/XdWsoUUYXAPAti8Fyk
+2K99mze+n4SLCRRJhxqYlcpVy2icc41/VkKI9d/pF4t54RM5TledYpKVV7xTgoUHZpuL2mWzaT61
+MzRAxUqqaU42/xSLxLt/noryPHo57IghJXbAcmgLhFT0fZmtDy9cD4IBvurZF6cRuMJXjxZmssnt
+MHsFZl4PEC3oR/WgJA37OrjMVej9r+JA909vr5K/UO+P2gWYOH/2CnGDlaTu72wUrLf6QV5jMyKc
+6+G7fw5bTJd9lE8Km2H+4z9e+t7IOv9oxojvESu27exD4LU7SjzZloYnmlTCsdHwgSJVnf+lqXoZ
+eUNT9Tmku8VzwCoExTwo9exaJUHeO8ABkfsJVmry40ovzQAHh427+6NpxgkWErVocnm54LPIQucZ
+YJrg08s/azRzCjlsYChsaWMvGlMZQo52MuLvETHVPtSggP7sLeIOlS+8tO1ykSJY65j8AHYBV6hb
+9EOjWmqpx33GXn8AyCPiMs9/pmeOI0V6YMm6HCLAwZb+rRS6gcyt9dlWyLU0QLlpmwHSOVJMv2rn
+NCUtz6pb8y/o9AN2Z48RpH9C9cfv4dAfbtYn7uTd+M3gk4xyURREg2xuDnraYFs6cZ60/bSy63Gx
+Tyi/cCc0S57GgtOKsAIAAw==